1 /*- 2 * Copyright (c) 2016-2020 Netflix, Inc. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 */ 26 27 #include <sys/cdefs.h> 28 #include "opt_inet.h" 29 #include "opt_inet6.h" 30 #include "opt_ipsec.h" 31 #include "opt_ratelimit.h" 32 #include "opt_kern_tls.h" 33 #if defined(INET) || defined(INET6) 34 #include <sys/param.h> 35 #include <sys/arb.h> 36 #include <sys/module.h> 37 #include <sys/kernel.h> 38 #ifdef TCP_HHOOK 39 #include <sys/hhook.h> 40 #endif 41 #include <sys/lock.h> 42 #include <sys/malloc.h> 43 #include <sys/lock.h> 44 #include <sys/mutex.h> 45 #include <sys/mbuf.h> 46 #include <sys/proc.h> /* for proc0 declaration */ 47 #include <sys/socket.h> 48 #include <sys/socketvar.h> 49 #include <sys/sysctl.h> 50 #include <sys/systm.h> 51 #ifdef STATS 52 #include <sys/qmath.h> 53 #include <sys/tree.h> 54 #include <sys/stats.h> /* Must come after qmath.h and tree.h */ 55 #else 56 #include <sys/tree.h> 57 #endif 58 #include <sys/refcount.h> 59 #include <sys/queue.h> 60 #include <sys/tim_filter.h> 61 #include <sys/smp.h> 62 #include <sys/kthread.h> 63 #include <sys/kern_prefetch.h> 64 #include <sys/protosw.h> 65 #ifdef TCP_ACCOUNTING 66 #include <sys/sched.h> 67 #include <machine/cpu.h> 68 #endif 69 #include <vm/uma.h> 70 71 #include <net/route.h> 72 #include <net/route/nhop.h> 73 #include <net/vnet.h> 74 75 #define TCPSTATES /* for logging */ 76 77 #include <netinet/in.h> 78 #include <netinet/in_kdtrace.h> 79 #include <netinet/in_pcb.h> 80 #include <netinet/ip.h> 81 #include <netinet/ip_icmp.h> /* required for icmp_var.h */ 82 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */ 83 #include <netinet/ip_var.h> 84 #include <netinet/ip6.h> 85 #include <netinet6/in6_pcb.h> 86 #include <netinet6/ip6_var.h> 87 #include <netinet/tcp.h> 88 #define TCPOUTFLAGS 89 #include <netinet/tcp_fsm.h> 90 #include <netinet/tcp_seq.h> 91 #include <netinet/tcp_timer.h> 92 #include <netinet/tcp_var.h> 93 #include <netinet/tcp_log_buf.h> 94 #include <netinet/tcp_syncache.h> 95 #include <netinet/tcp_hpts.h> 96 #include <netinet/tcp_ratelimit.h> 97 #include <netinet/tcp_accounting.h> 98 #include <netinet/tcpip.h> 99 #include <netinet/cc/cc.h> 100 #include <netinet/cc/cc_newreno.h> 101 #include <netinet/tcp_fastopen.h> 102 #include <netinet/tcp_lro.h> 103 
#ifdef NETFLIX_SHARED_CWND 104 #include <netinet/tcp_shared_cwnd.h> 105 #endif 106 #ifdef TCP_OFFLOAD 107 #include <netinet/tcp_offload.h> 108 #endif 109 #ifdef INET6 110 #include <netinet6/tcp6_var.h> 111 #endif 112 #include <netinet/tcp_ecn.h> 113 114 #include <netipsec/ipsec_support.h> 115 116 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 117 #include <netipsec/ipsec.h> 118 #include <netipsec/ipsec6.h> 119 #endif /* IPSEC */ 120 121 #include <netinet/udp.h> 122 #include <netinet/udp_var.h> 123 #include <machine/in_cksum.h> 124 125 #ifdef MAC 126 #include <security/mac/mac_framework.h> 127 #endif 128 #include "sack_filter.h" 129 #include "tcp_rack.h" 130 #include "tailq_hash.h" 131 #include "rack_bbr_common.h" 132 133 uma_zone_t rack_zone; 134 uma_zone_t rack_pcb_zone; 135 136 #ifndef TICKS2SBT 137 #define TICKS2SBT(__t) (tick_sbt * ((sbintime_t)(__t))) 138 #endif 139 140 VNET_DECLARE(uint32_t, newreno_beta); 141 VNET_DECLARE(uint32_t, newreno_beta_ecn); 142 #define V_newreno_beta VNET(newreno_beta) 143 #define V_newreno_beta_ecn VNET(newreno_beta_ecn) 144 145 #define M_TCPFSB __CONCAT(M_TCPFSB, STACKNAME) 146 #define M_TCPDO __CONCAT(M_TCPDO, STACKNAME) 147 148 MALLOC_DEFINE(M_TCPFSB, "tcp_fsb_" __XSTRING(STACKNAME), "TCP fast send block"); 149 MALLOC_DEFINE(M_TCPDO, "tcp_do_" __XSTRING(STACKNAME), "TCP deferred options"); 150 MALLOC_DEFINE(M_TCPPCM, "tcp_pcm_" __XSTRING(STACKNAME), "TCP PCM measurement information"); 151 152 struct sysctl_ctx_list rack_sysctl_ctx; 153 struct sysctl_oid *rack_sysctl_root; 154 155 #define CUM_ACKED 1 156 #define SACKED 2 157 158 /* 159 * The RACK module incorporates a number of 160 * TCP ideas that have been put out into the IETF 161 * over the last few years: 162 * - Matt Mathis's Rate Halving which slowly drops 163 * the congestion window so that the ack clock can 164 * be maintained during a recovery. 165 * - Yuchung Cheng's RACK TCP (for which its named) that 166 * will stop us using the number of dup acks and instead 167 * use time as the gage of when we retransmit. 168 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft 169 * of Dukkipati et.al. 170 * RACK depends on SACK, so if an endpoint arrives that 171 * cannot do SACK the state machine below will shuttle the 172 * connection back to using the "default" TCP stack that is 173 * in FreeBSD. 174 * 175 * To implement RACK the original TCP stack was first decomposed 176 * into a functional state machine with individual states 177 * for each of the possible TCP connection states. The do_segment 178 * functions role in life is to mandate the connection supports SACK 179 * initially and then assure that the RACK state matches the conenction 180 * state before calling the states do_segment function. Each 181 * state is simplified due to the fact that the original do_segment 182 * has been decomposed and we *know* what state we are in (no 183 * switches on the state) and all tests for SACK are gone. This 184 * greatly simplifies what each state does. 185 * 186 * TCP output is also over-written with a new version since it 187 * must maintain the new rack scoreboard. 
188 * 189 */ 190 static int32_t rack_tlp_thresh = 1; 191 static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs w-out new data */ 192 static int32_t rack_tlp_use_greater = 1; 193 static int32_t rack_reorder_thresh = 2; 194 static int32_t rack_reorder_fade = 60000000; /* 0 - never fade, def 60,000,000 195 * - 60 seconds */ 196 static uint32_t rack_pcm_every_n_rounds = 100; 197 static uint32_t rack_pcm_blast = 0; 198 static uint32_t rack_pcm_is_enabled = 1; 199 static uint8_t rack_ssthresh_rest_rto_rec = 0; /* Do we restore ssthresh when we have rec -> rto -> rec */ 200 201 static uint32_t rack_gp_gain_req = 1200; /* Amount percent wise required to gain to record a round has "gaining" */ 202 static uint32_t rack_rnd_cnt_req = 0x10005; /* Default number of rounds if we are below rack_gp_gain_req where we exit ss */ 203 204 205 static int32_t rack_rxt_scoreboard_clear_thresh = 2; 206 static int32_t rack_dnd_default = 0; /* For rr_conf = 3, what is the default for dnd */ 207 static int32_t rack_rxt_controls = 0; 208 static int32_t rack_fill_cw_state = 0; 209 static uint8_t rack_req_measurements = 1; 210 /* Attack threshold detections */ 211 static uint32_t rack_highest_sack_thresh_seen = 0; 212 static uint32_t rack_highest_move_thresh_seen = 0; 213 static uint32_t rack_merge_out_sacks_on_attack = 0; 214 static int32_t rack_enable_hw_pacing = 0; /* Due to CCSP keep it off by default */ 215 static int32_t rack_hw_pace_extra_slots = 0; /* 2 extra MSS time betweens */ 216 static int32_t rack_hw_rate_caps = 0; /* 1; */ 217 static int32_t rack_hw_rate_cap_per = 0; /* 0 -- off */ 218 static int32_t rack_hw_rate_min = 0; /* 1500000;*/ 219 static int32_t rack_hw_rate_to_low = 0; /* 1200000; */ 220 static int32_t rack_hw_up_only = 0; 221 static int32_t rack_stats_gets_ms_rtt = 1; 222 static int32_t rack_prr_addbackmax = 2; 223 static int32_t rack_do_hystart = 0; 224 static int32_t rack_apply_rtt_with_reduced_conf = 0; 225 static int32_t rack_hibeta_setting = 0; 226 static int32_t rack_default_pacing_divisor = 250; 227 static uint16_t rack_pacing_min_seg = 0; 228 static int32_t rack_timely_off = 0; 229 230 static uint32_t sad_seg_size_per = 800; /* 80.0 % */ 231 static int32_t rack_pkt_delay = 1000; 232 static int32_t rack_send_a_lot_in_prr = 1; 233 static int32_t rack_min_to = 1000; /* Number of microsecond min timeout */ 234 static int32_t rack_verbose_logging = 0; 235 static int32_t rack_ignore_data_after_close = 1; 236 static int32_t rack_enable_shared_cwnd = 1; 237 static int32_t rack_use_cmp_acks = 1; 238 static int32_t rack_use_fsb = 1; 239 static int32_t rack_use_rfo = 1; 240 static int32_t rack_use_rsm_rfo = 1; 241 static int32_t rack_max_abc_post_recovery = 2; 242 static int32_t rack_client_low_buf = 0; 243 static int32_t rack_dsack_std_based = 0x3; /* bit field bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */ 244 static int32_t rack_bw_multipler = 0; /* Limit on fill cw's jump up to be this x gp_est */ 245 #ifdef TCP_ACCOUNTING 246 static int32_t rack_tcp_accounting = 0; 247 #endif 248 static int32_t rack_limits_scwnd = 1; 249 static int32_t rack_enable_mqueue_for_nonpaced = 0; 250 static int32_t rack_hybrid_allow_set_maxseg = 0; 251 static int32_t rack_disable_prr = 0; 252 static int32_t use_rack_rr = 1; 253 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/ca)? 
*/ 254 static int32_t rack_persist_min = 250000; /* 250usec */ 255 static int32_t rack_persist_max = 2000000; /* 2 Second in usec's */ 256 static int32_t rack_honors_hpts_min_to = 1; /* Do we honor the hpts minimum time out for pacing timers */ 257 static uint32_t rack_max_reduce = 10; /* Percent we can reduce slot by */ 258 static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */ 259 static int32_t rack_limit_time_with_srtt = 0; 260 static int32_t rack_autosndbuf_inc = 20; /* In percentage form */ 261 static int32_t rack_enobuf_hw_boost_mult = 0; /* How many times the hw rate we boost slot using time_between */ 262 static int32_t rack_enobuf_hw_max = 12000; /* 12 ms in usecs */ 263 static int32_t rack_enobuf_hw_min = 10000; /* 10 ms in usecs */ 264 static int32_t rack_hw_rwnd_factor = 2; /* How many max_segs the rwnd must be before we hold off sending */ 265 static int32_t rack_hw_check_queue = 0; /* Do we always pre-check queue depth of a hw queue */ 266 static int32_t rack_full_buffer_discount = 10; 267 /* 268 * Currently regular tcp has a rto_min of 30ms 269 * the backoff goes 12 times so that ends up 270 * being a total of 122.850 seconds before a 271 * connection is killed. 272 */ 273 static uint32_t rack_def_data_window = 20; 274 static uint32_t rack_goal_bdp = 2; 275 static uint32_t rack_min_srtts = 1; 276 static uint32_t rack_min_measure_usec = 0; 277 static int32_t rack_tlp_min = 10000; /* 10ms */ 278 static int32_t rack_rto_min = 30000; /* 30,000 usec same as main freebsd */ 279 static int32_t rack_rto_max = 4000000; /* 4 seconds in usec's */ 280 static const int32_t rack_free_cache = 2; 281 static int32_t rack_hptsi_segments = 40; 282 static int32_t rack_rate_sample_method = USE_RTT_LOW; 283 static int32_t rack_pace_every_seg = 0; 284 static int32_t rack_delayed_ack_time = 40000; /* 40ms in usecs */ 285 static int32_t rack_slot_reduction = 4; 286 static int32_t rack_wma_divisor = 8; /* For WMA calculation */ 287 static int32_t rack_cwnd_block_ends_measure = 0; 288 static int32_t rack_rwnd_block_ends_measure = 0; 289 static int32_t rack_def_profile = 0; 290 291 static int32_t rack_lower_cwnd_at_tlp = 0; 292 static int32_t rack_always_send_oldest = 0; 293 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE; 294 295 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */ 296 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */ 297 static uint16_t rack_per_of_gp_rec = 200; /* 200 % of bw */ 298 299 /* Probertt */ 300 static uint16_t rack_per_of_gp_probertt = 60; /* 60% of bw */ 301 static uint16_t rack_per_of_gp_lowthresh = 40; /* 40% is bottom */ 302 static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */ 303 static uint16_t rack_atexit_prtt_hbp = 130; /* Clamp to 130% on exit prtt if highly buffered path */ 304 static uint16_t rack_atexit_prtt = 130; /* Clamp to 100% on exit prtt if non highly buffered path */ 305 306 static uint32_t rack_max_drain_wait = 2; /* How man gp srtt's before we give up draining */ 307 static uint32_t rack_must_drain = 1; /* How many GP srtt's we *must* wait */ 308 static uint32_t rack_probertt_use_min_rtt_entry = 1; /* Use the min to calculate the goal else gp_srtt */ 309 static uint32_t rack_probertt_use_min_rtt_exit = 0; 310 static uint32_t rack_probe_rtt_sets_cwnd = 0; 311 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */ 312 static uint32_t rack_time_between_probertt = 9600000; /* 9.6 sec in usecs */ 313 static uint32_t 
rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt periods does probe-rtt last top fraction */ 314 static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last bottom fraction */ 315 static uint32_t rack_min_probertt_hold = 40000; /* Equal to delayed ack time */ 316 static uint32_t rack_probertt_filter_life = 10000000; 317 static uint32_t rack_probertt_lower_within = 10; 318 static uint32_t rack_min_rtt_movement = 250000; /* Must move at least 250ms (in microseconds) to count as a lowering */ 319 static int32_t rack_pace_one_seg = 0; /* Shall we pace for less than 1.4Meg 1MSS at a time */ 320 static int32_t rack_probertt_clear_is = 1; 321 static int32_t rack_max_drain_hbp = 1; /* Extra drain times gpsrtt for highly buffered paths */ 322 static int32_t rack_hbp_thresh = 3; /* what is the divisor max_rtt/min_rtt to decided a hbp */ 323 324 /* Part of pacing */ 325 static int32_t rack_max_per_above = 30; /* When we go to increment stop if above 100+this% */ 326 327 /* Timely information: 328 * 329 * Here we have various control parameters on how 330 * timely may change the multiplier. rack_gain_p5_ub 331 * is associated with timely but not directly influencing 332 * the rate decision like the other variables. It controls 333 * the way fill-cw interacts with timely and caps how much 334 * timely can boost the fill-cw b/w. 335 * 336 * The other values are various boost/shrink numbers as well 337 * as potential caps when adjustments are made to the timely 338 * gain (returned by rack_get_output_gain(). Remember too that 339 * the gain returned can be overriden by other factors such as 340 * probeRTT as well as fixed-rate-pacing. 341 */ 342 static int32_t rack_gain_p5_ub = 250; 343 static int32_t rack_gp_per_bw_mul_up = 2; /* 2% */ 344 static int32_t rack_gp_per_bw_mul_down = 4; /* 4% */ 345 static int32_t rack_gp_rtt_maxmul = 3; /* 3 x maxmin */ 346 static int32_t rack_gp_rtt_minmul = 1; /* minrtt + (minrtt/mindiv) is lower rtt */ 347 static int32_t rack_gp_rtt_mindiv = 4; /* minrtt + (minrtt * minmul/mindiv) is lower rtt */ 348 static int32_t rack_gp_decrease_per = 80; /* Beta value of timely decrease (.8) = 80 */ 349 static int32_t rack_gp_increase_per = 2; /* 2% increase in multiplier */ 350 static int32_t rack_per_lower_bound = 50; /* Don't allow to drop below this multiplier */ 351 static int32_t rack_per_upper_bound_ss = 0; /* Don't allow SS to grow above this */ 352 static int32_t rack_per_upper_bound_ca = 0; /* Don't allow CA to grow above this */ 353 static int32_t rack_do_dyn_mul = 0; /* Are the rack gp multipliers dynamic */ 354 static int32_t rack_gp_no_rec_chg = 1; /* Prohibit recovery from reducing it's multiplier */ 355 static int32_t rack_timely_dec_clear = 6; /* Do we clear decrement count at a value (6)? */ 356 static int32_t rack_timely_max_push_rise = 3; /* One round of pushing */ 357 static int32_t rack_timely_max_push_drop = 3; /* Three round of pushing */ 358 static int32_t rack_timely_min_segs = 4; /* 4 segment minimum */ 359 static int32_t rack_use_max_for_nobackoff = 0; 360 static int32_t rack_timely_int_timely_only = 0; /* do interim timely's only use the timely algo (no b/w changes)? 
*/ 361 static int32_t rack_timely_no_stopping = 0; 362 static int32_t rack_down_raise_thresh = 100; 363 static int32_t rack_req_segs = 1; 364 static uint64_t rack_bw_rate_cap = 0; 365 static uint64_t rack_fillcw_bw_cap = 3750000; /* Cap fillcw at 30Mbps */ 366 367 368 /* Rack specific counters */ 369 counter_u64_t rack_saw_enobuf; 370 counter_u64_t rack_saw_enobuf_hw; 371 counter_u64_t rack_saw_enetunreach; 372 counter_u64_t rack_persists_sends; 373 counter_u64_t rack_persists_acks; 374 counter_u64_t rack_persists_loss; 375 counter_u64_t rack_persists_lost_ends; 376 counter_u64_t rack_total_bytes; 377 #ifdef INVARIANTS 378 counter_u64_t rack_adjust_map_bw; 379 #endif 380 /* Tail loss probe counters */ 381 counter_u64_t rack_tlp_tot; 382 counter_u64_t rack_tlp_newdata; 383 counter_u64_t rack_tlp_retran; 384 counter_u64_t rack_tlp_retran_bytes; 385 counter_u64_t rack_to_tot; 386 counter_u64_t rack_hot_alloc; 387 counter_u64_t rack_to_alloc; 388 counter_u64_t rack_to_alloc_hard; 389 counter_u64_t rack_to_alloc_emerg; 390 counter_u64_t rack_to_alloc_limited; 391 counter_u64_t rack_alloc_limited_conns; 392 counter_u64_t rack_split_limited; 393 counter_u64_t rack_rxt_clamps_cwnd; 394 counter_u64_t rack_rxt_clamps_cwnd_uniq; 395 396 counter_u64_t rack_multi_single_eq; 397 counter_u64_t rack_proc_non_comp_ack; 398 399 counter_u64_t rack_fto_send; 400 counter_u64_t rack_fto_rsm_send; 401 counter_u64_t rack_nfto_resend; 402 counter_u64_t rack_non_fto_send; 403 counter_u64_t rack_extended_rfo; 404 405 counter_u64_t rack_sack_proc_all; 406 counter_u64_t rack_sack_proc_short; 407 counter_u64_t rack_sack_proc_restart; 408 counter_u64_t rack_sack_attacks_detected; 409 counter_u64_t rack_sack_attacks_reversed; 410 counter_u64_t rack_sack_attacks_suspect; 411 counter_u64_t rack_sack_used_next_merge; 412 counter_u64_t rack_sack_splits; 413 counter_u64_t rack_sack_used_prev_merge; 414 counter_u64_t rack_sack_skipped_acked; 415 counter_u64_t rack_ack_total; 416 counter_u64_t rack_express_sack; 417 counter_u64_t rack_sack_total; 418 counter_u64_t rack_move_none; 419 counter_u64_t rack_move_some; 420 421 counter_u64_t rack_input_idle_reduces; 422 counter_u64_t rack_collapsed_win; 423 counter_u64_t rack_collapsed_win_seen; 424 counter_u64_t rack_collapsed_win_rxt; 425 counter_u64_t rack_collapsed_win_rxt_bytes; 426 counter_u64_t rack_try_scwnd; 427 counter_u64_t rack_hw_pace_init_fail; 428 counter_u64_t rack_hw_pace_lost; 429 430 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE]; 431 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE]; 432 433 434 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2))) 435 436 #define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do { \ 437 (tv) = (value) + slop; \ 438 if ((u_long)(tv) < (u_long)(tvmin)) \ 439 (tv) = (tvmin); \ 440 if ((u_long)(tv) > (u_long)(tvmax)) \ 441 (tv) = (tvmax); \ 442 } while (0) 443 444 static void 445 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line); 446 447 static int 448 rack_process_ack(struct mbuf *m, struct tcphdr *th, 449 struct socket *so, struct tcpcb *tp, struct tcpopt *to, 450 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val, int32_t orig_tlen); 451 static int 452 rack_process_data(struct mbuf *m, struct tcphdr *th, 453 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 454 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 455 static void 456 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, 457 uint32_t 
th_ack, uint16_t nsegs, uint16_t type, int32_t recovery); 458 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack); 459 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack, 460 uint8_t limit_type); 461 static struct rack_sendmap * 462 rack_check_recovery_mode(struct tcpcb *tp, 463 uint32_t tsused); 464 static uint32_t 465 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack); 466 static void 467 rack_cong_signal(struct tcpcb *tp, 468 uint32_t type, uint32_t ack, int ); 469 static void rack_counter_destroy(void); 470 static int 471 rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt); 472 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how); 473 static void 474 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override); 475 static void 476 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 477 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos); 478 static void rack_dtor(void *mem, int32_t size, void *arg); 479 static void 480 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 481 uint32_t flex1, uint32_t flex2, 482 uint32_t flex3, uint32_t flex4, 483 uint32_t flex5, uint32_t flex6, 484 uint16_t flex7, uint8_t mod); 485 486 static void 487 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 488 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line, 489 struct rack_sendmap *rsm, uint8_t quality); 490 static struct rack_sendmap * 491 rack_find_high_nonack(struct tcp_rack *rack, 492 struct rack_sendmap *rsm); 493 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack); 494 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm); 495 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged); 496 static int rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt); 497 static void 498 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 499 tcp_seq th_ack, int line, uint8_t quality); 500 static void 501 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm); 502 503 static uint32_t 504 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss); 505 static int32_t rack_handoff_ok(struct tcpcb *tp); 506 static int32_t rack_init(struct tcpcb *tp, void **ptr); 507 static void rack_init_sysctls(void); 508 509 static void 510 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, 511 struct tcphdr *th, int entered_rec, int dup_ack_struck, 512 int *dsack_seen, int *sacks_seen); 513 static void 514 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 515 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t ts, 516 struct rack_sendmap *hintrsm, uint32_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls, int segsiz); 517 518 static uint64_t rack_get_gp_est(struct tcp_rack *rack); 519 520 521 static void 522 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack, 523 struct rack_sendmap *rsm, uint32_t cts); 524 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm); 525 static int32_t rack_output(struct tcpcb *tp); 526 527 static uint32_t 528 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, 529 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm, 530 uint32_t cts, uint32_t segsiz); 531 static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq); 532 static void rack_remxt_tmr(struct tcpcb *tp); 533 static int 
rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt); 534 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack); 535 static int32_t rack_stopall(struct tcpcb *tp); 536 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line); 537 static uint32_t 538 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 539 struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint32_t add_flag, int segsiz); 540 static void 541 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 542 struct rack_sendmap *rsm, uint64_t ts, uint32_t add_flag, int segsiz); 543 static int 544 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 545 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack); 546 static int32_t tcp_addrack(module_t mod, int32_t type, void *data); 547 static int 548 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, 549 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 550 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 551 552 static int 553 rack_do_closing(struct mbuf *m, struct tcphdr *th, 554 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 555 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 556 static int 557 rack_do_established(struct mbuf *m, struct tcphdr *th, 558 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 559 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 560 static int 561 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, 562 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 563 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos); 564 static int 565 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, 566 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 567 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 568 static int 569 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, 570 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 571 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 572 static int 573 rack_do_lastack(struct mbuf *m, struct tcphdr *th, 574 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 575 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 576 static int 577 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, 578 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 579 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 580 static int 581 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, 582 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 583 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 584 static void rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts); 585 struct rack_sendmap * 586 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, 587 uint32_t tsused); 588 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, 589 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt); 590 static void 591 tcp_rack_partialack(struct tcpcb *tp); 592 static int 593 rack_set_profile(struct tcp_rack *rack, int prof); 594 static void 595 rack_apply_deferred_options(struct 
tcp_rack *rack); 596 597 int32_t rack_clear_counter=0; 598 599 static uint64_t 600 rack_get_lt_bw(struct tcp_rack *rack) 601 { 602 struct timeval tv; 603 uint64_t tim, bytes; 604 605 tim = rack->r_ctl.lt_bw_time; 606 bytes = rack->r_ctl.lt_bw_bytes; 607 if (rack->lt_bw_up) { 608 /* Include all the current bytes too */ 609 microuptime(&tv); 610 bytes += (rack->rc_tp->snd_una - rack->r_ctl.lt_seq); 611 tim += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark); 612 } 613 if ((bytes != 0) && (tim != 0)) 614 return ((bytes * (uint64_t)1000000) / tim); 615 else 616 return (0); 617 } 618 619 static void 620 rack_swap_beta_values(struct tcp_rack *rack, uint8_t flex8) 621 { 622 struct sockopt sopt; 623 struct cc_newreno_opts opt; 624 struct newreno old; 625 struct tcpcb *tp; 626 int error, failed = 0; 627 628 tp = rack->rc_tp; 629 if (tp->t_cc == NULL) { 630 /* Tcb is leaving */ 631 return; 632 } 633 rack->rc_pacing_cc_set = 1; 634 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 635 /* Not new-reno we can't play games with beta! */ 636 failed = 1; 637 goto out; 638 639 } 640 if (CC_ALGO(tp)->ctl_output == NULL) { 641 /* Huh, not using new-reno so no swaps.? */ 642 failed = 2; 643 goto out; 644 } 645 /* Get the current values out */ 646 sopt.sopt_valsize = sizeof(struct cc_newreno_opts); 647 sopt.sopt_dir = SOPT_GET; 648 opt.name = CC_NEWRENO_BETA; 649 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 650 if (error) { 651 failed = 3; 652 goto out; 653 } 654 old.beta = opt.val; 655 opt.name = CC_NEWRENO_BETA_ECN; 656 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 657 if (error) { 658 failed = 4; 659 goto out; 660 } 661 old.beta_ecn = opt.val; 662 663 /* Now lets set in the values we have stored */ 664 sopt.sopt_dir = SOPT_SET; 665 opt.name = CC_NEWRENO_BETA; 666 opt.val = rack->r_ctl.rc_saved_beta.beta; 667 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 668 if (error) { 669 failed = 5; 670 goto out; 671 } 672 opt.name = CC_NEWRENO_BETA_ECN; 673 opt.val = rack->r_ctl.rc_saved_beta.beta_ecn; 674 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 675 if (error) { 676 failed = 6; 677 goto out; 678 } 679 /* Save off the values for restoral */ 680 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno)); 681 out: 682 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 683 union tcp_log_stackspecific log; 684 struct timeval tv; 685 struct newreno *ptr; 686 687 ptr = ((struct newreno *)tp->t_ccv.cc_data); 688 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 689 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 690 log.u_bbr.flex1 = ptr->beta; 691 log.u_bbr.flex2 = ptr->beta_ecn; 692 log.u_bbr.flex3 = ptr->newreno_flags; 693 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta; 694 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn; 695 log.u_bbr.flex6 = failed; 696 log.u_bbr.flex7 = rack->gp_ready; 697 log.u_bbr.flex7 <<= 1; 698 log.u_bbr.flex7 |= rack->use_fixed_rate; 699 log.u_bbr.flex7 <<= 1; 700 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; 701 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 702 log.u_bbr.flex8 = flex8; 703 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, error, 704 0, &log, false, NULL, NULL, 0, &tv); 705 } 706 } 707 708 static void 709 rack_set_cc_pacing(struct tcp_rack *rack) 710 { 711 if (rack->rc_pacing_cc_set) 712 return; 713 /* 714 * Use the swap utility placing in 3 for flex8 to id a 715 * set of a new set of values. 
716 */ 717 rack->rc_pacing_cc_set = 1; 718 rack_swap_beta_values(rack, 3); 719 } 720 721 static void 722 rack_undo_cc_pacing(struct tcp_rack *rack) 723 { 724 if (rack->rc_pacing_cc_set == 0) 725 return; 726 /* 727 * Use the swap utility placing in 4 for flex8 to id a 728 * restoral of the old values. 729 */ 730 rack->rc_pacing_cc_set = 0; 731 rack_swap_beta_values(rack, 4); 732 } 733 734 static void 735 rack_remove_pacing(struct tcp_rack *rack) 736 { 737 if (rack->rc_pacing_cc_set) 738 rack_undo_cc_pacing(rack); 739 if (rack->r_ctl.pacing_method & RACK_REG_PACING) 740 tcp_decrement_paced_conn(); 741 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) 742 tcp_dec_dgp_pacing_cnt(); 743 rack->rc_always_pace = 0; 744 rack->r_ctl.pacing_method = RACK_PACING_NONE; 745 rack->dgp_on = 0; 746 rack->rc_hybrid_mode = 0; 747 rack->use_fixed_rate = 0; 748 } 749 750 static void 751 rack_log_gpset(struct tcp_rack *rack, uint32_t seq_end, uint32_t ack_end_t, 752 uint32_t send_end_t, int line, uint8_t mode, struct rack_sendmap *rsm) 753 { 754 if (tcp_bblogging_on(rack->rc_tp) && (rack_verbose_logging != 0)) { 755 union tcp_log_stackspecific log; 756 struct timeval tv; 757 758 memset(&log, 0, sizeof(log)); 759 log.u_bbr.flex1 = seq_end; 760 log.u_bbr.flex2 = rack->rc_tp->gput_seq; 761 log.u_bbr.flex3 = ack_end_t; 762 log.u_bbr.flex4 = rack->rc_tp->gput_ts; 763 log.u_bbr.flex5 = send_end_t; 764 log.u_bbr.flex6 = rack->rc_tp->gput_ack; 765 log.u_bbr.flex7 = mode; 766 log.u_bbr.flex8 = 69; 767 log.u_bbr.rttProp = rack->r_ctl.rc_gp_cumack_ts; 768 log.u_bbr.delRate = rack->r_ctl.rc_gp_output_ts; 769 log.u_bbr.pkts_out = line; 770 log.u_bbr.cwnd_gain = rack->app_limited_needs_set; 771 log.u_bbr.pkt_epoch = rack->r_ctl.rc_app_limited_cnt; 772 log.u_bbr.epoch = rack->r_ctl.current_round; 773 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 774 if (rsm != NULL) { 775 log.u_bbr.applimited = rsm->r_start; 776 log.u_bbr.delivered = rsm->r_end; 777 log.u_bbr.epoch = rsm->r_flags; 778 } 779 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 780 TCP_LOG_EVENTP(rack->rc_tp, NULL, 781 &rack->rc_inp->inp_socket->so_rcv, 782 &rack->rc_inp->inp_socket->so_snd, 783 BBR_LOG_HPTSI_CALC, 0, 784 0, &log, false, &tv); 785 } 786 } 787 788 static int 789 sysctl_rack_clear(SYSCTL_HANDLER_ARGS) 790 { 791 uint32_t stat; 792 int32_t error; 793 794 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t)); 795 if (error || req->newptr == NULL) 796 return error; 797 798 error = SYSCTL_IN(req, &stat, sizeof(uint32_t)); 799 if (error) 800 return (error); 801 if (stat == 1) { 802 #ifdef INVARIANTS 803 printf("Clearing RACK counters\n"); 804 #endif 805 counter_u64_zero(rack_tlp_tot); 806 counter_u64_zero(rack_tlp_newdata); 807 counter_u64_zero(rack_tlp_retran); 808 counter_u64_zero(rack_tlp_retran_bytes); 809 counter_u64_zero(rack_to_tot); 810 counter_u64_zero(rack_saw_enobuf); 811 counter_u64_zero(rack_saw_enobuf_hw); 812 counter_u64_zero(rack_saw_enetunreach); 813 counter_u64_zero(rack_persists_sends); 814 counter_u64_zero(rack_total_bytes); 815 counter_u64_zero(rack_persists_acks); 816 counter_u64_zero(rack_persists_loss); 817 counter_u64_zero(rack_persists_lost_ends); 818 #ifdef INVARIANTS 819 counter_u64_zero(rack_adjust_map_bw); 820 #endif 821 counter_u64_zero(rack_to_alloc_hard); 822 counter_u64_zero(rack_to_alloc_emerg); 823 counter_u64_zero(rack_sack_proc_all); 824 counter_u64_zero(rack_fto_send); 825 counter_u64_zero(rack_fto_rsm_send); 826 counter_u64_zero(rack_extended_rfo); 827 counter_u64_zero(rack_hw_pace_init_fail); 828 
counter_u64_zero(rack_hw_pace_lost); 829 counter_u64_zero(rack_non_fto_send); 830 counter_u64_zero(rack_nfto_resend); 831 counter_u64_zero(rack_sack_proc_short); 832 counter_u64_zero(rack_sack_proc_restart); 833 counter_u64_zero(rack_to_alloc); 834 counter_u64_zero(rack_to_alloc_limited); 835 counter_u64_zero(rack_alloc_limited_conns); 836 counter_u64_zero(rack_split_limited); 837 counter_u64_zero(rack_rxt_clamps_cwnd); 838 counter_u64_zero(rack_rxt_clamps_cwnd_uniq); 839 counter_u64_zero(rack_multi_single_eq); 840 counter_u64_zero(rack_proc_non_comp_ack); 841 counter_u64_zero(rack_sack_attacks_detected); 842 counter_u64_zero(rack_sack_attacks_reversed); 843 counter_u64_zero(rack_sack_attacks_suspect); 844 counter_u64_zero(rack_sack_used_next_merge); 845 counter_u64_zero(rack_sack_used_prev_merge); 846 counter_u64_zero(rack_sack_splits); 847 counter_u64_zero(rack_sack_skipped_acked); 848 counter_u64_zero(rack_ack_total); 849 counter_u64_zero(rack_express_sack); 850 counter_u64_zero(rack_sack_total); 851 counter_u64_zero(rack_move_none); 852 counter_u64_zero(rack_move_some); 853 counter_u64_zero(rack_try_scwnd); 854 counter_u64_zero(rack_collapsed_win); 855 counter_u64_zero(rack_collapsed_win_rxt); 856 counter_u64_zero(rack_collapsed_win_seen); 857 counter_u64_zero(rack_collapsed_win_rxt_bytes); 858 } else if (stat == 2) { 859 #ifdef INVARIANTS 860 printf("Clearing RACK option array\n"); 861 #endif 862 COUNTER_ARRAY_ZERO(rack_opts_arry, RACK_OPTS_SIZE); 863 } else if (stat == 3) { 864 printf("Rack has no stats counters to clear (use 1 to clear all stats in sysctl node)\n"); 865 } else if (stat == 4) { 866 #ifdef INVARIANTS 867 printf("Clearing RACK out size array\n"); 868 #endif 869 COUNTER_ARRAY_ZERO(rack_out_size, TCP_MSS_ACCT_SIZE); 870 } 871 rack_clear_counter = 0; 872 return (0); 873 } 874 875 static void 876 rack_init_sysctls(void) 877 { 878 struct sysctl_oid *rack_counters; 879 struct sysctl_oid *rack_attack; 880 struct sysctl_oid *rack_pacing; 881 struct sysctl_oid *rack_timely; 882 struct sysctl_oid *rack_timers; 883 struct sysctl_oid *rack_tlp; 884 struct sysctl_oid *rack_misc; 885 struct sysctl_oid *rack_features; 886 struct sysctl_oid *rack_measure; 887 struct sysctl_oid *rack_probertt; 888 struct sysctl_oid *rack_hw_pacing; 889 890 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 891 SYSCTL_CHILDREN(rack_sysctl_root), 892 OID_AUTO, 893 "sack_attack", 894 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 895 "Rack Sack Attack Counters and Controls"); 896 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 897 SYSCTL_CHILDREN(rack_sysctl_root), 898 OID_AUTO, 899 "stats", 900 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 901 "Rack Counters"); 902 SYSCTL_ADD_S32(&rack_sysctl_ctx, 903 SYSCTL_CHILDREN(rack_sysctl_root), 904 OID_AUTO, "rate_sample_method", CTLFLAG_RW, 905 &rack_rate_sample_method , USE_RTT_LOW, 906 "What method should we use for rate sampling 0=high, 1=low "); 907 /* Probe rtt related controls */ 908 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 909 SYSCTL_CHILDREN(rack_sysctl_root), 910 OID_AUTO, 911 "probertt", 912 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 913 "ProbeRTT related Controls"); 914 SYSCTL_ADD_U16(&rack_sysctl_ctx, 915 SYSCTL_CHILDREN(rack_probertt), 916 OID_AUTO, "exit_per_hpb", CTLFLAG_RW, 917 &rack_atexit_prtt_hbp, 130, 918 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%"); 919 SYSCTL_ADD_U16(&rack_sysctl_ctx, 920 SYSCTL_CHILDREN(rack_probertt), 921 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW, 922 &rack_atexit_prtt, 130, 923 "What percentage above goodput do we 
clamp CA/SS to at exit on a non high-BDP path 100%"); 924 SYSCTL_ADD_U16(&rack_sysctl_ctx, 925 SYSCTL_CHILDREN(rack_probertt), 926 OID_AUTO, "gp_per_mul", CTLFLAG_RW, 927 &rack_per_of_gp_probertt, 60, 928 "What percentage of goodput do we pace at in probertt"); 929 SYSCTL_ADD_U16(&rack_sysctl_ctx, 930 SYSCTL_CHILDREN(rack_probertt), 931 OID_AUTO, "gp_per_reduce", CTLFLAG_RW, 932 &rack_per_of_gp_probertt_reduce, 10, 933 "What percentage of goodput do we reduce every gp_srtt"); 934 SYSCTL_ADD_U16(&rack_sysctl_ctx, 935 SYSCTL_CHILDREN(rack_probertt), 936 OID_AUTO, "gp_per_low", CTLFLAG_RW, 937 &rack_per_of_gp_lowthresh, 40, 938 "What percentage of goodput do we allow the multiplier to fall to"); 939 SYSCTL_ADD_U32(&rack_sysctl_ctx, 940 SYSCTL_CHILDREN(rack_probertt), 941 OID_AUTO, "time_between", CTLFLAG_RW, 942 & rack_time_between_probertt, 96000000, 943 "How many useconds between the lowest rtt falling must past before we enter probertt"); 944 SYSCTL_ADD_U32(&rack_sysctl_ctx, 945 SYSCTL_CHILDREN(rack_probertt), 946 OID_AUTO, "safety", CTLFLAG_RW, 947 &rack_probe_rtt_safety_val, 2000000, 948 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)"); 949 SYSCTL_ADD_U32(&rack_sysctl_ctx, 950 SYSCTL_CHILDREN(rack_probertt), 951 OID_AUTO, "sets_cwnd", CTLFLAG_RW, 952 &rack_probe_rtt_sets_cwnd, 0, 953 "Do we set the cwnd too (if always_lower is on)"); 954 SYSCTL_ADD_U32(&rack_sysctl_ctx, 955 SYSCTL_CHILDREN(rack_probertt), 956 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW, 957 &rack_max_drain_wait, 2, 958 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal"); 959 SYSCTL_ADD_U32(&rack_sysctl_ctx, 960 SYSCTL_CHILDREN(rack_probertt), 961 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW, 962 &rack_must_drain, 1, 963 "We must drain this many gp_srtt's waiting for flight to reach goal"); 964 SYSCTL_ADD_U32(&rack_sysctl_ctx, 965 SYSCTL_CHILDREN(rack_probertt), 966 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW, 967 &rack_probertt_use_min_rtt_entry, 1, 968 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); 969 SYSCTL_ADD_U32(&rack_sysctl_ctx, 970 SYSCTL_CHILDREN(rack_probertt), 971 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW, 972 &rack_probertt_use_min_rtt_exit, 0, 973 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); 974 SYSCTL_ADD_U32(&rack_sysctl_ctx, 975 SYSCTL_CHILDREN(rack_probertt), 976 OID_AUTO, "length_div", CTLFLAG_RW, 977 &rack_probertt_gpsrtt_cnt_div, 0, 978 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)"); 979 SYSCTL_ADD_U32(&rack_sysctl_ctx, 980 SYSCTL_CHILDREN(rack_probertt), 981 OID_AUTO, "length_mul", CTLFLAG_RW, 982 &rack_probertt_gpsrtt_cnt_mul, 0, 983 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)"); 984 SYSCTL_ADD_U32(&rack_sysctl_ctx, 985 SYSCTL_CHILDREN(rack_probertt), 986 OID_AUTO, "holdtim_at_target", CTLFLAG_RW, 987 &rack_min_probertt_hold, 200000, 988 "What is the minimum time we hold probertt at target"); 989 SYSCTL_ADD_U32(&rack_sysctl_ctx, 990 SYSCTL_CHILDREN(rack_probertt), 991 OID_AUTO, "filter_life", CTLFLAG_RW, 992 &rack_probertt_filter_life, 10000000, 993 "What is the time for the filters life in useconds"); 994 SYSCTL_ADD_U32(&rack_sysctl_ctx, 995 SYSCTL_CHILDREN(rack_probertt), 996 OID_AUTO, "lower_within", CTLFLAG_RW, 997 &rack_probertt_lower_within, 10, 998 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); 999 SYSCTL_ADD_U32(&rack_sysctl_ctx, 
1000 SYSCTL_CHILDREN(rack_probertt), 1001 OID_AUTO, "must_move", CTLFLAG_RW, 1002 &rack_min_rtt_movement, 250, 1003 "How much is the minimum movement in rtt to count as a drop for probertt purposes"); 1004 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1005 SYSCTL_CHILDREN(rack_probertt), 1006 OID_AUTO, "clear_is_cnts", CTLFLAG_RW, 1007 &rack_probertt_clear_is, 1, 1008 "Do we clear I/S counts on exiting probe-rtt"); 1009 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1010 SYSCTL_CHILDREN(rack_probertt), 1011 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW, 1012 &rack_max_drain_hbp, 1, 1013 "How many extra drain gpsrtt's do we get in highly buffered paths"); 1014 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1015 SYSCTL_CHILDREN(rack_probertt), 1016 OID_AUTO, "hbp_threshold", CTLFLAG_RW, 1017 &rack_hbp_thresh, 3, 1018 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); 1019 /* Pacing related sysctls */ 1020 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1021 SYSCTL_CHILDREN(rack_sysctl_root), 1022 OID_AUTO, 1023 "pacing", 1024 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1025 "Pacing related Controls"); 1026 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1027 SYSCTL_CHILDREN(rack_pacing), 1028 OID_AUTO, "pcm_enabled", CTLFLAG_RW, 1029 &rack_pcm_is_enabled, 1, 1030 "Do we by default do PCM measurements?"); 1031 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1032 SYSCTL_CHILDREN(rack_pacing), 1033 OID_AUTO, "pcm_rnds", CTLFLAG_RW, 1034 &rack_pcm_every_n_rounds, 100, 1035 "How many rounds before we need to do a PCM measurement"); 1036 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1037 SYSCTL_CHILDREN(rack_pacing), 1038 OID_AUTO, "pcm_blast", CTLFLAG_RW, 1039 &rack_pcm_blast, 0, 1040 "Blast out the full cwnd/rwnd when doing a PCM measurement"); 1041 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1042 SYSCTL_CHILDREN(rack_pacing), 1043 OID_AUTO, "rnd_gp_gain", CTLFLAG_RW, 1044 &rack_gp_gain_req, 1200, 1045 "How much do we have to increase the GP to record the round 1200 = 120.0"); 1046 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1047 SYSCTL_CHILDREN(rack_pacing), 1048 OID_AUTO, "dgp_out_of_ss_at", CTLFLAG_RW, 1049 &rack_rnd_cnt_req, 0x10005, 1050 "How many rounds less than rnd_gp_gain will drop us out of SS"); 1051 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1052 SYSCTL_CHILDREN(rack_pacing), 1053 OID_AUTO, "no_timely", CTLFLAG_RW, 1054 &rack_timely_off, 0, 1055 "Do we not use timely in DGP?"); 1056 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1057 SYSCTL_CHILDREN(rack_pacing), 1058 OID_AUTO, "fullbufdisc", CTLFLAG_RW, 1059 &rack_full_buffer_discount, 10, 1060 "What percentage b/w reduction over the GP estimate for a full buffer (default=0 off)?"); 1061 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1062 SYSCTL_CHILDREN(rack_pacing), 1063 OID_AUTO, "fillcw", CTLFLAG_RW, 1064 &rack_fill_cw_state, 0, 1065 "Enable fillcw on new connections (default=0 off)?"); 1066 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1067 SYSCTL_CHILDREN(rack_pacing), 1068 OID_AUTO, "min_burst", CTLFLAG_RW, 1069 &rack_pacing_min_seg, 0, 1070 "What is the min burst size for pacing (0 disables)?"); 1071 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1072 SYSCTL_CHILDREN(rack_pacing), 1073 OID_AUTO, "divisor", CTLFLAG_RW, 1074 &rack_default_pacing_divisor, 250, 1075 "What is the default divisor given to the rl code?"); 1076 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1077 SYSCTL_CHILDREN(rack_pacing), 1078 OID_AUTO, "fillcw_max_mult", CTLFLAG_RW, 1079 &rack_bw_multipler, 0, 1080 "What is the limit multiplier of the current gp_est that fillcw can increase the b/w too, 200 == 200% (0 = off)?"); 1081 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1082 SYSCTL_CHILDREN(rack_pacing), 1083 OID_AUTO, "max_pace_over", 
CTLFLAG_RW, 1084 &rack_max_per_above, 30, 1085 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)"); 1086 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1087 SYSCTL_CHILDREN(rack_pacing), 1088 OID_AUTO, "allow1mss", CTLFLAG_RW, 1089 &rack_pace_one_seg, 0, 1090 "Do we allow low b/w pacing of 1MSS instead of two (1.2Meg and less)?"); 1091 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1092 SYSCTL_CHILDREN(rack_pacing), 1093 OID_AUTO, "limit_wsrtt", CTLFLAG_RW, 1094 &rack_limit_time_with_srtt, 0, 1095 "Do we limit pacing time based on srtt"); 1096 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1097 SYSCTL_CHILDREN(rack_pacing), 1098 OID_AUTO, "gp_per_ss", CTLFLAG_RW, 1099 &rack_per_of_gp_ss, 250, 1100 "If non zero, what percentage of goodput to pace at in slow start"); 1101 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1102 SYSCTL_CHILDREN(rack_pacing), 1103 OID_AUTO, "gp_per_ca", CTLFLAG_RW, 1104 &rack_per_of_gp_ca, 150, 1105 "If non zero, what percentage of goodput to pace at in congestion avoidance"); 1106 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1107 SYSCTL_CHILDREN(rack_pacing), 1108 OID_AUTO, "gp_per_rec", CTLFLAG_RW, 1109 &rack_per_of_gp_rec, 200, 1110 "If non zero, what percentage of goodput to pace at in recovery"); 1111 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1112 SYSCTL_CHILDREN(rack_pacing), 1113 OID_AUTO, "pace_max_seg", CTLFLAG_RW, 1114 &rack_hptsi_segments, 40, 1115 "What size is the max for TSO segments in pacing and burst mitigation"); 1116 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1117 SYSCTL_CHILDREN(rack_pacing), 1118 OID_AUTO, "burst_reduces", CTLFLAG_RW, 1119 &rack_slot_reduction, 4, 1120 "When doing only burst mitigation what is the reduce divisor"); 1121 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1122 SYSCTL_CHILDREN(rack_sysctl_root), 1123 OID_AUTO, "use_pacing", CTLFLAG_RW, 1124 &rack_pace_every_seg, 0, 1125 "If set we use pacing, if clear we use only the original burst mitigation"); 1126 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1127 SYSCTL_CHILDREN(rack_pacing), 1128 OID_AUTO, "rate_cap", CTLFLAG_RW, 1129 &rack_bw_rate_cap, 0, 1130 "If set we apply this value to the absolute rate cap used by pacing"); 1131 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1132 SYSCTL_CHILDREN(rack_pacing), 1133 OID_AUTO, "fillcw_cap", CTLFLAG_RW, 1134 &rack_fillcw_bw_cap, 3750000, 1135 "Do we have an absolute cap on the amount of b/w fillcw can specify (0 = no)?"); 1136 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1137 SYSCTL_CHILDREN(rack_sysctl_root), 1138 OID_AUTO, "req_measure_cnt", CTLFLAG_RW, 1139 &rack_req_measurements, 1, 1140 "If doing dynamic pacing, how many measurements must be in before we start pacing?"); 1141 /* Hardware pacing */ 1142 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1143 SYSCTL_CHILDREN(rack_sysctl_root), 1144 OID_AUTO, 1145 "hdwr_pacing", 1146 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1147 "Pacing related Controls"); 1148 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1149 SYSCTL_CHILDREN(rack_hw_pacing), 1150 OID_AUTO, "rwnd_factor", CTLFLAG_RW, 1151 &rack_hw_rwnd_factor, 2, 1152 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?"); 1153 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1154 SYSCTL_CHILDREN(rack_hw_pacing), 1155 OID_AUTO, "precheck", CTLFLAG_RW, 1156 &rack_hw_check_queue, 0, 1157 "Do we always precheck the hdwr pacing queue to avoid ENOBUF's?"); 1158 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1159 SYSCTL_CHILDREN(rack_hw_pacing), 1160 OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW, 1161 &rack_enobuf_hw_boost_mult, 0, 1162 "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?"); 1163 
SYSCTL_ADD_S32(&rack_sysctl_ctx, 1164 SYSCTL_CHILDREN(rack_hw_pacing), 1165 OID_AUTO, "pace_enobuf_max", CTLFLAG_RW, 1166 &rack_enobuf_hw_max, 2, 1167 "What is the max boost the pacing time if we see a ENOBUFS?"); 1168 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1169 SYSCTL_CHILDREN(rack_hw_pacing), 1170 OID_AUTO, "pace_enobuf_min", CTLFLAG_RW, 1171 &rack_enobuf_hw_min, 2, 1172 "What is the min boost the pacing time if we see a ENOBUFS?"); 1173 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1174 SYSCTL_CHILDREN(rack_hw_pacing), 1175 OID_AUTO, "enable", CTLFLAG_RW, 1176 &rack_enable_hw_pacing, 0, 1177 "Should RACK attempt to use hw pacing?"); 1178 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1179 SYSCTL_CHILDREN(rack_hw_pacing), 1180 OID_AUTO, "rate_cap", CTLFLAG_RW, 1181 &rack_hw_rate_caps, 0, 1182 "Does the highest hardware pacing rate cap the rate we will send at??"); 1183 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1184 SYSCTL_CHILDREN(rack_hw_pacing), 1185 OID_AUTO, "uncap_per", CTLFLAG_RW, 1186 &rack_hw_rate_cap_per, 0, 1187 "If you go over b/w by this amount you will be uncapped (0 = never)"); 1188 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1189 SYSCTL_CHILDREN(rack_hw_pacing), 1190 OID_AUTO, "rate_min", CTLFLAG_RW, 1191 &rack_hw_rate_min, 0, 1192 "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?"); 1193 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1194 SYSCTL_CHILDREN(rack_hw_pacing), 1195 OID_AUTO, "rate_to_low", CTLFLAG_RW, 1196 &rack_hw_rate_to_low, 0, 1197 "If we fall below this rate, dis-engage hw pacing?"); 1198 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1199 SYSCTL_CHILDREN(rack_hw_pacing), 1200 OID_AUTO, "up_only", CTLFLAG_RW, 1201 &rack_hw_up_only, 0, 1202 "Do we allow hw pacing to lower the rate selected?"); 1203 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1204 SYSCTL_CHILDREN(rack_hw_pacing), 1205 OID_AUTO, "extra_mss_precise", CTLFLAG_RW, 1206 &rack_hw_pace_extra_slots, 0, 1207 "If the rates between software and hardware match precisely how many extra time_betweens do we get?"); 1208 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1209 SYSCTL_CHILDREN(rack_sysctl_root), 1210 OID_AUTO, 1211 "timely", 1212 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1213 "Rack Timely RTT Controls"); 1214 /* Timely based GP dynmics */ 1215 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1216 SYSCTL_CHILDREN(rack_timely), 1217 OID_AUTO, "upper", CTLFLAG_RW, 1218 &rack_gp_per_bw_mul_up, 2, 1219 "Rack timely upper range for equal b/w (in percentage)"); 1220 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1221 SYSCTL_CHILDREN(rack_timely), 1222 OID_AUTO, "lower", CTLFLAG_RW, 1223 &rack_gp_per_bw_mul_down, 4, 1224 "Rack timely lower range for equal b/w (in percentage)"); 1225 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1226 SYSCTL_CHILDREN(rack_timely), 1227 OID_AUTO, "rtt_max_mul", CTLFLAG_RW, 1228 &rack_gp_rtt_maxmul, 3, 1229 "Rack timely multiplier of lowest rtt for rtt_max"); 1230 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1231 SYSCTL_CHILDREN(rack_timely), 1232 OID_AUTO, "rtt_min_div", CTLFLAG_RW, 1233 &rack_gp_rtt_mindiv, 4, 1234 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1235 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1236 SYSCTL_CHILDREN(rack_timely), 1237 OID_AUTO, "rtt_min_mul", CTLFLAG_RW, 1238 &rack_gp_rtt_minmul, 1, 1239 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1240 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1241 SYSCTL_CHILDREN(rack_timely), 1242 OID_AUTO, "decrease", CTLFLAG_RW, 1243 &rack_gp_decrease_per, 80, 1244 "Rack timely Beta value 80 = .8 (scaled by 100)"); 1245 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1246 
SYSCTL_CHILDREN(rack_timely), 1247 OID_AUTO, "increase", CTLFLAG_RW, 1248 &rack_gp_increase_per, 2, 1249 "Rack timely increase perentage of our GP multiplication factor"); 1250 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1251 SYSCTL_CHILDREN(rack_timely), 1252 OID_AUTO, "lowerbound", CTLFLAG_RW, 1253 &rack_per_lower_bound, 50, 1254 "Rack timely lowest percentage we allow GP multiplier to fall to"); 1255 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1256 SYSCTL_CHILDREN(rack_timely), 1257 OID_AUTO, "p5_upper", CTLFLAG_RW, 1258 &rack_gain_p5_ub, 250, 1259 "Profile 5 upper bound to timely gain"); 1260 1261 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1262 SYSCTL_CHILDREN(rack_timely), 1263 OID_AUTO, "upperboundss", CTLFLAG_RW, 1264 &rack_per_upper_bound_ss, 0, 1265 "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)"); 1266 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1267 SYSCTL_CHILDREN(rack_timely), 1268 OID_AUTO, "upperboundca", CTLFLAG_RW, 1269 &rack_per_upper_bound_ca, 0, 1270 "Rack timely highest percentage we allow GP multiplier to CA raise to (0 is no upperbound)"); 1271 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1272 SYSCTL_CHILDREN(rack_timely), 1273 OID_AUTO, "dynamicgp", CTLFLAG_RW, 1274 &rack_do_dyn_mul, 0, 1275 "Rack timely do we enable dynmaic timely goodput by default"); 1276 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1277 SYSCTL_CHILDREN(rack_timely), 1278 OID_AUTO, "no_rec_red", CTLFLAG_RW, 1279 &rack_gp_no_rec_chg, 1, 1280 "Rack timely do we prohibit the recovery multiplier from being lowered"); 1281 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1282 SYSCTL_CHILDREN(rack_timely), 1283 OID_AUTO, "red_clear_cnt", CTLFLAG_RW, 1284 &rack_timely_dec_clear, 6, 1285 "Rack timely what threshold do we count to before another boost during b/w decent"); 1286 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1287 SYSCTL_CHILDREN(rack_timely), 1288 OID_AUTO, "max_push_rise", CTLFLAG_RW, 1289 &rack_timely_max_push_rise, 3, 1290 "Rack timely how many times do we push up with b/w increase"); 1291 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1292 SYSCTL_CHILDREN(rack_timely), 1293 OID_AUTO, "max_push_drop", CTLFLAG_RW, 1294 &rack_timely_max_push_drop, 3, 1295 "Rack timely how many times do we push back on b/w decent"); 1296 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1297 SYSCTL_CHILDREN(rack_timely), 1298 OID_AUTO, "min_segs", CTLFLAG_RW, 1299 &rack_timely_min_segs, 4, 1300 "Rack timely when setting the cwnd what is the min num segments"); 1301 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1302 SYSCTL_CHILDREN(rack_timely), 1303 OID_AUTO, "noback_max", CTLFLAG_RW, 1304 &rack_use_max_for_nobackoff, 0, 1305 "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min"); 1306 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1307 SYSCTL_CHILDREN(rack_timely), 1308 OID_AUTO, "interim_timely_only", CTLFLAG_RW, 1309 &rack_timely_int_timely_only, 0, 1310 "Rack timely when doing interim timely's do we only do timely (no b/w consideration)"); 1311 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1312 SYSCTL_CHILDREN(rack_timely), 1313 OID_AUTO, "nonstop", CTLFLAG_RW, 1314 &rack_timely_no_stopping, 0, 1315 "Rack timely don't stop increase"); 1316 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1317 SYSCTL_CHILDREN(rack_timely), 1318 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW, 1319 &rack_down_raise_thresh, 100, 1320 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)"); 1321 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1322 SYSCTL_CHILDREN(rack_timely), 1323 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW, 1324 &rack_req_segs, 1, 1325 "Bottom dragging if not these many segments outstanding and room"); 
1326 1327 /* TLP and Rack related parameters */ 1328 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1329 SYSCTL_CHILDREN(rack_sysctl_root), 1330 OID_AUTO, 1331 "tlp", 1332 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1333 "TLP and Rack related Controls"); 1334 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1335 SYSCTL_CHILDREN(rack_tlp), 1336 OID_AUTO, "use_rrr", CTLFLAG_RW, 1337 &use_rack_rr, 1, 1338 "Do we use Rack Rapid Recovery"); 1339 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1340 SYSCTL_CHILDREN(rack_tlp), 1341 OID_AUTO, "post_rec_labc", CTLFLAG_RW, 1342 &rack_max_abc_post_recovery, 2, 1343 "Since we do early recovery, do we override the l_abc to a value, if so what?"); 1344 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1345 SYSCTL_CHILDREN(rack_tlp), 1346 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW, 1347 &rack_non_rxt_use_cr, 0, 1348 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk"); 1349 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1350 SYSCTL_CHILDREN(rack_tlp), 1351 OID_AUTO, "tlpmethod", CTLFLAG_RW, 1352 &rack_tlp_threshold_use, TLP_USE_TWO_ONE, 1353 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); 1354 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1355 SYSCTL_CHILDREN(rack_tlp), 1356 OID_AUTO, "limit", CTLFLAG_RW, 1357 &rack_tlp_limit, 2, 1358 "How many TLP's can be sent without sending new data"); 1359 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1360 SYSCTL_CHILDREN(rack_tlp), 1361 OID_AUTO, "use_greater", CTLFLAG_RW, 1362 &rack_tlp_use_greater, 1, 1363 "Should we use the rack_rtt time if its greater than srtt"); 1364 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1365 SYSCTL_CHILDREN(rack_tlp), 1366 OID_AUTO, "tlpminto", CTLFLAG_RW, 1367 &rack_tlp_min, 10000, 1368 "TLP minimum timeout per the specification (in microseconds)"); 1369 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1370 SYSCTL_CHILDREN(rack_tlp), 1371 OID_AUTO, "send_oldest", CTLFLAG_RW, 1372 &rack_always_send_oldest, 0, 1373 "Should we always send the oldest TLP and RACK-TLP"); 1374 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1375 SYSCTL_CHILDREN(rack_tlp), 1376 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW, 1377 &rack_lower_cwnd_at_tlp, 0, 1378 "When a TLP completes a retran should we enter recovery"); 1379 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1380 SYSCTL_CHILDREN(rack_tlp), 1381 OID_AUTO, "reorder_thresh", CTLFLAG_RW, 1382 &rack_reorder_thresh, 2, 1383 "What factor for rack will be added when seeing reordering (shift right)"); 1384 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1385 SYSCTL_CHILDREN(rack_tlp), 1386 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW, 1387 &rack_tlp_thresh, 1, 1388 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); 1389 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1390 SYSCTL_CHILDREN(rack_tlp), 1391 OID_AUTO, "reorder_fade", CTLFLAG_RW, 1392 &rack_reorder_fade, 60000000, 1393 "Does reorder detection fade, if so how many microseconds (0 means never)"); 1394 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1395 SYSCTL_CHILDREN(rack_tlp), 1396 OID_AUTO, "pktdelay", CTLFLAG_RW, 1397 &rack_pkt_delay, 1000, 1398 "Extra RACK time (in microseconds) besides reordering thresh"); 1399 1400 /* Timer related controls */ 1401 rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1402 SYSCTL_CHILDREN(rack_sysctl_root), 1403 OID_AUTO, 1404 "timers", 1405 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1406 "Timer related controls"); 1407 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1408 SYSCTL_CHILDREN(rack_timers), 1409 OID_AUTO, "reset_ssth_rec_rto", CTLFLAG_RW, 1410 &rack_ssthresh_rest_rto_rec, 0, 1411 "When doing recovery -> rto -> recovery do we reset SSthresh?"); 1412 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1413 SYSCTL_CHILDREN(rack_timers), 1414 
OID_AUTO, "scoreboard_thresh", CTLFLAG_RW, 1415 &rack_rxt_scoreboard_clear_thresh, 2, 1416 "How many RTO's are allowed before we clear the scoreboard"); 1417 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1418 SYSCTL_CHILDREN(rack_timers), 1419 OID_AUTO, "honor_hpts_min", CTLFLAG_RW, 1420 &rack_honors_hpts_min_to, 1, 1421 "Do rack pacing timers honor hpts min timeout"); 1422 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1423 SYSCTL_CHILDREN(rack_timers), 1424 OID_AUTO, "hpts_max_reduce", CTLFLAG_RW, 1425 &rack_max_reduce, 10, 1426 "Max percentage we will reduce slot by for pacing when we are behind"); 1427 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1428 SYSCTL_CHILDREN(rack_timers), 1429 OID_AUTO, "persmin", CTLFLAG_RW, 1430 &rack_persist_min, 250000, 1431 "What is the minimum time in microseconds between persists"); 1432 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1433 SYSCTL_CHILDREN(rack_timers), 1434 OID_AUTO, "persmax", CTLFLAG_RW, 1435 &rack_persist_max, 2000000, 1436 "What is the largest delay in microseconds between persists"); 1437 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1438 SYSCTL_CHILDREN(rack_timers), 1439 OID_AUTO, "delayed_ack", CTLFLAG_RW, 1440 &rack_delayed_ack_time, 40000, 1441 "Delayed ack time (40ms in microseconds)"); 1442 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1443 SYSCTL_CHILDREN(rack_timers), 1444 OID_AUTO, "minrto", CTLFLAG_RW, 1445 &rack_rto_min, 30000, 1446 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP"); 1447 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1448 SYSCTL_CHILDREN(rack_timers), 1449 OID_AUTO, "maxrto", CTLFLAG_RW, 1450 &rack_rto_max, 4000000, 1451 "Maximum RTO in microseconds -- should be at least as large as min_rto"); 1452 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1453 SYSCTL_CHILDREN(rack_timers), 1454 OID_AUTO, "minto", CTLFLAG_RW, 1455 &rack_min_to, 1000, 1456 "Minimum rack timeout in microseconds"); 1457 /* Measure controls */ 1458 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1459 SYSCTL_CHILDREN(rack_sysctl_root), 1460 OID_AUTO, 1461 "measure", 1462 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1463 "Measure related controls"); 1464 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1465 SYSCTL_CHILDREN(rack_measure), 1466 OID_AUTO, "wma_divisor", CTLFLAG_RW, 1467 &rack_wma_divisor, 8, 1468 "When doing b/w calculation what is the divisor for the WMA"); 1469 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1470 SYSCTL_CHILDREN(rack_measure), 1471 OID_AUTO, "end_cwnd", CTLFLAG_RW, 1472 &rack_cwnd_block_ends_measure, 0, 1473 "Does a cwnd just-return end the measurement window (app limited)"); 1474 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1475 SYSCTL_CHILDREN(rack_measure), 1476 OID_AUTO, "end_rwnd", CTLFLAG_RW, 1477 &rack_rwnd_block_ends_measure, 0, 1478 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); 1479 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1480 SYSCTL_CHILDREN(rack_measure), 1481 OID_AUTO, "min_target", CTLFLAG_RW, 1482 &rack_def_data_window, 20, 1483 "What is the minimum target window (in mss) for a GP measurements"); 1484 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1485 SYSCTL_CHILDREN(rack_measure), 1486 OID_AUTO, "goal_bdp", CTLFLAG_RW, 1487 &rack_goal_bdp, 2, 1488 "What is the goal BDP to measure"); 1489 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1490 SYSCTL_CHILDREN(rack_measure), 1491 OID_AUTO, "min_srtts", CTLFLAG_RW, 1492 &rack_min_srtts, 1, 1493 "What is the goal BDP to measure"); 1494 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1495 SYSCTL_CHILDREN(rack_measure), 1496 OID_AUTO, "min_measure_tim", CTLFLAG_RW, 1497 &rack_min_measure_usec, 0, 1498 "What is the Minimum time time for a measurement if 0, this is off"); 1499 /* Features */ 1500 
rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1501 SYSCTL_CHILDREN(rack_sysctl_root), 1502 OID_AUTO, 1503 "features", 1504 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1505 "Feature controls"); 1506 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1507 SYSCTL_CHILDREN(rack_features), 1508 OID_AUTO, "hybrid_set_maxseg", CTLFLAG_RW, 1509 &rack_hybrid_allow_set_maxseg, 0, 1510 "Should hybrid pacing allow the setmss command"); 1511 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1512 SYSCTL_CHILDREN(rack_features), 1513 OID_AUTO, "cmpack", CTLFLAG_RW, 1514 &rack_use_cmp_acks, 1, 1515 "Should RACK have LRO send compressed acks"); 1516 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1517 SYSCTL_CHILDREN(rack_features), 1518 OID_AUTO, "fsb", CTLFLAG_RW, 1519 &rack_use_fsb, 1, 1520 "Should RACK use the fast send block?"); 1521 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1522 SYSCTL_CHILDREN(rack_features), 1523 OID_AUTO, "rfo", CTLFLAG_RW, 1524 &rack_use_rfo, 1, 1525 "Should RACK use rack_fast_output()?"); 1526 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1527 SYSCTL_CHILDREN(rack_features), 1528 OID_AUTO, "rsmrfo", CTLFLAG_RW, 1529 &rack_use_rsm_rfo, 1, 1530 "Should RACK use rack_fast_rsm_output()?"); 1531 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1532 SYSCTL_CHILDREN(rack_features), 1533 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW, 1534 &rack_enable_mqueue_for_nonpaced, 0, 1535 "Should RACK use mbuf queuing for non-paced connections"); 1536 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1537 SYSCTL_CHILDREN(rack_features), 1538 OID_AUTO, "hystartplusplus", CTLFLAG_RW, 1539 &rack_do_hystart, 0, 1540 "Should RACK enable HyStart++ on connections?"); 1541 /* Misc rack controls */ 1542 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1543 SYSCTL_CHILDREN(rack_sysctl_root), 1544 OID_AUTO, 1545 "misc", 1546 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1547 "Misc related controls"); 1548 #ifdef TCP_ACCOUNTING 1549 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1550 SYSCTL_CHILDREN(rack_misc), 1551 OID_AUTO, "tcp_acct", CTLFLAG_RW, 1552 &rack_tcp_accounting, 0, 1553 "Should we turn on TCP accounting for all rack sessions?"); 1554 #endif 1555 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1556 SYSCTL_CHILDREN(rack_misc), 1557 OID_AUTO, "dnd", CTLFLAG_RW, 1558 &rack_dnd_default, 0, 1559 "Do not disturb default for rack_rrr = 3"); 1560 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1561 SYSCTL_CHILDREN(rack_misc), 1562 OID_AUTO, "sad_seg_per", CTLFLAG_RW, 1563 &sad_seg_size_per, 800, 1564 "Percentage of segment size needed in a sack 800 = 80.0?"); 1565 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1566 SYSCTL_CHILDREN(rack_misc), 1567 OID_AUTO, "rxt_controls", CTLFLAG_RW, 1568 &rack_rxt_controls, 0, 1569 "Retransmit sending size controls (valid values 0, 1, 2 default=1)?"); 1570 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1571 SYSCTL_CHILDREN(rack_misc), 1572 OID_AUTO, "rack_hibeta", CTLFLAG_RW, 1573 &rack_hibeta_setting, 0, 1574 "Do we ue a high beta (80 instead of 50)?"); 1575 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1576 SYSCTL_CHILDREN(rack_misc), 1577 OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW, 1578 &rack_apply_rtt_with_reduced_conf, 0, 1579 "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?"); 1580 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1581 SYSCTL_CHILDREN(rack_misc), 1582 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW, 1583 &rack_dsack_std_based, 3, 1584 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?"); 1585 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1586 SYSCTL_CHILDREN(rack_misc), 1587 OID_AUTO, "prr_addback_max", CTLFLAG_RW, 1588 &rack_prr_addbackmax, 2, 1589 "What is the maximum number of MSS we allow to be added back 
if prr can't send all its data?"); 1590 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1591 SYSCTL_CHILDREN(rack_misc), 1592 OID_AUTO, "stats_gets_ms", CTLFLAG_RW, 1593 &rack_stats_gets_ms_rtt, 1, 1594 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); 1595 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1596 SYSCTL_CHILDREN(rack_misc), 1597 OID_AUTO, "clientlowbuf", CTLFLAG_RW, 1598 &rack_client_low_buf, 0, 1599 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?"); 1600 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1601 SYSCTL_CHILDREN(rack_misc), 1602 OID_AUTO, "defprofile", CTLFLAG_RW, 1603 &rack_def_profile, 0, 1604 "Should RACK use a default profile (0=no, num == profile num)?"); 1605 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1606 SYSCTL_CHILDREN(rack_misc), 1607 OID_AUTO, "shared_cwnd", CTLFLAG_RW, 1608 &rack_enable_shared_cwnd, 1, 1609 "Should RACK try to use the shared cwnd on connections where allowed"); 1610 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1611 SYSCTL_CHILDREN(rack_misc), 1612 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW, 1613 &rack_limits_scwnd, 1, 1614 "Should RACK place low end time limits on the shared cwnd feature"); 1615 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1616 SYSCTL_CHILDREN(rack_misc), 1617 OID_AUTO, "no_prr", CTLFLAG_RW, 1618 &rack_disable_prr, 0, 1619 "Should RACK not use prr and only pace (must have pacing on)"); 1620 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1621 SYSCTL_CHILDREN(rack_misc), 1622 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1623 &rack_verbose_logging, 0, 1624 "Should RACK black box logging be verbose"); 1625 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1626 SYSCTL_CHILDREN(rack_misc), 1627 OID_AUTO, "data_after_close", CTLFLAG_RW, 1628 &rack_ignore_data_after_close, 1, 1629 "Do we hold off sending a RST until all pending data is ack'd"); 1630 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1631 SYSCTL_CHILDREN(rack_misc), 1632 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1633 &rack_sack_not_required, 1, 1634 "Do we allow rack to run on connections not supporting SACK"); 1635 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1636 SYSCTL_CHILDREN(rack_misc), 1637 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 1638 &rack_send_a_lot_in_prr, 1, 1639 "Send a lot in prr"); 1640 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1641 SYSCTL_CHILDREN(rack_misc), 1642 OID_AUTO, "autoscale", CTLFLAG_RW, 1643 &rack_autosndbuf_inc, 20, 1644 "What percentage should rack scale up its snd buffer by?"); 1645 1646 1647 /* Sack Attacker detection stuff */ 1648 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1649 SYSCTL_CHILDREN(rack_attack), 1650 OID_AUTO, "merge_out", CTLFLAG_RW, 1651 &rack_merge_out_sacks_on_attack, 0, 1652 "Do we merge the sendmap when we decide we are being attacked?"); 1653 1654 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1655 SYSCTL_CHILDREN(rack_attack), 1656 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 1657 &rack_highest_sack_thresh_seen, 0, 1658 "Highest sack to ack ratio seen"); 1659 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1660 SYSCTL_CHILDREN(rack_attack), 1661 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW, 1662 &rack_highest_move_thresh_seen, 0, 1663 "Highest move to non-move ratio seen"); 1664 rack_ack_total = counter_u64_alloc(M_WAITOK); 1665 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1666 SYSCTL_CHILDREN(rack_attack), 1667 OID_AUTO, "acktotal", CTLFLAG_RD, 1668 &rack_ack_total, 1669 "Total number of Ack's"); 1670 rack_express_sack = counter_u64_alloc(M_WAITOK); 1671 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1672 SYSCTL_CHILDREN(rack_attack), 1673 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1674 &rack_express_sack, 1675 
"Total expresss number of Sack's"); 1676 rack_sack_total = counter_u64_alloc(M_WAITOK); 1677 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1678 SYSCTL_CHILDREN(rack_attack), 1679 OID_AUTO, "sacktotal", CTLFLAG_RD, 1680 &rack_sack_total, 1681 "Total number of SACKs"); 1682 rack_move_none = counter_u64_alloc(M_WAITOK); 1683 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1684 SYSCTL_CHILDREN(rack_attack), 1685 OID_AUTO, "move_none", CTLFLAG_RD, 1686 &rack_move_none, 1687 "Total number of SACK index reuse of positions under threshold"); 1688 rack_move_some = counter_u64_alloc(M_WAITOK); 1689 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1690 SYSCTL_CHILDREN(rack_attack), 1691 OID_AUTO, "move_some", CTLFLAG_RD, 1692 &rack_move_some, 1693 "Total number of SACK index reuse of positions over threshold"); 1694 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 1695 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1696 SYSCTL_CHILDREN(rack_attack), 1697 OID_AUTO, "attacks", CTLFLAG_RD, 1698 &rack_sack_attacks_detected, 1699 "Total number of SACK attackers that had sack disabled"); 1700 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 1701 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1702 SYSCTL_CHILDREN(rack_attack), 1703 OID_AUTO, "reversed", CTLFLAG_RD, 1704 &rack_sack_attacks_reversed, 1705 "Total number of SACK attackers that were later determined false positive"); 1706 rack_sack_attacks_suspect = counter_u64_alloc(M_WAITOK); 1707 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1708 SYSCTL_CHILDREN(rack_attack), 1709 OID_AUTO, "suspect", CTLFLAG_RD, 1710 &rack_sack_attacks_suspect, 1711 "Total number of SACKs that triggered early detection"); 1712 1713 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 1714 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1715 SYSCTL_CHILDREN(rack_attack), 1716 OID_AUTO, "nextmerge", CTLFLAG_RD, 1717 &rack_sack_used_next_merge, 1718 "Total number of times we used the next merge"); 1719 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 1720 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1721 SYSCTL_CHILDREN(rack_attack), 1722 OID_AUTO, "prevmerge", CTLFLAG_RD, 1723 &rack_sack_used_prev_merge, 1724 "Total number of times we used the prev merge"); 1725 /* Counters */ 1726 rack_total_bytes = counter_u64_alloc(M_WAITOK); 1727 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1728 SYSCTL_CHILDREN(rack_counters), 1729 OID_AUTO, "totalbytes", CTLFLAG_RD, 1730 &rack_total_bytes, 1731 "Total number of bytes sent"); 1732 rack_fto_send = counter_u64_alloc(M_WAITOK); 1733 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1734 SYSCTL_CHILDREN(rack_counters), 1735 OID_AUTO, "fto_send", CTLFLAG_RD, 1736 &rack_fto_send, "Total number of rack_fast_output sends"); 1737 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK); 1738 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1739 SYSCTL_CHILDREN(rack_counters), 1740 OID_AUTO, "fto_rsm_send", CTLFLAG_RD, 1741 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends"); 1742 rack_nfto_resend = counter_u64_alloc(M_WAITOK); 1743 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1744 SYSCTL_CHILDREN(rack_counters), 1745 OID_AUTO, "nfto_resend", CTLFLAG_RD, 1746 &rack_nfto_resend, "Total number of rack_output retransmissions"); 1747 rack_non_fto_send = counter_u64_alloc(M_WAITOK); 1748 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1749 SYSCTL_CHILDREN(rack_counters), 1750 OID_AUTO, "nfto_send", CTLFLAG_RD, 1751 &rack_non_fto_send, "Total number of rack_output first sends"); 1752 rack_extended_rfo = counter_u64_alloc(M_WAITOK); 1753 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1754 
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1754 SYSCTL_CHILDREN(rack_counters), 1755 OID_AUTO, "rfo_extended", CTLFLAG_RD, 1756 &rack_extended_rfo, "Total number of times we extended rfo"); 1757 1758 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK); 1759 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1760 SYSCTL_CHILDREN(rack_counters), 1761 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD, 1762 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing"); 1763 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK); 1764 1765 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1766 SYSCTL_CHILDREN(rack_counters), 1767 OID_AUTO, "hwpace_lost", CTLFLAG_RD, 1768 &rack_hw_pace_lost, "Total number of times we lost hw pacing after it had been set up"); 1769 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 1770 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1771 SYSCTL_CHILDREN(rack_counters), 1772 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 1773 &rack_tlp_tot, 1774 "Total number of tail loss probe expirations"); 1775 rack_tlp_newdata = counter_u64_alloc(M_WAITOK); 1776 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1777 SYSCTL_CHILDREN(rack_counters), 1778 OID_AUTO, "tlp_new", CTLFLAG_RD, 1779 &rack_tlp_newdata, 1780 "Total number of tail loss probes sending new data"); 1781 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 1782 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1783 SYSCTL_CHILDREN(rack_counters), 1784 OID_AUTO, "tlp_retran", CTLFLAG_RD, 1785 &rack_tlp_retran, 1786 "Total number of tail loss probes sending retransmitted data"); 1787 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 1788 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1789 SYSCTL_CHILDREN(rack_counters), 1790 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 1791 &rack_tlp_retran_bytes, 1792 "Total bytes of tail loss probes sending retransmitted data"); 1793 rack_to_tot = counter_u64_alloc(M_WAITOK); 1794 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1795 SYSCTL_CHILDREN(rack_counters), 1796 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 1797 &rack_to_tot, 1798 "Total number of times the rack timeout expired"); 1799 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 1800 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1801 SYSCTL_CHILDREN(rack_counters), 1802 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 1803 &rack_saw_enobuf, 1804 "Total number of times a send returned ENOBUFS for non-hdwr paced connections"); 1805 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK); 1806 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1807 SYSCTL_CHILDREN(rack_counters), 1808 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD, 1809 &rack_saw_enobuf_hw, 1810 "Total number of times a send returned ENOBUFS for hdwr paced connections"); 1811 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 1812 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1813 SYSCTL_CHILDREN(rack_counters), 1814 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 1815 &rack_saw_enetunreach, 1816 "Total number of times a send received an ENETUNREACH"); 1817 rack_hot_alloc = counter_u64_alloc(M_WAITOK); 1818 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1819 SYSCTL_CHILDREN(rack_counters), 1820 OID_AUTO, "alloc_hot", CTLFLAG_RD, 1821 &rack_hot_alloc, 1822 "Total allocations from the top of our list"); 1823 rack_to_alloc = counter_u64_alloc(M_WAITOK); 1824 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1825 SYSCTL_CHILDREN(rack_counters), 1826 OID_AUTO, "allocs", CTLFLAG_RD, 1827 &rack_to_alloc, 1828 "Total allocations of tracking structures"); 1829 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 1830 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1831 SYSCTL_CHILDREN(rack_counters), 1832 OID_AUTO, "allochard", CTLFLAG_RD, 1833 &rack_to_alloc_hard, 1834 "Total
allocations done with sleeping the hard way"); 1835 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 1836 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1837 SYSCTL_CHILDREN(rack_counters), 1838 OID_AUTO, "allocemerg", CTLFLAG_RD, 1839 &rack_to_alloc_emerg, 1840 "Total allocations done from emergency cache"); 1841 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 1842 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1843 SYSCTL_CHILDREN(rack_counters), 1844 OID_AUTO, "alloc_limited", CTLFLAG_RD, 1845 &rack_to_alloc_limited, 1846 "Total allocations dropped due to limit"); 1847 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 1848 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1849 SYSCTL_CHILDREN(rack_counters), 1850 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 1851 &rack_alloc_limited_conns, 1852 "Connections with allocations dropped due to limit"); 1853 rack_split_limited = counter_u64_alloc(M_WAITOK); 1854 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1855 SYSCTL_CHILDREN(rack_counters), 1856 OID_AUTO, "split_limited", CTLFLAG_RD, 1857 &rack_split_limited, 1858 "Split allocations dropped due to limit"); 1859 rack_rxt_clamps_cwnd = counter_u64_alloc(M_WAITOK); 1860 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1861 SYSCTL_CHILDREN(rack_counters), 1862 OID_AUTO, "rxt_clamps_cwnd", CTLFLAG_RD, 1863 &rack_rxt_clamps_cwnd, 1864 "Number of times that excessive rxt clamped the cwnd down"); 1865 rack_rxt_clamps_cwnd_uniq = counter_u64_alloc(M_WAITOK); 1866 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1867 SYSCTL_CHILDREN(rack_counters), 1868 OID_AUTO, "rxt_clamps_cwnd_uniq", CTLFLAG_RD, 1869 &rack_rxt_clamps_cwnd_uniq, 1870 "Number of connections that have had excessive rxt clamped the cwnd down"); 1871 rack_persists_sends = counter_u64_alloc(M_WAITOK); 1872 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1873 SYSCTL_CHILDREN(rack_counters), 1874 OID_AUTO, "persist_sends", CTLFLAG_RD, 1875 &rack_persists_sends, 1876 "Number of times we sent a persist probe"); 1877 rack_persists_acks = counter_u64_alloc(M_WAITOK); 1878 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1879 SYSCTL_CHILDREN(rack_counters), 1880 OID_AUTO, "persist_acks", CTLFLAG_RD, 1881 &rack_persists_acks, 1882 "Number of times a persist probe was acked"); 1883 rack_persists_loss = counter_u64_alloc(M_WAITOK); 1884 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1885 SYSCTL_CHILDREN(rack_counters), 1886 OID_AUTO, "persist_loss", CTLFLAG_RD, 1887 &rack_persists_loss, 1888 "Number of times we detected a lost persist probe (no ack)"); 1889 rack_persists_lost_ends = counter_u64_alloc(M_WAITOK); 1890 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1891 SYSCTL_CHILDREN(rack_counters), 1892 OID_AUTO, "persist_loss_ends", CTLFLAG_RD, 1893 &rack_persists_lost_ends, 1894 "Number of lost persist probe (no ack) that the run ended with a PERSIST abort"); 1895 #ifdef INVARIANTS 1896 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK); 1897 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1898 SYSCTL_CHILDREN(rack_counters), 1899 OID_AUTO, "map_adjust_req", CTLFLAG_RD, 1900 &rack_adjust_map_bw, 1901 "Number of times we hit the case where the sb went up and down on a sendmap entry"); 1902 #endif 1903 rack_multi_single_eq = counter_u64_alloc(M_WAITOK); 1904 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1905 SYSCTL_CHILDREN(rack_counters), 1906 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD, 1907 &rack_multi_single_eq, 1908 "Number of compressed acks total represented"); 1909 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK); 1910 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1911 SYSCTL_CHILDREN(rack_counters), 1912 
OID_AUTO, "cmp_ack_not", CTLFLAG_RD, 1913 &rack_proc_non_comp_ack, 1914 "Number of non compresseds acks that we processed"); 1915 1916 1917 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 1918 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1919 SYSCTL_CHILDREN(rack_counters), 1920 OID_AUTO, "sack_long", CTLFLAG_RD, 1921 &rack_sack_proc_all, 1922 "Total times we had to walk whole list for sack processing"); 1923 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 1924 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1925 SYSCTL_CHILDREN(rack_counters), 1926 OID_AUTO, "sack_restart", CTLFLAG_RD, 1927 &rack_sack_proc_restart, 1928 "Total times we had to walk whole list due to a restart"); 1929 rack_sack_proc_short = counter_u64_alloc(M_WAITOK); 1930 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1931 SYSCTL_CHILDREN(rack_counters), 1932 OID_AUTO, "sack_short", CTLFLAG_RD, 1933 &rack_sack_proc_short, 1934 "Total times we took shortcut for sack processing"); 1935 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK); 1936 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1937 SYSCTL_CHILDREN(rack_attack), 1938 OID_AUTO, "skipacked", CTLFLAG_RD, 1939 &rack_sack_skipped_acked, 1940 "Total number of times we skipped previously sacked"); 1941 rack_sack_splits = counter_u64_alloc(M_WAITOK); 1942 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1943 SYSCTL_CHILDREN(rack_attack), 1944 OID_AUTO, "ofsplit", CTLFLAG_RD, 1945 &rack_sack_splits, 1946 "Total number of times we did the old fashion tree split"); 1947 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 1948 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1949 SYSCTL_CHILDREN(rack_counters), 1950 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 1951 &rack_input_idle_reduces, 1952 "Total number of idle reductions on input"); 1953 rack_collapsed_win_seen = counter_u64_alloc(M_WAITOK); 1954 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1955 SYSCTL_CHILDREN(rack_counters), 1956 OID_AUTO, "collapsed_win_seen", CTLFLAG_RD, 1957 &rack_collapsed_win_seen, 1958 "Total number of collapsed window events seen (where our window shrinks)"); 1959 1960 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 1961 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1962 SYSCTL_CHILDREN(rack_counters), 1963 OID_AUTO, "collapsed_win", CTLFLAG_RD, 1964 &rack_collapsed_win, 1965 "Total number of collapsed window events where we mark packets"); 1966 rack_collapsed_win_rxt = counter_u64_alloc(M_WAITOK); 1967 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1968 SYSCTL_CHILDREN(rack_counters), 1969 OID_AUTO, "collapsed_win_rxt", CTLFLAG_RD, 1970 &rack_collapsed_win_rxt, 1971 "Total number of packets that were retransmitted"); 1972 rack_collapsed_win_rxt_bytes = counter_u64_alloc(M_WAITOK); 1973 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1974 SYSCTL_CHILDREN(rack_counters), 1975 OID_AUTO, "collapsed_win_bytes", CTLFLAG_RD, 1976 &rack_collapsed_win_rxt_bytes, 1977 "Total number of bytes that were retransmitted"); 1978 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 1979 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1980 SYSCTL_CHILDREN(rack_counters), 1981 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 1982 &rack_try_scwnd, 1983 "Total number of scwnd attempts"); 1984 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1985 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1986 OID_AUTO, "outsize", CTLFLAG_RD, 1987 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 1988 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 1989 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, 
SYSCTL_CHILDREN(rack_sysctl_root), 1990 OID_AUTO, "opts", CTLFLAG_RD, 1991 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 1992 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 1993 SYSCTL_CHILDREN(rack_sysctl_root), 1994 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1995 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 1996 } 1997 1998 static uint32_t 1999 rc_init_window(struct tcp_rack *rack) 2000 { 2001 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 2002 2003 } 2004 2005 static uint64_t 2006 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 2007 { 2008 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 2009 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 2010 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 2011 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 2012 else 2013 return (rack->r_ctl.rc_fixed_pacing_rate_ca); 2014 } 2015 2016 static void 2017 rack_log_hybrid_bw(struct tcp_rack *rack, uint32_t seq, uint64_t cbw, uint64_t tim, 2018 uint64_t data, uint8_t mod, uint16_t aux, 2019 struct tcp_sendfile_track *cur, int line) 2020 { 2021 #ifdef TCP_REQUEST_TRK 2022 int do_log = 0; 2023 2024 /* 2025 * The rate cap one is noisy and only should come out when normal BB logging 2026 * is enabled, the other logs (not RATE_CAP and NOT CAP_CALC) only come out 2027 * once per chunk and make up the BBpoint that can be turned on by the client. 2028 */ 2029 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) { 2030 /* 2031 * The very noisy two need to only come out when 2032 * we have verbose logging on. 2033 */ 2034 if (rack_verbose_logging != 0) 2035 do_log = tcp_bblogging_on(rack->rc_tp); 2036 else 2037 do_log = 0; 2038 } else if (mod != HYBRID_LOG_BW_MEASURE) { 2039 /* 2040 * All other less noisy logs here except the measure which 2041 * also needs to come out on the point and the log. 2042 */ 2043 do_log = tcp_bblogging_on(rack->rc_tp); 2044 } else { 2045 do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING); 2046 } 2047 2048 if (do_log) { 2049 union tcp_log_stackspecific log; 2050 struct timeval tv; 2051 uint64_t lt_bw; 2052 2053 /* Convert our ms to a microsecond */ 2054 memset(&log, 0, sizeof(log)); 2055 2056 log.u_bbr.cwnd_gain = line; 2057 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2058 log.u_bbr.rttProp = tim; 2059 log.u_bbr.bw_inuse = cbw; 2060 log.u_bbr.delRate = rack_get_gp_est(rack); 2061 lt_bw = rack_get_lt_bw(rack); 2062 log.u_bbr.flex1 = seq; 2063 log.u_bbr.pacing_gain = aux; 2064 /* lt_bw = < flex3 | flex2 > */ 2065 log.u_bbr.flex2 = (uint32_t)(lt_bw & 0x00000000ffffffff); 2066 log.u_bbr.flex3 = (uint32_t)((lt_bw >> 32) & 0x00000000ffffffff); 2067 /* Record the last obtained us rtt in inflight */ 2068 if (cur == NULL) { 2069 /* Make sure we are looking at the right log if an overide comes in */ 2070 cur = rack->r_ctl.rc_last_sft; 2071 } 2072 if (rack->r_ctl.rack_rs.rs_flags != RACK_RTT_EMPTY) 2073 log.u_bbr.inflight = rack->r_ctl.rack_rs.rs_us_rtt; 2074 else { 2075 /* Use the last known rtt i.e. 
the rack-rtt */ 2076 log.u_bbr.inflight = rack->rc_rack_rtt; 2077 } 2078 if (cur != NULL) { 2079 uint64_t off; 2080 2081 log.u_bbr.cur_del_rate = cur->deadline; 2082 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) { 2083 /* start = < lost | pkt_epoch > */ 2084 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); 2085 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2086 log.u_bbr.flex6 = cur->start_seq; 2087 log.u_bbr.pkts_out = cur->end_seq; 2088 } else { 2089 /* start = < lost | pkt_epoch > */ 2090 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); 2091 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2092 /* end = < pkts_out | flex6 > */ 2093 log.u_bbr.flex6 = (uint32_t)(cur->end & 0x00000000ffffffff); 2094 log.u_bbr.pkts_out = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); 2095 } 2096 /* first_send = <lt_epoch | epoch> */ 2097 log.u_bbr.epoch = (uint32_t)(cur->first_send & 0x00000000ffffffff); 2098 log.u_bbr.lt_epoch = (uint32_t)((cur->first_send >> 32) & 0x00000000ffffffff); 2099 /* localtime = <delivered | applimited>*/ 2100 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); 2101 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 2102 #ifdef TCP_REQUEST_TRK 2103 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 2104 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 2105 #endif 2106 log.u_bbr.inhpts = 1; 2107 log.u_bbr.flex4 = (uint32_t)(rack->rc_tp->t_sndbytes - cur->sent_at_fs); 2108 log.u_bbr.flex5 = (uint32_t)(rack->rc_tp->t_snd_rxt_bytes - cur->rxt_at_fs); 2109 log.u_bbr.flex7 = (uint16_t)cur->hybrid_flags; 2110 } else { 2111 log.u_bbr.flex7 = 0xffff; 2112 log.u_bbr.cur_del_rate = 0xffffffffffffffff; 2113 } 2114 /* 2115 * Compose bbr_state to be a bit wise 0000ADHF 2116 * where A is the always_pace flag 2117 * where D is the dgp_on flag 2118 * where H is the hybrid_mode on flag 2119 * where F is the use_fixed_rate flag. 2120 */ 2121 log.u_bbr.bbr_state = rack->rc_always_pace; 2122 log.u_bbr.bbr_state <<= 1; 2123 log.u_bbr.bbr_state |= rack->dgp_on; 2124 log.u_bbr.bbr_state <<= 1; 2125 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 2126 log.u_bbr.bbr_state <<= 1; 2127 log.u_bbr.bbr_state |= rack->use_fixed_rate; 2128 log.u_bbr.flex8 = mod; 2129 tcp_log_event(rack->rc_tp, NULL, 2130 &rack->rc_inp->inp_socket->so_rcv, 2131 &rack->rc_inp->inp_socket->so_snd, 2132 TCP_HYBRID_PACING_LOG, 0, 2133 0, &log, false, NULL, __func__, __LINE__, &tv); 2134 2135 } 2136 #endif 2137 } 2138 2139 #ifdef TCP_REQUEST_TRK 2140 static void 2141 rack_log_hybrid_sends(struct tcp_rack *rack, struct tcp_sendfile_track *cur, int line) 2142 { 2143 if (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) { 2144 union tcp_log_stackspecific log; 2145 struct timeval tv; 2146 uint64_t off; 2147 2148 /* Convert our ms to a microsecond */ 2149 memset(&log, 0, sizeof(log)); 2150 2151 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2152 log.u_bbr.delRate = cur->sent_at_fs; 2153 2154 if ((cur->flags & TCP_TRK_TRACK_FLG_LSND) == 0) { 2155 /* 2156 * We did not get a new Rules Applied to set so 2157 * no overlapping send occured, this means the 2158 * current byte counts are correct. 2159 */ 2160 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 2161 log.u_bbr.rttProp = rack->rc_tp->t_snd_rxt_bytes; 2162 } else { 2163 /* 2164 * Overlapping send case, we switched to a new 2165 * send and did a rules applied. 
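		 * In that case cur->sent_at_ls/rxt_at_ls hold the byte counters
		 * snapshotted at that switch, whereas the branch above logs the
		 * connection's live t_sndbytes/t_snd_rxt_bytes; either way a log
		 * consumer can subtract the *_at_fs values (also logged in delRate
		 * and bw_inuse) to approximate the bytes sent and retransmitted
		 * for this request.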
2166 */ 2167 log.u_bbr.cur_del_rate = cur->sent_at_ls; 2168 log.u_bbr.rttProp = cur->rxt_at_ls; 2169 } 2170 log.u_bbr.bw_inuse = cur->rxt_at_fs; 2171 log.u_bbr.cwnd_gain = line; 2172 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 2173 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 2174 /* start = < flex1 | flex2 > */ 2175 log.u_bbr.flex2 = (uint32_t)(cur->start & 0x00000000ffffffff); 2176 log.u_bbr.flex1 = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2177 /* end = < flex3 | flex4 > */ 2178 log.u_bbr.flex4 = (uint32_t)(cur->end & 0x00000000ffffffff); 2179 log.u_bbr.flex3 = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); 2180 2181 /* localtime = <delivered | applimited>*/ 2182 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); 2183 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 2184 /* client timestamp = <lt_epoch | epoch>*/ 2185 log.u_bbr.epoch = (uint32_t)(cur->timestamp & 0x00000000ffffffff); 2186 log.u_bbr.lt_epoch = (uint32_t)((cur->timestamp >> 32) & 0x00000000ffffffff); 2187 /* now set all the flags in */ 2188 log.u_bbr.pkts_out = cur->hybrid_flags; 2189 log.u_bbr.lost = cur->playout_ms; 2190 log.u_bbr.flex6 = cur->flags; 2191 /* 2192 * Last send time = <flex5 | pkt_epoch>. Note we do not distinguish cases 2193 * where a false retransmit occurred, so first_send <-> lastsend may 2194 * cover a longer time than it actually took if we have a false rxt. 2195 */ 2196 log.u_bbr.pkt_epoch = (uint32_t)(rack->r_ctl.last_tmit_time_acked & 0x00000000ffffffff); 2197 log.u_bbr.flex5 = (uint32_t)((rack->r_ctl.last_tmit_time_acked >> 32) & 0x00000000ffffffff); 2198 /* 2199 * Compose bbr_state to be a bitwise 0000ADHF 2200 * where A is the always_pace flag 2201 * where D is the dgp_on flag 2202 * where H is the hybrid_mode on flag 2203 * where F is the use_fixed_rate flag.
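	 * For example always_pace=1, dgp_on=1, hybrid_mode=0 and use_fixed_rate=0
	 * is logged as bbr_state = 0b1100 = 0xc.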
2204 */ 2205 log.u_bbr.bbr_state = rack->rc_always_pace; 2206 log.u_bbr.bbr_state <<= 1; 2207 log.u_bbr.bbr_state |= rack->dgp_on; 2208 log.u_bbr.bbr_state <<= 1; 2209 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 2210 log.u_bbr.bbr_state <<= 1; 2211 log.u_bbr.bbr_state |= rack->use_fixed_rate; 2212 2213 log.u_bbr.flex8 = HYBRID_LOG_SENT_LOST; 2214 tcp_log_event(rack->rc_tp, NULL, 2215 &rack->rc_inp->inp_socket->so_rcv, 2216 &rack->rc_inp->inp_socket->so_snd, 2217 TCP_HYBRID_PACING_LOG, 0, 2218 0, &log, false, NULL, __func__, __LINE__, &tv); 2219 } 2220 } 2221 #endif 2222 2223 static inline uint64_t 2224 rack_compensate_for_linerate(struct tcp_rack *rack, uint64_t bw) 2225 { 2226 uint64_t ret_bw, ether; 2227 uint64_t u_segsiz; 2228 2229 ether = rack->rc_tp->t_maxseg + sizeof(struct tcphdr); 2230 if (rack->r_is_v6){ 2231 #ifdef INET6 2232 ether += sizeof(struct ip6_hdr); 2233 #endif 2234 ether += 14; /* eheader size 6+6+2 */ 2235 } else { 2236 #ifdef INET 2237 ether += sizeof(struct ip); 2238 #endif 2239 ether += 14; /* eheader size 6+6+2 */ 2240 } 2241 u_segsiz = (uint64_t)min(ctf_fixed_maxseg(rack->rc_tp), rack->r_ctl.rc_pace_min_segs); 2242 ret_bw = bw; 2243 ret_bw *= ether; 2244 ret_bw /= u_segsiz; 2245 return (ret_bw); 2246 } 2247 2248 static void 2249 rack_rate_cap_bw(struct tcp_rack *rack, uint64_t *bw, int *capped) 2250 { 2251 #ifdef TCP_REQUEST_TRK 2252 struct timeval tv; 2253 uint64_t timenow, timeleft, lenleft, lengone, calcbw; 2254 #endif 2255 2256 if (rack->r_ctl.bw_rate_cap == 0) 2257 return; 2258 #ifdef TCP_REQUEST_TRK 2259 if (rack->rc_catch_up && rack->rc_hybrid_mode && 2260 (rack->r_ctl.rc_last_sft != NULL)) { 2261 /* 2262 * We have a dynamic cap. The original target 2263 * is in bw_rate_cap, but we need to look at 2264 * how long it is until we hit the deadline. 2265 */ 2266 struct tcp_sendfile_track *ent; 2267 2268 ent = rack->r_ctl.rc_last_sft; 2269 microuptime(&tv); 2270 timenow = tcp_tv_to_lusectick(&tv); 2271 if (timenow >= ent->deadline) { 2272 /* No time left we do DGP only */ 2273 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2274 0, 0, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__); 2275 rack->r_ctl.bw_rate_cap = 0; 2276 return; 2277 } 2278 /* We have the time */ 2279 timeleft = rack->r_ctl.rc_last_sft->deadline - timenow; 2280 if (timeleft < HPTS_MSEC_IN_SEC) { 2281 /* If there is less than a ms left just use DGPs rate */ 2282 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2283 0, timeleft, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__); 2284 rack->r_ctl.bw_rate_cap = 0; 2285 return; 2286 } 2287 /* 2288 * Now lets find the amount of data left to send. 2289 * 2290 * Now ideally we want to use the end_seq to figure out how much more 2291 * but it might not be possible (only if we have the TRACK_FG_COMP on the entry.. 2292 */ 2293 if (ent->flags & TCP_TRK_TRACK_FLG_COMP) { 2294 if (SEQ_GT(ent->end_seq, rack->rc_tp->snd_una)) 2295 lenleft = ent->end_seq - rack->rc_tp->snd_una; 2296 else { 2297 /* TSNH, we should catch it at the send */ 2298 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2299 0, timeleft, 0, HYBRID_LOG_CAPERROR, 0, ent, __LINE__); 2300 rack->r_ctl.bw_rate_cap = 0; 2301 return; 2302 } 2303 } else { 2304 /* 2305 * The hard way, figure out how much is gone and then 2306 * take that away from the total the client asked for 2307 * (thats off by tls overhead if this is tls). 
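	 * As a rough example: if the request covers 1,000,000 bytes and snd_una
	 * has advanced 400,000 bytes past start_seq, then lengone = 400,000 and
	 * lenleft = 600,000.  With 2,000,000 usec left to the deadline the code
	 * below derives calcbw = 600,000 * HPTS_USEC_IN_SEC / 2,000,000 =
	 * 300,000 bytes/sec, which rack_compensate_for_linerate() then inflates
	 * to account for Ethernet/IP/TCP header overhead.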
2308 */ 2309 if (SEQ_GT(rack->rc_tp->snd_una, ent->start_seq)) 2310 lengone = rack->rc_tp->snd_una - ent->start_seq; 2311 else 2312 lengone = 0; 2313 if (lengone < (ent->end - ent->start)) 2314 lenleft = (ent->end - ent->start) - lengone; 2315 else { 2316 /* TSNH, we should catch it at the send */ 2317 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2318 0, timeleft, lengone, HYBRID_LOG_CAPERROR, 0, ent, __LINE__); 2319 rack->r_ctl.bw_rate_cap = 0; 2320 return; 2321 } 2322 } 2323 if (lenleft == 0) { 2324 /* We have it all sent */ 2325 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2326 0, timeleft, lenleft, HYBRID_LOG_ALLSENT, 0, ent, __LINE__); 2327 if (rack->r_ctl.bw_rate_cap) 2328 goto normal_ratecap; 2329 else 2330 return; 2331 } 2332 calcbw = lenleft * HPTS_USEC_IN_SEC; 2333 calcbw /= timeleft; 2334 /* Now we must compensate for IP/TCP overhead */ 2335 calcbw = rack_compensate_for_linerate(rack, calcbw); 2336 /* Update the bit rate cap */ 2337 rack->r_ctl.bw_rate_cap = calcbw; 2338 if ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && 2339 (rack_hybrid_allow_set_maxseg == 1) && 2340 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { 2341 /* Lets set in a smaller mss possibly here to match our rate-cap */ 2342 uint32_t orig_max; 2343 2344 orig_max = rack->r_ctl.rc_pace_max_segs; 2345 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; 2346 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, calcbw, ctf_fixed_maxseg(rack->rc_tp)); 2347 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5); 2348 } 2349 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2350 calcbw, timeleft, lenleft, HYBRID_LOG_CAP_CALC, 0, ent, __LINE__); 2351 if ((calcbw > 0) && (*bw > calcbw)) { 2352 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2353 *bw, ent->deadline, lenleft, HYBRID_LOG_RATE_CAP, 0, ent, __LINE__); 2354 *capped = 1; 2355 *bw = calcbw; 2356 } 2357 return; 2358 } 2359 normal_ratecap: 2360 #endif 2361 if ((rack->r_ctl.bw_rate_cap > 0) && (*bw > rack->r_ctl.bw_rate_cap)) { 2362 #ifdef TCP_REQUEST_TRK 2363 if (rack->rc_hybrid_mode && 2364 rack->rc_catch_up && 2365 (rack->r_ctl.rc_last_sft != NULL) && 2366 (rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && 2367 (rack_hybrid_allow_set_maxseg == 1) && 2368 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { 2369 /* Lets set in a smaller mss possibly here to match our rate-cap */ 2370 uint32_t orig_max; 2371 2372 orig_max = rack->r_ctl.rc_pace_max_segs; 2373 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; 2374 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, rack->r_ctl.bw_rate_cap, ctf_fixed_maxseg(rack->rc_tp)); 2375 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5); 2376 } 2377 #endif 2378 *capped = 1; 2379 *bw = rack->r_ctl.bw_rate_cap; 2380 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2381 *bw, 0, 0, 2382 HYBRID_LOG_RATE_CAP, 1, NULL, __LINE__); 2383 } 2384 } 2385 2386 static uint64_t 2387 rack_get_gp_est(struct tcp_rack *rack) 2388 { 2389 uint64_t bw, lt_bw, ret_bw; 2390 2391 if (rack->rc_gp_filled == 0) { 2392 /* 2393 * We have yet no b/w measurement, 2394 * if we have a user set initial bw 2395 * return it. If we don't have that and 2396 * we have an srtt, use the tcp IW (10) to 2397 * calculate a fictional b/w over the SRTT 2398 * which is more or less a guess. 
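	 * For example, with a 1448 byte maxseg the IW is about 10 * 1448 =
	 * 14,480 bytes, so over a 50,000 usec SRTT the guess works out to
	 * 14,480 * 1,000,000 / 50,000 = 289,600 bytes/sec (roughly 2.3 Mbps).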
Note 2399 * we don't use our IW from rack on purpose 2400 * so if we have like IW=30, we are not 2401 * calculating a "huge" b/w. 2402 */ 2403 uint64_t srtt; 2404 2405 if (rack->dis_lt_bw == 1) 2406 lt_bw = 0; 2407 else 2408 lt_bw = rack_get_lt_bw(rack); 2409 if (lt_bw) { 2410 /* 2411 * No goodput bw but a long-term b/w does exist 2412 * lets use that. 2413 */ 2414 ret_bw = lt_bw; 2415 goto compensate; 2416 } 2417 if (rack->r_ctl.init_rate) 2418 return (rack->r_ctl.init_rate); 2419 2420 /* Ok lets come up with the IW guess, if we have a srtt */ 2421 if (rack->rc_tp->t_srtt == 0) { 2422 /* 2423 * Go with old pacing method 2424 * i.e. burst mitigation only. 2425 */ 2426 return (0); 2427 } 2428 /* Ok lets get the initial TCP win (not racks) */ 2429 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 2430 srtt = (uint64_t)rack->rc_tp->t_srtt; 2431 bw *= (uint64_t)USECS_IN_SECOND; 2432 bw /= srtt; 2433 ret_bw = bw; 2434 goto compensate; 2435 2436 } 2437 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 2438 /* Averaging is done, we can return the value */ 2439 bw = rack->r_ctl.gp_bw; 2440 } else { 2441 /* Still doing initial average must calculate */ 2442 bw = rack->r_ctl.gp_bw / max(rack->r_ctl.num_measurements, 1); 2443 } 2444 if (rack->dis_lt_bw) { 2445 /* We are not using lt-bw */ 2446 ret_bw = bw; 2447 goto compensate; 2448 } 2449 lt_bw = rack_get_lt_bw(rack); 2450 if (lt_bw == 0) { 2451 /* If we don't have one then equate it to the gp_bw */ 2452 lt_bw = rack->r_ctl.gp_bw; 2453 } 2454 if (rack->use_lesser_lt_bw) { 2455 if (lt_bw < bw) 2456 ret_bw = lt_bw; 2457 else 2458 ret_bw = bw; 2459 } else { 2460 if (lt_bw > bw) 2461 ret_bw = lt_bw; 2462 else 2463 ret_bw = bw; 2464 } 2465 /* 2466 * Now lets compensate based on the TCP/IP overhead. Our 2467 * Goodput estimate does not include this so we must pace out 2468 * a bit faster since our pacing calculations do. The pacing 2469 * calculations use the base ETHERNET_SEGMENT_SIZE and the segsiz 2470 * we are using to do this, so we do that here in the opposite 2471 * direction as well. This means that if we are tunneled and the 2472 * segsiz is say 1200 bytes we will get quite a boost, but its 2473 * compensated for in the pacing time the opposite way. 2474 */ 2475 compensate: 2476 ret_bw = rack_compensate_for_linerate(rack, ret_bw); 2477 return(ret_bw); 2478 } 2479 2480 2481 static uint64_t 2482 rack_get_bw(struct tcp_rack *rack) 2483 { 2484 uint64_t bw; 2485 2486 if (rack->use_fixed_rate) { 2487 /* Return the fixed pacing rate */ 2488 return (rack_get_fixed_pacing_bw(rack)); 2489 } 2490 bw = rack_get_gp_est(rack); 2491 return (bw); 2492 } 2493 2494 static uint16_t 2495 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 2496 { 2497 if (rack->use_fixed_rate) { 2498 return (100); 2499 } else if (rack->in_probe_rtt && (rsm == NULL)) 2500 return (rack->r_ctl.rack_per_of_gp_probertt); 2501 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 2502 rack->r_ctl.rack_per_of_gp_rec)) { 2503 if (rsm) { 2504 /* a retransmission always use the recovery rate */ 2505 return (rack->r_ctl.rack_per_of_gp_rec); 2506 } else if (rack->rack_rec_nonrxt_use_cr) { 2507 /* Directed to use the configured rate */ 2508 goto configured_rate; 2509 } else if (rack->rack_no_prr && 2510 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 2511 /* No PRR, lets just use the b/w estimate only */ 2512 return (100); 2513 } else { 2514 /* 2515 * Here we may have a non-retransmit but we 2516 * have no overrides, so just use the recovery 2517 * rate (prr is in effect). 
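	 * Whatever percentage this function returns is applied by
	 * rack_get_output_bw() as bw_est = bw * gain / 100, so e.g. a recovery
	 * setting of 150 paces at 1.5 times the b/w estimate.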
2518 */ 2519 return (rack->r_ctl.rack_per_of_gp_rec); 2520 } 2521 } 2522 configured_rate: 2523 /* For the configured rate we look at our cwnd vs the ssthresh */ 2524 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 2525 return (rack->r_ctl.rack_per_of_gp_ss); 2526 else 2527 return (rack->r_ctl.rack_per_of_gp_ca); 2528 } 2529 2530 static void 2531 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6) 2532 { 2533 /* 2534 * Types of logs (mod value) 2535 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. 2536 * 2 = a dsack round begins, persist is reset to 16. 2537 * 3 = a dsack round ends 2538 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh 2539 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack 2540 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. 2541 */ 2542 if (tcp_bblogging_on(rack->rc_tp)) { 2543 union tcp_log_stackspecific log; 2544 struct timeval tv; 2545 2546 memset(&log, 0, sizeof(log)); 2547 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; 2548 log.u_bbr.flex1 <<= 1; 2549 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; 2550 log.u_bbr.flex1 <<= 1; 2551 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; 2552 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; 2553 log.u_bbr.flex3 = rack->r_ctl.num_dsack; 2554 log.u_bbr.flex4 = flex4; 2555 log.u_bbr.flex5 = flex5; 2556 log.u_bbr.flex6 = flex6; 2557 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; 2558 log.u_bbr.flex8 = mod; 2559 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2560 log.u_bbr.epoch = rack->r_ctl.current_round; 2561 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2562 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2563 &rack->rc_inp->inp_socket->so_rcv, 2564 &rack->rc_inp->inp_socket->so_snd, 2565 RACK_DSACK_HANDLING, 0, 2566 0, &log, false, &tv); 2567 } 2568 } 2569 2570 static void 2571 rack_log_hdwr_pacing(struct tcp_rack *rack, 2572 uint64_t rate, uint64_t hw_rate, int line, 2573 int error, uint16_t mod) 2574 { 2575 if (tcp_bblogging_on(rack->rc_tp)) { 2576 union tcp_log_stackspecific log; 2577 struct timeval tv; 2578 const struct ifnet *ifp; 2579 uint64_t ifp64; 2580 2581 memset(&log, 0, sizeof(log)); 2582 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 2583 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 2584 if (rack->r_ctl.crte) { 2585 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 2586 } else if (rack->rc_inp->inp_route.ro_nh && 2587 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 2588 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 2589 } else 2590 ifp = NULL; 2591 if (ifp) { 2592 ifp64 = (uintptr_t)ifp; 2593 log.u_bbr.flex3 = ((ifp64 >> 32) & 0x00000000ffffffff); 2594 log.u_bbr.flex4 = (ifp64 & 0x00000000ffffffff); 2595 } 2596 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2597 log.u_bbr.bw_inuse = rate; 2598 log.u_bbr.flex5 = line; 2599 log.u_bbr.flex6 = error; 2600 log.u_bbr.flex7 = mod; 2601 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 2602 log.u_bbr.flex8 = rack->use_fixed_rate; 2603 log.u_bbr.flex8 <<= 1; 2604 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 2605 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 2606 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 2607 if (rack->r_ctl.crte) 2608 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 2609 else 2610 log.u_bbr.cur_del_rate = 0; 2611 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 2612 log.u_bbr.epoch = rack->r_ctl.current_round; 2613 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2614 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2615 
&rack->rc_inp->inp_socket->so_rcv, 2616 &rack->rc_inp->inp_socket->so_snd, 2617 BBR_LOG_HDWR_PACE, 0, 2618 0, &log, false, &tv); 2619 } 2620 } 2621 2622 static uint64_t 2623 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) 2624 { 2625 /* 2626 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 2627 */ 2628 uint64_t bw_est, high_rate; 2629 uint64_t gain; 2630 2631 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2632 bw_est = bw * gain; 2633 bw_est /= (uint64_t)100; 2634 /* Never fall below the minimum (def 64kbps) */ 2635 if (bw_est < RACK_MIN_BW) 2636 bw_est = RACK_MIN_BW; 2637 if (rack->r_rack_hw_rate_caps) { 2638 /* Rate caps are in place */ 2639 if (rack->r_ctl.crte != NULL) { 2640 /* We have a hdwr rate already */ 2641 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2642 if (bw_est >= high_rate) { 2643 /* We are capping bw at the highest rate table entry */ 2644 if (rack_hw_rate_cap_per && 2645 (((high_rate * (100 + rack_hw_rate_cap_per)) / 100) < bw_est)) { 2646 rack->r_rack_hw_rate_caps = 0; 2647 goto done; 2648 } 2649 rack_log_hdwr_pacing(rack, 2650 bw_est, high_rate, __LINE__, 2651 0, 3); 2652 bw_est = high_rate; 2653 if (capped) 2654 *capped = 1; 2655 } 2656 } else if ((rack->rack_hdrw_pacing == 0) && 2657 (rack->rack_hdw_pace_ena) && 2658 (rack->rack_attempt_hdwr_pace == 0) && 2659 (rack->rc_inp->inp_route.ro_nh != NULL) && 2660 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2661 /* 2662 * Special case, we have not yet attempted hardware 2663 * pacing, and yet we may, when we do, find out if we are 2664 * above the highest rate. We need to know the maxbw for the interface 2665 * in question (if it supports ratelimiting). We get back 2666 * a 0, if the interface is not found in the RL lists. 2667 */ 2668 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2669 if (high_rate) { 2670 /* Yep, we have a rate is it above this rate? */ 2671 if (bw_est > high_rate) { 2672 bw_est = high_rate; 2673 if (capped) 2674 *capped = 1; 2675 } 2676 } 2677 } 2678 } 2679 done: 2680 return (bw_est); 2681 } 2682 2683 static void 2684 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2685 { 2686 if (tcp_bblogging_on(rack->rc_tp)) { 2687 union tcp_log_stackspecific log; 2688 struct timeval tv; 2689 2690 if ((mod != 1) && (rack_verbose_logging == 0)) { 2691 /* 2692 * We get 3 values currently for mod 2693 * 1 - We are retransmitting and this tells the reason. 2694 * 2 - We are clearing a dup-ack count. 2695 * 3 - We are incrementing a dup-ack count. 2696 * 2697 * The clear/increment are only logged 2698 * if you have BBverbose on. 
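	 * (BBverbose here means the misc.bb_verbose knob registered above, i.e.
	 * rack_verbose_logging being non-zero, in addition to BB logging being
	 * enabled on the connection.)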
2699 */ 2700 return; 2701 } 2702 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2703 log.u_bbr.flex1 = tsused; 2704 log.u_bbr.flex2 = thresh; 2705 log.u_bbr.flex3 = rsm->r_flags; 2706 log.u_bbr.flex4 = rsm->r_dupack; 2707 log.u_bbr.flex5 = rsm->r_start; 2708 log.u_bbr.flex6 = rsm->r_end; 2709 log.u_bbr.flex8 = mod; 2710 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2711 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2712 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2713 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2714 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2715 log.u_bbr.pacing_gain = rack->r_must_retran; 2716 log.u_bbr.epoch = rack->r_ctl.current_round; 2717 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2718 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2719 &rack->rc_inp->inp_socket->so_rcv, 2720 &rack->rc_inp->inp_socket->so_snd, 2721 BBR_LOG_SETTINGS_CHG, 0, 2722 0, &log, false, &tv); 2723 } 2724 } 2725 2726 static void 2727 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2728 { 2729 if (tcp_bblogging_on(rack->rc_tp)) { 2730 union tcp_log_stackspecific log; 2731 struct timeval tv; 2732 2733 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2734 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2735 log.u_bbr.flex2 = to; 2736 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2737 log.u_bbr.flex4 = slot; 2738 log.u_bbr.flex5 = rack->rc_tp->t_hpts_slot; 2739 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2740 log.u_bbr.flex7 = rack->rc_in_persist; 2741 log.u_bbr.flex8 = which; 2742 if (rack->rack_no_prr) 2743 log.u_bbr.pkts_out = 0; 2744 else 2745 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2746 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2747 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2748 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2749 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2750 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2751 log.u_bbr.pacing_gain = rack->r_must_retran; 2752 log.u_bbr.cwnd_gain = rack->rack_deferred_inited; 2753 log.u_bbr.pkt_epoch = rack->rc_has_collapsed; 2754 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2755 log.u_bbr.lost = rack_rto_min; 2756 log.u_bbr.epoch = rack->r_ctl.roundends; 2757 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2758 log.u_bbr.bw_inuse <<= 32; 2759 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2760 log.u_bbr.applimited = rack->rc_tp->t_flags2; 2761 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2762 &rack->rc_inp->inp_socket->so_rcv, 2763 &rack->rc_inp->inp_socket->so_snd, 2764 BBR_LOG_TIMERSTAR, 0, 2765 0, &log, false, &tv); 2766 } 2767 } 2768 2769 static void 2770 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2771 { 2772 if (tcp_bblogging_on(rack->rc_tp)) { 2773 union tcp_log_stackspecific log; 2774 struct timeval tv; 2775 2776 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2777 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2778 log.u_bbr.flex8 = to_num; 2779 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2780 log.u_bbr.flex2 = rack->rc_rack_rtt; 2781 if (rsm == NULL) 2782 log.u_bbr.flex3 = 0; 2783 else 2784 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2785 if (rack->rack_no_prr) 2786 log.u_bbr.flex5 = 0; 2787 else 2788 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2789 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2790 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2791 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2792 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2793 
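		/*
		 * As in the other loggers, bw_inuse below packs two 32 bit values
		 * into one 64 bit field:
		 * bw_inuse = ((uint64_t)current_round << 32) | rc_considered_lost.
		 */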
log.u_bbr.pacing_gain = rack->r_must_retran; 2794 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2795 log.u_bbr.bw_inuse <<= 32; 2796 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2797 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2798 &rack->rc_inp->inp_socket->so_rcv, 2799 &rack->rc_inp->inp_socket->so_snd, 2800 BBR_LOG_RTO, 0, 2801 0, &log, false, &tv); 2802 } 2803 } 2804 2805 static void 2806 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, 2807 struct rack_sendmap *prev, 2808 struct rack_sendmap *rsm, 2809 struct rack_sendmap *next, 2810 int flag, uint32_t th_ack, int line) 2811 { 2812 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2813 union tcp_log_stackspecific log; 2814 struct timeval tv; 2815 2816 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2817 log.u_bbr.flex8 = flag; 2818 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2819 log.u_bbr.cur_del_rate = (uintptr_t)prev; 2820 log.u_bbr.delRate = (uintptr_t)rsm; 2821 log.u_bbr.rttProp = (uintptr_t)next; 2822 log.u_bbr.flex7 = 0; 2823 if (prev) { 2824 log.u_bbr.flex1 = prev->r_start; 2825 log.u_bbr.flex2 = prev->r_end; 2826 log.u_bbr.flex7 |= 0x4; 2827 } 2828 if (rsm) { 2829 log.u_bbr.flex3 = rsm->r_start; 2830 log.u_bbr.flex4 = rsm->r_end; 2831 log.u_bbr.flex7 |= 0x2; 2832 } 2833 if (next) { 2834 log.u_bbr.flex5 = next->r_start; 2835 log.u_bbr.flex6 = next->r_end; 2836 log.u_bbr.flex7 |= 0x1; 2837 } 2838 log.u_bbr.applimited = line; 2839 log.u_bbr.pkts_out = th_ack; 2840 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2841 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2842 if (rack->rack_no_prr) 2843 log.u_bbr.lost = 0; 2844 else 2845 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2846 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2847 log.u_bbr.bw_inuse <<= 32; 2848 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2849 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2850 &rack->rc_inp->inp_socket->so_rcv, 2851 &rack->rc_inp->inp_socket->so_snd, 2852 TCP_LOG_MAPCHG, 0, 2853 0, &log, false, &tv); 2854 } 2855 } 2856 2857 static void 2858 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2859 struct rack_sendmap *rsm, int conf) 2860 { 2861 if (tcp_bblogging_on(tp)) { 2862 union tcp_log_stackspecific log; 2863 struct timeval tv; 2864 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2865 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2866 log.u_bbr.flex1 = t; 2867 log.u_bbr.flex2 = len; 2868 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2869 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2870 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2871 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2872 log.u_bbr.flex7 = conf; 2873 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2874 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2875 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2876 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2877 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2878 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2879 if (rsm) { 2880 log.u_bbr.pkt_epoch = rsm->r_start; 2881 log.u_bbr.lost = rsm->r_end; 2882 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2883 /* We loose any upper of the 24 bits */ 2884 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; 2885 } else { 2886 /* Its a SYN */ 2887 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2888 log.u_bbr.lost = 0; 2889 log.u_bbr.cwnd_gain = 0; 2890 log.u_bbr.pacing_gain = 0; 2891 } 2892 /* Write out general bits of interest rrs here */ 2893 log.u_bbr.use_lt_bw = 
rack->rc_highly_buffered; 2894 log.u_bbr.use_lt_bw <<= 1; 2895 log.u_bbr.use_lt_bw |= rack->forced_ack; 2896 log.u_bbr.use_lt_bw <<= 1; 2897 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2898 log.u_bbr.use_lt_bw <<= 1; 2899 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2900 log.u_bbr.use_lt_bw <<= 1; 2901 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2902 log.u_bbr.use_lt_bw <<= 1; 2903 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 2904 log.u_bbr.use_lt_bw <<= 1; 2905 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2906 log.u_bbr.use_lt_bw <<= 1; 2907 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; 2908 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; 2909 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; 2910 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; 2911 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; 2912 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; 2913 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 2914 log.u_bbr.bw_inuse <<= 32; 2915 if (rsm) 2916 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 2917 TCP_LOG_EVENTP(tp, NULL, 2918 &rack->rc_inp->inp_socket->so_rcv, 2919 &rack->rc_inp->inp_socket->so_snd, 2920 BBR_LOG_BBRRTT, 0, 2921 0, &log, false, &tv); 2922 2923 2924 } 2925 } 2926 2927 static void 2928 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 2929 { 2930 /* 2931 * Log the rtt sample we are 2932 * applying to the srtt algorithm in 2933 * useconds. 2934 */ 2935 if (tcp_bblogging_on(rack->rc_tp)) { 2936 union tcp_log_stackspecific log; 2937 struct timeval tv; 2938 2939 /* Convert our ms to a microsecond */ 2940 memset(&log, 0, sizeof(log)); 2941 log.u_bbr.flex1 = rtt; 2942 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2943 log.u_bbr.flex7 = 1; 2944 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2945 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2946 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2947 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2948 log.u_bbr.pacing_gain = rack->r_must_retran; 2949 /* 2950 * We capture in delRate the upper 32 bits as 2951 * the confidence level we had declared, and the 2952 * lower 32 bits as the actual RTT using the arrival 2953 * timestamp. 
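* In other words the packed value below is
* ((uint64_t)rack_rs.confidence << 32) | rack_rs.rs_us_rtt,
* so a reader of the BB log can split it back apart with a
* 32-bit shift and mask.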
2954 */ 2955 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 2956 log.u_bbr.delRate <<= 32; 2957 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 2958 /* Lets capture all the things that make up t_rtxcur */ 2959 log.u_bbr.applimited = rack_rto_min; 2960 log.u_bbr.epoch = rack_rto_max; 2961 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 2962 log.u_bbr.lost = rack_rto_min; 2963 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 2964 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 2965 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 2966 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 2967 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 2968 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2969 &rack->rc_inp->inp_socket->so_rcv, 2970 &rack->rc_inp->inp_socket->so_snd, 2971 TCP_LOG_RTT, 0, 2972 0, &log, false, &tv); 2973 } 2974 } 2975 2976 static void 2977 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 2978 { 2979 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2980 union tcp_log_stackspecific log; 2981 struct timeval tv; 2982 2983 /* Convert our ms to a microsecond */ 2984 memset(&log, 0, sizeof(log)); 2985 log.u_bbr.flex1 = rtt; 2986 log.u_bbr.flex2 = send_time; 2987 log.u_bbr.flex3 = ack_time; 2988 log.u_bbr.flex4 = where; 2989 log.u_bbr.flex7 = 2; 2990 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2991 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2992 log.u_bbr.bw_inuse <<= 32; 2993 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2994 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2995 &rack->rc_inp->inp_socket->so_rcv, 2996 &rack->rc_inp->inp_socket->so_snd, 2997 TCP_LOG_RTT, 0, 2998 0, &log, false, &tv); 2999 } 3000 } 3001 3002 3003 static void 3004 rack_log_rtt_sendmap(struct tcp_rack *rack, uint32_t idx, uint64_t tsv, uint32_t tsecho) 3005 { 3006 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3007 union tcp_log_stackspecific log; 3008 struct timeval tv; 3009 3010 /* Convert our ms to a microsecond */ 3011 memset(&log, 0, sizeof(log)); 3012 log.u_bbr.flex1 = idx; 3013 log.u_bbr.flex2 = rack_ts_to_msec(tsv); 3014 log.u_bbr.flex3 = tsecho; 3015 log.u_bbr.flex7 = 3; 3016 log.u_bbr.rttProp = tsv; 3017 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3018 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3019 log.u_bbr.bw_inuse <<= 32; 3020 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3021 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3022 &rack->rc_inp->inp_socket->so_rcv, 3023 &rack->rc_inp->inp_socket->so_snd, 3024 TCP_LOG_RTT, 0, 3025 0, &log, false, &tv); 3026 } 3027 } 3028 3029 3030 static inline void 3031 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 3032 { 3033 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3034 union tcp_log_stackspecific log; 3035 struct timeval tv; 3036 3037 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3038 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3039 log.u_bbr.flex1 = line; 3040 log.u_bbr.flex2 = tick; 3041 log.u_bbr.flex3 = tp->t_maxunacktime; 3042 log.u_bbr.flex4 = tp->t_acktime; 3043 log.u_bbr.flex8 = event; 3044 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3045 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3046 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3047 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3048 log.u_bbr.pacing_gain = rack->r_must_retran; 3049 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3050 log.u_bbr.bw_inuse <<= 32; 3051 log.u_bbr.bw_inuse |= 
rack->r_ctl.rc_considered_lost; 3052 TCP_LOG_EVENTP(tp, NULL, 3053 &rack->rc_inp->inp_socket->so_rcv, 3054 &rack->rc_inp->inp_socket->so_snd, 3055 BBR_LOG_PROGRESS, 0, 3056 0, &log, false, &tv); 3057 } 3058 } 3059 3060 static void 3061 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv, int line) 3062 { 3063 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3064 union tcp_log_stackspecific log; 3065 3066 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3067 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3068 log.u_bbr.flex1 = slot; 3069 if (rack->rack_no_prr) 3070 log.u_bbr.flex2 = 0; 3071 else 3072 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 3073 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3074 log.u_bbr.flex6 = line; 3075 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 3076 log.u_bbr.flex8 = rack->rc_in_persist; 3077 log.u_bbr.timeStamp = cts; 3078 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3079 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3080 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3081 log.u_bbr.pacing_gain = rack->r_must_retran; 3082 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3083 &rack->rc_inp->inp_socket->so_rcv, 3084 &rack->rc_inp->inp_socket->so_snd, 3085 BBR_LOG_BBRSND, 0, 3086 0, &log, false, tv); 3087 } 3088 } 3089 3090 static void 3091 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 3092 { 3093 if (tcp_bblogging_on(rack->rc_tp)) { 3094 union tcp_log_stackspecific log; 3095 struct timeval tv; 3096 3097 memset(&log, 0, sizeof(log)); 3098 log.u_bbr.flex1 = did_out; 3099 log.u_bbr.flex2 = nxt_pkt; 3100 log.u_bbr.flex3 = way_out; 3101 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3102 if (rack->rack_no_prr) 3103 log.u_bbr.flex5 = 0; 3104 else 3105 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3106 log.u_bbr.flex6 = nsegs; 3107 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 3108 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 3109 log.u_bbr.flex7 <<= 1; 3110 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 3111 log.u_bbr.flex7 <<= 1; 3112 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 3113 log.u_bbr.flex8 = rack->rc_in_persist; 3114 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3115 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3116 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3117 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 3118 log.u_bbr.use_lt_bw <<= 1; 3119 log.u_bbr.use_lt_bw |= rack->r_might_revert; 3120 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3121 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3122 log.u_bbr.pacing_gain = rack->r_must_retran; 3123 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3124 log.u_bbr.bw_inuse <<= 32; 3125 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3126 log.u_bbr.epoch = rack->rc_inp->inp_socket->so_snd.sb_hiwat; 3127 log.u_bbr.lt_epoch = rack->rc_inp->inp_socket->so_rcv.sb_hiwat; 3128 log.u_bbr.lost = rack->rc_tp->t_srtt; 3129 log.u_bbr.pkt_epoch = rack->rc_tp->rfbuf_cnt; 3130 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3131 &rack->rc_inp->inp_socket->so_rcv, 3132 &rack->rc_inp->inp_socket->so_snd, 3133 BBR_LOG_DOSEG_DONE, 0, 3134 0, &log, false, &tv); 3135 } 3136 } 3137 3138 static void 3139 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 3140 { 3141 if (tcp_bblogging_on(rack->rc_tp)) { 
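/*
* This log point records the current pacing sizing decision:
* flex1/flex3 carry the min and max pace segment sizes, flex4
* through flex6 the caller supplied arguments, flex7 the user
* set minimum and flex8 the caller id (frm).
*/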
3142 union tcp_log_stackspecific log; 3143 struct timeval tv; 3144 3145 memset(&log, 0, sizeof(log)); 3146 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 3147 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 3148 log.u_bbr.flex4 = arg1; 3149 log.u_bbr.flex5 = arg2; 3150 log.u_bbr.flex7 = rack->r_ctl.rc_user_set_min_segs; 3151 log.u_bbr.flex6 = arg3; 3152 log.u_bbr.flex8 = frm; 3153 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3154 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3155 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3156 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 3157 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3158 log.u_bbr.pacing_gain = rack->r_must_retran; 3159 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv, 3160 &tptosocket(tp)->so_snd, 3161 TCP_HDWR_PACE_SIZE, 0, 0, &log, false, &tv); 3162 } 3163 } 3164 3165 static void 3166 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, 3167 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 3168 { 3169 if (tcp_bblogging_on(rack->rc_tp)) { 3170 union tcp_log_stackspecific log; 3171 struct timeval tv; 3172 3173 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3174 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3175 log.u_bbr.flex1 = slot; 3176 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 3177 log.u_bbr.flex4 = reason; 3178 if (rack->rack_no_prr) 3179 log.u_bbr.flex5 = 0; 3180 else 3181 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3182 log.u_bbr.flex7 = hpts_calling; 3183 log.u_bbr.flex8 = rack->rc_in_persist; 3184 log.u_bbr.lt_epoch = cwnd_to_use; 3185 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3186 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3187 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3188 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3189 log.u_bbr.pacing_gain = rack->r_must_retran; 3190 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 3191 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3192 log.u_bbr.bw_inuse <<= 32; 3193 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3194 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3195 &rack->rc_inp->inp_socket->so_rcv, 3196 &rack->rc_inp->inp_socket->so_snd, 3197 BBR_LOG_JUSTRET, 0, 3198 tlen, &log, false, &tv); 3199 } 3200 } 3201 3202 static void 3203 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 3204 struct timeval *tv, uint32_t flags_on_entry) 3205 { 3206 if (tcp_bblogging_on(rack->rc_tp)) { 3207 union tcp_log_stackspecific log; 3208 3209 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3210 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3211 log.u_bbr.flex1 = line; 3212 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 3213 log.u_bbr.flex3 = flags_on_entry; 3214 log.u_bbr.flex4 = us_cts; 3215 if (rack->rack_no_prr) 3216 log.u_bbr.flex5 = 0; 3217 else 3218 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3219 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 3220 log.u_bbr.flex7 = hpts_removed; 3221 log.u_bbr.flex8 = 1; 3222 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 3223 log.u_bbr.timeStamp = us_cts; 3224 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3225 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3226 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3227 log.u_bbr.pacing_gain = rack->r_must_retran; 3228 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3229 log.u_bbr.bw_inuse <<= 32; 3230 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3231 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3232 
&rack->rc_inp->inp_socket->so_rcv, 3233 &rack->rc_inp->inp_socket->so_snd, 3234 BBR_LOG_TIMERCANC, 0, 3235 0, &log, false, tv); 3236 } 3237 } 3238 3239 static void 3240 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 3241 uint32_t flex1, uint32_t flex2, 3242 uint32_t flex3, uint32_t flex4, 3243 uint32_t flex5, uint32_t flex6, 3244 uint16_t flex7, uint8_t mod) 3245 { 3246 if (tcp_bblogging_on(rack->rc_tp)) { 3247 union tcp_log_stackspecific log; 3248 struct timeval tv; 3249 3250 if (mod == 1) { 3251 /* No you can't use 1, its for the real to cancel */ 3252 return; 3253 } 3254 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3255 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3256 log.u_bbr.flex1 = flex1; 3257 log.u_bbr.flex2 = flex2; 3258 log.u_bbr.flex3 = flex3; 3259 log.u_bbr.flex4 = flex4; 3260 log.u_bbr.flex5 = flex5; 3261 log.u_bbr.flex6 = flex6; 3262 log.u_bbr.flex7 = flex7; 3263 log.u_bbr.flex8 = mod; 3264 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3265 &rack->rc_inp->inp_socket->so_rcv, 3266 &rack->rc_inp->inp_socket->so_snd, 3267 BBR_LOG_TIMERCANC, 0, 3268 0, &log, false, &tv); 3269 } 3270 } 3271 3272 static void 3273 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 3274 { 3275 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3276 union tcp_log_stackspecific log; 3277 struct timeval tv; 3278 3279 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3280 log.u_bbr.flex1 = timers; 3281 log.u_bbr.flex2 = ret; 3282 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 3283 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3284 log.u_bbr.flex5 = cts; 3285 if (rack->rack_no_prr) 3286 log.u_bbr.flex6 = 0; 3287 else 3288 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; 3289 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3290 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3291 log.u_bbr.pacing_gain = rack->r_must_retran; 3292 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3293 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3294 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3295 &rack->rc_inp->inp_socket->so_rcv, 3296 &rack->rc_inp->inp_socket->so_snd, 3297 BBR_LOG_TO_PROCESS, 0, 3298 0, &log, false, &tv); 3299 } 3300 } 3301 3302 static void 3303 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line) 3304 { 3305 if (tcp_bblogging_on(rack->rc_tp)) { 3306 union tcp_log_stackspecific log; 3307 struct timeval tv; 3308 3309 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3310 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 3311 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 3312 if (rack->rack_no_prr) 3313 log.u_bbr.flex3 = 0; 3314 else 3315 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 3316 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 3317 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 3318 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 3319 log.u_bbr.flex7 = line; 3320 log.u_bbr.flex8 = frm; 3321 log.u_bbr.pkts_out = orig_cwnd; 3322 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3323 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3324 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 3325 log.u_bbr.use_lt_bw <<= 1; 3326 log.u_bbr.use_lt_bw |= rack->r_might_revert; 3327 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3328 &rack->rc_inp->inp_socket->so_rcv, 3329 &rack->rc_inp->inp_socket->so_snd, 3330 BBR_LOG_BBRUPD, 0, 3331 0, &log, false, &tv); 3332 } 3333 } 3334 3335 static void 3336 rack_counter_destroy(void) 3337 { 3338 counter_u64_free(rack_total_bytes); 3339 counter_u64_free(rack_fto_send); 3340 counter_u64_free(rack_fto_rsm_send); 3341 
counter_u64_free(rack_nfto_resend); 3342 counter_u64_free(rack_hw_pace_init_fail); 3343 counter_u64_free(rack_hw_pace_lost); 3344 counter_u64_free(rack_non_fto_send); 3345 counter_u64_free(rack_extended_rfo); 3346 counter_u64_free(rack_ack_total); 3347 counter_u64_free(rack_express_sack); 3348 counter_u64_free(rack_sack_total); 3349 counter_u64_free(rack_move_none); 3350 counter_u64_free(rack_move_some); 3351 counter_u64_free(rack_sack_attacks_detected); 3352 counter_u64_free(rack_sack_attacks_reversed); 3353 counter_u64_free(rack_sack_attacks_suspect); 3354 counter_u64_free(rack_sack_used_next_merge); 3355 counter_u64_free(rack_sack_used_prev_merge); 3356 counter_u64_free(rack_tlp_tot); 3357 counter_u64_free(rack_tlp_newdata); 3358 counter_u64_free(rack_tlp_retran); 3359 counter_u64_free(rack_tlp_retran_bytes); 3360 counter_u64_free(rack_to_tot); 3361 counter_u64_free(rack_saw_enobuf); 3362 counter_u64_free(rack_saw_enobuf_hw); 3363 counter_u64_free(rack_saw_enetunreach); 3364 counter_u64_free(rack_hot_alloc); 3365 counter_u64_free(rack_to_alloc); 3366 counter_u64_free(rack_to_alloc_hard); 3367 counter_u64_free(rack_to_alloc_emerg); 3368 counter_u64_free(rack_to_alloc_limited); 3369 counter_u64_free(rack_alloc_limited_conns); 3370 counter_u64_free(rack_split_limited); 3371 counter_u64_free(rack_multi_single_eq); 3372 counter_u64_free(rack_rxt_clamps_cwnd); 3373 counter_u64_free(rack_rxt_clamps_cwnd_uniq); 3374 counter_u64_free(rack_proc_non_comp_ack); 3375 counter_u64_free(rack_sack_proc_all); 3376 counter_u64_free(rack_sack_proc_restart); 3377 counter_u64_free(rack_sack_proc_short); 3378 counter_u64_free(rack_sack_skipped_acked); 3379 counter_u64_free(rack_sack_splits); 3380 counter_u64_free(rack_input_idle_reduces); 3381 counter_u64_free(rack_collapsed_win); 3382 counter_u64_free(rack_collapsed_win_rxt); 3383 counter_u64_free(rack_collapsed_win_rxt_bytes); 3384 counter_u64_free(rack_collapsed_win_seen); 3385 counter_u64_free(rack_try_scwnd); 3386 counter_u64_free(rack_persists_sends); 3387 counter_u64_free(rack_persists_acks); 3388 counter_u64_free(rack_persists_loss); 3389 counter_u64_free(rack_persists_lost_ends); 3390 #ifdef INVARIANTS 3391 counter_u64_free(rack_adjust_map_bw); 3392 #endif 3393 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 3394 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 3395 } 3396 3397 static struct rack_sendmap * 3398 rack_alloc(struct tcp_rack *rack) 3399 { 3400 struct rack_sendmap *rsm; 3401 3402 /* 3403 * First get the top of the list it in 3404 * theory is the "hottest" rsm we have, 3405 * possibly just freed by ack processing. 3406 */ 3407 if (rack->rc_free_cnt > rack_free_cache) { 3408 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 3409 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3410 counter_u64_add(rack_hot_alloc, 1); 3411 rack->rc_free_cnt--; 3412 return (rsm); 3413 } 3414 /* 3415 * Once we get under our free cache we probably 3416 * no longer have a "hot" one available. Lets 3417 * get one from UMA. 3418 */ 3419 rsm = uma_zalloc(rack_zone, M_NOWAIT); 3420 if (rsm) { 3421 rack->r_ctl.rc_num_maps_alloced++; 3422 counter_u64_add(rack_to_alloc, 1); 3423 return (rsm); 3424 } 3425 /* 3426 * Dig in to our aux rsm's (the last two) since 3427 * UMA failed to get us one. 
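* To summarize the allocation order: (1) a "hot" entry off the
* free list while we hold more than rack_free_cache of them,
* (2) a fresh allocation from the UMA zone, and finally
* (3) an emergency grab from the small reserve kept on the
* free list, counted below via rack_to_alloc_emerg.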
3428 */ 3429 if (rack->rc_free_cnt) { 3430 counter_u64_add(rack_to_alloc_emerg, 1); 3431 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 3432 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3433 rack->rc_free_cnt--; 3434 return (rsm); 3435 } 3436 return (NULL); 3437 } 3438 3439 static struct rack_sendmap * 3440 rack_alloc_full_limit(struct tcp_rack *rack) 3441 { 3442 if ((V_tcp_map_entries_limit > 0) && 3443 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 3444 counter_u64_add(rack_to_alloc_limited, 1); 3445 if (!rack->alloc_limit_reported) { 3446 rack->alloc_limit_reported = 1; 3447 counter_u64_add(rack_alloc_limited_conns, 1); 3448 } 3449 return (NULL); 3450 } 3451 return (rack_alloc(rack)); 3452 } 3453 3454 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 3455 static struct rack_sendmap * 3456 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 3457 { 3458 struct rack_sendmap *rsm; 3459 3460 if (limit_type) { 3461 /* currently there is only one limit type */ 3462 if (rack->r_ctl.rc_split_limit > 0 && 3463 rack->r_ctl.rc_num_split_allocs >= rack->r_ctl.rc_split_limit) { 3464 counter_u64_add(rack_split_limited, 1); 3465 if (!rack->alloc_limit_reported) { 3466 rack->alloc_limit_reported = 1; 3467 counter_u64_add(rack_alloc_limited_conns, 1); 3468 } 3469 return (NULL); 3470 } 3471 } 3472 3473 /* allocate and mark in the limit type, if set */ 3474 rsm = rack_alloc(rack); 3475 if (rsm != NULL && limit_type) { 3476 rsm->r_limit_type = limit_type; 3477 rack->r_ctl.rc_num_split_allocs++; 3478 } 3479 return (rsm); 3480 } 3481 3482 static void 3483 rack_free_trim(struct tcp_rack *rack) 3484 { 3485 struct rack_sendmap *rsm; 3486 3487 /* 3488 * Free up all the tail entries until 3489 * we get our list down to the limit. 
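* Entries beyond rack_free_cache are handed back to the UMA
* zone and rc_num_maps_alloced is decremented for each one, so
* the cached reserve never grows without bound.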
3490 */ 3491 while (rack->rc_free_cnt > rack_free_cache) { 3492 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); 3493 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3494 rack->rc_free_cnt--; 3495 rack->r_ctl.rc_num_maps_alloced--; 3496 uma_zfree(rack_zone, rsm); 3497 } 3498 } 3499 3500 static void 3501 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 3502 { 3503 if (rsm->r_flags & RACK_APP_LIMITED) { 3504 if (rack->r_ctl.rc_app_limited_cnt > 0) { 3505 rack->r_ctl.rc_app_limited_cnt--; 3506 } 3507 } 3508 if (rsm->r_limit_type) { 3509 /* currently there is only one limit type */ 3510 rack->r_ctl.rc_num_split_allocs--; 3511 } 3512 if (rsm == rack->r_ctl.rc_first_appl) { 3513 rack->r_ctl.cleared_app_ack_seq = rsm->r_start + (rsm->r_end - rsm->r_start); 3514 rack->r_ctl.cleared_app_ack = 1; 3515 if (rack->r_ctl.rc_app_limited_cnt == 0) 3516 rack->r_ctl.rc_first_appl = NULL; 3517 else 3518 rack->r_ctl.rc_first_appl = tqhash_find(rack->r_ctl.tqh, rsm->r_nseq_appl); 3519 } 3520 if (rsm == rack->r_ctl.rc_resend) 3521 rack->r_ctl.rc_resend = NULL; 3522 if (rsm == rack->r_ctl.rc_end_appl) 3523 rack->r_ctl.rc_end_appl = NULL; 3524 if (rack->r_ctl.rc_tlpsend == rsm) 3525 rack->r_ctl.rc_tlpsend = NULL; 3526 if (rack->r_ctl.rc_sacklast == rsm) 3527 rack->r_ctl.rc_sacklast = NULL; 3528 memset(rsm, 0, sizeof(struct rack_sendmap)); 3529 /* Make sure we are not going to overrun our count limit of 0xff */ 3530 if ((rack->rc_free_cnt + 1) > RACK_FREE_CNT_MAX) { 3531 rack_free_trim(rack); 3532 } 3533 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); 3534 rack->rc_free_cnt++; 3535 } 3536 3537 static uint32_t 3538 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack) 3539 { 3540 uint64_t srtt, bw, len, tim; 3541 uint32_t segsiz, def_len, minl; 3542 3543 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3544 def_len = rack_def_data_window * segsiz; 3545 if (rack->rc_gp_filled == 0) { 3546 /* 3547 * We have no measurement (IW is in flight?) so 3548 * we can only guess using our data_window sysctl 3549 * value (usually 20MSS). 3550 */ 3551 return (def_len); 3552 } 3553 /* 3554 * Now we have a number of factors to consider. 3555 * 3556 * 1) We have a desired BDP which is usually 3557 * at least 2. 3558 * 2) We have a minimum number of rtt's usually 1 SRTT 3559 * but we allow it too to be more. 3560 * 3) We want to make sure a measurement last N useconds (if 3561 * we have set rack_min_measure_usec. 3562 * 3563 * We handle the first concern here by trying to create a data 3564 * window of max(rack_def_data_window, DesiredBDP). The 3565 * second concern we handle in not letting the measurement 3566 * window end normally until at least the required SRTT's 3567 * have gone by which is done further below in 3568 * rack_enough_for_measurement(). Finally the third concern 3569 * we also handle here by calculating how long that time 3570 * would take at the current BW and then return the 3571 * max of our first calculation and that length. Note 3572 * that if rack_min_measure_usec is 0, we don't deal 3573 * with concern 3. Also for both Concern 1 and 3 an 3574 * application limited period could end the measurement 3575 * earlier. 3576 * 3577 * So lets calculate the BDP with the "known" b/w using 3578 * the SRTT has our rtt and then multiply it by the 3579 * goal. 
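* As a purely illustrative example: with a measured b/w of
* 1,250,000 bytes/sec (10 Mb/s), an SRTT of 100,000 usecs and
* a goal BDP of 2, len = (1250000 * 100000) / 1000000 * 2 =
* 250,000 bytes, which rounds up to 250,504 bytes at a segsiz
* of 1448; being larger than the default 20 segment window,
* that is what the code below would hand back (absent a larger
* rack_min_measure_usec floor).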
3580 */ 3581 bw = rack_get_bw(rack); 3582 srtt = (uint64_t)tp->t_srtt; 3583 len = bw * srtt; 3584 len /= (uint64_t)HPTS_USEC_IN_SEC; 3585 len *= max(1, rack_goal_bdp); 3586 /* Now we need to round up to the nearest MSS */ 3587 len = roundup(len, segsiz); 3588 if (rack_min_measure_usec) { 3589 /* Now calculate our min length for this b/w */ 3590 tim = rack_min_measure_usec; 3591 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC; 3592 if (minl == 0) 3593 minl = 1; 3594 minl = roundup(minl, segsiz); 3595 if (len < minl) 3596 len = minl; 3597 } 3598 /* 3599 * Now if we have a very small window we want 3600 * to attempt to get the window that is 3601 * as small as possible. This happens on 3602 * low b/w connections and we don't want to 3603 * span huge numbers of rtt's between measurements. 3604 * 3605 * We basically include 2 over our "MIN window" so 3606 * that the measurement can be shortened (possibly) by 3607 * an ack'ed packet. 3608 */ 3609 if (len < def_len) 3610 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz))); 3611 else 3612 return (max((uint32_t)len, def_len)); 3613 3614 } 3615 3616 static int 3617 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality) 3618 { 3619 uint32_t tim, srtts, segsiz; 3620 3621 /* 3622 * Has enough time passed for the GP measurement to be valid? 3623 */ 3624 if (SEQ_LT(th_ack, tp->gput_seq)) { 3625 /* Not enough bytes yet */ 3626 return (0); 3627 } 3628 if ((tp->snd_max == tp->snd_una) || 3629 (th_ack == tp->snd_max)){ 3630 /* 3631 * All is acked quality of all acked is 3632 * usually low or medium, but we in theory could split 3633 * all acked into two cases, where you got 3634 * a signifigant amount of your window and 3635 * where you did not. For now we leave it 3636 * but it is something to contemplate in the 3637 * future. The danger here is that delayed ack 3638 * is effecting the last byte (which is a 50:50 chance). 3639 */ 3640 *quality = RACK_QUALITY_ALLACKED; 3641 return (1); 3642 } 3643 if (SEQ_GEQ(th_ack, tp->gput_ack)) { 3644 /* 3645 * We obtained our entire window of data we wanted 3646 * no matter if we are in recovery or not then 3647 * its ok since expanding the window does not 3648 * make things fuzzy (or at least not as much). 3649 */ 3650 *quality = RACK_QUALITY_HIGH; 3651 return (1); 3652 } 3653 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3654 if (SEQ_LT(th_ack, tp->gput_ack) && 3655 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 3656 /* Not enough bytes yet */ 3657 return (0); 3658 } 3659 if (rack->r_ctl.rc_first_appl && 3660 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { 3661 /* 3662 * We are up to the app limited send point 3663 * we have to measure irrespective of the time.. 3664 */ 3665 *quality = RACK_QUALITY_APPLIMITED; 3666 return (1); 3667 } 3668 /* Now what about time? */ 3669 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); 3670 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; 3671 if ((tim >= srtts) && (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) { 3672 /* 3673 * We do not allow a measurement if we are in recovery 3674 * that would shrink the goodput window we wanted. 3675 * This is to prevent cloudyness of when the last send 3676 * was actually made. 
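* Put differently, the time based acceptance here requires
* both that at least rack_min_srtts worth of rc_gp_srtt have
* elapsed since the measurement began (gput_ts) and that we
* are not in recovery; when either part fails we fall through
* and report that the window is not yet complete.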
3677 */ 3678 *quality = RACK_QUALITY_HIGH; 3679 return (1); 3680 } 3681 /* Nope not even a full SRTT has passed */ 3682 return (0); 3683 } 3684 3685 static void 3686 rack_log_timely(struct tcp_rack *rack, 3687 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd, 3688 uint64_t up_bnd, int line, uint8_t method) 3689 { 3690 if (tcp_bblogging_on(rack->rc_tp)) { 3691 union tcp_log_stackspecific log; 3692 struct timeval tv; 3693 3694 memset(&log, 0, sizeof(log)); 3695 log.u_bbr.flex1 = logged; 3696 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; 3697 log.u_bbr.flex2 <<= 4; 3698 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; 3699 log.u_bbr.flex2 <<= 4; 3700 log.u_bbr.flex2 |= rack->rc_gp_incr; 3701 log.u_bbr.flex2 <<= 4; 3702 log.u_bbr.flex2 |= rack->rc_gp_bwred; 3703 log.u_bbr.flex3 = rack->rc_gp_incr; 3704 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3705 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; 3706 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; 3707 log.u_bbr.flex7 = rack->rc_gp_bwred; 3708 log.u_bbr.flex8 = method; 3709 log.u_bbr.cur_del_rate = cur_bw; 3710 log.u_bbr.delRate = low_bnd; 3711 log.u_bbr.bw_inuse = up_bnd; 3712 log.u_bbr.rttProp = rack_get_bw(rack); 3713 log.u_bbr.pkt_epoch = line; 3714 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3715 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3716 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3717 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3718 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3719 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; 3720 log.u_bbr.cwnd_gain <<= 1; 3721 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; 3722 log.u_bbr.cwnd_gain <<= 1; 3723 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 3724 log.u_bbr.cwnd_gain <<= 1; 3725 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 3726 log.u_bbr.lost = rack->r_ctl.rc_loss_count; 3727 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3728 &rack->rc_inp->inp_socket->so_rcv, 3729 &rack->rc_inp->inp_socket->so_snd, 3730 TCP_TIMELY_WORK, 0, 3731 0, &log, false, &tv); 3732 } 3733 } 3734 3735 static int 3736 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) 3737 { 3738 /* 3739 * Before we increase we need to know if 3740 * the estimate just made was less than 3741 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est) 3742 * 3743 * If we already are pacing at a fast enough 3744 * rate to push us faster there is no sense of 3745 * increasing. 3746 * 3747 * We first caculate our actual pacing rate (ss or ca multiplier 3748 * times our cur_bw). 3749 * 3750 * Then we take the last measured rate and multipy by our 3751 * maximum pacing overage to give us a max allowable rate. 3752 * 3753 * If our act_rate is smaller than our max_allowable rate 3754 * then we should increase. Else we should hold steady. 3755 * 3756 */ 3757 uint64_t act_rate, max_allow_rate; 3758 3759 if (rack_timely_no_stopping) 3760 return (1); 3761 3762 if ((cur_bw == 0) || (last_bw_est == 0)) { 3763 /* 3764 * Initial startup case or 3765 * everything is acked case. 3766 */ 3767 rack_log_timely(rack, mult, cur_bw, 0, 0, 3768 __LINE__, 9); 3769 return (1); 3770 } 3771 if (mult <= 100) { 3772 /* 3773 * We can always pace at or slightly above our rate. 
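* (For multipliers above 100 the check further down would, for
* example with a cur_bw of 1,000,000 bytes/sec, a mult of 150
* and a last_bw_est of 1,400,000 bytes/sec, compare an
* act_rate of 1,500,000 against a max_allow_rate of 1,540,000
* when rack_max_per_above is the nominal 10, and still permit
* the raise.)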
3774 */ 3775 rack_log_timely(rack, mult, cur_bw, 0, 0, 3776 __LINE__, 9); 3777 return (1); 3778 } 3779 act_rate = cur_bw * (uint64_t)mult; 3780 act_rate /= 100; 3781 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100); 3782 max_allow_rate /= 100; 3783 if (act_rate < max_allow_rate) { 3784 /* 3785 * Here the rate we are actually pacing at 3786 * is smaller than 10% above our last measurement. 3787 * This means we are pacing below what we would 3788 * like to try to achieve (plus some wiggle room). 3789 */ 3790 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3791 __LINE__, 9); 3792 return (1); 3793 } else { 3794 /* 3795 * Here we are already pacing at least rack_max_per_above(10%) 3796 * what we are getting back. This indicates most likely 3797 * that we are being limited (cwnd/rwnd/app) and can't 3798 * get any more b/w. There is no sense of trying to 3799 * raise up the pacing rate its not speeding us up 3800 * and we already are pacing faster than we are getting. 3801 */ 3802 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3803 __LINE__, 8); 3804 return (0); 3805 } 3806 } 3807 3808 static void 3809 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack) 3810 { 3811 /* 3812 * When we drag bottom, we want to assure 3813 * that no multiplier is below 1.0, if so 3814 * we want to restore it to at least that. 3815 */ 3816 if (rack->r_ctl.rack_per_of_gp_rec < 100) { 3817 /* This is unlikely we usually do not touch recovery */ 3818 rack->r_ctl.rack_per_of_gp_rec = 100; 3819 } 3820 if (rack->r_ctl.rack_per_of_gp_ca < 100) { 3821 rack->r_ctl.rack_per_of_gp_ca = 100; 3822 } 3823 if (rack->r_ctl.rack_per_of_gp_ss < 100) { 3824 rack->r_ctl.rack_per_of_gp_ss = 100; 3825 } 3826 } 3827 3828 static void 3829 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack) 3830 { 3831 if (rack->r_ctl.rack_per_of_gp_ca > 100) { 3832 rack->r_ctl.rack_per_of_gp_ca = 100; 3833 } 3834 if (rack->r_ctl.rack_per_of_gp_ss > 100) { 3835 rack->r_ctl.rack_per_of_gp_ss = 100; 3836 } 3837 } 3838 3839 static void 3840 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override) 3841 { 3842 int32_t calc, logged, plus; 3843 3844 logged = 0; 3845 3846 if (rack->rc_skip_timely) 3847 return; 3848 if (override) { 3849 /* 3850 * override is passed when we are 3851 * loosing b/w and making one last 3852 * gasp at trying to not loose out 3853 * to a new-reno flow. 3854 */ 3855 goto extra_boost; 3856 } 3857 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */ 3858 if (rack->rc_gp_incr && 3859 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { 3860 /* 3861 * Reset and get 5 strokes more before the boost. Note 3862 * that the count is 0 based so we have to add one. 
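* The boost itself simply scales the per-step increase, i.e.
* plus becomes rack_gp_increase_per * RACK_TIMELY_CNT_BOOST
* rather than the single step applied on a normal pass.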
3863 */ 3864 extra_boost: 3865 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3866 rack->rc_gp_timely_inc_cnt = 0; 3867 } else 3868 plus = (uint32_t)rack_gp_increase_per; 3869 /* Must be at least 1% increase for true timely increases */ 3870 if ((plus < 1) && 3871 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3872 plus = 1; 3873 if (rack->rc_gp_saw_rec && 3874 (rack->rc_gp_no_rec_chg == 0) && 3875 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3876 rack->r_ctl.rack_per_of_gp_rec)) { 3877 /* We have been in recovery ding it too */ 3878 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 3879 if (calc > 0xffff) 3880 calc = 0xffff; 3881 logged |= 1; 3882 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 3883 if (rack->r_ctl.rack_per_upper_bound_ca && 3884 (rack->rc_dragged_bottom == 0) && 3885 (rack->r_ctl.rack_per_of_gp_rec > rack->r_ctl.rack_per_upper_bound_ca)) 3886 rack->r_ctl.rack_per_of_gp_rec = rack->r_ctl.rack_per_upper_bound_ca; 3887 } 3888 if (rack->rc_gp_saw_ca && 3889 (rack->rc_gp_saw_ss == 0) && 3890 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3891 rack->r_ctl.rack_per_of_gp_ca)) { 3892 /* In CA */ 3893 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 3894 if (calc > 0xffff) 3895 calc = 0xffff; 3896 logged |= 2; 3897 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 3898 if (rack->r_ctl.rack_per_upper_bound_ca && 3899 (rack->rc_dragged_bottom == 0) && 3900 (rack->r_ctl.rack_per_of_gp_ca > rack->r_ctl.rack_per_upper_bound_ca)) 3901 rack->r_ctl.rack_per_of_gp_ca = rack->r_ctl.rack_per_upper_bound_ca; 3902 } 3903 if (rack->rc_gp_saw_ss && 3904 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3905 rack->r_ctl.rack_per_of_gp_ss)) { 3906 /* In SS */ 3907 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 3908 if (calc > 0xffff) 3909 calc = 0xffff; 3910 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 3911 if (rack->r_ctl.rack_per_upper_bound_ss && 3912 (rack->rc_dragged_bottom == 0) && 3913 (rack->r_ctl.rack_per_of_gp_ss > rack->r_ctl.rack_per_upper_bound_ss)) 3914 rack->r_ctl.rack_per_of_gp_ss = rack->r_ctl.rack_per_upper_bound_ss; 3915 logged |= 4; 3916 } 3917 if (logged && 3918 (rack->rc_gp_incr == 0)){ 3919 /* Go into increment mode */ 3920 rack->rc_gp_incr = 1; 3921 rack->rc_gp_timely_inc_cnt = 0; 3922 } 3923 if (rack->rc_gp_incr && 3924 logged && 3925 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 3926 rack->rc_gp_timely_inc_cnt++; 3927 } 3928 rack_log_timely(rack, logged, plus, 0, 0, 3929 __LINE__, 1); 3930 } 3931 3932 static uint32_t 3933 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 3934 { 3935 /*- 3936 * norm_grad = rtt_diff / minrtt; 3937 * new_per = curper * (1 - B * norm_grad) 3938 * 3939 * B = rack_gp_decrease_per (default 80%) 3940 * rtt_dif = input var current rtt-diff 3941 * curper = input var current percentage 3942 * minrtt = from rack filter 3943 * 3944 * In order to do the floating point calculations above we 3945 * do an integer conversion. 
The code looks confusing so let me 3946 * translate it into something that use more variables and 3947 * is clearer for us humans :) 3948 * 3949 * uint64_t norm_grad, inverse, reduce_by, final_result; 3950 * uint32_t perf; 3951 * 3952 * norm_grad = (((uint64_t)rtt_diff * 1000000) / 3953 * (uint64_t)get_filter_small(&rack->r_ctl.rc_gp_min_rtt)); 3954 * inverse = ((uint64_t)rack_gp_decrease * (uint64_t)1000000) * norm_grad; 3955 * inverse /= 1000000; 3956 * reduce_by = (1000000 - inverse); 3957 * final_result = (cur_per * reduce_by) / 1000000; 3958 * perf = (uint32_t)final_result; 3959 */ 3960 uint64_t perf; 3961 3962 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3963 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 3964 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 3965 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 3966 (uint64_t)1000000)) / 3967 (uint64_t)1000000); 3968 if (perf > curper) { 3969 /* TSNH */ 3970 perf = curper - 1; 3971 } 3972 return ((uint32_t)perf); 3973 } 3974 3975 static uint32_t 3976 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 3977 { 3978 /* 3979 * highrttthresh 3980 * result = curper * (1 - (B * ( 1 - ------ )) 3981 * gp_srtt 3982 * 3983 * B = rack_gp_decrease_per (default .8 i.e. 80) 3984 * highrttthresh = filter_min * rack_gp_rtt_maxmul 3985 */ 3986 uint64_t perf; 3987 uint32_t highrttthresh; 3988 3989 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 3990 3991 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3992 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 3993 ((uint64_t)highrttthresh * (uint64_t)1000000) / 3994 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 3995 if (tcp_bblogging_on(rack->rc_tp)) { 3996 uint64_t log1; 3997 3998 log1 = rtt; 3999 log1 <<= 32; 4000 log1 |= highrttthresh; 4001 rack_log_timely(rack, 4002 rack_gp_decrease_per, 4003 (uint64_t)curper, 4004 log1, 4005 perf, 4006 __LINE__, 4007 15); 4008 } 4009 return (perf); 4010 } 4011 4012 static void 4013 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 4014 { 4015 uint64_t logvar, logvar2, logvar3; 4016 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 4017 4018 if (rack->rc_skip_timely) 4019 return; 4020 if (rack->rc_gp_incr) { 4021 /* Turn off increment counting */ 4022 rack->rc_gp_incr = 0; 4023 rack->rc_gp_timely_inc_cnt = 0; 4024 } 4025 ss_red = ca_red = rec_red = 0; 4026 logged = 0; 4027 /* Calculate the reduction value */ 4028 if (rtt_diff < 0) { 4029 rtt_diff *= -1; 4030 } 4031 /* Must be at least 1% reduction */ 4032 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 4033 /* We have been in recovery ding it too */ 4034 if (timely_says == 2) { 4035 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 4036 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 4037 if (alt < new_per) 4038 val = alt; 4039 else 4040 val = new_per; 4041 } else 4042 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 4043 if (rack->r_ctl.rack_per_of_gp_rec > val) { 4044 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 4045 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 4046 } else { 4047 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 4048 rec_red = 0; 4049 } 4050 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 4051 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 4052 logged |= 1; 4053 } 4054 if (rack->rc_gp_saw_ss) { 4055 /* Sent in SS */ 4056 if 
(timely_says == 2) { 4057 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 4058 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 4059 if (alt < new_per) 4060 val = alt; 4061 else 4062 val = new_per; 4063 } else 4064 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 4065 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 4066 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 4067 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 4068 } else { 4069 ss_red = new_per; 4070 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 4071 logvar = new_per; 4072 logvar <<= 32; 4073 logvar |= alt; 4074 logvar2 = (uint32_t)rtt; 4075 logvar2 <<= 32; 4076 logvar2 |= (uint32_t)rtt_diff; 4077 logvar3 = rack_gp_rtt_maxmul; 4078 logvar3 <<= 32; 4079 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4080 rack_log_timely(rack, timely_says, 4081 logvar2, logvar3, 4082 logvar, __LINE__, 10); 4083 } 4084 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 4085 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 4086 logged |= 4; 4087 } else if (rack->rc_gp_saw_ca) { 4088 /* Sent in CA */ 4089 if (timely_says == 2) { 4090 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 4091 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 4092 if (alt < new_per) 4093 val = alt; 4094 else 4095 val = new_per; 4096 } else 4097 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 4098 if (rack->r_ctl.rack_per_of_gp_ca > val) { 4099 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 4100 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val; 4101 } else { 4102 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 4103 ca_red = 0; 4104 logvar = new_per; 4105 logvar <<= 32; 4106 logvar |= alt; 4107 logvar2 = (uint32_t)rtt; 4108 logvar2 <<= 32; 4109 logvar2 |= (uint32_t)rtt_diff; 4110 logvar3 = rack_gp_rtt_maxmul; 4111 logvar3 <<= 32; 4112 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4113 rack_log_timely(rack, timely_says, 4114 logvar2, logvar3, 4115 logvar, __LINE__, 10); 4116 } 4117 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 4118 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 4119 logged |= 2; 4120 } 4121 if (rack->rc_gp_timely_dec_cnt < 0x7) { 4122 rack->rc_gp_timely_dec_cnt++; 4123 if (rack_timely_dec_clear && 4124 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 4125 rack->rc_gp_timely_dec_cnt = 0; 4126 } 4127 logvar = ss_red; 4128 logvar <<= 32; 4129 logvar |= ca_red; 4130 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 4131 __LINE__, 2); 4132 } 4133 4134 static void 4135 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 4136 uint32_t rtt, uint32_t line, uint8_t reas) 4137 { 4138 if (tcp_bblogging_on(rack->rc_tp)) { 4139 union tcp_log_stackspecific log; 4140 struct timeval tv; 4141 4142 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4143 log.u_bbr.flex1 = line; 4144 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 4145 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 4146 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 4147 log.u_bbr.flex5 = rtt; 4148 log.u_bbr.flex6 = rack->rc_highly_buffered; 4149 log.u_bbr.flex6 <<= 1; 4150 log.u_bbr.flex6 |= rack->forced_ack; 4151 log.u_bbr.flex6 <<= 1; 4152 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 4153 log.u_bbr.flex6 <<= 1; 4154 log.u_bbr.flex6 |= rack->in_probe_rtt; 4155 log.u_bbr.flex6 <<= 1; 4156 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 
4157 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; 4158 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 4159 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 4160 log.u_bbr.flex8 = reas; 4161 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4162 log.u_bbr.delRate = rack_get_bw(rack); 4163 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 4164 log.u_bbr.cur_del_rate <<= 32; 4165 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 4166 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 4167 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 4168 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 4169 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 4170 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 4171 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 4172 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 4173 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4174 log.u_bbr.rttProp = us_cts; 4175 log.u_bbr.rttProp <<= 32; 4176 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 4177 TCP_LOG_EVENTP(rack->rc_tp, NULL, 4178 &rack->rc_inp->inp_socket->so_rcv, 4179 &rack->rc_inp->inp_socket->so_snd, 4180 BBR_LOG_RTT_SHRINKS, 0, 4181 0, &log, false, &rack->r_ctl.act_rcv_time); 4182 } 4183 } 4184 4185 static void 4186 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 4187 { 4188 uint64_t bwdp; 4189 4190 bwdp = rack_get_bw(rack); 4191 bwdp *= (uint64_t)rtt; 4192 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 4193 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 4194 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) { 4195 /* 4196 * A window protocol must be able to have 4 packets 4197 * outstanding as the floor in order to function 4198 * (especially considering delayed ack :D). 4199 */ 4200 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 4201 } 4202 } 4203 4204 static void 4205 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 4206 { 4207 /** 4208 * ProbeRTT is a bit different in rack_pacing than in 4209 * BBR. It is like BBR in that it uses the lowering of 4210 * the RTT as a signal that we saw something new and 4211 * counts from there for how long between. But it is 4212 * different in that its quite simple. It does not 4213 * play with the cwnd and wait until we get down 4214 * to N segments outstanding and hold that for 4215 * 200ms. Instead it just sets the pacing reduction 4216 * rate to a set percentage (70 by default) and hold 4217 * that for a number of recent GP Srtt's. 4218 */ 4219 uint32_t segsiz; 4220 4221 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 4222 if (rack->rc_gp_dyn_mul == 0) 4223 return; 4224 4225 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 4226 /* We are idle */ 4227 return; 4228 } 4229 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 4230 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 4231 /* 4232 * Stop the goodput now, the idea here is 4233 * that future measurements with in_probe_rtt 4234 * won't register if they are not greater so 4235 * we want to get what info (if any) is available 4236 * now. 
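* (The same early stop is performed again in
* rack_exit_probertt() for the same reason.)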
4237 */ 4238 rack_do_goodput_measurement(rack->rc_tp, rack, 4239 rack->rc_tp->snd_una, __LINE__, 4240 RACK_QUALITY_PROBERTT); 4241 } 4242 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 4243 rack->r_ctl.rc_time_probertt_entered = us_cts; 4244 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 4245 rack->r_ctl.rc_pace_min_segs); 4246 rack->in_probe_rtt = 1; 4247 rack->measure_saw_probe_rtt = 1; 4248 rack->r_ctl.rc_time_probertt_starts = 0; 4249 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 4250 if (rack_probertt_use_min_rtt_entry) 4251 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 4252 else 4253 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 4254 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4255 __LINE__, RACK_RTTS_ENTERPROBE); 4256 } 4257 4258 static void 4259 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 4260 { 4261 struct rack_sendmap *rsm; 4262 uint32_t segsiz; 4263 4264 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 4265 rack->r_ctl.rc_pace_min_segs); 4266 rack->in_probe_rtt = 0; 4267 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 4268 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 4269 /* 4270 * Stop the goodput now, the idea here is 4271 * that future measurements with in_probe_rtt 4272 * won't register if they are not greater so 4273 * we want to get what info (if any) is available 4274 * now. 4275 */ 4276 rack_do_goodput_measurement(rack->rc_tp, rack, 4277 rack->rc_tp->snd_una, __LINE__, 4278 RACK_QUALITY_PROBERTT); 4279 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 4280 /* 4281 * We don't have enough data to make a measurement. 4282 * So lets just stop and start here after exiting 4283 * probe-rtt. We probably are not interested in 4284 * the results anyway. 4285 */ 4286 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 4287 } 4288 /* 4289 * Measurements through the current snd_max are going 4290 * to be limited by the slower pacing rate. 4291 * 4292 * We need to mark these as app-limited so we 4293 * don't collapse the b/w. 4294 */ 4295 rsm = tqhash_max(rack->r_ctl.tqh); 4296 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 4297 if (rack->r_ctl.rc_app_limited_cnt == 0) 4298 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 4299 else { 4300 /* 4301 * Go out to the end app limited and mark 4302 * this new one as next and move the end_appl up 4303 * to this guy. 4304 */ 4305 if (rack->r_ctl.rc_end_appl) 4306 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 4307 rack->r_ctl.rc_end_appl = rsm; 4308 } 4309 rsm->r_flags |= RACK_APP_LIMITED; 4310 rack->r_ctl.rc_app_limited_cnt++; 4311 } 4312 /* 4313 * Now, we need to examine our pacing rate multipliers. 4314 * If its under 100%, we need to kick it back up to 4315 * 100%. We also don't let it be over our "max" above 4316 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 4317 * Note setting clamp_atexit_prtt to 0 has the effect 4318 * of setting CA/SS to 100% always at exit (which is 4319 * the default behavior). 4320 */ 4321 if (rack_probertt_clear_is) { 4322 rack->rc_gp_incr = 0; 4323 rack->rc_gp_bwred = 0; 4324 rack->rc_gp_timely_inc_cnt = 0; 4325 rack->rc_gp_timely_dec_cnt = 0; 4326 } 4327 /* Do we do any clamping at exit? 
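* Two separate settings drive this: rack_atexit_prtt_hbp is
* applied when the path was judged highly buffered, while
* rack_atexit_prtt covers the normal case; either one simply
* overwrites the CA and SS goodput multipliers on the way out.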
*/ 4328 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 4329 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 4330 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 4331 } 4332 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 4333 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 4334 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 4335 } 4336 /* 4337 * Lets set rtt_diff to 0, so that we will get a "boost" 4338 * after exiting. 4339 */ 4340 rack->r_ctl.rc_rtt_diff = 0; 4341 4342 /* Clear all flags so we start fresh */ 4343 rack->rc_tp->t_bytes_acked = 0; 4344 rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 4345 /* 4346 * If configured to, set the cwnd and ssthresh to 4347 * our targets. 4348 */ 4349 if (rack_probe_rtt_sets_cwnd) { 4350 uint64_t ebdp; 4351 uint32_t setto; 4352 4353 /* Set ssthresh so we get into CA once we hit our target */ 4354 if (rack_probertt_use_min_rtt_exit == 1) { 4355 /* Set to min rtt */ 4356 rack_set_prtt_target(rack, segsiz, 4357 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 4358 } else if (rack_probertt_use_min_rtt_exit == 2) { 4359 /* Set to current gp rtt */ 4360 rack_set_prtt_target(rack, segsiz, 4361 rack->r_ctl.rc_gp_srtt); 4362 } else if (rack_probertt_use_min_rtt_exit == 3) { 4363 /* Set to entry gp rtt */ 4364 rack_set_prtt_target(rack, segsiz, 4365 rack->r_ctl.rc_entry_gp_rtt); 4366 } else { 4367 uint64_t sum; 4368 uint32_t setval; 4369 4370 sum = rack->r_ctl.rc_entry_gp_rtt; 4371 sum *= 10; 4372 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 4373 if (sum >= 20) { 4374 /* 4375 * A highly buffered path needs 4376 * cwnd space for timely to work. 4377 * Lets set things up as if 4378 * we are heading back here again. 4379 */ 4380 setval = rack->r_ctl.rc_entry_gp_rtt; 4381 } else if (sum >= 15) { 4382 /* 4383 * Lets take the smaller of the 4384 * two since we are just somewhat 4385 * buffered. 4386 */ 4387 setval = rack->r_ctl.rc_gp_srtt; 4388 if (setval > rack->r_ctl.rc_entry_gp_rtt) 4389 setval = rack->r_ctl.rc_entry_gp_rtt; 4390 } else { 4391 /* 4392 * Here we are not highly buffered 4393 * and should pick the min we can to 4394 * keep from causing loss. 
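* (The ratio above is the entry rtt scaled by 10 over the
* current gp_srtt: 20 or more means the rtt at entry was at
* least twice the current srtt, 15 or more at least 1.5 times;
* landing here means it was under 1.5 times, i.e. little
* queue had been built.)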
4395 */ 4396 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4397 } 4398 rack_set_prtt_target(rack, segsiz, 4399 setval); 4400 } 4401 if (rack_probe_rtt_sets_cwnd > 1) { 4402 /* There is a percentage here to boost */ 4403 ebdp = rack->r_ctl.rc_target_probertt_flight; 4404 ebdp *= rack_probe_rtt_sets_cwnd; 4405 ebdp /= 100; 4406 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 4407 } else 4408 setto = rack->r_ctl.rc_target_probertt_flight; 4409 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 4410 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 4411 /* Enforce a min */ 4412 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 4413 } 4414 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 4415 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 4416 } 4417 rack_log_rtt_shrinks(rack, us_cts, 4418 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4419 __LINE__, RACK_RTTS_EXITPROBE); 4420 /* Clear times last so log has all the info */ 4421 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 4422 rack->r_ctl.rc_time_probertt_entered = us_cts; 4423 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 4424 rack->r_ctl.rc_time_of_last_probertt = us_cts; 4425 } 4426 4427 static void 4428 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 4429 { 4430 /* Check in on probe-rtt */ 4431 4432 if (rack->rc_gp_filled == 0) { 4433 /* We do not do p-rtt unless we have gp measurements */ 4434 return; 4435 } 4436 if (rack->in_probe_rtt) { 4437 uint64_t no_overflow; 4438 uint32_t endtime, must_stay; 4439 4440 if (rack->r_ctl.rc_went_idle_time && 4441 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 4442 /* 4443 * We went idle during prtt, just exit now. 4444 */ 4445 rack_exit_probertt(rack, us_cts); 4446 } else if (rack_probe_rtt_safety_val && 4447 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 4448 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 4449 /* 4450 * Probe RTT safety value triggered! 4451 */ 4452 rack_log_rtt_shrinks(rack, us_cts, 4453 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4454 __LINE__, RACK_RTTS_SAFETY); 4455 rack_exit_probertt(rack, us_cts); 4456 } 4457 /* Calculate the max we will wait */ 4458 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 4459 if (rack->rc_highly_buffered) 4460 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 4461 /* Calculate the min we must wait */ 4462 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 4463 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 4464 TSTMP_LT(us_cts, endtime)) { 4465 uint32_t calc; 4466 /* Do we lower more? 
*/ 4467 no_exit: 4468 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 4469 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 4470 else 4471 calc = 0; 4472 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 4473 if (calc) { 4474 /* Maybe */ 4475 calc *= rack_per_of_gp_probertt_reduce; 4476 if (calc > rack_per_of_gp_probertt) 4477 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 4478 else 4479 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 4480 /* Limit it too */ 4481 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 4482 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 4483 } 4484 /* We must reach target or the time set */ 4485 return; 4486 } 4487 if (rack->r_ctl.rc_time_probertt_starts == 0) { 4488 if ((TSTMP_LT(us_cts, must_stay) && 4489 rack->rc_highly_buffered) || 4490 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 4491 rack->r_ctl.rc_target_probertt_flight)) { 4492 /* We are not past the must_stay time */ 4493 goto no_exit; 4494 } 4495 rack_log_rtt_shrinks(rack, us_cts, 4496 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4497 __LINE__, RACK_RTTS_REACHTARGET); 4498 rack->r_ctl.rc_time_probertt_starts = us_cts; 4499 if (rack->r_ctl.rc_time_probertt_starts == 0) 4500 rack->r_ctl.rc_time_probertt_starts = 1; 4501 /* Restore back to our rate we want to pace at in prtt */ 4502 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 4503 } 4504 /* 4505 * Setup our end time, some number of gp_srtts plus 200ms. 4506 */ 4507 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 4508 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 4509 if (rack_probertt_gpsrtt_cnt_div) 4510 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 4511 else 4512 endtime = 0; 4513 endtime += rack_min_probertt_hold; 4514 endtime += rack->r_ctl.rc_time_probertt_starts; 4515 if (TSTMP_GEQ(us_cts, endtime)) { 4516 /* yes, exit probertt */ 4517 rack_exit_probertt(rack, us_cts); 4518 } 4519 4520 } else if ((rack->rc_skip_timely == 0) && 4521 (TSTMP_GT(us_cts, rack->r_ctl.rc_lower_rtt_us_cts)) && 4522 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt)) { 4523 /* Go into probertt, its been too long since we went lower */ 4524 rack_enter_probertt(rack, us_cts); 4525 } 4526 } 4527 4528 static void 4529 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 4530 uint32_t rtt, int32_t rtt_diff) 4531 { 4532 uint64_t cur_bw, up_bnd, low_bnd, subfr; 4533 uint32_t losses; 4534 4535 if ((rack->rc_gp_dyn_mul == 0) || 4536 (rack->use_fixed_rate) || 4537 (rack->in_probe_rtt) || 4538 (rack->rc_always_pace == 0)) { 4539 /* No dynamic GP multiplier in play */ 4540 return; 4541 } 4542 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 4543 cur_bw = rack_get_bw(rack); 4544 /* Calculate our up and down range */ 4545 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 4546 up_bnd /= 100; 4547 up_bnd += rack->r_ctl.last_gp_comp_bw; 4548 4549 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 4550 subfr /= 100; 4551 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 4552 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 4553 /* 4554 * This is the case where our RTT is above 4555 * the max target and we have been configured 4556 * to just do timely no bonus up stuff in that case. 4557 * 4558 * There are two configurations, set to 1, and we 4559 * just do timely if we are over our max. 
If its 4560 * set above 1 then we slam the multipliers down 4561 * to 100 and then decrement per timely. 4562 */ 4563 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4564 __LINE__, 3); 4565 if (rack->r_ctl.rc_no_push_at_mrtt > 1) 4566 rack_validate_multipliers_at_or_below_100(rack); 4567 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4568 } else if ((timely_says != 0) && (last_bw_est < low_bnd) && !losses) { 4569 /* 4570 * We are decreasing this is a bit complicated this 4571 * means we are loosing ground. This could be 4572 * because another flow entered and we are competing 4573 * for b/w with it. This will push the RTT up which 4574 * makes timely unusable unless we want to get shoved 4575 * into a corner and just be backed off (the age 4576 * old problem with delay based CC). 4577 * 4578 * On the other hand if it was a route change we 4579 * would like to stay somewhat contained and not 4580 * blow out the buffers. 4581 */ 4582 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4583 __LINE__, 3); 4584 rack->r_ctl.last_gp_comp_bw = cur_bw; 4585 if (rack->rc_gp_bwred == 0) { 4586 /* Go into reduction counting */ 4587 rack->rc_gp_bwred = 1; 4588 rack->rc_gp_timely_dec_cnt = 0; 4589 } 4590 if (rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) { 4591 /* 4592 * Push another time with a faster pacing 4593 * to try to gain back (we include override to 4594 * get a full raise factor). 4595 */ 4596 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || 4597 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || 4598 (timely_says == 0) || 4599 (rack_down_raise_thresh == 0)) { 4600 /* 4601 * Do an override up in b/w if we were 4602 * below the threshold or if the threshold 4603 * is zero we always do the raise. 4604 */ 4605 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1); 4606 } else { 4607 /* Log it stays the same */ 4608 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0, 4609 __LINE__, 11); 4610 } 4611 rack->rc_gp_timely_dec_cnt++; 4612 /* We are not incrementing really no-count */ 4613 rack->rc_gp_incr = 0; 4614 rack->rc_gp_timely_inc_cnt = 0; 4615 } else { 4616 /* 4617 * Lets just use the RTT 4618 * information and give up 4619 * pushing. 4620 */ 4621 goto use_timely; 4622 } 4623 } else if ((timely_says != 2) && 4624 !losses && 4625 (last_bw_est > up_bnd)) { 4626 /* 4627 * We are increasing b/w lets keep going, updating 4628 * our b/w and ignoring any timely input, unless 4629 * of course we are at our max raise (if there is one). 4630 */ 4631 4632 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4633 __LINE__, 3); 4634 rack->r_ctl.last_gp_comp_bw = cur_bw; 4635 if (rack->rc_gp_saw_ss && 4636 rack->r_ctl.rack_per_upper_bound_ss && 4637 (rack->r_ctl.rack_per_of_gp_ss == rack->r_ctl.rack_per_upper_bound_ss)) { 4638 /* 4639 * In cases where we can't go higher 4640 * we should just use timely. 4641 */ 4642 goto use_timely; 4643 } 4644 if (rack->rc_gp_saw_ca && 4645 rack->r_ctl.rack_per_upper_bound_ca && 4646 (rack->r_ctl.rack_per_of_gp_ca == rack->r_ctl.rack_per_upper_bound_ca)) { 4647 /* 4648 * In cases where we can't go higher 4649 * we should just use timely. 
4650 */ 4651 goto use_timely; 4652 } 4653 rack->rc_gp_bwred = 0; 4654 rack->rc_gp_timely_dec_cnt = 0; 4655 /* You get a set number of pushes if timely is trying to reduce */ 4656 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { 4657 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4658 } else { 4659 /* Log it stays the same */ 4660 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0, 4661 __LINE__, 12); 4662 } 4663 return; 4664 } else { 4665 /* 4666 * We are staying between the lower and upper range bounds 4667 * so use timely to decide. 4668 */ 4669 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4670 __LINE__, 3); 4671 use_timely: 4672 if (timely_says) { 4673 rack->rc_gp_incr = 0; 4674 rack->rc_gp_timely_inc_cnt = 0; 4675 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && 4676 !losses && 4677 (last_bw_est < low_bnd)) { 4678 /* We are losing ground */ 4679 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4680 rack->rc_gp_timely_dec_cnt++; 4681 /* We are not incrementing really no-count */ 4682 rack->rc_gp_incr = 0; 4683 rack->rc_gp_timely_inc_cnt = 0; 4684 } else 4685 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4686 } else { 4687 rack->rc_gp_bwred = 0; 4688 rack->rc_gp_timely_dec_cnt = 0; 4689 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4690 } 4691 } 4692 } 4693 4694 static int32_t 4695 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt) 4696 { 4697 int32_t timely_says; 4698 uint64_t log_mult, log_rtt_a_diff; 4699 4700 log_rtt_a_diff = rtt; 4701 log_rtt_a_diff <<= 32; 4702 log_rtt_a_diff |= (uint32_t)rtt_diff; 4703 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * 4704 rack_gp_rtt_maxmul)) { 4705 /* Reduce the b/w multiplier */ 4706 timely_says = 2; 4707 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 4708 log_mult <<= 32; 4709 log_mult |= prev_rtt; 4710 rack_log_timely(rack, timely_says, log_mult, 4711 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4712 log_rtt_a_diff, __LINE__, 4); 4713 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4714 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4715 max(rack_gp_rtt_mindiv , 1)))) { 4716 /* Increase the b/w multiplier */ 4717 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4718 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4719 max(rack_gp_rtt_mindiv , 1)); 4720 log_mult <<= 32; 4721 log_mult |= prev_rtt; 4722 timely_says = 0; 4723 rack_log_timely(rack, timely_says, log_mult , 4724 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4725 log_rtt_a_diff, __LINE__, 5); 4726 } else { 4727 /* 4728 * Use a gradient to decide; the timely gradient 4729 * is: 4730 * grad = rc_rtt_diff / min_rtt; 4731 * 4732 * anything below or equal to 0 will be 4733 * an increase indication. Anything above 4734 * zero is a decrease. Note we take care 4735 * of the actual gradient calculation 4736 * in the reduction (it's not needed for
4738 */ 4739 log_mult = prev_rtt; 4740 if (rtt_diff <= 0) { 4741 /* 4742 * Rttdiff is less than zero, increase the 4743 * b/w multiplier (its 0 or negative) 4744 */ 4745 timely_says = 0; 4746 rack_log_timely(rack, timely_says, log_mult, 4747 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4748 } else { 4749 /* Reduce the b/w multiplier */ 4750 timely_says = 1; 4751 rack_log_timely(rack, timely_says, log_mult, 4752 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4753 } 4754 } 4755 return (timely_says); 4756 } 4757 4758 static __inline int 4759 rack_in_gp_window(struct tcpcb *tp, struct rack_sendmap *rsm) 4760 { 4761 if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && 4762 SEQ_LEQ(rsm->r_end, tp->gput_ack)) { 4763 /** 4764 * This covers the case that the 4765 * resent is completely inside 4766 * the gp range or up to it. 4767 * |----------------| 4768 * |-----| <or> 4769 * |----| 4770 * <or> |---| 4771 */ 4772 return (1); 4773 } else if (SEQ_LT(rsm->r_start, tp->gput_seq) && 4774 SEQ_GT(rsm->r_end, tp->gput_seq)){ 4775 /** 4776 * This covers the case of 4777 * |--------------| 4778 * |-------->| 4779 */ 4780 return (1); 4781 } else if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && 4782 SEQ_LT(rsm->r_start, tp->gput_ack) && 4783 SEQ_GEQ(rsm->r_end, tp->gput_ack)) { 4784 4785 /** 4786 * This covers the case of 4787 * |--------------| 4788 * |-------->| 4789 */ 4790 return (1); 4791 } 4792 return (0); 4793 } 4794 4795 static __inline void 4796 rack_mark_in_gp_win(struct tcpcb *tp, struct rack_sendmap *rsm) 4797 { 4798 4799 if ((tp->t_flags & TF_GPUTINPROG) == 0) 4800 return; 4801 /* 4802 * We have a Goodput measurement in progress. Mark 4803 * the send if its within the window. If its not 4804 * in the window make sure it does not have the mark. 4805 */ 4806 if (rack_in_gp_window(tp, rsm)) 4807 rsm->r_flags |= RACK_IN_GP_WIN; 4808 else 4809 rsm->r_flags &= ~RACK_IN_GP_WIN; 4810 } 4811 4812 static __inline void 4813 rack_clear_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) 4814 { 4815 /* A GP measurement is ending, clear all marks on the send map*/ 4816 struct rack_sendmap *rsm = NULL; 4817 4818 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 4819 if (rsm == NULL) { 4820 rsm = tqhash_min(rack->r_ctl.tqh); 4821 } 4822 /* Nothing left? */ 4823 while ((rsm != NULL) && (SEQ_GEQ(tp->gput_ack, rsm->r_start))){ 4824 rsm->r_flags &= ~RACK_IN_GP_WIN; 4825 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4826 } 4827 } 4828 4829 4830 static __inline void 4831 rack_tend_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) 4832 { 4833 struct rack_sendmap *rsm = NULL; 4834 4835 if (tp->snd_una == tp->snd_max) { 4836 /* Nothing outstanding yet, nothing to do here */ 4837 return; 4838 } 4839 if (SEQ_GT(tp->gput_seq, tp->snd_una)) { 4840 /* 4841 * We are measuring ahead of some outstanding 4842 * data. We need to walk through up until we get 4843 * to gp_seq marking so that no rsm is set incorrectly 4844 * with RACK_IN_GP_WIN. 4845 */ 4846 rsm = tqhash_min(rack->r_ctl.tqh); 4847 while (rsm != NULL) { 4848 rack_mark_in_gp_win(tp, rsm); 4849 if (SEQ_GEQ(rsm->r_end, tp->gput_seq)) 4850 break; 4851 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4852 } 4853 } 4854 if (rsm == NULL) { 4855 /* 4856 * Need to find the GP seq, if rsm is 4857 * set we stopped as we hit it. 
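 *
 * (Added note.)  The net effect of this walk is that every rsm whose
 * sequence range overlaps [gput_seq, gput_ack) carries RACK_IN_GP_WIN
 * and nothing outside that range does, so later retransmissions can be
 * recognized as falling inside the goodput measurement window.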
4858 */ 4859 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 4860 if (rsm == NULL) 4861 return; 4862 rack_mark_in_gp_win(tp, rsm); 4863 } 4864 /* 4865 * Now we may need to mark already sent rsm, ahead of 4866 * gput_seq in the window since they may have been sent 4867 * *before* we started our measurment. The rsm, if non-null 4868 * has been marked (note if rsm would have been NULL we would have 4869 * returned in the previous block). So we go to the next, and continue 4870 * until we run out of entries or we exceed the gp_ack value. 4871 */ 4872 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4873 while (rsm) { 4874 rack_mark_in_gp_win(tp, rsm); 4875 if (SEQ_GT(rsm->r_end, tp->gput_ack)) 4876 break; 4877 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4878 } 4879 } 4880 4881 static void 4882 rack_log_gp_calc(struct tcp_rack *rack, uint32_t add_part, uint32_t sub_part, uint32_t srtt, uint64_t meas_bw, uint64_t utim, uint8_t meth, uint32_t line) 4883 { 4884 if (tcp_bblogging_on(rack->rc_tp)) { 4885 union tcp_log_stackspecific log; 4886 struct timeval tv; 4887 4888 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4889 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4890 log.u_bbr.flex1 = add_part; 4891 log.u_bbr.flex2 = sub_part; 4892 log.u_bbr.flex3 = rack_wma_divisor; 4893 log.u_bbr.flex4 = srtt; 4894 log.u_bbr.flex7 = (uint16_t)line; 4895 log.u_bbr.flex8 = meth; 4896 log.u_bbr.delRate = rack->r_ctl.gp_bw; 4897 log.u_bbr.cur_del_rate = meas_bw; 4898 log.u_bbr.rttProp = utim; 4899 TCP_LOG_EVENTP(rack->rc_tp, NULL, 4900 &rack->rc_inp->inp_socket->so_rcv, 4901 &rack->rc_inp->inp_socket->so_snd, 4902 BBR_LOG_THRESH_CALC, 0, 4903 0, &log, false, &rack->r_ctl.act_rcv_time); 4904 } 4905 } 4906 4907 static void 4908 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 4909 tcp_seq th_ack, int line, uint8_t quality) 4910 { 4911 uint64_t tim, bytes_ps, stim, utim; 4912 uint32_t segsiz, bytes, reqbytes, us_cts; 4913 int32_t gput, new_rtt_diff, timely_says; 4914 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 4915 int did_add = 0; 4916 4917 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4918 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4919 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 4920 tim = us_cts - tp->gput_ts; 4921 else 4922 tim = 0; 4923 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 4924 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 4925 else 4926 stim = 0; 4927 /* 4928 * Use the larger of the send time or ack time. This prevents us 4929 * from being influenced by ack artifacts to come up with too 4930 * high of measurement. Note that since we are spanning over many more 4931 * bytes in most of our measurements hopefully that is less likely to 4932 * occur. 4933 */ 4934 if (tim > stim) 4935 utim = max(tim, 1); 4936 else 4937 utim = max(stim, 1); 4938 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 4939 rack_log_gpset(rack, th_ack, us_cts, rack->r_ctl.rc_gp_cumack_ts, __LINE__, 3, NULL); 4940 if ((tim == 0) && (stim == 0)) { 4941 /* 4942 * Invalid measurement time, maybe 4943 * all on one ack/one send? 4944 */ 4945 bytes = 0; 4946 bytes_ps = 0; 4947 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4948 0, 0, 0, 10, __LINE__, NULL, quality); 4949 goto skip_measurement; 4950 } 4951 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 4952 /* We never made a us_rtt measurement? 
*/ 4953 bytes = 0; 4954 bytes_ps = 0; 4955 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4956 0, 0, 0, 10, __LINE__, NULL, quality); 4957 goto skip_measurement; 4958 } 4959 /* 4960 * Calculate the maximum possible b/w this connection 4961 * could have. We base our calculation on the lowest 4962 * rtt we have seen during the measurement and the 4963 * largest rwnd the client has given us in that time. This 4964 * forms a BDP that is the maximum that we could ever 4965 * get to the client. Anything larger is not valid. 4966 * 4967 * I originally had code here that rejected measurements 4968 * where the time was less than 1/2 the latest us_rtt. 4969 * But after thinking on that I realized its wrong since 4970 * say you had a 150Mbps or even 1Gbps link, and you 4971 * were a long way away.. example I am in Europe (100ms rtt) 4972 * talking to my 1Gbps link in S.C. Now measuring say 150,000 4973 * bytes my time would be 1.2ms, and yet my rtt would say 4974 * the measurement was invalid the time was < 50ms. The 4975 * same thing is true for 150Mb (8ms of time). 4976 * 4977 * A better way I realized is to look at what the maximum 4978 * the connection could possibly do. This is gated on 4979 * the lowest RTT we have seen and the highest rwnd. 4980 * We should in theory never exceed that, if we are 4981 * then something on the path is storing up packets 4982 * and then feeding them all at once to our endpoint 4983 * messing up our measurement. 4984 */ 4985 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 4986 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 4987 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 4988 if (SEQ_LT(th_ack, tp->gput_seq)) { 4989 /* No measurement can be made */ 4990 bytes = 0; 4991 bytes_ps = 0; 4992 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4993 0, 0, 0, 10, __LINE__, NULL, quality); 4994 goto skip_measurement; 4995 } else 4996 bytes = (th_ack - tp->gput_seq); 4997 bytes_ps = (uint64_t)bytes; 4998 /* 4999 * Don't measure a b/w for pacing unless we have gotten at least 5000 * an initial windows worth of data in this measurement interval. 5001 * 5002 * Small numbers of bytes get badly influenced by delayed ack and 5003 * other artifacts. Note we take the initial window or our 5004 * defined minimum GP (defaulting to 10 which hopefully is the 5005 * IW). 5006 */ 5007 if (rack->rc_gp_filled == 0) { 5008 /* 5009 * The initial estimate is special. We 5010 * have blasted out an IW worth of packets 5011 * without a real valid ack ts results. We 5012 * then setup the app_limited_needs_set flag, 5013 * this should get the first ack in (probably 2 5014 * MSS worth) to be recorded as the timestamp. 5015 * We thus allow a smaller number of bytes i.e. 5016 * IW - 2MSS. 5017 */ 5018 reqbytes -= (2 * segsiz); 5019 /* Also lets fill previous for our first measurement to be neutral */ 5020 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 5021 } 5022 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 5023 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5024 rack->r_ctl.rc_app_limited_cnt, 5025 0, 0, 10, __LINE__, NULL, quality); 5026 goto skip_measurement; 5027 } 5028 /* 5029 * We now need to calculate the Timely like status so 5030 * we can update (possibly) the b/w multipliers. 
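 *
 * (Added note, illustrative only.)  Outside of probe-rtt, rc_rtt_diff is
 * a 1/8th-weighted moving average of the per-measurement srtt delta,
 * i.e. effectively:
 *
 *   rc_rtt_diff = rc_rtt_diff - (rc_rtt_diff / 8) + (new_rtt_diff / 8);
 *
 * As a hypothetical example, a prior rc_rtt_diff of 800 usec combined
 * with a new_rtt_diff of 0 decays to 700 usec after this measurement.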
5031 */ 5032 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 5033 if (rack->rc_gp_filled == 0) { 5034 /* No previous reading */ 5035 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 5036 } else { 5037 if (rack->measure_saw_probe_rtt == 0) { 5038 /* 5039 * We don't want a probertt to be counted 5040 * since it will be negative incorrectly. We 5041 * expect to be reducing the RTT when we 5042 * pace at a slower rate. 5043 */ 5044 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 5045 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 5046 } 5047 } 5048 timely_says = rack_make_timely_judgement(rack, 5049 rack->r_ctl.rc_gp_srtt, 5050 rack->r_ctl.rc_rtt_diff, 5051 rack->r_ctl.rc_prev_gp_srtt 5052 ); 5053 bytes_ps *= HPTS_USEC_IN_SEC; 5054 bytes_ps /= utim; 5055 if (bytes_ps > rack->r_ctl.last_max_bw) { 5056 /* 5057 * Something is on path playing 5058 * since this b/w is not possible based 5059 * on our BDP (highest rwnd and lowest rtt 5060 * we saw in the measurement window). 5061 * 5062 * Another option here would be to 5063 * instead skip the measurement. 5064 */ 5065 rack_log_pacing_delay_calc(rack, bytes, reqbytes, 5066 bytes_ps, rack->r_ctl.last_max_bw, 0, 5067 11, __LINE__, NULL, quality); 5068 bytes_ps = rack->r_ctl.last_max_bw; 5069 } 5070 /* We store gp for b/w in bytes per second */ 5071 if (rack->rc_gp_filled == 0) { 5072 /* Initial measurement */ 5073 if (bytes_ps) { 5074 rack->r_ctl.gp_bw = bytes_ps; 5075 rack->rc_gp_filled = 1; 5076 rack->r_ctl.num_measurements = 1; 5077 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 5078 } else { 5079 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5080 rack->r_ctl.rc_app_limited_cnt, 5081 0, 0, 10, __LINE__, NULL, quality); 5082 } 5083 if (tcp_in_hpts(rack->rc_tp) && 5084 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 5085 /* 5086 * Ok we can't trust the pacer in this case 5087 * where we transition from un-paced to paced. 5088 * Or for that matter when the burst mitigation 5089 * was making a wild guess and got it wrong. 5090 * Stop the pacer and clear up all the aggregate 5091 * delays etc. 5092 */ 5093 tcp_hpts_remove(rack->rc_tp); 5094 rack->r_ctl.rc_hpts_flags = 0; 5095 rack->r_ctl.rc_last_output_to = 0; 5096 } 5097 did_add = 2; 5098 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { 5099 /* Still a small number run an average */ 5100 rack->r_ctl.gp_bw += bytes_ps; 5101 addpart = rack->r_ctl.num_measurements; 5102 rack->r_ctl.num_measurements++; 5103 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 5104 /* We have collected enough to move forward */ 5105 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; 5106 } 5107 rack_set_pace_segments(tp, rack, __LINE__, NULL); 5108 did_add = 3; 5109 } else { 5110 /* 5111 * We want to take 1/wma of the goodput and add in to 7/8th 5112 * of the old value weighted by the srtt. So if your measurement 5113 * period is say 2 SRTT's long you would get 1/4 as the 5114 * value, if it was like 1/2 SRTT then you would get 1/16th. 5115 * 5116 * But we must be careful not to take too much i.e. if the 5117 * srtt is say 20ms and the measurement is taken over 5118 * 400ms our weight would be 400/20 i.e. 20. On the 5119 * other hand if we get a measurement over 1ms with a 5120 * 10ms rtt we only want to take a much smaller portion. 
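 *
 * (Added note, a hedged numeric sketch with hypothetical values.)  In
 * the dynamic case below, with gp_bw = 8,000,000 B/s, a new measurement
 * of bytes_ps = 12,000,000 B/s, utim equal to srtt (the measurement
 * spanned one srtt) and rack_wma_divisor at its documented default of 8:
 *
 *   subpart = (gp_bw * utim) / (srtt * 8)     = 1,000,000
 *   addpart = (bytes_ps * utim) / (srtt * 8)  = 1,500,000
 *   gp_bw   = gp_bw - subpart + addpart       = 8,500,000
 *
 * i.e. a one-srtt measurement moves the estimate by 1/8th of the
 * difference; longer measurements simply add their 1/rack_wma_divisor
 * portion.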
5121 */ 5122 uint8_t meth; 5123 5124 if (rack->r_ctl.num_measurements < 0xff) { 5125 rack->r_ctl.num_measurements++; 5126 } 5127 srtt = (uint64_t)tp->t_srtt; 5128 if (srtt == 0) { 5129 /* 5130 * Strange why did t_srtt go back to zero? 5131 */ 5132 if (rack->r_ctl.rc_rack_min_rtt) 5133 srtt = rack->r_ctl.rc_rack_min_rtt; 5134 else 5135 srtt = HPTS_USEC_IN_MSEC; 5136 } 5137 /* 5138 * XXXrrs: Note for reviewers, in playing with 5139 * dynamic pacing I discovered this GP calculation 5140 * as done originally leads to some undesired results. 5141 * Basically you can get longer measurements contributing 5142 * too much to the WMA. Thus I changed it if you are doing 5143 * dynamic adjustments to only do the aportioned adjustment 5144 * if we have a very small (time wise) measurement. Longer 5145 * measurements just get there weight (defaulting to 1/8) 5146 * add to the WMA. We may want to think about changing 5147 * this to always do that for both sides i.e. dynamic 5148 * and non-dynamic... but considering lots of folks 5149 * were playing with this I did not want to change the 5150 * calculation per.se. without your thoughts.. Lawerence? 5151 * Peter?? 5152 */ 5153 if (rack->rc_gp_dyn_mul == 0) { 5154 subpart = rack->r_ctl.gp_bw * utim; 5155 subpart /= (srtt * 8); 5156 if (subpart < (rack->r_ctl.gp_bw / 2)) { 5157 /* 5158 * The b/w update takes no more 5159 * away then 1/2 our running total 5160 * so factor it in. 5161 */ 5162 addpart = bytes_ps * utim; 5163 addpart /= (srtt * 8); 5164 meth = 1; 5165 } else { 5166 /* 5167 * Don't allow a single measurement 5168 * to account for more than 1/2 of the 5169 * WMA. This could happen on a retransmission 5170 * where utim becomes huge compared to 5171 * srtt (multiple retransmissions when using 5172 * the sending rate which factors in all the 5173 * transmissions from the first one). 5174 */ 5175 subpart = rack->r_ctl.gp_bw / 2; 5176 addpart = bytes_ps / 2; 5177 meth = 2; 5178 } 5179 rack_log_gp_calc(rack, addpart, subpart, srtt, bytes_ps, utim, meth, __LINE__); 5180 resid_bw = rack->r_ctl.gp_bw - subpart; 5181 rack->r_ctl.gp_bw = resid_bw + addpart; 5182 did_add = 1; 5183 } else { 5184 if ((utim / srtt) <= 1) { 5185 /* 5186 * The b/w update was over a small period 5187 * of time. The idea here is to prevent a small 5188 * measurement time period from counting 5189 * too much. So we scale it based on the 5190 * time so it attributes less than 1/rack_wma_divisor 5191 * of its measurement. 5192 */ 5193 subpart = rack->r_ctl.gp_bw * utim; 5194 subpart /= (srtt * rack_wma_divisor); 5195 addpart = bytes_ps * utim; 5196 addpart /= (srtt * rack_wma_divisor); 5197 meth = 3; 5198 } else { 5199 /* 5200 * The scaled measurement was long 5201 * enough so lets just add in the 5202 * portion of the measurement i.e. 1/rack_wma_divisor 5203 */ 5204 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; 5205 addpart = bytes_ps / rack_wma_divisor; 5206 meth = 4; 5207 } 5208 if ((rack->measure_saw_probe_rtt == 0) || 5209 (bytes_ps > rack->r_ctl.gp_bw)) { 5210 /* 5211 * For probe-rtt we only add it in 5212 * if its larger, all others we just 5213 * add in. 5214 */ 5215 did_add = 1; 5216 rack_log_gp_calc(rack, addpart, subpart, srtt, bytes_ps, utim, meth, __LINE__); 5217 resid_bw = rack->r_ctl.gp_bw - subpart; 5218 rack->r_ctl.gp_bw = resid_bw + addpart; 5219 } 5220 } 5221 rack_set_pace_segments(tp, rack, __LINE__, NULL); 5222 } 5223 /* 5224 * We only watch the growth of the GP during the initial startup 5225 * or first-slowstart that ensues. 
If we ever needed to watch 5226 * growth of gp outside of that period all we need to do is 5227 * remove the first clause of this if (rc_initial_ss_comp). 5228 */ 5229 if ((rack->rc_initial_ss_comp == 0) && 5230 (rack->r_ctl.num_measurements >= RACK_REQ_AVG)) { 5231 uint64_t gp_est; 5232 5233 gp_est = bytes_ps; 5234 if (tcp_bblogging_on(rack->rc_tp)) { 5235 union tcp_log_stackspecific log; 5236 struct timeval tv; 5237 5238 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5239 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5240 log.u_bbr.flex1 = rack->r_ctl.current_round; 5241 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; 5242 log.u_bbr.delRate = gp_est; 5243 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; 5244 log.u_bbr.flex8 = 41; 5245 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5246 0, &log, false, NULL, __func__, __LINE__,&tv); 5247 } 5248 if ((rack->r_ctl.num_measurements == RACK_REQ_AVG) || 5249 (rack->r_ctl.last_gpest == 0)) { 5250 /* 5251 * The round we get our measurement averaging going 5252 * is the base round so it always is the source point 5253 * for when we had our first increment. From there on 5254 * we only record the round that had a rise. 5255 */ 5256 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; 5257 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; 5258 } else if (gp_est >= rack->r_ctl.last_gpest) { 5259 /* 5260 * Test to see if its gone up enough 5261 * to set the round count up to now. Note 5262 * that on the seeding of the 4th measurement we 5263 */ 5264 gp_est *= 1000; 5265 gp_est /= rack->r_ctl.last_gpest; 5266 if ((uint32_t)gp_est > rack->r_ctl.gp_gain_req) { 5267 /* 5268 * We went up enough to record the round. 5269 */ 5270 if (tcp_bblogging_on(rack->rc_tp)) { 5271 union tcp_log_stackspecific log; 5272 struct timeval tv; 5273 5274 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5275 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5276 log.u_bbr.flex1 = rack->r_ctl.current_round; 5277 log.u_bbr.flex2 = (uint32_t)gp_est; 5278 log.u_bbr.flex3 = rack->r_ctl.gp_gain_req; 5279 log.u_bbr.delRate = gp_est; 5280 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; 5281 log.u_bbr.flex8 = 42; 5282 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5283 0, &log, false, NULL, __func__, __LINE__,&tv); 5284 } 5285 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; 5286 if (rack->r_ctl.use_gp_not_last == 1) 5287 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; 5288 else 5289 rack->r_ctl.last_gpest = bytes_ps; 5290 } 5291 } 5292 } 5293 if ((rack->gp_ready == 0) && 5294 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 5295 /* We have enough measurements now */ 5296 rack->gp_ready = 1; 5297 if (rack->dgp_on || 5298 rack->rack_hibeta) 5299 rack_set_cc_pacing(rack); 5300 if (rack->defer_options) 5301 rack_apply_deferred_options(rack); 5302 } 5303 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 5304 rack_get_bw(rack), 22, did_add, NULL, quality); 5305 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 5306 5307 if ((rack->measure_saw_probe_rtt == 0) && 5308 rack->rc_gp_rtt_set) { 5309 if (rack->rc_skip_timely == 0) { 5310 rack_update_multiplier(rack, timely_says, bytes_ps, 5311 rack->r_ctl.rc_gp_srtt, 5312 rack->r_ctl.rc_rtt_diff); 5313 } 5314 } 5315 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 5316 rack_get_bw(rack), 3, line, NULL, quality); 5317 rack_log_pacing_delay_calc(rack, 5318 bytes, /* flex2 */ 5319 tim, /* flex1 */ 5320 bytes_ps, /* bw_inuse */ 5321 rack->r_ctl.gp_bw, /* delRate */ 
5322 rack_get_lt_bw(rack), /* rttProp */ 5323 20, line, NULL, 0); 5324 /* reset the gp srtt and setup the new prev */ 5325 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 5326 /* Record the lost count for the next measurement */ 5327 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 5328 skip_measurement: 5329 /* 5330 * We restart our diffs based on the gpsrtt in the 5331 * measurement window. 5332 */ 5333 rack->rc_gp_rtt_set = 0; 5334 rack->rc_gp_saw_rec = 0; 5335 rack->rc_gp_saw_ca = 0; 5336 rack->rc_gp_saw_ss = 0; 5337 rack->rc_dragged_bottom = 0; 5338 if (quality == RACK_QUALITY_HIGH) { 5339 /* 5340 * Gput in the stats world is in kbps where bytes_ps is 5341 * bytes per second so we do ((x * 8)/ 1000). 5342 */ 5343 gput = (int32_t)((bytes_ps << 3) / (uint64_t)1000); 5344 #ifdef STATS 5345 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 5346 gput); 5347 /* 5348 * XXXLAS: This is a temporary hack, and should be 5349 * chained off VOI_TCP_GPUT when stats(9) grows an 5350 * API to deal with chained VOIs. 5351 */ 5352 if (tp->t_stats_gput_prev > 0) 5353 stats_voi_update_abs_s32(tp->t_stats, 5354 VOI_TCP_GPUT_ND, 5355 ((gput - tp->t_stats_gput_prev) * 100) / 5356 tp->t_stats_gput_prev); 5357 #endif 5358 tp->t_stats_gput_prev = gput; 5359 } 5360 tp->t_flags &= ~TF_GPUTINPROG; 5361 /* 5362 * Now are we app limited now and there is space from where we 5363 * were to where we want to go? 5364 * 5365 * We don't do the other case i.e. non-applimited here since 5366 * the next send will trigger us picking up the missing data. 5367 */ 5368 if (rack->r_ctl.rc_first_appl && 5369 TCPS_HAVEESTABLISHED(tp->t_state) && 5370 rack->r_ctl.rc_app_limited_cnt && 5371 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 5372 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > 5373 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 5374 /* 5375 * Yep there is enough outstanding to make a measurement here. 5376 */ 5377 struct rack_sendmap *rsm; 5378 5379 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 5380 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 5381 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 5382 rack->app_limited_needs_set = 0; 5383 tp->gput_seq = th_ack; 5384 if (rack->in_probe_rtt) 5385 rack->measure_saw_probe_rtt = 1; 5386 else if ((rack->measure_saw_probe_rtt) && 5387 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 5388 rack->measure_saw_probe_rtt = 0; 5389 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { 5390 /* There is a full window to gain info from */ 5391 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 5392 } else { 5393 /* We can only measure up to the applimited point */ 5394 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); 5395 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 5396 /* 5397 * We don't have enough to make a measurement. 5398 */ 5399 tp->t_flags &= ~TF_GPUTINPROG; 5400 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 5401 0, 0, 0, 6, __LINE__, NULL, quality); 5402 return; 5403 } 5404 } 5405 if (tp->t_state >= TCPS_FIN_WAIT_1) { 5406 /* 5407 * We will get no more data into the SB 5408 * this means we need to have the data available 5409 * before we start a measurement. 5410 */ 5411 if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) { 5412 /* Nope not enough data. 
*/ 5413 return; 5414 } 5415 } 5416 tp->t_flags |= TF_GPUTINPROG; 5417 /* 5418 * Now we need to find the timestamp of the send at tp->gput_seq 5419 * for the send based measurement. 5420 */ 5421 rack->r_ctl.rc_gp_cumack_ts = 0; 5422 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 5423 if (rsm) { 5424 /* Ok send-based limit is set */ 5425 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 5426 /* 5427 * Move back to include the earlier part 5428 * so our ack time lines up right (this may 5429 * make an overlapping measurement but thats 5430 * ok). 5431 */ 5432 tp->gput_seq = rsm->r_start; 5433 } 5434 if (rsm->r_flags & RACK_ACKED) { 5435 struct rack_sendmap *nrsm; 5436 5437 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 5438 tp->gput_seq = rsm->r_end; 5439 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); 5440 if (nrsm) 5441 rsm = nrsm; 5442 else { 5443 rack->app_limited_needs_set = 1; 5444 } 5445 } else 5446 rack->app_limited_needs_set = 1; 5447 /* We always go from the first send */ 5448 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; 5449 } else { 5450 /* 5451 * If we don't find the rsm due to some 5452 * send-limit set the current time, which 5453 * basically disables the send-limit. 5454 */ 5455 struct timeval tv; 5456 5457 microuptime(&tv); 5458 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 5459 } 5460 rack_tend_gp_marks(tp, rack); 5461 rack_log_pacing_delay_calc(rack, 5462 tp->gput_seq, 5463 tp->gput_ack, 5464 (uintptr_t)rsm, 5465 tp->gput_ts, 5466 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 5467 9, 5468 __LINE__, rsm, quality); 5469 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 5470 } else { 5471 /* 5472 * To make sure proper timestamp merging occurs, we need to clear 5473 * all GP marks if we don't start a measurement. 5474 */ 5475 rack_clear_gp_marks(tp, rack); 5476 } 5477 } 5478 5479 /* 5480 * CC wrapper hook functions 5481 */ 5482 static void 5483 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 5484 uint16_t type, int32_t post_recovery) 5485 { 5486 uint32_t prior_cwnd, acked; 5487 struct tcp_log_buffer *lgb = NULL; 5488 uint8_t labc_to_use, quality; 5489 5490 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5491 tp->t_ccv.nsegs = nsegs; 5492 acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una); 5493 if ((post_recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 5494 uint32_t max; 5495 5496 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 5497 if (tp->t_ccv.bytes_this_ack > max) { 5498 tp->t_ccv.bytes_this_ack = max; 5499 } 5500 } 5501 #ifdef STATS 5502 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 5503 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 5504 #endif 5505 if ((th_ack == tp->snd_max) && rack->lt_bw_up) { 5506 /* 5507 * We will ack all the data, time to end any 5508 * lt_bw_up we have running until something 5509 * new is sent. Note we need to use the actual 5510 * ack_rcv_time which with pacing may be different. 
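 *
 * (Added note.)  The long-term estimate is just accumulated bytes over
 * accumulated active time, so closing out an interval here amounts to:
 *
 *   lt_bw_bytes += tp->snd_max - lt_seq;
 *   lt_bw_time  += tmark - lt_timemark;    (only when tmark advanced)
 *
 * and the reporting side (rack_get_lt_bw()) can derive a rate from those
 * two accumulators.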
5511 */ 5512 uint64_t tmark; 5513 5514 rack->r_ctl.lt_bw_bytes += (tp->snd_max - rack->r_ctl.lt_seq); 5515 rack->r_ctl.lt_seq = tp->snd_max; 5516 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 5517 if (tmark >= rack->r_ctl.lt_timemark) { 5518 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 5519 } 5520 rack->r_ctl.lt_timemark = tmark; 5521 rack->lt_bw_up = 0; 5522 } 5523 quality = RACK_QUALITY_NONE; 5524 if ((tp->t_flags & TF_GPUTINPROG) && 5525 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { 5526 /* Measure the Goodput */ 5527 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); 5528 } 5529 /* Which way are we limited? If not cwnd limited, no advance in CA */ 5530 if (tp->snd_cwnd <= tp->snd_wnd) 5531 tp->t_ccv.flags |= CCF_CWND_LIMITED; 5532 else 5533 tp->t_ccv.flags &= ~CCF_CWND_LIMITED; 5534 if (tp->snd_cwnd > tp->snd_ssthresh) { 5535 tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack, 5536 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 5537 /* For the setting of a window past, use the actual scwnd we are using */ 5538 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 5539 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 5540 tp->t_ccv.flags |= CCF_ABC_SENTAWND; 5541 } 5542 } else { 5543 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 5544 tp->t_bytes_acked = 0; 5545 } 5546 prior_cwnd = tp->snd_cwnd; 5547 if ((post_recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 5548 (rack_client_low_buf && rack->client_bufferlvl && 5549 (rack->client_bufferlvl < rack_client_low_buf))) 5550 labc_to_use = rack->rc_labc; 5551 else 5552 labc_to_use = rack_max_abc_post_recovery; 5553 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5554 union tcp_log_stackspecific log; 5555 struct timeval tv; 5556 5557 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5558 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5559 log.u_bbr.flex1 = th_ack; 5560 log.u_bbr.flex2 = tp->t_ccv.flags; 5561 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 5562 log.u_bbr.flex4 = tp->t_ccv.nsegs; 5563 log.u_bbr.flex5 = labc_to_use; 5564 log.u_bbr.flex6 = prior_cwnd; 5565 log.u_bbr.flex7 = V_tcp_do_newsack; 5566 log.u_bbr.flex8 = 1; 5567 lgb = tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5568 0, &log, false, NULL, __func__, __LINE__,&tv); 5569 } 5570 if (CC_ALGO(tp)->ack_received != NULL) { 5571 /* XXXLAS: Find a way to live without this */ 5572 tp->t_ccv.curack = th_ack; 5573 tp->t_ccv.labc = labc_to_use; 5574 tp->t_ccv.flags |= CCF_USE_LOCAL_ABC; 5575 CC_ALGO(tp)->ack_received(&tp->t_ccv, type); 5576 } 5577 if (lgb) { 5578 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; 5579 } 5580 if (rack->r_must_retran) { 5581 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { 5582 /* 5583 * We now are beyond the rxt point so let's disable 5584 * the flag. 5585 */ 5586 rack->r_ctl.rc_out_at_rto = 0; 5587 rack->r_must_retran = 0; 5588 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { 5589 /* 5590 * Only decrement the rc_out_at_rto if the cwnd advances 5591 * at least a whole segment. Otherwise next time the peer 5592 * acks, we won't be able to send; this generally happens 5593 * when we are in Congestion Avoidance.
5594 */ 5595 if (acked <= rack->r_ctl.rc_out_at_rto){ 5596 rack->r_ctl.rc_out_at_rto -= acked; 5597 } else { 5598 rack->r_ctl.rc_out_at_rto = 0; 5599 } 5600 } 5601 } 5602 #ifdef STATS 5603 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); 5604 #endif 5605 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { 5606 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; 5607 } 5608 if ((rack->rc_initial_ss_comp == 0) && 5609 (tp->snd_cwnd >= tp->snd_ssthresh)) { 5610 /* 5611 * The cwnd has grown beyond ssthresh we have 5612 * entered ca and completed our first Slowstart. 5613 */ 5614 rack->rc_initial_ss_comp = 1; 5615 } 5616 } 5617 5618 static void 5619 tcp_rack_partialack(struct tcpcb *tp) 5620 { 5621 struct tcp_rack *rack; 5622 5623 rack = (struct tcp_rack *)tp->t_fb_ptr; 5624 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5625 /* 5626 * If we are doing PRR and have enough 5627 * room to send <or> we are pacing and prr 5628 * is disabled we will want to see if we 5629 * can send data (by setting r_wanted_output to 5630 * true). 5631 */ 5632 if ((rack->r_ctl.rc_prr_sndcnt > 0) || 5633 rack->rack_no_prr) 5634 rack->r_wanted_output = 1; 5635 } 5636 5637 static void 5638 rack_exit_recovery(struct tcpcb *tp, struct tcp_rack *rack, int how) 5639 { 5640 /* 5641 * Now exit recovery. 5642 */ 5643 EXIT_RECOVERY(tp->t_flags); 5644 } 5645 5646 static void 5647 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack) 5648 { 5649 struct tcp_rack *rack; 5650 uint32_t orig_cwnd; 5651 5652 orig_cwnd = tp->snd_cwnd; 5653 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5654 rack = (struct tcp_rack *)tp->t_fb_ptr; 5655 /* only alert CC if we alerted when we entered */ 5656 if (CC_ALGO(tp)->post_recovery != NULL) { 5657 tp->t_ccv.curack = th_ack; 5658 CC_ALGO(tp)->post_recovery(&tp->t_ccv); 5659 if (tp->snd_cwnd < tp->snd_ssthresh) { 5660 /* 5661 * Rack has burst control and pacing 5662 * so lets not set this any lower than 5663 * snd_ssthresh per RFC-6582 (option 2). 5664 */ 5665 tp->snd_cwnd = tp->snd_ssthresh; 5666 } 5667 } 5668 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5669 union tcp_log_stackspecific log; 5670 struct timeval tv; 5671 5672 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5673 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5674 log.u_bbr.flex1 = th_ack; 5675 log.u_bbr.flex2 = tp->t_ccv.flags; 5676 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 5677 log.u_bbr.flex4 = tp->t_ccv.nsegs; 5678 log.u_bbr.flex5 = V_tcp_abc_l_var; 5679 log.u_bbr.flex6 = orig_cwnd; 5680 log.u_bbr.flex7 = V_tcp_do_newsack; 5681 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 5682 log.u_bbr.flex8 = 2; 5683 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5684 0, &log, false, NULL, __func__, __LINE__, &tv); 5685 } 5686 if ((rack->rack_no_prr == 0) && 5687 (rack->no_prr_addback == 0) && 5688 (rack->r_ctl.rc_prr_sndcnt > 0)) { 5689 /* 5690 * Suck the next prr cnt back into cwnd, but 5691 * only do that if we are not application limited. 5692 */ 5693 if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) { 5694 /* 5695 * We are allowed to add back to the cwnd the amount we did 5696 * not get out if: 5697 * a) no_prr_addback is off. 5698 * b) we are not app limited 5699 * c) we are doing prr 5700 * <and> 5701 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 
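 *
 * (Added note, hypothetical numbers.)  For example, with a 1448 byte
 * maxseg, rack_prr_addbackmax set to 2 and 5000 bytes of PRR send count
 * left over, the addback below is min(1448 * 2, 5000) = 2896 bytes of
 * cwnd.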
5702 */ 5703 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 5704 rack->r_ctl.rc_prr_sndcnt); 5705 } 5706 rack->r_ctl.rc_prr_sndcnt = 0; 5707 rack_log_to_prr(rack, 1, 0, __LINE__); 5708 } 5709 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 5710 tp->snd_recover = tp->snd_una; 5711 if (rack->r_ctl.dsack_persist) { 5712 rack->r_ctl.dsack_persist--; 5713 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 5714 rack->r_ctl.num_dsack = 0; 5715 } 5716 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 5717 } 5718 if (rack->rto_from_rec == 1) { 5719 rack->rto_from_rec = 0; 5720 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) 5721 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; 5722 } 5723 rack_exit_recovery(tp, rack, 1); 5724 } 5725 5726 static void 5727 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line) 5728 { 5729 struct tcp_rack *rack; 5730 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 5731 5732 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5733 #ifdef STATS 5734 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 5735 #endif 5736 if (IN_RECOVERY(tp->t_flags) == 0) { 5737 in_rec_at_entry = 0; 5738 ssthresh_enter = tp->snd_ssthresh; 5739 cwnd_enter = tp->snd_cwnd; 5740 } else 5741 in_rec_at_entry = 1; 5742 rack = (struct tcp_rack *)tp->t_fb_ptr; 5743 switch (type) { 5744 case CC_NDUPACK: 5745 tp->t_flags &= ~TF_WASFRECOVERY; 5746 tp->t_flags &= ~TF_WASCRECOVERY; 5747 if (!IN_FASTRECOVERY(tp->t_flags)) { 5748 /* Check if this is the end of the initial Start-up i.e. initial slow-start */ 5749 if (rack->rc_initial_ss_comp == 0) { 5750 /* Yep it is the end of the initial slowstart */ 5751 rack->rc_initial_ss_comp = 1; 5752 } 5753 rack->r_ctl.rc_prr_delivered = 0; 5754 rack->r_ctl.rc_prr_out = 0; 5755 rack->r_fast_output = 0; 5756 if (rack->rack_no_prr == 0) { 5757 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 5758 rack_log_to_prr(rack, 2, in_rec_at_entry, line); 5759 } 5760 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 5761 tp->snd_recover = tp->snd_max; 5762 if (tp->t_flags2 & TF2_ECN_PERMIT) 5763 tp->t_flags2 |= TF2_ECN_SND_CWR; 5764 } 5765 break; 5766 case CC_ECN: 5767 if (!IN_CONGRECOVERY(tp->t_flags) || 5768 /* 5769 * Allow ECN reaction on ACK to CWR, if 5770 * that data segment was also CE marked. 5771 */ 5772 SEQ_GEQ(ack, tp->snd_recover)) { 5773 EXIT_CONGRECOVERY(tp->t_flags); 5774 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 5775 rack->r_fast_output = 0; 5776 tp->snd_recover = tp->snd_max + 1; 5777 if (tp->t_flags2 & TF2_ECN_PERMIT) 5778 tp->t_flags2 |= TF2_ECN_SND_CWR; 5779 } 5780 break; 5781 case CC_RTO: 5782 tp->t_dupacks = 0; 5783 tp->t_bytes_acked = 0; 5784 rack->r_fast_output = 0; 5785 if (IN_RECOVERY(tp->t_flags)) 5786 rack_exit_recovery(tp, rack, 2); 5787 orig_cwnd = tp->snd_cwnd; 5788 rack_log_to_prr(rack, 16, orig_cwnd, line); 5789 if (CC_ALGO(tp)->cong_signal == NULL) { 5790 /* TSNH */ 5791 tp->snd_ssthresh = max(2, 5792 min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 5793 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 5794 tp->snd_cwnd = ctf_fixed_maxseg(tp); 5795 } 5796 if (tp->t_flags2 & TF2_ECN_PERMIT) 5797 tp->t_flags2 |= TF2_ECN_SND_CWR; 5798 break; 5799 case CC_RTO_ERR: 5800 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 5801 /* RTO was unnecessary, so reset everything. 
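 * (Added note.)  This path is driven by the spurious-retransmit
 * detection (t_badrxtwin): cwnd, ssthresh and snd_recover are rolled
 * back to their pre-RTO values below, any fast/congestion recovery that
 * was in effect is re-entered, and the bad-retransmit window is cleared.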
*/ 5802 tp->snd_cwnd = tp->snd_cwnd_prev; 5803 tp->snd_ssthresh = tp->snd_ssthresh_prev; 5804 tp->snd_recover = tp->snd_recover_prev; 5805 if (tp->t_flags & TF_WASFRECOVERY) { 5806 ENTER_FASTRECOVERY(tp->t_flags); 5807 tp->t_flags &= ~TF_WASFRECOVERY; 5808 } 5809 if (tp->t_flags & TF_WASCRECOVERY) { 5810 ENTER_CONGRECOVERY(tp->t_flags); 5811 tp->t_flags &= ~TF_WASCRECOVERY; 5812 } 5813 tp->snd_nxt = tp->snd_max; 5814 tp->t_badrxtwin = 0; 5815 break; 5816 } 5817 if ((CC_ALGO(tp)->cong_signal != NULL) && 5818 (type != CC_RTO)){ 5819 tp->t_ccv.curack = ack; 5820 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type); 5821 } 5822 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 5823 rack_log_to_prr(rack, 15, cwnd_enter, line); 5824 rack->r_ctl.dsack_byte_cnt = 0; 5825 rack->r_ctl.retran_during_recovery = 0; 5826 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 5827 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 5828 rack->r_ent_rec_ns = 1; 5829 } 5830 } 5831 5832 static inline void 5833 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 5834 { 5835 uint32_t i_cwnd; 5836 5837 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5838 5839 if (CC_ALGO(tp)->after_idle != NULL) 5840 CC_ALGO(tp)->after_idle(&tp->t_ccv); 5841 5842 if (tp->snd_cwnd == 1) 5843 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 5844 else 5845 i_cwnd = rc_init_window(rack); 5846 5847 /* 5848 * Being idle is no different than the initial window. If the cc 5849 * clamps it down below the initial window raise it to the initial 5850 * window. 5851 */ 5852 if (tp->snd_cwnd < i_cwnd) { 5853 tp->snd_cwnd = i_cwnd; 5854 } 5855 } 5856 5857 /* 5858 * Indicate whether this ack should be delayed. We can delay the ack if 5859 * following conditions are met: 5860 * - There is no delayed ack timer in progress. 5861 * - Our last ack wasn't a 0-sized window. We never want to delay 5862 * the ack that opens up a 0-sized window. 5863 * - LRO wasn't used for this segment. We make sure by checking that the 5864 * segment size is not larger than the MSS. 5865 * - Delayed acks are enabled or this is a half-synchronized T/TCP 5866 * connection. 5867 */ 5868 #define DELAY_ACK(tp, tlen) \ 5869 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 5870 ((tp->t_flags & TF_DELACK) == 0) && \ 5871 (tlen <= tp->t_maxseg) && \ 5872 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 5873 5874 static struct rack_sendmap * 5875 rack_find_lowest_rsm(struct tcp_rack *rack) 5876 { 5877 struct rack_sendmap *rsm; 5878 5879 /* 5880 * Walk the time-order transmitted list looking for an rsm that is 5881 * not acked. This will be the one that was sent the longest time 5882 * ago that is still outstanding. 5883 */ 5884 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 5885 if (rsm->r_flags & RACK_ACKED) { 5886 continue; 5887 } 5888 goto finish; 5889 } 5890 finish: 5891 return (rsm); 5892 } 5893 5894 static struct rack_sendmap * 5895 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm) 5896 { 5897 struct rack_sendmap *prsm; 5898 5899 /* 5900 * Walk the sequence order list backward until we hit and arrive at 5901 * the highest seq not acked. In theory when this is called it 5902 * should be the last segment (which it was not). 
5903 */ 5904 prsm = rsm; 5905 5906 TQHASH_FOREACH_REVERSE_FROM(prsm, rack->r_ctl.tqh) { 5907 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) { 5908 continue; 5909 } 5910 return (prsm); 5911 } 5912 return (NULL); 5913 } 5914 5915 static uint32_t 5916 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts, int line, int log_allowed) 5917 { 5918 int32_t lro; 5919 uint32_t thresh; 5920 5921 /* 5922 * lro is the flag we use to determine if we have seen reordering. 5923 * If it gets set we have seen reordering. The reorder logic either 5924 * works in one of two ways: 5925 * 5926 * If reorder-fade is configured, then we track the last time we saw 5927 * re-ordering occur. If we reach the point where enough time has 5928 * passed we no longer consider reordering to be occurring. 5929 * 5930 * Or if reorder-fade is 0, then once we see reordering we consider 5931 * the connection to always be subject to reordering and just set lro 5932 * to 1. 5933 * 5934 * In the end if lro is non-zero we add the extra time for 5935 * reordering in. 5936 */ 5937 if (srtt == 0) 5938 srtt = 1; 5939 if (rack->r_ctl.rc_reorder_ts) { 5940 if (rack->r_ctl.rc_reorder_fade) { 5941 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) { 5942 lro = cts - rack->r_ctl.rc_reorder_ts; 5943 if (lro == 0) { 5944 /* 5945 * No time has passed since the last 5946 * reorder, mark it as reordering. 5947 */ 5948 lro = 1; 5949 } 5950 } else { 5951 /* Negative time? */ 5952 lro = 0; 5953 } 5954 if (lro > rack->r_ctl.rc_reorder_fade) { 5955 /* Turn off reordering seen too */ 5956 rack->r_ctl.rc_reorder_ts = 0; 5957 lro = 0; 5958 } 5959 } else { 5960 /* Reordering does not fade */ 5961 lro = 1; 5962 } 5963 } else { 5964 lro = 0; 5965 } 5966 if (rack->rc_rack_tmr_std_based == 0) { 5967 thresh = srtt + rack->r_ctl.rc_pkt_delay; 5968 } else { 5969 /* Standards based pkt-delay is 1/4 srtt */ 5970 thresh = srtt + (srtt >> 2); 5971 } 5972 if (lro && (rack->rc_rack_tmr_std_based == 0)) { 5973 /* It must be set, if not you get 1/4 rtt */ 5974 if (rack->r_ctl.rc_reorder_shift) 5975 thresh += (srtt >> rack->r_ctl.rc_reorder_shift); 5976 else 5977 thresh += (srtt >> 2); 5978 } 5979 if (rack->rc_rack_use_dsack && 5980 lro && 5981 (rack->r_ctl.num_dsack > 0)) { 5982 /* 5983 * We only increase the reordering window if we 5984 * have seen reordering <and> we have a DSACK count.
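 *
 * (Added note, a hedged end-to-end example with hypothetical values.)
 * With srtt = 40000 usec, the non standards-based timer,
 * rc_pkt_delay = 1000, reordering currently seen, rc_reorder_shift = 3
 * and num_dsack = 1:
 *
 *   thresh  = 40000 + 1000            = 41000
 *   thresh += (40000 >> 3)            = 46000
 *   thresh += 1 * (40000 >> 2)        = 56000
 *
 * which stays under both the 2 * srtt ceiling (80000) and, presumably,
 * rack_rto_max.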
5985 */ 5986 thresh += rack->r_ctl.num_dsack * (srtt >> 2); 5987 if (log_allowed) 5988 rack_log_dsack_event(rack, 4, line, srtt, thresh); 5989 } 5990 /* SRTT * 2 is the ceiling */ 5991 if (thresh > (srtt * 2)) { 5992 thresh = srtt * 2; 5993 } 5994 /* And we don't want it above the RTO max either */ 5995 if (thresh > rack_rto_max) { 5996 thresh = rack_rto_max; 5997 } 5998 if (log_allowed) 5999 rack_log_dsack_event(rack, 6, line, srtt, thresh); 6000 return (thresh); 6001 } 6002 6003 static uint32_t 6004 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 6005 struct rack_sendmap *rsm, uint32_t srtt) 6006 { 6007 struct rack_sendmap *prsm; 6008 uint32_t thresh, len; 6009 int segsiz; 6010 6011 if (srtt == 0) 6012 srtt = 1; 6013 if (rack->r_ctl.rc_tlp_threshold) 6014 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 6015 else 6016 thresh = (srtt * 2); 6017 6018 /* Get the previous sent packet, if any */ 6019 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 6020 len = rsm->r_end - rsm->r_start; 6021 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 6022 /* Exactly like the ID */ 6023 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { 6024 uint32_t alt_thresh; 6025 /* 6026 * Compensate for delayed-ack with the d-ack time. 6027 */ 6028 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6029 if (alt_thresh > thresh) 6030 thresh = alt_thresh; 6031 } 6032 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 6033 /* 2.1 behavior */ 6034 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 6035 if (prsm && (len <= segsiz)) { 6036 /* 6037 * Two packets outstanding, thresh should be (2*srtt) + 6038 * possible inter-packet delay (if any). 6039 */ 6040 uint32_t inter_gap = 0; 6041 int idx, nidx; 6042 6043 idx = rsm->r_rtr_cnt - 1; 6044 nidx = prsm->r_rtr_cnt - 1; 6045 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 6046 /* Yes it was sent later (or at the same time) */ 6047 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 6048 } 6049 thresh += inter_gap; 6050 } else if (len <= segsiz) { 6051 /* 6052 * Possibly compensate for delayed-ack. 6053 */ 6054 uint32_t alt_thresh; 6055 6056 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6057 if (alt_thresh > thresh) 6058 thresh = alt_thresh; 6059 } 6060 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 6061 /* 2.2 behavior */ 6062 if (len <= segsiz) { 6063 uint32_t alt_thresh; 6064 /* 6065 * Compensate for delayed-ack with the d-ack time. 6066 */ 6067 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6068 if (alt_thresh > thresh) 6069 thresh = alt_thresh; 6070 } 6071 } 6072 /* Not above an RTO */ 6073 if (thresh > tp->t_rxtcur) { 6074 thresh = tp->t_rxtcur; 6075 } 6076 /* Not above a RTO max */ 6077 if (thresh > rack_rto_max) { 6078 thresh = rack_rto_max; 6079 } 6080 /* Apply user supplied min TLP */ 6081 if (thresh < rack_tlp_min) { 6082 thresh = rack_tlp_min; 6083 } 6084 return (thresh); 6085 } 6086 6087 static uint32_t 6088 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 6089 { 6090 /* 6091 * We want the rack_rtt which is the 6092 * last rtt we measured. However if that 6093 * does not exist we fallback to the srtt (which 6094 * we probably will never do) and then as a last 6095 * resort we use RACK_INITIAL_RTO if no srtt is 6096 * yet set. 
6097 */ 6098 if (rack->rc_rack_rtt) 6099 return (rack->rc_rack_rtt); 6100 else if (tp->t_srtt == 0) 6101 return (RACK_INITIAL_RTO); 6102 return (tp->t_srtt); 6103 } 6104 6105 static struct rack_sendmap * 6106 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 6107 { 6108 /* 6109 * Check to see that we don't need to fall into recovery. We will 6110 * need to do so if our oldest transmit is past the time we should 6111 * have had an ack. 6112 */ 6113 struct tcp_rack *rack; 6114 struct rack_sendmap *rsm; 6115 int32_t idx; 6116 uint32_t srtt, thresh; 6117 6118 rack = (struct tcp_rack *)tp->t_fb_ptr; 6119 if (tqhash_empty(rack->r_ctl.tqh)) { 6120 return (NULL); 6121 } 6122 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6123 if (rsm == NULL) 6124 return (NULL); 6125 6126 6127 if (rsm->r_flags & RACK_ACKED) { 6128 rsm = rack_find_lowest_rsm(rack); 6129 if (rsm == NULL) 6130 return (NULL); 6131 } 6132 idx = rsm->r_rtr_cnt - 1; 6133 srtt = rack_grab_rtt(tp, rack); 6134 thresh = rack_calc_thresh_rack(rack, srtt, tsused, __LINE__, 1); 6135 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 6136 return (NULL); 6137 } 6138 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 6139 return (NULL); 6140 } 6141 /* Ok if we reach here we are over-due and this guy can be sent */ 6142 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 6143 return (rsm); 6144 } 6145 6146 static uint32_t 6147 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 6148 { 6149 int32_t t; 6150 int32_t tt; 6151 uint32_t ret_val; 6152 6153 t = (tp->t_srtt + (tp->t_rttvar << 2)); 6154 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 6155 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 6156 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 6157 ret_val = (uint32_t)tt; 6158 return (ret_val); 6159 } 6160 6161 static uint32_t 6162 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 6163 { 6164 /* 6165 * Start the FR timer, we do this based on getting the first one in 6166 * the rc_tmap. Note that if its NULL we must stop the timer. in all 6167 * events we need to stop the running timer (if its running) before 6168 * starting the new one. 6169 */ 6170 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 6171 uint32_t srtt_cur; 6172 int32_t idx; 6173 int32_t is_tlp_timer = 0; 6174 struct rack_sendmap *rsm; 6175 6176 if (rack->t_timers_stopped) { 6177 /* All timers have been stopped none are to run */ 6178 return (0); 6179 } 6180 if (rack->rc_in_persist) { 6181 /* We can't start any timer in persists */ 6182 return (rack_get_persists_timer_val(tp, rack)); 6183 } 6184 rack->rc_on_min_to = 0; 6185 if ((tp->t_state < TCPS_ESTABLISHED) || 6186 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 6187 goto activate_rxt; 6188 } 6189 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6190 if ((rsm == NULL) || sup_rack) { 6191 /* Nothing on the send map or no rack */ 6192 activate_rxt: 6193 time_since_sent = 0; 6194 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6195 if (rsm) { 6196 /* 6197 * Should we discount the RTX timer any? 6198 * 6199 * We want to discount it the smallest amount. 6200 * If a timer (Rack/TLP or RXT) has gone off more 6201 * recently thats the discount we want to use (now - timer time). 6202 * If the retransmit of the oldest packet was more recent then 6203 * we want to use that (now - oldest-packet-last_transmit_time). 
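 *
 * (Added note, a non-normative sketch of the code below.)  The timer is
 * armed for roughly
 *
 *   to = tp->t_rxtcur - (cts - max(rc_tlp_rxt_last_time,
 *                                  rsm->r_tim_lastsent[idx]));
 *
 * clamped below by rc_min_to.  For example, a t_rxtcur of 200000 usec
 * with 50000 usec already elapsed since the newer of the two events
 * arms a 150000 usec retransmit timer.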
6204 * 6205 */ 6206 idx = rsm->r_rtr_cnt - 1; 6207 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 6208 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 6209 else 6210 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 6211 if (TSTMP_GT(cts, tstmp_touse)) 6212 time_since_sent = cts - tstmp_touse; 6213 } 6214 if (SEQ_LT(tp->snd_una, tp->snd_max) || 6215 sbavail(&tptosocket(tp)->so_snd)) { 6216 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 6217 to = tp->t_rxtcur; 6218 if (to > time_since_sent) 6219 to -= time_since_sent; 6220 else 6221 to = rack->r_ctl.rc_min_to; 6222 if (to == 0) 6223 to = 1; 6224 /* Special case for KEEPINIT */ 6225 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6226 (TP_KEEPINIT(tp) != 0) && 6227 rsm) { 6228 /* 6229 * We have to put a ceiling on the rxt timer 6230 * of the keep-init timeout. 6231 */ 6232 uint32_t max_time, red; 6233 6234 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 6235 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 6236 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 6237 if (red < max_time) 6238 max_time -= red; 6239 else 6240 max_time = 1; 6241 } 6242 /* Reduce timeout to the keep value if needed */ 6243 if (max_time < to) 6244 to = max_time; 6245 } 6246 return (to); 6247 } 6248 return (0); 6249 } 6250 if (rsm->r_flags & RACK_ACKED) { 6251 rsm = rack_find_lowest_rsm(rack); 6252 if (rsm == NULL) { 6253 /* No lowest? */ 6254 goto activate_rxt; 6255 } 6256 } 6257 /* Convert from ms to usecs */ 6258 if ((rsm->r_flags & RACK_SACK_PASSED) || 6259 (rsm->r_flags & RACK_RWND_COLLAPSED) || 6260 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 6261 if ((tp->t_flags & TF_SENTFIN) && 6262 ((tp->snd_max - tp->snd_una) == 1) && 6263 (rsm->r_flags & RACK_HAS_FIN)) { 6264 /* 6265 * We don't start a rack timer if all we have is a 6266 * FIN outstanding. 6267 */ 6268 goto activate_rxt; 6269 } 6270 if ((rack->use_rack_rr == 0) && 6271 (IN_FASTRECOVERY(tp->t_flags)) && 6272 (rack->rack_no_prr == 0) && 6273 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 6274 /* 6275 * We are not cheating, in recovery and 6276 * not enough ack's to yet get our next 6277 * retransmission out. 6278 * 6279 * Note that classified attackers do not 6280 * get to use the rack-cheat. 6281 */ 6282 goto activate_tlp; 6283 } 6284 srtt = rack_grab_rtt(tp, rack); 6285 thresh = rack_calc_thresh_rack(rack, srtt, cts, __LINE__, 1); 6286 idx = rsm->r_rtr_cnt - 1; 6287 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 6288 if (SEQ_GEQ(exp, cts)) { 6289 to = exp - cts; 6290 if (to < rack->r_ctl.rc_min_to) { 6291 to = rack->r_ctl.rc_min_to; 6292 if (rack->r_rr_config == 3) 6293 rack->rc_on_min_to = 1; 6294 } 6295 } else { 6296 to = rack->r_ctl.rc_min_to; 6297 if (rack->r_rr_config == 3) 6298 rack->rc_on_min_to = 1; 6299 } 6300 } else { 6301 /* Ok we need to do a TLP not RACK */ 6302 activate_tlp: 6303 if ((rack->rc_tlp_in_progress != 0) && 6304 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 6305 /* 6306 * The previous send was a TLP and we have sent 6307 * N TLP's without sending new data. 6308 */ 6309 goto activate_rxt; 6310 } 6311 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 6312 if (rsm == NULL) { 6313 /* We found no rsm to TLP with. 
*/ 6314 goto activate_rxt; 6315 } 6316 if (rsm->r_flags & RACK_HAS_FIN) { 6317 /* If its a FIN we dont do TLP */ 6318 rsm = NULL; 6319 goto activate_rxt; 6320 } 6321 idx = rsm->r_rtr_cnt - 1; 6322 time_since_sent = 0; 6323 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 6324 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 6325 else 6326 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 6327 if (TSTMP_GT(cts, tstmp_touse)) 6328 time_since_sent = cts - tstmp_touse; 6329 is_tlp_timer = 1; 6330 if (tp->t_srtt) { 6331 if ((rack->rc_srtt_measure_made == 0) && 6332 (tp->t_srtt == 1)) { 6333 /* 6334 * If another stack as run and set srtt to 1, 6335 * then the srtt was 0, so lets use the initial. 6336 */ 6337 srtt = RACK_INITIAL_RTO; 6338 } else { 6339 srtt_cur = tp->t_srtt; 6340 srtt = srtt_cur; 6341 } 6342 } else 6343 srtt = RACK_INITIAL_RTO; 6344 /* 6345 * If the SRTT is not keeping up and the 6346 * rack RTT has spiked we want to use 6347 * the last RTT not the smoothed one. 6348 */ 6349 if (rack_tlp_use_greater && 6350 tp->t_srtt && 6351 (srtt < rack_grab_rtt(tp, rack))) { 6352 srtt = rack_grab_rtt(tp, rack); 6353 } 6354 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 6355 if (thresh > time_since_sent) { 6356 to = thresh - time_since_sent; 6357 } else { 6358 to = rack->r_ctl.rc_min_to; 6359 rack_log_alt_to_to_cancel(rack, 6360 thresh, /* flex1 */ 6361 time_since_sent, /* flex2 */ 6362 tstmp_touse, /* flex3 */ 6363 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 6364 (uint32_t)rsm->r_tim_lastsent[idx], 6365 srtt, 6366 idx, 99); 6367 } 6368 if (to < rack_tlp_min) { 6369 to = rack_tlp_min; 6370 } 6371 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) { 6372 /* 6373 * If the TLP time works out to larger than the max 6374 * RTO lets not do TLP.. just RTO. 6375 */ 6376 goto activate_rxt; 6377 } 6378 } 6379 if (is_tlp_timer == 0) { 6380 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 6381 } else { 6382 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 6383 } 6384 if (to == 0) 6385 to = 1; 6386 return (to); 6387 } 6388 6389 static void 6390 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, tcp_seq snd_una) 6391 { 6392 if (rack->rc_in_persist == 0) { 6393 if (tp->t_flags & TF_GPUTINPROG) { 6394 /* 6395 * Stop the goodput now, the calling of the 6396 * measurement function clears the flag. 
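 *
 * (Entering persist means the peer has closed its receive window;
 * a goodput sample that spanned the idle period would under-state
 * the real rate, so the call below ends the measurement now and
 * tags it RACK_QUALITY_PERSIST so later consumers can see why it
 * was cut short.)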
6397 */ 6398 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, 6399 RACK_QUALITY_PERSIST); 6400 } 6401 #ifdef NETFLIX_SHARED_CWND 6402 if (rack->r_ctl.rc_scw) { 6403 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 6404 rack->rack_scwnd_is_idle = 1; 6405 } 6406 #endif 6407 rack->r_ctl.rc_went_idle_time = cts; 6408 if (rack->r_ctl.rc_went_idle_time == 0) 6409 rack->r_ctl.rc_went_idle_time = 1; 6410 if (rack->lt_bw_up) { 6411 /* Suspend our LT BW measurement */ 6412 uint64_t tmark; 6413 6414 rack->r_ctl.lt_bw_bytes += (snd_una - rack->r_ctl.lt_seq); 6415 rack->r_ctl.lt_seq = snd_una; 6416 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 6417 if (tmark >= rack->r_ctl.lt_timemark) { 6418 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 6419 } 6420 rack->r_ctl.lt_timemark = tmark; 6421 rack->lt_bw_up = 0; 6422 rack->r_persist_lt_bw_off = 1; 6423 } 6424 rack_timer_cancel(tp, rack, cts, __LINE__); 6425 rack->r_ctl.persist_lost_ends = 0; 6426 rack->probe_not_answered = 0; 6427 rack->forced_ack = 0; 6428 tp->t_rxtshift = 0; 6429 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 6430 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 6431 rack->rc_in_persist = 1; 6432 } 6433 } 6434 6435 static void 6436 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6437 { 6438 if (tcp_in_hpts(rack->rc_tp)) { 6439 tcp_hpts_remove(rack->rc_tp); 6440 rack->r_ctl.rc_hpts_flags = 0; 6441 } 6442 #ifdef NETFLIX_SHARED_CWND 6443 if (rack->r_ctl.rc_scw) { 6444 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 6445 rack->rack_scwnd_is_idle = 0; 6446 } 6447 #endif 6448 if (rack->rc_gp_dyn_mul && 6449 (rack->use_fixed_rate == 0) && 6450 (rack->rc_always_pace)) { 6451 /* 6452 * Do we count this as if a probe-rtt just 6453 * finished? 6454 */ 6455 uint32_t time_idle, idle_min; 6456 6457 time_idle = cts - rack->r_ctl.rc_went_idle_time; 6458 idle_min = rack_min_probertt_hold; 6459 if (rack_probertt_gpsrtt_cnt_div) { 6460 uint64_t extra; 6461 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 6462 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 6463 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 6464 idle_min += (uint32_t)extra; 6465 } 6466 if (time_idle >= idle_min) { 6467 /* Yes, we count it as a probe-rtt. 
*/ 6468 uint32_t us_cts; 6469 6470 us_cts = tcp_get_usecs(NULL); 6471 if (rack->in_probe_rtt == 0) { 6472 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 6473 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 6474 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 6475 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 6476 } else { 6477 rack_exit_probertt(rack, us_cts); 6478 } 6479 } 6480 } 6481 if (rack->r_persist_lt_bw_off) { 6482 /* Continue where we left off */ 6483 rack->r_ctl.lt_timemark = tcp_get_u64_usecs(NULL); 6484 rack->lt_bw_up = 1; 6485 rack->r_persist_lt_bw_off = 0; 6486 } 6487 rack->rc_in_persist = 0; 6488 rack->r_ctl.rc_went_idle_time = 0; 6489 tp->t_rxtshift = 0; 6490 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 6491 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 6492 rack->r_ctl.rc_agg_delayed = 0; 6493 rack->r_early = 0; 6494 rack->r_late = 0; 6495 rack->r_ctl.rc_agg_early = 0; 6496 } 6497 6498 static void 6499 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 6500 struct hpts_diag *diag, struct timeval *tv) 6501 { 6502 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 6503 union tcp_log_stackspecific log; 6504 6505 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 6506 log.u_bbr.flex1 = diag->p_nxt_slot; 6507 log.u_bbr.flex2 = diag->p_cur_slot; 6508 log.u_bbr.flex3 = diag->slot_req; 6509 log.u_bbr.flex4 = diag->inp_hptsslot; 6510 log.u_bbr.flex5 = diag->slot_remaining; 6511 log.u_bbr.flex6 = diag->need_new_to; 6512 log.u_bbr.flex7 = diag->p_hpts_active; 6513 log.u_bbr.flex8 = diag->p_on_min_sleep; 6514 /* Hijack other fields as needed */ 6515 log.u_bbr.epoch = diag->have_slept; 6516 log.u_bbr.lt_epoch = diag->yet_to_sleep; 6517 log.u_bbr.pkts_out = diag->co_ret; 6518 log.u_bbr.applimited = diag->hpts_sleep_time; 6519 log.u_bbr.delivered = diag->p_prev_slot; 6520 log.u_bbr.inflight = diag->p_runningslot; 6521 log.u_bbr.bw_inuse = diag->wheel_slot; 6522 log.u_bbr.rttProp = diag->wheel_cts; 6523 log.u_bbr.timeStamp = cts; 6524 log.u_bbr.delRate = diag->maxslots; 6525 log.u_bbr.cur_del_rate = diag->p_curtick; 6526 log.u_bbr.cur_del_rate <<= 32; 6527 log.u_bbr.cur_del_rate |= diag->p_lasttick; 6528 TCP_LOG_EVENTP(rack->rc_tp, NULL, 6529 &rack->rc_inp->inp_socket->so_rcv, 6530 &rack->rc_inp->inp_socket->so_snd, 6531 BBR_LOG_HPTSDIAG, 0, 6532 0, &log, false, tv); 6533 } 6534 6535 } 6536 6537 static void 6538 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 6539 { 6540 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 6541 union tcp_log_stackspecific log; 6542 struct timeval tv; 6543 6544 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 6545 log.u_bbr.flex1 = sb->sb_flags; 6546 log.u_bbr.flex2 = len; 6547 log.u_bbr.flex3 = sb->sb_state; 6548 log.u_bbr.flex8 = type; 6549 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 6550 TCP_LOG_EVENTP(rack->rc_tp, NULL, 6551 &rack->rc_inp->inp_socket->so_rcv, 6552 &rack->rc_inp->inp_socket->so_snd, 6553 TCP_LOG_SB_WAKE, 0, 6554 len, &log, false, &tv); 6555 } 6556 } 6557 6558 static void 6559 rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 6560 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 6561 { 6562 struct hpts_diag diag; 6563 struct inpcb *inp = tptoinpcb(tp); 6564 struct timeval tv; 6565 uint32_t delayed_ack = 0; 6566 uint32_t hpts_timeout; 6567 uint32_t entry_slot = slot; 6568 uint8_t stopped; 6569 uint32_t left = 0; 6570 uint32_t us_cts; 6571 6572 if ((tp->t_state == 
TCPS_CLOSED) || 6573 (tp->t_state == TCPS_LISTEN)) { 6574 return; 6575 } 6576 if (tcp_in_hpts(tp)) { 6577 /* Already on the pacer */ 6578 return; 6579 } 6580 stopped = rack->rc_tmr_stopped; 6581 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 6582 left = rack->r_ctl.rc_timer_exp - cts; 6583 } 6584 rack->r_ctl.rc_timer_exp = 0; 6585 rack->r_ctl.rc_hpts_flags = 0; 6586 us_cts = tcp_get_usecs(&tv); 6587 /* Now early/late accounting */ 6588 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0); 6589 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { 6590 /* 6591 * We have a early carry over set, 6592 * we can always add more time so we 6593 * can always make this compensation. 6594 * 6595 * Note if ack's are allowed to wake us do not 6596 * penalize the next timer for being awoke 6597 * by an ack aka the rc_agg_early (non-paced mode). 6598 */ 6599 slot += rack->r_ctl.rc_agg_early; 6600 rack->r_early = 0; 6601 rack->r_ctl.rc_agg_early = 0; 6602 } 6603 if ((rack->r_late) && 6604 ((rack->r_use_hpts_min == 0) || (rack->dgp_on == 0))) { 6605 /* 6606 * This is harder, we can 6607 * compensate some but it 6608 * really depends on what 6609 * the current pacing time is. 6610 */ 6611 if (rack->r_ctl.rc_agg_delayed >= slot) { 6612 /* 6613 * We can't compensate for it all. 6614 * And we have to have some time 6615 * on the clock. We always have a min 6616 * 10 slots (10 x 10 i.e. 100 usecs). 6617 */ 6618 if (slot <= HPTS_TICKS_PER_SLOT) { 6619 /* We gain delay */ 6620 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); 6621 slot = HPTS_TICKS_PER_SLOT; 6622 } else { 6623 /* We take off some */ 6624 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); 6625 slot = HPTS_TICKS_PER_SLOT; 6626 } 6627 } else { 6628 slot -= rack->r_ctl.rc_agg_delayed; 6629 rack->r_ctl.rc_agg_delayed = 0; 6630 /* Make sure we have 100 useconds at minimum */ 6631 if (slot < HPTS_TICKS_PER_SLOT) { 6632 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; 6633 slot = HPTS_TICKS_PER_SLOT; 6634 } 6635 if (rack->r_ctl.rc_agg_delayed == 0) 6636 rack->r_late = 0; 6637 } 6638 } else if (rack->r_late) { 6639 /* r_use_hpts_min is on and so is DGP */ 6640 uint32_t max_red; 6641 6642 max_red = (slot * rack->r_ctl.max_reduction) / 100; 6643 if (max_red >= rack->r_ctl.rc_agg_delayed) { 6644 slot -= rack->r_ctl.rc_agg_delayed; 6645 rack->r_ctl.rc_agg_delayed = 0; 6646 } else { 6647 slot -= max_red; 6648 rack->r_ctl.rc_agg_delayed -= max_red; 6649 } 6650 } 6651 if ((rack->r_use_hpts_min == 1) && 6652 (slot > 0) && 6653 (rack->dgp_on == 1)) { 6654 /* 6655 * We are enforcing a min pacing timer 6656 * based on our hpts min timeout. 6657 */ 6658 uint32_t min; 6659 6660 min = get_hpts_min_sleep_time(); 6661 if (min > slot) { 6662 slot = min; 6663 } 6664 } 6665 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 6666 if (tp->t_flags & TF_DELACK) { 6667 delayed_ack = TICKS_2_USEC(tcp_delacktime); 6668 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 6669 } 6670 if (delayed_ack && ((hpts_timeout == 0) || 6671 (delayed_ack < hpts_timeout))) 6672 hpts_timeout = delayed_ack; 6673 else 6674 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 6675 /* 6676 * If no timers are going to run and we will fall off the hptsi 6677 * wheel, we resort to a keep-alive timer if its configured. 
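 *
 * A rough sketch of that fallback (illustrative only, the real
 * checks follow below):
 *
 *	if (keepalives enabled && state <= CLOSING)
 *		hpts_timeout = established ? TP_KEEPIDLE(tp) : TP_KEEPINIT(tp);
 *
 * converted to usecs, and shortened to rack_min_probertt_hold when
 * we are sitting in probe-rtt so the wakeup also refreshes min-rtt.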
6678 */ 6679 if ((hpts_timeout == 0) && 6680 (slot == 0)) { 6681 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 6682 (tp->t_state <= TCPS_CLOSING)) { 6683 /* 6684 * Ok we have no timer (persists, rack, tlp, rxt or 6685 * del-ack), we don't have segments being paced. So 6686 * all that is left is the keepalive timer. 6687 */ 6688 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 6689 /* Get the established keep-alive time */ 6690 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 6691 } else { 6692 /* 6693 * Get the initial setup keep-alive time, 6694 * note that this is probably not going to 6695 * happen, since rack will be running a rxt timer 6696 * if a SYN of some sort is outstanding. It is 6697 * actually handled in rack_timeout_rxt(). 6698 */ 6699 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 6700 } 6701 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 6702 if (rack->in_probe_rtt) { 6703 /* 6704 * We want to instead not wake up a long time from 6705 * now but to wake up about the time we would 6706 * exit probe-rtt and initiate a keep-alive ack. 6707 * This will get us out of probe-rtt and update 6708 * our min-rtt. 6709 */ 6710 hpts_timeout = rack_min_probertt_hold; 6711 } 6712 } 6713 } 6714 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 6715 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 6716 /* 6717 * RACK, TLP, persists and RXT timers all are restartable 6718 * based on actions input .. i.e we received a packet (ack 6719 * or sack) and that changes things (rw, or snd_una etc). 6720 * Thus we can restart them with a new value. For 6721 * keep-alive, delayed_ack we keep track of what was left 6722 * and restart the timer with a smaller value. 6723 */ 6724 if (left < hpts_timeout) 6725 hpts_timeout = left; 6726 } 6727 if (hpts_timeout) { 6728 /* 6729 * Hack alert for now we can't time-out over 2,147,483 6730 * seconds (a bit more than 596 hours), which is probably ok 6731 * :). 6732 */ 6733 if (hpts_timeout > 0x7ffffffe) 6734 hpts_timeout = 0x7ffffffe; 6735 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 6736 } 6737 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); 6738 if ((rack->gp_ready == 0) && 6739 (rack->use_fixed_rate == 0) && 6740 (hpts_timeout < slot) && 6741 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 6742 /* 6743 * We have no good estimate yet for the 6744 * old clunky burst mitigation or the 6745 * real pacing. And the tlp or rxt is smaller 6746 * than the pacing calculation. Lets not 6747 * pace that long since we know the calculation 6748 * so far is not accurate. 6749 */ 6750 slot = hpts_timeout; 6751 } 6752 /** 6753 * Turn off all the flags for queuing by default. The 6754 * flags have important meanings to what happens when 6755 * LRO interacts with the transport. Most likely (by default now) 6756 * mbuf_queueing and ack compression are on. So the transport 6757 * has a couple of flags that control what happens (if those 6758 * are not on then these flags won't have any effect since it 6759 * won't go through the queuing LRO path). 6760 * 6761 * TF2_MBUF_QUEUE_READY - This flags says that I am busy 6762 * pacing output, so don't disturb. But 6763 * it also means LRO can wake me if there 6764 * is a SACK arrival. 6765 * 6766 * TF2_DONT_SACK_QUEUE - This flag is used in conjunction 6767 * with the above flag (QUEUE_READY) and 6768 * when present it says don't even wake me 6769 * if a SACK arrives. 
6770 * 6771 * The idea behind these flags is that if we are pacing we 6772 * set the MBUF_QUEUE_READY and only get woken up if 6773 * a SACK arrives (which could change things) or if 6774 * our pacing timer expires. If, however, we have a rack 6775 * timer running, then we don't even want a sack to wake 6776 * us since the rack timer has to expire before we can send. 6777 * 6778 * Other cases should usually have none of the flags set 6779 * so LRO can call into us. 6780 */ 6781 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE|TF2_MBUF_QUEUE_READY); 6782 if (slot) { 6783 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 6784 rack->r_ctl.rc_last_output_to = us_cts + slot; 6785 /* 6786 * A pacing timer (slot) is being set, in 6787 * such a case we cannot send (we are blocked by 6788 * the timer). So lets tell LRO that it should not 6789 * wake us unless there is a SACK. Note this only 6790 * will be effective if mbuf queueing is on or 6791 * compressed acks are being processed. 6792 */ 6793 tp->t_flags2 |= TF2_MBUF_QUEUE_READY; 6794 /* 6795 * But wait if we have a Rack timer running 6796 * even a SACK should not disturb us (with 6797 * the exception of r_rr_config 3). 6798 */ 6799 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) || 6800 (IN_RECOVERY(tp->t_flags))) { 6801 if (rack->r_rr_config != 3) 6802 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; 6803 else if (rack->rc_pace_dnd) { 6804 /* 6805 * When DND is on, we only let a sack 6806 * interrupt us if we are not in recovery. 6807 * 6808 * If DND is off, then we never hit here 6809 * and let all sacks wake us up. 6810 * 6811 */ 6812 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; 6813 } 6814 } 6815 if (rack->rc_ack_can_sendout_data) { 6816 /* 6817 * Ahh but wait, this is that special case 6818 * where the pacing timer can be disturbed 6819 * backout the changes (used for non-paced 6820 * burst limiting). 6821 */ 6822 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE | 6823 TF2_MBUF_QUEUE_READY); 6824 } 6825 if ((rack->use_rack_rr) && 6826 (rack->r_rr_config < 2) && 6827 ((hpts_timeout) && (hpts_timeout < slot))) { 6828 /* 6829 * Arrange for the hpts to kick back in after the 6830 * t-o if the t-o does not cause a send. 6831 */ 6832 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout), 6833 __LINE__, &diag); 6834 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6835 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 6836 } else { 6837 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(slot), 6838 __LINE__, &diag); 6839 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6840 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 6841 } 6842 } else if (hpts_timeout) { 6843 /* 6844 * With respect to t_flags2(?) here, lets let any new acks wake 6845 * us up here. Since we are not pacing (no pacing timer), output 6846 * can happen so we should let it. If its a Rack timer, then any inbound 6847 * packet probably won't change the sending (we will be blocked) 6848 * but it may change the prr stats so letting it in (the set defaults 6849 * at the start of this block) are good enough. 
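 *
 * A condensed view of the three cases (illustrative):
 *
 *	pacing slot + rack timer or recovery -> MBUF_QUEUE_READY | DONT_SACK_QUEUE
 *	pacing slot only                     -> MBUF_QUEUE_READY (a SACK may wake us)
 *	timer only, no pacing slot           -> neither flag (any ack may wake us)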
6850 */ 6851 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 6852 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout), 6853 __LINE__, &diag); 6854 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6855 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 6856 } else { 6857 /* No timer starting */ 6858 #ifdef INVARIANTS 6859 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 6860 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 6861 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 6862 } 6863 #endif 6864 } 6865 rack->rc_tmr_stopped = 0; 6866 if (slot) 6867 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv, __LINE__); 6868 } 6869 6870 static void 6871 rack_mark_lost(struct tcpcb *tp, 6872 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts) 6873 { 6874 struct rack_sendmap *nrsm; 6875 uint32_t thresh, exp; 6876 6877 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(tp, rack), cts, __LINE__, 0); 6878 nrsm = rsm; 6879 TAILQ_FOREACH_FROM(nrsm, &rack->r_ctl.rc_tmap, r_tnext) { 6880 if ((nrsm->r_flags & RACK_SACK_PASSED) == 0) { 6881 /* Got up to all that were marked sack-passed */ 6882 break; 6883 } 6884 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { 6885 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; 6886 if (TSTMP_LT(exp, cts) || (exp == cts)) { 6887 /* We now consider it lost */ 6888 nrsm->r_flags |= RACK_WAS_LOST; 6889 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; 6890 } else { 6891 /* Past here it won't be lost so stop */ 6892 break; 6893 } 6894 } 6895 } 6896 } 6897 6898 /* 6899 * RACK Timer, here we simply do logging and house keeping. 6900 * the normal rack_output() function will call the 6901 * appropriate thing to check if we need to do a RACK retransmit. 6902 * We return 1, saying don't proceed with rack_output only 6903 * when all timers have been stopped (destroyed PCB?). 6904 */ 6905 static int 6906 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6907 { 6908 /* 6909 * This timer simply provides an internal trigger to send out data. 6910 * The check_recovery_mode call will see if there are needed 6911 * retransmissions, if so we will enter fast-recovery. The output 6912 * call may or may not do the same thing depending on sysctl 6913 * settings. 6914 */ 6915 struct rack_sendmap *rsm; 6916 6917 counter_u64_add(rack_to_tot, 1); 6918 if (rack->r_state && (rack->r_state != tp->t_state)) 6919 rack_set_state(tp, rack); 6920 rack->rc_on_min_to = 0; 6921 rsm = rack_check_recovery_mode(tp, cts); 6922 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 6923 if (rsm) { 6924 /* We need to stroke any lost that are now declared as lost */ 6925 rack_mark_lost(tp, rack, rsm, cts); 6926 rack->r_ctl.rc_resend = rsm; 6927 rack->r_timer_override = 1; 6928 if (rack->use_rack_rr) { 6929 /* 6930 * Don't accumulate extra pacing delay 6931 * we are allowing the rack timer to 6932 * over-ride pacing i.e. rrr takes precedence 6933 * if the pacing interval is longer than the rrr 6934 * time (in other words we get the min pacing 6935 * time versus rrr pacing time). 
6936 */ 6937 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 6938 } 6939 } 6940 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 6941 if (rsm == NULL) { 6942 /* restart a timer and return 1 */ 6943 rack_start_hpts_timer(rack, tp, cts, 6944 0, 0, 0); 6945 return (1); 6946 } 6947 return (0); 6948 } 6949 6950 6951 6952 static void 6953 rack_adjust_orig_mlen(struct rack_sendmap *rsm) 6954 { 6955 6956 if ((M_TRAILINGROOM(rsm->m) != rsm->orig_t_space)) { 6957 /* 6958 * The trailing space changed, mbufs can grow 6959 * at the tail but they can't shrink from 6960 * it, KASSERT that. Adjust the orig_m_len to 6961 * compensate for this change. 6962 */ 6963 KASSERT((rsm->orig_t_space > M_TRAILINGROOM(rsm->m)), 6964 ("mbuf:%p rsm:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n", 6965 rsm->m, 6966 rsm, 6967 (intmax_t)M_TRAILINGROOM(rsm->m), 6968 rsm->orig_t_space, 6969 rsm->orig_m_len, 6970 rsm->m->m_len)); 6971 rsm->orig_m_len += (rsm->orig_t_space - M_TRAILINGROOM(rsm->m)); 6972 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 6973 } 6974 if (rsm->m->m_len < rsm->orig_m_len) { 6975 /* 6976 * Mbuf shrank, trimmed off the top by an ack, our 6977 * offset changes. 6978 */ 6979 KASSERT((rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)), 6980 ("mbuf:%p len:%u rsm:%p oml:%u soff:%u\n", 6981 rsm->m, rsm->m->m_len, 6982 rsm, rsm->orig_m_len, 6983 rsm->soff)); 6984 if (rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)) 6985 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); 6986 else 6987 rsm->soff = 0; 6988 rsm->orig_m_len = rsm->m->m_len; 6989 #ifdef INVARIANTS 6990 } else if (rsm->m->m_len > rsm->orig_m_len) { 6991 panic("rsm:%p m:%p m_len grew outside of t_space compensation", 6992 rsm, rsm->m); 6993 #endif 6994 } 6995 } 6996 6997 static void 6998 rack_setup_offset_for_rsm(struct tcp_rack *rack, struct rack_sendmap *src_rsm, struct rack_sendmap *rsm) 6999 { 7000 struct mbuf *m; 7001 uint32_t soff; 7002 7003 if (src_rsm->m && 7004 ((src_rsm->orig_m_len != src_rsm->m->m_len) || 7005 (M_TRAILINGROOM(src_rsm->m) != src_rsm->orig_t_space))) { 7006 /* Fix up the orig_m_len and possibly the mbuf offset */ 7007 rack_adjust_orig_mlen(src_rsm); 7008 } 7009 m = src_rsm->m; 7010 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); 7011 while (soff >= m->m_len) { 7012 /* Move out past this mbuf */ 7013 soff -= m->m_len; 7014 m = m->m_next; 7015 KASSERT((m != NULL), 7016 ("rsm:%p nrsm:%p hit at soff:%u null m", 7017 src_rsm, rsm, soff)); 7018 if (m == NULL) { 7019 /* This should *not* happen which is why there is a kassert */ 7020 src_rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 7021 (src_rsm->r_start - rack->rc_tp->snd_una), 7022 &src_rsm->soff); 7023 src_rsm->orig_m_len = src_rsm->m->m_len; 7024 src_rsm->orig_t_space = M_TRAILINGROOM(src_rsm->m); 7025 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 7026 (rsm->r_start - rack->rc_tp->snd_una), 7027 &rsm->soff); 7028 rsm->orig_m_len = rsm->m->m_len; 7029 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7030 return; 7031 } 7032 } 7033 rsm->m = m; 7034 rsm->soff = soff; 7035 rsm->orig_m_len = m->m_len; 7036 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7037 } 7038 7039 static __inline void 7040 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 7041 struct rack_sendmap *rsm, uint32_t start) 7042 { 7043 int idx; 7044 7045 nrsm->r_start = start; 7046 nrsm->r_end = rsm->r_end; 7047 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 7048 nrsm->r_act_rxt_cnt = rsm->r_act_rxt_cnt; 7049 nrsm->r_flags = rsm->r_flags; 7050 nrsm->r_dupack = rsm->r_dupack; 7051 nrsm->r_no_rtt_allowed = 
rsm->r_no_rtt_allowed; 7052 nrsm->r_rtr_bytes = 0; 7053 nrsm->r_fas = rsm->r_fas; 7054 nrsm->r_bas = rsm->r_bas; 7055 tqhash_update_end(rack->r_ctl.tqh, rsm, nrsm->r_start); 7056 nrsm->r_just_ret = rsm->r_just_ret; 7057 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 7058 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 7059 } 7060 /* Now if we have SYN flag we keep it on the left edge */ 7061 if (nrsm->r_flags & RACK_HAS_SYN) 7062 nrsm->r_flags &= ~RACK_HAS_SYN; 7063 /* Now if we have a FIN flag we keep it on the right edge */ 7064 if (rsm->r_flags & RACK_HAS_FIN) 7065 rsm->r_flags &= ~RACK_HAS_FIN; 7066 /* Push bit must go to the right edge as well */ 7067 if (rsm->r_flags & RACK_HAD_PUSH) 7068 rsm->r_flags &= ~RACK_HAD_PUSH; 7069 /* Clone over the state of the hw_tls flag */ 7070 nrsm->r_hw_tls = rsm->r_hw_tls; 7071 /* 7072 * Now we need to find nrsm's new location in the mbuf chain 7073 * we basically calculate a new offset, which is soff + 7074 * how much is left in original rsm. Then we walk out the mbuf 7075 * chain to find the righ position, it may be the same mbuf 7076 * or maybe not. 7077 */ 7078 KASSERT(((rsm->m != NULL) || 7079 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 7080 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 7081 if (rsm->m) 7082 rack_setup_offset_for_rsm(rack, rsm, nrsm); 7083 } 7084 7085 static struct rack_sendmap * 7086 rack_merge_rsm(struct tcp_rack *rack, 7087 struct rack_sendmap *l_rsm, 7088 struct rack_sendmap *r_rsm) 7089 { 7090 /* 7091 * We are merging two ack'd RSM's, 7092 * the l_rsm is on the left (lower seq 7093 * values) and the r_rsm is on the right 7094 * (higher seq value). The simplest way 7095 * to merge these is to move the right 7096 * one into the left. I don't think there 7097 * is any reason we need to try to find 7098 * the oldest (or last oldest retransmitted). 7099 */ 7100 rack_log_map_chg(rack->rc_tp, rack, NULL, 7101 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 7102 tqhash_update_end(rack->r_ctl.tqh, l_rsm, r_rsm->r_end); 7103 if (l_rsm->r_dupack < r_rsm->r_dupack) 7104 l_rsm->r_dupack = r_rsm->r_dupack; 7105 if (r_rsm->r_rtr_bytes) 7106 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 7107 if (r_rsm->r_in_tmap) { 7108 /* This really should not happen */ 7109 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 7110 r_rsm->r_in_tmap = 0; 7111 } 7112 7113 /* Now the flags */ 7114 if (r_rsm->r_flags & RACK_HAS_FIN) 7115 l_rsm->r_flags |= RACK_HAS_FIN; 7116 if (r_rsm->r_flags & RACK_TLP) 7117 l_rsm->r_flags |= RACK_TLP; 7118 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 7119 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 7120 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 7121 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 7122 /* 7123 * If both are app-limited then let the 7124 * free lower the count. If right is app 7125 * limited and left is not, transfer. 7126 */ 7127 l_rsm->r_flags |= RACK_APP_LIMITED; 7128 r_rsm->r_flags &= ~RACK_APP_LIMITED; 7129 if (r_rsm == rack->r_ctl.rc_first_appl) 7130 rack->r_ctl.rc_first_appl = l_rsm; 7131 } 7132 tqhash_remove(rack->r_ctl.tqh, r_rsm, REMOVE_TYPE_MERGE); 7133 /* 7134 * We keep the largest value, which is the newest 7135 * send. We do this in case a segment that is 7136 * joined together and not part of a GP estimate 7137 * later gets expanded into the GP estimate. 7138 * 7139 * We prohibit the merging of unlike kinds i.e. 7140 * all pieces that are in the GP estimate can be 7141 * merged and all pieces that are not in a GP estimate 7142 * can be merged, but not disimilar pieces. 
Combine 7143 * this with taking the highest here and we should 7144 * be ok unless of course the client reneges. Then 7145 * all bets are off. 7146 */ 7147 if(l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] < 7148 r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]) { 7149 l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] = r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]; 7150 } 7151 /* 7152 * When merging two RSM's we also need to consider the ack time and keep 7153 * newest. If the ack gets merged into a measurement then that is the 7154 * one we will want to be using. 7155 */ 7156 if(l_rsm->r_ack_arrival < r_rsm->r_ack_arrival) 7157 l_rsm->r_ack_arrival = r_rsm->r_ack_arrival; 7158 7159 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 7160 /* Transfer the split limit to the map we free */ 7161 r_rsm->r_limit_type = l_rsm->r_limit_type; 7162 l_rsm->r_limit_type = 0; 7163 } 7164 rack_free(rack, r_rsm); 7165 l_rsm->r_flags |= RACK_MERGED; 7166 return (l_rsm); 7167 } 7168 7169 /* 7170 * TLP Timer, here we simply setup what segment we want to 7171 * have the TLP expire on, the normal rack_output() will then 7172 * send it out. 7173 * 7174 * We return 1, saying don't proceed with rack_output only 7175 * when all timers have been stopped (destroyed PCB?). 7176 */ 7177 static int 7178 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp) 7179 { 7180 /* 7181 * Tail Loss Probe. 7182 */ 7183 struct rack_sendmap *rsm = NULL; 7184 int insret __diagused; 7185 struct socket *so = tptosocket(tp); 7186 uint32_t amm; 7187 uint32_t out, avail; 7188 int collapsed_win = 0; 7189 7190 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 7191 /* Its not time yet */ 7192 return (0); 7193 } 7194 if (ctf_progress_timeout_check(tp, true)) { 7195 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7196 return (-ETIMEDOUT); /* tcp_drop() */ 7197 } 7198 /* 7199 * A TLP timer has expired. We have been idle for 2 rtts. So we now 7200 * need to figure out how to force a full MSS segment out. 7201 */ 7202 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); 7203 rack->r_ctl.retran_during_recovery = 0; 7204 rack->r_might_revert = 0; 7205 rack->r_ctl.dsack_byte_cnt = 0; 7206 counter_u64_add(rack_tlp_tot, 1); 7207 if (rack->r_state && (rack->r_state != tp->t_state)) 7208 rack_set_state(tp, rack); 7209 avail = sbavail(&so->so_snd); 7210 out = tp->snd_max - tp->snd_una; 7211 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) { 7212 /* special case, we need a retransmission */ 7213 collapsed_win = 1; 7214 goto need_retran; 7215 } 7216 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { 7217 rack->r_ctl.dsack_persist--; 7218 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 7219 rack->r_ctl.num_dsack = 0; 7220 } 7221 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 7222 } 7223 if ((tp->t_flags & TF_GPUTINPROG) && 7224 (rack->r_ctl.rc_tlp_cnt_out == 1)) { 7225 /* 7226 * If this is the second in a row 7227 * TLP and we are doing a measurement 7228 * its time to abandon the measurement. 7229 * Something is likely broken on 7230 * the clients network and measuring a 7231 * broken network does us no good. 7232 */ 7233 tp->t_flags &= ~TF_GPUTINPROG; 7234 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7235 rack->r_ctl.rc_gp_srtt /*flex1*/, 7236 tp->gput_seq, 7237 0, 0, 18, __LINE__, NULL, 0); 7238 } 7239 /* 7240 * Check our send oldest always settings, and if 7241 * there is an oldest to send jump to the need_retran. 
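 *
 * In rough pseudo-code the selection below is (illustrative):
 *
 *	if (new data is available and one more MSS fits in the rwnd)
 *		send one new MSS (rc_tlp_new_data is set);
 *	else
 *		re-send the highest sequence segment (rc_tlpsend is set);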
7242 */ 7243 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 7244 goto need_retran; 7245 7246 if (avail > out) { 7247 /* New data is available */ 7248 amm = avail - out; 7249 if (amm > ctf_fixed_maxseg(tp)) { 7250 amm = ctf_fixed_maxseg(tp); 7251 if ((amm + out) > tp->snd_wnd) { 7252 /* We are rwnd limited */ 7253 goto need_retran; 7254 } 7255 } else if (amm < ctf_fixed_maxseg(tp)) { 7256 /* not enough to fill a MTU */ 7257 goto need_retran; 7258 } 7259 if (IN_FASTRECOVERY(tp->t_flags)) { 7260 /* Unlikely */ 7261 if (rack->rack_no_prr == 0) { 7262 if (out + amm <= tp->snd_wnd) { 7263 rack->r_ctl.rc_prr_sndcnt = amm; 7264 rack->r_ctl.rc_tlp_new_data = amm; 7265 rack_log_to_prr(rack, 4, 0, __LINE__); 7266 } 7267 } else 7268 goto need_retran; 7269 } else { 7270 /* Set the send-new override */ 7271 if (out + amm <= tp->snd_wnd) 7272 rack->r_ctl.rc_tlp_new_data = amm; 7273 else 7274 goto need_retran; 7275 } 7276 rack->r_ctl.rc_tlpsend = NULL; 7277 counter_u64_add(rack_tlp_newdata, 1); 7278 goto send; 7279 } 7280 need_retran: 7281 /* 7282 * Ok we need to arrange the last un-acked segment to be re-sent, or 7283 * optionally the first un-acked segment. 7284 */ 7285 if (collapsed_win == 0) { 7286 if (rack_always_send_oldest) 7287 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 7288 else { 7289 rsm = tqhash_max(rack->r_ctl.tqh); 7290 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 7291 rsm = rack_find_high_nonack(rack, rsm); 7292 } 7293 } 7294 if (rsm == NULL) { 7295 #ifdef TCP_BLACKBOX 7296 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 7297 #endif 7298 goto out; 7299 } 7300 } else { 7301 /* 7302 * We had a collapsed window, lets find 7303 * the point before the collapse. 7304 */ 7305 if (SEQ_GT((rack->r_ctl.last_collapse_point - 1), rack->rc_tp->snd_una)) 7306 rsm = tqhash_find(rack->r_ctl.tqh, (rack->r_ctl.last_collapse_point - 1)); 7307 else { 7308 rsm = tqhash_min(rack->r_ctl.tqh); 7309 } 7310 if (rsm == NULL) { 7311 /* Huh */ 7312 goto out; 7313 } 7314 } 7315 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 7316 /* 7317 * We need to split this the last segment in two. 7318 */ 7319 struct rack_sendmap *nrsm; 7320 7321 nrsm = rack_alloc_full_limit(rack); 7322 if (nrsm == NULL) { 7323 /* 7324 * No memory to split, we will just exit and punt 7325 * off to the RXT timer. 7326 */ 7327 goto out; 7328 } 7329 rack_clone_rsm(rack, nrsm, rsm, 7330 (rsm->r_end - ctf_fixed_maxseg(tp))); 7331 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7332 #ifndef INVARIANTS 7333 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 7334 #else 7335 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 7336 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 7337 nrsm, insret, rack, rsm); 7338 } 7339 #endif 7340 if (rsm->r_in_tmap) { 7341 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7342 nrsm->r_in_tmap = 1; 7343 } 7344 rsm = nrsm; 7345 } 7346 rack->r_ctl.rc_tlpsend = rsm; 7347 send: 7348 /* Make sure output path knows we are doing a TLP */ 7349 *doing_tlp = 1; 7350 rack->r_timer_override = 1; 7351 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 7352 return (0); 7353 out: 7354 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 7355 return (0); 7356 } 7357 7358 /* 7359 * Delayed ack Timer, here we simply need to setup the 7360 * ACK_NOW flag and remove the DELACK flag. From there 7361 * the output routine will send the ack out. 7362 * 7363 * We only return 1, saying don't proceed, if all timers 7364 * are stopped (destroyed PCB?). 
7365 */ 7366 static int 7367 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7368 { 7369 7370 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 7371 tp->t_flags &= ~TF_DELACK; 7372 tp->t_flags |= TF_ACKNOW; 7373 KMOD_TCPSTAT_INC(tcps_delack); 7374 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 7375 return (0); 7376 } 7377 7378 static inline int 7379 rack_send_ack_challange(struct tcp_rack *rack) 7380 { 7381 struct tcptemp *t_template; 7382 7383 t_template = tcpip_maketemplate(rack->rc_inp); 7384 if (t_template) { 7385 if (rack->forced_ack == 0) { 7386 rack->forced_ack = 1; 7387 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 7388 } else { 7389 rack->probe_not_answered = 1; 7390 } 7391 tcp_respond(rack->rc_tp, t_template->tt_ipgen, 7392 &t_template->tt_t, (struct mbuf *)NULL, 7393 rack->rc_tp->rcv_nxt, rack->rc_tp->snd_una - 1, 0); 7394 free(t_template, M_TEMP); 7395 /* This does send an ack so kill any D-ack timer */ 7396 if (rack->rc_tp->t_flags & TF_DELACK) 7397 rack->rc_tp->t_flags &= ~TF_DELACK; 7398 return(1); 7399 } else 7400 return (0); 7401 7402 } 7403 7404 /* 7405 * Persists timer, here we simply send the 7406 * same thing as a keepalive will. 7407 * the one byte send. 7408 * 7409 * We only return 1, saying don't proceed, if all timers 7410 * are stopped (destroyed PCB?). 7411 */ 7412 static int 7413 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7414 { 7415 int32_t retval = 1; 7416 7417 if (rack->rc_in_persist == 0) 7418 return (0); 7419 if (ctf_progress_timeout_check(tp, false)) { 7420 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 7421 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7422 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 7423 return (-ETIMEDOUT); /* tcp_drop() */ 7424 } 7425 /* 7426 * Persistence timer into zero window. Force a byte to be output, if 7427 * possible. 7428 */ 7429 KMOD_TCPSTAT_INC(tcps_persisttimeo); 7430 /* 7431 * Hack: if the peer is dead/unreachable, we do not time out if the 7432 * window is closed. After a full backoff, drop the connection if 7433 * the idle time (no responses to probes) reaches the maximum 7434 * backoff that we would use if retransmitting. 7435 */ 7436 if (tp->t_rxtshift >= V_tcp_retries && 7437 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 7438 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 7439 KMOD_TCPSTAT_INC(tcps_persistdrop); 7440 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 7441 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 7442 retval = -ETIMEDOUT; /* tcp_drop() */ 7443 goto out; 7444 } 7445 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 7446 tp->snd_una == tp->snd_max) 7447 rack_exit_persist(tp, rack, cts); 7448 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 7449 /* 7450 * If the user has closed the socket then drop a persisting 7451 * connection after a much reduced timeout. 
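 *
 * Taken together with the earlier backoff check, the two drop
 * conditions are roughly (illustrative summary):
 *
 *	rxtshift >= V_tcp_retries && idle >= maxpersistidle (or full backoff)
 *	state > CLOSE_WAIT        && idle >= TCPTV_PERSMAX
 *
 * where idle is ticks - t_rcvtime, the time since we last heard
 * from the peer.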
7452 */ 7453 if (tp->t_state > TCPS_CLOSE_WAIT && 7454 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 7455 KMOD_TCPSTAT_INC(tcps_persistdrop); 7456 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 7457 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 7458 retval = -ETIMEDOUT; /* tcp_drop() */ 7459 goto out; 7460 } 7461 if (rack_send_ack_challange(rack)) { 7462 /* only set it if we were answered */ 7463 if (rack->probe_not_answered) { 7464 counter_u64_add(rack_persists_loss, 1); 7465 rack->r_ctl.persist_lost_ends++; 7466 } 7467 counter_u64_add(rack_persists_sends, 1); 7468 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 7469 } 7470 if (tp->t_rxtshift < V_tcp_retries) 7471 tp->t_rxtshift++; 7472 out: 7473 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 7474 rack_start_hpts_timer(rack, tp, cts, 7475 0, 0, 0); 7476 return (retval); 7477 } 7478 7479 /* 7480 * If a keepalive goes off, we had no other timers 7481 * happening. We always return 1 here since this 7482 * routine either drops the connection or sends 7483 * out a segment with respond. 7484 */ 7485 static int 7486 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7487 { 7488 struct inpcb *inp = tptoinpcb(tp); 7489 7490 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 7491 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 7492 /* 7493 * Keep-alive timer went off; send something or drop connection if 7494 * idle for too long. 7495 */ 7496 KMOD_TCPSTAT_INC(tcps_keeptimeo); 7497 if (tp->t_state < TCPS_ESTABLISHED) 7498 goto dropit; 7499 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 7500 tp->t_state <= TCPS_CLOSING) { 7501 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 7502 goto dropit; 7503 /* 7504 * Send a packet designed to force a response if the peer is 7505 * up and reachable: either an ACK if the connection is 7506 * still alive, or an RST if the peer has closed the 7507 * connection due to timeout or reboot. Using sequence 7508 * number tp->snd_una-1 causes the transmitted zero-length 7509 * segment to lie outside the receive window; by the 7510 * protocol spec, this requires the correspondent TCP to 7511 * respond. 7512 */ 7513 KMOD_TCPSTAT_INC(tcps_keepprobe); 7514 rack_send_ack_challange(rack); 7515 } 7516 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 7517 return (1); 7518 dropit: 7519 KMOD_TCPSTAT_INC(tcps_keepdrops); 7520 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 7521 return (-ETIMEDOUT); /* tcp_drop() */ 7522 } 7523 7524 /* 7525 * Retransmit helper function, clear up all the ack 7526 * flags and take care of important book keeping. 7527 */ 7528 static void 7529 rack_remxt_tmr(struct tcpcb *tp) 7530 { 7531 /* 7532 * The retransmit timer went off, all sack'd blocks must be 7533 * un-acked. 
7534 */ 7535 struct rack_sendmap *rsm, *trsm = NULL; 7536 struct tcp_rack *rack; 7537 7538 rack = (struct tcp_rack *)tp->t_fb_ptr; 7539 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 7540 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 7541 rack->r_timer_override = 1; 7542 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 7543 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 7544 rack->r_late = 0; 7545 rack->r_early = 0; 7546 rack->r_ctl.rc_agg_delayed = 0; 7547 rack->r_ctl.rc_agg_early = 0; 7548 if (rack->r_state && (rack->r_state != tp->t_state)) 7549 rack_set_state(tp, rack); 7550 if (tp->t_rxtshift <= rack_rxt_scoreboard_clear_thresh) { 7551 /* 7552 * We do not clear the scoreboard until we have had 7553 * more than rack_rxt_scoreboard_clear_thresh time-outs. 7554 */ 7555 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 7556 if (rack->r_ctl.rc_resend != NULL) 7557 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 7558 7559 return; 7560 } 7561 /* 7562 * Ideally we would like to be able to 7563 * mark SACK-PASS on anything not acked here. 7564 * 7565 * However, if we do that we would burst out 7566 * all that data 1ms apart. This would be unwise, 7567 * so for now we will just let the normal rxt timer 7568 * and tlp timer take care of it. 7569 * 7570 * Also we really need to stick them back in sequence 7571 * order. This way we send in the proper order and any 7572 * sacks that come floating in will "re-ack" the data. 7573 * To do this we zap the tmap with an INIT and then 7574 * walk through and place every rsm in the tail queue 7575 * hash table back in its seq ordered place. 7576 */ 7577 TAILQ_INIT(&rack->r_ctl.rc_tmap); 7578 7579 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 7580 rsm->r_dupack = 0; 7581 if (rack_verbose_logging) 7582 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7583 /* We must re-add it back to the tlist */ 7584 if (trsm == NULL) { 7585 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7586 } else { 7587 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 7588 } 7589 rsm->r_in_tmap = 1; 7590 trsm = rsm; 7591 if (rsm->r_flags & RACK_ACKED) 7592 rsm->r_flags |= RACK_WAS_ACKED; 7593 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED | RACK_WAS_LOST); 7594 rsm->r_flags |= RACK_MUST_RXT; 7595 } 7596 /* zero the lost since it's all gone */ 7597 rack->r_ctl.rc_considered_lost = 0; 7598 /* Clear the count (we just un-acked them) */ 7599 rack->r_ctl.rc_sacked = 0; 7600 rack->r_ctl.rc_sacklast = NULL; 7601 /* Clear the tlp rtx mark */ 7602 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); 7603 if (rack->r_ctl.rc_resend != NULL) 7604 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 7605 rack->r_ctl.rc_prr_sndcnt = 0; 7606 rack_log_to_prr(rack, 6, 0, __LINE__); 7607 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); 7608 if (rack->r_ctl.rc_resend != NULL) 7609 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 7610 if (((tp->t_flags & TF_SACK_PERMIT) == 0) && 7611 ((tp->t_flags & TF_SENTFIN) == 0)) { 7612 /* 7613 * For non-sack customers new data 7614 * needs to go out as retransmits until 7615 * we retransmit up to snd_max. 
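 *
 * Illustrative numbers: with snd_una = 1000 and snd_max = 9000 (and
 * rc_sacked just zeroed above), rc_out_at_rto below starts at the
 * full 8000 bytes outstanding and is walked back down as each
 * RACK_MUST_RXT piece is re-sent (see rack_update_rsm()).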
7616 */ 7617 rack->r_must_retran = 1; 7618 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 7619 rack->r_ctl.rc_sacked); 7620 } 7621 } 7622 7623 static void 7624 rack_convert_rtts(struct tcpcb *tp) 7625 { 7626 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC); 7627 tp->t_rxtcur = RACK_REXMTVAL(tp); 7628 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 7629 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 7630 } 7631 if (tp->t_rxtcur > rack_rto_max) { 7632 tp->t_rxtcur = rack_rto_max; 7633 } 7634 } 7635 7636 static void 7637 rack_cc_conn_init(struct tcpcb *tp) 7638 { 7639 struct tcp_rack *rack; 7640 uint32_t srtt; 7641 7642 rack = (struct tcp_rack *)tp->t_fb_ptr; 7643 srtt = tp->t_srtt; 7644 cc_conn_init(tp); 7645 /* 7646 * Now convert to rack's internal format, 7647 * if required. 7648 */ 7649 if ((srtt == 0) && (tp->t_srtt != 0)) 7650 rack_convert_rtts(tp); 7651 /* 7652 * We want a chance to stay in slowstart as 7653 * we create a connection. TCP spec says that 7654 * initially ssthresh is infinite. For our 7655 * purposes that is the snd_wnd. 7656 */ 7657 if (tp->snd_ssthresh < tp->snd_wnd) { 7658 tp->snd_ssthresh = tp->snd_wnd; 7659 } 7660 /* 7661 * We also want to assure a IW worth of 7662 * data can get inflight. 7663 */ 7664 if (rc_init_window(rack) < tp->snd_cwnd) 7665 tp->snd_cwnd = rc_init_window(rack); 7666 } 7667 7668 /* 7669 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 7670 * we will setup to retransmit the lowest seq number outstanding. 7671 */ 7672 static int 7673 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7674 { 7675 struct inpcb *inp = tptoinpcb(tp); 7676 int32_t rexmt; 7677 int32_t retval = 0; 7678 bool isipv6; 7679 7680 if ((tp->t_flags & TF_GPUTINPROG) && 7681 (tp->t_rxtshift)) { 7682 /* 7683 * We have had a second timeout 7684 * measurements on successive rxt's are not profitable. 7685 * It is unlikely to be of any use (the network is 7686 * broken or the client went away). 7687 */ 7688 tp->t_flags &= ~TF_GPUTINPROG; 7689 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7690 rack->r_ctl.rc_gp_srtt /*flex1*/, 7691 tp->gput_seq, 7692 0, 0, 18, __LINE__, NULL, 0); 7693 } 7694 if (ctf_progress_timeout_check(tp, false)) { 7695 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 7696 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7697 return (-ETIMEDOUT); /* tcp_drop() */ 7698 } 7699 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 7700 rack->r_ctl.retran_during_recovery = 0; 7701 rack->rc_ack_required = 1; 7702 rack->r_ctl.dsack_byte_cnt = 0; 7703 if (IN_RECOVERY(tp->t_flags) && 7704 (rack->rto_from_rec == 0)) { 7705 /* 7706 * Mark that we had a rto while in recovery 7707 * and save the ssthresh so if we go back 7708 * into recovery we will have a chance 7709 * to slowstart back to the level. 7710 */ 7711 rack->rto_from_rec = 1; 7712 rack->r_ctl.rto_ssthresh = tp->snd_ssthresh; 7713 } 7714 if (IN_FASTRECOVERY(tp->t_flags)) 7715 tp->t_flags |= TF_WASFRECOVERY; 7716 else 7717 tp->t_flags &= ~TF_WASFRECOVERY; 7718 if (IN_CONGRECOVERY(tp->t_flags)) 7719 tp->t_flags |= TF_WASCRECOVERY; 7720 else 7721 tp->t_flags &= ~TF_WASCRECOVERY; 7722 if (TCPS_HAVEESTABLISHED(tp->t_state) && 7723 (tp->snd_una == tp->snd_max)) { 7724 /* Nothing outstanding .. 
nothing to do */ 7725 return (0); 7726 } 7727 if (rack->r_ctl.dsack_persist) { 7728 rack->r_ctl.dsack_persist--; 7729 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 7730 rack->r_ctl.num_dsack = 0; 7731 } 7732 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 7733 } 7734 /* 7735 * Rack can only run one timer at a time, so we cannot 7736 * run a KEEPINIT (gating SYN sending) and a retransmit 7737 * timer for the SYN. So if we are in a front state and 7738 * have a KEEPINIT timer we need to check the first transmit 7739 * against now to see if we have exceeded the KEEPINIT time 7740 * (if one is set). 7741 */ 7742 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 7743 (TP_KEEPINIT(tp) != 0)) { 7744 struct rack_sendmap *rsm; 7745 7746 rsm = tqhash_min(rack->r_ctl.tqh); 7747 if (rsm) { 7748 /* Ok we have something outstanding to test keepinit with */ 7749 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 7750 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 7751 /* We have exceeded the KEEPINIT time */ 7752 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 7753 goto drop_it; 7754 } 7755 } 7756 } 7757 /* 7758 * Retransmission timer went off. Message has not been acked within 7759 * retransmit interval. Back off to a longer retransmit interval 7760 * and retransmit one segment. 7761 */ 7762 if ((rack->r_ctl.rc_resend == NULL) || 7763 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 7764 /* 7765 * If the rwnd collapsed on 7766 * the one we are retransmitting 7767 * it does not count against the 7768 * rxt count. 7769 */ 7770 tp->t_rxtshift++; 7771 } 7772 rack_remxt_tmr(tp); 7773 if (tp->t_rxtshift > V_tcp_retries) { 7774 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 7775 drop_it: 7776 tp->t_rxtshift = V_tcp_retries; 7777 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 7778 /* XXXGL: previously t_softerror was casted to uint16_t */ 7779 MPASS(tp->t_softerror >= 0); 7780 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; 7781 goto out; /* tcp_drop() */ 7782 } 7783 if (tp->t_state == TCPS_SYN_SENT) { 7784 /* 7785 * If the SYN was retransmitted, indicate CWND to be limited 7786 * to 1 segment in cc_conn_init(). 7787 */ 7788 tp->snd_cwnd = 1; 7789 } else if (tp->t_rxtshift == 1) { 7790 /* 7791 * first retransmit; record ssthresh and cwnd so they can be 7792 * recovered if this turns out to be a "bad" retransmit. A 7793 * retransmit is considered "bad" if an ACK for this segment 7794 * is received within RTT/2 interval; the assumption here is 7795 * that the ACK was already in flight. See "On Estimating 7796 * End-to-End Network Path Properties" by Allman and Paxson 7797 * for more details. 
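 *
 * Illustrative: with a smoothed RTT of 100ms the window recorded in
 * t_badrxtwin below is about 50ms; an ACK arriving inside it most
 * likely covers the original transmission, so the saved
 * cwnd/ssthresh can be restored and the retransmit treated as
 * spurious.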
7798 */ 7799 tp->snd_cwnd_prev = tp->snd_cwnd; 7800 tp->snd_ssthresh_prev = tp->snd_ssthresh; 7801 tp->snd_recover_prev = tp->snd_recover; 7802 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 7803 tp->t_flags |= TF_PREVVALID; 7804 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 7805 tp->t_flags &= ~TF_PREVVALID; 7806 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 7807 if ((tp->t_state == TCPS_SYN_SENT) || 7808 (tp->t_state == TCPS_SYN_RECEIVED)) 7809 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 7810 else 7811 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 7812 7813 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 7814 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 7815 /* 7816 * We enter the path for PLMTUD if connection is established or, if 7817 * connection is FIN_WAIT_1 status, reason for the last is that if 7818 * amount of data we send is very small, we could send it in couple 7819 * of packets and process straight to FIN. In that case we won't 7820 * catch ESTABLISHED state. 7821 */ 7822 #ifdef INET6 7823 isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false; 7824 #else 7825 isipv6 = false; 7826 #endif 7827 if (((V_tcp_pmtud_blackhole_detect == 1) || 7828 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 7829 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 7830 ((tp->t_state == TCPS_ESTABLISHED) || 7831 (tp->t_state == TCPS_FIN_WAIT_1))) { 7832 /* 7833 * Idea here is that at each stage of mtu probe (usually, 7834 * 1448 -> 1188 -> 524) should be given 2 chances to recover 7835 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 7836 * should take care of that. 7837 */ 7838 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 7839 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 7840 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 7841 tp->t_rxtshift % 2 == 0)) { 7842 /* 7843 * Enter Path MTU Black-hole Detection mechanism: - 7844 * Disable Path MTU Discovery (IP "DF" bit). - 7845 * Reduce MTU to lower value than what we negotiated 7846 * with peer. 7847 */ 7848 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 7849 /* Record that we may have found a black hole. */ 7850 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 7851 /* Keep track of previous MSS. */ 7852 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 7853 } 7854 7855 /* 7856 * Reduce the MSS to blackhole value or to the 7857 * default in an attempt to retransmit. 7858 */ 7859 #ifdef INET6 7860 if (isipv6 && 7861 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 7862 /* Use the sysctl tuneable blackhole MSS. */ 7863 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 7864 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 7865 } else if (isipv6) { 7866 /* Use the default MSS. */ 7867 tp->t_maxseg = V_tcp_v6mssdflt; 7868 /* 7869 * Disable Path MTU Discovery when we switch 7870 * to minmss. 7871 */ 7872 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 7873 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 7874 } 7875 #endif 7876 #if defined(INET6) && defined(INET) 7877 else 7878 #endif 7879 #ifdef INET 7880 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 7881 /* Use the sysctl tuneable blackhole MSS. */ 7882 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 7883 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 7884 } else { 7885 /* Use the default MSS. */ 7886 tp->t_maxseg = V_tcp_mssdflt; 7887 /* 7888 * Disable Path MTU Discovery when we switch 7889 * to minmss. 
7890 */ 7891 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 7892 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 7893 } 7894 #endif 7895 } else { 7896 /* 7897 * If further retransmissions are still unsuccessful 7898 * with a lowered MTU, maybe this isn't a blackhole 7899 * and we restore the previous MSS and blackhole 7900 * detection flags. The limit '6' is determined by 7901 * giving each probe stage (1448, 1188, 524) 2 7902 * chances to recover. 7903 */ 7904 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 7905 (tp->t_rxtshift >= 6)) { 7906 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 7907 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 7908 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 7909 if (tp->t_maxseg < V_tcp_mssdflt) { 7910 /* 7911 * The MSS is so small we should not 7912 * process incoming SACK's since we are 7913 * subject to attack in such a case. 7914 */ 7915 tp->t_flags2 |= TF2_PROC_SACK_PROHIBIT; 7916 } else { 7917 tp->t_flags2 &= ~TF2_PROC_SACK_PROHIBIT; 7918 } 7919 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 7920 } 7921 } 7922 } 7923 /* 7924 * Disable RFC1323 and SACK if we haven't got any response to 7925 * our third SYN to work-around some broken terminal servers 7926 * (most of which have hopefully been retired) that have bad VJ 7927 * header compression code which trashes TCP segments containing 7928 * unknown-to-them TCP options. 7929 */ 7930 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 7931 (tp->t_rxtshift == 3)) 7932 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 7933 /* 7934 * If we backed off this far, our srtt estimate is probably bogus. 7935 * Clobber it so we'll take the next rtt measurement as our srtt; 7936 * move the current srtt into rttvar to keep the current retransmit 7937 * times until then. 7938 */ 7939 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 7940 #ifdef INET6 7941 if ((inp->inp_vflag & INP_IPV6) != 0) 7942 in6_losing(inp); 7943 else 7944 #endif 7945 in_losing(inp); 7946 tp->t_rttvar += tp->t_srtt; 7947 tp->t_srtt = 0; 7948 } 7949 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 7950 tp->snd_recover = tp->snd_max; 7951 tp->t_flags |= TF_ACKNOW; 7952 tp->t_rtttime = 0; 7953 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__); 7954 out: 7955 return (retval); 7956 } 7957 7958 static int 7959 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp) 7960 { 7961 int32_t ret = 0; 7962 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 7963 7964 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 7965 (tp->t_flags & TF_GPUTINPROG)) { 7966 /* 7967 * We have a goodput in progress 7968 * and we have entered a late state. 7969 * Do we have enough data in the sb 7970 * to handle the GPUT request? 7971 */ 7972 uint32_t bytes; 7973 7974 bytes = tp->gput_ack - tp->gput_seq; 7975 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 7976 bytes += tp->gput_seq - tp->snd_una; 7977 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 7978 /* 7979 * There are not enough bytes in the socket 7980 * buffer that have been sent to cover this 7981 * measurement. Cancel it. 
7982 */ 7983 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7984 rack->r_ctl.rc_gp_srtt /*flex1*/, 7985 tp->gput_seq, 7986 0, 0, 18, __LINE__, NULL, 0); 7987 tp->t_flags &= ~TF_GPUTINPROG; 7988 } 7989 } 7990 if (timers == 0) { 7991 return (0); 7992 } 7993 if (tp->t_state == TCPS_LISTEN) { 7994 /* no timers on listen sockets */ 7995 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 7996 return (0); 7997 return (1); 7998 } 7999 if ((timers & PACE_TMR_RACK) && 8000 rack->rc_on_min_to) { 8001 /* 8002 * For the rack timer when we 8003 * are on a min-timeout (which means rrr_conf = 3) 8004 * we don't want to check the timer. It may 8005 * be going off for a pace and thats ok we 8006 * want to send the retransmit (if its ready). 8007 * 8008 * If its on a normal rack timer (non-min) then 8009 * we will check if its expired. 8010 */ 8011 goto skip_time_check; 8012 } 8013 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 8014 uint32_t left; 8015 8016 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 8017 ret = -1; 8018 rack_log_to_processing(rack, cts, ret, 0); 8019 return (0); 8020 } 8021 if (hpts_calling == 0) { 8022 /* 8023 * A user send or queued mbuf (sack) has called us? We 8024 * return 0 and let the pacing guards 8025 * deal with it if they should or 8026 * should not cause a send. 8027 */ 8028 ret = -2; 8029 rack_log_to_processing(rack, cts, ret, 0); 8030 return (0); 8031 } 8032 /* 8033 * Ok our timer went off early and we are not paced false 8034 * alarm, go back to sleep. We make sure we don't have 8035 * no-sack wakeup on since we no longer have a PKT_OUTPUT 8036 * flag in place. 8037 */ 8038 rack->rc_tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE; 8039 ret = -3; 8040 left = rack->r_ctl.rc_timer_exp - cts; 8041 tcp_hpts_insert(tp, HPTS_MS_TO_SLOTS(left)); 8042 rack_log_to_processing(rack, cts, ret, left); 8043 return (1); 8044 } 8045 skip_time_check: 8046 rack->rc_tmr_stopped = 0; 8047 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 8048 if (timers & PACE_TMR_DELACK) { 8049 ret = rack_timeout_delack(tp, rack, cts); 8050 } else if (timers & PACE_TMR_RACK) { 8051 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8052 rack->r_fast_output = 0; 8053 ret = rack_timeout_rack(tp, rack, cts); 8054 } else if (timers & PACE_TMR_TLP) { 8055 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8056 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); 8057 } else if (timers & PACE_TMR_RXT) { 8058 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8059 rack->r_fast_output = 0; 8060 ret = rack_timeout_rxt(tp, rack, cts); 8061 } else if (timers & PACE_TMR_PERSIT) { 8062 ret = rack_timeout_persist(tp, rack, cts); 8063 } else if (timers & PACE_TMR_KEEP) { 8064 ret = rack_timeout_keepalive(tp, rack, cts); 8065 } 8066 rack_log_to_processing(rack, cts, ret, timers); 8067 return (ret); 8068 } 8069 8070 static void 8071 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 8072 { 8073 struct timeval tv; 8074 uint32_t us_cts, flags_on_entry; 8075 uint8_t hpts_removed = 0; 8076 8077 flags_on_entry = rack->r_ctl.rc_hpts_flags; 8078 us_cts = tcp_get_usecs(&tv); 8079 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 8080 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 8081 ((tp->snd_max - tp->snd_una) == 0))) { 8082 tcp_hpts_remove(rack->rc_tp); 8083 hpts_removed = 1; 8084 /* If we were not delayed cancel out the flag. 
*/ 8085 if ((tp->snd_max - tp->snd_una) == 0) 8086 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 8087 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 8088 } 8089 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 8090 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 8091 if (tcp_in_hpts(rack->rc_tp) && 8092 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 8093 /* 8094 * Canceling timer's when we have no output being 8095 * paced. We also must remove ourselves from the 8096 * hpts. 8097 */ 8098 tcp_hpts_remove(rack->rc_tp); 8099 hpts_removed = 1; 8100 } 8101 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 8102 } 8103 if (hpts_removed == 0) 8104 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 8105 } 8106 8107 static int 8108 rack_stopall(struct tcpcb *tp) 8109 { 8110 struct tcp_rack *rack; 8111 8112 rack = (struct tcp_rack *)tp->t_fb_ptr; 8113 rack->t_timers_stopped = 1; 8114 8115 tcp_hpts_remove(tp); 8116 8117 return (0); 8118 } 8119 8120 static void 8121 rack_stop_all_timers(struct tcpcb *tp, struct tcp_rack *rack) 8122 { 8123 /* 8124 * Assure no timers are running. 8125 */ 8126 if (tcp_timer_active(tp, TT_PERSIST)) { 8127 /* We enter in persists, set the flag appropriately */ 8128 rack->rc_in_persist = 1; 8129 } 8130 if (tcp_in_hpts(rack->rc_tp)) { 8131 tcp_hpts_remove(rack->rc_tp); 8132 } 8133 } 8134 8135 static void 8136 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 8137 struct rack_sendmap *rsm, uint64_t ts, uint32_t add_flag, int segsiz) 8138 { 8139 int32_t idx; 8140 8141 rsm->r_rtr_cnt++; 8142 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 8143 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 8144 rsm->r_flags |= RACK_OVERMAX; 8145 } 8146 rsm->r_act_rxt_cnt++; 8147 /* Peg the count/index */ 8148 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8149 rsm->r_dupack = 0; 8150 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { 8151 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 8152 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 8153 } 8154 if (rsm->r_flags & RACK_WAS_LOST) { 8155 /* 8156 * We retransmitted it putting it back in flight 8157 * remove the lost desgination and reduce the 8158 * bytes considered lost. 8159 */ 8160 rsm->r_flags &= ~RACK_WAS_LOST; 8161 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), 8162 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 8163 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) 8164 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; 8165 else 8166 rack->r_ctl.rc_considered_lost = 0; 8167 } 8168 idx = rsm->r_rtr_cnt - 1; 8169 rsm->r_tim_lastsent[idx] = ts; 8170 /* 8171 * Here we don't add in the len of send, since its already 8172 * in snduna <->snd_max. 
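 * Contrast this with the new-send path in rack_log_output(), which
 * adds the segment length to the flight size because snd_max has
 * not yet been advanced past the bytes being sent.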
8173 */ 8174 rsm->r_fas = ctf_flight_size(rack->rc_tp, 8175 rack->r_ctl.rc_sacked); 8176 if (rsm->r_flags & RACK_ACKED) { 8177 /* Problably MTU discovery messing with us */ 8178 rsm->r_flags &= ~RACK_ACKED; 8179 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8180 } 8181 if (rsm->r_in_tmap) { 8182 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8183 rsm->r_in_tmap = 0; 8184 } 8185 /* Lets make sure it really is in or not the GP window */ 8186 rack_mark_in_gp_win(tp, rsm); 8187 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8188 rsm->r_in_tmap = 1; 8189 rsm->r_bas = (uint8_t)(((rsm->r_end - rsm->r_start) + segsiz - 1) / segsiz); 8190 /* Take off the must retransmit flag, if its on */ 8191 if (rsm->r_flags & RACK_MUST_RXT) { 8192 if (rack->r_must_retran) 8193 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 8194 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 8195 /* 8196 * We have retransmitted all we need. Clear 8197 * any must retransmit flags. 8198 */ 8199 rack->r_must_retran = 0; 8200 rack->r_ctl.rc_out_at_rto = 0; 8201 } 8202 rsm->r_flags &= ~RACK_MUST_RXT; 8203 } 8204 /* Remove any collapsed flag */ 8205 rsm->r_flags &= ~RACK_RWND_COLLAPSED; 8206 if (rsm->r_flags & RACK_SACK_PASSED) { 8207 /* We have retransmitted due to the SACK pass */ 8208 rsm->r_flags &= ~RACK_SACK_PASSED; 8209 rsm->r_flags |= RACK_WAS_SACKPASS; 8210 } 8211 } 8212 8213 static uint32_t 8214 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 8215 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint32_t add_flag, int segsiz) 8216 { 8217 /* 8218 * We (re-)transmitted starting at rsm->r_start for some length 8219 * (possibly less than r_end. 8220 */ 8221 struct rack_sendmap *nrsm; 8222 int insret __diagused; 8223 uint32_t c_end; 8224 int32_t len; 8225 8226 len = *lenp; 8227 c_end = rsm->r_start + len; 8228 if (SEQ_GEQ(c_end, rsm->r_end)) { 8229 /* 8230 * We retransmitted the whole piece or more than the whole 8231 * slopping into the next rsm. 8232 */ 8233 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); 8234 if (c_end == rsm->r_end) { 8235 *lenp = 0; 8236 return (0); 8237 } else { 8238 int32_t act_len; 8239 8240 /* Hangs over the end return whats left */ 8241 act_len = rsm->r_end - rsm->r_start; 8242 *lenp = (len - act_len); 8243 return (rsm->r_end); 8244 } 8245 /* We don't get out of this block. */ 8246 } 8247 /* 8248 * Here we retransmitted less than the whole thing which means we 8249 * have to split this into what was transmitted and what was not. 8250 */ 8251 nrsm = rack_alloc_full_limit(rack); 8252 if (nrsm == NULL) { 8253 /* 8254 * We can't get memory, so lets not proceed. 8255 */ 8256 *lenp = 0; 8257 return (0); 8258 } 8259 /* 8260 * So here we are going to take the original rsm and make it what we 8261 * retransmitted. nrsm will be the tail portion we did not 8262 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 8263 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 8264 * 1, 6 and the new piece will be 6, 11. 
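 * In that example c_end is 6: rack_clone_rsm() below hands the
 * tail 6, 11 to nrsm while rsm keeps 1, 6, and only rsm (the part
 * actually resent) is updated via rack_update_rsm().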
8265 */ 8266 rack_clone_rsm(rack, nrsm, rsm, c_end); 8267 nrsm->r_dupack = 0; 8268 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 8269 #ifndef INVARIANTS 8270 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 8271 #else 8272 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 8273 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 8274 nrsm, insret, rack, rsm); 8275 } 8276 #endif 8277 if (rsm->r_in_tmap) { 8278 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8279 nrsm->r_in_tmap = 1; 8280 } 8281 rsm->r_flags &= (~RACK_HAS_FIN); 8282 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); 8283 /* Log a split of rsm into rsm and nrsm */ 8284 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 8285 *lenp = 0; 8286 return (0); 8287 } 8288 8289 static void 8290 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 8291 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts, 8292 struct rack_sendmap *hintrsm, uint32_t add_flag, struct mbuf *s_mb, 8293 uint32_t s_moff, int hw_tls, int segsiz) 8294 { 8295 struct tcp_rack *rack; 8296 struct rack_sendmap *rsm, *nrsm; 8297 int insret __diagused; 8298 8299 register uint32_t snd_max, snd_una; 8300 8301 /* 8302 * Add to the RACK log of packets in flight or retransmitted. If 8303 * there is a TS option we will use the TS echoed, if not we will 8304 * grab a TS. 8305 * 8306 * Retransmissions will increment the count and move the ts to its 8307 * proper place. Note that if options do not include TS's then we 8308 * won't be able to effectively use the ACK for an RTT on a retran. 8309 * 8310 * Notes about r_start and r_end. Lets consider a send starting at 8311 * sequence 1 for 10 bytes. In such an example the r_start would be 8312 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 8313 * This means that r_end is actually the first sequence for the next 8314 * slot (11). 8315 * 8316 */ 8317 /* 8318 * If err is set what do we do XXXrrs? should we not add the thing? 8319 * -- i.e. return if err != 0 or should we pretend we sent it? -- 8320 * i.e. proceed with add ** do this for now. 8321 */ 8322 INP_WLOCK_ASSERT(tptoinpcb(tp)); 8323 if (err) 8324 /* 8325 * We don't log errors -- we could but snd_max does not 8326 * advance in this case either. 8327 */ 8328 return; 8329 8330 if (th_flags & TH_RST) { 8331 /* 8332 * We don't log resets and we return immediately from 8333 * sending 8334 */ 8335 return; 8336 } 8337 rack = (struct tcp_rack *)tp->t_fb_ptr; 8338 snd_una = tp->snd_una; 8339 snd_max = tp->snd_max; 8340 if (th_flags & (TH_SYN | TH_FIN)) { 8341 /* 8342 * The call to rack_log_output is made before bumping 8343 * snd_max. This means we can record one extra byte on a SYN 8344 * or FIN if seq_out is adding more on and a FIN is present 8345 * (and we are not resending). 8346 */ 8347 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 8348 len++; 8349 if (th_flags & TH_FIN) 8350 len++; 8351 } 8352 if (SEQ_LEQ((seq_out + len), snd_una)) { 8353 /* Are sending an old segment to induce an ack (keep-alive)? */ 8354 return; 8355 } 8356 if (SEQ_LT(seq_out, snd_una)) { 8357 /* huh? should we panic? */ 8358 uint32_t end; 8359 8360 end = seq_out + len; 8361 seq_out = snd_una; 8362 if (SEQ_GEQ(end, seq_out)) 8363 len = end - seq_out; 8364 else 8365 len = 0; 8366 } 8367 if (len == 0) { 8368 /* We don't log zero window probes */ 8369 return; 8370 } 8371 if (IN_FASTRECOVERY(tp->t_flags)) { 8372 rack->r_ctl.rc_prr_out += len; 8373 } 8374 /* First question is it a retransmission or new? 
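 * A send starting exactly at snd_max is new data and gets a fresh
 * rack_sendmap entry; any other starting sequence must already be
 * covered by an existing entry and is handled as a retransmission.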
*/ 8375 if (seq_out == snd_max) { 8376 /* Its new */ 8377 rack_chk_req_and_hybrid_on_out(rack, seq_out, len, cts); 8378 again: 8379 rsm = rack_alloc(rack); 8380 if (rsm == NULL) { 8381 /* 8382 * Hmm out of memory and the tcb got destroyed while 8383 * we tried to wait. 8384 */ 8385 return; 8386 } 8387 if (th_flags & TH_FIN) { 8388 rsm->r_flags = RACK_HAS_FIN|add_flag; 8389 } else { 8390 rsm->r_flags = add_flag; 8391 } 8392 if (hw_tls) 8393 rsm->r_hw_tls = 1; 8394 rsm->r_tim_lastsent[0] = cts; 8395 rsm->r_rtr_cnt = 1; 8396 rsm->r_act_rxt_cnt = 0; 8397 rsm->r_rtr_bytes = 0; 8398 if (th_flags & TH_SYN) { 8399 /* The data space is one beyond snd_una */ 8400 rsm->r_flags |= RACK_HAS_SYN; 8401 } 8402 rsm->r_start = seq_out; 8403 rsm->r_end = rsm->r_start + len; 8404 rack_mark_in_gp_win(tp, rsm); 8405 rsm->r_dupack = 0; 8406 /* 8407 * save off the mbuf location that 8408 * sndmbuf_noadv returned (which is 8409 * where we started copying from).. 8410 */ 8411 rsm->m = s_mb; 8412 rsm->soff = s_moff; 8413 /* 8414 * Here we do add in the len of send, since its not yet 8415 * reflected in in snduna <->snd_max 8416 */ 8417 rsm->r_fas = (ctf_flight_size(rack->rc_tp, 8418 rack->r_ctl.rc_sacked) + 8419 (rsm->r_end - rsm->r_start)); 8420 if ((rack->rc_initial_ss_comp == 0) && 8421 (rack->r_ctl.ss_hi_fs < rsm->r_fas)) { 8422 rack->r_ctl.ss_hi_fs = rsm->r_fas; 8423 } 8424 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ 8425 if (rsm->m) { 8426 if (rsm->m->m_len <= rsm->soff) { 8427 /* 8428 * XXXrrs Question, will this happen? 8429 * 8430 * If sbsndptr is set at the correct place 8431 * then s_moff should always be somewhere 8432 * within rsm->m. But if the sbsndptr was 8433 * off then that won't be true. If it occurs 8434 * we need to walkout to the correct location. 8435 */ 8436 struct mbuf *lm; 8437 8438 lm = rsm->m; 8439 while (lm->m_len <= rsm->soff) { 8440 rsm->soff -= lm->m_len; 8441 lm = lm->m_next; 8442 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", 8443 __func__, rack, s_moff, s_mb, rsm->soff)); 8444 } 8445 rsm->m = lm; 8446 } 8447 rsm->orig_m_len = rsm->m->m_len; 8448 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 8449 } else { 8450 rsm->orig_m_len = 0; 8451 rsm->orig_t_space = 0; 8452 } 8453 rsm->r_bas = (uint8_t)((len + segsiz - 1) / segsiz); 8454 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8455 /* Log a new rsm */ 8456 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); 8457 #ifndef INVARIANTS 8458 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 8459 #else 8460 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 8461 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 8462 nrsm, insret, rack, rsm); 8463 } 8464 #endif 8465 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8466 rsm->r_in_tmap = 1; 8467 if (rsm->r_flags & RACK_IS_PCM) { 8468 rack->r_ctl.pcm_i.send_time = cts; 8469 rack->r_ctl.pcm_i.eseq = rsm->r_end; 8470 /* First time through we set the start too */ 8471 if (rack->pcm_in_progress == 0) 8472 rack->r_ctl.pcm_i.sseq = rsm->r_start; 8473 } 8474 /* 8475 * Special case detection, is there just a single 8476 * packet outstanding when we are not in recovery? 8477 * 8478 * If this is true mark it so. 
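 * The previous rsm gets r_one_out_nr set; tcp_rack_xmit_timer()
 * later treats RTT samples from such a send (r_one_out_nr set and
 * less than two MSS acked) as low confidence.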
8479 */ 8480 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 8481 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 8482 struct rack_sendmap *prsm; 8483 8484 prsm = tqhash_prev(rack->r_ctl.tqh, rsm); 8485 if (prsm) 8486 prsm->r_one_out_nr = 1; 8487 } 8488 return; 8489 } 8490 /* 8491 * If we reach here its a retransmission and we need to find it. 8492 */ 8493 more: 8494 if (hintrsm && (hintrsm->r_start == seq_out)) { 8495 rsm = hintrsm; 8496 hintrsm = NULL; 8497 } else { 8498 /* No hints sorry */ 8499 rsm = NULL; 8500 } 8501 if ((rsm) && (rsm->r_start == seq_out)) { 8502 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); 8503 if (len == 0) { 8504 return; 8505 } else { 8506 goto more; 8507 } 8508 } 8509 /* Ok it was not the last pointer go through it the hard way. */ 8510 refind: 8511 rsm = tqhash_find(rack->r_ctl.tqh, seq_out); 8512 if (rsm) { 8513 if (rsm->r_start == seq_out) { 8514 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); 8515 if (len == 0) { 8516 return; 8517 } else { 8518 goto refind; 8519 } 8520 } 8521 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 8522 /* Transmitted within this piece */ 8523 /* 8524 * Ok we must split off the front and then let the 8525 * update do the rest 8526 */ 8527 nrsm = rack_alloc_full_limit(rack); 8528 if (nrsm == NULL) { 8529 rack_update_rsm(tp, rack, rsm, cts, add_flag, segsiz); 8530 return; 8531 } 8532 /* 8533 * copy rsm to nrsm and then trim the front of rsm 8534 * to not include this part. 8535 */ 8536 rack_clone_rsm(rack, nrsm, rsm, seq_out); 8537 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 8538 #ifndef INVARIANTS 8539 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 8540 #else 8541 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 8542 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 8543 nrsm, insret, rack, rsm); 8544 } 8545 #endif 8546 if (rsm->r_in_tmap) { 8547 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8548 nrsm->r_in_tmap = 1; 8549 } 8550 rsm->r_flags &= (~RACK_HAS_FIN); 8551 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag, segsiz); 8552 if (len == 0) { 8553 return; 8554 } else if (len > 0) 8555 goto refind; 8556 } 8557 } 8558 /* 8559 * Hmm not found in map did they retransmit both old and on into the 8560 * new? 8561 */ 8562 if (seq_out == tp->snd_max) { 8563 goto again; 8564 } else if (SEQ_LT(seq_out, tp->snd_max)) { 8565 #ifdef INVARIANTS 8566 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 8567 seq_out, len, tp->snd_una, tp->snd_max); 8568 printf("Starting Dump of all rack entries\n"); 8569 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 8570 printf("rsm:%p start:%u end:%u\n", 8571 rsm, rsm->r_start, rsm->r_end); 8572 } 8573 printf("Dump complete\n"); 8574 panic("seq_out not found rack:%p tp:%p", 8575 rack, tp); 8576 #endif 8577 } else { 8578 #ifdef INVARIANTS 8579 /* 8580 * Hmm beyond sndmax? (only if we are using the new rtt-pack 8581 * flag) 8582 */ 8583 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 8584 seq_out, len, tp->snd_max, tp); 8585 #endif 8586 } 8587 } 8588 8589 /* 8590 * Record one of the RTT updates from an ack into 8591 * our sample structure. 
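 * A single ack may cover several rack_sendmap entries, so rack_rs
 * accumulates the lowest, highest and total rtt seen across them,
 * along with the lowest us_rtt (preferring confident samples over
 * ones tainted by r_just_ret/r_one_out_nr).  The finished sample
 * is folded into srtt by tcp_rack_xmit_timer_commit().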
8592 */ 8593 8594 static void 8595 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt, 8596 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt) 8597 { 8598 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 8599 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { 8600 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; 8601 } 8602 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 8603 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { 8604 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; 8605 } 8606 if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 8607 if (us_rtt < rack->r_ctl.rc_gp_lowrtt) 8608 rack->r_ctl.rc_gp_lowrtt = us_rtt; 8609 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd) 8610 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 8611 } 8612 if ((confidence == 1) && 8613 ((rsm == NULL) || 8614 (rsm->r_just_ret) || 8615 (rsm->r_one_out_nr && 8616 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) { 8617 /* 8618 * If the rsm had a just return 8619 * hit it then we can't trust the 8620 * rtt measurement for buffer deterimination 8621 * Note that a confidence of 2, indicates 8622 * SACK'd which overrides the r_just_ret or 8623 * the r_one_out_nr. If it was a CUM-ACK and 8624 * we had only two outstanding, but get an 8625 * ack for only 1. Then that also lowers our 8626 * confidence. 8627 */ 8628 confidence = 0; 8629 } 8630 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 8631 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) { 8632 if (rack->r_ctl.rack_rs.confidence == 0) { 8633 /* 8634 * We take anything with no current confidence 8635 * saved. 8636 */ 8637 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 8638 rack->r_ctl.rack_rs.confidence = confidence; 8639 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 8640 } else if (confidence != 0) { 8641 /* 8642 * Once we have a confident number, 8643 * we can update it with a smaller 8644 * value since this confident number 8645 * may include the DSACK time until 8646 * the next segment (the second one) arrived. 8647 */ 8648 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 8649 rack->r_ctl.rack_rs.confidence = confidence; 8650 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 8651 } 8652 } 8653 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence); 8654 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; 8655 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; 8656 rack->r_ctl.rack_rs.rs_rtt_cnt++; 8657 } 8658 8659 /* 8660 * Collect new round-trip time estimate 8661 * and update averages and current timeout. 
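 * rc_rate_sample_method picks which rtt from the per-ack sample is
 * used (USE_RTT_LOW, USE_RTT_HIGH or USE_RTT_AVG); that value then
 * drives the srtt/rttvar smoothing and the rexmt timer below.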
8662 */ 8663 static void 8664 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 8665 { 8666 int32_t delta; 8667 int32_t rtt; 8668 8669 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 8670 /* No valid sample */ 8671 return; 8672 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 8673 /* We are to use the lowest RTT seen in a single ack */ 8674 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 8675 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 8676 /* We are to use the highest RTT seen in a single ack */ 8677 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 8678 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 8679 /* We are to use the average RTT seen in a single ack */ 8680 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 8681 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 8682 } else { 8683 #ifdef INVARIANTS 8684 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 8685 #endif 8686 return; 8687 } 8688 if (rtt == 0) 8689 rtt = 1; 8690 if (rack->rc_gp_rtt_set == 0) { 8691 /* 8692 * With no RTT we have to accept 8693 * even one we are not confident of. 8694 */ 8695 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 8696 rack->rc_gp_rtt_set = 1; 8697 } else if (rack->r_ctl.rack_rs.confidence) { 8698 /* update the running gp srtt */ 8699 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 8700 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 8701 } 8702 if (rack->r_ctl.rack_rs.confidence) { 8703 /* 8704 * record the low and high for highly buffered path computation, 8705 * we only do this if we are confident (not a retransmission). 8706 */ 8707 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 8708 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 8709 } 8710 if (rack->rc_highly_buffered == 0) { 8711 /* 8712 * Currently once we declare a path has 8713 * highly buffered there is no going 8714 * back, which may be a problem... 8715 */ 8716 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 8717 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 8718 rack->r_ctl.rc_highest_us_rtt, 8719 rack->r_ctl.rc_lowest_us_rtt, 8720 RACK_RTTS_SEEHBP); 8721 rack->rc_highly_buffered = 1; 8722 } 8723 } 8724 } 8725 if ((rack->r_ctl.rack_rs.confidence) || 8726 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 8727 /* 8728 * If we are highly confident of it <or> it was 8729 * never retransmitted we accept it as the last us_rtt. 8730 */ 8731 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 8732 /* The lowest rtt can be set if its was not retransmited */ 8733 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 8734 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 8735 if (rack->r_ctl.rc_lowest_us_rtt == 0) 8736 rack->r_ctl.rc_lowest_us_rtt = 1; 8737 } 8738 } 8739 rack = (struct tcp_rack *)tp->t_fb_ptr; 8740 if (tp->t_srtt != 0) { 8741 /* 8742 * We keep a simple srtt in microseconds, like our rtt 8743 * measurement. We don't need to do any tricks with shifting 8744 * etc. Instead we just add in 1/8th of the new measurement 8745 * and subtract out 1/8 of the old srtt. We do the same with 8746 * the variance after finding the absolute value of the 8747 * difference between this sample and the current srtt. 
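 * For example, with t_srtt = 40000 us, t_rttvar = 4000 us and a
 * new sample rtt = 48000 us: delta = -8000, srtt becomes
 * 40000 - 5000 + 6000 = 41000 us and rttvar becomes
 * 4000 - 500 + 1000 = 4500 us.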
8748 */ 8749 delta = tp->t_srtt - rtt; 8750 /* Take off 1/8th of the current sRTT */ 8751 tp->t_srtt -= (tp->t_srtt >> 3); 8752 /* Add in 1/8th of the new RTT just measured */ 8753 tp->t_srtt += (rtt >> 3); 8754 if (tp->t_srtt <= 0) 8755 tp->t_srtt = 1; 8756 /* Now lets make the absolute value of the variance */ 8757 if (delta < 0) 8758 delta = -delta; 8759 /* Subtract out 1/8th */ 8760 tp->t_rttvar -= (tp->t_rttvar >> 3); 8761 /* Add in 1/8th of the new variance we just saw */ 8762 tp->t_rttvar += (delta >> 3); 8763 if (tp->t_rttvar <= 0) 8764 tp->t_rttvar = 1; 8765 } else { 8766 /* 8767 * No rtt measurement yet - use the unsmoothed rtt. Set the 8768 * variance to half the rtt (so our first retransmit happens 8769 * at 3*rtt). 8770 */ 8771 tp->t_srtt = rtt; 8772 tp->t_rttvar = rtt >> 1; 8773 } 8774 rack->rc_srtt_measure_made = 1; 8775 KMOD_TCPSTAT_INC(tcps_rttupdated); 8776 if (tp->t_rttupdated < UCHAR_MAX) 8777 tp->t_rttupdated++; 8778 #ifdef STATS 8779 if (rack_stats_gets_ms_rtt == 0) { 8780 /* Send in the microsecond rtt used for rxt timeout purposes */ 8781 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 8782 } else if (rack_stats_gets_ms_rtt == 1) { 8783 /* Send in the millisecond rtt used for rxt timeout purposes */ 8784 int32_t ms_rtt; 8785 8786 /* Round up */ 8787 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 8788 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 8789 } else if (rack_stats_gets_ms_rtt == 2) { 8790 /* Send in the millisecond rtt has close to the path RTT as we can get */ 8791 int32_t ms_rtt; 8792 8793 /* Round up */ 8794 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 8795 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 8796 } else { 8797 /* Send in the microsecond rtt has close to the path RTT as we can get */ 8798 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 8799 } 8800 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_PATHRTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 8801 #endif 8802 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 8803 /* 8804 * the retransmit should happen at rtt + 4 * rttvar. Because of the 8805 * way we do the smoothing, srtt and rttvar will each average +1/2 8806 * tick of bias. When we compute the retransmit timer, we want 1/2 8807 * tick of rounding and 1 extra tick because of +-1/2 tick 8808 * uncertainty in the firing of the timer. The bias will give us 8809 * exactly the 1.5 tick we need. But, because the bias is 8810 * statistical, we have to test that we don't drop below the minimum 8811 * feasible timer (which is 2 ticks). 8812 */ 8813 tp->t_rxtshift = 0; 8814 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 8815 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 8816 rack_log_rtt_sample(rack, rtt); 8817 tp->t_softerror = 0; 8818 } 8819 8820 8821 static void 8822 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 8823 { 8824 /* 8825 * Apply to filter the inbound us-rtt at us_cts. 
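 * rc_gp_min_rtt is a running minimum filter.  When the new sample
 * lowers it by more than rack_min_rtt_movement we record the new
 * low and, if dynamic pacing is in use, may enter probe-rtt early
 * on the theory that a competing flow just did the same.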
8826 */ 8827 uint32_t old_rtt; 8828 8829 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 8830 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 8831 us_rtt, us_cts); 8832 if (old_rtt > us_rtt) { 8833 /* We just hit a new lower rtt time */ 8834 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 8835 __LINE__, RACK_RTTS_NEWRTT); 8836 /* 8837 * Only count it if its lower than what we saw within our 8838 * calculated range. 8839 */ 8840 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 8841 if (rack_probertt_lower_within && 8842 rack->rc_gp_dyn_mul && 8843 (rack->use_fixed_rate == 0) && 8844 (rack->rc_always_pace)) { 8845 /* 8846 * We are seeing a new lower rtt very close 8847 * to the time that we would have entered probe-rtt. 8848 * This is probably due to the fact that a peer flow 8849 * has entered probe-rtt. Lets go in now too. 8850 */ 8851 uint32_t val; 8852 8853 val = rack_probertt_lower_within * rack_time_between_probertt; 8854 val /= 100; 8855 if ((rack->in_probe_rtt == 0) && 8856 (rack->rc_skip_timely == 0) && 8857 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 8858 rack_enter_probertt(rack, us_cts); 8859 } 8860 } 8861 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 8862 } 8863 } 8864 } 8865 8866 static int 8867 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 8868 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 8869 { 8870 uint32_t us_rtt; 8871 int32_t i, all; 8872 uint32_t t, len_acked; 8873 8874 if ((rsm->r_flags & RACK_ACKED) || 8875 (rsm->r_flags & RACK_WAS_ACKED)) 8876 /* Already done */ 8877 return (0); 8878 if (rsm->r_no_rtt_allowed) { 8879 /* Not allowed */ 8880 return (0); 8881 } 8882 if (ack_type == CUM_ACKED) { 8883 if (SEQ_GT(th_ack, rsm->r_end)) { 8884 len_acked = rsm->r_end - rsm->r_start; 8885 all = 1; 8886 } else { 8887 len_acked = th_ack - rsm->r_start; 8888 all = 0; 8889 } 8890 } else { 8891 len_acked = rsm->r_end - rsm->r_start; 8892 all = 0; 8893 } 8894 if (rsm->r_rtr_cnt == 1) { 8895 8896 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 8897 if ((int)t <= 0) 8898 t = 1; 8899 if (!tp->t_rttlow || tp->t_rttlow > t) 8900 tp->t_rttlow = t; 8901 if (!rack->r_ctl.rc_rack_min_rtt || 8902 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8903 rack->r_ctl.rc_rack_min_rtt = t; 8904 if (rack->r_ctl.rc_rack_min_rtt == 0) { 8905 rack->r_ctl.rc_rack_min_rtt = 1; 8906 } 8907 } 8908 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 8909 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8910 else 8911 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8912 if (us_rtt == 0) 8913 us_rtt = 1; 8914 if (CC_ALGO(tp)->rttsample != NULL) { 8915 /* Kick the RTT to the CC */ 8916 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 8917 } 8918 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 8919 if (ack_type == SACKED) { 8920 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 8921 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 8922 } else { 8923 /* 8924 * We need to setup what our confidence 8925 * is in this ack. 8926 * 8927 * If the rsm was app limited and it is 8928 * less than a mss in length (the end 8929 * of the send) then we have a gap. 
If we 8930 * were app limited but say we were sending 8931 * multiple MSS's then we are more confident 8932 * int it. 8933 * 8934 * When we are not app-limited then we see if 8935 * the rsm is being included in the current 8936 * measurement, we tell this by the app_limited_needs_set 8937 * flag. 8938 * 8939 * Note that being cwnd blocked is not applimited 8940 * as well as the pacing delay between packets which 8941 * are sending only 1 or 2 MSS's also will show up 8942 * in the RTT. We probably need to examine this algorithm 8943 * a bit more and enhance it to account for the delay 8944 * between rsm's. We could do that by saving off the 8945 * pacing delay of each rsm (in an rsm) and then 8946 * factoring that in somehow though for now I am 8947 * not sure how :) 8948 */ 8949 int calc_conf = 0; 8950 8951 if (rsm->r_flags & RACK_APP_LIMITED) { 8952 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 8953 calc_conf = 0; 8954 else 8955 calc_conf = 1; 8956 } else if (rack->app_limited_needs_set == 0) { 8957 calc_conf = 1; 8958 } else { 8959 calc_conf = 0; 8960 } 8961 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 8962 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 8963 calc_conf, rsm, rsm->r_rtr_cnt); 8964 } 8965 if ((rsm->r_flags & RACK_TLP) && 8966 (!IN_FASTRECOVERY(tp->t_flags))) { 8967 /* Segment was a TLP and our retrans matched */ 8968 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 8969 rack_cong_signal(tp, CC_NDUPACK, th_ack, __LINE__); 8970 } 8971 } 8972 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 8973 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 8974 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { 8975 /* New more recent rack_tmit_time */ 8976 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 8977 if (rack->r_ctl.rc_rack_tmit_time == 0) 8978 rack->r_ctl.rc_rack_tmit_time = 1; 8979 rack->rc_rack_rtt = t; 8980 } 8981 return (1); 8982 } 8983 /* 8984 * We clear the soft/rxtshift since we got an ack. 8985 * There is no assurance we will call the commit() function 8986 * so we need to clear these to avoid incorrect handling. 8987 */ 8988 tp->t_rxtshift = 0; 8989 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 8990 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 8991 tp->t_softerror = 0; 8992 if (to && (to->to_flags & TOF_TS) && 8993 (ack_type == CUM_ACKED) && 8994 (to->to_tsecr) && 8995 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 8996 /* 8997 * Now which timestamp does it match? In this block the ACK 8998 * must be coming from a previous transmission. 8999 */ 9000 for (i = 0; i < rsm->r_rtr_cnt; i++) { 9001 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 9002 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 9003 if ((int)t <= 0) 9004 t = 1; 9005 if (CC_ALGO(tp)->rttsample != NULL) { 9006 /* 9007 * Kick the RTT to the CC, here 9008 * we lie a bit in that we know the 9009 * retransmission is correct even though 9010 * we retransmitted. This is because 9011 * we match the timestamps. 9012 */ 9013 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) 9014 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; 9015 else 9016 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; 9017 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 9018 } 9019 if ((i + 1) < rsm->r_rtr_cnt) { 9020 /* 9021 * The peer ack'd from our previous 9022 * transmission. 
We have a spurious 9023 * retransmission and thus we dont 9024 * want to update our rack_rtt. 9025 * 9026 * Hmm should there be a CC revert here? 9027 * 9028 */ 9029 return (0); 9030 } 9031 if (!tp->t_rttlow || tp->t_rttlow > t) 9032 tp->t_rttlow = t; 9033 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9034 rack->r_ctl.rc_rack_min_rtt = t; 9035 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9036 rack->r_ctl.rc_rack_min_rtt = 1; 9037 } 9038 } 9039 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9040 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9041 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { 9042 /* New more recent rack_tmit_time */ 9043 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 9044 if (rack->r_ctl.rc_rack_tmit_time == 0) 9045 rack->r_ctl.rc_rack_tmit_time = 1; 9046 rack->rc_rack_rtt = t; 9047 } 9048 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); 9049 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, 9050 rsm->r_rtr_cnt); 9051 return (1); 9052 } 9053 } 9054 /* If we are logging log out the sendmap */ 9055 if (tcp_bblogging_on(rack->rc_tp)) { 9056 for (i = 0; i < rsm->r_rtr_cnt; i++) { 9057 rack_log_rtt_sendmap(rack, i, rsm->r_tim_lastsent[i], to->to_tsecr); 9058 } 9059 } 9060 goto ts_not_found; 9061 } else { 9062 /* 9063 * Ok its a SACK block that we retransmitted. or a windows 9064 * machine without timestamps. We can tell nothing from the 9065 * time-stamp since its not there or the time the peer last 9066 * received a segment that moved forward its cum-ack point. 9067 */ 9068 ts_not_found: 9069 i = rsm->r_rtr_cnt - 1; 9070 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 9071 if ((int)t <= 0) 9072 t = 1; 9073 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9074 /* 9075 * We retransmitted and the ack came back in less 9076 * than the smallest rtt we have observed. We most 9077 * likely did an improper retransmit as outlined in 9078 * 6.2 Step 2 point 2 in the rack-draft so we 9079 * don't want to update our rack_rtt. We in 9080 * theory (in future) might want to think about reverting our 9081 * cwnd state but we won't for now. 9082 */ 9083 return (0); 9084 } else if (rack->r_ctl.rc_rack_min_rtt) { 9085 /* 9086 * We retransmitted it and the retransmit did the 9087 * job. 9088 */ 9089 if (!rack->r_ctl.rc_rack_min_rtt || 9090 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9091 rack->r_ctl.rc_rack_min_rtt = t; 9092 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9093 rack->r_ctl.rc_rack_min_rtt = 1; 9094 } 9095 } 9096 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9097 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9098 (uint32_t)rsm->r_tim_lastsent[i]))) { 9099 /* New more recent rack_tmit_time */ 9100 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; 9101 if (rack->r_ctl.rc_rack_tmit_time == 0) 9102 rack->r_ctl.rc_rack_tmit_time = 1; 9103 rack->rc_rack_rtt = t; 9104 } 9105 return (1); 9106 } 9107 } 9108 return (0); 9109 } 9110 9111 /* 9112 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 
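 * We walk the transmit map (send order) backwards from rsm.  Each
 * earlier-sent entry that is not already acked (and not rwnd
 * collapsed) gets RACK_SACK_PASSED; if the sacked rsm was sent at
 * least the rack threshold ago, such entries are also marked
 * RACK_WAS_LOST and counted in rc_considered_lost.  We stop at the
 * first entry already marked passed, since everything earlier was
 * handled by a previous call.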
9113 */ 9114 static void 9115 rack_log_sack_passed(struct tcpcb *tp, 9116 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts) 9117 { 9118 struct rack_sendmap *nrsm; 9119 uint32_t thresh; 9120 9121 /* Get our rxt threshold for lost consideration */ 9122 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(tp, rack), cts, __LINE__, 0); 9123 /* Now start looking at rsm's */ 9124 nrsm = rsm; 9125 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 9126 rack_head, r_tnext) { 9127 if (nrsm == rsm) { 9128 /* Skip original segment he is acked */ 9129 continue; 9130 } 9131 if (nrsm->r_flags & RACK_ACKED) { 9132 /* 9133 * Skip ack'd segments, though we 9134 * should not see these, since tmap 9135 * should not have ack'd segments. 9136 */ 9137 continue; 9138 } 9139 if (nrsm->r_flags & RACK_RWND_COLLAPSED) { 9140 /* 9141 * If the peer dropped the rwnd on 9142 * these then we don't worry about them. 9143 */ 9144 continue; 9145 } 9146 /* Check lost state */ 9147 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { 9148 uint32_t exp; 9149 9150 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; 9151 if (TSTMP_LT(exp, cts) || (exp == cts)) { 9152 /* We consider it lost */ 9153 nrsm->r_flags |= RACK_WAS_LOST; 9154 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; 9155 } 9156 } 9157 if (nrsm->r_flags & RACK_SACK_PASSED) { 9158 /* 9159 * We found one that is already marked 9160 * passed, we have been here before and 9161 * so all others below this are marked. 9162 */ 9163 break; 9164 } 9165 nrsm->r_flags |= RACK_SACK_PASSED; 9166 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 9167 } 9168 } 9169 9170 static void 9171 rack_need_set_test(struct tcpcb *tp, 9172 struct tcp_rack *rack, 9173 struct rack_sendmap *rsm, 9174 tcp_seq th_ack, 9175 int line, 9176 int use_which) 9177 { 9178 struct rack_sendmap *s_rsm; 9179 9180 if ((tp->t_flags & TF_GPUTINPROG) && 9181 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 9182 /* 9183 * We were app limited, and this ack 9184 * butts up or goes beyond the point where we want 9185 * to start our next measurement. We need 9186 * to record the new gput_ts as here and 9187 * possibly update the start sequence. 9188 */ 9189 uint32_t seq, ts; 9190 9191 if (rsm->r_rtr_cnt > 1) { 9192 /* 9193 * This is a retransmit, can we 9194 * really make any assessment at this 9195 * point? We are not really sure of 9196 * the timestamp, is it this or the 9197 * previous transmission? 9198 * 9199 * Lets wait for something better that 9200 * is not retransmitted. 9201 */ 9202 return; 9203 } 9204 seq = tp->gput_seq; 9205 ts = tp->gput_ts; 9206 rack->app_limited_needs_set = 0; 9207 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 9208 /* Do we start at a new end? */ 9209 if ((use_which == RACK_USE_BEG) && 9210 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { 9211 /* 9212 * When we get an ACK that just eats 9213 * up some of the rsm, we set RACK_USE_BEG 9214 * since whats at r_start (i.e. th_ack) 9215 * is left unacked and thats where the 9216 * measurement now starts. 9217 */ 9218 tp->gput_seq = rsm->r_start; 9219 } 9220 if ((use_which == RACK_USE_END) && 9221 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 9222 /* 9223 * We use the end when the cumack 9224 * is moving forward and completely 9225 * deleting the rsm passed so basically 9226 * r_end holds th_ack. 9227 * 9228 * For SACK's we also want to use the end 9229 * since this piece just got sacked and 9230 * we want to target anything after that 9231 * in our measurement. 
9232 */ 9233 tp->gput_seq = rsm->r_end; 9234 } 9235 if (use_which == RACK_USE_END_OR_THACK) { 9236 /* 9237 * special case for ack moving forward, 9238 * not a sack, we need to move all the 9239 * way up to where this ack cum-ack moves 9240 * to. 9241 */ 9242 if (SEQ_GT(th_ack, rsm->r_end)) 9243 tp->gput_seq = th_ack; 9244 else 9245 tp->gput_seq = rsm->r_end; 9246 } 9247 if (SEQ_LT(tp->gput_seq, tp->snd_max)) 9248 s_rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 9249 else 9250 s_rsm = NULL; 9251 /* 9252 * Pick up the correct send time if we can the rsm passed in 9253 * may be equal to s_rsm if the RACK_USE_BEG was set. For the other 9254 * two cases (RACK_USE_THACK or RACK_USE_END) most likely we will 9255 * find a different seq i.e. the next send up. 9256 * 9257 * If that has not been sent, s_rsm will be NULL and we must 9258 * arrange it so this function will get called again by setting 9259 * app_limited_needs_set. 9260 */ 9261 if (s_rsm) 9262 rack->r_ctl.rc_gp_output_ts = s_rsm->r_tim_lastsent[0]; 9263 else { 9264 /* If we hit here we have to have *not* sent tp->gput_seq */ 9265 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; 9266 /* Set it up so we will go through here again */ 9267 rack->app_limited_needs_set = 1; 9268 } 9269 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { 9270 /* 9271 * We moved beyond this guy's range, re-calculate 9272 * the new end point. 9273 */ 9274 if (rack->rc_gp_filled == 0) { 9275 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 9276 } else { 9277 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 9278 } 9279 } 9280 /* 9281 * We are moving the goal post, we may be able to clear the 9282 * measure_saw_probe_rtt flag. 9283 */ 9284 if ((rack->in_probe_rtt == 0) && 9285 (rack->measure_saw_probe_rtt) && 9286 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 9287 rack->measure_saw_probe_rtt = 0; 9288 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 9289 seq, tp->gput_seq, 9290 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | 9291 (uint64_t)rack->r_ctl.rc_gp_output_ts), 9292 5, line, NULL, 0); 9293 if (rack->rc_gp_filled && 9294 ((tp->gput_ack - tp->gput_seq) < 9295 max(rc_init_window(rack), (MIN_GP_WIN * 9296 ctf_fixed_maxseg(tp))))) { 9297 uint32_t ideal_amount; 9298 9299 ideal_amount = rack_get_measure_window(tp, rack); 9300 if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) { 9301 /* 9302 * There is no sense of continuing this measurement 9303 * because its too small to gain us anything we 9304 * trust. Skip it and that way we can start a new 9305 * measurement quicker. 9306 */ 9307 tp->t_flags &= ~TF_GPUTINPROG; 9308 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 9309 0, 0, 9310 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | 9311 (uint64_t)rack->r_ctl.rc_gp_output_ts), 9312 6, __LINE__, NULL, 0); 9313 } else { 9314 /* 9315 * Reset the window further out. 
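 * Here the send buffer holds enough data to cover a full
 * measurement window, so push gput_ack out to the ideal size
 * rather than abandoning the measurement.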
9316 */ 9317 tp->gput_ack = tp->gput_seq + ideal_amount; 9318 } 9319 } 9320 rack_tend_gp_marks(tp, rack); 9321 rack_log_gpset(rack, tp->gput_ack, 0, 0, line, 2, rsm); 9322 } 9323 } 9324 9325 static inline int 9326 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) 9327 { 9328 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { 9329 /* Behind our TLP definition or right at */ 9330 return (0); 9331 } 9332 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { 9333 /* The start is beyond or right at our end of TLP definition */ 9334 return (0); 9335 } 9336 /* It has to be a sub-part of the original TLP recorded */ 9337 return (1); 9338 } 9339 9340 static uint32_t 9341 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 9342 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, 9343 uint32_t segsiz) 9344 { 9345 uint32_t start, end, changed = 0; 9346 struct rack_sendmap stack_map; 9347 struct rack_sendmap *rsm, *nrsm, *prev, *next; 9348 int insret __diagused; 9349 int32_t used_ref = 1; 9350 int can_use_hookery = 0; 9351 9352 start = sack->start; 9353 end = sack->end; 9354 rsm = *prsm; 9355 9356 do_rest_ofb: 9357 if ((rsm == NULL) || 9358 (SEQ_LT(end, rsm->r_start)) || 9359 (SEQ_GEQ(start, rsm->r_end)) || 9360 (SEQ_LT(start, rsm->r_start))) { 9361 /* 9362 * We are not in the right spot, 9363 * find the correct spot in the tree. 9364 */ 9365 used_ref = 0; 9366 rsm = tqhash_find(rack->r_ctl.tqh, start); 9367 } 9368 if (rsm == NULL) { 9369 /* TSNH */ 9370 goto out; 9371 } 9372 /* Ok we have an ACK for some piece of this rsm */ 9373 if (rsm->r_start != start) { 9374 if ((rsm->r_flags & RACK_ACKED) == 0) { 9375 /* 9376 * Before any splitting or hookery is 9377 * done is it a TLP of interest i.e. rxt? 9378 */ 9379 if ((rsm->r_flags & RACK_TLP) && 9380 (rsm->r_rtr_cnt > 1)) { 9381 /* 9382 * We are splitting a rxt TLP, check 9383 * if we need to save off the start/end 9384 */ 9385 if (rack->rc_last_tlp_acked_set && 9386 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9387 /* 9388 * We already turned this on since we are inside 9389 * the previous one was a partially sack now we 9390 * are getting another one (maybe all of it). 9391 * 9392 */ 9393 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9394 /* 9395 * Lets make sure we have all of it though. 9396 */ 9397 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9398 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9399 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9400 rack->r_ctl.last_tlp_acked_end); 9401 } 9402 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9403 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9404 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9405 rack->r_ctl.last_tlp_acked_end); 9406 } 9407 } else { 9408 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9409 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9410 rack->rc_last_tlp_past_cumack = 0; 9411 rack->rc_last_tlp_acked_set = 1; 9412 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9413 } 9414 } 9415 /** 9416 * Need to split this in two pieces the before and after, 9417 * the before remains in the map, the after must be 9418 * added. 
In other words we have: 9419 * rsm |--------------| 9420 * sackblk |-------> 9421 * rsm will become 9422 * rsm |---| 9423 * and nrsm will be the sacked piece 9424 * nrsm |----------| 9425 * 9426 * But before we start down that path lets 9427 * see if the sack spans over on top of 9428 * the next guy and it is already sacked. 9429 * 9430 */ 9431 /* 9432 * Hookery can only be used if the two entries 9433 * are in the same bucket and neither one of 9434 * them staddle the bucket line. 9435 */ 9436 next = tqhash_next(rack->r_ctl.tqh, rsm); 9437 if (next && 9438 (rsm->bindex == next->bindex) && 9439 ((rsm->r_flags & RACK_STRADDLE) == 0) && 9440 ((next->r_flags & RACK_STRADDLE) == 0) && 9441 ((rsm->r_flags & RACK_IS_PCM) == 0) && 9442 ((next->r_flags & RACK_IS_PCM) == 0) && 9443 (rsm->r_flags & RACK_IN_GP_WIN) && 9444 (next->r_flags & RACK_IN_GP_WIN)) 9445 can_use_hookery = 1; 9446 else 9447 can_use_hookery = 0; 9448 if (next && can_use_hookery && 9449 (next->r_flags & RACK_ACKED) && 9450 SEQ_GEQ(end, next->r_start)) { 9451 /** 9452 * So the next one is already acked, and 9453 * we can thus by hookery use our stack_map 9454 * to reflect the piece being sacked and 9455 * then adjust the two tree entries moving 9456 * the start and ends around. So we start like: 9457 * rsm |------------| (not-acked) 9458 * next |-----------| (acked) 9459 * sackblk |--------> 9460 * We want to end like so: 9461 * rsm |------| (not-acked) 9462 * next |-----------------| (acked) 9463 * nrsm |-----| 9464 * Where nrsm is a temporary stack piece we 9465 * use to update all the gizmos. 9466 */ 9467 /* Copy up our fudge block */ 9468 nrsm = &stack_map; 9469 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 9470 /* Now adjust our tree blocks */ 9471 tqhash_update_end(rack->r_ctl.tqh, rsm, start); 9472 next->r_start = start; 9473 rsm->r_flags |= RACK_SHUFFLED; 9474 next->r_flags |= RACK_SHUFFLED; 9475 /* Now we must adjust back where next->m is */ 9476 rack_setup_offset_for_rsm(rack, rsm, next); 9477 /* 9478 * Which timestamp do we keep? It is rather 9479 * important in GP measurements to have the 9480 * accurate end of the send window. 9481 * 9482 * We keep the largest value, which is the newest 9483 * send. We do this in case a segment that is 9484 * joined together and not part of a GP estimate 9485 * later gets expanded into the GP estimate. 9486 * 9487 * We prohibit the merging of unlike kinds i.e. 9488 * all pieces that are in the GP estimate can be 9489 * merged and all pieces that are not in a GP estimate 9490 * can be merged, but not disimilar pieces. Combine 9491 * this with taking the highest here and we should 9492 * be ok unless of course the client reneges. Then 9493 * all bets are off. 9494 */ 9495 if (next->r_tim_lastsent[(next->r_rtr_cnt-1)] < 9496 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) 9497 next->r_tim_lastsent[(next->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]; 9498 /* 9499 * And we must keep the newest ack arrival time. 
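 * (The surviving "next" entry keeps the larger of its existing
 * r_ack_arrival and the current receive time, so the merged piece
 * reflects the newest ack.)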
9500 */ 9501 if (next->r_ack_arrival < 9502 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) 9503 next->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9504 9505 9506 /* We don't need to adjust rsm, it did not change */ 9507 /* Clear out the dup ack count of the remainder */ 9508 rsm->r_dupack = 0; 9509 rsm->r_just_ret = 0; 9510 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9511 /* Now lets make sure our fudge block is right */ 9512 nrsm->r_start = start; 9513 /* Now lets update all the stats and such */ 9514 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 9515 if (rack->app_limited_needs_set) 9516 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 9517 changed += (nrsm->r_end - nrsm->r_start); 9518 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 9519 if (rsm->r_flags & RACK_WAS_LOST) { 9520 int my_chg; 9521 9522 my_chg = (nrsm->r_end - nrsm->r_start); 9523 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 9524 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 9525 if (my_chg <= rack->r_ctl.rc_considered_lost) 9526 rack->r_ctl.rc_considered_lost -= my_chg; 9527 else 9528 rack->r_ctl.rc_considered_lost = 0; 9529 } 9530 if (nrsm->r_flags & RACK_SACK_PASSED) { 9531 rack->r_ctl.rc_reorder_ts = cts; 9532 if (rack->r_ctl.rc_reorder_ts == 0) 9533 rack->r_ctl.rc_reorder_ts = 1; 9534 } 9535 /* 9536 * Now we want to go up from rsm (the 9537 * one left un-acked) to the next one 9538 * in the tmap. We do this so when 9539 * we walk backwards we include marking 9540 * sack-passed on rsm (The one passed in 9541 * is skipped since it is generally called 9542 * on something sacked before removing it 9543 * from the tmap). 9544 */ 9545 if (rsm->r_in_tmap) { 9546 nrsm = TAILQ_NEXT(rsm, r_tnext); 9547 /* 9548 * Now that we have the next 9549 * one walk backwards from there. 9550 */ 9551 if (nrsm && nrsm->r_in_tmap) 9552 rack_log_sack_passed(tp, rack, nrsm, cts); 9553 } 9554 /* Now are we done? */ 9555 if (SEQ_LT(end, next->r_end) || 9556 (end == next->r_end)) { 9557 /* Done with block */ 9558 goto out; 9559 } 9560 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); 9561 counter_u64_add(rack_sack_used_next_merge, 1); 9562 /* Postion for the next block */ 9563 start = next->r_end; 9564 rsm = tqhash_next(rack->r_ctl.tqh, next); 9565 if (rsm == NULL) 9566 goto out; 9567 } else { 9568 /** 9569 * We can't use any hookery here, so we 9570 * need to split the map. We enter like 9571 * so: 9572 * rsm |--------| 9573 * sackblk |-----> 9574 * We will add the new block nrsm and 9575 * that will be the new portion, and then 9576 * fall through after reseting rsm. So we 9577 * split and look like this: 9578 * rsm |----| 9579 * sackblk |-----> 9580 * nrsm |---| 9581 * We then fall through reseting 9582 * rsm to nrsm, so the next block 9583 * picks it up. 9584 */ 9585 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 9586 if (nrsm == NULL) { 9587 /* 9588 * failed XXXrrs what can we do but loose the sack 9589 * info? 
9590 */ 9591 goto out; 9592 } 9593 counter_u64_add(rack_sack_splits, 1); 9594 rack_clone_rsm(rack, nrsm, rsm, start); 9595 rsm->r_just_ret = 0; 9596 #ifndef INVARIANTS 9597 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 9598 #else 9599 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 9600 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 9601 nrsm, insret, rack, rsm); 9602 } 9603 #endif 9604 if (rsm->r_in_tmap) { 9605 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 9606 nrsm->r_in_tmap = 1; 9607 } 9608 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 9609 rsm->r_flags &= (~RACK_HAS_FIN); 9610 /* Position us to point to the new nrsm that starts the sack blk */ 9611 rsm = nrsm; 9612 } 9613 } else { 9614 /* Already sacked this piece */ 9615 counter_u64_add(rack_sack_skipped_acked, 1); 9616 if (end == rsm->r_end) { 9617 /* Done with block */ 9618 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 9619 goto out; 9620 } else if (SEQ_LT(end, rsm->r_end)) { 9621 /* A partial sack to a already sacked block */ 9622 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 9623 goto out; 9624 } else { 9625 /* 9626 * The end goes beyond this guy 9627 * reposition the start to the 9628 * next block. 9629 */ 9630 start = rsm->r_end; 9631 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 9632 if (rsm == NULL) 9633 goto out; 9634 } 9635 } 9636 } 9637 if (SEQ_GEQ(end, rsm->r_end)) { 9638 /** 9639 * The end of this block is either beyond this guy or right 9640 * at this guy. I.e.: 9641 * rsm --- |-----| 9642 * end |-----| 9643 * <or> 9644 * end |---------| 9645 */ 9646 if ((rsm->r_flags & RACK_ACKED) == 0) { 9647 /* 9648 * Is it a TLP of interest? 9649 */ 9650 if ((rsm->r_flags & RACK_TLP) && 9651 (rsm->r_rtr_cnt > 1)) { 9652 /* 9653 * We are splitting a rxt TLP, check 9654 * if we need to save off the start/end 9655 */ 9656 if (rack->rc_last_tlp_acked_set && 9657 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9658 /* 9659 * We already turned this on since we are inside 9660 * the previous one was a partially sack now we 9661 * are getting another one (maybe all of it). 9662 */ 9663 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9664 /* 9665 * Lets make sure we have all of it though. 
9666 */ 9667 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9668 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9669 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9670 rack->r_ctl.last_tlp_acked_end); 9671 } 9672 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9673 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9674 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9675 rack->r_ctl.last_tlp_acked_end); 9676 } 9677 } else { 9678 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9679 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9680 rack->rc_last_tlp_past_cumack = 0; 9681 rack->rc_last_tlp_acked_set = 1; 9682 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9683 } 9684 } 9685 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 9686 changed += (rsm->r_end - rsm->r_start); 9687 /* You get a count for acking a whole segment or more */ 9688 if (rsm->r_flags & RACK_WAS_LOST) { 9689 int my_chg; 9690 9691 my_chg = (rsm->r_end - rsm->r_start); 9692 rsm->r_flags &= ~RACK_WAS_LOST; 9693 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 9694 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 9695 if (my_chg <= rack->r_ctl.rc_considered_lost) 9696 rack->r_ctl.rc_considered_lost -= my_chg; 9697 else 9698 rack->r_ctl.rc_considered_lost = 0; 9699 } 9700 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 9701 if (rsm->r_in_tmap) /* should be true */ 9702 rack_log_sack_passed(tp, rack, rsm, cts); 9703 /* Is Reordering occuring? */ 9704 if (rsm->r_flags & RACK_SACK_PASSED) { 9705 rsm->r_flags &= ~RACK_SACK_PASSED; 9706 rack->r_ctl.rc_reorder_ts = cts; 9707 if (rack->r_ctl.rc_reorder_ts == 0) 9708 rack->r_ctl.rc_reorder_ts = 1; 9709 } 9710 if (rack->app_limited_needs_set) 9711 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 9712 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9713 rsm->r_flags |= RACK_ACKED; 9714 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); 9715 if (rsm->r_in_tmap) { 9716 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 9717 rsm->r_in_tmap = 0; 9718 } 9719 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); 9720 } else { 9721 counter_u64_add(rack_sack_skipped_acked, 1); 9722 } 9723 if (end == rsm->r_end) { 9724 /* This block only - done, setup for next */ 9725 goto out; 9726 } 9727 /* 9728 * There is more not coverend by this rsm move on 9729 * to the next block in the tail queue hash table. 9730 */ 9731 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); 9732 start = rsm->r_end; 9733 rsm = nrsm; 9734 if (rsm == NULL) 9735 goto out; 9736 goto do_rest_ofb; 9737 } 9738 /** 9739 * The end of this sack block is smaller than 9740 * our rsm i.e.: 9741 * rsm --- |-----| 9742 * end |--| 9743 */ 9744 if ((rsm->r_flags & RACK_ACKED) == 0) { 9745 /* 9746 * Is it a TLP of interest? 9747 */ 9748 if ((rsm->r_flags & RACK_TLP) && 9749 (rsm->r_rtr_cnt > 1)) { 9750 /* 9751 * We are splitting a rxt TLP, check 9752 * if we need to save off the start/end 9753 */ 9754 if (rack->rc_last_tlp_acked_set && 9755 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9756 /* 9757 * We already turned this on since we are inside 9758 * the previous one was a partially sack now we 9759 * are getting another one (maybe all of it). 9760 */ 9761 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9762 /* 9763 * Lets make sure we have all of it though. 
9764 */ 9765 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9766 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9767 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9768 rack->r_ctl.last_tlp_acked_end); 9769 } 9770 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9771 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9772 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9773 rack->r_ctl.last_tlp_acked_end); 9774 } 9775 } else { 9776 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9777 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9778 rack->rc_last_tlp_past_cumack = 0; 9779 rack->rc_last_tlp_acked_set = 1; 9780 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9781 } 9782 } 9783 /* 9784 * Hookery can only be used if the two entries 9785 * are in the same bucket and neither one of 9786 * them staddle the bucket line. 9787 */ 9788 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 9789 if (prev && 9790 (rsm->bindex == prev->bindex) && 9791 ((rsm->r_flags & RACK_STRADDLE) == 0) && 9792 ((prev->r_flags & RACK_STRADDLE) == 0) && 9793 ((rsm->r_flags & RACK_IS_PCM) == 0) && 9794 ((prev->r_flags & RACK_IS_PCM) == 0) && 9795 (rsm->r_flags & RACK_IN_GP_WIN) && 9796 (prev->r_flags & RACK_IN_GP_WIN)) 9797 can_use_hookery = 1; 9798 else 9799 can_use_hookery = 0; 9800 if (prev && can_use_hookery && 9801 (prev->r_flags & RACK_ACKED)) { 9802 /** 9803 * Goal, we want the right remainder of rsm to shrink 9804 * in place and span from (rsm->r_start = end) to rsm->r_end. 9805 * We want to expand prev to go all the way 9806 * to prev->r_end <- end. 9807 * so in the tree we have before: 9808 * prev |--------| (acked) 9809 * rsm |-------| (non-acked) 9810 * sackblk |-| 9811 * We churn it so we end up with 9812 * prev |----------| (acked) 9813 * rsm |-----| (non-acked) 9814 * nrsm |-| (temporary) 9815 * 9816 * Note if either prev/rsm is a TLP we don't 9817 * do this. 9818 */ 9819 nrsm = &stack_map; 9820 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 9821 tqhash_update_end(rack->r_ctl.tqh, prev, end); 9822 rsm->r_start = end; 9823 rsm->r_flags |= RACK_SHUFFLED; 9824 prev->r_flags |= RACK_SHUFFLED; 9825 /* Now adjust nrsm (stack copy) to be 9826 * the one that is the small 9827 * piece that was "sacked". 9828 */ 9829 nrsm->r_end = end; 9830 rsm->r_dupack = 0; 9831 /* 9832 * Which timestamp do we keep? It is rather 9833 * important in GP measurements to have the 9834 * accurate end of the send window. 9835 * 9836 * We keep the largest value, which is the newest 9837 * send. We do this in case a segment that is 9838 * joined together and not part of a GP estimate 9839 * later gets expanded into the GP estimate. 9840 * 9841 * We prohibit the merging of unlike kinds i.e. 9842 * all pieces that are in the GP estimate can be 9843 * merged and all pieces that are not in a GP estimate 9844 * can be merged, but not disimilar pieces. Combine 9845 * this with taking the highest here and we should 9846 * be ok unless of course the client reneges. Then 9847 * all bets are off. 9848 */ 9849 if(prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] < 9850 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) { 9851 prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9852 } 9853 /* 9854 * And we must keep the newest ack arrival time. 
9855 */ 9856 9857 if(prev->r_ack_arrival < 9858 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) 9859 prev->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9860 9861 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9862 /* 9863 * Now that the rsm has had its start moved forward 9864 * lets go ahead and get its new place in the world. 9865 */ 9866 rack_setup_offset_for_rsm(rack, prev, rsm); 9867 /* 9868 * Now nrsm is our new little piece 9869 * that is acked (which was merged 9870 * to prev). Update the rtt and changed 9871 * based on that. Also check for reordering. 9872 */ 9873 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 9874 if (rack->app_limited_needs_set) 9875 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 9876 changed += (nrsm->r_end - nrsm->r_start); 9877 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 9878 if (rsm->r_flags & RACK_WAS_LOST) { 9879 int my_chg; 9880 9881 my_chg = (nrsm->r_end - nrsm->r_start); 9882 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 9883 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 9884 if (my_chg <= rack->r_ctl.rc_considered_lost) 9885 rack->r_ctl.rc_considered_lost -= my_chg; 9886 else 9887 rack->r_ctl.rc_considered_lost = 0; 9888 } 9889 if (nrsm->r_flags & RACK_SACK_PASSED) { 9890 rack->r_ctl.rc_reorder_ts = cts; 9891 if (rack->r_ctl.rc_reorder_ts == 0) 9892 rack->r_ctl.rc_reorder_ts = 1; 9893 } 9894 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); 9895 rsm = prev; 9896 counter_u64_add(rack_sack_used_prev_merge, 1); 9897 } else { 9898 /** 9899 * This is the case where our previous 9900 * block is not acked either, so we must 9901 * split the block in two. 9902 */ 9903 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 9904 if (nrsm == NULL) { 9905 /* failed rrs what can we do but loose the sack info? */ 9906 goto out; 9907 } 9908 if ((rsm->r_flags & RACK_TLP) && 9909 (rsm->r_rtr_cnt > 1)) { 9910 /* 9911 * We are splitting a rxt TLP, check 9912 * if we need to save off the start/end 9913 */ 9914 if (rack->rc_last_tlp_acked_set && 9915 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9916 /* 9917 * We already turned this on since this block is inside 9918 * the previous one was a partially sack now we 9919 * are getting another one (maybe all of it). 9920 */ 9921 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9922 /* 9923 * Lets make sure we have all of it though. 9924 */ 9925 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9926 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9927 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9928 rack->r_ctl.last_tlp_acked_end); 9929 } 9930 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9931 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9932 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9933 rack->r_ctl.last_tlp_acked_end); 9934 } 9935 } else { 9936 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9937 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9938 rack->rc_last_tlp_acked_set = 1; 9939 rack->rc_last_tlp_past_cumack = 0; 9940 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9941 } 9942 } 9943 /** 9944 * In this case nrsm becomes 9945 * nrsm->r_start = end; 9946 * nrsm->r_end = rsm->r_end; 9947 * which is un-acked. 9948 * <and> 9949 * rsm->r_end = nrsm->r_start; 9950 * i.e. the remaining un-acked 9951 * piece is left on the left 9952 * hand side. 
				 *
				 * So we start like this
				 * rsm     |----------| (not acked)
				 * sackblk |---|
				 * build it so we have
				 * rsm     |---| (acked)
				 * nrsm        |------| (not acked)
				 */
				counter_u64_add(rack_sack_splits, 1);
				rack_clone_rsm(rack, nrsm, rsm, end);
				rsm->r_flags &= (~RACK_HAS_FIN);
				rsm->r_just_ret = 0;
#ifndef INVARIANTS
				(void)tqhash_insert(rack->r_ctl.tqh, nrsm);
#else
				if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
					panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
					      nrsm, insret, rack, rsm);
				}
#endif
				if (rsm->r_in_tmap) {
					TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
					nrsm->r_in_tmap = 1;
				}
				nrsm->r_dupack = 0;
				rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
				rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
				changed += (rsm->r_end - rsm->r_start);
				if (rsm->r_flags & RACK_WAS_LOST) {
					int my_chg;

					my_chg = (rsm->r_end - rsm->r_start);
					rsm->r_flags &= ~RACK_WAS_LOST;
					KASSERT((rack->r_ctl.rc_considered_lost >= my_chg),
						("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
					if (my_chg <= rack->r_ctl.rc_considered_lost)
						rack->r_ctl.rc_considered_lost -= my_chg;
					else
						rack->r_ctl.rc_considered_lost = 0;
				}
				rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);

				if (rsm->r_in_tmap) /* should be true */
					rack_log_sack_passed(tp, rack, rsm, cts);
				/* Is Reordering occurring? */
				if (rsm->r_flags & RACK_SACK_PASSED) {
					rsm->r_flags &= ~RACK_SACK_PASSED;
					rack->r_ctl.rc_reorder_ts = cts;
					if (rack->r_ctl.rc_reorder_ts == 0)
						rack->r_ctl.rc_reorder_ts = 1;
				}
				if (rack->app_limited_needs_set)
					rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
				rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
				rsm->r_flags |= RACK_ACKED;
				rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end);
				rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__);
				if (rsm->r_in_tmap) {
					TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
					rsm->r_in_tmap = 0;
				}
			}
		} else if (start != end) {
			/*
			 * The block was already acked.
			 */
			counter_u64_add(rack_sack_skipped_acked, 1);
		}
out:
	if (rsm &&
	    ((rsm->r_flags & RACK_TLP) == 0) &&
	    (rsm->r_flags & RACK_ACKED)) {
		/*
		 * Now can we merge where we worked
		 * with either the previous or
		 * next block?
		 */
		next = tqhash_next(rack->r_ctl.tqh, rsm);
		while (next) {
			if (next->r_flags & RACK_TLP)
				break;
			/* Only allow merges between ones in or out of GP window */
			if ((next->r_flags & RACK_IN_GP_WIN) &&
			    ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) {
				break;
			}
			if ((rsm->r_flags & RACK_IN_GP_WIN) &&
			    ((next->r_flags & RACK_IN_GP_WIN) == 0)) {
				break;
			}
			if (rsm->bindex != next->bindex)
				break;
			if (rsm->r_flags & RACK_STRADDLE)
				break;
			if (rsm->r_flags & RACK_IS_PCM)
				break;
			if (next->r_flags & RACK_STRADDLE)
				break;
			if (next->r_flags & RACK_IS_PCM)
				break;
			if (next->r_flags & RACK_ACKED) {
				/* yep this and next can be merged */
				rsm = rack_merge_rsm(rack, rsm, next);
				next = tqhash_next(rack->r_ctl.tqh, rsm);
			} else
				break;
		}
		/* Now what about the previous?
*/ 10061 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10062 while (prev) { 10063 if (prev->r_flags & RACK_TLP) 10064 break; 10065 /* Only allow merges between ones in or out of GP window */ 10066 if ((prev->r_flags & RACK_IN_GP_WIN) && 10067 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { 10068 break; 10069 } 10070 if ((rsm->r_flags & RACK_IN_GP_WIN) && 10071 ((prev->r_flags & RACK_IN_GP_WIN) == 0)) { 10072 break; 10073 } 10074 if (rsm->bindex != prev->bindex) 10075 break; 10076 if (rsm->r_flags & RACK_STRADDLE) 10077 break; 10078 if (rsm->r_flags & RACK_IS_PCM) 10079 break; 10080 if (prev->r_flags & RACK_STRADDLE) 10081 break; 10082 if (prev->r_flags & RACK_IS_PCM) 10083 break; 10084 if (prev->r_flags & RACK_ACKED) { 10085 /* yep the previous and this can be merged */ 10086 rsm = rack_merge_rsm(rack, prev, rsm); 10087 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10088 } else 10089 break; 10090 } 10091 } 10092 if (used_ref == 0) { 10093 counter_u64_add(rack_sack_proc_all, 1); 10094 } else { 10095 counter_u64_add(rack_sack_proc_short, 1); 10096 } 10097 /* Save off the next one for quick reference. */ 10098 nrsm = tqhash_find(rack->r_ctl.tqh, end); 10099 *prsm = rack->r_ctl.rc_sacklast = nrsm; 10100 return (changed); 10101 } 10102 10103 static void inline 10104 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 10105 { 10106 struct rack_sendmap *tmap; 10107 10108 tmap = NULL; 10109 while (rsm && (rsm->r_flags & RACK_ACKED)) { 10110 /* Its no longer sacked, mark it so */ 10111 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 10112 #ifdef INVARIANTS 10113 if (rsm->r_in_tmap) { 10114 panic("rack:%p rsm:%p flags:0x%x in tmap?", 10115 rack, rsm, rsm->r_flags); 10116 } 10117 #endif 10118 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 10119 /* Rebuild it into our tmap */ 10120 if (tmap == NULL) { 10121 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10122 tmap = rsm; 10123 } else { 10124 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 10125 tmap = rsm; 10126 } 10127 tmap->r_in_tmap = 1; 10128 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 10129 } 10130 /* 10131 * Now lets possibly clear the sack filter so we start 10132 * recognizing sacks that cover this area. 10133 */ 10134 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 10135 10136 } 10137 10138 10139 static void inline 10140 rack_rsm_sender_update(struct tcp_rack *rack, struct tcpcb *tp, struct rack_sendmap *rsm, uint8_t from) 10141 { 10142 /* 10143 * We look at advancing the end send time for our GP 10144 * measurement tracking only as the cumulative acknowledgment 10145 * moves forward. You might wonder about this, why not 10146 * at every transmission or retransmission within the 10147 * GP window update the rc_gp_cumack_ts? Well its rather 10148 * nuanced but basically the GP window *may* expand (as 10149 * it does below) or worse and harder to track it may shrink. 10150 * 10151 * This last makes it impossible to track at the time of 10152 * the send, since you may set forward your rc_gp_cumack_ts 10153 * when you send, because that send *is* in your currently 10154 * "guessed" window, but then it shrinks. Now which was 10155 * the send time of the last bytes in the window, by the 10156 * time you ask that question that part of the sendmap 10157 * is freed. So you don't know and you will have too 10158 * long of send window. 
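	 * A hypothetical sketch of the problem: say the window was
	 * guessed to end at sequence 20000 and a send of 18000-20000
	 * stamped the marker at transmit time; if the window later
	 * shrinks to end at 16000, that stamp describes bytes outside
	 * the measurement and the send interval is over-stated.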
	 * Instead, by updating the time marker only when the cumack
	 * advances, this assures us that we will have only the sends
	 * in the window of our GP measurement.
	 *
	 * Another complication from this is the
	 * merging of sendmap entries. During SACK processing this
	 * can happen to conserve the sendmap size. That breaks
	 * everything down in tracking the send window of the GP
	 * estimate. So to prevent that and keep it working with
	 * a tiny bit more limited merging, we only allow like
	 * types to be merged. I.e. if two sends are in the GP window
	 * then it's ok to merge them together. If two sends are not
	 * in the GP window it's ok to merge them together too. Though
	 * one send in and one send out cannot be merged. We combine
	 * this with never allowing the shrinking of the GP window when
	 * we are in recovery so that we can properly calculate the
	 * sending times.
	 *
	 * This all of course seems complicated, because it is.. :)
	 *
	 * The cum-ack is being advanced upon the sendmap.
	 * If we are not doing a GP estimate don't
	 * proceed.
	 */
	uint64_t ts;

	if ((tp->t_flags & TF_GPUTINPROG) == 0)
		return;
	/*
	 * If this sendmap entry is going
	 * beyond the measurement window we had picked,
	 * expand the measurement window by that much.
	 */
	if (SEQ_GT(rsm->r_end, tp->gput_ack)) {
		tp->gput_ack = rsm->r_end;
	}
	/*
	 * If we have not set up an ack, then we
	 * have no idea if the newly acked pieces
	 * will be "in our seq measurement range". If
	 * it is when we clear the app_limited_needs_set
	 * flag the timestamp will be updated.
	 */
	if (rack->app_limited_needs_set)
		return;
	/*
	 * Finally, we grab the latest timestamp
	 * that this packet was sent and then see
	 * if:
	 * a) The packet touches our newly defined GP range.
	 * b) The time is greater (newer) than the
	 *    one we currently have. If so we update
	 *    our sending end time window.
	 *
	 * Note we *do not* do this at send time. The reason
	 * is that if you do you *may* pick up a newer timestamp
	 * for a range you are not going to measure. We project
	 * out how far and then sometimes modify that to be
	 * smaller. If that occurs then you will have a send
	 * that does not belong to the range included.
	 */
	if ((ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) <=
	    rack->r_ctl.rc_gp_cumack_ts)
		return;
	if (rack_in_gp_window(tp, rsm)) {
		rack->r_ctl.rc_gp_cumack_ts = ts;
		rack_log_gpset(rack, tp->gput_ack, (uint32_t)ts, rsm->r_end,
			       __LINE__, from, rsm);
	}
}

static void
rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to, uint64_t acktime)
{
	struct rack_sendmap *rsm;
	/*
	 * The ACK point is advancing to th_ack, we must drop off
	 * the packets in the rack log and calculate any eligible
	 * RTTs.
	 */

	if (sack_filter_blks_used(&rack->r_ctl.rack_sf)) {
		/*
		 * If we have some sack blocks in the filter
		 * lets prune them out by calling sfb with no blocks.
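		 * (Passing a NULL list with the advanced th_ack lets the
		 * filter discard anything it holds at or below the new
		 * cum-ack point.)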
10244 */ 10245 sack_filter_blks(tp, &rack->r_ctl.rack_sf, NULL, 0, th_ack); 10246 } 10247 if (SEQ_GT(th_ack, tp->snd_una)) { 10248 /* Clear any app ack remembered settings */ 10249 rack->r_ctl.cleared_app_ack = 0; 10250 } 10251 rack->r_wanted_output = 1; 10252 if (SEQ_GT(th_ack, tp->snd_una)) 10253 rack->r_ctl.last_cumack_advance = acktime; 10254 10255 /* Tend any TLP that has been marked for 1/2 the seq space (its old) */ 10256 if ((rack->rc_last_tlp_acked_set == 1)&& 10257 (rack->rc_last_tlp_past_cumack == 1) && 10258 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { 10259 /* 10260 * We have reached the point where our last rack 10261 * tlp retransmit sequence is ahead of the cum-ack. 10262 * This can only happen when the cum-ack moves all 10263 * the way around (its been a full 2^^31+1 bytes 10264 * or more since we sent a retransmitted TLP). Lets 10265 * turn off the valid flag since its not really valid. 10266 * 10267 * Note since sack's also turn on this event we have 10268 * a complication, we have to wait to age it out until 10269 * the cum-ack is by the TLP before checking which is 10270 * what the next else clause does. 10271 */ 10272 rack_log_dsack_event(rack, 9, __LINE__, 10273 rack->r_ctl.last_tlp_acked_start, 10274 rack->r_ctl.last_tlp_acked_end); 10275 rack->rc_last_tlp_acked_set = 0; 10276 rack->rc_last_tlp_past_cumack = 0; 10277 } else if ((rack->rc_last_tlp_acked_set == 1) && 10278 (rack->rc_last_tlp_past_cumack == 0) && 10279 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { 10280 /* 10281 * It is safe to start aging TLP's out. 10282 */ 10283 rack->rc_last_tlp_past_cumack = 1; 10284 } 10285 /* We do the same for the tlp send seq as well */ 10286 if ((rack->rc_last_sent_tlp_seq_valid == 1) && 10287 (rack->rc_last_sent_tlp_past_cumack == 1) && 10288 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { 10289 rack_log_dsack_event(rack, 9, __LINE__, 10290 rack->r_ctl.last_sent_tlp_seq, 10291 (rack->r_ctl.last_sent_tlp_seq + 10292 rack->r_ctl.last_sent_tlp_len)); 10293 rack->rc_last_sent_tlp_seq_valid = 0; 10294 rack->rc_last_sent_tlp_past_cumack = 0; 10295 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && 10296 (rack->rc_last_sent_tlp_past_cumack == 0) && 10297 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { 10298 /* 10299 * It is safe to start aging TLP's send. 10300 */ 10301 rack->rc_last_sent_tlp_past_cumack = 1; 10302 } 10303 more: 10304 rsm = tqhash_min(rack->r_ctl.tqh); 10305 if (rsm == NULL) { 10306 if ((th_ack - 1) == tp->iss) { 10307 /* 10308 * For the SYN incoming case we will not 10309 * have called tcp_output for the sending of 10310 * the SYN, so there will be no map. All 10311 * other cases should probably be a panic. 10312 */ 10313 return; 10314 } 10315 if (tp->t_flags & TF_SENTFIN) { 10316 /* if we sent a FIN we often will not have map */ 10317 return; 10318 } 10319 #ifdef INVARIANTS 10320 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u\n", 10321 tp, 10322 tp->t_state, th_ack, rack, 10323 tp->snd_una, tp->snd_max); 10324 #endif 10325 return; 10326 } 10327 if (SEQ_LT(th_ack, rsm->r_start)) { 10328 /* Huh map is missing this */ 10329 #ifdef INVARIANTS 10330 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 10331 rsm->r_start, 10332 th_ack, tp->t_state, rack->r_state); 10333 #endif 10334 return; 10335 } 10336 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); 10337 10338 /* Now was it a retransmitted TLP? 
*/ 10339 if ((rsm->r_flags & RACK_TLP) && 10340 (rsm->r_rtr_cnt > 1)) { 10341 /* 10342 * Yes, this rsm was a TLP and retransmitted, remember that 10343 * since if a DSACK comes back on this we don't want 10344 * to think of it as a reordered segment. This may 10345 * get updated again with possibly even other TLPs 10346 * in flight, but thats ok. Only when we don't send 10347 * a retransmitted TLP for 1/2 the sequences space 10348 * will it get turned off (above). 10349 */ 10350 if (rack->rc_last_tlp_acked_set && 10351 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 10352 /* 10353 * We already turned this on since the end matches, 10354 * the previous one was a partially ack now we 10355 * are getting another one (maybe all of it). 10356 */ 10357 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 10358 /* 10359 * Lets make sure we have all of it though. 10360 */ 10361 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 10362 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10363 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10364 rack->r_ctl.last_tlp_acked_end); 10365 } 10366 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 10367 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10368 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10369 rack->r_ctl.last_tlp_acked_end); 10370 } 10371 } else { 10372 rack->rc_last_tlp_past_cumack = 1; 10373 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10374 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10375 rack->rc_last_tlp_acked_set = 1; 10376 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 10377 } 10378 } 10379 /* Now do we consume the whole thing? */ 10380 rack->r_ctl.last_tmit_time_acked = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 10381 if (SEQ_GEQ(th_ack, rsm->r_end)) { 10382 /* Its all consumed. */ 10383 uint32_t left; 10384 uint8_t newly_acked; 10385 10386 if (rsm->r_flags & RACK_WAS_LOST) { 10387 /* 10388 * This can happen when we marked it as lost 10389 * and yet before retransmitting we get an ack 10390 * which can happen due to reordering. 10391 */ 10392 rsm->r_flags &= ~RACK_WAS_LOST; 10393 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), 10394 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 10395 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) 10396 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; 10397 else 10398 rack->r_ctl.rc_considered_lost = 0; 10399 } 10400 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 10401 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 10402 rsm->r_rtr_bytes = 0; 10403 /* 10404 * Record the time of highest cumack sent if its in our measurement 10405 * window and possibly bump out the end. 10406 */ 10407 rack_rsm_sender_update(rack, tp, rsm, 4); 10408 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); 10409 if (rsm->r_in_tmap) { 10410 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10411 rsm->r_in_tmap = 0; 10412 } 10413 newly_acked = 1; 10414 if (rsm->r_flags & RACK_ACKED) { 10415 /* 10416 * It was acked on the scoreboard -- remove 10417 * it from total 10418 */ 10419 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 10420 newly_acked = 0; 10421 } else if (rsm->r_flags & RACK_SACK_PASSED) { 10422 /* 10423 * There are segments ACKED on the 10424 * scoreboard further up. We are seeing 10425 * reordering. 
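			 * Mark it acked and stamp rc_reorder_ts; if nothing new
			 * has been sent since recovery was entered (r_ent_rec_ns)
			 * this may later allow the congestion response to be
			 * reverted.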
10426 */ 10427 rsm->r_flags &= ~RACK_SACK_PASSED; 10428 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10429 rsm->r_flags |= RACK_ACKED; 10430 rack->r_ctl.rc_reorder_ts = cts; 10431 if (rack->r_ctl.rc_reorder_ts == 0) 10432 rack->r_ctl.rc_reorder_ts = 1; 10433 if (rack->r_ent_rec_ns) { 10434 /* 10435 * We have sent no more, and we saw an sack 10436 * then ack arrive. 10437 */ 10438 rack->r_might_revert = 1; 10439 } 10440 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); 10441 } else { 10442 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); 10443 } 10444 if ((rsm->r_flags & RACK_TO_REXT) && 10445 (tp->t_flags & TF_RCVD_TSTMP) && 10446 (to->to_flags & TOF_TS) && 10447 (to->to_tsecr != 0) && 10448 (tp->t_flags & TF_PREVVALID)) { 10449 /* 10450 * We can use the timestamp to see 10451 * if this retransmission was from the 10452 * first transmit. If so we made a mistake. 10453 */ 10454 tp->t_flags &= ~TF_PREVVALID; 10455 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 10456 /* The first transmit is what this ack is for */ 10457 rack_cong_signal(tp, CC_RTO_ERR, th_ack, __LINE__); 10458 } 10459 } 10460 left = th_ack - rsm->r_end; 10461 if (rack->app_limited_needs_set && newly_acked) 10462 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 10463 /* Free back to zone */ 10464 rack_free(rack, rsm); 10465 if (left) { 10466 goto more; 10467 } 10468 /* Check for reneging */ 10469 rsm = tqhash_min(rack->r_ctl.tqh); 10470 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 10471 /* 10472 * The peer has moved snd_una up to 10473 * the edge of this send, i.e. one 10474 * that it had previously acked. The only 10475 * way that can be true if the peer threw 10476 * away data (space issues) that it had 10477 * previously sacked (else it would have 10478 * given us snd_una up to (rsm->r_end). 10479 * We need to undo the acked markings here. 10480 * 10481 * Note we have to look to make sure th_ack is 10482 * our rsm->r_start in case we get an old ack 10483 * where th_ack is behind snd_una. 10484 */ 10485 rack_peer_reneges(rack, rsm, th_ack); 10486 } 10487 return; 10488 } 10489 if (rsm->r_flags & RACK_ACKED) { 10490 /* 10491 * It was acked on the scoreboard -- remove it from 10492 * total for the part being cum-acked. 10493 */ 10494 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 10495 } else { 10496 rack_update_pcm_ack(rack, 1, rsm->r_start, th_ack); 10497 } 10498 /* And what about the lost flag? */ 10499 if (rsm->r_flags & RACK_WAS_LOST) { 10500 /* 10501 * This can happen when we marked it as lost 10502 * and yet before retransmitting we get an ack 10503 * which can happen due to reordering. In this 10504 * case its only a partial ack of the send. 10505 */ 10506 KASSERT((rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)), 10507 ("rsm:%p rack:%p rc_considered_lost goes negative th_ack:%u", rsm, rack, th_ack)); 10508 if (rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)) 10509 rack->r_ctl.rc_considered_lost -= th_ack - rsm->r_start; 10510 else 10511 rack->r_ctl.rc_considered_lost = 0; 10512 } 10513 /* 10514 * Clear the dup ack count for 10515 * the piece that remains. 10516 */ 10517 rsm->r_dupack = 0; 10518 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 10519 if (rsm->r_rtr_bytes) { 10520 /* 10521 * It was retransmitted adjust the 10522 * sack holes for what was acked. 
10523 */ 10524 int ack_am; 10525 10526 ack_am = (th_ack - rsm->r_start); 10527 if (ack_am >= rsm->r_rtr_bytes) { 10528 rack->r_ctl.rc_holes_rxt -= ack_am; 10529 rsm->r_rtr_bytes -= ack_am; 10530 } 10531 } 10532 /* 10533 * Update where the piece starts and record 10534 * the time of send of highest cumack sent if 10535 * its in our GP range. 10536 */ 10537 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 10538 /* Now we need to move our offset forward too */ 10539 if (rsm->m && 10540 ((rsm->orig_m_len != rsm->m->m_len) || 10541 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { 10542 /* Fix up the orig_m_len and possibly the mbuf offset */ 10543 rack_adjust_orig_mlen(rsm); 10544 } 10545 rsm->soff += (th_ack - rsm->r_start); 10546 rack_rsm_sender_update(rack, tp, rsm, 5); 10547 /* The trim will move th_ack into r_start for us */ 10548 tqhash_trim(rack->r_ctl.tqh, th_ack); 10549 /* Now do we need to move the mbuf fwd too? */ 10550 { 10551 struct mbuf *m; 10552 uint32_t soff; 10553 10554 m = rsm->m; 10555 soff = rsm->soff; 10556 if (m) { 10557 while (soff >= m->m_len) { 10558 soff -= m->m_len; 10559 KASSERT((m->m_next != NULL), 10560 (" rsm:%p off:%u soff:%u m:%p", 10561 rsm, rsm->soff, soff, m)); 10562 m = m->m_next; 10563 if (m == NULL) { 10564 /* 10565 * This is a fall-back that prevents a panic. In reality 10566 * we should be able to walk the mbuf's and find our place. 10567 * At this point snd_una has not been updated with the sbcut() yet 10568 * but tqhash_trim did update rsm->r_start so the offset calcuation 10569 * should work fine. This is undesirable since we will take cache 10570 * hits to access the socket buffer. And even more puzzling is that 10571 * it happens occasionally. It should not :( 10572 */ 10573 m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 10574 (rsm->r_start - tp->snd_una), 10575 &soff); 10576 break; 10577 } 10578 } 10579 /* 10580 * Now save in our updated values. 10581 */ 10582 rsm->m = m; 10583 rsm->soff = soff; 10584 rsm->orig_m_len = rsm->m->m_len; 10585 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 10586 } 10587 } 10588 if (rack->app_limited_needs_set && 10589 SEQ_GEQ(th_ack, tp->gput_seq)) 10590 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 10591 } 10592 10593 static void 10594 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 10595 { 10596 struct rack_sendmap *rsm; 10597 int sack_pass_fnd = 0; 10598 10599 if (rack->r_might_revert) { 10600 /* 10601 * Ok we have reordering, have not sent anything, we 10602 * might want to revert the congestion state if nothing 10603 * further has SACK_PASSED on it. Lets check. 10604 * 10605 * We also get here when we have DSACKs come in for 10606 * all the data that we FR'd. Note that a rxt or tlp 10607 * timer clears this from happening. 10608 */ 10609 10610 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 10611 if (rsm->r_flags & RACK_SACK_PASSED) { 10612 sack_pass_fnd = 1; 10613 break; 10614 } 10615 } 10616 if (sack_pass_fnd == 0) { 10617 /* 10618 * We went into recovery 10619 * incorrectly due to reordering! 
10620 */ 10621 int orig_cwnd; 10622 10623 rack->r_ent_rec_ns = 0; 10624 orig_cwnd = tp->snd_cwnd; 10625 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 10626 tp->snd_recover = tp->snd_una; 10627 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 10628 if (IN_RECOVERY(tp->t_flags)) { 10629 rack_exit_recovery(tp, rack, 3); 10630 if ((rack->rto_from_rec == 1) && (rack_ssthresh_rest_rto_rec != 0) ){ 10631 /* 10632 * We were in recovery, had an RTO 10633 * and then re-entered recovery (more sack's arrived) 10634 * and we have properly recorded the old ssthresh from 10635 * the first recovery. We want to be able to slow-start 10636 * back to this level. The ssthresh from the timeout 10637 * and then back into recovery will end up most likely 10638 * to be min(cwnd=1mss, 2mss). Which makes it basically 10639 * so we get no slow-start after our RTO. 10640 */ 10641 rack->rto_from_rec = 0; 10642 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) 10643 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; 10644 } 10645 } 10646 } 10647 rack->r_might_revert = 0; 10648 } 10649 } 10650 10651 10652 static int 10653 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 10654 { 10655 10656 uint32_t am, l_end; 10657 int was_tlp = 0; 10658 10659 if (SEQ_GT(end, start)) 10660 am = end - start; 10661 else 10662 am = 0; 10663 if ((rack->rc_last_tlp_acked_set ) && 10664 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && 10665 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { 10666 /* 10667 * The DSACK is because of a TLP which we don't 10668 * do anything with the reordering window over since 10669 * it was not reordering that caused the DSACK but 10670 * our previous retransmit TLP. 10671 */ 10672 rack_log_dsack_event(rack, 7, __LINE__, start, end); 10673 was_tlp = 1; 10674 goto skip_dsack_round; 10675 } 10676 if (rack->rc_last_sent_tlp_seq_valid) { 10677 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; 10678 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && 10679 (SEQ_LEQ(end, l_end))) { 10680 /* 10681 * This dsack is from the last sent TLP, ignore it 10682 * for reordering purposes. 10683 */ 10684 rack_log_dsack_event(rack, 7, __LINE__, start, end); 10685 was_tlp = 1; 10686 goto skip_dsack_round; 10687 } 10688 } 10689 if (rack->rc_dsack_round_seen == 0) { 10690 rack->rc_dsack_round_seen = 1; 10691 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; 10692 rack->r_ctl.num_dsack++; 10693 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ 10694 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); 10695 } 10696 skip_dsack_round: 10697 /* 10698 * We keep track of how many DSACK blocks we get 10699 * after a recovery incident. 10700 */ 10701 rack->r_ctl.dsack_byte_cnt += am; 10702 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 10703 rack->r_ctl.retran_during_recovery && 10704 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 10705 /* 10706 * False recovery most likely culprit is reordering. If 10707 * nothing else is missing we need to revert. 
10708 */ 10709 rack->r_might_revert = 1; 10710 rack_handle_might_revert(rack->rc_tp, rack); 10711 rack->r_might_revert = 0; 10712 rack->r_ctl.retran_during_recovery = 0; 10713 rack->r_ctl.dsack_byte_cnt = 0; 10714 } 10715 return (was_tlp); 10716 } 10717 10718 static uint32_t 10719 do_rack_compute_pipe(struct tcpcb *tp, struct tcp_rack *rack, uint32_t snd_una) 10720 { 10721 return (((tp->snd_max - snd_una) - 10722 (rack->r_ctl.rc_sacked + rack->r_ctl.rc_considered_lost)) + rack->r_ctl.rc_holes_rxt); 10723 } 10724 10725 static int32_t 10726 rack_compute_pipe(struct tcpcb *tp) 10727 { 10728 return ((int32_t)do_rack_compute_pipe(tp, 10729 (struct tcp_rack *)tp->t_fb_ptr, 10730 tp->snd_una)); 10731 } 10732 10733 static void 10734 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 10735 { 10736 /* Deal with changed and PRR here (in recovery only) */ 10737 uint32_t pipe, snd_una; 10738 10739 rack->r_ctl.rc_prr_delivered += changed; 10740 10741 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 10742 /* 10743 * It is all outstanding, we are application limited 10744 * and thus we don't need more room to send anything. 10745 * Note we use tp->snd_una here and not th_ack because 10746 * the data as yet not been cut from the sb. 10747 */ 10748 rack->r_ctl.rc_prr_sndcnt = 0; 10749 return; 10750 } 10751 /* Compute prr_sndcnt */ 10752 if (SEQ_GT(tp->snd_una, th_ack)) { 10753 snd_una = tp->snd_una; 10754 } else { 10755 snd_una = th_ack; 10756 } 10757 pipe = do_rack_compute_pipe(tp, rack, snd_una); 10758 if (pipe > tp->snd_ssthresh) { 10759 long sndcnt; 10760 10761 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 10762 if (rack->r_ctl.rc_prr_recovery_fs > 0) 10763 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 10764 else { 10765 rack->r_ctl.rc_prr_sndcnt = 0; 10766 rack_log_to_prr(rack, 9, 0, __LINE__); 10767 sndcnt = 0; 10768 } 10769 sndcnt++; 10770 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 10771 sndcnt -= rack->r_ctl.rc_prr_out; 10772 else 10773 sndcnt = 0; 10774 rack->r_ctl.rc_prr_sndcnt = sndcnt; 10775 rack_log_to_prr(rack, 10, 0, __LINE__); 10776 } else { 10777 uint32_t limit; 10778 10779 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 10780 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 10781 else 10782 limit = 0; 10783 if (changed > limit) 10784 limit = changed; 10785 limit += ctf_fixed_maxseg(tp); 10786 if (tp->snd_ssthresh > pipe) { 10787 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 10788 rack_log_to_prr(rack, 11, 0, __LINE__); 10789 } else { 10790 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 10791 rack_log_to_prr(rack, 12, 0, __LINE__); 10792 } 10793 } 10794 } 10795 10796 static void 10797 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck, 10798 int *dsack_seen, int *sacks_seen) 10799 { 10800 uint32_t changed; 10801 struct tcp_rack *rack; 10802 struct rack_sendmap *rsm; 10803 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 10804 register uint32_t th_ack; 10805 int32_t i, j, k, num_sack_blks = 0; 10806 uint32_t cts, acked, ack_point; 10807 int loop_start = 0; 10808 uint32_t tsused; 10809 uint32_t segsiz; 10810 10811 10812 INP_WLOCK_ASSERT(tptoinpcb(tp)); 10813 if (tcp_get_flags(th) & TH_RST) { 10814 /* We don't log resets */ 10815 return; 10816 } 10817 rack = (struct tcp_rack *)tp->t_fb_ptr; 10818 cts = tcp_get_usecs(NULL); 10819 rsm = tqhash_min(rack->r_ctl.tqh); 10820 changed = 0; 10821 th_ack = 
th->th_ack; 10822 segsiz = ctf_fixed_maxseg(rack->rc_tp); 10823 if (BYTES_THIS_ACK(tp, th) >= segsiz) { 10824 /* 10825 * You only get credit for 10826 * MSS and greater (and you get extra 10827 * credit for larger cum-ack moves). 10828 */ 10829 int ac; 10830 10831 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 10832 counter_u64_add(rack_ack_total, ac); 10833 } 10834 if (SEQ_GT(th_ack, tp->snd_una)) { 10835 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 10836 tp->t_acktime = ticks; 10837 } 10838 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 10839 changed = th_ack - rsm->r_start; 10840 if (changed) { 10841 rack_process_to_cumack(tp, rack, th_ack, cts, to, 10842 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); 10843 } 10844 if ((to->to_flags & TOF_SACK) == 0) { 10845 /* We are done nothing left and no sack. */ 10846 rack_handle_might_revert(tp, rack); 10847 /* 10848 * For cases where we struck a dup-ack 10849 * with no SACK, add to the changes so 10850 * PRR will work right. 10851 */ 10852 if (dup_ack_struck && (changed == 0)) { 10853 changed += ctf_fixed_maxseg(rack->rc_tp); 10854 } 10855 goto out; 10856 } 10857 /* Sack block processing */ 10858 if (SEQ_GT(th_ack, tp->snd_una)) 10859 ack_point = th_ack; 10860 else 10861 ack_point = tp->snd_una; 10862 for (i = 0; i < to->to_nsacks; i++) { 10863 bcopy((to->to_sacks + i * TCPOLEN_SACK), 10864 &sack, sizeof(sack)); 10865 sack.start = ntohl(sack.start); 10866 sack.end = ntohl(sack.end); 10867 if (SEQ_GT(sack.end, sack.start) && 10868 SEQ_GT(sack.start, ack_point) && 10869 SEQ_LT(sack.start, tp->snd_max) && 10870 SEQ_GT(sack.end, ack_point) && 10871 SEQ_LEQ(sack.end, tp->snd_max)) { 10872 sack_blocks[num_sack_blks] = sack; 10873 num_sack_blks++; 10874 } else if (SEQ_LEQ(sack.start, th_ack) && 10875 SEQ_LEQ(sack.end, th_ack)) { 10876 int was_tlp; 10877 10878 if (dsack_seen != NULL) 10879 *dsack_seen = 1; 10880 was_tlp = rack_note_dsack(rack, sack.start, sack.end); 10881 /* 10882 * Its a D-SACK block. 10883 */ 10884 tcp_record_dsack(tp, sack.start, sack.end, was_tlp); 10885 } 10886 } 10887 if (rack->rc_dsack_round_seen) { 10888 /* Is the dsack roound over? */ 10889 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { 10890 /* Yes it is */ 10891 rack->rc_dsack_round_seen = 0; 10892 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 10893 } 10894 } 10895 /* 10896 * Sort the SACK blocks so we can update the rack scoreboard with 10897 * just one pass. 10898 */ 10899 num_sack_blks = sack_filter_blks(tp, &rack->r_ctl.rack_sf, sack_blocks, 10900 num_sack_blks, th->th_ack); 10901 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 10902 if (sacks_seen != NULL) 10903 *sacks_seen = num_sack_blks; 10904 if (num_sack_blks == 0) { 10905 /* Nothing to sack, but we need to update counts */ 10906 goto out_with_totals; 10907 } 10908 /* Its a sack of some sort */ 10909 if (num_sack_blks < 2) { 10910 /* Only one, we don't need to sort */ 10911 goto do_sack_work; 10912 } 10913 /* Sort the sacks */ 10914 for (i = 0; i < num_sack_blks; i++) { 10915 for (j = i + 1; j < num_sack_blks; j++) { 10916 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 10917 sack = sack_blocks[i]; 10918 sack_blocks[i] = sack_blocks[j]; 10919 sack_blocks[j] = sack; 10920 } 10921 } 10922 } 10923 /* 10924 * Now are any of the sack block ends the same (yes some 10925 * implementations send these)? 
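	 * For example 150-200 alongside 100-200: the pass below keeps
	 * the single wider block 100-200 and drops the duplicate end.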
10926 */ 10927 again: 10928 if (num_sack_blks == 0) 10929 goto out_with_totals; 10930 if (num_sack_blks > 1) { 10931 for (i = 0; i < num_sack_blks; i++) { 10932 for (j = i + 1; j < num_sack_blks; j++) { 10933 if (sack_blocks[i].end == sack_blocks[j].end) { 10934 /* 10935 * Ok these two have the same end we 10936 * want the smallest end and then 10937 * throw away the larger and start 10938 * again. 10939 */ 10940 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 10941 /* 10942 * The second block covers 10943 * more area use that 10944 */ 10945 sack_blocks[i].start = sack_blocks[j].start; 10946 } 10947 /* 10948 * Now collapse out the dup-sack and 10949 * lower the count 10950 */ 10951 for (k = (j + 1); k < num_sack_blks; k++) { 10952 sack_blocks[j].start = sack_blocks[k].start; 10953 sack_blocks[j].end = sack_blocks[k].end; 10954 j++; 10955 } 10956 num_sack_blks--; 10957 goto again; 10958 } 10959 } 10960 } 10961 } 10962 do_sack_work: 10963 /* 10964 * First lets look to see if 10965 * we have retransmitted and 10966 * can use the transmit next? 10967 */ 10968 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 10969 if (rsm && 10970 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 10971 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 10972 /* 10973 * We probably did the FR and the next 10974 * SACK in continues as we would expect. 10975 */ 10976 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, segsiz); 10977 if (acked) { 10978 rack->r_wanted_output = 1; 10979 changed += acked; 10980 } 10981 if (num_sack_blks == 1) { 10982 /* 10983 * This is what we would expect from 10984 * a normal implementation to happen 10985 * after we have retransmitted the FR, 10986 * i.e the sack-filter pushes down 10987 * to 1 block and the next to be retransmitted 10988 * is the sequence in the sack block (has more 10989 * are acked). Count this as ACK'd data to boost 10990 * up the chances of recovering any false positives. 10991 */ 10992 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 10993 counter_u64_add(rack_express_sack, 1); 10994 goto out_with_totals; 10995 } else { 10996 /* 10997 * Start the loop through the 10998 * rest of blocks, past the first block. 10999 */ 11000 loop_start = 1; 11001 } 11002 } 11003 counter_u64_add(rack_sack_total, 1); 11004 rsm = rack->r_ctl.rc_sacklast; 11005 for (i = loop_start; i < num_sack_blks; i++) { 11006 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, segsiz); 11007 if (acked) { 11008 rack->r_wanted_output = 1; 11009 changed += acked; 11010 } 11011 } 11012 out_with_totals: 11013 if (num_sack_blks > 1) { 11014 /* 11015 * You get an extra stroke if 11016 * you have more than one sack-blk, this 11017 * could be where we are skipping forward 11018 * and the sack-filter is still working, or 11019 * it could be an attacker constantly 11020 * moving us. 11021 */ 11022 counter_u64_add(rack_move_some, 1); 11023 } 11024 out: 11025 if (changed) { 11026 /* Something changed cancel the rack timer */ 11027 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11028 } 11029 tsused = tcp_get_usecs(NULL); 11030 rsm = tcp_rack_output(tp, rack, tsused); 11031 if ((!IN_FASTRECOVERY(tp->t_flags)) && 11032 rsm && 11033 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { 11034 /* Enter recovery */ 11035 entered_recovery = 1; 11036 rack_cong_signal(tp, CC_NDUPACK, th_ack, __LINE__); 11037 /* 11038 * When we enter recovery we need to assure we send 11039 * one packet. 
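		 * Seeding rc_prr_sndcnt with one MSS below (when PRR is in
		 * use) guarantees that retransmission is not starved by a
		 * zero PRR allowance.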
11040 */ 11041 if (rack->rack_no_prr == 0) { 11042 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 11043 rack_log_to_prr(rack, 8, 0, __LINE__); 11044 } 11045 rack->r_timer_override = 1; 11046 rack->r_early = 0; 11047 rack->r_ctl.rc_agg_early = 0; 11048 } else if (IN_FASTRECOVERY(tp->t_flags) && 11049 rsm && 11050 (rack->r_rr_config == 3)) { 11051 /* 11052 * Assure we can output and we get no 11053 * remembered pace time except the retransmit. 11054 */ 11055 rack->r_timer_override = 1; 11056 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 11057 rack->r_ctl.rc_resend = rsm; 11058 } 11059 if (IN_FASTRECOVERY(tp->t_flags) && 11060 (rack->rack_no_prr == 0) && 11061 (entered_recovery == 0)) { 11062 rack_update_prr(tp, rack, changed, th_ack); 11063 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 11064 ((tcp_in_hpts(rack->rc_tp) == 0) && 11065 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 11066 /* 11067 * If you are pacing output you don't want 11068 * to override. 11069 */ 11070 rack->r_early = 0; 11071 rack->r_ctl.rc_agg_early = 0; 11072 rack->r_timer_override = 1; 11073 } 11074 } 11075 } 11076 11077 static void 11078 rack_strike_dupack(struct tcp_rack *rack, tcp_seq th_ack) 11079 { 11080 struct rack_sendmap *rsm; 11081 11082 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 11083 while (rsm) { 11084 /* 11085 * We need to skip anything already set 11086 * to be retransmitted. 11087 */ 11088 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 11089 (rsm->r_flags & RACK_MUST_RXT)) { 11090 rsm = TAILQ_NEXT(rsm, r_tnext); 11091 continue; 11092 } 11093 break; 11094 } 11095 if (rsm && (rsm->r_dupack < 0xff)) { 11096 rsm->r_dupack++; 11097 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 11098 struct timeval tv; 11099 uint32_t cts; 11100 /* 11101 * Here we see if we need to retransmit. For 11102 * a SACK type connection if enough time has passed 11103 * we will get a return of the rsm. For a non-sack 11104 * connection we will get the rsm returned if the 11105 * dupack value is 3 or more. 11106 */ 11107 cts = tcp_get_usecs(&tv); 11108 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); 11109 if (rack->r_ctl.rc_resend != NULL) { 11110 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { 11111 rack_cong_signal(rack->rc_tp, CC_NDUPACK, 11112 th_ack, __LINE__); 11113 } 11114 rack->r_wanted_output = 1; 11115 rack->r_timer_override = 1; 11116 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 11117 } 11118 } else { 11119 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 11120 } 11121 } 11122 } 11123 11124 static void 11125 rack_check_bottom_drag(struct tcpcb *tp, 11126 struct tcp_rack *rack, 11127 struct socket *so) 11128 { 11129 /* 11130 * So what is dragging bottom? 11131 * 11132 * Dragging bottom means you were under pacing and had a 11133 * delay in processing inbound acks waiting on our pacing 11134 * timer to expire. While you were waiting all of the acknowledgments 11135 * for the packets you sent have arrived. This means we are pacing 11136 * way underneath the bottleneck to the point where our Goodput 11137 * measurements stop working, since they require more than one 11138 * ack (usually at least 8 packets worth with multiple acks so we can 11139 * gauge the inter-ack times). If that occurs we have a real problem 11140 * since we are stuck in a hole that we can't get out of without 11141 * something speeding us up. 11142 * 11143 * We also check to see if we are widdling down to just one segment 11144 * outstanding. 
If this occurs and we have room to send in our cwnd/rwnd 11145 * then we are adding the delayed ack interval into our measurments and 11146 * we need to speed up slightly. 11147 */ 11148 uint32_t segsiz, minseg; 11149 11150 segsiz = ctf_fixed_maxseg(tp); 11151 minseg = segsiz; 11152 if (tp->snd_max == tp->snd_una) { 11153 /* 11154 * We are doing dynamic pacing and we are way 11155 * under. Basically everything got acked while 11156 * we were still waiting on the pacer to expire. 11157 * 11158 * This means we need to boost the b/w in 11159 * addition to any earlier boosting of 11160 * the multiplier. 11161 */ 11162 uint64_t lt_bw; 11163 11164 tcp_trace_point(rack->rc_tp, TCP_TP_PACED_BOTTOM); 11165 lt_bw = rack_get_lt_bw(rack); 11166 rack->rc_dragged_bottom = 1; 11167 rack_validate_multipliers_at_or_above100(rack); 11168 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && 11169 (rack->dis_lt_bw == 0) && 11170 (rack->use_lesser_lt_bw == 0) && 11171 (lt_bw > 0)) { 11172 /* 11173 * Lets use the long-term b/w we have 11174 * been getting as a base. 11175 */ 11176 if (rack->rc_gp_filled == 0) { 11177 if (lt_bw > ONE_POINT_TWO_MEG) { 11178 /* 11179 * If we have no measurement 11180 * don't let us set in more than 11181 * 1.2Mbps. If we are still too 11182 * low after pacing with this we 11183 * will hopefully have a max b/w 11184 * available to sanity check things. 11185 */ 11186 lt_bw = ONE_POINT_TWO_MEG; 11187 } 11188 rack->r_ctl.rc_rtt_diff = 0; 11189 rack->r_ctl.gp_bw = lt_bw; 11190 rack->rc_gp_filled = 1; 11191 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 11192 rack->r_ctl.num_measurements = RACK_REQ_AVG; 11193 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 11194 } else if (lt_bw > rack->r_ctl.gp_bw) { 11195 rack->r_ctl.rc_rtt_diff = 0; 11196 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 11197 rack->r_ctl.num_measurements = RACK_REQ_AVG; 11198 rack->r_ctl.gp_bw = lt_bw; 11199 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 11200 } else 11201 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11202 if ((rack->gp_ready == 0) && 11203 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 11204 /* We have enough measurements now */ 11205 rack->gp_ready = 1; 11206 if (rack->dgp_on || 11207 rack->rack_hibeta) 11208 rack_set_cc_pacing(rack); 11209 if (rack->defer_options) 11210 rack_apply_deferred_options(rack); 11211 } 11212 } else { 11213 /* 11214 * zero rtt possibly?, settle for just an old increase. 11215 */ 11216 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11217 } 11218 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 11219 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), 11220 minseg)) && 11221 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && 11222 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && 11223 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= 11224 (segsiz * rack_req_segs))) { 11225 /* 11226 * We are doing dynamic GP pacing and 11227 * we have everything except 1MSS or less 11228 * bytes left out. We are still pacing away. 11229 * And there is data that could be sent, This 11230 * means we are inserting delayed ack time in 11231 * our measurements because we are pacing too slow. 
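		 * The remedy here is a modest pacing multiplier boost via
		 * rack_increase_bw_mul() below, not the lt_bw based reseed
		 * of gp_bw used in the fully acked case above.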
11232 */ 11233 rack_validate_multipliers_at_or_above100(rack); 11234 rack->rc_dragged_bottom = 1; 11235 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11236 } 11237 } 11238 11239 #ifdef TCP_REQUEST_TRK 11240 static void 11241 rack_log_hybrid(struct tcp_rack *rack, uint32_t seq, 11242 struct tcp_sendfile_track *cur, uint8_t mod, int line, int err) 11243 { 11244 int do_log; 11245 11246 do_log = tcp_bblogging_on(rack->rc_tp); 11247 if (do_log == 0) { 11248 if ((do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) )== 0) 11249 return; 11250 /* We only allow the three below with point logging on */ 11251 if ((mod != HYBRID_LOG_RULES_APP) && 11252 (mod != HYBRID_LOG_RULES_SET) && 11253 (mod != HYBRID_LOG_REQ_COMP)) 11254 return; 11255 11256 } 11257 if (do_log) { 11258 union tcp_log_stackspecific log; 11259 struct timeval tv; 11260 11261 /* Convert our ms to a microsecond */ 11262 memset(&log, 0, sizeof(log)); 11263 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 11264 log.u_bbr.flex1 = seq; 11265 log.u_bbr.cwnd_gain = line; 11266 if (cur != NULL) { 11267 uint64_t off; 11268 11269 log.u_bbr.flex2 = cur->start_seq; 11270 log.u_bbr.flex3 = cur->end_seq; 11271 log.u_bbr.flex4 = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 11272 log.u_bbr.flex5 = (uint32_t)(cur->localtime & 0x00000000ffffffff); 11273 log.u_bbr.flex6 = cur->flags; 11274 log.u_bbr.pkts_out = cur->hybrid_flags; 11275 log.u_bbr.rttProp = cur->timestamp; 11276 log.u_bbr.cur_del_rate = cur->cspr; 11277 log.u_bbr.bw_inuse = cur->start; 11278 log.u_bbr.applimited = (uint32_t)(cur->end & 0x00000000ffffffff); 11279 log.u_bbr.delivered = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff) ; 11280 log.u_bbr.epoch = (uint32_t)(cur->deadline & 0x00000000ffffffff); 11281 log.u_bbr.lt_epoch = (uint32_t)((cur->deadline >> 32) & 0x00000000ffffffff) ; 11282 log.u_bbr.inhpts = 1; 11283 #ifdef TCP_REQUEST_TRK 11284 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 11285 log.u_bbr.use_lt_bw = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 11286 #endif 11287 } else { 11288 log.u_bbr.flex2 = err; 11289 } 11290 /* 11291 * Fill in flex7 to be CHD (catchup|hybrid|DGP) 11292 */ 11293 log.u_bbr.flex7 = rack->rc_catch_up; 11294 log.u_bbr.flex7 <<= 1; 11295 log.u_bbr.flex7 |= rack->rc_hybrid_mode; 11296 log.u_bbr.flex7 <<= 1; 11297 log.u_bbr.flex7 |= rack->dgp_on; 11298 /* 11299 * Compose bbr_state to be a bit wise 0000ADHF 11300 * where A is the always_pace flag 11301 * where D is the dgp_on flag 11302 * where H is the hybrid_mode on flag 11303 * where F is the use_fixed_rate flag. 
11304 */ 11305 log.u_bbr.bbr_state = rack->rc_always_pace; 11306 log.u_bbr.bbr_state <<= 1; 11307 log.u_bbr.bbr_state |= rack->dgp_on; 11308 log.u_bbr.bbr_state <<= 1; 11309 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 11310 log.u_bbr.bbr_state <<= 1; 11311 log.u_bbr.bbr_state |= rack->use_fixed_rate; 11312 log.u_bbr.flex8 = mod; 11313 log.u_bbr.delRate = rack->r_ctl.bw_rate_cap; 11314 log.u_bbr.bbr_substate = rack->r_ctl.client_suggested_maxseg; 11315 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 11316 log.u_bbr.pkt_epoch = rack->rc_tp->tcp_hybrid_start; 11317 log.u_bbr.lost = rack->rc_tp->tcp_hybrid_error; 11318 log.u_bbr.pacing_gain = (uint16_t)rack->rc_tp->tcp_hybrid_stop; 11319 tcp_log_event(rack->rc_tp, NULL, 11320 &rack->rc_inp->inp_socket->so_rcv, 11321 &rack->rc_inp->inp_socket->so_snd, 11322 TCP_HYBRID_PACING_LOG, 0, 11323 0, &log, false, NULL, __func__, __LINE__, &tv); 11324 } 11325 } 11326 #endif 11327 11328 #ifdef TCP_REQUEST_TRK 11329 static void 11330 rack_set_dgp_hybrid_mode(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts) 11331 { 11332 struct tcp_sendfile_track *rc_cur, *orig_ent; 11333 struct tcpcb *tp; 11334 int err = 0; 11335 11336 orig_ent = rack->r_ctl.rc_last_sft; 11337 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, seq); 11338 if (rc_cur == NULL) { 11339 /* If not in the beginning what about the end piece */ 11340 if (rack->rc_hybrid_mode) 11341 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); 11342 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, (seq + len - 1)); 11343 } else { 11344 err = 12345; 11345 } 11346 /* If we find no parameters we are in straight DGP mode */ 11347 if(rc_cur == NULL) { 11348 /* None found for this seq, just DGP for now */ 11349 if (rack->rc_hybrid_mode) { 11350 rack->r_ctl.client_suggested_maxseg = 0; 11351 rack->rc_catch_up = 0; 11352 if (rack->cspr_is_fcc == 0) 11353 rack->r_ctl.bw_rate_cap = 0; 11354 else 11355 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 11356 } 11357 if (rack->rc_hybrid_mode) { 11358 rack_log_hybrid(rack, (seq + len - 1), NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); 11359 } 11360 if (rack->r_ctl.rc_last_sft) { 11361 rack->r_ctl.rc_last_sft = NULL; 11362 } 11363 return; 11364 } 11365 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_WASSET) == 0) { 11366 /* This entry was never setup for hybrid pacing on/off etc */ 11367 if (rack->rc_hybrid_mode) { 11368 rack->r_ctl.client_suggested_maxseg = 0; 11369 rack->rc_catch_up = 0; 11370 rack->r_ctl.bw_rate_cap = 0; 11371 } 11372 if (rack->r_ctl.rc_last_sft) { 11373 rack->r_ctl.rc_last_sft = NULL; 11374 } 11375 if ((rc_cur->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { 11376 rc_cur->flags |= TCP_TRK_TRACK_FLG_FSND; 11377 rc_cur->first_send = cts; 11378 rc_cur->sent_at_fs = rack->rc_tp->t_sndbytes; 11379 rc_cur->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; 11380 } 11381 return; 11382 } 11383 /* 11384 * Ok if we have a new entry *or* have never 11385 * set up an entry we need to proceed. If 11386 * we have already set it up this entry we 11387 * just continue along with what we already 11388 * setup. 
11389 */ 11390 tp = rack->rc_tp; 11391 if ((rack->r_ctl.rc_last_sft != NULL) && 11392 (rack->r_ctl.rc_last_sft == rc_cur)) { 11393 /* Its already in place */ 11394 if (rack->rc_hybrid_mode) 11395 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_ISSAME, __LINE__, 0); 11396 return; 11397 } 11398 if (rack->rc_hybrid_mode == 0) { 11399 rack->r_ctl.rc_last_sft = rc_cur; 11400 if (orig_ent) { 11401 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; 11402 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; 11403 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; 11404 } 11405 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0); 11406 return; 11407 } 11408 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CSPR) && rc_cur->cspr){ 11409 /* Compensate for all the header overhead's */ 11410 if (rack->cspr_is_fcc == 0) 11411 rack->r_ctl.bw_rate_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); 11412 else 11413 rack->r_ctl.fillcw_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); 11414 } else { 11415 if (rack->rc_hybrid_mode) { 11416 if (rack->cspr_is_fcc == 0) 11417 rack->r_ctl.bw_rate_cap = 0; 11418 else 11419 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 11420 } 11421 } 11422 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_H_MS) 11423 rack->r_ctl.client_suggested_maxseg = rc_cur->hint_maxseg; 11424 else 11425 rack->r_ctl.client_suggested_maxseg = 0; 11426 if (rc_cur->timestamp == rack->r_ctl.last_tm_mark) { 11427 /* 11428 * It is the same timestamp as the previous one 11429 * add the hybrid flag that will indicate we use 11430 * sendtime not arrival time for catch-up mode. 11431 */ 11432 rc_cur->hybrid_flags |= TCP_HYBRID_PACING_SENDTIME; 11433 } 11434 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CU) && 11435 (rc_cur->cspr > 0)) { 11436 uint64_t len; 11437 11438 rack->rc_catch_up = 1; 11439 /* 11440 * Calculate the deadline time, first set the 11441 * time to when the request arrived. 11442 */ 11443 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_SENDTIME) { 11444 /* 11445 * For cases where its a duplicate tm (we received more 11446 * than one request for a tm) we want to use now, the point 11447 * where we are just sending the first bit of the request. 11448 */ 11449 rc_cur->deadline = cts; 11450 } else { 11451 /* 11452 * Here we have a different tm from the last request 11453 * so we want to use arrival time as our base. 11454 */ 11455 rc_cur->deadline = rc_cur->localtime; 11456 } 11457 /* 11458 * Next calculate the length and compensate for 11459 * TLS if need be. 11460 */ 11461 len = rc_cur->end - rc_cur->start; 11462 if (tp->t_inpcb.inp_socket->so_snd.sb_tls_info) { 11463 /* 11464 * This session is doing TLS. Take a swag guess 11465 * at the overhead. 11466 */ 11467 len += tcp_estimate_tls_overhead(tp->t_inpcb.inp_socket, len); 11468 } 11469 /* 11470 * Now considering the size, and the cspr, what is the time that 11471 * would be required at the cspr rate. Here we use the raw 11472 * cspr value since the client only looks at the raw data. We 11473 * do use len which includes TLS overhead, but not the TCP/IP etc. 11474 * That will get made up for in the CU pacing rate set. 11475 */ 11476 len *= HPTS_USEC_IN_SEC; 11477 len /= rc_cur->cspr; 11478 rc_cur->deadline += len; 11479 } else { 11480 rack->rc_catch_up = 0; 11481 rc_cur->deadline = 0; 11482 } 11483 if (rack->r_ctl.client_suggested_maxseg != 0) { 11484 /* 11485 * We need to reset the max pace segs if we have a 11486 * client_suggested_maxseg. 
11487 */ 11488 rack_set_pace_segments(tp, rack, __LINE__, NULL); 11489 } 11490 if (orig_ent) { 11491 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; 11492 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; 11493 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; 11494 } 11495 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0); 11496 /* Remember it for next time and for CU mode */ 11497 rack->r_ctl.rc_last_sft = rc_cur; 11498 rack->r_ctl.last_tm_mark = rc_cur->timestamp; 11499 } 11500 #endif 11501 11502 static void 11503 rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts) 11504 { 11505 #ifdef TCP_REQUEST_TRK 11506 struct tcp_sendfile_track *ent; 11507 11508 ent = rack->r_ctl.rc_last_sft; 11509 if ((ent == NULL) || 11510 (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) || 11511 (SEQ_GEQ(seq, ent->end_seq))) { 11512 /* Time to update the track. */ 11513 rack_set_dgp_hybrid_mode(rack, seq, len, cts); 11514 ent = rack->r_ctl.rc_last_sft; 11515 } 11516 /* Out of all */ 11517 if (ent == NULL) { 11518 return; 11519 } 11520 if (SEQ_LT(ent->end_seq, (seq + len))) { 11521 /* 11522 * This is the case where our end_seq guess 11523 * was wrong. This is usually due to TLS having 11524 * more bytes then our guess. It could also be the 11525 * case that the client sent in two requests closely 11526 * and the SB is full of both so we are sending part 11527 * of each (end|beg). In such a case lets move this 11528 * guys end to match the end of this send. That 11529 * way it will complete when all of it is acked. 11530 */ 11531 ent->end_seq = (seq + len); 11532 if (rack->rc_hybrid_mode) 11533 rack_log_hybrid_bw(rack, seq, len, 0, 0, HYBRID_LOG_EXTEND, 0, ent, __LINE__); 11534 } 11535 /* Now validate we have set the send time of this one */ 11536 if ((ent->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { 11537 ent->flags |= TCP_TRK_TRACK_FLG_FSND; 11538 ent->first_send = cts; 11539 ent->sent_at_fs = rack->rc_tp->t_sndbytes; 11540 ent->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; 11541 } 11542 #endif 11543 } 11544 11545 static void 11546 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount) 11547 { 11548 /* 11549 * The fast output path is enabled and we 11550 * have moved the cumack forward. Lets see if 11551 * we can expand forward the fast path length by 11552 * that amount. What we would ideally like to 11553 * do is increase the number of bytes in the 11554 * fast path block (left_to_send) by the 11555 * acked amount. However we have to gate that 11556 * by two factors: 11557 * 1) The amount outstanding and the rwnd of the peer 11558 * (i.e. we don't want to exceed the rwnd of the peer). 11559 * <and> 11560 * 2) The amount of data left in the socket buffer (i.e. 11561 * we can't send beyond what is in the buffer). 11562 * 11563 * Note that this does not take into account any increase 11564 * in the cwnd. We will only extend the fast path by 11565 * what was acked. 
11566 */ 11567 uint32_t new_total, gating_val; 11568 11569 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; 11570 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), 11571 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); 11572 if (new_total <= gating_val) { 11573 /* We can increase left_to_send by the acked amount */ 11574 counter_u64_add(rack_extended_rfo, 1); 11575 rack->r_ctl.fsb.left_to_send = new_total; 11576 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))), 11577 ("rack:%p left_to_send:%u sbavail:%u out:%u", 11578 rack, rack->r_ctl.fsb.left_to_send, 11579 sbavail(&rack->rc_inp->inp_socket->so_snd), 11580 (tp->snd_max - tp->snd_una))); 11581 11582 } 11583 } 11584 11585 static void 11586 rack_adjust_sendmap_head(struct tcp_rack *rack, struct sockbuf *sb) 11587 { 11588 /* 11589 * Here any sendmap entry that points to the 11590 * beginning mbuf must be adjusted to the correct 11591 * offset. This must be called with: 11592 * 1) The socket buffer locked 11593 * 2) snd_una adjusted to its new position. 11594 * 11595 * Note that (2) implies rack_ack_received has also 11596 * been called and all the sbcut's have been done. 11597 * 11598 * We grab the first mbuf in the socket buffer and 11599 * then go through the front of the sendmap, recalculating 11600 * the stored offset for any sendmap entry that has 11601 * that mbuf. We must use the sb functions to do this 11602 * since its possible an add was done has well as 11603 * the subtraction we may have just completed. This should 11604 * not be a penalty though, since we just referenced the sb 11605 * to go in and trim off the mbufs that we freed (of course 11606 * there will be a penalty for the sendmap references though). 11607 * 11608 * Note also with INVARIANT on, we validate with a KASSERT 11609 * that the first sendmap entry has a soff of 0. 11610 * 11611 */ 11612 struct mbuf *m; 11613 struct rack_sendmap *rsm; 11614 tcp_seq snd_una; 11615 #ifdef INVARIANTS 11616 int first_processed = 0; 11617 #endif 11618 11619 snd_una = rack->rc_tp->snd_una; 11620 SOCKBUF_LOCK_ASSERT(sb); 11621 m = sb->sb_mb; 11622 rsm = tqhash_min(rack->r_ctl.tqh); 11623 if ((rsm == NULL) || (m == NULL)) { 11624 /* Nothing outstanding */ 11625 return; 11626 } 11627 /* The very first RSM's mbuf must point to the head mbuf in the sb */ 11628 KASSERT((rsm->m == m), 11629 ("Rack:%p sb:%p rsm:%p -- first rsm mbuf not aligned to sb", 11630 rack, sb, rsm)); 11631 while (rsm->m && (rsm->m == m)) { 11632 /* one to adjust */ 11633 #ifdef INVARIANTS 11634 struct mbuf *tm; 11635 uint32_t soff; 11636 11637 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); 11638 if ((rsm->orig_m_len != m->m_len) || 11639 (rsm->orig_t_space != M_TRAILINGROOM(m))){ 11640 rack_adjust_orig_mlen(rsm); 11641 } 11642 if (first_processed == 0) { 11643 KASSERT((rsm->soff == 0), 11644 ("Rack:%p rsm:%p -- rsm at head but soff not zero", 11645 rack, rsm)); 11646 first_processed = 1; 11647 } 11648 if ((rsm->soff != soff) || (rsm->m != tm)) { 11649 /* 11650 * This is not a fatal error, we anticipate it 11651 * might happen (the else code), so we count it here 11652 * so that under invariant we can see that it really 11653 * does happen. 
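 *
 * In rough terms the loop below does, for every rsm still anchored
 * to the head mbuf:
 *
 *	rsm->m = sbsndmbuf(sb, rsm->r_start - snd_una, &rsm->soff);
 *	rsm->orig_m_len   = rsm->m ? rsm->m->m_len : 0;
 *	rsm->orig_t_space = rsm->m ? M_TRAILINGROOM(rsm->m) : 0;
 *
 * re-caching orig_m_len and orig_t_space so that a later append or
 * trim to that same mbuf can be noticed (and corrected for via
 * rack_adjust_orig_mlen()) before the next send from this rsm.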
11654 */ 11655 counter_u64_add(rack_adjust_map_bw, 1); 11656 } 11657 rsm->m = tm; 11658 rsm->soff = soff; 11659 if (tm) { 11660 rsm->orig_m_len = rsm->m->m_len; 11661 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 11662 } else { 11663 rsm->orig_m_len = 0; 11664 rsm->orig_t_space = 0; 11665 } 11666 #else 11667 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); 11668 if (rsm->m) { 11669 rsm->orig_m_len = rsm->m->m_len; 11670 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 11671 } else { 11672 rsm->orig_m_len = 0; 11673 rsm->orig_t_space = 0; 11674 } 11675 #endif 11676 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 11677 if (rsm == NULL) 11678 break; 11679 } 11680 } 11681 11682 #ifdef TCP_REQUEST_TRK 11683 static inline void 11684 rack_req_check_for_comp(struct tcp_rack *rack, tcp_seq th_ack) 11685 { 11686 struct tcp_sendfile_track *ent; 11687 int i; 11688 11689 if ((rack->rc_hybrid_mode == 0) && 11690 (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) == 0)) { 11691 /* 11692 * Just do normal completions; hybrid pacing is not on 11693 * and CLDL is off as well. 11694 */ 11695 tcp_req_check_for_comp(rack->rc_tp, th_ack); 11696 return; 11697 } 11698 /* 11699 * Originally I was just going to find the th_ack associated 11700 * with an entry. But then I realized a large stretch ack could 11701 * in theory ack two or more requests at once. So instead we 11702 * need to find all entries that are completed by th_ack, not 11703 * just a single entry, and do our logging. 11704 */ 11705 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); 11706 while (ent != NULL) { 11707 /* 11708 * We may be doing hybrid pacing or CLDL and possibly need more 11709 * details, so we do it manually instead of calling 11710 * tcp_req_check_for_comp(). 11711 */ 11712 uint64_t laa, tim, data, cbw, ftim; 11713 11714 /* Ok this ack frees it */ 11715 rack_log_hybrid(rack, th_ack, 11716 ent, HYBRID_LOG_REQ_COMP, __LINE__, 0); 11717 rack_log_hybrid_sends(rack, ent, __LINE__); 11718 /* calculate the time based on the ack arrival */ 11719 data = ent->end - ent->start; 11720 laa = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 11721 if (ent->flags & TCP_TRK_TRACK_FLG_FSND) { 11722 if (ent->first_send > ent->localtime) 11723 ftim = ent->first_send; 11724 else 11725 ftim = ent->localtime; 11726 } else { 11727 /* TSNH */ 11728 ftim = ent->localtime; 11729 } 11730 if (laa > ent->localtime) 11731 tim = laa - ftim; 11732 else 11733 tim = 0; 11734 cbw = data * HPTS_USEC_IN_SEC; 11735 if (tim > 0) 11736 cbw /= tim; 11737 else 11738 cbw = 0; 11739 rack_log_hybrid_bw(rack, th_ack, cbw, tim, data, HYBRID_LOG_BW_MEASURE, 0, ent, __LINE__); 11740 /* 11741 * Check to see if we are freeing what we are pointing to, send wise; 11742 * if so be sure to NULL the pointer so we know we are no longer 11743 * set to anything.
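 *
 * A rough worked example of the measurement above (made-up numbers):
 * a 2,000,000 byte request whose first send was 250,000 usec before
 * this ack arrived gives
 *
 *	data = 2000000;
 *	tim  = 250000;
 *	cbw  = data * HPTS_USEC_IN_SEC / tim;	(8,000,000 bytes/sec)
 *
 * and that is the client bandwidth logged via HYBRID_LOG_BW_MEASURE.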
11744 */ 11745 if (ent == rack->r_ctl.rc_last_sft) { 11746 rack->r_ctl.rc_last_sft = NULL; 11747 if (rack->rc_hybrid_mode) { 11748 rack->rc_catch_up = 0; 11749 if (rack->cspr_is_fcc == 0) 11750 rack->r_ctl.bw_rate_cap = 0; 11751 else 11752 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 11753 rack->r_ctl.client_suggested_maxseg = 0; 11754 } 11755 } 11756 /* Generate the log that the tcp_netflix call would have */ 11757 tcp_req_log_req_info(rack->rc_tp, ent, 11758 i, TCP_TRK_REQ_LOG_FREED, 0, 0); 11759 /* Free it and see if there is another one */ 11760 tcp_req_free_a_slot(rack->rc_tp, ent); 11761 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); 11762 } 11763 } 11764 #endif 11765 11766 11767 /* 11768 * Return value of 1, we do not need to call rack_process_data(). 11769 * return value of 0, rack_process_data can be called. 11770 * For ret_val if its 0 the TCP is locked, if its non-zero 11771 * its unlocked and probably unsafe to touch the TCB. 11772 */ 11773 static int 11774 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 11775 struct tcpcb *tp, struct tcpopt *to, 11776 uint32_t tiwin, int32_t tlen, 11777 int32_t * ofia, int32_t thflags, int32_t *ret_val, int32_t orig_tlen) 11778 { 11779 int32_t ourfinisacked = 0; 11780 int32_t nsegs, acked_amount; 11781 int32_t acked; 11782 struct mbuf *mfree; 11783 struct tcp_rack *rack; 11784 int32_t under_pacing = 0; 11785 int32_t post_recovery = 0; 11786 uint32_t p_cwnd; 11787 11788 INP_WLOCK_ASSERT(tptoinpcb(tp)); 11789 11790 rack = (struct tcp_rack *)tp->t_fb_ptr; 11791 if (SEQ_GEQ(tp->snd_una, tp->iss + (65535 << tp->snd_scale))) { 11792 /* Checking SEG.ACK against ISS is definitely redundant. */ 11793 tp->t_flags2 |= TF2_NO_ISS_CHECK; 11794 } 11795 if (!V_tcp_insecure_ack) { 11796 tcp_seq seq_min; 11797 bool ghost_ack_check; 11798 11799 if (tp->t_flags2 & TF2_NO_ISS_CHECK) { 11800 /* Check for too old ACKs (RFC 5961, Section 5.2). */ 11801 seq_min = tp->snd_una - tp->max_sndwnd; 11802 ghost_ack_check = false; 11803 } else { 11804 if (SEQ_GT(tp->iss + 1, tp->snd_una - tp->max_sndwnd)) { 11805 /* Checking for ghost ACKs is stricter. */ 11806 seq_min = tp->iss + 1; 11807 ghost_ack_check = true; 11808 } else { 11809 /* 11810 * Checking for too old ACKs (RFC 5961, 11811 * Section 5.2) is stricter. 11812 */ 11813 seq_min = tp->snd_una - tp->max_sndwnd; 11814 ghost_ack_check = false; 11815 } 11816 } 11817 if (SEQ_LT(th->th_ack, seq_min)) { 11818 if (ghost_ack_check) 11819 TCPSTAT_INC(tcps_rcvghostack); 11820 else 11821 TCPSTAT_INC(tcps_rcvacktooold); 11822 /* Send challenge ACK. 
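 *
 * A rough sketch of how seq_min was chosen above (ignoring the
 * TF2_NO_ISS_CHECK shortcut):
 *
 *	if (SEQ_GT(iss + 1, snd_una - max_sndwnd))
 *		seq_min = iss + 1;		(ghost ACK check)
 *	else
 *		seq_min = snd_una - max_sndwnd;	(RFC 5961 too-old check)
 *
 * Any ACK below seq_min is counted and dropped after scheduling the
 * challenge ACK, rather than being processed further.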
*/ 11823 ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val); 11824 rack->r_wanted_output = 1; 11825 return (1); 11826 } 11827 } 11828 if (SEQ_GT(th->th_ack, tp->snd_max)) { 11829 ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val); 11830 rack->r_wanted_output = 1; 11831 return (1); 11832 } 11833 if (rack->gp_ready && 11834 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 11835 under_pacing = 1; 11836 } 11837 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 11838 int in_rec, dup_ack_struck = 0; 11839 int dsack_seen = 0, sacks_seen = 0; 11840 11841 in_rec = IN_FASTRECOVERY(tp->t_flags); 11842 if (rack->rc_in_persist) { 11843 tp->t_rxtshift = 0; 11844 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 11845 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 11846 } 11847 11848 if ((th->th_ack == tp->snd_una) && 11849 (tiwin == tp->snd_wnd) && 11850 (orig_tlen == 0) && 11851 ((to->to_flags & TOF_SACK) == 0)) { 11852 rack_strike_dupack(rack, th->th_ack); 11853 dup_ack_struck = 1; 11854 } 11855 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), 11856 dup_ack_struck, &dsack_seen, &sacks_seen); 11857 11858 } 11859 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 11860 /* 11861 * Old ack, behind (or duplicate to) the last one rcv'd 11862 * Note: We mark reordering is occuring if its 11863 * less than and we have not closed our window. 11864 */ 11865 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { 11866 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 11867 if (rack->r_ctl.rc_reorder_ts == 0) 11868 rack->r_ctl.rc_reorder_ts = 1; 11869 } 11870 return (0); 11871 } 11872 /* 11873 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 11874 * something we sent. 11875 */ 11876 if (tp->t_flags & TF_NEEDSYN) { 11877 /* 11878 * T/TCP: Connection was half-synchronized, and our SYN has 11879 * been ACK'd (so connection is now fully synchronized). Go 11880 * to non-starred state, increment snd_una for ACK of SYN, 11881 * and check if we can do window scaling. 11882 */ 11883 tp->t_flags &= ~TF_NEEDSYN; 11884 tp->snd_una++; 11885 /* Do window scaling? */ 11886 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11887 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11888 tp->rcv_scale = tp->request_r_scale; 11889 /* Send window already scaled. */ 11890 } 11891 } 11892 nsegs = max(1, m->m_pkthdr.lro_nsegs); 11893 11894 acked = BYTES_THIS_ACK(tp, th); 11895 if (acked) { 11896 /* 11897 * Any time we move the cum-ack forward clear 11898 * keep-alive tied probe-not-answered. The 11899 * persists clears its own on entry. 11900 */ 11901 rack->probe_not_answered = 0; 11902 } 11903 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 11904 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 11905 /* 11906 * If we just performed our first retransmit, and the ACK arrives 11907 * within our recovery window, then it was a mistake to do the 11908 * retransmit in the first place. Recover our original cwnd and 11909 * ssthresh, and proceed to transmit where we left off. 
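 *
 * Roughly: when timestamps are not in use, t_badrxtwin was set a
 * short time into the future at the first RTO retransmit, and if
 *
 *	tp->t_rxtshift == 1 && (int)(ticks - tp->t_badrxtwin) < 0
 *
 * still holds when this ACK arrives (with TF_PREVVALID set), the
 * retransmit is judged spurious and rack_cong_signal(CC_RTO_ERR)
 * below restores the prior cwnd and ssthresh.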
11910 */ 11911 if ((tp->t_flags & TF_PREVVALID) && 11912 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 11913 tp->t_flags &= ~TF_PREVVALID; 11914 if (tp->t_rxtshift == 1 && 11915 (int)(ticks - tp->t_badrxtwin) < 0) 11916 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 11917 } 11918 if (acked) { 11919 /* assure we are not backed off */ 11920 tp->t_rxtshift = 0; 11921 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 11922 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 11923 rack->rc_tlp_in_progress = 0; 11924 rack->r_ctl.rc_tlp_cnt_out = 0; 11925 /* 11926 * If it is the RXT timer we want to 11927 * stop it, so we can restart a TLP. 11928 */ 11929 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 11930 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11931 #ifdef TCP_REQUEST_TRK 11932 rack_req_check_for_comp(rack, th->th_ack); 11933 #endif 11934 } 11935 /* 11936 * If we have a timestamp reply, update smoothed round trip time. If 11937 * no timestamp is present but transmit timer is running and timed 11938 * sequence number was acked, update smoothed round trip time. Since 11939 * we now have an rtt measurement, cancel the timer backoff (cf., 11940 * Phil Karn's retransmit alg.). Recompute the initial retransmit 11941 * timer. 11942 * 11943 * Some boxes send broken timestamp replies during the SYN+ACK 11944 * phase, ignore timestamps of 0 or we could calculate a huge RTT 11945 * and blow up the retransmit timer. 11946 */ 11947 /* 11948 * If all outstanding data is acked, stop retransmit timer and 11949 * remember to restart (more output or persist). If there is more 11950 * data to be acked, restart retransmit timer, using current 11951 * (possibly backed-off) value. 11952 */ 11953 if (acked == 0) { 11954 if (ofia) 11955 *ofia = ourfinisacked; 11956 return (0); 11957 } 11958 if (IN_RECOVERY(tp->t_flags)) { 11959 if (SEQ_LT(th->th_ack, tp->snd_recover) && 11960 (SEQ_LT(th->th_ack, tp->snd_max))) { 11961 tcp_rack_partialack(tp); 11962 } else { 11963 rack_post_recovery(tp, th->th_ack); 11964 post_recovery = 1; 11965 /* 11966 * Grab the segsiz, multiply by 2 and add the snd_cwnd 11967 * that is the max the CC should add if we are exiting 11968 * recovery and doing a late add. 11969 */ 11970 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 11971 p_cwnd <<= 1; 11972 p_cwnd += tp->snd_cwnd; 11973 } 11974 } else if ((rack->rto_from_rec == 1) && 11975 SEQ_GEQ(th->th_ack, tp->snd_recover)) { 11976 /* 11977 * We were in recovery, hit a rxt timeout 11978 * and never re-entered recovery. The timeout(s) 11979 * made up all the lost data. In such a case 11980 * we need to clear the rto_from_rec flag. 11981 */ 11982 rack->rto_from_rec = 0; 11983 } 11984 /* 11985 * Let the congestion control algorithm update congestion control 11986 * related information. This typically means increasing the 11987 * congestion window. 
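 *
 * A rough example of the post-recovery clamp computed earlier
 * (hypothetical numbers, assuming rc_pace_min_segs is not smaller
 * than the fixed maxseg): with a 1448 byte segment and a snd_cwnd of
 * 20,000 bytes at recovery exit,
 *
 *	p_cwnd = (1448 * 2) + 20000;	(22,896 bytes)
 *
 * so if the CC module (e.g. cubic) tries to move the cwnd past that
 * on this ack, it is pulled back to p_cwnd just below.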
11988 */ 11989 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, post_recovery); 11990 if (post_recovery && 11991 (tp->snd_cwnd > p_cwnd)) { 11992 /* Must be non-newreno (cubic) getting too ahead of itself */ 11993 tp->snd_cwnd = p_cwnd; 11994 } 11995 SOCKBUF_LOCK(&so->so_snd); 11996 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 11997 tp->snd_wnd -= acked_amount; 11998 mfree = sbcut_locked(&so->so_snd, acked_amount); 11999 if ((sbused(&so->so_snd) == 0) && 12000 (acked > acked_amount) && 12001 (tp->t_state >= TCPS_FIN_WAIT_1) && 12002 (tp->t_flags & TF_SENTFIN)) { 12003 /* 12004 * We must be sure our fin 12005 * was sent and acked (we can be 12006 * in FIN_WAIT_1 without having 12007 * sent the fin). 12008 */ 12009 ourfinisacked = 1; 12010 } 12011 tp->snd_una = th->th_ack; 12012 /* wakeups? */ 12013 if (acked_amount && sbavail(&so->so_snd)) 12014 rack_adjust_sendmap_head(rack, &so->so_snd); 12015 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 12016 /* NB: sowwakeup_locked() does an implicit unlock. */ 12017 sowwakeup_locked(so); 12018 m_freem(mfree); 12019 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 12020 tp->snd_recover = tp->snd_una; 12021 12022 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { 12023 tp->snd_nxt = tp->snd_max; 12024 } 12025 if (under_pacing && 12026 (rack->use_fixed_rate == 0) && 12027 (rack->in_probe_rtt == 0) && 12028 rack->rc_gp_dyn_mul && 12029 rack->rc_always_pace) { 12030 /* Check if we are dragging bottom */ 12031 rack_check_bottom_drag(tp, rack, so); 12032 } 12033 if (tp->snd_una == tp->snd_max) { 12034 /* Nothing left outstanding */ 12035 tp->t_flags &= ~TF_PREVVALID; 12036 if (rack->r_ctl.rc_went_idle_time == 0) 12037 rack->r_ctl.rc_went_idle_time = 1; 12038 rack->r_ctl.retran_during_recovery = 0; 12039 rack->r_ctl.dsack_byte_cnt = 0; 12040 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 12041 if (sbavail(&tptosocket(tp)->so_snd) == 0) 12042 tp->t_acktime = 0; 12043 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12044 rack->rc_suspicious = 0; 12045 /* Set need output so persist might get set */ 12046 rack->r_wanted_output = 1; 12047 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 12048 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 12049 (sbavail(&so->so_snd) == 0) && 12050 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 12051 /* 12052 * The socket was gone and the 12053 * peer sent data (now or in the past), time to 12054 * reset him. 12055 */ 12056 *ret_val = 1; 12057 /* tcp_close will kill the inp pre-log the Reset */ 12058 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 12059 tp = tcp_close(tp); 12060 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 12061 return (1); 12062 } 12063 } 12064 if (ofia) 12065 *ofia = ourfinisacked; 12066 return (0); 12067 } 12068 12069 12070 static void 12071 rack_log_collapse(struct tcp_rack *rack, uint32_t cnt, uint32_t split, uint32_t out, int line, 12072 int dir, uint32_t flags, struct rack_sendmap *rsm) 12073 { 12074 if (tcp_bblogging_on(rack->rc_tp)) { 12075 union tcp_log_stackspecific log; 12076 struct timeval tv; 12077 12078 memset(&log, 0, sizeof(log)); 12079 log.u_bbr.flex1 = cnt; 12080 log.u_bbr.flex2 = split; 12081 log.u_bbr.flex3 = out; 12082 log.u_bbr.flex4 = line; 12083 log.u_bbr.flex5 = rack->r_must_retran; 12084 log.u_bbr.flex6 = flags; 12085 log.u_bbr.flex7 = rack->rc_has_collapsed; 12086 log.u_bbr.flex8 = dir; /* 12087 * 1 is collapsed, 0 is uncollapsed, 12088 * 2 is log of a rsm being marked, 3 is a split. 
12089 */ 12090 if (rsm == NULL) 12091 log.u_bbr.rttProp = 0; 12092 else 12093 log.u_bbr.rttProp = (uintptr_t)rsm; 12094 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 12095 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 12096 TCP_LOG_EVENTP(rack->rc_tp, NULL, 12097 &rack->rc_inp->inp_socket->so_rcv, 12098 &rack->rc_inp->inp_socket->so_snd, 12099 TCP_RACK_LOG_COLLAPSE, 0, 12100 0, &log, false, &tv); 12101 } 12102 } 12103 12104 static void 12105 rack_collapsed_window(struct tcp_rack *rack, uint32_t out, tcp_seq th_ack, int line) 12106 { 12107 /* 12108 * Here all we do is mark the collapsed point and set the flag. 12109 * This may happen again and again, but there is no 12110 * sense splitting our map until we know where the 12111 * peer finally lands in the collapse. 12112 */ 12113 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); 12114 if ((rack->rc_has_collapsed == 0) || 12115 (rack->r_ctl.last_collapse_point != (th_ack + rack->rc_tp->snd_wnd))) 12116 counter_u64_add(rack_collapsed_win_seen, 1); 12117 rack->r_ctl.last_collapse_point = th_ack + rack->rc_tp->snd_wnd; 12118 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max; 12119 rack->rc_has_collapsed = 1; 12120 rack->r_collapse_point_valid = 1; 12121 rack_log_collapse(rack, 0, th_ack, rack->r_ctl.last_collapse_point, line, 1, 0, NULL); 12122 } 12123 12124 static void 12125 rack_un_collapse_window(struct tcp_rack *rack, int line) 12126 { 12127 struct rack_sendmap *nrsm, *rsm; 12128 int cnt = 0, split = 0; 12129 int insret __diagused; 12130 12131 12132 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); 12133 rack->rc_has_collapsed = 0; 12134 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); 12135 if (rsm == NULL) { 12136 /* Nothing to do maybe the peer ack'ed it all */ 12137 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 12138 return; 12139 } 12140 /* Now do we need to split this one? */ 12141 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) { 12142 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 12143 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm); 12144 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 12145 if (nrsm == NULL) { 12146 /* We can't get a rsm, mark all? 
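 *
 * Rough picture of the split attempted below: when the collapse
 * point lands inside this rsm we clone it so that only the bytes
 * the peer can no longer hold get flagged:
 *
 *	[ r_start ...... last_collapse_point ...... r_end )
 *	  rsm keeps this part | nrsm gets RACK_RWND_COLLAPSED
 *
 * If no rsm can be allocated for the clone we simply fall through
 * (nrsm = rsm) and mark from the start of this rsm instead.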
*/ 12147 nrsm = rsm; 12148 goto no_split; 12149 } 12150 /* Clone it */ 12151 split = 1; 12152 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point); 12153 #ifndef INVARIANTS 12154 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 12155 #else 12156 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 12157 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 12158 nrsm, insret, rack, rsm); 12159 } 12160 #endif 12161 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 12162 rack->r_ctl.last_collapse_point, __LINE__); 12163 if (rsm->r_in_tmap) { 12164 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 12165 nrsm->r_in_tmap = 1; 12166 } 12167 /* 12168 * Set in the new RSM as the 12169 * collapsed starting point 12170 */ 12171 rsm = nrsm; 12172 } 12173 12174 no_split: 12175 TQHASH_FOREACH_FROM(nrsm, rack->r_ctl.tqh, rsm) { 12176 cnt++; 12177 nrsm->r_flags |= RACK_RWND_COLLAPSED; 12178 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm); 12179 cnt++; 12180 } 12181 if (cnt) { 12182 counter_u64_add(rack_collapsed_win, 1); 12183 } 12184 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 12185 } 12186 12187 static void 12188 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 12189 int32_t tlen, int32_t tfo_syn) 12190 { 12191 if (DELAY_ACK(tp, tlen) || tfo_syn) { 12192 rack_timer_cancel(tp, rack, 12193 rack->r_ctl.rc_rcvtime, __LINE__); 12194 tp->t_flags |= TF_DELACK; 12195 } else { 12196 rack->r_wanted_output = 1; 12197 tp->t_flags |= TF_ACKNOW; 12198 } 12199 } 12200 12201 static void 12202 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack) 12203 { 12204 /* 12205 * If fast output is in progress, lets validate that 12206 * the new window did not shrink on us and make it 12207 * so fast output should end. 12208 */ 12209 if (rack->r_fast_output) { 12210 uint32_t out; 12211 12212 /* 12213 * Calculate what we will send if left as is 12214 * and compare that to our send window. 12215 */ 12216 out = ctf_outstanding(tp); 12217 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 12218 /* ok we have an issue */ 12219 if (out >= tp->snd_wnd) { 12220 /* Turn off fast output the window is met or collapsed */ 12221 rack->r_fast_output = 0; 12222 } else { 12223 /* we have some room left */ 12224 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 12225 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 12226 /* If not at least 1 full segment never mind */ 12227 rack->r_fast_output = 0; 12228 } 12229 } 12230 } 12231 } 12232 } 12233 12234 /* 12235 * Return value of 1, the TCB is unlocked and most 12236 * likely gone, return value of 0, the TCP is still 12237 * locked. 12238 */ 12239 static int 12240 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 12241 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 12242 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 12243 { 12244 /* 12245 * Update window information. Don't look at window if no ACK: TAC's 12246 * send garbage on first SYN. 
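 *
 * The acceptance test below is the classic window-update check,
 * with snd_wl1/snd_wl2 holding the seq/ack of the segment that last
 * updated the window; roughly we take tiwin only when
 *
 *	SEQ_LT(snd_wl1, seq) ||
 *	(snd_wl1 == seq && (SEQ_LT(snd_wl2, ack) ||
 *	 (snd_wl2 == ack && tiwin > snd_wnd)))
 *
 * so that an old, reordered segment cannot move the send window
 * out of order.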
12247 */ 12248 int32_t nsegs; 12249 int32_t tfo_syn; 12250 struct tcp_rack *rack; 12251 12252 INP_WLOCK_ASSERT(tptoinpcb(tp)); 12253 12254 rack = (struct tcp_rack *)tp->t_fb_ptr; 12255 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12256 if ((thflags & TH_ACK) && 12257 (SEQ_LT(tp->snd_wl1, th->th_seq) || 12258 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 12259 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 12260 /* keep track of pure window updates */ 12261 if (tlen == 0 && 12262 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 12263 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 12264 tp->snd_wnd = tiwin; 12265 rack_validate_fo_sendwin_up(tp, rack); 12266 tp->snd_wl1 = th->th_seq; 12267 tp->snd_wl2 = th->th_ack; 12268 if (tp->snd_wnd > tp->max_sndwnd) 12269 tp->max_sndwnd = tp->snd_wnd; 12270 rack->r_wanted_output = 1; 12271 } else if (thflags & TH_ACK) { 12272 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 12273 tp->snd_wnd = tiwin; 12274 rack_validate_fo_sendwin_up(tp, rack); 12275 tp->snd_wl1 = th->th_seq; 12276 tp->snd_wl2 = th->th_ack; 12277 } 12278 } 12279 if (tp->snd_wnd < ctf_outstanding(tp)) 12280 /* The peer collapsed the window */ 12281 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); 12282 else if (rack->rc_has_collapsed) 12283 rack_un_collapse_window(rack, __LINE__); 12284 if ((rack->r_collapse_point_valid) && 12285 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point))) 12286 rack->r_collapse_point_valid = 0; 12287 /* Was persist timer active and now we have window space? */ 12288 if ((rack->rc_in_persist != 0) && 12289 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 12290 rack->r_ctl.rc_pace_min_segs))) { 12291 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 12292 tp->snd_nxt = tp->snd_max; 12293 /* Make sure we output to start the timer */ 12294 rack->r_wanted_output = 1; 12295 } 12296 /* Do we enter persists? */ 12297 if ((rack->rc_in_persist == 0) && 12298 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 12299 TCPS_HAVEESTABLISHED(tp->t_state) && 12300 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 12301 sbavail(&tptosocket(tp)->so_snd) && 12302 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 12303 /* 12304 * Here the rwnd is less than 12305 * the pacing size, we are established, 12306 * nothing is outstanding, and there is 12307 * data to send. Enter persists. 12308 */ 12309 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); 12310 } 12311 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 12312 m_freem(m); 12313 return (0); 12314 } 12315 /* 12316 * don't process the URG bit, ignore them drag 12317 * along the up. 12318 */ 12319 tp->rcv_up = tp->rcv_nxt; 12320 12321 /* 12322 * Process the segment text, merging it into the TCP sequencing 12323 * queue, and arranging for acknowledgment of receipt if necessary. 12324 * This process logically involves adjusting tp->rcv_wnd as data is 12325 * presented to the user (this happens in tcp_usrreq.c, case 12326 * PRU_RCVD). If a FIN has already been received on this connection 12327 * then we just ignore the text. 
12328 */ 12329 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 12330 (tp->t_flags & TF_FASTOPEN)); 12331 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 12332 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 12333 tcp_seq save_start = th->th_seq; 12334 tcp_seq save_rnxt = tp->rcv_nxt; 12335 int save_tlen = tlen; 12336 12337 m_adj(m, drop_hdrlen); /* delayed header drop */ 12338 /* 12339 * Insert segment which includes th into TCP reassembly 12340 * queue with control block tp. Set thflags to whether 12341 * reassembly now includes a segment with FIN. This handles 12342 * the common case inline (segment is the next to be 12343 * received on an established connection, and the queue is 12344 * empty), avoiding linkage into and removal from the queue 12345 * and repetition of various conversions. Set DELACK for 12346 * segments received in order, but ack immediately when 12347 * segments are out of order (so fast retransmit can work). 12348 */ 12349 if (th->th_seq == tp->rcv_nxt && 12350 SEGQ_EMPTY(tp) && 12351 (TCPS_HAVEESTABLISHED(tp->t_state) || 12352 tfo_syn)) { 12353 #ifdef NETFLIX_SB_LIMITS 12354 u_int mcnt, appended; 12355 12356 if (so->so_rcv.sb_shlim) { 12357 mcnt = m_memcnt(m); 12358 appended = 0; 12359 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 12360 CFO_NOSLEEP, NULL) == false) { 12361 counter_u64_add(tcp_sb_shlim_fails, 1); 12362 m_freem(m); 12363 return (0); 12364 } 12365 } 12366 #endif 12367 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); 12368 tp->rcv_nxt += tlen; 12369 if (tlen && 12370 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 12371 (tp->t_fbyte_in == 0)) { 12372 tp->t_fbyte_in = ticks; 12373 if (tp->t_fbyte_in == 0) 12374 tp->t_fbyte_in = 1; 12375 if (tp->t_fbyte_out && tp->t_fbyte_in) 12376 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 12377 } 12378 thflags = tcp_get_flags(th) & TH_FIN; 12379 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 12380 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 12381 SOCKBUF_LOCK(&so->so_rcv); 12382 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 12383 m_freem(m); 12384 } else { 12385 int32_t newsize; 12386 12387 if (tlen > 0) { 12388 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 12389 if (newsize) 12390 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 12391 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 12392 } 12393 #ifdef NETFLIX_SB_LIMITS 12394 appended = 12395 #endif 12396 sbappendstream_locked(&so->so_rcv, m, 0); 12397 } 12398 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 12399 /* NB: sorwakeup_locked() does an implicit unlock. */ 12400 sorwakeup_locked(so); 12401 #ifdef NETFLIX_SB_LIMITS 12402 if (so->so_rcv.sb_shlim && appended != mcnt) 12403 counter_fo_release(so->so_rcv.sb_shlim, 12404 mcnt - appended); 12405 #endif 12406 } else { 12407 /* 12408 * XXX: Due to the header drop above "th" is 12409 * theoretically invalid by now. Fortunately 12410 * m_adj() doesn't actually frees any mbufs when 12411 * trimming from the head. 12412 */ 12413 tcp_seq temp = save_start; 12414 12415 thflags = tcp_reass(tp, th, &temp, &tlen, m); 12416 tp->t_flags |= TF_ACKNOW; 12417 if (tp->t_flags & TF_WAKESOR) { 12418 tp->t_flags &= ~TF_WAKESOR; 12419 /* NB: sorwakeup_locked() does an implicit unlock. */ 12420 sorwakeup_locked(so); 12421 } 12422 } 12423 if ((tp->t_flags & TF_SACK_PERMIT) && 12424 (save_tlen > 0) && 12425 TCPS_HAVEESTABLISHED(tp->t_state)) { 12426 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 12427 /* 12428 * DSACK actually handled in the fastpath 12429 * above. 
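 *
 * e.g. (made-up numbers) save_start = 1000, save_tlen = 500 and
 * rcv_nxt was already at or beyond 1500: the drop checks trimmed
 * tlen to 0, and the duplicate span [1000, 1500) is what gets
 * reported back to the sender as a D-SACK (RFC 2883) so it can
 * recognize the retransmission as spurious.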
12430 */ 12431 tcp_update_sack_list(tp, save_start, 12432 save_start + save_tlen); 12433 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 12434 if ((tp->rcv_numsacks >= 1) && 12435 (tp->sackblks[0].end == save_start)) { 12436 /* 12437 * Partial overlap, recorded at todrop 12438 * above. 12439 */ 12440 tcp_update_sack_list(tp, 12441 tp->sackblks[0].start, 12442 tp->sackblks[0].end); 12443 } else { 12444 tcp_update_dsack_list(tp, save_start, 12445 save_start + save_tlen); 12446 } 12447 } else if (tlen >= save_tlen) { 12448 /* Update of sackblks. */ 12449 tcp_update_dsack_list(tp, save_start, 12450 save_start + save_tlen); 12451 } else if (tlen > 0) { 12452 tcp_update_dsack_list(tp, save_start, 12453 save_start + tlen); 12454 } 12455 } 12456 } else { 12457 m_freem(m); 12458 thflags &= ~TH_FIN; 12459 } 12460 12461 /* 12462 * If FIN is received ACK the FIN and let the user know that the 12463 * connection is closing. 12464 */ 12465 if (thflags & TH_FIN) { 12466 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 12467 /* The socket upcall is handled by socantrcvmore. */ 12468 socantrcvmore(so); 12469 /* 12470 * If connection is half-synchronized (ie NEEDSYN 12471 * flag on) then delay ACK, so it may be piggybacked 12472 * when SYN is sent. Otherwise, since we received a 12473 * FIN then no more input can be expected, send ACK 12474 * now. 12475 */ 12476 if (tp->t_flags & TF_NEEDSYN) { 12477 rack_timer_cancel(tp, rack, 12478 rack->r_ctl.rc_rcvtime, __LINE__); 12479 tp->t_flags |= TF_DELACK; 12480 } else { 12481 tp->t_flags |= TF_ACKNOW; 12482 } 12483 tp->rcv_nxt++; 12484 } 12485 switch (tp->t_state) { 12486 /* 12487 * In SYN_RECEIVED and ESTABLISHED STATES enter the 12488 * CLOSE_WAIT state. 12489 */ 12490 case TCPS_SYN_RECEIVED: 12491 tp->t_starttime = ticks; 12492 /* FALLTHROUGH */ 12493 case TCPS_ESTABLISHED: 12494 rack_timer_cancel(tp, rack, 12495 rack->r_ctl.rc_rcvtime, __LINE__); 12496 tcp_state_change(tp, TCPS_CLOSE_WAIT); 12497 break; 12498 12499 /* 12500 * If still in FIN_WAIT_1 STATE FIN has not been 12501 * acked so enter the CLOSING state. 12502 */ 12503 case TCPS_FIN_WAIT_1: 12504 rack_timer_cancel(tp, rack, 12505 rack->r_ctl.rc_rcvtime, __LINE__); 12506 tcp_state_change(tp, TCPS_CLOSING); 12507 break; 12508 12509 /* 12510 * In FIN_WAIT_2 state enter the TIME_WAIT state, 12511 * starting the time-wait timer, turning off the 12512 * other standard timers. 12513 */ 12514 case TCPS_FIN_WAIT_2: 12515 rack_timer_cancel(tp, rack, 12516 rack->r_ctl.rc_rcvtime, __LINE__); 12517 tcp_twstart(tp); 12518 return (1); 12519 } 12520 } 12521 /* 12522 * Return any desired output. 12523 */ 12524 if ((tp->t_flags & TF_ACKNOW) || 12525 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 12526 rack->r_wanted_output = 1; 12527 } 12528 return (0); 12529 } 12530 12531 /* 12532 * Here nothing is really faster, its just that we 12533 * have broken out the fast-data path also just like 12534 * the fast-ack. 12535 */ 12536 static int 12537 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 12538 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12539 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 12540 { 12541 int32_t nsegs; 12542 int32_t newsize = 0; /* automatic sockbuf scaling */ 12543 struct tcp_rack *rack; 12544 #ifdef NETFLIX_SB_LIMITS 12545 u_int mcnt, appended; 12546 #endif 12547 12548 /* 12549 * If last ACK falls within this segment's sequence numbers, record 12550 * the timestamp. 
NOTE that the test is modified according to the 12551 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 12552 */ 12553 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 12554 return (0); 12555 } 12556 if (tiwin && tiwin != tp->snd_wnd) { 12557 return (0); 12558 } 12559 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 12560 return (0); 12561 } 12562 if (__predict_false((to->to_flags & TOF_TS) && 12563 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 12564 return (0); 12565 } 12566 if (__predict_false((th->th_ack != tp->snd_una))) { 12567 return (0); 12568 } 12569 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 12570 return (0); 12571 } 12572 if ((to->to_flags & TOF_TS) != 0 && 12573 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 12574 tp->ts_recent_age = tcp_ts_getticks(); 12575 tp->ts_recent = to->to_tsval; 12576 } 12577 rack = (struct tcp_rack *)tp->t_fb_ptr; 12578 /* 12579 * This is a pure, in-sequence data packet with nothing on the 12580 * reassembly queue and we have enough buffer space to take it. 12581 */ 12582 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12583 12584 #ifdef NETFLIX_SB_LIMITS 12585 if (so->so_rcv.sb_shlim) { 12586 mcnt = m_memcnt(m); 12587 appended = 0; 12588 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 12589 CFO_NOSLEEP, NULL) == false) { 12590 counter_u64_add(tcp_sb_shlim_fails, 1); 12591 m_freem(m); 12592 return (1); 12593 } 12594 } 12595 #endif 12596 /* Clean receiver SACK report if present */ 12597 if (tp->rcv_numsacks) 12598 tcp_clean_sackreport(tp); 12599 KMOD_TCPSTAT_INC(tcps_preddat); 12600 tp->rcv_nxt += tlen; 12601 if (tlen && 12602 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 12603 (tp->t_fbyte_in == 0)) { 12604 tp->t_fbyte_in = ticks; 12605 if (tp->t_fbyte_in == 0) 12606 tp->t_fbyte_in = 1; 12607 if (tp->t_fbyte_out && tp->t_fbyte_in) 12608 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 12609 } 12610 /* 12611 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 12612 */ 12613 tp->snd_wl1 = th->th_seq; 12614 /* 12615 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 12616 */ 12617 tp->rcv_up = tp->rcv_nxt; 12618 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 12619 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 12620 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 12621 12622 /* Add data to socket buffer. */ 12623 SOCKBUF_LOCK(&so->so_rcv); 12624 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 12625 m_freem(m); 12626 } else { 12627 /* 12628 * Set new socket buffer size. Give up when limit is 12629 * reached. 12630 */ 12631 if (newsize) 12632 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 12633 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 12634 m_adj(m, drop_hdrlen); /* delayed header drop */ 12635 #ifdef NETFLIX_SB_LIMITS 12636 appended = 12637 #endif 12638 sbappendstream_locked(&so->so_rcv, m, 0); 12639 ctf_calc_rwin(so, tp); 12640 } 12641 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 12642 /* NB: sorwakeup_locked() does an implicit unlock. */ 12643 sorwakeup_locked(so); 12644 #ifdef NETFLIX_SB_LIMITS 12645 if (so->so_rcv.sb_shlim && mcnt != appended) 12646 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 12647 #endif 12648 rack_handle_delayed_ack(tp, rack, tlen, 0); 12649 if (tp->snd_una == tp->snd_max) 12650 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 12651 return (1); 12652 } 12653 12654 /* 12655 * This subfunction is used to try to highly optimize the 12656 * fast path. We again allow window updates that are 12657 * in sequence to remain in the fast-path. We also add 12658 * in the __predict's to attempt to help the compiler. 
12659 * Note that if we return a 0, then we can *not* process 12660 * it and the caller should push the packet into the 12661 * slow-path. 12662 */ 12663 static int 12664 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 12665 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12666 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 12667 { 12668 int32_t acked; 12669 int32_t nsegs; 12670 int32_t under_pacing = 0; 12671 struct tcp_rack *rack; 12672 12673 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 12674 /* Old ack, behind (or duplicate to) the last one rcv'd */ 12675 return (0); 12676 } 12677 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 12678 /* Above what we have sent? */ 12679 return (0); 12680 } 12681 if (__predict_false(tiwin == 0)) { 12682 /* zero window */ 12683 return (0); 12684 } 12685 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 12686 /* We need a SYN or a FIN, unlikely.. */ 12687 return (0); 12688 } 12689 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 12690 /* Timestamp is behind .. old ack with seq wrap? */ 12691 return (0); 12692 } 12693 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 12694 /* Still recovering */ 12695 return (0); 12696 } 12697 rack = (struct tcp_rack *)tp->t_fb_ptr; 12698 if (rack->r_ctl.rc_sacked) { 12699 /* We have sack holes on our scoreboard */ 12700 return (0); 12701 } 12702 /* Ok if we reach here, we can process a fast-ack */ 12703 if (rack->gp_ready && 12704 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 12705 under_pacing = 1; 12706 } 12707 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12708 rack_log_ack(tp, to, th, 0, 0, NULL, NULL); 12709 /* Did the window get updated? */ 12710 if (tiwin != tp->snd_wnd) { 12711 tp->snd_wnd = tiwin; 12712 rack_validate_fo_sendwin_up(tp, rack); 12713 tp->snd_wl1 = th->th_seq; 12714 if (tp->snd_wnd > tp->max_sndwnd) 12715 tp->max_sndwnd = tp->snd_wnd; 12716 } 12717 /* Do we exit persists? */ 12718 if ((rack->rc_in_persist != 0) && 12719 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 12720 rack->r_ctl.rc_pace_min_segs))) { 12721 rack_exit_persist(tp, rack, cts); 12722 } 12723 /* Do we enter persists? */ 12724 if ((rack->rc_in_persist == 0) && 12725 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 12726 TCPS_HAVEESTABLISHED(tp->t_state) && 12727 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 12728 sbavail(&tptosocket(tp)->so_snd) && 12729 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 12730 /* 12731 * Here the rwnd is less than 12732 * the pacing size, we are established, 12733 * nothing is outstanding, and there is 12734 * data to send. Enter persists. 12735 */ 12736 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, th->th_ack); 12737 } 12738 /* 12739 * If last ACK falls within this segment's sequence numbers, record 12740 * the timestamp. NOTE that the test is modified according to the 12741 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 12742 */ 12743 if ((to->to_flags & TOF_TS) != 0 && 12744 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 12745 tp->ts_recent_age = tcp_ts_getticks(); 12746 tp->ts_recent = to->to_tsval; 12747 } 12748 /* 12749 * This is a pure ack for outstanding data. 12750 */ 12751 KMOD_TCPSTAT_INC(tcps_predack); 12752 12753 /* 12754 * "bad retransmit" recovery. 
12755 */ 12756 if ((tp->t_flags & TF_PREVVALID) && 12757 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 12758 tp->t_flags &= ~TF_PREVVALID; 12759 if (tp->t_rxtshift == 1 && 12760 (int)(ticks - tp->t_badrxtwin) < 0) 12761 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 12762 } 12763 /* 12764 * Recalculate the transmit timer / rtt. 12765 * 12766 * Some boxes send broken timestamp replies during the SYN+ACK 12767 * phase, ignore timestamps of 0 or we could calculate a huge RTT 12768 * and blow up the retransmit timer. 12769 */ 12770 acked = BYTES_THIS_ACK(tp, th); 12771 12772 #ifdef TCP_HHOOK 12773 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 12774 hhook_run_tcp_est_in(tp, th, to); 12775 #endif 12776 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 12777 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 12778 if (acked) { 12779 struct mbuf *mfree; 12780 12781 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 12782 SOCKBUF_LOCK(&so->so_snd); 12783 mfree = sbcut_locked(&so->so_snd, acked); 12784 tp->snd_una = th->th_ack; 12785 /* Note we want to hold the sb lock through the sendmap adjust */ 12786 rack_adjust_sendmap_head(rack, &so->so_snd); 12787 /* Wake up the socket if we have room to write more */ 12788 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 12789 sowwakeup_locked(so); 12790 m_freem(mfree); 12791 tp->t_rxtshift = 0; 12792 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 12793 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 12794 rack->rc_tlp_in_progress = 0; 12795 rack->r_ctl.rc_tlp_cnt_out = 0; 12796 /* 12797 * If it is the RXT timer we want to 12798 * stop it, so we can restart a TLP. 12799 */ 12800 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 12801 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12802 12803 #ifdef TCP_REQUEST_TRK 12804 rack_req_check_for_comp(rack, th->th_ack); 12805 #endif 12806 } 12807 /* 12808 * Let the congestion control algorithm update congestion control 12809 * related information. This typically means increasing the 12810 * congestion window. 12811 */ 12812 if (tp->snd_wnd < ctf_outstanding(tp)) { 12813 /* The peer collapsed the window */ 12814 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); 12815 } else if (rack->rc_has_collapsed) 12816 rack_un_collapse_window(rack, __LINE__); 12817 if ((rack->r_collapse_point_valid) && 12818 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point))) 12819 rack->r_collapse_point_valid = 0; 12820 /* 12821 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 12822 */ 12823 tp->snd_wl2 = th->th_ack; 12824 tp->t_dupacks = 0; 12825 m_freem(m); 12826 /* ND6_HINT(tp); *//* Some progress has been made. */ 12827 12828 /* 12829 * If all outstanding data are acked, stop retransmit timer, 12830 * otherwise restart timer using current (possibly backed-off) 12831 * value. If process is waiting for space, wakeup/selwakeup/signal. 12832 * If data are ready to send, let tcp_output decide between more 12833 * output or persist. 
12834 */ 12835 if (under_pacing && 12836 (rack->use_fixed_rate == 0) && 12837 (rack->in_probe_rtt == 0) && 12838 rack->rc_gp_dyn_mul && 12839 rack->rc_always_pace) { 12840 /* Check if we are dragging bottom */ 12841 rack_check_bottom_drag(tp, rack, so); 12842 } 12843 if (tp->snd_una == tp->snd_max) { 12844 tp->t_flags &= ~TF_PREVVALID; 12845 rack->r_ctl.retran_during_recovery = 0; 12846 rack->rc_suspicious = 0; 12847 rack->r_ctl.dsack_byte_cnt = 0; 12848 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 12849 if (rack->r_ctl.rc_went_idle_time == 0) 12850 rack->r_ctl.rc_went_idle_time = 1; 12851 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 12852 if (sbavail(&tptosocket(tp)->so_snd) == 0) 12853 tp->t_acktime = 0; 12854 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12855 } 12856 if (acked && rack->r_fast_output) 12857 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 12858 if (sbavail(&so->so_snd)) { 12859 rack->r_wanted_output = 1; 12860 } 12861 return (1); 12862 } 12863 12864 /* 12865 * Return value of 1, the TCB is unlocked and most 12866 * likely gone, return value of 0, the TCP is still 12867 * locked. 12868 */ 12869 static int 12870 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 12871 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12872 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12873 { 12874 int32_t ret_val = 0; 12875 int32_t orig_tlen = tlen; 12876 int32_t todrop; 12877 int32_t ourfinisacked = 0; 12878 struct tcp_rack *rack; 12879 12880 INP_WLOCK_ASSERT(tptoinpcb(tp)); 12881 12882 ctf_calc_rwin(so, tp); 12883 /* 12884 * If the state is SYN_SENT: if seg contains an ACK, but not for our 12885 * SYN, drop the input. if seg contains a RST, then drop the 12886 * connection. if seg does not contain SYN, then drop it. Otherwise 12887 * this is an acceptable SYN segment initialize tp->rcv_nxt and 12888 * tp->irs if seg contains ack then advance tp->snd_una if seg 12889 * contains an ECE and ECN support is enabled, the stream is ECN 12890 * capable. if SYN has been acked change to ESTABLISHED else 12891 * SYN_RCVD state arrange for segment to be acked (eventually) 12892 * continue processing rest of data/controls. 12893 */ 12894 if ((thflags & TH_ACK) && 12895 (SEQ_LEQ(th->th_ack, tp->iss) || 12896 SEQ_GT(th->th_ack, tp->snd_max))) { 12897 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 12898 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12899 return (1); 12900 } 12901 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 12902 TCP_PROBE5(connect__refused, NULL, tp, 12903 mtod(m, const char *), tp, th); 12904 tp = tcp_drop(tp, ECONNREFUSED); 12905 ctf_do_drop(m, tp); 12906 return (1); 12907 } 12908 if (thflags & TH_RST) { 12909 ctf_do_drop(m, tp); 12910 return (1); 12911 } 12912 if (!(thflags & TH_SYN)) { 12913 ctf_do_drop(m, tp); 12914 return (1); 12915 } 12916 tp->irs = th->th_seq; 12917 tcp_rcvseqinit(tp); 12918 rack = (struct tcp_rack *)tp->t_fb_ptr; 12919 if (thflags & TH_ACK) { 12920 int tfo_partial = 0; 12921 12922 KMOD_TCPSTAT_INC(tcps_connects); 12923 soisconnected(so); 12924 #ifdef MAC 12925 mac_socketpeer_set_from_mbuf(m, so); 12926 #endif 12927 /* Do window scaling on this connection? 
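 *
 * Rough example: if we asked for a window scale of 7 and the peer's
 * SYN|ACK carried its own window scale option, then below
 *
 *	tp->rcv_scale = tp->request_r_scale;		(7)
 *	tp->rcv_adv  += min(tp->rcv_wnd, TCP_MAXWIN << 7);
 *
 * which advances the advertised edge by at most 65535 << 7, about
 * 8MB, rather than the unscaled 64KB limit.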
*/ 12928 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 12929 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 12930 tp->rcv_scale = tp->request_r_scale; 12931 } 12932 tp->rcv_adv += min(tp->rcv_wnd, 12933 TCP_MAXWIN << tp->rcv_scale); 12934 /* 12935 * If not all the data that was sent in the TFO SYN 12936 * has been acked, resend the remainder right away. 12937 */ 12938 if ((tp->t_flags & TF_FASTOPEN) && 12939 (tp->snd_una != tp->snd_max)) { 12940 /* Was it a partial ack? */ 12941 if (SEQ_LT(th->th_ack, tp->snd_max)) 12942 tfo_partial = 1; 12943 } 12944 /* 12945 * If there's data, delay ACK; if there's also a FIN ACKNOW 12946 * will be turned on later. 12947 */ 12948 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 12949 rack_timer_cancel(tp, rack, 12950 rack->r_ctl.rc_rcvtime, __LINE__); 12951 tp->t_flags |= TF_DELACK; 12952 } else { 12953 rack->r_wanted_output = 1; 12954 tp->t_flags |= TF_ACKNOW; 12955 } 12956 12957 tcp_ecn_input_syn_sent(tp, thflags, iptos); 12958 12959 if (SEQ_GT(th->th_ack, tp->snd_una)) { 12960 /* 12961 * We advance snd_una for the 12962 * fast open case. If th_ack is 12963 * acknowledging data beyond 12964 * snd_una we can't just call 12965 * ack-processing since the 12966 * data stream in our send-map 12967 * will start at snd_una + 1 (one 12968 * beyond the SYN). If its just 12969 * equal we don't need to do that 12970 * and there is no send_map. 12971 */ 12972 tp->snd_una++; 12973 if (tfo_partial && (SEQ_GT(tp->snd_max, tp->snd_una))) { 12974 /* 12975 * We sent a SYN with data, and thus have a 12976 * sendmap entry with a SYN set. Lets find it 12977 * and take off the send bit and the byte and 12978 * set it up to be what we send (send it next). 12979 */ 12980 struct rack_sendmap *rsm; 12981 12982 rsm = tqhash_min(rack->r_ctl.tqh); 12983 if (rsm) { 12984 if (rsm->r_flags & RACK_HAS_SYN) { 12985 rsm->r_flags &= ~RACK_HAS_SYN; 12986 rsm->r_start++; 12987 } 12988 rack->r_ctl.rc_resend = rsm; 12989 } 12990 } 12991 } 12992 /* 12993 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 12994 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 12995 */ 12996 tp->t_starttime = ticks; 12997 if (tp->t_flags & TF_NEEDFIN) { 12998 tcp_state_change(tp, TCPS_FIN_WAIT_1); 12999 tp->t_flags &= ~TF_NEEDFIN; 13000 thflags &= ~TH_SYN; 13001 } else { 13002 tcp_state_change(tp, TCPS_ESTABLISHED); 13003 TCP_PROBE5(connect__established, NULL, tp, 13004 mtod(m, const char *), tp, th); 13005 rack_cc_conn_init(tp); 13006 } 13007 } else { 13008 /* 13009 * Received initial SYN in SYN-SENT[*] state => simultaneous 13010 * open. If segment contains CC option and there is a 13011 * cached CC, apply TAO test. If it succeeds, connection is * 13012 * half-synchronized. Otherwise, do 3-way handshake: 13013 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 13014 * there was no CC option, clear cached CC value. 13015 */ 13016 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); 13017 tcp_state_change(tp, TCPS_SYN_RECEIVED); 13018 } 13019 /* 13020 * Advance th->th_seq to correspond to first data byte. If data, 13021 * trim to stay within window, dropping FIN if necessary. 13022 */ 13023 th->th_seq++; 13024 if (tlen > tp->rcv_wnd) { 13025 todrop = tlen - tp->rcv_wnd; 13026 m_adj(m, -todrop); 13027 tlen = tp->rcv_wnd; 13028 thflags &= ~TH_FIN; 13029 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 13030 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 13031 } 13032 tp->snd_wl1 = th->th_seq - 1; 13033 tp->rcv_up = th->th_seq; 13034 /* 13035 * Client side of transaction: already sent SYN and data. 
If the 13036 * remote host used T/TCP to validate the SYN, our data will be 13037 * ACK'd; if so, enter normal data segment processing in the middle 13038 * of step 5, ack processing. Otherwise, goto step 6. 13039 */ 13040 if (thflags & TH_ACK) { 13041 /* For syn-sent we need to possibly update the rtt */ 13042 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 13043 uint32_t t, mcts; 13044 13045 mcts = tcp_ts_getticks(); 13046 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 13047 if (!tp->t_rttlow || tp->t_rttlow > t) 13048 tp->t_rttlow = t; 13049 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 13050 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 13051 tcp_rack_xmit_timer_commit(rack, tp); 13052 } 13053 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) 13054 return (ret_val); 13055 /* We may have changed to FIN_WAIT_1 above */ 13056 if (tp->t_state == TCPS_FIN_WAIT_1) { 13057 /* 13058 * In FIN_WAIT_1 STATE in addition to the processing 13059 * for the ESTABLISHED state if our FIN is now 13060 * acknowledged then enter FIN_WAIT_2. 13061 */ 13062 if (ourfinisacked) { 13063 /* 13064 * If we can't receive any more data, then 13065 * closing user can proceed. Starting the 13066 * timer is contrary to the specification, 13067 * but if we don't get a FIN we'll hang 13068 * forever. 13069 * 13070 * XXXjl: we should release the tp also, and 13071 * use a compressed state. 13072 */ 13073 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13074 soisdisconnected(so); 13075 tcp_timer_activate(tp, TT_2MSL, 13076 (tcp_fast_finwait2_recycle ? 13077 tcp_finwait2_timeout : 13078 TP_MAXIDLE(tp))); 13079 } 13080 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13081 } 13082 } 13083 } 13084 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13085 tiwin, thflags, nxt_pkt)); 13086 } 13087 13088 /* 13089 * Return value of 1, the TCB is unlocked and most 13090 * likely gone, return value of 0, the TCP is still 13091 * locked. 13092 */ 13093 static int 13094 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 13095 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13096 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13097 { 13098 struct tcp_rack *rack; 13099 int32_t orig_tlen = tlen; 13100 int32_t ret_val = 0; 13101 int32_t ourfinisacked = 0; 13102 13103 rack = (struct tcp_rack *)tp->t_fb_ptr; 13104 ctf_calc_rwin(so, tp); 13105 if ((thflags & TH_RST) || 13106 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13107 return (ctf_process_rst(m, th, so, tp)); 13108 if ((thflags & TH_ACK) && 13109 (SEQ_LEQ(th->th_ack, tp->snd_una) || 13110 SEQ_GT(th->th_ack, tp->snd_max))) { 13111 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13112 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13113 return (1); 13114 } 13115 if (tp->t_flags & TF_FASTOPEN) { 13116 /* 13117 * When a TFO connection is in SYN_RECEIVED, the 13118 * only valid packets are the initial SYN, a 13119 * retransmit/copy of the initial SYN (possibly with 13120 * a subset of the original data), a valid ACK, a 13121 * FIN, or a RST. 
13122 */ 13123 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 13124 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13125 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13126 return (1); 13127 } else if (thflags & TH_SYN) { 13128 /* non-initial SYN is ignored */ 13129 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 13130 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 13131 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 13132 ctf_do_drop(m, NULL); 13133 return (0); 13134 } 13135 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 13136 ctf_do_drop(m, NULL); 13137 return (0); 13138 } 13139 } 13140 13141 /* 13142 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13143 * it's less than ts_recent, drop it. 13144 */ 13145 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13146 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13147 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13148 return (ret_val); 13149 } 13150 /* 13151 * In the SYN-RECEIVED state, validate that the packet belongs to 13152 * this connection before trimming the data to fit the receive 13153 * window. Check the sequence number versus IRS since we know the 13154 * sequence numbers haven't wrapped. This is a partial fix for the 13155 * "LAND" DoS attack. 13156 */ 13157 if (SEQ_LT(th->th_seq, tp->irs)) { 13158 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13159 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13160 return (1); 13161 } 13162 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13163 return (ret_val); 13164 } 13165 /* 13166 * If last ACK falls within this segment's sequence numbers, record 13167 * its timestamp. NOTE: 1) That the test incorporates suggestions 13168 * from the latest proposal of the tcplw@cray.com list (Braden 13169 * 1993/04/26). 2) That updating only on newer timestamps interferes 13170 * with our earlier PAWS tests, so this check should be solely 13171 * predicated on the sequence space of this segment. 3) That we 13172 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13173 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13174 * SEG.Len, This modified check allows us to overcome RFC1323's 13175 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13176 * p.869. In such cases, we can still calculate the RTT correctly 13177 * when RCV.NXT == Last.ACK.Sent. 13178 */ 13179 if ((to->to_flags & TOF_TS) != 0 && 13180 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13181 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13182 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13183 tp->ts_recent_age = tcp_ts_getticks(); 13184 tp->ts_recent = to->to_tsval; 13185 } 13186 tp->snd_wnd = tiwin; 13187 rack_validate_fo_sendwin_up(tp, rack); 13188 /* 13189 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13190 * is on (half-synchronized state), then queue data for later 13191 * processing; else drop segment and return. 13192 */ 13193 if ((thflags & TH_ACK) == 0) { 13194 if (tp->t_flags & TF_FASTOPEN) { 13195 rack_cc_conn_init(tp); 13196 } 13197 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13198 tiwin, thflags, nxt_pkt)); 13199 } 13200 KMOD_TCPSTAT_INC(tcps_connects); 13201 if (tp->t_flags & TF_SONOTCONN) { 13202 tp->t_flags &= ~TF_SONOTCONN; 13203 soisconnected(so); 13204 } 13205 /* Do window scaling? 
*/ 13206 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 13207 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 13208 tp->rcv_scale = tp->request_r_scale; 13209 } 13210 /* 13211 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 13212 * FIN-WAIT-1 13213 */ 13214 tp->t_starttime = ticks; 13215 if ((tp->t_flags & TF_FASTOPEN) && tp->t_tfo_pending) { 13216 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 13217 tp->t_tfo_pending = NULL; 13218 } 13219 if (tp->t_flags & TF_NEEDFIN) { 13220 tcp_state_change(tp, TCPS_FIN_WAIT_1); 13221 tp->t_flags &= ~TF_NEEDFIN; 13222 } else { 13223 tcp_state_change(tp, TCPS_ESTABLISHED); 13224 TCP_PROBE5(accept__established, NULL, tp, 13225 mtod(m, const char *), tp, th); 13226 /* 13227 * TFO connections call cc_conn_init() during SYN 13228 * processing. Calling it again here for such connections 13229 * is not harmless as it would undo the snd_cwnd reduction 13230 * that occurs when a TFO SYN|ACK is retransmitted. 13231 */ 13232 if (!(tp->t_flags & TF_FASTOPEN)) 13233 rack_cc_conn_init(tp); 13234 } 13235 /* 13236 * Account for the ACK of our SYN prior to 13237 * regular ACK processing below, except for 13238 * simultaneous SYN, which is handled later. 13239 */ 13240 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 13241 tp->snd_una++; 13242 /* 13243 * If segment contains data or ACK, will call tcp_reass() later; if 13244 * not, do so now to pass queued data to user. 13245 */ 13246 if (tlen == 0 && (thflags & TH_FIN) == 0) { 13247 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 13248 (struct mbuf *)0); 13249 if (tp->t_flags & TF_WAKESOR) { 13250 tp->t_flags &= ~TF_WAKESOR; 13251 /* NB: sorwakeup_locked() does an implicit unlock. */ 13252 sorwakeup_locked(so); 13253 } 13254 } 13255 tp->snd_wl1 = th->th_seq - 1; 13256 /* For syn-recv we need to possibly update the rtt */ 13257 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 13258 uint32_t t, mcts; 13259 13260 mcts = tcp_ts_getticks(); 13261 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 13262 if (!tp->t_rttlow || tp->t_rttlow > t) 13263 tp->t_rttlow = t; 13264 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); 13265 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 13266 tcp_rack_xmit_timer_commit(rack, tp); 13267 } 13268 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13269 return (ret_val); 13270 } 13271 if (tp->t_state == TCPS_FIN_WAIT_1) { 13272 /* We could have went to FIN_WAIT_1 (or EST) above */ 13273 /* 13274 * In FIN_WAIT_1 STATE in addition to the processing for the 13275 * ESTABLISHED state if our FIN is now acknowledged then 13276 * enter FIN_WAIT_2. 13277 */ 13278 if (ourfinisacked) { 13279 /* 13280 * If we can't receive any more data, then closing 13281 * user can proceed. Starting the timer is contrary 13282 * to the specification, but if we don't get a FIN 13283 * we'll hang forever. 13284 * 13285 * XXXjl: we should release the tp also, and use a 13286 * compressed state. 13287 */ 13288 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13289 soisdisconnected(so); 13290 tcp_timer_activate(tp, TT_2MSL, 13291 (tcp_fast_finwait2_recycle ? 
13292 tcp_finwait2_timeout : 13293 TP_MAXIDLE(tp))); 13294 } 13295 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13296 } 13297 } 13298 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13299 tiwin, thflags, nxt_pkt)); 13300 } 13301 13302 /* 13303 * Return value of 1, the TCB is unlocked and most 13304 * likely gone, return value of 0, the TCP is still 13305 * locked. 13306 */ 13307 static int 13308 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 13309 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13310 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13311 { 13312 int32_t ret_val = 0; 13313 int32_t orig_tlen = tlen; 13314 struct tcp_rack *rack; 13315 13316 /* 13317 * Header prediction: check for the two common cases of a 13318 * uni-directional data xfer. If the packet has no control flags, 13319 * is in-sequence, the window didn't change and we're not 13320 * retransmitting, it's a candidate. If the length is zero and the 13321 * ack moved forward, we're the sender side of the xfer. Just free 13322 * the data acked & wake any higher level process that was blocked 13323 * waiting for space. If the length is non-zero and the ack didn't 13324 * move, we're the receiver side. If we're getting packets in-order 13325 * (the reassembly queue is empty), add the data toc The socket 13326 * buffer and note that we need a delayed ack. Make sure that the 13327 * hidden state-flags are also off. Since we check for 13328 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 13329 */ 13330 rack = (struct tcp_rack *)tp->t_fb_ptr; 13331 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 13332 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) && 13333 __predict_true(SEGQ_EMPTY(tp)) && 13334 __predict_true(th->th_seq == tp->rcv_nxt)) { 13335 if (tlen == 0) { 13336 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 13337 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 13338 return (0); 13339 } 13340 } else { 13341 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 13342 tiwin, nxt_pkt, iptos)) { 13343 return (0); 13344 } 13345 } 13346 } 13347 ctf_calc_rwin(so, tp); 13348 13349 if ((thflags & TH_RST) || 13350 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13351 return (ctf_process_rst(m, th, so, tp)); 13352 13353 /* 13354 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13355 * synchronized state. 13356 */ 13357 if (thflags & TH_SYN) { 13358 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13359 return (ret_val); 13360 } 13361 /* 13362 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13363 * it's less than ts_recent, drop it. 13364 */ 13365 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13366 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13367 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13368 return (ret_val); 13369 } 13370 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13371 return (ret_val); 13372 } 13373 /* 13374 * If last ACK falls within this segment's sequence numbers, record 13375 * its timestamp. NOTE: 1) That the test incorporates suggestions 13376 * from the latest proposal of the tcplw@cray.com list (Braden 13377 * 1993/04/26). 2) That updating only on newer timestamps interferes 13378 * with our earlier PAWS tests, so this check should be solely 13379 * predicated on the sequence space of this segment. 
3) That we 13380 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13381 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13382 * SEG.Len, This modified check allows us to overcome RFC1323's 13383 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13384 * p.869. In such cases, we can still calculate the RTT correctly 13385 * when RCV.NXT == Last.ACK.Sent. 13386 */ 13387 if ((to->to_flags & TOF_TS) != 0 && 13388 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13389 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13390 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13391 tp->ts_recent_age = tcp_ts_getticks(); 13392 tp->ts_recent = to->to_tsval; 13393 } 13394 /* 13395 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13396 * is on (half-synchronized state), then queue data for later 13397 * processing; else drop segment and return. 13398 */ 13399 if ((thflags & TH_ACK) == 0) { 13400 if (tp->t_flags & TF_NEEDSYN) { 13401 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13402 tiwin, thflags, nxt_pkt)); 13403 13404 } else if (tp->t_flags & TF_ACKNOW) { 13405 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13406 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13407 return (ret_val); 13408 } else { 13409 ctf_do_drop(m, NULL); 13410 return (0); 13411 } 13412 } 13413 /* 13414 * Ack processing. 13415 */ 13416 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val, orig_tlen)) { 13417 return (ret_val); 13418 } 13419 if (sbavail(&so->so_snd)) { 13420 if (ctf_progress_timeout_check(tp, true)) { 13421 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 13422 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13423 return (1); 13424 } 13425 } 13426 /* State changes only happen in rack_process_data() */ 13427 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13428 tiwin, thflags, nxt_pkt)); 13429 } 13430 13431 /* 13432 * Return value of 1, the TCB is unlocked and most 13433 * likely gone, return value of 0, the TCP is still 13434 * locked. 13435 */ 13436 static int 13437 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 13438 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13439 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13440 { 13441 int32_t ret_val = 0; 13442 int32_t orig_tlen = tlen; 13443 13444 ctf_calc_rwin(so, tp); 13445 if ((thflags & TH_RST) || 13446 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13447 return (ctf_process_rst(m, th, so, tp)); 13448 /* 13449 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13450 * synchronized state. 13451 */ 13452 if (thflags & TH_SYN) { 13453 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13454 return (ret_val); 13455 } 13456 /* 13457 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13458 * it's less than ts_recent, drop it. 13459 */ 13460 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13461 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13462 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13463 return (ret_val); 13464 } 13465 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13466 return (ret_val); 13467 } 13468 /* 13469 * If last ACK falls within this segment's sequence numbers, record 13470 * its timestamp. NOTE: 1) That the test incorporates suggestions 13471 * from the latest proposal of the tcplw@cray.com list (Braden 13472 * 1993/04/26). 
2) That updating only on newer timestamps interferes 13473 * with our earlier PAWS tests, so this check should be solely 13474 * predicated on the sequence space of this segment. 3) That we 13475 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13476 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13477 * SEG.Len, This modified check allows us to overcome RFC1323's 13478 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13479 * p.869. In such cases, we can still calculate the RTT correctly 13480 * when RCV.NXT == Last.ACK.Sent. 13481 */ 13482 if ((to->to_flags & TOF_TS) != 0 && 13483 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13484 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13485 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13486 tp->ts_recent_age = tcp_ts_getticks(); 13487 tp->ts_recent = to->to_tsval; 13488 } 13489 /* 13490 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13491 * is on (half-synchronized state), then queue data for later 13492 * processing; else drop segment and return. 13493 */ 13494 if ((thflags & TH_ACK) == 0) { 13495 if (tp->t_flags & TF_NEEDSYN) { 13496 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13497 tiwin, thflags, nxt_pkt)); 13498 13499 } else if (tp->t_flags & TF_ACKNOW) { 13500 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13501 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13502 return (ret_val); 13503 } else { 13504 ctf_do_drop(m, NULL); 13505 return (0); 13506 } 13507 } 13508 /* 13509 * Ack processing. 13510 */ 13511 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val, orig_tlen)) { 13512 return (ret_val); 13513 } 13514 if (sbavail(&so->so_snd)) { 13515 if (ctf_progress_timeout_check(tp, true)) { 13516 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13517 tp, tick, PROGRESS_DROP, __LINE__); 13518 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13519 return (1); 13520 } 13521 } 13522 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13523 tiwin, thflags, nxt_pkt)); 13524 } 13525 13526 static int 13527 rack_check_data_after_close(struct mbuf *m, 13528 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 13529 { 13530 struct tcp_rack *rack; 13531 13532 rack = (struct tcp_rack *)tp->t_fb_ptr; 13533 if (rack->rc_allow_data_af_clo == 0) { 13534 close_now: 13535 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 13536 /* tcp_close will kill the inp pre-log the Reset */ 13537 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 13538 tp = tcp_close(tp); 13539 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 13540 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 13541 return (1); 13542 } 13543 if (sbavail(&so->so_snd) == 0) 13544 goto close_now; 13545 /* Ok we allow data that is ignored and a followup reset */ 13546 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 13547 tp->rcv_nxt = th->th_seq + *tlen; 13548 tp->t_flags2 |= TF2_DROP_AF_DATA; 13549 rack->r_wanted_output = 1; 13550 *tlen = 0; 13551 return (0); 13552 } 13553 13554 /* 13555 * Return value of 1, the TCB is unlocked and most 13556 * likely gone, return value of 0, the TCP is still 13557 * locked. 
13558 */ 13559 static int 13560 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 13561 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13562 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13563 { 13564 int32_t ret_val = 0; 13565 int32_t orig_tlen = tlen; 13566 int32_t ourfinisacked = 0; 13567 13568 ctf_calc_rwin(so, tp); 13569 13570 if ((thflags & TH_RST) || 13571 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13572 return (ctf_process_rst(m, th, so, tp)); 13573 /* 13574 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13575 * synchronized state. 13576 */ 13577 if (thflags & TH_SYN) { 13578 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13579 return (ret_val); 13580 } 13581 /* 13582 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13583 * it's less than ts_recent, drop it. 13584 */ 13585 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13586 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13587 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13588 return (ret_val); 13589 } 13590 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13591 return (ret_val); 13592 } 13593 /* 13594 * If new data are received on a connection after the user processes 13595 * are gone, then RST the other end. 13596 */ 13597 if ((tp->t_flags & TF_CLOSED) && tlen && 13598 rack_check_data_after_close(m, tp, &tlen, th, so)) 13599 return (1); 13600 /* 13601 * If last ACK falls within this segment's sequence numbers, record 13602 * its timestamp. NOTE: 1) That the test incorporates suggestions 13603 * from the latest proposal of the tcplw@cray.com list (Braden 13604 * 1993/04/26). 2) That updating only on newer timestamps interferes 13605 * with our earlier PAWS tests, so this check should be solely 13606 * predicated on the sequence space of this segment. 3) That we 13607 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13608 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13609 * SEG.Len, This modified check allows us to overcome RFC1323's 13610 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13611 * p.869. In such cases, we can still calculate the RTT correctly 13612 * when RCV.NXT == Last.ACK.Sent. 13613 */ 13614 if ((to->to_flags & TOF_TS) != 0 && 13615 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13616 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13617 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13618 tp->ts_recent_age = tcp_ts_getticks(); 13619 tp->ts_recent = to->to_tsval; 13620 } 13621 /* 13622 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13623 * is on (half-synchronized state), then queue data for later 13624 * processing; else drop segment and return. 13625 */ 13626 if ((thflags & TH_ACK) == 0) { 13627 if (tp->t_flags & TF_NEEDSYN) { 13628 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13629 tiwin, thflags, nxt_pkt)); 13630 } else if (tp->t_flags & TF_ACKNOW) { 13631 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13632 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13633 return (ret_val); 13634 } else { 13635 ctf_do_drop(m, NULL); 13636 return (0); 13637 } 13638 } 13639 /* 13640 * Ack processing. 13641 */ 13642 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13643 return (ret_val); 13644 } 13645 if (ourfinisacked) { 13646 /* 13647 * If we can't receive any more data, then closing user can 13648 * proceed. 
Starting the timer is contrary to the 13649 * specification, but if we don't get a FIN we'll hang 13650 * forever. 13651 * 13652 * XXXjl: we should release the tp also, and use a 13653 * compressed state. 13654 */ 13655 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13656 soisdisconnected(so); 13657 tcp_timer_activate(tp, TT_2MSL, 13658 (tcp_fast_finwait2_recycle ? 13659 tcp_finwait2_timeout : 13660 TP_MAXIDLE(tp))); 13661 } 13662 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13663 } 13664 if (sbavail(&so->so_snd)) { 13665 if (ctf_progress_timeout_check(tp, true)) { 13666 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13667 tp, tick, PROGRESS_DROP, __LINE__); 13668 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13669 return (1); 13670 } 13671 } 13672 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13673 tiwin, thflags, nxt_pkt)); 13674 } 13675 13676 /* 13677 * Return value of 1, the TCB is unlocked and most 13678 * likely gone, return value of 0, the TCP is still 13679 * locked. 13680 */ 13681 static int 13682 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 13683 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13684 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13685 { 13686 int32_t ret_val = 0; 13687 int32_t orig_tlen = tlen; 13688 int32_t ourfinisacked = 0; 13689 13690 ctf_calc_rwin(so, tp); 13691 13692 if ((thflags & TH_RST) || 13693 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13694 return (ctf_process_rst(m, th, so, tp)); 13695 /* 13696 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13697 * synchronized state. 13698 */ 13699 if (thflags & TH_SYN) { 13700 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13701 return (ret_val); 13702 } 13703 /* 13704 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13705 * it's less than ts_recent, drop it. 13706 */ 13707 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13708 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13709 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13710 return (ret_val); 13711 } 13712 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13713 return (ret_val); 13714 } 13715 /* 13716 * If last ACK falls within this segment's sequence numbers, record 13717 * its timestamp. NOTE: 1) That the test incorporates suggestions 13718 * from the latest proposal of the tcplw@cray.com list (Braden 13719 * 1993/04/26). 2) That updating only on newer timestamps interferes 13720 * with our earlier PAWS tests, so this check should be solely 13721 * predicated on the sequence space of this segment. 3) That we 13722 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13723 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13724 * SEG.Len, This modified check allows us to overcome RFC1323's 13725 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13726 * p.869. In such cases, we can still calculate the RTT correctly 13727 * when RCV.NXT == Last.ACK.Sent. 13728 */ 13729 if ((to->to_flags & TOF_TS) != 0 && 13730 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13731 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13732 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13733 tp->ts_recent_age = tcp_ts_getticks(); 13734 tp->ts_recent = to->to_tsval; 13735 } 13736 /* 13737 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13738 * is on (half-synchronized state), then queue data for later 13739 * processing; else drop segment and return. 
13740 */ 13741 if ((thflags & TH_ACK) == 0) { 13742 if (tp->t_flags & TF_NEEDSYN) { 13743 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13744 tiwin, thflags, nxt_pkt)); 13745 } else if (tp->t_flags & TF_ACKNOW) { 13746 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13747 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13748 return (ret_val); 13749 } else { 13750 ctf_do_drop(m, NULL); 13751 return (0); 13752 } 13753 } 13754 /* 13755 * Ack processing. 13756 */ 13757 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13758 return (ret_val); 13759 } 13760 if (ourfinisacked) { 13761 tcp_twstart(tp); 13762 m_freem(m); 13763 return (1); 13764 } 13765 if (sbavail(&so->so_snd)) { 13766 if (ctf_progress_timeout_check(tp, true)) { 13767 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13768 tp, tick, PROGRESS_DROP, __LINE__); 13769 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13770 return (1); 13771 } 13772 } 13773 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13774 tiwin, thflags, nxt_pkt)); 13775 } 13776 13777 /* 13778 * Return value of 1, the TCB is unlocked and most 13779 * likely gone, return value of 0, the TCP is still 13780 * locked. 13781 */ 13782 static int 13783 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 13784 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13785 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13786 { 13787 int32_t ret_val = 0; 13788 int32_t orig_tlen; 13789 int32_t ourfinisacked = 0; 13790 13791 ctf_calc_rwin(so, tp); 13792 13793 if ((thflags & TH_RST) || 13794 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13795 return (ctf_process_rst(m, th, so, tp)); 13796 /* 13797 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13798 * synchronized state. 13799 */ 13800 if (thflags & TH_SYN) { 13801 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13802 return (ret_val); 13803 } 13804 /* 13805 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13806 * it's less than ts_recent, drop it. 13807 */ 13808 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13809 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13810 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13811 return (ret_val); 13812 } 13813 orig_tlen = tlen; 13814 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13815 return (ret_val); 13816 } 13817 /* 13818 * If last ACK falls within this segment's sequence numbers, record 13819 * its timestamp. NOTE: 1) That the test incorporates suggestions 13820 * from the latest proposal of the tcplw@cray.com list (Braden 13821 * 1993/04/26). 2) That updating only on newer timestamps interferes 13822 * with our earlier PAWS tests, so this check should be solely 13823 * predicated on the sequence space of this segment. 3) That we 13824 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13825 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13826 * SEG.Len, This modified check allows us to overcome RFC1323's 13827 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13828 * p.869. In such cases, we can still calculate the RTT correctly 13829 * when RCV.NXT == Last.ACK.Sent. 
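 *
 * Worked example: with Last.ACK.Sent = 1000, a bare ACK arriving with
 * SEG.SEQ = 1000 and SEG.Len = 0 passes the modified test
 * (1000 <= 1000 <= 1000 + 0) and has its timestamp recorded, whereas
 * the strict RFC1323 form (Last.ACK.Sent < SEG.SEQ + SEG.Len) would
 * reject it and lose the RTT sample for that exchange.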
13830 */ 13831 if ((to->to_flags & TOF_TS) != 0 && 13832 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13833 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13834 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13835 tp->ts_recent_age = tcp_ts_getticks(); 13836 tp->ts_recent = to->to_tsval; 13837 } 13838 /* 13839 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13840 * is on (half-synchronized state), then queue data for later 13841 * processing; else drop segment and return. 13842 */ 13843 if ((thflags & TH_ACK) == 0) { 13844 if (tp->t_flags & TF_NEEDSYN) { 13845 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13846 tiwin, thflags, nxt_pkt)); 13847 } else if (tp->t_flags & TF_ACKNOW) { 13848 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13849 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13850 return (ret_val); 13851 } else { 13852 ctf_do_drop(m, NULL); 13853 return (0); 13854 } 13855 } 13856 /* 13857 * case TCPS_LAST_ACK: Ack processing. 13858 */ 13859 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13860 return (ret_val); 13861 } 13862 if (ourfinisacked) { 13863 tp = tcp_close(tp); 13864 ctf_do_drop(m, tp); 13865 return (1); 13866 } 13867 if (sbavail(&so->so_snd)) { 13868 if (ctf_progress_timeout_check(tp, true)) { 13869 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13870 tp, tick, PROGRESS_DROP, __LINE__); 13871 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13872 return (1); 13873 } 13874 } 13875 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13876 tiwin, thflags, nxt_pkt)); 13877 } 13878 13879 /* 13880 * Return value of 1, the TCB is unlocked and most 13881 * likely gone, return value of 0, the TCP is still 13882 * locked. 13883 */ 13884 static int 13885 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 13886 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13887 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13888 { 13889 int32_t ret_val = 0; 13890 int32_t orig_tlen = tlen; 13891 int32_t ourfinisacked = 0; 13892 13893 ctf_calc_rwin(so, tp); 13894 13895 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 13896 if ((thflags & TH_RST) || 13897 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13898 return (ctf_process_rst(m, th, so, tp)); 13899 /* 13900 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13901 * synchronized state. 13902 */ 13903 if (thflags & TH_SYN) { 13904 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13905 return (ret_val); 13906 } 13907 /* 13908 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13909 * it's less than ts_recent, drop it. 13910 */ 13911 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13912 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13913 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13914 return (ret_val); 13915 } 13916 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13917 return (ret_val); 13918 } 13919 /* 13920 * If new data are received on a connection after the user processes 13921 * are gone, then RST the other end. 13922 */ 13923 if ((tp->t_flags & TF_CLOSED) && tlen && 13924 rack_check_data_after_close(m, tp, &tlen, th, so)) 13925 return (1); 13926 /* 13927 * If last ACK falls within this segment's sequence numbers, record 13928 * its timestamp. 
NOTE: 1) That the test incorporates suggestions 13929 * from the latest proposal of the tcplw@cray.com list (Braden 13930 * 1993/04/26). 2) That updating only on newer timestamps interferes 13931 * with our earlier PAWS tests, so this check should be solely 13932 * predicated on the sequence space of this segment. 3) That we 13933 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13934 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13935 * SEG.Len, This modified check allows us to overcome RFC1323's 13936 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13937 * p.869. In such cases, we can still calculate the RTT correctly 13938 * when RCV.NXT == Last.ACK.Sent. 13939 */ 13940 if ((to->to_flags & TOF_TS) != 0 && 13941 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13942 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13943 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13944 tp->ts_recent_age = tcp_ts_getticks(); 13945 tp->ts_recent = to->to_tsval; 13946 } 13947 /* 13948 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13949 * is on (half-synchronized state), then queue data for later 13950 * processing; else drop segment and return. 13951 */ 13952 if ((thflags & TH_ACK) == 0) { 13953 if (tp->t_flags & TF_NEEDSYN) { 13954 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13955 tiwin, thflags, nxt_pkt)); 13956 } else if (tp->t_flags & TF_ACKNOW) { 13957 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13958 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13959 return (ret_val); 13960 } else { 13961 ctf_do_drop(m, NULL); 13962 return (0); 13963 } 13964 } 13965 /* 13966 * Ack processing. 13967 */ 13968 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13969 return (ret_val); 13970 } 13971 if (sbavail(&so->so_snd)) { 13972 if (ctf_progress_timeout_check(tp, true)) { 13973 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13974 tp, tick, PROGRESS_DROP, __LINE__); 13975 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13976 return (1); 13977 } 13978 } 13979 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13980 tiwin, thflags, nxt_pkt)); 13981 } 13982 13983 static void inline 13984 rack_clear_rate_sample(struct tcp_rack *rack) 13985 { 13986 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 13987 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 13988 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 13989 } 13990 13991 static void 13992 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 13993 { 13994 uint64_t bw_est, rate_wanted; 13995 int chged = 0; 13996 uint32_t user_max, orig_min, orig_max; 13997 13998 #ifdef TCP_REQUEST_TRK 13999 if (rack->rc_hybrid_mode && 14000 (rack->r_ctl.rc_pace_max_segs != 0) && 14001 (rack_hybrid_allow_set_maxseg == 1) && 14002 (rack->r_ctl.rc_last_sft != NULL)) { 14003 rack->r_ctl.rc_last_sft->hybrid_flags &= ~TCP_HYBRID_PACING_SETMSS; 14004 return; 14005 } 14006 #endif 14007 orig_min = rack->r_ctl.rc_pace_min_segs; 14008 orig_max = rack->r_ctl.rc_pace_max_segs; 14009 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 14010 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 14011 chged = 1; 14012 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 14013 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 14014 if (user_max != rack->r_ctl.rc_pace_max_segs) 14015 chged = 1; 14016 } 14017 if (rack->rc_force_max_seg) { 14018 rack->r_ctl.rc_pace_max_segs = user_max; 
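/*
 * user_max above is rc_user_set_max_segs full-sized segments worth of
 * bytes; for example, with a 1448 byte MSS and a user setting of 40
 * segments the forced pacing burst is capped at 57920 bytes.
 */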
14019 } else if (rack->use_fixed_rate) { 14020 bw_est = rack_get_bw(rack); 14021 if ((rack->r_ctl.crte == NULL) || 14022 (bw_est != rack->r_ctl.crte->rate)) { 14023 rack->r_ctl.rc_pace_max_segs = user_max; 14024 } else { 14025 /* We are pacing right at the hardware rate */ 14026 uint32_t segsiz, pace_one; 14027 14028 if (rack_pace_one_seg || 14029 (rack->r_ctl.rc_user_set_min_segs == 1)) 14030 pace_one = 1; 14031 else 14032 pace_one = 0; 14033 segsiz = min(ctf_fixed_maxseg(tp), 14034 rack->r_ctl.rc_pace_min_segs); 14035 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor( 14036 tp, bw_est, segsiz, pace_one, 14037 rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); 14038 } 14039 } else if (rack->rc_always_pace) { 14040 if (rack->r_ctl.gp_bw || 14041 rack->r_ctl.init_rate) { 14042 /* We have a rate of some sort set */ 14043 uint32_t orig; 14044 14045 bw_est = rack_get_bw(rack); 14046 orig = rack->r_ctl.rc_pace_max_segs; 14047 if (fill_override) 14048 rate_wanted = *fill_override; 14049 else 14050 rate_wanted = rack_get_gp_est(rack); 14051 if (rate_wanted) { 14052 /* We have something */ 14053 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 14054 rate_wanted, 14055 ctf_fixed_maxseg(rack->rc_tp)); 14056 } else 14057 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 14058 if (orig != rack->r_ctl.rc_pace_max_segs) 14059 chged = 1; 14060 } else if ((rack->r_ctl.gp_bw == 0) && 14061 (rack->r_ctl.rc_pace_max_segs == 0)) { 14062 /* 14063 * If we have nothing limit us to bursting 14064 * out IW sized pieces. 14065 */ 14066 chged = 1; 14067 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 14068 } 14069 } 14070 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 14071 chged = 1; 14072 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 14073 } 14074 if (chged) 14075 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 14076 } 14077 14078 14079 static void 14080 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack, int32_t flags) 14081 { 14082 #ifdef INET6 14083 struct ip6_hdr *ip6 = NULL; 14084 #endif 14085 #ifdef INET 14086 struct ip *ip = NULL; 14087 #endif 14088 struct udphdr *udp = NULL; 14089 14090 /* Ok lets fill in the fast block, it can only be used with no IP options! 
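 * The fsb is a prebuilt IP (or IPv6), optional UDP tunnel and TCP
 * header template; the fast output paths copy it into each packet and
 * patch only the per-packet fields, which is why a connection using
 * IP options cannot use it.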
*/ 14091 #ifdef INET6 14092 if (rack->r_is_v6) { 14093 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 14094 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 14095 if (tp->t_port) { 14096 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 14097 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 14098 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 14099 udp->uh_dport = tp->t_port; 14100 rack->r_ctl.fsb.udp = udp; 14101 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 14102 } else 14103 { 14104 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 14105 rack->r_ctl.fsb.udp = NULL; 14106 } 14107 tcpip_fillheaders(rack->rc_inp, 14108 tp->t_port, 14109 ip6, rack->r_ctl.fsb.th); 14110 rack->r_ctl.fsb.hoplimit = in6_selecthlim(rack->rc_inp, NULL); 14111 } else 14112 #endif /* INET6 */ 14113 #ifdef INET 14114 { 14115 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 14116 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 14117 if (tp->t_port) { 14118 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 14119 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 14120 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 14121 udp->uh_dport = tp->t_port; 14122 rack->r_ctl.fsb.udp = udp; 14123 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 14124 } else 14125 { 14126 rack->r_ctl.fsb.udp = NULL; 14127 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 14128 } 14129 tcpip_fillheaders(rack->rc_inp, 14130 tp->t_port, 14131 ip, rack->r_ctl.fsb.th); 14132 rack->r_ctl.fsb.hoplimit = tptoinpcb(tp)->inp_ip_ttl; 14133 } 14134 #endif 14135 rack->r_ctl.fsb.recwin = lmin(lmax(sbspace(&tptosocket(tp)->so_rcv), 0), 14136 (long)TCP_MAXWIN << tp->rcv_scale); 14137 rack->r_fsb_inited = 1; 14138 } 14139 14140 static int 14141 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 14142 { 14143 /* 14144 * Allocate the larger of spaces V6 if available else just 14145 * V4 and include udphdr (overbook) 14146 */ 14147 #ifdef INET6 14148 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 14149 #else 14150 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 14151 #endif 14152 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 14153 M_TCPFSB, M_NOWAIT|M_ZERO); 14154 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 14155 return (ENOMEM); 14156 } 14157 rack->r_fsb_inited = 0; 14158 return (0); 14159 } 14160 14161 static void 14162 rack_log_hystart_event(struct tcp_rack *rack, uint32_t high_seq, uint8_t mod) 14163 { 14164 /* 14165 * Types of logs (mod value) 14166 * 20 - Initial round setup 14167 * 21 - Rack declares a new round. 
14168 */ 14169 struct tcpcb *tp; 14170 14171 tp = rack->rc_tp; 14172 if (tcp_bblogging_on(tp)) { 14173 union tcp_log_stackspecific log; 14174 struct timeval tv; 14175 14176 memset(&log, 0, sizeof(log)); 14177 log.u_bbr.flex1 = rack->r_ctl.current_round; 14178 log.u_bbr.flex2 = rack->r_ctl.roundends; 14179 log.u_bbr.flex3 = high_seq; 14180 log.u_bbr.flex4 = tp->snd_max; 14181 log.u_bbr.flex8 = mod; 14182 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14183 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 14184 log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes; 14185 TCP_LOG_EVENTP(tp, NULL, 14186 &tptosocket(tp)->so_rcv, 14187 &tptosocket(tp)->so_snd, 14188 TCP_HYSTART, 0, 14189 0, &log, false, &tv); 14190 } 14191 } 14192 14193 static void 14194 rack_deferred_init(struct tcpcb *tp, struct tcp_rack *rack) 14195 { 14196 rack->rack_deferred_inited = 1; 14197 rack->r_ctl.roundends = tp->snd_max; 14198 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 14199 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 14200 } 14201 14202 static void 14203 rack_init_retransmit_value(struct tcp_rack *rack, int ctl) 14204 { 14205 /* Retransmit bit controls. 14206 * 14207 * The setting of these values control one of 14208 * three settings you can have and dictate 14209 * how rack does retransmissions. Note this 14210 * is in *any* mode i.e. pacing on or off DGP 14211 * fixed rate pacing, or just bursting rack. 14212 * 14213 * 1 - Use full sized retransmits i.e. limit 14214 * the size to whatever the pace_max_segments 14215 * size is. 14216 * 14217 * 2 - Use pacer min granularity as a guide to 14218 * the size combined with the current calculated 14219 * goodput b/w measurement. So for example if 14220 * the goodput is measured at 20Mbps we would 14221 * calculate 8125 (pacer minimum 250usec in 14222 * that b/w) and then round it up to the next 14223 * MSS i.e. for 1448 mss 6 MSS or 8688 bytes. 14224 * 14225 * 0 - The rack default 1 MSS (anything not 0/1/2 14226 * fall here too if we are setting via rack_init()). 
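 *
 * Put another way: 1 caps a retransmission at the pacing burst size,
 * 2 sizes it to roughly what the measured goodput would move in one
 * pacer minimum interval (rounded up to whole MSS segments), and 0
 * keeps the classic single-MSS retransmit.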
14227 * 14228 */ 14229 if (ctl == 1) { 14230 rack->full_size_rxt = 1; 14231 rack->shape_rxt_to_pacing_min = 0; 14232 } else if (ctl == 2) { 14233 rack->full_size_rxt = 0; 14234 rack->shape_rxt_to_pacing_min = 1; 14235 } else { 14236 rack->full_size_rxt = 0; 14237 rack->shape_rxt_to_pacing_min = 0; 14238 } 14239 } 14240 14241 static void 14242 rack_log_chg_info(struct tcpcb *tp, struct tcp_rack *rack, uint8_t mod, 14243 uint32_t flex1, 14244 uint32_t flex2, 14245 uint32_t flex3) 14246 { 14247 if (tcp_bblogging_on(rack->rc_tp)) { 14248 union tcp_log_stackspecific log; 14249 struct timeval tv; 14250 14251 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14252 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14253 log.u_bbr.flex8 = mod; 14254 log.u_bbr.flex1 = flex1; 14255 log.u_bbr.flex2 = flex2; 14256 log.u_bbr.flex3 = flex3; 14257 tcp_log_event(tp, NULL, NULL, NULL, TCP_CHG_QUERY, 0, 14258 0, &log, false, NULL, __func__, __LINE__, &tv); 14259 } 14260 } 14261 14262 static int 14263 rack_chg_query(struct tcpcb *tp, struct tcp_query_resp *reqr) 14264 { 14265 struct tcp_rack *rack; 14266 struct rack_sendmap *rsm; 14267 int i; 14268 14269 14270 rack = (struct tcp_rack *)tp->t_fb_ptr; 14271 switch (reqr->req) { 14272 case TCP_QUERY_SENDMAP: 14273 if ((reqr->req_param == tp->snd_max) || 14274 (tp->snd_max == tp->snd_una)){ 14275 /* Unlikely */ 14276 return (0); 14277 } 14278 rsm = tqhash_find(rack->r_ctl.tqh, reqr->req_param); 14279 if (rsm == NULL) { 14280 /* Can't find that seq -- unlikely */ 14281 return (0); 14282 } 14283 reqr->sendmap_start = rsm->r_start; 14284 reqr->sendmap_end = rsm->r_end; 14285 reqr->sendmap_send_cnt = rsm->r_rtr_cnt; 14286 reqr->sendmap_fas = rsm->r_fas; 14287 if (reqr->sendmap_send_cnt > SNDMAP_NRTX) 14288 reqr->sendmap_send_cnt = SNDMAP_NRTX; 14289 for(i=0; i<reqr->sendmap_send_cnt; i++) 14290 reqr->sendmap_time[i] = rsm->r_tim_lastsent[i]; 14291 reqr->sendmap_ack_arrival = rsm->r_ack_arrival; 14292 reqr->sendmap_flags = rsm->r_flags & SNDMAP_MASK; 14293 reqr->sendmap_r_rtr_bytes = rsm->r_rtr_bytes; 14294 reqr->sendmap_dupacks = rsm->r_dupack; 14295 rack_log_chg_info(tp, rack, 1, 14296 rsm->r_start, 14297 rsm->r_end, 14298 rsm->r_flags); 14299 return(1); 14300 break; 14301 case TCP_QUERY_TIMERS_UP: 14302 if (rack->r_ctl.rc_hpts_flags == 0) { 14303 /* no timers up */ 14304 return (0); 14305 } 14306 reqr->timer_hpts_flags = rack->r_ctl.rc_hpts_flags; 14307 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14308 reqr->timer_pacing_to = rack->r_ctl.rc_last_output_to; 14309 } 14310 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 14311 reqr->timer_timer_exp = rack->r_ctl.rc_timer_exp; 14312 } 14313 rack_log_chg_info(tp, rack, 2, 14314 rack->r_ctl.rc_hpts_flags, 14315 rack->r_ctl.rc_last_output_to, 14316 rack->r_ctl.rc_timer_exp); 14317 return (1); 14318 break; 14319 case TCP_QUERY_RACK_TIMES: 14320 /* Reordering items */ 14321 reqr->rack_num_dsacks = rack->r_ctl.num_dsack; 14322 reqr->rack_reorder_ts = rack->r_ctl.rc_reorder_ts; 14323 /* Timerstamps and timers */ 14324 reqr->rack_rxt_last_time = rack->r_ctl.rc_tlp_rxt_last_time; 14325 reqr->rack_min_rtt = rack->r_ctl.rc_rack_min_rtt; 14326 reqr->rack_rtt = rack->rc_rack_rtt; 14327 reqr->rack_tmit_time = rack->r_ctl.rc_rack_tmit_time; 14328 reqr->rack_srtt_measured = rack->rc_srtt_measure_made; 14329 /* PRR data */ 14330 reqr->rack_sacked = rack->r_ctl.rc_sacked; 14331 reqr->rack_holes_rxt = rack->r_ctl.rc_holes_rxt; 14332 reqr->rack_prr_delivered = rack->r_ctl.rc_prr_delivered; 14333 reqr->rack_prr_recovery_fs = 
rack->r_ctl.rc_prr_recovery_fs; 14334 reqr->rack_prr_sndcnt = rack->r_ctl.rc_prr_sndcnt; 14335 reqr->rack_prr_out = rack->r_ctl.rc_prr_out; 14336 /* TLP and persists info */ 14337 reqr->rack_tlp_out = rack->rc_tlp_in_progress; 14338 reqr->rack_tlp_cnt_out = rack->r_ctl.rc_tlp_cnt_out; 14339 if (rack->rc_in_persist) { 14340 reqr->rack_time_went_idle = rack->r_ctl.rc_went_idle_time; 14341 reqr->rack_in_persist = 1; 14342 } else { 14343 reqr->rack_time_went_idle = 0; 14344 reqr->rack_in_persist = 0; 14345 } 14346 if (rack->r_wanted_output) 14347 reqr->rack_wanted_output = 1; 14348 else 14349 reqr->rack_wanted_output = 0; 14350 return (1); 14351 break; 14352 default: 14353 return (-EINVAL); 14354 } 14355 } 14356 14357 static void 14358 rack_switch_failed(struct tcpcb *tp) 14359 { 14360 /* 14361 * This method gets called if a stack switch was 14362 * attempted and it failed. We are left 14363 * but our hpts timers were stopped and we 14364 * need to validate time units and t_flags2. 14365 */ 14366 struct tcp_rack *rack; 14367 struct timeval tv; 14368 uint32_t cts; 14369 uint32_t toval; 14370 struct hpts_diag diag; 14371 14372 rack = (struct tcp_rack *)tp->t_fb_ptr; 14373 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC); 14374 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 14375 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 14376 else 14377 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 14378 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 14379 tp->t_flags2 |= TF2_MBUF_ACKCMP; 14380 if (tp->t_in_hpts > IHPTS_NONE) { 14381 /* Strange */ 14382 return; 14383 } 14384 cts = tcp_get_usecs(&tv); 14385 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14386 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 14387 toval = rack->r_ctl.rc_last_output_to - cts; 14388 } else { 14389 /* one slot please */ 14390 toval = HPTS_TICKS_PER_SLOT; 14391 } 14392 } else if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 14393 if (TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 14394 toval = rack->r_ctl.rc_timer_exp - cts; 14395 } else { 14396 /* one slot please */ 14397 toval = HPTS_TICKS_PER_SLOT; 14398 } 14399 } else 14400 toval = HPTS_TICKS_PER_SLOT; 14401 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(toval), 14402 __LINE__, &diag); 14403 rack_log_hpts_diag(rack, cts, &diag, &tv); 14404 } 14405 14406 static int 14407 rack_init_outstanding(struct tcpcb *tp, struct tcp_rack *rack, uint32_t us_cts, void *ptr) 14408 { 14409 struct rack_sendmap *rsm, *ersm; 14410 int insret __diagused; 14411 /* 14412 * When initing outstanding, we must be quite careful 14413 * to not refer to tp->t_fb_ptr. This has the old rack 14414 * pointer in it, not the "new" one (when we are doing 14415 * a stack switch). 
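 * (The void *ptr argument is the freshly allocated rack control block
 * itself; rack_init() passes *ptr, the same allocation as the rack
 * argument, and it is what gets freed on an allocation failure below,
 * never anything reached through tp->t_fb_ptr.)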
14416 */ 14417 14418 14419 if (tp->t_fb->tfb_chg_query == NULL) { 14420 /* Create a send map for the current outstanding data */ 14421 14422 rsm = rack_alloc(rack); 14423 if (rsm == NULL) { 14424 uma_zfree(rack_pcb_zone, ptr); 14425 return (ENOMEM); 14426 } 14427 rsm->r_no_rtt_allowed = 1; 14428 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 14429 rsm->r_rtr_cnt = 1; 14430 rsm->r_rtr_bytes = 0; 14431 if (tp->t_flags & TF_SENTFIN) 14432 rsm->r_flags |= RACK_HAS_FIN; 14433 rsm->r_end = tp->snd_max; 14434 if (tp->snd_una == tp->iss) { 14435 /* The data space is one beyond snd_una */ 14436 rsm->r_flags |= RACK_HAS_SYN; 14437 rsm->r_start = tp->iss; 14438 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); 14439 } else 14440 rsm->r_start = tp->snd_una; 14441 rsm->r_dupack = 0; 14442 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 14443 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 14444 if (rsm->m) { 14445 rsm->orig_m_len = rsm->m->m_len; 14446 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 14447 } else { 14448 rsm->orig_m_len = 0; 14449 rsm->orig_t_space = 0; 14450 } 14451 } else { 14452 /* 14453 * This can happen if we have a stand-alone FIN or 14454 * SYN. 14455 */ 14456 rsm->m = NULL; 14457 rsm->orig_m_len = 0; 14458 rsm->orig_t_space = 0; 14459 rsm->soff = 0; 14460 } 14461 #ifdef INVARIANTS 14462 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 14463 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p", 14464 insret, rack, rsm); 14465 } 14466 #else 14467 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 14468 #endif 14469 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 14470 rsm->r_in_tmap = 1; 14471 } else { 14472 /* We have a query mechanism, lets use it */ 14473 struct tcp_query_resp qr; 14474 int i; 14475 tcp_seq at; 14476 14477 at = tp->snd_una; 14478 while (at != tp->snd_max) { 14479 memset(&qr, 0, sizeof(qr)); 14480 qr.req = TCP_QUERY_SENDMAP; 14481 qr.req_param = at; 14482 if ((*tp->t_fb->tfb_chg_query)(tp, &qr) == 0) 14483 break; 14484 /* Move forward */ 14485 at = qr.sendmap_end; 14486 /* Now lets build the entry for this one */ 14487 rsm = rack_alloc(rack); 14488 if (rsm == NULL) { 14489 uma_zfree(rack_pcb_zone, ptr); 14490 return (ENOMEM); 14491 } 14492 memset(rsm, 0, sizeof(struct rack_sendmap)); 14493 /* Now configure the rsm and insert it */ 14494 rsm->r_dupack = qr.sendmap_dupacks; 14495 rsm->r_start = qr.sendmap_start; 14496 rsm->r_end = qr.sendmap_end; 14497 if (qr.sendmap_fas) 14498 rsm->r_fas = qr.sendmap_end; 14499 else 14500 rsm->r_fas = rsm->r_start - tp->snd_una; 14501 /* 14502 * We have carefully aligned the bits 14503 * so that all we have to do is copy over 14504 * the bits with the mask. 
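 * (Concretely, rsm->r_flags is taken as qr.sendmap_flags & SNDMAP_MASK
 * just below; no per-flag translation is needed.)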
14505 */ 14506 rsm->r_flags = qr.sendmap_flags & SNDMAP_MASK; 14507 rsm->r_rtr_bytes = qr.sendmap_r_rtr_bytes; 14508 rsm->r_rtr_cnt = qr.sendmap_send_cnt; 14509 rsm->r_ack_arrival = qr.sendmap_ack_arrival; 14510 for (i=0 ; i<rsm->r_rtr_cnt; i++) 14511 rsm->r_tim_lastsent[i] = qr.sendmap_time[i]; 14512 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 14513 (rsm->r_start - tp->snd_una), &rsm->soff); 14514 if (rsm->m) { 14515 rsm->orig_m_len = rsm->m->m_len; 14516 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 14517 } else { 14518 rsm->orig_m_len = 0; 14519 rsm->orig_t_space = 0; 14520 } 14521 #ifdef INVARIANTS 14522 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 14523 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p", 14524 insret, rack, rsm); 14525 } 14526 #else 14527 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 14528 #endif 14529 if ((rsm->r_flags & RACK_ACKED) == 0) { 14530 TAILQ_FOREACH(ersm, &rack->r_ctl.rc_tmap, r_tnext) { 14531 if (ersm->r_tim_lastsent[(ersm->r_rtr_cnt-1)] > 14532 rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) { 14533 /* 14534 * If the existing ersm was sent at 14535 * a later time than the new one, then 14536 * the new one should appear ahead of this 14537 * ersm. 14538 */ 14539 rsm->r_in_tmap = 1; 14540 TAILQ_INSERT_BEFORE(ersm, rsm, r_tnext); 14541 break; 14542 } 14543 } 14544 if (rsm->r_in_tmap == 0) { 14545 /* 14546 * Not found so shove it on the tail. 14547 */ 14548 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 14549 rsm->r_in_tmap = 1; 14550 } 14551 } else { 14552 if ((rack->r_ctl.rc_sacklast == NULL) || 14553 (SEQ_GT(rsm->r_end, rack->r_ctl.rc_sacklast->r_end))) { 14554 rack->r_ctl.rc_sacklast = rsm; 14555 } 14556 } 14557 rack_log_chg_info(tp, rack, 3, 14558 rsm->r_start, 14559 rsm->r_end, 14560 rsm->r_flags); 14561 } 14562 } 14563 return (0); 14564 } 14565 14566 14567 static int32_t 14568 rack_init(struct tcpcb *tp, void **ptr) 14569 { 14570 struct inpcb *inp = tptoinpcb(tp); 14571 struct tcp_rack *rack = NULL; 14572 uint32_t iwin, snt, us_cts; 14573 size_t sz; 14574 int err, no_query; 14575 14576 tcp_hpts_init(tp); 14577 14578 /* 14579 * First are we the initial or are we a switched stack? 14580 * If we are initing via tcp_newtcppcb the ptr passed 14581 * will be tp->t_fb_ptr. If its a stack switch that 14582 * has a previous stack we can query it will be a local 14583 * var that will in the end be set into t_fb_ptr. 14584 */ 14585 if (ptr == &tp->t_fb_ptr) 14586 no_query = 1; 14587 else 14588 no_query = 0; 14589 *ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 14590 if (*ptr == NULL) { 14591 /* 14592 * We need to allocate memory but cant. The INP and INP_INFO 14593 * locks and they are recursive (happens during setup. 
So a 14594 * scheme to drop the locks fails :( 14595 * 14596 */ 14597 return(ENOMEM); 14598 } 14599 memset(*ptr, 0, sizeof(struct tcp_rack)); 14600 rack = (struct tcp_rack *)*ptr; 14601 rack->r_ctl.tqh = malloc(sizeof(struct tailq_hash), M_TCPFSB, M_NOWAIT); 14602 if (rack->r_ctl.tqh == NULL) { 14603 uma_zfree(rack_pcb_zone, rack); 14604 return(ENOMEM); 14605 } 14606 tqhash_init(rack->r_ctl.tqh); 14607 TAILQ_INIT(&rack->r_ctl.rc_free); 14608 TAILQ_INIT(&rack->r_ctl.rc_tmap); 14609 rack->rc_tp = tp; 14610 rack->rc_inp = inp; 14611 /* Set the flag */ 14612 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0; 14613 /* Probably not needed but lets be sure */ 14614 rack_clear_rate_sample(rack); 14615 /* 14616 * Save off the default values, socket options will poke 14617 * at these if pacing is not on or we have not yet 14618 * reached where pacing is on (gp_ready/fixed enabled). 14619 * When they get set into the CC module (when gp_ready 14620 * is enabled or we enable fixed) then we will set these 14621 * values into the CC and place in here the old values 14622 * so we have a restoral. Then we will set the flag 14623 * rc_pacing_cc_set. That way whenever we turn off pacing 14624 * or switch off this stack, we will know to go restore 14625 * the saved values. 14626 * 14627 * We specifically put into the beta the ecn value for pacing. 14628 */ 14629 rack->rc_new_rnd_needed = 1; 14630 rack->r_ctl.rc_split_limit = V_tcp_map_split_limit; 14631 /* We want abe like behavior as well */ 14632 14633 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; 14634 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 14635 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 14636 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 14637 if (rack_fill_cw_state) 14638 rack->rc_pace_to_cwnd = 1; 14639 if (rack_pacing_min_seg) 14640 rack->r_ctl.rc_user_set_min_segs = rack_pacing_min_seg; 14641 if (use_rack_rr) 14642 rack->use_rack_rr = 1; 14643 if (rack_dnd_default) { 14644 rack->rc_pace_dnd = 1; 14645 } 14646 if (V_tcp_delack_enabled) 14647 tp->t_delayed_ack = 1; 14648 else 14649 tp->t_delayed_ack = 0; 14650 #ifdef TCP_ACCOUNTING 14651 if (rack_tcp_accounting) { 14652 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 14653 } 14654 #endif 14655 rack->r_ctl.pcm_i.cnt_alloc = RACK_DEFAULT_PCM_ARRAY; 14656 sz = (sizeof(struct rack_pcm_stats) * rack->r_ctl.pcm_i.cnt_alloc); 14657 rack->r_ctl.pcm_s = malloc(sz,M_TCPPCM, M_NOWAIT); 14658 if (rack->r_ctl.pcm_s == NULL) { 14659 rack->r_ctl.pcm_i.cnt_alloc = 0; 14660 } 14661 #ifdef NETFLIX_STATS 14662 rack->r_ctl.side_chan_dis_mask = tcp_sidechannel_disable_mask; 14663 #endif 14664 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss; 14665 rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_per_upper_bound_ca; 14666 if (rack_enable_shared_cwnd) 14667 rack->rack_enable_scwnd = 1; 14668 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; 14669 rack->rc_user_set_max_segs = rack_hptsi_segments; 14670 rack->r_ctl.max_reduction = rack_max_reduce; 14671 rack->rc_force_max_seg = 0; 14672 TAILQ_INIT(&rack->r_ctl.opt_list); 14673 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn; 14674 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn; 14675 if (rack_hibeta_setting) { 14676 rack->rack_hibeta = 1; 14677 if ((rack_hibeta_setting >= 50) && 14678 (rack_hibeta_setting <= 100)) { 14679 rack->r_ctl.rc_saved_beta.beta = rack_hibeta_setting; 14680 rack->r_ctl.saved_hibeta = rack_hibeta_setting; 14681 } 14682 } else { 14683 rack->r_ctl.saved_hibeta = 50; 
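/*
 * No usable hibeta setting was supplied, so remember 50 as the value
 * to restore; a beta of 50 percent corresponds to the conventional
 * halving of the cwnd on congestion.
 */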
14684 } 14685 /* 14686 * We initialize to all ones so we never match 0 14687 * just in case the client sends in 0, it hopefully 14688 * will never have all 1's in ms :-) 14689 */ 14690 rack->r_ctl.last_tm_mark = 0xffffffffffffffff; 14691 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 14692 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 14693 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 14694 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 14695 rack->r_ctl.rc_highest_us_rtt = 0; 14696 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 14697 rack->pcm_enabled = rack_pcm_is_enabled; 14698 if (rack_fillcw_bw_cap) 14699 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 14700 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 14701 if (rack_use_cmp_acks) 14702 rack->r_use_cmp_ack = 1; 14703 if (rack_disable_prr) 14704 rack->rack_no_prr = 1; 14705 if (rack_gp_no_rec_chg) 14706 rack->rc_gp_no_rec_chg = 1; 14707 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 14708 rack->r_ctl.pacing_method |= RACK_REG_PACING; 14709 rack->rc_always_pace = 1; 14710 if (rack->rack_hibeta) 14711 rack_set_cc_pacing(rack); 14712 } else 14713 rack->rc_always_pace = 0; 14714 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 14715 rack->r_mbuf_queue = 1; 14716 else 14717 rack->r_mbuf_queue = 0; 14718 rack_set_pace_segments(tp, rack, __LINE__, NULL); 14719 if (rack_limits_scwnd) 14720 rack->r_limit_scw = 1; 14721 else 14722 rack->r_limit_scw = 0; 14723 rack_init_retransmit_value(rack, rack_rxt_controls); 14724 rack->rc_labc = V_tcp_abc_l_var; 14725 if (rack_honors_hpts_min_to) 14726 rack->r_use_hpts_min = 1; 14727 if (tp->snd_una != 0) { 14728 rack->rc_sendvars_notset = 0; 14729 /* 14730 * Make sure any TCP timers are not running. 14731 */ 14732 tcp_timer_stop(tp); 14733 } else { 14734 /* 14735 * Server side, we are called from the 14736 * syn-cache. This means none of the 14737 * snd_una/max are set yet so we have 14738 * to defer this until the first send. 
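 * (The sequence-number dependent pieces are picked up later by the
 * deferred init path, rack_deferred_init() above, once the send
 * variables are valid.)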
14739 */ 14740 rack->rc_sendvars_notset = 1; 14741 } 14742 14743 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; 14744 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 14745 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 14746 rack->r_ctl.rc_min_to = rack_min_to; 14747 microuptime(&rack->r_ctl.act_rcv_time); 14748 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 14749 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; 14750 if (rack_hw_up_only) 14751 rack->r_up_only = 1; 14752 if (rack_do_dyn_mul) { 14753 /* When dynamic adjustment is on CA needs to start at 100% */ 14754 rack->rc_gp_dyn_mul = 1; 14755 if (rack_do_dyn_mul >= 100) 14756 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 14757 } else 14758 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 14759 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 14760 if (rack_timely_off) { 14761 rack->rc_skip_timely = 1; 14762 } 14763 if (rack->rc_skip_timely) { 14764 rack->r_ctl.rack_per_of_gp_rec = 90; 14765 rack->r_ctl.rack_per_of_gp_ca = 100; 14766 rack->r_ctl.rack_per_of_gp_ss = 250; 14767 } 14768 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 14769 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 14770 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 14771 14772 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 14773 rack_probertt_filter_life); 14774 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 14775 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 14776 rack->r_ctl.rc_time_of_last_probertt = us_cts; 14777 rack->r_ctl.rc_went_idle_time = us_cts; 14778 rack->r_ctl.rc_time_probertt_starts = 0; 14779 14780 rack->r_ctl.gp_rnd_thresh = rack_rnd_cnt_req & 0xff; 14781 if (rack_rnd_cnt_req & 0x10000) 14782 rack->r_ctl.gate_to_fs = 1; 14783 rack->r_ctl.gp_gain_req = rack_gp_gain_req; 14784 if ((rack_rnd_cnt_req & 0x100) > 0) { 14785 14786 } 14787 if (rack_dsack_std_based & 0x1) { 14788 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 14789 rack->rc_rack_tmr_std_based = 1; 14790 } 14791 if (rack_dsack_std_based & 0x2) { 14792 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 14793 rack->rc_rack_use_dsack = 1; 14794 } 14795 /* We require at least one measurement, even if the sysctl is 0 */ 14796 if (rack_req_measurements) 14797 rack->r_ctl.req_measurements = rack_req_measurements; 14798 else 14799 rack->r_ctl.req_measurements = 1; 14800 if (rack_enable_hw_pacing) 14801 rack->rack_hdw_pace_ena = 1; 14802 if (rack_hw_rate_caps) 14803 rack->r_rack_hw_rate_caps = 1; 14804 if (rack_non_rxt_use_cr) 14805 rack->rack_rec_nonrxt_use_cr = 1; 14806 /* Lets setup the fsb block */ 14807 err = rack_init_fsb(tp, rack); 14808 if (err) { 14809 uma_zfree(rack_pcb_zone, *ptr); 14810 *ptr = NULL; 14811 return (err); 14812 } 14813 if (rack_do_hystart) { 14814 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 14815 if (rack_do_hystart > 1) 14816 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 14817 if (rack_do_hystart > 2) 14818 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 14819 } 14820 /* Log what we will do with queries */ 14821 rack_log_chg_info(tp, rack, 7, 14822 no_query, 0, 0); 14823 if (rack_def_profile) 14824 rack_set_profile(rack, rack_def_profile); 14825 /* Cancel the GP measurement in progress */ 14826 tp->t_flags &= ~TF_GPUTINPROG; 14827 if ((tp->t_state != TCPS_CLOSED) && 14828 (tp->t_state != TCPS_TIME_WAIT)) { 14829 /* 14830 * We are already open, we may 14831 * 
need to adjust a few things.
         */
        if (SEQ_GT(tp->snd_max, tp->iss))
            snt = tp->snd_max - tp->iss;
        else
            snt = 0;
        iwin = rc_init_window(rack);
        if ((snt < iwin) &&
            (no_query == 1)) {
            /* We are not past the initial window
             * on the first init (i.e. a stack switch
             * has not yet occurred) so we need to make
             * sure cwnd and ssthresh are correct.
             */
            if (tp->snd_cwnd < iwin)
                tp->snd_cwnd = iwin;
            /*
             * If we are within the initial window
             * we want ssthresh to be unlimited. Setting
             * it to the rwnd (which the default stack does
             * and older racks) is not really a good idea
             * since we want to be in SS and grow both the
             * cwnd and the rwnd (via dynamic rwnd growth). If
             * we set it to the rwnd then as the peer grows its
             * rwnd we will be stuck in CA and never hit SS.
             *
             * It's far better to raise it up high (this takes the
             * risk that there has been a loss already, probably
             * we should have an indicator in all stacks of loss
             * but we don't), but considering the normal use this
             * is a risk worth taking. The consequences of not
             * hitting SS are far worse than going one more time
             * into it early on (before we have sent even an IW).
             * It is highly unlikely that we will have had a loss
             * before getting the IW out.
             */
            tp->snd_ssthresh = 0xffffffff;
        }
        /*
         * Any init based on sequence numbers
         * should be done in the deferred init path
         * since we can be CLOSED and not have them
         * inited when rack_init() is called. We
         * are not closed so lets call it.
         */
        rack_deferred_init(tp, rack);
    }
    if ((tp->t_state != TCPS_CLOSED) &&
        (tp->t_state != TCPS_TIME_WAIT) &&
        (no_query == 0) &&
        (tp->snd_una != tp->snd_max)) {
        err = rack_init_outstanding(tp, rack, us_cts, *ptr);
        if (err) {
            *ptr = NULL;
            return (err);
        }
    }
    rack_stop_all_timers(tp, rack);
    /* Setup all the t_flags2 */
    if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
        tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
    else
        tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
    if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
        tp->t_flags2 |= TF2_MBUF_ACKCMP;
    /*
     * Timers in Rack are kept in microseconds so lets
     * convert any initial incoming variables
     * from ticks into usecs. Note that we
     * also change the values of t_srtt and t_rttvar, if
     * they are non-zero. They are kept with a 5
     * bit decimal so we have to carefully convert
     * these to get the full precision.
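     * (Roughly, usec = (t_srtt * (1000000 / hz)) >> 5 once the 5-bit
     * fraction is accounted for; e.g. at hz=1000 a stored t_srtt of 256
     * is 8 ticks, i.e. about 8000 usec. Illustrative numbers only;
     * rack_convert_rtts() below does the real conversion.)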
14904 */ 14905 rack_convert_rtts(tp); 14906 rack_log_hystart_event(rack, rack->r_ctl.roundends, 20); 14907 if ((tptoinpcb(tp)->inp_flags & INP_DROPPED) == 0) { 14908 /* We do not start any timers on DROPPED connections */ 14909 if (tp->t_fb->tfb_chg_query == NULL) { 14910 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 14911 } else { 14912 struct tcp_query_resp qr; 14913 int ret; 14914 14915 memset(&qr, 0, sizeof(qr)); 14916 14917 /* Get the misc time stamps and such for rack */ 14918 qr.req = TCP_QUERY_RACK_TIMES; 14919 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); 14920 if (ret == 1) { 14921 rack->r_ctl.rc_reorder_ts = qr.rack_reorder_ts; 14922 rack->r_ctl.num_dsack = qr.rack_num_dsacks; 14923 rack->r_ctl.rc_tlp_rxt_last_time = qr.rack_rxt_last_time; 14924 rack->r_ctl.rc_rack_min_rtt = qr.rack_min_rtt; 14925 rack->rc_rack_rtt = qr.rack_rtt; 14926 rack->r_ctl.rc_rack_tmit_time = qr.rack_tmit_time; 14927 rack->r_ctl.rc_sacked = qr.rack_sacked; 14928 rack->r_ctl.rc_holes_rxt = qr.rack_holes_rxt; 14929 rack->r_ctl.rc_prr_delivered = qr.rack_prr_delivered; 14930 rack->r_ctl.rc_prr_recovery_fs = qr.rack_prr_recovery_fs; 14931 rack->r_ctl.rc_prr_sndcnt = qr.rack_prr_sndcnt; 14932 rack->r_ctl.rc_prr_out = qr.rack_prr_out; 14933 if (qr.rack_tlp_out) { 14934 rack->rc_tlp_in_progress = 1; 14935 rack->r_ctl.rc_tlp_cnt_out = qr.rack_tlp_cnt_out; 14936 } else { 14937 rack->rc_tlp_in_progress = 0; 14938 rack->r_ctl.rc_tlp_cnt_out = 0; 14939 } 14940 if (qr.rack_srtt_measured) 14941 rack->rc_srtt_measure_made = 1; 14942 if (qr.rack_in_persist == 1) { 14943 rack->r_ctl.rc_went_idle_time = qr.rack_time_went_idle; 14944 #ifdef NETFLIX_SHARED_CWND 14945 if (rack->r_ctl.rc_scw) { 14946 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 14947 rack->rack_scwnd_is_idle = 1; 14948 } 14949 #endif 14950 rack->r_ctl.persist_lost_ends = 0; 14951 rack->probe_not_answered = 0; 14952 rack->forced_ack = 0; 14953 tp->t_rxtshift = 0; 14954 rack->rc_in_persist = 1; 14955 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 14956 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 14957 } 14958 if (qr.rack_wanted_output) 14959 rack->r_wanted_output = 1; 14960 rack_log_chg_info(tp, rack, 6, 14961 qr.rack_min_rtt, 14962 qr.rack_rtt, 14963 qr.rack_reorder_ts); 14964 } 14965 /* Get the old stack timers */ 14966 qr.req_param = 0; 14967 qr.req = TCP_QUERY_TIMERS_UP; 14968 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); 14969 if (ret) { 14970 /* 14971 * non-zero return means we have a timer('s) 14972 * to start. Zero means no timer (no keepalive 14973 * I suppose). 
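                 * (When a timer does come back, tov below ends up holding
                 * how many usecs remain until the old stack's pacing/timer
                 * deadline; if that deadline has already passed we fall
                 * back to a single hpts slot.)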
14974 */ 14975 uint32_t tov = 0; 14976 14977 rack->r_ctl.rc_hpts_flags = qr.timer_hpts_flags; 14978 if (qr.timer_hpts_flags & PACE_PKT_OUTPUT) { 14979 rack->r_ctl.rc_last_output_to = qr.timer_pacing_to; 14980 if (TSTMP_GT(qr.timer_pacing_to, us_cts)) 14981 tov = qr.timer_pacing_to - us_cts; 14982 else 14983 tov = HPTS_TICKS_PER_SLOT; 14984 } 14985 if (qr.timer_hpts_flags & PACE_TMR_MASK) { 14986 rack->r_ctl.rc_timer_exp = qr.timer_timer_exp; 14987 if (tov == 0) { 14988 if (TSTMP_GT(qr.timer_timer_exp, us_cts)) 14989 tov = qr.timer_timer_exp - us_cts; 14990 else 14991 tov = HPTS_TICKS_PER_SLOT; 14992 } 14993 } 14994 rack_log_chg_info(tp, rack, 4, 14995 rack->r_ctl.rc_hpts_flags, 14996 rack->r_ctl.rc_last_output_to, 14997 rack->r_ctl.rc_timer_exp); 14998 if (tov) { 14999 struct hpts_diag diag; 15000 15001 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(tov), 15002 __LINE__, &diag); 15003 rack_log_hpts_diag(rack, us_cts, &diag, &rack->r_ctl.act_rcv_time); 15004 } 15005 } 15006 } 15007 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, 15008 __LINE__, RACK_RTTS_INIT); 15009 } 15010 return (0); 15011 } 15012 15013 static int 15014 rack_handoff_ok(struct tcpcb *tp) 15015 { 15016 if ((tp->t_state == TCPS_CLOSED) || 15017 (tp->t_state == TCPS_LISTEN)) { 15018 /* Sure no problem though it may not stick */ 15019 return (0); 15020 } 15021 if ((tp->t_state == TCPS_SYN_SENT) || 15022 (tp->t_state == TCPS_SYN_RECEIVED)) { 15023 /* 15024 * We really don't know if you support sack, 15025 * you have to get to ESTAB or beyond to tell. 15026 */ 15027 return (EAGAIN); 15028 } 15029 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { 15030 /* 15031 * Rack will only send a FIN after all data is acknowledged. 15032 * So in this case we have more data outstanding. We can't 15033 * switch stacks until either all data and only the FIN 15034 * is left (in which case rack_init() now knows how 15035 * to deal with that) <or> all is acknowledged and we 15036 * are only left with incoming data, though why you 15037 * would want to switch to rack after all data is acknowledged 15038 * I have no idea (rrs)! 15039 */ 15040 return (EAGAIN); 15041 } 15042 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 15043 return (0); 15044 } 15045 /* 15046 * If we reach here we don't do SACK on this connection so we can 15047 * never do rack. 
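 * (Summarizing the returns above: 0 means the handoff may proceed,
 * EAGAIN means try again later (SYN states, or a FIN sent with data
 * still outstanding), and EINVAL here means never, since SACK is off.)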
15048 */ 15049 return (EINVAL); 15050 } 15051 15052 static void 15053 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 15054 { 15055 15056 if (tp->t_fb_ptr) { 15057 uint32_t cnt_free = 0; 15058 struct tcp_rack *rack; 15059 struct rack_sendmap *rsm; 15060 15061 tcp_handle_orphaned_packets(tp); 15062 tp->t_flags &= ~TF_FORCEDATA; 15063 rack = (struct tcp_rack *)tp->t_fb_ptr; 15064 rack_log_pacing_delay_calc(rack, 15065 0, 15066 0, 15067 0, 15068 rack_get_gp_est(rack), /* delRate */ 15069 rack_get_lt_bw(rack), /* rttProp */ 15070 20, __LINE__, NULL, 0); 15071 #ifdef NETFLIX_SHARED_CWND 15072 if (rack->r_ctl.rc_scw) { 15073 uint32_t limit; 15074 15075 if (rack->r_limit_scw) 15076 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 15077 else 15078 limit = 0; 15079 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 15080 rack->r_ctl.rc_scw_index, 15081 limit); 15082 rack->r_ctl.rc_scw = NULL; 15083 } 15084 #endif 15085 if (rack->r_ctl.fsb.tcp_ip_hdr) { 15086 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); 15087 rack->r_ctl.fsb.tcp_ip_hdr = NULL; 15088 rack->r_ctl.fsb.th = NULL; 15089 } 15090 if (rack->rc_always_pace == 1) { 15091 rack_remove_pacing(rack); 15092 } 15093 /* Clean up any options if they were not applied */ 15094 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 15095 struct deferred_opt_list *dol; 15096 15097 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 15098 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 15099 free(dol, M_TCPDO); 15100 } 15101 /* rack does not use force data but other stacks may clear it */ 15102 if (rack->r_ctl.crte != NULL) { 15103 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 15104 rack->rack_hdrw_pacing = 0; 15105 rack->r_ctl.crte = NULL; 15106 } 15107 #ifdef TCP_BLACKBOX 15108 tcp_log_flowend(tp); 15109 #endif 15110 /* 15111 * Lets take a different approach to purging just 15112 * get each one and free it like a cum-ack would and 15113 * not use a foreach loop. 
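         * (Re-fetching tqhash_min() after each removal means we never
         * follow the links of an entry that has already been handed
         * back to the zone.)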
15114 */ 15115 rsm = tqhash_min(rack->r_ctl.tqh); 15116 while (rsm) { 15117 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); 15118 rack->r_ctl.rc_num_maps_alloced--; 15119 uma_zfree(rack_zone, rsm); 15120 rsm = tqhash_min(rack->r_ctl.tqh); 15121 } 15122 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15123 while (rsm) { 15124 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 15125 rack->r_ctl.rc_num_maps_alloced--; 15126 rack->rc_free_cnt--; 15127 cnt_free++; 15128 uma_zfree(rack_zone, rsm); 15129 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15130 } 15131 if (rack->r_ctl.pcm_s != NULL) { 15132 free(rack->r_ctl.pcm_s, M_TCPPCM); 15133 rack->r_ctl.pcm_s = NULL; 15134 rack->r_ctl.pcm_i.cnt_alloc = 0; 15135 rack->r_ctl.pcm_i.cnt = 0; 15136 } 15137 if ((rack->r_ctl.rc_num_maps_alloced > 0) && 15138 (tcp_bblogging_on(tp))) { 15139 union tcp_log_stackspecific log; 15140 struct timeval tv; 15141 15142 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15143 log.u_bbr.flex8 = 10; 15144 log.u_bbr.flex1 = rack->r_ctl.rc_num_maps_alloced; 15145 log.u_bbr.flex2 = rack->rc_free_cnt; 15146 log.u_bbr.flex3 = cnt_free; 15147 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15148 rsm = tqhash_min(rack->r_ctl.tqh); 15149 log.u_bbr.delRate = (uintptr_t)rsm; 15150 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15151 log.u_bbr.cur_del_rate = (uintptr_t)rsm; 15152 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15153 log.u_bbr.pkt_epoch = __LINE__; 15154 (void)tcp_log_event(tp, NULL, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 15155 0, &log, false, NULL, NULL, 0, &tv); 15156 } 15157 KASSERT((rack->r_ctl.rc_num_maps_alloced == 0), 15158 ("rack:%p num_aloc:%u after freeing all?", 15159 rack, 15160 rack->r_ctl.rc_num_maps_alloced)); 15161 rack->rc_free_cnt = 0; 15162 free(rack->r_ctl.tqh, M_TCPFSB); 15163 rack->r_ctl.tqh = NULL; 15164 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 15165 tp->t_fb_ptr = NULL; 15166 } 15167 /* Make sure snd_nxt is correctly set */ 15168 tp->snd_nxt = tp->snd_max; 15169 } 15170 15171 static void 15172 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 15173 { 15174 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 15175 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0; 15176 } 15177 switch (tp->t_state) { 15178 case TCPS_SYN_SENT: 15179 rack->r_state = TCPS_SYN_SENT; 15180 rack->r_substate = rack_do_syn_sent; 15181 break; 15182 case TCPS_SYN_RECEIVED: 15183 rack->r_state = TCPS_SYN_RECEIVED; 15184 rack->r_substate = rack_do_syn_recv; 15185 break; 15186 case TCPS_ESTABLISHED: 15187 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15188 rack->r_state = TCPS_ESTABLISHED; 15189 rack->r_substate = rack_do_established; 15190 break; 15191 case TCPS_CLOSE_WAIT: 15192 rack->r_state = TCPS_CLOSE_WAIT; 15193 rack->r_substate = rack_do_close_wait; 15194 break; 15195 case TCPS_FIN_WAIT_1: 15196 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15197 rack->r_state = TCPS_FIN_WAIT_1; 15198 rack->r_substate = rack_do_fin_wait_1; 15199 break; 15200 case TCPS_CLOSING: 15201 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15202 rack->r_state = TCPS_CLOSING; 15203 rack->r_substate = rack_do_closing; 15204 break; 15205 case TCPS_LAST_ACK: 15206 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15207 rack->r_state = TCPS_LAST_ACK; 15208 rack->r_substate = rack_do_lastack; 15209 break; 15210 case TCPS_FIN_WAIT_2: 15211 rack->r_state = TCPS_FIN_WAIT_2; 15212 rack->r_substate = rack_do_fin_wait_2; 15213 break; 15214 case TCPS_LISTEN: 15215 case TCPS_CLOSED: 15216 case TCPS_TIME_WAIT: 
15217 default: 15218 break; 15219 }; 15220 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 15221 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 15222 15223 } 15224 15225 static void 15226 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 15227 { 15228 /* 15229 * We received an ack, and then did not 15230 * call send or were bounced out due to the 15231 * hpts was running. Now a timer is up as well, is 15232 * it the right timer? 15233 */ 15234 struct rack_sendmap *rsm; 15235 int tmr_up; 15236 15237 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 15238 if (tcp_in_hpts(rack->rc_tp) == 0) { 15239 /* 15240 * Ok we probably need some timer up, but no 15241 * matter what the mask we are not in hpts. We 15242 * may have received an old ack and thus did nothing. 15243 */ 15244 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 15245 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 15246 return; 15247 } 15248 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 15249 return; 15250 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 15251 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 15252 (tmr_up == PACE_TMR_RXT)) { 15253 /* Should be an RXT */ 15254 return; 15255 } 15256 if (rsm == NULL) { 15257 /* Nothing outstanding? */ 15258 if (tp->t_flags & TF_DELACK) { 15259 if (tmr_up == PACE_TMR_DELACK) 15260 /* We are supposed to have delayed ack up and we do */ 15261 return; 15262 } else if (sbavail(&tptosocket(tp)->so_snd) && (tmr_up == PACE_TMR_RXT)) { 15263 /* 15264 * if we hit enobufs then we would expect the possibility 15265 * of nothing outstanding and the RXT up (and the hptsi timer). 15266 */ 15267 return; 15268 } else if (((V_tcp_always_keepalive || 15269 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 15270 (tp->t_state <= TCPS_CLOSING)) && 15271 (tmr_up == PACE_TMR_KEEP) && 15272 (tp->snd_max == tp->snd_una)) { 15273 /* We should have keep alive up and we do */ 15274 return; 15275 } 15276 } 15277 if (SEQ_GT(tp->snd_max, tp->snd_una) && 15278 ((tmr_up == PACE_TMR_TLP) || 15279 (tmr_up == PACE_TMR_RACK) || 15280 (tmr_up == PACE_TMR_RXT))) { 15281 /* 15282 * Either a Rack, TLP or RXT is fine if we 15283 * have outstanding data. 15284 */ 15285 return; 15286 } else if (tmr_up == PACE_TMR_DELACK) { 15287 /* 15288 * If the delayed ack was going to go off 15289 * before the rtx/tlp/rack timer were going to 15290 * expire, then that would be the timer in control. 15291 * Note we don't check the time here trusting the 15292 * code is correct. 15293 */ 15294 return; 15295 } 15296 /* 15297 * Ok the timer originally started is not what we want now. 15298 * We will force the hpts to be stopped if any, and restart 15299 * with the slot set to what was in the saved slot. 
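     * (If a pacing deadline was still pending, the unexpired portion is
     * added to rc_agg_early below so that later pacing decisions can
     * account for leaving hpts ahead of schedule.)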
15300 */ 15301 if (tcp_in_hpts(rack->rc_tp)) { 15302 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 15303 uint32_t us_cts; 15304 15305 us_cts = tcp_get_usecs(NULL); 15306 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 15307 rack->r_early = 1; 15308 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 15309 } 15310 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 15311 } 15312 tcp_hpts_remove(rack->rc_tp); 15313 } 15314 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 15315 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 15316 } 15317 15318 15319 static void 15320 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts) 15321 { 15322 if ((SEQ_LT(tp->snd_wl1, seq) || 15323 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || 15324 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { 15325 /* keep track of pure window updates */ 15326 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) 15327 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 15328 tp->snd_wnd = tiwin; 15329 rack_validate_fo_sendwin_up(tp, rack); 15330 tp->snd_wl1 = seq; 15331 tp->snd_wl2 = ack; 15332 if (tp->snd_wnd > tp->max_sndwnd) 15333 tp->max_sndwnd = tp->snd_wnd; 15334 rack->r_wanted_output = 1; 15335 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { 15336 tp->snd_wnd = tiwin; 15337 rack_validate_fo_sendwin_up(tp, rack); 15338 tp->snd_wl1 = seq; 15339 tp->snd_wl2 = ack; 15340 } else { 15341 /* Not a valid win update */ 15342 return; 15343 } 15344 if (tp->snd_wnd > tp->max_sndwnd) 15345 tp->max_sndwnd = tp->snd_wnd; 15346 /* Do we exit persists? */ 15347 if ((rack->rc_in_persist != 0) && 15348 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 15349 rack->r_ctl.rc_pace_min_segs))) { 15350 rack_exit_persist(tp, rack, cts); 15351 } 15352 /* Do we enter persists? */ 15353 if ((rack->rc_in_persist == 0) && 15354 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 15355 TCPS_HAVEESTABLISHED(tp->t_state) && 15356 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 15357 sbavail(&tptosocket(tp)->so_snd) && 15358 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 15359 /* 15360 * Here the rwnd is less than 15361 * the pacing size, we are established, 15362 * nothing is outstanding, and there is 15363 * data to send. Enter persists. 
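         * (The bound is min(rc_high_rwnd/2, rc_pace_min_segs); e.g. with
         * a 64k high-water rwnd and a 2896 byte pacing minimum we enter
         * persists once the offered window drops below 2896 bytes, and
         * the exit test above uses the same bound. Example numbers only.)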
15364 */ 15365 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, ack); 15366 } 15367 } 15368 15369 static void 15370 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq) 15371 { 15372 15373 if (tcp_bblogging_on(rack->rc_tp)) { 15374 struct inpcb *inp = tptoinpcb(tp); 15375 union tcp_log_stackspecific log; 15376 struct timeval ltv; 15377 char tcp_hdr_buf[60]; 15378 struct tcphdr *th; 15379 struct timespec ts; 15380 uint32_t orig_snd_una; 15381 uint8_t xx = 0; 15382 15383 #ifdef TCP_REQUEST_TRK 15384 struct tcp_sendfile_track *tcp_req; 15385 15386 if (SEQ_GT(ae->ack, tp->snd_una)) { 15387 tcp_req = tcp_req_find_req_for_seq(tp, (ae->ack-1)); 15388 } else { 15389 tcp_req = tcp_req_find_req_for_seq(tp, ae->ack); 15390 } 15391 #endif 15392 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15393 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 15394 if (rack->rack_no_prr == 0) 15395 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 15396 else 15397 log.u_bbr.flex1 = 0; 15398 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 15399 log.u_bbr.use_lt_bw <<= 1; 15400 log.u_bbr.use_lt_bw |= rack->r_might_revert; 15401 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 15402 log.u_bbr.bbr_state = rack->rc_free_cnt; 15403 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 15404 log.u_bbr.pkts_out = tp->t_maxseg; 15405 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 15406 log.u_bbr.flex7 = 1; 15407 log.u_bbr.lost = ae->flags; 15408 log.u_bbr.cwnd_gain = ackval; 15409 log.u_bbr.pacing_gain = 0x2; 15410 if (ae->flags & TSTMP_HDWR) { 15411 /* Record the hardware timestamp if present */ 15412 log.u_bbr.flex3 = M_TSTMP; 15413 ts.tv_sec = ae->timestamp / 1000000000; 15414 ts.tv_nsec = ae->timestamp % 1000000000; 15415 ltv.tv_sec = ts.tv_sec; 15416 ltv.tv_usec = ts.tv_nsec / 1000; 15417 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 15418 } else if (ae->flags & TSTMP_LRO) { 15419 /* Record the LRO the arrival timestamp */ 15420 log.u_bbr.flex3 = M_TSTMP_LRO; 15421 ts.tv_sec = ae->timestamp / 1000000000; 15422 ts.tv_nsec = ae->timestamp % 1000000000; 15423 ltv.tv_sec = ts.tv_sec; 15424 ltv.tv_usec = ts.tv_nsec / 1000; 15425 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 15426 } 15427 log.u_bbr.timeStamp = tcp_get_usecs(<v); 15428 /* Log the rcv time */ 15429 log.u_bbr.delRate = ae->timestamp; 15430 #ifdef TCP_REQUEST_TRK 15431 log.u_bbr.applimited = tp->t_tcpreq_closed; 15432 log.u_bbr.applimited <<= 8; 15433 log.u_bbr.applimited |= tp->t_tcpreq_open; 15434 log.u_bbr.applimited <<= 8; 15435 log.u_bbr.applimited |= tp->t_tcpreq_req; 15436 if (tcp_req) { 15437 /* Copy out any client req info */ 15438 /* seconds */ 15439 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); 15440 /* useconds */ 15441 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); 15442 log.u_bbr.rttProp = tcp_req->timestamp; 15443 log.u_bbr.cur_del_rate = tcp_req->start; 15444 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { 15445 log.u_bbr.flex8 |= 1; 15446 } else { 15447 log.u_bbr.flex8 |= 2; 15448 log.u_bbr.bw_inuse = tcp_req->end; 15449 } 15450 log.u_bbr.flex6 = tcp_req->start_seq; 15451 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { 15452 log.u_bbr.flex8 |= 4; 15453 log.u_bbr.epoch = tcp_req->end_seq; 15454 } 15455 } 15456 #endif 15457 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf)); 15458 th = (struct tcphdr *)tcp_hdr_buf; 15459 th->th_seq = ae->seq; 15460 th->th_ack = ae->ack; 15461 th->th_win = ae->win; 15462 /* Now fill in the ports */ 15463 th->th_sport = inp->inp_fport; 
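        /*
         * The header is rebuilt from the peer's point of view: the
         * source port of this inbound ack is our foreign port and the
         * destination is our local port.
         */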
15464 th->th_dport = inp->inp_lport; 15465 tcp_set_flags(th, ae->flags); 15466 /* Now do we have a timestamp option? */ 15467 if (ae->flags & HAS_TSTMP) { 15468 u_char *cp; 15469 uint32_t val; 15470 15471 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); 15472 cp = (u_char *)(th + 1); 15473 *cp = TCPOPT_NOP; 15474 cp++; 15475 *cp = TCPOPT_NOP; 15476 cp++; 15477 *cp = TCPOPT_TIMESTAMP; 15478 cp++; 15479 *cp = TCPOLEN_TIMESTAMP; 15480 cp++; 15481 val = htonl(ae->ts_value); 15482 bcopy((char *)&val, 15483 (char *)cp, sizeof(uint32_t)); 15484 val = htonl(ae->ts_echo); 15485 bcopy((char *)&val, 15486 (char *)(cp + 4), sizeof(uint32_t)); 15487 } else 15488 th->th_off = (sizeof(struct tcphdr) >> 2); 15489 15490 /* 15491 * For sane logging we need to play a little trick. 15492 * If the ack were fully processed we would have moved 15493 * snd_una to high_seq, but since compressed acks are 15494 * processed in two phases, at this point (logging) snd_una 15495 * won't be advanced. So we would see multiple acks showing 15496 * the advancement. We can prevent that by "pretending" that 15497 * snd_una was advanced and then un-advancing it so that the 15498 * logging code has the right value for tlb_snd_una. 15499 */ 15500 if (tp->snd_una != high_seq) { 15501 orig_snd_una = tp->snd_una; 15502 tp->snd_una = high_seq; 15503 xx = 1; 15504 } else 15505 xx = 0; 15506 TCP_LOG_EVENTP(tp, th, 15507 &tptosocket(tp)->so_rcv, 15508 &tptosocket(tp)->so_snd, TCP_LOG_IN, 0, 15509 0, &log, true, <v); 15510 if (xx) { 15511 tp->snd_una = orig_snd_una; 15512 } 15513 } 15514 15515 } 15516 15517 static void 15518 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts) 15519 { 15520 uint32_t us_rtt; 15521 /* 15522 * A persist or keep-alive was forced out, update our 15523 * min rtt time. Note now worry about lost responses. 15524 * When a subsequent keep-alive or persist times out 15525 * and forced_ack is still on, then the last probe 15526 * was not responded to. In such cases we have a 15527 * sysctl that controls the behavior. Either we apply 15528 * the rtt but with reduced confidence (0). Or we just 15529 * plain don't apply the rtt estimate. Having data flow 15530 * will clear the probe_not_answered flag i.e. cum-ack 15531 * move forward <or> exiting and reentering persists. 15532 */ 15533 15534 rack->forced_ack = 0; 15535 rack->rc_tp->t_rxtshift = 0; 15536 if ((rack->rc_in_persist && 15537 (tiwin == rack->rc_tp->snd_wnd)) || 15538 (rack->rc_in_persist == 0)) { 15539 /* 15540 * In persists only apply the RTT update if this is 15541 * a response to our window probe. And that 15542 * means the rwnd sent must match the current 15543 * snd_wnd. If it does not, then we got a 15544 * window update ack instead. For keepalive 15545 * we allow the answer no matter what the window. 15546 * 15547 * Note that if the probe_not_answered is set then 15548 * the forced_ack_ts is the oldest one i.e. the first 15549 * probe sent that might have been lost. This assures 15550 * us that if we do calculate an RTT it is longer not 15551 * some short thing. 
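     * (Concretely, the clean-response path below feeds the sample to
     * tcp_rack_xmit_timer() with confidence 3, while a retransmitted
     * probe uses confidence 0 and only when
     * rack_apply_rtt_with_reduced_conf allows it.)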
15552 */ 15553 if (rack->rc_in_persist) 15554 counter_u64_add(rack_persists_acks, 1); 15555 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 15556 if (us_rtt == 0) 15557 us_rtt = 1; 15558 if (rack->probe_not_answered == 0) { 15559 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 15560 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); 15561 } else { 15562 /* We have a retransmitted probe here too */ 15563 if (rack_apply_rtt_with_reduced_conf) { 15564 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 15565 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1); 15566 } 15567 } 15568 } 15569 } 15570 15571 static void 15572 rack_new_round_starts(struct tcpcb *tp, struct tcp_rack *rack, uint32_t high_seq) 15573 { 15574 /* 15575 * The next send has occurred mark the end of the round 15576 * as when that data gets acknowledged. We can 15577 * also do common things we might need to do when 15578 * a round begins. 15579 */ 15580 rack->r_ctl.roundends = tp->snd_max; 15581 rack->rc_new_rnd_needed = 0; 15582 rack_log_hystart_event(rack, tp->snd_max, 4); 15583 } 15584 15585 15586 static void 15587 rack_log_pcm(struct tcp_rack *rack, uint8_t mod, uint32_t flex1, uint32_t flex2, 15588 uint32_t flex3) 15589 { 15590 if (tcp_bblogging_on(rack->rc_tp)) { 15591 union tcp_log_stackspecific log; 15592 struct timeval tv; 15593 15594 (void)tcp_get_usecs(&tv); 15595 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15596 log.u_bbr.timeStamp = tcp_tv_to_usectick(&tv); 15597 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15598 log.u_bbr.flex8 = mod; 15599 log.u_bbr.flex1 = flex1; 15600 log.u_bbr.flex2 = flex2; 15601 log.u_bbr.flex3 = flex3; 15602 log.u_bbr.flex4 = rack_pcm_every_n_rounds; 15603 log.u_bbr.flex5 = rack->r_ctl.pcm_idle_rounds; 15604 log.u_bbr.bbr_substate = rack->pcm_needed; 15605 log.u_bbr.bbr_substate <<= 1; 15606 log.u_bbr.bbr_substate |= rack->pcm_in_progress; 15607 log.u_bbr.bbr_substate <<= 1; 15608 log.u_bbr.bbr_substate |= rack->pcm_enabled; /* bits are NIE for Needed, Inprogress, Enabled */ 15609 (void)tcp_log_event(rack->rc_tp, NULL, NULL, NULL, TCP_PCM_MEASURE, ERRNO_UNK, 15610 0, &log, false, NULL, NULL, 0, &tv); 15611 } 15612 } 15613 15614 static void 15615 rack_new_round_setup(struct tcpcb *tp, struct tcp_rack *rack, uint32_t high_seq) 15616 { 15617 /* 15618 * The round (current_round) has ended. We now 15619 * setup for the next round by incrementing the 15620 * round numnber and doing any round specific 15621 * things. 15622 */ 15623 rack_log_hystart_event(rack, high_seq, 21); 15624 rack->r_ctl.current_round++; 15625 /* New round (current_round) begins at next send */ 15626 rack->rc_new_rnd_needed = 1; 15627 if ((rack->pcm_enabled == 1) && 15628 (rack->pcm_needed == 0) && 15629 (rack->pcm_in_progress == 0)) { 15630 /* 15631 * If we have enabled PCM, then we need to 15632 * check if the round has adanced to the state 15633 * where one is required. 
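         * (The test below fires once (current_round - last_pcm_round)
         * plus pcm_idle_rounds reaches rack_pcm_every_n_rounds; e.g. an
         * every-100 setting with 5 idle rounds raises pcm_needed after
         * 95 elapsed rounds. Example numbers only.)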
15634 */ 15635 int rnds; 15636 15637 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; 15638 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { 15639 rack->pcm_needed = 1; 15640 rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_round ); 15641 } else if (rack_verbose_logging) { 15642 rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_round ); 15643 } 15644 } 15645 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { 15646 /* We have hystart enabled send the round info in */ 15647 if (CC_ALGO(tp)->newround != NULL) { 15648 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); 15649 } 15650 } 15651 /* 15652 * For DGP an initial startup check. We want to validate 15653 * that we are not just pushing on slow-start and just 15654 * not gaining.. i.e. filling buffers without getting any 15655 * boost in b/w during the inital slow-start. 15656 */ 15657 if (rack->dgp_on && 15658 (rack->rc_initial_ss_comp == 0) && 15659 (tp->snd_cwnd < tp->snd_ssthresh) && 15660 (rack->r_ctl.num_measurements >= RACK_REQ_AVG) && 15661 (rack->r_ctl.gp_rnd_thresh > 0) && 15662 ((rack->r_ctl.current_round - rack->r_ctl.last_rnd_of_gp_rise) >= rack->r_ctl.gp_rnd_thresh)) { 15663 15664 /* 15665 * We are in the initial SS and we have hd rack_rnd_cnt_req rounds(def:5) where 15666 * we have not gained the required amount in the gp_est (120.0% aka 1200). Lets 15667 * exit SS. 15668 * 15669 * Pick up the flight size now as we enter slowstart (not the 15670 * cwnd which may be inflated). 15671 */ 15672 rack->rc_initial_ss_comp = 1; 15673 15674 if (tcp_bblogging_on(rack->rc_tp)) { 15675 union tcp_log_stackspecific log; 15676 struct timeval tv; 15677 15678 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15679 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15680 log.u_bbr.flex1 = rack->r_ctl.current_round; 15681 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; 15682 log.u_bbr.flex3 = rack->r_ctl.gp_rnd_thresh; 15683 log.u_bbr.flex4 = rack->r_ctl.gate_to_fs; 15684 log.u_bbr.flex5 = rack->r_ctl.ss_hi_fs; 15685 log.u_bbr.flex8 = 40; 15686 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 15687 0, &log, false, NULL, __func__, __LINE__,&tv); 15688 } 15689 if ((rack->r_ctl.gate_to_fs == 1) && 15690 (tp->snd_cwnd > rack->r_ctl.ss_hi_fs)) { 15691 tp->snd_cwnd = rack->r_ctl.ss_hi_fs; 15692 } 15693 tp->snd_ssthresh = tp->snd_cwnd - 1; 15694 /* Turn off any fast output running */ 15695 rack->r_fast_output = 0; 15696 } 15697 } 15698 15699 static int 15700 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) 15701 { 15702 /* 15703 * Handle a "special" compressed ack mbuf. Each incoming 15704 * ack has only four possible dispositions: 15705 * 15706 * A) It moves the cum-ack forward 15707 * B) It is behind the cum-ack. 15708 * C) It is a window-update ack. 15709 * D) It is a dup-ack. 15710 * 15711 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES 15712 * in the incoming mbuf. We also need to still pay attention 15713 * to nxt_pkt since there may be another packet after this 15714 * one. 
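 * (The entry count is simply m_len / sizeof(struct tcp_ackent); each
 * entry carries the ack, seq, window, flags, timestamps and ECN
 * codepoint of one aggregated ack.)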
15715 */ 15716 #ifdef TCP_ACCOUNTING 15717 uint64_t ts_val; 15718 uint64_t rdstc; 15719 #endif 15720 int segsiz; 15721 struct timespec ts; 15722 struct tcp_rack *rack; 15723 struct tcp_ackent *ae; 15724 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack; 15725 int cnt, i, did_out, ourfinisacked = 0; 15726 struct tcpopt to_holder, *to = NULL; 15727 #ifdef TCP_ACCOUNTING 15728 int win_up_req = 0; 15729 #endif 15730 int nsegs = 0; 15731 int under_pacing = 0; 15732 int post_recovery = 0; 15733 #ifdef TCP_ACCOUNTING 15734 sched_pin(); 15735 #endif 15736 rack = (struct tcp_rack *)tp->t_fb_ptr; 15737 if (rack->gp_ready && 15738 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) 15739 under_pacing = 1; 15740 15741 if (rack->r_state != tp->t_state) 15742 rack_set_state(tp, rack); 15743 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 15744 (tp->t_flags & TF_GPUTINPROG)) { 15745 /* 15746 * We have a goodput in progress 15747 * and we have entered a late state. 15748 * Do we have enough data in the sb 15749 * to handle the GPUT request? 15750 */ 15751 uint32_t bytes; 15752 15753 bytes = tp->gput_ack - tp->gput_seq; 15754 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 15755 bytes += tp->gput_seq - tp->snd_una; 15756 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 15757 /* 15758 * There are not enough bytes in the socket 15759 * buffer that have been sent to cover this 15760 * measurement. Cancel it. 15761 */ 15762 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 15763 rack->r_ctl.rc_gp_srtt /*flex1*/, 15764 tp->gput_seq, 15765 0, 0, 18, __LINE__, NULL, 0); 15766 tp->t_flags &= ~TF_GPUTINPROG; 15767 } 15768 } 15769 to = &to_holder; 15770 to->to_flags = 0; 15771 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 15772 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 15773 cnt = m->m_len / sizeof(struct tcp_ackent); 15774 counter_u64_add(rack_multi_single_eq, cnt); 15775 high_seq = tp->snd_una; 15776 the_win = tp->snd_wnd; 15777 win_seq = tp->snd_wl1; 15778 win_upd_ack = tp->snd_wl2; 15779 cts = tcp_tv_to_usectick(tv); 15780 ms_cts = tcp_tv_to_mssectick(tv); 15781 rack->r_ctl.rc_rcvtime = cts; 15782 segsiz = ctf_fixed_maxseg(tp); 15783 if ((rack->rc_gp_dyn_mul) && 15784 (rack->use_fixed_rate == 0) && 15785 (rack->rc_always_pace)) { 15786 /* Check in on probertt */ 15787 rack_check_probe_rtt(rack, cts); 15788 } 15789 for (i = 0; i < cnt; i++) { 15790 #ifdef TCP_ACCOUNTING 15791 ts_val = get_cyclecount(); 15792 #endif 15793 rack_clear_rate_sample(rack); 15794 ae = ((mtod(m, struct tcp_ackent *)) + i); 15795 if (ae->flags & TH_FIN) 15796 rack_log_pacing_delay_calc(rack, 15797 0, 15798 0, 15799 0, 15800 rack_get_gp_est(rack), /* delRate */ 15801 rack_get_lt_bw(rack), /* rttProp */ 15802 20, __LINE__, NULL, 0); 15803 /* Setup the window */ 15804 tiwin = ae->win << tp->snd_scale; 15805 if (tiwin > rack->r_ctl.rc_high_rwnd) 15806 rack->r_ctl.rc_high_rwnd = tiwin; 15807 /* figure out the type of ack */ 15808 if (SEQ_LT(ae->ack, high_seq)) { 15809 /* Case B*/ 15810 ae->ack_val_set = ACK_BEHIND; 15811 } else if (SEQ_GT(ae->ack, high_seq)) { 15812 /* Case A */ 15813 ae->ack_val_set = ACK_CUMACK; 15814 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ 15815 /* Case D */ 15816 ae->ack_val_set = ACK_DUPACK; 15817 } else { 15818 /* Case C */ 15819 ae->ack_val_set = ACK_RWND; 15820 } 15821 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__); 15822 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 15823 /* Validate timestamp */ 15824 if 
(ae->flags & HAS_TSTMP) { 15825 /* Setup for a timestamp */ 15826 to->to_flags = TOF_TS; 15827 ae->ts_echo -= tp->ts_offset; 15828 to->to_tsecr = ae->ts_echo; 15829 to->to_tsval = ae->ts_value; 15830 /* 15831 * If echoed timestamp is later than the current time, fall back to 15832 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 15833 * were used when this connection was established. 15834 */ 15835 if (TSTMP_GT(ae->ts_echo, ms_cts)) 15836 to->to_tsecr = 0; 15837 if (tp->ts_recent && 15838 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 15839 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 15840 #ifdef TCP_ACCOUNTING 15841 rdstc = get_cyclecount(); 15842 if (rdstc > ts_val) { 15843 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15844 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 15845 } 15846 } 15847 #endif 15848 continue; 15849 } 15850 } 15851 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 15852 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 15853 tp->ts_recent_age = tcp_ts_getticks(); 15854 tp->ts_recent = ae->ts_value; 15855 } 15856 } else { 15857 /* Setup for a no options */ 15858 to->to_flags = 0; 15859 } 15860 /* Update the rcv time and perform idle reduction possibly */ 15861 if (tp->t_idle_reduce && 15862 (tp->snd_max == tp->snd_una) && 15863 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 15864 counter_u64_add(rack_input_idle_reduces, 1); 15865 rack_cc_after_idle(rack, tp); 15866 } 15867 tp->t_rcvtime = ticks; 15868 /* Now what about ECN of a chain of pure ACKs? */ 15869 if (tcp_ecn_input_segment(tp, ae->flags, 0, 15870 tcp_packets_this_ack(tp, ae->ack), 15871 ae->codepoint)) 15872 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__); 15873 #ifdef TCP_ACCOUNTING 15874 /* Count for the specific type of ack in */ 15875 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15876 tp->tcp_cnt_counters[ae->ack_val_set]++; 15877 } 15878 #endif 15879 /* 15880 * Note how we could move up these in the determination 15881 * above, but we don't so that way the timestamp checks (and ECN) 15882 * is done first before we do any processing on the ACK. 15883 * The non-compressed path through the code has this 15884 * weakness (noted by @jtl) that it actually does some 15885 * processing before verifying the timestamp information. 15886 * We don't take that path here which is why we set 15887 * the ack_val_set first, do the timestamp and ecn 15888 * processing, and then look at what we have setup. 
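         * (For reference, the classification above maps case A to
         * ACK_CUMACK, case B to ACK_BEHIND, case C to ACK_RWND and
         * case D to ACK_DUPACK.)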
         */
        if (ae->ack_val_set == ACK_BEHIND) {
            /*
             * Case B flag reordering, if window is not closed
             * or it could be a keep-alive or persists
             */
            if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) {
                rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
                if (rack->r_ctl.rc_reorder_ts == 0)
                    rack->r_ctl.rc_reorder_ts = 1;
            }
        } else if (ae->ack_val_set == ACK_DUPACK) {
            /* Case D */
            rack_strike_dupack(rack, ae->ack);
        } else if (ae->ack_val_set == ACK_RWND) {
            /* Case C */
            if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
                ts.tv_sec = ae->timestamp / 1000000000;
                ts.tv_nsec = ae->timestamp % 1000000000;
                rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
                rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
            } else {
                rack->r_ctl.act_rcv_time = *tv;
            }
            if (rack->forced_ack) {
                rack_handle_probe_response(rack, tiwin,
                    tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
            }
#ifdef TCP_ACCOUNTING
            win_up_req = 1;
#endif
            win_upd_ack = ae->ack;
            win_seq = ae->seq;
            the_win = tiwin;
            rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts);
        } else {
            /* Case A */
            if (SEQ_GT(ae->ack, tp->snd_max)) {
                /*
                 * We just send an ack since the incoming
                 * ack is beyond the largest seq we sent.
                 */
                if ((tp->t_flags & TF_ACKNOW) == 0) {
                    ctf_ack_war_checks(tp);
                    if (tp->t_flags & TF_ACKNOW)
                        rack->r_wanted_output = 1;
                }
            } else {
                nsegs++;
                /* If the window changed setup to update */
                if (tiwin != tp->snd_wnd) {
                    win_upd_ack = ae->ack;
                    win_seq = ae->seq;
                    the_win = tiwin;
                    rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts);
                }
#ifdef TCP_ACCOUNTING
                /* Account for the acks */
                if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
                    tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz);
                }
#endif
                high_seq = ae->ack;
                /* Setup our act_rcv_time */
                if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
                    ts.tv_sec = ae->timestamp / 1000000000;
                    ts.tv_nsec = ae->timestamp % 1000000000;
                    rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
                    rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
                } else {
                    rack->r_ctl.act_rcv_time = *tv;
                }
                rack_process_to_cumack(tp, rack, ae->ack, cts, to,
                    tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time));
#ifdef TCP_REQUEST_TRK
                rack_req_check_for_comp(rack, high_seq);
#endif
                if (rack->rc_dsack_round_seen) {
                    /* Is the dsack round over?
*/ 15968 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { 15969 /* Yes it is */ 15970 rack->rc_dsack_round_seen = 0; 15971 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 15972 } 15973 } 15974 } 15975 } 15976 /* And lets be sure to commit the rtt measurements for this ack */ 15977 tcp_rack_xmit_timer_commit(rack, tp); 15978 #ifdef TCP_ACCOUNTING 15979 rdstc = get_cyclecount(); 15980 if (rdstc > ts_val) { 15981 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15982 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 15983 if (ae->ack_val_set == ACK_CUMACK) 15984 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); 15985 } 15986 } 15987 #endif 15988 } 15989 #ifdef TCP_ACCOUNTING 15990 ts_val = get_cyclecount(); 15991 #endif 15992 /* Tend to any collapsed window */ 15993 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) { 15994 /* The peer collapsed the window */ 15995 rack_collapsed_window(rack, (tp->snd_max - high_seq), high_seq, __LINE__); 15996 } else if (rack->rc_has_collapsed) 15997 rack_un_collapse_window(rack, __LINE__); 15998 if ((rack->r_collapse_point_valid) && 15999 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point))) 16000 rack->r_collapse_point_valid = 0; 16001 acked_amount = acked = (high_seq - tp->snd_una); 16002 if (acked) { 16003 /* 16004 * The draft (v3) calls for us to use SEQ_GEQ, but that 16005 * causes issues when we are just going app limited. Lets 16006 * instead use SEQ_GT <or> where its equal but more data 16007 * is outstanding. 16008 * 16009 * Also make sure we are on the last ack of a series. We 16010 * have to have all the ack's processed in queue to know 16011 * if there is something left outstanding. 16012 * 16013 */ 16014 if (SEQ_GEQ(high_seq, rack->r_ctl.roundends) && 16015 (rack->rc_new_rnd_needed == 0) && 16016 (nxt_pkt == 0)) { 16017 /* 16018 * We have crossed into a new round with 16019 * this th_ack value. 16020 */ 16021 rack_new_round_setup(tp, rack, high_seq); 16022 } 16023 /* 16024 * Clear the probe not answered flag 16025 * since cum-ack moved forward. 16026 */ 16027 rack->probe_not_answered = 0; 16028 if (tp->t_flags & TF_NEEDSYN) { 16029 /* 16030 * T/TCP: Connection was half-synchronized, and our SYN has 16031 * been ACK'd (so connection is now fully synchronized). Go 16032 * to non-starred state, increment snd_una for ACK of SYN, 16033 * and check if we can do window scaling. 16034 */ 16035 tp->t_flags &= ~TF_NEEDSYN; 16036 tp->snd_una++; 16037 acked_amount = acked = (high_seq - tp->snd_una); 16038 } 16039 if (acked > sbavail(&so->so_snd)) 16040 acked_amount = sbavail(&so->so_snd); 16041 if (IN_FASTRECOVERY(tp->t_flags) && 16042 (rack->rack_no_prr == 0)) 16043 rack_update_prr(tp, rack, acked_amount, high_seq); 16044 if (IN_RECOVERY(tp->t_flags)) { 16045 if (SEQ_LT(high_seq, tp->snd_recover) && 16046 (SEQ_LT(high_seq, tp->snd_max))) { 16047 tcp_rack_partialack(tp); 16048 } else { 16049 rack_post_recovery(tp, high_seq); 16050 post_recovery = 1; 16051 } 16052 } else if ((rack->rto_from_rec == 1) && 16053 SEQ_GEQ(high_seq, tp->snd_recover)) { 16054 /* 16055 * We were in recovery, hit a rxt timeout 16056 * and never re-entered recovery. The timeout(s) 16057 * made up all the lost data. In such a case 16058 * we need to clear the rto_from_rec flag. 
16059 */ 16060 rack->rto_from_rec = 0; 16061 } 16062 /* Handle the rack-log-ack part (sendmap) */ 16063 if ((sbused(&so->so_snd) == 0) && 16064 (acked > acked_amount) && 16065 (tp->t_state >= TCPS_FIN_WAIT_1) && 16066 (tp->t_flags & TF_SENTFIN)) { 16067 /* 16068 * We must be sure our fin 16069 * was sent and acked (we can be 16070 * in FIN_WAIT_1 without having 16071 * sent the fin). 16072 */ 16073 ourfinisacked = 1; 16074 /* 16075 * Lets make sure snd_una is updated 16076 * since most likely acked_amount = 0 (it 16077 * should be). 16078 */ 16079 tp->snd_una = high_seq; 16080 } 16081 /* Did we make a RTO error? */ 16082 if ((tp->t_flags & TF_PREVVALID) && 16083 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 16084 tp->t_flags &= ~TF_PREVVALID; 16085 if (tp->t_rxtshift == 1 && 16086 (int)(ticks - tp->t_badrxtwin) < 0) 16087 rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__); 16088 } 16089 /* Handle the data in the socket buffer */ 16090 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 16091 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 16092 if (acked_amount > 0) { 16093 uint32_t p_cwnd; 16094 struct mbuf *mfree; 16095 16096 if (post_recovery) { 16097 /* 16098 * Grab the segsiz, multiply by 2 and add the snd_cwnd 16099 * that is the max the CC should add if we are exiting 16100 * recovery and doing a late add. 16101 */ 16102 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16103 p_cwnd <<= 1; 16104 p_cwnd += tp->snd_cwnd; 16105 } 16106 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, post_recovery); 16107 if (post_recovery && (tp->snd_cwnd > p_cwnd)) { 16108 /* Must be non-newreno (cubic) getting too ahead of itself */ 16109 tp->snd_cwnd = p_cwnd; 16110 } 16111 SOCKBUF_LOCK(&so->so_snd); 16112 mfree = sbcut_locked(&so->so_snd, acked_amount); 16113 tp->snd_una = high_seq; 16114 /* Note we want to hold the sb lock through the sendmap adjust */ 16115 rack_adjust_sendmap_head(rack, &so->so_snd); 16116 /* Wake up the socket if we have room to write more */ 16117 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 16118 sowwakeup_locked(so); 16119 m_freem(mfree); 16120 } 16121 /* update progress */ 16122 tp->t_acktime = ticks; 16123 rack_log_progress_event(rack, tp, tp->t_acktime, 16124 PROGRESS_UPDATE, __LINE__); 16125 /* Clear out shifts and such */ 16126 tp->t_rxtshift = 0; 16127 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 16128 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 16129 rack->rc_tlp_in_progress = 0; 16130 rack->r_ctl.rc_tlp_cnt_out = 0; 16131 /* Send recover and snd_nxt must be dragged along */ 16132 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 16133 tp->snd_recover = tp->snd_una; 16134 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 16135 tp->snd_nxt = tp->snd_max; 16136 /* 16137 * If the RXT timer is running we want to 16138 * stop it, so we can restart a TLP (or new RXT). 
16139 */ 16140 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 16141 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16142 tp->snd_wl2 = high_seq; 16143 tp->t_dupacks = 0; 16144 if (under_pacing && 16145 (rack->use_fixed_rate == 0) && 16146 (rack->in_probe_rtt == 0) && 16147 rack->rc_gp_dyn_mul && 16148 rack->rc_always_pace) { 16149 /* Check if we are dragging bottom */ 16150 rack_check_bottom_drag(tp, rack, so); 16151 } 16152 if (tp->snd_una == tp->snd_max) { 16153 tp->t_flags &= ~TF_PREVVALID; 16154 rack->r_ctl.retran_during_recovery = 0; 16155 rack->rc_suspicious = 0; 16156 rack->r_ctl.dsack_byte_cnt = 0; 16157 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 16158 if (rack->r_ctl.rc_went_idle_time == 0) 16159 rack->r_ctl.rc_went_idle_time = 1; 16160 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 16161 if (sbavail(&tptosocket(tp)->so_snd) == 0) 16162 tp->t_acktime = 0; 16163 /* Set so we might enter persists... */ 16164 rack->r_wanted_output = 1; 16165 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16166 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 16167 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 16168 (sbavail(&so->so_snd) == 0) && 16169 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 16170 /* 16171 * The socket was gone and the 16172 * peer sent data (not now in the past), time to 16173 * reset him. 16174 */ 16175 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16176 /* tcp_close will kill the inp pre-log the Reset */ 16177 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 16178 #ifdef TCP_ACCOUNTING 16179 rdstc = get_cyclecount(); 16180 if (rdstc > ts_val) { 16181 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16182 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16183 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16184 } 16185 } 16186 #endif 16187 m_freem(m); 16188 tp = tcp_close(tp); 16189 if (tp == NULL) { 16190 #ifdef TCP_ACCOUNTING 16191 sched_unpin(); 16192 #endif 16193 return (1); 16194 } 16195 /* 16196 * We would normally do drop-with-reset which would 16197 * send back a reset. We can't since we don't have 16198 * all the needed bits. Instead lets arrange for 16199 * a call to tcp_output(). That way since we 16200 * are in the closed state we will generate a reset. 16201 * 16202 * Note if tcp_accounting is on we don't unpin since 16203 * we do that after the goto label. 16204 */ 16205 goto send_out_a_rst; 16206 } 16207 if ((sbused(&so->so_snd) == 0) && 16208 (tp->t_state >= TCPS_FIN_WAIT_1) && 16209 (tp->t_flags & TF_SENTFIN)) { 16210 /* 16211 * If we can't receive any more data, then closing user can 16212 * proceed. Starting the timer is contrary to the 16213 * specification, but if we don't get a FIN we'll hang 16214 * forever. 16215 * 16216 */ 16217 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 16218 soisdisconnected(so); 16219 tcp_timer_activate(tp, TT_2MSL, 16220 (tcp_fast_finwait2_recycle ? 16221 tcp_finwait2_timeout : 16222 TP_MAXIDLE(tp))); 16223 } 16224 if (ourfinisacked == 0) { 16225 /* 16226 * We don't change to fin-wait-2 if we have our fin acked 16227 * which means we are probably in TCPS_CLOSING. 
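                 * (The ourfinisacked case is handled by the switch further
                 * down: CLOSING moves to time-wait, LAST_ACK closes the
                 * connection, and FIN_WAIT_1 transitions to FIN_WAIT_2
                 * there instead.)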
16228 */ 16229 tcp_state_change(tp, TCPS_FIN_WAIT_2); 16230 } 16231 } 16232 } 16233 /* Wake up the socket if we have room to write more */ 16234 if (sbavail(&so->so_snd)) { 16235 rack->r_wanted_output = 1; 16236 if (ctf_progress_timeout_check(tp, true)) { 16237 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 16238 tp, tick, PROGRESS_DROP, __LINE__); 16239 /* 16240 * We cheat here and don't send a RST, we should send one 16241 * when the pacer drops the connection. 16242 */ 16243 #ifdef TCP_ACCOUNTING 16244 rdstc = get_cyclecount(); 16245 if (rdstc > ts_val) { 16246 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16247 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16248 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16249 } 16250 } 16251 sched_unpin(); 16252 #endif 16253 (void)tcp_drop(tp, ETIMEDOUT); 16254 m_freem(m); 16255 return (1); 16256 } 16257 } 16258 if (ourfinisacked) { 16259 switch(tp->t_state) { 16260 case TCPS_CLOSING: 16261 #ifdef TCP_ACCOUNTING 16262 rdstc = get_cyclecount(); 16263 if (rdstc > ts_val) { 16264 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16265 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16266 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16267 } 16268 } 16269 sched_unpin(); 16270 #endif 16271 tcp_twstart(tp); 16272 m_freem(m); 16273 return (1); 16274 break; 16275 case TCPS_LAST_ACK: 16276 #ifdef TCP_ACCOUNTING 16277 rdstc = get_cyclecount(); 16278 if (rdstc > ts_val) { 16279 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16280 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16281 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16282 } 16283 } 16284 sched_unpin(); 16285 #endif 16286 tp = tcp_close(tp); 16287 ctf_do_drop(m, tp); 16288 return (1); 16289 break; 16290 case TCPS_FIN_WAIT_1: 16291 #ifdef TCP_ACCOUNTING 16292 rdstc = get_cyclecount(); 16293 if (rdstc > ts_val) { 16294 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16295 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16296 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16297 } 16298 } 16299 #endif 16300 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 16301 soisdisconnected(so); 16302 tcp_timer_activate(tp, TT_2MSL, 16303 (tcp_fast_finwait2_recycle ? 16304 tcp_finwait2_timeout : 16305 TP_MAXIDLE(tp))); 16306 } 16307 tcp_state_change(tp, TCPS_FIN_WAIT_2); 16308 break; 16309 default: 16310 break; 16311 } 16312 } 16313 if (rack->r_fast_output) { 16314 /* 16315 * We re doing fast output.. can we expand that? 
16316 */ 16317 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 16318 } 16319 #ifdef TCP_ACCOUNTING 16320 rdstc = get_cyclecount(); 16321 if (rdstc > ts_val) { 16322 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16323 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16324 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16325 } 16326 } 16327 16328 } else if (win_up_req) { 16329 rdstc = get_cyclecount(); 16330 if (rdstc > ts_val) { 16331 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16332 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 16333 } 16334 } 16335 #endif 16336 } 16337 /* Now is there a next packet, if so we are done */ 16338 m_freem(m); 16339 did_out = 0; 16340 if (nxt_pkt) { 16341 #ifdef TCP_ACCOUNTING 16342 sched_unpin(); 16343 #endif 16344 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 16345 return (0); 16346 } 16347 rack_handle_might_revert(tp, rack); 16348 ctf_calc_rwin(so, tp); 16349 if ((rack->r_wanted_output != 0) || 16350 (rack->r_fast_output != 0) || 16351 (tp->t_flags & TF_ACKNOW )) { 16352 send_out_a_rst: 16353 if (tcp_output(tp) < 0) { 16354 #ifdef TCP_ACCOUNTING 16355 sched_unpin(); 16356 #endif 16357 return (1); 16358 } 16359 did_out = 1; 16360 } 16361 if (tp->t_flags2 & TF2_HPTS_CALLS) 16362 tp->t_flags2 &= ~TF2_HPTS_CALLS; 16363 rack_free_trim(rack); 16364 #ifdef TCP_ACCOUNTING 16365 sched_unpin(); 16366 #endif 16367 rack_timer_audit(tp, rack, &so->so_snd); 16368 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 16369 return (0); 16370 } 16371 16372 #define TCP_LRO_TS_OPTION \ 16373 ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \ 16374 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP) 16375 16376 static int 16377 rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 16378 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, int32_t nxt_pkt, 16379 struct timeval *tv) 16380 { 16381 struct inpcb *inp = tptoinpcb(tp); 16382 struct socket *so = tptosocket(tp); 16383 #ifdef TCP_ACCOUNTING 16384 uint64_t ts_val; 16385 #endif 16386 int32_t thflags, retval, did_out = 0; 16387 int32_t way_out = 0; 16388 /* 16389 * cts - is the current time from tv (caller gets ts) in microseconds. 16390 * ms_cts - is the current time from tv in milliseconds. 16391 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. 16392 */ 16393 uint32_t cts, us_cts, ms_cts; 16394 uint32_t tiwin; 16395 struct timespec ts; 16396 struct tcpopt to; 16397 struct tcp_rack *rack; 16398 struct rack_sendmap *rsm; 16399 int32_t prev_state = 0; 16400 int no_output = 0; 16401 int slot_remaining = 0; 16402 #ifdef TCP_ACCOUNTING 16403 int ack_val_set = 0xf; 16404 #endif 16405 int nsegs; 16406 16407 NET_EPOCH_ASSERT(); 16408 INP_WLOCK_ASSERT(inp); 16409 16410 /* 16411 * tv passed from common code is from either M_TSTMP_LRO or 16412 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 16413 */ 16414 rack = (struct tcp_rack *)tp->t_fb_ptr; 16415 if (rack->rack_deferred_inited == 0) { 16416 /* 16417 * If we are the connecting socket we will 16418 * hit rack_init() when no sequence numbers 16419 * are setup. This makes it so we must defer 16420 * some initialization. Call that now. 16421 */ 16422 rack_deferred_init(tp, rack); 16423 } 16424 /* 16425 * Check to see if we need to skip any output plans. This 16426 * can happen in the non-LRO path where we are pacing and 16427 * must process the ack coming in but need to defer sending 16428 * anything becase a pacing timer is running. 
16429 */ 16430 us_cts = tcp_tv_to_usectick(tv); 16431 if (m->m_flags & M_ACKCMP) { 16432 /* 16433 * All compressed ack's are ack's by definition so 16434 * remove any ack required flag and then do the processing. 16435 */ 16436 rack->rc_ack_required = 0; 16437 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 16438 } 16439 thflags = tcp_get_flags(th); 16440 if ((rack->rc_always_pace == 1) && 16441 (rack->rc_ack_can_sendout_data == 0) && 16442 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16443 (TSTMP_LT(us_cts, rack->r_ctl.rc_last_output_to))) { 16444 /* 16445 * Ok conditions are right for queuing the packets 16446 * but we do have to check the flags in the inp, it 16447 * could be, if a sack is present, we want to be awoken and 16448 * so should process the packets. 16449 */ 16450 slot_remaining = rack->r_ctl.rc_last_output_to - us_cts; 16451 if (rack->rc_tp->t_flags2 & TF2_DONT_SACK_QUEUE) { 16452 no_output = 1; 16453 } else { 16454 /* 16455 * If there is no options, or just a 16456 * timestamp option, we will want to queue 16457 * the packets. This is the same that LRO does 16458 * and will need to change with accurate ECN. 16459 */ 16460 uint32_t *ts_ptr; 16461 int optlen; 16462 16463 optlen = (th->th_off << 2) - sizeof(struct tcphdr); 16464 ts_ptr = (uint32_t *)(th + 1); 16465 if ((optlen == 0) || 16466 ((optlen == TCPOLEN_TSTAMP_APPA) && 16467 (*ts_ptr == TCP_LRO_TS_OPTION))) 16468 no_output = 1; 16469 } 16470 if ((no_output == 1) && (slot_remaining < tcp_min_hptsi_time)) { 16471 /* 16472 * It is unrealistic to think we can pace in less than 16473 * the minimum granularity of the pacer (def:250usec). So 16474 * if we have less than that time remaining we should go 16475 * ahead and allow output to be "early". We will attempt to 16476 * make up for it in any pacing time we try to apply on 16477 * the outbound packet. 16478 */ 16479 no_output = 0; 16480 } 16481 } 16482 /* 16483 * If there is a RST or FIN lets dump out the bw 16484 * with a FIN the connection may go on but we 16485 * may not. 16486 */ 16487 if ((thflags & TH_FIN) || (thflags & TH_RST)) 16488 rack_log_pacing_delay_calc(rack, 16489 rack->r_ctl.gp_bw, 16490 0, 16491 0, 16492 rack_get_gp_est(rack), /* delRate */ 16493 rack_get_lt_bw(rack), /* rttProp */ 16494 20, __LINE__, NULL, 0); 16495 if (m->m_flags & M_ACKCMP) { 16496 panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp); 16497 } 16498 cts = tcp_tv_to_usectick(tv); 16499 ms_cts = tcp_tv_to_mssectick(tv); 16500 nsegs = m->m_pkthdr.lro_nsegs; 16501 counter_u64_add(rack_proc_non_comp_ack, 1); 16502 #ifdef TCP_ACCOUNTING 16503 sched_pin(); 16504 if (thflags & TH_ACK) 16505 ts_val = get_cyclecount(); 16506 #endif 16507 if ((m->m_flags & M_TSTMP) || 16508 (m->m_flags & M_TSTMP_LRO)) { 16509 mbuf_tstmp2timespec(m, &ts); 16510 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 16511 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 16512 } else 16513 rack->r_ctl.act_rcv_time = *tv; 16514 kern_prefetch(rack, &prev_state); 16515 prev_state = 0; 16516 /* 16517 * Unscale the window into a 32-bit value. For the SYN_SENT state 16518 * the scale is zero. 16519 */ 16520 tiwin = th->th_win << tp->snd_scale; 16521 #ifdef TCP_ACCOUNTING 16522 if (thflags & TH_ACK) { 16523 /* 16524 * We have a tradeoff here. We can either do what we are 16525 * doing i.e. pinning to this CPU and then doing the accounting 16526 * <or> we could do a critical enter, setup the rdtsc and cpu 16527 * as in below, and then validate we are on the same CPU on 16528 * exit. 
I have chosen not to do the critical enter since 16529 * that often will gain you a context switch, and instead lock 16530 * us (line above this if) to the same CPU with sched_pin(). This 16531 * means we may be context switched out for a higher priority 16532 * interrupt but we won't be moved to another CPU.
16533 * 16534 * If this occurs (which it won't very often since we most likely 16535 * are running this code in interrupt context and only a higher 16536 * priority will bump us ... clock?) we will falsely include 16537 * the interrupt processing time in the ack processing 16538 * time. This is ok since it's a rare event. 16539 */
16540 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin, 16541 ctf_fixed_maxseg(tp)); 16542 } 16543 #endif
16544 /* 16545 * Parse options on any incoming segment. 16546 */ 16547 memset(&to, 0, sizeof(to)); 16548 tcp_dooptions(&to, (u_char *)(th + 1), 16549 (th->th_off << 2) - sizeof(struct tcphdr), 16550 (thflags & TH_SYN) ? TO_SYN : 0);
16551 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 16552 __func__)); 16553 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 16554 __func__));
16555 if (tp->t_flags2 & TF2_PROC_SACK_PROHIBIT) { 16556 /* 16557 * We don't look at SACKs from the 16558 * peer because the MSS is too small, which 16559 * can subject us to an attack. 16560 */ 16561 to.to_flags &= ~TOF_SACK; 16562 }
16563 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 16564 (tp->t_flags & TF_GPUTINPROG)) { 16565 /* 16566 * We have a goodput in progress 16567 * and we have entered a late state. 16568 * Do we have enough data in the sb 16569 * to handle the GPUT request? 16570 */ 16571 uint32_t bytes; 16572
16573 bytes = tp->gput_ack - tp->gput_seq; 16574 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 16575 bytes += tp->gput_seq - tp->snd_una; 16576 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 16577 /* 16578 * There are not enough bytes in the socket 16579 * buffer that have been sent to cover this 16580 * measurement. Cancel it.
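 * To make the check concrete (illustrative numbers, not from a trace):
 * with snd_una = 1000, gput_seq = 2000 and gput_ack = 12000, the
 * measurement still needs bytes = (12000 - 2000) + (2000 - 1000) = 11000,
 * i.e. everything from snd_una up to gput_ack. If sbavail() on the send
 * socket buffer is smaller than that, the measurement can never be
 * satisfied, so it is cancelled below.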
16581 */ 16582 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 16583 rack->r_ctl.rc_gp_srtt /*flex1*/, 16584 tp->gput_seq, 16585 0, 0, 18, __LINE__, NULL, 0); 16586 tp->t_flags &= ~TF_GPUTINPROG; 16587 } 16588 } 16589 if (tcp_bblogging_on(rack->rc_tp)) { 16590 union tcp_log_stackspecific log; 16591 struct timeval ltv; 16592 #ifdef TCP_REQUEST_TRK 16593 struct tcp_sendfile_track *tcp_req; 16594 16595 if (SEQ_GT(th->th_ack, tp->snd_una)) { 16596 tcp_req = tcp_req_find_req_for_seq(tp, (th->th_ack-1)); 16597 } else { 16598 tcp_req = tcp_req_find_req_for_seq(tp, th->th_ack); 16599 } 16600 #endif 16601 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16602 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 16603 if (rack->rack_no_prr == 0) 16604 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16605 else 16606 log.u_bbr.flex1 = 0; 16607 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 16608 log.u_bbr.use_lt_bw <<= 1; 16609 log.u_bbr.use_lt_bw |= rack->r_might_revert; 16610 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 16611 log.u_bbr.bbr_state = rack->rc_free_cnt; 16612 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16613 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 16614 log.u_bbr.flex3 = m->m_flags; 16615 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 16616 log.u_bbr.lost = thflags; 16617 log.u_bbr.pacing_gain = 0x1; 16618 #ifdef TCP_ACCOUNTING 16619 log.u_bbr.cwnd_gain = ack_val_set; 16620 #endif 16621 log.u_bbr.flex7 = 2; 16622 if (m->m_flags & M_TSTMP) { 16623 /* Record the hardware timestamp if present */ 16624 mbuf_tstmp2timespec(m, &ts); 16625 ltv.tv_sec = ts.tv_sec; 16626 ltv.tv_usec = ts.tv_nsec / 1000; 16627 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 16628 } else if (m->m_flags & M_TSTMP_LRO) { 16629 /* Record the LRO the arrival timestamp */ 16630 mbuf_tstmp2timespec(m, &ts); 16631 ltv.tv_sec = ts.tv_sec; 16632 ltv.tv_usec = ts.tv_nsec / 1000; 16633 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 16634 } 16635 log.u_bbr.timeStamp = tcp_get_usecs(<v); 16636 /* Log the rcv time */ 16637 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp; 16638 #ifdef TCP_REQUEST_TRK 16639 log.u_bbr.applimited = tp->t_tcpreq_closed; 16640 log.u_bbr.applimited <<= 8; 16641 log.u_bbr.applimited |= tp->t_tcpreq_open; 16642 log.u_bbr.applimited <<= 8; 16643 log.u_bbr.applimited |= tp->t_tcpreq_req; 16644 if (tcp_req) { 16645 /* Copy out any client req info */ 16646 /* seconds */ 16647 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); 16648 /* useconds */ 16649 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); 16650 log.u_bbr.rttProp = tcp_req->timestamp; 16651 log.u_bbr.cur_del_rate = tcp_req->start; 16652 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { 16653 log.u_bbr.flex8 |= 1; 16654 } else { 16655 log.u_bbr.flex8 |= 2; 16656 log.u_bbr.bw_inuse = tcp_req->end; 16657 } 16658 log.u_bbr.flex6 = tcp_req->start_seq; 16659 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { 16660 log.u_bbr.flex8 |= 4; 16661 log.u_bbr.epoch = tcp_req->end_seq; 16662 } 16663 } 16664 #endif 16665 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, 16666 tlen, &log, true, <v); 16667 } 16668 /* Remove ack required flag if set, we have one */ 16669 if (thflags & TH_ACK) 16670 rack->rc_ack_required = 0; 16671 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__); 16672 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) { 16673 way_out = 4; 16674 retval = 0; 16675 m_freem(m); 16676 goto done_with_input; 16677 } 16678 /* 16679 * If a segment with the ACK-bit set arrives in the 
SYN-SENT state 16680 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9. 16681 */ 16682 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 16683 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 16684 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 16685 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 16686 #ifdef TCP_ACCOUNTING 16687 sched_unpin(); 16688 #endif 16689 return (1); 16690 } 16691 /* 16692 * If timestamps were negotiated during SYN/ACK and a 16693 * segment without a timestamp is received, silently drop 16694 * the segment, unless it is a RST segment or missing timestamps are 16695 * tolerated. 16696 * See section 3.2 of RFC 7323. 16697 */ 16698 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 16699 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 16700 way_out = 5; 16701 retval = 0; 16702 m_freem(m); 16703 goto done_with_input; 16704 } 16705 /* 16706 * Segment received on connection. Reset idle time and keep-alive 16707 * timer. XXX: This should be done after segment validation to 16708 * ignore broken/spoofed segs. 16709 */ 16710 if (tp->t_idle_reduce && 16711 (tp->snd_max == tp->snd_una) && 16712 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 16713 counter_u64_add(rack_input_idle_reduces, 1); 16714 rack_cc_after_idle(rack, tp); 16715 } 16716 tp->t_rcvtime = ticks; 16717 #ifdef STATS 16718 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 16719 #endif 16720 if (tiwin > rack->r_ctl.rc_high_rwnd) 16721 rack->r_ctl.rc_high_rwnd = tiwin; 16722 /* 16723 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 16724 * this to occur after we've validated the segment. 16725 */ 16726 if (tcp_ecn_input_segment(tp, thflags, tlen, 16727 tcp_packets_this_ack(tp, th->th_ack), 16728 iptos)) 16729 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__); 16730 16731 /* 16732 * If echoed timestamp is later than the current time, fall back to 16733 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 16734 * were used when this connection was established. 16735 */ 16736 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 16737 to.to_tsecr -= tp->ts_offset; 16738 if (TSTMP_GT(to.to_tsecr, ms_cts)) 16739 to.to_tsecr = 0; 16740 } 16741 if ((rack->r_rcvpath_rtt_up == 1) && 16742 (to.to_flags & TOF_TS) && 16743 (TSTMP_GEQ(to.to_tsecr, rack->r_ctl.last_rcv_tstmp_for_rtt))) { 16744 uint32_t rtt = 0; 16745 16746 /* 16747 * We are receiving only and thus not sending 16748 * data to do an RTT. We set a flag when we first 16749 * sent this TS to the peer. We now have it back 16750 * and have an RTT to share. We log it as a conf 16751 * 4, we are not so sure about it.. since we 16752 * may have lost an ack. 16753 */ 16754 if (TSTMP_GT(cts, rack->r_ctl.last_time_of_arm_rcv)) 16755 rtt = (cts - rack->r_ctl.last_time_of_arm_rcv); 16756 rack->r_rcvpath_rtt_up = 0; 16757 /* Submit and commit the timer */ 16758 if (rtt > 0) { 16759 tcp_rack_xmit_timer(rack, rtt, 0, rtt, 4, NULL, 1); 16760 tcp_rack_xmit_timer_commit(rack, tp); 16761 } 16762 } 16763 /* 16764 * If its the first time in we need to take care of options and 16765 * verify we can do SACK for rack! 16766 */ 16767 if (rack->r_state == 0) { 16768 /* Should be init'd by rack_init() */ 16769 KASSERT(rack->rc_inp != NULL, 16770 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 16771 if (rack->rc_inp == NULL) { 16772 rack->rc_inp = inp; 16773 } 16774 16775 /* 16776 * Process options only when we get SYN/ACK back. 
The SYN 16777 * case for incoming connections is handled in tcp_syncache. 16778 * According to RFC1323 the window field in a SYN (i.e., a 16779 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 16780 * this is traditional behavior, may need to be cleaned up. 16781 */ 16782 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 16783 /* Handle parallel SYN for ECN */ 16784 tcp_ecn_input_parallel_syn(tp, thflags, iptos); 16785 if ((to.to_flags & TOF_SCALE) && 16786 (tp->t_flags & TF_REQ_SCALE)) { 16787 tp->t_flags |= TF_RCVD_SCALE; 16788 tp->snd_scale = to.to_wscale; 16789 } else 16790 tp->t_flags &= ~TF_REQ_SCALE; 16791 /* 16792 * Initial send window. It will be updated with the 16793 * next incoming segment to the scaled value. 16794 */ 16795 tp->snd_wnd = th->th_win; 16796 rack_validate_fo_sendwin_up(tp, rack); 16797 if ((to.to_flags & TOF_TS) && 16798 (tp->t_flags & TF_REQ_TSTMP)) { 16799 tp->t_flags |= TF_RCVD_TSTMP; 16800 tp->ts_recent = to.to_tsval; 16801 tp->ts_recent_age = cts; 16802 } else 16803 tp->t_flags &= ~TF_REQ_TSTMP; 16804 if (to.to_flags & TOF_MSS) { 16805 tcp_mss(tp, to.to_mss); 16806 } 16807 if ((tp->t_flags & TF_SACK_PERMIT) && 16808 (to.to_flags & TOF_SACKPERM) == 0) 16809 tp->t_flags &= ~TF_SACK_PERMIT; 16810 if (tp->t_flags & TF_FASTOPEN) { 16811 if (to.to_flags & TOF_FASTOPEN) { 16812 uint16_t mss; 16813 16814 if (to.to_flags & TOF_MSS) 16815 mss = to.to_mss; 16816 else 16817 if ((inp->inp_vflag & INP_IPV6) != 0) 16818 mss = TCP6_MSS; 16819 else 16820 mss = TCP_MSS; 16821 tcp_fastopen_update_cache(tp, mss, 16822 to.to_tfo_len, to.to_tfo_cookie); 16823 } else 16824 tcp_fastopen_disable_path(tp); 16825 } 16826 } 16827 /* 16828 * At this point we are at the initial call. Here we decide 16829 * if we are doing RACK or not. We do this by seeing if 16830 * TF_SACK_PERMIT is set and the sack-not-required is clear. 16831 * The code now does do dup-ack counting so if you don't 16832 * switch back you won't get rack & TLP, but you will still 16833 * get this stack. 16834 */ 16835 16836 if ((rack_sack_not_required == 0) && 16837 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 16838 tcp_switch_back_to_default(tp); 16839 (*tp->t_fb->tfb_tcp_do_segment)(tp, m, th, drop_hdrlen, 16840 tlen, iptos); 16841 #ifdef TCP_ACCOUNTING 16842 sched_unpin(); 16843 #endif 16844 return (1); 16845 } 16846 tcp_set_hpts(tp); 16847 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 16848 } 16849 if (thflags & TH_FIN) 16850 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 16851 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 16852 if ((rack->rc_gp_dyn_mul) && 16853 (rack->use_fixed_rate == 0) && 16854 (rack->rc_always_pace)) { 16855 /* Check in on probertt */ 16856 rack_check_probe_rtt(rack, cts); 16857 } 16858 rack_clear_rate_sample(rack); 16859 if ((rack->forced_ack) && 16860 ((tcp_get_flags(th) & TH_RST) == 0)) { 16861 rack_handle_probe_response(rack, tiwin, us_cts); 16862 } 16863 /* 16864 * This is the one exception case where we set the rack state 16865 * always. All other times (timers etc) we must have a rack-state 16866 * set (so we assure we have done the checks above for SACK). 
16867 */ 16868 rack->r_ctl.rc_rcvtime = cts; 16869 if (rack->r_state != tp->t_state) 16870 rack_set_state(tp, rack); 16871 if (SEQ_GT(th->th_ack, tp->snd_una) && 16872 (rsm = tqhash_min(rack->r_ctl.tqh)) != NULL) 16873 kern_prefetch(rsm, &prev_state); 16874 prev_state = rack->r_state; 16875 if ((thflags & TH_RST) && 16876 ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 16877 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || 16878 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq))) { 16879 /* The connection will be killed by a reset check the tracepoint */ 16880 tcp_trace_point(rack->rc_tp, TCP_TP_RESET_RCV); 16881 } 16882 retval = (*rack->r_substate) (m, th, so, 16883 tp, &to, drop_hdrlen, 16884 tlen, tiwin, thflags, nxt_pkt, iptos); 16885 if (retval == 0) { 16886 /* 16887 * If retval is 1 the tcb is unlocked and most likely the tp 16888 * is gone. 16889 */ 16890 INP_WLOCK_ASSERT(inp); 16891 if ((rack->rc_gp_dyn_mul) && 16892 (rack->rc_always_pace) && 16893 (rack->use_fixed_rate == 0) && 16894 rack->in_probe_rtt && 16895 (rack->r_ctl.rc_time_probertt_starts == 0)) { 16896 /* 16897 * If we are going for target, lets recheck before 16898 * we output. 16899 */ 16900 rack_check_probe_rtt(rack, cts); 16901 } 16902 if (rack->set_pacing_done_a_iw == 0) { 16903 /* How much has been acked? */ 16904 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 16905 /* We have enough to set in the pacing segment size */ 16906 rack->set_pacing_done_a_iw = 1; 16907 rack_set_pace_segments(tp, rack, __LINE__, NULL); 16908 } 16909 } 16910 tcp_rack_xmit_timer_commit(rack, tp); 16911 #ifdef TCP_ACCOUNTING 16912 /* 16913 * If we set the ack_val_se to what ack processing we are doing 16914 * we also want to track how many cycles we burned. Note 16915 * the bits after tcp_output we let be "free". This is because 16916 * we are also tracking the tcp_output times as well. Note the 16917 * use of 0xf here since we only have 11 counter (0 - 0xa) and 16918 * 0xf cannot be returned and is what we initialize it too to 16919 * indicate we are not doing the tabulations. 16920 */ 16921 if (ack_val_set != 0xf) { 16922 uint64_t crtsc; 16923 16924 crtsc = get_cyclecount(); 16925 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16926 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 16927 } 16928 } 16929 #endif 16930 if ((nxt_pkt == 0) && (no_output == 0)) { 16931 if ((rack->r_wanted_output != 0) || 16932 (tp->t_flags & TF_ACKNOW) || 16933 (rack->r_fast_output != 0)) { 16934 16935 do_output_now: 16936 if (tcp_output(tp) < 0) { 16937 #ifdef TCP_ACCOUNTING 16938 sched_unpin(); 16939 #endif 16940 return (1); 16941 } 16942 did_out = 1; 16943 } 16944 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16945 rack_free_trim(rack); 16946 } else if ((nxt_pkt == 0) && (tp->t_flags & TF_ACKNOW)) { 16947 goto do_output_now; 16948 } else if ((no_output == 1) && 16949 (nxt_pkt == 0) && 16950 (tcp_in_hpts(rack->rc_tp) == 0)) { 16951 /* 16952 * We are not in hpts and we had a pacing timer up. Use 16953 * the remaining time (slot_remaining) to restart the timer. 
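 * For example (illustrative numbers): if the pacer was due to fire at
 * rc_last_output_to and this ack arrived 4000 usec before that, then
 * slot_remaining was latched at roughly 4000 usec when the input was
 * accepted above; instead of computing a fresh pacing slot we simply
 * re-arm hpts for that remainder so the originally planned send time
 * is preserved.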
16954 */ 16955 KASSERT ((slot_remaining != 0), ("slot remaining is zero for rack:%p tp:%p", rack, tp)); 16956 rack_start_hpts_timer(rack, tp, cts, slot_remaining, 0, 0); 16957 rack_free_trim(rack); 16958 }
16959 /* Clear the flag, it may have been cleared by output but we may not have */ 16960 if ((nxt_pkt == 0) && (tp->t_flags2 & TF2_HPTS_CALLS)) 16961 tp->t_flags2 &= ~TF2_HPTS_CALLS;
16962 /* 16963 * The draft (v3) calls for us to use SEQ_GEQ, but that 16964 * causes issues when we are just going app limited. Lets 16965 * instead use SEQ_GT <or> where it's equal but more data 16966 * is outstanding. 16967 * 16968 * Also make sure we are on the last ack of a series. We 16969 * have to have all the acks processed in queue to know 16970 * if there is something left outstanding. 16971 */
16972 if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends) && 16973 (rack->rc_new_rnd_needed == 0) && 16974 (nxt_pkt == 0)) { 16975 /* 16976 * We have crossed into a new round with 16977 * the new snd_una. 16978 */ 16979 rack_new_round_setup(tp, rack, tp->snd_una); 16980 }
16981 if ((nxt_pkt == 0) && 16982 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 16983 (SEQ_GT(tp->snd_max, tp->snd_una) || 16984 (tp->t_flags & TF_DELACK) || 16985 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 16986 (tp->t_state <= TCPS_CLOSING)))) {
16987 /* We could not send (probably in the hpts but stopped the timer earlier)? */ 16988 if ((tp->snd_max == tp->snd_una) && 16989 ((tp->t_flags & TF_DELACK) == 0) && 16990 (tcp_in_hpts(rack->rc_tp)) && 16991 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 16992 /* no keep-alive timer needed, we are still in hpts awaiting a paced output */ 16993 ;
16994 } else { 16995 int late = 0; 16996 if (tcp_in_hpts(tp)) { 16997 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 16998 us_cts = tcp_get_usecs(NULL); 16999 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 17000 rack->r_early = 1; 17001 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 17002 } else 17003 late = 1; 17004 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 17005 } 17006 tcp_hpts_remove(tp); 17007 }
17008 if (late && (did_out == 0)) { 17009 /* 17010 * We are late in the sending 17011 * and we did not call the output 17012 * (this probably should not happen). 17013 */ 17014 goto do_output_now; 17015 } 17016 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 17017 } 17018 way_out = 1;
17019 } else if (nxt_pkt == 0) { 17020 /* Do we have the correct timer running? */ 17021 rack_timer_audit(tp, rack, &so->so_snd); 17022 way_out = 2; 17023 }
17024 done_with_input: 17025 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); 17026 if (did_out) 17027 rack->r_wanted_output = 0; 17028 } 17029
17030 #ifdef TCP_ACCOUNTING 17031 sched_unpin(); 17032 #endif 17033 return (retval); 17034 } 17035
17036 static void 17037 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 17038 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 17039 { 17040 struct timeval tv; 17041
17042 /* First lets see if we have old packets */ 17043 if (!STAILQ_EMPTY(&tp->t_inqueue)) { 17044 if (ctf_do_queued_segments(tp, 1)) { 17045 m_freem(m); 17046 return; 17047 } 17048 }
17049 if (m->m_flags & M_TSTMP_LRO) { 17050 mbuf_tstmp2timeval(m, &tv); 17051 } else { 17052 /* This should not happen; should we KASSERT instead?
*/ 17053 tcp_get_usecs(&tv); 17054 } 17055 if (rack_do_segment_nounlock(tp, m, th, drop_hdrlen, tlen, iptos, 0, 17056 &tv) == 0) { 17057 INP_WUNLOCK(tptoinpcb(tp)); 17058 } 17059 } 17060 17061 struct rack_sendmap * 17062 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 17063 { 17064 struct rack_sendmap *rsm = NULL; 17065 int32_t idx; 17066 uint32_t srtt = 0, thresh = 0, ts_low = 0; 17067 17068 /* Return the next guy to be re-transmitted */ 17069 if (tqhash_empty(rack->r_ctl.tqh)) { 17070 return (NULL); 17071 } 17072 if (tp->t_flags & TF_SENTFIN) { 17073 /* retran the end FIN? */ 17074 return (NULL); 17075 } 17076 /* ok lets look at this one */ 17077 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 17078 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) { 17079 return (rsm); 17080 } 17081 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 17082 goto check_it; 17083 } 17084 rsm = rack_find_lowest_rsm(rack); 17085 if (rsm == NULL) { 17086 return (NULL); 17087 } 17088 check_it: 17089 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && 17090 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 17091 /* 17092 * No sack so we automatically do the 3 strikes and 17093 * retransmit (no rack timer would be started). 17094 */ 17095 return (rsm); 17096 } 17097 if (rsm->r_flags & RACK_ACKED) { 17098 return (NULL); 17099 } 17100 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 17101 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 17102 /* Its not yet ready */ 17103 return (NULL); 17104 } 17105 srtt = rack_grab_rtt(tp, rack); 17106 idx = rsm->r_rtr_cnt - 1; 17107 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 17108 thresh = rack_calc_thresh_rack(rack, srtt, tsused, __LINE__, 1); 17109 if ((tsused == ts_low) || 17110 (TSTMP_LT(tsused, ts_low))) { 17111 /* No time since sending */ 17112 return (NULL); 17113 } 17114 if ((tsused - ts_low) < thresh) { 17115 /* It has not been long enough yet */ 17116 return (NULL); 17117 } 17118 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 17119 ((rsm->r_flags & RACK_SACK_PASSED))) { 17120 /* 17121 * We have passed the dup-ack threshold <or> 17122 * a SACK has indicated this is missing. 17123 * Note that if you are a declared attacker 17124 * it is only the dup-ack threshold that 17125 * will cause retransmits. 17126 */ 17127 /* log retransmit reason */ 17128 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 17129 rack->r_fast_output = 0; 17130 return (rsm); 17131 } 17132 return (NULL); 17133 } 17134 17135 static void 17136 rack_log_pacing_delay_calc (struct tcp_rack *rack, uint32_t len, uint32_t slot, 17137 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 17138 int line, struct rack_sendmap *rsm, uint8_t quality) 17139 { 17140 if (tcp_bblogging_on(rack->rc_tp)) { 17141 union tcp_log_stackspecific log; 17142 struct timeval tv; 17143 17144 if (rack_verbose_logging == 0) { 17145 /* 17146 * We are not verbose screen out all but 17147 * ones we always want. 
17148 */ 17149 if ((method != 2) && 17150 (method != 3) && 17151 (method != 7) && 17152 (method != 89) && 17153 (method != 14) && 17154 (method != 20)) { 17155 return; 17156 } 17157 } 17158 memset(&log, 0, sizeof(log)); 17159 log.u_bbr.flex1 = slot; 17160 log.u_bbr.flex2 = len; 17161 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 17162 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 17163 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 17164 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 17165 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 17166 log.u_bbr.use_lt_bw <<= 1; 17167 log.u_bbr.use_lt_bw |= rack->r_late; 17168 log.u_bbr.use_lt_bw <<= 1; 17169 log.u_bbr.use_lt_bw |= rack->r_early; 17170 log.u_bbr.use_lt_bw <<= 1; 17171 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 17172 log.u_bbr.use_lt_bw <<= 1; 17173 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 17174 log.u_bbr.use_lt_bw <<= 1; 17175 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 17176 log.u_bbr.use_lt_bw <<= 1; 17177 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 17178 log.u_bbr.use_lt_bw <<= 1; 17179 log.u_bbr.use_lt_bw |= rack->gp_ready; 17180 log.u_bbr.pkt_epoch = line; 17181 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 17182 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 17183 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 17184 log.u_bbr.bw_inuse = bw_est; 17185 log.u_bbr.delRate = bw; 17186 if (rack->r_ctl.gp_bw == 0) 17187 log.u_bbr.cur_del_rate = 0; 17188 else 17189 log.u_bbr.cur_del_rate = rack_get_bw(rack); 17190 log.u_bbr.rttProp = len_time; 17191 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 17192 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 17193 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 17194 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 17195 /* We are in slow start */ 17196 log.u_bbr.flex7 = 1; 17197 } else { 17198 /* we are on congestion avoidance */ 17199 log.u_bbr.flex7 = 0; 17200 } 17201 log.u_bbr.flex8 = method; 17202 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 17203 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 17204 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 17205 log.u_bbr.cwnd_gain <<= 1; 17206 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 17207 log.u_bbr.cwnd_gain <<= 1; 17208 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 17209 log.u_bbr.bbr_substate = quality; 17210 log.u_bbr.bbr_state = rack->dgp_on; 17211 log.u_bbr.bbr_state <<= 1; 17212 log.u_bbr.bbr_state |= rack->rc_pace_to_cwnd; 17213 log.u_bbr.bbr_state <<= 2; 17214 TCP_LOG_EVENTP(rack->rc_tp, NULL, 17215 &rack->rc_inp->inp_socket->so_rcv, 17216 &rack->rc_inp->inp_socket->so_snd, 17217 BBR_LOG_HPTSI_CALC, 0, 17218 0, &log, false, &tv); 17219 } 17220 } 17221 17222 static uint32_t 17223 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 17224 { 17225 uint32_t new_tso, user_max, pace_one; 17226 17227 user_max = rack->rc_user_set_max_segs * mss; 17228 if (rack->rc_force_max_seg) { 17229 return (user_max); 17230 } 17231 if (rack->use_fixed_rate && 17232 ((rack->r_ctl.crte == NULL) || 17233 (bw != rack->r_ctl.crte->rate))) { 17234 /* Use the user mss since we are not exactly matched */ 17235 return (user_max); 17236 } 17237 if (rack_pace_one_seg || 17238 (rack->r_ctl.rc_user_set_min_segs == 1)) 17239 pace_one = 1; 17240 else 17241 pace_one = 0; 17242 17243 new_tso = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, bw, mss, 17244 pace_one, rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); 17245 if (new_tso > user_max) 17246 new_tso = user_max; 
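	/*
	 * Rough illustration of the clamping above (example numbers only):
	 * with a 1448 byte mss and rc_user_set_max_segs of 64, user_max is
	 * 64 * 1448 = 92672 bytes, so any larger bandwidth-derived burst has
	 * just been trimmed to that. The checks below may then push the
	 * burst back up to the hybrid client's suggested size or to the
	 * user-set minimum number of segments.
	 */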
17247 if (rack->rc_hybrid_mode && rack->r_ctl.client_suggested_maxseg) { 17248 if (((uint32_t)rack->r_ctl.client_suggested_maxseg * mss) > new_tso) 17249 new_tso = (uint32_t)rack->r_ctl.client_suggested_maxseg * mss; 17250 }
17251 if (rack->r_ctl.rc_user_set_min_segs && 17252 ((rack->r_ctl.rc_user_set_min_segs * mss) > new_tso)) 17253 new_tso = rack->r_ctl.rc_user_set_min_segs * mss; 17254 return (new_tso); 17255 } 17256
17257 static uint64_t 17258 rack_arrive_at_discounted_rate(struct tcp_rack *rack, uint64_t window_input, uint32_t *rate_set, uint32_t *gain_b) 17259 { 17260 uint64_t reduced_win; 17261 uint32_t gain; 17262
17263 if (window_input < rc_init_window(rack)) { 17264 /* 17265 * The cwnd is collapsed to 17266 * nearly zero, maybe because of a time-out? 17267 * Lets drop back to the lt-bw. 17268 */ 17269 reduced_win = rack_get_lt_bw(rack); 17270 /* Set the flag so the caller knows it's a rate and not a reduced window */ 17271 *rate_set = 1; 17272 gain = 100;
17273 } else if (IN_RECOVERY(rack->rc_tp->t_flags)) { 17274 /* 17275 * If we are in recovery our cwnd needs to be less for 17276 * our pacing consideration. 17277 */ 17278 if (rack->rack_hibeta == 0) { 17279 reduced_win = window_input / 2; 17280 gain = 50; 17281 } else { 17282 reduced_win = window_input * rack->r_ctl.saved_hibeta; 17283 reduced_win /= 100; 17284 gain = rack->r_ctl.saved_hibeta; 17285 }
17286 } else { 17287 /* 17288 * Apply Timely factor to increase/decrease the 17289 * amount we are pacing at. 17290 */ 17291 gain = rack_get_output_gain(rack, NULL); 17292 if (gain > rack_gain_p5_ub) { 17293 gain = rack_gain_p5_ub; 17294 } 17295 reduced_win = window_input * gain; 17296 reduced_win /= 100; 17297 }
17298 if (gain_b != NULL) 17299 *gain_b = gain; 17300 /* 17301 * What is being returned here is a trimmed down 17302 * window value in all cases where rate_set is left 17303 * at 0; the caller then converts that slimmed down 17304 * cwnd ("reduced_win") into a rate. In the one case 17305 * where rate_set is set to 1 we instead return an 17306 * actual rate, the lt_bw. 17307 */ 17308 return (reduced_win); 17309 } 17310
17311 static int32_t 17312 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced) 17313 { 17314 uint64_t lentim, fill_bw; 17315
17316 rack->r_via_fill_cw = 0; 17317 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) 17318 return (slot); 17319 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) 17320 return (slot); 17321 if (rack->r_ctl.rc_last_us_rtt == 0) 17322 return (slot);
17323 if (rack->rc_pace_fill_if_rttin_range && 17324 (rack->r_ctl.rc_last_us_rtt >= 17325 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { 17326 /* The rtt is huge, N * smallest, lets not fill */ 17327 return (slot); 17328 }
17329 if (rack->r_ctl.fillcw_cap && *rate_wanted >= rack->r_ctl.fillcw_cap) 17330 return (slot); 17331 /* 17332 * First lets calculate the b/w based on the last us-rtt 17333 * and the smallest send window.
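 * As a worked example (illustrative values): with an effective window
 * of min(snd_cwnd, cwnd_to_use, snd_wnd) = 64240 bytes and
 * rc_last_us_rtt = 10000 usec, the fill rate below comes out to
 * 64240 * 1000000 / 10000 = 6424000 bytes/sec (about 51 Mbit/s),
 * before any discount, fillcw_cap or hardware rate clamping is applied.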
17334 */ 17335 fill_bw = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); 17336 if (rack->rc_fillcw_apply_discount) { 17337 uint32_t rate_set = 0; 17338 17339 fill_bw = rack_arrive_at_discounted_rate(rack, fill_bw, &rate_set, NULL); 17340 if (rate_set) { 17341 goto at_lt_bw; 17342 } 17343 } 17344 /* Take the rwnd if its smaller */ 17345 if (fill_bw > rack->rc_tp->snd_wnd) 17346 fill_bw = rack->rc_tp->snd_wnd; 17347 /* Now lets make it into a b/w */ 17348 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 17349 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 17350 /* Adjust to any cap */ 17351 if (rack->r_ctl.fillcw_cap && fill_bw >= rack->r_ctl.fillcw_cap) 17352 fill_bw = rack->r_ctl.fillcw_cap; 17353 17354 at_lt_bw: 17355 if (rack_bw_multipler > 0) { 17356 /* 17357 * We want to limit fill-cw to the some multiplier 17358 * of the max(lt_bw, gp_est). The normal default 17359 * is 0 for off, so a sysctl has enabled it. 17360 */ 17361 uint64_t lt_bw, gp, rate; 17362 17363 gp = rack_get_gp_est(rack); 17364 lt_bw = rack_get_lt_bw(rack); 17365 if (lt_bw > gp) 17366 rate = lt_bw; 17367 else 17368 rate = gp; 17369 rate *= rack_bw_multipler; 17370 rate /= 100; 17371 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 17372 union tcp_log_stackspecific log; 17373 struct timeval tv; 17374 17375 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 17376 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 17377 log.u_bbr.flex1 = rack_bw_multipler; 17378 log.u_bbr.flex2 = len; 17379 log.u_bbr.cur_del_rate = gp; 17380 log.u_bbr.delRate = lt_bw; 17381 log.u_bbr.bw_inuse = rate; 17382 log.u_bbr.rttProp = fill_bw; 17383 log.u_bbr.flex8 = 44; 17384 tcp_log_event(rack->rc_tp, NULL, NULL, NULL, 17385 BBR_LOG_CWND, 0, 17386 0, &log, false, NULL, 17387 __func__, __LINE__, &tv); 17388 } 17389 if (fill_bw > rate) 17390 fill_bw = rate; 17391 } 17392 /* We are below the min b/w */ 17393 if (non_paced) 17394 *rate_wanted = fill_bw; 17395 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 17396 return (slot); 17397 rack->r_via_fill_cw = 1; 17398 if (rack->r_rack_hw_rate_caps && 17399 (rack->r_ctl.crte != NULL)) { 17400 uint64_t high_rate; 17401 17402 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 17403 if (fill_bw > high_rate) { 17404 /* We are capping bw at the highest rate table entry */ 17405 if (*rate_wanted > high_rate) { 17406 /* The original rate was also capped */ 17407 rack->r_via_fill_cw = 0; 17408 } 17409 rack_log_hdwr_pacing(rack, 17410 fill_bw, high_rate, __LINE__, 17411 0, 3); 17412 fill_bw = high_rate; 17413 if (capped) 17414 *capped = 1; 17415 } 17416 } else if ((rack->r_ctl.crte == NULL) && 17417 (rack->rack_hdrw_pacing == 0) && 17418 (rack->rack_hdw_pace_ena) && 17419 rack->r_rack_hw_rate_caps && 17420 (rack->rack_attempt_hdwr_pace == 0) && 17421 (rack->rc_inp->inp_route.ro_nh != NULL) && 17422 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 17423 /* 17424 * Ok we may have a first attempt that is greater than our top rate 17425 * lets check. 
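 * For instance (illustrative rate): if the interface's best hardware
 * pacing table entry is 1.25 GB/s (a 10 Gbit/s class rate) and fill_bw
 * worked out above that, fill_bw is clamped to the table entry and
 * *capped is set so the caller knows the requested rate was limited.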
17426 */ 17427 uint64_t high_rate; 17428 17429 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 17430 if (high_rate) { 17431 if (fill_bw > high_rate) { 17432 fill_bw = high_rate; 17433 if (capped) 17434 *capped = 1; 17435 } 17436 } 17437 } 17438 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) { 17439 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 17440 fill_bw, 0, 0, HYBRID_LOG_RATE_CAP, 2, NULL, __LINE__); 17441 fill_bw = rack->r_ctl.bw_rate_cap; 17442 } 17443 /* 17444 * Ok fill_bw holds our mythical b/w to fill the cwnd 17445 * in an rtt (unless it was capped), what does that 17446 * time wise equate too? 17447 */ 17448 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 17449 lentim /= fill_bw; 17450 *rate_wanted = fill_bw; 17451 if (non_paced || (lentim < slot)) { 17452 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, 17453 0, lentim, 12, __LINE__, NULL, 0); 17454 return ((int32_t)lentim); 17455 } else 17456 return (slot); 17457 } 17458 17459 static int32_t 17460 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz, int line) 17461 { 17462 uint64_t srtt; 17463 int32_t slot = 0; 17464 int32_t minslot = 0; 17465 int can_start_hw_pacing = 1; 17466 int err; 17467 int pace_one; 17468 17469 if (rack_pace_one_seg || 17470 (rack->r_ctl.rc_user_set_min_segs == 1)) 17471 pace_one = 1; 17472 else 17473 pace_one = 0; 17474 if (rack->rc_always_pace == 0) { 17475 /* 17476 * We use the most optimistic possible cwnd/srtt for 17477 * sending calculations. This will make our 17478 * calculation anticipate getting more through 17479 * quicker then possible. But thats ok we don't want 17480 * the peer to have a gap in data sending. 17481 */ 17482 uint64_t cwnd, tr_perms = 0; 17483 int32_t reduce = 0; 17484 17485 old_method: 17486 /* 17487 * We keep no precise pacing with the old method 17488 * instead we use the pacer to mitigate bursts. 17489 */ 17490 if (rack->r_ctl.rc_rack_min_rtt) 17491 srtt = rack->r_ctl.rc_rack_min_rtt; 17492 else 17493 srtt = max(tp->t_srtt, 1); 17494 if (rack->r_ctl.rc_rack_largest_cwnd) 17495 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 17496 else 17497 cwnd = rack->r_ctl.cwnd_to_use; 17498 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 17499 tr_perms = (cwnd * 1000) / srtt; 17500 if (tr_perms == 0) { 17501 tr_perms = ctf_fixed_maxseg(tp); 17502 } 17503 /* 17504 * Calculate how long this will take to drain, if 17505 * the calculation comes out to zero, thats ok we 17506 * will use send_a_lot to possibly spin around for 17507 * more increasing tot_len_this_send to the point 17508 * that its going to require a pace, or we hit the 17509 * cwnd. Which in that case we are just waiting for 17510 * a ACK. 17511 */ 17512 slot = len / tr_perms; 17513 /* Now do we reduce the time so we don't run dry? 
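 * A quick example of the math above (illustrative values): with
 * cwnd = 100000 bytes and srtt = 20000 usec, tr_perms is
 * 100000 * 1000 / 20000 = 5000 bytes per msec; a 50000 byte send then
 * yields a 10 msec slot, which the reduction below may trim and which
 * is finally converted to usec for the hpts timer.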
*/ 17514 if (slot && rack_slot_reduction) { 17515 reduce = (slot / rack_slot_reduction); 17516 if (reduce < slot) { 17517 slot -= reduce; 17518 } else 17519 slot = 0; 17520 } 17521 slot *= HPTS_USEC_IN_MSEC; 17522 if (rack->rc_pace_to_cwnd) { 17523 uint64_t rate_wanted = 0; 17524 17525 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); 17526 rack->rc_ack_can_sendout_data = 1; 17527 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); 17528 } else 17529 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); 17530 /*******************************************************/ 17531 /* RRS: We insert non-paced call to stats here for len */ 17532 /*******************************************************/ 17533 } else { 17534 uint64_t bw_est, res, lentim, rate_wanted; 17535 uint32_t segs, oh; 17536 int capped = 0; 17537 int prev_fill; 17538 17539 if ((rack->r_rr_config == 1) && rsm) { 17540 return (rack->r_ctl.rc_min_to); 17541 } 17542 if (rack->use_fixed_rate) { 17543 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 17544 } else if ((rack->r_ctl.init_rate == 0) && 17545 (rack->r_ctl.gp_bw == 0)) { 17546 /* no way to yet do an estimate */ 17547 bw_est = rate_wanted = 0; 17548 } else if (rack->dgp_on) { 17549 bw_est = rack_get_bw(rack); 17550 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 17551 } else { 17552 uint32_t gain, rate_set = 0; 17553 17554 rate_wanted = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); 17555 rate_wanted = rack_arrive_at_discounted_rate(rack, rate_wanted, &rate_set, &gain); 17556 if (rate_set == 0) { 17557 if (rate_wanted > rack->rc_tp->snd_wnd) 17558 rate_wanted = rack->rc_tp->snd_wnd; 17559 /* Now lets make it into a b/w */ 17560 rate_wanted *= (uint64_t)HPTS_USEC_IN_SEC; 17561 rate_wanted /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 17562 } 17563 bw_est = rate_wanted; 17564 rack_log_pacing_delay_calc(rack, rack->rc_tp->snd_cwnd, 17565 rack->r_ctl.cwnd_to_use, 17566 rate_wanted, bw_est, 17567 rack->r_ctl.rc_last_us_rtt, 17568 88, __LINE__, NULL, gain); 17569 } 17570 if ((bw_est == 0) || (rate_wanted == 0) || 17571 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) { 17572 /* 17573 * No way yet to make a b/w estimate or 17574 * our raise is set incorrectly. 17575 */ 17576 goto old_method; 17577 } 17578 rack_rate_cap_bw(rack, &rate_wanted, &capped); 17579 /* We need to account for all the overheads */ 17580 segs = (len + segsiz - 1) / segsiz; 17581 /* 17582 * We need the diff between 1514 bytes (e-mtu with e-hdr) 17583 * and how much data we put in each packet. Yes this 17584 * means we may be off if we are larger than 1500 bytes 17585 * or smaller. But this just makes us more conservative. 17586 */ 17587 17588 oh = (tp->t_maxseg - segsiz) + sizeof(struct tcphdr); 17589 if (rack->r_is_v6) { 17590 #ifdef INET6 17591 oh += sizeof(struct ip6_hdr); 17592 #endif 17593 } else { 17594 #ifdef INET 17595 oh += sizeof(struct ip); 17596 #endif 17597 } 17598 /* We add a fixed 14 for the ethernet header */ 17599 oh += 14; 17600 segs *= oh; 17601 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 17602 res = lentim / rate_wanted; 17603 slot = (uint32_t)res; 17604 if (rack_hw_rate_min && 17605 (rate_wanted < rack_hw_rate_min)) { 17606 can_start_hw_pacing = 0; 17607 if (rack->r_ctl.crte) { 17608 /* 17609 * Ok we need to release it, we 17610 * have fallen too low. 
17611 */ 17612 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17613 rack->r_ctl.crte = NULL; 17614 rack->rack_attempt_hdwr_pace = 0; 17615 rack->rack_hdrw_pacing = 0; 17616 } 17617 } 17618 if (rack->r_ctl.crte && 17619 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { 17620 /* 17621 * We want more than the hardware can give us, 17622 * don't start any hw pacing. 17623 */ 17624 can_start_hw_pacing = 0; 17625 if (rack->r_rack_hw_rate_caps == 0) { 17626 /* 17627 * Ok we need to release it, we 17628 * want more than the card can give us and 17629 * no rate cap is in place. Set it up so 17630 * when we want less we can retry. 17631 */ 17632 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17633 rack->r_ctl.crte = NULL; 17634 rack->rack_attempt_hdwr_pace = 0; 17635 rack->rack_hdrw_pacing = 0; 17636 } 17637 } 17638 if ((rack->r_ctl.crte != NULL) && (rack->rc_inp->inp_snd_tag == NULL)) { 17639 /* 17640 * We lost our rate somehow, this can happen 17641 * if the interface changed underneath us. 17642 */ 17643 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17644 rack->r_ctl.crte = NULL; 17645 /* Lets re-allow attempting to setup pacing */ 17646 rack->rack_hdrw_pacing = 0; 17647 rack->rack_attempt_hdwr_pace = 0; 17648 rack_log_hdwr_pacing(rack, 17649 rate_wanted, bw_est, __LINE__, 17650 0, 6); 17651 } 17652 prev_fill = rack->r_via_fill_cw; 17653 if ((rack->rc_pace_to_cwnd) && 17654 (capped == 0) && 17655 (rack->dgp_on == 1) && 17656 (rack->use_fixed_rate == 0) && 17657 (rack->in_probe_rtt == 0) && 17658 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 17659 /* 17660 * We want to pace at our rate *or* faster to 17661 * fill the cwnd to the max if its not full. 17662 */ 17663 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); 17664 /* Re-check to make sure we are not exceeding our max b/w */ 17665 if ((rack->r_ctl.crte != NULL) && 17666 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { 17667 /* 17668 * We want more than the hardware can give us, 17669 * don't start any hw pacing. 17670 */ 17671 can_start_hw_pacing = 0; 17672 if (rack->r_rack_hw_rate_caps == 0) { 17673 /* 17674 * Ok we need to release it, we 17675 * want more than the card can give us and 17676 * no rate cap is in place. Set it up so 17677 * when we want less we can retry. 17678 */ 17679 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17680 rack->r_ctl.crte = NULL; 17681 rack->rack_attempt_hdwr_pace = 0; 17682 rack->rack_hdrw_pacing = 0; 17683 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 17684 } 17685 } 17686 } 17687 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 17688 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 17689 if ((rack->rack_hdw_pace_ena) && 17690 (can_start_hw_pacing > 0) && 17691 (rack->rack_hdrw_pacing == 0) && 17692 (rack->rack_attempt_hdwr_pace == 0)) { 17693 /* 17694 * Lets attempt to turn on hardware pacing 17695 * if we can. 
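 * A sketch of the success path that follows: tcp_set_pacing_rate() is
 * asked, on the egress interface, for a hardware rate at least as large
 * as rate_wanted (RS_PACING_GEQ); if an entry is granted,
 * rack_hdrw_pacing is switched on and the pacing burst size is
 * recomputed for the granted rate, otherwise the failure is only
 * counted and software pacing continues unchanged.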
17696 */ 17697 rack->rack_attempt_hdwr_pace = 1; 17698 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 17699 rack->rc_inp->inp_route.ro_nh->nh_ifp, 17700 rate_wanted, 17701 RS_PACING_GEQ, 17702 &err, &rack->r_ctl.crte_prev_rate);
17703 if (rack->r_ctl.crte) { 17704 rack->rack_hdrw_pacing = 1; 17705 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, segsiz, 17706 pace_one, rack->r_ctl.crte, 17707 NULL, rack->r_ctl.pace_len_divisor); 17708 rack_log_hdwr_pacing(rack, 17709 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 17710 err, 0); 17711 rack->r_ctl.last_hw_bw_req = rate_wanted; 17712 } else { 17713 counter_u64_add(rack_hw_pace_init_fail, 1); 17714 }
17715 } else if (rack->rack_hdrw_pacing && 17716 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 17717 /* Do we need to adjust our rate? */ 17718 const struct tcp_hwrate_limit_table *nrte; 17719
17720 if (rack->r_up_only && 17721 (rate_wanted < rack->r_ctl.crte->rate)) { 17722 /** 17723 * We have four possible states here 17724 * having to do with the previous time 17725 * and this time. 17726 * previous | this-time 17727 * A) 0 | 0 -- fill_cw not in the picture 17728 * B) 1 | 0 -- we were doing a fill-cw but now are not 17729 * C) 1 | 1 -- all rates from fill_cw 17730 * D) 0 | 1 -- we were doing non-fill and now we are filling 17731 * 17732 * For cases A, C and D we don't allow a drop. But for 17733 * case B, where we are now on our steady rate, we do 17734 * allow a drop. 17735 * 17736 */ 17737 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 17738 goto done_w_hdwr; 17739 }
17740 if ((rate_wanted > rack->r_ctl.crte->rate) || 17741 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 17742 if (rack_hw_rate_to_low && 17743 (bw_est < rack_hw_rate_to_low)) { 17744 /* 17745 * The pacing rate is too low for hardware, but 17746 * do allow hardware pacing to be restarted. 17747 */ 17748 rack_log_hdwr_pacing(rack, 17749 bw_est, rack->r_ctl.crte->rate, __LINE__, 17750 0, 5); 17751 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17752 rack->r_ctl.crte = NULL; 17753 rack->rack_attempt_hdwr_pace = 0; 17754 rack->rack_hdrw_pacing = 0; 17755 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 17756 goto done_w_hdwr; 17757 }
17758 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 17759 rack->rc_tp, 17760 rack->rc_inp->inp_route.ro_nh->nh_ifp, 17761 rate_wanted, 17762 RS_PACING_GEQ, 17763 &err, &rack->r_ctl.crte_prev_rate); 17764 if (nrte == NULL) { 17765 /* 17766 * Lost the rate, lets drop hardware pacing 17767 * period.
17768 */ 17769 rack->rack_hdrw_pacing = 0; 17770 rack->r_ctl.crte = NULL; 17771 rack_log_hdwr_pacing(rack, 17772 rate_wanted, 0, __LINE__, 17773 err, 1); 17774 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 17775 counter_u64_add(rack_hw_pace_lost, 1); 17776 } else if (nrte != rack->r_ctl.crte) { 17777 rack->r_ctl.crte = nrte; 17778 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, 17779 segsiz, pace_one, rack->r_ctl.crte, 17780 NULL, rack->r_ctl.pace_len_divisor); 17781 rack_log_hdwr_pacing(rack, 17782 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 17783 err, 2); 17784 rack->r_ctl.last_hw_bw_req = rate_wanted; 17785 } 17786 } else { 17787 /* We just need to adjust the segment size */ 17788 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 17789 rack_log_hdwr_pacing(rack, 17790 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 17791 0, 4); 17792 rack->r_ctl.last_hw_bw_req = rate_wanted; 17793 } 17794 } 17795 } 17796 if (minslot && (minslot > slot)) { 17797 rack_log_pacing_delay_calc(rack, minslot, slot, rack->r_ctl.crte->rate, bw_est, lentim, 17798 98, __LINE__, NULL, 0); 17799 slot = minslot; 17800 } 17801 done_w_hdwr: 17802 if (rack_limit_time_with_srtt && 17803 (rack->use_fixed_rate == 0) && 17804 (rack->rack_hdrw_pacing == 0)) { 17805 /* 17806 * Sanity check, we do not allow the pacing delay 17807 * to be longer than the SRTT of the path. If it is 17808 * a slow path, then adding a packet should increase 17809 * the RTT and compensate for this i.e. the srtt will 17810 * be greater so the allowed pacing time will be greater. 17811 * 17812 * Note this restriction is not for where a peak rate 17813 * is set, we are doing fixed pacing or hardware pacing. 17814 */ 17815 if (rack->rc_tp->t_srtt) 17816 srtt = rack->rc_tp->t_srtt; 17817 else 17818 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 17819 if (srtt < (uint64_t)slot) { 17820 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); 17821 slot = srtt; 17822 } 17823 } 17824 /*******************************************************************/ 17825 /* RRS: We insert paced call to stats here for len and rate_wanted */ 17826 /*******************************************************************/ 17827 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); 17828 } 17829 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 17830 /* 17831 * If this rate is seeing enobufs when it 17832 * goes to send then either the nic is out 17833 * of gas or we are mis-estimating the time 17834 * somehow and not letting the queue empty 17835 * completely. Lets add to the pacing time. 17836 */ 17837 int hw_boost_delay; 17838 17839 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 17840 if (hw_boost_delay > rack_enobuf_hw_max) 17841 hw_boost_delay = rack_enobuf_hw_max; 17842 else if (hw_boost_delay < rack_enobuf_hw_min) 17843 hw_boost_delay = rack_enobuf_hw_min; 17844 slot += hw_boost_delay; 17845 } 17846 return (slot); 17847 } 17848 17849 static void 17850 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 17851 tcp_seq startseq, uint32_t sb_offset) 17852 { 17853 struct rack_sendmap *my_rsm = NULL; 17854 17855 if (tp->t_state < TCPS_ESTABLISHED) { 17856 /* 17857 * We don't start any measurements if we are 17858 * not at least established. 
17859 */ 17860 return; 17861 } 17862 if (tp->t_state >= TCPS_FIN_WAIT_1) { 17863 /* 17864 * We will get no more data into the SB 17865 * this means we need to have the data available 17866 * before we start a measurement. 17867 */ 17868 17869 if (sbavail(&tptosocket(tp)->so_snd) < 17870 max(rc_init_window(rack), 17871 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { 17872 /* Nope not enough data */ 17873 return; 17874 } 17875 } 17876 tp->t_flags |= TF_GPUTINPROG; 17877 rack->r_ctl.rc_gp_cumack_ts = 0; 17878 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 17879 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 17880 tp->gput_seq = startseq; 17881 rack->app_limited_needs_set = 0; 17882 if (rack->in_probe_rtt) 17883 rack->measure_saw_probe_rtt = 1; 17884 else if ((rack->measure_saw_probe_rtt) && 17885 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 17886 rack->measure_saw_probe_rtt = 0; 17887 if (rack->rc_gp_filled) 17888 tp->gput_ts = rack->r_ctl.last_cumack_advance; 17889 else { 17890 /* Special case initial measurement */ 17891 struct timeval tv; 17892 17893 tp->gput_ts = tcp_get_usecs(&tv); 17894 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 17895 } 17896 /* 17897 * We take a guess out into the future, 17898 * if we have no measurement and no 17899 * initial rate, we measure the first 17900 * initial-windows worth of data to 17901 * speed up getting some GP measurement and 17902 * thus start pacing. 17903 */ 17904 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 17905 rack->app_limited_needs_set = 1; 17906 tp->gput_ack = startseq + max(rc_init_window(rack), 17907 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 17908 rack_log_pacing_delay_calc(rack, 17909 tp->gput_seq, 17910 tp->gput_ack, 17911 0, 17912 tp->gput_ts, 17913 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 17914 9, 17915 __LINE__, NULL, 0); 17916 rack_tend_gp_marks(tp, rack); 17917 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 17918 return; 17919 } 17920 if (sb_offset) { 17921 /* 17922 * We are out somewhere in the sb 17923 * can we use the already outstanding data? 17924 */ 17925 17926 if (rack->r_ctl.rc_app_limited_cnt == 0) { 17927 /* 17928 * Yes first one is good and in this case 17929 * the tp->gput_ts is correctly set based on 17930 * the last ack that arrived (no need to 17931 * set things up when an ack comes in). 17932 */ 17933 my_rsm = tqhash_min(rack->r_ctl.tqh); 17934 if ((my_rsm == NULL) || 17935 (my_rsm->r_rtr_cnt != 1)) { 17936 /* retransmission? */ 17937 goto use_latest; 17938 } 17939 } else { 17940 if (rack->r_ctl.rc_first_appl == NULL) { 17941 /* 17942 * If rc_first_appl is NULL 17943 * then the cnt should be 0. 17944 * This is probably an error, maybe 17945 * a KASSERT would be approprate. 17946 */ 17947 goto use_latest; 17948 } 17949 /* 17950 * If we have a marker pointer to the last one that is 17951 * app limited we can use that, but we need to set 17952 * things up so that when it gets ack'ed we record 17953 * the ack time (if its not already acked). 17954 */ 17955 rack->app_limited_needs_set = 1; 17956 /* 17957 * We want to get to the rsm that is either 17958 * next with space i.e. over 1 MSS or the one 17959 * after that (after the app-limited). 
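 * For example (illustrative sequence numbers, 1448 byte MSS): the
 * candidate below is the rsm just past the app-limited marker; if it
 * spans no more than one MSS we step to the rsm after it, while if it
 * spans, say, [2448, 7000) we set gput_seq = 2448 + 1448 = 3896 so the
 * measurement effectively begins once the first MSS of that block has
 * been acked.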
17960 */ 17961 my_rsm = tqhash_next(rack->r_ctl.tqh, rack->r_ctl.rc_first_appl); 17962 if (my_rsm) { 17963 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 17964 /* Have to use the next one */ 17965 my_rsm = tqhash_next(rack->r_ctl.tqh, my_rsm); 17966 else { 17967 /* Use after the first MSS of it is acked */ 17968 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 17969 goto start_set; 17970 } 17971 } 17972 if ((my_rsm == NULL) || 17973 (my_rsm->r_rtr_cnt != 1)) { 17974 /* 17975 * Either its a retransmit or 17976 * the last is the app-limited one. 17977 */ 17978 goto use_latest; 17979 } 17980 } 17981 tp->gput_seq = my_rsm->r_start; 17982 start_set: 17983 if (my_rsm->r_flags & RACK_ACKED) { 17984 /* 17985 * This one has been acked use the arrival ack time 17986 */ 17987 struct rack_sendmap *nrsm; 17988 17989 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 17990 rack->app_limited_needs_set = 0; 17991 /* 17992 * Ok in this path we need to use the r_end now 17993 * since this guy is the starting ack. 17994 */ 17995 tp->gput_seq = my_rsm->r_end; 17996 /* 17997 * We also need to adjust up the sendtime 17998 * to the send of the next data after my_rsm. 17999 */ 18000 nrsm = tqhash_next(rack->r_ctl.tqh, my_rsm); 18001 if (nrsm != NULL) 18002 my_rsm = nrsm; 18003 else { 18004 /* 18005 * The next as not been sent, thats the 18006 * case for using the latest. 18007 */ 18008 goto use_latest; 18009 } 18010 } 18011 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; 18012 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 18013 rack->r_ctl.rc_gp_cumack_ts = 0; 18014 if ((rack->r_ctl.cleared_app_ack == 1) && 18015 (SEQ_GEQ(rack->r_ctl.cleared_app_ack, tp->gput_seq))) { 18016 /* 18017 * We just cleared an application limited period 18018 * so the next seq out needs to skip the first 18019 * ack. 18020 */ 18021 rack->app_limited_needs_set = 1; 18022 rack->r_ctl.cleared_app_ack = 0; 18023 } 18024 rack_log_pacing_delay_calc(rack, 18025 tp->gput_seq, 18026 tp->gput_ack, 18027 (uintptr_t)my_rsm, 18028 tp->gput_ts, 18029 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 18030 9, 18031 __LINE__, my_rsm, 0); 18032 /* Now lets make sure all are marked as they should be */ 18033 rack_tend_gp_marks(tp, rack); 18034 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 18035 return; 18036 } 18037 18038 use_latest: 18039 /* 18040 * We don't know how long we may have been 18041 * idle or if this is the first-send. Lets 18042 * setup the flag so we will trim off 18043 * the first ack'd data so we get a true 18044 * measurement. 18045 */ 18046 rack->app_limited_needs_set = 1; 18047 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 18048 rack->r_ctl.rc_gp_cumack_ts = 0; 18049 /* Find this guy so we can pull the send time */ 18050 my_rsm = tqhash_find(rack->r_ctl.tqh, startseq); 18051 if (my_rsm) { 18052 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; 18053 if (my_rsm->r_flags & RACK_ACKED) { 18054 /* 18055 * Unlikely since its probably what was 18056 * just transmitted (but I am paranoid). 18057 */ 18058 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 18059 rack->app_limited_needs_set = 0; 18060 } 18061 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 18062 /* This also is unlikely */ 18063 tp->gput_seq = my_rsm->r_start; 18064 } 18065 } else { 18066 /* 18067 * TSNH unless we have some send-map limit, 18068 * and even at that it should not be hitting 18069 * that limit (we should have stopped sending). 
18070 */ 18071 struct timeval tv; 18072 18073 microuptime(&tv); 18074 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 18075 } 18076 rack_tend_gp_marks(tp, rack); 18077 rack_log_pacing_delay_calc(rack, 18078 tp->gput_seq, 18079 tp->gput_ack, 18080 (uintptr_t)my_rsm, 18081 tp->gput_ts, 18082 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 18083 9, __LINE__, NULL, 0); 18084 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 18085 } 18086 18087 static inline uint32_t 18088 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 18089 uint32_t avail, int32_t sb_offset) 18090 { 18091 uint32_t len; 18092 uint32_t sendwin; 18093 18094 if (tp->snd_wnd > cwnd_to_use) 18095 sendwin = cwnd_to_use; 18096 else 18097 sendwin = tp->snd_wnd; 18098 if (ctf_outstanding(tp) >= tp->snd_wnd) { 18099 /* We never want to go over our peers rcv-window */ 18100 len = 0; 18101 } else { 18102 uint32_t flight; 18103 18104 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 18105 if (flight >= sendwin) { 18106 /* 18107 * We have in flight what we are allowed by cwnd (if 18108 * it was rwnd blocking it would have hit above out 18109 * >= tp->snd_wnd). 18110 */ 18111 return (0); 18112 } 18113 len = sendwin - flight; 18114 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 18115 /* We would send too much (beyond the rwnd) */ 18116 len = tp->snd_wnd - ctf_outstanding(tp); 18117 } 18118 if ((len + sb_offset) > avail) { 18119 /* 18120 * We don't have that much in the SB, how much is 18121 * there? 18122 */ 18123 len = avail - sb_offset; 18124 } 18125 } 18126 return (len); 18127 } 18128 18129 static void 18130 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 18131 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 18132 int rsm_is_null, int optlen, int line, uint16_t mode) 18133 { 18134 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 18135 union tcp_log_stackspecific log; 18136 struct timeval tv; 18137 18138 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18139 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18140 log.u_bbr.flex1 = error; 18141 log.u_bbr.flex2 = flags; 18142 log.u_bbr.flex3 = rsm_is_null; 18143 log.u_bbr.flex4 = ipoptlen; 18144 log.u_bbr.flex5 = tp->rcv_numsacks; 18145 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18146 log.u_bbr.flex7 = optlen; 18147 log.u_bbr.flex8 = rack->r_fsb_inited; 18148 log.u_bbr.applimited = rack->r_fast_output; 18149 log.u_bbr.bw_inuse = rack_get_bw(rack); 18150 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 18151 log.u_bbr.cwnd_gain = mode; 18152 log.u_bbr.pkts_out = orig_len; 18153 log.u_bbr.lt_epoch = len; 18154 log.u_bbr.delivered = line; 18155 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 18156 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18157 tcp_log_event(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 18158 len, &log, false, NULL, __func__, __LINE__, &tv); 18159 } 18160 } 18161 18162 18163 static struct mbuf * 18164 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 18165 struct rack_fast_send_blk *fsb, 18166 int32_t seglimit, int32_t segsize, int hw_tls) 18167 { 18168 #ifdef KERN_TLS 18169 struct ktls_session *tls, *ntls; 18170 #ifdef INVARIANTS 18171 struct mbuf *start; 18172 #endif 18173 #endif 18174 struct mbuf *m, *n, **np, *smb; 18175 struct mbuf *top; 18176 int32_t off, soff; 18177 int32_t len = *plen; 18178 int32_t fragsize; 18179 int32_t len_cp = 0; 18180 uint32_t mlen, 
frags; 18181 18182 soff = off = the_off; 18183 smb = m = the_m; 18184 np = ⊤ 18185 top = NULL; 18186 #ifdef KERN_TLS 18187 if (hw_tls && (m->m_flags & M_EXTPG)) 18188 tls = m->m_epg_tls; 18189 else 18190 tls = NULL; 18191 #ifdef INVARIANTS 18192 start = m; 18193 #endif 18194 #endif 18195 while (len > 0) { 18196 if (m == NULL) { 18197 *plen = len_cp; 18198 break; 18199 } 18200 #ifdef KERN_TLS 18201 if (hw_tls) { 18202 if (m->m_flags & M_EXTPG) 18203 ntls = m->m_epg_tls; 18204 else 18205 ntls = NULL; 18206 18207 /* 18208 * Avoid mixing TLS records with handshake 18209 * data or TLS records from different 18210 * sessions. 18211 */ 18212 if (tls != ntls) { 18213 MPASS(m != start); 18214 *plen = len_cp; 18215 break; 18216 } 18217 } 18218 #endif 18219 mlen = min(len, m->m_len - off); 18220 if (seglimit) { 18221 /* 18222 * For M_EXTPG mbufs, add 3 segments 18223 * + 1 in case we are crossing page boundaries 18224 * + 2 in case the TLS hdr/trailer are used 18225 * It is cheaper to just add the segments 18226 * than it is to take the cache miss to look 18227 * at the mbuf ext_pgs state in detail. 18228 */ 18229 if (m->m_flags & M_EXTPG) { 18230 fragsize = min(segsize, PAGE_SIZE); 18231 frags = 3; 18232 } else { 18233 fragsize = segsize; 18234 frags = 0; 18235 } 18236 18237 /* Break if we really can't fit anymore. */ 18238 if ((frags + 1) >= seglimit) { 18239 *plen = len_cp; 18240 break; 18241 } 18242 18243 /* 18244 * Reduce size if you can't copy the whole 18245 * mbuf. If we can't copy the whole mbuf, also 18246 * adjust len so the loop will end after this 18247 * mbuf. 18248 */ 18249 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 18250 mlen = (seglimit - frags - 1) * fragsize; 18251 len = mlen; 18252 *plen = len_cp + len; 18253 } 18254 frags += howmany(mlen, fragsize); 18255 if (frags == 0) 18256 frags++; 18257 seglimit -= frags; 18258 KASSERT(seglimit > 0, 18259 ("%s: seglimit went too low", __func__)); 18260 } 18261 n = m_get(M_NOWAIT, m->m_type); 18262 *np = n; 18263 if (n == NULL) 18264 goto nospace; 18265 n->m_len = mlen; 18266 soff += mlen; 18267 len_cp += n->m_len; 18268 if (m->m_flags & (M_EXT | M_EXTPG)) { 18269 n->m_data = m->m_data + off; 18270 mb_dupcl(n, m); 18271 } else { 18272 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 18273 (u_int)n->m_len); 18274 } 18275 len -= n->m_len; 18276 off = 0; 18277 m = m->m_next; 18278 np = &n->m_next; 18279 if (len || (soff == smb->m_len)) { 18280 /* 18281 * We have more so we move forward or 18282 * we have consumed the entire mbuf and 18283 * len has fell to 0. 18284 */ 18285 soff = 0; 18286 smb = m; 18287 } 18288 18289 } 18290 if (fsb != NULL) { 18291 fsb->m = smb; 18292 fsb->off = soff; 18293 if (smb) { 18294 /* 18295 * Save off the size of the mbuf. We do 18296 * this so that we can recognize when it 18297 * has been trimmed by sbcut() as acks 18298 * come in. 18299 */ 18300 fsb->o_m_len = smb->m_len; 18301 fsb->o_t_len = M_TRAILINGROOM(smb); 18302 } else { 18303 /* 18304 * This is the case where the next mbuf went to NULL. This 18305 * means with this copy we have sent everything in the sb. 18306 * In theory we could clear the fast_output flag, but lets 18307 * not since its possible that we could get more added 18308 * and acks that call the extend function which would let 18309 * us send more. 
18310 */ 18311 fsb->o_m_len = 0; 18312 fsb->o_t_len = 0; 18313 } 18314 } 18315 return (top); 18316 nospace: 18317 if (top) 18318 m_freem(top); 18319 return (NULL); 18320 18321 } 18322 18323 /* 18324 * This is a copy of m_copym(), taking the TSO segment size/limit 18325 * constraints into account, and advancing the sndptr as it goes. 18326 */ 18327 static struct mbuf * 18328 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen, 18329 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff) 18330 { 18331 struct mbuf *m, *n; 18332 int32_t soff; 18333 18334 m = rack->r_ctl.fsb.m; 18335 if (M_TRAILINGROOM(m) != rack->r_ctl.fsb.o_t_len) { 18336 /* 18337 * The trailing space changed, mbufs can grow 18338 * at the tail but they can't shrink from 18339 * it, KASSERT that. Adjust the orig_m_len to 18340 * compensate for this change. 18341 */ 18342 KASSERT((rack->r_ctl.fsb.o_t_len > M_TRAILINGROOM(m)), 18343 ("mbuf:%p rack:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n", 18344 m, 18345 rack, 18346 (intmax_t)M_TRAILINGROOM(m), 18347 rack->r_ctl.fsb.o_t_len, 18348 rack->r_ctl.fsb.o_m_len, 18349 m->m_len)); 18350 rack->r_ctl.fsb.o_m_len += (rack->r_ctl.fsb.o_t_len - M_TRAILINGROOM(m)); 18351 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(m); 18352 } 18353 if (m->m_len < rack->r_ctl.fsb.o_m_len) { 18354 /* 18355 * Mbuf shrank, trimmed off the top by an ack, our 18356 * offset changes. 18357 */ 18358 KASSERT((rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len)), 18359 ("mbuf:%p len:%u rack:%p oml:%u soff:%u\n", 18360 m, m->m_len, 18361 rack, rack->r_ctl.fsb.o_m_len, 18362 rack->r_ctl.fsb.off)); 18363 18364 if (rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len- m->m_len)) 18365 rack->r_ctl.fsb.off -= (rack->r_ctl.fsb.o_m_len - m->m_len); 18366 else 18367 rack->r_ctl.fsb.off = 0; 18368 rack->r_ctl.fsb.o_m_len = m->m_len; 18369 #ifdef INVARIANTS 18370 } else if (m->m_len > rack->r_ctl.fsb.o_m_len) { 18371 panic("rack:%p m:%p m_len grew outside of t_space compensation", 18372 rack, m); 18373 #endif 18374 } 18375 soff = rack->r_ctl.fsb.off; 18376 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff)); 18377 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen)); 18378 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?", 18379 __FUNCTION__, 18380 rack, *plen, m, m->m_len)); 18381 /* Save off the right location before we copy and advance */ 18382 *s_soff = soff; 18383 *s_mb = rack->r_ctl.fsb.m; 18384 n = rack_fo_base_copym(m, soff, plen, 18385 &rack->r_ctl.fsb, 18386 seglimit, segsize, rack->r_ctl.fsb.hw_tls); 18387 return (n); 18388 } 18389 18390 /* Log the buffer level */ 18391 static void 18392 rack_log_queue_level(struct tcpcb *tp, struct tcp_rack *rack, 18393 int len, struct timeval *tv, 18394 uint32_t cts) 18395 { 18396 uint32_t p_rate = 0, p_queue = 0, err = 0; 18397 union tcp_log_stackspecific log; 18398 18399 #ifdef RATELIMIT 18400 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); 18401 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); 18402 #endif 18403 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18404 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18405 log.u_bbr.flex1 = p_rate; 18406 log.u_bbr.flex2 = p_queue; 18407 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; 18408 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; 18409 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; 18410 log.u_bbr.flex7 = 99; 18411 log.u_bbr.flex8 = 0; 18412 log.u_bbr.pkts_out = err; 18413 log.u_bbr.delRate = rack->r_ctl.crte->rate; 18414 log.u_bbr.timeStamp = 
cts;
18415 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
18416 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0,
18417 len, &log, false, NULL, __func__, __LINE__, tv);
18418
18419 }
18420
18421 static uint32_t
18422 rack_check_queue_level(struct tcp_rack *rack, struct tcpcb *tp,
18423 struct timeval *tv, uint32_t cts, int len, uint32_t segsiz)
18424 {
18425 uint64_t lentime = 0;
18426 #ifdef RATELIMIT
18427 uint32_t p_rate = 0, p_queue = 0, err;
18428 union tcp_log_stackspecific log;
18429 uint64_t bw;
18430
18431 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue);
18432 /* Failed or queue is zero */
18433 if (err || (p_queue == 0)) {
18434 lentime = 0;
18435 goto out;
18436 }
18437 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate);
18438 if (err) {
18439 lentime = 0;
18440 goto out;
18441 }
18442 /*
18443 * If we reach here we have some bytes in
18444 * the queue. The number returned is a value
18445 * between 0 and 0xffff where ffff is full
18446 * and 0 is empty. So how best to make this into
18447 * something usable?
18448 *
18449 * The "safer" way is to take the b/w gotten
18450 * from the query (which should be our b/w rate)
18451 * and pretend that a full send (our rc_pace_max_segs)
18452 * is outstanding. We factor it so it is as if a full
18453 * number of our MSS-sized segments, in terms of full
18454 * ethernet segments, were outstanding.
18455 */
18456 bw = p_rate / 8;
18457 if (bw) {
18458 lentime = (rack->r_ctl.rc_pace_max_segs / segsiz);
18459 lentime *= ETHERNET_SEGMENT_SIZE;
18460 lentime *= (uint64_t)HPTS_USEC_IN_SEC;
18461 lentime /= bw;
18462 } else {
18463 /* TSNH -- KASSERT? */
18464 lentime = 0;
18465 }
18466 out:
18467 if (tcp_bblogging_on(tp)) {
18468 memset(&log, 0, sizeof(log));
18469 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
18470 log.u_bbr.flex1 = p_rate;
18471 log.u_bbr.flex2 = p_queue;
18472 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using;
18473 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs;
18474 log.u_bbr.flex6 = rack->r_ctl.crte->time_between;
18475 log.u_bbr.flex7 = 99;
18476 log.u_bbr.flex8 = 0;
18477 log.u_bbr.pkts_out = err;
18478 log.u_bbr.delRate = rack->r_ctl.crte->rate;
18479 log.u_bbr.cur_del_rate = lentime;
18480 log.u_bbr.timeStamp = cts;
18481 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
18482 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0,
18483 len, &log, false, NULL, __func__, __LINE__,tv);
18484 }
18485 #endif
18486 return ((uint32_t)lentime);
18487 }
18488
18489 static int
18490 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm,
18491 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp)
18492 {
18493 /*
18494 * Enter the fast retransmit path. We are given that a sched_pin is
18495 * in place (if accounting is compiled in) and the cycle count taken
18496 * at the entry is in ts_val. The concept here is that the rsm
18497 * now holds the mbuf offsets and such so we can directly transmit
18498 * without a lot of overhead; the len field is already set for
18499 * us to prohibit us from sending too much (usually it's 1 MSS).
18500 */
18501 struct ip *ip = NULL;
18502 struct udphdr *udp = NULL;
18503 struct tcphdr *th = NULL;
18504 struct mbuf *m = NULL;
18505 struct inpcb *inp;
18506 uint8_t *cpto;
18507 struct tcp_log_buffer *lgb;
18508 #ifdef TCP_ACCOUNTING
18509 uint64_t crtsc;
18510 int cnt_thru = 1;
18511 #endif
18512 struct tcpopt to;
18513 u_char opt[TCP_MAXOLEN];
18514 uint32_t hdrlen, optlen;
18515 int32_t slot, segsiz, max_val, tso = 0, error = 0, ulen = 0;
18516 uint16_t flags;
18517 uint32_t if_hw_tsomaxsegcount = 0, startseq;
18518 uint32_t if_hw_tsomaxsegsize;
18519 int32_t ip_sendflag = IP_NO_SND_TAG_RL;
18520
18521 #ifdef INET6
18522 struct ip6_hdr *ip6 = NULL;
18523
18524 if (rack->r_is_v6) {
18525 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
18526 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
18527 } else
18528 #endif /* INET6 */
18529 {
18530 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
18531 hdrlen = sizeof(struct tcpiphdr);
18532 }
18533 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) {
18534 goto failed;
18535 }
18536 if (doing_tlp) {
18537 /* It's a TLP; add the flag. It may already be there, but be sure. */
18538 rsm->r_flags |= RACK_TLP;
18539 } else {
18540 /* If it was a TLP before, it is not one on this retransmit */
18541 rsm->r_flags &= ~RACK_TLP;
18542 }
18543 startseq = rsm->r_start;
18544 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
18545 inp = rack->rc_inp;
18546 to.to_flags = 0;
18547 flags = tcp_outflags[tp->t_state];
18548 if (flags & (TH_SYN|TH_RST)) {
18549 goto failed;
18550 }
18551 if (rsm->r_flags & RACK_HAS_FIN) {
18552 /* We can't send a FIN here */
18553 goto failed;
18554 }
18555 if (flags & TH_FIN) {
18556 /* We never send a FIN */
18557 flags &= ~TH_FIN;
18558 }
18559 if (tp->t_flags & TF_RCVD_TSTMP) {
18560 to.to_tsval = ms_cts + tp->ts_offset;
18561 to.to_tsecr = tp->ts_recent;
18562 to.to_flags = TOF_TS;
18563 }
18564 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
18565 /* TCP-MD5 (RFC2385).
*/ 18566 if (tp->t_flags & TF_SIGNATURE) 18567 to.to_flags |= TOF_SIGNATURE; 18568 #endif 18569 optlen = tcp_addoptions(&to, opt); 18570 hdrlen += optlen; 18571 udp = rack->r_ctl.fsb.udp; 18572 if (udp) 18573 hdrlen += sizeof(struct udphdr); 18574 if (rack->r_ctl.rc_pace_max_segs) 18575 max_val = rack->r_ctl.rc_pace_max_segs; 18576 else if (rack->rc_user_set_max_segs) 18577 max_val = rack->rc_user_set_max_segs * segsiz; 18578 else 18579 max_val = len; 18580 if ((tp->t_flags & TF_TSO) && 18581 V_tcp_do_tso && 18582 (len > segsiz) && 18583 (tp->t_port == 0)) 18584 tso = 1; 18585 #ifdef INET6 18586 if (MHLEN < hdrlen + max_linkhdr) 18587 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 18588 else 18589 #endif 18590 m = m_gethdr(M_NOWAIT, MT_DATA); 18591 if (m == NULL) 18592 goto failed; 18593 m->m_data += max_linkhdr; 18594 m->m_len = hdrlen; 18595 th = rack->r_ctl.fsb.th; 18596 /* Establish the len to send */ 18597 if (len > max_val) 18598 len = max_val; 18599 if ((tso) && (len + optlen > segsiz)) { 18600 uint32_t if_hw_tsomax; 18601 int32_t max_len; 18602 18603 /* extract TSO information */ 18604 if_hw_tsomax = tp->t_tsomax; 18605 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 18606 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 18607 /* 18608 * Check if we should limit by maximum payload 18609 * length: 18610 */ 18611 if (if_hw_tsomax != 0) { 18612 /* compute maximum TSO length */ 18613 max_len = (if_hw_tsomax - hdrlen - 18614 max_linkhdr); 18615 if (max_len <= 0) { 18616 goto failed; 18617 } else if (len > max_len) { 18618 len = max_len; 18619 } 18620 } 18621 if (len <= segsiz) { 18622 /* 18623 * In case there are too many small fragments don't 18624 * use TSO: 18625 */ 18626 tso = 0; 18627 } 18628 } else { 18629 tso = 0; 18630 } 18631 if ((tso == 0) && (len > segsiz)) 18632 len = segsiz; 18633 (void)tcp_get_usecs(tv); 18634 if ((len == 0) || 18635 (len <= MHLEN - hdrlen - max_linkhdr)) { 18636 goto failed; 18637 } 18638 th->th_seq = htonl(rsm->r_start); 18639 th->th_ack = htonl(tp->rcv_nxt); 18640 /* 18641 * The PUSH bit should only be applied 18642 * if the full retransmission is made. If 18643 * we are sending less than this is the 18644 * left hand edge and should not have 18645 * the PUSH bit. 
18646 */ 18647 if ((rsm->r_flags & RACK_HAD_PUSH) && 18648 (len == (rsm->r_end - rsm->r_start))) 18649 flags |= TH_PUSH; 18650 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 18651 if (th->th_win == 0) { 18652 tp->t_sndzerowin++; 18653 tp->t_flags |= TF_RXWIN0SENT; 18654 } else 18655 tp->t_flags &= ~TF_RXWIN0SENT; 18656 if (rsm->r_flags & RACK_TLP) { 18657 /* 18658 * TLP should not count in retran count, but 18659 * in its own bin 18660 */ 18661 counter_u64_add(rack_tlp_retran, 1); 18662 counter_u64_add(rack_tlp_retran_bytes, len); 18663 } else { 18664 tp->t_sndrexmitpack++; 18665 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 18666 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 18667 } 18668 #ifdef STATS 18669 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 18670 len); 18671 #endif 18672 if (rsm->m == NULL) 18673 goto failed; 18674 if (rsm->m && 18675 ((rsm->orig_m_len != rsm->m->m_len) || 18676 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { 18677 /* Fix up the orig_m_len and possibly the mbuf offset */ 18678 rack_adjust_orig_mlen(rsm); 18679 } 18680 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 18681 if (len <= segsiz) { 18682 /* 18683 * Must have ran out of mbufs for the copy 18684 * shorten it to no longer need tso. Lets 18685 * not put on sendalot since we are low on 18686 * mbufs. 18687 */ 18688 tso = 0; 18689 } 18690 if ((m->m_next == NULL) || (len <= 0)){ 18691 goto failed; 18692 } 18693 if (udp) { 18694 if (rack->r_is_v6) 18695 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18696 else 18697 ulen = hdrlen + len - sizeof(struct ip); 18698 udp->uh_ulen = htons(ulen); 18699 } 18700 m->m_pkthdr.rcvif = (struct ifnet *)0; 18701 if (TCPS_HAVERCVDSYN(tp->t_state) && 18702 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 18703 int ect = tcp_ecn_output_established(tp, &flags, len, true); 18704 if ((tp->t_state == TCPS_SYN_RECEIVED) && 18705 (tp->t_flags2 & TF2_ECN_SND_ECE)) 18706 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 18707 #ifdef INET6 18708 if (rack->r_is_v6) { 18709 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 18710 ip6->ip6_flow |= htonl(ect << 20); 18711 } 18712 else 18713 #endif 18714 { 18715 ip->ip_tos &= ~IPTOS_ECN_MASK; 18716 ip->ip_tos |= ect; 18717 } 18718 } 18719 if (rack->r_ctl.crte != NULL) { 18720 /* See if we can send via the hw queue */ 18721 slot = rack_check_queue_level(rack, tp, tv, cts, len, segsiz); 18722 /* If there is nothing in queue (no pacing time) we can send via the hw queue */ 18723 if (slot == 0) 18724 ip_sendflag = 0; 18725 } 18726 tcp_set_flags(th, flags); 18727 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 18728 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18729 if (to.to_flags & TOF_SIGNATURE) { 18730 /* 18731 * Calculate MD5 signature and put it into the place 18732 * determined before. 18733 * NOTE: since TCP options buffer doesn't point into 18734 * mbuf's data, calculate offset and use it. 18735 */ 18736 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 18737 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 18738 /* 18739 * Do not send segment if the calculation of MD5 18740 * digest has failed. 
18741 */ 18742 goto failed; 18743 } 18744 } 18745 #endif 18746 #ifdef INET6 18747 if (rack->r_is_v6) { 18748 if (tp->t_port) { 18749 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 18750 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18751 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 18752 th->th_sum = htons(0); 18753 UDPSTAT_INC(udps_opackets); 18754 } else { 18755 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 18756 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18757 th->th_sum = in6_cksum_pseudo(ip6, 18758 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 18759 0); 18760 } 18761 } 18762 #endif 18763 #if defined(INET6) && defined(INET) 18764 else 18765 #endif 18766 #ifdef INET 18767 { 18768 if (tp->t_port) { 18769 m->m_pkthdr.csum_flags = CSUM_UDP; 18770 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18771 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 18772 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 18773 th->th_sum = htons(0); 18774 UDPSTAT_INC(udps_opackets); 18775 } else { 18776 m->m_pkthdr.csum_flags = CSUM_TCP; 18777 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18778 th->th_sum = in_pseudo(ip->ip_src.s_addr, 18779 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 18780 IPPROTO_TCP + len + optlen)); 18781 } 18782 /* IP version must be set here for ipv4/ipv6 checking later */ 18783 KASSERT(ip->ip_v == IPVERSION, 18784 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 18785 } 18786 #endif 18787 if (tso) { 18788 /* 18789 * Here we use segsiz since we have no added options besides 18790 * any standard timestamp options (no DSACKs or SACKS are sent 18791 * via either fast-path). 18792 */ 18793 KASSERT(len > segsiz, 18794 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 18795 m->m_pkthdr.csum_flags |= CSUM_TSO; 18796 m->m_pkthdr.tso_segsz = segsiz; 18797 } 18798 #ifdef INET6 18799 if (rack->r_is_v6) { 18800 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 18801 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 18802 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 18803 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18804 else 18805 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18806 } 18807 #endif 18808 #if defined(INET) && defined(INET6) 18809 else 18810 #endif 18811 #ifdef INET 18812 { 18813 ip->ip_len = htons(m->m_pkthdr.len); 18814 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 18815 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 18816 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18817 if (tp->t_port == 0 || len < V_tcp_minmss) { 18818 ip->ip_off |= htons(IP_DF); 18819 } 18820 } else { 18821 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18822 } 18823 } 18824 #endif 18825 if (doing_tlp == 0) { 18826 /* Set we retransmitted */ 18827 rack->rc_gp_saw_rec = 1; 18828 } else { 18829 /* Its a TLP set ca or ss */ 18830 if (tp->snd_cwnd > tp->snd_ssthresh) { 18831 /* Set we sent in CA */ 18832 rack->rc_gp_saw_ca = 1; 18833 } else { 18834 /* Set we sent in SS */ 18835 rack->rc_gp_saw_ss = 1; 18836 } 18837 } 18838 /* Time to copy in our header */ 18839 cpto = mtod(m, uint8_t *); 18840 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 18841 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 18842 if (optlen) { 18843 bcopy(opt, th + 1, optlen); 18844 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 18845 } else { 18846 th->th_off = sizeof(struct tcphdr) >> 2; 18847 } 18848 if (tcp_bblogging_on(rack->rc_tp)) { 18849 union tcp_log_stackspecific log; 18850 18851 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 18852 
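/*
 * Note: this retransmission covers data that sat beyond a collapsed
 * receive window; log the collapse event and count the retransmitted
 * bytes in the collapsed-window counters below.
 */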
rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 18853 counter_u64_add(rack_collapsed_win_rxt, 1); 18854 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 18855 } 18856 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18857 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18858 if (rack->rack_no_prr) 18859 log.u_bbr.flex1 = 0; 18860 else 18861 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 18862 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 18863 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 18864 log.u_bbr.flex4 = max_val; 18865 /* Save off the early/late values */ 18866 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18867 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 18868 log.u_bbr.bw_inuse = rack_get_bw(rack); 18869 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 18870 if (doing_tlp == 0) 18871 log.u_bbr.flex8 = 1; 18872 else 18873 log.u_bbr.flex8 = 2; 18874 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 18875 log.u_bbr.flex7 = 55; 18876 log.u_bbr.pkts_out = tp->t_maxseg; 18877 log.u_bbr.timeStamp = cts; 18878 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18879 if (rsm && (rsm->r_rtr_cnt > 0)) { 18880 /* 18881 * When we have a retransmit we want to log the 18882 * burst at send and flight at send from before. 18883 */ 18884 log.u_bbr.flex5 = rsm->r_fas; 18885 log.u_bbr.bbr_substate = rsm->r_bas; 18886 } else { 18887 /* 18888 * This is currently unlikely until we do the 18889 * packet pair probes but I will add it for completeness. 18890 */ 18891 log.u_bbr.flex5 = log.u_bbr.inflight; 18892 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 18893 } 18894 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 18895 log.u_bbr.delivered = 0; 18896 log.u_bbr.rttProp = (uintptr_t)rsm; 18897 log.u_bbr.delRate = rsm->r_flags; 18898 log.u_bbr.delRate <<= 31; 18899 log.u_bbr.delRate |= rack->r_must_retran; 18900 log.u_bbr.delRate <<= 1; 18901 log.u_bbr.delRate |= 1; 18902 log.u_bbr.pkt_epoch = __LINE__; 18903 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 18904 len, &log, false, NULL, __func__, __LINE__, tv); 18905 } else 18906 lgb = NULL; 18907 if ((rack->r_ctl.crte != NULL) && 18908 tcp_bblogging_on(tp)) { 18909 rack_log_queue_level(tp, rack, len, tv, cts); 18910 } 18911 #ifdef INET6 18912 if (rack->r_is_v6) { 18913 error = ip6_output(m, inp->in6p_outputopts, 18914 &inp->inp_route6, 18915 ip_sendflag, NULL, NULL, inp); 18916 } 18917 else 18918 #endif 18919 #ifdef INET 18920 { 18921 error = ip_output(m, NULL, 18922 &inp->inp_route, 18923 ip_sendflag, 0, inp); 18924 } 18925 #endif 18926 m = NULL; 18927 if (lgb) { 18928 lgb->tlb_errno = error; 18929 lgb = NULL; 18930 } 18931 /* Move snd_nxt to snd_max so we don't have false retransmissions */ 18932 tp->snd_nxt = tp->snd_max; 18933 if (error) { 18934 goto failed; 18935 } else if (rack->rc_hw_nobuf && (ip_sendflag != IP_NO_SND_TAG_RL)) { 18936 rack->rc_hw_nobuf = 0; 18937 rack->r_ctl.rc_agg_delayed = 0; 18938 rack->r_early = 0; 18939 rack->r_late = 0; 18940 rack->r_ctl.rc_agg_early = 0; 18941 } 18942 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 18943 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls, segsiz); 18944 if (doing_tlp) { 18945 rack->rc_tlp_in_progress = 1; 18946 rack->r_ctl.rc_tlp_cnt_out++; 18947 } 18948 if (error == 0) { 18949 counter_u64_add(rack_total_bytes, len); 18950 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 18951 if (doing_tlp) { 18952 
rack->rc_last_sent_tlp_past_cumack = 0;
18953 rack->rc_last_sent_tlp_seq_valid = 1;
18954 rack->r_ctl.last_sent_tlp_seq = rsm->r_start;
18955 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start;
18956 }
18957 if (rack->r_ctl.rc_prr_sndcnt >= len)
18958 rack->r_ctl.rc_prr_sndcnt -= len;
18959 else
18960 rack->r_ctl.rc_prr_sndcnt = 0;
18961 }
18962 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
18963 rack->forced_ack = 0; /* If we send something zap the FA flag */
18964 if (IN_FASTRECOVERY(tp->t_flags) && rsm)
18965 rack->r_ctl.retran_during_recovery += len;
18966 {
18967 int idx;
18968
18969 idx = (len / segsiz) + 3;
18970 if (idx >= TCP_MSS_ACCT_ATIMER)
18971 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
18972 else
18973 counter_u64_add(rack_out_size[idx], 1);
18974 }
18975 if (tp->t_rtttime == 0) {
18976 tp->t_rtttime = ticks;
18977 tp->t_rtseq = startseq;
18978 KMOD_TCPSTAT_INC(tcps_segstimed);
18979 }
18980 counter_u64_add(rack_fto_rsm_send, 1);
18981 if (error && (error == ENOBUFS)) {
18982 if (rack->r_ctl.crte != NULL) {
18983 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF);
18984 if (tcp_bblogging_on(rack->rc_tp))
18985 rack_log_queue_level(tp, rack, len, tv, cts);
18986 } else
18987 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF);
18988 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
18989 if (rack->rc_enobuf < 0x7f)
18990 rack->rc_enobuf++;
18991 if (slot < (10 * HPTS_USEC_IN_MSEC))
18992 slot = 10 * HPTS_USEC_IN_MSEC;
18993 if (rack->r_ctl.crte != NULL) {
18994 counter_u64_add(rack_saw_enobuf_hw, 1);
18995 tcp_rl_log_enobuf(rack->r_ctl.crte);
18996 }
18997 counter_u64_add(rack_saw_enobuf, 1);
18998 } else {
18999 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz, __LINE__);
19000 }
19001 rack_start_hpts_timer(rack, tp, cts, slot, len, 0);
19002 #ifdef TCP_ACCOUNTING
19003 crtsc = get_cyclecount();
19004 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19005 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru;
19006 }
19007 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19008 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
19009 }
19010 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
19011 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz);
19012 }
19013 sched_unpin();
19014 #endif
19015 return (0);
19016 failed:
19017 if (m)
19018 m_free(m);
19019 return (-1);
19020 }
19021
19022 static void
19023 rack_sndbuf_autoscale(struct tcp_rack *rack)
19024 {
19025 /*
19026 * Automatic sizing of send socket buffer. Often the send buffer
19027 * size is not optimally adjusted to the actual network conditions
19028 * at hand (delay bandwidth product). Setting the buffer size too
19029 * small limits throughput on links with high bandwidth and high
19030 * delay (e.g. trans-continental/oceanic links). Setting the
19031 * buffer size too big consumes too much real kernel memory,
19032 * especially with many connections on busy servers.
19033 *
19034 * The criteria to step up the send buffer one notch are:
19035 * 1. receive window of remote host is larger than send buffer
19036 * (with a fudge factor of 5/4th);
19037 * 2. send buffer is filled to 7/8th with data (so we actually
19038 * have data to make use of it);
19039 * 3. send buffer fill has not hit maximal automatic size;
19040 * 4. our send window (slow start and congestion controlled) is
19041 * larger than sent but unacknowledged data in send buffer.
19042 *
19043 * Note that the rack version moves things much faster since
19044 * we want to avoid hitting cache lines in the rack_fast_output()
19045 * path, so this is called much less often and thus moves
19046 * the SB forward by a percentage.
19047 */
19048 struct socket *so;
19049 struct tcpcb *tp;
19050 uint32_t sendwin, scaleup;
19051
19052 tp = rack->rc_tp;
19053 so = rack->rc_inp->inp_socket;
19054 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd);
19055 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
19056 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
19057 sbused(&so->so_snd) >=
19058 (so->so_snd.sb_hiwat / 8 * 7) &&
19059 sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
19060 sendwin >= (sbused(&so->so_snd) -
19061 (tp->snd_max - tp->snd_una))) {
19062 if (rack_autosndbuf_inc)
19063 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100;
19064 else
19065 scaleup = V_tcp_autosndbuf_inc;
19066 if (scaleup < V_tcp_autosndbuf_inc)
19067 scaleup = V_tcp_autosndbuf_inc;
19068 scaleup += so->so_snd.sb_hiwat;
19069 if (scaleup > V_tcp_autosndbuf_max)
19070 scaleup = V_tcp_autosndbuf_max;
19071 if (!sbreserve_locked(so, SO_SND, scaleup, curthread))
19072 so->so_snd.sb_flags &= ~SB_AUTOSIZE;
19073 }
19074 }
19075 }
19076
19077 static int
19078 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val,
19079 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err)
19080 {
19081 /*
19082 * Enter to do fast output. We are given that the sched_pin is
19083 * in place (if accounting is compiled in) and the cycle count taken
19084 * at entry is in place in ts_val. The idea here is that
19085 * we know how many more bytes need to be sent (presumably either
19086 * during pacing or to fill the cwnd, and that was greater than
19087 * the max-burst). We have how much to send and all the info we
19088 * need to just send.
19089 */ 19090 #ifdef INET 19091 struct ip *ip = NULL; 19092 #endif 19093 struct udphdr *udp = NULL; 19094 struct tcphdr *th = NULL; 19095 struct mbuf *m, *s_mb; 19096 struct inpcb *inp; 19097 uint8_t *cpto; 19098 struct tcp_log_buffer *lgb; 19099 #ifdef TCP_ACCOUNTING 19100 uint64_t crtsc; 19101 #endif 19102 struct tcpopt to; 19103 u_char opt[TCP_MAXOLEN]; 19104 uint32_t hdrlen, optlen; 19105 #ifdef TCP_ACCOUNTING 19106 int cnt_thru = 1; 19107 #endif 19108 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0; 19109 uint16_t flags; 19110 uint32_t s_soff; 19111 uint32_t if_hw_tsomaxsegcount = 0, startseq; 19112 uint32_t if_hw_tsomaxsegsize; 19113 uint32_t add_flag = RACK_SENT_FP; 19114 #ifdef INET6 19115 struct ip6_hdr *ip6 = NULL; 19116 19117 if (rack->r_is_v6) { 19118 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 19119 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 19120 } else 19121 #endif /* INET6 */ 19122 { 19123 #ifdef INET 19124 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 19125 hdrlen = sizeof(struct tcpiphdr); 19126 #endif 19127 } 19128 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 19129 m = NULL; 19130 goto failed; 19131 } 19132 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 19133 startseq = tp->snd_max; 19134 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 19135 inp = rack->rc_inp; 19136 len = rack->r_ctl.fsb.left_to_send; 19137 to.to_flags = 0; 19138 flags = rack->r_ctl.fsb.tcp_flags; 19139 if (tp->t_flags & TF_RCVD_TSTMP) { 19140 to.to_tsval = ms_cts + tp->ts_offset; 19141 to.to_tsecr = tp->ts_recent; 19142 to.to_flags = TOF_TS; 19143 } 19144 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 19145 /* TCP-MD5 (RFC2385). */ 19146 if (tp->t_flags & TF_SIGNATURE) 19147 to.to_flags |= TOF_SIGNATURE; 19148 #endif 19149 optlen = tcp_addoptions(&to, opt); 19150 hdrlen += optlen; 19151 udp = rack->r_ctl.fsb.udp; 19152 if (udp) 19153 hdrlen += sizeof(struct udphdr); 19154 if (rack->r_ctl.rc_pace_max_segs) 19155 max_val = rack->r_ctl.rc_pace_max_segs; 19156 else if (rack->rc_user_set_max_segs) 19157 max_val = rack->rc_user_set_max_segs * segsiz; 19158 else 19159 max_val = len; 19160 if ((tp->t_flags & TF_TSO) && 19161 V_tcp_do_tso && 19162 (len > segsiz) && 19163 (tp->t_port == 0)) 19164 tso = 1; 19165 again: 19166 #ifdef INET6 19167 if (MHLEN < hdrlen + max_linkhdr) 19168 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 19169 else 19170 #endif 19171 m = m_gethdr(M_NOWAIT, MT_DATA); 19172 if (m == NULL) 19173 goto failed; 19174 m->m_data += max_linkhdr; 19175 m->m_len = hdrlen; 19176 th = rack->r_ctl.fsb.th; 19177 /* Establish the len to send */ 19178 if (len > max_val) 19179 len = max_val; 19180 if ((tso) && (len + optlen > segsiz)) { 19181 uint32_t if_hw_tsomax; 19182 int32_t max_len; 19183 19184 /* extract TSO information */ 19185 if_hw_tsomax = tp->t_tsomax; 19186 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 19187 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 19188 /* 19189 * Check if we should limit by maximum payload 19190 * length: 19191 */ 19192 if (if_hw_tsomax != 0) { 19193 /* compute maximum TSO length */ 19194 max_len = (if_hw_tsomax - hdrlen - 19195 max_linkhdr); 19196 if (max_len <= 0) { 19197 goto failed; 19198 } else if (len > max_len) { 19199 len = max_len; 19200 } 19201 } 19202 if (len <= segsiz) { 19203 /* 19204 * In case there are too many small fragments don't 19205 * use TSO: 19206 */ 19207 tso = 0; 19208 } 19209 } else { 19210 tso = 0; 19211 } 19212 if ((tso == 0) && (len > segsiz)) 19213 len = segsiz; 19214 
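/*
 * Illustrative note on the TSO clamp above (numbers are hypothetical):
 * with if_hw_tsomax = 65535, an IPv4 + TCP header of 52 bytes
 * (including a 12 byte timestamp option) and max_linkhdr = 16,
 * max_len = 65535 - 52 - 16 = 65467, so len is only reduced when the
 * requested send exceeds that; and if the clamp leaves len at or
 * below segsiz, TSO is turned back off.
 */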
(void)tcp_get_usecs(tv); 19215 if ((len == 0) || 19216 (len <= MHLEN - hdrlen - max_linkhdr)) { 19217 goto failed; 19218 } 19219 sb_offset = tp->snd_max - tp->snd_una; 19220 th->th_seq = htonl(tp->snd_max); 19221 th->th_ack = htonl(tp->rcv_nxt); 19222 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 19223 if (th->th_win == 0) { 19224 tp->t_sndzerowin++; 19225 tp->t_flags |= TF_RXWIN0SENT; 19226 } else 19227 tp->t_flags &= ~TF_RXWIN0SENT; 19228 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 19229 KMOD_TCPSTAT_INC(tcps_sndpack); 19230 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 19231 #ifdef STATS 19232 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 19233 len); 19234 #endif 19235 if (rack->r_ctl.fsb.m == NULL) 19236 goto failed; 19237 19238 /* s_mb and s_soff are saved for rack_log_output */ 19239 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 19240 &s_mb, &s_soff); 19241 if (len <= segsiz) { 19242 /* 19243 * Must have ran out of mbufs for the copy 19244 * shorten it to no longer need tso. Lets 19245 * not put on sendalot since we are low on 19246 * mbufs. 19247 */ 19248 tso = 0; 19249 } 19250 if (rack->r_ctl.fsb.rfo_apply_push && 19251 (len == rack->r_ctl.fsb.left_to_send)) { 19252 tcp_set_flags(th, flags | TH_PUSH); 19253 add_flag |= RACK_HAD_PUSH; 19254 } 19255 if ((m->m_next == NULL) || (len <= 0)){ 19256 goto failed; 19257 } 19258 if (udp) { 19259 if (rack->r_is_v6) 19260 ulen = hdrlen + len - sizeof(struct ip6_hdr); 19261 else 19262 ulen = hdrlen + len - sizeof(struct ip); 19263 udp->uh_ulen = htons(ulen); 19264 } 19265 m->m_pkthdr.rcvif = (struct ifnet *)0; 19266 if (TCPS_HAVERCVDSYN(tp->t_state) && 19267 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 19268 int ect = tcp_ecn_output_established(tp, &flags, len, false); 19269 if ((tp->t_state == TCPS_SYN_RECEIVED) && 19270 (tp->t_flags2 & TF2_ECN_SND_ECE)) 19271 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 19272 #ifdef INET6 19273 if (rack->r_is_v6) { 19274 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 19275 ip6->ip6_flow |= htonl(ect << 20); 19276 } 19277 else 19278 #endif 19279 { 19280 #ifdef INET 19281 ip->ip_tos &= ~IPTOS_ECN_MASK; 19282 ip->ip_tos |= ect; 19283 #endif 19284 } 19285 } 19286 tcp_set_flags(th, flags); 19287 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 19288 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 19289 if (to.to_flags & TOF_SIGNATURE) { 19290 /* 19291 * Calculate MD5 signature and put it into the place 19292 * determined before. 19293 * NOTE: since TCP options buffer doesn't point into 19294 * mbuf's data, calculate offset and use it. 19295 */ 19296 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 19297 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 19298 /* 19299 * Do not send segment if the calculation of MD5 19300 * digest has failed. 
19301 */ 19302 goto failed; 19303 } 19304 } 19305 #endif 19306 #ifdef INET6 19307 if (rack->r_is_v6) { 19308 if (tp->t_port) { 19309 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 19310 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 19311 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 19312 th->th_sum = htons(0); 19313 UDPSTAT_INC(udps_opackets); 19314 } else { 19315 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 19316 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 19317 th->th_sum = in6_cksum_pseudo(ip6, 19318 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 19319 0); 19320 } 19321 } 19322 #endif 19323 #if defined(INET6) && defined(INET) 19324 else 19325 #endif 19326 #ifdef INET 19327 { 19328 if (tp->t_port) { 19329 m->m_pkthdr.csum_flags = CSUM_UDP; 19330 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 19331 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 19332 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 19333 th->th_sum = htons(0); 19334 UDPSTAT_INC(udps_opackets); 19335 } else { 19336 m->m_pkthdr.csum_flags = CSUM_TCP; 19337 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 19338 th->th_sum = in_pseudo(ip->ip_src.s_addr, 19339 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 19340 IPPROTO_TCP + len + optlen)); 19341 } 19342 /* IP version must be set here for ipv4/ipv6 checking later */ 19343 KASSERT(ip->ip_v == IPVERSION, 19344 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 19345 } 19346 #endif 19347 if (tso) { 19348 /* 19349 * Here we use segsiz since we have no added options besides 19350 * any standard timestamp options (no DSACKs or SACKS are sent 19351 * via either fast-path). 19352 */ 19353 KASSERT(len > segsiz, 19354 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 19355 m->m_pkthdr.csum_flags |= CSUM_TSO; 19356 m->m_pkthdr.tso_segsz = segsiz; 19357 } 19358 #ifdef INET6 19359 if (rack->r_is_v6) { 19360 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 19361 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 19362 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 19363 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 19364 else 19365 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 19366 } 19367 #endif 19368 #if defined(INET) && defined(INET6) 19369 else 19370 #endif 19371 #ifdef INET 19372 { 19373 ip->ip_len = htons(m->m_pkthdr.len); 19374 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 19375 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 19376 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 19377 if (tp->t_port == 0 || len < V_tcp_minmss) { 19378 ip->ip_off |= htons(IP_DF); 19379 } 19380 } else { 19381 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 19382 } 19383 } 19384 #endif 19385 if (tp->snd_cwnd > tp->snd_ssthresh) { 19386 /* Set we sent in CA */ 19387 rack->rc_gp_saw_ca = 1; 19388 } else { 19389 /* Set we sent in SS */ 19390 rack->rc_gp_saw_ss = 1; 19391 } 19392 /* Time to copy in our header */ 19393 cpto = mtod(m, uint8_t *); 19394 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 19395 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 19396 if (optlen) { 19397 bcopy(opt, th + 1, optlen); 19398 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 19399 } else { 19400 th->th_off = sizeof(struct tcphdr) >> 2; 19401 } 19402 if ((rack->r_ctl.crte != NULL) && 19403 tcp_bblogging_on(tp)) { 19404 rack_log_queue_level(tp, rack, len, tv, cts); 19405 } 19406 if (tcp_bblogging_on(rack->rc_tp)) { 19407 union tcp_log_stackspecific log; 19408 19409 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 19410 log.u_bbr.inhpts = 
tcp_in_hpts(rack->rc_tp); 19411 if (rack->rack_no_prr) 19412 log.u_bbr.flex1 = 0; 19413 else 19414 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 19415 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 19416 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 19417 log.u_bbr.flex4 = max_val; 19418 /* Save off the early/late values */ 19419 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 19420 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 19421 log.u_bbr.bw_inuse = rack_get_bw(rack); 19422 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 19423 log.u_bbr.flex8 = 0; 19424 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 19425 log.u_bbr.flex7 = 44; 19426 log.u_bbr.pkts_out = tp->t_maxseg; 19427 log.u_bbr.timeStamp = cts; 19428 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 19429 log.u_bbr.flex5 = log.u_bbr.inflight; 19430 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 19431 log.u_bbr.delivered = 0; 19432 log.u_bbr.rttProp = 0; 19433 log.u_bbr.delRate = rack->r_must_retran; 19434 log.u_bbr.delRate <<= 1; 19435 log.u_bbr.pkt_epoch = __LINE__; 19436 /* For fast output no retrans so just inflight and how many mss we send */ 19437 log.u_bbr.flex5 = log.u_bbr.inflight; 19438 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 19439 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 19440 len, &log, false, NULL, __func__, __LINE__, tv); 19441 } else 19442 lgb = NULL; 19443 #ifdef INET6 19444 if (rack->r_is_v6) { 19445 error = ip6_output(m, inp->in6p_outputopts, 19446 &inp->inp_route6, 19447 0, NULL, NULL, inp); 19448 } 19449 #endif 19450 #if defined(INET) && defined(INET6) 19451 else 19452 #endif 19453 #ifdef INET 19454 { 19455 error = ip_output(m, NULL, 19456 &inp->inp_route, 19457 0, 0, inp); 19458 } 19459 #endif 19460 if (lgb) { 19461 lgb->tlb_errno = error; 19462 lgb = NULL; 19463 } 19464 if (error) { 19465 *send_err = error; 19466 m = NULL; 19467 goto failed; 19468 } else if (rack->rc_hw_nobuf) { 19469 rack->rc_hw_nobuf = 0; 19470 rack->r_ctl.rc_agg_delayed = 0; 19471 rack->r_early = 0; 19472 rack->r_late = 0; 19473 rack->r_ctl.rc_agg_early = 0; 19474 } 19475 if ((error == 0) && (rack->lt_bw_up == 0)) { 19476 /* Unlikely */ 19477 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(tv); 19478 rack->r_ctl.lt_seq = tp->snd_una; 19479 rack->lt_bw_up = 1; 19480 } else if ((error == 0) && 19481 (((tp->snd_max + len) - rack->r_ctl.lt_seq) > 0x7fffffff)) { 19482 /* 19483 * Need to record what we have since we are 19484 * approaching seq wrap. 
19485 */ 19486 struct timeval tv; 19487 uint64_t tmark; 19488 19489 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); 19490 rack->r_ctl.lt_seq = tp->snd_una; 19491 tmark = tcp_get_u64_usecs(&tv); 19492 if (tmark > rack->r_ctl.lt_timemark) { 19493 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 19494 rack->r_ctl.lt_timemark = tmark; 19495 } 19496 } 19497 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 19498 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls, segsiz); 19499 m = NULL; 19500 if (tp->snd_una == tp->snd_max) { 19501 rack->r_ctl.rc_tlp_rxt_last_time = cts; 19502 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 19503 tp->t_acktime = ticks; 19504 } 19505 counter_u64_add(rack_total_bytes, len); 19506 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 19507 19508 rack->forced_ack = 0; /* If we send something zap the FA flag */ 19509 tot_len += len; 19510 if ((tp->t_flags & TF_GPUTINPROG) == 0) 19511 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 19512 tp->snd_max += len; 19513 tp->snd_nxt = tp->snd_max; 19514 if (rack->rc_new_rnd_needed) { 19515 rack_new_round_starts(tp, rack, tp->snd_max); 19516 } 19517 { 19518 int idx; 19519 19520 idx = (len / segsiz) + 3; 19521 if (idx >= TCP_MSS_ACCT_ATIMER) 19522 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 19523 else 19524 counter_u64_add(rack_out_size[idx], 1); 19525 } 19526 if (len <= rack->r_ctl.fsb.left_to_send) 19527 rack->r_ctl.fsb.left_to_send -= len; 19528 else 19529 rack->r_ctl.fsb.left_to_send = 0; 19530 if (rack->r_ctl.fsb.left_to_send < segsiz) { 19531 rack->r_fast_output = 0; 19532 rack->r_ctl.fsb.left_to_send = 0; 19533 /* At the end of fast_output scale up the sb */ 19534 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd); 19535 rack_sndbuf_autoscale(rack); 19536 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd); 19537 } 19538 if (tp->t_rtttime == 0) { 19539 tp->t_rtttime = ticks; 19540 tp->t_rtseq = startseq; 19541 KMOD_TCPSTAT_INC(tcps_segstimed); 19542 } 19543 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 19544 (max_val > len) && 19545 (tso == 0)) { 19546 max_val -= len; 19547 len = segsiz; 19548 th = rack->r_ctl.fsb.th; 19549 #ifdef TCP_ACCOUNTING 19550 cnt_thru++; 19551 #endif 19552 goto again; 19553 } 19554 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 19555 counter_u64_add(rack_fto_send, 1); 19556 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz, __LINE__); 19557 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0); 19558 #ifdef TCP_ACCOUNTING 19559 crtsc = get_cyclecount(); 19560 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19561 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 19562 } 19563 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19564 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 19565 } 19566 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19567 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz); 19568 } 19569 sched_unpin(); 19570 #endif 19571 return (0); 19572 failed: 19573 if (m) 19574 m_free(m); 19575 rack->r_fast_output = 0; 19576 return (-1); 19577 } 19578 19579 static inline void 19580 rack_setup_fast_output(struct tcpcb *tp, struct tcp_rack *rack, 19581 struct sockbuf *sb, 19582 int len, int orig_len, int segsiz, uint32_t pace_max_seg, 19583 bool hw_tls, 19584 uint16_t flags) 19585 { 19586 rack->r_fast_output = 1; 19587 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19588 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19589 
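/*
 * The o_m_len snapshot above and the o_t_len snapshot below let
 * rack_fo_m_copym() detect when this mbuf has been trimmed by
 * sbcut() as acks arrive, or has grown at the tail, and compensate
 * the saved offset and length accordingly.
 */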
rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); 19590 rack->r_ctl.fsb.tcp_flags = flags; 19591 rack->r_ctl.fsb.left_to_send = orig_len - len; 19592 if (rack->r_ctl.fsb.left_to_send < pace_max_seg) { 19593 /* Less than a full sized pace, lets not */ 19594 rack->r_fast_output = 0; 19595 return; 19596 } else { 19597 /* Round down to the nearest pace_max_seg */ 19598 rack->r_ctl.fsb.left_to_send = rounddown(rack->r_ctl.fsb.left_to_send, pace_max_seg); 19599 } 19600 if (hw_tls) 19601 rack->r_ctl.fsb.hw_tls = 1; 19602 else 19603 rack->r_ctl.fsb.hw_tls = 0; 19604 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19605 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19606 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19607 (tp->snd_max - tp->snd_una))); 19608 if (rack->r_ctl.fsb.left_to_send < segsiz) 19609 rack->r_fast_output = 0; 19610 else { 19611 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19612 rack->r_ctl.fsb.rfo_apply_push = 1; 19613 else 19614 rack->r_ctl.fsb.rfo_apply_push = 0; 19615 } 19616 } 19617 19618 static uint32_t 19619 rack_get_hpts_pacing_min_for_bw(struct tcp_rack *rack, int32_t segsiz) 19620 { 19621 uint64_t min_time; 19622 uint32_t maxlen; 19623 19624 min_time = (uint64_t)get_hpts_min_sleep_time(); 19625 maxlen = (uint32_t)((rack->r_ctl.gp_bw * min_time) / (uint64_t)HPTS_USEC_IN_SEC); 19626 maxlen = roundup(maxlen, segsiz); 19627 return (maxlen); 19628 } 19629 19630 static struct rack_sendmap * 19631 rack_check_collapsed(struct tcp_rack *rack, uint32_t cts) 19632 { 19633 struct rack_sendmap *rsm = NULL; 19634 int thresh; 19635 19636 restart: 19637 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); 19638 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) { 19639 /* Nothing, strange turn off validity */ 19640 rack->r_collapse_point_valid = 0; 19641 return (NULL); 19642 } 19643 /* Can we send it yet? */ 19644 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) { 19645 /* 19646 * Receiver window has not grown enough for 19647 * the segment to be put on the wire. 19648 */ 19649 return (NULL); 19650 } 19651 if (rsm->r_flags & RACK_ACKED) { 19652 /* 19653 * It has been sacked, lets move to the 19654 * next one if possible. 19655 */ 19656 rack->r_ctl.last_collapse_point = rsm->r_end; 19657 /* Are we done? */ 19658 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 19659 rack->r_ctl.high_collapse_point)) { 19660 rack->r_collapse_point_valid = 0; 19661 return (NULL); 19662 } 19663 goto restart; 19664 } 19665 /* Now has it been long enough ? 
*/ 19666 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts, __LINE__, 1); 19667 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) { 19668 rack_log_collapse(rack, rsm->r_start, 19669 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 19670 thresh, __LINE__, 6, rsm->r_flags, rsm); 19671 return (rsm); 19672 } 19673 /* Not enough time */ 19674 rack_log_collapse(rack, rsm->r_start, 19675 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 19676 thresh, __LINE__, 7, rsm->r_flags, rsm); 19677 return (NULL); 19678 } 19679 19680 static inline void 19681 rack_validate_sizes(struct tcp_rack *rack, int32_t *len, int32_t segsiz, uint32_t pace_max_seg) 19682 { 19683 if ((rack->full_size_rxt == 0) && 19684 (rack->shape_rxt_to_pacing_min == 0) && 19685 (*len >= segsiz)) { 19686 *len = segsiz; 19687 } else if (rack->shape_rxt_to_pacing_min && 19688 rack->gp_ready) { 19689 /* We use pacing min as shaping len req */ 19690 uint32_t maxlen; 19691 19692 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 19693 if (*len > maxlen) 19694 *len = maxlen; 19695 } else { 19696 /* 19697 * The else is full_size_rxt is on so send it all 19698 * note we do need to check this for exceeding 19699 * our max segment size due to the fact that 19700 * we do sometimes merge chunks together i.e. 19701 * we cannot just assume that we will never have 19702 * a chunk greater than pace_max_seg 19703 */ 19704 if (*len > pace_max_seg) 19705 *len = pace_max_seg; 19706 } 19707 } 19708 19709 static int 19710 rack_output(struct tcpcb *tp) 19711 { 19712 struct socket *so; 19713 uint32_t recwin; 19714 uint32_t sb_offset, s_moff = 0; 19715 int32_t len, error = 0; 19716 uint16_t flags; 19717 struct mbuf *m, *s_mb = NULL; 19718 struct mbuf *mb; 19719 uint32_t if_hw_tsomaxsegcount = 0; 19720 uint32_t if_hw_tsomaxsegsize; 19721 int32_t segsiz, minseg; 19722 long tot_len_this_send = 0; 19723 #ifdef INET 19724 struct ip *ip = NULL; 19725 #endif 19726 struct udphdr *udp = NULL; 19727 struct tcp_rack *rack; 19728 struct tcphdr *th; 19729 uint8_t pass = 0; 19730 uint8_t mark = 0; 19731 uint8_t check_done = 0; 19732 uint8_t wanted_cookie = 0; 19733 u_char opt[TCP_MAXOLEN]; 19734 unsigned ipoptlen, optlen, hdrlen, ulen=0; 19735 uint32_t rack_seq; 19736 19737 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 19738 unsigned ipsec_optlen = 0; 19739 19740 #endif 19741 int32_t idle, sendalot; 19742 uint32_t tot_idle; 19743 int32_t sub_from_prr = 0; 19744 volatile int32_t sack_rxmit; 19745 struct rack_sendmap *rsm = NULL; 19746 int32_t tso, mtu; 19747 struct tcpopt to; 19748 int32_t slot = 0; 19749 int32_t sup_rack = 0; 19750 uint32_t cts, ms_cts, delayed, early; 19751 uint32_t add_flag = RACK_SENT_SP; 19752 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */ 19753 uint8_t doing_tlp = 0; 19754 uint32_t cwnd_to_use, pace_max_seg; 19755 int32_t do_a_prefetch = 0; 19756 int32_t prefetch_rsm = 0; 19757 int32_t orig_len = 0; 19758 struct timeval tv; 19759 int32_t prefetch_so_done = 0; 19760 struct tcp_log_buffer *lgb; 19761 struct inpcb *inp = tptoinpcb(tp); 19762 struct sockbuf *sb; 19763 uint64_t ts_val = 0; 19764 #ifdef TCP_ACCOUNTING 19765 uint64_t crtsc; 19766 #endif 19767 #ifdef INET6 19768 struct ip6_hdr *ip6 = NULL; 19769 int32_t isipv6; 19770 #endif 19771 bool hpts_calling, hw_tls = false; 19772 19773 NET_EPOCH_ASSERT(); 19774 INP_WLOCK_ASSERT(inp); 19775 19776 /* setup and take the cache hits here */ 19777 rack = (struct tcp_rack *)tp->t_fb_ptr; 19778 #ifdef 
TCP_ACCOUNTING 19779 sched_pin(); 19780 ts_val = get_cyclecount(); 19781 #endif 19782 hpts_calling = !!(tp->t_flags2 & TF2_HPTS_CALLS); 19783 tp->t_flags2 &= ~TF2_HPTS_CALLS; 19784 #ifdef TCP_OFFLOAD 19785 if (tp->t_flags & TF_TOE) { 19786 #ifdef TCP_ACCOUNTING 19787 sched_unpin(); 19788 #endif 19789 return (tcp_offload_output(tp)); 19790 } 19791 #endif 19792 if (rack->rack_deferred_inited == 0) { 19793 /* 19794 * If we are the connecting socket we will 19795 * hit rack_init() when no sequence numbers 19796 * are setup. This makes it so we must defer 19797 * some initialization. Call that now. 19798 */ 19799 rack_deferred_init(tp, rack); 19800 } 19801 /* 19802 * For TFO connections in SYN_RECEIVED, only allow the initial 19803 * SYN|ACK and those sent by the retransmit timer. 19804 */ 19805 if ((tp->t_flags & TF_FASTOPEN) && 19806 (tp->t_state == TCPS_SYN_RECEIVED) && 19807 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 19808 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 19809 #ifdef TCP_ACCOUNTING 19810 sched_unpin(); 19811 #endif 19812 return (0); 19813 } 19814 #ifdef INET6 19815 if (rack->r_state) { 19816 /* Use the cache line loaded if possible */ 19817 isipv6 = rack->r_is_v6; 19818 } else { 19819 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 19820 } 19821 #endif 19822 early = 0; 19823 cts = tcp_get_usecs(&tv); 19824 ms_cts = tcp_tv_to_mssectick(&tv); 19825 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 19826 tcp_in_hpts(rack->rc_tp)) { 19827 /* 19828 * We are on the hpts for some timer but not hptsi output. 19829 * Remove from the hpts unconditionally. 19830 */ 19831 rack_timer_cancel(tp, rack, cts, __LINE__); 19832 } 19833 /* Are we pacing and late? */ 19834 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 19835 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 19836 /* We are delayed */ 19837 delayed = cts - rack->r_ctl.rc_last_output_to; 19838 } else { 19839 delayed = 0; 19840 } 19841 /* Do the timers, which may override the pacer */ 19842 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 19843 int retval; 19844 19845 retval = rack_process_timers(tp, rack, cts, hpts_calling, 19846 &doing_tlp); 19847 if (retval != 0) { 19848 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 19849 #ifdef TCP_ACCOUNTING 19850 sched_unpin(); 19851 #endif 19852 /* 19853 * If timers want tcp_drop(), then pass error out, 19854 * otherwise suppress it. 19855 */ 19856 return (retval < 0 ? retval : 0); 19857 } 19858 } 19859 if (rack->rc_in_persist) { 19860 if (tcp_in_hpts(rack->rc_tp) == 0) { 19861 /* Timer is not running */ 19862 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 19863 } 19864 #ifdef TCP_ACCOUNTING 19865 sched_unpin(); 19866 #endif 19867 return (0); 19868 } 19869 if ((rack->rc_ack_required == 1) && 19870 (rack->r_timer_override == 0)){ 19871 /* A timeout occurred and no ack has arrived */ 19872 if (tcp_in_hpts(rack->rc_tp) == 0) { 19873 /* Timer is not running */ 19874 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 19875 } 19876 #ifdef TCP_ACCOUNTING 19877 sched_unpin(); 19878 #endif 19879 return (0); 19880 } 19881 if ((rack->r_timer_override) || 19882 (rack->rc_ack_can_sendout_data) || 19883 (delayed) || 19884 (tp->t_state < TCPS_ESTABLISHED)) { 19885 rack->rc_ack_can_sendout_data = 0; 19886 if (tcp_in_hpts(rack->rc_tp)) 19887 tcp_hpts_remove(rack->rc_tp); 19888 } else if (tcp_in_hpts(rack->rc_tp)) { 19889 /* 19890 * On the hpts you can't pass even if ACKNOW is on, we will 19891 * when the hpts fires. 
19892 */ 19893 #ifdef TCP_ACCOUNTING 19894 crtsc = get_cyclecount(); 19895 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19896 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 19897 } 19898 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19899 tp->tcp_cnt_counters[SND_BLOCKED]++; 19900 } 19901 sched_unpin(); 19902 #endif 19903 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 19904 return (0); 19905 } 19906 /* Finish out both pacing early and late accounting */ 19907 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 19908 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 19909 early = rack->r_ctl.rc_last_output_to - cts; 19910 } else 19911 early = 0; 19912 if (delayed && (rack->rc_always_pace == 1)) { 19913 rack->r_ctl.rc_agg_delayed += delayed; 19914 rack->r_late = 1; 19915 } else if (early && (rack->rc_always_pace == 1)) { 19916 rack->r_ctl.rc_agg_early += early; 19917 rack->r_early = 1; 19918 } else if (rack->rc_always_pace == 0) { 19919 /* Non-paced we are not late */ 19920 rack->r_ctl.rc_agg_delayed = rack->r_ctl.rc_agg_early = 0; 19921 rack->r_early = rack->r_late = 0; 19922 } 19923 /* Now that early/late accounting is done turn off the flag */ 19924 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 19925 rack->r_wanted_output = 0; 19926 rack->r_timer_override = 0; 19927 if ((tp->t_state != rack->r_state) && 19928 TCPS_HAVEESTABLISHED(tp->t_state)) { 19929 rack_set_state(tp, rack); 19930 } 19931 if ((rack->r_fast_output) && 19932 (doing_tlp == 0) && 19933 (tp->rcv_numsacks == 0)) { 19934 int ret; 19935 19936 error = 0; 19937 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 19938 if (ret >= 0) 19939 return(ret); 19940 else if (error) { 19941 inp = rack->rc_inp; 19942 so = inp->inp_socket; 19943 sb = &so->so_snd; 19944 goto nomore; 19945 } 19946 } 19947 inp = rack->rc_inp; 19948 /* 19949 * For TFO connections in SYN_SENT or SYN_RECEIVED, 19950 * only allow the initial SYN or SYN|ACK and those sent 19951 * by the retransmit timer. 19952 */ 19953 if ((tp->t_flags & TF_FASTOPEN) && 19954 ((tp->t_state == TCPS_SYN_RECEIVED) || 19955 (tp->t_state == TCPS_SYN_SENT)) && 19956 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 19957 (tp->t_rxtshift == 0)) { /* not a retransmit */ 19958 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 19959 so = inp->inp_socket; 19960 sb = &so->so_snd; 19961 goto just_return_nolock; 19962 } 19963 /* 19964 * Determine length of data that should be transmitted, and flags 19965 * that will be used. If there is some data or critical controls 19966 * (SYN, RST) to send, then transmit; otherwise, investigate 19967 * further. 
19968 */ 19969 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 19970 if (tp->t_idle_reduce) { 19971 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 19972 rack_cc_after_idle(rack, tp); 19973 } 19974 tp->t_flags &= ~TF_LASTIDLE; 19975 if (idle) { 19976 if (tp->t_flags & TF_MORETOCOME) { 19977 tp->t_flags |= TF_LASTIDLE; 19978 idle = 0; 19979 } 19980 } 19981 if ((tp->snd_una == tp->snd_max) && 19982 rack->r_ctl.rc_went_idle_time && 19983 (cts > rack->r_ctl.rc_went_idle_time)) { 19984 tot_idle = (cts - rack->r_ctl.rc_went_idle_time); 19985 if (tot_idle > rack_min_probertt_hold) { 19986 /* Count as a probe rtt */ 19987 if (rack->in_probe_rtt == 0) { 19988 rack->r_ctl.rc_lower_rtt_us_cts = cts; 19989 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 19990 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 19991 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 19992 } else { 19993 rack_exit_probertt(rack, cts); 19994 } 19995 } 19996 } 19997 if (rack_use_fsb && 19998 (rack->r_ctl.fsb.tcp_ip_hdr) && 19999 (rack->r_fsb_inited == 0) && 20000 (rack->r_state != TCPS_CLOSED)) 20001 rack_init_fsb_block(tp, rack, tcp_outflags[tp->t_state]); 20002 if (rack->rc_sendvars_notset == 1) { 20003 rack->rc_sendvars_notset = 0; 20004 /* 20005 * Make sure any TCP timers (keep-alive) is not running. 20006 */ 20007 tcp_timer_stop(tp); 20008 } 20009 if ((rack->rack_no_prr == 1) && 20010 (rack->rc_always_pace == 0)) { 20011 /* 20012 * Sanity check before sending, if we have 20013 * no-pacing enabled and prr is turned off that 20014 * is a logistics error. Correct this by turnning 20015 * prr back on. A user *must* set some form of 20016 * pacing in order to turn PRR off. We do this 20017 * in the output path so that we can avoid socket 20018 * option ordering issues that would occur if we 20019 * tried to do it while setting rack_no_prr on. 20020 */ 20021 rack->rack_no_prr = 0; 20022 } 20023 if ((rack->pcm_enabled == 1) && 20024 (rack->pcm_needed == 0) && 20025 (tot_idle > 0)) { 20026 /* 20027 * We have been idle some micro seconds. We need 20028 * to factor this in to see if a PCM is needed. 20029 */ 20030 uint32_t rtts_idle, rnds; 20031 20032 if (tp->t_srtt) 20033 rtts_idle = tot_idle / tp->t_srtt; 20034 else 20035 rtts_idle = 0; 20036 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; 20037 rack->r_ctl.pcm_idle_rounds += rtts_idle; 20038 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { 20039 rack->pcm_needed = 1; 20040 rack_log_pcm(rack, 8, rack->r_ctl.last_pcm_round, rtts_idle, rack->r_ctl.current_round ); 20041 } 20042 } 20043 again: 20044 sendalot = 0; 20045 cts = tcp_get_usecs(&tv); 20046 ms_cts = tcp_tv_to_mssectick(&tv); 20047 tso = 0; 20048 mtu = 0; 20049 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 20050 minseg = segsiz; 20051 if (rack->r_ctl.rc_pace_max_segs == 0) 20052 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 20053 else 20054 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 20055 if (TCPS_HAVEESTABLISHED(tp->t_state) && 20056 (rack->r_ctl.pcm_max_seg == 0)) { 20057 /* 20058 * We set in our first send so we know that the ctf_fixed_maxseg 20059 * has been fully set. If we do it in rack_init() we most likely 20060 * see 512 bytes so we end up at 5120, not desirable. 
20061 */ 20062 rack->r_ctl.pcm_max_seg = rc_init_window(rack); 20063 if (rack->r_ctl.pcm_max_seg < (ctf_fixed_maxseg(tp) * 10)) { 20064 /* 20065 * Assure our initial PCM probe is at least 10 MSS. 20066 */ 20067 rack->r_ctl.pcm_max_seg = ctf_fixed_maxseg(tp) * 10; 20068 } 20069 } 20070 if ((rack->r_ctl.pcm_max_seg != 0) && (rack->pcm_needed == 1)) { 20071 uint32_t rw_avail, cwa; 20072 20073 if (tp->snd_wnd > ctf_outstanding(tp)) 20074 rw_avail = tp->snd_wnd - ctf_outstanding(tp); 20075 else 20076 rw_avail = 0; 20077 if (tp->snd_cwnd > ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked)) 20078 cwa = tp->snd_cwnd -ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 20079 else 20080 cwa = 0; 20081 if ((cwa >= rack->r_ctl.pcm_max_seg) && 20082 (rw_avail > rack->r_ctl.pcm_max_seg)) { 20083 /* Raise up the max seg for this trip through */ 20084 pace_max_seg = rack->r_ctl.pcm_max_seg; 20085 /* Disable any fast output */ 20086 rack->r_fast_output = 0; 20087 } 20088 if (rack_verbose_logging) { 20089 rack_log_pcm(rack, 4, 20090 cwa, rack->r_ctl.pcm_max_seg, rw_avail); 20091 } 20092 } 20093 sb_offset = tp->snd_max - tp->snd_una; 20094 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 20095 flags = tcp_outflags[tp->t_state]; 20096 while (rack->rc_free_cnt < rack_free_cache) { 20097 rsm = rack_alloc(rack); 20098 if (rsm == NULL) { 20099 if (hpts_calling) 20100 /* Retry in a ms */ 20101 slot = (1 * HPTS_USEC_IN_MSEC); 20102 so = inp->inp_socket; 20103 sb = &so->so_snd; 20104 goto just_return_nolock; 20105 } 20106 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 20107 rack->rc_free_cnt++; 20108 rsm = NULL; 20109 } 20110 sack_rxmit = 0; 20111 len = 0; 20112 rsm = NULL; 20113 if (flags & TH_RST) { 20114 SOCKBUF_LOCK(&inp->inp_socket->so_snd); 20115 so = inp->inp_socket; 20116 sb = &so->so_snd; 20117 goto send; 20118 } 20119 if (rack->r_ctl.rc_resend) { 20120 /* Retransmit timer */ 20121 rsm = rack->r_ctl.rc_resend; 20122 rack->r_ctl.rc_resend = NULL; 20123 len = rsm->r_end - rsm->r_start; 20124 sack_rxmit = 1; 20125 sendalot = 0; 20126 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 20127 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 20128 __func__, __LINE__, 20129 rsm->r_start, tp->snd_una, tp, rack, rsm)); 20130 sb_offset = rsm->r_start - tp->snd_una; 20131 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 20132 } else if (rack->r_collapse_point_valid && 20133 ((rsm = rack_check_collapsed(rack, cts)) != NULL)) { 20134 /* 20135 * If an RSM is returned then enough time has passed 20136 * for us to retransmit it. Move up the collapse point, 20137 * since this rsm has its chance to retransmit now. 20138 */ 20139 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_RXT); 20140 rack->r_ctl.last_collapse_point = rsm->r_end; 20141 /* Are we done? 
*/ 20142 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 20143 rack->r_ctl.high_collapse_point)) 20144 rack->r_collapse_point_valid = 0; 20145 sack_rxmit = 1; 20146 /* We are not doing a TLP */ 20147 doing_tlp = 0; 20148 len = rsm->r_end - rsm->r_start; 20149 sb_offset = rsm->r_start - tp->snd_una; 20150 sendalot = 0; 20151 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 20152 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) { 20153 /* We have a retransmit that takes precedence */ 20154 if ((!IN_FASTRECOVERY(tp->t_flags)) && 20155 ((rsm->r_flags & RACK_MUST_RXT) == 0) && 20156 ((tp->t_flags & TF_WASFRECOVERY) == 0)) { 20157 /* Enter recovery if not induced by a time-out */ 20158 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 20159 } 20160 #ifdef INVARIANTS 20161 if (SEQ_LT(rsm->r_start, tp->snd_una)) { 20162 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n", 20163 tp, rack, rsm, rsm->r_start, tp->snd_una); 20164 } 20165 #endif 20166 len = rsm->r_end - rsm->r_start; 20167 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 20168 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 20169 __func__, __LINE__, 20170 rsm->r_start, tp->snd_una, tp, rack, rsm)); 20171 sb_offset = rsm->r_start - tp->snd_una; 20172 sendalot = 0; 20173 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 20174 if (len > 0) { 20175 sack_rxmit = 1; 20176 KMOD_TCPSTAT_INC(tcps_sack_rexmits); 20177 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes, 20178 min(len, segsiz)); 20179 } 20180 } else if (rack->r_ctl.rc_tlpsend) { 20181 /* Tail loss probe */ 20182 long cwin; 20183 long tlen; 20184 20185 /* 20186 * Check if we can do a TLP with a RACK'd packet; 20187 * this can happen if we are not doing the rack 20188 * cheat and we skipped to a TLP and it 20189 * went off. 20190 */ 20191 rsm = rack->r_ctl.rc_tlpsend; 20192 /* We are doing a TLP, make sure the flag is present */ 20193 rsm->r_flags |= RACK_TLP; 20194 rack->r_ctl.rc_tlpsend = NULL; 20195 sack_rxmit = 1; 20196 tlen = rsm->r_end - rsm->r_start; 20197 if (tlen > segsiz) 20198 tlen = segsiz; 20199 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 20200 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 20201 __func__, __LINE__, 20202 rsm->r_start, tp->snd_una, tp, rack, rsm)); 20203 sb_offset = rsm->r_start - tp->snd_una; 20204 cwin = min(tp->snd_wnd, tlen); 20205 len = cwin; 20206 } 20207 if (rack->r_must_retran && 20208 (doing_tlp == 0) && 20209 (SEQ_GT(tp->snd_max, tp->snd_una)) && 20210 (rsm == NULL)) { 20211 /* 20212 * There are two different ways that we 20213 * can get into this block: 20214 * a) This is a non-sack connection, we had a time-out 20215 * and thus r_must_retran was set and everything 20216 * left outstanding has been marked for retransmit. 20217 * b) The MTU of the path shrank, so that everything 20218 * was marked to be retransmitted with the smaller 20219 * mtu and r_must_retran was set. 20220 * 20221 * This means that we expect the sendmap (outstanding) 20222 * to all be marked must. We can use the tmap to 20223 * look at them. 20224 * 20225 */ 20226 int sendwin, flight; 20227 20228 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 20229 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto); 20230 if (flight >= sendwin) { 20231 /* 20232 * We can't send yet. 20233 */ 20234 so = inp->inp_socket; 20235 sb = &so->so_snd; 20236 goto just_return_nolock; 20237 } 20238 /* 20239 * This is the case a/b mentioned above. All 20240 * outstanding/not-acked should be marked. 20241 * We can use the tmap to find them.
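* The tmap is kept in transmit order, so TAILQ_FIRST() below hands back the oldest unacked segment; if even that one no longer carries RACK_MUST_RXT the forced-retransmit pass is over and r_must_retran can be cleared.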
20242 */ 20243 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 20244 if (rsm == NULL) { 20245 /* TSNH */ 20246 rack->r_must_retran = 0; 20247 rack->r_ctl.rc_out_at_rto = 0; 20248 so = inp->inp_socket; 20249 sb = &so->so_snd; 20250 goto just_return_nolock; 20251 } 20252 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { 20253 /* 20254 * The first one does not have the flag, did we collapse 20255 * further up in our list? 20256 */ 20257 rack->r_must_retran = 0; 20258 rack->r_ctl.rc_out_at_rto = 0; 20259 rsm = NULL; 20260 sack_rxmit = 0; 20261 } else { 20262 sack_rxmit = 1; 20263 len = rsm->r_end - rsm->r_start; 20264 sb_offset = rsm->r_start - tp->snd_una; 20265 sendalot = 0; 20266 if ((rack->full_size_rxt == 0) && 20267 (rack->shape_rxt_to_pacing_min == 0) && 20268 (len >= segsiz)) 20269 len = segsiz; 20270 else if (rack->shape_rxt_to_pacing_min && 20271 rack->gp_ready) { 20272 /* We use pacing min as shaping len req */ 20273 uint32_t maxlen; 20274 20275 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 20276 if (len > maxlen) 20277 len = maxlen; 20278 } 20279 /* 20280 * Delay removing the flag RACK_MUST_RXT so 20281 * that the fastpath for retransmit will 20282 * work with this rsm. 20283 */ 20284 } 20285 } 20286 /* 20287 * Enforce a connection sendmap count limit if set 20288 * as long as we are not retransmiting. 20289 */ 20290 if ((rsm == NULL) && 20291 (V_tcp_map_entries_limit > 0) && 20292 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 20293 counter_u64_add(rack_to_alloc_limited, 1); 20294 if (!rack->alloc_limit_reported) { 20295 rack->alloc_limit_reported = 1; 20296 counter_u64_add(rack_alloc_limited_conns, 1); 20297 } 20298 so = inp->inp_socket; 20299 sb = &so->so_snd; 20300 goto just_return_nolock; 20301 } 20302 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 20303 /* we are retransmitting the fin */ 20304 len--; 20305 if (len) { 20306 /* 20307 * When retransmitting data do *not* include the 20308 * FIN. This could happen from a TLP probe. 
20309 */ 20310 flags &= ~TH_FIN; 20311 } 20312 } 20313 if (rsm && rack->r_fsb_inited && 20314 rack_use_rsm_rfo && 20315 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 20316 int ret; 20317 20318 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); 20319 if (ret == 0) 20320 return (0); 20321 } 20322 so = inp->inp_socket; 20323 sb = &so->so_snd; 20324 if (do_a_prefetch == 0) { 20325 kern_prefetch(sb, &do_a_prefetch); 20326 do_a_prefetch = 1; 20327 } 20328 #ifdef NETFLIX_SHARED_CWND 20329 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 20330 rack->rack_enable_scwnd) { 20331 /* We are doing cwnd sharing */ 20332 if (rack->gp_ready && 20333 (rack->rack_attempted_scwnd == 0) && 20334 (rack->r_ctl.rc_scw == NULL) && 20335 tp->t_lib) { 20336 /* The pcbid is in, lets make an attempt */ 20337 counter_u64_add(rack_try_scwnd, 1); 20338 rack->rack_attempted_scwnd = 1; 20339 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 20340 &rack->r_ctl.rc_scw_index, 20341 segsiz); 20342 } 20343 if (rack->r_ctl.rc_scw && 20344 (rack->rack_scwnd_is_idle == 1) && 20345 sbavail(&so->so_snd)) { 20346 /* we are no longer out of data */ 20347 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 20348 rack->rack_scwnd_is_idle = 0; 20349 } 20350 if (rack->r_ctl.rc_scw) { 20351 /* First lets update and get the cwnd */ 20352 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 20353 rack->r_ctl.rc_scw_index, 20354 tp->snd_cwnd, tp->snd_wnd, segsiz); 20355 } 20356 } 20357 #endif 20358 /* 20359 * Get standard flags, and add SYN or FIN if requested by 'hidden' 20360 * state flags. 20361 */ 20362 if (tp->t_flags & TF_NEEDFIN) 20363 flags |= TH_FIN; 20364 if (tp->t_flags & TF_NEEDSYN) 20365 flags |= TH_SYN; 20366 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 20367 void *end_rsm; 20368 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 20369 if (end_rsm) 20370 kern_prefetch(end_rsm, &prefetch_rsm); 20371 prefetch_rsm = 1; 20372 } 20373 SOCKBUF_LOCK(sb); 20374 if ((sack_rxmit == 0) && 20375 (TCPS_HAVEESTABLISHED(tp->t_state) || 20376 (tp->t_flags & TF_FASTOPEN))) { 20377 /* 20378 * We are not retransmitting (sack_rxmit is 0) so we 20379 * are sending new data. This is always based on snd_max. 20380 * Now in theory snd_max may be equal to snd_una, if so 20381 * then nothing is outstanding and the offset would be 0. 20382 */ 20383 uint32_t avail; 20384 20385 avail = sbavail(sb); 20386 if (SEQ_GT(tp->snd_max, tp->snd_una) && avail) 20387 sb_offset = tp->snd_max - tp->snd_una; 20388 else 20389 sb_offset = 0; 20390 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { 20391 if (rack->r_ctl.rc_tlp_new_data) { 20392 /* TLP is forcing out new data */ 20393 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 20394 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 20395 } 20396 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { 20397 if (tp->snd_wnd > sb_offset) 20398 len = tp->snd_wnd - sb_offset; 20399 else 20400 len = 0; 20401 } else { 20402 len = rack->r_ctl.rc_tlp_new_data; 20403 } 20404 rack->r_ctl.rc_tlp_new_data = 0; 20405 } else { 20406 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); 20407 } 20408 if ((rack->r_ctl.crte == NULL) && 20409 IN_FASTRECOVERY(tp->t_flags) && 20410 (rack->full_size_rxt == 0) && 20411 (rack->shape_rxt_to_pacing_min == 0) && 20412 (len > segsiz)) { 20413 /* 20414 * For prr=off, we need to send only 1 MSS 20415 * at a time. 
We do this because another sack could 20416 * be arriving that causes us to send retransmits and 20417 * we don't want to be on a long pace due to a larger send 20418 * that keeps us from sending out the retransmit. 20419 */ 20420 len = segsiz; 20421 } else if (rack->shape_rxt_to_pacing_min && 20422 rack->gp_ready) { 20423 /* We use pacing min as shaping len req */ 20424 uint32_t maxlen; 20425 20426 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 20427 if (len > maxlen) 20428 len = maxlen; 20429 }/* The else is full_size_rxt is on so send it all */ 20430 } else { 20431 uint32_t outstanding; 20432 /* 20433 * We are inside of a Fast recovery episode, this 20434 * is caused by a SACK or 3 dup acks. At this point 20435 * we have sent all the retransmissions and we rely 20436 * on PRR to dictate what we will send in the form of 20437 * new data. 20438 */ 20439 20440 outstanding = tp->snd_max - tp->snd_una; 20441 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 20442 if (tp->snd_wnd > outstanding) { 20443 len = tp->snd_wnd - outstanding; 20444 /* Check to see if we have the data */ 20445 if ((sb_offset + len) > avail) { 20446 /* It does not all fit */ 20447 if (avail > sb_offset) 20448 len = avail - sb_offset; 20449 else 20450 len = 0; 20451 } 20452 } else { 20453 len = 0; 20454 } 20455 } else if (avail > sb_offset) { 20456 len = avail - sb_offset; 20457 } else { 20458 len = 0; 20459 } 20460 if (len > 0) { 20461 if (len > rack->r_ctl.rc_prr_sndcnt) { 20462 len = rack->r_ctl.rc_prr_sndcnt; 20463 } 20464 if (len > 0) { 20465 sub_from_prr = 1; 20466 } 20467 } 20468 if (len > segsiz) { 20469 /* 20470 * We should never send more than a MSS when 20471 * retransmitting or sending new data in prr 20472 * mode unless the override flag is on. Most 20473 * likely the PRR algorithm is not going to 20474 * let us send a lot as well :-) 20475 */ 20476 if (rack->r_ctl.rc_prr_sendalot == 0) { 20477 len = segsiz; 20478 } 20479 } else if (len < segsiz) { 20480 /* 20481 * Do we send any? The idea here is if the 20482 * send empty's the socket buffer we want to 20483 * do it. However if not then lets just wait 20484 * for our prr_sndcnt to get bigger. 20485 */ 20486 long leftinsb; 20487 20488 leftinsb = sbavail(sb) - sb_offset; 20489 if (leftinsb > len) { 20490 /* This send does not empty the sb */ 20491 len = 0; 20492 } 20493 } 20494 } 20495 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 20496 /* 20497 * If you have not established 20498 * and are not doing FAST OPEN 20499 * no data please. 20500 */ 20501 if ((sack_rxmit == 0) && 20502 !(tp->t_flags & TF_FASTOPEN)) { 20503 len = 0; 20504 sb_offset = 0; 20505 } 20506 } 20507 if (prefetch_so_done == 0) { 20508 kern_prefetch(so, &prefetch_so_done); 20509 prefetch_so_done = 1; 20510 } 20511 orig_len = len; 20512 /* 20513 * Lop off SYN bit if it has already been sent. However, if this is 20514 * SYN-SENT state and if segment contains data and if we don't know 20515 * that foreign host supports TAO, suppress sending segment. 20516 */ 20517 if ((flags & TH_SYN) && 20518 SEQ_GT(tp->snd_max, tp->snd_una) && 20519 ((sack_rxmit == 0) && 20520 (tp->t_rxtshift == 0))) { 20521 /* 20522 * When sending additional segments following a TFO SYN|ACK, 20523 * do not include the SYN bit. 20524 */ 20525 if ((tp->t_flags & TF_FASTOPEN) && 20526 (tp->t_state == TCPS_SYN_RECEIVED)) 20527 flags &= ~TH_SYN; 20528 } 20529 /* 20530 * Be careful not to send data and/or FIN on SYN segments. 
This 20531 * measure is needed to prevent interoperability problems with not 20532 * fully conformant TCP implementations. 20533 */ 20534 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 20535 len = 0; 20536 flags &= ~TH_FIN; 20537 } 20538 /* 20539 * On TFO sockets, ensure no data is sent in the following cases: 20540 * 20541 * - When retransmitting SYN|ACK on a passively-created socket 20542 * 20543 * - When retransmitting SYN on an actively created socket 20544 * 20545 * - When sending a zero-length cookie (cookie request) on an 20546 * actively created socket 20547 * 20548 * - When the socket is in the CLOSED state (RST is being sent) 20549 */ 20550 if ((tp->t_flags & TF_FASTOPEN) && 20551 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 20552 ((tp->t_state == TCPS_SYN_SENT) && 20553 (tp->t_tfo_client_cookie_len == 0)) || 20554 (flags & TH_RST))) { 20555 sack_rxmit = 0; 20556 len = 0; 20557 } 20558 /* Without fast-open there should never be data sent on a SYN */ 20559 if ((flags & TH_SYN) && !(tp->t_flags & TF_FASTOPEN)) { 20560 len = 0; 20561 } 20562 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 20563 /* We only send 1 MSS if we have a DSACK block */ 20564 add_flag |= RACK_SENT_W_DSACK; 20565 len = segsiz; 20566 } 20567 if (len <= 0) { 20568 /* 20569 * We have nothing to send, or the window shrank, or 20570 * is closed, do we need to go into persists? 20571 */ 20572 len = 0; 20573 if ((tp->snd_wnd == 0) && 20574 (TCPS_HAVEESTABLISHED(tp->t_state)) && 20575 (tp->snd_una == tp->snd_max) && 20576 (sb_offset < (int)sbavail(sb))) { 20577 rack_enter_persist(tp, rack, cts, tp->snd_una); 20578 } 20579 } else if ((rsm == NULL) && 20580 (doing_tlp == 0) && 20581 (len < pace_max_seg)) { 20582 /* 20583 * We are not sending a maximum sized segment for 20584 * some reason. Should we not send anything (think 20585 * sws or persists)? 20586 */ 20587 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 20588 (TCPS_HAVEESTABLISHED(tp->t_state)) && 20589 (len < minseg) && 20590 (len < (int)(sbavail(sb) - sb_offset))) { 20591 /* 20592 * Here the rwnd is less than 20593 * the minimum pacing size, this is not a retransmit, 20594 * we are established and 20595 * the send is not the last in the socket buffer 20596 * we send nothing, and we may enter persists 20597 * if nothing is outstanding. 20598 */ 20599 len = 0; 20600 if (tp->snd_max == tp->snd_una) { 20601 /* 20602 * Nothing out we can 20603 * go into persists. 20604 */ 20605 rack_enter_persist(tp, rack, cts, tp->snd_una); 20606 } 20607 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 20608 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 20609 (len < (int)(sbavail(sb) - sb_offset)) && 20610 (len < minseg)) { 20611 /* 20612 * Here we are not retransmitting, and 20613 * the cwnd is not so small that we could 20614 * not send at least a min size (rxt timer 20615 * not having gone off), We have 2 segments or 20616 * more already in flight, its not the tail end 20617 * of the socket buffer and the cwnd is blocking 20618 * us from sending out a minimum pacing segment size. 20619 * Lets not send anything. 20620 */ 20621 len = 0; 20622 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 20623 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 20624 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 20625 (len < (int)(sbavail(sb) - sb_offset)) && 20626 (TCPS_HAVEESTABLISHED(tp->t_state))) { 20627 /* 20628 * Here we have a send window but we have 20629 * filled it up and we can't send another pacing segment. 
20630 * We also have in flight more than 2 segments 20631 * and we are not completing the sb i.e. we allow 20632 * the last bytes of the sb to go out even if 20633 * its not a full pacing segment. 20634 */ 20635 len = 0; 20636 } else if ((rack->r_ctl.crte != NULL) && 20637 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && 20638 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) && 20639 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && 20640 (len < (int)(sbavail(sb) - sb_offset))) { 20641 /* 20642 * Here we are doing hardware pacing, this is not a TLP, 20643 * we are not sending a pace max segment size, there is rwnd 20644 * room to send at least N pace_max_seg, the cwnd is greater 20645 * than or equal to a full pacing segments plus 4 mss and we have 2 or 20646 * more segments in flight and its not the tail of the socket buffer. 20647 * 20648 * We don't want to send instead we need to get more ack's in to 20649 * allow us to send a full pacing segment. Normally, if we are pacing 20650 * about the right speed, we should have finished our pacing 20651 * send as most of the acks have come back if we are at the 20652 * right rate. This is a bit fuzzy since return path delay 20653 * can delay the acks, which is why we want to make sure we 20654 * have cwnd space to have a bit more than a max pace segments in flight. 20655 * 20656 * If we have not gotten our acks back we are pacing at too high a 20657 * rate delaying will not hurt and will bring our GP estimate down by 20658 * injecting the delay. If we don't do this we will send 20659 * 2 MSS out in response to the acks being clocked in which 20660 * defeats the point of hw-pacing (i.e. to help us get 20661 * larger TSO's out). 20662 */ 20663 len = 0; 20664 } 20665 20666 } 20667 /* len will be >= 0 after this point. */ 20668 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 20669 rack_sndbuf_autoscale(rack); 20670 /* 20671 * Decide if we can use TCP Segmentation Offloading (if supported by 20672 * hardware). 20673 * 20674 * TSO may only be used if we are in a pure bulk sending state. The 20675 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 20676 * options prevent using TSO. With TSO the TCP header is the same 20677 * (except for the sequence number) for all generated packets. This 20678 * makes it impossible to transmit any options which vary per 20679 * generated segment or packet. 20680 * 20681 * IPv4 handling has a clear separation of ip options and ip header 20682 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 20683 * the right thing below to provide length of just ip options and thus 20684 * checking for ipoptlen is enough to decide if ip options are present. 20685 */ 20686 ipoptlen = 0; 20687 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 20688 /* 20689 * Pre-calculate here as we save another lookup into the darknesses 20690 * of IPsec that way and can actually decide if TSO is ok. 
20691 */ 20692 #ifdef INET6 20693 if (isipv6 && IPSEC_ENABLED(ipv6)) 20694 ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp); 20695 #ifdef INET 20696 else 20697 #endif 20698 #endif /* INET6 */ 20699 #ifdef INET 20700 if (IPSEC_ENABLED(ipv4)) 20701 ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp); 20702 #endif /* INET */ 20703 #endif 20704 20705 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 20706 ipoptlen += ipsec_optlen; 20707 #endif 20708 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && 20709 (tp->t_port == 0) && 20710 ((tp->t_flags & TF_SIGNATURE) == 0) && 20711 tp->rcv_numsacks == 0 && sack_rxmit == 0 && 20712 ipoptlen == 0) 20713 tso = 1; 20714 { 20715 uint32_t outstanding __unused; 20716 20717 outstanding = tp->snd_max - tp->snd_una; 20718 if (tp->t_flags & TF_SENTFIN) { 20719 /* 20720 * If we sent a fin, snd_max is 1 higher than 20721 * snd_una 20722 */ 20723 outstanding--; 20724 } 20725 if (sack_rxmit) { 20726 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 20727 flags &= ~TH_FIN; 20728 } 20729 } 20730 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), 20731 (long)TCP_MAXWIN << tp->rcv_scale); 20732 20733 /* 20734 * Sender silly window avoidance. We transmit under the following 20735 * conditions when len is non-zero: 20736 * 20737 * - We have a full segment (or more with TSO) - This is the last 20738 * buffer in a write()/send() and we are either idle or running 20739 * NODELAY - we've timed out (e.g. persist timer) - we have more 20740 * then 1/2 the maximum send window's worth of data (receiver may be 20741 * limited the window size) - we need to retransmit 20742 */ 20743 if (len) { 20744 if (len >= segsiz) { 20745 goto send; 20746 } 20747 /* 20748 * NOTE! on localhost connections an 'ack' from the remote 20749 * end may occur synchronously with the output and cause us 20750 * to flush a buffer queued with moretocome. XXX 20751 * 20752 */ 20753 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 20754 (idle || (tp->t_flags & TF_NODELAY)) && 20755 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 20756 (tp->t_flags & TF_NOPUSH) == 0) { 20757 pass = 2; 20758 goto send; 20759 } 20760 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 20761 pass = 22; 20762 goto send; 20763 } 20764 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 20765 pass = 4; 20766 goto send; 20767 } 20768 if (sack_rxmit) { 20769 pass = 6; 20770 goto send; 20771 } 20772 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && 20773 (ctf_outstanding(tp) < (segsiz * 2))) { 20774 /* 20775 * We have less than two MSS outstanding (delayed ack) 20776 * and our rwnd will not let us send a full sized 20777 * MSS. Lets go ahead and let this small segment 20778 * out because we want to try to have at least two 20779 * packets inflight to not be caught by delayed ack. 20780 */ 20781 pass = 12; 20782 goto send; 20783 } 20784 } 20785 /* 20786 * Sending of standalone window updates. 20787 * 20788 * Window updates are important when we close our window due to a 20789 * full socket buffer and are opening it again after the application 20790 * reads data from it. Once the window has opened again and the 20791 * remote end starts to send again the ACK clock takes over and 20792 * provides the most current window information. 20793 * 20794 * We must avoid the silly window syndrome whereas every read from 20795 * the receive buffer, no matter how small, causes a window update 20796 * to be sent. 
We also should avoid sending a flurry of window 20797 * updates when the socket buffer had queued a lot of data and the 20798 * application is doing small reads. 20799 * 20800 * Prevent a flurry of pointless window updates by only sending an 20801 * update when we can increase the advertized window by more than 20802 * 1/4th of the socket buffer capacity. When the buffer is getting 20803 * full or is very small be more aggressive and send an update 20804 * whenever we can increase by two mss sized segments. In all other 20805 * situations the ACK's to new incoming data will carry further 20806 * window increases. 20807 * 20808 * Don't send an independent window update if a delayed ACK is 20809 * pending (it will get piggy-backed on it) or the remote side 20810 * already has done a half-close and won't send more data. Skip 20811 * this if the connection is in T/TCP half-open state. 20812 */ 20813 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 20814 !(tp->t_flags & TF_DELACK) && 20815 !TCPS_HAVERCVDFIN(tp->t_state)) { 20816 /* 20817 * "adv" is the amount we could increase the window, taking 20818 * into account that we are limited by TCP_MAXWIN << 20819 * tp->rcv_scale. 20820 */ 20821 int32_t adv; 20822 int oldwin; 20823 20824 adv = recwin; 20825 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 20826 oldwin = (tp->rcv_adv - tp->rcv_nxt); 20827 if (adv > oldwin) 20828 adv -= oldwin; 20829 else { 20830 /* We can't increase the window */ 20831 adv = 0; 20832 } 20833 } else 20834 oldwin = 0; 20835 20836 /* 20837 * If the new window size ends up being the same as or less 20838 * than the old size when it is scaled, then don't force 20839 * a window update. 20840 */ 20841 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 20842 goto dontupdate; 20843 20844 if (adv >= (int32_t)(2 * segsiz) && 20845 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 20846 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 20847 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 20848 pass = 7; 20849 goto send; 20850 } 20851 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 20852 pass = 23; 20853 goto send; 20854 } 20855 } 20856 dontupdate: 20857 20858 /* 20859 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 20860 * is also a catch-all for the retransmit timer timeout case. 20861 */ 20862 if (tp->t_flags & TF_ACKNOW) { 20863 pass = 8; 20864 goto send; 20865 } 20866 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 20867 pass = 9; 20868 goto send; 20869 } 20870 /* 20871 * If our state indicates that FIN should be sent and we have not 20872 * yet done so, then we need to send. 20873 */ 20874 if ((flags & TH_FIN) && 20875 (tp->snd_max == tp->snd_una)) { 20876 pass = 11; 20877 goto send; 20878 } 20879 /* 20880 * No reason to send a segment, just return. 20881 */ 20882 just_return: 20883 SOCKBUF_UNLOCK(sb); 20884 just_return_nolock: 20885 { 20886 int app_limited = CTF_JR_SENT_DATA; 20887 20888 if ((tp->t_flags & TF_FASTOPEN) == 0 && 20889 (flags & TH_FIN) && 20890 (len == 0) && 20891 (sbused(sb) == (tp->snd_max - tp->snd_una)) && 20892 ((tp->snd_max - tp->snd_una) <= segsiz)) { 20893 /* 20894 * Ok less than or right at a MSS is 20895 * outstanding. The original FreeBSD stack would 20896 * have sent a FIN, which can speed things up for 20897 * a transactional application doing a MSG_WAITALL. 20898 * To speed things up since we do *not* send a FIN 20899 * if data is outstanding, we send a "challenge ack". 
20900 * The idea behind that is instead of having to have 20901 * the peer wait for the delayed-ack timer to run off 20902 * we send an ack that makes the peer send us an ack. 20903 */ 20904 rack_send_ack_challange(rack); 20905 } 20906 if (tot_len_this_send > 0) { 20907 rack->r_ctl.fsb.recwin = recwin; 20908 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz, __LINE__); 20909 if ((error == 0) && 20910 rack_use_rfo && 20911 ((flags & (TH_SYN|TH_FIN)) == 0) && 20912 (ipoptlen == 0) && 20913 (tp->rcv_numsacks == 0) && 20914 rack->r_fsb_inited && 20915 TCPS_HAVEESTABLISHED(tp->t_state) && 20916 ((IN_RECOVERY(tp->t_flags)) == 0) && 20917 (rack->r_must_retran == 0) && 20918 ((tp->t_flags & TF_NEEDFIN) == 0) && 20919 (len > 0) && (orig_len > 0) && 20920 (orig_len > len) && 20921 ((orig_len - len) >= segsiz) && 20922 ((optlen == 0) || 20923 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 20924 /* We can send at least one more MSS using our fsb */ 20925 rack_setup_fast_output(tp, rack, sb, len, orig_len, 20926 segsiz, pace_max_seg, hw_tls, flags); 20927 } else 20928 rack->r_fast_output = 0; 20929 rack_log_fsb(rack, tp, so, flags, 20930 ipoptlen, orig_len, len, 0, 20931 1, optlen, __LINE__, 1); 20932 /* Assure when we leave that snd_nxt will point to top */ 20933 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 20934 tp->snd_nxt = tp->snd_max; 20935 } else { 20936 int end_window = 0; 20937 uint32_t seq = tp->gput_ack; 20938 20939 rsm = tqhash_max(rack->r_ctl.tqh); 20940 if (rsm) { 20941 /* 20942 * Mark the last sent that we just-returned (hinting 20943 * that delayed ack may play a role in any rtt measurement). 20944 */ 20945 rsm->r_just_ret = 1; 20946 } 20947 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 20948 rack->r_ctl.rc_agg_delayed = 0; 20949 rack->r_early = 0; 20950 rack->r_late = 0; 20951 rack->r_ctl.rc_agg_early = 0; 20952 if ((ctf_outstanding(tp) + 20953 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), 20954 minseg)) >= tp->snd_wnd) { 20955 /* We are limited by the rwnd */ 20956 app_limited = CTF_JR_RWND_LIMITED; 20957 if (IN_FASTRECOVERY(tp->t_flags)) 20958 rack->r_ctl.rc_prr_sndcnt = 0; 20959 } else if (ctf_outstanding(tp) >= sbavail(sb)) { 20960 /* We are limited by whats available -- app limited */ 20961 app_limited = CTF_JR_APP_LIMITED; 20962 if (IN_FASTRECOVERY(tp->t_flags)) 20963 rack->r_ctl.rc_prr_sndcnt = 0; 20964 } else if ((idle == 0) && 20965 ((tp->t_flags & TF_NODELAY) == 0) && 20966 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 20967 (len < segsiz)) { 20968 /* 20969 * No delay is not on and the 20970 * user is sending less than 1MSS. This 20971 * brings out SWS avoidance so we 20972 * don't send. Another app-limited case. 20973 */ 20974 app_limited = CTF_JR_APP_LIMITED; 20975 } else if (tp->t_flags & TF_NOPUSH) { 20976 /* 20977 * The user has requested no push of 20978 * the last segment and we are 20979 * at the last segment. Another app 20980 * limited case. 20981 */ 20982 app_limited = CTF_JR_APP_LIMITED; 20983 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) { 20984 /* Its the cwnd */ 20985 app_limited = CTF_JR_CWND_LIMITED; 20986 } else if (IN_FASTRECOVERY(tp->t_flags) && 20987 (rack->rack_no_prr == 0) && 20988 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { 20989 app_limited = CTF_JR_PRR; 20990 } else { 20991 /* Now why here are we not sending? 
*/ 20992 #ifdef NOW 20993 #ifdef INVARIANTS 20994 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use); 20995 #endif 20996 #endif 20997 app_limited = CTF_JR_ASSESSING; 20998 } 20999 /* 21000 * App limited in some fashion, for our pacing GP 21001 * measurements we don't want any gap (even cwnd). 21002 * Close down the measurement window. 21003 */ 21004 if (rack_cwnd_block_ends_measure && 21005 ((app_limited == CTF_JR_CWND_LIMITED) || 21006 (app_limited == CTF_JR_PRR))) { 21007 /* 21008 * The reason we are not sending is 21009 * the cwnd (or prr). We have been configured 21010 * to end the measurement window in 21011 * this case. 21012 */ 21013 end_window = 1; 21014 } else if (rack_rwnd_block_ends_measure && 21015 (app_limited == CTF_JR_RWND_LIMITED)) { 21016 /* 21017 * We are rwnd limited and have been 21018 * configured to end the measurement 21019 * window in this case. 21020 */ 21021 end_window = 1; 21022 } else if (app_limited == CTF_JR_APP_LIMITED) { 21023 /* 21024 * A true application limited period, we have 21025 * run out of data. 21026 */ 21027 end_window = 1; 21028 } else if (app_limited == CTF_JR_ASSESSING) { 21029 /* 21030 * In the assessing case we hit the end of 21031 * the if/else and had no known reason. 21032 * This will panic us under invariants. 21033 * 21034 * If we get this out in logs we need to 21035 * investigate which reason we missed. 21036 */ 21037 end_window = 1; 21038 } 21039 if (end_window) { 21040 uint8_t log = 0; 21041 21042 /* Adjust the Gput measurement */ 21043 if ((tp->t_flags & TF_GPUTINPROG) && 21044 SEQ_GT(tp->gput_ack, tp->snd_max)) { 21045 tp->gput_ack = tp->snd_max; 21046 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 21047 /* 21048 * There is not enough to measure. 21049 */ 21050 tp->t_flags &= ~TF_GPUTINPROG; 21051 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 21052 rack->r_ctl.rc_gp_srtt /*flex1*/, 21053 tp->gput_seq, 21054 0, 0, 18, __LINE__, NULL, 0); 21055 } else 21056 log = 1; 21057 } 21058 /* Mark the last packet as app limited */ 21059 rsm = tqhash_max(rack->r_ctl.tqh); 21060 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 21061 if (rack->r_ctl.rc_app_limited_cnt == 0) 21062 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 21063 else { 21064 /* 21065 * Go out to the end app limited and mark 21066 * this new one as next and move the end_appl up 21067 * to this guy.
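* In effect rc_first_appl/rc_end_appl maintain a chain of app-limited send points, linked through r_nseq_appl and counted by rc_app_limited_cnt, for the goodput measurement code to consult later.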
21068 */ 21069 if (rack->r_ctl.rc_end_appl) 21070 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 21071 rack->r_ctl.rc_end_appl = rsm; 21072 } 21073 rsm->r_flags |= RACK_APP_LIMITED; 21074 rack->r_ctl.rc_app_limited_cnt++; 21075 } 21076 if (log) 21077 rack_log_pacing_delay_calc(rack, 21078 rack->r_ctl.rc_app_limited_cnt, seq, 21079 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); 21080 } 21081 } 21082 /* Check if we need to go into persists or not */ 21083 if ((tp->snd_max == tp->snd_una) && 21084 TCPS_HAVEESTABLISHED(tp->t_state) && 21085 sbavail(sb) && 21086 (sbavail(sb) > tp->snd_wnd) && 21087 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 21088 /* Yes lets make sure to move to persist before timer-start */ 21089 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); 21090 } 21091 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 21092 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use); 21093 } 21094 #ifdef NETFLIX_SHARED_CWND 21095 if ((sbavail(sb) == 0) && 21096 rack->r_ctl.rc_scw) { 21097 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 21098 rack->rack_scwnd_is_idle = 1; 21099 } 21100 #endif 21101 #ifdef TCP_ACCOUNTING 21102 if (tot_len_this_send > 0) { 21103 crtsc = get_cyclecount(); 21104 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21105 tp->tcp_cnt_counters[SND_OUT_DATA]++; 21106 } 21107 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21108 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 21109 } 21110 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21111 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 21112 } 21113 } else { 21114 crtsc = get_cyclecount(); 21115 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21116 tp->tcp_cnt_counters[SND_LIMITED]++; 21117 } 21118 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21119 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 21120 } 21121 } 21122 sched_unpin(); 21123 #endif 21124 return (0); 21125 21126 send: 21127 if ((rack->r_ctl.crte != NULL) && 21128 (rsm == NULL) && 21129 ((rack->rc_hw_nobuf == 1) || 21130 (rack_hw_check_queue && (check_done == 0)))) { 21131 /* 21132 * We only want to do this once with the hw_check_queue, 21133 * for the enobuf case we would only do it once if 21134 * we come around to again, the flag will be clear. 21135 */ 21136 check_done = 1; 21137 slot = rack_check_queue_level(rack, tp, &tv, cts, len, segsiz); 21138 if (slot) { 21139 rack->r_ctl.rc_agg_delayed = 0; 21140 rack->r_ctl.rc_agg_early = 0; 21141 rack->r_early = 0; 21142 rack->r_late = 0; 21143 SOCKBUF_UNLOCK(&so->so_snd); 21144 goto skip_all_send; 21145 } 21146 } 21147 if (rsm || sack_rxmit) 21148 counter_u64_add(rack_nfto_resend, 1); 21149 else 21150 counter_u64_add(rack_non_fto_send, 1); 21151 if ((flags & TH_FIN) && 21152 sbavail(sb)) { 21153 /* 21154 * We do not transmit a FIN 21155 * with data outstanding. We 21156 * need to make it so all data 21157 * is acked first. 21158 */ 21159 flags &= ~TH_FIN; 21160 if (TCPS_HAVEESTABLISHED(tp->t_state) && 21161 (sbused(sb) == (tp->snd_max - tp->snd_una)) && 21162 ((tp->snd_max - tp->snd_una) <= segsiz)) { 21163 /* 21164 * Ok less than or right at a MSS is 21165 * outstanding. The original FreeBSD stack would 21166 * have sent a FIN, which can speed things up for 21167 * a transactional application doing a MSG_WAITALL. 21168 * To speed things up since we do *not* send a FIN 21169 * if data is outstanding, we send a "challenge ack". 
21170 * The idea behind that is instead of having to have 21171 * the peer wait for the delayed-ack timer to run off 21172 * we send an ack that makes the peer send us an ack. 21173 */ 21174 rack_send_ack_challange(rack); 21175 } 21176 } 21177 /* Enforce stack imposed max seg size if we have one */ 21178 if (pace_max_seg && 21179 (len > pace_max_seg)) { 21180 mark = 1; 21181 len = pace_max_seg; 21182 } 21183 if ((rsm == NULL) && 21184 (rack->pcm_in_progress == 0) && 21185 (rack->r_ctl.pcm_max_seg > 0) && 21186 (len >= rack->r_ctl.pcm_max_seg)) { 21187 /* It is large enough for a measurement */ 21188 add_flag |= RACK_IS_PCM; 21189 rack_log_pcm(rack, 5, len, rack->r_ctl.pcm_max_seg, add_flag); 21190 } else if (rack_verbose_logging) { 21191 rack_log_pcm(rack, 6, len, rack->r_ctl.pcm_max_seg, add_flag); 21192 } 21193 21194 SOCKBUF_LOCK_ASSERT(sb); 21195 if (len > 0) { 21196 if (len >= segsiz) 21197 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 21198 else 21199 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 21200 } 21201 /* 21202 * Before ESTABLISHED, force sending of initial options unless TCP 21203 * set not to do any options. NOTE: we assume that the IP/TCP header 21204 * plus TCP options always fit in a single mbuf, leaving room for a 21205 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 21206 * + optlen <= MCLBYTES 21207 */ 21208 optlen = 0; 21209 #ifdef INET6 21210 if (isipv6) 21211 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 21212 else 21213 #endif 21214 hdrlen = sizeof(struct tcpiphdr); 21215 21216 /* 21217 * Ok what seq are we sending from. If we have 21218 * no rsm to use, then we look at various bits, 21219 * if we are putting out a SYN it will be ISS. 21220 * If we are retransmitting a FIN it will 21221 * be snd_max-1 else its snd_max. 21222 */ 21223 if (rsm == NULL) { 21224 if (flags & TH_SYN) 21225 rack_seq = tp->iss; 21226 else if ((flags & TH_FIN) && 21227 (tp->t_flags & TF_SENTFIN)) 21228 rack_seq = tp->snd_max - 1; 21229 else 21230 rack_seq = tp->snd_max; 21231 } else { 21232 rack_seq = rsm->r_start; 21233 } 21234 /* 21235 * Compute options for segment. We only have to care about SYN and 21236 * established connection segments. Options for SYN-ACK segments 21237 * are handled in TCP syncache. 21238 */ 21239 to.to_flags = 0; 21240 if ((tp->t_flags & TF_NOOPT) == 0) { 21241 /* Maximum segment size. */ 21242 if (flags & TH_SYN) { 21243 to.to_mss = tcp_mssopt(&inp->inp_inc); 21244 if (tp->t_port) 21245 to.to_mss -= V_tcp_udp_tunneling_overhead; 21246 to.to_flags |= TOF_MSS; 21247 21248 /* 21249 * On SYN or SYN|ACK transmits on TFO connections, 21250 * only include the TFO option if it is not a 21251 * retransmit, as the presence of the TFO option may 21252 * have caused the original SYN or SYN|ACK to have 21253 * been dropped by a middlebox. 21254 */ 21255 if ((tp->t_flags & TF_FASTOPEN) && 21256 (tp->t_rxtshift == 0)) { 21257 if (tp->t_state == TCPS_SYN_RECEIVED) { 21258 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 21259 to.to_tfo_cookie = 21260 (u_int8_t *)&tp->t_tfo_cookie.server; 21261 to.to_flags |= TOF_FASTOPEN; 21262 wanted_cookie = 1; 21263 } else if (tp->t_state == TCPS_SYN_SENT) { 21264 to.to_tfo_len = 21265 tp->t_tfo_client_cookie_len; 21266 to.to_tfo_cookie = 21267 tp->t_tfo_cookie.client; 21268 to.to_flags |= TOF_FASTOPEN; 21269 wanted_cookie = 1; 21270 /* 21271 * If we wind up having more data to 21272 * send with the SYN than can fit in 21273 * one segment, don't send any more 21274 * until the SYN|ACK comes back from 21275 * the other end. 
21276 */ 21277 sendalot = 0; 21278 } 21279 } 21280 } 21281 /* Window scaling. */ 21282 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 21283 to.to_wscale = tp->request_r_scale; 21284 to.to_flags |= TOF_SCALE; 21285 } 21286 /* Timestamps. */ 21287 if ((tp->t_flags & TF_RCVD_TSTMP) || 21288 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 21289 uint32_t ts_to_use; 21290 21291 if ((rack->r_rcvpath_rtt_up == 1) && 21292 (ms_cts == rack->r_ctl.last_rcv_tstmp_for_rtt)) { 21293 /* 21294 * When we are doing a rcv_rtt probe all 21295 * other timestamps use the next msec. This 21296 * is safe since our previous ack is in the 21297 * air and we will just have a few more 21298 * on the next ms. This assures that only 21299 * the one ack has the ms_cts that was on 21300 * our ack-probe. 21301 */ 21302 ts_to_use = ms_cts + 1; 21303 } else { 21304 ts_to_use = ms_cts; 21305 } 21306 to.to_tsval = ts_to_use + tp->ts_offset; 21307 to.to_tsecr = tp->ts_recent; 21308 to.to_flags |= TOF_TS; 21309 if ((len == 0) && 21310 (TCPS_HAVEESTABLISHED(tp->t_state)) && 21311 ((ms_cts - rack->r_ctl.last_rcv_tstmp_for_rtt) > RCV_PATH_RTT_MS) && 21312 (tp->snd_una == tp->snd_max) && 21313 (flags & TH_ACK) && 21314 (sbavail(sb) == 0) && 21315 (rack->r_ctl.current_round != 0) && 21316 ((flags & (TH_SYN|TH_FIN)) == 0) && 21317 (rack->r_rcvpath_rtt_up == 0)) { 21318 rack->r_ctl.last_rcv_tstmp_for_rtt = ms_cts; 21319 rack->r_ctl.last_time_of_arm_rcv = cts; 21320 rack->r_rcvpath_rtt_up = 1; 21321 /* Subtract 1 from seq to force a response */ 21322 rack_seq--; 21323 } 21324 } 21325 /* Set receive buffer autosizing timestamp. */ 21326 if (tp->rfbuf_ts == 0 && 21327 (so->so_rcv.sb_flags & SB_AUTOSIZE)) { 21328 tp->rfbuf_ts = ms_cts; 21329 } 21330 /* Selective ACK's. */ 21331 if (tp->t_flags & TF_SACK_PERMIT) { 21332 if (flags & TH_SYN) 21333 to.to_flags |= TOF_SACKPERM; 21334 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 21335 tp->rcv_numsacks > 0) { 21336 to.to_flags |= TOF_SACK; 21337 to.to_nsacks = tp->rcv_numsacks; 21338 to.to_sacks = (u_char *)tp->sackblks; 21339 } 21340 } 21341 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 21342 /* TCP-MD5 (RFC2385). */ 21343 if (tp->t_flags & TF_SIGNATURE) 21344 to.to_flags |= TOF_SIGNATURE; 21345 #endif 21346 21347 /* Processing the options. */ 21348 hdrlen += optlen = tcp_addoptions(&to, opt); 21349 /* 21350 * If we wanted a TFO option to be added, but it was unable 21351 * to fit, ensure no data is sent. 21352 */ 21353 if ((tp->t_flags & TF_FASTOPEN) && wanted_cookie && 21354 !(to.to_flags & TOF_FASTOPEN)) 21355 len = 0; 21356 } 21357 if (tp->t_port) { 21358 if (V_tcp_udp_tunneling_port == 0) { 21359 /* The port was removed?? 
*/ 21360 SOCKBUF_UNLOCK(&so->so_snd); 21361 #ifdef TCP_ACCOUNTING 21362 crtsc = get_cyclecount(); 21363 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21364 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 21365 } 21366 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21367 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 21368 } 21369 sched_unpin(); 21370 #endif 21371 return (EHOSTUNREACH); 21372 } 21373 hdrlen += sizeof(struct udphdr); 21374 } 21375 #ifdef INET6 21376 if (isipv6) 21377 ipoptlen = ip6_optlen(inp); 21378 else 21379 #endif 21380 if (inp->inp_options) 21381 ipoptlen = inp->inp_options->m_len - 21382 offsetof(struct ipoption, ipopt_list); 21383 else 21384 ipoptlen = 0; 21385 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 21386 ipoptlen += ipsec_optlen; 21387 #endif 21388 21389 /* 21390 * Adjust data length if insertion of options will bump the packet 21391 * length beyond the t_maxseg length. Clear the FIN bit because we 21392 * cut off the tail of the segment. 21393 */ 21394 if (len + optlen + ipoptlen > tp->t_maxseg) { 21395 if (tso) { 21396 uint32_t if_hw_tsomax; 21397 uint32_t moff; 21398 int32_t max_len; 21399 21400 /* extract TSO information */ 21401 if_hw_tsomax = tp->t_tsomax; 21402 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 21403 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 21404 KASSERT(ipoptlen == 0, 21405 ("%s: TSO can't do IP options", __func__)); 21406 21407 /* 21408 * Check if we should limit by maximum payload 21409 * length: 21410 */ 21411 if (if_hw_tsomax != 0) { 21412 /* compute maximum TSO length */ 21413 max_len = (if_hw_tsomax - hdrlen - 21414 max_linkhdr); 21415 if (max_len <= 0) { 21416 len = 0; 21417 } else if (len > max_len) { 21418 sendalot = 1; 21419 len = max_len; 21420 mark = 2; 21421 } 21422 } 21423 /* 21424 * Prevent the last segment from being fractional 21425 * unless the send sockbuf can be emptied: 21426 */ 21427 max_len = (tp->t_maxseg - optlen); 21428 if ((sb_offset + len) < sbavail(sb)) { 21429 moff = len % (u_int)max_len; 21430 if (moff != 0) { 21431 mark = 3; 21432 len -= moff; 21433 } 21434 } 21435 /* 21436 * In case there are too many small fragments don't 21437 * use TSO: 21438 */ 21439 if (len <= max_len) { 21440 mark = 4; 21441 tso = 0; 21442 } 21443 /* 21444 * Send the FIN in a separate segment after the bulk 21445 * sending is done. We don't trust the TSO 21446 * implementations to clear the FIN flag on all but 21447 * the last segment. 21448 */ 21449 if (tp->t_flags & TF_NEEDFIN) { 21450 sendalot = 4; 21451 } 21452 } else { 21453 mark = 5; 21454 if (optlen + ipoptlen >= tp->t_maxseg) { 21455 /* 21456 * Since we don't have enough space to put 21457 * the IP header chain and the TCP header in 21458 * one packet as required by RFC 7112, don't 21459 * send it. Also ensure that at least one 21460 * byte of the payload can be put into the 21461 * TCP segment. 21462 */ 21463 SOCKBUF_UNLOCK(&so->so_snd); 21464 error = EMSGSIZE; 21465 sack_rxmit = 0; 21466 goto out; 21467 } 21468 len = tp->t_maxseg - optlen - ipoptlen; 21469 sendalot = 5; 21470 } 21471 } else { 21472 tso = 0; 21473 mark = 6; 21474 } 21475 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 21476 ("%s: len > IP_MAXPACKET", __func__)); 21477 #ifdef DIAGNOSTIC 21478 #ifdef INET6 21479 if (max_linkhdr + hdrlen > MCLBYTES) 21480 #else 21481 if (max_linkhdr + hdrlen > MHLEN) 21482 #endif 21483 panic("tcphdr too big"); 21484 #endif 21485 21486 /* 21487 * This KASSERT is here to catch edge cases at a well defined place. 21488 * Before, those had triggered (random) panic conditions further 21489 * down. 
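* Right below, for instance, len == 0 with TH_FIN set while data still sits in the socket buffer is caught explicitly: we refuse to emit a lone FIN, possibly send a challenge ack instead, and take the just_return path.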
21490 */ 21491 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 21492 if ((len == 0) && 21493 (flags & TH_FIN) && 21494 (sbused(sb))) { 21495 /* 21496 * We have outstanding data, don't send a fin by itself!. 21497 * 21498 * Check to see if we need to send a challenge ack. 21499 */ 21500 if ((sbused(sb) == (tp->snd_max - tp->snd_una)) && 21501 ((tp->snd_max - tp->snd_una) <= segsiz)) { 21502 /* 21503 * Ok less than or right at a MSS is 21504 * outstanding. The original FreeBSD stack would 21505 * have sent a FIN, which can speed things up for 21506 * a transactional application doing a MSG_WAITALL. 21507 * To speed things up since we do *not* send a FIN 21508 * if data is outstanding, we send a "challenge ack". 21509 * The idea behind that is instead of having to have 21510 * the peer wait for the delayed-ack timer to run off 21511 * we send an ack that makes the peer send us an ack. 21512 */ 21513 rack_send_ack_challange(rack); 21514 } 21515 goto just_return; 21516 } 21517 /* 21518 * Grab a header mbuf, attaching a copy of data to be transmitted, 21519 * and initialize the header from the template for sends on this 21520 * connection. 21521 */ 21522 hw_tls = tp->t_nic_ktls_xmit != 0; 21523 if (len) { 21524 uint32_t max_val; 21525 uint32_t moff; 21526 21527 if (pace_max_seg) 21528 max_val = pace_max_seg; 21529 else 21530 max_val = len; 21531 /* 21532 * We allow a limit on sending with hptsi. 21533 */ 21534 if (len > max_val) { 21535 mark = 7; 21536 len = max_val; 21537 } 21538 #ifdef INET6 21539 if (MHLEN < hdrlen + max_linkhdr) 21540 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 21541 else 21542 #endif 21543 m = m_gethdr(M_NOWAIT, MT_DATA); 21544 21545 if (m == NULL) { 21546 SOCKBUF_UNLOCK(sb); 21547 error = ENOBUFS; 21548 sack_rxmit = 0; 21549 goto out; 21550 } 21551 m->m_data += max_linkhdr; 21552 m->m_len = hdrlen; 21553 21554 /* 21555 * Start the m_copy functions from the closest mbuf to the 21556 * sb_offset in the socket buffer chain. 21557 */ 21558 mb = sbsndptr_noadv(sb, sb_offset, &moff); 21559 s_mb = mb; 21560 s_moff = moff; 21561 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 21562 m_copydata(mb, moff, (int)len, 21563 mtod(m, caddr_t)+hdrlen); 21564 /* 21565 * If we are not retransmitting advance the 21566 * sndptr to help remember the next place in 21567 * the sb. 21568 */ 21569 if (rsm == NULL) 21570 sbsndptr_adv(sb, mb, len); 21571 m->m_len += len; 21572 } else { 21573 struct sockbuf *msb; 21574 21575 /* 21576 * If we are not retransmitting pass in msb so 21577 * the socket buffer can be advanced. Otherwise 21578 * set it to NULL if its a retransmission since 21579 * we don't want to change the sb remembered 21580 * location. 21581 */ 21582 if (rsm == NULL) 21583 msb = sb; 21584 else 21585 msb = NULL; 21586 m->m_next = tcp_m_copym( 21587 mb, moff, &len, 21588 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 21589 ((rsm == NULL) ? hw_tls : 0) 21590 #ifdef NETFLIX_COPY_ARGS 21591 , &s_mb, &s_moff 21592 #endif 21593 ); 21594 if (len <= (tp->t_maxseg - optlen)) { 21595 /* 21596 * Must have ran out of mbufs for the copy 21597 * shorten it to no longer need tso. Lets 21598 * not put on sendalot since we are low on 21599 * mbufs. 
21600 */ 21601 tso = 0; 21602 } 21603 if (m->m_next == NULL) { 21604 SOCKBUF_UNLOCK(sb); 21605 (void)m_free(m); 21606 error = ENOBUFS; 21607 sack_rxmit = 0; 21608 goto out; 21609 } 21610 } 21611 if (sack_rxmit) { 21612 if (rsm && (rsm->r_flags & RACK_TLP)) { 21613 /* 21614 * TLP should not count in retran count, but 21615 * in its own bin 21616 */ 21617 counter_u64_add(rack_tlp_retran, 1); 21618 counter_u64_add(rack_tlp_retran_bytes, len); 21619 } else { 21620 tp->t_sndrexmitpack++; 21621 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 21622 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 21623 } 21624 #ifdef STATS 21625 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 21626 len); 21627 #endif 21628 } else { 21629 KMOD_TCPSTAT_INC(tcps_sndpack); 21630 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 21631 #ifdef STATS 21632 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 21633 len); 21634 #endif 21635 } 21636 /* 21637 * If we're sending everything we've got, set PUSH. (This 21638 * will keep happy those implementations which only give 21639 * data to the user when a buffer fills or a PUSH comes in.) 21640 */ 21641 if (sb_offset + len == sbused(sb) && 21642 sbused(sb) && 21643 !(flags & TH_SYN)) { 21644 flags |= TH_PUSH; 21645 add_flag |= RACK_HAD_PUSH; 21646 } 21647 21648 SOCKBUF_UNLOCK(sb); 21649 } else { 21650 SOCKBUF_UNLOCK(sb); 21651 if (tp->t_flags & TF_ACKNOW) 21652 KMOD_TCPSTAT_INC(tcps_sndacks); 21653 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 21654 KMOD_TCPSTAT_INC(tcps_sndctrl); 21655 else 21656 KMOD_TCPSTAT_INC(tcps_sndwinup); 21657 21658 m = m_gethdr(M_NOWAIT, MT_DATA); 21659 if (m == NULL) { 21660 error = ENOBUFS; 21661 sack_rxmit = 0; 21662 goto out; 21663 } 21664 #ifdef INET6 21665 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 21666 MHLEN >= hdrlen) { 21667 M_ALIGN(m, hdrlen); 21668 } else 21669 #endif 21670 m->m_data += max_linkhdr; 21671 m->m_len = hdrlen; 21672 } 21673 SOCKBUF_UNLOCK_ASSERT(sb); 21674 m->m_pkthdr.rcvif = (struct ifnet *)0; 21675 #ifdef MAC 21676 mac_inpcb_create_mbuf(inp, m); 21677 #endif 21678 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 21679 #ifdef INET6 21680 if (isipv6) 21681 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 21682 else 21683 #endif /* INET6 */ 21684 #ifdef INET 21685 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 21686 #endif 21687 th = rack->r_ctl.fsb.th; 21688 udp = rack->r_ctl.fsb.udp; 21689 if (udp) { 21690 #ifdef INET6 21691 if (isipv6) 21692 ulen = hdrlen + len - sizeof(struct ip6_hdr); 21693 else 21694 #endif /* INET6 */ 21695 ulen = hdrlen + len - sizeof(struct ip); 21696 udp->uh_ulen = htons(ulen); 21697 } 21698 } else { 21699 #ifdef INET6 21700 if (isipv6) { 21701 ip6 = mtod(m, struct ip6_hdr *); 21702 if (tp->t_port) { 21703 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 21704 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 21705 udp->uh_dport = tp->t_port; 21706 ulen = hdrlen + len - sizeof(struct ip6_hdr); 21707 udp->uh_ulen = htons(ulen); 21708 th = (struct tcphdr *)(udp + 1); 21709 } else 21710 th = (struct tcphdr *)(ip6 + 1); 21711 tcpip_fillheaders(inp, tp->t_port, ip6, th); 21712 } else 21713 #endif /* INET6 */ 21714 { 21715 #ifdef INET 21716 ip = mtod(m, struct ip *); 21717 if (tp->t_port) { 21718 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 21719 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 21720 udp->uh_dport = tp->t_port; 21721 ulen = hdrlen + len - sizeof(struct ip); 21722 udp->uh_ulen = htons(ulen); 21723 th = (struct tcphdr *)(udp + 1); 21724 } else 21725 
th = (struct tcphdr *)(ip + 1); 21726 tcpip_fillheaders(inp, tp->t_port, ip, th); 21727 #endif 21728 } 21729 } 21730 /* 21731 * If we are starting a connection, send ECN setup SYN packet. If we 21732 * are on a retransmit, we may resend those bits a number of times 21733 * as per RFC 3168. 21734 */ 21735 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { 21736 flags |= tcp_ecn_output_syn_sent(tp); 21737 } 21738 /* Also handle parallel SYN for ECN */ 21739 if (TCPS_HAVERCVDSYN(tp->t_state) && 21740 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 21741 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit); 21742 if ((tp->t_state == TCPS_SYN_RECEIVED) && 21743 (tp->t_flags2 & TF2_ECN_SND_ECE)) 21744 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 21745 #ifdef INET6 21746 if (isipv6) { 21747 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 21748 ip6->ip6_flow |= htonl(ect << 20); 21749 } 21750 else 21751 #endif 21752 { 21753 #ifdef INET 21754 ip->ip_tos &= ~IPTOS_ECN_MASK; 21755 ip->ip_tos |= ect; 21756 #endif 21757 } 21758 } 21759 th->th_seq = htonl(rack_seq); 21760 th->th_ack = htonl(tp->rcv_nxt); 21761 tcp_set_flags(th, flags); 21762 /* 21763 * Calculate receive window. Don't shrink window, but avoid silly 21764 * window syndrome. 21765 * If a RST segment is sent, advertise a window of zero. 21766 */ 21767 if (flags & TH_RST) { 21768 recwin = 0; 21769 } else { 21770 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 21771 recwin < (long)segsiz) { 21772 recwin = 0; 21773 } 21774 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 21775 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 21776 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 21777 } 21778 21779 /* 21780 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 21781 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 21782 * handled in syncache. 21783 */ 21784 if (flags & TH_SYN) 21785 th->th_win = htons((u_short) 21786 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 21787 else { 21788 /* Avoid shrinking window with window scaling. */ 21789 recwin = roundup2(recwin, 1 << tp->rcv_scale); 21790 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 21791 } 21792 /* 21793 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 21794 * window. This may cause the remote transmitter to stall. This 21795 * flag tells soreceive() to disable delayed acknowledgements when 21796 * draining the buffer. This can occur if the receiver is 21797 * attempting to read more data than can be buffered prior to 21798 * transmitting on the connection. 21799 */ 21800 if (th->th_win == 0) { 21801 tp->t_sndzerowin++; 21802 tp->t_flags |= TF_RXWIN0SENT; 21803 } else 21804 tp->t_flags &= ~TF_RXWIN0SENT; 21805 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 21806 /* Now are we using fsb?, if so copy the template data to the mbuf */ 21807 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 21808 uint8_t *cpto; 21809 21810 cpto = mtod(m, uint8_t *); 21811 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 21812 /* 21813 * We have just copied in: 21814 * IP/IP6 21815 * <optional udphdr> 21816 * tcphdr (no options) 21817 * 21818 * We need to grab the correct pointers into the mbuf 21819 * for both the tcp header, and possibly the udp header (if tunneling). 21820 * We do this by using the offset in the copy buffer and adding it 21821 * to the mbuf base pointer (cpto). 
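 * In effect each pointer is rebuilt as cpto + (template_pointer - template_base),
 * which is what the th (and udp) assignments below compute.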
21822 */ 21823 #ifdef INET6 21824 if (isipv6) 21825 ip6 = mtod(m, struct ip6_hdr *); 21826 else 21827 #endif /* INET6 */ 21828 #ifdef INET 21829 ip = mtod(m, struct ip *); 21830 #endif 21831 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 21832 /* If we have a udp header lets set it into the mbuf as well */ 21833 if (udp) 21834 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); 21835 } 21836 if (optlen) { 21837 bcopy(opt, th + 1, optlen); 21838 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 21839 } 21840 /* 21841 * Put TCP length in extended header, and then checksum extended 21842 * header and data. 21843 */ 21844 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 21845 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 21846 if (to.to_flags & TOF_SIGNATURE) { 21847 /* 21848 * Calculate MD5 signature and put it into the place 21849 * determined before. 21850 * NOTE: since TCP options buffer doesn't point into 21851 * mbuf's data, calculate offset and use it. 21852 */ 21853 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 21854 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 21855 /* 21856 * Do not send segment if the calculation of MD5 21857 * digest has failed. 21858 */ 21859 goto out; 21860 } 21861 } 21862 #endif 21863 #ifdef INET6 21864 if (isipv6) { 21865 /* 21866 * ip6_plen is not need to be filled now, and will be filled 21867 * in ip6_output. 21868 */ 21869 if (tp->t_port) { 21870 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 21871 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 21872 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 21873 th->th_sum = htons(0); 21874 UDPSTAT_INC(udps_opackets); 21875 } else { 21876 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 21877 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 21878 th->th_sum = in6_cksum_pseudo(ip6, 21879 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 21880 0); 21881 } 21882 } 21883 #endif 21884 #if defined(INET6) && defined(INET) 21885 else 21886 #endif 21887 #ifdef INET 21888 { 21889 if (tp->t_port) { 21890 m->m_pkthdr.csum_flags = CSUM_UDP; 21891 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 21892 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 21893 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 21894 th->th_sum = htons(0); 21895 UDPSTAT_INC(udps_opackets); 21896 } else { 21897 m->m_pkthdr.csum_flags = CSUM_TCP; 21898 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 21899 th->th_sum = in_pseudo(ip->ip_src.s_addr, 21900 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 21901 IPPROTO_TCP + len + optlen)); 21902 } 21903 /* IP version must be set here for ipv4/ipv6 checking later */ 21904 KASSERT(ip->ip_v == IPVERSION, 21905 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 21906 } 21907 #endif 21908 /* 21909 * Enable TSO and specify the size of the segments. The TCP pseudo 21910 * header checksum is always provided. XXX: Fixme: This is currently 21911 * not the case for IPv6. 21912 */ 21913 if (tso) { 21914 /* 21915 * Here we must use t_maxseg and the optlen since 21916 * the optlen may include SACK's (or DSACK). 
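 * In effect the TSO segment size handed down is t_maxseg - optlen, so each
 * segment the NIC carves still fits on the wire once the options are added back.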
21917 */ 21918 KASSERT(len > tp->t_maxseg - optlen, 21919 ("%s: len <= tso_segsz", __func__)); 21920 m->m_pkthdr.csum_flags |= CSUM_TSO; 21921 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 21922 } 21923 KASSERT(len + hdrlen == m_length(m, NULL), 21924 ("%s: mbuf chain different than expected: %d + %u != %u", 21925 __func__, len, hdrlen, m_length(m, NULL))); 21926 21927 #ifdef TCP_HHOOK 21928 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 21929 hhook_run_tcp_est_out(tp, th, &to, len, tso); 21930 #endif 21931 if ((rack->r_ctl.crte != NULL) && 21932 (rack->rc_hw_nobuf == 0) && 21933 tcp_bblogging_on(tp)) { 21934 rack_log_queue_level(tp, rack, len, &tv, cts); 21935 } 21936 /* We're getting ready to send; log now. */ 21937 if (tcp_bblogging_on(rack->rc_tp)) { 21938 union tcp_log_stackspecific log; 21939 21940 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 21941 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 21942 if (rack->rack_no_prr) 21943 log.u_bbr.flex1 = 0; 21944 else 21945 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 21946 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 21947 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 21948 log.u_bbr.flex4 = orig_len; 21949 /* Save off the early/late values */ 21950 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 21951 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 21952 log.u_bbr.bw_inuse = rack_get_bw(rack); 21953 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 21954 log.u_bbr.flex8 = 0; 21955 if (rsm) { 21956 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 21957 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 21958 counter_u64_add(rack_collapsed_win_rxt, 1); 21959 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 21960 } 21961 if (doing_tlp) 21962 log.u_bbr.flex8 = 2; 21963 else 21964 log.u_bbr.flex8 = 1; 21965 } else { 21966 if (doing_tlp) 21967 log.u_bbr.flex8 = 3; 21968 } 21969 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 21970 log.u_bbr.flex7 = mark; 21971 log.u_bbr.flex7 <<= 8; 21972 log.u_bbr.flex7 |= pass; 21973 log.u_bbr.pkts_out = tp->t_maxseg; 21974 log.u_bbr.timeStamp = cts; 21975 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 21976 if (rsm && (rsm->r_rtr_cnt > 0)) { 21977 /* 21978 * When we have a retransmit we want to log the 21979 * burst at send and flight at send from before. 21980 */ 21981 log.u_bbr.flex5 = rsm->r_fas; 21982 log.u_bbr.bbr_substate = rsm->r_bas; 21983 } else { 21984 /* 21985 * New transmits we log in flex5 the inflight again as 21986 * well as the number of segments in our send in the 21987 * substate field. 21988 */ 21989 log.u_bbr.flex5 = log.u_bbr.inflight; 21990 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 21991 } 21992 log.u_bbr.lt_epoch = cwnd_to_use; 21993 log.u_bbr.delivered = sendalot; 21994 log.u_bbr.rttProp = (uintptr_t)rsm; 21995 log.u_bbr.pkt_epoch = __LINE__; 21996 if (rsm) { 21997 log.u_bbr.delRate = rsm->r_flags; 21998 log.u_bbr.delRate <<= 31; 21999 log.u_bbr.delRate |= rack->r_must_retran; 22000 log.u_bbr.delRate <<= 1; 22001 log.u_bbr.delRate |= (sack_rxmit & 0x00000001); 22002 } else { 22003 log.u_bbr.delRate = rack->r_must_retran; 22004 log.u_bbr.delRate <<= 1; 22005 log.u_bbr.delRate |= (sack_rxmit & 0x00000001); 22006 } 22007 lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 22008 len, &log, false, NULL, __func__, __LINE__, &tv); 22009 } else 22010 lgb = NULL; 22011 22012 /* 22013 * Fill in IP length and desired time to live and send to IP level. 
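 * (via ip6_output() for IPv6 or ip_output() for IPv4 below).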
22014 * There should be a better way to handle ttl and tos; we could keep
22015 * them in the template, but need a way to checksum without them.
22016 */
22017 /*
22018 * m->m_pkthdr.len should have been set before cksum calculation,
22019 * because in6_cksum() needs it.
22020 */
22021 #ifdef INET6
22022 if (isipv6) {
22023 /*
22024 * we separately set hoplimit for every segment, since the
22025 * user might want to change the value via setsockopt. Also,
22026 * desired default hop limit might be changed via Neighbor
22027 * Discovery.
22028 */
22029 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL);
22030
22031 /*
22032 * Set the packet size here for the benefit of DTrace
22033 * probes. ip6_output() will set it properly; it's supposed
22034 * to include the option header lengths as well.
22035 */
22036 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
22037
22038 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
22039 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
22040 else
22041 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
22042
22043 if (tp->t_state == TCPS_SYN_SENT)
22044 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);
22045
22046 TCP_PROBE5(send, NULL, tp, ip6, tp, th);
22047 /* TODO: IPv6 IP6TOS_ECT bit on */
22048 error = ip6_output(m,
22049 inp->in6p_outputopts,
22050 &inp->inp_route6,
22051 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0),
22052 NULL, NULL, inp);
22053
22054 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
22055 mtu = inp->inp_route6.ro_nh->nh_mtu;
22056 }
22057 #endif /* INET6 */
22058 #if defined(INET) && defined(INET6)
22059 else
22060 #endif
22061 #ifdef INET
22062 {
22063 ip->ip_len = htons(m->m_pkthdr.len);
22064 #ifdef INET6
22065 if (inp->inp_vflag & INP_IPV6PROTO)
22066 ip->ip_ttl = in6_selecthlim(inp, NULL);
22067 #endif /* INET6 */
22068 rack->r_ctl.fsb.hoplimit = ip->ip_ttl;
22069 /*
22070 * If we do path MTU discovery, then we set DF on every
22071 * packet. This might not be the best thing to do according
22072 * to RFC3390 Section 2. However the tcp hostcache mitigates
22073 * the problem so it affects only the first tcp connection
22074 * with a host.
22075 *
22076 * NB: Don't set DF on small MTU/MSS to have a safe
22077 * fallback.
22078 */
22079 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
22080 tp->t_flags2 |= TF2_PLPMTU_PMTUD;
22081 if (tp->t_port == 0 || len < V_tcp_minmss) {
22082 ip->ip_off |= htons(IP_DF);
22083 }
22084 } else {
22085 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
22086 }
22087
22088 if (tp->t_state == TCPS_SYN_SENT)
22089 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);
22090
22091 TCP_PROBE5(send, NULL, tp, ip, tp, th);
22092
22093 error = ip_output(m,
22094 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
22095 inp->inp_options,
22096 #else
22097 NULL,
22098 #endif
22099 &inp->inp_route,
22100 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0,
22101 inp);
22102 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
22103 mtu = inp->inp_route.ro_nh->nh_mtu;
22104 }
22105 #endif /* INET */
22106 if (lgb) {
22107 lgb->tlb_errno = error;
22108 lgb = NULL;
22109 }
22110
22111 out:
22112 /*
22113 * In transmit state, time the transmission and arrange for the
22114 * retransmit. In persist state, just set snd_max.
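 * rack_log_output() just below records the send attempt whether or not
 * an error occurred.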
22115 */ 22116 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 22117 rack_to_usec_ts(&tv), 22118 rsm, add_flag, s_mb, s_moff, hw_tls, segsiz); 22119 if (error == 0) { 22120 if (add_flag & RACK_IS_PCM) { 22121 /* We just launched a PCM */ 22122 /* rrs here log */ 22123 rack->pcm_in_progress = 1; 22124 rack->pcm_needed = 0; 22125 rack_log_pcm(rack, 7, len, rack->r_ctl.pcm_max_seg, add_flag); 22126 } 22127 if (rsm == NULL) { 22128 if (rack->lt_bw_up == 0) { 22129 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv); 22130 rack->r_ctl.lt_seq = tp->snd_una; 22131 rack->lt_bw_up = 1; 22132 } else if (((rack_seq + len) - rack->r_ctl.lt_seq) > 0x7fffffff) { 22133 /* 22134 * Need to record what we have since we are 22135 * approaching seq wrap. 22136 */ 22137 uint64_t tmark; 22138 22139 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); 22140 rack->r_ctl.lt_seq = tp->snd_una; 22141 tmark = tcp_get_u64_usecs(&tv); 22142 if (tmark > rack->r_ctl.lt_timemark) { 22143 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 22144 rack->r_ctl.lt_timemark = tmark; 22145 } 22146 } 22147 } 22148 rack->forced_ack = 0; /* If we send something zap the FA flag */ 22149 counter_u64_add(rack_total_bytes, len); 22150 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 22151 if (rsm && doing_tlp) { 22152 rack->rc_last_sent_tlp_past_cumack = 0; 22153 rack->rc_last_sent_tlp_seq_valid = 1; 22154 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 22155 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 22156 } 22157 if (rack->rc_hw_nobuf) { 22158 rack->rc_hw_nobuf = 0; 22159 rack->r_ctl.rc_agg_delayed = 0; 22160 rack->r_early = 0; 22161 rack->r_late = 0; 22162 rack->r_ctl.rc_agg_early = 0; 22163 } 22164 if (rsm && (doing_tlp == 0)) { 22165 /* Set we retransmitted */ 22166 rack->rc_gp_saw_rec = 1; 22167 } else { 22168 if (cwnd_to_use > tp->snd_ssthresh) { 22169 /* Set we sent in CA */ 22170 rack->rc_gp_saw_ca = 1; 22171 } else { 22172 /* Set we sent in SS */ 22173 rack->rc_gp_saw_ss = 1; 22174 } 22175 } 22176 if (TCPS_HAVEESTABLISHED(tp->t_state) && 22177 (tp->t_flags & TF_SACK_PERMIT) && 22178 tp->rcv_numsacks > 0) 22179 tcp_clean_dsack_blocks(tp); 22180 tot_len_this_send += len; 22181 if (len == 0) { 22182 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 22183 } else { 22184 int idx; 22185 22186 idx = (len / segsiz) + 3; 22187 if (idx >= TCP_MSS_ACCT_ATIMER) 22188 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 22189 else 22190 counter_u64_add(rack_out_size[idx], 1); 22191 } 22192 } 22193 if ((rack->rack_no_prr == 0) && 22194 sub_from_prr && 22195 (error == 0)) { 22196 if (rack->r_ctl.rc_prr_sndcnt >= len) 22197 rack->r_ctl.rc_prr_sndcnt -= len; 22198 else 22199 rack->r_ctl.rc_prr_sndcnt = 0; 22200 } 22201 sub_from_prr = 0; 22202 if (doing_tlp) { 22203 /* Make sure the TLP is added */ 22204 add_flag |= RACK_TLP; 22205 } else if (rsm) { 22206 /* If its a resend without TLP then it must not have the flag */ 22207 rsm->r_flags &= ~RACK_TLP; 22208 } 22209 22210 22211 if ((error == 0) && 22212 (len > 0) && 22213 (tp->snd_una == tp->snd_max)) 22214 rack->r_ctl.rc_tlp_rxt_last_time = cts; 22215 22216 { 22217 /* 22218 * This block is not associated with the above error == 0 test. 22219 * It is used to advance snd_max if we have a new transmit. 
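 * Retransmissions bail out below via the sack_rxmit check and leave
 * snd_max untouched.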
22220 */ 22221 tcp_seq startseq = tp->snd_max; 22222 22223 22224 if (rsm && (doing_tlp == 0)) 22225 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 22226 if (error) 22227 /* We don't log or do anything with errors */ 22228 goto nomore; 22229 if (doing_tlp == 0) { 22230 if (rsm == NULL) { 22231 /* 22232 * Not a retransmission of some 22233 * sort, new data is going out so 22234 * clear our TLP count and flag. 22235 */ 22236 rack->rc_tlp_in_progress = 0; 22237 rack->r_ctl.rc_tlp_cnt_out = 0; 22238 } 22239 } else { 22240 /* 22241 * We have just sent a TLP, mark that it is true 22242 * and make sure our in progress is set so we 22243 * continue to check the count. 22244 */ 22245 rack->rc_tlp_in_progress = 1; 22246 rack->r_ctl.rc_tlp_cnt_out++; 22247 } 22248 /* 22249 * If we are retransmitting we are done, snd_max 22250 * does not get updated. 22251 */ 22252 if (sack_rxmit) 22253 goto nomore; 22254 if ((tp->snd_una == tp->snd_max) && (len > 0)) { 22255 /* 22256 * Update the time we just added data since 22257 * nothing was outstanding. 22258 */ 22259 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 22260 tp->t_acktime = ticks; 22261 } 22262 /* 22263 * Now for special SYN/FIN handling. 22264 */ 22265 if (flags & (TH_SYN | TH_FIN)) { 22266 if ((flags & TH_SYN) && 22267 ((tp->t_flags & TF_SENTSYN) == 0)) { 22268 tp->snd_max++; 22269 tp->t_flags |= TF_SENTSYN; 22270 } 22271 if ((flags & TH_FIN) && 22272 ((tp->t_flags & TF_SENTFIN) == 0)) { 22273 tp->snd_max++; 22274 tp->t_flags |= TF_SENTFIN; 22275 } 22276 } 22277 tp->snd_max += len; 22278 if (rack->rc_new_rnd_needed) { 22279 rack_new_round_starts(tp, rack, tp->snd_max); 22280 } 22281 /* 22282 * Time this transmission if not a retransmission and 22283 * not currently timing anything. 22284 * This is only relevant in case of switching back to 22285 * the base stack. 22286 */ 22287 if (tp->t_rtttime == 0) { 22288 tp->t_rtttime = ticks; 22289 tp->t_rtseq = startseq; 22290 KMOD_TCPSTAT_INC(tcps_segstimed); 22291 } 22292 if (len && 22293 ((tp->t_flags & TF_GPUTINPROG) == 0)) 22294 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 22295 /* 22296 * If we are doing FO we need to update the mbuf position and subtract 22297 * this happens when the peer sends us duplicate information and 22298 * we thus want to send a DSACK. 22299 * 22300 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 22301 * turned off? If not then we are going to echo multiple DSACK blocks 22302 * out (with the TSO), which we should not be doing. 22303 */ 22304 if (rack->r_fast_output && len) { 22305 if (rack->r_ctl.fsb.left_to_send > len) 22306 rack->r_ctl.fsb.left_to_send -= len; 22307 else 22308 rack->r_ctl.fsb.left_to_send = 0; 22309 if (rack->r_ctl.fsb.left_to_send < segsiz) 22310 rack->r_fast_output = 0; 22311 if (rack->r_fast_output) { 22312 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 22313 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 22314 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); 22315 } 22316 } 22317 if (rack_pcm_blast == 0) { 22318 if ((orig_len > len) && 22319 (add_flag & RACK_IS_PCM) && 22320 (len < pace_max_seg) && 22321 ((pace_max_seg - len) > segsiz)) { 22322 /* 22323 * We are doing a PCM measurement and we did 22324 * not get enough data in the TSO to meet the 22325 * burst requirement. 
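 * Trim what was just sent from the bookkeeping and loop back to the
 * send label to finish the burst.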
22326 */
22327 uint32_t n_len;
22328
22329 n_len = (orig_len - len);
22330 orig_len -= len;
22331 pace_max_seg -= len;
22332 len = n_len;
22333 sb_offset = tp->snd_max - tp->snd_una;
22334 /* Re-lock for the next spin */
22335 SOCKBUF_LOCK(sb);
22336 goto send;
22337 }
22338 } else {
22339 if ((orig_len > len) &&
22340 (add_flag & RACK_IS_PCM) &&
22341 ((orig_len - len) > segsiz)) {
22342 /*
22343 * We are doing a PCM measurement and we did
22344 * not get enough data in the TSO to meet the
22345 * burst requirement.
22346 */
22347 uint32_t n_len;
22348
22349 n_len = (orig_len - len);
22350 orig_len -= len;
22351 len = n_len;
22352 sb_offset = tp->snd_max - tp->snd_una;
22353 /* Re-lock for the next spin */
22354 SOCKBUF_LOCK(sb);
22355 goto send;
22356 }
22357 }
22358 }
22359 nomore:
22360 if (error) {
22361 rack->r_ctl.rc_agg_delayed = 0;
22362 rack->r_early = 0;
22363 rack->r_late = 0;
22364 rack->r_ctl.rc_agg_early = 0;
22365 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */
22366 /*
22367 * Failures do not advance the seq counter above. For the
22368 * case of ENOBUFS we will fall out and retry in 1ms with
22369 * the hpts. Everything else will just have to retransmit
22370 * with the timer.
22371 *
22372 * In any case, we do not want to loop around for another
22373 * send without a good reason.
22374 */
22375 sendalot = 0;
22376 switch (error) {
22377 case EPERM:
22378 case EACCES:
22379 tp->t_softerror = error;
22380 #ifdef TCP_ACCOUNTING
22381 crtsc = get_cyclecount();
22382 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22383 tp->tcp_cnt_counters[SND_OUT_FAIL]++;
22384 }
22385 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
22386 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
22387 }
22388 sched_unpin();
22389 #endif
22390 return (error);
22391 case ENOBUFS:
22392 /*
22393 * Pace us right away to retry in a short
22394 * time
22395 */
22396 if (rack->r_ctl.crte != NULL) {
22397 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF);
22398 if (tcp_bblogging_on(rack->rc_tp))
22399 rack_log_queue_level(tp, rack, len, &tv, cts);
22400 } else
22401 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF);
22402 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
22403 if (rack->rc_enobuf < 0x7f)
22404 rack->rc_enobuf++;
22405 if (slot < (10 * HPTS_USEC_IN_MSEC))
22406 slot = 10 * HPTS_USEC_IN_MSEC;
22407 if (rack->r_ctl.crte != NULL) {
22408 counter_u64_add(rack_saw_enobuf_hw, 1);
22409 tcp_rl_log_enobuf(rack->r_ctl.crte);
22410 }
22411 counter_u64_add(rack_saw_enobuf, 1);
22412 goto enobufs;
22413 case EMSGSIZE:
22414 /*
22415 * For some reason the interface we used initially
22416 * to send segments changed to another or lowered
22417 * its MTU. If TSO was active we either got an
22418 * interface without TSO capabilities or TSO was
22419 * turned off. If we obtained mtu from ip_output()
22420 * then update it and try again.
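 * Otherwise just arm the hpts timer below and let the normal
 * retransmit machinery recover.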
22421 */ 22422 if (tso) 22423 tp->t_flags &= ~TF_TSO; 22424 if (mtu != 0) { 22425 int saved_mtu; 22426 22427 saved_mtu = tp->t_maxseg; 22428 tcp_mss_update(tp, -1, mtu, NULL, NULL); 22429 if (saved_mtu > tp->t_maxseg) { 22430 goto again; 22431 } 22432 } 22433 slot = 10 * HPTS_USEC_IN_MSEC; 22434 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 22435 #ifdef TCP_ACCOUNTING 22436 crtsc = get_cyclecount(); 22437 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22438 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22439 } 22440 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22441 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22442 } 22443 sched_unpin(); 22444 #endif 22445 return (error); 22446 case ENETUNREACH: 22447 counter_u64_add(rack_saw_enetunreach, 1); 22448 case EHOSTDOWN: 22449 case EHOSTUNREACH: 22450 case ENETDOWN: 22451 if (TCPS_HAVERCVDSYN(tp->t_state)) { 22452 tp->t_softerror = error; 22453 error = 0; 22454 } 22455 /* FALLTHROUGH */ 22456 default: 22457 slot = 10 * HPTS_USEC_IN_MSEC; 22458 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 22459 #ifdef TCP_ACCOUNTING 22460 crtsc = get_cyclecount(); 22461 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22462 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22463 } 22464 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22465 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22466 } 22467 sched_unpin(); 22468 #endif 22469 return (error); 22470 } 22471 } else { 22472 rack->rc_enobuf = 0; 22473 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 22474 rack->r_ctl.retran_during_recovery += len; 22475 } 22476 KMOD_TCPSTAT_INC(tcps_sndtotal); 22477 22478 /* 22479 * Data sent (as far as we can tell). If this advertises a larger 22480 * window than any other segment, then remember the size of the 22481 * advertised window. Any pending ACK has now been sent. 22482 */ 22483 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 22484 tp->rcv_adv = tp->rcv_nxt + recwin; 22485 22486 tp->last_ack_sent = tp->rcv_nxt; 22487 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 22488 enobufs: 22489 if (sendalot) { 22490 /* Do we need to turn off sendalot? */ 22491 if (pace_max_seg && 22492 (tot_len_this_send >= pace_max_seg)) { 22493 /* We hit our max. */ 22494 sendalot = 0; 22495 } 22496 } 22497 if ((error == 0) && (flags & TH_FIN)) 22498 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 22499 if (flags & TH_RST) { 22500 /* 22501 * We don't send again after sending a RST. 22502 */ 22503 slot = 0; 22504 sendalot = 0; 22505 if (error == 0) 22506 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 22507 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) { 22508 /* 22509 * Get our pacing rate, if an error 22510 * occurred in sending (ENOBUF) we would 22511 * hit the else if with slot preset. Other 22512 * errors return. 22513 */ 22514 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz, __LINE__); 22515 } 22516 /* We have sent clear the flag */ 22517 rack->r_ent_rec_ns = 0; 22518 if (rack->r_must_retran) { 22519 if (rsm) { 22520 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 22521 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 22522 /* 22523 * We have retransmitted all. 22524 */ 22525 rack->r_must_retran = 0; 22526 rack->r_ctl.rc_out_at_rto = 0; 22527 } 22528 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 22529 /* 22530 * Sending new data will also kill 22531 * the loop. 
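 * since snd_max has now passed the point recorded at the RTO
 * (rc_snd_max_at_rto).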
22532 */ 22533 rack->r_must_retran = 0; 22534 rack->r_ctl.rc_out_at_rto = 0; 22535 } 22536 } 22537 rack->r_ctl.fsb.recwin = recwin; 22538 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 22539 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 22540 /* 22541 * We hit an RTO and now have past snd_max at the RTO 22542 * clear all the WAS flags. 22543 */ 22544 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 22545 } 22546 if (slot) { 22547 /* set the rack tcb into the slot N */ 22548 if ((error == 0) && 22549 rack_use_rfo && 22550 ((flags & (TH_SYN|TH_FIN)) == 0) && 22551 (rsm == NULL) && 22552 (ipoptlen == 0) && 22553 (tp->rcv_numsacks == 0) && 22554 rack->r_fsb_inited && 22555 TCPS_HAVEESTABLISHED(tp->t_state) && 22556 ((IN_RECOVERY(tp->t_flags)) == 0) && 22557 (rack->r_must_retran == 0) && 22558 ((tp->t_flags & TF_NEEDFIN) == 0) && 22559 (len > 0) && (orig_len > 0) && 22560 (orig_len > len) && 22561 ((orig_len - len) >= segsiz) && 22562 ((optlen == 0) || 22563 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 22564 /* We can send at least one more MSS using our fsb */ 22565 rack_setup_fast_output(tp, rack, sb, len, orig_len, 22566 segsiz, pace_max_seg, hw_tls, flags); 22567 } else 22568 rack->r_fast_output = 0; 22569 rack_log_fsb(rack, tp, so, flags, 22570 ipoptlen, orig_len, len, error, 22571 (rsm == NULL), optlen, __LINE__, 2); 22572 } else if (sendalot) { 22573 int ret; 22574 22575 sack_rxmit = 0; 22576 if ((error == 0) && 22577 rack_use_rfo && 22578 ((flags & (TH_SYN|TH_FIN)) == 0) && 22579 (rsm == NULL) && 22580 (ipoptlen == 0) && 22581 (tp->rcv_numsacks == 0) && 22582 (rack->r_must_retran == 0) && 22583 rack->r_fsb_inited && 22584 TCPS_HAVEESTABLISHED(tp->t_state) && 22585 ((IN_RECOVERY(tp->t_flags)) == 0) && 22586 ((tp->t_flags & TF_NEEDFIN) == 0) && 22587 (len > 0) && (orig_len > 0) && 22588 (orig_len > len) && 22589 ((orig_len - len) >= segsiz) && 22590 ((optlen == 0) || 22591 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 22592 /* we can use fast_output for more */ 22593 rack_setup_fast_output(tp, rack, sb, len, orig_len, 22594 segsiz, pace_max_seg, hw_tls, flags); 22595 if (rack->r_fast_output) { 22596 error = 0; 22597 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 22598 if (ret >= 0) 22599 return (ret); 22600 else if (error) 22601 goto nomore; 22602 22603 } 22604 } 22605 goto again; 22606 } 22607 skip_all_send: 22608 /* Assure when we leave that snd_nxt will point to top */ 22609 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 22610 tp->snd_nxt = tp->snd_max; 22611 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 22612 #ifdef TCP_ACCOUNTING 22613 crtsc = get_cyclecount() - ts_val; 22614 if (tot_len_this_send) { 22615 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22616 tp->tcp_cnt_counters[SND_OUT_DATA]++; 22617 } 22618 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22619 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 22620 } 22621 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22622 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 22623 } 22624 } else { 22625 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22626 tp->tcp_cnt_counters[SND_OUT_ACK]++; 22627 } 22628 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22629 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 22630 } 22631 } 22632 sched_unpin(); 22633 #endif 22634 if (error == ENOBUFS) 22635 error = 0; 22636 return (error); 22637 } 22638 22639 static void 22640 rack_update_seg(struct tcp_rack *rack) 22641 { 22642 uint32_t orig_val; 22643 22644 
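/* Re-derive the pacing segment sizes and log only if the max changed. */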
orig_val = rack->r_ctl.rc_pace_max_segs; 22645 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 22646 if (orig_val != rack->r_ctl.rc_pace_max_segs) 22647 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); 22648 } 22649 22650 static void 22651 rack_mtu_change(struct tcpcb *tp) 22652 { 22653 /* 22654 * The MSS may have changed 22655 */ 22656 struct tcp_rack *rack; 22657 struct rack_sendmap *rsm; 22658 22659 rack = (struct tcp_rack *)tp->t_fb_ptr; 22660 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 22661 /* 22662 * The MTU has changed we need to resend everything 22663 * since all we have sent is lost. We first fix 22664 * up the mtu though. 22665 */ 22666 rack_set_pace_segments(tp, rack, __LINE__, NULL); 22667 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 22668 rack_remxt_tmr(tp); 22669 rack->r_fast_output = 0; 22670 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 22671 rack->r_ctl.rc_sacked); 22672 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 22673 rack->r_must_retran = 1; 22674 /* Mark all inflight to needing to be rxt'd */ 22675 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 22676 rsm->r_flags |= (RACK_MUST_RXT|RACK_PMTU_CHG); 22677 } 22678 } 22679 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 22680 /* We don't use snd_nxt to retransmit */ 22681 tp->snd_nxt = tp->snd_max; 22682 } 22683 22684 static int 22685 rack_set_dgp(struct tcp_rack *rack) 22686 { 22687 if (rack->dgp_on == 1) 22688 return(0); 22689 if ((rack->use_fixed_rate == 1) && 22690 (rack->rc_always_pace == 1)) { 22691 /* 22692 * We are already pacing another 22693 * way. 22694 */ 22695 return (EBUSY); 22696 } 22697 if (rack->rc_always_pace == 1) { 22698 rack_remove_pacing(rack); 22699 } 22700 if (tcp_incr_dgp_pacing_cnt() == 0) 22701 return (ENOSPC); 22702 rack->r_ctl.pacing_method |= RACK_DGP_PACING; 22703 rack->rc_fillcw_apply_discount = 0; 22704 rack->dgp_on = 1; 22705 rack->rc_always_pace = 1; 22706 rack->rc_pace_dnd = 1; 22707 rack->use_fixed_rate = 0; 22708 if (rack->gp_ready) 22709 rack_set_cc_pacing(rack); 22710 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 22711 rack->rack_attempt_hdwr_pace = 0; 22712 /* rxt settings */ 22713 rack->full_size_rxt = 1; 22714 rack->shape_rxt_to_pacing_min = 0; 22715 /* cmpack=1 */ 22716 rack->r_use_cmp_ack = 1; 22717 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 22718 rack->r_use_cmp_ack) 22719 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 22720 /* scwnd=1 */ 22721 rack->rack_enable_scwnd = 1; 22722 /* dynamic=100 */ 22723 rack->rc_gp_dyn_mul = 1; 22724 /* gp_inc_ca */ 22725 rack->r_ctl.rack_per_of_gp_ca = 100; 22726 /* rrr_conf=3 */ 22727 rack->r_rr_config = 3; 22728 /* npush=2 */ 22729 rack->r_ctl.rc_no_push_at_mrtt = 2; 22730 /* fillcw=1 */ 22731 rack->rc_pace_to_cwnd = 1; 22732 rack->rc_pace_fill_if_rttin_range = 0; 22733 rack->rtt_limit_mul = 0; 22734 /* noprr=1 */ 22735 rack->rack_no_prr = 1; 22736 /* lscwnd=1 */ 22737 rack->r_limit_scw = 1; 22738 /* gp_inc_rec */ 22739 rack->r_ctl.rack_per_of_gp_rec = 90; 22740 return (0); 22741 } 22742 22743 static int 22744 rack_set_profile(struct tcp_rack *rack, int prof) 22745 { 22746 int err = EINVAL; 22747 if (prof == 1) { 22748 /* 22749 * Profile 1 is "standard" DGP. It ignores 22750 * client buffer level. 
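 * (rack_set_dgp() leaves rc_fillcw_apply_discount cleared for this case).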
22751 */ 22752 err = rack_set_dgp(rack); 22753 if (err) 22754 return (err); 22755 } else if (prof == 6) { 22756 err = rack_set_dgp(rack); 22757 if (err) 22758 return (err); 22759 /* 22760 * Profile 6 tweaks DGP so that it will apply to 22761 * fill-cw the same settings that profile5 does 22762 * to replace DGP. It gets then the max(dgp-rate, fillcw(discounted). 22763 */ 22764 rack->rc_fillcw_apply_discount = 1; 22765 } else if (prof == 0) { 22766 /* This changes things back to the default settings */ 22767 if (rack->rc_always_pace == 1) { 22768 rack_remove_pacing(rack); 22769 } else { 22770 /* Make sure any stray flags are off */ 22771 rack->dgp_on = 0; 22772 rack->rc_hybrid_mode = 0; 22773 rack->use_fixed_rate = 0; 22774 } 22775 err = 0; 22776 if (rack_fill_cw_state) 22777 rack->rc_pace_to_cwnd = 1; 22778 else 22779 rack->rc_pace_to_cwnd = 0; 22780 22781 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 22782 rack->r_ctl.pacing_method |= RACK_REG_PACING; 22783 rack->rc_always_pace = 1; 22784 if (rack->rack_hibeta) 22785 rack_set_cc_pacing(rack); 22786 } else 22787 rack->rc_always_pace = 0; 22788 if (rack_dsack_std_based & 0x1) { 22789 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 22790 rack->rc_rack_tmr_std_based = 1; 22791 } 22792 if (rack_dsack_std_based & 0x2) { 22793 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 22794 rack->rc_rack_use_dsack = 1; 22795 } 22796 if (rack_use_cmp_acks) 22797 rack->r_use_cmp_ack = 1; 22798 else 22799 rack->r_use_cmp_ack = 0; 22800 if (rack_disable_prr) 22801 rack->rack_no_prr = 1; 22802 else 22803 rack->rack_no_prr = 0; 22804 if (rack_gp_no_rec_chg) 22805 rack->rc_gp_no_rec_chg = 1; 22806 else 22807 rack->rc_gp_no_rec_chg = 0; 22808 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 22809 rack->r_mbuf_queue = 1; 22810 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 22811 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 22812 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 22813 } else { 22814 rack->r_mbuf_queue = 0; 22815 rack->rc_tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 22816 } 22817 if (rack_enable_shared_cwnd) 22818 rack->rack_enable_scwnd = 1; 22819 else 22820 rack->rack_enable_scwnd = 0; 22821 if (rack_do_dyn_mul) { 22822 /* When dynamic adjustment is on CA needs to start at 100% */ 22823 rack->rc_gp_dyn_mul = 1; 22824 if (rack_do_dyn_mul >= 100) 22825 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 22826 } else { 22827 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 22828 rack->rc_gp_dyn_mul = 0; 22829 } 22830 rack->r_rr_config = 0; 22831 rack->r_ctl.rc_no_push_at_mrtt = 0; 22832 rack->rc_pace_fill_if_rttin_range = 0; 22833 rack->rtt_limit_mul = 0; 22834 22835 if (rack_enable_hw_pacing) 22836 rack->rack_hdw_pace_ena = 1; 22837 else 22838 rack->rack_hdw_pace_ena = 0; 22839 if (rack_disable_prr) 22840 rack->rack_no_prr = 1; 22841 else 22842 rack->rack_no_prr = 0; 22843 if (rack_limits_scwnd) 22844 rack->r_limit_scw = 1; 22845 else 22846 rack->r_limit_scw = 0; 22847 rack_init_retransmit_value(rack, rack_rxt_controls); 22848 err = 0; 22849 } 22850 return (err); 22851 } 22852 22853 static int 22854 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 22855 { 22856 struct deferred_opt_list *dol; 22857 22858 dol = malloc(sizeof(struct deferred_opt_list), 22859 M_TCPDO, M_NOWAIT|M_ZERO); 22860 if (dol == NULL) { 22861 /* 22862 * No space yikes -- fail out.. 
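 * Returning 0 signals the caller that the option was not queued.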
22863 */ 22864 return (0); 22865 } 22866 dol->optname = sopt_name; 22867 dol->optval = loptval; 22868 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 22869 return (1); 22870 } 22871 22872 static int 22873 process_hybrid_pacing(struct tcp_rack *rack, struct tcp_hybrid_req *hybrid) 22874 { 22875 #ifdef TCP_REQUEST_TRK 22876 struct tcp_sendfile_track *sft; 22877 struct timeval tv; 22878 tcp_seq seq; 22879 int err; 22880 22881 microuptime(&tv); 22882 22883 /* Make sure no fixed rate is on */ 22884 rack->use_fixed_rate = 0; 22885 rack->r_ctl.rc_fixed_pacing_rate_rec = 0; 22886 rack->r_ctl.rc_fixed_pacing_rate_ca = 0; 22887 rack->r_ctl.rc_fixed_pacing_rate_ss = 0; 22888 /* Now allocate or find our entry that will have these settings */ 22889 sft = tcp_req_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusectick(&tv), 0); 22890 if (sft == NULL) { 22891 rack->rc_tp->tcp_hybrid_error++; 22892 /* no space, where would it have gone? */ 22893 seq = rack->rc_tp->snd_una + rack->rc_tp->t_inpcb.inp_socket->so_snd.sb_ccc; 22894 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_ROOM, __LINE__, 0); 22895 return (ENOSPC); 22896 } 22897 /* mask our internal flags */ 22898 hybrid->hybrid_flags &= TCP_HYBRID_PACING_USER_MASK; 22899 /* The seq will be snd_una + everything in the buffer */ 22900 seq = sft->start_seq; 22901 if ((hybrid->hybrid_flags & TCP_HYBRID_PACING_ENABLE) == 0) { 22902 /* Disabling hybrid pacing */ 22903 if (rack->rc_hybrid_mode) { 22904 rack_set_profile(rack, 0); 22905 rack->rc_tp->tcp_hybrid_stop++; 22906 } 22907 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_TURNED_OFF, __LINE__, 0); 22908 return (0); 22909 } 22910 if (rack->dgp_on == 0) { 22911 /* 22912 * If we have not yet turned DGP on, do so 22913 * now setting pure DGP mode, no buffer level 22914 * response. 22915 */ 22916 if ((err = rack_set_profile(rack, 1)) != 0){ 22917 /* Failed to turn pacing on */ 22918 rack->rc_tp->tcp_hybrid_error++; 22919 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_NO_PACING, __LINE__, 0); 22920 return (err); 22921 } 22922 } 22923 /* 22924 * Now we must switch to hybrid mode as well which also 22925 * means moving to regular pacing. 22926 */ 22927 if (rack->rc_hybrid_mode == 0) { 22928 /* First time */ 22929 if (tcp_can_enable_pacing()) { 22930 rack->r_ctl.pacing_method |= RACK_REG_PACING; 22931 rack->rc_hybrid_mode = 1; 22932 } else { 22933 return (ENOSPC); 22934 } 22935 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) { 22936 /* 22937 * This should be true. 
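 * We are moving from DGP accounting to regular pacing, so release
 * the DGP pacing reference.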
22938 */ 22939 tcp_dec_dgp_pacing_cnt(); 22940 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 22941 } 22942 } 22943 /* Now set in our flags */ 22944 sft->hybrid_flags = hybrid->hybrid_flags | TCP_HYBRID_PACING_WASSET; 22945 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_CSPR) 22946 sft->cspr = hybrid->cspr; 22947 else 22948 sft->cspr = 0; 22949 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_H_MS) 22950 sft->hint_maxseg = hybrid->hint_maxseg; 22951 else 22952 sft->hint_maxseg = 0; 22953 rack->rc_tp->tcp_hybrid_start++; 22954 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_RULES_SET, __LINE__,0); 22955 return (0); 22956 #else 22957 return (ENOTSUP); 22958 #endif 22959 } 22960 22961 static int 22962 rack_stack_information(struct tcpcb *tp, struct stack_specific_info *si) 22963 { 22964 /* We pulled a SSI info log out what was there */ 22965 si->bytes_transmitted = tp->t_sndbytes; 22966 si->bytes_retransmitted = tp->t_snd_rxt_bytes; 22967 return (0); 22968 } 22969 22970 static int 22971 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 22972 uint32_t optval, uint64_t loptval, struct tcp_hybrid_req *hybrid) 22973 22974 { 22975 struct epoch_tracker et; 22976 struct sockopt sopt; 22977 struct cc_newreno_opts opt; 22978 uint64_t val; 22979 int error = 0; 22980 uint16_t ca, ss; 22981 22982 switch (sopt_name) { 22983 case TCP_RACK_SET_RXT_OPTIONS: 22984 if ((optval >= 0) && (optval <= 2)) { 22985 rack_init_retransmit_value(rack, optval); 22986 } else { 22987 /* 22988 * You must send in 0, 1 or 2 all else is 22989 * invalid. 22990 */ 22991 error = EINVAL; 22992 } 22993 break; 22994 case TCP_RACK_DSACK_OPT: 22995 RACK_OPTS_INC(tcp_rack_dsack_opt); 22996 if (optval & 0x1) { 22997 rack->rc_rack_tmr_std_based = 1; 22998 } else { 22999 rack->rc_rack_tmr_std_based = 0; 23000 } 23001 if (optval & 0x2) { 23002 rack->rc_rack_use_dsack = 1; 23003 } else { 23004 rack->rc_rack_use_dsack = 0; 23005 } 23006 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); 23007 break; 23008 case TCP_RACK_PACING_DIVISOR: 23009 RACK_OPTS_INC(tcp_rack_pacing_divisor); 23010 if (optval == 0) { 23011 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; 23012 } else { 23013 if (optval < RL_MIN_DIVISOR) 23014 rack->r_ctl.pace_len_divisor = RL_MIN_DIVISOR; 23015 else 23016 rack->r_ctl.pace_len_divisor = optval; 23017 } 23018 break; 23019 case TCP_RACK_HI_BETA: 23020 RACK_OPTS_INC(tcp_rack_hi_beta); 23021 if (optval > 0) { 23022 rack->rack_hibeta = 1; 23023 if ((optval >= 50) && 23024 (optval <= 100)) { 23025 /* 23026 * User wants to set a custom beta. 23027 */ 23028 rack->r_ctl.saved_hibeta = optval; 23029 if (rack->rc_pacing_cc_set) 23030 rack_undo_cc_pacing(rack); 23031 rack->r_ctl.rc_saved_beta.beta = optval; 23032 } 23033 if (rack->rc_pacing_cc_set == 0) 23034 rack_set_cc_pacing(rack); 23035 } else { 23036 rack->rack_hibeta = 0; 23037 if (rack->rc_pacing_cc_set) 23038 rack_undo_cc_pacing(rack); 23039 } 23040 break; 23041 case TCP_RACK_PACING_BETA: 23042 error = EINVAL; 23043 break; 23044 case TCP_RACK_TIMER_SLOP: 23045 RACK_OPTS_INC(tcp_rack_timer_slop); 23046 rack->r_ctl.timer_slop = optval; 23047 if (rack->rc_tp->t_srtt) { 23048 /* 23049 * If we have an SRTT lets update t_rxtcur 23050 * to have the new slop. 
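 * The recomputed value is clamped between rack_rto_min and
 * rack_rto_max with the new slop folded in.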
23051 */ 23052 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 23053 rack_rto_min, rack_rto_max, 23054 rack->r_ctl.timer_slop); 23055 } 23056 break; 23057 case TCP_RACK_PACING_BETA_ECN: 23058 RACK_OPTS_INC(tcp_rack_beta_ecn); 23059 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 23060 /* This only works for newreno. */ 23061 error = EINVAL; 23062 break; 23063 } 23064 if (rack->rc_pacing_cc_set) { 23065 /* 23066 * Set them into the real CC module 23067 * whats in the rack pcb is the old values 23068 * to be used on restoral/ 23069 */ 23070 sopt.sopt_dir = SOPT_SET; 23071 opt.name = CC_NEWRENO_BETA_ECN; 23072 opt.val = optval; 23073 if (CC_ALGO(tp)->ctl_output != NULL) 23074 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 23075 else 23076 error = ENOENT; 23077 } else { 23078 /* 23079 * Not pacing yet so set it into our local 23080 * rack pcb storage. 23081 */ 23082 rack->r_ctl.rc_saved_beta.beta_ecn = optval; 23083 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED; 23084 } 23085 break; 23086 case TCP_DEFER_OPTIONS: 23087 RACK_OPTS_INC(tcp_defer_opt); 23088 if (optval) { 23089 if (rack->gp_ready) { 23090 /* Too late */ 23091 error = EINVAL; 23092 break; 23093 } 23094 rack->defer_options = 1; 23095 } else 23096 rack->defer_options = 0; 23097 break; 23098 case TCP_RACK_MEASURE_CNT: 23099 RACK_OPTS_INC(tcp_rack_measure_cnt); 23100 if (optval && (optval <= 0xff)) { 23101 rack->r_ctl.req_measurements = optval; 23102 } else 23103 error = EINVAL; 23104 break; 23105 case TCP_REC_ABC_VAL: 23106 RACK_OPTS_INC(tcp_rec_abc_val); 23107 if (optval > 0) 23108 rack->r_use_labc_for_rec = 1; 23109 else 23110 rack->r_use_labc_for_rec = 0; 23111 break; 23112 case TCP_RACK_ABC_VAL: 23113 RACK_OPTS_INC(tcp_rack_abc_val); 23114 if ((optval > 0) && (optval < 255)) 23115 rack->rc_labc = optval; 23116 else 23117 error = EINVAL; 23118 break; 23119 case TCP_HDWR_UP_ONLY: 23120 RACK_OPTS_INC(tcp_pacing_up_only); 23121 if (optval) 23122 rack->r_up_only = 1; 23123 else 23124 rack->r_up_only = 0; 23125 break; 23126 case TCP_FILLCW_RATE_CAP: /* URL:fillcw_cap */ 23127 RACK_OPTS_INC(tcp_fillcw_rate_cap); 23128 rack->r_ctl.fillcw_cap = loptval; 23129 break; 23130 case TCP_PACING_RATE_CAP: 23131 RACK_OPTS_INC(tcp_pacing_rate_cap); 23132 if ((rack->dgp_on == 1) && 23133 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { 23134 /* 23135 * If we are doing DGP we need to switch 23136 * to using the pacing limit. 23137 */ 23138 if (tcp_can_enable_pacing() == 0) { 23139 error = ENOSPC; 23140 break; 23141 } 23142 /* 23143 * Now change up the flags and counts to be correct. 
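 * (count this connection against regular pacing and drop the DGP count).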
23144 */ 23145 rack->r_ctl.pacing_method |= RACK_REG_PACING; 23146 tcp_dec_dgp_pacing_cnt(); 23147 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 23148 } 23149 rack->r_ctl.bw_rate_cap = loptval; 23150 break; 23151 case TCP_HYBRID_PACING: 23152 if (hybrid == NULL) { 23153 error = EINVAL; 23154 break; 23155 } 23156 if (rack->r_ctl.side_chan_dis_mask & HYBRID_DIS_MASK) { 23157 error = EPERM; 23158 break; 23159 } 23160 error = process_hybrid_pacing(rack, hybrid); 23161 break; 23162 case TCP_SIDECHAN_DIS: /* URL:scodm */ 23163 if (optval) 23164 rack->r_ctl.side_chan_dis_mask = optval; 23165 else 23166 rack->r_ctl.side_chan_dis_mask = 0; 23167 break; 23168 case TCP_RACK_PROFILE: 23169 RACK_OPTS_INC(tcp_profile); 23170 error = rack_set_profile(rack, optval); 23171 break; 23172 case TCP_USE_CMP_ACKS: 23173 RACK_OPTS_INC(tcp_use_cmp_acks); 23174 if ((optval == 0) && (tp->t_flags2 & TF2_MBUF_ACKCMP)) { 23175 /* You can't turn it off once its on! */ 23176 error = EINVAL; 23177 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { 23178 rack->r_use_cmp_ack = 1; 23179 rack->r_mbuf_queue = 1; 23180 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23181 } 23182 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 23183 tp->t_flags2 |= TF2_MBUF_ACKCMP; 23184 break; 23185 case TCP_SHARED_CWND_TIME_LIMIT: 23186 RACK_OPTS_INC(tcp_lscwnd); 23187 if (optval) 23188 rack->r_limit_scw = 1; 23189 else 23190 rack->r_limit_scw = 0; 23191 break; 23192 case TCP_RACK_DGP_IN_REC: 23193 error = EINVAL; 23194 break; 23195 case TCP_RACK_PACE_TO_FILL: 23196 RACK_OPTS_INC(tcp_fillcw); 23197 if (optval == 0) 23198 rack->rc_pace_to_cwnd = 0; 23199 else { 23200 rack->rc_pace_to_cwnd = 1; 23201 } 23202 if ((optval >= rack_gp_rtt_maxmul) && 23203 rack_gp_rtt_maxmul && 23204 (optval < 0xf)) { 23205 rack->rc_pace_fill_if_rttin_range = 1; 23206 rack->rtt_limit_mul = optval; 23207 } else { 23208 rack->rc_pace_fill_if_rttin_range = 0; 23209 rack->rtt_limit_mul = 0; 23210 } 23211 break; 23212 case TCP_RACK_NO_PUSH_AT_MAX: 23213 RACK_OPTS_INC(tcp_npush); 23214 if (optval == 0) 23215 rack->r_ctl.rc_no_push_at_mrtt = 0; 23216 else if (optval < 0xff) 23217 rack->r_ctl.rc_no_push_at_mrtt = optval; 23218 else 23219 error = EINVAL; 23220 break; 23221 case TCP_SHARED_CWND_ENABLE: 23222 RACK_OPTS_INC(tcp_rack_scwnd); 23223 if (optval == 0) 23224 rack->rack_enable_scwnd = 0; 23225 else 23226 rack->rack_enable_scwnd = 1; 23227 break; 23228 case TCP_RACK_MBUF_QUEUE: 23229 /* Now do we use the LRO mbuf-queue feature */ 23230 RACK_OPTS_INC(tcp_rack_mbufq); 23231 if (optval || rack->r_use_cmp_ack) 23232 rack->r_mbuf_queue = 1; 23233 else 23234 rack->r_mbuf_queue = 0; 23235 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 23236 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23237 else 23238 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 23239 break; 23240 case TCP_RACK_NONRXT_CFG_RATE: 23241 RACK_OPTS_INC(tcp_rack_cfg_rate); 23242 if (optval == 0) 23243 rack->rack_rec_nonrxt_use_cr = 0; 23244 else 23245 rack->rack_rec_nonrxt_use_cr = 1; 23246 break; 23247 case TCP_NO_PRR: 23248 RACK_OPTS_INC(tcp_rack_noprr); 23249 if (optval == 0) 23250 rack->rack_no_prr = 0; 23251 else if (optval == 1) 23252 rack->rack_no_prr = 1; 23253 else if (optval == 2) 23254 rack->no_prr_addback = 1; 23255 else 23256 error = EINVAL; 23257 break; 23258 case RACK_CSPR_IS_FCC: /* URL:csprisfcc */ 23259 if (optval > 0) 23260 rack->cspr_is_fcc = 1; 23261 else 23262 rack->cspr_is_fcc = 0; 23263 break; 23264 case TCP_TIMELY_DYN_ADJ: 23265 RACK_OPTS_INC(tcp_timely_dyn); 23266 if 
(optval == 0) 23267 rack->rc_gp_dyn_mul = 0; 23268 else { 23269 rack->rc_gp_dyn_mul = 1; 23270 if (optval >= 100) { 23271 /* 23272 * If the user sets something 100 or more 23273 * its the gp_ca value. 23274 */ 23275 rack->r_ctl.rack_per_of_gp_ca = optval; 23276 } 23277 } 23278 break; 23279 case TCP_RACK_DO_DETECTION: 23280 error = EINVAL; 23281 break; 23282 case TCP_RACK_TLP_USE: 23283 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 23284 error = EINVAL; 23285 break; 23286 } 23287 RACK_OPTS_INC(tcp_tlp_use); 23288 rack->rack_tlp_threshold_use = optval; 23289 break; 23290 case TCP_RACK_TLP_REDUCE: 23291 /* RACK TLP cwnd reduction (bool) */ 23292 RACK_OPTS_INC(tcp_rack_tlp_reduce); 23293 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 23294 break; 23295 /* Pacing related ones */ 23296 case TCP_RACK_PACE_ALWAYS: 23297 /* 23298 * zero is old rack method, 1 is new 23299 * method using a pacing rate. 23300 */ 23301 RACK_OPTS_INC(tcp_rack_pace_always); 23302 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 23303 error = EPERM; 23304 break; 23305 } 23306 if (optval > 0) { 23307 if (rack->rc_always_pace) { 23308 error = EALREADY; 23309 break; 23310 } else if (tcp_can_enable_pacing()) { 23311 rack->r_ctl.pacing_method |= RACK_REG_PACING; 23312 rack->rc_always_pace = 1; 23313 if (rack->rack_hibeta) 23314 rack_set_cc_pacing(rack); 23315 } 23316 else { 23317 error = ENOSPC; 23318 break; 23319 } 23320 } else { 23321 if (rack->rc_always_pace == 1) { 23322 rack_remove_pacing(rack); 23323 } 23324 } 23325 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 23326 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23327 else 23328 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 23329 /* A rate may be set irate or other, if so set seg size */ 23330 rack_update_seg(rack); 23331 break; 23332 case TCP_BBR_RACK_INIT_RATE: 23333 RACK_OPTS_INC(tcp_initial_rate); 23334 val = optval; 23335 /* Change from kbits per second to bytes per second */ 23336 val *= 1000; 23337 val /= 8; 23338 rack->r_ctl.init_rate = val; 23339 if (rack->rc_always_pace) 23340 rack_update_seg(rack); 23341 break; 23342 case TCP_BBR_IWINTSO: 23343 error = EINVAL; 23344 break; 23345 case TCP_RACK_FORCE_MSEG: 23346 RACK_OPTS_INC(tcp_rack_force_max_seg); 23347 if (optval) 23348 rack->rc_force_max_seg = 1; 23349 else 23350 rack->rc_force_max_seg = 0; 23351 break; 23352 case TCP_RACK_PACE_MIN_SEG: 23353 RACK_OPTS_INC(tcp_rack_min_seg); 23354 rack->r_ctl.rc_user_set_min_segs = (0x0000ffff & optval); 23355 rack_set_pace_segments(tp, rack, __LINE__, NULL); 23356 break; 23357 case TCP_RACK_PACE_MAX_SEG: 23358 /* Max segments size in a pace in bytes */ 23359 RACK_OPTS_INC(tcp_rack_max_seg); 23360 if ((rack->dgp_on == 1) && 23361 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { 23362 /* 23363 * If we set a max-seg and are doing DGP then 23364 * we now fall under the pacing limits not the 23365 * DGP ones. 23366 */ 23367 if (tcp_can_enable_pacing() == 0) { 23368 error = ENOSPC; 23369 break; 23370 } 23371 /* 23372 * Now change up the flags and counts to be correct. 
23373 */ 23374 rack->r_ctl.pacing_method |= RACK_REG_PACING; 23375 tcp_dec_dgp_pacing_cnt(); 23376 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 23377 } 23378 if (optval <= MAX_USER_SET_SEG) 23379 rack->rc_user_set_max_segs = optval; 23380 else 23381 rack->rc_user_set_max_segs = MAX_USER_SET_SEG; 23382 rack_set_pace_segments(tp, rack, __LINE__, NULL); 23383 break; 23384 case TCP_RACK_PACE_RATE_REC: 23385 /* Set the fixed pacing rate in Bytes per second ca */ 23386 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 23387 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 23388 error = EPERM; 23389 break; 23390 } 23391 if (rack->dgp_on) { 23392 /* 23393 * We are already pacing another 23394 * way. 23395 */ 23396 error = EBUSY; 23397 break; 23398 } 23399 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 23400 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 23401 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 23402 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 23403 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 23404 rack->use_fixed_rate = 1; 23405 if (rack->rack_hibeta) 23406 rack_set_cc_pacing(rack); 23407 rack_log_pacing_delay_calc(rack, 23408 rack->r_ctl.rc_fixed_pacing_rate_ss, 23409 rack->r_ctl.rc_fixed_pacing_rate_ca, 23410 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 23411 __LINE__, NULL,0); 23412 break; 23413 23414 case TCP_RACK_PACE_RATE_SS: 23415 /* Set the fixed pacing rate in Bytes per second ca */ 23416 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 23417 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 23418 error = EPERM; 23419 break; 23420 } 23421 if (rack->dgp_on) { 23422 /* 23423 * We are already pacing another 23424 * way. 23425 */ 23426 error = EBUSY; 23427 break; 23428 } 23429 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 23430 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 23431 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 23432 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 23433 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 23434 rack->use_fixed_rate = 1; 23435 if (rack->rack_hibeta) 23436 rack_set_cc_pacing(rack); 23437 rack_log_pacing_delay_calc(rack, 23438 rack->r_ctl.rc_fixed_pacing_rate_ss, 23439 rack->r_ctl.rc_fixed_pacing_rate_ca, 23440 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 23441 __LINE__, NULL, 0); 23442 break; 23443 23444 case TCP_RACK_PACE_RATE_CA: 23445 /* Set the fixed pacing rate in Bytes per second ca */ 23446 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 23447 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 23448 error = EPERM; 23449 break; 23450 } 23451 if (rack->dgp_on) { 23452 /* 23453 * We are already pacing another 23454 * way. 
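 * (DGP owns the pacing rate), so the fixed-rate request is refused
 * with EBUSY.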
23455 */ 23456 error = EBUSY; 23457 break; 23458 } 23459 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 23460 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 23461 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 23462 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 23463 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 23464 rack->use_fixed_rate = 1; 23465 if (rack->rack_hibeta) 23466 rack_set_cc_pacing(rack); 23467 rack_log_pacing_delay_calc(rack, 23468 rack->r_ctl.rc_fixed_pacing_rate_ss, 23469 rack->r_ctl.rc_fixed_pacing_rate_ca, 23470 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 23471 __LINE__, NULL, 0); 23472 break; 23473 case TCP_RACK_GP_INCREASE_REC: 23474 RACK_OPTS_INC(tcp_gp_inc_rec); 23475 rack->r_ctl.rack_per_of_gp_rec = optval; 23476 rack_log_pacing_delay_calc(rack, 23477 rack->r_ctl.rack_per_of_gp_ss, 23478 rack->r_ctl.rack_per_of_gp_ca, 23479 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 23480 __LINE__, NULL, 0); 23481 break; 23482 case TCP_RACK_GP_INCREASE_CA: 23483 RACK_OPTS_INC(tcp_gp_inc_ca); 23484 ca = optval; 23485 if (ca < 100) { 23486 /* 23487 * We don't allow any reduction 23488 * over the GP b/w. 23489 */ 23490 error = EINVAL; 23491 break; 23492 } 23493 rack->r_ctl.rack_per_of_gp_ca = ca; 23494 rack_log_pacing_delay_calc(rack, 23495 rack->r_ctl.rack_per_of_gp_ss, 23496 rack->r_ctl.rack_per_of_gp_ca, 23497 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 23498 __LINE__, NULL, 0); 23499 break; 23500 case TCP_RACK_GP_INCREASE_SS: 23501 RACK_OPTS_INC(tcp_gp_inc_ss); 23502 ss = optval; 23503 if (ss < 100) { 23504 /* 23505 * We don't allow any reduction 23506 * over the GP b/w. 23507 */ 23508 error = EINVAL; 23509 break; 23510 } 23511 rack->r_ctl.rack_per_of_gp_ss = ss; 23512 rack_log_pacing_delay_calc(rack, 23513 rack->r_ctl.rack_per_of_gp_ss, 23514 rack->r_ctl.rack_per_of_gp_ca, 23515 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 23516 __LINE__, NULL, 0); 23517 break; 23518 case TCP_RACK_RR_CONF: 23519 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 23520 if (optval && optval <= 3) 23521 rack->r_rr_config = optval; 23522 else 23523 rack->r_rr_config = 0; 23524 break; 23525 case TCP_PACING_DND: /* URL:dnd */ 23526 if (optval > 0) 23527 rack->rc_pace_dnd = 1; 23528 else 23529 rack->rc_pace_dnd = 0; 23530 break; 23531 case TCP_HDWR_RATE_CAP: 23532 RACK_OPTS_INC(tcp_hdwr_rate_cap); 23533 if (optval) { 23534 if (rack->r_rack_hw_rate_caps == 0) 23535 rack->r_rack_hw_rate_caps = 1; 23536 else 23537 error = EALREADY; 23538 } else { 23539 rack->r_rack_hw_rate_caps = 0; 23540 } 23541 break; 23542 case TCP_DGP_UPPER_BOUNDS: 23543 { 23544 uint8_t val; 23545 val = optval & 0x0000ff; 23546 rack->r_ctl.rack_per_upper_bound_ca = val; 23547 val = (optval >> 16) & 0x0000ff; 23548 rack->r_ctl.rack_per_upper_bound_ss = val; 23549 break; 23550 } 23551 case TCP_SS_EEXIT: /* URL:eexit */ 23552 if (optval > 0) { 23553 rack->r_ctl.gp_rnd_thresh = optval & 0x0ff; 23554 if (optval & 0x10000) { 23555 rack->r_ctl.gate_to_fs = 1; 23556 } else { 23557 rack->r_ctl.gate_to_fs = 0; 23558 } 23559 if (optval & 0x20000) { 23560 rack->r_ctl.use_gp_not_last = 1; 23561 } else { 23562 rack->r_ctl.use_gp_not_last = 0; 23563 } 23564 if (optval & 0xfffc0000) { 23565 uint32_t v; 23566 23567 v = (optval >> 18) & 0x00003fff; 23568 if (v >= 1000) 23569 rack->r_ctl.gp_gain_req = v; 23570 } 23571 } else { 23572 /* We do not do ss early exit at all */ 23573 rack->rc_initial_ss_comp = 1; 23574 rack->r_ctl.gp_rnd_thresh = 0; 23575 } 23576 break; 23577 case TCP_RACK_SPLIT_LIMIT: 23578 RACK_OPTS_INC(tcp_split_limit); 23579 rack->r_ctl.rc_split_limit = 
optval; 23580 break; 23581 case TCP_BBR_HDWR_PACE: 23582 RACK_OPTS_INC(tcp_hdwr_pacing); 23583 if (optval) { 23584 if (rack->rack_hdrw_pacing == 0) { 23585 rack->rack_hdw_pace_ena = 1; 23586 rack->rack_attempt_hdwr_pace = 0; 23587 } else 23588 error = EALREADY; 23589 } else { 23590 rack->rack_hdw_pace_ena = 0; 23591 #ifdef RATELIMIT 23592 if (rack->r_ctl.crte != NULL) { 23593 rack->rack_hdrw_pacing = 0; 23594 rack->rack_attempt_hdwr_pace = 0; 23595 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 23596 rack->r_ctl.crte = NULL; 23597 } 23598 #endif 23599 } 23600 break; 23601 /* End Pacing related ones */
23602 case TCP_RACK_PRR_SENDALOT: 23603 /* Allow PRR to send more than one seg */ 23604 RACK_OPTS_INC(tcp_rack_prr_sendalot); 23605 rack->r_ctl.rc_prr_sendalot = optval; 23606 break; 23607 case TCP_RACK_MIN_TO: 23608 /* Minimum time between rack t-o's in ms */ 23609 RACK_OPTS_INC(tcp_rack_min_to); 23610 rack->r_ctl.rc_min_to = optval; 23611 break; 23612 case TCP_RACK_EARLY_SEG: 23613 /* Max segments for early recovery */ 23614 RACK_OPTS_INC(tcp_rack_early_seg); 23615 rack->r_ctl.rc_early_recovery_segs = optval; 23616 break;
23617 case TCP_RACK_ENABLE_HYSTART: 23618 { 23619 if (optval) { 23620 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 23621 if (rack_do_hystart > RACK_HYSTART_ON) 23622 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 23623 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 23624 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 23625 } else { 23626 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 23627 } 23628 } 23629 break;
23630 case TCP_RACK_REORD_THRESH: 23631 /* RACK reorder threshold (shift amount) */ 23632 RACK_OPTS_INC(tcp_rack_reord_thresh); 23633 if ((optval > 0) && (optval < 31)) 23634 rack->r_ctl.rc_reorder_shift = optval; 23635 else 23636 error = EINVAL; 23637 break; 23638 case TCP_RACK_REORD_FADE: 23639 /* Does reordering fade after ms time */ 23640 RACK_OPTS_INC(tcp_rack_reord_fade); 23641 rack->r_ctl.rc_reorder_fade = optval; 23642 break; 23643 case TCP_RACK_TLP_THRESH: 23644 /* RACK TLP threshold i.e. srtt+(srtt/N) */ 23645 RACK_OPTS_INC(tcp_rack_tlp_thresh); 23646 if (optval) 23647 rack->r_ctl.rc_tlp_threshold = optval; 23648 else 23649 error = EINVAL; 23650 break;
23651 case TCP_BBR_USE_RACK_RR: 23652 RACK_OPTS_INC(tcp_rack_rr); 23653 if (optval) 23654 rack->use_rack_rr = 1; 23655 else 23656 rack->use_rack_rr = 0; 23657 break; 23658 case TCP_RACK_PKT_DELAY: 23659 /* RACK added ms i.e. rack-rtt + reord + N */ 23660 RACK_OPTS_INC(tcp_rack_pkt_delay); 23661 rack->r_ctl.rc_pkt_delay = optval; 23662 break; 23663 case TCP_DELACK: 23664 RACK_OPTS_INC(tcp_rack_delayed_ack); 23665 if (optval == 0) 23666 tp->t_delayed_ack = 0; 23667 else 23668 tp->t_delayed_ack = 1; 23669 if (tp->t_flags & TF_DELACK) { 23670 tp->t_flags &= ~TF_DELACK; 23671 tp->t_flags |= TF_ACKNOW; 23672 NET_EPOCH_ENTER(et); 23673 rack_output(tp); 23674 NET_EPOCH_EXIT(et); 23675 } 23676 break; 23677
23678 case TCP_BBR_RACK_RTT_USE: 23679 RACK_OPTS_INC(tcp_rack_rtt_use); 23680 if ((optval != USE_RTT_HIGH) && 23681 (optval != USE_RTT_LOW) && 23682 (optval != USE_RTT_AVG)) 23683 error = EINVAL; 23684 else 23685 rack->r_ctl.rc_rate_sample_method = optval; 23686 break; 23687 case TCP_HONOR_HPTS_MIN: 23688 RACK_OPTS_INC(tcp_honor_hpts); 23689 if (optval) { 23690 rack->r_use_hpts_min = 1; 23691 /* 23692 * Must be between 2 - 80% to be a reduction, else 23693 * we keep the default (10%).
23694 */ 23695 if ((optval > 1) && (optval <= 80)) { 23696 rack->r_ctl.max_reduction = optval; 23697 } 23698 } else 23699 rack->r_use_hpts_min = 0; 23700 break; 23701 case TCP_REC_IS_DYN: /* URL:dynrec */ 23702 RACK_OPTS_INC(tcp_dyn_rec); 23703 if (optval) 23704 rack->rc_gp_no_rec_chg = 1; 23705 else 23706 rack->rc_gp_no_rec_chg = 0; 23707 break; 23708 case TCP_NO_TIMELY: 23709 RACK_OPTS_INC(tcp_notimely); 23710 if (optval) { 23711 rack->rc_skip_timely = 1; 23712 rack->r_ctl.rack_per_of_gp_rec = 90; 23713 rack->r_ctl.rack_per_of_gp_ca = 100; 23714 rack->r_ctl.rack_per_of_gp_ss = 250; 23715 } else { 23716 rack->rc_skip_timely = 0; 23717 } 23718 break; 23719 case TCP_GP_USE_LTBW: 23720 if (optval == 0) { 23721 rack->use_lesser_lt_bw = 0; 23722 rack->dis_lt_bw = 1; 23723 } else if (optval == 1) { 23724 rack->use_lesser_lt_bw = 1; 23725 rack->dis_lt_bw = 0; 23726 } else if (optval == 2) { 23727 rack->use_lesser_lt_bw = 0; 23728 rack->dis_lt_bw = 0; 23729 } 23730 break; 23731 case TCP_DATA_AFTER_CLOSE: 23732 RACK_OPTS_INC(tcp_data_after_close); 23733 if (optval) 23734 rack->rc_allow_data_af_clo = 1; 23735 else 23736 rack->rc_allow_data_af_clo = 0; 23737 break; 23738 default: 23739 break; 23740 } 23741 tcp_log_socket_option(tp, sopt_name, optval, error); 23742 return (error); 23743 } 23744 23745 static void 23746 rack_inherit(struct tcpcb *tp, struct inpcb *parent) 23747 { 23748 /* 23749 * A new connection has been created (tp) and 23750 * the parent is the inpcb given. We want to 23751 * apply a read-lock to the parent (we are already 23752 * holding a write lock on the tp) and copy anything 23753 * out of the rack specific data as long as its tfb is 23754 * the same as ours i.e. we are the same stack. Otherwise 23755 * we just return. 23756 */ 23757 struct tcpcb *par; 23758 struct tcp_rack *dest, *src; 23759 int cnt = 0; 23760 23761 par = intotcpcb(parent); 23762 if (par->t_fb != tp->t_fb) { 23763 /* Not the same stack */ 23764 tcp_log_socket_option(tp, 0, 0, 1); 23765 return; 23766 } 23767 /* Ok if we reach here lets setup the two rack pointers */ 23768 dest = (struct tcp_rack *)tp->t_fb_ptr; 23769 src = (struct tcp_rack *)par->t_fb_ptr; 23770 if ((src == NULL) || (dest == NULL)) { 23771 /* Huh? */ 23772 tcp_log_socket_option(tp, 0, 0, 2); 23773 return; 23774 } 23775 /* Now copy out anything we wish to inherit i.e. 
things in socket-options */ 23776 /* TCP_RACK_PROFILE we can't know but we can set DGP if its on */ 23777 if ((src->dgp_on) && (dest->dgp_on == 0)) { 23778 /* Profile 1 had to be set via sock opt */ 23779 rack_set_dgp(dest); 23780 cnt++; 23781 } 23782 /* TCP_RACK_SET_RXT_OPTIONS */ 23783 if (dest->full_size_rxt != src->full_size_rxt) { 23784 dest->full_size_rxt = src->full_size_rxt; 23785 cnt++; 23786 } 23787 if (dest->shape_rxt_to_pacing_min != src->shape_rxt_to_pacing_min) { 23788 dest->shape_rxt_to_pacing_min = src->shape_rxt_to_pacing_min; 23789 cnt++; 23790 } 23791 /* TCP_RACK_DSACK_OPT */ 23792 if (dest->rc_rack_tmr_std_based != src->rc_rack_tmr_std_based) { 23793 dest->rc_rack_tmr_std_based = src->rc_rack_tmr_std_based; 23794 cnt++; 23795 } 23796 if (dest->rc_rack_use_dsack != src->rc_rack_use_dsack) { 23797 dest->rc_rack_use_dsack = src->rc_rack_use_dsack; 23798 cnt++; 23799 } 23800 /* TCP_RACK_PACING_DIVISOR */ 23801 if (dest->r_ctl.pace_len_divisor != src->r_ctl.pace_len_divisor) { 23802 dest->r_ctl.pace_len_divisor = src->r_ctl.pace_len_divisor; 23803 cnt++; 23804 } 23805 /* TCP_RACK_HI_BETA */ 23806 if (src->rack_hibeta != dest->rack_hibeta) { 23807 cnt++; 23808 if (src->rack_hibeta) { 23809 dest->r_ctl.rc_saved_beta.beta = src->r_ctl.rc_saved_beta.beta; 23810 dest->rack_hibeta = 1; 23811 } else { 23812 dest->rack_hibeta = 0; 23813 } 23814 } 23815 /* TCP_RACK_TIMER_SLOP */ 23816 if (dest->r_ctl.timer_slop != src->r_ctl.timer_slop) { 23817 dest->r_ctl.timer_slop = src->r_ctl.timer_slop; 23818 cnt++; 23819 } 23820 /* TCP_RACK_PACING_BETA_ECN */ 23821 if (dest->r_ctl.rc_saved_beta.beta_ecn != src->r_ctl.rc_saved_beta.beta_ecn) { 23822 dest->r_ctl.rc_saved_beta.beta_ecn = src->r_ctl.rc_saved_beta.beta_ecn; 23823 cnt++; 23824 } 23825 if (dest->r_ctl.rc_saved_beta.newreno_flags != src->r_ctl.rc_saved_beta.newreno_flags) { 23826 dest->r_ctl.rc_saved_beta.newreno_flags = src->r_ctl.rc_saved_beta.newreno_flags; 23827 cnt++; 23828 } 23829 /* We do not do TCP_DEFER_OPTIONS */ 23830 /* TCP_RACK_MEASURE_CNT */ 23831 if (dest->r_ctl.req_measurements != src->r_ctl.req_measurements) { 23832 dest->r_ctl.req_measurements = src->r_ctl.req_measurements; 23833 cnt++; 23834 } 23835 /* TCP_HDWR_UP_ONLY */ 23836 if (dest->r_up_only != src->r_up_only) { 23837 dest->r_up_only = src->r_up_only; 23838 cnt++; 23839 } 23840 /* TCP_FILLCW_RATE_CAP */ 23841 if (dest->r_ctl.fillcw_cap != src->r_ctl.fillcw_cap) { 23842 dest->r_ctl.fillcw_cap = src->r_ctl.fillcw_cap; 23843 cnt++; 23844 } 23845 /* TCP_PACING_RATE_CAP */ 23846 if (dest->r_ctl.bw_rate_cap != src->r_ctl.bw_rate_cap) { 23847 dest->r_ctl.bw_rate_cap = src->r_ctl.bw_rate_cap; 23848 cnt++; 23849 } 23850 /* A listener can't set TCP_HYBRID_PACING */ 23851 /* TCP_SIDECHAN_DIS */ 23852 if (dest->r_ctl.side_chan_dis_mask != src->r_ctl.side_chan_dis_mask) { 23853 dest->r_ctl.side_chan_dis_mask = src->r_ctl.side_chan_dis_mask; 23854 cnt++; 23855 } 23856 /* TCP_SHARED_CWND_TIME_LIMIT */ 23857 if (dest->r_limit_scw != src->r_limit_scw) { 23858 dest->r_limit_scw = src->r_limit_scw; 23859 cnt++; 23860 } 23861 /* TCP_RACK_PACE_TO_FILL */ 23862 if (dest->rc_pace_to_cwnd != src->rc_pace_to_cwnd) { 23863 dest->rc_pace_to_cwnd = src->rc_pace_to_cwnd; 23864 cnt++; 23865 } 23866 if (dest->rc_pace_fill_if_rttin_range != src->rc_pace_fill_if_rttin_range) { 23867 dest->rc_pace_fill_if_rttin_range = src->rc_pace_fill_if_rttin_range; 23868 cnt++; 23869 } 23870 if (dest->rtt_limit_mul != src->rtt_limit_mul) { 23871 dest->rtt_limit_mul = src->rtt_limit_mul; 23872 cnt++; 23873 } 
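/*
 * Note: the three fields just copied (rc_pace_to_cwnd,
 * rc_pace_fill_if_rttin_range and rtt_limit_mul) are the knobs behind
 * TCP_RACK_PACE_TO_FILL (URL:fillcw); as the names suggest, they ask
 * rack to pace enough to fill the cwnd, optionally only while the RTT
 * stays within rtt_limit_mul times the lowest RTT observed.
 */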
23874 /* TCP_RACK_NO_PUSH_AT_MAX */ 23875 if (dest->r_ctl.rc_no_push_at_mrtt != src->r_ctl.rc_no_push_at_mrtt) { 23876 dest->r_ctl.rc_no_push_at_mrtt = src->r_ctl.rc_no_push_at_mrtt; 23877 cnt++; 23878 } 23879 /* TCP_SHARED_CWND_ENABLE */ 23880 if (dest->rack_enable_scwnd != src->rack_enable_scwnd) { 23881 dest->rack_enable_scwnd = src->rack_enable_scwnd; 23882 cnt++; 23883 } 23884 /* TCP_USE_CMP_ACKS */ 23885 if (dest->r_use_cmp_ack != src->r_use_cmp_ack) { 23886 dest->r_use_cmp_ack = src->r_use_cmp_ack; 23887 cnt++; 23888 } 23889
23894 /* TCP_RACK_MBUF_QUEUE */ 23895 if (dest->r_mbuf_queue != src->r_mbuf_queue) { 23896 dest->r_mbuf_queue = src->r_mbuf_queue; 23897 cnt++; 23898 } 23899 if (dest->r_mbuf_queue || dest->rc_always_pace || dest->r_use_cmp_ack) { 23900 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23901 } else { 23902 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 23903 } 23904 if (dest->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) { 23905 tp->t_flags2 |= TF2_MBUF_ACKCMP; 23906 }
23907 /* TCP_RACK_NONRXT_CFG_RATE */ 23908 if (dest->rack_rec_nonrxt_use_cr != src->rack_rec_nonrxt_use_cr) { 23909 dest->rack_rec_nonrxt_use_cr = src->rack_rec_nonrxt_use_cr; 23910 cnt++; 23911 } 23912 /* TCP_NO_PRR */ 23913 if (dest->rack_no_prr != src->rack_no_prr) { 23914 dest->rack_no_prr = src->rack_no_prr; 23915 cnt++; 23916 } 23917 if (dest->no_prr_addback != src->no_prr_addback) { 23918 dest->no_prr_addback = src->no_prr_addback; 23919 cnt++; 23920 } 23921 /* RACK_CSPR_IS_FCC */ 23922 if (dest->cspr_is_fcc != src->cspr_is_fcc) { 23923 dest->cspr_is_fcc = src->cspr_is_fcc; 23924 cnt++; 23925 }
23926 /* TCP_TIMELY_DYN_ADJ */ 23927 if (dest->rc_gp_dyn_mul != src->rc_gp_dyn_mul) { 23928 dest->rc_gp_dyn_mul = src->rc_gp_dyn_mul; 23929 cnt++; 23930 } 23931 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { 23932 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; 23933 cnt++; 23934 } 23935 /* TCP_RACK_TLP_USE */ 23936 if (dest->rack_tlp_threshold_use != src->rack_tlp_threshold_use) { 23937 dest->rack_tlp_threshold_use = src->rack_tlp_threshold_use; 23938 cnt++; 23939 } 23940 /* we don't allow inheritance of TCP_RACK_PACE_ALWAYS */
23941 /* TCP_BBR_RACK_INIT_RATE */ 23942 if (dest->r_ctl.init_rate != src->r_ctl.init_rate) { 23943 dest->r_ctl.init_rate = src->r_ctl.init_rate; 23944 cnt++; 23945 } 23946 /* TCP_RACK_FORCE_MSEG */ 23947 if (dest->rc_force_max_seg != src->rc_force_max_seg) { 23948 dest->rc_force_max_seg = src->rc_force_max_seg; 23949 cnt++; 23950 } 23951 /* TCP_RACK_PACE_MIN_SEG */ 23952 if (dest->r_ctl.rc_user_set_min_segs != src->r_ctl.rc_user_set_min_segs) { 23953 dest->r_ctl.rc_user_set_min_segs = src->r_ctl.rc_user_set_min_segs; 23954 cnt++; 23955 } 23956 /* we don't allow TCP_RACK_PACE_MAX_SEG */
23957 /* TCP_RACK_PACE_RATE_REC, TCP_RACK_PACE_RATE_SS, TCP_RACK_PACE_RATE_CA */ 23958 if (dest->r_ctl.rc_fixed_pacing_rate_ca != src->r_ctl.rc_fixed_pacing_rate_ca) { 23959 dest->r_ctl.rc_fixed_pacing_rate_ca = src->r_ctl.rc_fixed_pacing_rate_ca; 23960 cnt++; 23961 } 23962 if (dest->r_ctl.rc_fixed_pacing_rate_ss != src->r_ctl.rc_fixed_pacing_rate_ss) { 23963 dest->r_ctl.rc_fixed_pacing_rate_ss = src->r_ctl.rc_fixed_pacing_rate_ss; 23964 cnt++; 23965 } 23966 if (dest->r_ctl.rc_fixed_pacing_rate_rec != src->r_ctl.rc_fixed_pacing_rate_rec) { 23967 dest->r_ctl.rc_fixed_pacing_rate_rec = src->r_ctl.rc_fixed_pacing_rate_rec; 23968 cnt++; 23969 } 23970 /*
TCP_RACK_GP_INCREASE_REC, TCP_RACK_GP_INCREASE_CA, TCP_RACK_GP_INCREASE_SS */ 23971 if (dest->r_ctl.rack_per_of_gp_rec != src->r_ctl.rack_per_of_gp_rec) { 23972 dest->r_ctl.rack_per_of_gp_rec = src->r_ctl.rack_per_of_gp_rec; 23973 cnt++; 23974 } 23975 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { 23976 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; 23977 cnt++; 23978 } 23979 23980 if (dest->r_ctl.rack_per_of_gp_ss != src->r_ctl.rack_per_of_gp_ss) { 23981 dest->r_ctl.rack_per_of_gp_ss = src->r_ctl.rack_per_of_gp_ss; 23982 cnt++; 23983 } 23984 /* TCP_RACK_RR_CONF */ 23985 if (dest->r_rr_config != src->r_rr_config) { 23986 dest->r_rr_config = src->r_rr_config; 23987 cnt++; 23988 } 23989 /* TCP_PACING_DND */ 23990 if (dest->rc_pace_dnd != src->rc_pace_dnd) { 23991 dest->rc_pace_dnd = src->rc_pace_dnd; 23992 cnt++; 23993 } 23994 /* TCP_HDWR_RATE_CAP */ 23995 if (dest->r_rack_hw_rate_caps != src->r_rack_hw_rate_caps) { 23996 dest->r_rack_hw_rate_caps = src->r_rack_hw_rate_caps; 23997 cnt++; 23998 } 23999 /* TCP_DGP_UPPER_BOUNDS */ 24000 if (dest->r_ctl.rack_per_upper_bound_ca != src->r_ctl.rack_per_upper_bound_ca) { 24001 dest->r_ctl.rack_per_upper_bound_ca = src->r_ctl.rack_per_upper_bound_ca; 24002 cnt++; 24003 } 24004 if (dest->r_ctl.rack_per_upper_bound_ss != src->r_ctl.rack_per_upper_bound_ss) { 24005 dest->r_ctl.rack_per_upper_bound_ss = src->r_ctl.rack_per_upper_bound_ss; 24006 cnt++; 24007 } 24008 /* TCP_SS_EEXIT */ 24009 if (dest->r_ctl.gp_rnd_thresh != src->r_ctl.gp_rnd_thresh) { 24010 dest->r_ctl.gp_rnd_thresh = src->r_ctl.gp_rnd_thresh; 24011 cnt++; 24012 } 24013 if (dest->r_ctl.gate_to_fs != src->r_ctl.gate_to_fs) { 24014 dest->r_ctl.gate_to_fs = src->r_ctl.gate_to_fs; 24015 cnt++; 24016 } 24017 if (dest->r_ctl.use_gp_not_last != src->r_ctl.use_gp_not_last) { 24018 dest->r_ctl.use_gp_not_last = src->r_ctl.use_gp_not_last; 24019 cnt++; 24020 } 24021 if (dest->r_ctl.gp_gain_req != src->r_ctl.gp_gain_req) { 24022 dest->r_ctl.gp_gain_req = src->r_ctl.gp_gain_req; 24023 cnt++; 24024 } 24025 /* TCP_BBR_HDWR_PACE */ 24026 if (dest->rack_hdw_pace_ena != src->rack_hdw_pace_ena) { 24027 dest->rack_hdw_pace_ena = src->rack_hdw_pace_ena; 24028 cnt++; 24029 } 24030 if (dest->rack_attempt_hdwr_pace != src->rack_attempt_hdwr_pace) { 24031 dest->rack_attempt_hdwr_pace = src->rack_attempt_hdwr_pace; 24032 cnt++; 24033 } 24034 /* TCP_RACK_PRR_SENDALOT */ 24035 if (dest->r_ctl.rc_prr_sendalot != src->r_ctl.rc_prr_sendalot) { 24036 dest->r_ctl.rc_prr_sendalot = src->r_ctl.rc_prr_sendalot; 24037 cnt++; 24038 } 24039 /* TCP_RACK_MIN_TO */ 24040 if (dest->r_ctl.rc_min_to != src->r_ctl.rc_min_to) { 24041 dest->r_ctl.rc_min_to = src->r_ctl.rc_min_to; 24042 cnt++; 24043 } 24044 /* TCP_RACK_EARLY_SEG */ 24045 if (dest->r_ctl.rc_early_recovery_segs != src->r_ctl.rc_early_recovery_segs) { 24046 dest->r_ctl.rc_early_recovery_segs = src->r_ctl.rc_early_recovery_segs; 24047 cnt++; 24048 } 24049 /* TCP_RACK_ENABLE_HYSTART */ 24050 if (par->t_ccv.flags != tp->t_ccv.flags) { 24051 cnt++; 24052 if (par->t_ccv.flags & CCF_HYSTART_ALLOWED) { 24053 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 24054 if (rack_do_hystart > RACK_HYSTART_ON) 24055 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 24056 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 24057 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 24058 } else { 24059 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 24060 } 24061 } 24062 /* TCP_RACK_REORD_THRESH */ 24063 if (dest->r_ctl.rc_reorder_shift != 
src->r_ctl.rc_reorder_shift) { 24064 dest->r_ctl.rc_reorder_shift = src->r_ctl.rc_reorder_shift; 24065 cnt++; 24066 } 24067 /* TCP_RACK_REORD_FADE */ 24068 if (dest->r_ctl.rc_reorder_fade != src->r_ctl.rc_reorder_fade) { 24069 dest->r_ctl.rc_reorder_fade = src->r_ctl.rc_reorder_fade; 24070 cnt++; 24071 } 24072 /* TCP_RACK_TLP_THRESH */ 24073 if (dest->r_ctl.rc_tlp_threshold != src->r_ctl.rc_tlp_threshold) { 24074 dest->r_ctl.rc_tlp_threshold = src->r_ctl.rc_tlp_threshold; 24075 cnt++; 24076 } 24077 /* TCP_BBR_USE_RACK_RR */ 24078 if (dest->use_rack_rr != src->use_rack_rr) { 24079 dest->use_rack_rr = src->use_rack_rr; 24080 cnt++; 24081 } 24082 /* TCP_RACK_PKT_DELAY */ 24083 if (dest->r_ctl.rc_pkt_delay != src->r_ctl.rc_pkt_delay) { 24084 dest->r_ctl.rc_pkt_delay = src->r_ctl.rc_pkt_delay; 24085 cnt++; 24086 }
24087 /* TCP_DELACK will get copied via the main code if applicable */ 24088 /* TCP_BBR_RACK_RTT_USE */ 24089 if (dest->r_ctl.rc_rate_sample_method != src->r_ctl.rc_rate_sample_method) { 24090 dest->r_ctl.rc_rate_sample_method = src->r_ctl.rc_rate_sample_method; 24091 cnt++; 24092 } 24093 /* TCP_HONOR_HPTS_MIN */ 24094 if (dest->r_use_hpts_min != src->r_use_hpts_min) { 24095 dest->r_use_hpts_min = src->r_use_hpts_min; 24096 cnt++; 24097 } 24098 if (dest->r_ctl.max_reduction != src->r_ctl.max_reduction) { 24099 dest->r_ctl.max_reduction = src->r_ctl.max_reduction; 24100 cnt++; 24101 } 24102 /* TCP_REC_IS_DYN */ 24103 if (dest->rc_gp_no_rec_chg != src->rc_gp_no_rec_chg) { 24104 dest->rc_gp_no_rec_chg = src->rc_gp_no_rec_chg; 24105 cnt++; 24106 } 24107 if (dest->rc_skip_timely != src->rc_skip_timely) { 24108 dest->rc_skip_timely = src->rc_skip_timely; 24109 cnt++; 24110 }
24111 /* TCP_DATA_AFTER_CLOSE */ 24112 if (dest->rc_allow_data_af_clo != src->rc_allow_data_af_clo) { 24113 dest->rc_allow_data_af_clo = src->rc_allow_data_af_clo; 24114 cnt++; 24115 } 24116 /* TCP_GP_USE_LTBW */ 24117 if (src->use_lesser_lt_bw != dest->use_lesser_lt_bw) { 24118 dest->use_lesser_lt_bw = src->use_lesser_lt_bw; 24119 cnt++; 24120 } 24121 if (dest->dis_lt_bw != src->dis_lt_bw) { 24122 dest->dis_lt_bw = src->dis_lt_bw; 24123 cnt++; 24124 } 24125 tcp_log_socket_option(tp, 0, cnt, 0); 24126 } 24127 24128
24129 static void 24130 rack_apply_deferred_options(struct tcp_rack *rack) 24131 { 24132 struct deferred_opt_list *dol, *sdol; 24133 uint32_t s_optval; 24134 24135 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) { 24136 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 24137 /* Disadvantage of deferral is you lose the error return */ 24138 s_optval = (uint32_t)dol->optval; 24139 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval, NULL); 24140 free(dol, M_TCPDO); 24141 } 24142 } 24143
24144 static void 24145 rack_hw_tls_change(struct tcpcb *tp, int chg) 24146 { 24147 /* Update HW TLS state */ 24148 struct tcp_rack *rack; 24149 24150 rack = (struct tcp_rack *)tp->t_fb_ptr; 24151 if (chg) 24152 rack->r_ctl.fsb.hw_tls = 1; 24153 else 24154 rack->r_ctl.fsb.hw_tls = 0; 24155 } 24156
24157 static int 24158 rack_pru_options(struct tcpcb *tp, int flags) 24159 { 24160 if (flags & PRUS_OOB) 24161 return (EOPNOTSUPP); 24162 return (0); 24163 } 24164
24165 static bool 24166 rack_wake_check(struct tcpcb *tp) 24167 { 24168 struct tcp_rack *rack; 24169 struct timeval tv; 24170 uint32_t cts; 24171 24172 rack = (struct tcp_rack *)tp->t_fb_ptr; 24173 if (rack->r_ctl.rc_hpts_flags) { 24174 cts = tcp_get_usecs(&tv); 24175 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == PACE_PKT_OUTPUT) {
24176 /* 24177 * Pacing timer is up, check if we are ready. 24178 */ 24179 if (TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) 24180 return (true); 24181 } else if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) != 0) { 24182 /* 24183 * A timer is up, check if we are ready. 24184 */ 24185 if (TSTMP_GEQ(cts, rack->r_ctl.rc_timer_exp)) 24186 return (true); 24187 } 24188 } 24189 return (false); 24190 } 24191 24192 static struct tcp_function_block __tcp_rack = { 24193 .tfb_tcp_block_name = __XSTRING(STACKNAME), 24194 .tfb_tcp_output = rack_output, 24195 .tfb_do_queued_segments = ctf_do_queued_segments, 24196 .tfb_do_segment_nounlock = rack_do_segment_nounlock, 24197 .tfb_tcp_do_segment = rack_do_segment, 24198 .tfb_tcp_ctloutput = rack_ctloutput, 24199 .tfb_tcp_fb_init = rack_init, 24200 .tfb_tcp_fb_fini = rack_fini, 24201 .tfb_tcp_timer_stop_all = rack_stopall, 24202 .tfb_tcp_rexmit_tmr = rack_remxt_tmr, 24203 .tfb_tcp_handoff_ok = rack_handoff_ok, 24204 .tfb_tcp_mtu_chg = rack_mtu_change, 24205 .tfb_pru_options = rack_pru_options, 24206 .tfb_hwtls_change = rack_hw_tls_change, 24207 .tfb_chg_query = rack_chg_query, 24208 .tfb_switch_failed = rack_switch_failed, 24209 .tfb_early_wake_check = rack_wake_check, 24210 .tfb_compute_pipe = rack_compute_pipe, 24211 .tfb_stack_info = rack_stack_information, 24212 .tfb_inherit = rack_inherit, 24213 .tfb_flags = TCP_FUNC_OUTPUT_CANDROP | TCP_FUNC_DEFAULT_OK, 24214 24215 }; 24216 24217 /* 24218 * rack_ctloutput() must drop the inpcb lock before performing copyin on 24219 * socket option arguments. When it re-acquires the lock after the copy, it 24220 * has to revalidate that the connection is still valid for the socket 24221 * option. 24222 */ 24223 static int 24224 rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt) 24225 { 24226 struct inpcb *inp = tptoinpcb(tp); 24227 #ifdef INET 24228 struct ip *ip; 24229 #endif 24230 struct tcp_rack *rack; 24231 struct tcp_hybrid_req hybrid; 24232 uint64_t loptval; 24233 int32_t error = 0, optval; 24234 24235 rack = (struct tcp_rack *)tp->t_fb_ptr; 24236 if (rack == NULL) { 24237 INP_WUNLOCK(inp); 24238 return (EINVAL); 24239 } 24240 #ifdef INET 24241 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 24242 #endif 24243 24244 switch (sopt->sopt_level) { 24245 #ifdef INET6 24246 case IPPROTO_IPV6: 24247 MPASS(inp->inp_vflag & INP_IPV6PROTO); 24248 switch (sopt->sopt_name) { 24249 case IPV6_USE_MIN_MTU: 24250 tcp6_use_min_mtu(tp); 24251 break; 24252 } 24253 INP_WUNLOCK(inp); 24254 return (0); 24255 #endif 24256 #ifdef INET 24257 case IPPROTO_IP: 24258 switch (sopt->sopt_name) { 24259 case IP_TOS: 24260 /* 24261 * The DSCP codepoint has changed, update the fsb. 24262 */ 24263 ip->ip_tos = rack->rc_inp->inp_ip_tos; 24264 break; 24265 case IP_TTL: 24266 /* 24267 * The TTL has changed, update the fsb. 24268 */ 24269 ip->ip_ttl = rack->rc_inp->inp_ip_ttl; 24270 break; 24271 } 24272 INP_WUNLOCK(inp); 24273 return (0); 24274 #endif 24275 #ifdef SO_PEERPRIO 24276 case SOL_SOCKET: 24277 switch (sopt->sopt_name) { 24278 case SO_PEERPRIO: /* SC-URL:bs */ 24279 /* Already read in and sanity checked in sosetopt(). 
*/ 24280 if (inp->inp_socket) { 24281 rack->client_bufferlvl = inp->inp_socket->so_peerprio; 24282 } 24283 break; 24284 } 24285 INP_WUNLOCK(inp); 24286 return (0); 24287 #endif 24288 case IPPROTO_TCP: 24289 switch (sopt->sopt_name) { 24290 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */ 24291 /* Pacing related ones */ 24292 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */ 24293 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */ 24294 case TCP_RACK_PACE_MIN_SEG: /* URL:pace_min_seg */ 24295 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */ 24296 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */ 24297 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */ 24298 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss*/ 24299 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */ 24300 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */ 24301 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */ 24302 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */ 24303 case TCP_RACK_RR_CONF: /* URL:rrr_conf */ 24304 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */ 24305 case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */ 24306 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */ 24307 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */ 24308 case TCP_FILLCW_RATE_CAP: /* URL:fillcw_cap */ 24309 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */ 24310 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */ 24311 /* End pacing related */ 24312 case TCP_DELACK: /* URL:delack (in base TCP i.e. tcp_hints along with cc etc ) */ 24313 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */ 24314 case TCP_RACK_MIN_TO: /* URL:min_to */ 24315 case TCP_RACK_EARLY_SEG: /* URL:early_seg */ 24316 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */ 24317 case TCP_RACK_REORD_FADE: /* URL:reord_fade */ 24318 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */ 24319 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */ 24320 case TCP_RACK_TLP_USE: /* URL:tlp_use */ 24321 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */ 24322 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */ 24323 case TCP_NO_PRR: /* URL:noprr */ 24324 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */ 24325 case TCP_DATA_AFTER_CLOSE: /* no URL */ 24326 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */ 24327 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */ 24328 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */ 24329 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */ 24330 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */ 24331 case TCP_RACK_PROFILE: /* URL:profile */ 24332 case TCP_SIDECHAN_DIS: /* URL:scodm */ 24333 case TCP_HYBRID_PACING: /* URL:pacing=hybrid */ 24334 case TCP_USE_CMP_ACKS: /* URL:cmpack */ 24335 case TCP_RACK_ABC_VAL: /* URL:labc */ 24336 case TCP_REC_ABC_VAL: /* URL:reclabc */ 24337 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */ 24338 case TCP_DEFER_OPTIONS: /* URL:defer */ 24339 case TCP_RACK_DSACK_OPT: /* URL:dsack */ 24340 case TCP_RACK_TIMER_SLOP: /* URL:timer_slop */ 24341 case TCP_RACK_ENABLE_HYSTART: /* URL:hystart */ 24342 case TCP_RACK_SET_RXT_OPTIONS: /* URL:rxtsz */ 24343 case TCP_RACK_HI_BETA: /* URL:hibeta */ 24344 case TCP_RACK_SPLIT_LIMIT: /* URL:split */ 24345 case TCP_SS_EEXIT: /* URL:eexit */ 24346 case TCP_DGP_UPPER_BOUNDS: /* URL:upper */ 24347 case TCP_RACK_PACING_DIVISOR: /* URL:divisor */ 24348 case TCP_PACING_DND: /* URL:dnd */ 24349 case TCP_NO_TIMELY: /* URL:notimely */ 24350 case RACK_CSPR_IS_FCC: /* URL:csprisfcc */ 24351 case TCP_HONOR_HPTS_MIN: /* URL:hptsmin */ 24352 case TCP_REC_IS_DYN: /* URL:dynrec */ 24353 case TCP_GP_USE_LTBW: /* URL:useltbw */ 24354 goto process_opt; 24355 break; 24356 
default: 24357 /* Filter off all unknown options to the base stack */ 24358 return (tcp_default_ctloutput(tp, sopt)); 24359 break; 24360 } 24361 default: 24362 INP_WUNLOCK(inp); 24363 return (0); 24364 } 24365 process_opt: 24366 INP_WUNLOCK(inp); 24367 if ((sopt->sopt_name == TCP_PACING_RATE_CAP) || 24368 (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) { 24369 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval)); 24370 /* 24371 * We truncate it down to 32 bits for the socket-option trace this 24372 * means rates > 34Gbps won't show right, but thats probably ok. 24373 */ 24374 optval = (uint32_t)loptval; 24375 } else if (sopt->sopt_name == TCP_HYBRID_PACING) { 24376 error = sooptcopyin(sopt, &hybrid, sizeof(hybrid), sizeof(hybrid)); 24377 } else { 24378 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 24379 /* Save it in 64 bit form too */ 24380 loptval = optval; 24381 } 24382 if (error) 24383 return (error); 24384 INP_WLOCK(inp); 24385 if (tp->t_fb != &__tcp_rack) { 24386 INP_WUNLOCK(inp); 24387 return (ENOPROTOOPT); 24388 } 24389 if (rack->defer_options && (rack->gp_ready == 0) && 24390 (sopt->sopt_name != TCP_DEFER_OPTIONS) && 24391 (sopt->sopt_name != TCP_HYBRID_PACING) && 24392 (sopt->sopt_name != TCP_RACK_SET_RXT_OPTIONS) && 24393 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) && 24394 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) { 24395 /* Options are being deferred */ 24396 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { 24397 INP_WUNLOCK(inp); 24398 return (0); 24399 } else { 24400 /* No memory to defer, fail */ 24401 INP_WUNLOCK(inp); 24402 return (ENOMEM); 24403 } 24404 } 24405 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval, &hybrid); 24406 INP_WUNLOCK(inp); 24407 return (error); 24408 } 24409 24410 static void 24411 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti) 24412 { 24413 24414 INP_WLOCK_ASSERT(tptoinpcb(tp)); 24415 bzero(ti, sizeof(*ti)); 24416 24417 ti->tcpi_state = tp->t_state; 24418 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP)) 24419 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; 24420 if (tp->t_flags & TF_SACK_PERMIT) 24421 ti->tcpi_options |= TCPI_OPT_SACK; 24422 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) { 24423 ti->tcpi_options |= TCPI_OPT_WSCALE; 24424 ti->tcpi_snd_wscale = tp->snd_scale; 24425 ti->tcpi_rcv_wscale = tp->rcv_scale; 24426 } 24427 if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT)) 24428 ti->tcpi_options |= TCPI_OPT_ECN; 24429 if (tp->t_flags & TF_FASTOPEN) 24430 ti->tcpi_options |= TCPI_OPT_TFO; 24431 /* still kept in ticks is t_rcvtime */ 24432 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick; 24433 /* Since we hold everything in precise useconds this is easy */ 24434 ti->tcpi_rtt = tp->t_srtt; 24435 ti->tcpi_rttvar = tp->t_rttvar; 24436 ti->tcpi_rto = tp->t_rxtcur; 24437 ti->tcpi_snd_ssthresh = tp->snd_ssthresh; 24438 ti->tcpi_snd_cwnd = tp->snd_cwnd; 24439 /* 24440 * FreeBSD-specific extension fields for tcp_info. 24441 */ 24442 ti->tcpi_rcv_space = tp->rcv_wnd; 24443 ti->tcpi_rcv_nxt = tp->rcv_nxt; 24444 ti->tcpi_snd_wnd = tp->snd_wnd; 24445 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. 
*/ 24446 ti->tcpi_snd_nxt = tp->snd_nxt; 24447 ti->tcpi_snd_mss = tp->t_maxseg; 24448 ti->tcpi_rcv_mss = tp->t_maxseg; 24449 ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack; 24450 ti->tcpi_rcv_ooopack = tp->t_rcvoopack; 24451 ti->tcpi_snd_zerowin = tp->t_sndzerowin; 24452 ti->tcpi_total_tlp = tp->t_sndtlppack; 24453 ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte; 24454 ti->tcpi_rttmin = tp->t_rttlow; 24455 #ifdef NETFLIX_STATS 24456 memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo)); 24457 #endif 24458 #ifdef TCP_OFFLOAD 24459 if (tp->t_flags & TF_TOE) { 24460 ti->tcpi_options |= TCPI_OPT_TOE; 24461 tcp_offload_tcp_info(tp, ti); 24462 } 24463 #endif 24464 } 24465
24466 static int 24467 rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt) 24468 { 24469 struct inpcb *inp = tptoinpcb(tp); 24470 struct tcp_rack *rack; 24471 int32_t error, optval; 24472 uint64_t val, loptval; 24473 struct tcp_info ti; 24474 /* 24475 * Because all our options are either boolean or an int, we can just 24476 * pull everything into optval and then unlock and copy. If we ever 24477 * add an option that is not an int, then this will have quite an 24478 * impact on this routine. 24479 */ 24480 error = 0; 24481 rack = (struct tcp_rack *)tp->t_fb_ptr; 24482 if (rack == NULL) { 24483 INP_WUNLOCK(inp); 24484 return (EINVAL); 24485 } 24486 switch (sopt->sopt_name) { 24487 case TCP_INFO: 24488 /* First get the info filled */ 24489 rack_fill_info(tp, &ti); 24490 /* Fix up the rtt related fields if needed */ 24491 INP_WUNLOCK(inp); 24492 error = sooptcopyout(sopt, &ti, sizeof ti); 24493 return (error);
24494 /* 24495 * Beta is the congestion control value for NewReno that influences how 24496 * much of a backoff happens when loss is detected. It is normally set 24497 * to 50 for 50% i.e. the cwnd is reduced to 50% of its previous value 24498 * when you exit recovery. 24499 */ 24500 case TCP_RACK_PACING_BETA: 24501 break; 24502 /* 24503 * Beta_ecn is the congestion control value for NewReno that influences how 24504 * much of a backoff happens when an ECN mark is detected. It is normally set 24505 * to 80 for 80% i.e. the cwnd is reduced by 20% of its previous value when 24506 * you exit recovery. Note that classic ECN has a beta of 50; it is only 24507 * ABE ECN that uses this "less" value, but we do too with pacing :) 24508 */ 24509
24510 case TCP_RACK_PACING_BETA_ECN: 24511 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) 24512 error = EINVAL; 24513 else if (rack->rc_pacing_cc_set == 0) 24514 optval = rack->r_ctl.rc_saved_beta.beta_ecn; 24515 else { 24516 /* 24517 * Reach out into the CC data and report back what 24518 * I have previously set. Yeah it looks hackish but 24519 * we don't want to report the saved values.
24520 */ 24521 if (tp->t_ccv.cc_data) 24522 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn; 24523 else 24524 error = EINVAL; 24525 } 24526 break; 24527 case TCP_RACK_DSACK_OPT: 24528 optval = 0; 24529 if (rack->rc_rack_tmr_std_based) { 24530 optval |= 1; 24531 } 24532 if (rack->rc_rack_use_dsack) { 24533 optval |= 2; 24534 } 24535 break; 24536 case TCP_RACK_ENABLE_HYSTART: 24537 { 24538 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { 24539 optval = RACK_HYSTART_ON; 24540 if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND) 24541 optval = RACK_HYSTART_ON_W_SC; 24542 if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH) 24543 optval = RACK_HYSTART_ON_W_SC_C; 24544 } else { 24545 optval = RACK_HYSTART_OFF; 24546 } 24547 } 24548 break; 24549 case TCP_RACK_DGP_IN_REC: 24550 error = EINVAL; 24551 break; 24552 case TCP_RACK_HI_BETA: 24553 optval = rack->rack_hibeta; 24554 break; 24555 case TCP_DEFER_OPTIONS: 24556 optval = rack->defer_options; 24557 break; 24558 case TCP_RACK_MEASURE_CNT: 24559 optval = rack->r_ctl.req_measurements; 24560 break; 24561 case TCP_REC_ABC_VAL: 24562 optval = rack->r_use_labc_for_rec; 24563 break; 24564 case TCP_RACK_ABC_VAL: 24565 optval = rack->rc_labc; 24566 break; 24567 case TCP_HDWR_UP_ONLY: 24568 optval= rack->r_up_only; 24569 break; 24570 case TCP_FILLCW_RATE_CAP: 24571 loptval = rack->r_ctl.fillcw_cap; 24572 break; 24573 case TCP_PACING_RATE_CAP: 24574 loptval = rack->r_ctl.bw_rate_cap; 24575 break; 24576 case TCP_RACK_PROFILE: 24577 /* You cannot retrieve a profile, its write only */ 24578 error = EINVAL; 24579 break; 24580 case TCP_SIDECHAN_DIS: 24581 optval = rack->r_ctl.side_chan_dis_mask; 24582 break; 24583 case TCP_HYBRID_PACING: 24584 /* You cannot retrieve hybrid pacing information, its write only */ 24585 error = EINVAL; 24586 break; 24587 case TCP_USE_CMP_ACKS: 24588 optval = rack->r_use_cmp_ack; 24589 break; 24590 case TCP_RACK_PACE_TO_FILL: 24591 optval = rack->rc_pace_to_cwnd; 24592 break; 24593 case TCP_RACK_NO_PUSH_AT_MAX: 24594 optval = rack->r_ctl.rc_no_push_at_mrtt; 24595 break; 24596 case TCP_SHARED_CWND_ENABLE: 24597 optval = rack->rack_enable_scwnd; 24598 break; 24599 case TCP_RACK_NONRXT_CFG_RATE: 24600 optval = rack->rack_rec_nonrxt_use_cr; 24601 break; 24602 case TCP_NO_PRR: 24603 if (rack->rack_no_prr == 1) 24604 optval = 1; 24605 else if (rack->no_prr_addback == 1) 24606 optval = 2; 24607 else 24608 optval = 0; 24609 break; 24610 case TCP_GP_USE_LTBW: 24611 if (rack->dis_lt_bw) { 24612 /* It is not used */ 24613 optval = 0; 24614 } else if (rack->use_lesser_lt_bw) { 24615 /* we use min() */ 24616 optval = 1; 24617 } else { 24618 /* we use max() */ 24619 optval = 2; 24620 } 24621 break; 24622 case TCP_RACK_DO_DETECTION: 24623 error = EINVAL; 24624 break; 24625 case TCP_RACK_MBUF_QUEUE: 24626 /* Now do we use the LRO mbuf-queue feature */ 24627 optval = rack->r_mbuf_queue; 24628 break; 24629 case RACK_CSPR_IS_FCC: 24630 optval = rack->cspr_is_fcc; 24631 break; 24632 case TCP_TIMELY_DYN_ADJ: 24633 optval = rack->rc_gp_dyn_mul; 24634 break; 24635 case TCP_BBR_IWINTSO: 24636 error = EINVAL; 24637 break; 24638 case TCP_RACK_TLP_REDUCE: 24639 /* RACK TLP cwnd reduction (bool) */ 24640 optval = rack->r_ctl.rc_tlp_cwnd_reduce; 24641 break; 24642 case TCP_BBR_RACK_INIT_RATE: 24643 val = rack->r_ctl.init_rate; 24644 /* convert to kbits per sec */ 24645 val *= 8; 24646 val /= 1000; 24647 optval = (uint32_t)val; 24648 break; 24649 case TCP_RACK_FORCE_MSEG: 24650 optval = rack->rc_force_max_seg; 24651 break; 24652 case TCP_RACK_PACE_MIN_SEG: 24653 optval = 
rack->r_ctl.rc_user_set_min_segs; 24654 break; 24655 case TCP_RACK_PACE_MAX_SEG: 24656 /* Max segments in a pace */ 24657 optval = rack->rc_user_set_max_segs; 24658 break; 24659 case TCP_RACK_PACE_ALWAYS: 24660 /* Use the always pace method */ 24661 optval = rack->rc_always_pace; 24662 break; 24663 case TCP_RACK_PRR_SENDALOT: 24664 /* Allow PRR to send more than one seg */ 24665 optval = rack->r_ctl.rc_prr_sendalot; 24666 break; 24667 case TCP_RACK_MIN_TO: 24668 /* Minimum time between rack t-o's in ms */ 24669 optval = rack->r_ctl.rc_min_to; 24670 break; 24671 case TCP_RACK_SPLIT_LIMIT: 24672 optval = rack->r_ctl.rc_split_limit; 24673 break; 24674 case TCP_RACK_EARLY_SEG: 24675 /* Max segments for early recovery */ 24676 optval = rack->r_ctl.rc_early_recovery_segs; 24677 break; 24678 case TCP_RACK_REORD_THRESH: 24679 /* RACK reorder threshold (shift amount) */ 24680 optval = rack->r_ctl.rc_reorder_shift; 24681 break;
24682 case TCP_SS_EEXIT: 24683 if (rack->r_ctl.gp_rnd_thresh) { 24684 uint32_t v; 24685 24686 v = rack->r_ctl.gp_gain_req; 24687 v <<= 18; 24688 optval = v | (rack->r_ctl.gp_rnd_thresh & 0xff); 24689 if (rack->r_ctl.gate_to_fs == 1) 24690 optval |= 0x10000; 24691 } else 24692 optval = 0; 24693 break;
24694 case TCP_RACK_REORD_FADE: 24695 /* Does reordering fade after ms time */ 24696 optval = rack->r_ctl.rc_reorder_fade; 24697 break; 24698 case TCP_BBR_USE_RACK_RR: 24699 /* Do we use the rack cheat for rxt */ 24700 optval = rack->use_rack_rr; 24701 break; 24702 case TCP_RACK_RR_CONF: 24703 optval = rack->r_rr_config; 24704 break; 24705 case TCP_HDWR_RATE_CAP: 24706 optval = rack->r_rack_hw_rate_caps; 24707 break; 24708 case TCP_BBR_HDWR_PACE: 24709 optval = rack->rack_hdw_pace_ena; 24710 break; 24711 case TCP_RACK_TLP_THRESH: 24712 /* RACK TLP threshold i.e. srtt+(srtt/N) */ 24713 optval = rack->r_ctl.rc_tlp_threshold; 24714 break; 24715 case TCP_RACK_PKT_DELAY: 24716 /* RACK added ms i.e.
rack-rtt + reord + N */ 24717 optval = rack->r_ctl.rc_pkt_delay; 24718 break; 24719 case TCP_RACK_TLP_USE: 24720 optval = rack->rack_tlp_threshold_use; 24721 break; 24722 case TCP_PACING_DND: 24723 optval = rack->rc_pace_dnd; 24724 break; 24725 case TCP_RACK_PACE_RATE_CA: 24726 optval = rack->r_ctl.rc_fixed_pacing_rate_ca; 24727 break; 24728 case TCP_RACK_PACE_RATE_SS: 24729 optval = rack->r_ctl.rc_fixed_pacing_rate_ss; 24730 break; 24731 case TCP_RACK_PACE_RATE_REC: 24732 optval = rack->r_ctl.rc_fixed_pacing_rate_rec; 24733 break; 24734 case TCP_DGP_UPPER_BOUNDS: 24735 optval = rack->r_ctl.rack_per_upper_bound_ss; 24736 optval <<= 16; 24737 optval |= rack->r_ctl.rack_per_upper_bound_ca; 24738 break; 24739 case TCP_RACK_GP_INCREASE_SS: 24740 optval = rack->r_ctl.rack_per_of_gp_ss; 24741 break; 24742 case TCP_RACK_GP_INCREASE_CA: 24743 optval = rack->r_ctl.rack_per_of_gp_ca; 24744 break; 24745 case TCP_RACK_PACING_DIVISOR: 24746 optval = rack->r_ctl.pace_len_divisor; 24747 break; 24748 case TCP_BBR_RACK_RTT_USE: 24749 optval = rack->r_ctl.rc_rate_sample_method; 24750 break; 24751 case TCP_DELACK: 24752 optval = tp->t_delayed_ack; 24753 break; 24754 case TCP_DATA_AFTER_CLOSE: 24755 optval = rack->rc_allow_data_af_clo; 24756 break; 24757 case TCP_SHARED_CWND_TIME_LIMIT: 24758 optval = rack->r_limit_scw; 24759 break; 24760 case TCP_HONOR_HPTS_MIN: 24761 if (rack->r_use_hpts_min) 24762 optval = rack->r_ctl.max_reduction; 24763 else 24764 optval = 0; 24765 break; 24766 case TCP_REC_IS_DYN: 24767 optval = rack->rc_gp_no_rec_chg; 24768 break; 24769 case TCP_NO_TIMELY: 24770 optval = rack->rc_skip_timely; 24771 break; 24772 case TCP_RACK_TIMER_SLOP: 24773 optval = rack->r_ctl.timer_slop; 24774 break; 24775 default: 24776 return (tcp_default_ctloutput(tp, sopt)); 24777 break; 24778 }
24779 INP_WUNLOCK(inp); 24780 if (error == 0) { 24781 if ((sopt->sopt_name == TCP_PACING_RATE_CAP) || 24782 (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) 24783 error = sooptcopyout(sopt, &loptval, sizeof loptval); 24784 else 24785 error = sooptcopyout(sopt, &optval, sizeof optval); 24786 } 24787 return (error); 24788 } 24789
24790 static int 24791 rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt) 24792 { 24793 if (sopt->sopt_dir == SOPT_SET) { 24794 return (rack_set_sockopt(tp, sopt)); 24795 } else if (sopt->sopt_dir == SOPT_GET) { 24796 return (rack_get_sockopt(tp, sopt)); 24797 } else { 24798 panic("%s: sopt_dir %d", __func__, sopt->sopt_dir); 24799 } 24800 } 24801
24802 static const char *rack_stack_names[] = { 24803 __XSTRING(STACKNAME), 24804 #ifdef STACKALIAS 24805 __XSTRING(STACKALIAS), 24806 #endif 24807 }; 24808
24809 static int 24810 rack_ctor(void *mem, int32_t size, void *arg, int32_t how) 24811 { 24812 memset(mem, 0, size); 24813 return (0); 24814 } 24815
24816 static void 24817 rack_dtor(void *mem, int32_t size, void *arg) 24818 { 24819 24820 } 24821
24822 static bool rack_mod_inited = false; 24823
24824 static int 24825 tcp_addrack(module_t mod, int32_t type, void *data) 24826 { 24827 int32_t err = 0; 24828 int num_stacks; 24829 24830 switch (type) { 24831 case MOD_LOAD: 24832 rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map", 24833 sizeof(struct rack_sendmap), 24834 rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0); 24835 24836 rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb", 24837 sizeof(struct tcp_rack), 24838 rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); 24839 24840 sysctl_ctx_init(&rack_sysctl_ctx); 24841 rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 24842
SYSCTL_STATIC_CHILDREN(_net_inet_tcp), 24843 OID_AUTO, 24844 #ifdef STACKALIAS 24845 __XSTRING(STACKALIAS), 24846 #else 24847 __XSTRING(STACKNAME), 24848 #endif 24849 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 24850 ""); 24851 if (rack_sysctl_root == NULL) { 24852 printf("Failed to add sysctl node\n"); 24853 err = EFAULT; 24854 goto free_uma; 24855 } 24856 rack_init_sysctls(); 24857 num_stacks = nitems(rack_stack_names); 24858 err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK, 24859 rack_stack_names, &num_stacks); 24860 if (err) { 24861 printf("Failed to register %s stack name for " 24862 "%s module\n", rack_stack_names[num_stacks], 24863 __XSTRING(MODNAME)); 24864 sysctl_ctx_free(&rack_sysctl_ctx); 24865 free_uma: 24866 uma_zdestroy(rack_zone); 24867 uma_zdestroy(rack_pcb_zone); 24868 rack_counter_destroy(); 24869 printf("Failed to register rack module -- err:%d\n", err); 24870 return (err); 24871 } 24872 tcp_lro_reg_mbufq(); 24873 rack_mod_inited = true; 24874 break; 24875 case MOD_QUIESCE: 24876 err = deregister_tcp_functions(&__tcp_rack, true, false); 24877 break; 24878 case MOD_UNLOAD: 24879 err = deregister_tcp_functions(&__tcp_rack, false, true); 24880 if (err == EBUSY) 24881 break; 24882 if (rack_mod_inited) { 24883 uma_zdestroy(rack_zone); 24884 uma_zdestroy(rack_pcb_zone); 24885 sysctl_ctx_free(&rack_sysctl_ctx); 24886 rack_counter_destroy(); 24887 rack_mod_inited = false; 24888 } 24889 tcp_lro_dereg_mbufq(); 24890 err = 0; 24891 break; 24892 default: 24893 return (EOPNOTSUPP); 24894 } 24895 return (err); 24896 } 24897 24898 static moduledata_t tcp_rack = { 24899 .name = __XSTRING(MODNAME), 24900 .evhand = tcp_addrack, 24901 .priv = 0 24902 }; 24903 24904 MODULE_VERSION(MODNAME, 1); 24905 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY); 24906 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1); 24907 24908 #endif /* #if !defined(INET) && !defined(INET6) */ 24909
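/*
 * Illustrative sketch (not compiled into the kernel): how a user-space
 * application would typically select this stack and reach the socket-option
 * handlers above.  This assumes the standard TCP_FUNCTION_BLK interface and
 * the TCP_RACK_* option constants from <netinet/tcp.h>, and that the stack
 * was registered under the name "rack" (i.e. STACKNAME); adjust as needed.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <string.h>
 *
 *	static int
 *	use_rack_with_pacing(int s)
 *	{
 *		struct tcp_function_set tfs;
 *		int one = 1;
 *
 *		// Switch this socket to the rack stack.
 *		memset(&tfs, 0, sizeof(tfs));
 *		strlcpy(tfs.function_set_name, "rack",
 *		    sizeof(tfs.function_set_name));
 *		if (setsockopt(s, IPPROTO_TCP, TCP_FUNCTION_BLK,
 *		    &tfs, sizeof(tfs)) == -1)
 *			return (-1);
 *		// URL:pace_always -- ends up in rack_process_option().
 *		return (setsockopt(s, IPPROTO_TCP, TCP_RACK_PACE_ALWAYS,
 *		    &one, sizeof(one)));
 *	}
 */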