/*-
 * Copyright (c) 2016-2020 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_ratelimit.h"
#include "opt_kern_tls.h"
#if defined(INET) || defined(INET6)
#include <sys/param.h>
#include <sys/arb.h>
#include <sys/module.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#ifdef STATS
#include <sys/qmath.h>
#include <sys/tree.h>
#include <sys/stats.h>		/* Must come after qmath.h and tree.h */
#else
#include <sys/tree.h>
#endif
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/tim_filter.h>
#include <sys/smp.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>
#include <sys/protosw.h>
#ifdef TCP_ACCOUNTING
#include <sys/sched.h>
#include <machine/cpu.h>
#endif
#include <vm/uma.h>

#include <net/route.h>
#include <net/route/nhop.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_syncache.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_ratelimit.h>
#include <netinet/tcp_accounting.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_newreno.h>
#include <netinet/tcp_fastopen.h>
#include <netinet/tcp_lro.h>
#ifdef NETFLIX_SHARED_CWND
#include <netinet/tcp_shared_cwnd.h>
#endif
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcp_ecn.h>

#include <netipsec/ipsec_support.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif				/* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
#include "sack_filter.h"
#include "tcp_rack.h"
#include "tailq_hash.h"
#include "rack_bbr_common.h"

uma_zone_t rack_zone;
uma_zone_t rack_pcb_zone;

#ifndef TICKS2SBT
#define	TICKS2SBT(__t)	(tick_sbt * ((sbintime_t)(__t)))
#endif

VNET_DECLARE(uint32_t, newreno_beta);
VNET_DECLARE(uint32_t, newreno_beta_ecn);
#define V_newreno_beta VNET(newreno_beta)
#define V_newreno_beta_ecn VNET(newreno_beta_ecn)

#define	M_TCPFSB	__CONCAT(M_TCPFSB, STACKNAME)
#define	M_TCPDO		__CONCAT(M_TCPDO, STACKNAME)

MALLOC_DEFINE(M_TCPFSB, "tcp_fsb_" __XSTRING(STACKNAME), "TCP fast send block");
MALLOC_DEFINE(M_TCPDO, "tcp_do_" __XSTRING(STACKNAME), "TCP deferred options");
MALLOC_DEFINE(M_TCPPCM, "tcp_pcm_" __XSTRING(STACKNAME), "TCP PCM measurement information");

struct sysctl_ctx_list rack_sysctl_ctx;
struct sysctl_oid *rack_sysctl_root;

#define CUM_ACKED 1
#define SACKED 2

/*
 * The RACK module incorporates a number of
 * TCP ideas that have been put out into the IETF
 * over the last few years:
 * - Matt Mathis's Rate Halving which slowly drops
 *   the congestion window so that the ack clock can
 *   be maintained during a recovery.
 * - Yuchung Cheng's RACK TCP (for which it is named) that
 *   will stop us using the number of dup acks and instead
 *   use time as the gauge of when we retransmit.
 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
 *   of Dukkipati et al.
 * RACK depends on SACK, so if an endpoint arrives that
 * cannot do SACK the state machine below will shuttle the
 * connection back to using the "default" TCP stack that is
 * in FreeBSD.
 *
 * To implement RACK the original TCP stack was first decomposed
 * into a functional state machine with individual states
 * for each of the possible TCP connection states. The do_segment
 * function's role in life is to mandate that the connection supports SACK
 * initially and then assure that the RACK state matches the connection
 * state before calling the state's do_segment function. Each
 * state is simplified due to the fact that the original do_segment
 * has been decomposed and we *know* what state we are in (no
 * switches on the state) and all tests for SACK are gone. This
 * greatly simplifies what each state does.
 *
 * TCP output is also overwritten with a new version since it
 * must maintain the new rack scoreboard.
 *
 */
static int32_t rack_tlp_thresh = 1;
static int32_t rack_tlp_limit = 2;	/* No more than 2 TLPs w-out new data */
static int32_t rack_tlp_use_greater = 1;
static int32_t rack_reorder_thresh = 2;
static int32_t rack_reorder_fade = 60000000;	/* 0 - never fade, def
						 * 60,000,000 - 60 seconds */
static uint32_t rack_pcm_every_n_rounds = 100;
static uint32_t rack_pcm_blast = 0;
static uint32_t rack_pcm_is_enabled = 1;
static uint8_t rack_ssthresh_rest_rto_rec = 0;	/* Do we restore ssthresh when we have rec -> rto -> rec */

static uint32_t rack_gp_gain_req = 1200;	/* Amount percent wise required to gain to record a round as "gaining" */
static uint32_t rack_rnd_cnt_req = 0x10005;	/* Default number of rounds if we are below rack_gp_gain_req where we exit ss */


static int32_t rack_rxt_scoreboard_clear_thresh = 2;
static int32_t rack_dnd_default = 0;		/* For rr_conf = 3, what is the default for dnd */
static int32_t rack_rxt_controls = 0;
static int32_t rack_fill_cw_state = 0;
static uint8_t rack_req_measurements = 1;
/* Attack threshold detections */
static uint32_t rack_highest_sack_thresh_seen = 0;
static uint32_t rack_highest_move_thresh_seen = 0;
static uint32_t rack_merge_out_sacks_on_attack = 0;
static int32_t rack_enable_hw_pacing = 0;	/* Due to CCSP keep it off by default */
static int32_t rack_hw_pace_extra_slots = 0;	/* 2 extra MSS time betweens */
static int32_t rack_hw_rate_caps = 0;		/* 1; */
static int32_t rack_hw_rate_cap_per = 0;	/* 0 -- off */
static int32_t rack_hw_rate_min = 0;		/* 1500000;*/
static int32_t rack_hw_rate_to_low = 0;		/* 1200000; */
static int32_t rack_hw_up_only = 0;
static int32_t rack_stats_gets_ms_rtt = 1;
static int32_t rack_prr_addbackmax = 2;
static int32_t rack_do_hystart = 0;
static int32_t rack_apply_rtt_with_reduced_conf = 0;
static int32_t rack_hibeta_setting = 0;
static int32_t rack_default_pacing_divisor = 250;
static uint16_t rack_pacing_min_seg = 0;
static int32_t rack_timely_off = 0;

static uint32_t sad_seg_size_per = 800;	/* 80.0 % */
static int32_t rack_pkt_delay = 1000;
static int32_t rack_send_a_lot_in_prr = 1;
static int32_t rack_min_to = 1000;	/* Number of microsecond min timeout */
static int32_t rack_verbose_logging = 0;
static int32_t rack_ignore_data_after_close = 1;
static int32_t rack_enable_shared_cwnd = 1;
static int32_t rack_use_cmp_acks = 1;
static int32_t rack_use_fsb = 1;
static int32_t rack_use_rfo = 1;
static int32_t rack_use_rsm_rfo = 1;
static int32_t rack_max_abc_post_recovery = 2;
static int32_t rack_client_low_buf = 0;
static int32_t rack_dsack_std_based = 0x3;	/* bit field bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */
static int32_t rack_bw_multipler = 0;	/* Limit on fill cw's jump up to be this x gp_est */
#ifdef TCP_ACCOUNTING
static int32_t rack_tcp_accounting = 0;
#endif
static int32_t rack_limits_scwnd = 1;
static int32_t rack_enable_mqueue_for_nonpaced = 0;
static int32_t rack_hybrid_allow_set_maxseg = 0;
static int32_t rack_disable_prr = 0;
static int32_t use_rack_rr = 1;
static int32_t rack_non_rxt_use_cr = 0;	/* does a non-rxt in recovery use the configured rate (ss/ca)? */
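/*
 * A rough, illustrative reading of how the RACK/TLP timer knobs
 * declared above and below combine; the authoritative computation is
 * in the rack threshold/timer routines later in this file, so the
 * numbers here are only meant to show the units and defaults involved.
 * With a smoothed RTT of 40000 usec and reordering having been seen:
 *
 *	reorder allowance	= srtt >> rack_reorder_thresh
 *				= 40000 >> 2	= 10000 usec
 *	extra packet delay	= rack_pkt_delay =  1000 usec
 *
 * so a hole is held roughly 11000 usec longer before being declared
 * lost.  Similarly, with rack_tlp_thresh = 1 a tail-loss probe waits
 * on the order of one additional srtt (and never less than
 * rack_tlp_min) after the last transmission before firing.
 */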
static int32_t rack_persist_min = 250000;	/* 250ms in usecs */
static int32_t rack_persist_max = 2000000;	/* 2 seconds in usecs */
static int32_t rack_honors_hpts_min_to = 1;	/* Do we honor the hpts minimum time out for pacing timers */
static uint32_t rack_max_reduce = 10;		/* Percent we can reduce slot by */
static int32_t rack_sack_not_required = 1;	/* set to one to allow non-sack to use rack */
static int32_t rack_limit_time_with_srtt = 0;
static int32_t rack_autosndbuf_inc = 20;	/* In percentage form */
static int32_t rack_enobuf_hw_boost_mult = 0;	/* How many times the hw rate we boost slot using time_between */
static int32_t rack_enobuf_hw_max = 12000;	/* 12 ms in usecs */
static int32_t rack_enobuf_hw_min = 10000;	/* 10 ms in usecs */
static int32_t rack_hw_rwnd_factor = 2;		/* How many max_segs the rwnd must be before we hold off sending */
static int32_t rack_hw_check_queue = 0;		/* Do we always pre-check queue depth of a hw queue */
static int32_t rack_full_buffer_discount = 10;
/*
 * Currently regular tcp has a rto_min of 30ms
 * the backoff goes 12 times so that ends up
 * being a total of 122.850 seconds before a
 * connection is killed.
 */
static uint32_t rack_def_data_window = 20;
static uint32_t rack_goal_bdp = 2;
static uint32_t rack_min_srtts = 1;
static uint32_t rack_min_measure_usec = 0;
static int32_t rack_tlp_min = 10000;	/* 10ms */
static int32_t rack_rto_min = 30000;	/* 30,000 usec same as main freebsd */
static int32_t rack_rto_max = 4000000;	/* 4 seconds in usec's */
static const int32_t rack_free_cache = 2;
static int32_t rack_hptsi_segments = 40;
static int32_t rack_rate_sample_method = USE_RTT_LOW;
static int32_t rack_pace_every_seg = 0;
static int32_t rack_delayed_ack_time = 40000;	/* 40ms in usecs */
static int32_t rack_slot_reduction = 4;
static int32_t rack_wma_divisor = 8;		/* For WMA calculation */
static int32_t rack_cwnd_block_ends_measure = 0;
static int32_t rack_rwnd_block_ends_measure = 0;
static int32_t rack_def_profile = 0;

static int32_t rack_lower_cwnd_at_tlp = 0;
static int32_t rack_always_send_oldest = 0;
static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;

static uint16_t rack_per_of_gp_ss = 250;	/* 250 % slow-start */
static uint16_t rack_per_of_gp_ca = 200;	/* 200 % congestion-avoidance */
static uint16_t rack_per_of_gp_rec = 200;	/* 200 % of bw */

/* Probertt */
static uint16_t rack_per_of_gp_probertt = 60;	/* 60% of bw */
static uint16_t rack_per_of_gp_lowthresh = 40;	/* 40% is bottom */
static uint16_t rack_per_of_gp_probertt_reduce = 10;	/* 10% reduction */
static uint16_t rack_atexit_prtt_hbp = 130;	/* Clamp to 130% on exit prtt if highly buffered path */
static uint16_t rack_atexit_prtt = 130;		/* Clamp to 130% on exit prtt if non highly buffered path */

static uint32_t rack_max_drain_wait = 2;	/* How many gp srtt's before we give up draining */
static uint32_t rack_must_drain = 1;		/* How many GP srtt's we *must* wait */
static uint32_t rack_probertt_use_min_rtt_entry = 1;	/* Use the min to calculate the goal else gp_srtt */
static uint32_t rack_probertt_use_min_rtt_exit = 0;
static uint32_t rack_probe_rtt_sets_cwnd = 0;
static uint32_t rack_probe_rtt_safety_val = 2000000;	/* No more than 2 sec in probe-rtt */
static uint32_t rack_time_between_probertt = 9600000;	/* 9.6 sec in usecs */
static uint32_t rack_probertt_gpsrtt_cnt_mul = 0;	/* How many srtt periods does probe-rtt last top fraction */
static uint32_t rack_probertt_gpsrtt_cnt_div = 0;	/* How many srtt periods does probe-rtt last bottom fraction */
static uint32_t rack_min_probertt_hold = 40000;		/* Equal to delayed ack time */
static uint32_t rack_probertt_filter_life = 10000000;
static uint32_t rack_probertt_lower_within = 10;
static uint32_t rack_min_rtt_movement = 250000;	/* Must move at least 250ms (in microseconds) to count as a lowering */
static int32_t rack_pace_one_seg = 0;		/* Shall we pace for less than 1.4Meg 1MSS at a time */
static int32_t rack_probertt_clear_is = 1;
static int32_t rack_max_drain_hbp = 1;		/* Extra drain times gpsrtt for highly buffered paths */
static int32_t rack_hbp_thresh = 3;		/* what is the divisor max_rtt/min_rtt to decide a hbp */

/* Part of pacing */
static int32_t rack_max_per_above = 30;		/* When we go to increment stop if above 100+this% */

/* Timely information:
 *
 * Here we have various control parameters on how
 * timely may change the multiplier. rack_gain_p5_ub
 * is associated with timely but not directly influencing
 * the rate decision like the other variables. It controls
 * the way fill-cw interacts with timely and caps how much
 * timely can boost the fill-cw b/w.
 *
 * The other values are various boost/shrink numbers as well
 * as potential caps when adjustments are made to the timely
 * gain (returned by rack_get_output_gain()). Remember too that
 * the gain returned can be overridden by other factors such as
 * probeRTT as well as fixed-rate-pacing.
 */
static int32_t rack_gain_p5_ub = 250;
static int32_t rack_gp_per_bw_mul_up = 2;	/* 2% */
static int32_t rack_gp_per_bw_mul_down = 4;	/* 4% */
static int32_t rack_gp_rtt_maxmul = 3;		/* 3 x maxmin */
static int32_t rack_gp_rtt_minmul = 1;		/* minrtt + (minrtt/mindiv) is lower rtt */
static int32_t rack_gp_rtt_mindiv = 4;		/* minrtt + (minrtt * minmul/mindiv) is lower rtt */
static int32_t rack_gp_decrease_per = 80;	/* Beta value of timely decrease (.8) = 80 */
static int32_t rack_gp_increase_per = 2;	/* 2% increase in multiplier */
static int32_t rack_per_lower_bound = 50;	/* Don't allow to drop below this multiplier */
static int32_t rack_per_upper_bound_ss = 0;	/* Don't allow SS to grow above this */
static int32_t rack_per_upper_bound_ca = 0;	/* Don't allow CA to grow above this */
static int32_t rack_do_dyn_mul = 0;		/* Are the rack gp multipliers dynamic */
static int32_t rack_gp_no_rec_chg = 1;		/* Prohibit recovery from reducing its multiplier */
static int32_t rack_timely_dec_clear = 6;	/* Do we clear decrement count at a value (6)? */
static int32_t rack_timely_max_push_rise = 3;	/* One round of pushing */
static int32_t rack_timely_max_push_drop = 3;	/* Three rounds of pushing */
static int32_t rack_timely_min_segs = 4;	/* 4 segment minimum */
static int32_t rack_use_max_for_nobackoff = 0;
static int32_t rack_timely_int_timely_only = 0;	/* do interim timely's only use the timely algo (no b/w changes)?
*/ 361 static int32_t rack_timely_no_stopping = 0; 362 static int32_t rack_down_raise_thresh = 100; 363 static int32_t rack_req_segs = 1; 364 static uint64_t rack_bw_rate_cap = 0; 365 static uint64_t rack_fillcw_bw_cap = 3750000; /* Cap fillcw at 30Mbps */ 366 367 368 /* Rack specific counters */ 369 counter_u64_t rack_saw_enobuf; 370 counter_u64_t rack_saw_enobuf_hw; 371 counter_u64_t rack_saw_enetunreach; 372 counter_u64_t rack_persists_sends; 373 counter_u64_t rack_persists_acks; 374 counter_u64_t rack_persists_loss; 375 counter_u64_t rack_persists_lost_ends; 376 counter_u64_t rack_total_bytes; 377 #ifdef INVARIANTS 378 counter_u64_t rack_adjust_map_bw; 379 #endif 380 /* Tail loss probe counters */ 381 counter_u64_t rack_tlp_tot; 382 counter_u64_t rack_tlp_newdata; 383 counter_u64_t rack_tlp_retran; 384 counter_u64_t rack_tlp_retran_bytes; 385 counter_u64_t rack_to_tot; 386 counter_u64_t rack_hot_alloc; 387 counter_u64_t rack_to_alloc; 388 counter_u64_t rack_to_alloc_hard; 389 counter_u64_t rack_to_alloc_emerg; 390 counter_u64_t rack_to_alloc_limited; 391 counter_u64_t rack_alloc_limited_conns; 392 counter_u64_t rack_split_limited; 393 counter_u64_t rack_rxt_clamps_cwnd; 394 counter_u64_t rack_rxt_clamps_cwnd_uniq; 395 396 counter_u64_t rack_multi_single_eq; 397 counter_u64_t rack_proc_non_comp_ack; 398 399 counter_u64_t rack_fto_send; 400 counter_u64_t rack_fto_rsm_send; 401 counter_u64_t rack_nfto_resend; 402 counter_u64_t rack_non_fto_send; 403 counter_u64_t rack_extended_rfo; 404 405 counter_u64_t rack_sack_proc_all; 406 counter_u64_t rack_sack_proc_short; 407 counter_u64_t rack_sack_proc_restart; 408 counter_u64_t rack_sack_attacks_detected; 409 counter_u64_t rack_sack_attacks_reversed; 410 counter_u64_t rack_sack_attacks_suspect; 411 counter_u64_t rack_sack_used_next_merge; 412 counter_u64_t rack_sack_splits; 413 counter_u64_t rack_sack_used_prev_merge; 414 counter_u64_t rack_sack_skipped_acked; 415 counter_u64_t rack_ack_total; 416 counter_u64_t rack_express_sack; 417 counter_u64_t rack_sack_total; 418 counter_u64_t rack_move_none; 419 counter_u64_t rack_move_some; 420 421 counter_u64_t rack_input_idle_reduces; 422 counter_u64_t rack_collapsed_win; 423 counter_u64_t rack_collapsed_win_seen; 424 counter_u64_t rack_collapsed_win_rxt; 425 counter_u64_t rack_collapsed_win_rxt_bytes; 426 counter_u64_t rack_try_scwnd; 427 counter_u64_t rack_hw_pace_init_fail; 428 counter_u64_t rack_hw_pace_lost; 429 430 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE]; 431 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE]; 432 433 434 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2))) 435 436 #define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do { \ 437 (tv) = (value) + slop; \ 438 if ((u_long)(tv) < (u_long)(tvmin)) \ 439 (tv) = (tvmin); \ 440 if ((u_long)(tv) > (u_long)(tvmax)) \ 441 (tv) = (tvmax); \ 442 } while (0) 443 444 static void 445 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line); 446 447 static int 448 rack_process_ack(struct mbuf *m, struct tcphdr *th, 449 struct socket *so, struct tcpcb *tp, struct tcpopt *to, 450 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val, int32_t orig_tlen); 451 static int 452 rack_process_data(struct mbuf *m, struct tcphdr *th, 453 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 454 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 455 static void 456 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, 457 uint32_t 
th_ack, uint16_t nsegs, uint16_t type, int32_t recovery); 458 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack); 459 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack, 460 uint8_t limit_type); 461 static struct rack_sendmap * 462 rack_check_recovery_mode(struct tcpcb *tp, 463 uint32_t tsused); 464 static uint32_t 465 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack); 466 static void 467 rack_cong_signal(struct tcpcb *tp, 468 uint32_t type, uint32_t ack, int ); 469 static void rack_counter_destroy(void); 470 static int 471 rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt); 472 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how); 473 static void 474 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override); 475 static void 476 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 477 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos); 478 static void rack_dtor(void *mem, int32_t size, void *arg); 479 static void 480 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 481 uint32_t flex1, uint32_t flex2, 482 uint32_t flex3, uint32_t flex4, 483 uint32_t flex5, uint32_t flex6, 484 uint16_t flex7, uint8_t mod); 485 486 static void 487 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 488 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line, 489 struct rack_sendmap *rsm, uint8_t quality); 490 static struct rack_sendmap * 491 rack_find_high_nonack(struct tcp_rack *rack, 492 struct rack_sendmap *rsm); 493 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack); 494 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm); 495 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged); 496 static int rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt); 497 static void 498 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 499 tcp_seq th_ack, int line, uint8_t quality); 500 static void 501 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm); 502 503 static uint32_t 504 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss); 505 static int32_t rack_handoff_ok(struct tcpcb *tp); 506 static int32_t rack_init(struct tcpcb *tp, void **ptr); 507 static void rack_init_sysctls(void); 508 509 static void 510 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, 511 struct tcphdr *th, int entered_rec, int dup_ack_struck, 512 int *dsack_seen, int *sacks_seen); 513 static void 514 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 515 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t ts, 516 struct rack_sendmap *hintrsm, uint32_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls, int segsiz); 517 518 static uint64_t rack_get_gp_est(struct tcp_rack *rack); 519 520 521 static void 522 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack, 523 struct rack_sendmap *rsm, uint32_t cts); 524 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm); 525 static int32_t rack_output(struct tcpcb *tp); 526 527 static uint32_t 528 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, 529 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm, 530 uint32_t cts, uint32_t segsiz); 531 static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq); 532 static void rack_remxt_tmr(struct tcpcb *tp); 533 static int 
rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt); 534 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack); 535 static int32_t rack_stopall(struct tcpcb *tp); 536 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line); 537 static uint32_t 538 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 539 struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint32_t add_flag, int segsiz); 540 static void 541 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 542 struct rack_sendmap *rsm, uint64_t ts, uint32_t add_flag, int segsiz); 543 static int 544 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 545 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack); 546 static int32_t tcp_addrack(module_t mod, int32_t type, void *data); 547 static int 548 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, 549 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 550 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 551 552 static int 553 rack_do_closing(struct mbuf *m, struct tcphdr *th, 554 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 555 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 556 static int 557 rack_do_established(struct mbuf *m, struct tcphdr *th, 558 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 559 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 560 static int 561 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, 562 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 563 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos); 564 static int 565 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, 566 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 567 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 568 static int 569 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, 570 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 571 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 572 static int 573 rack_do_lastack(struct mbuf *m, struct tcphdr *th, 574 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 575 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 576 static int 577 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, 578 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 579 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 580 static int 581 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, 582 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 583 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 584 static void rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts); 585 struct rack_sendmap * 586 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, 587 uint32_t tsused); 588 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, 589 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt); 590 static void 591 tcp_rack_partialack(struct tcpcb *tp); 592 static int 593 rack_set_profile(struct tcp_rack *rack, int prof); 594 static void 595 rack_apply_deferred_options(struct 
    tcp_rack *rack);

int32_t rack_clear_counter = 0;

/*
 * Return the long-term (lifetime) b/w estimate in bytes per second,
 * including any bytes accumulated in the currently open measurement
 * window.
 */
static uint64_t
rack_get_lt_bw(struct tcp_rack *rack)
{
	struct timeval tv;
	uint64_t tim, bytes;

	tim = rack->r_ctl.lt_bw_time;
	bytes = rack->r_ctl.lt_bw_bytes;
	if (rack->lt_bw_up) {
		/* Include all the current bytes too */
		microuptime(&tv);
		bytes += (rack->rc_tp->snd_una - rack->r_ctl.lt_seq);
		tim += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark);
	}
	if ((bytes != 0) && (tim != 0))
		return ((bytes * (uint64_t)1000000) / tim);
	else
		return (0);
}

static void
rack_swap_beta_values(struct tcp_rack *rack, uint8_t flex8)
{
	struct sockopt sopt;
	struct cc_newreno_opts opt;
	struct newreno old;
	struct tcpcb *tp;
	int error, failed = 0;

	tp = rack->rc_tp;
	if (tp->t_cc == NULL) {
		/* Tcb is leaving */
		return;
	}
	rack->rc_pacing_cc_set = 1;
	if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) {
		/* Not new-reno, we can't play games with beta! */
		failed = 1;
		goto out;

	}
	if (CC_ALGO(tp)->ctl_output == NULL) {
		/* Huh, not using new-reno so no swaps? */
		failed = 2;
		goto out;
	}
	old.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED;
	/* Get the current values out */
	sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
	sopt.sopt_dir = SOPT_GET;
	opt.name = CC_NEWRENO_BETA;
	error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
	if (error) {
		failed = 3;
		goto out;
	}
	old.beta = opt.val;
	opt.name = CC_NEWRENO_BETA_ECN;
	error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
	if (error) {
		failed = 4;
		goto out;
	}
	old.beta_ecn = opt.val;

	/* Now let's set in the values we have stored */
	sopt.sopt_dir = SOPT_SET;
	opt.name = CC_NEWRENO_BETA;
	opt.val = rack->r_ctl.rc_saved_beta.beta;
	error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
	if (error) {
		failed = 5;
		goto out;
	}
	opt.name = CC_NEWRENO_BETA_ECN;
	opt.val = rack->r_ctl.rc_saved_beta.beta_ecn;
	error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
	if (error) {
		failed = 6;
		goto out;
	}
	/* Save off the old values for later restoration */
	memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
out:
	if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
		union tcp_log_stackspecific log;
		struct timeval tv;
		struct newreno *ptr;

		ptr = ((struct newreno *)tp->t_ccv.cc_data);
		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.flex1 = ptr->beta;
		log.u_bbr.flex2 = ptr->beta_ecn;
		log.u_bbr.flex3 = ptr->newreno_flags;
		log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
		log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
		log.u_bbr.flex6 = failed;
		log.u_bbr.flex7 = rack->gp_ready;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->use_fixed_rate;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
		log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.flex8 = flex8;
		tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, error,
			      0, &log, false, NULL, NULL, 0, &tv);
	}
}

static void
rack_set_cc_pacing(struct tcp_rack *rack)
{
	if (rack->rc_pacing_cc_set)
		return;
	/*
	 * Use the swap utility placing in 3 for flex8 to id the
	 * setting of a new set of values.
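	 *
	 * Note that rack_swap_beta_values() exchanges the connection's
	 * current newreno beta/beta_ecn with the values stashed in
	 * r_ctl.rc_saved_beta (for example a "high beta" of 80 instead
	 * of the stock 50 when rack_hibeta_setting is in use), and
	 * rack_undo_cc_pacing() below uses flex8 = 4 to swap the
	 * originals back.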
717 */ 718 rack->rc_pacing_cc_set = 1; 719 rack_swap_beta_values(rack, 3); 720 } 721 722 static void 723 rack_undo_cc_pacing(struct tcp_rack *rack) 724 { 725 if (rack->rc_pacing_cc_set == 0) 726 return; 727 /* 728 * Use the swap utility placing in 4 for flex8 to id a 729 * restoral of the old values. 730 */ 731 rack->rc_pacing_cc_set = 0; 732 rack_swap_beta_values(rack, 4); 733 } 734 735 static void 736 rack_remove_pacing(struct tcp_rack *rack) 737 { 738 if (rack->rc_pacing_cc_set) 739 rack_undo_cc_pacing(rack); 740 if (rack->r_ctl.pacing_method & RACK_REG_PACING) 741 tcp_decrement_paced_conn(); 742 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) 743 tcp_dec_dgp_pacing_cnt(); 744 rack->rc_always_pace = 0; 745 rack->r_ctl.pacing_method = RACK_PACING_NONE; 746 rack->dgp_on = 0; 747 rack->rc_hybrid_mode = 0; 748 rack->use_fixed_rate = 0; 749 } 750 751 static void 752 rack_log_gpset(struct tcp_rack *rack, uint32_t seq_end, uint32_t ack_end_t, 753 uint32_t send_end_t, int line, uint8_t mode, struct rack_sendmap *rsm) 754 { 755 if (tcp_bblogging_on(rack->rc_tp) && (rack_verbose_logging != 0)) { 756 union tcp_log_stackspecific log; 757 struct timeval tv; 758 759 memset(&log, 0, sizeof(log)); 760 log.u_bbr.flex1 = seq_end; 761 log.u_bbr.flex2 = rack->rc_tp->gput_seq; 762 log.u_bbr.flex3 = ack_end_t; 763 log.u_bbr.flex4 = rack->rc_tp->gput_ts; 764 log.u_bbr.flex5 = send_end_t; 765 log.u_bbr.flex6 = rack->rc_tp->gput_ack; 766 log.u_bbr.flex7 = mode; 767 log.u_bbr.flex8 = 69; 768 log.u_bbr.rttProp = rack->r_ctl.rc_gp_cumack_ts; 769 log.u_bbr.delRate = rack->r_ctl.rc_gp_output_ts; 770 log.u_bbr.pkts_out = line; 771 log.u_bbr.cwnd_gain = rack->app_limited_needs_set; 772 log.u_bbr.pkt_epoch = rack->r_ctl.rc_app_limited_cnt; 773 log.u_bbr.epoch = rack->r_ctl.current_round; 774 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 775 if (rsm != NULL) { 776 log.u_bbr.applimited = rsm->r_start; 777 log.u_bbr.delivered = rsm->r_end; 778 log.u_bbr.epoch = rsm->r_flags; 779 } 780 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 781 TCP_LOG_EVENTP(rack->rc_tp, NULL, 782 &rack->rc_inp->inp_socket->so_rcv, 783 &rack->rc_inp->inp_socket->so_snd, 784 BBR_LOG_HPTSI_CALC, 0, 785 0, &log, false, &tv); 786 } 787 } 788 789 static int 790 sysctl_rack_clear(SYSCTL_HANDLER_ARGS) 791 { 792 uint32_t stat; 793 int32_t error; 794 795 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t)); 796 if (error || req->newptr == NULL) 797 return error; 798 799 error = SYSCTL_IN(req, &stat, sizeof(uint32_t)); 800 if (error) 801 return (error); 802 if (stat == 1) { 803 #ifdef INVARIANTS 804 printf("Clearing RACK counters\n"); 805 #endif 806 counter_u64_zero(rack_tlp_tot); 807 counter_u64_zero(rack_tlp_newdata); 808 counter_u64_zero(rack_tlp_retran); 809 counter_u64_zero(rack_tlp_retran_bytes); 810 counter_u64_zero(rack_to_tot); 811 counter_u64_zero(rack_saw_enobuf); 812 counter_u64_zero(rack_saw_enobuf_hw); 813 counter_u64_zero(rack_saw_enetunreach); 814 counter_u64_zero(rack_persists_sends); 815 counter_u64_zero(rack_total_bytes); 816 counter_u64_zero(rack_persists_acks); 817 counter_u64_zero(rack_persists_loss); 818 counter_u64_zero(rack_persists_lost_ends); 819 #ifdef INVARIANTS 820 counter_u64_zero(rack_adjust_map_bw); 821 #endif 822 counter_u64_zero(rack_to_alloc_hard); 823 counter_u64_zero(rack_to_alloc_emerg); 824 counter_u64_zero(rack_sack_proc_all); 825 counter_u64_zero(rack_fto_send); 826 counter_u64_zero(rack_fto_rsm_send); 827 counter_u64_zero(rack_extended_rfo); 828 counter_u64_zero(rack_hw_pace_init_fail); 829 
counter_u64_zero(rack_hw_pace_lost); 830 counter_u64_zero(rack_non_fto_send); 831 counter_u64_zero(rack_nfto_resend); 832 counter_u64_zero(rack_sack_proc_short); 833 counter_u64_zero(rack_sack_proc_restart); 834 counter_u64_zero(rack_to_alloc); 835 counter_u64_zero(rack_to_alloc_limited); 836 counter_u64_zero(rack_alloc_limited_conns); 837 counter_u64_zero(rack_split_limited); 838 counter_u64_zero(rack_rxt_clamps_cwnd); 839 counter_u64_zero(rack_rxt_clamps_cwnd_uniq); 840 counter_u64_zero(rack_multi_single_eq); 841 counter_u64_zero(rack_proc_non_comp_ack); 842 counter_u64_zero(rack_sack_attacks_detected); 843 counter_u64_zero(rack_sack_attacks_reversed); 844 counter_u64_zero(rack_sack_attacks_suspect); 845 counter_u64_zero(rack_sack_used_next_merge); 846 counter_u64_zero(rack_sack_used_prev_merge); 847 counter_u64_zero(rack_sack_splits); 848 counter_u64_zero(rack_sack_skipped_acked); 849 counter_u64_zero(rack_ack_total); 850 counter_u64_zero(rack_express_sack); 851 counter_u64_zero(rack_sack_total); 852 counter_u64_zero(rack_move_none); 853 counter_u64_zero(rack_move_some); 854 counter_u64_zero(rack_try_scwnd); 855 counter_u64_zero(rack_collapsed_win); 856 counter_u64_zero(rack_collapsed_win_rxt); 857 counter_u64_zero(rack_collapsed_win_seen); 858 counter_u64_zero(rack_collapsed_win_rxt_bytes); 859 } else if (stat == 2) { 860 #ifdef INVARIANTS 861 printf("Clearing RACK option array\n"); 862 #endif 863 COUNTER_ARRAY_ZERO(rack_opts_arry, RACK_OPTS_SIZE); 864 } else if (stat == 3) { 865 printf("Rack has no stats counters to clear (use 1 to clear all stats in sysctl node)\n"); 866 } else if (stat == 4) { 867 #ifdef INVARIANTS 868 printf("Clearing RACK out size array\n"); 869 #endif 870 COUNTER_ARRAY_ZERO(rack_out_size, TCP_MSS_ACCT_SIZE); 871 } 872 rack_clear_counter = 0; 873 return (0); 874 } 875 876 static void 877 rack_init_sysctls(void) 878 { 879 struct sysctl_oid *rack_counters; 880 struct sysctl_oid *rack_attack; 881 struct sysctl_oid *rack_pacing; 882 struct sysctl_oid *rack_timely; 883 struct sysctl_oid *rack_timers; 884 struct sysctl_oid *rack_tlp; 885 struct sysctl_oid *rack_misc; 886 struct sysctl_oid *rack_features; 887 struct sysctl_oid *rack_measure; 888 struct sysctl_oid *rack_probertt; 889 struct sysctl_oid *rack_hw_pacing; 890 891 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 892 SYSCTL_CHILDREN(rack_sysctl_root), 893 OID_AUTO, 894 "sack_attack", 895 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 896 "Rack Sack Attack Counters and Controls"); 897 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 898 SYSCTL_CHILDREN(rack_sysctl_root), 899 OID_AUTO, 900 "stats", 901 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 902 "Rack Counters"); 903 SYSCTL_ADD_S32(&rack_sysctl_ctx, 904 SYSCTL_CHILDREN(rack_sysctl_root), 905 OID_AUTO, "rate_sample_method", CTLFLAG_RW, 906 &rack_rate_sample_method , USE_RTT_LOW, 907 "What method should we use for rate sampling 0=high, 1=low "); 908 /* Probe rtt related controls */ 909 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 910 SYSCTL_CHILDREN(rack_sysctl_root), 911 OID_AUTO, 912 "probertt", 913 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 914 "ProbeRTT related Controls"); 915 SYSCTL_ADD_U16(&rack_sysctl_ctx, 916 SYSCTL_CHILDREN(rack_probertt), 917 OID_AUTO, "exit_per_hpb", CTLFLAG_RW, 918 &rack_atexit_prtt_hbp, 130, 919 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%"); 920 SYSCTL_ADD_U16(&rack_sysctl_ctx, 921 SYSCTL_CHILDREN(rack_probertt), 922 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW, 923 &rack_atexit_prtt, 130, 924 "What percentage above goodput do we 
clamp CA/SS to at exit on a non high-BDP path 100%"); 925 SYSCTL_ADD_U16(&rack_sysctl_ctx, 926 SYSCTL_CHILDREN(rack_probertt), 927 OID_AUTO, "gp_per_mul", CTLFLAG_RW, 928 &rack_per_of_gp_probertt, 60, 929 "What percentage of goodput do we pace at in probertt"); 930 SYSCTL_ADD_U16(&rack_sysctl_ctx, 931 SYSCTL_CHILDREN(rack_probertt), 932 OID_AUTO, "gp_per_reduce", CTLFLAG_RW, 933 &rack_per_of_gp_probertt_reduce, 10, 934 "What percentage of goodput do we reduce every gp_srtt"); 935 SYSCTL_ADD_U16(&rack_sysctl_ctx, 936 SYSCTL_CHILDREN(rack_probertt), 937 OID_AUTO, "gp_per_low", CTLFLAG_RW, 938 &rack_per_of_gp_lowthresh, 40, 939 "What percentage of goodput do we allow the multiplier to fall to"); 940 SYSCTL_ADD_U32(&rack_sysctl_ctx, 941 SYSCTL_CHILDREN(rack_probertt), 942 OID_AUTO, "time_between", CTLFLAG_RW, 943 & rack_time_between_probertt, 96000000, 944 "How many useconds between the lowest rtt falling must past before we enter probertt"); 945 SYSCTL_ADD_U32(&rack_sysctl_ctx, 946 SYSCTL_CHILDREN(rack_probertt), 947 OID_AUTO, "safety", CTLFLAG_RW, 948 &rack_probe_rtt_safety_val, 2000000, 949 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)"); 950 SYSCTL_ADD_U32(&rack_sysctl_ctx, 951 SYSCTL_CHILDREN(rack_probertt), 952 OID_AUTO, "sets_cwnd", CTLFLAG_RW, 953 &rack_probe_rtt_sets_cwnd, 0, 954 "Do we set the cwnd too (if always_lower is on)"); 955 SYSCTL_ADD_U32(&rack_sysctl_ctx, 956 SYSCTL_CHILDREN(rack_probertt), 957 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW, 958 &rack_max_drain_wait, 2, 959 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal"); 960 SYSCTL_ADD_U32(&rack_sysctl_ctx, 961 SYSCTL_CHILDREN(rack_probertt), 962 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW, 963 &rack_must_drain, 1, 964 "We must drain this many gp_srtt's waiting for flight to reach goal"); 965 SYSCTL_ADD_U32(&rack_sysctl_ctx, 966 SYSCTL_CHILDREN(rack_probertt), 967 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW, 968 &rack_probertt_use_min_rtt_entry, 1, 969 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); 970 SYSCTL_ADD_U32(&rack_sysctl_ctx, 971 SYSCTL_CHILDREN(rack_probertt), 972 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW, 973 &rack_probertt_use_min_rtt_exit, 0, 974 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); 975 SYSCTL_ADD_U32(&rack_sysctl_ctx, 976 SYSCTL_CHILDREN(rack_probertt), 977 OID_AUTO, "length_div", CTLFLAG_RW, 978 &rack_probertt_gpsrtt_cnt_div, 0, 979 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)"); 980 SYSCTL_ADD_U32(&rack_sysctl_ctx, 981 SYSCTL_CHILDREN(rack_probertt), 982 OID_AUTO, "length_mul", CTLFLAG_RW, 983 &rack_probertt_gpsrtt_cnt_mul, 0, 984 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)"); 985 SYSCTL_ADD_U32(&rack_sysctl_ctx, 986 SYSCTL_CHILDREN(rack_probertt), 987 OID_AUTO, "holdtim_at_target", CTLFLAG_RW, 988 &rack_min_probertt_hold, 200000, 989 "What is the minimum time we hold probertt at target"); 990 SYSCTL_ADD_U32(&rack_sysctl_ctx, 991 SYSCTL_CHILDREN(rack_probertt), 992 OID_AUTO, "filter_life", CTLFLAG_RW, 993 &rack_probertt_filter_life, 10000000, 994 "What is the time for the filters life in useconds"); 995 SYSCTL_ADD_U32(&rack_sysctl_ctx, 996 SYSCTL_CHILDREN(rack_probertt), 997 OID_AUTO, "lower_within", CTLFLAG_RW, 998 &rack_probertt_lower_within, 10, 999 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); 1000 SYSCTL_ADD_U32(&rack_sysctl_ctx, 
1001 SYSCTL_CHILDREN(rack_probertt), 1002 OID_AUTO, "must_move", CTLFLAG_RW, 1003 &rack_min_rtt_movement, 250, 1004 "How much is the minimum movement in rtt to count as a drop for probertt purposes"); 1005 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1006 SYSCTL_CHILDREN(rack_probertt), 1007 OID_AUTO, "clear_is_cnts", CTLFLAG_RW, 1008 &rack_probertt_clear_is, 1, 1009 "Do we clear I/S counts on exiting probe-rtt"); 1010 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1011 SYSCTL_CHILDREN(rack_probertt), 1012 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW, 1013 &rack_max_drain_hbp, 1, 1014 "How many extra drain gpsrtt's do we get in highly buffered paths"); 1015 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1016 SYSCTL_CHILDREN(rack_probertt), 1017 OID_AUTO, "hbp_threshold", CTLFLAG_RW, 1018 &rack_hbp_thresh, 3, 1019 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); 1020 /* Pacing related sysctls */ 1021 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1022 SYSCTL_CHILDREN(rack_sysctl_root), 1023 OID_AUTO, 1024 "pacing", 1025 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1026 "Pacing related Controls"); 1027 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1028 SYSCTL_CHILDREN(rack_pacing), 1029 OID_AUTO, "pcm_enabled", CTLFLAG_RW, 1030 &rack_pcm_is_enabled, 1, 1031 "Do we by default do PCM measurements?"); 1032 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1033 SYSCTL_CHILDREN(rack_pacing), 1034 OID_AUTO, "pcm_rnds", CTLFLAG_RW, 1035 &rack_pcm_every_n_rounds, 100, 1036 "How many rounds before we need to do a PCM measurement"); 1037 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1038 SYSCTL_CHILDREN(rack_pacing), 1039 OID_AUTO, "pcm_blast", CTLFLAG_RW, 1040 &rack_pcm_blast, 0, 1041 "Blast out the full cwnd/rwnd when doing a PCM measurement"); 1042 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1043 SYSCTL_CHILDREN(rack_pacing), 1044 OID_AUTO, "rnd_gp_gain", CTLFLAG_RW, 1045 &rack_gp_gain_req, 1200, 1046 "How much do we have to increase the GP to record the round 1200 = 120.0"); 1047 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1048 SYSCTL_CHILDREN(rack_pacing), 1049 OID_AUTO, "dgp_out_of_ss_at", CTLFLAG_RW, 1050 &rack_rnd_cnt_req, 0x10005, 1051 "How many rounds less than rnd_gp_gain will drop us out of SS"); 1052 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1053 SYSCTL_CHILDREN(rack_pacing), 1054 OID_AUTO, "no_timely", CTLFLAG_RW, 1055 &rack_timely_off, 0, 1056 "Do we not use timely in DGP?"); 1057 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1058 SYSCTL_CHILDREN(rack_pacing), 1059 OID_AUTO, "fullbufdisc", CTLFLAG_RW, 1060 &rack_full_buffer_discount, 10, 1061 "What percentage b/w reduction over the GP estimate for a full buffer (default=0 off)?"); 1062 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1063 SYSCTL_CHILDREN(rack_pacing), 1064 OID_AUTO, "fillcw", CTLFLAG_RW, 1065 &rack_fill_cw_state, 0, 1066 "Enable fillcw on new connections (default=0 off)?"); 1067 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1068 SYSCTL_CHILDREN(rack_pacing), 1069 OID_AUTO, "min_burst", CTLFLAG_RW, 1070 &rack_pacing_min_seg, 0, 1071 "What is the min burst size for pacing (0 disables)?"); 1072 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1073 SYSCTL_CHILDREN(rack_pacing), 1074 OID_AUTO, "divisor", CTLFLAG_RW, 1075 &rack_default_pacing_divisor, 250, 1076 "What is the default divisor given to the rl code?"); 1077 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1078 SYSCTL_CHILDREN(rack_pacing), 1079 OID_AUTO, "fillcw_max_mult", CTLFLAG_RW, 1080 &rack_bw_multipler, 0, 1081 "What is the limit multiplier of the current gp_est that fillcw can increase the b/w too, 200 == 200% (0 = off)?"); 1082 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1083 SYSCTL_CHILDREN(rack_pacing), 1084 OID_AUTO, "max_pace_over", 
CTLFLAG_RW, 1085 &rack_max_per_above, 30, 1086 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)"); 1087 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1088 SYSCTL_CHILDREN(rack_pacing), 1089 OID_AUTO, "allow1mss", CTLFLAG_RW, 1090 &rack_pace_one_seg, 0, 1091 "Do we allow low b/w pacing of 1MSS instead of two (1.2Meg and less)?"); 1092 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1093 SYSCTL_CHILDREN(rack_pacing), 1094 OID_AUTO, "limit_wsrtt", CTLFLAG_RW, 1095 &rack_limit_time_with_srtt, 0, 1096 "Do we limit pacing time based on srtt"); 1097 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1098 SYSCTL_CHILDREN(rack_pacing), 1099 OID_AUTO, "gp_per_ss", CTLFLAG_RW, 1100 &rack_per_of_gp_ss, 250, 1101 "If non zero, what percentage of goodput to pace at in slow start"); 1102 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1103 SYSCTL_CHILDREN(rack_pacing), 1104 OID_AUTO, "gp_per_ca", CTLFLAG_RW, 1105 &rack_per_of_gp_ca, 150, 1106 "If non zero, what percentage of goodput to pace at in congestion avoidance"); 1107 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1108 SYSCTL_CHILDREN(rack_pacing), 1109 OID_AUTO, "gp_per_rec", CTLFLAG_RW, 1110 &rack_per_of_gp_rec, 200, 1111 "If non zero, what percentage of goodput to pace at in recovery"); 1112 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1113 SYSCTL_CHILDREN(rack_pacing), 1114 OID_AUTO, "pace_max_seg", CTLFLAG_RW, 1115 &rack_hptsi_segments, 40, 1116 "What size is the max for TSO segments in pacing and burst mitigation"); 1117 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1118 SYSCTL_CHILDREN(rack_pacing), 1119 OID_AUTO, "burst_reduces", CTLFLAG_RW, 1120 &rack_slot_reduction, 4, 1121 "When doing only burst mitigation what is the reduce divisor"); 1122 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1123 SYSCTL_CHILDREN(rack_sysctl_root), 1124 OID_AUTO, "use_pacing", CTLFLAG_RW, 1125 &rack_pace_every_seg, 0, 1126 "If set we use pacing, if clear we use only the original burst mitigation"); 1127 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1128 SYSCTL_CHILDREN(rack_pacing), 1129 OID_AUTO, "rate_cap", CTLFLAG_RW, 1130 &rack_bw_rate_cap, 0, 1131 "If set we apply this value to the absolute rate cap used by pacing"); 1132 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1133 SYSCTL_CHILDREN(rack_pacing), 1134 OID_AUTO, "fillcw_cap", CTLFLAG_RW, 1135 &rack_fillcw_bw_cap, 3750000, 1136 "Do we have an absolute cap on the amount of b/w fillcw can specify (0 = no)?"); 1137 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1138 SYSCTL_CHILDREN(rack_sysctl_root), 1139 OID_AUTO, "req_measure_cnt", CTLFLAG_RW, 1140 &rack_req_measurements, 1, 1141 "If doing dynamic pacing, how many measurements must be in before we start pacing?"); 1142 /* Hardware pacing */ 1143 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1144 SYSCTL_CHILDREN(rack_sysctl_root), 1145 OID_AUTO, 1146 "hdwr_pacing", 1147 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1148 "Pacing related Controls"); 1149 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1150 SYSCTL_CHILDREN(rack_hw_pacing), 1151 OID_AUTO, "rwnd_factor", CTLFLAG_RW, 1152 &rack_hw_rwnd_factor, 2, 1153 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?"); 1154 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1155 SYSCTL_CHILDREN(rack_hw_pacing), 1156 OID_AUTO, "precheck", CTLFLAG_RW, 1157 &rack_hw_check_queue, 0, 1158 "Do we always precheck the hdwr pacing queue to avoid ENOBUF's?"); 1159 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1160 SYSCTL_CHILDREN(rack_hw_pacing), 1161 OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW, 1162 &rack_enobuf_hw_boost_mult, 0, 1163 "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?"); 1164 
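	/*
	 * For reference, a sketch of how these hardware pacing knobs are
	 * typically driven from userland via sysctl(8).  The prefix below
	 * assumes the stack registered rack_sysctl_root under
	 * net.inet.tcp.<stackname> (e.g. "rack"); the actual prefix
	 * depends on how the root OID was created elsewhere:
	 *
	 *	# sysctl net.inet.tcp.rack.hdwr_pacing.enable=1
	 *	# sysctl net.inet.tcp.rack.hdwr_pacing.rate_min=1500000
	 */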
SYSCTL_ADD_S32(&rack_sysctl_ctx, 1165 SYSCTL_CHILDREN(rack_hw_pacing), 1166 OID_AUTO, "pace_enobuf_max", CTLFLAG_RW, 1167 &rack_enobuf_hw_max, 2, 1168 "What is the max boost the pacing time if we see a ENOBUFS?"); 1169 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1170 SYSCTL_CHILDREN(rack_hw_pacing), 1171 OID_AUTO, "pace_enobuf_min", CTLFLAG_RW, 1172 &rack_enobuf_hw_min, 2, 1173 "What is the min boost the pacing time if we see a ENOBUFS?"); 1174 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1175 SYSCTL_CHILDREN(rack_hw_pacing), 1176 OID_AUTO, "enable", CTLFLAG_RW, 1177 &rack_enable_hw_pacing, 0, 1178 "Should RACK attempt to use hw pacing?"); 1179 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1180 SYSCTL_CHILDREN(rack_hw_pacing), 1181 OID_AUTO, "rate_cap", CTLFLAG_RW, 1182 &rack_hw_rate_caps, 0, 1183 "Does the highest hardware pacing rate cap the rate we will send at??"); 1184 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1185 SYSCTL_CHILDREN(rack_hw_pacing), 1186 OID_AUTO, "uncap_per", CTLFLAG_RW, 1187 &rack_hw_rate_cap_per, 0, 1188 "If you go over b/w by this amount you will be uncapped (0 = never)"); 1189 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1190 SYSCTL_CHILDREN(rack_hw_pacing), 1191 OID_AUTO, "rate_min", CTLFLAG_RW, 1192 &rack_hw_rate_min, 0, 1193 "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?"); 1194 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1195 SYSCTL_CHILDREN(rack_hw_pacing), 1196 OID_AUTO, "rate_to_low", CTLFLAG_RW, 1197 &rack_hw_rate_to_low, 0, 1198 "If we fall below this rate, dis-engage hw pacing?"); 1199 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1200 SYSCTL_CHILDREN(rack_hw_pacing), 1201 OID_AUTO, "up_only", CTLFLAG_RW, 1202 &rack_hw_up_only, 0, 1203 "Do we allow hw pacing to lower the rate selected?"); 1204 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1205 SYSCTL_CHILDREN(rack_hw_pacing), 1206 OID_AUTO, "extra_mss_precise", CTLFLAG_RW, 1207 &rack_hw_pace_extra_slots, 0, 1208 "If the rates between software and hardware match precisely how many extra time_betweens do we get?"); 1209 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1210 SYSCTL_CHILDREN(rack_sysctl_root), 1211 OID_AUTO, 1212 "timely", 1213 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1214 "Rack Timely RTT Controls"); 1215 /* Timely based GP dynmics */ 1216 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1217 SYSCTL_CHILDREN(rack_timely), 1218 OID_AUTO, "upper", CTLFLAG_RW, 1219 &rack_gp_per_bw_mul_up, 2, 1220 "Rack timely upper range for equal b/w (in percentage)"); 1221 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1222 SYSCTL_CHILDREN(rack_timely), 1223 OID_AUTO, "lower", CTLFLAG_RW, 1224 &rack_gp_per_bw_mul_down, 4, 1225 "Rack timely lower range for equal b/w (in percentage)"); 1226 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1227 SYSCTL_CHILDREN(rack_timely), 1228 OID_AUTO, "rtt_max_mul", CTLFLAG_RW, 1229 &rack_gp_rtt_maxmul, 3, 1230 "Rack timely multiplier of lowest rtt for rtt_max"); 1231 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1232 SYSCTL_CHILDREN(rack_timely), 1233 OID_AUTO, "rtt_min_div", CTLFLAG_RW, 1234 &rack_gp_rtt_mindiv, 4, 1235 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1236 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1237 SYSCTL_CHILDREN(rack_timely), 1238 OID_AUTO, "rtt_min_mul", CTLFLAG_RW, 1239 &rack_gp_rtt_minmul, 1, 1240 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1241 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1242 SYSCTL_CHILDREN(rack_timely), 1243 OID_AUTO, "decrease", CTLFLAG_RW, 1244 &rack_gp_decrease_per, 80, 1245 "Rack timely Beta value 80 = .8 (scaled by 100)"); 1246 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1247 
SYSCTL_CHILDREN(rack_timely), 1248 OID_AUTO, "increase", CTLFLAG_RW, 1249 &rack_gp_increase_per, 2, 1250 "Rack timely increase perentage of our GP multiplication factor"); 1251 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1252 SYSCTL_CHILDREN(rack_timely), 1253 OID_AUTO, "lowerbound", CTLFLAG_RW, 1254 &rack_per_lower_bound, 50, 1255 "Rack timely lowest percentage we allow GP multiplier to fall to"); 1256 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1257 SYSCTL_CHILDREN(rack_timely), 1258 OID_AUTO, "p5_upper", CTLFLAG_RW, 1259 &rack_gain_p5_ub, 250, 1260 "Profile 5 upper bound to timely gain"); 1261 1262 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1263 SYSCTL_CHILDREN(rack_timely), 1264 OID_AUTO, "upperboundss", CTLFLAG_RW, 1265 &rack_per_upper_bound_ss, 0, 1266 "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)"); 1267 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1268 SYSCTL_CHILDREN(rack_timely), 1269 OID_AUTO, "upperboundca", CTLFLAG_RW, 1270 &rack_per_upper_bound_ca, 0, 1271 "Rack timely highest percentage we allow GP multiplier to CA raise to (0 is no upperbound)"); 1272 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1273 SYSCTL_CHILDREN(rack_timely), 1274 OID_AUTO, "dynamicgp", CTLFLAG_RW, 1275 &rack_do_dyn_mul, 0, 1276 "Rack timely do we enable dynmaic timely goodput by default"); 1277 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1278 SYSCTL_CHILDREN(rack_timely), 1279 OID_AUTO, "no_rec_red", CTLFLAG_RW, 1280 &rack_gp_no_rec_chg, 1, 1281 "Rack timely do we prohibit the recovery multiplier from being lowered"); 1282 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1283 SYSCTL_CHILDREN(rack_timely), 1284 OID_AUTO, "red_clear_cnt", CTLFLAG_RW, 1285 &rack_timely_dec_clear, 6, 1286 "Rack timely what threshold do we count to before another boost during b/w decent"); 1287 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1288 SYSCTL_CHILDREN(rack_timely), 1289 OID_AUTO, "max_push_rise", CTLFLAG_RW, 1290 &rack_timely_max_push_rise, 3, 1291 "Rack timely how many times do we push up with b/w increase"); 1292 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1293 SYSCTL_CHILDREN(rack_timely), 1294 OID_AUTO, "max_push_drop", CTLFLAG_RW, 1295 &rack_timely_max_push_drop, 3, 1296 "Rack timely how many times do we push back on b/w decent"); 1297 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1298 SYSCTL_CHILDREN(rack_timely), 1299 OID_AUTO, "min_segs", CTLFLAG_RW, 1300 &rack_timely_min_segs, 4, 1301 "Rack timely when setting the cwnd what is the min num segments"); 1302 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1303 SYSCTL_CHILDREN(rack_timely), 1304 OID_AUTO, "noback_max", CTLFLAG_RW, 1305 &rack_use_max_for_nobackoff, 0, 1306 "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min"); 1307 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1308 SYSCTL_CHILDREN(rack_timely), 1309 OID_AUTO, "interim_timely_only", CTLFLAG_RW, 1310 &rack_timely_int_timely_only, 0, 1311 "Rack timely when doing interim timely's do we only do timely (no b/w consideration)"); 1312 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1313 SYSCTL_CHILDREN(rack_timely), 1314 OID_AUTO, "nonstop", CTLFLAG_RW, 1315 &rack_timely_no_stopping, 0, 1316 "Rack timely don't stop increase"); 1317 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1318 SYSCTL_CHILDREN(rack_timely), 1319 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW, 1320 &rack_down_raise_thresh, 100, 1321 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)"); 1322 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1323 SYSCTL_CHILDREN(rack_timely), 1324 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW, 1325 &rack_req_segs, 1, 1326 "Bottom dragging if not these many segments outstanding and room"); 
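	/*
	 * Worked example of the timely knobs above (illustrative only;
	 * the exact application of these values is in the timely/goodput
	 * code later in this file).  Starting from a CA goodput
	 * multiplier of 200%:
	 *
	 *	one "increase" step: 200 + (200 * rack_gp_increase_per / 100)
	 *			     = 200 + 4   = 204%
	 *	one "decrease" step: 200 * rack_gp_decrease_per / 100
	 *			     = 200 * 0.8 = 160%
	 *
	 * and the result is never allowed below rack_per_lower_bound
	 * (50%) nor above rack_per_upper_bound_ca when that bound is
	 * non-zero.
	 */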
1327 1328 /* TLP and Rack related parameters */ 1329 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1330 SYSCTL_CHILDREN(rack_sysctl_root), 1331 OID_AUTO, 1332 "tlp", 1333 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1334 "TLP and Rack related Controls"); 1335 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1336 SYSCTL_CHILDREN(rack_tlp), 1337 OID_AUTO, "use_rrr", CTLFLAG_RW, 1338 &use_rack_rr, 1, 1339 "Do we use Rack Rapid Recovery"); 1340 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1341 SYSCTL_CHILDREN(rack_tlp), 1342 OID_AUTO, "post_rec_labc", CTLFLAG_RW, 1343 &rack_max_abc_post_recovery, 2, 1344 "Since we do early recovery, do we override the l_abc to a value, if so what?"); 1345 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1346 SYSCTL_CHILDREN(rack_tlp), 1347 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW, 1348 &rack_non_rxt_use_cr, 0, 1349 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk"); 1350 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1351 SYSCTL_CHILDREN(rack_tlp), 1352 OID_AUTO, "tlpmethod", CTLFLAG_RW, 1353 &rack_tlp_threshold_use, TLP_USE_TWO_ONE, 1354 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); 1355 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1356 SYSCTL_CHILDREN(rack_tlp), 1357 OID_AUTO, "limit", CTLFLAG_RW, 1358 &rack_tlp_limit, 2, 1359 "How many TLP's can be sent without sending new data"); 1360 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1361 SYSCTL_CHILDREN(rack_tlp), 1362 OID_AUTO, "use_greater", CTLFLAG_RW, 1363 &rack_tlp_use_greater, 1, 1364 "Should we use the rack_rtt time if its greater than srtt"); 1365 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1366 SYSCTL_CHILDREN(rack_tlp), 1367 OID_AUTO, "tlpminto", CTLFLAG_RW, 1368 &rack_tlp_min, 10000, 1369 "TLP minimum timeout per the specification (in microseconds)"); 1370 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1371 SYSCTL_CHILDREN(rack_tlp), 1372 OID_AUTO, "send_oldest", CTLFLAG_RW, 1373 &rack_always_send_oldest, 0, 1374 "Should we always send the oldest TLP and RACK-TLP"); 1375 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1376 SYSCTL_CHILDREN(rack_tlp), 1377 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW, 1378 &rack_lower_cwnd_at_tlp, 0, 1379 "When a TLP completes a retran should we enter recovery"); 1380 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1381 SYSCTL_CHILDREN(rack_tlp), 1382 OID_AUTO, "reorder_thresh", CTLFLAG_RW, 1383 &rack_reorder_thresh, 2, 1384 "What factor for rack will be added when seeing reordering (shift right)"); 1385 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1386 SYSCTL_CHILDREN(rack_tlp), 1387 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW, 1388 &rack_tlp_thresh, 1, 1389 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); 1390 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1391 SYSCTL_CHILDREN(rack_tlp), 1392 OID_AUTO, "reorder_fade", CTLFLAG_RW, 1393 &rack_reorder_fade, 60000000, 1394 "Does reorder detection fade, if so how many microseconds (0 means never)"); 1395 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1396 SYSCTL_CHILDREN(rack_tlp), 1397 OID_AUTO, "pktdelay", CTLFLAG_RW, 1398 &rack_pkt_delay, 1000, 1399 "Extra RACK time (in microseconds) besides reordering thresh"); 1400 1401 /* Timer related controls */ 1402 rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1403 SYSCTL_CHILDREN(rack_sysctl_root), 1404 OID_AUTO, 1405 "timers", 1406 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1407 "Timer related controls"); 1408 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1409 SYSCTL_CHILDREN(rack_timers), 1410 OID_AUTO, "reset_ssth_rec_rto", CTLFLAG_RW, 1411 &rack_ssthresh_rest_rto_rec, 0, 1412 "When doing recovery -> rto -> recovery do we reset SSthresh?"); 1413 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1414 SYSCTL_CHILDREN(rack_timers), 1415 
OID_AUTO, "scoreboard_thresh", CTLFLAG_RW, 1416 &rack_rxt_scoreboard_clear_thresh, 2, 1417 "How many RTO's are allowed before we clear the scoreboard"); 1418 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1419 SYSCTL_CHILDREN(rack_timers), 1420 OID_AUTO, "honor_hpts_min", CTLFLAG_RW, 1421 &rack_honors_hpts_min_to, 1, 1422 "Do rack pacing timers honor hpts min timeout"); 1423 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1424 SYSCTL_CHILDREN(rack_timers), 1425 OID_AUTO, "hpts_max_reduce", CTLFLAG_RW, 1426 &rack_max_reduce, 10, 1427 "Max percentage we will reduce slot by for pacing when we are behind"); 1428 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1429 SYSCTL_CHILDREN(rack_timers), 1430 OID_AUTO, "persmin", CTLFLAG_RW, 1431 &rack_persist_min, 250000, 1432 "What is the minimum time in microseconds between persists"); 1433 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1434 SYSCTL_CHILDREN(rack_timers), 1435 OID_AUTO, "persmax", CTLFLAG_RW, 1436 &rack_persist_max, 2000000, 1437 "What is the largest delay in microseconds between persists"); 1438 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1439 SYSCTL_CHILDREN(rack_timers), 1440 OID_AUTO, "delayed_ack", CTLFLAG_RW, 1441 &rack_delayed_ack_time, 40000, 1442 "Delayed ack time (40ms in microseconds)"); 1443 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1444 SYSCTL_CHILDREN(rack_timers), 1445 OID_AUTO, "minrto", CTLFLAG_RW, 1446 &rack_rto_min, 30000, 1447 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP"); 1448 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1449 SYSCTL_CHILDREN(rack_timers), 1450 OID_AUTO, "maxrto", CTLFLAG_RW, 1451 &rack_rto_max, 4000000, 1452 "Maximum RTO in microseconds -- should be at least as large as min_rto"); 1453 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1454 SYSCTL_CHILDREN(rack_timers), 1455 OID_AUTO, "minto", CTLFLAG_RW, 1456 &rack_min_to, 1000, 1457 "Minimum rack timeout in microseconds"); 1458 /* Measure controls */ 1459 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1460 SYSCTL_CHILDREN(rack_sysctl_root), 1461 OID_AUTO, 1462 "measure", 1463 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1464 "Measure related controls"); 1465 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1466 SYSCTL_CHILDREN(rack_measure), 1467 OID_AUTO, "wma_divisor", CTLFLAG_RW, 1468 &rack_wma_divisor, 8, 1469 "When doing b/w calculation what is the divisor for the WMA"); 1470 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1471 SYSCTL_CHILDREN(rack_measure), 1472 OID_AUTO, "end_cwnd", CTLFLAG_RW, 1473 &rack_cwnd_block_ends_measure, 0, 1474 "Does a cwnd just-return end the measurement window (app limited)"); 1475 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1476 SYSCTL_CHILDREN(rack_measure), 1477 OID_AUTO, "end_rwnd", CTLFLAG_RW, 1478 &rack_rwnd_block_ends_measure, 0, 1479 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); 1480 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1481 SYSCTL_CHILDREN(rack_measure), 1482 OID_AUTO, "min_target", CTLFLAG_RW, 1483 &rack_def_data_window, 20, 1484 "What is the minimum target window (in mss) for a GP measurements"); 1485 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1486 SYSCTL_CHILDREN(rack_measure), 1487 OID_AUTO, "goal_bdp", CTLFLAG_RW, 1488 &rack_goal_bdp, 2, 1489 "What is the goal BDP to measure"); 1490 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1491 SYSCTL_CHILDREN(rack_measure), 1492 OID_AUTO, "min_srtts", CTLFLAG_RW, 1493 &rack_min_srtts, 1, 1494 "What is the goal BDP to measure"); 1495 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1496 SYSCTL_CHILDREN(rack_measure), 1497 OID_AUTO, "min_measure_tim", CTLFLAG_RW, 1498 &rack_min_measure_usec, 0, 1499 "What is the Minimum time time for a measurement if 0, this is off"); 1500 /* Features */ 1501 
rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1502 SYSCTL_CHILDREN(rack_sysctl_root), 1503 OID_AUTO, 1504 "features", 1505 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1506 "Feature controls"); 1507 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1508 SYSCTL_CHILDREN(rack_features), 1509 OID_AUTO, "hybrid_set_maxseg", CTLFLAG_RW, 1510 &rack_hybrid_allow_set_maxseg, 0, 1511 "Should hybrid pacing allow the setmss command"); 1512 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1513 SYSCTL_CHILDREN(rack_features), 1514 OID_AUTO, "cmpack", CTLFLAG_RW, 1515 &rack_use_cmp_acks, 1, 1516 "Should RACK have LRO send compressed acks"); 1517 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1518 SYSCTL_CHILDREN(rack_features), 1519 OID_AUTO, "fsb", CTLFLAG_RW, 1520 &rack_use_fsb, 1, 1521 "Should RACK use the fast send block?"); 1522 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1523 SYSCTL_CHILDREN(rack_features), 1524 OID_AUTO, "rfo", CTLFLAG_RW, 1525 &rack_use_rfo, 1, 1526 "Should RACK use rack_fast_output()?"); 1527 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1528 SYSCTL_CHILDREN(rack_features), 1529 OID_AUTO, "rsmrfo", CTLFLAG_RW, 1530 &rack_use_rsm_rfo, 1, 1531 "Should RACK use rack_fast_rsm_output()?"); 1532 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1533 SYSCTL_CHILDREN(rack_features), 1534 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW, 1535 &rack_enable_mqueue_for_nonpaced, 0, 1536 "Should RACK use mbuf queuing for non-paced connections"); 1537 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1538 SYSCTL_CHILDREN(rack_features), 1539 OID_AUTO, "hystartplusplus", CTLFLAG_RW, 1540 &rack_do_hystart, 0, 1541 "Should RACK enable HyStart++ on connections?"); 1542 /* Misc rack controls */ 1543 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1544 SYSCTL_CHILDREN(rack_sysctl_root), 1545 OID_AUTO, 1546 "misc", 1547 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1548 "Misc related controls"); 1549 #ifdef TCP_ACCOUNTING 1550 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1551 SYSCTL_CHILDREN(rack_misc), 1552 OID_AUTO, "tcp_acct", CTLFLAG_RW, 1553 &rack_tcp_accounting, 0, 1554 "Should we turn on TCP accounting for all rack sessions?"); 1555 #endif 1556 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1557 SYSCTL_CHILDREN(rack_misc), 1558 OID_AUTO, "dnd", CTLFLAG_RW, 1559 &rack_dnd_default, 0, 1560 "Do not disturb default for rack_rrr = 3"); 1561 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1562 SYSCTL_CHILDREN(rack_misc), 1563 OID_AUTO, "sad_seg_per", CTLFLAG_RW, 1564 &sad_seg_size_per, 800, 1565 "Percentage of segment size needed in a SACK (800 = 80.0%)"); 1566 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1567 SYSCTL_CHILDREN(rack_misc), 1568 OID_AUTO, "rxt_controls", CTLFLAG_RW, 1569 &rack_rxt_controls, 0, 1570 "Retransmit sending size controls (valid values 0, 1, 2 default=1)?"); 1571 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1572 SYSCTL_CHILDREN(rack_misc), 1573 OID_AUTO, "rack_hibeta", CTLFLAG_RW, 1574 &rack_hibeta_setting, 0, 1575 "Do we use a high beta (80 instead of 50)?"); 1576 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1577 SYSCTL_CHILDREN(rack_misc), 1578 OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW, 1579 &rack_apply_rtt_with_reduced_conf, 0, 1580 "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?"); 1581 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1582 SYSCTL_CHILDREN(rack_misc), 1583 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW, 1584 &rack_dsack_std_based, 3, 1585 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?"); 1586 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1587 SYSCTL_CHILDREN(rack_misc), 1588 OID_AUTO, "prr_addback_max", CTLFLAG_RW, 1589 &rack_prr_addbackmax, 2, 1590 "What is the maximum number of MSS we allow to be added back
if prr can't send all its data?"); 1591 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1592 SYSCTL_CHILDREN(rack_misc), 1593 OID_AUTO, "stats_gets_ms", CTLFLAG_RW, 1594 &rack_stats_gets_ms_rtt, 1, 1595 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); 1596 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1597 SYSCTL_CHILDREN(rack_misc), 1598 OID_AUTO, "clientlowbuf", CTLFLAG_RW, 1599 &rack_client_low_buf, 0, 1600 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?"); 1601 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1602 SYSCTL_CHILDREN(rack_misc), 1603 OID_AUTO, "defprofile", CTLFLAG_RW, 1604 &rack_def_profile, 0, 1605 "Should RACK use a default profile (0=no, num == profile num)?"); 1606 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1607 SYSCTL_CHILDREN(rack_misc), 1608 OID_AUTO, "shared_cwnd", CTLFLAG_RW, 1609 &rack_enable_shared_cwnd, 1, 1610 "Should RACK try to use the shared cwnd on connections where allowed"); 1611 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1612 SYSCTL_CHILDREN(rack_misc), 1613 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW, 1614 &rack_limits_scwnd, 1, 1615 "Should RACK place low end time limits on the shared cwnd feature"); 1616 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1617 SYSCTL_CHILDREN(rack_misc), 1618 OID_AUTO, "no_prr", CTLFLAG_RW, 1619 &rack_disable_prr, 0, 1620 "Should RACK not use prr and only pace (must have pacing on)"); 1621 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1622 SYSCTL_CHILDREN(rack_misc), 1623 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1624 &rack_verbose_logging, 0, 1625 "Should RACK black box logging be verbose"); 1626 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1627 SYSCTL_CHILDREN(rack_misc), 1628 OID_AUTO, "data_after_close", CTLFLAG_RW, 1629 &rack_ignore_data_after_close, 1, 1630 "Do we hold off sending a RST until all pending data is ack'd"); 1631 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1632 SYSCTL_CHILDREN(rack_misc), 1633 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1634 &rack_sack_not_required, 1, 1635 "Do we allow rack to run on connections not supporting SACK"); 1636 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1637 SYSCTL_CHILDREN(rack_misc), 1638 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 1639 &rack_send_a_lot_in_prr, 1, 1640 "Send a lot in prr"); 1641 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1642 SYSCTL_CHILDREN(rack_misc), 1643 OID_AUTO, "autoscale", CTLFLAG_RW, 1644 &rack_autosndbuf_inc, 20, 1645 "What percentage should rack scale up its snd buffer by?"); 1646 1647 1648 /* Sack Attacker detection stuff */ 1649 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1650 SYSCTL_CHILDREN(rack_attack), 1651 OID_AUTO, "merge_out", CTLFLAG_RW, 1652 &rack_merge_out_sacks_on_attack, 0, 1653 "Do we merge the sendmap when we decide we are being attacked?"); 1654 1655 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1656 SYSCTL_CHILDREN(rack_attack), 1657 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 1658 &rack_highest_sack_thresh_seen, 0, 1659 "Highest sack to ack ratio seen"); 1660 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1661 SYSCTL_CHILDREN(rack_attack), 1662 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW, 1663 &rack_highest_move_thresh_seen, 0, 1664 "Highest move to non-move ratio seen"); 1665 rack_ack_total = counter_u64_alloc(M_WAITOK); 1666 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1667 SYSCTL_CHILDREN(rack_attack), 1668 OID_AUTO, "acktotal", CTLFLAG_RD, 1669 &rack_ack_total, 1670 "Total number of Ack's"); 1671 rack_express_sack = counter_u64_alloc(M_WAITOK); 1672 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1673 SYSCTL_CHILDREN(rack_attack), 1674 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1675 &rack_express_sack, 1676 
"Total expresss number of Sack's"); 1677 rack_sack_total = counter_u64_alloc(M_WAITOK); 1678 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1679 SYSCTL_CHILDREN(rack_attack), 1680 OID_AUTO, "sacktotal", CTLFLAG_RD, 1681 &rack_sack_total, 1682 "Total number of SACKs"); 1683 rack_move_none = counter_u64_alloc(M_WAITOK); 1684 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1685 SYSCTL_CHILDREN(rack_attack), 1686 OID_AUTO, "move_none", CTLFLAG_RD, 1687 &rack_move_none, 1688 "Total number of SACK index reuse of positions under threshold"); 1689 rack_move_some = counter_u64_alloc(M_WAITOK); 1690 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1691 SYSCTL_CHILDREN(rack_attack), 1692 OID_AUTO, "move_some", CTLFLAG_RD, 1693 &rack_move_some, 1694 "Total number of SACK index reuse of positions over threshold"); 1695 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 1696 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1697 SYSCTL_CHILDREN(rack_attack), 1698 OID_AUTO, "attacks", CTLFLAG_RD, 1699 &rack_sack_attacks_detected, 1700 "Total number of SACK attackers that had sack disabled"); 1701 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 1702 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1703 SYSCTL_CHILDREN(rack_attack), 1704 OID_AUTO, "reversed", CTLFLAG_RD, 1705 &rack_sack_attacks_reversed, 1706 "Total number of SACK attackers that were later determined false positive"); 1707 rack_sack_attacks_suspect = counter_u64_alloc(M_WAITOK); 1708 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1709 SYSCTL_CHILDREN(rack_attack), 1710 OID_AUTO, "suspect", CTLFLAG_RD, 1711 &rack_sack_attacks_suspect, 1712 "Total number of SACKs that triggered early detection"); 1713 1714 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 1715 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1716 SYSCTL_CHILDREN(rack_attack), 1717 OID_AUTO, "nextmerge", CTLFLAG_RD, 1718 &rack_sack_used_next_merge, 1719 "Total number of times we used the next merge"); 1720 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 1721 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1722 SYSCTL_CHILDREN(rack_attack), 1723 OID_AUTO, "prevmerge", CTLFLAG_RD, 1724 &rack_sack_used_prev_merge, 1725 "Total number of times we used the prev merge"); 1726 /* Counters */ 1727 rack_total_bytes = counter_u64_alloc(M_WAITOK); 1728 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1729 SYSCTL_CHILDREN(rack_counters), 1730 OID_AUTO, "totalbytes", CTLFLAG_RD, 1731 &rack_total_bytes, 1732 "Total number of bytes sent"); 1733 rack_fto_send = counter_u64_alloc(M_WAITOK); 1734 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1735 SYSCTL_CHILDREN(rack_counters), 1736 OID_AUTO, "fto_send", CTLFLAG_RD, 1737 &rack_fto_send, "Total number of rack_fast_output sends"); 1738 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK); 1739 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1740 SYSCTL_CHILDREN(rack_counters), 1741 OID_AUTO, "fto_rsm_send", CTLFLAG_RD, 1742 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends"); 1743 rack_nfto_resend = counter_u64_alloc(M_WAITOK); 1744 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1745 SYSCTL_CHILDREN(rack_counters), 1746 OID_AUTO, "nfto_resend", CTLFLAG_RD, 1747 &rack_nfto_resend, "Total number of rack_output retransmissions"); 1748 rack_non_fto_send = counter_u64_alloc(M_WAITOK); 1749 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1750 SYSCTL_CHILDREN(rack_counters), 1751 OID_AUTO, "nfto_send", CTLFLAG_RD, 1752 &rack_non_fto_send, "Total number of rack_output first sends"); 1753 rack_extended_rfo = counter_u64_alloc(M_WAITOK); 1754 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1755 
SYSCTL_CHILDREN(rack_counters), 1756 OID_AUTO, "rfo_extended", CTLFLAG_RD, 1757 &rack_extended_rfo, "Total number of times we extended rfo"); 1758 1759 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK); 1760 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1761 SYSCTL_CHILDREN(rack_counters), 1762 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD, 1763 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing"); 1764 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK); 1765 1766 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1767 SYSCTL_CHILDREN(rack_counters), 1768 OID_AUTO, "hwpace_lost", CTLFLAG_RD, 1769 &rack_hw_pace_lost, "Total number of times we lost hw pacing after it was initialized"); 1770 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 1771 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1772 SYSCTL_CHILDREN(rack_counters), 1773 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 1774 &rack_tlp_tot, 1775 "Total number of tail loss probe expirations"); 1776 rack_tlp_newdata = counter_u64_alloc(M_WAITOK); 1777 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1778 SYSCTL_CHILDREN(rack_counters), 1779 OID_AUTO, "tlp_new", CTLFLAG_RD, 1780 &rack_tlp_newdata, 1781 "Total number of tail loss probe sending new data"); 1782 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 1783 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1784 SYSCTL_CHILDREN(rack_counters), 1785 OID_AUTO, "tlp_retran", CTLFLAG_RD, 1786 &rack_tlp_retran, 1787 "Total number of tail loss probe sending retransmitted data"); 1788 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 1789 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1790 SYSCTL_CHILDREN(rack_counters), 1791 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 1792 &rack_tlp_retran_bytes, 1793 "Total bytes of tail loss probe sending retransmitted data"); 1794 rack_to_tot = counter_u64_alloc(M_WAITOK); 1795 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1796 SYSCTL_CHILDREN(rack_counters), 1797 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 1798 &rack_to_tot, 1799 "Total number of times the rack timeout expired"); 1800 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 1801 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1802 SYSCTL_CHILDREN(rack_counters), 1803 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 1804 &rack_saw_enobuf, 1805 "Total number of times a send returned ENOBUFS for non-hdwr paced connections"); 1806 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK); 1807 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1808 SYSCTL_CHILDREN(rack_counters), 1809 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD, 1810 &rack_saw_enobuf_hw, 1811 "Total number of times a send returned ENOBUFS for hdwr paced connections"); 1812 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 1813 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1814 SYSCTL_CHILDREN(rack_counters), 1815 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 1816 &rack_saw_enetunreach, 1817 "Total number of times a send received an ENETUNREACH"); 1818 rack_hot_alloc = counter_u64_alloc(M_WAITOK); 1819 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1820 SYSCTL_CHILDREN(rack_counters), 1821 OID_AUTO, "alloc_hot", CTLFLAG_RD, 1822 &rack_hot_alloc, 1823 "Total allocations from the top of our list"); 1824 rack_to_alloc = counter_u64_alloc(M_WAITOK); 1825 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1826 SYSCTL_CHILDREN(rack_counters), 1827 OID_AUTO, "allocs", CTLFLAG_RD, 1828 &rack_to_alloc, 1829 "Total allocations of tracking structures"); 1830 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 1831 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1832 SYSCTL_CHILDREN(rack_counters), 1833 OID_AUTO, "allochard", CTLFLAG_RD, 1834 &rack_to_alloc_hard, 1835 "Total
allocations done with sleeping the hard way"); 1836 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 1837 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1838 SYSCTL_CHILDREN(rack_counters), 1839 OID_AUTO, "allocemerg", CTLFLAG_RD, 1840 &rack_to_alloc_emerg, 1841 "Total allocations done from emergency cache"); 1842 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 1843 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1844 SYSCTL_CHILDREN(rack_counters), 1845 OID_AUTO, "alloc_limited", CTLFLAG_RD, 1846 &rack_to_alloc_limited, 1847 "Total allocations dropped due to limit"); 1848 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 1849 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1850 SYSCTL_CHILDREN(rack_counters), 1851 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 1852 &rack_alloc_limited_conns, 1853 "Connections with allocations dropped due to limit"); 1854 rack_split_limited = counter_u64_alloc(M_WAITOK); 1855 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1856 SYSCTL_CHILDREN(rack_counters), 1857 OID_AUTO, "split_limited", CTLFLAG_RD, 1858 &rack_split_limited, 1859 "Split allocations dropped due to limit"); 1860 rack_rxt_clamps_cwnd = counter_u64_alloc(M_WAITOK); 1861 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1862 SYSCTL_CHILDREN(rack_counters), 1863 OID_AUTO, "rxt_clamps_cwnd", CTLFLAG_RD, 1864 &rack_rxt_clamps_cwnd, 1865 "Number of times that excessive rxt clamped the cwnd down"); 1866 rack_rxt_clamps_cwnd_uniq = counter_u64_alloc(M_WAITOK); 1867 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1868 SYSCTL_CHILDREN(rack_counters), 1869 OID_AUTO, "rxt_clamps_cwnd_uniq", CTLFLAG_RD, 1870 &rack_rxt_clamps_cwnd_uniq, 1871 "Number of connections that have had excessive rxt clamped the cwnd down"); 1872 rack_persists_sends = counter_u64_alloc(M_WAITOK); 1873 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1874 SYSCTL_CHILDREN(rack_counters), 1875 OID_AUTO, "persist_sends", CTLFLAG_RD, 1876 &rack_persists_sends, 1877 "Number of times we sent a persist probe"); 1878 rack_persists_acks = counter_u64_alloc(M_WAITOK); 1879 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1880 SYSCTL_CHILDREN(rack_counters), 1881 OID_AUTO, "persist_acks", CTLFLAG_RD, 1882 &rack_persists_acks, 1883 "Number of times a persist probe was acked"); 1884 rack_persists_loss = counter_u64_alloc(M_WAITOK); 1885 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1886 SYSCTL_CHILDREN(rack_counters), 1887 OID_AUTO, "persist_loss", CTLFLAG_RD, 1888 &rack_persists_loss, 1889 "Number of times we detected a lost persist probe (no ack)"); 1890 rack_persists_lost_ends = counter_u64_alloc(M_WAITOK); 1891 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1892 SYSCTL_CHILDREN(rack_counters), 1893 OID_AUTO, "persist_loss_ends", CTLFLAG_RD, 1894 &rack_persists_lost_ends, 1895 "Number of lost persist probe (no ack) that the run ended with a PERSIST abort"); 1896 #ifdef INVARIANTS 1897 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK); 1898 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1899 SYSCTL_CHILDREN(rack_counters), 1900 OID_AUTO, "map_adjust_req", CTLFLAG_RD, 1901 &rack_adjust_map_bw, 1902 "Number of times we hit the case where the sb went up and down on a sendmap entry"); 1903 #endif 1904 rack_multi_single_eq = counter_u64_alloc(M_WAITOK); 1905 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1906 SYSCTL_CHILDREN(rack_counters), 1907 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD, 1908 &rack_multi_single_eq, 1909 "Number of compressed acks total represented"); 1910 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK); 1911 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1912 SYSCTL_CHILDREN(rack_counters), 1913 
OID_AUTO, "cmp_ack_not", CTLFLAG_RD, 1914 &rack_proc_non_comp_ack, 1915 "Number of non compresseds acks that we processed"); 1916 1917 1918 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 1919 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1920 SYSCTL_CHILDREN(rack_counters), 1921 OID_AUTO, "sack_long", CTLFLAG_RD, 1922 &rack_sack_proc_all, 1923 "Total times we had to walk whole list for sack processing"); 1924 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 1925 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1926 SYSCTL_CHILDREN(rack_counters), 1927 OID_AUTO, "sack_restart", CTLFLAG_RD, 1928 &rack_sack_proc_restart, 1929 "Total times we had to walk whole list due to a restart"); 1930 rack_sack_proc_short = counter_u64_alloc(M_WAITOK); 1931 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1932 SYSCTL_CHILDREN(rack_counters), 1933 OID_AUTO, "sack_short", CTLFLAG_RD, 1934 &rack_sack_proc_short, 1935 "Total times we took shortcut for sack processing"); 1936 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK); 1937 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1938 SYSCTL_CHILDREN(rack_attack), 1939 OID_AUTO, "skipacked", CTLFLAG_RD, 1940 &rack_sack_skipped_acked, 1941 "Total number of times we skipped previously sacked"); 1942 rack_sack_splits = counter_u64_alloc(M_WAITOK); 1943 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1944 SYSCTL_CHILDREN(rack_attack), 1945 OID_AUTO, "ofsplit", CTLFLAG_RD, 1946 &rack_sack_splits, 1947 "Total number of times we did the old fashion tree split"); 1948 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 1949 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1950 SYSCTL_CHILDREN(rack_counters), 1951 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 1952 &rack_input_idle_reduces, 1953 "Total number of idle reductions on input"); 1954 rack_collapsed_win_seen = counter_u64_alloc(M_WAITOK); 1955 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1956 SYSCTL_CHILDREN(rack_counters), 1957 OID_AUTO, "collapsed_win_seen", CTLFLAG_RD, 1958 &rack_collapsed_win_seen, 1959 "Total number of collapsed window events seen (where our window shrinks)"); 1960 1961 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 1962 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1963 SYSCTL_CHILDREN(rack_counters), 1964 OID_AUTO, "collapsed_win", CTLFLAG_RD, 1965 &rack_collapsed_win, 1966 "Total number of collapsed window events where we mark packets"); 1967 rack_collapsed_win_rxt = counter_u64_alloc(M_WAITOK); 1968 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1969 SYSCTL_CHILDREN(rack_counters), 1970 OID_AUTO, "collapsed_win_rxt", CTLFLAG_RD, 1971 &rack_collapsed_win_rxt, 1972 "Total number of packets that were retransmitted"); 1973 rack_collapsed_win_rxt_bytes = counter_u64_alloc(M_WAITOK); 1974 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1975 SYSCTL_CHILDREN(rack_counters), 1976 OID_AUTO, "collapsed_win_bytes", CTLFLAG_RD, 1977 &rack_collapsed_win_rxt_bytes, 1978 "Total number of bytes that were retransmitted"); 1979 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 1980 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1981 SYSCTL_CHILDREN(rack_counters), 1982 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 1983 &rack_try_scwnd, 1984 "Total number of scwnd attempts"); 1985 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1986 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1987 OID_AUTO, "outsize", CTLFLAG_RD, 1988 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 1989 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 1990 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, 
SYSCTL_CHILDREN(rack_sysctl_root), 1991 OID_AUTO, "opts", CTLFLAG_RD, 1992 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 1993 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 1994 SYSCTL_CHILDREN(rack_sysctl_root), 1995 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1996 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 1997 } 1998 1999 static uint32_t 2000 rc_init_window(struct tcp_rack *rack) 2001 { 2002 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 2003 2004 } 2005 2006 static uint64_t 2007 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 2008 { 2009 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 2010 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 2011 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 2012 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 2013 else 2014 return (rack->r_ctl.rc_fixed_pacing_rate_ca); 2015 } 2016 2017 static void 2018 rack_log_hybrid_bw(struct tcp_rack *rack, uint32_t seq, uint64_t cbw, uint64_t tim, 2019 uint64_t data, uint8_t mod, uint16_t aux, 2020 struct tcp_sendfile_track *cur, int line) 2021 { 2022 #ifdef TCP_REQUEST_TRK 2023 int do_log = 0; 2024 2025 /* 2026 * The rate cap one is noisy and only should come out when normal BB logging 2027 * is enabled, the other logs (not RATE_CAP and NOT CAP_CALC) only come out 2028 * once per chunk and make up the BBpoint that can be turned on by the client. 2029 */ 2030 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) { 2031 /* 2032 * The very noisy two need to only come out when 2033 * we have verbose logging on. 2034 */ 2035 if (rack_verbose_logging != 0) 2036 do_log = tcp_bblogging_on(rack->rc_tp); 2037 else 2038 do_log = 0; 2039 } else if (mod != HYBRID_LOG_BW_MEASURE) { 2040 /* 2041 * All other less noisy logs here except the measure which 2042 * also needs to come out on the point and the log. 2043 */ 2044 do_log = tcp_bblogging_on(rack->rc_tp); 2045 } else { 2046 do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING); 2047 } 2048 2049 if (do_log) { 2050 union tcp_log_stackspecific log; 2051 struct timeval tv; 2052 uint64_t lt_bw; 2053 2054 /* Convert our ms to a microsecond */ 2055 memset(&log, 0, sizeof(log)); 2056 2057 log.u_bbr.cwnd_gain = line; 2058 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2059 log.u_bbr.rttProp = tim; 2060 log.u_bbr.bw_inuse = cbw; 2061 log.u_bbr.delRate = rack_get_gp_est(rack); 2062 lt_bw = rack_get_lt_bw(rack); 2063 log.u_bbr.flex1 = seq; 2064 log.u_bbr.pacing_gain = aux; 2065 /* lt_bw = < flex3 | flex2 > */ 2066 log.u_bbr.flex2 = (uint32_t)(lt_bw & 0x00000000ffffffff); 2067 log.u_bbr.flex3 = (uint32_t)((lt_bw >> 32) & 0x00000000ffffffff); 2068 /* Record the last obtained us rtt in inflight */ 2069 if (cur == NULL) { 2070 /* Make sure we are looking at the right log if an overide comes in */ 2071 cur = rack->r_ctl.rc_last_sft; 2072 } 2073 if (rack->r_ctl.rack_rs.rs_flags != RACK_RTT_EMPTY) 2074 log.u_bbr.inflight = rack->r_ctl.rack_rs.rs_us_rtt; 2075 else { 2076 /* Use the last known rtt i.e. 
the rack-rtt */ 2077 log.u_bbr.inflight = rack->rc_rack_rtt; 2078 } 2079 if (cur != NULL) { 2080 uint64_t off; 2081 2082 log.u_bbr.cur_del_rate = cur->deadline; 2083 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) { 2084 /* start = < lost | pkt_epoch > */ 2085 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); 2086 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2087 log.u_bbr.flex6 = cur->start_seq; 2088 log.u_bbr.pkts_out = cur->end_seq; 2089 } else { 2090 /* start = < lost | pkt_epoch > */ 2091 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); 2092 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2093 /* end = < pkts_out | flex6 > */ 2094 log.u_bbr.flex6 = (uint32_t)(cur->end & 0x00000000ffffffff); 2095 log.u_bbr.pkts_out = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); 2096 } 2097 /* first_send = <lt_epoch | epoch> */ 2098 log.u_bbr.epoch = (uint32_t)(cur->first_send & 0x00000000ffffffff); 2099 log.u_bbr.lt_epoch = (uint32_t)((cur->first_send >> 32) & 0x00000000ffffffff); 2100 /* localtime = <delivered | applimited>*/ 2101 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); 2102 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 2103 #ifdef TCP_REQUEST_TRK 2104 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 2105 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 2106 #endif 2107 log.u_bbr.inhpts = 1; 2108 log.u_bbr.flex4 = (uint32_t)(rack->rc_tp->t_sndbytes - cur->sent_at_fs); 2109 log.u_bbr.flex5 = (uint32_t)(rack->rc_tp->t_snd_rxt_bytes - cur->rxt_at_fs); 2110 log.u_bbr.flex7 = (uint16_t)cur->hybrid_flags; 2111 } else { 2112 log.u_bbr.flex7 = 0xffff; 2113 log.u_bbr.cur_del_rate = 0xffffffffffffffff; 2114 } 2115 /* 2116 * Compose bbr_state to be a bit wise 0000ADHF 2117 * where A is the always_pace flag 2118 * where D is the dgp_on flag 2119 * where H is the hybrid_mode on flag 2120 * where F is the use_fixed_rate flag. 2121 */ 2122 log.u_bbr.bbr_state = rack->rc_always_pace; 2123 log.u_bbr.bbr_state <<= 1; 2124 log.u_bbr.bbr_state |= rack->dgp_on; 2125 log.u_bbr.bbr_state <<= 1; 2126 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 2127 log.u_bbr.bbr_state <<= 1; 2128 log.u_bbr.bbr_state |= rack->use_fixed_rate; 2129 log.u_bbr.flex8 = mod; 2130 tcp_log_event(rack->rc_tp, NULL, 2131 &rack->rc_inp->inp_socket->so_rcv, 2132 &rack->rc_inp->inp_socket->so_snd, 2133 TCP_HYBRID_PACING_LOG, 0, 2134 0, &log, false, NULL, __func__, __LINE__, &tv); 2135 2136 } 2137 #endif 2138 } 2139 2140 #ifdef TCP_REQUEST_TRK 2141 static void 2142 rack_log_hybrid_sends(struct tcp_rack *rack, struct tcp_sendfile_track *cur, int line) 2143 { 2144 if (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) { 2145 union tcp_log_stackspecific log; 2146 struct timeval tv; 2147 uint64_t off; 2148 2149 /* Convert our ms to a microsecond */ 2150 memset(&log, 0, sizeof(log)); 2151 2152 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2153 log.u_bbr.delRate = cur->sent_at_fs; 2154 2155 if ((cur->flags & TCP_TRK_TRACK_FLG_LSND) == 0) { 2156 /* 2157 * We did not get a new Rules Applied to set so 2158 * no overlapping send occured, this means the 2159 * current byte counts are correct. 2160 */ 2161 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 2162 log.u_bbr.rttProp = rack->rc_tp->t_snd_rxt_bytes; 2163 } else { 2164 /* 2165 * Overlapping send case, we switched to a new 2166 * send and did a rules applied. 
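 * In that case the sent_at_ls/rxt_at_ls snapshots taken when the rules were
 * applied hold the byte counts we want, rather than the live
 * tp->t_sndbytes/t_snd_rxt_bytes totals used in the branch above.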
2167 */ 2168 log.u_bbr.cur_del_rate = cur->sent_at_ls; 2169 log.u_bbr.rttProp = cur->rxt_at_ls; 2170 } 2171 log.u_bbr.bw_inuse = cur->rxt_at_fs; 2172 log.u_bbr.cwnd_gain = line; 2173 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 2174 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 2175 /* start = < flex1 | flex2 > */ 2176 log.u_bbr.flex2 = (uint32_t)(cur->start & 0x00000000ffffffff); 2177 log.u_bbr.flex1 = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2178 /* end = < flex3 | flex4 > */ 2179 log.u_bbr.flex4 = (uint32_t)(cur->end & 0x00000000ffffffff); 2180 log.u_bbr.flex3 = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); 2181 2182 /* localtime = <delivered | applimited>*/ 2183 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); 2184 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 2185 /* client timestamp = <lt_epoch | epoch>*/ 2186 log.u_bbr.epoch = (uint32_t)(cur->timestamp & 0x00000000ffffffff); 2187 log.u_bbr.lt_epoch = (uint32_t)((cur->timestamp >> 32) & 0x00000000ffffffff); 2188 /* now set all the flags in */ 2189 log.u_bbr.pkts_out = cur->hybrid_flags; 2190 log.u_bbr.lost = cur->playout_ms; 2191 log.u_bbr.flex6 = cur->flags; 2192 /* 2193 * Last send time = <flex5 | pkt_epoch> note we do not distinguish cases 2194 * where a false retransmit occurred so first_send <-> lastsend may 2195 * include a longer time than it actually took if we have a false rxt. 2196 */ 2197 log.u_bbr.pkt_epoch = (uint32_t)(rack->r_ctl.last_tmit_time_acked & 0x00000000ffffffff); 2198 log.u_bbr.flex5 = (uint32_t)((rack->r_ctl.last_tmit_time_acked >> 32) & 0x00000000ffffffff); 2199 /* 2200 * Compose bbr_state to be a bit wise 0000ADHF 2201 * where A is the always_pace flag 2202 * where D is the dgp_on flag 2203 * where H is the hybrid_mode on flag 2204 * where F is the use_fixed_rate flag.
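 * For illustration (an assumed combination): always_pace=1, dgp_on=0,
 * rc_hybrid_mode=1, use_fixed_rate=0 composes to bbr_state = 0b1010 = 0xa.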
2205 */ 2206 log.u_bbr.bbr_state = rack->rc_always_pace; 2207 log.u_bbr.bbr_state <<= 1; 2208 log.u_bbr.bbr_state |= rack->dgp_on; 2209 log.u_bbr.bbr_state <<= 1; 2210 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 2211 log.u_bbr.bbr_state <<= 1; 2212 log.u_bbr.bbr_state |= rack->use_fixed_rate; 2213 2214 log.u_bbr.flex8 = HYBRID_LOG_SENT_LOST; 2215 tcp_log_event(rack->rc_tp, NULL, 2216 &rack->rc_inp->inp_socket->so_rcv, 2217 &rack->rc_inp->inp_socket->so_snd, 2218 TCP_HYBRID_PACING_LOG, 0, 2219 0, &log, false, NULL, __func__, __LINE__, &tv); 2220 } 2221 } 2222 #endif 2223 2224 static inline uint64_t 2225 rack_compensate_for_linerate(struct tcp_rack *rack, uint64_t bw) 2226 { 2227 uint64_t ret_bw, ether; 2228 uint64_t u_segsiz; 2229 2230 ether = rack->rc_tp->t_maxseg + sizeof(struct tcphdr); 2231 if (rack->r_is_v6){ 2232 #ifdef INET6 2233 ether += sizeof(struct ip6_hdr); 2234 #endif 2235 ether += 14; /* eheader size 6+6+2 */ 2236 } else { 2237 #ifdef INET 2238 ether += sizeof(struct ip); 2239 #endif 2240 ether += 14; /* eheader size 6+6+2 */ 2241 } 2242 u_segsiz = (uint64_t)min(ctf_fixed_maxseg(rack->rc_tp), rack->r_ctl.rc_pace_min_segs); 2243 ret_bw = bw; 2244 ret_bw *= ether; 2245 ret_bw /= u_segsiz; 2246 return (ret_bw); 2247 } 2248 2249 static void 2250 rack_rate_cap_bw(struct tcp_rack *rack, uint64_t *bw, int *capped) 2251 { 2252 #ifdef TCP_REQUEST_TRK 2253 struct timeval tv; 2254 uint64_t timenow, timeleft, lenleft, lengone, calcbw; 2255 #endif 2256 2257 if (rack->r_ctl.bw_rate_cap == 0) 2258 return; 2259 #ifdef TCP_REQUEST_TRK 2260 if (rack->rc_catch_up && rack->rc_hybrid_mode && 2261 (rack->r_ctl.rc_last_sft != NULL)) { 2262 /* 2263 * We have a dynamic cap. The original target 2264 * is in bw_rate_cap, but we need to look at 2265 * how long it is until we hit the deadline. 2266 */ 2267 struct tcp_sendfile_track *ent; 2268 2269 ent = rack->r_ctl.rc_last_sft; 2270 microuptime(&tv); 2271 timenow = tcp_tv_to_lusectick(&tv); 2272 if (timenow >= ent->deadline) { 2273 /* No time left we do DGP only */ 2274 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2275 0, 0, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__); 2276 rack->r_ctl.bw_rate_cap = 0; 2277 return; 2278 } 2279 /* We have the time */ 2280 timeleft = rack->r_ctl.rc_last_sft->deadline - timenow; 2281 if (timeleft < HPTS_MSEC_IN_SEC) { 2282 /* If there is less than a ms left just use DGPs rate */ 2283 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2284 0, timeleft, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__); 2285 rack->r_ctl.bw_rate_cap = 0; 2286 return; 2287 } 2288 /* 2289 * Now lets find the amount of data left to send. 2290 * 2291 * Now ideally we want to use the end_seq to figure out how much more 2292 * but it might not be possible (only if we have the TRACK_FG_COMP on the entry.. 2293 */ 2294 if (ent->flags & TCP_TRK_TRACK_FLG_COMP) { 2295 if (SEQ_GT(ent->end_seq, rack->rc_tp->snd_una)) 2296 lenleft = ent->end_seq - rack->rc_tp->snd_una; 2297 else { 2298 /* TSNH, we should catch it at the send */ 2299 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2300 0, timeleft, 0, HYBRID_LOG_CAPERROR, 0, ent, __LINE__); 2301 rack->r_ctl.bw_rate_cap = 0; 2302 return; 2303 } 2304 } else { 2305 /* 2306 * The hard way, figure out how much is gone and then 2307 * take that away from the total the client asked for 2308 * (thats off by tls overhead if this is tls). 
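 * For illustration (numbers assumed): if the client asked for
 * end - start = 1,000,000 bytes and snd_una has advanced 400,000 bytes
 * past start_seq, then lengone = 400,000 and lenleft = 600,000.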
2309 */ 2310 if (SEQ_GT(rack->rc_tp->snd_una, ent->start_seq)) 2311 lengone = rack->rc_tp->snd_una - ent->start_seq; 2312 else 2313 lengone = 0; 2314 if (lengone < (ent->end - ent->start)) 2315 lenleft = (ent->end - ent->start) - lengone; 2316 else { 2317 /* TSNH, we should catch it at the send */ 2318 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2319 0, timeleft, lengone, HYBRID_LOG_CAPERROR, 0, ent, __LINE__); 2320 rack->r_ctl.bw_rate_cap = 0; 2321 return; 2322 } 2323 } 2324 if (lenleft == 0) { 2325 /* We have it all sent */ 2326 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2327 0, timeleft, lenleft, HYBRID_LOG_ALLSENT, 0, ent, __LINE__); 2328 if (rack->r_ctl.bw_rate_cap) 2329 goto normal_ratecap; 2330 else 2331 return; 2332 } 2333 calcbw = lenleft * HPTS_USEC_IN_SEC; 2334 calcbw /= timeleft; 2335 /* Now we must compensate for IP/TCP overhead */ 2336 calcbw = rack_compensate_for_linerate(rack, calcbw); 2337 /* Update the bit rate cap */ 2338 rack->r_ctl.bw_rate_cap = calcbw; 2339 if ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && 2340 (rack_hybrid_allow_set_maxseg == 1) && 2341 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { 2342 /* Lets set in a smaller mss possibly here to match our rate-cap */ 2343 uint32_t orig_max; 2344 2345 orig_max = rack->r_ctl.rc_pace_max_segs; 2346 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; 2347 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, calcbw, ctf_fixed_maxseg(rack->rc_tp)); 2348 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5); 2349 } 2350 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2351 calcbw, timeleft, lenleft, HYBRID_LOG_CAP_CALC, 0, ent, __LINE__); 2352 if ((calcbw > 0) && (*bw > calcbw)) { 2353 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2354 *bw, ent->deadline, lenleft, HYBRID_LOG_RATE_CAP, 0, ent, __LINE__); 2355 *capped = 1; 2356 *bw = calcbw; 2357 } 2358 return; 2359 } 2360 normal_ratecap: 2361 #endif 2362 if ((rack->r_ctl.bw_rate_cap > 0) && (*bw > rack->r_ctl.bw_rate_cap)) { 2363 #ifdef TCP_REQUEST_TRK 2364 if (rack->rc_hybrid_mode && 2365 rack->rc_catch_up && 2366 (rack->r_ctl.rc_last_sft != NULL) && 2367 (rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && 2368 (rack_hybrid_allow_set_maxseg == 1) && 2369 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { 2370 /* Lets set in a smaller mss possibly here to match our rate-cap */ 2371 uint32_t orig_max; 2372 2373 orig_max = rack->r_ctl.rc_pace_max_segs; 2374 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; 2375 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, rack->r_ctl.bw_rate_cap, ctf_fixed_maxseg(rack->rc_tp)); 2376 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5); 2377 } 2378 #endif 2379 *capped = 1; 2380 *bw = rack->r_ctl.bw_rate_cap; 2381 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2382 *bw, 0, 0, 2383 HYBRID_LOG_RATE_CAP, 1, NULL, __LINE__); 2384 } 2385 } 2386 2387 static uint64_t 2388 rack_get_gp_est(struct tcp_rack *rack) 2389 { 2390 uint64_t bw, lt_bw, ret_bw; 2391 2392 if (rack->rc_gp_filled == 0) { 2393 /* 2394 * We have yet no b/w measurement, 2395 * if we have a user set initial bw 2396 * return it. If we don't have that and 2397 * we have an srtt, use the tcp IW (10) to 2398 * calculate a fictional b/w over the SRTT 2399 * which is more or less a guess. 
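 * For illustration (segment size assumed): an IW of 10 * 1448 = 14480
 * bytes over an srtt of 20000 usec gives roughly
 * 14480 * 1000000 / 20000 = 724000 bytes/sec.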
Note 2400 * we don't use our IW from rack on purpose 2401 * so if we have like IW=30, we are not 2402 * calculating a "huge" b/w. 2403 */ 2404 uint64_t srtt; 2405 2406 if (rack->dis_lt_bw == 1) 2407 lt_bw = 0; 2408 else 2409 lt_bw = rack_get_lt_bw(rack); 2410 if (lt_bw) { 2411 /* 2412 * No goodput bw but a long-term b/w does exist 2413 * lets use that. 2414 */ 2415 ret_bw = lt_bw; 2416 goto compensate; 2417 } 2418 if (rack->r_ctl.init_rate) 2419 return (rack->r_ctl.init_rate); 2420 2421 /* Ok lets come up with the IW guess, if we have a srtt */ 2422 if (rack->rc_tp->t_srtt == 0) { 2423 /* 2424 * Go with old pacing method 2425 * i.e. burst mitigation only. 2426 */ 2427 return (0); 2428 } 2429 /* Ok lets get the initial TCP win (not racks) */ 2430 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 2431 srtt = (uint64_t)rack->rc_tp->t_srtt; 2432 bw *= (uint64_t)USECS_IN_SECOND; 2433 bw /= srtt; 2434 ret_bw = bw; 2435 goto compensate; 2436 2437 } 2438 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 2439 /* Averaging is done, we can return the value */ 2440 bw = rack->r_ctl.gp_bw; 2441 } else { 2442 /* Still doing initial average must calculate */ 2443 bw = rack->r_ctl.gp_bw / max(rack->r_ctl.num_measurements, 1); 2444 } 2445 if (rack->dis_lt_bw) { 2446 /* We are not using lt-bw */ 2447 ret_bw = bw; 2448 goto compensate; 2449 } 2450 lt_bw = rack_get_lt_bw(rack); 2451 if (lt_bw == 0) { 2452 /* If we don't have one then equate it to the gp_bw */ 2453 lt_bw = rack->r_ctl.gp_bw; 2454 } 2455 if (rack->use_lesser_lt_bw) { 2456 if (lt_bw < bw) 2457 ret_bw = lt_bw; 2458 else 2459 ret_bw = bw; 2460 } else { 2461 if (lt_bw > bw) 2462 ret_bw = lt_bw; 2463 else 2464 ret_bw = bw; 2465 } 2466 /* 2467 * Now lets compensate based on the TCP/IP overhead. Our 2468 * Goodput estimate does not include this so we must pace out 2469 * a bit faster since our pacing calculations do. The pacing 2470 * calculations use the base ETHERNET_SEGMENT_SIZE and the segsiz 2471 * we are using to do this, so we do that here in the opposite 2472 * direction as well. This means that if we are tunneled and the 2473 * segsiz is say 1200 bytes we will get quite a boost, but its 2474 * compensated for in the pacing time the opposite way. 2475 */ 2476 compensate: 2477 ret_bw = rack_compensate_for_linerate(rack, ret_bw); 2478 return(ret_bw); 2479 } 2480 2481 2482 static uint64_t 2483 rack_get_bw(struct tcp_rack *rack) 2484 { 2485 uint64_t bw; 2486 2487 if (rack->use_fixed_rate) { 2488 /* Return the fixed pacing rate */ 2489 return (rack_get_fixed_pacing_bw(rack)); 2490 } 2491 bw = rack_get_gp_est(rack); 2492 return (bw); 2493 } 2494 2495 static uint16_t 2496 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 2497 { 2498 if (rack->use_fixed_rate) { 2499 return (100); 2500 } else if (rack->in_probe_rtt && (rsm == NULL)) 2501 return (rack->r_ctl.rack_per_of_gp_probertt); 2502 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 2503 rack->r_ctl.rack_per_of_gp_rec)) { 2504 if (rsm) { 2505 /* a retransmission always use the recovery rate */ 2506 return (rack->r_ctl.rack_per_of_gp_rec); 2507 } else if (rack->rack_rec_nonrxt_use_cr) { 2508 /* Directed to use the configured rate */ 2509 goto configured_rate; 2510 } else if (rack->rack_no_prr && 2511 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 2512 /* No PRR, lets just use the b/w estimate only */ 2513 return (100); 2514 } else { 2515 /* 2516 * Here we may have a non-retransmit but we 2517 * have no overrides, so just use the recovery 2518 * rate (prr is in effect). 
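 * The value returned here is a percentage that rack_get_output_bw()
 * applies as bw * gain / 100, so, for illustration, a recovery setting
 * of 150 (an assumed example value) paces at 1.5x the b/w estimate.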
2519 */ 2520 return (rack->r_ctl.rack_per_of_gp_rec); 2521 } 2522 } 2523 configured_rate: 2524 /* For the configured rate we look at our cwnd vs the ssthresh */ 2525 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 2526 return (rack->r_ctl.rack_per_of_gp_ss); 2527 else 2528 return (rack->r_ctl.rack_per_of_gp_ca); 2529 } 2530 2531 static void 2532 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6) 2533 { 2534 /* 2535 * Types of logs (mod value) 2536 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. 2537 * 2 = a dsack round begins, persist is reset to 16. 2538 * 3 = a dsack round ends 2539 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh 2540 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack 2541 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. 2542 */ 2543 if (tcp_bblogging_on(rack->rc_tp)) { 2544 union tcp_log_stackspecific log; 2545 struct timeval tv; 2546 2547 memset(&log, 0, sizeof(log)); 2548 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; 2549 log.u_bbr.flex1 <<= 1; 2550 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; 2551 log.u_bbr.flex1 <<= 1; 2552 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; 2553 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; 2554 log.u_bbr.flex3 = rack->r_ctl.num_dsack; 2555 log.u_bbr.flex4 = flex4; 2556 log.u_bbr.flex5 = flex5; 2557 log.u_bbr.flex6 = flex6; 2558 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; 2559 log.u_bbr.flex8 = mod; 2560 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2561 log.u_bbr.epoch = rack->r_ctl.current_round; 2562 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2563 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2564 &rack->rc_inp->inp_socket->so_rcv, 2565 &rack->rc_inp->inp_socket->so_snd, 2566 RACK_DSACK_HANDLING, 0, 2567 0, &log, false, &tv); 2568 } 2569 } 2570 2571 static void 2572 rack_log_hdwr_pacing(struct tcp_rack *rack, 2573 uint64_t rate, uint64_t hw_rate, int line, 2574 int error, uint16_t mod) 2575 { 2576 if (tcp_bblogging_on(rack->rc_tp)) { 2577 union tcp_log_stackspecific log; 2578 struct timeval tv; 2579 const struct ifnet *ifp; 2580 uint64_t ifp64; 2581 2582 memset(&log, 0, sizeof(log)); 2583 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 2584 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 2585 if (rack->r_ctl.crte) { 2586 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 2587 } else if (rack->rc_inp->inp_route.ro_nh && 2588 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 2589 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 2590 } else 2591 ifp = NULL; 2592 if (ifp) { 2593 ifp64 = (uintptr_t)ifp; 2594 log.u_bbr.flex3 = ((ifp64 >> 32) & 0x00000000ffffffff); 2595 log.u_bbr.flex4 = (ifp64 & 0x00000000ffffffff); 2596 } 2597 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2598 log.u_bbr.bw_inuse = rate; 2599 log.u_bbr.flex5 = line; 2600 log.u_bbr.flex6 = error; 2601 log.u_bbr.flex7 = mod; 2602 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 2603 log.u_bbr.flex8 = rack->use_fixed_rate; 2604 log.u_bbr.flex8 <<= 1; 2605 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 2606 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 2607 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 2608 if (rack->r_ctl.crte) 2609 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 2610 else 2611 log.u_bbr.cur_del_rate = 0; 2612 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 2613 log.u_bbr.epoch = rack->r_ctl.current_round; 2614 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2615 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2616 
&rack->rc_inp->inp_socket->so_rcv, 2617 &rack->rc_inp->inp_socket->so_snd, 2618 BBR_LOG_HDWR_PACE, 0, 2619 0, &log, false, &tv); 2620 } 2621 } 2622 2623 static uint64_t 2624 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) 2625 { 2626 /* 2627 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 2628 */ 2629 uint64_t bw_est, high_rate; 2630 uint64_t gain; 2631 2632 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2633 bw_est = bw * gain; 2634 bw_est /= (uint64_t)100; 2635 /* Never fall below the minimum (def 64kbps) */ 2636 if (bw_est < RACK_MIN_BW) 2637 bw_est = RACK_MIN_BW; 2638 if (rack->r_rack_hw_rate_caps) { 2639 /* Rate caps are in place */ 2640 if (rack->r_ctl.crte != NULL) { 2641 /* We have a hdwr rate already */ 2642 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2643 if (bw_est >= high_rate) { 2644 /* We are capping bw at the highest rate table entry */ 2645 if (rack_hw_rate_cap_per && 2646 (((high_rate * (100 + rack_hw_rate_cap_per)) / 100) < bw_est)) { 2647 rack->r_rack_hw_rate_caps = 0; 2648 goto done; 2649 } 2650 rack_log_hdwr_pacing(rack, 2651 bw_est, high_rate, __LINE__, 2652 0, 3); 2653 bw_est = high_rate; 2654 if (capped) 2655 *capped = 1; 2656 } 2657 } else if ((rack->rack_hdrw_pacing == 0) && 2658 (rack->rack_hdw_pace_ena) && 2659 (rack->rack_attempt_hdwr_pace == 0) && 2660 (rack->rc_inp->inp_route.ro_nh != NULL) && 2661 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2662 /* 2663 * Special case, we have not yet attempted hardware 2664 * pacing, and yet we may, when we do, find out if we are 2665 * above the highest rate. We need to know the maxbw for the interface 2666 * in question (if it supports ratelimiting). We get back 2667 * a 0, if the interface is not found in the RL lists. 2668 */ 2669 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2670 if (high_rate) { 2671 /* Yep, we have a rate is it above this rate? */ 2672 if (bw_est > high_rate) { 2673 bw_est = high_rate; 2674 if (capped) 2675 *capped = 1; 2676 } 2677 } 2678 } 2679 } 2680 done: 2681 return (bw_est); 2682 } 2683 2684 static void 2685 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2686 { 2687 if (tcp_bblogging_on(rack->rc_tp)) { 2688 union tcp_log_stackspecific log; 2689 struct timeval tv; 2690 2691 if ((mod != 1) && (rack_verbose_logging == 0)) { 2692 /* 2693 * We get 3 values currently for mod 2694 * 1 - We are retransmitting and this tells the reason. 2695 * 2 - We are clearing a dup-ack count. 2696 * 3 - We are incrementing a dup-ack count. 2697 * 2698 * The clear/increment are only logged 2699 * if you have BBverbose on. 
2700 */ 2701 return; 2702 } 2703 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2704 log.u_bbr.flex1 = tsused; 2705 log.u_bbr.flex2 = thresh; 2706 log.u_bbr.flex3 = rsm->r_flags; 2707 log.u_bbr.flex4 = rsm->r_dupack; 2708 log.u_bbr.flex5 = rsm->r_start; 2709 log.u_bbr.flex6 = rsm->r_end; 2710 log.u_bbr.flex8 = mod; 2711 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2712 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2713 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2714 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2715 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2716 log.u_bbr.pacing_gain = rack->r_must_retran; 2717 log.u_bbr.epoch = rack->r_ctl.current_round; 2718 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2719 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2720 &rack->rc_inp->inp_socket->so_rcv, 2721 &rack->rc_inp->inp_socket->so_snd, 2722 BBR_LOG_SETTINGS_CHG, 0, 2723 0, &log, false, &tv); 2724 } 2725 } 2726 2727 static void 2728 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2729 { 2730 if (tcp_bblogging_on(rack->rc_tp)) { 2731 union tcp_log_stackspecific log; 2732 struct timeval tv; 2733 2734 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2735 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2736 log.u_bbr.flex2 = to; 2737 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2738 log.u_bbr.flex4 = slot; 2739 log.u_bbr.flex5 = rack->rc_tp->t_hpts_slot; 2740 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2741 log.u_bbr.flex7 = rack->rc_in_persist; 2742 log.u_bbr.flex8 = which; 2743 if (rack->rack_no_prr) 2744 log.u_bbr.pkts_out = 0; 2745 else 2746 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2747 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2748 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2749 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2750 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2751 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2752 log.u_bbr.pacing_gain = rack->r_must_retran; 2753 log.u_bbr.cwnd_gain = rack->rack_deferred_inited; 2754 log.u_bbr.pkt_epoch = rack->rc_has_collapsed; 2755 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2756 log.u_bbr.lost = rack_rto_min; 2757 log.u_bbr.epoch = rack->r_ctl.roundends; 2758 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2759 log.u_bbr.bw_inuse <<= 32; 2760 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2761 log.u_bbr.applimited = rack->rc_tp->t_flags2; 2762 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2763 &rack->rc_inp->inp_socket->so_rcv, 2764 &rack->rc_inp->inp_socket->so_snd, 2765 BBR_LOG_TIMERSTAR, 0, 2766 0, &log, false, &tv); 2767 } 2768 } 2769 2770 static void 2771 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2772 { 2773 if (tcp_bblogging_on(rack->rc_tp)) { 2774 union tcp_log_stackspecific log; 2775 struct timeval tv; 2776 2777 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2778 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2779 log.u_bbr.flex8 = to_num; 2780 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2781 log.u_bbr.flex2 = rack->rc_rack_rtt; 2782 if (rsm == NULL) 2783 log.u_bbr.flex3 = 0; 2784 else 2785 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2786 if (rack->rack_no_prr) 2787 log.u_bbr.flex5 = 0; 2788 else 2789 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2790 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2791 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2792 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2793 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2794 
log.u_bbr.pacing_gain = rack->r_must_retran; 2795 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2796 log.u_bbr.bw_inuse <<= 32; 2797 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2798 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2799 &rack->rc_inp->inp_socket->so_rcv, 2800 &rack->rc_inp->inp_socket->so_snd, 2801 BBR_LOG_RTO, 0, 2802 0, &log, false, &tv); 2803 } 2804 } 2805 2806 static void 2807 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, 2808 struct rack_sendmap *prev, 2809 struct rack_sendmap *rsm, 2810 struct rack_sendmap *next, 2811 int flag, uint32_t th_ack, int line) 2812 { 2813 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2814 union tcp_log_stackspecific log; 2815 struct timeval tv; 2816 2817 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2818 log.u_bbr.flex8 = flag; 2819 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2820 log.u_bbr.cur_del_rate = (uintptr_t)prev; 2821 log.u_bbr.delRate = (uintptr_t)rsm; 2822 log.u_bbr.rttProp = (uintptr_t)next; 2823 log.u_bbr.flex7 = 0; 2824 if (prev) { 2825 log.u_bbr.flex1 = prev->r_start; 2826 log.u_bbr.flex2 = prev->r_end; 2827 log.u_bbr.flex7 |= 0x4; 2828 } 2829 if (rsm) { 2830 log.u_bbr.flex3 = rsm->r_start; 2831 log.u_bbr.flex4 = rsm->r_end; 2832 log.u_bbr.flex7 |= 0x2; 2833 } 2834 if (next) { 2835 log.u_bbr.flex5 = next->r_start; 2836 log.u_bbr.flex6 = next->r_end; 2837 log.u_bbr.flex7 |= 0x1; 2838 } 2839 log.u_bbr.applimited = line; 2840 log.u_bbr.pkts_out = th_ack; 2841 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2842 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2843 if (rack->rack_no_prr) 2844 log.u_bbr.lost = 0; 2845 else 2846 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2847 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2848 log.u_bbr.bw_inuse <<= 32; 2849 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2850 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2851 &rack->rc_inp->inp_socket->so_rcv, 2852 &rack->rc_inp->inp_socket->so_snd, 2853 TCP_LOG_MAPCHG, 0, 2854 0, &log, false, &tv); 2855 } 2856 } 2857 2858 static void 2859 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2860 struct rack_sendmap *rsm, int conf) 2861 { 2862 if (tcp_bblogging_on(tp)) { 2863 union tcp_log_stackspecific log; 2864 struct timeval tv; 2865 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2866 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2867 log.u_bbr.flex1 = t; 2868 log.u_bbr.flex2 = len; 2869 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2870 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2871 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2872 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2873 log.u_bbr.flex7 = conf; 2874 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2875 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2876 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2877 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2878 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2879 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2880 if (rsm) { 2881 log.u_bbr.pkt_epoch = rsm->r_start; 2882 log.u_bbr.lost = rsm->r_end; 2883 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2884 /* We loose any upper of the 24 bits */ 2885 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; 2886 } else { 2887 /* Its a SYN */ 2888 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2889 log.u_bbr.lost = 0; 2890 log.u_bbr.cwnd_gain = 0; 2891 log.u_bbr.pacing_gain = 0; 2892 } 2893 /* Write out general bits of interest rrs here */ 2894 log.u_bbr.use_lt_bw = 
rack->rc_highly_buffered; 2895 log.u_bbr.use_lt_bw <<= 1; 2896 log.u_bbr.use_lt_bw |= rack->forced_ack; 2897 log.u_bbr.use_lt_bw <<= 1; 2898 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2899 log.u_bbr.use_lt_bw <<= 1; 2900 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2901 log.u_bbr.use_lt_bw <<= 1; 2902 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2903 log.u_bbr.use_lt_bw <<= 1; 2904 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 2905 log.u_bbr.use_lt_bw <<= 1; 2906 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2907 log.u_bbr.use_lt_bw <<= 1; 2908 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; 2909 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; 2910 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; 2911 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; 2912 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; 2913 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; 2914 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 2915 log.u_bbr.bw_inuse <<= 32; 2916 if (rsm) 2917 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 2918 TCP_LOG_EVENTP(tp, NULL, 2919 &rack->rc_inp->inp_socket->so_rcv, 2920 &rack->rc_inp->inp_socket->so_snd, 2921 BBR_LOG_BBRRTT, 0, 2922 0, &log, false, &tv); 2923 2924 2925 } 2926 } 2927 2928 static void 2929 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 2930 { 2931 /* 2932 * Log the rtt sample we are 2933 * applying to the srtt algorithm in 2934 * useconds. 2935 */ 2936 if (tcp_bblogging_on(rack->rc_tp)) { 2937 union tcp_log_stackspecific log; 2938 struct timeval tv; 2939 2940 /* Convert our ms to a microsecond */ 2941 memset(&log, 0, sizeof(log)); 2942 log.u_bbr.flex1 = rtt; 2943 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2944 log.u_bbr.flex7 = 1; 2945 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2946 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2947 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2948 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2949 log.u_bbr.pacing_gain = rack->r_must_retran; 2950 /* 2951 * We capture in delRate the upper 32 bits as 2952 * the confidence level we had declared, and the 2953 * lower 32 bits as the actual RTT using the arrival 2954 * timestamp. 
2955 */ 2956 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 2957 log.u_bbr.delRate <<= 32; 2958 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 2959 /* Lets capture all the things that make up t_rtxcur */ 2960 log.u_bbr.applimited = rack_rto_min; 2961 log.u_bbr.epoch = rack_rto_max; 2962 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 2963 log.u_bbr.lost = rack_rto_min; 2964 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 2965 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 2966 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 2967 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 2968 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 2969 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2970 &rack->rc_inp->inp_socket->so_rcv, 2971 &rack->rc_inp->inp_socket->so_snd, 2972 TCP_LOG_RTT, 0, 2973 0, &log, false, &tv); 2974 } 2975 } 2976 2977 static void 2978 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 2979 { 2980 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2981 union tcp_log_stackspecific log; 2982 struct timeval tv; 2983 2984 /* Convert our ms to a microsecond */ 2985 memset(&log, 0, sizeof(log)); 2986 log.u_bbr.flex1 = rtt; 2987 log.u_bbr.flex2 = send_time; 2988 log.u_bbr.flex3 = ack_time; 2989 log.u_bbr.flex4 = where; 2990 log.u_bbr.flex7 = 2; 2991 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2992 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2993 log.u_bbr.bw_inuse <<= 32; 2994 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2995 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2996 &rack->rc_inp->inp_socket->so_rcv, 2997 &rack->rc_inp->inp_socket->so_snd, 2998 TCP_LOG_RTT, 0, 2999 0, &log, false, &tv); 3000 } 3001 } 3002 3003 3004 static void 3005 rack_log_rtt_sendmap(struct tcp_rack *rack, uint32_t idx, uint64_t tsv, uint32_t tsecho) 3006 { 3007 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3008 union tcp_log_stackspecific log; 3009 struct timeval tv; 3010 3011 /* Convert our ms to a microsecond */ 3012 memset(&log, 0, sizeof(log)); 3013 log.u_bbr.flex1 = idx; 3014 log.u_bbr.flex2 = rack_ts_to_msec(tsv); 3015 log.u_bbr.flex3 = tsecho; 3016 log.u_bbr.flex7 = 3; 3017 log.u_bbr.rttProp = tsv; 3018 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3019 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3020 log.u_bbr.bw_inuse <<= 32; 3021 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3022 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3023 &rack->rc_inp->inp_socket->so_rcv, 3024 &rack->rc_inp->inp_socket->so_snd, 3025 TCP_LOG_RTT, 0, 3026 0, &log, false, &tv); 3027 } 3028 } 3029 3030 3031 static inline void 3032 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 3033 { 3034 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3035 union tcp_log_stackspecific log; 3036 struct timeval tv; 3037 3038 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3039 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3040 log.u_bbr.flex1 = line; 3041 log.u_bbr.flex2 = tick; 3042 log.u_bbr.flex3 = tp->t_maxunacktime; 3043 log.u_bbr.flex4 = tp->t_acktime; 3044 log.u_bbr.flex8 = event; 3045 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3046 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3047 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3048 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3049 log.u_bbr.pacing_gain = rack->r_must_retran; 3050 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3051 log.u_bbr.bw_inuse <<= 32; 3052 log.u_bbr.bw_inuse |= 
rack->r_ctl.rc_considered_lost; 3053 TCP_LOG_EVENTP(tp, NULL, 3054 &rack->rc_inp->inp_socket->so_rcv, 3055 &rack->rc_inp->inp_socket->so_snd, 3056 BBR_LOG_PROGRESS, 0, 3057 0, &log, false, &tv); 3058 } 3059 } 3060 3061 static void 3062 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv, int line) 3063 { 3064 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3065 union tcp_log_stackspecific log; 3066 3067 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3068 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3069 log.u_bbr.flex1 = slot; 3070 if (rack->rack_no_prr) 3071 log.u_bbr.flex2 = 0; 3072 else 3073 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 3074 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3075 log.u_bbr.flex6 = line; 3076 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 3077 log.u_bbr.flex8 = rack->rc_in_persist; 3078 log.u_bbr.timeStamp = cts; 3079 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3080 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3081 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3082 log.u_bbr.pacing_gain = rack->r_must_retran; 3083 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3084 &rack->rc_inp->inp_socket->so_rcv, 3085 &rack->rc_inp->inp_socket->so_snd, 3086 BBR_LOG_BBRSND, 0, 3087 0, &log, false, tv); 3088 } 3089 } 3090 3091 static void 3092 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 3093 { 3094 if (tcp_bblogging_on(rack->rc_tp)) { 3095 union tcp_log_stackspecific log; 3096 struct timeval tv; 3097 3098 memset(&log, 0, sizeof(log)); 3099 log.u_bbr.flex1 = did_out; 3100 log.u_bbr.flex2 = nxt_pkt; 3101 log.u_bbr.flex3 = way_out; 3102 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3103 if (rack->rack_no_prr) 3104 log.u_bbr.flex5 = 0; 3105 else 3106 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3107 log.u_bbr.flex6 = nsegs; 3108 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 3109 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 3110 log.u_bbr.flex7 <<= 1; 3111 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 3112 log.u_bbr.flex7 <<= 1; 3113 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 3114 log.u_bbr.flex8 = rack->rc_in_persist; 3115 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3116 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3117 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3118 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 3119 log.u_bbr.use_lt_bw <<= 1; 3120 log.u_bbr.use_lt_bw |= rack->r_might_revert; 3121 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3122 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3123 log.u_bbr.pacing_gain = rack->r_must_retran; 3124 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3125 log.u_bbr.bw_inuse <<= 32; 3126 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3127 log.u_bbr.epoch = rack->rc_inp->inp_socket->so_snd.sb_hiwat; 3128 log.u_bbr.lt_epoch = rack->rc_inp->inp_socket->so_rcv.sb_hiwat; 3129 log.u_bbr.lost = rack->rc_tp->t_srtt; 3130 log.u_bbr.pkt_epoch = rack->rc_tp->rfbuf_cnt; 3131 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3132 &rack->rc_inp->inp_socket->so_rcv, 3133 &rack->rc_inp->inp_socket->so_snd, 3134 BBR_LOG_DOSEG_DONE, 0, 3135 0, &log, false, &tv); 3136 } 3137 } 3138 3139 static void 3140 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 3141 { 3142 if (tcp_bblogging_on(rack->rc_tp)) { 
3143 union tcp_log_stackspecific log; 3144 struct timeval tv; 3145 3146 memset(&log, 0, sizeof(log)); 3147 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 3148 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 3149 log.u_bbr.flex4 = arg1; 3150 log.u_bbr.flex5 = arg2; 3151 log.u_bbr.flex7 = rack->r_ctl.rc_user_set_min_segs; 3152 log.u_bbr.flex6 = arg3; 3153 log.u_bbr.flex8 = frm; 3154 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3155 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3156 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3157 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 3158 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3159 log.u_bbr.pacing_gain = rack->r_must_retran; 3160 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv, 3161 &tptosocket(tp)->so_snd, 3162 TCP_HDWR_PACE_SIZE, 0, 0, &log, false, &tv); 3163 } 3164 } 3165 3166 static void 3167 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, 3168 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 3169 { 3170 if (tcp_bblogging_on(rack->rc_tp)) { 3171 union tcp_log_stackspecific log; 3172 struct timeval tv; 3173 3174 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3175 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3176 log.u_bbr.flex1 = slot; 3177 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 3178 log.u_bbr.flex4 = reason; 3179 if (rack->rack_no_prr) 3180 log.u_bbr.flex5 = 0; 3181 else 3182 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3183 log.u_bbr.flex7 = hpts_calling; 3184 log.u_bbr.flex8 = rack->rc_in_persist; 3185 log.u_bbr.lt_epoch = cwnd_to_use; 3186 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3187 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3188 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3189 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3190 log.u_bbr.pacing_gain = rack->r_must_retran; 3191 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 3192 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3193 log.u_bbr.bw_inuse <<= 32; 3194 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3195 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3196 &rack->rc_inp->inp_socket->so_rcv, 3197 &rack->rc_inp->inp_socket->so_snd, 3198 BBR_LOG_JUSTRET, 0, 3199 tlen, &log, false, &tv); 3200 } 3201 } 3202 3203 static void 3204 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 3205 struct timeval *tv, uint32_t flags_on_entry) 3206 { 3207 if (tcp_bblogging_on(rack->rc_tp)) { 3208 union tcp_log_stackspecific log; 3209 3210 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3211 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3212 log.u_bbr.flex1 = line; 3213 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 3214 log.u_bbr.flex3 = flags_on_entry; 3215 log.u_bbr.flex4 = us_cts; 3216 if (rack->rack_no_prr) 3217 log.u_bbr.flex5 = 0; 3218 else 3219 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3220 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 3221 log.u_bbr.flex7 = hpts_removed; 3222 log.u_bbr.flex8 = 1; 3223 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 3224 log.u_bbr.timeStamp = us_cts; 3225 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3226 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3227 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3228 log.u_bbr.pacing_gain = rack->r_must_retran; 3229 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3230 log.u_bbr.bw_inuse <<= 32; 3231 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3232 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3233 
&rack->rc_inp->inp_socket->so_rcv, 3234 &rack->rc_inp->inp_socket->so_snd, 3235 BBR_LOG_TIMERCANC, 0, 3236 0, &log, false, tv); 3237 } 3238 } 3239 3240 static void 3241 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 3242 uint32_t flex1, uint32_t flex2, 3243 uint32_t flex3, uint32_t flex4, 3244 uint32_t flex5, uint32_t flex6, 3245 uint16_t flex7, uint8_t mod) 3246 { 3247 if (tcp_bblogging_on(rack->rc_tp)) { 3248 union tcp_log_stackspecific log; 3249 struct timeval tv; 3250 3251 if (mod == 1) { 3252 /* No you can't use 1, its for the real to cancel */ 3253 return; 3254 } 3255 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3256 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3257 log.u_bbr.flex1 = flex1; 3258 log.u_bbr.flex2 = flex2; 3259 log.u_bbr.flex3 = flex3; 3260 log.u_bbr.flex4 = flex4; 3261 log.u_bbr.flex5 = flex5; 3262 log.u_bbr.flex6 = flex6; 3263 log.u_bbr.flex7 = flex7; 3264 log.u_bbr.flex8 = mod; 3265 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3266 &rack->rc_inp->inp_socket->so_rcv, 3267 &rack->rc_inp->inp_socket->so_snd, 3268 BBR_LOG_TIMERCANC, 0, 3269 0, &log, false, &tv); 3270 } 3271 } 3272 3273 static void 3274 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 3275 { 3276 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3277 union tcp_log_stackspecific log; 3278 struct timeval tv; 3279 3280 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3281 log.u_bbr.flex1 = timers; 3282 log.u_bbr.flex2 = ret; 3283 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 3284 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3285 log.u_bbr.flex5 = cts; 3286 if (rack->rack_no_prr) 3287 log.u_bbr.flex6 = 0; 3288 else 3289 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; 3290 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3291 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3292 log.u_bbr.pacing_gain = rack->r_must_retran; 3293 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3294 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3295 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3296 &rack->rc_inp->inp_socket->so_rcv, 3297 &rack->rc_inp->inp_socket->so_snd, 3298 BBR_LOG_TO_PROCESS, 0, 3299 0, &log, false, &tv); 3300 } 3301 } 3302 3303 static void 3304 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line) 3305 { 3306 if (tcp_bblogging_on(rack->rc_tp)) { 3307 union tcp_log_stackspecific log; 3308 struct timeval tv; 3309 3310 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3311 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 3312 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 3313 if (rack->rack_no_prr) 3314 log.u_bbr.flex3 = 0; 3315 else 3316 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 3317 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 3318 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 3319 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 3320 log.u_bbr.flex7 = line; 3321 log.u_bbr.flex8 = frm; 3322 log.u_bbr.pkts_out = orig_cwnd; 3323 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3324 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3325 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 3326 log.u_bbr.use_lt_bw <<= 1; 3327 log.u_bbr.use_lt_bw |= rack->r_might_revert; 3328 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3329 &rack->rc_inp->inp_socket->so_rcv, 3330 &rack->rc_inp->inp_socket->so_snd, 3331 BBR_LOG_BBRUPD, 0, 3332 0, &log, false, &tv); 3333 } 3334 } 3335 3336 static void 3337 rack_counter_destroy(void) 3338 { 3339 counter_u64_free(rack_total_bytes); 3340 counter_u64_free(rack_fto_send); 3341 counter_u64_free(rack_fto_rsm_send); 3342 
counter_u64_free(rack_nfto_resend); 3343 counter_u64_free(rack_hw_pace_init_fail); 3344 counter_u64_free(rack_hw_pace_lost); 3345 counter_u64_free(rack_non_fto_send); 3346 counter_u64_free(rack_extended_rfo); 3347 counter_u64_free(rack_ack_total); 3348 counter_u64_free(rack_express_sack); 3349 counter_u64_free(rack_sack_total); 3350 counter_u64_free(rack_move_none); 3351 counter_u64_free(rack_move_some); 3352 counter_u64_free(rack_sack_attacks_detected); 3353 counter_u64_free(rack_sack_attacks_reversed); 3354 counter_u64_free(rack_sack_attacks_suspect); 3355 counter_u64_free(rack_sack_used_next_merge); 3356 counter_u64_free(rack_sack_used_prev_merge); 3357 counter_u64_free(rack_tlp_tot); 3358 counter_u64_free(rack_tlp_newdata); 3359 counter_u64_free(rack_tlp_retran); 3360 counter_u64_free(rack_tlp_retran_bytes); 3361 counter_u64_free(rack_to_tot); 3362 counter_u64_free(rack_saw_enobuf); 3363 counter_u64_free(rack_saw_enobuf_hw); 3364 counter_u64_free(rack_saw_enetunreach); 3365 counter_u64_free(rack_hot_alloc); 3366 counter_u64_free(rack_to_alloc); 3367 counter_u64_free(rack_to_alloc_hard); 3368 counter_u64_free(rack_to_alloc_emerg); 3369 counter_u64_free(rack_to_alloc_limited); 3370 counter_u64_free(rack_alloc_limited_conns); 3371 counter_u64_free(rack_split_limited); 3372 counter_u64_free(rack_multi_single_eq); 3373 counter_u64_free(rack_rxt_clamps_cwnd); 3374 counter_u64_free(rack_rxt_clamps_cwnd_uniq); 3375 counter_u64_free(rack_proc_non_comp_ack); 3376 counter_u64_free(rack_sack_proc_all); 3377 counter_u64_free(rack_sack_proc_restart); 3378 counter_u64_free(rack_sack_proc_short); 3379 counter_u64_free(rack_sack_skipped_acked); 3380 counter_u64_free(rack_sack_splits); 3381 counter_u64_free(rack_input_idle_reduces); 3382 counter_u64_free(rack_collapsed_win); 3383 counter_u64_free(rack_collapsed_win_rxt); 3384 counter_u64_free(rack_collapsed_win_rxt_bytes); 3385 counter_u64_free(rack_collapsed_win_seen); 3386 counter_u64_free(rack_try_scwnd); 3387 counter_u64_free(rack_persists_sends); 3388 counter_u64_free(rack_persists_acks); 3389 counter_u64_free(rack_persists_loss); 3390 counter_u64_free(rack_persists_lost_ends); 3391 #ifdef INVARIANTS 3392 counter_u64_free(rack_adjust_map_bw); 3393 #endif 3394 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 3395 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 3396 } 3397 3398 static struct rack_sendmap * 3399 rack_alloc(struct tcp_rack *rack) 3400 { 3401 struct rack_sendmap *rsm; 3402 3403 /* 3404 * First get the top of the list it in 3405 * theory is the "hottest" rsm we have, 3406 * possibly just freed by ack processing. 3407 */ 3408 if (rack->rc_free_cnt > rack_free_cache) { 3409 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 3410 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3411 counter_u64_add(rack_hot_alloc, 1); 3412 rack->rc_free_cnt--; 3413 return (rsm); 3414 } 3415 /* 3416 * Once we get under our free cache we probably 3417 * no longer have a "hot" one available. Lets 3418 * get one from UMA. 3419 */ 3420 rsm = uma_zalloc(rack_zone, M_NOWAIT); 3421 if (rsm) { 3422 rack->r_ctl.rc_num_maps_alloced++; 3423 counter_u64_add(rack_to_alloc, 1); 3424 return (rsm); 3425 } 3426 /* 3427 * Dig in to our aux rsm's (the last two) since 3428 * UMA failed to get us one. 
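	 * (Entries at or below the rack_free_cache threshold are left
	 * alone by the hot path above, so they effectively serve as an
	 * emergency reserve for exactly this allocation-failure case.)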
3429 */ 3430 if (rack->rc_free_cnt) { 3431 counter_u64_add(rack_to_alloc_emerg, 1); 3432 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 3433 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3434 rack->rc_free_cnt--; 3435 return (rsm); 3436 } 3437 return (NULL); 3438 } 3439 3440 static struct rack_sendmap * 3441 rack_alloc_full_limit(struct tcp_rack *rack) 3442 { 3443 if ((V_tcp_map_entries_limit > 0) && 3444 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 3445 counter_u64_add(rack_to_alloc_limited, 1); 3446 if (!rack->alloc_limit_reported) { 3447 rack->alloc_limit_reported = 1; 3448 counter_u64_add(rack_alloc_limited_conns, 1); 3449 } 3450 return (NULL); 3451 } 3452 return (rack_alloc(rack)); 3453 } 3454 3455 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 3456 static struct rack_sendmap * 3457 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 3458 { 3459 struct rack_sendmap *rsm; 3460 3461 if (limit_type) { 3462 /* currently there is only one limit type */ 3463 if (rack->r_ctl.rc_split_limit > 0 && 3464 rack->r_ctl.rc_num_split_allocs >= rack->r_ctl.rc_split_limit) { 3465 counter_u64_add(rack_split_limited, 1); 3466 if (!rack->alloc_limit_reported) { 3467 rack->alloc_limit_reported = 1; 3468 counter_u64_add(rack_alloc_limited_conns, 1); 3469 } 3470 return (NULL); 3471 } 3472 } 3473 3474 /* allocate and mark in the limit type, if set */ 3475 rsm = rack_alloc(rack); 3476 if (rsm != NULL && limit_type) { 3477 rsm->r_limit_type = limit_type; 3478 rack->r_ctl.rc_num_split_allocs++; 3479 } 3480 return (rsm); 3481 } 3482 3483 static void 3484 rack_free_trim(struct tcp_rack *rack) 3485 { 3486 struct rack_sendmap *rsm; 3487 3488 /* 3489 * Free up all the tail entries until 3490 * we get our list down to the limit. 
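	 * (We trim from the tail so that the most recently freed,
	 * cache-warm entries stay at the head, where rack_alloc()
	 * picks them up first.)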
3491 */ 3492 while (rack->rc_free_cnt > rack_free_cache) { 3493 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); 3494 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3495 rack->rc_free_cnt--; 3496 rack->r_ctl.rc_num_maps_alloced--; 3497 uma_zfree(rack_zone, rsm); 3498 } 3499 } 3500 3501 static void 3502 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 3503 { 3504 if (rsm->r_flags & RACK_APP_LIMITED) { 3505 if (rack->r_ctl.rc_app_limited_cnt > 0) { 3506 rack->r_ctl.rc_app_limited_cnt--; 3507 } 3508 } 3509 if (rsm->r_limit_type) { 3510 /* currently there is only one limit type */ 3511 rack->r_ctl.rc_num_split_allocs--; 3512 } 3513 if (rsm == rack->r_ctl.rc_first_appl) { 3514 rack->r_ctl.cleared_app_ack_seq = rsm->r_start + (rsm->r_end - rsm->r_start); 3515 rack->r_ctl.cleared_app_ack = 1; 3516 if (rack->r_ctl.rc_app_limited_cnt == 0) 3517 rack->r_ctl.rc_first_appl = NULL; 3518 else 3519 rack->r_ctl.rc_first_appl = tqhash_find(rack->r_ctl.tqh, rsm->r_nseq_appl); 3520 } 3521 if (rsm == rack->r_ctl.rc_resend) 3522 rack->r_ctl.rc_resend = NULL; 3523 if (rsm == rack->r_ctl.rc_end_appl) 3524 rack->r_ctl.rc_end_appl = NULL; 3525 if (rack->r_ctl.rc_tlpsend == rsm) 3526 rack->r_ctl.rc_tlpsend = NULL; 3527 if (rack->r_ctl.rc_sacklast == rsm) 3528 rack->r_ctl.rc_sacklast = NULL; 3529 memset(rsm, 0, sizeof(struct rack_sendmap)); 3530 /* Make sure we are not going to overrun our count limit of 0xff */ 3531 if ((rack->rc_free_cnt + 1) > RACK_FREE_CNT_MAX) { 3532 rack_free_trim(rack); 3533 } 3534 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); 3535 rack->rc_free_cnt++; 3536 } 3537 3538 static uint32_t 3539 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack) 3540 { 3541 uint64_t srtt, bw, len, tim; 3542 uint32_t segsiz, def_len, minl; 3543 3544 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3545 def_len = rack_def_data_window * segsiz; 3546 if (rack->rc_gp_filled == 0) { 3547 /* 3548 * We have no measurement (IW is in flight?) so 3549 * we can only guess using our data_window sysctl 3550 * value (usually 20MSS). 3551 */ 3552 return (def_len); 3553 } 3554 /* 3555 * Now we have a number of factors to consider. 3556 * 3557 * 1) We have a desired BDP which is usually 3558 * at least 2. 3559 * 2) We have a minimum number of rtt's usually 1 SRTT 3560 * but we allow it too to be more. 3561 * 3) We want to make sure a measurement last N useconds (if 3562 * we have set rack_min_measure_usec. 3563 * 3564 * We handle the first concern here by trying to create a data 3565 * window of max(rack_def_data_window, DesiredBDP). The 3566 * second concern we handle in not letting the measurement 3567 * window end normally until at least the required SRTT's 3568 * have gone by which is done further below in 3569 * rack_enough_for_measurement(). Finally the third concern 3570 * we also handle here by calculating how long that time 3571 * would take at the current BW and then return the 3572 * max of our first calculation and that length. Note 3573 * that if rack_min_measure_usec is 0, we don't deal 3574 * with concern 3. Also for both Concern 1 and 3 an 3575 * application limited period could end the measurement 3576 * earlier. 3577 * 3578 * So lets calculate the BDP with the "known" b/w using 3579 * the SRTT has our rtt and then multiply it by the 3580 * goal. 
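	 *
	 * Purely illustrative example (made-up numbers): with a measured
	 * b/w of 1,250,000 bytes/sec and an SRTT of 40,000 usec,
	 * bw * srtt / HPTS_USEC_IN_SEC gives a BDP of 50,000 bytes; with
	 * rack_goal_bdp at its usual 2 that becomes 100,000 bytes, which
	 * is then rounded up to a segment boundary and compared against
	 * def_len and the rack_min_measure_usec derived minimum below.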
3581 */ 3582 bw = rack_get_bw(rack); 3583 srtt = (uint64_t)tp->t_srtt; 3584 len = bw * srtt; 3585 len /= (uint64_t)HPTS_USEC_IN_SEC; 3586 len *= max(1, rack_goal_bdp); 3587 /* Now we need to round up to the nearest MSS */ 3588 len = roundup(len, segsiz); 3589 if (rack_min_measure_usec) { 3590 /* Now calculate our min length for this b/w */ 3591 tim = rack_min_measure_usec; 3592 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC; 3593 if (minl == 0) 3594 minl = 1; 3595 minl = roundup(minl, segsiz); 3596 if (len < minl) 3597 len = minl; 3598 } 3599 /* 3600 * Now if we have a very small window we want 3601 * to attempt to get the window that is 3602 * as small as possible. This happens on 3603 * low b/w connections and we don't want to 3604 * span huge numbers of rtt's between measurements. 3605 * 3606 * We basically include 2 over our "MIN window" so 3607 * that the measurement can be shortened (possibly) by 3608 * an ack'ed packet. 3609 */ 3610 if (len < def_len) 3611 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz))); 3612 else 3613 return (max((uint32_t)len, def_len)); 3614 3615 } 3616 3617 static int 3618 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality) 3619 { 3620 uint32_t tim, srtts, segsiz; 3621 3622 /* 3623 * Has enough time passed for the GP measurement to be valid? 3624 */ 3625 if (SEQ_LT(th_ack, tp->gput_seq)) { 3626 /* Not enough bytes yet */ 3627 return (0); 3628 } 3629 if ((tp->snd_max == tp->snd_una) || 3630 (th_ack == tp->snd_max)){ 3631 /* 3632 * All is acked quality of all acked is 3633 * usually low or medium, but we in theory could split 3634 * all acked into two cases, where you got 3635 * a signifigant amount of your window and 3636 * where you did not. For now we leave it 3637 * but it is something to contemplate in the 3638 * future. The danger here is that delayed ack 3639 * is effecting the last byte (which is a 50:50 chance). 3640 */ 3641 *quality = RACK_QUALITY_ALLACKED; 3642 return (1); 3643 } 3644 if (SEQ_GEQ(th_ack, tp->gput_ack)) { 3645 /* 3646 * We obtained our entire window of data we wanted 3647 * no matter if we are in recovery or not then 3648 * its ok since expanding the window does not 3649 * make things fuzzy (or at least not as much). 3650 */ 3651 *quality = RACK_QUALITY_HIGH; 3652 return (1); 3653 } 3654 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3655 if (SEQ_LT(th_ack, tp->gput_ack) && 3656 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 3657 /* Not enough bytes yet */ 3658 return (0); 3659 } 3660 if (rack->r_ctl.rc_first_appl && 3661 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { 3662 /* 3663 * We are up to the app limited send point 3664 * we have to measure irrespective of the time.. 3665 */ 3666 *quality = RACK_QUALITY_APPLIMITED; 3667 return (1); 3668 } 3669 /* Now what about time? */ 3670 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); 3671 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; 3672 if ((tim >= srtts) && (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) { 3673 /* 3674 * We do not allow a measurement if we are in recovery 3675 * that would shrink the goodput window we wanted. 3676 * This is to prevent cloudyness of when the last send 3677 * was actually made. 
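		 * (Here tim is how long the measurement has been running,
		 * i.e. the arrival time of this ack minus gput_ts, and it
		 * must cover at least rack_min_srtts worth of the current
		 * goodput SRTT before we declare the sample done on time
		 * alone.)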
3678 */ 3679 *quality = RACK_QUALITY_HIGH; 3680 return (1); 3681 } 3682 /* Nope not even a full SRTT has passed */ 3683 return (0); 3684 } 3685 3686 static void 3687 rack_log_timely(struct tcp_rack *rack, 3688 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd, 3689 uint64_t up_bnd, int line, uint8_t method) 3690 { 3691 if (tcp_bblogging_on(rack->rc_tp)) { 3692 union tcp_log_stackspecific log; 3693 struct timeval tv; 3694 3695 memset(&log, 0, sizeof(log)); 3696 log.u_bbr.flex1 = logged; 3697 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; 3698 log.u_bbr.flex2 <<= 4; 3699 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; 3700 log.u_bbr.flex2 <<= 4; 3701 log.u_bbr.flex2 |= rack->rc_gp_incr; 3702 log.u_bbr.flex2 <<= 4; 3703 log.u_bbr.flex2 |= rack->rc_gp_bwred; 3704 log.u_bbr.flex3 = rack->rc_gp_incr; 3705 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3706 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; 3707 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; 3708 log.u_bbr.flex7 = rack->rc_gp_bwred; 3709 log.u_bbr.flex8 = method; 3710 log.u_bbr.cur_del_rate = cur_bw; 3711 log.u_bbr.delRate = low_bnd; 3712 log.u_bbr.bw_inuse = up_bnd; 3713 log.u_bbr.rttProp = rack_get_bw(rack); 3714 log.u_bbr.pkt_epoch = line; 3715 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3716 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3717 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3718 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3719 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3720 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; 3721 log.u_bbr.cwnd_gain <<= 1; 3722 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; 3723 log.u_bbr.cwnd_gain <<= 1; 3724 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 3725 log.u_bbr.cwnd_gain <<= 1; 3726 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 3727 log.u_bbr.lost = rack->r_ctl.rc_loss_count; 3728 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3729 &rack->rc_inp->inp_socket->so_rcv, 3730 &rack->rc_inp->inp_socket->so_snd, 3731 TCP_TIMELY_WORK, 0, 3732 0, &log, false, &tv); 3733 } 3734 } 3735 3736 static int 3737 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) 3738 { 3739 /* 3740 * Before we increase we need to know if 3741 * the estimate just made was less than 3742 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est) 3743 * 3744 * If we already are pacing at a fast enough 3745 * rate to push us faster there is no sense of 3746 * increasing. 3747 * 3748 * We first caculate our actual pacing rate (ss or ca multiplier 3749 * times our cur_bw). 3750 * 3751 * Then we take the last measured rate and multipy by our 3752 * maximum pacing overage to give us a max allowable rate. 3753 * 3754 * If our act_rate is smaller than our max_allowable rate 3755 * then we should increase. Else we should hold steady. 3756 * 3757 */ 3758 uint64_t act_rate, max_allow_rate; 3759 3760 if (rack_timely_no_stopping) 3761 return (1); 3762 3763 if ((cur_bw == 0) || (last_bw_est == 0)) { 3764 /* 3765 * Initial startup case or 3766 * everything is acked case. 3767 */ 3768 rack_log_timely(rack, mult, cur_bw, 0, 0, 3769 __LINE__, 9); 3770 return (1); 3771 } 3772 if (mult <= 100) { 3773 /* 3774 * We can always pace at or slightly above our rate. 
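		 * (With the multiplier at or below 100 we are pacing no
		 * faster than the measured b/w, so allowing a raise here
		 * is always safe.)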
3775 */ 3776 rack_log_timely(rack, mult, cur_bw, 0, 0, 3777 __LINE__, 9); 3778 return (1); 3779 } 3780 act_rate = cur_bw * (uint64_t)mult; 3781 act_rate /= 100; 3782 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100); 3783 max_allow_rate /= 100; 3784 if (act_rate < max_allow_rate) { 3785 /* 3786 * Here the rate we are actually pacing at 3787 * is smaller than 10% above our last measurement. 3788 * This means we are pacing below what we would 3789 * like to try to achieve (plus some wiggle room). 3790 */ 3791 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3792 __LINE__, 9); 3793 return (1); 3794 } else { 3795 /* 3796 * Here we are already pacing at least rack_max_per_above(10%) 3797 * what we are getting back. This indicates most likely 3798 * that we are being limited (cwnd/rwnd/app) and can't 3799 * get any more b/w. There is no sense of trying to 3800 * raise up the pacing rate its not speeding us up 3801 * and we already are pacing faster than we are getting. 3802 */ 3803 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3804 __LINE__, 8); 3805 return (0); 3806 } 3807 } 3808 3809 static void 3810 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack) 3811 { 3812 /* 3813 * When we drag bottom, we want to assure 3814 * that no multiplier is below 1.0, if so 3815 * we want to restore it to at least that. 3816 */ 3817 if (rack->r_ctl.rack_per_of_gp_rec < 100) { 3818 /* This is unlikely we usually do not touch recovery */ 3819 rack->r_ctl.rack_per_of_gp_rec = 100; 3820 } 3821 if (rack->r_ctl.rack_per_of_gp_ca < 100) { 3822 rack->r_ctl.rack_per_of_gp_ca = 100; 3823 } 3824 if (rack->r_ctl.rack_per_of_gp_ss < 100) { 3825 rack->r_ctl.rack_per_of_gp_ss = 100; 3826 } 3827 } 3828 3829 static void 3830 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack) 3831 { 3832 if (rack->r_ctl.rack_per_of_gp_ca > 100) { 3833 rack->r_ctl.rack_per_of_gp_ca = 100; 3834 } 3835 if (rack->r_ctl.rack_per_of_gp_ss > 100) { 3836 rack->r_ctl.rack_per_of_gp_ss = 100; 3837 } 3838 } 3839 3840 static void 3841 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override) 3842 { 3843 int32_t calc, logged, plus; 3844 3845 logged = 0; 3846 3847 if (rack->rc_skip_timely) 3848 return; 3849 if (override) { 3850 /* 3851 * override is passed when we are 3852 * loosing b/w and making one last 3853 * gasp at trying to not loose out 3854 * to a new-reno flow. 3855 */ 3856 goto extra_boost; 3857 } 3858 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */ 3859 if (rack->rc_gp_incr && 3860 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { 3861 /* 3862 * Reset and get 5 strokes more before the boost. Note 3863 * that the count is 0 based so we have to add one. 
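		 * (Purely illustrative: if rack_gp_increase_per were 2, the
		 * boosted step below would be 2 * RACK_TIMELY_CNT_BOOST,
		 * i.e. a 10 percent bump when the boost count is 5, instead
		 * of the normal 2 percent per pass.)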
3864 */ 3865 extra_boost: 3866 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3867 rack->rc_gp_timely_inc_cnt = 0; 3868 } else 3869 plus = (uint32_t)rack_gp_increase_per; 3870 /* Must be at least 1% increase for true timely increases */ 3871 if ((plus < 1) && 3872 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3873 plus = 1; 3874 if (rack->rc_gp_saw_rec && 3875 (rack->rc_gp_no_rec_chg == 0) && 3876 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3877 rack->r_ctl.rack_per_of_gp_rec)) { 3878 /* We have been in recovery ding it too */ 3879 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 3880 if (calc > 0xffff) 3881 calc = 0xffff; 3882 logged |= 1; 3883 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 3884 if (rack->r_ctl.rack_per_upper_bound_ca && 3885 (rack->rc_dragged_bottom == 0) && 3886 (rack->r_ctl.rack_per_of_gp_rec > rack->r_ctl.rack_per_upper_bound_ca)) 3887 rack->r_ctl.rack_per_of_gp_rec = rack->r_ctl.rack_per_upper_bound_ca; 3888 } 3889 if (rack->rc_gp_saw_ca && 3890 (rack->rc_gp_saw_ss == 0) && 3891 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3892 rack->r_ctl.rack_per_of_gp_ca)) { 3893 /* In CA */ 3894 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 3895 if (calc > 0xffff) 3896 calc = 0xffff; 3897 logged |= 2; 3898 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 3899 if (rack->r_ctl.rack_per_upper_bound_ca && 3900 (rack->rc_dragged_bottom == 0) && 3901 (rack->r_ctl.rack_per_of_gp_ca > rack->r_ctl.rack_per_upper_bound_ca)) 3902 rack->r_ctl.rack_per_of_gp_ca = rack->r_ctl.rack_per_upper_bound_ca; 3903 } 3904 if (rack->rc_gp_saw_ss && 3905 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3906 rack->r_ctl.rack_per_of_gp_ss)) { 3907 /* In SS */ 3908 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 3909 if (calc > 0xffff) 3910 calc = 0xffff; 3911 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 3912 if (rack->r_ctl.rack_per_upper_bound_ss && 3913 (rack->rc_dragged_bottom == 0) && 3914 (rack->r_ctl.rack_per_of_gp_ss > rack->r_ctl.rack_per_upper_bound_ss)) 3915 rack->r_ctl.rack_per_of_gp_ss = rack->r_ctl.rack_per_upper_bound_ss; 3916 logged |= 4; 3917 } 3918 if (logged && 3919 (rack->rc_gp_incr == 0)){ 3920 /* Go into increment mode */ 3921 rack->rc_gp_incr = 1; 3922 rack->rc_gp_timely_inc_cnt = 0; 3923 } 3924 if (rack->rc_gp_incr && 3925 logged && 3926 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 3927 rack->rc_gp_timely_inc_cnt++; 3928 } 3929 rack_log_timely(rack, logged, plus, 0, 0, 3930 __LINE__, 1); 3931 } 3932 3933 static uint32_t 3934 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 3935 { 3936 /*- 3937 * norm_grad = rtt_diff / minrtt; 3938 * new_per = curper * (1 - B * norm_grad) 3939 * 3940 * B = rack_gp_decrease_per (default 80%) 3941 * rtt_dif = input var current rtt-diff 3942 * curper = input var current percentage 3943 * minrtt = from rack filter 3944 * 3945 * In order to do the floating point calculations above we 3946 * do an integer conversion. 
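	 * As a rough worked example (illustrative numbers only): with
	 * rtt_diff = 2000 usec, a filtered min rtt of 20000 usec and
	 * rack_gp_decrease_per = 80, norm_grad is 0.1 and the reduction
	 * is 0.8 * 0.1 = 8 percent, so a curper of 200 comes back as 184.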
The code looks confusing so let me 3947 * translate it into something that use more variables and 3948 * is clearer for us humans :) 3949 * 3950 * uint64_t norm_grad, inverse, reduce_by, final_result; 3951 * uint32_t perf; 3952 * 3953 * norm_grad = (((uint64_t)rtt_diff * 1000000) / 3954 * (uint64_t)get_filter_small(&rack->r_ctl.rc_gp_min_rtt)); 3955 * inverse = ((uint64_t)rack_gp_decrease * (uint64_t)1000000) * norm_grad; 3956 * inverse /= 1000000; 3957 * reduce_by = (1000000 - inverse); 3958 * final_result = (cur_per * reduce_by) / 1000000; 3959 * perf = (uint32_t)final_result; 3960 */ 3961 uint64_t perf; 3962 3963 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3964 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 3965 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 3966 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 3967 (uint64_t)1000000)) / 3968 (uint64_t)1000000); 3969 if (perf > curper) { 3970 /* TSNH */ 3971 perf = curper - 1; 3972 } 3973 return ((uint32_t)perf); 3974 } 3975 3976 static uint32_t 3977 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 3978 { 3979 /* 3980 * highrttthresh 3981 * result = curper * (1 - (B * ( 1 - ------ )) 3982 * gp_srtt 3983 * 3984 * B = rack_gp_decrease_per (default .8 i.e. 80) 3985 * highrttthresh = filter_min * rack_gp_rtt_maxmul 3986 */ 3987 uint64_t perf; 3988 uint32_t highrttthresh; 3989 3990 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 3991 3992 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3993 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 3994 ((uint64_t)highrttthresh * (uint64_t)1000000) / 3995 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 3996 if (tcp_bblogging_on(rack->rc_tp)) { 3997 uint64_t log1; 3998 3999 log1 = rtt; 4000 log1 <<= 32; 4001 log1 |= highrttthresh; 4002 rack_log_timely(rack, 4003 rack_gp_decrease_per, 4004 (uint64_t)curper, 4005 log1, 4006 perf, 4007 __LINE__, 4008 15); 4009 } 4010 return (perf); 4011 } 4012 4013 static void 4014 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 4015 { 4016 uint64_t logvar, logvar2, logvar3; 4017 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 4018 4019 if (rack->rc_skip_timely) 4020 return; 4021 if (rack->rc_gp_incr) { 4022 /* Turn off increment counting */ 4023 rack->rc_gp_incr = 0; 4024 rack->rc_gp_timely_inc_cnt = 0; 4025 } 4026 ss_red = ca_red = rec_red = 0; 4027 logged = 0; 4028 /* Calculate the reduction value */ 4029 if (rtt_diff < 0) { 4030 rtt_diff *= -1; 4031 } 4032 /* Must be at least 1% reduction */ 4033 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 4034 /* We have been in recovery ding it too */ 4035 if (timely_says == 2) { 4036 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 4037 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 4038 if (alt < new_per) 4039 val = alt; 4040 else 4041 val = new_per; 4042 } else 4043 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 4044 if (rack->r_ctl.rack_per_of_gp_rec > val) { 4045 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 4046 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 4047 } else { 4048 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 4049 rec_red = 0; 4050 } 4051 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 4052 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 4053 logged |= 1; 4054 } 4055 if (rack->rc_gp_saw_ss) { 4056 /* Sent in SS */ 4057 if 
(timely_says == 2) { 4058 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 4059 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 4060 if (alt < new_per) 4061 val = alt; 4062 else 4063 val = new_per; 4064 } else 4065 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 4066 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 4067 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 4068 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 4069 } else { 4070 ss_red = new_per; 4071 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 4072 logvar = new_per; 4073 logvar <<= 32; 4074 logvar |= alt; 4075 logvar2 = (uint32_t)rtt; 4076 logvar2 <<= 32; 4077 logvar2 |= (uint32_t)rtt_diff; 4078 logvar3 = rack_gp_rtt_maxmul; 4079 logvar3 <<= 32; 4080 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4081 rack_log_timely(rack, timely_says, 4082 logvar2, logvar3, 4083 logvar, __LINE__, 10); 4084 } 4085 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 4086 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 4087 logged |= 4; 4088 } else if (rack->rc_gp_saw_ca) { 4089 /* Sent in CA */ 4090 if (timely_says == 2) { 4091 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 4092 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 4093 if (alt < new_per) 4094 val = alt; 4095 else 4096 val = new_per; 4097 } else 4098 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 4099 if (rack->r_ctl.rack_per_of_gp_ca > val) { 4100 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 4101 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val; 4102 } else { 4103 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 4104 ca_red = 0; 4105 logvar = new_per; 4106 logvar <<= 32; 4107 logvar |= alt; 4108 logvar2 = (uint32_t)rtt; 4109 logvar2 <<= 32; 4110 logvar2 |= (uint32_t)rtt_diff; 4111 logvar3 = rack_gp_rtt_maxmul; 4112 logvar3 <<= 32; 4113 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4114 rack_log_timely(rack, timely_says, 4115 logvar2, logvar3, 4116 logvar, __LINE__, 10); 4117 } 4118 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 4119 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 4120 logged |= 2; 4121 } 4122 if (rack->rc_gp_timely_dec_cnt < 0x7) { 4123 rack->rc_gp_timely_dec_cnt++; 4124 if (rack_timely_dec_clear && 4125 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 4126 rack->rc_gp_timely_dec_cnt = 0; 4127 } 4128 logvar = ss_red; 4129 logvar <<= 32; 4130 logvar |= ca_red; 4131 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 4132 __LINE__, 2); 4133 } 4134 4135 static void 4136 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 4137 uint32_t rtt, uint32_t line, uint8_t reas) 4138 { 4139 if (tcp_bblogging_on(rack->rc_tp)) { 4140 union tcp_log_stackspecific log; 4141 struct timeval tv; 4142 4143 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4144 log.u_bbr.flex1 = line; 4145 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 4146 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 4147 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 4148 log.u_bbr.flex5 = rtt; 4149 log.u_bbr.flex6 = rack->rc_highly_buffered; 4150 log.u_bbr.flex6 <<= 1; 4151 log.u_bbr.flex6 |= rack->forced_ack; 4152 log.u_bbr.flex6 <<= 1; 4153 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 4154 log.u_bbr.flex6 <<= 1; 4155 log.u_bbr.flex6 |= rack->in_probe_rtt; 4156 log.u_bbr.flex6 <<= 1; 4157 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 
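		/*
		 * flex6 above packs five state flags: highly_buffered,
		 * forced_ack, gp_dyn_mul, in_probe_rtt and
		 * measure_saw_probe_rtt.
		 */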
4158 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; 4159 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 4160 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 4161 log.u_bbr.flex8 = reas; 4162 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4163 log.u_bbr.delRate = rack_get_bw(rack); 4164 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 4165 log.u_bbr.cur_del_rate <<= 32; 4166 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 4167 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 4168 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 4169 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 4170 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 4171 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 4172 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 4173 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 4174 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4175 log.u_bbr.rttProp = us_cts; 4176 log.u_bbr.rttProp <<= 32; 4177 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 4178 TCP_LOG_EVENTP(rack->rc_tp, NULL, 4179 &rack->rc_inp->inp_socket->so_rcv, 4180 &rack->rc_inp->inp_socket->so_snd, 4181 BBR_LOG_RTT_SHRINKS, 0, 4182 0, &log, false, &rack->r_ctl.act_rcv_time); 4183 } 4184 } 4185 4186 static void 4187 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 4188 { 4189 uint64_t bwdp; 4190 4191 bwdp = rack_get_bw(rack); 4192 bwdp *= (uint64_t)rtt; 4193 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 4194 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 4195 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) { 4196 /* 4197 * A window protocol must be able to have 4 packets 4198 * outstanding as the floor in order to function 4199 * (especially considering delayed ack :D). 4200 */ 4201 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 4202 } 4203 } 4204 4205 static void 4206 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 4207 { 4208 /** 4209 * ProbeRTT is a bit different in rack_pacing than in 4210 * BBR. It is like BBR in that it uses the lowering of 4211 * the RTT as a signal that we saw something new and 4212 * counts from there for how long between. But it is 4213 * different in that its quite simple. It does not 4214 * play with the cwnd and wait until we get down 4215 * to N segments outstanding and hold that for 4216 * 200ms. Instead it just sets the pacing reduction 4217 * rate to a set percentage (70 by default) and hold 4218 * that for a number of recent GP Srtt's. 4219 */ 4220 uint32_t segsiz; 4221 4222 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 4223 if (rack->rc_gp_dyn_mul == 0) 4224 return; 4225 4226 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 4227 /* We are idle */ 4228 return; 4229 } 4230 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 4231 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 4232 /* 4233 * Stop the goodput now, the idea here is 4234 * that future measurements with in_probe_rtt 4235 * won't register if they are not greater so 4236 * we want to get what info (if any) is available 4237 * now. 
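		 * (Once in_probe_rtt is set we will be pacing at the
		 * reduced probe-rtt percentage, so a sample that straddled
		 * the transition would understate what the path can
		 * actually deliver.)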
4238 */ 4239 rack_do_goodput_measurement(rack->rc_tp, rack, 4240 rack->rc_tp->snd_una, __LINE__, 4241 RACK_QUALITY_PROBERTT); 4242 } 4243 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 4244 rack->r_ctl.rc_time_probertt_entered = us_cts; 4245 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 4246 rack->r_ctl.rc_pace_min_segs); 4247 rack->in_probe_rtt = 1; 4248 rack->measure_saw_probe_rtt = 1; 4249 rack->r_ctl.rc_time_probertt_starts = 0; 4250 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 4251 if (rack_probertt_use_min_rtt_entry) 4252 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 4253 else 4254 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 4255 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4256 __LINE__, RACK_RTTS_ENTERPROBE); 4257 } 4258 4259 static void 4260 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 4261 { 4262 struct rack_sendmap *rsm; 4263 uint32_t segsiz; 4264 4265 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 4266 rack->r_ctl.rc_pace_min_segs); 4267 rack->in_probe_rtt = 0; 4268 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 4269 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 4270 /* 4271 * Stop the goodput now, the idea here is 4272 * that future measurements with in_probe_rtt 4273 * won't register if they are not greater so 4274 * we want to get what info (if any) is available 4275 * now. 4276 */ 4277 rack_do_goodput_measurement(rack->rc_tp, rack, 4278 rack->rc_tp->snd_una, __LINE__, 4279 RACK_QUALITY_PROBERTT); 4280 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 4281 /* 4282 * We don't have enough data to make a measurement. 4283 * So lets just stop and start here after exiting 4284 * probe-rtt. We probably are not interested in 4285 * the results anyway. 4286 */ 4287 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 4288 } 4289 /* 4290 * Measurements through the current snd_max are going 4291 * to be limited by the slower pacing rate. 4292 * 4293 * We need to mark these as app-limited so we 4294 * don't collapse the b/w. 4295 */ 4296 rsm = tqhash_max(rack->r_ctl.tqh); 4297 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 4298 if (rack->r_ctl.rc_app_limited_cnt == 0) 4299 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 4300 else { 4301 /* 4302 * Go out to the end app limited and mark 4303 * this new one as next and move the end_appl up 4304 * to this guy. 4305 */ 4306 if (rack->r_ctl.rc_end_appl) 4307 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 4308 rack->r_ctl.rc_end_appl = rsm; 4309 } 4310 rsm->r_flags |= RACK_APP_LIMITED; 4311 rack->r_ctl.rc_app_limited_cnt++; 4312 } 4313 /* 4314 * Now, we need to examine our pacing rate multipliers. 4315 * If its under 100%, we need to kick it back up to 4316 * 100%. We also don't let it be over our "max" above 4317 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 4318 * Note setting clamp_atexit_prtt to 0 has the effect 4319 * of setting CA/SS to 100% always at exit (which is 4320 * the default behavior). 4321 */ 4322 if (rack_probertt_clear_is) { 4323 rack->rc_gp_incr = 0; 4324 rack->rc_gp_bwred = 0; 4325 rack->rc_gp_timely_inc_cnt = 0; 4326 rack->rc_gp_timely_dec_cnt = 0; 4327 } 4328 /* Do we do any clamping at exit? 
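	 * A highly buffered path is clamped to rack_atexit_prtt_hbp,
	 * otherwise to rack_atexit_prtt; leaving the respective tunable
	 * at 0 skips that clamp.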
*/ 4329 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 4330 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 4331 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 4332 } 4333 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 4334 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 4335 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 4336 } 4337 /* 4338 * Lets set rtt_diff to 0, so that we will get a "boost" 4339 * after exiting. 4340 */ 4341 rack->r_ctl.rc_rtt_diff = 0; 4342 4343 /* Clear all flags so we start fresh */ 4344 rack->rc_tp->t_bytes_acked = 0; 4345 rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 4346 /* 4347 * If configured to, set the cwnd and ssthresh to 4348 * our targets. 4349 */ 4350 if (rack_probe_rtt_sets_cwnd) { 4351 uint64_t ebdp; 4352 uint32_t setto; 4353 4354 /* Set ssthresh so we get into CA once we hit our target */ 4355 if (rack_probertt_use_min_rtt_exit == 1) { 4356 /* Set to min rtt */ 4357 rack_set_prtt_target(rack, segsiz, 4358 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 4359 } else if (rack_probertt_use_min_rtt_exit == 2) { 4360 /* Set to current gp rtt */ 4361 rack_set_prtt_target(rack, segsiz, 4362 rack->r_ctl.rc_gp_srtt); 4363 } else if (rack_probertt_use_min_rtt_exit == 3) { 4364 /* Set to entry gp rtt */ 4365 rack_set_prtt_target(rack, segsiz, 4366 rack->r_ctl.rc_entry_gp_rtt); 4367 } else { 4368 uint64_t sum; 4369 uint32_t setval; 4370 4371 sum = rack->r_ctl.rc_entry_gp_rtt; 4372 sum *= 10; 4373 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 4374 if (sum >= 20) { 4375 /* 4376 * A highly buffered path needs 4377 * cwnd space for timely to work. 4378 * Lets set things up as if 4379 * we are heading back here again. 4380 */ 4381 setval = rack->r_ctl.rc_entry_gp_rtt; 4382 } else if (sum >= 15) { 4383 /* 4384 * Lets take the smaller of the 4385 * two since we are just somewhat 4386 * buffered. 4387 */ 4388 setval = rack->r_ctl.rc_gp_srtt; 4389 if (setval > rack->r_ctl.rc_entry_gp_rtt) 4390 setval = rack->r_ctl.rc_entry_gp_rtt; 4391 } else { 4392 /* 4393 * Here we are not highly buffered 4394 * and should pick the min we can to 4395 * keep from causing loss. 
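				 * (This branch means rc_entry_gp_rtt was under
				 * 1.5 times the current gp_srtt, so the queue
				 * has largely drained and the filtered minimum
				 * RTT is a safe, conservative target.)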
4396 */ 4397 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4398 } 4399 rack_set_prtt_target(rack, segsiz, 4400 setval); 4401 } 4402 if (rack_probe_rtt_sets_cwnd > 1) { 4403 /* There is a percentage here to boost */ 4404 ebdp = rack->r_ctl.rc_target_probertt_flight; 4405 ebdp *= rack_probe_rtt_sets_cwnd; 4406 ebdp /= 100; 4407 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 4408 } else 4409 setto = rack->r_ctl.rc_target_probertt_flight; 4410 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 4411 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 4412 /* Enforce a min */ 4413 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 4414 } 4415 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 4416 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 4417 } 4418 rack_log_rtt_shrinks(rack, us_cts, 4419 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4420 __LINE__, RACK_RTTS_EXITPROBE); 4421 /* Clear times last so log has all the info */ 4422 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 4423 rack->r_ctl.rc_time_probertt_entered = us_cts; 4424 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 4425 rack->r_ctl.rc_time_of_last_probertt = us_cts; 4426 } 4427 4428 static void 4429 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 4430 { 4431 /* Check in on probe-rtt */ 4432 4433 if (rack->rc_gp_filled == 0) { 4434 /* We do not do p-rtt unless we have gp measurements */ 4435 return; 4436 } 4437 if (rack->in_probe_rtt) { 4438 uint64_t no_overflow; 4439 uint32_t endtime, must_stay; 4440 4441 if (rack->r_ctl.rc_went_idle_time && 4442 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 4443 /* 4444 * We went idle during prtt, just exit now. 4445 */ 4446 rack_exit_probertt(rack, us_cts); 4447 } else if (rack_probe_rtt_safety_val && 4448 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 4449 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 4450 /* 4451 * Probe RTT safety value triggered! 4452 */ 4453 rack_log_rtt_shrinks(rack, us_cts, 4454 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4455 __LINE__, RACK_RTTS_SAFETY); 4456 rack_exit_probertt(rack, us_cts); 4457 } 4458 /* Calculate the max we will wait */ 4459 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 4460 if (rack->rc_highly_buffered) 4461 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 4462 /* Calculate the min we must wait */ 4463 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 4464 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 4465 TSTMP_LT(us_cts, endtime)) { 4466 uint32_t calc; 4467 /* Do we lower more? 
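			 * Each whole gp_srtt spent in probe-rtt shaves
			 * rack_per_of_gp_probertt_reduce off the probe-rtt
			 * pacing percentage, never going below
			 * rack_per_of_gp_lowthresh.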
*/ 4468 no_exit: 4469 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 4470 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 4471 else 4472 calc = 0; 4473 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 4474 if (calc) { 4475 /* Maybe */ 4476 calc *= rack_per_of_gp_probertt_reduce; 4477 if (calc > rack_per_of_gp_probertt) 4478 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 4479 else 4480 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 4481 /* Limit it too */ 4482 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 4483 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 4484 } 4485 /* We must reach target or the time set */ 4486 return; 4487 } 4488 if (rack->r_ctl.rc_time_probertt_starts == 0) { 4489 if ((TSTMP_LT(us_cts, must_stay) && 4490 rack->rc_highly_buffered) || 4491 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 4492 rack->r_ctl.rc_target_probertt_flight)) { 4493 /* We are not past the must_stay time */ 4494 goto no_exit; 4495 } 4496 rack_log_rtt_shrinks(rack, us_cts, 4497 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4498 __LINE__, RACK_RTTS_REACHTARGET); 4499 rack->r_ctl.rc_time_probertt_starts = us_cts; 4500 if (rack->r_ctl.rc_time_probertt_starts == 0) 4501 rack->r_ctl.rc_time_probertt_starts = 1; 4502 /* Restore back to our rate we want to pace at in prtt */ 4503 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 4504 } 4505 /* 4506 * Setup our end time, some number of gp_srtts plus 200ms. 4507 */ 4508 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 4509 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 4510 if (rack_probertt_gpsrtt_cnt_div) 4511 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 4512 else 4513 endtime = 0; 4514 endtime += rack_min_probertt_hold; 4515 endtime += rack->r_ctl.rc_time_probertt_starts; 4516 if (TSTMP_GEQ(us_cts, endtime)) { 4517 /* yes, exit probertt */ 4518 rack_exit_probertt(rack, us_cts); 4519 } 4520 4521 } else if ((rack->rc_skip_timely == 0) && 4522 (TSTMP_GT(us_cts, rack->r_ctl.rc_lower_rtt_us_cts)) && 4523 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt)) { 4524 /* Go into probertt, its been too long since we went lower */ 4525 rack_enter_probertt(rack, us_cts); 4526 } 4527 } 4528 4529 static void 4530 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 4531 uint32_t rtt, int32_t rtt_diff) 4532 { 4533 uint64_t cur_bw, up_bnd, low_bnd, subfr; 4534 uint32_t losses; 4535 4536 if ((rack->rc_gp_dyn_mul == 0) || 4537 (rack->use_fixed_rate) || 4538 (rack->in_probe_rtt) || 4539 (rack->rc_always_pace == 0)) { 4540 /* No dynamic GP multiplier in play */ 4541 return; 4542 } 4543 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 4544 cur_bw = rack_get_bw(rack); 4545 /* Calculate our up and down range */ 4546 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 4547 up_bnd /= 100; 4548 up_bnd += rack->r_ctl.last_gp_comp_bw; 4549 4550 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 4551 subfr /= 100; 4552 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 4553 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 4554 /* 4555 * This is the case where our RTT is above 4556 * the max target and we have been configured 4557 * to just do timely no bonus up stuff in that case. 4558 * 4559 * There are two configurations, set to 1, and we 4560 * just do timely if we are over our max. 
If it's 4561 * set above 1 then we slam the multipliers down 4562 * to 100 and then decrement per timely. 4563 */ 4564 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4565 __LINE__, 3); 4566 if (rack->r_ctl.rc_no_push_at_mrtt > 1) 4567 rack_validate_multipliers_at_or_below_100(rack); 4568 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4569 } else if ((timely_says != 0) && (last_bw_est < low_bnd) && !losses) { 4570 /* 4571 * We are decreasing; this is a bit complicated. It 4572 * means we are losing ground. This could be 4573 * because another flow entered and we are competing 4574 * for b/w with it. This will push the RTT up which 4575 * makes timely unusable unless we want to get shoved 4576 * into a corner and just be backed off (the age 4577 * old problem with delay based CC). 4578 * 4579 * On the other hand if it was a route change we 4580 * would like to stay somewhat contained and not 4581 * blow out the buffers. 4582 */ 4583 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4584 __LINE__, 3); 4585 rack->r_ctl.last_gp_comp_bw = cur_bw; 4586 if (rack->rc_gp_bwred == 0) { 4587 /* Go into reduction counting */ 4588 rack->rc_gp_bwred = 1; 4589 rack->rc_gp_timely_dec_cnt = 0; 4590 } 4591 if (rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) { 4592 /* 4593 * Push another time with a faster pacing 4594 * to try to gain back (we include override to 4595 * get a full raise factor). 4596 */ 4597 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || 4598 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || 4599 (timely_says == 0) || 4600 (rack_down_raise_thresh == 0)) { 4601 /* 4602 * Do an override up in b/w if we were 4603 * below the threshold or if the threshold 4604 * is zero we always do the raise. 4605 */ 4606 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1); 4607 } else { 4608 /* Log it stays the same */ 4609 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0, 4610 __LINE__, 11); 4611 } 4612 rack->rc_gp_timely_dec_cnt++; 4613 /* We are not incrementing, so clear the increment count */ 4614 rack->rc_gp_incr = 0; 4615 rack->rc_gp_timely_inc_cnt = 0; 4616 } else { 4617 /* 4618 * Let's just use the RTT 4619 * information and give up 4620 * pushing. 4621 */ 4622 goto use_timely; 4623 } 4624 } else if ((timely_says != 2) && 4625 !losses && 4626 (last_bw_est > up_bnd)) { 4627 /* 4628 * We are increasing b/w, let's keep going, updating 4629 * our b/w and ignoring any timely input, unless 4630 * of course we are at our max raise (if there is one). 4631 */ 4632 4633 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4634 __LINE__, 3); 4635 rack->r_ctl.last_gp_comp_bw = cur_bw; 4636 if (rack->rc_gp_saw_ss && 4637 rack->r_ctl.rack_per_upper_bound_ss && 4638 (rack->r_ctl.rack_per_of_gp_ss == rack->r_ctl.rack_per_upper_bound_ss)) { 4639 /* 4640 * In cases where we can't go higher 4641 * we should just use timely. 4642 */ 4643 goto use_timely; 4644 } 4645 if (rack->rc_gp_saw_ca && 4646 rack->r_ctl.rack_per_upper_bound_ca && 4647 (rack->r_ctl.rack_per_of_gp_ca == rack->r_ctl.rack_per_upper_bound_ca)) { 4648 /* 4649 * In cases where we can't go higher 4650 * we should just use timely.
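 * As a small illustration (the numbers are assumptions, not defaults): if rack_per_upper_bound_ca were 300 and rack_per_of_gp_ca had already been raised to 300, no further b/w driven raise is possible and the decision falls through to the timely (RTT gradient) path.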
4651 */ 4652 goto use_timely; 4653 } 4654 rack->rc_gp_bwred = 0; 4655 rack->rc_gp_timely_dec_cnt = 0; 4656 /* You get a set number of pushes if timely is trying to reduce */ 4657 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { 4658 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4659 } else { 4660 /* Log it stays the same */ 4661 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0, 4662 __LINE__, 12); 4663 } 4664 return; 4665 } else { 4666 /* 4667 * We are staying between the lower and upper range bounds 4668 * so use timely to decide. 4669 */ 4670 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4671 __LINE__, 3); 4672 use_timely: 4673 if (timely_says) { 4674 rack->rc_gp_incr = 0; 4675 rack->rc_gp_timely_inc_cnt = 0; 4676 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && 4677 !losses && 4678 (last_bw_est < low_bnd)) { 4679 /* We are losing ground */ 4680 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4681 rack->rc_gp_timely_dec_cnt++; 4682 /* We are not incrementing, so clear the increment count */ 4683 rack->rc_gp_incr = 0; 4684 rack->rc_gp_timely_inc_cnt = 0; 4685 } else 4686 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4687 } else { 4688 rack->rc_gp_bwred = 0; 4689 rack->rc_gp_timely_dec_cnt = 0; 4690 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4691 } 4692 } 4693 } 4694 4695 static int32_t 4696 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt) 4697 { 4698 int32_t timely_says; 4699 uint64_t log_mult, log_rtt_a_diff; 4700 4701 log_rtt_a_diff = rtt; 4702 log_rtt_a_diff <<= 32; 4703 log_rtt_a_diff |= (uint32_t)rtt_diff; 4704 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * 4705 rack_gp_rtt_maxmul)) { 4706 /* Reduce the b/w multiplier */ 4707 timely_says = 2; 4708 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 4709 log_mult <<= 32; 4710 log_mult |= prev_rtt; 4711 rack_log_timely(rack, timely_says, log_mult, 4712 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4713 log_rtt_a_diff, __LINE__, 4); 4714 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4715 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4716 max(rack_gp_rtt_mindiv , 1)))) { 4717 /* Increase the b/w multiplier */ 4718 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4719 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4720 max(rack_gp_rtt_mindiv , 1)); 4721 log_mult <<= 32; 4722 log_mult |= prev_rtt; 4723 timely_says = 0; 4724 rack_log_timely(rack, timely_says, log_mult , 4725 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4726 log_rtt_a_diff, __LINE__, 5); 4727 } else { 4728 /* 4729 * Use a gradient to decide; the timely gradient 4730 * is: 4731 * grad = rc_rtt_diff / min_rtt; 4732 * 4733 * anything below or equal to 0 will be 4734 * an increase indication. Anything above 4735 * zero is a decrease. Note we take care 4736 * of the actual gradient calculation 4737 * in the reduction (it's not needed for an 4738 * increase).
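 * A worked example with assumed numbers: min_rtt = 20000 usec and rc_rtt_diff = +4000 usec give grad = 0.2 > 0, so timely_says = 1 (reduce); with rc_rtt_diff = -1000 usec the gradient is <= 0 and timely_says = 0 (increase).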
4739 */ 4740 log_mult = prev_rtt; 4741 if (rtt_diff <= 0) { 4742 /* 4743 * Rttdiff is less than zero, increase the 4744 * b/w multiplier (its 0 or negative) 4745 */ 4746 timely_says = 0; 4747 rack_log_timely(rack, timely_says, log_mult, 4748 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4749 } else { 4750 /* Reduce the b/w multiplier */ 4751 timely_says = 1; 4752 rack_log_timely(rack, timely_says, log_mult, 4753 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4754 } 4755 } 4756 return (timely_says); 4757 } 4758 4759 static __inline int 4760 rack_in_gp_window(struct tcpcb *tp, struct rack_sendmap *rsm) 4761 { 4762 if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && 4763 SEQ_LEQ(rsm->r_end, tp->gput_ack)) { 4764 /** 4765 * This covers the case that the 4766 * resent is completely inside 4767 * the gp range or up to it. 4768 * |----------------| 4769 * |-----| <or> 4770 * |----| 4771 * <or> |---| 4772 */ 4773 return (1); 4774 } else if (SEQ_LT(rsm->r_start, tp->gput_seq) && 4775 SEQ_GT(rsm->r_end, tp->gput_seq)){ 4776 /** 4777 * This covers the case of 4778 * |--------------| 4779 * |-------->| 4780 */ 4781 return (1); 4782 } else if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && 4783 SEQ_LT(rsm->r_start, tp->gput_ack) && 4784 SEQ_GEQ(rsm->r_end, tp->gput_ack)) { 4785 4786 /** 4787 * This covers the case of 4788 * |--------------| 4789 * |-------->| 4790 */ 4791 return (1); 4792 } 4793 return (0); 4794 } 4795 4796 static __inline void 4797 rack_mark_in_gp_win(struct tcpcb *tp, struct rack_sendmap *rsm) 4798 { 4799 4800 if ((tp->t_flags & TF_GPUTINPROG) == 0) 4801 return; 4802 /* 4803 * We have a Goodput measurement in progress. Mark 4804 * the send if its within the window. If its not 4805 * in the window make sure it does not have the mark. 4806 */ 4807 if (rack_in_gp_window(tp, rsm)) 4808 rsm->r_flags |= RACK_IN_GP_WIN; 4809 else 4810 rsm->r_flags &= ~RACK_IN_GP_WIN; 4811 } 4812 4813 static __inline void 4814 rack_clear_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) 4815 { 4816 /* A GP measurement is ending, clear all marks on the send map*/ 4817 struct rack_sendmap *rsm = NULL; 4818 4819 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 4820 if (rsm == NULL) { 4821 rsm = tqhash_min(rack->r_ctl.tqh); 4822 } 4823 /* Nothing left? */ 4824 while ((rsm != NULL) && (SEQ_GEQ(tp->gput_ack, rsm->r_start))){ 4825 rsm->r_flags &= ~RACK_IN_GP_WIN; 4826 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4827 } 4828 } 4829 4830 4831 static __inline void 4832 rack_tend_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) 4833 { 4834 struct rack_sendmap *rsm = NULL; 4835 4836 if (tp->snd_una == tp->snd_max) { 4837 /* Nothing outstanding yet, nothing to do here */ 4838 return; 4839 } 4840 if (SEQ_GT(tp->gput_seq, tp->snd_una)) { 4841 /* 4842 * We are measuring ahead of some outstanding 4843 * data. We need to walk through up until we get 4844 * to gp_seq marking so that no rsm is set incorrectly 4845 * with RACK_IN_GP_WIN. 4846 */ 4847 rsm = tqhash_min(rack->r_ctl.tqh); 4848 while (rsm != NULL) { 4849 rack_mark_in_gp_win(tp, rsm); 4850 if (SEQ_GEQ(rsm->r_end, tp->gput_seq)) 4851 break; 4852 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4853 } 4854 } 4855 if (rsm == NULL) { 4856 /* 4857 * Need to find the GP seq, if rsm is 4858 * set we stopped as we hit it. 
4859 */ 4860 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 4861 if (rsm == NULL) 4862 return; 4863 rack_mark_in_gp_win(tp, rsm); 4864 } 4865 /* 4866 * Now we may need to mark already sent rsm, ahead of 4867 * gput_seq in the window since they may have been sent 4868 * *before* we started our measurment. The rsm, if non-null 4869 * has been marked (note if rsm would have been NULL we would have 4870 * returned in the previous block). So we go to the next, and continue 4871 * until we run out of entries or we exceed the gp_ack value. 4872 */ 4873 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4874 while (rsm) { 4875 rack_mark_in_gp_win(tp, rsm); 4876 if (SEQ_GT(rsm->r_end, tp->gput_ack)) 4877 break; 4878 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4879 } 4880 } 4881 4882 static void 4883 rack_log_gp_calc(struct tcp_rack *rack, uint32_t add_part, uint32_t sub_part, uint32_t srtt, uint64_t meas_bw, uint64_t utim, uint8_t meth, uint32_t line) 4884 { 4885 if (tcp_bblogging_on(rack->rc_tp)) { 4886 union tcp_log_stackspecific log; 4887 struct timeval tv; 4888 4889 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4890 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4891 log.u_bbr.flex1 = add_part; 4892 log.u_bbr.flex2 = sub_part; 4893 log.u_bbr.flex3 = rack_wma_divisor; 4894 log.u_bbr.flex4 = srtt; 4895 log.u_bbr.flex7 = (uint16_t)line; 4896 log.u_bbr.flex8 = meth; 4897 log.u_bbr.delRate = rack->r_ctl.gp_bw; 4898 log.u_bbr.cur_del_rate = meas_bw; 4899 log.u_bbr.rttProp = utim; 4900 TCP_LOG_EVENTP(rack->rc_tp, NULL, 4901 &rack->rc_inp->inp_socket->so_rcv, 4902 &rack->rc_inp->inp_socket->so_snd, 4903 BBR_LOG_THRESH_CALC, 0, 4904 0, &log, false, &rack->r_ctl.act_rcv_time); 4905 } 4906 } 4907 4908 static void 4909 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 4910 tcp_seq th_ack, int line, uint8_t quality) 4911 { 4912 uint64_t tim, bytes_ps, stim, utim; 4913 uint32_t segsiz, bytes, reqbytes, us_cts; 4914 int32_t gput, new_rtt_diff, timely_says; 4915 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 4916 int did_add = 0; 4917 4918 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4919 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4920 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 4921 tim = us_cts - tp->gput_ts; 4922 else 4923 tim = 0; 4924 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 4925 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 4926 else 4927 stim = 0; 4928 /* 4929 * Use the larger of the send time or ack time. This prevents us 4930 * from being influenced by ack artifacts to come up with too 4931 * high of measurement. Note that since we are spanning over many more 4932 * bytes in most of our measurements hopefully that is less likely to 4933 * occur. 4934 */ 4935 if (tim > stim) 4936 utim = max(tim, 1); 4937 else 4938 utim = max(stim, 1); 4939 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 4940 rack_log_gpset(rack, th_ack, us_cts, rack->r_ctl.rc_gp_cumack_ts, __LINE__, 3, NULL); 4941 if ((tim == 0) && (stim == 0)) { 4942 /* 4943 * Invalid measurement time, maybe 4944 * all on one ack/one send? 4945 */ 4946 bytes = 0; 4947 bytes_ps = 0; 4948 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4949 0, 0, 0, 10, __LINE__, NULL, quality); 4950 goto skip_measurement; 4951 } 4952 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 4953 /* We never made a us_rtt measurement? 
*/ 4954 bytes = 0; 4955 bytes_ps = 0; 4956 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4957 0, 0, 0, 10, __LINE__, NULL, quality); 4958 goto skip_measurement; 4959 } 4960 /* 4961 * Calculate the maximum possible b/w this connection 4962 * could have. We base our calculation on the lowest 4963 * rtt we have seen during the measurement and the 4964 * largest rwnd the client has given us in that time. This 4965 * forms a BDP that is the maximum that we could ever 4966 * get to the client. Anything larger is not valid. 4967 * 4968 * I originally had code here that rejected measurements 4969 * where the time was less than 1/2 the latest us_rtt. 4970 * But after thinking on that I realized its wrong since 4971 * say you had a 150Mbps or even 1Gbps link, and you 4972 * were a long way away.. example I am in Europe (100ms rtt) 4973 * talking to my 1Gbps link in S.C. Now measuring say 150,000 4974 * bytes my time would be 1.2ms, and yet my rtt would say 4975 * the measurement was invalid the time was < 50ms. The 4976 * same thing is true for 150Mb (8ms of time). 4977 * 4978 * A better way I realized is to look at what the maximum 4979 * the connection could possibly do. This is gated on 4980 * the lowest RTT we have seen and the highest rwnd. 4981 * We should in theory never exceed that, if we are 4982 * then something on the path is storing up packets 4983 * and then feeding them all at once to our endpoint 4984 * messing up our measurement. 4985 */ 4986 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 4987 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 4988 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 4989 if (SEQ_LT(th_ack, tp->gput_seq)) { 4990 /* No measurement can be made */ 4991 bytes = 0; 4992 bytes_ps = 0; 4993 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4994 0, 0, 0, 10, __LINE__, NULL, quality); 4995 goto skip_measurement; 4996 } else 4997 bytes = (th_ack - tp->gput_seq); 4998 bytes_ps = (uint64_t)bytes; 4999 /* 5000 * Don't measure a b/w for pacing unless we have gotten at least 5001 * an initial windows worth of data in this measurement interval. 5002 * 5003 * Small numbers of bytes get badly influenced by delayed ack and 5004 * other artifacts. Note we take the initial window or our 5005 * defined minimum GP (defaulting to 10 which hopefully is the 5006 * IW). 5007 */ 5008 if (rack->rc_gp_filled == 0) { 5009 /* 5010 * The initial estimate is special. We 5011 * have blasted out an IW worth of packets 5012 * without a real valid ack ts results. We 5013 * then setup the app_limited_needs_set flag, 5014 * this should get the first ack in (probably 2 5015 * MSS worth) to be recorded as the timestamp. 5016 * We thus allow a smaller number of bytes i.e. 5017 * IW - 2MSS. 5018 */ 5019 reqbytes -= (2 * segsiz); 5020 /* Also lets fill previous for our first measurement to be neutral */ 5021 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 5022 } 5023 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 5024 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5025 rack->r_ctl.rc_app_limited_cnt, 5026 0, 0, 10, __LINE__, NULL, quality); 5027 goto skip_measurement; 5028 } 5029 /* 5030 * We now need to calculate the Timely like status so 5031 * we can update (possibly) the b/w multipliers. 
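 * The smoothing below (used once a prior reading exists and no probe-rtt was seen) is a 1/8 EWMA; as an illustration with assumed values, rc_rtt_diff = 8000 and new_rtt_diff = 1600 become 8000 - 8000/8 + 1600/8 = 7200.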
5032 */ 5033 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 5034 if (rack->rc_gp_filled == 0) { 5035 /* No previous reading */ 5036 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 5037 } else { 5038 if (rack->measure_saw_probe_rtt == 0) { 5039 /* 5040 * We don't want a probertt to be counted 5041 * since it will be negative incorrectly. We 5042 * expect to be reducing the RTT when we 5043 * pace at a slower rate. 5044 */ 5045 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 5046 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 5047 } 5048 } 5049 timely_says = rack_make_timely_judgement(rack, 5050 rack->r_ctl.rc_gp_srtt, 5051 rack->r_ctl.rc_rtt_diff, 5052 rack->r_ctl.rc_prev_gp_srtt 5053 ); 5054 bytes_ps *= HPTS_USEC_IN_SEC; 5055 bytes_ps /= utim; 5056 if (bytes_ps > rack->r_ctl.last_max_bw) { 5057 /* 5058 * Something is on path playing 5059 * since this b/w is not possible based 5060 * on our BDP (highest rwnd and lowest rtt 5061 * we saw in the measurement window). 5062 * 5063 * Another option here would be to 5064 * instead skip the measurement. 5065 */ 5066 rack_log_pacing_delay_calc(rack, bytes, reqbytes, 5067 bytes_ps, rack->r_ctl.last_max_bw, 0, 5068 11, __LINE__, NULL, quality); 5069 bytes_ps = rack->r_ctl.last_max_bw; 5070 } 5071 /* We store gp for b/w in bytes per second */ 5072 if (rack->rc_gp_filled == 0) { 5073 /* Initial measurement */ 5074 if (bytes_ps) { 5075 rack->r_ctl.gp_bw = bytes_ps; 5076 rack->rc_gp_filled = 1; 5077 rack->r_ctl.num_measurements = 1; 5078 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 5079 } else { 5080 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5081 rack->r_ctl.rc_app_limited_cnt, 5082 0, 0, 10, __LINE__, NULL, quality); 5083 } 5084 if (tcp_in_hpts(rack->rc_tp) && 5085 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 5086 /* 5087 * Ok we can't trust the pacer in this case 5088 * where we transition from un-paced to paced. 5089 * Or for that matter when the burst mitigation 5090 * was making a wild guess and got it wrong. 5091 * Stop the pacer and clear up all the aggregate 5092 * delays etc. 5093 */ 5094 tcp_hpts_remove(rack->rc_tp); 5095 rack->r_ctl.rc_hpts_flags = 0; 5096 rack->r_ctl.rc_last_output_to = 0; 5097 } 5098 did_add = 2; 5099 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { 5100 /* Still a small number run an average */ 5101 rack->r_ctl.gp_bw += bytes_ps; 5102 addpart = rack->r_ctl.num_measurements; 5103 rack->r_ctl.num_measurements++; 5104 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 5105 /* We have collected enough to move forward */ 5106 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; 5107 } 5108 rack_set_pace_segments(tp, rack, __LINE__, NULL); 5109 did_add = 3; 5110 } else { 5111 /* 5112 * We want to take 1/wma of the goodput and add in to 7/8th 5113 * of the old value weighted by the srtt. So if your measurement 5114 * period is say 2 SRTT's long you would get 1/4 as the 5115 * value, if it was like 1/2 SRTT then you would get 1/16th. 5116 * 5117 * But we must be careful not to take too much i.e. if the 5118 * srtt is say 20ms and the measurement is taken over 5119 * 400ms our weight would be 400/20 i.e. 20. On the 5120 * other hand if we get a measurement over 1ms with a 5121 * 10ms rtt we only want to take a much smaller portion. 
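 * A sketch of the non-dynamic weighting below with assumed values: utim = 2 * srtt gives subpart = gp_bw * 2 / 8 = gp_bw / 4 and addpart = bytes_ps / 4, so the new gp_bw is 3/4 of the old estimate plus 1/4 of this measurement; the 1/2 cap (meth 2) keeps one very long measurement from dominating.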
5122 */ 5123 uint8_t meth; 5124 5125 if (rack->r_ctl.num_measurements < 0xff) { 5126 rack->r_ctl.num_measurements++; 5127 } 5128 srtt = (uint64_t)tp->t_srtt; 5129 if (srtt == 0) { 5130 /* 5131 * Strange, why did t_srtt go back to zero? 5132 */ 5133 if (rack->r_ctl.rc_rack_min_rtt) 5134 srtt = rack->r_ctl.rc_rack_min_rtt; 5135 else 5136 srtt = HPTS_USEC_IN_MSEC; 5137 } 5138 /* 5139 * XXXrrs: Note for reviewers, in playing with 5140 * dynamic pacing I discovered this GP calculation 5141 * as done originally leads to some undesired results. 5142 * Basically you can get longer measurements contributing 5143 * too much to the WMA. Thus I changed it so that, if you are doing 5144 * dynamic adjustments, we only do the apportioned adjustment 5145 * if we have a very small (time wise) measurement. Longer 5146 * measurements just get their weight (defaulting to 1/8) 5147 * added to the WMA. We may want to think about changing 5148 * this to always do that for both sides i.e. dynamic 5149 * and non-dynamic... but considering lots of folks 5150 * were playing with this I did not want to change the 5151 * calculation per se without your thoughts... Lawerence? 5152 * Peter?? 5153 */ 5154 if (rack->rc_gp_dyn_mul == 0) { 5155 subpart = rack->r_ctl.gp_bw * utim; 5156 subpart /= (srtt * 8); 5157 if (subpart < (rack->r_ctl.gp_bw / 2)) { 5158 /* 5159 * The b/w update takes no more 5160 * away than 1/2 of our running total 5161 * so factor it in. 5162 */ 5163 addpart = bytes_ps * utim; 5164 addpart /= (srtt * 8); 5165 meth = 1; 5166 } else { 5167 /* 5168 * Don't allow a single measurement 5169 * to account for more than 1/2 of the 5170 * WMA. This could happen on a retransmission 5171 * where utim becomes huge compared to 5172 * srtt (multiple retransmissions when using 5173 * the sending rate which factors in all the 5174 * transmissions from the first one). 5175 */ 5176 subpart = rack->r_ctl.gp_bw / 2; 5177 addpart = bytes_ps / 2; 5178 meth = 2; 5179 } 5180 rack_log_gp_calc(rack, addpart, subpart, srtt, bytes_ps, utim, meth, __LINE__); 5181 resid_bw = rack->r_ctl.gp_bw - subpart; 5182 rack->r_ctl.gp_bw = resid_bw + addpart; 5183 did_add = 1; 5184 } else { 5185 if ((utim / srtt) <= 1) { 5186 /* 5187 * The b/w update was over a small period 5188 * of time. The idea here is to prevent a small 5189 * measurement time period from counting 5190 * too much. So we scale it based on the 5191 * time so it attributes less than 1/rack_wma_divisor 5192 * of its measurement. 5193 */ 5194 subpart = rack->r_ctl.gp_bw * utim; 5195 subpart /= (srtt * rack_wma_divisor); 5196 addpart = bytes_ps * utim; 5197 addpart /= (srtt * rack_wma_divisor); 5198 meth = 3; 5199 } else { 5200 /* 5201 * The scaled measurement was long 5202 * enough, so let's just add in the 5203 * portion of the measurement i.e. 1/rack_wma_divisor 5204 */ 5205 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; 5206 addpart = bytes_ps / rack_wma_divisor; 5207 meth = 4; 5208 } 5209 if ((rack->measure_saw_probe_rtt == 0) || 5210 (bytes_ps > rack->r_ctl.gp_bw)) { 5211 /* 5212 * For probe-rtt we only add it in 5213 * if it's larger, all others we just 5214 * add in. 5215 */ 5216 did_add = 1; 5217 rack_log_gp_calc(rack, addpart, subpart, srtt, bytes_ps, utim, meth, __LINE__); 5218 resid_bw = rack->r_ctl.gp_bw - subpart; 5219 rack->r_ctl.gp_bw = resid_bw + addpart; 5220 } 5221 } 5222 rack_set_pace_segments(tp, rack, __LINE__, NULL); 5223 } 5224 /* 5225 * We only watch the growth of the GP during the initial startup 5226 * or first-slowstart that ensues.
If we ever needed to watch 5227 * growth of gp outside of that period all we need to do is 5228 * remove the first clause of this if (rc_initial_ss_comp). 5229 */ 5230 if ((rack->rc_initial_ss_comp == 0) && 5231 (rack->r_ctl.num_measurements >= RACK_REQ_AVG)) { 5232 uint64_t gp_est; 5233 5234 gp_est = bytes_ps; 5235 if (tcp_bblogging_on(rack->rc_tp)) { 5236 union tcp_log_stackspecific log; 5237 struct timeval tv; 5238 5239 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5240 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5241 log.u_bbr.flex1 = rack->r_ctl.current_round; 5242 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; 5243 log.u_bbr.delRate = gp_est; 5244 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; 5245 log.u_bbr.flex8 = 41; 5246 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5247 0, &log, false, NULL, __func__, __LINE__,&tv); 5248 } 5249 if ((rack->r_ctl.num_measurements == RACK_REQ_AVG) || 5250 (rack->r_ctl.last_gpest == 0)) { 5251 /* 5252 * The round we get our measurement averaging going 5253 * is the base round so it always is the source point 5254 * for when we had our first increment. From there on 5255 * we only record the round that had a rise. 5256 */ 5257 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; 5258 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; 5259 } else if (gp_est >= rack->r_ctl.last_gpest) { 5260 /* 5261 * Test to see if its gone up enough 5262 * to set the round count up to now. Note 5263 * that on the seeding of the 4th measurement we 5264 */ 5265 gp_est *= 1000; 5266 gp_est /= rack->r_ctl.last_gpest; 5267 if ((uint32_t)gp_est > rack->r_ctl.gp_gain_req) { 5268 /* 5269 * We went up enough to record the round. 5270 */ 5271 if (tcp_bblogging_on(rack->rc_tp)) { 5272 union tcp_log_stackspecific log; 5273 struct timeval tv; 5274 5275 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5276 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5277 log.u_bbr.flex1 = rack->r_ctl.current_round; 5278 log.u_bbr.flex2 = (uint32_t)gp_est; 5279 log.u_bbr.flex3 = rack->r_ctl.gp_gain_req; 5280 log.u_bbr.delRate = gp_est; 5281 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; 5282 log.u_bbr.flex8 = 42; 5283 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5284 0, &log, false, NULL, __func__, __LINE__,&tv); 5285 } 5286 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; 5287 if (rack->r_ctl.use_gp_not_last == 1) 5288 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; 5289 else 5290 rack->r_ctl.last_gpest = bytes_ps; 5291 } 5292 } 5293 } 5294 if ((rack->gp_ready == 0) && 5295 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 5296 /* We have enough measurements now */ 5297 rack->gp_ready = 1; 5298 if (rack->dgp_on || 5299 rack->rack_hibeta) 5300 rack_set_cc_pacing(rack); 5301 if (rack->defer_options) 5302 rack_apply_deferred_options(rack); 5303 } 5304 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 5305 rack_get_bw(rack), 22, did_add, NULL, quality); 5306 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 5307 5308 if ((rack->measure_saw_probe_rtt == 0) && 5309 rack->rc_gp_rtt_set) { 5310 if (rack->rc_skip_timely == 0) { 5311 rack_update_multiplier(rack, timely_says, bytes_ps, 5312 rack->r_ctl.rc_gp_srtt, 5313 rack->r_ctl.rc_rtt_diff); 5314 } 5315 } 5316 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 5317 rack_get_bw(rack), 3, line, NULL, quality); 5318 rack_log_pacing_delay_calc(rack, 5319 bytes, /* flex2 */ 5320 tim, /* flex1 */ 5321 bytes_ps, /* bw_inuse */ 5322 rack->r_ctl.gp_bw, /* delRate */ 
5323 rack_get_lt_bw(rack), /* rttProp */ 5324 20, line, NULL, 0); 5325 /* reset the gp srtt and setup the new prev */ 5326 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 5327 /* Record the lost count for the next measurement */ 5328 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 5329 skip_measurement: 5330 /* 5331 * We restart our diffs based on the gpsrtt in the 5332 * measurement window. 5333 */ 5334 rack->rc_gp_rtt_set = 0; 5335 rack->rc_gp_saw_rec = 0; 5336 rack->rc_gp_saw_ca = 0; 5337 rack->rc_gp_saw_ss = 0; 5338 rack->rc_dragged_bottom = 0; 5339 if (quality == RACK_QUALITY_HIGH) { 5340 /* 5341 * Gput in the stats world is in kbps where bytes_ps is 5342 * bytes per second so we do ((x * 8)/ 1000). 5343 */ 5344 gput = (int32_t)((bytes_ps << 3) / (uint64_t)1000); 5345 #ifdef STATS 5346 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 5347 gput); 5348 /* 5349 * XXXLAS: This is a temporary hack, and should be 5350 * chained off VOI_TCP_GPUT when stats(9) grows an 5351 * API to deal with chained VOIs. 5352 */ 5353 if (tp->t_stats_gput_prev > 0) 5354 stats_voi_update_abs_s32(tp->t_stats, 5355 VOI_TCP_GPUT_ND, 5356 ((gput - tp->t_stats_gput_prev) * 100) / 5357 tp->t_stats_gput_prev); 5358 #endif 5359 tp->t_stats_gput_prev = gput; 5360 } 5361 tp->t_flags &= ~TF_GPUTINPROG; 5362 /* 5363 * Now are we app limited now and there is space from where we 5364 * were to where we want to go? 5365 * 5366 * We don't do the other case i.e. non-applimited here since 5367 * the next send will trigger us picking up the missing data. 5368 */ 5369 if (rack->r_ctl.rc_first_appl && 5370 TCPS_HAVEESTABLISHED(tp->t_state) && 5371 rack->r_ctl.rc_app_limited_cnt && 5372 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 5373 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > 5374 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 5375 /* 5376 * Yep there is enough outstanding to make a measurement here. 5377 */ 5378 struct rack_sendmap *rsm; 5379 5380 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 5381 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 5382 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 5383 rack->app_limited_needs_set = 0; 5384 tp->gput_seq = th_ack; 5385 if (rack->in_probe_rtt) 5386 rack->measure_saw_probe_rtt = 1; 5387 else if ((rack->measure_saw_probe_rtt) && 5388 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 5389 rack->measure_saw_probe_rtt = 0; 5390 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { 5391 /* There is a full window to gain info from */ 5392 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 5393 } else { 5394 /* We can only measure up to the applimited point */ 5395 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); 5396 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 5397 /* 5398 * We don't have enough to make a measurement. 5399 */ 5400 tp->t_flags &= ~TF_GPUTINPROG; 5401 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 5402 0, 0, 0, 6, __LINE__, NULL, quality); 5403 return; 5404 } 5405 } 5406 if (tp->t_state >= TCPS_FIN_WAIT_1) { 5407 /* 5408 * We will get no more data into the SB 5409 * this means we need to have the data available 5410 * before we start a measurement. 5411 */ 5412 if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) { 5413 /* Nope not enough data. 
*/ 5414 return; 5415 } 5416 } 5417 tp->t_flags |= TF_GPUTINPROG; 5418 /* 5419 * Now we need to find the timestamp of the send at tp->gput_seq 5420 * for the send based measurement. 5421 */ 5422 rack->r_ctl.rc_gp_cumack_ts = 0; 5423 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 5424 if (rsm) { 5425 /* Ok send-based limit is set */ 5426 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 5427 /* 5428 * Move back to include the earlier part 5429 * so our ack time lines up right (this may 5430 * make an overlapping measurement but thats 5431 * ok). 5432 */ 5433 tp->gput_seq = rsm->r_start; 5434 } 5435 if (rsm->r_flags & RACK_ACKED) { 5436 struct rack_sendmap *nrsm; 5437 5438 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 5439 tp->gput_seq = rsm->r_end; 5440 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); 5441 if (nrsm) 5442 rsm = nrsm; 5443 else { 5444 rack->app_limited_needs_set = 1; 5445 } 5446 } else 5447 rack->app_limited_needs_set = 1; 5448 /* We always go from the first send */ 5449 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; 5450 } else { 5451 /* 5452 * If we don't find the rsm due to some 5453 * send-limit set the current time, which 5454 * basically disables the send-limit. 5455 */ 5456 struct timeval tv; 5457 5458 microuptime(&tv); 5459 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 5460 } 5461 rack_tend_gp_marks(tp, rack); 5462 rack_log_pacing_delay_calc(rack, 5463 tp->gput_seq, 5464 tp->gput_ack, 5465 (uintptr_t)rsm, 5466 tp->gput_ts, 5467 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 5468 9, 5469 __LINE__, rsm, quality); 5470 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 5471 } else { 5472 /* 5473 * To make sure proper timestamp merging occurs, we need to clear 5474 * all GP marks if we don't start a measurement. 5475 */ 5476 rack_clear_gp_marks(tp, rack); 5477 } 5478 } 5479 5480 /* 5481 * CC wrapper hook functions 5482 */ 5483 static void 5484 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 5485 uint16_t type, int32_t post_recovery) 5486 { 5487 uint32_t prior_cwnd, acked; 5488 struct tcp_log_buffer *lgb = NULL; 5489 uint8_t labc_to_use, quality; 5490 5491 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5492 tp->t_ccv.nsegs = nsegs; 5493 acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una); 5494 if ((post_recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 5495 uint32_t max; 5496 5497 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 5498 if (tp->t_ccv.bytes_this_ack > max) { 5499 tp->t_ccv.bytes_this_ack = max; 5500 } 5501 } 5502 #ifdef STATS 5503 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 5504 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 5505 #endif 5506 if ((th_ack == tp->snd_max) && rack->lt_bw_up) { 5507 /* 5508 * We will ack all the data, time to end any 5509 * lt_bw_up we have running until something 5510 * new is sent. Note we need to use the actual 5511 * ack_rcv_time which with pacing may be different. 
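 * Roughly speaking the long-term estimate works out to lt_bw_bytes over lt_bw_time; the block below banks the bytes up to snd_max and the elapsed time up to this ack, then pauses the accounting until new data is sent.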
5512 */ 5513 uint64_t tmark; 5514 5515 rack->r_ctl.lt_bw_bytes += (tp->snd_max - rack->r_ctl.lt_seq); 5516 rack->r_ctl.lt_seq = tp->snd_max; 5517 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 5518 if (tmark >= rack->r_ctl.lt_timemark) { 5519 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 5520 } 5521 rack->r_ctl.lt_timemark = tmark; 5522 rack->lt_bw_up = 0; 5523 } 5524 quality = RACK_QUALITY_NONE; 5525 if ((tp->t_flags & TF_GPUTINPROG) && 5526 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { 5527 /* Measure the Goodput */ 5528 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); 5529 } 5530 /* Which way our we limited, if not cwnd limited no advance in CA */ 5531 if (tp->snd_cwnd <= tp->snd_wnd) 5532 tp->t_ccv.flags |= CCF_CWND_LIMITED; 5533 else 5534 tp->t_ccv.flags &= ~CCF_CWND_LIMITED; 5535 if (tp->snd_cwnd > tp->snd_ssthresh) { 5536 tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack, 5537 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 5538 /* For the setting of a window past use the actual scwnd we are using */ 5539 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 5540 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 5541 tp->t_ccv.flags |= CCF_ABC_SENTAWND; 5542 } 5543 } else { 5544 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 5545 tp->t_bytes_acked = 0; 5546 } 5547 prior_cwnd = tp->snd_cwnd; 5548 if ((post_recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 5549 (rack_client_low_buf && rack->client_bufferlvl && 5550 (rack->client_bufferlvl < rack_client_low_buf))) 5551 labc_to_use = rack->rc_labc; 5552 else 5553 labc_to_use = rack_max_abc_post_recovery; 5554 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5555 union tcp_log_stackspecific log; 5556 struct timeval tv; 5557 5558 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5559 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5560 log.u_bbr.flex1 = th_ack; 5561 log.u_bbr.flex2 = tp->t_ccv.flags; 5562 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 5563 log.u_bbr.flex4 = tp->t_ccv.nsegs; 5564 log.u_bbr.flex5 = labc_to_use; 5565 log.u_bbr.flex6 = prior_cwnd; 5566 log.u_bbr.flex7 = V_tcp_do_newsack; 5567 log.u_bbr.flex8 = 1; 5568 lgb = tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5569 0, &log, false, NULL, __func__, __LINE__,&tv); 5570 } 5571 if (CC_ALGO(tp)->ack_received != NULL) { 5572 /* XXXLAS: Find a way to live without this */ 5573 tp->t_ccv.curack = th_ack; 5574 tp->t_ccv.labc = labc_to_use; 5575 tp->t_ccv.flags |= CCF_USE_LOCAL_ABC; 5576 CC_ALGO(tp)->ack_received(&tp->t_ccv, type); 5577 } 5578 if (lgb) { 5579 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; 5580 } 5581 if (rack->r_must_retran) { 5582 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { 5583 /* 5584 * We now are beyond the rxt point so lets disable 5585 * the flag. 5586 */ 5587 rack->r_ctl.rc_out_at_rto = 0; 5588 rack->r_must_retran = 0; 5589 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { 5590 /* 5591 * Only decrement the rc_out_at_rto if the cwnd advances 5592 * at least a whole segment. Otherwise next time the peer 5593 * acks, we won't be able to send this generaly happens 5594 * when we are in Congestion Avoidance. 
5595 */ 5596 if (acked <= rack->r_ctl.rc_out_at_rto){ 5597 rack->r_ctl.rc_out_at_rto -= acked; 5598 } else { 5599 rack->r_ctl.rc_out_at_rto = 0; 5600 } 5601 } 5602 } 5603 #ifdef STATS 5604 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); 5605 #endif 5606 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { 5607 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; 5608 } 5609 if ((rack->rc_initial_ss_comp == 0) && 5610 (tp->snd_cwnd >= tp->snd_ssthresh)) { 5611 /* 5612 * The cwnd has grown beyond ssthresh we have 5613 * entered ca and completed our first Slowstart. 5614 */ 5615 rack->rc_initial_ss_comp = 1; 5616 } 5617 } 5618 5619 static void 5620 tcp_rack_partialack(struct tcpcb *tp) 5621 { 5622 struct tcp_rack *rack; 5623 5624 rack = (struct tcp_rack *)tp->t_fb_ptr; 5625 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5626 /* 5627 * If we are doing PRR and have enough 5628 * room to send <or> we are pacing and prr 5629 * is disabled we will want to see if we 5630 * can send data (by setting r_wanted_output to 5631 * true). 5632 */ 5633 if ((rack->r_ctl.rc_prr_sndcnt > 0) || 5634 rack->rack_no_prr) 5635 rack->r_wanted_output = 1; 5636 } 5637 5638 static void 5639 rack_exit_recovery(struct tcpcb *tp, struct tcp_rack *rack, int how) 5640 { 5641 /* 5642 * Now exit recovery. 5643 */ 5644 EXIT_RECOVERY(tp->t_flags); 5645 } 5646 5647 static void 5648 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack) 5649 { 5650 struct tcp_rack *rack; 5651 uint32_t orig_cwnd; 5652 5653 orig_cwnd = tp->snd_cwnd; 5654 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5655 rack = (struct tcp_rack *)tp->t_fb_ptr; 5656 /* only alert CC if we alerted when we entered */ 5657 if (CC_ALGO(tp)->post_recovery != NULL) { 5658 tp->t_ccv.curack = th_ack; 5659 CC_ALGO(tp)->post_recovery(&tp->t_ccv); 5660 if (tp->snd_cwnd < tp->snd_ssthresh) { 5661 /* 5662 * Rack has burst control and pacing 5663 * so lets not set this any lower than 5664 * snd_ssthresh per RFC-6582 (option 2). 5665 */ 5666 tp->snd_cwnd = tp->snd_ssthresh; 5667 } 5668 } 5669 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5670 union tcp_log_stackspecific log; 5671 struct timeval tv; 5672 5673 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5674 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5675 log.u_bbr.flex1 = th_ack; 5676 log.u_bbr.flex2 = tp->t_ccv.flags; 5677 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 5678 log.u_bbr.flex4 = tp->t_ccv.nsegs; 5679 log.u_bbr.flex5 = V_tcp_abc_l_var; 5680 log.u_bbr.flex6 = orig_cwnd; 5681 log.u_bbr.flex7 = V_tcp_do_newsack; 5682 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 5683 log.u_bbr.flex8 = 2; 5684 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5685 0, &log, false, NULL, __func__, __LINE__, &tv); 5686 } 5687 if ((rack->rack_no_prr == 0) && 5688 (rack->no_prr_addback == 0) && 5689 (rack->r_ctl.rc_prr_sndcnt > 0)) { 5690 /* 5691 * Suck the next prr cnt back into cwnd, but 5692 * only do that if we are not application limited. 5693 */ 5694 if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) { 5695 /* 5696 * We are allowed to add back to the cwnd the amount we did 5697 * not get out if: 5698 * a) no_prr_addback is off. 5699 * b) we are not app limited 5700 * c) we are doing prr 5701 * <and> 5702 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 
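 * For example, with assumed values of a 1460 byte maxseg, rack_prr_addbackmax = 2 and rc_prr_sndcnt = 4000, the addback below is min(2920, 4000) = 2920 bytes.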
5703 */ 5704 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 5705 rack->r_ctl.rc_prr_sndcnt); 5706 } 5707 rack->r_ctl.rc_prr_sndcnt = 0; 5708 rack_log_to_prr(rack, 1, 0, __LINE__); 5709 } 5710 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 5711 tp->snd_recover = tp->snd_una; 5712 if (rack->r_ctl.dsack_persist) { 5713 rack->r_ctl.dsack_persist--; 5714 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 5715 rack->r_ctl.num_dsack = 0; 5716 } 5717 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 5718 } 5719 if (rack->rto_from_rec == 1) { 5720 rack->rto_from_rec = 0; 5721 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) 5722 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; 5723 } 5724 rack_exit_recovery(tp, rack, 1); 5725 } 5726 5727 static void 5728 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line) 5729 { 5730 struct tcp_rack *rack; 5731 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 5732 5733 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5734 #ifdef STATS 5735 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 5736 #endif 5737 if (IN_RECOVERY(tp->t_flags) == 0) { 5738 in_rec_at_entry = 0; 5739 ssthresh_enter = tp->snd_ssthresh; 5740 cwnd_enter = tp->snd_cwnd; 5741 } else 5742 in_rec_at_entry = 1; 5743 rack = (struct tcp_rack *)tp->t_fb_ptr; 5744 switch (type) { 5745 case CC_NDUPACK: 5746 tp->t_flags &= ~TF_WASFRECOVERY; 5747 tp->t_flags &= ~TF_WASCRECOVERY; 5748 if (!IN_FASTRECOVERY(tp->t_flags)) { 5749 /* Check if this is the end of the initial Start-up i.e. initial slow-start */ 5750 if (rack->rc_initial_ss_comp == 0) { 5751 /* Yep it is the end of the initial slowstart */ 5752 rack->rc_initial_ss_comp = 1; 5753 } 5754 rack->r_ctl.rc_prr_delivered = 0; 5755 rack->r_ctl.rc_prr_out = 0; 5756 rack->r_fast_output = 0; 5757 if (rack->rack_no_prr == 0) { 5758 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 5759 rack_log_to_prr(rack, 2, in_rec_at_entry, line); 5760 } 5761 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 5762 tp->snd_recover = tp->snd_max; 5763 if (tp->t_flags2 & TF2_ECN_PERMIT) 5764 tp->t_flags2 |= TF2_ECN_SND_CWR; 5765 } 5766 break; 5767 case CC_ECN: 5768 if (!IN_CONGRECOVERY(tp->t_flags) || 5769 /* 5770 * Allow ECN reaction on ACK to CWR, if 5771 * that data segment was also CE marked. 5772 */ 5773 SEQ_GEQ(ack, tp->snd_recover)) { 5774 EXIT_CONGRECOVERY(tp->t_flags); 5775 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 5776 rack->r_fast_output = 0; 5777 tp->snd_recover = tp->snd_max + 1; 5778 if (tp->t_flags2 & TF2_ECN_PERMIT) 5779 tp->t_flags2 |= TF2_ECN_SND_CWR; 5780 } 5781 break; 5782 case CC_RTO: 5783 tp->t_dupacks = 0; 5784 tp->t_bytes_acked = 0; 5785 rack->r_fast_output = 0; 5786 if (IN_RECOVERY(tp->t_flags)) 5787 rack_exit_recovery(tp, rack, 2); 5788 orig_cwnd = tp->snd_cwnd; 5789 rack_log_to_prr(rack, 16, orig_cwnd, line); 5790 if (CC_ALGO(tp)->cong_signal == NULL) { 5791 /* TSNH */ 5792 tp->snd_ssthresh = max(2, 5793 min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 5794 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 5795 tp->snd_cwnd = ctf_fixed_maxseg(tp); 5796 } 5797 if (tp->t_flags2 & TF2_ECN_PERMIT) 5798 tp->t_flags2 |= TF2_ECN_SND_CWR; 5799 break; 5800 case CC_RTO_ERR: 5801 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 5802 /* RTO was unnecessary, so reset everything. 
*/ 5803 tp->snd_cwnd = tp->snd_cwnd_prev; 5804 tp->snd_ssthresh = tp->snd_ssthresh_prev; 5805 tp->snd_recover = tp->snd_recover_prev; 5806 if (tp->t_flags & TF_WASFRECOVERY) { 5807 ENTER_FASTRECOVERY(tp->t_flags); 5808 tp->t_flags &= ~TF_WASFRECOVERY; 5809 } 5810 if (tp->t_flags & TF_WASCRECOVERY) { 5811 ENTER_CONGRECOVERY(tp->t_flags); 5812 tp->t_flags &= ~TF_WASCRECOVERY; 5813 } 5814 tp->snd_nxt = tp->snd_max; 5815 tp->t_badrxtwin = 0; 5816 break; 5817 } 5818 if ((CC_ALGO(tp)->cong_signal != NULL) && 5819 (type != CC_RTO)){ 5820 tp->t_ccv.curack = ack; 5821 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type); 5822 } 5823 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 5824 rack_log_to_prr(rack, 15, cwnd_enter, line); 5825 rack->r_ctl.dsack_byte_cnt = 0; 5826 rack->r_ctl.retran_during_recovery = 0; 5827 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 5828 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 5829 rack->r_ent_rec_ns = 1; 5830 } 5831 } 5832 5833 static inline void 5834 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 5835 { 5836 uint32_t i_cwnd; 5837 5838 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5839 5840 if (CC_ALGO(tp)->after_idle != NULL) 5841 CC_ALGO(tp)->after_idle(&tp->t_ccv); 5842 5843 if (tp->snd_cwnd == 1) 5844 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 5845 else 5846 i_cwnd = rc_init_window(rack); 5847 5848 /* 5849 * Being idle is no different than the initial window. If the cc 5850 * clamps it down below the initial window raise it to the initial 5851 * window. 5852 */ 5853 if (tp->snd_cwnd < i_cwnd) { 5854 tp->snd_cwnd = i_cwnd; 5855 } 5856 } 5857 5858 /* 5859 * Indicate whether this ack should be delayed. We can delay the ack if 5860 * following conditions are met: 5861 * - There is no delayed ack timer in progress. 5862 * - Our last ack wasn't a 0-sized window. We never want to delay 5863 * the ack that opens up a 0-sized window. 5864 * - LRO wasn't used for this segment. We make sure by checking that the 5865 * segment size is not larger than the MSS. 5866 * - Delayed acks are enabled or this is a half-synchronized T/TCP 5867 * connection. 5868 */ 5869 #define DELAY_ACK(tp, tlen) \ 5870 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 5871 ((tp->t_flags & TF_DELACK) == 0) && \ 5872 (tlen <= tp->t_maxseg) && \ 5873 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 5874 5875 static struct rack_sendmap * 5876 rack_find_lowest_rsm(struct tcp_rack *rack) 5877 { 5878 struct rack_sendmap *rsm; 5879 5880 /* 5881 * Walk the time-order transmitted list looking for an rsm that is 5882 * not acked. This will be the one that was sent the longest time 5883 * ago that is still outstanding. 5884 */ 5885 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 5886 if (rsm->r_flags & RACK_ACKED) { 5887 continue; 5888 } 5889 goto finish; 5890 } 5891 finish: 5892 return (rsm); 5893 } 5894 5895 static struct rack_sendmap * 5896 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm) 5897 { 5898 struct rack_sendmap *prsm; 5899 5900 /* 5901 * Walk the sequence order list backward until we hit and arrive at 5902 * the highest seq not acked. In theory when this is called it 5903 * should be the last segment (which it was not). 
5904 */ 5905 prsm = rsm; 5906 5907 TQHASH_FOREACH_REVERSE_FROM(prsm, rack->r_ctl.tqh) { 5908 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) { 5909 continue; 5910 } 5911 return (prsm); 5912 } 5913 return (NULL); 5914 } 5915 5916 static uint32_t 5917 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts, int line, int log_allowed) 5918 { 5919 int32_t lro; 5920 uint32_t thresh; 5921 5922 /* 5923 * lro is the flag we use to determine if we have seen reordering. 5924 * If it gets set we have seen reordering. The reorder logic either 5925 * works in one of two ways: 5926 * 5927 * If reorder-fade is configured, then we track the last time we saw 5928 * re-ordering occur. If we reach the point where enough time has 5929 * passed we no longer consider reordering to be occurring. 5930 * 5931 * Or if reorder-fade is 0, then once we see reordering we consider 5932 * the connection to always be subject to reordering and just set lro 5933 * to 1. 5934 * 5935 * In the end if lro is non-zero we add the extra time for 5936 * reordering in. 5937 */ 5938 if (srtt == 0) 5939 srtt = 1; 5940 if (rack->r_ctl.rc_reorder_ts) { 5941 if (rack->r_ctl.rc_reorder_fade) { 5942 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) { 5943 lro = cts - rack->r_ctl.rc_reorder_ts; 5944 if (lro == 0) { 5945 /* 5946 * No time has passed since the last 5947 * reorder, mark it as reordering. 5948 */ 5949 lro = 1; 5950 } 5951 } else { 5952 /* Negative time? */ 5953 lro = 0; 5954 } 5955 if (lro > rack->r_ctl.rc_reorder_fade) { 5956 /* Turn off reordering seen too */ 5957 rack->r_ctl.rc_reorder_ts = 0; 5958 lro = 0; 5959 } 5960 } else { 5961 /* Reordering does not fade */ 5962 lro = 1; 5963 } 5964 } else { 5965 lro = 0; 5966 } 5967 if (rack->rc_rack_tmr_std_based == 0) { 5968 thresh = srtt + rack->r_ctl.rc_pkt_delay; 5969 } else { 5970 /* Standards based pkt-delay is 1/4 srtt */ 5971 thresh = srtt + (srtt >> 2); 5972 } 5973 if (lro && (rack->rc_rack_tmr_std_based == 0)) { 5974 /* It must be set, if not you get 1/4 rtt */ 5975 if (rack->r_ctl.rc_reorder_shift) 5976 thresh += (srtt >> rack->r_ctl.rc_reorder_shift); 5977 else 5978 thresh += (srtt >> 2); 5979 } 5980 if (rack->rc_rack_use_dsack && 5981 lro && 5982 (rack->r_ctl.num_dsack > 0)) { 5983 /* 5984 * We only increase the reordering window if we 5985 * have seen reordering <and> we have a DSACK count.
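 * As an illustration with assumed numbers: srtt = 40000 usec and num_dsack = 2 add 2 * (40000 / 4) = 20000 usec to thresh; the checks below then cap thresh at 2 * srtt and at rack_rto_max.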
5986 */ 5987 thresh += rack->r_ctl.num_dsack * (srtt >> 2); 5988 if (log_allowed) 5989 rack_log_dsack_event(rack, 4, line, srtt, thresh); 5990 } 5991 /* SRTT * 2 is the ceiling */ 5992 if (thresh > (srtt * 2)) { 5993 thresh = srtt * 2; 5994 } 5995 /* And we don't want it above the RTO max either */ 5996 if (thresh > rack_rto_max) { 5997 thresh = rack_rto_max; 5998 } 5999 if (log_allowed) 6000 rack_log_dsack_event(rack, 6, line, srtt, thresh); 6001 return (thresh); 6002 } 6003 6004 static uint32_t 6005 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 6006 struct rack_sendmap *rsm, uint32_t srtt) 6007 { 6008 struct rack_sendmap *prsm; 6009 uint32_t thresh, len; 6010 int segsiz; 6011 6012 if (srtt == 0) 6013 srtt = 1; 6014 if (rack->r_ctl.rc_tlp_threshold) 6015 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 6016 else 6017 thresh = (srtt * 2); 6018 6019 /* Get the previous sent packet, if any */ 6020 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 6021 len = rsm->r_end - rsm->r_start; 6022 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 6023 /* Exactly like the ID */ 6024 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { 6025 uint32_t alt_thresh; 6026 /* 6027 * Compensate for delayed-ack with the d-ack time. 6028 */ 6029 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6030 if (alt_thresh > thresh) 6031 thresh = alt_thresh; 6032 } 6033 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 6034 /* 2.1 behavior */ 6035 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 6036 if (prsm && (len <= segsiz)) { 6037 /* 6038 * Two packets outstanding, thresh should be (2*srtt) + 6039 * possible inter-packet delay (if any). 6040 */ 6041 uint32_t inter_gap = 0; 6042 int idx, nidx; 6043 6044 idx = rsm->r_rtr_cnt - 1; 6045 nidx = prsm->r_rtr_cnt - 1; 6046 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 6047 /* Yes it was sent later (or at the same time) */ 6048 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 6049 } 6050 thresh += inter_gap; 6051 } else if (len <= segsiz) { 6052 /* 6053 * Possibly compensate for delayed-ack. 6054 */ 6055 uint32_t alt_thresh; 6056 6057 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6058 if (alt_thresh > thresh) 6059 thresh = alt_thresh; 6060 } 6061 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 6062 /* 2.2 behavior */ 6063 if (len <= segsiz) { 6064 uint32_t alt_thresh; 6065 /* 6066 * Compensate for delayed-ack with the d-ack time. 6067 */ 6068 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6069 if (alt_thresh > thresh) 6070 thresh = alt_thresh; 6071 } 6072 } 6073 /* Not above an RTO */ 6074 if (thresh > tp->t_rxtcur) { 6075 thresh = tp->t_rxtcur; 6076 } 6077 /* Not above a RTO max */ 6078 if (thresh > rack_rto_max) { 6079 thresh = rack_rto_max; 6080 } 6081 /* Apply user supplied min TLP */ 6082 if (thresh < rack_tlp_min) { 6083 thresh = rack_tlp_min; 6084 } 6085 return (thresh); 6086 } 6087 6088 static uint32_t 6089 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 6090 { 6091 /* 6092 * We want the rack_rtt which is the 6093 * last rtt we measured. However if that 6094 * does not exist we fallback to the srtt (which 6095 * we probably will never do) and then as a last 6096 * resort we use RACK_INITIAL_RTO if no srtt is 6097 * yet set. 
6098 */ 6099 if (rack->rc_rack_rtt) 6100 return (rack->rc_rack_rtt); 6101 else if (tp->t_srtt == 0) 6102 return (RACK_INITIAL_RTO); 6103 return (tp->t_srtt); 6104 } 6105 6106 static struct rack_sendmap * 6107 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 6108 { 6109 /* 6110 * Check to see that we don't need to fall into recovery. We will 6111 * need to do so if our oldest transmit is past the time we should 6112 * have had an ack. 6113 */ 6114 struct tcp_rack *rack; 6115 struct rack_sendmap *rsm; 6116 int32_t idx; 6117 uint32_t srtt, thresh; 6118 6119 rack = (struct tcp_rack *)tp->t_fb_ptr; 6120 if (tqhash_empty(rack->r_ctl.tqh)) { 6121 return (NULL); 6122 } 6123 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6124 if (rsm == NULL) 6125 return (NULL); 6126 6127 6128 if (rsm->r_flags & RACK_ACKED) { 6129 rsm = rack_find_lowest_rsm(rack); 6130 if (rsm == NULL) 6131 return (NULL); 6132 } 6133 idx = rsm->r_rtr_cnt - 1; 6134 srtt = rack_grab_rtt(tp, rack); 6135 thresh = rack_calc_thresh_rack(rack, srtt, tsused, __LINE__, 1); 6136 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 6137 return (NULL); 6138 } 6139 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 6140 return (NULL); 6141 } 6142 /* Ok if we reach here we are over-due and this guy can be sent */ 6143 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 6144 return (rsm); 6145 } 6146 6147 static uint32_t 6148 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 6149 { 6150 int32_t t; 6151 int32_t tt; 6152 uint32_t ret_val; 6153 6154 t = (tp->t_srtt + (tp->t_rttvar << 2)); 6155 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 6156 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 6157 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 6158 ret_val = (uint32_t)tt; 6159 return (ret_val); 6160 } 6161 6162 static uint32_t 6163 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 6164 { 6165 /* 6166 * Start the FR timer, we do this based on getting the first one in 6167 * the rc_tmap. Note that if its NULL we must stop the timer. in all 6168 * events we need to stop the running timer (if its running) before 6169 * starting the new one. 6170 */ 6171 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 6172 uint32_t srtt_cur; 6173 int32_t idx; 6174 int32_t is_tlp_timer = 0; 6175 struct rack_sendmap *rsm; 6176 6177 if (rack->t_timers_stopped) { 6178 /* All timers have been stopped none are to run */ 6179 return (0); 6180 } 6181 if (rack->rc_in_persist) { 6182 /* We can't start any timer in persists */ 6183 return (rack_get_persists_timer_val(tp, rack)); 6184 } 6185 rack->rc_on_min_to = 0; 6186 if ((tp->t_state < TCPS_ESTABLISHED) || 6187 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 6188 goto activate_rxt; 6189 } 6190 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6191 if ((rsm == NULL) || sup_rack) { 6192 /* Nothing on the send map or no rack */ 6193 activate_rxt: 6194 time_since_sent = 0; 6195 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6196 if (rsm) { 6197 /* 6198 * Should we discount the RTX timer any? 6199 * 6200 * We want to discount it the smallest amount. 6201 * If a timer (Rack/TLP or RXT) has gone off more 6202 * recently thats the discount we want to use (now - timer time). 6203 * If the retransmit of the oldest packet was more recent then 6204 * we want to use that (now - oldest-packet-last_transmit_time). 
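 * For example (assumed values): t_rxtcur = 300000 usec and time_since_sent = 120000 usec arm the RXT timer for 180000 usec; if the discount would consume the whole timeout we fall back to rc_min_to.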
6205 * 6206 */ 6207 idx = rsm->r_rtr_cnt - 1; 6208 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 6209 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 6210 else 6211 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 6212 if (TSTMP_GT(cts, tstmp_touse)) 6213 time_since_sent = cts - tstmp_touse; 6214 } 6215 if (SEQ_LT(tp->snd_una, tp->snd_max) || 6216 sbavail(&tptosocket(tp)->so_snd)) { 6217 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 6218 to = tp->t_rxtcur; 6219 if (to > time_since_sent) 6220 to -= time_since_sent; 6221 else 6222 to = rack->r_ctl.rc_min_to; 6223 if (to == 0) 6224 to = 1; 6225 /* Special case for KEEPINIT */ 6226 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6227 (TP_KEEPINIT(tp) != 0) && 6228 rsm) { 6229 /* 6230 * We have to put a ceiling on the rxt timer 6231 * of the keep-init timeout. 6232 */ 6233 uint32_t max_time, red; 6234 6235 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 6236 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 6237 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 6238 if (red < max_time) 6239 max_time -= red; 6240 else 6241 max_time = 1; 6242 } 6243 /* Reduce timeout to the keep value if needed */ 6244 if (max_time < to) 6245 to = max_time; 6246 } 6247 return (to); 6248 } 6249 return (0); 6250 } 6251 if (rsm->r_flags & RACK_ACKED) { 6252 rsm = rack_find_lowest_rsm(rack); 6253 if (rsm == NULL) { 6254 /* No lowest? */ 6255 goto activate_rxt; 6256 } 6257 } 6258 /* Convert from ms to usecs */ 6259 if ((rsm->r_flags & RACK_SACK_PASSED) || 6260 (rsm->r_flags & RACK_RWND_COLLAPSED) || 6261 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 6262 if ((tp->t_flags & TF_SENTFIN) && 6263 ((tp->snd_max - tp->snd_una) == 1) && 6264 (rsm->r_flags & RACK_HAS_FIN)) { 6265 /* 6266 * We don't start a rack timer if all we have is a 6267 * FIN outstanding. 6268 */ 6269 goto activate_rxt; 6270 } 6271 if ((rack->use_rack_rr == 0) && 6272 (IN_FASTRECOVERY(tp->t_flags)) && 6273 (rack->rack_no_prr == 0) && 6274 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 6275 /* 6276 * We are not cheating, in recovery and 6277 * not enough ack's to yet get our next 6278 * retransmission out. 6279 * 6280 * Note that classified attackers do not 6281 * get to use the rack-cheat. 6282 */ 6283 goto activate_tlp; 6284 } 6285 srtt = rack_grab_rtt(tp, rack); 6286 thresh = rack_calc_thresh_rack(rack, srtt, cts, __LINE__, 1); 6287 idx = rsm->r_rtr_cnt - 1; 6288 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 6289 if (SEQ_GEQ(exp, cts)) { 6290 to = exp - cts; 6291 if (to < rack->r_ctl.rc_min_to) { 6292 to = rack->r_ctl.rc_min_to; 6293 if (rack->r_rr_config == 3) 6294 rack->rc_on_min_to = 1; 6295 } 6296 } else { 6297 to = rack->r_ctl.rc_min_to; 6298 if (rack->r_rr_config == 3) 6299 rack->rc_on_min_to = 1; 6300 } 6301 } else { 6302 /* Ok we need to do a TLP not RACK */ 6303 activate_tlp: 6304 if ((rack->rc_tlp_in_progress != 0) && 6305 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 6306 /* 6307 * The previous send was a TLP and we have sent 6308 * N TLP's without sending new data. 6309 */ 6310 goto activate_rxt; 6311 } 6312 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 6313 if (rsm == NULL) { 6314 /* We found no rsm to TLP with. 
*/ 6315 goto activate_rxt; 6316 } 6317 if (rsm->r_flags & RACK_HAS_FIN) { 6318 /* If it's a FIN we don't do TLP */ 6319 rsm = NULL; 6320 goto activate_rxt; 6321 } 6322 idx = rsm->r_rtr_cnt - 1; 6323 time_since_sent = 0; 6324 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 6325 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 6326 else 6327 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 6328 if (TSTMP_GT(cts, tstmp_touse)) 6329 time_since_sent = cts - tstmp_touse; 6330 is_tlp_timer = 1; 6331 if (tp->t_srtt) { 6332 if ((rack->rc_srtt_measure_made == 0) && 6333 (tp->t_srtt == 1)) { 6334 /* 6335 * If another stack has run and set srtt to 1, 6336 * then the srtt was 0, so let's use the initial value. 6337 */ 6338 srtt = RACK_INITIAL_RTO; 6339 } else { 6340 srtt_cur = tp->t_srtt; 6341 srtt = srtt_cur; 6342 } 6343 } else 6344 srtt = RACK_INITIAL_RTO; 6345 /* 6346 * If the SRTT is not keeping up and the 6347 * rack RTT has spiked, we want to use 6348 * the last RTT, not the smoothed one. 6349 */ 6350 if (rack_tlp_use_greater && 6351 tp->t_srtt && 6352 (srtt < rack_grab_rtt(tp, rack))) { 6353 srtt = rack_grab_rtt(tp, rack); 6354 } 6355 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 6356 if (thresh > time_since_sent) { 6357 to = thresh - time_since_sent; 6358 } else { 6359 to = rack->r_ctl.rc_min_to; 6360 rack_log_alt_to_to_cancel(rack, 6361 thresh, /* flex1 */ 6362 time_since_sent, /* flex2 */ 6363 tstmp_touse, /* flex3 */ 6364 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 6365 (uint32_t)rsm->r_tim_lastsent[idx], 6366 srtt, 6367 idx, 99); 6368 } 6369 if (to < rack_tlp_min) { 6370 to = rack_tlp_min; 6371 } 6372 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) { 6373 /* 6374 * If the TLP time works out to be larger than the max 6375 * RTO, skip the TLP and just do an RTO. 6376 */ 6377 goto activate_rxt; 6378 } 6379 } 6380 if (is_tlp_timer == 0) { 6381 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 6382 } else { 6383 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 6384 } 6385 if (to == 0) 6386 to = 1; 6387 return (to); 6388 } 6389 6390 static void 6391 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, tcp_seq snd_una) 6392 { 6393 if (rack->rc_in_persist == 0) { 6394 if (tp->t_flags & TF_GPUTINPROG) { 6395 /* 6396 * Stop the goodput measurement now; calling the 6397 * measurement function clears the flag. 
6398 */ 6399 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, 6400 RACK_QUALITY_PERSIST); 6401 } 6402 #ifdef NETFLIX_SHARED_CWND 6403 if (rack->r_ctl.rc_scw) { 6404 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 6405 rack->rack_scwnd_is_idle = 1; 6406 } 6407 #endif 6408 rack->r_ctl.rc_went_idle_time = cts; 6409 if (rack->r_ctl.rc_went_idle_time == 0) 6410 rack->r_ctl.rc_went_idle_time = 1; 6411 if (rack->lt_bw_up) { 6412 /* Suspend our LT BW measurement */ 6413 uint64_t tmark; 6414 6415 rack->r_ctl.lt_bw_bytes += (snd_una - rack->r_ctl.lt_seq); 6416 rack->r_ctl.lt_seq = snd_una; 6417 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 6418 if (tmark >= rack->r_ctl.lt_timemark) { 6419 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 6420 } 6421 rack->r_ctl.lt_timemark = tmark; 6422 rack->lt_bw_up = 0; 6423 rack->r_persist_lt_bw_off = 1; 6424 } 6425 rack_timer_cancel(tp, rack, cts, __LINE__); 6426 rack->r_ctl.persist_lost_ends = 0; 6427 rack->probe_not_answered = 0; 6428 rack->forced_ack = 0; 6429 tp->t_rxtshift = 0; 6430 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 6431 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 6432 rack->rc_in_persist = 1; 6433 } 6434 } 6435 6436 static void 6437 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6438 { 6439 if (tcp_in_hpts(rack->rc_tp)) { 6440 tcp_hpts_remove(rack->rc_tp); 6441 rack->r_ctl.rc_hpts_flags = 0; 6442 } 6443 #ifdef NETFLIX_SHARED_CWND 6444 if (rack->r_ctl.rc_scw) { 6445 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 6446 rack->rack_scwnd_is_idle = 0; 6447 } 6448 #endif 6449 if (rack->rc_gp_dyn_mul && 6450 (rack->use_fixed_rate == 0) && 6451 (rack->rc_always_pace)) { 6452 /* 6453 * Do we count this as if a probe-rtt just 6454 * finished? 6455 */ 6456 uint32_t time_idle, idle_min; 6457 6458 time_idle = cts - rack->r_ctl.rc_went_idle_time; 6459 idle_min = rack_min_probertt_hold; 6460 if (rack_probertt_gpsrtt_cnt_div) { 6461 uint64_t extra; 6462 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 6463 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 6464 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 6465 idle_min += (uint32_t)extra; 6466 } 6467 if (time_idle >= idle_min) { 6468 /* Yes, we count it as a probe-rtt. 
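 * (Whether we get here depends on idle_min computed above;
 * illustrative numbers only: with a rc_gp_srtt of 30,000 usecs
 * and a 5/10 gpsrtt mul/div setting, idle_min grows by 15,000
 * usecs over the base rack_min_probertt_hold before the idle
 * period qualifies.)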
*/ 6469 uint32_t us_cts; 6470 6471 us_cts = tcp_get_usecs(NULL); 6472 if (rack->in_probe_rtt == 0) { 6473 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 6474 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 6475 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 6476 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 6477 } else { 6478 rack_exit_probertt(rack, us_cts); 6479 } 6480 } 6481 } 6482 if (rack->r_persist_lt_bw_off) { 6483 /* Continue where we left off */ 6484 rack->r_ctl.lt_timemark = tcp_get_u64_usecs(NULL); 6485 rack->lt_bw_up = 1; 6486 rack->r_persist_lt_bw_off = 0; 6487 } 6488 rack->rc_in_persist = 0; 6489 rack->r_ctl.rc_went_idle_time = 0; 6490 tp->t_rxtshift = 0; 6491 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 6492 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 6493 rack->r_ctl.rc_agg_delayed = 0; 6494 rack->r_early = 0; 6495 rack->r_late = 0; 6496 rack->r_ctl.rc_agg_early = 0; 6497 } 6498 6499 static void 6500 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 6501 struct hpts_diag *diag, struct timeval *tv) 6502 { 6503 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 6504 union tcp_log_stackspecific log; 6505 6506 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 6507 log.u_bbr.flex1 = diag->p_nxt_slot; 6508 log.u_bbr.flex2 = diag->p_cur_slot; 6509 log.u_bbr.flex3 = diag->slot_req; 6510 log.u_bbr.flex4 = diag->inp_hptsslot; 6511 log.u_bbr.flex5 = diag->slot_remaining; 6512 log.u_bbr.flex6 = diag->need_new_to; 6513 log.u_bbr.flex7 = diag->p_hpts_active; 6514 log.u_bbr.flex8 = diag->p_on_min_sleep; 6515 /* Hijack other fields as needed */ 6516 log.u_bbr.epoch = diag->have_slept; 6517 log.u_bbr.lt_epoch = diag->yet_to_sleep; 6518 log.u_bbr.pkts_out = diag->co_ret; 6519 log.u_bbr.applimited = diag->hpts_sleep_time; 6520 log.u_bbr.delivered = diag->p_prev_slot; 6521 log.u_bbr.inflight = diag->p_runningslot; 6522 log.u_bbr.bw_inuse = diag->wheel_slot; 6523 log.u_bbr.rttProp = diag->wheel_cts; 6524 log.u_bbr.timeStamp = cts; 6525 log.u_bbr.delRate = diag->maxslots; 6526 log.u_bbr.cur_del_rate = diag->p_curtick; 6527 log.u_bbr.cur_del_rate <<= 32; 6528 log.u_bbr.cur_del_rate |= diag->p_lasttick; 6529 TCP_LOG_EVENTP(rack->rc_tp, NULL, 6530 &rack->rc_inp->inp_socket->so_rcv, 6531 &rack->rc_inp->inp_socket->so_snd, 6532 BBR_LOG_HPTSDIAG, 0, 6533 0, &log, false, tv); 6534 } 6535 6536 } 6537 6538 static void 6539 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 6540 { 6541 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 6542 union tcp_log_stackspecific log; 6543 struct timeval tv; 6544 6545 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 6546 log.u_bbr.flex1 = sb->sb_flags; 6547 log.u_bbr.flex2 = len; 6548 log.u_bbr.flex3 = sb->sb_state; 6549 log.u_bbr.flex8 = type; 6550 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 6551 TCP_LOG_EVENTP(rack->rc_tp, NULL, 6552 &rack->rc_inp->inp_socket->so_rcv, 6553 &rack->rc_inp->inp_socket->so_snd, 6554 TCP_LOG_SB_WAKE, 0, 6555 len, &log, false, &tv); 6556 } 6557 } 6558 6559 static void 6560 rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 6561 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 6562 { 6563 struct hpts_diag diag; 6564 struct inpcb *inp = tptoinpcb(tp); 6565 struct timeval tv; 6566 uint32_t delayed_ack = 0; 6567 uint32_t hpts_timeout; 6568 uint32_t entry_slot = slot; 6569 uint8_t stopped; 6570 uint32_t left = 0; 6571 uint32_t us_cts; 6572 6573 if ((tp->t_state == 
TCPS_CLOSED) || 6574 (tp->t_state == TCPS_LISTEN)) { 6575 return; 6576 } 6577 if (tcp_in_hpts(tp)) { 6578 /* Already on the pacer */ 6579 return; 6580 } 6581 stopped = rack->rc_tmr_stopped; 6582 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 6583 left = rack->r_ctl.rc_timer_exp - cts; 6584 } 6585 rack->r_ctl.rc_timer_exp = 0; 6586 rack->r_ctl.rc_hpts_flags = 0; 6587 us_cts = tcp_get_usecs(&tv); 6588 /* Now early/late accounting */ 6589 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0); 6590 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { 6591 /* 6592 * We have a early carry over set, 6593 * we can always add more time so we 6594 * can always make this compensation. 6595 * 6596 * Note if ack's are allowed to wake us do not 6597 * penalize the next timer for being awoke 6598 * by an ack aka the rc_agg_early (non-paced mode). 6599 */ 6600 slot += rack->r_ctl.rc_agg_early; 6601 rack->r_early = 0; 6602 rack->r_ctl.rc_agg_early = 0; 6603 } 6604 if ((rack->r_late) && 6605 ((rack->r_use_hpts_min == 0) || (rack->dgp_on == 0))) { 6606 /* 6607 * This is harder, we can 6608 * compensate some but it 6609 * really depends on what 6610 * the current pacing time is. 6611 */ 6612 if (rack->r_ctl.rc_agg_delayed >= slot) { 6613 /* 6614 * We can't compensate for it all. 6615 * And we have to have some time 6616 * on the clock. We always have a min 6617 * 10 slots (10 x 10 i.e. 100 usecs). 6618 */ 6619 if (slot <= HPTS_TICKS_PER_SLOT) { 6620 /* We gain delay */ 6621 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); 6622 slot = HPTS_TICKS_PER_SLOT; 6623 } else { 6624 /* We take off some */ 6625 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); 6626 slot = HPTS_TICKS_PER_SLOT; 6627 } 6628 } else { 6629 slot -= rack->r_ctl.rc_agg_delayed; 6630 rack->r_ctl.rc_agg_delayed = 0; 6631 /* Make sure we have 100 useconds at minimum */ 6632 if (slot < HPTS_TICKS_PER_SLOT) { 6633 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; 6634 slot = HPTS_TICKS_PER_SLOT; 6635 } 6636 if (rack->r_ctl.rc_agg_delayed == 0) 6637 rack->r_late = 0; 6638 } 6639 } else if (rack->r_late) { 6640 /* r_use_hpts_min is on and so is DGP */ 6641 uint32_t max_red; 6642 6643 max_red = (slot * rack->r_ctl.max_reduction) / 100; 6644 if (max_red >= rack->r_ctl.rc_agg_delayed) { 6645 slot -= rack->r_ctl.rc_agg_delayed; 6646 rack->r_ctl.rc_agg_delayed = 0; 6647 } else { 6648 slot -= max_red; 6649 rack->r_ctl.rc_agg_delayed -= max_red; 6650 } 6651 } 6652 if ((rack->r_use_hpts_min == 1) && 6653 (slot > 0) && 6654 (rack->dgp_on == 1)) { 6655 /* 6656 * We are enforcing a min pacing timer 6657 * based on our hpts min timeout. 6658 */ 6659 uint32_t min; 6660 6661 min = get_hpts_min_sleep_time(); 6662 if (min > slot) { 6663 slot = min; 6664 } 6665 } 6666 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 6667 if (tp->t_flags & TF_DELACK) { 6668 delayed_ack = TICKS_2_USEC(tcp_delacktime); 6669 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 6670 } 6671 if (delayed_ack && ((hpts_timeout == 0) || 6672 (delayed_ack < hpts_timeout))) 6673 hpts_timeout = delayed_ack; 6674 else 6675 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 6676 /* 6677 * If no timers are going to run and we will fall off the hptsi 6678 * wheel, we resort to a keep-alive timer if its configured. 
6679 */ 6680 if ((hpts_timeout == 0) && 6681 (slot == 0)) { 6682 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 6683 (tp->t_state <= TCPS_CLOSING)) { 6684 /* 6685 * Ok we have no timer (persists, rack, tlp, rxt or 6686 * del-ack), we don't have segments being paced. So 6687 * all that is left is the keepalive timer. 6688 */ 6689 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 6690 /* Get the established keep-alive time */ 6691 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 6692 } else { 6693 /* 6694 * Get the initial setup keep-alive time, 6695 * note that this is probably not going to 6696 * happen, since rack will be running a rxt timer 6697 * if a SYN of some sort is outstanding. It is 6698 * actually handled in rack_timeout_rxt(). 6699 */ 6700 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 6701 } 6702 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 6703 if (rack->in_probe_rtt) { 6704 /* 6705 * We want to instead not wake up a long time from 6706 * now but to wake up about the time we would 6707 * exit probe-rtt and initiate a keep-alive ack. 6708 * This will get us out of probe-rtt and update 6709 * our min-rtt. 6710 */ 6711 hpts_timeout = rack_min_probertt_hold; 6712 } 6713 } 6714 } 6715 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 6716 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 6717 /* 6718 * RACK, TLP, persists and RXT timers all are restartable 6719 * based on actions input .. i.e we received a packet (ack 6720 * or sack) and that changes things (rw, or snd_una etc). 6721 * Thus we can restart them with a new value. For 6722 * keep-alive, delayed_ack we keep track of what was left 6723 * and restart the timer with a smaller value. 6724 */ 6725 if (left < hpts_timeout) 6726 hpts_timeout = left; 6727 } 6728 if (hpts_timeout) { 6729 /* 6730 * Hack alert for now we can't time-out over 2,147,483 6731 * seconds (a bit more than 596 hours), which is probably ok 6732 * :). 6733 */ 6734 if (hpts_timeout > 0x7ffffffe) 6735 hpts_timeout = 0x7ffffffe; 6736 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 6737 } 6738 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); 6739 if ((rack->gp_ready == 0) && 6740 (rack->use_fixed_rate == 0) && 6741 (hpts_timeout < slot) && 6742 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 6743 /* 6744 * We have no good estimate yet for the 6745 * old clunky burst mitigation or the 6746 * real pacing. And the tlp or rxt is smaller 6747 * than the pacing calculation. Lets not 6748 * pace that long since we know the calculation 6749 * so far is not accurate. 6750 */ 6751 slot = hpts_timeout; 6752 } 6753 /** 6754 * Turn off all the flags for queuing by default. The 6755 * flags have important meanings to what happens when 6756 * LRO interacts with the transport. Most likely (by default now) 6757 * mbuf_queueing and ack compression are on. So the transport 6758 * has a couple of flags that control what happens (if those 6759 * are not on then these flags won't have any effect since it 6760 * won't go through the queuing LRO path). 6761 * 6762 * TF2_MBUF_QUEUE_READY - This flags says that I am busy 6763 * pacing output, so don't disturb. But 6764 * it also means LRO can wake me if there 6765 * is a SACK arrival. 6766 * 6767 * TF2_DONT_SACK_QUEUE - This flag is used in conjunction 6768 * with the above flag (QUEUE_READY) and 6769 * when present it says don't even wake me 6770 * if a SACK arrives. 
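 *
 * Put roughly, the three combinations are:
 *   neither flag set       - any arriving segment may wake us up.
 *   TF2_MBUF_QUEUE_READY   - only a SACK arrival wakes us up.
 *   both flags set         - nothing wakes us until our timer
 *                            expires.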
6771 * 6772 * The idea behind these flags is that if we are pacing we 6773 * set the MBUF_QUEUE_READY and only get woken up if 6774 * a SACK arrives (which could change things) or if 6775 * our pacing timer expires. If, however, we have a rack 6776 * timer running, then we don't even want a sack to wake 6777 * us since the rack timer has to expire before we can send. 6778 * 6779 * Other cases should usually have none of the flags set 6780 * so LRO can call into us. 6781 */ 6782 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE|TF2_MBUF_QUEUE_READY); 6783 if (slot) { 6784 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 6785 rack->r_ctl.rc_last_output_to = us_cts + slot; 6786 /* 6787 * A pacing timer (slot) is being set, in 6788 * such a case we cannot send (we are blocked by 6789 * the timer). So lets tell LRO that it should not 6790 * wake us unless there is a SACK. Note this only 6791 * will be effective if mbuf queueing is on or 6792 * compressed acks are being processed. 6793 */ 6794 tp->t_flags2 |= TF2_MBUF_QUEUE_READY; 6795 /* 6796 * But wait if we have a Rack timer running 6797 * even a SACK should not disturb us (with 6798 * the exception of r_rr_config 3). 6799 */ 6800 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) || 6801 (IN_RECOVERY(tp->t_flags))) { 6802 if (rack->r_rr_config != 3) 6803 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; 6804 else if (rack->rc_pace_dnd) { 6805 /* 6806 * When DND is on, we only let a sack 6807 * interrupt us if we are not in recovery. 6808 * 6809 * If DND is off, then we never hit here 6810 * and let all sacks wake us up. 6811 * 6812 */ 6813 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; 6814 } 6815 } 6816 if (rack->rc_ack_can_sendout_data) { 6817 /* 6818 * Ahh but wait, this is that special case 6819 * where the pacing timer can be disturbed 6820 * backout the changes (used for non-paced 6821 * burst limiting). 6822 */ 6823 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE | 6824 TF2_MBUF_QUEUE_READY); 6825 } 6826 if ((rack->use_rack_rr) && 6827 (rack->r_rr_config < 2) && 6828 ((hpts_timeout) && (hpts_timeout < slot))) { 6829 /* 6830 * Arrange for the hpts to kick back in after the 6831 * t-o if the t-o does not cause a send. 6832 */ 6833 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout), 6834 __LINE__, &diag); 6835 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6836 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 6837 } else { 6838 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(slot), 6839 __LINE__, &diag); 6840 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6841 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 6842 } 6843 } else if (hpts_timeout) { 6844 /* 6845 * With respect to t_flags2(?) here, lets let any new acks wake 6846 * us up here. Since we are not pacing (no pacing timer), output 6847 * can happen so we should let it. If its a Rack timer, then any inbound 6848 * packet probably won't change the sending (we will be blocked) 6849 * but it may change the prr stats so letting it in (the set defaults 6850 * at the start of this block) are good enough. 
6851 */ 6852 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 6853 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout), 6854 __LINE__, &diag); 6855 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6856 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 6857 } else { 6858 /* No timer starting */ 6859 #ifdef INVARIANTS 6860 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 6861 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 6862 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 6863 } 6864 #endif 6865 } 6866 rack->rc_tmr_stopped = 0; 6867 if (slot) 6868 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv, __LINE__); 6869 } 6870 6871 static void 6872 rack_mark_lost(struct tcpcb *tp, 6873 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts) 6874 { 6875 struct rack_sendmap *nrsm; 6876 uint32_t thresh, exp; 6877 6878 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(tp, rack), cts, __LINE__, 0); 6879 nrsm = rsm; 6880 TAILQ_FOREACH_FROM(nrsm, &rack->r_ctl.rc_tmap, r_tnext) { 6881 if ((nrsm->r_flags & RACK_SACK_PASSED) == 0) { 6882 /* Got up to all that were marked sack-passed */ 6883 break; 6884 } 6885 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { 6886 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; 6887 if (TSTMP_LT(exp, cts) || (exp == cts)) { 6888 /* We now consider it lost */ 6889 nrsm->r_flags |= RACK_WAS_LOST; 6890 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; 6891 } else { 6892 /* Past here it won't be lost so stop */ 6893 break; 6894 } 6895 } 6896 } 6897 } 6898 6899 /* 6900 * RACK Timer, here we simply do logging and house keeping. 6901 * the normal rack_output() function will call the 6902 * appropriate thing to check if we need to do a RACK retransmit. 6903 * We return 1, saying don't proceed with rack_output only 6904 * when all timers have been stopped (destroyed PCB?). 6905 */ 6906 static int 6907 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6908 { 6909 /* 6910 * This timer simply provides an internal trigger to send out data. 6911 * The check_recovery_mode call will see if there are needed 6912 * retransmissions, if so we will enter fast-recovery. The output 6913 * call may or may not do the same thing depending on sysctl 6914 * settings. 6915 */ 6916 struct rack_sendmap *rsm; 6917 6918 counter_u64_add(rack_to_tot, 1); 6919 if (rack->r_state && (rack->r_state != tp->t_state)) 6920 rack_set_state(tp, rack); 6921 rack->rc_on_min_to = 0; 6922 rsm = rack_check_recovery_mode(tp, cts); 6923 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 6924 if (rsm) { 6925 /* We need to stroke any lost that are now declared as lost */ 6926 rack_mark_lost(tp, rack, rsm, cts); 6927 rack->r_ctl.rc_resend = rsm; 6928 rack->r_timer_override = 1; 6929 if (rack->use_rack_rr) { 6930 /* 6931 * Don't accumulate extra pacing delay 6932 * we are allowing the rack timer to 6933 * over-ride pacing i.e. rrr takes precedence 6934 * if the pacing interval is longer than the rrr 6935 * time (in other words we get the min pacing 6936 * time versus rrr pacing time). 
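 * (Illustrative: if the pacing slot would have been 12,000 usecs
 * but rack-rapid-recovery is ready to retransmit now, clearing
 * PACE_PKT_OUTPUT below lets the retransmit go out on the timer
 * expiry instead of waiting out the longer pacing interval.)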
6937 */ 6938 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 6939 } 6940 } 6941 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 6942 if (rsm == NULL) { 6943 /* restart a timer and return 1 */ 6944 rack_start_hpts_timer(rack, tp, cts, 6945 0, 0, 0); 6946 return (1); 6947 } 6948 return (0); 6949 } 6950 6951 6952 6953 static void 6954 rack_adjust_orig_mlen(struct rack_sendmap *rsm) 6955 { 6956 6957 if ((M_TRAILINGROOM(rsm->m) != rsm->orig_t_space)) { 6958 /* 6959 * The trailing space changed, mbufs can grow 6960 * at the tail but they can't shrink from 6961 * it, KASSERT that. Adjust the orig_m_len to 6962 * compensate for this change. 6963 */ 6964 KASSERT((rsm->orig_t_space > M_TRAILINGROOM(rsm->m)), 6965 ("mbuf:%p rsm:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n", 6966 rsm->m, 6967 rsm, 6968 (intmax_t)M_TRAILINGROOM(rsm->m), 6969 rsm->orig_t_space, 6970 rsm->orig_m_len, 6971 rsm->m->m_len)); 6972 rsm->orig_m_len += (rsm->orig_t_space - M_TRAILINGROOM(rsm->m)); 6973 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 6974 } 6975 if (rsm->m->m_len < rsm->orig_m_len) { 6976 /* 6977 * Mbuf shrank, trimmed off the top by an ack, our 6978 * offset changes. 6979 */ 6980 KASSERT((rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)), 6981 ("mbuf:%p len:%u rsm:%p oml:%u soff:%u\n", 6982 rsm->m, rsm->m->m_len, 6983 rsm, rsm->orig_m_len, 6984 rsm->soff)); 6985 if (rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)) 6986 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); 6987 else 6988 rsm->soff = 0; 6989 rsm->orig_m_len = rsm->m->m_len; 6990 #ifdef INVARIANTS 6991 } else if (rsm->m->m_len > rsm->orig_m_len) { 6992 panic("rsm:%p m:%p m_len grew outside of t_space compensation", 6993 rsm, rsm->m); 6994 #endif 6995 } 6996 } 6997 6998 static void 6999 rack_setup_offset_for_rsm(struct tcp_rack *rack, struct rack_sendmap *src_rsm, struct rack_sendmap *rsm) 7000 { 7001 struct mbuf *m; 7002 uint32_t soff; 7003 7004 if (src_rsm->m && 7005 ((src_rsm->orig_m_len != src_rsm->m->m_len) || 7006 (M_TRAILINGROOM(src_rsm->m) != src_rsm->orig_t_space))) { 7007 /* Fix up the orig_m_len and possibly the mbuf offset */ 7008 rack_adjust_orig_mlen(src_rsm); 7009 } 7010 m = src_rsm->m; 7011 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); 7012 while (soff >= m->m_len) { 7013 /* Move out past this mbuf */ 7014 soff -= m->m_len; 7015 m = m->m_next; 7016 KASSERT((m != NULL), 7017 ("rsm:%p nrsm:%p hit at soff:%u null m", 7018 src_rsm, rsm, soff)); 7019 if (m == NULL) { 7020 /* This should *not* happen which is why there is a kassert */ 7021 src_rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 7022 (src_rsm->r_start - rack->rc_tp->snd_una), 7023 &src_rsm->soff); 7024 src_rsm->orig_m_len = src_rsm->m->m_len; 7025 src_rsm->orig_t_space = M_TRAILINGROOM(src_rsm->m); 7026 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 7027 (rsm->r_start - rack->rc_tp->snd_una), 7028 &rsm->soff); 7029 rsm->orig_m_len = rsm->m->m_len; 7030 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7031 return; 7032 } 7033 } 7034 rsm->m = m; 7035 rsm->soff = soff; 7036 rsm->orig_m_len = m->m_len; 7037 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7038 } 7039 7040 static __inline void 7041 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 7042 struct rack_sendmap *rsm, uint32_t start) 7043 { 7044 int idx; 7045 7046 nrsm->r_start = start; 7047 nrsm->r_end = rsm->r_end; 7048 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 7049 nrsm->r_act_rxt_cnt = rsm->r_act_rxt_cnt; 7050 nrsm->r_flags = rsm->r_flags; 7051 nrsm->r_dupack = rsm->r_dupack; 7052 nrsm->r_no_rtt_allowed = 
rsm->r_no_rtt_allowed; 7053 nrsm->r_rtr_bytes = 0; 7054 nrsm->r_fas = rsm->r_fas; 7055 nrsm->r_bas = rsm->r_bas; 7056 tqhash_update_end(rack->r_ctl.tqh, rsm, nrsm->r_start); 7057 nrsm->r_just_ret = rsm->r_just_ret; 7058 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 7059 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 7060 } 7061 /* Now if we have SYN flag we keep it on the left edge */ 7062 if (nrsm->r_flags & RACK_HAS_SYN) 7063 nrsm->r_flags &= ~RACK_HAS_SYN; 7064 /* Now if we have a FIN flag we keep it on the right edge */ 7065 if (rsm->r_flags & RACK_HAS_FIN) 7066 rsm->r_flags &= ~RACK_HAS_FIN; 7067 /* Push bit must go to the right edge as well */ 7068 if (rsm->r_flags & RACK_HAD_PUSH) 7069 rsm->r_flags &= ~RACK_HAD_PUSH; 7070 /* Clone over the state of the hw_tls flag */ 7071 nrsm->r_hw_tls = rsm->r_hw_tls; 7072 /* 7073 * Now we need to find nrsm's new location in the mbuf chain 7074 * we basically calculate a new offset, which is soff + 7075 * how much is left in original rsm. Then we walk out the mbuf 7076 * chain to find the righ position, it may be the same mbuf 7077 * or maybe not. 7078 */ 7079 KASSERT(((rsm->m != NULL) || 7080 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 7081 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 7082 if (rsm->m) 7083 rack_setup_offset_for_rsm(rack, rsm, nrsm); 7084 } 7085 7086 static struct rack_sendmap * 7087 rack_merge_rsm(struct tcp_rack *rack, 7088 struct rack_sendmap *l_rsm, 7089 struct rack_sendmap *r_rsm) 7090 { 7091 /* 7092 * We are merging two ack'd RSM's, 7093 * the l_rsm is on the left (lower seq 7094 * values) and the r_rsm is on the right 7095 * (higher seq value). The simplest way 7096 * to merge these is to move the right 7097 * one into the left. I don't think there 7098 * is any reason we need to try to find 7099 * the oldest (or last oldest retransmitted). 7100 */ 7101 rack_log_map_chg(rack->rc_tp, rack, NULL, 7102 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 7103 tqhash_update_end(rack->r_ctl.tqh, l_rsm, r_rsm->r_end); 7104 if (l_rsm->r_dupack < r_rsm->r_dupack) 7105 l_rsm->r_dupack = r_rsm->r_dupack; 7106 if (r_rsm->r_rtr_bytes) 7107 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 7108 if (r_rsm->r_in_tmap) { 7109 /* This really should not happen */ 7110 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 7111 r_rsm->r_in_tmap = 0; 7112 } 7113 7114 /* Now the flags */ 7115 if (r_rsm->r_flags & RACK_HAS_FIN) 7116 l_rsm->r_flags |= RACK_HAS_FIN; 7117 if (r_rsm->r_flags & RACK_TLP) 7118 l_rsm->r_flags |= RACK_TLP; 7119 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 7120 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 7121 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 7122 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 7123 /* 7124 * If both are app-limited then let the 7125 * free lower the count. If right is app 7126 * limited and left is not, transfer. 7127 */ 7128 l_rsm->r_flags |= RACK_APP_LIMITED; 7129 r_rsm->r_flags &= ~RACK_APP_LIMITED; 7130 if (r_rsm == rack->r_ctl.rc_first_appl) 7131 rack->r_ctl.rc_first_appl = l_rsm; 7132 } 7133 tqhash_remove(rack->r_ctl.tqh, r_rsm, REMOVE_TYPE_MERGE); 7134 /* 7135 * We keep the largest value, which is the newest 7136 * send. We do this in case a segment that is 7137 * joined together and not part of a GP estimate 7138 * later gets expanded into the GP estimate. 7139 * 7140 * We prohibit the merging of unlike kinds i.e. 7141 * all pieces that are in the GP estimate can be 7142 * merged and all pieces that are not in a GP estimate 7143 * can be merged, but not disimilar pieces. 
Combine 7144 * this with taking the highest here and we should 7145 * be ok unless of course the client reneges. Then 7146 * all bets are off. 7147 */ 7148 if(l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] < 7149 r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]) { 7150 l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] = r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]; 7151 } 7152 /* 7153 * When merging two RSM's we also need to consider the ack time and keep 7154 * newest. If the ack gets merged into a measurement then that is the 7155 * one we will want to be using. 7156 */ 7157 if(l_rsm->r_ack_arrival < r_rsm->r_ack_arrival) 7158 l_rsm->r_ack_arrival = r_rsm->r_ack_arrival; 7159 7160 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 7161 /* Transfer the split limit to the map we free */ 7162 r_rsm->r_limit_type = l_rsm->r_limit_type; 7163 l_rsm->r_limit_type = 0; 7164 } 7165 rack_free(rack, r_rsm); 7166 l_rsm->r_flags |= RACK_MERGED; 7167 return (l_rsm); 7168 } 7169 7170 /* 7171 * TLP Timer, here we simply setup what segment we want to 7172 * have the TLP expire on, the normal rack_output() will then 7173 * send it out. 7174 * 7175 * We return 1, saying don't proceed with rack_output only 7176 * when all timers have been stopped (destroyed PCB?). 7177 */ 7178 static int 7179 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp) 7180 { 7181 /* 7182 * Tail Loss Probe. 7183 */ 7184 struct rack_sendmap *rsm = NULL; 7185 int insret __diagused; 7186 struct socket *so = tptosocket(tp); 7187 uint32_t amm; 7188 uint32_t out, avail; 7189 int collapsed_win = 0; 7190 7191 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 7192 /* Its not time yet */ 7193 return (0); 7194 } 7195 if (ctf_progress_timeout_check(tp, true)) { 7196 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7197 return (-ETIMEDOUT); /* tcp_drop() */ 7198 } 7199 /* 7200 * A TLP timer has expired. We have been idle for 2 rtts. So we now 7201 * need to figure out how to force a full MSS segment out. 7202 */ 7203 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); 7204 rack->r_ctl.retran_during_recovery = 0; 7205 rack->r_might_revert = 0; 7206 rack->r_ctl.dsack_byte_cnt = 0; 7207 counter_u64_add(rack_tlp_tot, 1); 7208 if (rack->r_state && (rack->r_state != tp->t_state)) 7209 rack_set_state(tp, rack); 7210 avail = sbavail(&so->so_snd); 7211 out = tp->snd_max - tp->snd_una; 7212 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) { 7213 /* special case, we need a retransmission */ 7214 collapsed_win = 1; 7215 goto need_retran; 7216 } 7217 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { 7218 rack->r_ctl.dsack_persist--; 7219 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 7220 rack->r_ctl.num_dsack = 0; 7221 } 7222 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 7223 } 7224 if ((tp->t_flags & TF_GPUTINPROG) && 7225 (rack->r_ctl.rc_tlp_cnt_out == 1)) { 7226 /* 7227 * If this is the second in a row 7228 * TLP and we are doing a measurement 7229 * its time to abandon the measurement. 7230 * Something is likely broken on 7231 * the clients network and measuring a 7232 * broken network does us no good. 7233 */ 7234 tp->t_flags &= ~TF_GPUTINPROG; 7235 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7236 rack->r_ctl.rc_gp_srtt /*flex1*/, 7237 tp->gput_seq, 7238 0, 0, 18, __LINE__, NULL, 0); 7239 } 7240 /* 7241 * Check our send oldest always settings, and if 7242 * there is an oldest to send jump to the need_retran. 
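 * Otherwise, when new data is available the TLP is sized from it;
 * purely illustrative numbers: with 20,000 bytes in the socket
 * buffer, 15,000 bytes already outstanding and a 1,460 byte
 * maxseg, amm becomes 1,460 and is sent as new data provided it
 * still fits in the peer's window (with PRR active it is also
 * credited to rc_prr_sndcnt); if it does not fit we fall back to
 * retransmitting the tail instead.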
7243 */ 7244 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 7245 goto need_retran; 7246 7247 if (avail > out) { 7248 /* New data is available */ 7249 amm = avail - out; 7250 if (amm > ctf_fixed_maxseg(tp)) { 7251 amm = ctf_fixed_maxseg(tp); 7252 if ((amm + out) > tp->snd_wnd) { 7253 /* We are rwnd limited */ 7254 goto need_retran; 7255 } 7256 } else if (amm < ctf_fixed_maxseg(tp)) { 7257 /* not enough to fill a MTU */ 7258 goto need_retran; 7259 } 7260 if (IN_FASTRECOVERY(tp->t_flags)) { 7261 /* Unlikely */ 7262 if (rack->rack_no_prr == 0) { 7263 if (out + amm <= tp->snd_wnd) { 7264 rack->r_ctl.rc_prr_sndcnt = amm; 7265 rack->r_ctl.rc_tlp_new_data = amm; 7266 rack_log_to_prr(rack, 4, 0, __LINE__); 7267 } 7268 } else 7269 goto need_retran; 7270 } else { 7271 /* Set the send-new override */ 7272 if (out + amm <= tp->snd_wnd) 7273 rack->r_ctl.rc_tlp_new_data = amm; 7274 else 7275 goto need_retran; 7276 } 7277 rack->r_ctl.rc_tlpsend = NULL; 7278 counter_u64_add(rack_tlp_newdata, 1); 7279 goto send; 7280 } 7281 need_retran: 7282 /* 7283 * Ok we need to arrange the last un-acked segment to be re-sent, or 7284 * optionally the first un-acked segment. 7285 */ 7286 if (collapsed_win == 0) { 7287 if (rack_always_send_oldest) 7288 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 7289 else { 7290 rsm = tqhash_max(rack->r_ctl.tqh); 7291 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 7292 rsm = rack_find_high_nonack(rack, rsm); 7293 } 7294 } 7295 if (rsm == NULL) { 7296 #ifdef TCP_BLACKBOX 7297 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 7298 #endif 7299 goto out; 7300 } 7301 } else { 7302 /* 7303 * We had a collapsed window, lets find 7304 * the point before the collapse. 7305 */ 7306 if (SEQ_GT((rack->r_ctl.last_collapse_point - 1), rack->rc_tp->snd_una)) 7307 rsm = tqhash_find(rack->r_ctl.tqh, (rack->r_ctl.last_collapse_point - 1)); 7308 else { 7309 rsm = tqhash_min(rack->r_ctl.tqh); 7310 } 7311 if (rsm == NULL) { 7312 /* Huh */ 7313 goto out; 7314 } 7315 } 7316 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 7317 /* 7318 * We need to split this the last segment in two. 7319 */ 7320 struct rack_sendmap *nrsm; 7321 7322 nrsm = rack_alloc_full_limit(rack); 7323 if (nrsm == NULL) { 7324 /* 7325 * No memory to split, we will just exit and punt 7326 * off to the RXT timer. 7327 */ 7328 goto out; 7329 } 7330 rack_clone_rsm(rack, nrsm, rsm, 7331 (rsm->r_end - ctf_fixed_maxseg(tp))); 7332 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7333 #ifndef INVARIANTS 7334 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 7335 #else 7336 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 7337 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 7338 nrsm, insret, rack, rsm); 7339 } 7340 #endif 7341 if (rsm->r_in_tmap) { 7342 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7343 nrsm->r_in_tmap = 1; 7344 } 7345 rsm = nrsm; 7346 } 7347 rack->r_ctl.rc_tlpsend = rsm; 7348 send: 7349 /* Make sure output path knows we are doing a TLP */ 7350 *doing_tlp = 1; 7351 rack->r_timer_override = 1; 7352 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 7353 return (0); 7354 out: 7355 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 7356 return (0); 7357 } 7358 7359 /* 7360 * Delayed ack Timer, here we simply need to setup the 7361 * ACK_NOW flag and remove the DELACK flag. From there 7362 * the output routine will send the ack out. 7363 * 7364 * We only return 1, saying don't proceed, if all timers 7365 * are stopped (destroyed PCB?). 
7366 */ 7367 static int 7368 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7369 { 7370 7371 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 7372 tp->t_flags &= ~TF_DELACK; 7373 tp->t_flags |= TF_ACKNOW; 7374 KMOD_TCPSTAT_INC(tcps_delack); 7375 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 7376 return (0); 7377 } 7378 7379 static inline int 7380 rack_send_ack_challange(struct tcp_rack *rack) 7381 { 7382 struct tcptemp *t_template; 7383 7384 t_template = tcpip_maketemplate(rack->rc_inp); 7385 if (t_template) { 7386 if (rack->forced_ack == 0) { 7387 rack->forced_ack = 1; 7388 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 7389 } else { 7390 rack->probe_not_answered = 1; 7391 } 7392 tcp_respond(rack->rc_tp, t_template->tt_ipgen, 7393 &t_template->tt_t, (struct mbuf *)NULL, 7394 rack->rc_tp->rcv_nxt, rack->rc_tp->snd_una - 1, 0); 7395 free(t_template, M_TEMP); 7396 /* This does send an ack so kill any D-ack timer */ 7397 if (rack->rc_tp->t_flags & TF_DELACK) 7398 rack->rc_tp->t_flags &= ~TF_DELACK; 7399 return(1); 7400 } else 7401 return (0); 7402 7403 } 7404 7405 /* 7406 * Persists timer, here we simply send the 7407 * same thing as a keepalive will. 7408 * the one byte send. 7409 * 7410 * We only return 1, saying don't proceed, if all timers 7411 * are stopped (destroyed PCB?). 7412 */ 7413 static int 7414 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7415 { 7416 int32_t retval = 1; 7417 7418 if (rack->rc_in_persist == 0) 7419 return (0); 7420 if (ctf_progress_timeout_check(tp, false)) { 7421 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 7422 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7423 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 7424 return (-ETIMEDOUT); /* tcp_drop() */ 7425 } 7426 /* 7427 * Persistence timer into zero window. Force a byte to be output, if 7428 * possible. 7429 */ 7430 KMOD_TCPSTAT_INC(tcps_persisttimeo); 7431 /* 7432 * Hack: if the peer is dead/unreachable, we do not time out if the 7433 * window is closed. After a full backoff, drop the connection if 7434 * the idle time (no responses to probes) reaches the maximum 7435 * backoff that we would use if retransmitting. 7436 */ 7437 if (tp->t_rxtshift >= V_tcp_retries && 7438 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 7439 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 7440 KMOD_TCPSTAT_INC(tcps_persistdrop); 7441 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 7442 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 7443 retval = -ETIMEDOUT; /* tcp_drop() */ 7444 goto out; 7445 } 7446 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 7447 tp->snd_una == tp->snd_max) 7448 rack_exit_persist(tp, rack, cts); 7449 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 7450 /* 7451 * If the user has closed the socket then drop a persisting 7452 * connection after a much reduced timeout. 
7453 */ 7454 if (tp->t_state > TCPS_CLOSE_WAIT && 7455 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 7456 KMOD_TCPSTAT_INC(tcps_persistdrop); 7457 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 7458 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 7459 retval = -ETIMEDOUT; /* tcp_drop() */ 7460 goto out; 7461 } 7462 if (rack_send_ack_challange(rack)) { 7463 /* only set it if we were answered */ 7464 if (rack->probe_not_answered) { 7465 counter_u64_add(rack_persists_loss, 1); 7466 rack->r_ctl.persist_lost_ends++; 7467 } 7468 counter_u64_add(rack_persists_sends, 1); 7469 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 7470 } 7471 if (tp->t_rxtshift < V_tcp_retries) 7472 tp->t_rxtshift++; 7473 out: 7474 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 7475 rack_start_hpts_timer(rack, tp, cts, 7476 0, 0, 0); 7477 return (retval); 7478 } 7479 7480 /* 7481 * If a keepalive goes off, we had no other timers 7482 * happening. We always return 1 here since this 7483 * routine either drops the connection or sends 7484 * out a segment with respond. 7485 */ 7486 static int 7487 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7488 { 7489 struct inpcb *inp = tptoinpcb(tp); 7490 7491 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 7492 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 7493 /* 7494 * Keep-alive timer went off; send something or drop connection if 7495 * idle for too long. 7496 */ 7497 KMOD_TCPSTAT_INC(tcps_keeptimeo); 7498 if (tp->t_state < TCPS_ESTABLISHED) 7499 goto dropit; 7500 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 7501 tp->t_state <= TCPS_CLOSING) { 7502 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 7503 goto dropit; 7504 /* 7505 * Send a packet designed to force a response if the peer is 7506 * up and reachable: either an ACK if the connection is 7507 * still alive, or an RST if the peer has closed the 7508 * connection due to timeout or reboot. Using sequence 7509 * number tp->snd_una-1 causes the transmitted zero-length 7510 * segment to lie outside the receive window; by the 7511 * protocol spec, this requires the correspondent TCP to 7512 * respond. 7513 */ 7514 KMOD_TCPSTAT_INC(tcps_keepprobe); 7515 rack_send_ack_challange(rack); 7516 } 7517 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 7518 return (1); 7519 dropit: 7520 KMOD_TCPSTAT_INC(tcps_keepdrops); 7521 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 7522 return (-ETIMEDOUT); /* tcp_drop() */ 7523 } 7524 7525 /* 7526 * Retransmit helper function, clear up all the ack 7527 * flags and take care of important book keeping. 7528 */ 7529 static void 7530 rack_remxt_tmr(struct tcpcb *tp) 7531 { 7532 /* 7533 * The retransmit timer went off, all sack'd blocks must be 7534 * un-acked. 
7535 */ 7536 struct rack_sendmap *rsm, *trsm = NULL; 7537 struct tcp_rack *rack; 7538 7539 rack = (struct tcp_rack *)tp->t_fb_ptr; 7540 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 7541 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 7542 rack->r_timer_override = 1; 7543 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 7544 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 7545 rack->r_late = 0; 7546 rack->r_early = 0; 7547 rack->r_ctl.rc_agg_delayed = 0; 7548 rack->r_ctl.rc_agg_early = 0; 7549 if (rack->r_state && (rack->r_state != tp->t_state)) 7550 rack_set_state(tp, rack); 7551 if (tp->t_rxtshift <= rack_rxt_scoreboard_clear_thresh) { 7552 /* 7553 * We do not clear the scoreboard until we have had 7554 * more than rack_rxt_scoreboard_clear_thresh time-outs. 7555 */ 7556 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 7557 if (rack->r_ctl.rc_resend != NULL) 7558 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 7559 7560 return; 7561 } 7562 /* 7563 * Ideally we would like to be able to 7564 * mark SACK-PASS on anything not acked here. 7565 * 7566 * However, if we do that we would burst out 7567 * all that data 1ms apart. This would be unwise, 7568 * so for now we will just let the normal rxt timer 7569 * and tlp timer take care of it. 7570 * 7571 * Also we really need to stick them back in sequence 7572 * order. This way we send in the proper order and any 7573 * sacks that come floating in will "re-ack" the data. 7574 * To do this we zap the tmap with an INIT and then 7575 * walk through and place every rsm in the tail queue 7576 * hash table back in its seq ordered place. 7577 */ 7578 TAILQ_INIT(&rack->r_ctl.rc_tmap); 7579 7580 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 7581 rsm->r_dupack = 0; 7582 if (rack_verbose_logging) 7583 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7584 /* We must re-add it back to the tlist */ 7585 if (trsm == NULL) { 7586 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7587 } else { 7588 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 7589 } 7590 rsm->r_in_tmap = 1; 7591 trsm = rsm; 7592 if (rsm->r_flags & RACK_ACKED) 7593 rsm->r_flags |= RACK_WAS_ACKED; 7594 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED | RACK_WAS_LOST); 7595 rsm->r_flags |= RACK_MUST_RXT; 7596 } 7597 /* zero the lost since it's all gone */ 7598 rack->r_ctl.rc_considered_lost = 0; 7599 /* Clear the count (we just un-acked them) */ 7600 rack->r_ctl.rc_sacked = 0; 7601 rack->r_ctl.rc_sacklast = NULL; 7602 /* Clear the tlp rtx mark */ 7603 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); 7604 if (rack->r_ctl.rc_resend != NULL) 7605 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 7606 rack->r_ctl.rc_prr_sndcnt = 0; 7607 rack_log_to_prr(rack, 6, 0, __LINE__); 7608 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); 7609 if (rack->r_ctl.rc_resend != NULL) 7610 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 7611 if (((tp->t_flags & TF_SACK_PERMIT) == 0) && 7612 ((tp->t_flags & TF_SENTFIN) == 0)) { 7613 /* 7614 * For non-sack customers new data 7615 * needs to go out as retransmits until 7616 * we retransmit up to snd_max. 
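 * Once set, rack_update_rsm() trims rc_out_at_rto as each
 * MUST_RXT piece is retransmitted and clears r_must_retran again
 * when the retransmissions reach rc_snd_max_at_rto (see below).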
7617 */ 7618 rack->r_must_retran = 1; 7619 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 7620 rack->r_ctl.rc_sacked); 7621 } 7622 } 7623 7624 static void 7625 rack_convert_rtts(struct tcpcb *tp) 7626 { 7627 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC); 7628 tp->t_rxtcur = RACK_REXMTVAL(tp); 7629 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 7630 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 7631 } 7632 if (tp->t_rxtcur > rack_rto_max) { 7633 tp->t_rxtcur = rack_rto_max; 7634 } 7635 } 7636 7637 static void 7638 rack_cc_conn_init(struct tcpcb *tp) 7639 { 7640 struct tcp_rack *rack; 7641 uint32_t srtt; 7642 7643 rack = (struct tcp_rack *)tp->t_fb_ptr; 7644 srtt = tp->t_srtt; 7645 cc_conn_init(tp); 7646 /* 7647 * Now convert to rack's internal format, 7648 * if required. 7649 */ 7650 if ((srtt == 0) && (tp->t_srtt != 0)) 7651 rack_convert_rtts(tp); 7652 /* 7653 * We want a chance to stay in slowstart as 7654 * we create a connection. TCP spec says that 7655 * initially ssthresh is infinite. For our 7656 * purposes that is the snd_wnd. 7657 */ 7658 if (tp->snd_ssthresh < tp->snd_wnd) { 7659 tp->snd_ssthresh = tp->snd_wnd; 7660 } 7661 /* 7662 * We also want to assure a IW worth of 7663 * data can get inflight. 7664 */ 7665 if (rc_init_window(rack) < tp->snd_cwnd) 7666 tp->snd_cwnd = rc_init_window(rack); 7667 } 7668 7669 /* 7670 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 7671 * we will setup to retransmit the lowest seq number outstanding. 7672 */ 7673 static int 7674 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7675 { 7676 struct inpcb *inp = tptoinpcb(tp); 7677 int32_t rexmt; 7678 int32_t retval = 0; 7679 bool isipv6; 7680 7681 if ((tp->t_flags & TF_GPUTINPROG) && 7682 (tp->t_rxtshift)) { 7683 /* 7684 * We have had a second timeout 7685 * measurements on successive rxt's are not profitable. 7686 * It is unlikely to be of any use (the network is 7687 * broken or the client went away). 7688 */ 7689 tp->t_flags &= ~TF_GPUTINPROG; 7690 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7691 rack->r_ctl.rc_gp_srtt /*flex1*/, 7692 tp->gput_seq, 7693 0, 0, 18, __LINE__, NULL, 0); 7694 } 7695 if (ctf_progress_timeout_check(tp, false)) { 7696 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 7697 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7698 return (-ETIMEDOUT); /* tcp_drop() */ 7699 } 7700 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 7701 rack->r_ctl.retran_during_recovery = 0; 7702 rack->rc_ack_required = 1; 7703 rack->r_ctl.dsack_byte_cnt = 0; 7704 if (IN_RECOVERY(tp->t_flags) && 7705 (rack->rto_from_rec == 0)) { 7706 /* 7707 * Mark that we had a rto while in recovery 7708 * and save the ssthresh so if we go back 7709 * into recovery we will have a chance 7710 * to slowstart back to the level. 7711 */ 7712 rack->rto_from_rec = 1; 7713 rack->r_ctl.rto_ssthresh = tp->snd_ssthresh; 7714 } 7715 if (IN_FASTRECOVERY(tp->t_flags)) 7716 tp->t_flags |= TF_WASFRECOVERY; 7717 else 7718 tp->t_flags &= ~TF_WASFRECOVERY; 7719 if (IN_CONGRECOVERY(tp->t_flags)) 7720 tp->t_flags |= TF_WASCRECOVERY; 7721 else 7722 tp->t_flags &= ~TF_WASCRECOVERY; 7723 if (TCPS_HAVEESTABLISHED(tp->t_state) && 7724 (tp->snd_una == tp->snd_max)) { 7725 /* Nothing outstanding .. 
nothing to do */ 7726 return (0); 7727 } 7728 if (rack->r_ctl.dsack_persist) { 7729 rack->r_ctl.dsack_persist--; 7730 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 7731 rack->r_ctl.num_dsack = 0; 7732 } 7733 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 7734 } 7735 /* 7736 * Rack can only run one timer at a time, so we cannot 7737 * run a KEEPINIT (gating SYN sending) and a retransmit 7738 * timer for the SYN. So if we are in a front state and 7739 * have a KEEPINIT timer we need to check the first transmit 7740 * against now to see if we have exceeded the KEEPINIT time 7741 * (if one is set). 7742 */ 7743 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 7744 (TP_KEEPINIT(tp) != 0)) { 7745 struct rack_sendmap *rsm; 7746 7747 rsm = tqhash_min(rack->r_ctl.tqh); 7748 if (rsm) { 7749 /* Ok we have something outstanding to test keepinit with */ 7750 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 7751 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 7752 /* We have exceeded the KEEPINIT time */ 7753 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 7754 goto drop_it; 7755 } 7756 } 7757 } 7758 /* 7759 * Retransmission timer went off. Message has not been acked within 7760 * retransmit interval. Back off to a longer retransmit interval 7761 * and retransmit one segment. 7762 */ 7763 if ((rack->r_ctl.rc_resend == NULL) || 7764 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 7765 /* 7766 * If the rwnd collapsed on 7767 * the one we are retransmitting 7768 * it does not count against the 7769 * rxt count. 7770 */ 7771 tp->t_rxtshift++; 7772 } 7773 rack_remxt_tmr(tp); 7774 if (tp->t_rxtshift > V_tcp_retries) { 7775 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 7776 drop_it: 7777 tp->t_rxtshift = V_tcp_retries; 7778 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 7779 /* XXXGL: previously t_softerror was casted to uint16_t */ 7780 MPASS(tp->t_softerror >= 0); 7781 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; 7782 goto out; /* tcp_drop() */ 7783 } 7784 if (tp->t_state == TCPS_SYN_SENT) { 7785 /* 7786 * If the SYN was retransmitted, indicate CWND to be limited 7787 * to 1 segment in cc_conn_init(). 7788 */ 7789 tp->snd_cwnd = 1; 7790 } else if (tp->t_rxtshift == 1) { 7791 /* 7792 * first retransmit; record ssthresh and cwnd so they can be 7793 * recovered if this turns out to be a "bad" retransmit. A 7794 * retransmit is considered "bad" if an ACK for this segment 7795 * is received within RTT/2 interval; the assumption here is 7796 * that the ACK was already in flight. See "On Estimating 7797 * End-to-End Network Path Properties" by Allman and Paxson 7798 * for more details. 
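 * Illustrative numbers: with a smoothed RTT of 40,000 usecs the
 * bad-retransmit window is roughly 20 msecs worth of ticks; an
 * ACK for the retransmitted data arriving inside that window
 * suggests the original was not lost, and the saved
 * cwnd/ssthresh/recover values are restored.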
7799 */ 7800 tp->snd_cwnd_prev = tp->snd_cwnd; 7801 tp->snd_ssthresh_prev = tp->snd_ssthresh; 7802 tp->snd_recover_prev = tp->snd_recover; 7803 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 7804 tp->t_flags |= TF_PREVVALID; 7805 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 7806 tp->t_flags &= ~TF_PREVVALID; 7807 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 7808 if ((tp->t_state == TCPS_SYN_SENT) || 7809 (tp->t_state == TCPS_SYN_RECEIVED)) 7810 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 7811 else 7812 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 7813 7814 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 7815 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 7816 /* 7817 * We enter the path for PLMTUD if connection is established or, if 7818 * connection is FIN_WAIT_1 status, reason for the last is that if 7819 * amount of data we send is very small, we could send it in couple 7820 * of packets and process straight to FIN. In that case we won't 7821 * catch ESTABLISHED state. 7822 */ 7823 #ifdef INET6 7824 isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false; 7825 #else 7826 isipv6 = false; 7827 #endif 7828 if (((V_tcp_pmtud_blackhole_detect == 1) || 7829 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 7830 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 7831 ((tp->t_state == TCPS_ESTABLISHED) || 7832 (tp->t_state == TCPS_FIN_WAIT_1))) { 7833 /* 7834 * Idea here is that at each stage of mtu probe (usually, 7835 * 1448 -> 1188 -> 524) should be given 2 chances to recover 7836 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 7837 * should take care of that. 7838 */ 7839 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 7840 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 7841 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 7842 tp->t_rxtshift % 2 == 0)) { 7843 /* 7844 * Enter Path MTU Black-hole Detection mechanism: - 7845 * Disable Path MTU Discovery (IP "DF" bit). - 7846 * Reduce MTU to lower value than what we negotiated 7847 * with peer. 7848 */ 7849 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 7850 /* Record that we may have found a black hole. */ 7851 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 7852 /* Keep track of previous MSS. */ 7853 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 7854 } 7855 7856 /* 7857 * Reduce the MSS to blackhole value or to the 7858 * default in an attempt to retransmit. 7859 */ 7860 #ifdef INET6 7861 if (isipv6 && 7862 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 7863 /* Use the sysctl tuneable blackhole MSS. */ 7864 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 7865 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 7866 } else if (isipv6) { 7867 /* Use the default MSS. */ 7868 tp->t_maxseg = V_tcp_v6mssdflt; 7869 /* 7870 * Disable Path MTU Discovery when we switch 7871 * to minmss. 7872 */ 7873 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 7874 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 7875 } 7876 #endif 7877 #if defined(INET6) && defined(INET) 7878 else 7879 #endif 7880 #ifdef INET 7881 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 7882 /* Use the sysctl tuneable blackhole MSS. */ 7883 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 7884 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 7885 } else { 7886 /* Use the default MSS. */ 7887 tp->t_maxseg = V_tcp_mssdflt; 7888 /* 7889 * Disable Path MTU Discovery when we switch 7890 * to minmss. 
7891 */ 7892 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 7893 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 7894 } 7895 #endif 7896 } else { 7897 /* 7898 * If further retransmissions are still unsuccessful 7899 * with a lowered MTU, maybe this isn't a blackhole 7900 * and we restore the previous MSS and blackhole 7901 * detection flags. The limit '6' is determined by 7902 * giving each probe stage (1448, 1188, 524) 2 7903 * chances to recover. 7904 */ 7905 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 7906 (tp->t_rxtshift >= 6)) { 7907 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 7908 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 7909 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 7910 if (tp->t_maxseg < V_tcp_mssdflt) { 7911 /* 7912 * The MSS is so small we should not 7913 * process incoming SACK's since we are 7914 * subject to attack in such a case. 7915 */ 7916 tp->t_flags2 |= TF2_PROC_SACK_PROHIBIT; 7917 } else { 7918 tp->t_flags2 &= ~TF2_PROC_SACK_PROHIBIT; 7919 } 7920 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 7921 } 7922 } 7923 } 7924 /* 7925 * Disable RFC1323 and SACK if we haven't got any response to 7926 * our third SYN to work-around some broken terminal servers 7927 * (most of which have hopefully been retired) that have bad VJ 7928 * header compression code which trashes TCP segments containing 7929 * unknown-to-them TCP options. 7930 */ 7931 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 7932 (tp->t_rxtshift == 3)) 7933 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 7934 /* 7935 * If we backed off this far, our srtt estimate is probably bogus. 7936 * Clobber it so we'll take the next rtt measurement as our srtt; 7937 * move the current srtt into rttvar to keep the current retransmit 7938 * times until then. 7939 */ 7940 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 7941 #ifdef INET6 7942 if ((inp->inp_vflag & INP_IPV6) != 0) 7943 in6_losing(inp); 7944 else 7945 #endif 7946 in_losing(inp); 7947 tp->t_rttvar += tp->t_srtt; 7948 tp->t_srtt = 0; 7949 } 7950 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 7951 tp->snd_recover = tp->snd_max; 7952 tp->t_flags |= TF_ACKNOW; 7953 tp->t_rtttime = 0; 7954 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__); 7955 out: 7956 return (retval); 7957 } 7958 7959 static int 7960 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp) 7961 { 7962 int32_t ret = 0; 7963 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 7964 7965 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 7966 (tp->t_flags & TF_GPUTINPROG)) { 7967 /* 7968 * We have a goodput in progress 7969 * and we have entered a late state. 7970 * Do we have enough data in the sb 7971 * to handle the GPUT request? 7972 */ 7973 uint32_t bytes; 7974 7975 bytes = tp->gput_ack - tp->gput_seq; 7976 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 7977 bytes += tp->gput_seq - tp->snd_una; 7978 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 7979 /* 7980 * There are not enough bytes in the socket 7981 * buffer that have been sent to cover this 7982 * measurement. Cancel it. 
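 * Illustrative numbers: with gput_seq 1000, gput_ack 21000 and
 * snd_una 500, covering the measurement takes 20,500 bytes
 * counted from snd_una; if the send buffer only holds 15,000
 * bytes the goal can never be reached, so the measurement is
 * cancelled here.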
7983 */ 7984 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7985 rack->r_ctl.rc_gp_srtt /*flex1*/, 7986 tp->gput_seq, 7987 0, 0, 18, __LINE__, NULL, 0); 7988 tp->t_flags &= ~TF_GPUTINPROG; 7989 } 7990 } 7991 if (timers == 0) { 7992 return (0); 7993 } 7994 if (tp->t_state == TCPS_LISTEN) { 7995 /* no timers on listen sockets */ 7996 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 7997 return (0); 7998 return (1); 7999 } 8000 if ((timers & PACE_TMR_RACK) && 8001 rack->rc_on_min_to) { 8002 /* 8003 * For the rack timer when we 8004 * are on a min-timeout (which means rrr_conf = 3) 8005 * we don't want to check the timer. It may 8006 * be going off for a pace and thats ok we 8007 * want to send the retransmit (if its ready). 8008 * 8009 * If its on a normal rack timer (non-min) then 8010 * we will check if its expired. 8011 */ 8012 goto skip_time_check; 8013 } 8014 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 8015 uint32_t left; 8016 8017 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 8018 ret = -1; 8019 rack_log_to_processing(rack, cts, ret, 0); 8020 return (0); 8021 } 8022 if (hpts_calling == 0) { 8023 /* 8024 * A user send or queued mbuf (sack) has called us? We 8025 * return 0 and let the pacing guards 8026 * deal with it if they should or 8027 * should not cause a send. 8028 */ 8029 ret = -2; 8030 rack_log_to_processing(rack, cts, ret, 0); 8031 return (0); 8032 } 8033 /* 8034 * Ok our timer went off early and we are not paced false 8035 * alarm, go back to sleep. We make sure we don't have 8036 * no-sack wakeup on since we no longer have a PKT_OUTPUT 8037 * flag in place. 8038 */ 8039 rack->rc_tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE; 8040 ret = -3; 8041 left = rack->r_ctl.rc_timer_exp - cts; 8042 tcp_hpts_insert(tp, HPTS_MS_TO_SLOTS(left)); 8043 rack_log_to_processing(rack, cts, ret, left); 8044 return (1); 8045 } 8046 skip_time_check: 8047 rack->rc_tmr_stopped = 0; 8048 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 8049 if (timers & PACE_TMR_DELACK) { 8050 ret = rack_timeout_delack(tp, rack, cts); 8051 } else if (timers & PACE_TMR_RACK) { 8052 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8053 rack->r_fast_output = 0; 8054 ret = rack_timeout_rack(tp, rack, cts); 8055 } else if (timers & PACE_TMR_TLP) { 8056 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8057 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); 8058 } else if (timers & PACE_TMR_RXT) { 8059 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8060 rack->r_fast_output = 0; 8061 ret = rack_timeout_rxt(tp, rack, cts); 8062 } else if (timers & PACE_TMR_PERSIT) { 8063 ret = rack_timeout_persist(tp, rack, cts); 8064 } else if (timers & PACE_TMR_KEEP) { 8065 ret = rack_timeout_keepalive(tp, rack, cts); 8066 } 8067 rack_log_to_processing(rack, cts, ret, timers); 8068 return (ret); 8069 } 8070 8071 static void 8072 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 8073 { 8074 struct timeval tv; 8075 uint32_t us_cts, flags_on_entry; 8076 uint8_t hpts_removed = 0; 8077 8078 flags_on_entry = rack->r_ctl.rc_hpts_flags; 8079 us_cts = tcp_get_usecs(&tv); 8080 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 8081 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 8082 ((tp->snd_max - tp->snd_una) == 0))) { 8083 tcp_hpts_remove(rack->rc_tp); 8084 hpts_removed = 1; 8085 /* If we were not delayed cancel out the flag. 
*/ 8086 if ((tp->snd_max - tp->snd_una) == 0) 8087 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 8088 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 8089 } 8090 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 8091 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 8092 if (tcp_in_hpts(rack->rc_tp) && 8093 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 8094 /* 8095 * Canceling timer's when we have no output being 8096 * paced. We also must remove ourselves from the 8097 * hpts. 8098 */ 8099 tcp_hpts_remove(rack->rc_tp); 8100 hpts_removed = 1; 8101 } 8102 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 8103 } 8104 if (hpts_removed == 0) 8105 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 8106 } 8107 8108 static int 8109 rack_stopall(struct tcpcb *tp) 8110 { 8111 struct tcp_rack *rack; 8112 8113 rack = (struct tcp_rack *)tp->t_fb_ptr; 8114 rack->t_timers_stopped = 1; 8115 8116 tcp_hpts_remove(tp); 8117 8118 return (0); 8119 } 8120 8121 static void 8122 rack_stop_all_timers(struct tcpcb *tp, struct tcp_rack *rack) 8123 { 8124 /* 8125 * Assure no timers are running. 8126 */ 8127 if (tcp_timer_active(tp, TT_PERSIST)) { 8128 /* We enter in persists, set the flag appropriately */ 8129 rack->rc_in_persist = 1; 8130 } 8131 if (tcp_in_hpts(rack->rc_tp)) { 8132 tcp_hpts_remove(rack->rc_tp); 8133 } 8134 } 8135 8136 static void 8137 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 8138 struct rack_sendmap *rsm, uint64_t ts, uint32_t add_flag, int segsiz) 8139 { 8140 int32_t idx; 8141 8142 rsm->r_rtr_cnt++; 8143 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 8144 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 8145 rsm->r_flags |= RACK_OVERMAX; 8146 } 8147 rsm->r_act_rxt_cnt++; 8148 /* Peg the count/index */ 8149 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8150 rsm->r_dupack = 0; 8151 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { 8152 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 8153 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 8154 } 8155 if (rsm->r_flags & RACK_WAS_LOST) { 8156 /* 8157 * We retransmitted it putting it back in flight 8158 * remove the lost desgination and reduce the 8159 * bytes considered lost. 8160 */ 8161 rsm->r_flags &= ~RACK_WAS_LOST; 8162 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), 8163 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 8164 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) 8165 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; 8166 else 8167 rack->r_ctl.rc_considered_lost = 0; 8168 } 8169 idx = rsm->r_rtr_cnt - 1; 8170 rsm->r_tim_lastsent[idx] = ts; 8171 /* 8172 * Here we don't add in the len of send, since its already 8173 * in snduna <->snd_max. 
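 *
 * In other words, for a retransmission r_fas (flight at send) is just
 * the current flight size -- roughly the unacked bytes between
 * snd_una and snd_max less what has been SACKed -- because the bytes
 * being re-sent are already counted there. Contrast this with the
 * new-data path in rack_log_output() below, where the length of the
 * send is added on top since snd_max has not yet been advanced.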
8174 */ 8175 rsm->r_fas = ctf_flight_size(rack->rc_tp, 8176 rack->r_ctl.rc_sacked); 8177 if (rsm->r_flags & RACK_ACKED) { 8178 /* Problably MTU discovery messing with us */ 8179 rsm->r_flags &= ~RACK_ACKED; 8180 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8181 } 8182 if (rsm->r_in_tmap) { 8183 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8184 rsm->r_in_tmap = 0; 8185 } 8186 /* Lets make sure it really is in or not the GP window */ 8187 rack_mark_in_gp_win(tp, rsm); 8188 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8189 rsm->r_in_tmap = 1; 8190 rsm->r_bas = (uint8_t)(((rsm->r_end - rsm->r_start) + segsiz - 1) / segsiz); 8191 /* Take off the must retransmit flag, if its on */ 8192 if (rsm->r_flags & RACK_MUST_RXT) { 8193 if (rack->r_must_retran) 8194 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 8195 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 8196 /* 8197 * We have retransmitted all we need. Clear 8198 * any must retransmit flags. 8199 */ 8200 rack->r_must_retran = 0; 8201 rack->r_ctl.rc_out_at_rto = 0; 8202 } 8203 rsm->r_flags &= ~RACK_MUST_RXT; 8204 } 8205 /* Remove any collapsed flag */ 8206 rsm->r_flags &= ~RACK_RWND_COLLAPSED; 8207 if (rsm->r_flags & RACK_SACK_PASSED) { 8208 /* We have retransmitted due to the SACK pass */ 8209 rsm->r_flags &= ~RACK_SACK_PASSED; 8210 rsm->r_flags |= RACK_WAS_SACKPASS; 8211 } 8212 } 8213 8214 static uint32_t 8215 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 8216 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint32_t add_flag, int segsiz) 8217 { 8218 /* 8219 * We (re-)transmitted starting at rsm->r_start for some length 8220 * (possibly less than r_end. 8221 */ 8222 struct rack_sendmap *nrsm; 8223 int insret __diagused; 8224 uint32_t c_end; 8225 int32_t len; 8226 8227 len = *lenp; 8228 c_end = rsm->r_start + len; 8229 if (SEQ_GEQ(c_end, rsm->r_end)) { 8230 /* 8231 * We retransmitted the whole piece or more than the whole 8232 * slopping into the next rsm. 8233 */ 8234 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); 8235 if (c_end == rsm->r_end) { 8236 *lenp = 0; 8237 return (0); 8238 } else { 8239 int32_t act_len; 8240 8241 /* Hangs over the end return whats left */ 8242 act_len = rsm->r_end - rsm->r_start; 8243 *lenp = (len - act_len); 8244 return (rsm->r_end); 8245 } 8246 /* We don't get out of this block. */ 8247 } 8248 /* 8249 * Here we retransmitted less than the whole thing which means we 8250 * have to split this into what was transmitted and what was not. 8251 */ 8252 nrsm = rack_alloc_full_limit(rack); 8253 if (nrsm == NULL) { 8254 /* 8255 * We can't get memory, so lets not proceed. 8256 */ 8257 *lenp = 0; 8258 return (0); 8259 } 8260 /* 8261 * So here we are going to take the original rsm and make it what we 8262 * retransmitted. nrsm will be the tail portion we did not 8263 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 8264 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 8265 * 1, 6 and the new piece will be 6, 11. 
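 *
 * A minimal stand-alone sketch of that arithmetic (a hypothetical
 * helper, shown only to illustrate the half-open [r_start, r_end)
 * convention; it is not part of this file):
 *
 *	static void
 *	split_at(uint32_t r_start, uint32_t r_end, uint32_t c_end,
 *	    uint32_t res[4])
 *	{
 *		res[0] = r_start;	// retransmitted piece: [r_start, c_end)
 *		res[1] = c_end;
 *		res[2] = c_end;		// untouched tail:      [c_end, r_end)
 *		res[3] = r_end;
 *	}
 *
 * With r_start = 1, r_end = 11 and c_end = 6 this yields the pieces
 * [1, 6) and [6, 11) described above; rack_clone_rsm() performs the
 * same cut on real rack_sendmap entries just below.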
8266 */ 8267 rack_clone_rsm(rack, nrsm, rsm, c_end); 8268 nrsm->r_dupack = 0; 8269 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 8270 #ifndef INVARIANTS 8271 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 8272 #else 8273 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 8274 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 8275 nrsm, insret, rack, rsm); 8276 } 8277 #endif 8278 if (rsm->r_in_tmap) { 8279 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8280 nrsm->r_in_tmap = 1; 8281 } 8282 rsm->r_flags &= (~RACK_HAS_FIN); 8283 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); 8284 /* Log a split of rsm into rsm and nrsm */ 8285 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 8286 *lenp = 0; 8287 return (0); 8288 } 8289 8290 static void 8291 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 8292 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts, 8293 struct rack_sendmap *hintrsm, uint32_t add_flag, struct mbuf *s_mb, 8294 uint32_t s_moff, int hw_tls, int segsiz) 8295 { 8296 struct tcp_rack *rack; 8297 struct rack_sendmap *rsm, *nrsm; 8298 int insret __diagused; 8299 8300 register uint32_t snd_max, snd_una; 8301 8302 /* 8303 * Add to the RACK log of packets in flight or retransmitted. If 8304 * there is a TS option we will use the TS echoed, if not we will 8305 * grab a TS. 8306 * 8307 * Retransmissions will increment the count and move the ts to its 8308 * proper place. Note that if options do not include TS's then we 8309 * won't be able to effectively use the ACK for an RTT on a retran. 8310 * 8311 * Notes about r_start and r_end. Lets consider a send starting at 8312 * sequence 1 for 10 bytes. In such an example the r_start would be 8313 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 8314 * This means that r_end is actually the first sequence for the next 8315 * slot (11). 8316 * 8317 */ 8318 /* 8319 * If err is set what do we do XXXrrs? should we not add the thing? 8320 * -- i.e. return if err != 0 or should we pretend we sent it? -- 8321 * i.e. proceed with add ** do this for now. 8322 */ 8323 INP_WLOCK_ASSERT(tptoinpcb(tp)); 8324 if (err) 8325 /* 8326 * We don't log errors -- we could but snd_max does not 8327 * advance in this case either. 8328 */ 8329 return; 8330 8331 if (th_flags & TH_RST) { 8332 /* 8333 * We don't log resets and we return immediately from 8334 * sending 8335 */ 8336 return; 8337 } 8338 rack = (struct tcp_rack *)tp->t_fb_ptr; 8339 snd_una = tp->snd_una; 8340 snd_max = tp->snd_max; 8341 if (th_flags & (TH_SYN | TH_FIN)) { 8342 /* 8343 * The call to rack_log_output is made before bumping 8344 * snd_max. This means we can record one extra byte on a SYN 8345 * or FIN if seq_out is adding more on and a FIN is present 8346 * (and we are not resending). 8347 */ 8348 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 8349 len++; 8350 if (th_flags & TH_FIN) 8351 len++; 8352 } 8353 if (SEQ_LEQ((seq_out + len), snd_una)) { 8354 /* Are sending an old segment to induce an ack (keep-alive)? */ 8355 return; 8356 } 8357 if (SEQ_LT(seq_out, snd_una)) { 8358 /* huh? should we panic? */ 8359 uint32_t end; 8360 8361 end = seq_out + len; 8362 seq_out = snd_una; 8363 if (SEQ_GEQ(end, seq_out)) 8364 len = end - seq_out; 8365 else 8366 len = 0; 8367 } 8368 if (len == 0) { 8369 /* We don't log zero window probes */ 8370 return; 8371 } 8372 if (IN_FASTRECOVERY(tp->t_flags)) { 8373 rack->r_ctl.rc_prr_out += len; 8374 } 8375 /* First question is it a retransmission or new? 
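 * The rule applied below: a send that starts exactly at snd_max is
 * new data and gets a freshly allocated rack_sendmap entry; a send
 * that starts below snd_max must already be covered by the map and
 * is treated as a retransmission, located (and split if needed) via
 * the hint or the tailq hash.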
*/ 8376 if (seq_out == snd_max) { 8377 /* Its new */ 8378 rack_chk_req_and_hybrid_on_out(rack, seq_out, len, cts); 8379 again: 8380 rsm = rack_alloc(rack); 8381 if (rsm == NULL) { 8382 /* 8383 * Hmm out of memory and the tcb got destroyed while 8384 * we tried to wait. 8385 */ 8386 return; 8387 } 8388 if (th_flags & TH_FIN) { 8389 rsm->r_flags = RACK_HAS_FIN|add_flag; 8390 } else { 8391 rsm->r_flags = add_flag; 8392 } 8393 if (hw_tls) 8394 rsm->r_hw_tls = 1; 8395 rsm->r_tim_lastsent[0] = cts; 8396 rsm->r_rtr_cnt = 1; 8397 rsm->r_act_rxt_cnt = 0; 8398 rsm->r_rtr_bytes = 0; 8399 if (th_flags & TH_SYN) { 8400 /* The data space is one beyond snd_una */ 8401 rsm->r_flags |= RACK_HAS_SYN; 8402 } 8403 rsm->r_start = seq_out; 8404 rsm->r_end = rsm->r_start + len; 8405 rack_mark_in_gp_win(tp, rsm); 8406 rsm->r_dupack = 0; 8407 /* 8408 * save off the mbuf location that 8409 * sndmbuf_noadv returned (which is 8410 * where we started copying from).. 8411 */ 8412 rsm->m = s_mb; 8413 rsm->soff = s_moff; 8414 /* 8415 * Here we do add in the len of send, since its not yet 8416 * reflected in in snduna <->snd_max 8417 */ 8418 rsm->r_fas = (ctf_flight_size(rack->rc_tp, 8419 rack->r_ctl.rc_sacked) + 8420 (rsm->r_end - rsm->r_start)); 8421 if ((rack->rc_initial_ss_comp == 0) && 8422 (rack->r_ctl.ss_hi_fs < rsm->r_fas)) { 8423 rack->r_ctl.ss_hi_fs = rsm->r_fas; 8424 } 8425 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ 8426 if (rsm->m) { 8427 if (rsm->m->m_len <= rsm->soff) { 8428 /* 8429 * XXXrrs Question, will this happen? 8430 * 8431 * If sbsndptr is set at the correct place 8432 * then s_moff should always be somewhere 8433 * within rsm->m. But if the sbsndptr was 8434 * off then that won't be true. If it occurs 8435 * we need to walkout to the correct location. 8436 */ 8437 struct mbuf *lm; 8438 8439 lm = rsm->m; 8440 while (lm->m_len <= rsm->soff) { 8441 rsm->soff -= lm->m_len; 8442 lm = lm->m_next; 8443 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", 8444 __func__, rack, s_moff, s_mb, rsm->soff)); 8445 } 8446 rsm->m = lm; 8447 } 8448 rsm->orig_m_len = rsm->m->m_len; 8449 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 8450 } else { 8451 rsm->orig_m_len = 0; 8452 rsm->orig_t_space = 0; 8453 } 8454 rsm->r_bas = (uint8_t)((len + segsiz - 1) / segsiz); 8455 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8456 /* Log a new rsm */ 8457 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); 8458 #ifndef INVARIANTS 8459 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 8460 #else 8461 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 8462 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 8463 nrsm, insret, rack, rsm); 8464 } 8465 #endif 8466 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8467 rsm->r_in_tmap = 1; 8468 if (rsm->r_flags & RACK_IS_PCM) { 8469 rack->r_ctl.pcm_i.send_time = cts; 8470 rack->r_ctl.pcm_i.eseq = rsm->r_end; 8471 /* First time through we set the start too */ 8472 if (rack->pcm_in_progress == 0) 8473 rack->r_ctl.pcm_i.sseq = rsm->r_start; 8474 } 8475 /* 8476 * Special case detection, is there just a single 8477 * packet outstanding when we are not in recovery? 8478 * 8479 * If this is true mark it so. 
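 * Concretely: if we are not in fast recovery and, at the time of
 * this new send, exactly one segment's worth of data is in flight,
 * the previous sendmap entry is tagged r_one_out_nr. When that
 * entry's ACK later produces an RTT sample, tcp_rack_xmit_timer()
 * lowers its confidence, since a lone small send says little about
 * buffering along the path.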
8480 */ 8481 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 8482 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 8483 struct rack_sendmap *prsm; 8484 8485 prsm = tqhash_prev(rack->r_ctl.tqh, rsm); 8486 if (prsm) 8487 prsm->r_one_out_nr = 1; 8488 } 8489 return; 8490 } 8491 /* 8492 * If we reach here its a retransmission and we need to find it. 8493 */ 8494 more: 8495 if (hintrsm && (hintrsm->r_start == seq_out)) { 8496 rsm = hintrsm; 8497 hintrsm = NULL; 8498 } else { 8499 /* No hints sorry */ 8500 rsm = NULL; 8501 } 8502 if ((rsm) && (rsm->r_start == seq_out)) { 8503 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); 8504 if (len == 0) { 8505 return; 8506 } else { 8507 goto more; 8508 } 8509 } 8510 /* Ok it was not the last pointer go through it the hard way. */ 8511 refind: 8512 rsm = tqhash_find(rack->r_ctl.tqh, seq_out); 8513 if (rsm) { 8514 if (rsm->r_start == seq_out) { 8515 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); 8516 if (len == 0) { 8517 return; 8518 } else { 8519 goto refind; 8520 } 8521 } 8522 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 8523 /* Transmitted within this piece */ 8524 /* 8525 * Ok we must split off the front and then let the 8526 * update do the rest 8527 */ 8528 nrsm = rack_alloc_full_limit(rack); 8529 if (nrsm == NULL) { 8530 rack_update_rsm(tp, rack, rsm, cts, add_flag, segsiz); 8531 return; 8532 } 8533 /* 8534 * copy rsm to nrsm and then trim the front of rsm 8535 * to not include this part. 8536 */ 8537 rack_clone_rsm(rack, nrsm, rsm, seq_out); 8538 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 8539 #ifndef INVARIANTS 8540 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 8541 #else 8542 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 8543 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 8544 nrsm, insret, rack, rsm); 8545 } 8546 #endif 8547 if (rsm->r_in_tmap) { 8548 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8549 nrsm->r_in_tmap = 1; 8550 } 8551 rsm->r_flags &= (~RACK_HAS_FIN); 8552 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag, segsiz); 8553 if (len == 0) { 8554 return; 8555 } else if (len > 0) 8556 goto refind; 8557 } 8558 } 8559 /* 8560 * Hmm not found in map did they retransmit both old and on into the 8561 * new? 8562 */ 8563 if (seq_out == tp->snd_max) { 8564 goto again; 8565 } else if (SEQ_LT(seq_out, tp->snd_max)) { 8566 #ifdef INVARIANTS 8567 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 8568 seq_out, len, tp->snd_una, tp->snd_max); 8569 printf("Starting Dump of all rack entries\n"); 8570 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 8571 printf("rsm:%p start:%u end:%u\n", 8572 rsm, rsm->r_start, rsm->r_end); 8573 } 8574 printf("Dump complete\n"); 8575 panic("seq_out not found rack:%p tp:%p", 8576 rack, tp); 8577 #endif 8578 } else { 8579 #ifdef INVARIANTS 8580 /* 8581 * Hmm beyond sndmax? (only if we are using the new rtt-pack 8582 * flag) 8583 */ 8584 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 8585 seq_out, len, tp->snd_max, tp); 8586 #endif 8587 } 8588 } 8589 8590 /* 8591 * Record one of the RTT updates from an ack into 8592 * our sample structure. 
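 * A single ACK can carry a cum-ack plus several SACK blocks, so this
 * may run more than once per ACK; each call folds its sample into
 * rack_rs by tracking the lowest and highest RTT seen, a running
 * total and count (for the average), and the smallest us_rtt we are
 * willing to trust along with its confidence and retransmit count.
 * The commit routine below then turns that per-ACK summary into the
 * srtt/rttvar update.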
8593 */
8594
8595 static void
8596 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt,
8597 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt)
8598 {
8599 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
8600 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) {
8601 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt;
8602 }
8603 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
8604 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) {
8605 rack->r_ctl.rack_rs.rs_rtt_highest = rtt;
8606 }
8607 if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
8608 if (us_rtt < rack->r_ctl.rc_gp_lowrtt)
8609 rack->r_ctl.rc_gp_lowrtt = us_rtt;
8610 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd)
8611 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
8612 }
8613 if ((confidence == 1) &&
8614 ((rsm == NULL) ||
8615 (rsm->r_just_ret) ||
8616 (rsm->r_one_out_nr &&
8617 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) {
8618 /*
8619 * If the rsm had a just-return hit on it
8620 * then we can't trust the
8621 * rtt measurement for buffer determination.
8622 * Note that a confidence of 2 indicates
8623 * SACK'd, which overrides the r_just_ret or
8624 * the r_one_out_nr. If it was a CUM-ACK and
8625 * we had only two outstanding, but got an
8626 * ack for only 1, then that also lowers our
8627 * confidence.
8628 */
8629 confidence = 0;
8630 }
8631 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
8632 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) {
8633 if (rack->r_ctl.rack_rs.confidence == 0) {
8634 /*
8635 * We take anything with no current confidence
8636 * saved.
8637 */
8638 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
8639 rack->r_ctl.rack_rs.confidence = confidence;
8640 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
8641 } else if (confidence != 0) {
8642 /*
8643 * Once we have a confident number,
8644 * we can update it with a smaller
8645 * value since this confident number
8646 * may include the DSACK time until
8647 * the next segment (the second one) arrived.
8648 */
8649 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
8650 rack->r_ctl.rack_rs.confidence = confidence;
8651 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
8652 }
8653 }
8654 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence);
8655 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID;
8656 rack->r_ctl.rack_rs.rs_rtt_tot += rtt;
8657 rack->r_ctl.rack_rs.rs_rtt_cnt++;
8658 }
8659
8660 /*
8661 * Collect new round-trip time estimate
8662 * and update averages and current timeout.
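 * Which of the per-ACK samples gets committed depends on
 * rc_rate_sample_method: USE_RTT_LOW takes rs_rtt_lowest,
 * USE_RTT_HIGH takes rs_rtt_highest and USE_RTT_AVG divides
 * rs_rtt_tot by rs_rtt_cnt. Independent of that choice, a confident
 * rs_us_rtt (or the very first sample) also feeds the goodput srtt
 * as a simple running average, roughly
 * rc_gp_srtt = (7 * rc_gp_srtt + rs_us_rtt) / 8.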
8663 */ 8664 static void 8665 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 8666 { 8667 int32_t delta; 8668 int32_t rtt; 8669 8670 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 8671 /* No valid sample */ 8672 return; 8673 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 8674 /* We are to use the lowest RTT seen in a single ack */ 8675 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 8676 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 8677 /* We are to use the highest RTT seen in a single ack */ 8678 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 8679 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 8680 /* We are to use the average RTT seen in a single ack */ 8681 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 8682 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 8683 } else { 8684 #ifdef INVARIANTS 8685 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 8686 #endif 8687 return; 8688 } 8689 if (rtt == 0) 8690 rtt = 1; 8691 if (rack->rc_gp_rtt_set == 0) { 8692 /* 8693 * With no RTT we have to accept 8694 * even one we are not confident of. 8695 */ 8696 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 8697 rack->rc_gp_rtt_set = 1; 8698 } else if (rack->r_ctl.rack_rs.confidence) { 8699 /* update the running gp srtt */ 8700 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 8701 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 8702 } 8703 if (rack->r_ctl.rack_rs.confidence) { 8704 /* 8705 * record the low and high for highly buffered path computation, 8706 * we only do this if we are confident (not a retransmission). 8707 */ 8708 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 8709 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 8710 } 8711 if (rack->rc_highly_buffered == 0) { 8712 /* 8713 * Currently once we declare a path has 8714 * highly buffered there is no going 8715 * back, which may be a problem... 8716 */ 8717 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 8718 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 8719 rack->r_ctl.rc_highest_us_rtt, 8720 rack->r_ctl.rc_lowest_us_rtt, 8721 RACK_RTTS_SEEHBP); 8722 rack->rc_highly_buffered = 1; 8723 } 8724 } 8725 } 8726 if ((rack->r_ctl.rack_rs.confidence) || 8727 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 8728 /* 8729 * If we are highly confident of it <or> it was 8730 * never retransmitted we accept it as the last us_rtt. 8731 */ 8732 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 8733 /* The lowest rtt can be set if its was not retransmited */ 8734 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 8735 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 8736 if (rack->r_ctl.rc_lowest_us_rtt == 0) 8737 rack->r_ctl.rc_lowest_us_rtt = 1; 8738 } 8739 } 8740 rack = (struct tcp_rack *)tp->t_fb_ptr; 8741 if (tp->t_srtt != 0) { 8742 /* 8743 * We keep a simple srtt in microseconds, like our rtt 8744 * measurement. We don't need to do any tricks with shifting 8745 * etc. Instead we just add in 1/8th of the new measurement 8746 * and subtract out 1/8 of the old srtt. We do the same with 8747 * the variance after finding the absolute value of the 8748 * difference between this sample and the current srtt. 
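 *
 * A small worked example (illustrative numbers only): with
 * t_srtt = 40000 us and a new sample rtt = 48000 us,
 *	delta    = 40000 - 48000 = -8000  ->  |delta| = 8000
 *	t_srtt   = 40000 - 40000/8 + 48000/8 = 41000 us
 *	t_rttvar = t_rttvar - t_rttvar/8 + 8000/8
 * Note that delta is computed against the srtt value from before
 * this update, which is why it is taken first in the code below.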
8749 */ 8750 delta = tp->t_srtt - rtt; 8751 /* Take off 1/8th of the current sRTT */ 8752 tp->t_srtt -= (tp->t_srtt >> 3); 8753 /* Add in 1/8th of the new RTT just measured */ 8754 tp->t_srtt += (rtt >> 3); 8755 if (tp->t_srtt <= 0) 8756 tp->t_srtt = 1; 8757 /* Now lets make the absolute value of the variance */ 8758 if (delta < 0) 8759 delta = -delta; 8760 /* Subtract out 1/8th */ 8761 tp->t_rttvar -= (tp->t_rttvar >> 3); 8762 /* Add in 1/8th of the new variance we just saw */ 8763 tp->t_rttvar += (delta >> 3); 8764 if (tp->t_rttvar <= 0) 8765 tp->t_rttvar = 1; 8766 } else { 8767 /* 8768 * No rtt measurement yet - use the unsmoothed rtt. Set the 8769 * variance to half the rtt (so our first retransmit happens 8770 * at 3*rtt). 8771 */ 8772 tp->t_srtt = rtt; 8773 tp->t_rttvar = rtt >> 1; 8774 } 8775 rack->rc_srtt_measure_made = 1; 8776 KMOD_TCPSTAT_INC(tcps_rttupdated); 8777 if (tp->t_rttupdated < UCHAR_MAX) 8778 tp->t_rttupdated++; 8779 #ifdef STATS 8780 if (rack_stats_gets_ms_rtt == 0) { 8781 /* Send in the microsecond rtt used for rxt timeout purposes */ 8782 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 8783 } else if (rack_stats_gets_ms_rtt == 1) { 8784 /* Send in the millisecond rtt used for rxt timeout purposes */ 8785 int32_t ms_rtt; 8786 8787 /* Round up */ 8788 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 8789 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 8790 } else if (rack_stats_gets_ms_rtt == 2) { 8791 /* Send in the millisecond rtt has close to the path RTT as we can get */ 8792 int32_t ms_rtt; 8793 8794 /* Round up */ 8795 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 8796 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 8797 } else { 8798 /* Send in the microsecond rtt has close to the path RTT as we can get */ 8799 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 8800 } 8801 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_PATHRTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 8802 #endif 8803 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 8804 /* 8805 * the retransmit should happen at rtt + 4 * rttvar. Because of the 8806 * way we do the smoothing, srtt and rttvar will each average +1/2 8807 * tick of bias. When we compute the retransmit timer, we want 1/2 8808 * tick of rounding and 1 extra tick because of +-1/2 tick 8809 * uncertainty in the firing of the timer. The bias will give us 8810 * exactly the 1.5 tick we need. But, because the bias is 8811 * statistical, we have to test that we don't drop below the minimum 8812 * feasible timer (which is 2 ticks). 8813 */ 8814 tp->t_rxtshift = 0; 8815 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 8816 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 8817 rack_log_rtt_sample(rack, rtt); 8818 tp->t_softerror = 0; 8819 } 8820 8821 8822 static void 8823 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 8824 { 8825 /* 8826 * Apply to filter the inbound us-rtt at us_cts. 
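 * rc_gp_min_rtt is a windowed-minimum filter, so old_rtt below is
 * the smallest us-rtt retained over the filter's time window. As an
 * illustration of the early probe-rtt entry: if the new sample
 * undercuts that minimum by more than rack_min_rtt_movement and we
 * are dynamically pacing, we may jump into probe-rtt once at least
 *	rack_time_between_probertt -
 *	    (rack_probertt_lower_within * rack_time_between_probertt) / 100
 * has elapsed since the last time the lower-rtt timestamp was taken.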
8827 */ 8828 uint32_t old_rtt; 8829 8830 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 8831 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 8832 us_rtt, us_cts); 8833 if (old_rtt > us_rtt) { 8834 /* We just hit a new lower rtt time */ 8835 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 8836 __LINE__, RACK_RTTS_NEWRTT); 8837 /* 8838 * Only count it if its lower than what we saw within our 8839 * calculated range. 8840 */ 8841 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 8842 if (rack_probertt_lower_within && 8843 rack->rc_gp_dyn_mul && 8844 (rack->use_fixed_rate == 0) && 8845 (rack->rc_always_pace)) { 8846 /* 8847 * We are seeing a new lower rtt very close 8848 * to the time that we would have entered probe-rtt. 8849 * This is probably due to the fact that a peer flow 8850 * has entered probe-rtt. Lets go in now too. 8851 */ 8852 uint32_t val; 8853 8854 val = rack_probertt_lower_within * rack_time_between_probertt; 8855 val /= 100; 8856 if ((rack->in_probe_rtt == 0) && 8857 (rack->rc_skip_timely == 0) && 8858 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 8859 rack_enter_probertt(rack, us_cts); 8860 } 8861 } 8862 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 8863 } 8864 } 8865 } 8866 8867 static int 8868 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 8869 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 8870 { 8871 uint32_t us_rtt; 8872 int32_t i, all; 8873 uint32_t t, len_acked; 8874 8875 if ((rsm->r_flags & RACK_ACKED) || 8876 (rsm->r_flags & RACK_WAS_ACKED)) 8877 /* Already done */ 8878 return (0); 8879 if (rsm->r_no_rtt_allowed) { 8880 /* Not allowed */ 8881 return (0); 8882 } 8883 if (ack_type == CUM_ACKED) { 8884 if (SEQ_GT(th_ack, rsm->r_end)) { 8885 len_acked = rsm->r_end - rsm->r_start; 8886 all = 1; 8887 } else { 8888 len_acked = th_ack - rsm->r_start; 8889 all = 0; 8890 } 8891 } else { 8892 len_acked = rsm->r_end - rsm->r_start; 8893 all = 0; 8894 } 8895 if (rsm->r_rtr_cnt == 1) { 8896 8897 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 8898 if ((int)t <= 0) 8899 t = 1; 8900 if (!tp->t_rttlow || tp->t_rttlow > t) 8901 tp->t_rttlow = t; 8902 if (!rack->r_ctl.rc_rack_min_rtt || 8903 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8904 rack->r_ctl.rc_rack_min_rtt = t; 8905 if (rack->r_ctl.rc_rack_min_rtt == 0) { 8906 rack->r_ctl.rc_rack_min_rtt = 1; 8907 } 8908 } 8909 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 8910 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8911 else 8912 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8913 if (us_rtt == 0) 8914 us_rtt = 1; 8915 if (CC_ALGO(tp)->rttsample != NULL) { 8916 /* Kick the RTT to the CC */ 8917 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 8918 } 8919 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 8920 if (ack_type == SACKED) { 8921 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 8922 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 8923 } else { 8924 /* 8925 * We need to setup what our confidence 8926 * is in this ack. 8927 * 8928 * If the rsm was app limited and it is 8929 * less than a mss in length (the end 8930 * of the send) then we have a gap. 
If we 8931 * were app limited but say we were sending 8932 * multiple MSS's then we are more confident 8933 * int it. 8934 * 8935 * When we are not app-limited then we see if 8936 * the rsm is being included in the current 8937 * measurement, we tell this by the app_limited_needs_set 8938 * flag. 8939 * 8940 * Note that being cwnd blocked is not applimited 8941 * as well as the pacing delay between packets which 8942 * are sending only 1 or 2 MSS's also will show up 8943 * in the RTT. We probably need to examine this algorithm 8944 * a bit more and enhance it to account for the delay 8945 * between rsm's. We could do that by saving off the 8946 * pacing delay of each rsm (in an rsm) and then 8947 * factoring that in somehow though for now I am 8948 * not sure how :) 8949 */ 8950 int calc_conf = 0; 8951 8952 if (rsm->r_flags & RACK_APP_LIMITED) { 8953 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 8954 calc_conf = 0; 8955 else 8956 calc_conf = 1; 8957 } else if (rack->app_limited_needs_set == 0) { 8958 calc_conf = 1; 8959 } else { 8960 calc_conf = 0; 8961 } 8962 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 8963 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 8964 calc_conf, rsm, rsm->r_rtr_cnt); 8965 } 8966 if ((rsm->r_flags & RACK_TLP) && 8967 (!IN_FASTRECOVERY(tp->t_flags))) { 8968 /* Segment was a TLP and our retrans matched */ 8969 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 8970 rack_cong_signal(tp, CC_NDUPACK, th_ack, __LINE__); 8971 } 8972 } 8973 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 8974 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 8975 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { 8976 /* New more recent rack_tmit_time */ 8977 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 8978 if (rack->r_ctl.rc_rack_tmit_time == 0) 8979 rack->r_ctl.rc_rack_tmit_time = 1; 8980 rack->rc_rack_rtt = t; 8981 } 8982 return (1); 8983 } 8984 /* 8985 * We clear the soft/rxtshift since we got an ack. 8986 * There is no assurance we will call the commit() function 8987 * so we need to clear these to avoid incorrect handling. 8988 */ 8989 tp->t_rxtshift = 0; 8990 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 8991 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 8992 tp->t_softerror = 0; 8993 if (to && (to->to_flags & TOF_TS) && 8994 (ack_type == CUM_ACKED) && 8995 (to->to_tsecr) && 8996 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 8997 /* 8998 * Now which timestamp does it match? In this block the ACK 8999 * must be coming from a previous transmission. 9000 */ 9001 for (i = 0; i < rsm->r_rtr_cnt; i++) { 9002 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 9003 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 9004 if ((int)t <= 0) 9005 t = 1; 9006 if (CC_ALGO(tp)->rttsample != NULL) { 9007 /* 9008 * Kick the RTT to the CC, here 9009 * we lie a bit in that we know the 9010 * retransmission is correct even though 9011 * we retransmitted. This is because 9012 * we match the timestamps. 9013 */ 9014 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) 9015 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; 9016 else 9017 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; 9018 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 9019 } 9020 if ((i + 1) < rsm->r_rtr_cnt) { 9021 /* 9022 * The peer ack'd from our previous 9023 * transmission. 
We have a spurious 9024 * retransmission and thus we dont 9025 * want to update our rack_rtt. 9026 * 9027 * Hmm should there be a CC revert here? 9028 * 9029 */ 9030 return (0); 9031 } 9032 if (!tp->t_rttlow || tp->t_rttlow > t) 9033 tp->t_rttlow = t; 9034 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9035 rack->r_ctl.rc_rack_min_rtt = t; 9036 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9037 rack->r_ctl.rc_rack_min_rtt = 1; 9038 } 9039 } 9040 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9041 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9042 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { 9043 /* New more recent rack_tmit_time */ 9044 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 9045 if (rack->r_ctl.rc_rack_tmit_time == 0) 9046 rack->r_ctl.rc_rack_tmit_time = 1; 9047 rack->rc_rack_rtt = t; 9048 } 9049 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); 9050 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, 9051 rsm->r_rtr_cnt); 9052 return (1); 9053 } 9054 } 9055 /* If we are logging log out the sendmap */ 9056 if (tcp_bblogging_on(rack->rc_tp)) { 9057 for (i = 0; i < rsm->r_rtr_cnt; i++) { 9058 rack_log_rtt_sendmap(rack, i, rsm->r_tim_lastsent[i], to->to_tsecr); 9059 } 9060 } 9061 goto ts_not_found; 9062 } else { 9063 /* 9064 * Ok its a SACK block that we retransmitted. or a windows 9065 * machine without timestamps. We can tell nothing from the 9066 * time-stamp since its not there or the time the peer last 9067 * received a segment that moved forward its cum-ack point. 9068 */ 9069 ts_not_found: 9070 i = rsm->r_rtr_cnt - 1; 9071 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 9072 if ((int)t <= 0) 9073 t = 1; 9074 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9075 /* 9076 * We retransmitted and the ack came back in less 9077 * than the smallest rtt we have observed. We most 9078 * likely did an improper retransmit as outlined in 9079 * 6.2 Step 2 point 2 in the rack-draft so we 9080 * don't want to update our rack_rtt. We in 9081 * theory (in future) might want to think about reverting our 9082 * cwnd state but we won't for now. 9083 */ 9084 return (0); 9085 } else if (rack->r_ctl.rc_rack_min_rtt) { 9086 /* 9087 * We retransmitted it and the retransmit did the 9088 * job. 9089 */ 9090 if (!rack->r_ctl.rc_rack_min_rtt || 9091 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9092 rack->r_ctl.rc_rack_min_rtt = t; 9093 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9094 rack->r_ctl.rc_rack_min_rtt = 1; 9095 } 9096 } 9097 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9098 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9099 (uint32_t)rsm->r_tim_lastsent[i]))) { 9100 /* New more recent rack_tmit_time */ 9101 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; 9102 if (rack->r_ctl.rc_rack_tmit_time == 0) 9103 rack->r_ctl.rc_rack_tmit_time = 1; 9104 rack->rc_rack_rtt = t; 9105 } 9106 return (1); 9107 } 9108 } 9109 return (0); 9110 } 9111 9112 /* 9113 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 
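 *
 * The walk below starts from the newly SACKed entry and moves
 * backwards through the transmit map (send order), skipping anything
 * already ACKED or collapsed out of the peer's window. Each
 * earlier-sent, still-unacked entry is marked RACK_SACK_PASSED, and
 * if the rack threshold computed above has already expired relative
 * to cts it is also counted as lost (RACK_WAS_LOST and
 * rc_considered_lost). The loop stops at the first entry already
 * marked SACK_PASSED, since everything sent before that was handled
 * on a previous pass.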
9114 */ 9115 static void 9116 rack_log_sack_passed(struct tcpcb *tp, 9117 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts) 9118 { 9119 struct rack_sendmap *nrsm; 9120 uint32_t thresh; 9121 9122 /* Get our rxt threshold for lost consideration */ 9123 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(tp, rack), cts, __LINE__, 0); 9124 /* Now start looking at rsm's */ 9125 nrsm = rsm; 9126 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 9127 rack_head, r_tnext) { 9128 if (nrsm == rsm) { 9129 /* Skip original segment he is acked */ 9130 continue; 9131 } 9132 if (nrsm->r_flags & RACK_ACKED) { 9133 /* 9134 * Skip ack'd segments, though we 9135 * should not see these, since tmap 9136 * should not have ack'd segments. 9137 */ 9138 continue; 9139 } 9140 if (nrsm->r_flags & RACK_RWND_COLLAPSED) { 9141 /* 9142 * If the peer dropped the rwnd on 9143 * these then we don't worry about them. 9144 */ 9145 continue; 9146 } 9147 /* Check lost state */ 9148 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { 9149 uint32_t exp; 9150 9151 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; 9152 if (TSTMP_LT(exp, cts) || (exp == cts)) { 9153 /* We consider it lost */ 9154 nrsm->r_flags |= RACK_WAS_LOST; 9155 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; 9156 } 9157 } 9158 if (nrsm->r_flags & RACK_SACK_PASSED) { 9159 /* 9160 * We found one that is already marked 9161 * passed, we have been here before and 9162 * so all others below this are marked. 9163 */ 9164 break; 9165 } 9166 nrsm->r_flags |= RACK_SACK_PASSED; 9167 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 9168 } 9169 } 9170 9171 static void 9172 rack_need_set_test(struct tcpcb *tp, 9173 struct tcp_rack *rack, 9174 struct rack_sendmap *rsm, 9175 tcp_seq th_ack, 9176 int line, 9177 int use_which) 9178 { 9179 struct rack_sendmap *s_rsm; 9180 9181 if ((tp->t_flags & TF_GPUTINPROG) && 9182 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 9183 /* 9184 * We were app limited, and this ack 9185 * butts up or goes beyond the point where we want 9186 * to start our next measurement. We need 9187 * to record the new gput_ts as here and 9188 * possibly update the start sequence. 9189 */ 9190 uint32_t seq, ts; 9191 9192 if (rsm->r_rtr_cnt > 1) { 9193 /* 9194 * This is a retransmit, can we 9195 * really make any assessment at this 9196 * point? We are not really sure of 9197 * the timestamp, is it this or the 9198 * previous transmission? 9199 * 9200 * Lets wait for something better that 9201 * is not retransmitted. 9202 */ 9203 return; 9204 } 9205 seq = tp->gput_seq; 9206 ts = tp->gput_ts; 9207 rack->app_limited_needs_set = 0; 9208 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 9209 /* Do we start at a new end? */ 9210 if ((use_which == RACK_USE_BEG) && 9211 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { 9212 /* 9213 * When we get an ACK that just eats 9214 * up some of the rsm, we set RACK_USE_BEG 9215 * since whats at r_start (i.e. th_ack) 9216 * is left unacked and thats where the 9217 * measurement now starts. 9218 */ 9219 tp->gput_seq = rsm->r_start; 9220 } 9221 if ((use_which == RACK_USE_END) && 9222 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 9223 /* 9224 * We use the end when the cumack 9225 * is moving forward and completely 9226 * deleting the rsm passed so basically 9227 * r_end holds th_ack. 9228 * 9229 * For SACK's we also want to use the end 9230 * since this piece just got sacked and 9231 * we want to target anything after that 9232 * in our measurement. 
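 * (RACK_USE_END_OR_THACK, handled just below, is the cum-ack
 * variant: the measurement start moves to the cum-ack point itself
 * when that has run past this entry's r_end, and to r_end otherwise.)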
9233 */ 9234 tp->gput_seq = rsm->r_end; 9235 } 9236 if (use_which == RACK_USE_END_OR_THACK) { 9237 /* 9238 * special case for ack moving forward, 9239 * not a sack, we need to move all the 9240 * way up to where this ack cum-ack moves 9241 * to. 9242 */ 9243 if (SEQ_GT(th_ack, rsm->r_end)) 9244 tp->gput_seq = th_ack; 9245 else 9246 tp->gput_seq = rsm->r_end; 9247 } 9248 if (SEQ_LT(tp->gput_seq, tp->snd_max)) 9249 s_rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 9250 else 9251 s_rsm = NULL; 9252 /* 9253 * Pick up the correct send time if we can the rsm passed in 9254 * may be equal to s_rsm if the RACK_USE_BEG was set. For the other 9255 * two cases (RACK_USE_THACK or RACK_USE_END) most likely we will 9256 * find a different seq i.e. the next send up. 9257 * 9258 * If that has not been sent, s_rsm will be NULL and we must 9259 * arrange it so this function will get called again by setting 9260 * app_limited_needs_set. 9261 */ 9262 if (s_rsm) 9263 rack->r_ctl.rc_gp_output_ts = s_rsm->r_tim_lastsent[0]; 9264 else { 9265 /* If we hit here we have to have *not* sent tp->gput_seq */ 9266 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; 9267 /* Set it up so we will go through here again */ 9268 rack->app_limited_needs_set = 1; 9269 } 9270 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { 9271 /* 9272 * We moved beyond this guy's range, re-calculate 9273 * the new end point. 9274 */ 9275 if (rack->rc_gp_filled == 0) { 9276 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 9277 } else { 9278 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 9279 } 9280 } 9281 /* 9282 * We are moving the goal post, we may be able to clear the 9283 * measure_saw_probe_rtt flag. 9284 */ 9285 if ((rack->in_probe_rtt == 0) && 9286 (rack->measure_saw_probe_rtt) && 9287 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 9288 rack->measure_saw_probe_rtt = 0; 9289 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 9290 seq, tp->gput_seq, 9291 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | 9292 (uint64_t)rack->r_ctl.rc_gp_output_ts), 9293 5, line, NULL, 0); 9294 if (rack->rc_gp_filled && 9295 ((tp->gput_ack - tp->gput_seq) < 9296 max(rc_init_window(rack), (MIN_GP_WIN * 9297 ctf_fixed_maxseg(tp))))) { 9298 uint32_t ideal_amount; 9299 9300 ideal_amount = rack_get_measure_window(tp, rack); 9301 if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) { 9302 /* 9303 * There is no sense of continuing this measurement 9304 * because its too small to gain us anything we 9305 * trust. Skip it and that way we can start a new 9306 * measurement quicker. 9307 */ 9308 tp->t_flags &= ~TF_GPUTINPROG; 9309 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 9310 0, 0, 9311 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | 9312 (uint64_t)rack->r_ctl.rc_gp_output_ts), 9313 6, __LINE__, NULL, 0); 9314 } else { 9315 /* 9316 * Reset the window further out. 
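 * That is, the goal post becomes gput_seq + ideal_amount, with
 * ideal_amount taken from rack_get_measure_window() above; the case
 * where that much data is not even present in the socket buffer was
 * already handled by abandoning the measurement instead.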
9317 */ 9318 tp->gput_ack = tp->gput_seq + ideal_amount; 9319 } 9320 } 9321 rack_tend_gp_marks(tp, rack); 9322 rack_log_gpset(rack, tp->gput_ack, 0, 0, line, 2, rsm); 9323 } 9324 } 9325 9326 static inline int 9327 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) 9328 { 9329 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { 9330 /* Behind our TLP definition or right at */ 9331 return (0); 9332 } 9333 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { 9334 /* The start is beyond or right at our end of TLP definition */ 9335 return (0); 9336 } 9337 /* It has to be a sub-part of the original TLP recorded */ 9338 return (1); 9339 } 9340 9341 static uint32_t 9342 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 9343 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, 9344 uint32_t segsiz) 9345 { 9346 uint32_t start, end, changed = 0; 9347 struct rack_sendmap stack_map; 9348 struct rack_sendmap *rsm, *nrsm, *prev, *next; 9349 int insret __diagused; 9350 int32_t used_ref = 1; 9351 int can_use_hookery = 0; 9352 9353 start = sack->start; 9354 end = sack->end; 9355 rsm = *prsm; 9356 9357 do_rest_ofb: 9358 if ((rsm == NULL) || 9359 (SEQ_LT(end, rsm->r_start)) || 9360 (SEQ_GEQ(start, rsm->r_end)) || 9361 (SEQ_LT(start, rsm->r_start))) { 9362 /* 9363 * We are not in the right spot, 9364 * find the correct spot in the tree. 9365 */ 9366 used_ref = 0; 9367 rsm = tqhash_find(rack->r_ctl.tqh, start); 9368 } 9369 if (rsm == NULL) { 9370 /* TSNH */ 9371 goto out; 9372 } 9373 /* Ok we have an ACK for some piece of this rsm */ 9374 if (rsm->r_start != start) { 9375 if ((rsm->r_flags & RACK_ACKED) == 0) { 9376 /* 9377 * Before any splitting or hookery is 9378 * done is it a TLP of interest i.e. rxt? 9379 */ 9380 if ((rsm->r_flags & RACK_TLP) && 9381 (rsm->r_rtr_cnt > 1)) { 9382 /* 9383 * We are splitting a rxt TLP, check 9384 * if we need to save off the start/end 9385 */ 9386 if (rack->rc_last_tlp_acked_set && 9387 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9388 /* 9389 * We already turned this on since we are inside 9390 * the previous one was a partially sack now we 9391 * are getting another one (maybe all of it). 9392 * 9393 */ 9394 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9395 /* 9396 * Lets make sure we have all of it though. 9397 */ 9398 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9399 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9400 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9401 rack->r_ctl.last_tlp_acked_end); 9402 } 9403 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9404 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9405 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9406 rack->r_ctl.last_tlp_acked_end); 9407 } 9408 } else { 9409 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9410 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9411 rack->rc_last_tlp_past_cumack = 0; 9412 rack->rc_last_tlp_acked_set = 1; 9413 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9414 } 9415 } 9416 /** 9417 * Need to split this in two pieces the before and after, 9418 * the before remains in the map, the after must be 9419 * added. 
In other words we have: 9420 * rsm |--------------| 9421 * sackblk |-------> 9422 * rsm will become 9423 * rsm |---| 9424 * and nrsm will be the sacked piece 9425 * nrsm |----------| 9426 * 9427 * But before we start down that path lets 9428 * see if the sack spans over on top of 9429 * the next guy and it is already sacked. 9430 * 9431 */ 9432 /* 9433 * Hookery can only be used if the two entries 9434 * are in the same bucket and neither one of 9435 * them staddle the bucket line. 9436 */ 9437 next = tqhash_next(rack->r_ctl.tqh, rsm); 9438 if (next && 9439 (rsm->bindex == next->bindex) && 9440 ((rsm->r_flags & RACK_STRADDLE) == 0) && 9441 ((next->r_flags & RACK_STRADDLE) == 0) && 9442 ((rsm->r_flags & RACK_IS_PCM) == 0) && 9443 ((next->r_flags & RACK_IS_PCM) == 0) && 9444 (rsm->r_flags & RACK_IN_GP_WIN) && 9445 (next->r_flags & RACK_IN_GP_WIN)) 9446 can_use_hookery = 1; 9447 else 9448 can_use_hookery = 0; 9449 if (next && can_use_hookery && 9450 (next->r_flags & RACK_ACKED) && 9451 SEQ_GEQ(end, next->r_start)) { 9452 /** 9453 * So the next one is already acked, and 9454 * we can thus by hookery use our stack_map 9455 * to reflect the piece being sacked and 9456 * then adjust the two tree entries moving 9457 * the start and ends around. So we start like: 9458 * rsm |------------| (not-acked) 9459 * next |-----------| (acked) 9460 * sackblk |--------> 9461 * We want to end like so: 9462 * rsm |------| (not-acked) 9463 * next |-----------------| (acked) 9464 * nrsm |-----| 9465 * Where nrsm is a temporary stack piece we 9466 * use to update all the gizmos. 9467 */ 9468 /* Copy up our fudge block */ 9469 nrsm = &stack_map; 9470 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 9471 /* Now adjust our tree blocks */ 9472 tqhash_update_end(rack->r_ctl.tqh, rsm, start); 9473 next->r_start = start; 9474 rsm->r_flags |= RACK_SHUFFLED; 9475 next->r_flags |= RACK_SHUFFLED; 9476 /* Now we must adjust back where next->m is */ 9477 rack_setup_offset_for_rsm(rack, rsm, next); 9478 /* 9479 * Which timestamp do we keep? It is rather 9480 * important in GP measurements to have the 9481 * accurate end of the send window. 9482 * 9483 * We keep the largest value, which is the newest 9484 * send. We do this in case a segment that is 9485 * joined together and not part of a GP estimate 9486 * later gets expanded into the GP estimate. 9487 * 9488 * We prohibit the merging of unlike kinds i.e. 9489 * all pieces that are in the GP estimate can be 9490 * merged and all pieces that are not in a GP estimate 9491 * can be merged, but not disimilar pieces. Combine 9492 * this with taking the highest here and we should 9493 * be ok unless of course the client reneges. Then 9494 * all bets are off. 9495 */ 9496 if (next->r_tim_lastsent[(next->r_rtr_cnt-1)] < 9497 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) 9498 next->r_tim_lastsent[(next->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]; 9499 /* 9500 * And we must keep the newest ack arrival time. 
9501 */ 9502 if (next->r_ack_arrival < 9503 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) 9504 next->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9505 9506 9507 /* We don't need to adjust rsm, it did not change */ 9508 /* Clear out the dup ack count of the remainder */ 9509 rsm->r_dupack = 0; 9510 rsm->r_just_ret = 0; 9511 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9512 /* Now lets make sure our fudge block is right */ 9513 nrsm->r_start = start; 9514 /* Now lets update all the stats and such */ 9515 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 9516 if (rack->app_limited_needs_set) 9517 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 9518 changed += (nrsm->r_end - nrsm->r_start); 9519 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 9520 if (rsm->r_flags & RACK_WAS_LOST) { 9521 int my_chg; 9522 9523 my_chg = (nrsm->r_end - nrsm->r_start); 9524 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 9525 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 9526 if (my_chg <= rack->r_ctl.rc_considered_lost) 9527 rack->r_ctl.rc_considered_lost -= my_chg; 9528 else 9529 rack->r_ctl.rc_considered_lost = 0; 9530 } 9531 if (nrsm->r_flags & RACK_SACK_PASSED) { 9532 rack->r_ctl.rc_reorder_ts = cts; 9533 if (rack->r_ctl.rc_reorder_ts == 0) 9534 rack->r_ctl.rc_reorder_ts = 1; 9535 } 9536 /* 9537 * Now we want to go up from rsm (the 9538 * one left un-acked) to the next one 9539 * in the tmap. We do this so when 9540 * we walk backwards we include marking 9541 * sack-passed on rsm (The one passed in 9542 * is skipped since it is generally called 9543 * on something sacked before removing it 9544 * from the tmap). 9545 */ 9546 if (rsm->r_in_tmap) { 9547 nrsm = TAILQ_NEXT(rsm, r_tnext); 9548 /* 9549 * Now that we have the next 9550 * one walk backwards from there. 9551 */ 9552 if (nrsm && nrsm->r_in_tmap) 9553 rack_log_sack_passed(tp, rack, nrsm, cts); 9554 } 9555 /* Now are we done? */ 9556 if (SEQ_LT(end, next->r_end) || 9557 (end == next->r_end)) { 9558 /* Done with block */ 9559 goto out; 9560 } 9561 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); 9562 counter_u64_add(rack_sack_used_next_merge, 1); 9563 /* Postion for the next block */ 9564 start = next->r_end; 9565 rsm = tqhash_next(rack->r_ctl.tqh, next); 9566 if (rsm == NULL) 9567 goto out; 9568 } else { 9569 /** 9570 * We can't use any hookery here, so we 9571 * need to split the map. We enter like 9572 * so: 9573 * rsm |--------| 9574 * sackblk |-----> 9575 * We will add the new block nrsm and 9576 * that will be the new portion, and then 9577 * fall through after reseting rsm. So we 9578 * split and look like this: 9579 * rsm |----| 9580 * sackblk |-----> 9581 * nrsm |---| 9582 * We then fall through reseting 9583 * rsm to nrsm, so the next block 9584 * picks it up. 9585 */ 9586 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 9587 if (nrsm == NULL) { 9588 /* 9589 * failed XXXrrs what can we do but loose the sack 9590 * info? 
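 * (Losing it is not fatal: the block simply is not recorded in the
 * scoreboard, so at worst we retransmit data the peer already holds
 * or pick the range up again from a later SACK or the cum-ack.)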
9591 */ 9592 goto out; 9593 } 9594 counter_u64_add(rack_sack_splits, 1); 9595 rack_clone_rsm(rack, nrsm, rsm, start); 9596 rsm->r_just_ret = 0; 9597 #ifndef INVARIANTS 9598 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 9599 #else 9600 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 9601 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 9602 nrsm, insret, rack, rsm); 9603 } 9604 #endif 9605 if (rsm->r_in_tmap) { 9606 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 9607 nrsm->r_in_tmap = 1; 9608 } 9609 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 9610 rsm->r_flags &= (~RACK_HAS_FIN); 9611 /* Position us to point to the new nrsm that starts the sack blk */ 9612 rsm = nrsm; 9613 } 9614 } else { 9615 /* Already sacked this piece */ 9616 counter_u64_add(rack_sack_skipped_acked, 1); 9617 if (end == rsm->r_end) { 9618 /* Done with block */ 9619 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 9620 goto out; 9621 } else if (SEQ_LT(end, rsm->r_end)) { 9622 /* A partial sack to a already sacked block */ 9623 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 9624 goto out; 9625 } else { 9626 /* 9627 * The end goes beyond this guy 9628 * reposition the start to the 9629 * next block. 9630 */ 9631 start = rsm->r_end; 9632 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 9633 if (rsm == NULL) 9634 goto out; 9635 } 9636 } 9637 } 9638 if (SEQ_GEQ(end, rsm->r_end)) { 9639 /** 9640 * The end of this block is either beyond this guy or right 9641 * at this guy. I.e.: 9642 * rsm --- |-----| 9643 * end |-----| 9644 * <or> 9645 * end |---------| 9646 */ 9647 if ((rsm->r_flags & RACK_ACKED) == 0) { 9648 /* 9649 * Is it a TLP of interest? 9650 */ 9651 if ((rsm->r_flags & RACK_TLP) && 9652 (rsm->r_rtr_cnt > 1)) { 9653 /* 9654 * We are splitting a rxt TLP, check 9655 * if we need to save off the start/end 9656 */ 9657 if (rack->rc_last_tlp_acked_set && 9658 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9659 /* 9660 * We already turned this on since we are inside 9661 * the previous one was a partially sack now we 9662 * are getting another one (maybe all of it). 9663 */ 9664 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9665 /* 9666 * Lets make sure we have all of it though. 
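 * i.e. widen the recorded TLP block as needed: pull
 * last_tlp_acked_start back to this entry's r_start if it starts
 * earlier, and push last_tlp_acked_end out to its r_end if it ends
 * later, so the remembered range covers every piece of the original
 * TLP transmission that has been SACKed so far.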
9667 */ 9668 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9669 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9670 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9671 rack->r_ctl.last_tlp_acked_end); 9672 } 9673 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9674 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9675 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9676 rack->r_ctl.last_tlp_acked_end); 9677 } 9678 } else { 9679 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9680 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9681 rack->rc_last_tlp_past_cumack = 0; 9682 rack->rc_last_tlp_acked_set = 1; 9683 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9684 } 9685 } 9686 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 9687 changed += (rsm->r_end - rsm->r_start); 9688 /* You get a count for acking a whole segment or more */ 9689 if (rsm->r_flags & RACK_WAS_LOST) { 9690 int my_chg; 9691 9692 my_chg = (rsm->r_end - rsm->r_start); 9693 rsm->r_flags &= ~RACK_WAS_LOST; 9694 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 9695 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 9696 if (my_chg <= rack->r_ctl.rc_considered_lost) 9697 rack->r_ctl.rc_considered_lost -= my_chg; 9698 else 9699 rack->r_ctl.rc_considered_lost = 0; 9700 } 9701 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 9702 if (rsm->r_in_tmap) /* should be true */ 9703 rack_log_sack_passed(tp, rack, rsm, cts); 9704 /* Is Reordering occuring? */ 9705 if (rsm->r_flags & RACK_SACK_PASSED) { 9706 rsm->r_flags &= ~RACK_SACK_PASSED; 9707 rack->r_ctl.rc_reorder_ts = cts; 9708 if (rack->r_ctl.rc_reorder_ts == 0) 9709 rack->r_ctl.rc_reorder_ts = 1; 9710 } 9711 if (rack->app_limited_needs_set) 9712 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 9713 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9714 rsm->r_flags |= RACK_ACKED; 9715 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); 9716 if (rsm->r_in_tmap) { 9717 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 9718 rsm->r_in_tmap = 0; 9719 } 9720 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); 9721 } else { 9722 counter_u64_add(rack_sack_skipped_acked, 1); 9723 } 9724 if (end == rsm->r_end) { 9725 /* This block only - done, setup for next */ 9726 goto out; 9727 } 9728 /* 9729 * There is more not coverend by this rsm move on 9730 * to the next block in the tail queue hash table. 9731 */ 9732 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); 9733 start = rsm->r_end; 9734 rsm = nrsm; 9735 if (rsm == NULL) 9736 goto out; 9737 goto do_rest_ofb; 9738 } 9739 /** 9740 * The end of this sack block is smaller than 9741 * our rsm i.e.: 9742 * rsm --- |-----| 9743 * end |--| 9744 */ 9745 if ((rsm->r_flags & RACK_ACKED) == 0) { 9746 /* 9747 * Is it a TLP of interest? 9748 */ 9749 if ((rsm->r_flags & RACK_TLP) && 9750 (rsm->r_rtr_cnt > 1)) { 9751 /* 9752 * We are splitting a rxt TLP, check 9753 * if we need to save off the start/end 9754 */ 9755 if (rack->rc_last_tlp_acked_set && 9756 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9757 /* 9758 * We already turned this on since we are inside 9759 * the previous one was a partially sack now we 9760 * are getting another one (maybe all of it). 9761 */ 9762 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9763 /* 9764 * Lets make sure we have all of it though. 
9765 */ 9766 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9767 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9768 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9769 rack->r_ctl.last_tlp_acked_end); 9770 } 9771 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9772 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9773 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9774 rack->r_ctl.last_tlp_acked_end); 9775 } 9776 } else { 9777 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9778 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9779 rack->rc_last_tlp_past_cumack = 0; 9780 rack->rc_last_tlp_acked_set = 1; 9781 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9782 } 9783 } 9784 /* 9785 * Hookery can only be used if the two entries 9786 * are in the same bucket and neither one of 9787 * them staddle the bucket line. 9788 */ 9789 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 9790 if (prev && 9791 (rsm->bindex == prev->bindex) && 9792 ((rsm->r_flags & RACK_STRADDLE) == 0) && 9793 ((prev->r_flags & RACK_STRADDLE) == 0) && 9794 ((rsm->r_flags & RACK_IS_PCM) == 0) && 9795 ((prev->r_flags & RACK_IS_PCM) == 0) && 9796 (rsm->r_flags & RACK_IN_GP_WIN) && 9797 (prev->r_flags & RACK_IN_GP_WIN)) 9798 can_use_hookery = 1; 9799 else 9800 can_use_hookery = 0; 9801 if (prev && can_use_hookery && 9802 (prev->r_flags & RACK_ACKED)) { 9803 /** 9804 * Goal, we want the right remainder of rsm to shrink 9805 * in place and span from (rsm->r_start = end) to rsm->r_end. 9806 * We want to expand prev to go all the way 9807 * to prev->r_end <- end. 9808 * so in the tree we have before: 9809 * prev |--------| (acked) 9810 * rsm |-------| (non-acked) 9811 * sackblk |-| 9812 * We churn it so we end up with 9813 * prev |----------| (acked) 9814 * rsm |-----| (non-acked) 9815 * nrsm |-| (temporary) 9816 * 9817 * Note if either prev/rsm is a TLP we don't 9818 * do this. 9819 */ 9820 nrsm = &stack_map; 9821 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 9822 tqhash_update_end(rack->r_ctl.tqh, prev, end); 9823 rsm->r_start = end; 9824 rsm->r_flags |= RACK_SHUFFLED; 9825 prev->r_flags |= RACK_SHUFFLED; 9826 /* Now adjust nrsm (stack copy) to be 9827 * the one that is the small 9828 * piece that was "sacked". 9829 */ 9830 nrsm->r_end = end; 9831 rsm->r_dupack = 0; 9832 /* 9833 * Which timestamp do we keep? It is rather 9834 * important in GP measurements to have the 9835 * accurate end of the send window. 9836 * 9837 * We keep the largest value, which is the newest 9838 * send. We do this in case a segment that is 9839 * joined together and not part of a GP estimate 9840 * later gets expanded into the GP estimate. 9841 * 9842 * We prohibit the merging of unlike kinds i.e. 9843 * all pieces that are in the GP estimate can be 9844 * merged and all pieces that are not in a GP estimate 9845 * can be merged, but not disimilar pieces. Combine 9846 * this with taking the highest here and we should 9847 * be ok unless of course the client reneges. Then 9848 * all bets are off. 9849 */ 9850 if(prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] < 9851 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) { 9852 prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9853 } 9854 /* 9855 * And we must keep the newest ack arrival time. 
9856 */ 9857 9858 if(prev->r_ack_arrival < 9859 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) 9860 prev->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9861 9862 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9863 /* 9864 * Now that the rsm has had its start moved forward 9865 * lets go ahead and get its new place in the world. 9866 */ 9867 rack_setup_offset_for_rsm(rack, prev, rsm); 9868 /* 9869 * Now nrsm is our new little piece 9870 * that is acked (which was merged 9871 * to prev). Update the rtt and changed 9872 * based on that. Also check for reordering. 9873 */ 9874 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 9875 if (rack->app_limited_needs_set) 9876 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 9877 changed += (nrsm->r_end - nrsm->r_start); 9878 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 9879 if (rsm->r_flags & RACK_WAS_LOST) { 9880 int my_chg; 9881 9882 my_chg = (nrsm->r_end - nrsm->r_start); 9883 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 9884 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 9885 if (my_chg <= rack->r_ctl.rc_considered_lost) 9886 rack->r_ctl.rc_considered_lost -= my_chg; 9887 else 9888 rack->r_ctl.rc_considered_lost = 0; 9889 } 9890 if (nrsm->r_flags & RACK_SACK_PASSED) { 9891 rack->r_ctl.rc_reorder_ts = cts; 9892 if (rack->r_ctl.rc_reorder_ts == 0) 9893 rack->r_ctl.rc_reorder_ts = 1; 9894 } 9895 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); 9896 rsm = prev; 9897 counter_u64_add(rack_sack_used_prev_merge, 1); 9898 } else { 9899 /** 9900 * This is the case where our previous 9901 * block is not acked either, so we must 9902 * split the block in two. 9903 */ 9904 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 9905 if (nrsm == NULL) { 9906 /* failed rrs what can we do but loose the sack info? */ 9907 goto out; 9908 } 9909 if ((rsm->r_flags & RACK_TLP) && 9910 (rsm->r_rtr_cnt > 1)) { 9911 /* 9912 * We are splitting a rxt TLP, check 9913 * if we need to save off the start/end 9914 */ 9915 if (rack->rc_last_tlp_acked_set && 9916 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9917 /* 9918 * We already turned this on since this block is inside 9919 * the previous one was a partially sack now we 9920 * are getting another one (maybe all of it). 9921 */ 9922 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9923 /* 9924 * Lets make sure we have all of it though. 9925 */ 9926 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9927 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9928 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9929 rack->r_ctl.last_tlp_acked_end); 9930 } 9931 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9932 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9933 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9934 rack->r_ctl.last_tlp_acked_end); 9935 } 9936 } else { 9937 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9938 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9939 rack->rc_last_tlp_acked_set = 1; 9940 rack->rc_last_tlp_past_cumack = 0; 9941 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9942 } 9943 } 9944 /** 9945 * In this case nrsm becomes 9946 * nrsm->r_start = end; 9947 * nrsm->r_end = rsm->r_end; 9948 * which is un-acked. 9949 * <and> 9950 * rsm->r_end = nrsm->r_start; 9951 * i.e. the remaining un-acked 9952 * piece is left on the left 9953 * hand side. 
9954 * 9955 * So we start like this 9956 * rsm |----------| (not acked) 9957 * sackblk |---| 9958 * build it so we have 9959 * rsm |---| (acked) 9960 * nrsm |------| (not acked) 9961 */ 9962 counter_u64_add(rack_sack_splits, 1); 9963 rack_clone_rsm(rack, nrsm, rsm, end); 9964 rsm->r_flags &= (~RACK_HAS_FIN); 9965 rsm->r_just_ret = 0; 9966 #ifndef INVARIANTS 9967 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 9968 #else 9969 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 9970 panic("Insert in tailq_hash of %p fails ret:% rack:%p rsm:%p", 9971 nrsm, insret, rack, rsm); 9972 } 9973 #endif 9974 if (rsm->r_in_tmap) { 9975 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 9976 nrsm->r_in_tmap = 1; 9977 } 9978 nrsm->r_dupack = 0; 9979 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 9980 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 9981 changed += (rsm->r_end - rsm->r_start); 9982 if (rsm->r_flags & RACK_WAS_LOST) { 9983 int my_chg; 9984 9985 my_chg = (rsm->r_end - rsm->r_start); 9986 rsm->r_flags &= ~RACK_WAS_LOST; 9987 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 9988 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 9989 if (my_chg <= rack->r_ctl.rc_considered_lost) 9990 rack->r_ctl.rc_considered_lost -= my_chg; 9991 else 9992 rack->r_ctl.rc_considered_lost = 0; 9993 } 9994 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 9995 9996 if (rsm->r_in_tmap) /* should be true */ 9997 rack_log_sack_passed(tp, rack, rsm, cts); 9998 /* Is Reordering occuring? */ 9999 if (rsm->r_flags & RACK_SACK_PASSED) { 10000 rsm->r_flags &= ~RACK_SACK_PASSED; 10001 rack->r_ctl.rc_reorder_ts = cts; 10002 if (rack->r_ctl.rc_reorder_ts == 0) 10003 rack->r_ctl.rc_reorder_ts = 1; 10004 } 10005 if (rack->app_limited_needs_set) 10006 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 10007 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10008 rsm->r_flags |= RACK_ACKED; 10009 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); 10010 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__); 10011 if (rsm->r_in_tmap) { 10012 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10013 rsm->r_in_tmap = 0; 10014 } 10015 } 10016 } else if (start != end){ 10017 /* 10018 * The block was already acked. 10019 */ 10020 counter_u64_add(rack_sack_skipped_acked, 1); 10021 } 10022 out: 10023 if (rsm && 10024 ((rsm->r_flags & RACK_TLP) == 0) && 10025 (rsm->r_flags & RACK_ACKED)) { 10026 /* 10027 * Now can we merge where we worked 10028 * with either the previous or 10029 * next block? 10030 */ 10031 next = tqhash_next(rack->r_ctl.tqh, rsm); 10032 while (next) { 10033 if (next->r_flags & RACK_TLP) 10034 break; 10035 /* Only allow merges between ones in or out of GP window */ 10036 if ((next->r_flags & RACK_IN_GP_WIN) && 10037 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { 10038 break; 10039 } 10040 if ((rsm->r_flags & RACK_IN_GP_WIN) && 10041 ((next->r_flags & RACK_IN_GP_WIN) == 0)) { 10042 break; 10043 } 10044 if (rsm->bindex != next->bindex) 10045 break; 10046 if (rsm->r_flags & RACK_STRADDLE) 10047 break; 10048 if (rsm->r_flags & RACK_IS_PCM) 10049 break; 10050 if (next->r_flags & RACK_STRADDLE) 10051 break; 10052 if (next->r_flags & RACK_IS_PCM) 10053 break; 10054 if (next->r_flags & RACK_ACKED) { 10055 /* yep this and next can be merged */ 10056 rsm = rack_merge_rsm(rack, rsm, next); 10057 next = tqhash_next(rack->r_ctl.tqh, rsm); 10058 } else 10059 break; 10060 } 10061 /* Now what about the previous? 
*/ 10062 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10063 while (prev) { 10064 if (prev->r_flags & RACK_TLP) 10065 break; 10066 /* Only allow merges between ones in or out of GP window */ 10067 if ((prev->r_flags & RACK_IN_GP_WIN) && 10068 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { 10069 break; 10070 } 10071 if ((rsm->r_flags & RACK_IN_GP_WIN) && 10072 ((prev->r_flags & RACK_IN_GP_WIN) == 0)) { 10073 break; 10074 } 10075 if (rsm->bindex != prev->bindex) 10076 break; 10077 if (rsm->r_flags & RACK_STRADDLE) 10078 break; 10079 if (rsm->r_flags & RACK_IS_PCM) 10080 break; 10081 if (prev->r_flags & RACK_STRADDLE) 10082 break; 10083 if (prev->r_flags & RACK_IS_PCM) 10084 break; 10085 if (prev->r_flags & RACK_ACKED) { 10086 /* yep the previous and this can be merged */ 10087 rsm = rack_merge_rsm(rack, prev, rsm); 10088 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10089 } else 10090 break; 10091 } 10092 } 10093 if (used_ref == 0) { 10094 counter_u64_add(rack_sack_proc_all, 1); 10095 } else { 10096 counter_u64_add(rack_sack_proc_short, 1); 10097 } 10098 /* Save off the next one for quick reference. */ 10099 nrsm = tqhash_find(rack->r_ctl.tqh, end); 10100 *prsm = rack->r_ctl.rc_sacklast = nrsm; 10101 return (changed); 10102 } 10103 10104 static void inline 10105 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 10106 { 10107 struct rack_sendmap *tmap; 10108 10109 tmap = NULL; 10110 while (rsm && (rsm->r_flags & RACK_ACKED)) { 10111 /* Its no longer sacked, mark it so */ 10112 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 10113 #ifdef INVARIANTS 10114 if (rsm->r_in_tmap) { 10115 panic("rack:%p rsm:%p flags:0x%x in tmap?", 10116 rack, rsm, rsm->r_flags); 10117 } 10118 #endif 10119 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 10120 /* Rebuild it into our tmap */ 10121 if (tmap == NULL) { 10122 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10123 tmap = rsm; 10124 } else { 10125 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 10126 tmap = rsm; 10127 } 10128 tmap->r_in_tmap = 1; 10129 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 10130 } 10131 /* 10132 * Now lets possibly clear the sack filter so we start 10133 * recognizing sacks that cover this area. 10134 */ 10135 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 10136 10137 } 10138 10139 10140 static void inline 10141 rack_rsm_sender_update(struct tcp_rack *rack, struct tcpcb *tp, struct rack_sendmap *rsm, uint8_t from) 10142 { 10143 /* 10144 * We look at advancing the end send time for our GP 10145 * measurement tracking only as the cumulative acknowledgment 10146 * moves forward. You might wonder about this, why not 10147 * at every transmission or retransmission within the 10148 * GP window update the rc_gp_cumack_ts? Well its rather 10149 * nuanced but basically the GP window *may* expand (as 10150 * it does below) or worse and harder to track it may shrink. 10151 * 10152 * This last makes it impossible to track at the time of 10153 * the send, since you may set forward your rc_gp_cumack_ts 10154 * when you send, because that send *is* in your currently 10155 * "guessed" window, but then it shrinks. Now which was 10156 * the send time of the last bytes in the window, by the 10157 * time you ask that question that part of the sendmap 10158 * is freed. So you don't know and you will have too 10159 * long of send window. 
Instead, by updating the time 10160 * marker only when the cum-ack advances, we ensure that 10161 * only the sends within the window of our GP measurement 10162 * are counted. 10163 *
10164 * Another complication from this is the
10165 * merging of sendmap entries. During SACK processing this
10166 * can happen to conserve the sendmap size. That breaks
10167 * everything down in tracking the send window of the GP
10168 * estimate. So to prevent that and keep it working with
10169 * a tiny bit more limited merging, we only allow like
10170 * types to be merged. I.e. if two sends are in the GP window
10171 * then it is ok to merge them together. If two sends are not
10172 * in the GP window it is ok to merge them together too. Though
10173 * one send in and one send out cannot be merged. We combine
10174 * this with never allowing the shrinking of the GP window when
10175 * we are in recovery so that we can properly calculate the
10176 * sending times.
10177 *
10178 * This all of course seems complicated, because it is.. :)
10179 *
10180 * The cum-ack is being advanced upon the sendmap.
10181 * If we are not doing a GP estimate don't
10182 * proceed.
10183 */
10184 uint64_t ts;
10185
10186 if ((tp->t_flags & TF_GPUTINPROG) == 0)
10187 return;
10188 /*
10189 * If this sendmap entry is going
10190 * beyond the measurement window we had picked,
10191 * expand the measurement window by that much.
10192 */
10193 if (SEQ_GT(rsm->r_end, tp->gput_ack)) {
10194 tp->gput_ack = rsm->r_end;
10195 }
10196 /*
10197 * If we have not set up an ack, then we
10198 * have no idea if the newly acked pieces
10199 * will be "in our seq measurement range". If
10200 * they are, the timestamp will be updated when we
10201 * clear the app_limited_needs_set flag.
10202 */
10203 if (rack->app_limited_needs_set)
10204 return;
10205 /*
10206 * Finally, we grab the latest timestamp
10207 * at which this packet was sent and then see
10208 * if:
10209 * a) The packet touches our newly defined GP range.
10210 * b) The time is greater (newer) than the
10211 * one we currently have. If so we update
10212 * our sending end time window.
10213 *
10214 * Note we *do not* do this at send time. The reason
10215 * is that if you do you *may* pick up a newer timestamp
10216 * for a range you are not going to measure. We project
10217 * out how far and then sometimes modify that to be
10218 * smaller. If that occurs then you will have a send
10219 * that does not belong to the range included.
10220 */
10221 if ((ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) <=
10222 rack->r_ctl.rc_gp_cumack_ts)
10223 return;
10224 if (rack_in_gp_window(tp, rsm)) {
10225 rack->r_ctl.rc_gp_cumack_ts = ts;
10226 rack_log_gpset(rack, tp->gput_ack, (uint32_t)ts, rsm->r_end,
10227 __LINE__, from, rsm);
10228 }
10229 }
10230
10231 static void
10232 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to, uint64_t acktime)
10233 {
10234 struct rack_sendmap *rsm;
10235 /*
10236 * The ACK point is advancing to th_ack; we must drop off
10237 * the packets in the rack log and calculate any eligible
10238 * RTTs.
10239 */
10240
10241 if (sack_filter_blks_used(&rack->r_ctl.rack_sf)) {
10242 /*
10243 * If we have some sack blocks in the filter
10244 * let's prune them out by calling sfb with no blocks.
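 * (Handing the filter a NULL list with zero blocks gives it nothing new
 * to remember; it simply drops any held blocks that the advancing
 * cumulative ack has made stale.)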
10245 */ 10246 sack_filter_blks(tp, &rack->r_ctl.rack_sf, NULL, 0, th_ack); 10247 } 10248 if (SEQ_GT(th_ack, tp->snd_una)) { 10249 /* Clear any app ack remembered settings */ 10250 rack->r_ctl.cleared_app_ack = 0; 10251 } 10252 rack->r_wanted_output = 1; 10253 if (SEQ_GT(th_ack, tp->snd_una)) 10254 rack->r_ctl.last_cumack_advance = acktime; 10255 10256 /* Tend any TLP that has been marked for 1/2 the seq space (its old) */ 10257 if ((rack->rc_last_tlp_acked_set == 1)&& 10258 (rack->rc_last_tlp_past_cumack == 1) && 10259 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { 10260 /* 10261 * We have reached the point where our last rack 10262 * tlp retransmit sequence is ahead of the cum-ack. 10263 * This can only happen when the cum-ack moves all 10264 * the way around (its been a full 2^^31+1 bytes 10265 * or more since we sent a retransmitted TLP). Lets 10266 * turn off the valid flag since its not really valid. 10267 * 10268 * Note since sack's also turn on this event we have 10269 * a complication, we have to wait to age it out until 10270 * the cum-ack is by the TLP before checking which is 10271 * what the next else clause does. 10272 */ 10273 rack_log_dsack_event(rack, 9, __LINE__, 10274 rack->r_ctl.last_tlp_acked_start, 10275 rack->r_ctl.last_tlp_acked_end); 10276 rack->rc_last_tlp_acked_set = 0; 10277 rack->rc_last_tlp_past_cumack = 0; 10278 } else if ((rack->rc_last_tlp_acked_set == 1) && 10279 (rack->rc_last_tlp_past_cumack == 0) && 10280 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { 10281 /* 10282 * It is safe to start aging TLP's out. 10283 */ 10284 rack->rc_last_tlp_past_cumack = 1; 10285 } 10286 /* We do the same for the tlp send seq as well */ 10287 if ((rack->rc_last_sent_tlp_seq_valid == 1) && 10288 (rack->rc_last_sent_tlp_past_cumack == 1) && 10289 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { 10290 rack_log_dsack_event(rack, 9, __LINE__, 10291 rack->r_ctl.last_sent_tlp_seq, 10292 (rack->r_ctl.last_sent_tlp_seq + 10293 rack->r_ctl.last_sent_tlp_len)); 10294 rack->rc_last_sent_tlp_seq_valid = 0; 10295 rack->rc_last_sent_tlp_past_cumack = 0; 10296 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && 10297 (rack->rc_last_sent_tlp_past_cumack == 0) && 10298 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { 10299 /* 10300 * It is safe to start aging TLP's send. 10301 */ 10302 rack->rc_last_sent_tlp_past_cumack = 1; 10303 } 10304 more: 10305 rsm = tqhash_min(rack->r_ctl.tqh); 10306 if (rsm == NULL) { 10307 if ((th_ack - 1) == tp->iss) { 10308 /* 10309 * For the SYN incoming case we will not 10310 * have called tcp_output for the sending of 10311 * the SYN, so there will be no map. All 10312 * other cases should probably be a panic. 10313 */ 10314 return; 10315 } 10316 if (tp->t_flags & TF_SENTFIN) { 10317 /* if we sent a FIN we often will not have map */ 10318 return; 10319 } 10320 #ifdef INVARIANTS 10321 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u\n", 10322 tp, 10323 tp->t_state, th_ack, rack, 10324 tp->snd_una, tp->snd_max); 10325 #endif 10326 return; 10327 } 10328 if (SEQ_LT(th_ack, rsm->r_start)) { 10329 /* Huh map is missing this */ 10330 #ifdef INVARIANTS 10331 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 10332 rsm->r_start, 10333 th_ack, tp->t_state, rack->r_state); 10334 #endif 10335 return; 10336 } 10337 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); 10338 10339 /* Now was it a retransmitted TLP? 
*/ 10340 if ((rsm->r_flags & RACK_TLP) && 10341 (rsm->r_rtr_cnt > 1)) { 10342 /* 10343 * Yes, this rsm was a TLP and retransmitted, remember that 10344 * since if a DSACK comes back on this we don't want 10345 * to think of it as a reordered segment. This may 10346 * get updated again with possibly even other TLPs 10347 * in flight, but thats ok. Only when we don't send 10348 * a retransmitted TLP for 1/2 the sequences space 10349 * will it get turned off (above). 10350 */ 10351 if (rack->rc_last_tlp_acked_set && 10352 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 10353 /* 10354 * We already turned this on since the end matches, 10355 * the previous one was a partially ack now we 10356 * are getting another one (maybe all of it). 10357 */ 10358 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 10359 /* 10360 * Lets make sure we have all of it though. 10361 */ 10362 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 10363 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10364 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10365 rack->r_ctl.last_tlp_acked_end); 10366 } 10367 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 10368 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10369 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10370 rack->r_ctl.last_tlp_acked_end); 10371 } 10372 } else { 10373 rack->rc_last_tlp_past_cumack = 1; 10374 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10375 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10376 rack->rc_last_tlp_acked_set = 1; 10377 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 10378 } 10379 } 10380 /* Now do we consume the whole thing? */ 10381 rack->r_ctl.last_tmit_time_acked = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 10382 if (SEQ_GEQ(th_ack, rsm->r_end)) { 10383 /* Its all consumed. */ 10384 uint32_t left; 10385 uint8_t newly_acked; 10386 10387 if (rsm->r_flags & RACK_WAS_LOST) { 10388 /* 10389 * This can happen when we marked it as lost 10390 * and yet before retransmitting we get an ack 10391 * which can happen due to reordering. 10392 */ 10393 rsm->r_flags &= ~RACK_WAS_LOST; 10394 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), 10395 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 10396 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) 10397 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; 10398 else 10399 rack->r_ctl.rc_considered_lost = 0; 10400 } 10401 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 10402 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 10403 rsm->r_rtr_bytes = 0; 10404 /* 10405 * Record the time of highest cumack sent if its in our measurement 10406 * window and possibly bump out the end. 10407 */ 10408 rack_rsm_sender_update(rack, tp, rsm, 4); 10409 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); 10410 if (rsm->r_in_tmap) { 10411 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10412 rsm->r_in_tmap = 0; 10413 } 10414 newly_acked = 1; 10415 if (rsm->r_flags & RACK_ACKED) { 10416 /* 10417 * It was acked on the scoreboard -- remove 10418 * it from total 10419 */ 10420 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 10421 newly_acked = 0; 10422 } else if (rsm->r_flags & RACK_SACK_PASSED) { 10423 /* 10424 * There are segments ACKED on the 10425 * scoreboard further up. We are seeing 10426 * reordering. 
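 * A segment we had declared SACK-passed is being cum-acked without ever
 * having been retransmitted, so record the reordering timestamp and, if
 * nothing has been sent since recovery was entered, arm a possible
 * congestion-state revert.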
10427 */ 10428 rsm->r_flags &= ~RACK_SACK_PASSED; 10429 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10430 rsm->r_flags |= RACK_ACKED; 10431 rack->r_ctl.rc_reorder_ts = cts; 10432 if (rack->r_ctl.rc_reorder_ts == 0) 10433 rack->r_ctl.rc_reorder_ts = 1; 10434 if (rack->r_ent_rec_ns) { 10435 /* 10436 * We have sent no more, and we saw an sack 10437 * then ack arrive. 10438 */ 10439 rack->r_might_revert = 1; 10440 } 10441 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); 10442 } else { 10443 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); 10444 } 10445 if ((rsm->r_flags & RACK_TO_REXT) && 10446 (tp->t_flags & TF_RCVD_TSTMP) && 10447 (to->to_flags & TOF_TS) && 10448 (to->to_tsecr != 0) && 10449 (tp->t_flags & TF_PREVVALID)) { 10450 /* 10451 * We can use the timestamp to see 10452 * if this retransmission was from the 10453 * first transmit. If so we made a mistake. 10454 */ 10455 tp->t_flags &= ~TF_PREVVALID; 10456 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 10457 /* The first transmit is what this ack is for */ 10458 rack_cong_signal(tp, CC_RTO_ERR, th_ack, __LINE__); 10459 } 10460 } 10461 left = th_ack - rsm->r_end; 10462 if (rack->app_limited_needs_set && newly_acked) 10463 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 10464 /* Free back to zone */ 10465 rack_free(rack, rsm); 10466 if (left) { 10467 goto more; 10468 } 10469 /* Check for reneging */ 10470 rsm = tqhash_min(rack->r_ctl.tqh); 10471 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 10472 /* 10473 * The peer has moved snd_una up to 10474 * the edge of this send, i.e. one 10475 * that it had previously acked. The only 10476 * way that can be true if the peer threw 10477 * away data (space issues) that it had 10478 * previously sacked (else it would have 10479 * given us snd_una up to (rsm->r_end). 10480 * We need to undo the acked markings here. 10481 * 10482 * Note we have to look to make sure th_ack is 10483 * our rsm->r_start in case we get an old ack 10484 * where th_ack is behind snd_una. 10485 */ 10486 rack_peer_reneges(rack, rsm, th_ack); 10487 } 10488 return; 10489 } 10490 if (rsm->r_flags & RACK_ACKED) { 10491 /* 10492 * It was acked on the scoreboard -- remove it from 10493 * total for the part being cum-acked. 10494 */ 10495 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 10496 } else { 10497 rack_update_pcm_ack(rack, 1, rsm->r_start, th_ack); 10498 } 10499 /* And what about the lost flag? */ 10500 if (rsm->r_flags & RACK_WAS_LOST) { 10501 /* 10502 * This can happen when we marked it as lost 10503 * and yet before retransmitting we get an ack 10504 * which can happen due to reordering. In this 10505 * case its only a partial ack of the send. 10506 */ 10507 KASSERT((rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)), 10508 ("rsm:%p rack:%p rc_considered_lost goes negative th_ack:%u", rsm, rack, th_ack)); 10509 if (rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)) 10510 rack->r_ctl.rc_considered_lost -= th_ack - rsm->r_start; 10511 else 10512 rack->r_ctl.rc_considered_lost = 0; 10513 } 10514 /* 10515 * Clear the dup ack count for 10516 * the piece that remains. 10517 */ 10518 rsm->r_dupack = 0; 10519 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 10520 if (rsm->r_rtr_bytes) { 10521 /* 10522 * It was retransmitted adjust the 10523 * sack holes for what was acked. 
10524 */ 10525 int ack_am; 10526 10527 ack_am = (th_ack - rsm->r_start); 10528 if (ack_am >= rsm->r_rtr_bytes) { 10529 rack->r_ctl.rc_holes_rxt -= ack_am; 10530 rsm->r_rtr_bytes -= ack_am; 10531 } 10532 } 10533 /* 10534 * Update where the piece starts and record 10535 * the time of send of highest cumack sent if 10536 * its in our GP range. 10537 */ 10538 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 10539 /* Now we need to move our offset forward too */ 10540 if (rsm->m && 10541 ((rsm->orig_m_len != rsm->m->m_len) || 10542 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { 10543 /* Fix up the orig_m_len and possibly the mbuf offset */ 10544 rack_adjust_orig_mlen(rsm); 10545 } 10546 rsm->soff += (th_ack - rsm->r_start); 10547 rack_rsm_sender_update(rack, tp, rsm, 5); 10548 /* The trim will move th_ack into r_start for us */ 10549 tqhash_trim(rack->r_ctl.tqh, th_ack); 10550 /* Now do we need to move the mbuf fwd too? */ 10551 { 10552 struct mbuf *m; 10553 uint32_t soff; 10554 10555 m = rsm->m; 10556 soff = rsm->soff; 10557 if (m) { 10558 while (soff >= m->m_len) { 10559 soff -= m->m_len; 10560 KASSERT((m->m_next != NULL), 10561 (" rsm:%p off:%u soff:%u m:%p", 10562 rsm, rsm->soff, soff, m)); 10563 m = m->m_next; 10564 if (m == NULL) { 10565 /* 10566 * This is a fall-back that prevents a panic. In reality 10567 * we should be able to walk the mbuf's and find our place. 10568 * At this point snd_una has not been updated with the sbcut() yet 10569 * but tqhash_trim did update rsm->r_start so the offset calcuation 10570 * should work fine. This is undesirable since we will take cache 10571 * hits to access the socket buffer. And even more puzzling is that 10572 * it happens occasionally. It should not :( 10573 */ 10574 m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 10575 (rsm->r_start - tp->snd_una), 10576 &soff); 10577 break; 10578 } 10579 } 10580 /* 10581 * Now save in our updated values. 10582 */ 10583 rsm->m = m; 10584 rsm->soff = soff; 10585 rsm->orig_m_len = rsm->m->m_len; 10586 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 10587 } 10588 } 10589 if (rack->app_limited_needs_set && 10590 SEQ_GEQ(th_ack, tp->gput_seq)) 10591 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 10592 } 10593 10594 static void 10595 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 10596 { 10597 struct rack_sendmap *rsm; 10598 int sack_pass_fnd = 0; 10599 10600 if (rack->r_might_revert) { 10601 /* 10602 * Ok we have reordering, have not sent anything, we 10603 * might want to revert the congestion state if nothing 10604 * further has SACK_PASSED on it. Lets check. 10605 * 10606 * We also get here when we have DSACKs come in for 10607 * all the data that we FR'd. Note that a rxt or tlp 10608 * timer clears this from happening. 10609 */ 10610 10611 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 10612 if (rsm->r_flags & RACK_SACK_PASSED) { 10613 sack_pass_fnd = 1; 10614 break; 10615 } 10616 } 10617 if (sack_pass_fnd == 0) { 10618 /* 10619 * We went into recovery 10620 * incorrectly due to reordering! 
10621 */ 10622 int orig_cwnd; 10623 10624 rack->r_ent_rec_ns = 0; 10625 orig_cwnd = tp->snd_cwnd; 10626 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 10627 tp->snd_recover = tp->snd_una; 10628 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 10629 if (IN_RECOVERY(tp->t_flags)) { 10630 rack_exit_recovery(tp, rack, 3); 10631 if ((rack->rto_from_rec == 1) && (rack_ssthresh_rest_rto_rec != 0) ){ 10632 /* 10633 * We were in recovery, had an RTO 10634 * and then re-entered recovery (more sack's arrived) 10635 * and we have properly recorded the old ssthresh from 10636 * the first recovery. We want to be able to slow-start 10637 * back to this level. The ssthresh from the timeout 10638 * and then back into recovery will end up most likely 10639 * to be min(cwnd=1mss, 2mss). Which makes it basically 10640 * so we get no slow-start after our RTO. 10641 */ 10642 rack->rto_from_rec = 0; 10643 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) 10644 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; 10645 } 10646 } 10647 } 10648 rack->r_might_revert = 0; 10649 } 10650 } 10651 10652 10653 static int 10654 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 10655 { 10656 10657 uint32_t am, l_end; 10658 int was_tlp = 0; 10659 10660 if (SEQ_GT(end, start)) 10661 am = end - start; 10662 else 10663 am = 0; 10664 if ((rack->rc_last_tlp_acked_set ) && 10665 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && 10666 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { 10667 /* 10668 * The DSACK is because of a TLP which we don't 10669 * do anything with the reordering window over since 10670 * it was not reordering that caused the DSACK but 10671 * our previous retransmit TLP. 10672 */ 10673 rack_log_dsack_event(rack, 7, __LINE__, start, end); 10674 was_tlp = 1; 10675 goto skip_dsack_round; 10676 } 10677 if (rack->rc_last_sent_tlp_seq_valid) { 10678 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; 10679 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && 10680 (SEQ_LEQ(end, l_end))) { 10681 /* 10682 * This dsack is from the last sent TLP, ignore it 10683 * for reordering purposes. 10684 */ 10685 rack_log_dsack_event(rack, 7, __LINE__, start, end); 10686 was_tlp = 1; 10687 goto skip_dsack_round; 10688 } 10689 } 10690 if (rack->rc_dsack_round_seen == 0) { 10691 rack->rc_dsack_round_seen = 1; 10692 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; 10693 rack->r_ctl.num_dsack++; 10694 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ 10695 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); 10696 } 10697 skip_dsack_round: 10698 /* 10699 * We keep track of how many DSACK blocks we get 10700 * after a recovery incident. 10701 */ 10702 rack->r_ctl.dsack_byte_cnt += am; 10703 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 10704 rack->r_ctl.retran_during_recovery && 10705 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 10706 /* 10707 * False recovery most likely culprit is reordering. If 10708 * nothing else is missing we need to revert. 
10709 */ 10710 rack->r_might_revert = 1; 10711 rack_handle_might_revert(rack->rc_tp, rack); 10712 rack->r_might_revert = 0; 10713 rack->r_ctl.retran_during_recovery = 0; 10714 rack->r_ctl.dsack_byte_cnt = 0; 10715 } 10716 return (was_tlp); 10717 } 10718 10719 static uint32_t 10720 do_rack_compute_pipe(struct tcpcb *tp, struct tcp_rack *rack, uint32_t snd_una) 10721 { 10722 return (((tp->snd_max - snd_una) - 10723 (rack->r_ctl.rc_sacked + rack->r_ctl.rc_considered_lost)) + rack->r_ctl.rc_holes_rxt); 10724 } 10725 10726 static int32_t 10727 rack_compute_pipe(struct tcpcb *tp) 10728 { 10729 return ((int32_t)do_rack_compute_pipe(tp, 10730 (struct tcp_rack *)tp->t_fb_ptr, 10731 tp->snd_una)); 10732 } 10733 10734 static void 10735 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 10736 { 10737 /* Deal with changed and PRR here (in recovery only) */ 10738 uint32_t pipe, snd_una; 10739 10740 rack->r_ctl.rc_prr_delivered += changed; 10741 10742 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 10743 /* 10744 * It is all outstanding, we are application limited 10745 * and thus we don't need more room to send anything. 10746 * Note we use tp->snd_una here and not th_ack because 10747 * the data as yet not been cut from the sb. 10748 */ 10749 rack->r_ctl.rc_prr_sndcnt = 0; 10750 return; 10751 } 10752 /* Compute prr_sndcnt */ 10753 if (SEQ_GT(tp->snd_una, th_ack)) { 10754 snd_una = tp->snd_una; 10755 } else { 10756 snd_una = th_ack; 10757 } 10758 pipe = do_rack_compute_pipe(tp, rack, snd_una); 10759 if (pipe > tp->snd_ssthresh) { 10760 long sndcnt; 10761 10762 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 10763 if (rack->r_ctl.rc_prr_recovery_fs > 0) 10764 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 10765 else { 10766 rack->r_ctl.rc_prr_sndcnt = 0; 10767 rack_log_to_prr(rack, 9, 0, __LINE__); 10768 sndcnt = 0; 10769 } 10770 sndcnt++; 10771 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 10772 sndcnt -= rack->r_ctl.rc_prr_out; 10773 else 10774 sndcnt = 0; 10775 rack->r_ctl.rc_prr_sndcnt = sndcnt; 10776 rack_log_to_prr(rack, 10, 0, __LINE__); 10777 } else { 10778 uint32_t limit; 10779 10780 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 10781 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 10782 else 10783 limit = 0; 10784 if (changed > limit) 10785 limit = changed; 10786 limit += ctf_fixed_maxseg(tp); 10787 if (tp->snd_ssthresh > pipe) { 10788 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 10789 rack_log_to_prr(rack, 11, 0, __LINE__); 10790 } else { 10791 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 10792 rack_log_to_prr(rack, 12, 0, __LINE__); 10793 } 10794 } 10795 } 10796 10797 static void 10798 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck, 10799 int *dsack_seen, int *sacks_seen) 10800 { 10801 uint32_t changed; 10802 struct tcp_rack *rack; 10803 struct rack_sendmap *rsm; 10804 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 10805 register uint32_t th_ack; 10806 int32_t i, j, k, num_sack_blks = 0; 10807 uint32_t cts, acked, ack_point; 10808 int loop_start = 0; 10809 uint32_t tsused; 10810 uint32_t segsiz; 10811 10812 10813 INP_WLOCK_ASSERT(tptoinpcb(tp)); 10814 if (tcp_get_flags(th) & TH_RST) { 10815 /* We don't log resets */ 10816 return; 10817 } 10818 rack = (struct tcp_rack *)tp->t_fb_ptr; 10819 cts = tcp_get_usecs(NULL); 10820 rsm = tqhash_min(rack->r_ctl.tqh); 10821 changed = 0; 10822 th_ack = 
th->th_ack; 10823 segsiz = ctf_fixed_maxseg(rack->rc_tp); 10824 if (BYTES_THIS_ACK(tp, th) >= segsiz) { 10825 /* 10826 * You only get credit for 10827 * MSS and greater (and you get extra 10828 * credit for larger cum-ack moves). 10829 */ 10830 int ac; 10831 10832 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 10833 counter_u64_add(rack_ack_total, ac); 10834 } 10835 if (SEQ_GT(th_ack, tp->snd_una)) { 10836 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 10837 tp->t_acktime = ticks; 10838 } 10839 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 10840 changed = th_ack - rsm->r_start; 10841 if (changed) { 10842 rack_process_to_cumack(tp, rack, th_ack, cts, to, 10843 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); 10844 } 10845 if ((to->to_flags & TOF_SACK) == 0) { 10846 /* We are done nothing left and no sack. */ 10847 rack_handle_might_revert(tp, rack); 10848 /* 10849 * For cases where we struck a dup-ack 10850 * with no SACK, add to the changes so 10851 * PRR will work right. 10852 */ 10853 if (dup_ack_struck && (changed == 0)) { 10854 changed += ctf_fixed_maxseg(rack->rc_tp); 10855 } 10856 goto out; 10857 } 10858 /* Sack block processing */ 10859 if (SEQ_GT(th_ack, tp->snd_una)) 10860 ack_point = th_ack; 10861 else 10862 ack_point = tp->snd_una; 10863 for (i = 0; i < to->to_nsacks; i++) { 10864 bcopy((to->to_sacks + i * TCPOLEN_SACK), 10865 &sack, sizeof(sack)); 10866 sack.start = ntohl(sack.start); 10867 sack.end = ntohl(sack.end); 10868 if (SEQ_GT(sack.end, sack.start) && 10869 SEQ_GT(sack.start, ack_point) && 10870 SEQ_LT(sack.start, tp->snd_max) && 10871 SEQ_GT(sack.end, ack_point) && 10872 SEQ_LEQ(sack.end, tp->snd_max)) { 10873 sack_blocks[num_sack_blks] = sack; 10874 num_sack_blks++; 10875 } else if (SEQ_LEQ(sack.start, th_ack) && 10876 SEQ_LEQ(sack.end, th_ack)) { 10877 int was_tlp; 10878 10879 if (dsack_seen != NULL) 10880 *dsack_seen = 1; 10881 was_tlp = rack_note_dsack(rack, sack.start, sack.end); 10882 /* 10883 * Its a D-SACK block. 10884 */ 10885 tcp_record_dsack(tp, sack.start, sack.end, was_tlp); 10886 } 10887 } 10888 if (rack->rc_dsack_round_seen) { 10889 /* Is the dsack roound over? */ 10890 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { 10891 /* Yes it is */ 10892 rack->rc_dsack_round_seen = 0; 10893 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 10894 } 10895 } 10896 /* 10897 * Sort the SACK blocks so we can update the rack scoreboard with 10898 * just one pass. 10899 */ 10900 num_sack_blks = sack_filter_blks(tp, &rack->r_ctl.rack_sf, sack_blocks, 10901 num_sack_blks, th->th_ack); 10902 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 10903 if (sacks_seen != NULL) 10904 *sacks_seen = num_sack_blks; 10905 if (num_sack_blks == 0) { 10906 /* Nothing to sack, but we need to update counts */ 10907 goto out_with_totals; 10908 } 10909 /* Its a sack of some sort */ 10910 if (num_sack_blks < 2) { 10911 /* Only one, we don't need to sort */ 10912 goto do_sack_work; 10913 } 10914 /* Sort the sacks */ 10915 for (i = 0; i < num_sack_blks; i++) { 10916 for (j = i + 1; j < num_sack_blks; j++) { 10917 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 10918 sack = sack_blocks[i]; 10919 sack_blocks[i] = sack_blocks[j]; 10920 sack_blocks[j] = sack; 10921 } 10922 } 10923 } 10924 /* 10925 * Now are any of the sack block ends the same (yes some 10926 * implementations send these)? 
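 * (Illustrative: after sorting, [200, 300) followed by [100, 300) share an
 * end; the loop below keeps the wider [100, 300), shifts the remaining
 * blocks down one slot and rescans.)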
10927 */ 10928 again: 10929 if (num_sack_blks == 0) 10930 goto out_with_totals; 10931 if (num_sack_blks > 1) { 10932 for (i = 0; i < num_sack_blks; i++) { 10933 for (j = i + 1; j < num_sack_blks; j++) { 10934 if (sack_blocks[i].end == sack_blocks[j].end) { 10935 /* 10936 * Ok these two have the same end we 10937 * want the smallest end and then 10938 * throw away the larger and start 10939 * again. 10940 */ 10941 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 10942 /* 10943 * The second block covers 10944 * more area use that 10945 */ 10946 sack_blocks[i].start = sack_blocks[j].start; 10947 } 10948 /* 10949 * Now collapse out the dup-sack and 10950 * lower the count 10951 */ 10952 for (k = (j + 1); k < num_sack_blks; k++) { 10953 sack_blocks[j].start = sack_blocks[k].start; 10954 sack_blocks[j].end = sack_blocks[k].end; 10955 j++; 10956 } 10957 num_sack_blks--; 10958 goto again; 10959 } 10960 } 10961 } 10962 } 10963 do_sack_work: 10964 /* 10965 * First lets look to see if 10966 * we have retransmitted and 10967 * can use the transmit next? 10968 */ 10969 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 10970 if (rsm && 10971 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 10972 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 10973 /* 10974 * We probably did the FR and the next 10975 * SACK in continues as we would expect. 10976 */ 10977 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, segsiz); 10978 if (acked) { 10979 rack->r_wanted_output = 1; 10980 changed += acked; 10981 } 10982 if (num_sack_blks == 1) { 10983 /* 10984 * This is what we would expect from 10985 * a normal implementation to happen 10986 * after we have retransmitted the FR, 10987 * i.e the sack-filter pushes down 10988 * to 1 block and the next to be retransmitted 10989 * is the sequence in the sack block (has more 10990 * are acked). Count this as ACK'd data to boost 10991 * up the chances of recovering any false positives. 10992 */ 10993 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 10994 counter_u64_add(rack_express_sack, 1); 10995 goto out_with_totals; 10996 } else { 10997 /* 10998 * Start the loop through the 10999 * rest of blocks, past the first block. 11000 */ 11001 loop_start = 1; 11002 } 11003 } 11004 counter_u64_add(rack_sack_total, 1); 11005 rsm = rack->r_ctl.rc_sacklast; 11006 for (i = loop_start; i < num_sack_blks; i++) { 11007 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, segsiz); 11008 if (acked) { 11009 rack->r_wanted_output = 1; 11010 changed += acked; 11011 } 11012 } 11013 out_with_totals: 11014 if (num_sack_blks > 1) { 11015 /* 11016 * You get an extra stroke if 11017 * you have more than one sack-blk, this 11018 * could be where we are skipping forward 11019 * and the sack-filter is still working, or 11020 * it could be an attacker constantly 11021 * moving us. 11022 */ 11023 counter_u64_add(rack_move_some, 1); 11024 } 11025 out: 11026 if (changed) { 11027 /* Something changed cancel the rack timer */ 11028 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11029 } 11030 tsused = tcp_get_usecs(NULL); 11031 rsm = tcp_rack_output(tp, rack, tsused); 11032 if ((!IN_FASTRECOVERY(tp->t_flags)) && 11033 rsm && 11034 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { 11035 /* Enter recovery */ 11036 entered_recovery = 1; 11037 rack_cong_signal(tp, CC_NDUPACK, th_ack, __LINE__); 11038 /* 11039 * When we enter recovery we need to assure we send 11040 * one packet. 
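 * With PRR enabled that means seeding rc_prr_sndcnt with one MSS; the
 * timer override below then forces an immediate pass through the output
 * path so the retransmission goes out right away.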
11041 */ 11042 if (rack->rack_no_prr == 0) { 11043 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 11044 rack_log_to_prr(rack, 8, 0, __LINE__); 11045 } 11046 rack->r_timer_override = 1; 11047 rack->r_early = 0; 11048 rack->r_ctl.rc_agg_early = 0; 11049 } else if (IN_FASTRECOVERY(tp->t_flags) && 11050 rsm && 11051 (rack->r_rr_config == 3)) { 11052 /* 11053 * Assure we can output and we get no 11054 * remembered pace time except the retransmit. 11055 */ 11056 rack->r_timer_override = 1; 11057 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 11058 rack->r_ctl.rc_resend = rsm; 11059 } 11060 if (IN_FASTRECOVERY(tp->t_flags) && 11061 (rack->rack_no_prr == 0) && 11062 (entered_recovery == 0)) { 11063 rack_update_prr(tp, rack, changed, th_ack); 11064 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 11065 ((tcp_in_hpts(rack->rc_tp) == 0) && 11066 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 11067 /* 11068 * If you are pacing output you don't want 11069 * to override. 11070 */ 11071 rack->r_early = 0; 11072 rack->r_ctl.rc_agg_early = 0; 11073 rack->r_timer_override = 1; 11074 } 11075 } 11076 } 11077 11078 static void 11079 rack_strike_dupack(struct tcp_rack *rack, tcp_seq th_ack) 11080 { 11081 struct rack_sendmap *rsm; 11082 11083 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 11084 while (rsm) { 11085 /* 11086 * We need to skip anything already set 11087 * to be retransmitted. 11088 */ 11089 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 11090 (rsm->r_flags & RACK_MUST_RXT)) { 11091 rsm = TAILQ_NEXT(rsm, r_tnext); 11092 continue; 11093 } 11094 break; 11095 } 11096 if (rsm && (rsm->r_dupack < 0xff)) { 11097 rsm->r_dupack++; 11098 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 11099 struct timeval tv; 11100 uint32_t cts; 11101 /* 11102 * Here we see if we need to retransmit. For 11103 * a SACK type connection if enough time has passed 11104 * we will get a return of the rsm. For a non-sack 11105 * connection we will get the rsm returned if the 11106 * dupack value is 3 or more. 11107 */ 11108 cts = tcp_get_usecs(&tv); 11109 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); 11110 if (rack->r_ctl.rc_resend != NULL) { 11111 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { 11112 rack_cong_signal(rack->rc_tp, CC_NDUPACK, 11113 th_ack, __LINE__); 11114 } 11115 rack->r_wanted_output = 1; 11116 rack->r_timer_override = 1; 11117 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 11118 } 11119 } else { 11120 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 11121 } 11122 } 11123 } 11124 11125 static void 11126 rack_check_bottom_drag(struct tcpcb *tp, 11127 struct tcp_rack *rack, 11128 struct socket *so) 11129 { 11130 /* 11131 * So what is dragging bottom? 11132 * 11133 * Dragging bottom means you were under pacing and had a 11134 * delay in processing inbound acks waiting on our pacing 11135 * timer to expire. While you were waiting all of the acknowledgments 11136 * for the packets you sent have arrived. This means we are pacing 11137 * way underneath the bottleneck to the point where our Goodput 11138 * measurements stop working, since they require more than one 11139 * ack (usually at least 8 packets worth with multiple acks so we can 11140 * gauge the inter-ack times). If that occurs we have a real problem 11141 * since we are stuck in a hole that we can't get out of without 11142 * something speeding us up. 11143 * 11144 * We also check to see if we are widdling down to just one segment 11145 * outstanding. 
If this occurs and we have room to send in our cwnd/rwnd 11146 * then we are adding the delayed ack interval into our measurments and 11147 * we need to speed up slightly. 11148 */ 11149 uint32_t segsiz, minseg; 11150 11151 segsiz = ctf_fixed_maxseg(tp); 11152 minseg = segsiz; 11153 if (tp->snd_max == tp->snd_una) { 11154 /* 11155 * We are doing dynamic pacing and we are way 11156 * under. Basically everything got acked while 11157 * we were still waiting on the pacer to expire. 11158 * 11159 * This means we need to boost the b/w in 11160 * addition to any earlier boosting of 11161 * the multiplier. 11162 */ 11163 uint64_t lt_bw; 11164 11165 tcp_trace_point(rack->rc_tp, TCP_TP_PACED_BOTTOM); 11166 lt_bw = rack_get_lt_bw(rack); 11167 rack->rc_dragged_bottom = 1; 11168 rack_validate_multipliers_at_or_above100(rack); 11169 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && 11170 (rack->dis_lt_bw == 0) && 11171 (rack->use_lesser_lt_bw == 0) && 11172 (lt_bw > 0)) { 11173 /* 11174 * Lets use the long-term b/w we have 11175 * been getting as a base. 11176 */ 11177 if (rack->rc_gp_filled == 0) { 11178 if (lt_bw > ONE_POINT_TWO_MEG) { 11179 /* 11180 * If we have no measurement 11181 * don't let us set in more than 11182 * 1.2Mbps. If we are still too 11183 * low after pacing with this we 11184 * will hopefully have a max b/w 11185 * available to sanity check things. 11186 */ 11187 lt_bw = ONE_POINT_TWO_MEG; 11188 } 11189 rack->r_ctl.rc_rtt_diff = 0; 11190 rack->r_ctl.gp_bw = lt_bw; 11191 rack->rc_gp_filled = 1; 11192 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 11193 rack->r_ctl.num_measurements = RACK_REQ_AVG; 11194 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 11195 } else if (lt_bw > rack->r_ctl.gp_bw) { 11196 rack->r_ctl.rc_rtt_diff = 0; 11197 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 11198 rack->r_ctl.num_measurements = RACK_REQ_AVG; 11199 rack->r_ctl.gp_bw = lt_bw; 11200 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 11201 } else 11202 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11203 if ((rack->gp_ready == 0) && 11204 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 11205 /* We have enough measurements now */ 11206 rack->gp_ready = 1; 11207 if (rack->dgp_on || 11208 rack->rack_hibeta) 11209 rack_set_cc_pacing(rack); 11210 if (rack->defer_options) 11211 rack_apply_deferred_options(rack); 11212 } 11213 } else { 11214 /* 11215 * zero rtt possibly?, settle for just an old increase. 11216 */ 11217 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11218 } 11219 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 11220 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), 11221 minseg)) && 11222 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && 11223 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && 11224 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= 11225 (segsiz * rack_req_segs))) { 11226 /* 11227 * We are doing dynamic GP pacing and 11228 * we have everything except 1MSS or less 11229 * bytes left out. We are still pacing away. 11230 * And there is data that could be sent, This 11231 * means we are inserting delayed ack time in 11232 * our measurements because we are pacing too slow. 
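 * So note that we dragged bottom and nudge the b/w multiplier up a bit
 * to work the delayed-ack time back out of the measurements.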
11233 */ 11234 rack_validate_multipliers_at_or_above100(rack); 11235 rack->rc_dragged_bottom = 1; 11236 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11237 } 11238 } 11239 11240 #ifdef TCP_REQUEST_TRK 11241 static void 11242 rack_log_hybrid(struct tcp_rack *rack, uint32_t seq, 11243 struct tcp_sendfile_track *cur, uint8_t mod, int line, int err) 11244 { 11245 int do_log; 11246 11247 do_log = tcp_bblogging_on(rack->rc_tp); 11248 if (do_log == 0) { 11249 if ((do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) )== 0) 11250 return; 11251 /* We only allow the three below with point logging on */ 11252 if ((mod != HYBRID_LOG_RULES_APP) && 11253 (mod != HYBRID_LOG_RULES_SET) && 11254 (mod != HYBRID_LOG_REQ_COMP)) 11255 return; 11256 11257 } 11258 if (do_log) { 11259 union tcp_log_stackspecific log; 11260 struct timeval tv; 11261 11262 /* Convert our ms to a microsecond */ 11263 memset(&log, 0, sizeof(log)); 11264 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 11265 log.u_bbr.flex1 = seq; 11266 log.u_bbr.cwnd_gain = line; 11267 if (cur != NULL) { 11268 uint64_t off; 11269 11270 log.u_bbr.flex2 = cur->start_seq; 11271 log.u_bbr.flex3 = cur->end_seq; 11272 log.u_bbr.flex4 = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 11273 log.u_bbr.flex5 = (uint32_t)(cur->localtime & 0x00000000ffffffff); 11274 log.u_bbr.flex6 = cur->flags; 11275 log.u_bbr.pkts_out = cur->hybrid_flags; 11276 log.u_bbr.rttProp = cur->timestamp; 11277 log.u_bbr.cur_del_rate = cur->cspr; 11278 log.u_bbr.bw_inuse = cur->start; 11279 log.u_bbr.applimited = (uint32_t)(cur->end & 0x00000000ffffffff); 11280 log.u_bbr.delivered = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff) ; 11281 log.u_bbr.epoch = (uint32_t)(cur->deadline & 0x00000000ffffffff); 11282 log.u_bbr.lt_epoch = (uint32_t)((cur->deadline >> 32) & 0x00000000ffffffff) ; 11283 log.u_bbr.inhpts = 1; 11284 #ifdef TCP_REQUEST_TRK 11285 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 11286 log.u_bbr.use_lt_bw = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 11287 #endif 11288 } else { 11289 log.u_bbr.flex2 = err; 11290 } 11291 /* 11292 * Fill in flex7 to be CHD (catchup|hybrid|DGP) 11293 */ 11294 log.u_bbr.flex7 = rack->rc_catch_up; 11295 log.u_bbr.flex7 <<= 1; 11296 log.u_bbr.flex7 |= rack->rc_hybrid_mode; 11297 log.u_bbr.flex7 <<= 1; 11298 log.u_bbr.flex7 |= rack->dgp_on; 11299 /* 11300 * Compose bbr_state to be a bit wise 0000ADHF 11301 * where A is the always_pace flag 11302 * where D is the dgp_on flag 11303 * where H is the hybrid_mode on flag 11304 * where F is the use_fixed_rate flag. 
11305 */ 11306 log.u_bbr.bbr_state = rack->rc_always_pace; 11307 log.u_bbr.bbr_state <<= 1; 11308 log.u_bbr.bbr_state |= rack->dgp_on; 11309 log.u_bbr.bbr_state <<= 1; 11310 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 11311 log.u_bbr.bbr_state <<= 1; 11312 log.u_bbr.bbr_state |= rack->use_fixed_rate; 11313 log.u_bbr.flex8 = mod; 11314 log.u_bbr.delRate = rack->r_ctl.bw_rate_cap; 11315 log.u_bbr.bbr_substate = rack->r_ctl.client_suggested_maxseg; 11316 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 11317 log.u_bbr.pkt_epoch = rack->rc_tp->tcp_hybrid_start; 11318 log.u_bbr.lost = rack->rc_tp->tcp_hybrid_error; 11319 log.u_bbr.pacing_gain = (uint16_t)rack->rc_tp->tcp_hybrid_stop; 11320 tcp_log_event(rack->rc_tp, NULL, 11321 &rack->rc_inp->inp_socket->so_rcv, 11322 &rack->rc_inp->inp_socket->so_snd, 11323 TCP_HYBRID_PACING_LOG, 0, 11324 0, &log, false, NULL, __func__, __LINE__, &tv); 11325 } 11326 } 11327 #endif 11328 11329 #ifdef TCP_REQUEST_TRK 11330 static void 11331 rack_set_dgp_hybrid_mode(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts) 11332 { 11333 struct tcp_sendfile_track *rc_cur, *orig_ent; 11334 struct tcpcb *tp; 11335 int err = 0; 11336 11337 orig_ent = rack->r_ctl.rc_last_sft; 11338 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, seq); 11339 if (rc_cur == NULL) { 11340 /* If not in the beginning what about the end piece */ 11341 if (rack->rc_hybrid_mode) 11342 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); 11343 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, (seq + len - 1)); 11344 } else { 11345 err = 12345; 11346 } 11347 /* If we find no parameters we are in straight DGP mode */ 11348 if(rc_cur == NULL) { 11349 /* None found for this seq, just DGP for now */ 11350 if (rack->rc_hybrid_mode) { 11351 rack->r_ctl.client_suggested_maxseg = 0; 11352 rack->rc_catch_up = 0; 11353 if (rack->cspr_is_fcc == 0) 11354 rack->r_ctl.bw_rate_cap = 0; 11355 else 11356 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 11357 } 11358 if (rack->rc_hybrid_mode) { 11359 rack_log_hybrid(rack, (seq + len - 1), NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); 11360 } 11361 if (rack->r_ctl.rc_last_sft) { 11362 rack->r_ctl.rc_last_sft = NULL; 11363 } 11364 return; 11365 } 11366 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_WASSET) == 0) { 11367 /* This entry was never setup for hybrid pacing on/off etc */ 11368 if (rack->rc_hybrid_mode) { 11369 rack->r_ctl.client_suggested_maxseg = 0; 11370 rack->rc_catch_up = 0; 11371 rack->r_ctl.bw_rate_cap = 0; 11372 } 11373 if (rack->r_ctl.rc_last_sft) { 11374 rack->r_ctl.rc_last_sft = NULL; 11375 } 11376 if ((rc_cur->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { 11377 rc_cur->flags |= TCP_TRK_TRACK_FLG_FSND; 11378 rc_cur->first_send = cts; 11379 rc_cur->sent_at_fs = rack->rc_tp->t_sndbytes; 11380 rc_cur->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; 11381 } 11382 return; 11383 } 11384 /* 11385 * Ok if we have a new entry *or* have never 11386 * set up an entry we need to proceed. If 11387 * we have already set it up this entry we 11388 * just continue along with what we already 11389 * setup. 
11390 */ 11391 tp = rack->rc_tp; 11392 if ((rack->r_ctl.rc_last_sft != NULL) && 11393 (rack->r_ctl.rc_last_sft == rc_cur)) { 11394 /* Its already in place */ 11395 if (rack->rc_hybrid_mode) 11396 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_ISSAME, __LINE__, 0); 11397 return; 11398 } 11399 if (rack->rc_hybrid_mode == 0) { 11400 rack->r_ctl.rc_last_sft = rc_cur; 11401 if (orig_ent) { 11402 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; 11403 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; 11404 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; 11405 } 11406 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0); 11407 return; 11408 } 11409 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CSPR) && rc_cur->cspr){ 11410 /* Compensate for all the header overhead's */ 11411 if (rack->cspr_is_fcc == 0) 11412 rack->r_ctl.bw_rate_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); 11413 else 11414 rack->r_ctl.fillcw_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); 11415 } else { 11416 if (rack->rc_hybrid_mode) { 11417 if (rack->cspr_is_fcc == 0) 11418 rack->r_ctl.bw_rate_cap = 0; 11419 else 11420 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 11421 } 11422 } 11423 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_H_MS) 11424 rack->r_ctl.client_suggested_maxseg = rc_cur->hint_maxseg; 11425 else 11426 rack->r_ctl.client_suggested_maxseg = 0; 11427 if (rc_cur->timestamp == rack->r_ctl.last_tm_mark) { 11428 /* 11429 * It is the same timestamp as the previous one 11430 * add the hybrid flag that will indicate we use 11431 * sendtime not arrival time for catch-up mode. 11432 */ 11433 rc_cur->hybrid_flags |= TCP_HYBRID_PACING_SENDTIME; 11434 } 11435 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CU) && 11436 (rc_cur->cspr > 0)) { 11437 uint64_t len; 11438 11439 rack->rc_catch_up = 1; 11440 /* 11441 * Calculate the deadline time, first set the 11442 * time to when the request arrived. 11443 */ 11444 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_SENDTIME) { 11445 /* 11446 * For cases where its a duplicate tm (we received more 11447 * than one request for a tm) we want to use now, the point 11448 * where we are just sending the first bit of the request. 11449 */ 11450 rc_cur->deadline = cts; 11451 } else { 11452 /* 11453 * Here we have a different tm from the last request 11454 * so we want to use arrival time as our base. 11455 */ 11456 rc_cur->deadline = rc_cur->localtime; 11457 } 11458 /* 11459 * Next calculate the length and compensate for 11460 * TLS if need be. 11461 */ 11462 len = rc_cur->end - rc_cur->start; 11463 if (tp->t_inpcb.inp_socket->so_snd.sb_tls_info) { 11464 /* 11465 * This session is doing TLS. Take a swag guess 11466 * at the overhead. 11467 */ 11468 len += tcp_estimate_tls_overhead(tp->t_inpcb.inp_socket, len); 11469 } 11470 /* 11471 * Now considering the size, and the cspr, what is the time that 11472 * would be required at the cspr rate. Here we use the raw 11473 * cspr value since the client only looks at the raw data. We 11474 * do use len which includes TLS overhead, but not the TCP/IP etc. 11475 * That will get made up for in the CU pacing rate set. 11476 */ 11477 len *= HPTS_USEC_IN_SEC; 11478 len /= rc_cur->cspr; 11479 rc_cur->deadline += len; 11480 } else { 11481 rack->rc_catch_up = 0; 11482 rc_cur->deadline = 0; 11483 } 11484 if (rack->r_ctl.client_suggested_maxseg != 0) { 11485 /* 11486 * We need to reset the max pace segs if we have a 11487 * client_suggested_maxseg. 
11488 */ 11489 rack_set_pace_segments(tp, rack, __LINE__, NULL); 11490 } 11491 if (orig_ent) { 11492 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; 11493 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; 11494 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; 11495 } 11496 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0); 11497 /* Remember it for next time and for CU mode */ 11498 rack->r_ctl.rc_last_sft = rc_cur; 11499 rack->r_ctl.last_tm_mark = rc_cur->timestamp; 11500 } 11501 #endif 11502 11503 static void 11504 rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts) 11505 { 11506 #ifdef TCP_REQUEST_TRK 11507 struct tcp_sendfile_track *ent; 11508 11509 ent = rack->r_ctl.rc_last_sft; 11510 if ((ent == NULL) || 11511 (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) || 11512 (SEQ_GEQ(seq, ent->end_seq))) { 11513 /* Time to update the track. */ 11514 rack_set_dgp_hybrid_mode(rack, seq, len, cts); 11515 ent = rack->r_ctl.rc_last_sft; 11516 } 11517 /* Out of all */ 11518 if (ent == NULL) { 11519 return; 11520 } 11521 if (SEQ_LT(ent->end_seq, (seq + len))) { 11522 /* 11523 * This is the case where our end_seq guess 11524 * was wrong. This is usually due to TLS having 11525 * more bytes then our guess. It could also be the 11526 * case that the client sent in two requests closely 11527 * and the SB is full of both so we are sending part 11528 * of each (end|beg). In such a case lets move this 11529 * guys end to match the end of this send. That 11530 * way it will complete when all of it is acked. 11531 */ 11532 ent->end_seq = (seq + len); 11533 if (rack->rc_hybrid_mode) 11534 rack_log_hybrid_bw(rack, seq, len, 0, 0, HYBRID_LOG_EXTEND, 0, ent, __LINE__); 11535 } 11536 /* Now validate we have set the send time of this one */ 11537 if ((ent->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { 11538 ent->flags |= TCP_TRK_TRACK_FLG_FSND; 11539 ent->first_send = cts; 11540 ent->sent_at_fs = rack->rc_tp->t_sndbytes; 11541 ent->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; 11542 } 11543 #endif 11544 } 11545 11546 static void 11547 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount) 11548 { 11549 /* 11550 * The fast output path is enabled and we 11551 * have moved the cumack forward. Lets see if 11552 * we can expand forward the fast path length by 11553 * that amount. What we would ideally like to 11554 * do is increase the number of bytes in the 11555 * fast path block (left_to_send) by the 11556 * acked amount. However we have to gate that 11557 * by two factors: 11558 * 1) The amount outstanding and the rwnd of the peer 11559 * (i.e. we don't want to exceed the rwnd of the peer). 11560 * <and> 11561 * 2) The amount of data left in the socket buffer (i.e. 11562 * we can't send beyond what is in the buffer). 11563 * 11564 * Note that this does not take into account any increase 11565 * in the cwnd. We will only extend the fast path by 11566 * what was acked. 
11567 */ 11568 uint32_t new_total, gating_val; 11569 11570 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; 11571 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), 11572 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); 11573 if (new_total <= gating_val) { 11574 /* We can increase left_to_send by the acked amount */ 11575 counter_u64_add(rack_extended_rfo, 1); 11576 rack->r_ctl.fsb.left_to_send = new_total; 11577 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))), 11578 ("rack:%p left_to_send:%u sbavail:%u out:%u", 11579 rack, rack->r_ctl.fsb.left_to_send, 11580 sbavail(&rack->rc_inp->inp_socket->so_snd), 11581 (tp->snd_max - tp->snd_una))); 11582 11583 } 11584 } 11585 11586 static void 11587 rack_adjust_sendmap_head(struct tcp_rack *rack, struct sockbuf *sb) 11588 { 11589 /* 11590 * Here any sendmap entry that points to the 11591 * beginning mbuf must be adjusted to the correct 11592 * offset. This must be called with: 11593 * 1) The socket buffer locked 11594 * 2) snd_una adjusted to its new position. 11595 * 11596 * Note that (2) implies rack_ack_received has also 11597 * been called and all the sbcut's have been done. 11598 * 11599 * We grab the first mbuf in the socket buffer and 11600 * then go through the front of the sendmap, recalculating 11601 * the stored offset for any sendmap entry that has 11602 * that mbuf. We must use the sb functions to do this 11603 * since it's possible an add was done as well as 11604 * the subtraction we may have just completed. This should 11605 * not be a penalty though, since we just referenced the sb 11606 * to go in and trim off the mbufs that we freed (of course 11607 * there will be a penalty for the sendmap references though). 11608 * 11609 * Note also with INVARIANTS on, we validate with a KASSERT 11610 * that the first sendmap entry has a soff of 0. 11611 * 11612 */ 11613 struct mbuf *m; 11614 struct rack_sendmap *rsm; 11615 tcp_seq snd_una; 11616 #ifdef INVARIANTS 11617 int first_processed = 0; 11618 #endif 11619 11620 snd_una = rack->rc_tp->snd_una; 11621 SOCKBUF_LOCK_ASSERT(sb); 11622 m = sb->sb_mb; 11623 rsm = tqhash_min(rack->r_ctl.tqh); 11624 if ((rsm == NULL) || (m == NULL)) { 11625 /* Nothing outstanding */ 11626 return; 11627 } 11628 /* The very first RSM's mbuf must point to the head mbuf in the sb */ 11629 KASSERT((rsm->m == m), 11630 ("Rack:%p sb:%p rsm:%p -- first rsm mbuf not aligned to sb", 11631 rack, sb, rsm)); 11632 while (rsm->m && (rsm->m == m)) { 11633 /* one to adjust */ 11634 #ifdef INVARIANTS 11635 struct mbuf *tm; 11636 uint32_t soff; 11637 11638 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); 11639 if ((rsm->orig_m_len != m->m_len) || 11640 (rsm->orig_t_space != M_TRAILINGROOM(m))){ 11641 rack_adjust_orig_mlen(rsm); 11642 } 11643 if (first_processed == 0) { 11644 KASSERT((rsm->soff == 0), 11645 ("Rack:%p rsm:%p -- rsm at head but soff not zero", 11646 rack, rsm)); 11647 first_processed = 1; 11648 } 11649 if ((rsm->soff != soff) || (rsm->m != tm)) { 11650 /* 11651 * This is not a fatal error, we anticipate it 11652 * might happen (the #else code), so we count it here 11653 * so that under INVARIANTS we can see that it really 11654 * does happen.
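* (the rack_adjust_map_bw counter bumped just below records how often it actually does).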
11655 */ 11656 counter_u64_add(rack_adjust_map_bw, 1); 11657 } 11658 rsm->m = tm; 11659 rsm->soff = soff; 11660 if (tm) { 11661 rsm->orig_m_len = rsm->m->m_len; 11662 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 11663 } else { 11664 rsm->orig_m_len = 0; 11665 rsm->orig_t_space = 0; 11666 } 11667 #else 11668 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); 11669 if (rsm->m) { 11670 rsm->orig_m_len = rsm->m->m_len; 11671 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 11672 } else { 11673 rsm->orig_m_len = 0; 11674 rsm->orig_t_space = 0; 11675 } 11676 #endif 11677 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 11678 if (rsm == NULL) 11679 break; 11680 } 11681 } 11682 11683 #ifdef TCP_REQUEST_TRK 11684 static inline void 11685 rack_req_check_for_comp(struct tcp_rack *rack, tcp_seq th_ack) 11686 { 11687 struct tcp_sendfile_track *ent; 11688 int i; 11689 11690 if ((rack->rc_hybrid_mode == 0) && 11691 (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) == 0)) { 11692 /* 11693 * Just do normal completions hybrid pacing is not on 11694 * and CLDL is off as well. 11695 */ 11696 tcp_req_check_for_comp(rack->rc_tp, th_ack); 11697 return; 11698 } 11699 /* 11700 * Originally I was just going to find the th_ack associated 11701 * with an entry. But then I realized a large strech ack could 11702 * in theory ack two or more requests at once. So instead we 11703 * need to find all entries that are completed by th_ack not 11704 * just a single entry and do our logging. 11705 */ 11706 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); 11707 while (ent != NULL) { 11708 /* 11709 * We may be doing hybrid pacing or CLDL and need more details possibly 11710 * so we do it manually instead of calling 11711 * tcp_req_check_for_comp() 11712 */ 11713 uint64_t laa, tim, data, cbw, ftim; 11714 11715 /* Ok this ack frees it */ 11716 rack_log_hybrid(rack, th_ack, 11717 ent, HYBRID_LOG_REQ_COMP, __LINE__, 0); 11718 rack_log_hybrid_sends(rack, ent, __LINE__); 11719 /* calculate the time based on the ack arrival */ 11720 data = ent->end - ent->start; 11721 laa = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 11722 if (ent->flags & TCP_TRK_TRACK_FLG_FSND) { 11723 if (ent->first_send > ent->localtime) 11724 ftim = ent->first_send; 11725 else 11726 ftim = ent->localtime; 11727 } else { 11728 /* TSNH */ 11729 ftim = ent->localtime; 11730 } 11731 if (laa > ent->localtime) 11732 tim = laa - ftim; 11733 else 11734 tim = 0; 11735 cbw = data * HPTS_USEC_IN_SEC; 11736 if (tim > 0) 11737 cbw /= tim; 11738 else 11739 cbw = 0; 11740 rack_log_hybrid_bw(rack, th_ack, cbw, tim, data, HYBRID_LOG_BW_MEASURE, 0, ent, __LINE__); 11741 /* 11742 * Check to see if we are freeing what we are pointing to send wise 11743 * if so be sure to NULL the pointer so we know we are no longer 11744 * set to anything. 
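* In hybrid mode we also clear the catch-up state and any bandwidth cap tied to that entry.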
11745 */ 11746 if (ent == rack->r_ctl.rc_last_sft) { 11747 rack->r_ctl.rc_last_sft = NULL; 11748 if (rack->rc_hybrid_mode) { 11749 rack->rc_catch_up = 0; 11750 if (rack->cspr_is_fcc == 0) 11751 rack->r_ctl.bw_rate_cap = 0; 11752 else 11753 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 11754 rack->r_ctl.client_suggested_maxseg = 0; 11755 } 11756 } 11757 /* Generate the log that the tcp_netflix call would have */ 11758 tcp_req_log_req_info(rack->rc_tp, ent, 11759 i, TCP_TRK_REQ_LOG_FREED, 0, 0); 11760 /* Free it and see if there is another one */ 11761 tcp_req_free_a_slot(rack->rc_tp, ent); 11762 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); 11763 } 11764 } 11765 #endif 11766 11767 11768 /* 11769 * Return value of 1, we do not need to call rack_process_data(). 11770 * return value of 0, rack_process_data can be called. 11771 * For ret_val if its 0 the TCP is locked, if its non-zero 11772 * its unlocked and probably unsafe to touch the TCB. 11773 */ 11774 static int 11775 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 11776 struct tcpcb *tp, struct tcpopt *to, 11777 uint32_t tiwin, int32_t tlen, 11778 int32_t * ofia, int32_t thflags, int32_t *ret_val, int32_t orig_tlen) 11779 { 11780 int32_t ourfinisacked = 0; 11781 int32_t nsegs, acked_amount; 11782 int32_t acked; 11783 struct mbuf *mfree; 11784 struct tcp_rack *rack; 11785 int32_t under_pacing = 0; 11786 int32_t post_recovery = 0; 11787 uint32_t p_cwnd; 11788 11789 INP_WLOCK_ASSERT(tptoinpcb(tp)); 11790 11791 rack = (struct tcp_rack *)tp->t_fb_ptr; 11792 if (SEQ_GEQ(tp->snd_una, tp->iss + (65535 << tp->snd_scale))) { 11793 /* Checking SEG.ACK against ISS is definitely redundant. */ 11794 tp->t_flags2 |= TF2_NO_ISS_CHECK; 11795 } 11796 if (!V_tcp_insecure_ack) { 11797 tcp_seq seq_min; 11798 bool ghost_ack_check; 11799 11800 if (tp->t_flags2 & TF2_NO_ISS_CHECK) { 11801 /* Check for too old ACKs (RFC 5961, Section 5.2). */ 11802 seq_min = tp->snd_una - tp->max_sndwnd; 11803 ghost_ack_check = false; 11804 } else { 11805 if (SEQ_GT(tp->iss + 1, tp->snd_una - tp->max_sndwnd)) { 11806 /* Checking for ghost ACKs is stricter. */ 11807 seq_min = tp->iss + 1; 11808 ghost_ack_check = true; 11809 } else { 11810 /* 11811 * Checking for too old ACKs (RFC 5961, 11812 * Section 5.2) is stricter. 11813 */ 11814 seq_min = tp->snd_una - tp->max_sndwnd; 11815 ghost_ack_check = false; 11816 } 11817 } 11818 if (SEQ_LT(th->th_ack, seq_min)) { 11819 if (ghost_ack_check) 11820 TCPSTAT_INC(tcps_rcvghostack); 11821 else 11822 TCPSTAT_INC(tcps_rcvacktooold); 11823 /* Send challenge ACK. 
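* (ctf_do_dropafterack() below arranges the ACK in response and consumes the segment).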
*/ 11824 ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val); 11825 rack->r_wanted_output = 1; 11826 return (1); 11827 } 11828 } 11829 if (SEQ_GT(th->th_ack, tp->snd_max)) { 11830 ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val); 11831 rack->r_wanted_output = 1; 11832 return (1); 11833 } 11834 if (rack->gp_ready && 11835 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 11836 under_pacing = 1; 11837 } 11838 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 11839 int in_rec, dup_ack_struck = 0; 11840 int dsack_seen = 0, sacks_seen = 0; 11841 11842 in_rec = IN_FASTRECOVERY(tp->t_flags); 11843 if (rack->rc_in_persist) { 11844 tp->t_rxtshift = 0; 11845 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 11846 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 11847 } 11848 11849 if ((th->th_ack == tp->snd_una) && 11850 (tiwin == tp->snd_wnd) && 11851 (orig_tlen == 0) && 11852 ((to->to_flags & TOF_SACK) == 0)) { 11853 rack_strike_dupack(rack, th->th_ack); 11854 dup_ack_struck = 1; 11855 } 11856 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), 11857 dup_ack_struck, &dsack_seen, &sacks_seen); 11858 11859 } 11860 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 11861 /* 11862 * Old ack, behind (or duplicate to) the last one rcv'd 11863 * Note: We mark reordering is occuring if its 11864 * less than and we have not closed our window. 11865 */ 11866 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { 11867 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 11868 if (rack->r_ctl.rc_reorder_ts == 0) 11869 rack->r_ctl.rc_reorder_ts = 1; 11870 } 11871 return (0); 11872 } 11873 /* 11874 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 11875 * something we sent. 11876 */ 11877 if (tp->t_flags & TF_NEEDSYN) { 11878 /* 11879 * T/TCP: Connection was half-synchronized, and our SYN has 11880 * been ACK'd (so connection is now fully synchronized). Go 11881 * to non-starred state, increment snd_una for ACK of SYN, 11882 * and check if we can do window scaling. 11883 */ 11884 tp->t_flags &= ~TF_NEEDSYN; 11885 tp->snd_una++; 11886 /* Do window scaling? */ 11887 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11888 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11889 tp->rcv_scale = tp->request_r_scale; 11890 /* Send window already scaled. */ 11891 } 11892 } 11893 nsegs = max(1, m->m_pkthdr.lro_nsegs); 11894 11895 acked = BYTES_THIS_ACK(tp, th); 11896 if (acked) { 11897 /* 11898 * Any time we move the cum-ack forward clear 11899 * keep-alive tied probe-not-answered. The 11900 * persists clears its own on entry. 11901 */ 11902 rack->probe_not_answered = 0; 11903 } 11904 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 11905 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 11906 /* 11907 * If we just performed our first retransmit, and the ACK arrives 11908 * within our recovery window, then it was a mistake to do the 11909 * retransmit in the first place. Recover our original cwnd and 11910 * ssthresh, and proceed to transmit where we left off. 
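* The TF_PREVVALID / t_badrxtwin check below catches this case and signals CC_RTO_ERR so the saved cwnd/ssthresh can be restored.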
11911 */ 11912 if ((tp->t_flags & TF_PREVVALID) && 11913 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 11914 tp->t_flags &= ~TF_PREVVALID; 11915 if (tp->t_rxtshift == 1 && 11916 (int)(ticks - tp->t_badrxtwin) < 0) 11917 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 11918 } 11919 if (acked) { 11920 /* assure we are not backed off */ 11921 tp->t_rxtshift = 0; 11922 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 11923 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 11924 rack->rc_tlp_in_progress = 0; 11925 rack->r_ctl.rc_tlp_cnt_out = 0; 11926 /* 11927 * If it is the RXT timer we want to 11928 * stop it, so we can restart a TLP. 11929 */ 11930 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 11931 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11932 #ifdef TCP_REQUEST_TRK 11933 rack_req_check_for_comp(rack, th->th_ack); 11934 #endif 11935 } 11936 /* 11937 * If we have a timestamp reply, update smoothed round trip time. If 11938 * no timestamp is present but transmit timer is running and timed 11939 * sequence number was acked, update smoothed round trip time. Since 11940 * we now have an rtt measurement, cancel the timer backoff (cf., 11941 * Phil Karn's retransmit alg.). Recompute the initial retransmit 11942 * timer. 11943 * 11944 * Some boxes send broken timestamp replies during the SYN+ACK 11945 * phase, ignore timestamps of 0 or we could calculate a huge RTT 11946 * and blow up the retransmit timer. 11947 */ 11948 /* 11949 * If all outstanding data is acked, stop retransmit timer and 11950 * remember to restart (more output or persist). If there is more 11951 * data to be acked, restart retransmit timer, using current 11952 * (possibly backed-off) value. 11953 */ 11954 if (acked == 0) { 11955 if (ofia) 11956 *ofia = ourfinisacked; 11957 return (0); 11958 } 11959 if (IN_RECOVERY(tp->t_flags)) { 11960 if (SEQ_LT(th->th_ack, tp->snd_recover) && 11961 (SEQ_LT(th->th_ack, tp->snd_max))) { 11962 tcp_rack_partialack(tp); 11963 } else { 11964 rack_post_recovery(tp, th->th_ack); 11965 post_recovery = 1; 11966 /* 11967 * Grab the segsiz, multiply by 2 and add the snd_cwnd 11968 * that is the max the CC should add if we are exiting 11969 * recovery and doing a late add. 11970 */ 11971 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 11972 p_cwnd <<= 1; 11973 p_cwnd += tp->snd_cwnd; 11974 } 11975 } else if ((rack->rto_from_rec == 1) && 11976 SEQ_GEQ(th->th_ack, tp->snd_recover)) { 11977 /* 11978 * We were in recovery, hit a rxt timeout 11979 * and never re-entered recovery. The timeout(s) 11980 * made up all the lost data. In such a case 11981 * we need to clear the rto_from_rec flag. 11982 */ 11983 rack->rto_from_rec = 0; 11984 } 11985 /* 11986 * Let the congestion control algorithm update congestion control 11987 * related information. This typically means increasing the 11988 * congestion window. 
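* (rack_ack_received() below hands the ack to the CC module; when exiting recovery any growth is clamped to the p_cwnd computed above).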
11989 */ 11990 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, post_recovery); 11991 if (post_recovery && 11992 (tp->snd_cwnd > p_cwnd)) { 11993 /* Must be non-newreno (cubic) getting too ahead of itself */ 11994 tp->snd_cwnd = p_cwnd; 11995 } 11996 SOCK_SENDBUF_LOCK(so); 11997 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 11998 tp->snd_wnd -= acked_amount; 11999 mfree = sbcut_locked(&so->so_snd, acked_amount); 12000 if ((sbused(&so->so_snd) == 0) && 12001 (acked > acked_amount) && 12002 (tp->t_state >= TCPS_FIN_WAIT_1) && 12003 (tp->t_flags & TF_SENTFIN)) { 12004 /* 12005 * We must be sure our fin 12006 * was sent and acked (we can be 12007 * in FIN_WAIT_1 without having 12008 * sent the fin). 12009 */ 12010 ourfinisacked = 1; 12011 } 12012 tp->snd_una = th->th_ack; 12013 /* wakeups? */ 12014 if (acked_amount && sbavail(&so->so_snd)) 12015 rack_adjust_sendmap_head(rack, &so->so_snd); 12016 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 12017 /* NB: sowwakeup_locked() does an implicit unlock. */ 12018 sowwakeup_locked(so); 12019 m_freem(mfree); 12020 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 12021 tp->snd_recover = tp->snd_una; 12022 12023 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { 12024 tp->snd_nxt = tp->snd_max; 12025 } 12026 if (under_pacing && 12027 (rack->use_fixed_rate == 0) && 12028 (rack->in_probe_rtt == 0) && 12029 rack->rc_gp_dyn_mul && 12030 rack->rc_always_pace) { 12031 /* Check if we are dragging bottom */ 12032 rack_check_bottom_drag(tp, rack, so); 12033 } 12034 if (tp->snd_una == tp->snd_max) { 12035 /* Nothing left outstanding */ 12036 tp->t_flags &= ~TF_PREVVALID; 12037 if (rack->r_ctl.rc_went_idle_time == 0) 12038 rack->r_ctl.rc_went_idle_time = 1; 12039 rack->r_ctl.retran_during_recovery = 0; 12040 rack->r_ctl.dsack_byte_cnt = 0; 12041 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 12042 if (sbavail(&tptosocket(tp)->so_snd) == 0) 12043 tp->t_acktime = 0; 12044 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12045 rack->rc_suspicious = 0; 12046 /* Set need output so persist might get set */ 12047 rack->r_wanted_output = 1; 12048 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 12049 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 12050 (sbavail(&so->so_snd) == 0) && 12051 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 12052 /* 12053 * The socket was gone and the 12054 * peer sent data (now or in the past), time to 12055 * reset him. 12056 */ 12057 *ret_val = 1; 12058 /* tcp_close will kill the inp pre-log the Reset */ 12059 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 12060 tp = tcp_close(tp); 12061 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 12062 return (1); 12063 } 12064 } 12065 if (ofia) 12066 *ofia = ourfinisacked; 12067 return (0); 12068 } 12069 12070 12071 static void 12072 rack_log_collapse(struct tcp_rack *rack, uint32_t cnt, uint32_t split, uint32_t out, int line, 12073 int dir, uint32_t flags, struct rack_sendmap *rsm) 12074 { 12075 if (tcp_bblogging_on(rack->rc_tp)) { 12076 union tcp_log_stackspecific log; 12077 struct timeval tv; 12078 12079 memset(&log, 0, sizeof(log)); 12080 log.u_bbr.flex1 = cnt; 12081 log.u_bbr.flex2 = split; 12082 log.u_bbr.flex3 = out; 12083 log.u_bbr.flex4 = line; 12084 log.u_bbr.flex5 = rack->r_must_retran; 12085 log.u_bbr.flex6 = flags; 12086 log.u_bbr.flex7 = rack->rc_has_collapsed; 12087 log.u_bbr.flex8 = dir; /* 12088 * 1 is collapsed, 0 is uncollapsed, 12089 * 2 is log of a rsm being marked, 3 is a split. 
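* 4 is a log of an rsm being flagged RACK_RWND_COLLAPSED in rack_un_collapse_window().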
12090 */ 12091 if (rsm == NULL) 12092 log.u_bbr.rttProp = 0; 12093 else 12094 log.u_bbr.rttProp = (uintptr_t)rsm; 12095 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 12096 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 12097 TCP_LOG_EVENTP(rack->rc_tp, NULL, 12098 &rack->rc_inp->inp_socket->so_rcv, 12099 &rack->rc_inp->inp_socket->so_snd, 12100 TCP_RACK_LOG_COLLAPSE, 0, 12101 0, &log, false, &tv); 12102 } 12103 } 12104 12105 static void 12106 rack_collapsed_window(struct tcp_rack *rack, uint32_t out, tcp_seq th_ack, int line) 12107 { 12108 /* 12109 * Here all we do is mark the collapsed point and set the flag. 12110 * This may happen again and again, but there is no 12111 * sense splitting our map until we know where the 12112 * peer finally lands in the collapse. 12113 */ 12114 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); 12115 if ((rack->rc_has_collapsed == 0) || 12116 (rack->r_ctl.last_collapse_point != (th_ack + rack->rc_tp->snd_wnd))) 12117 counter_u64_add(rack_collapsed_win_seen, 1); 12118 rack->r_ctl.last_collapse_point = th_ack + rack->rc_tp->snd_wnd; 12119 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max; 12120 rack->rc_has_collapsed = 1; 12121 rack->r_collapse_point_valid = 1; 12122 rack_log_collapse(rack, 0, th_ack, rack->r_ctl.last_collapse_point, line, 1, 0, NULL); 12123 } 12124 12125 static void 12126 rack_un_collapse_window(struct tcp_rack *rack, int line) 12127 { 12128 struct rack_sendmap *nrsm, *rsm; 12129 int cnt = 0, split = 0; 12130 int insret __diagused; 12131 12132 12133 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); 12134 rack->rc_has_collapsed = 0; 12135 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); 12136 if (rsm == NULL) { 12137 /* Nothing to do maybe the peer ack'ed it all */ 12138 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 12139 return; 12140 } 12141 /* Now do we need to split this one? */ 12142 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) { 12143 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 12144 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm); 12145 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 12146 if (nrsm == NULL) { 12147 /* We can't get a rsm, mark all? 
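* No memory for a split rsm: fall back to flagging from this rsm onward.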
*/ 12148 nrsm = rsm; 12149 goto no_split; 12150 } 12151 /* Clone it */ 12152 split = 1; 12153 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point); 12154 #ifndef INVARIANTS 12155 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 12156 #else 12157 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 12158 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 12159 nrsm, insret, rack, rsm); 12160 } 12161 #endif 12162 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 12163 rack->r_ctl.last_collapse_point, __LINE__); 12164 if (rsm->r_in_tmap) { 12165 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 12166 nrsm->r_in_tmap = 1; 12167 } 12168 /* 12169 * Set in the new RSM as the 12170 * collapsed starting point 12171 */ 12172 rsm = nrsm; 12173 } 12174 12175 no_split: 12176 TQHASH_FOREACH_FROM(nrsm, rack->r_ctl.tqh, rsm) { 12177 cnt++; 12178 nrsm->r_flags |= RACK_RWND_COLLAPSED; 12179 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm); 12180 cnt++; 12181 } 12182 if (cnt) { 12183 counter_u64_add(rack_collapsed_win, 1); 12184 } 12185 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 12186 } 12187 12188 static void 12189 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 12190 int32_t tlen, int32_t tfo_syn) 12191 { 12192 if (DELAY_ACK(tp, tlen) || tfo_syn) { 12193 rack_timer_cancel(tp, rack, 12194 rack->r_ctl.rc_rcvtime, __LINE__); 12195 tp->t_flags |= TF_DELACK; 12196 } else { 12197 rack->r_wanted_output = 1; 12198 tp->t_flags |= TF_ACKNOW; 12199 } 12200 } 12201 12202 static void 12203 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack) 12204 { 12205 /* 12206 * If fast output is in progress, lets validate that 12207 * the new window did not shrink on us and make it 12208 * so fast output should end. 12209 */ 12210 if (rack->r_fast_output) { 12211 uint32_t out; 12212 12213 /* 12214 * Calculate what we will send if left as is 12215 * and compare that to our send window. 12216 */ 12217 out = ctf_outstanding(tp); 12218 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 12219 /* ok we have an issue */ 12220 if (out >= tp->snd_wnd) { 12221 /* Turn off fast output the window is met or collapsed */ 12222 rack->r_fast_output = 0; 12223 } else { 12224 /* we have some room left */ 12225 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 12226 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 12227 /* If not at least 1 full segment never mind */ 12228 rack->r_fast_output = 0; 12229 } 12230 } 12231 } 12232 } 12233 } 12234 12235 /* 12236 * Return value of 1, the TCB is unlocked and most 12237 * likely gone, return value of 0, the TCP is still 12238 * locked. 12239 */ 12240 static int 12241 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 12242 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 12243 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 12244 { 12245 /* 12246 * Update window information. Don't look at window if no ACK: TAC's 12247 * send garbage on first SYN. 
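* (the window below is only taken from segments carrying an ACK that pass the snd_wl1/snd_wl2 ordering checks).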
12248 */ 12249 int32_t nsegs; 12250 int32_t tfo_syn; 12251 struct tcp_rack *rack; 12252 12253 INP_WLOCK_ASSERT(tptoinpcb(tp)); 12254 12255 rack = (struct tcp_rack *)tp->t_fb_ptr; 12256 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12257 if ((thflags & TH_ACK) && 12258 (SEQ_LT(tp->snd_wl1, th->th_seq) || 12259 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 12260 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 12261 /* keep track of pure window updates */ 12262 if (tlen == 0 && 12263 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 12264 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 12265 tp->snd_wnd = tiwin; 12266 rack_validate_fo_sendwin_up(tp, rack); 12267 tp->snd_wl1 = th->th_seq; 12268 tp->snd_wl2 = th->th_ack; 12269 if (tp->snd_wnd > tp->max_sndwnd) 12270 tp->max_sndwnd = tp->snd_wnd; 12271 rack->r_wanted_output = 1; 12272 } else if (thflags & TH_ACK) { 12273 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 12274 tp->snd_wnd = tiwin; 12275 rack_validate_fo_sendwin_up(tp, rack); 12276 tp->snd_wl1 = th->th_seq; 12277 tp->snd_wl2 = th->th_ack; 12278 } 12279 } 12280 if (tp->snd_wnd < ctf_outstanding(tp)) 12281 /* The peer collapsed the window */ 12282 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); 12283 else if (rack->rc_has_collapsed) 12284 rack_un_collapse_window(rack, __LINE__); 12285 if ((rack->r_collapse_point_valid) && 12286 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point))) 12287 rack->r_collapse_point_valid = 0; 12288 /* Was persist timer active and now we have window space? */ 12289 if ((rack->rc_in_persist != 0) && 12290 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 12291 rack->r_ctl.rc_pace_min_segs))) { 12292 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 12293 tp->snd_nxt = tp->snd_max; 12294 /* Make sure we output to start the timer */ 12295 rack->r_wanted_output = 1; 12296 } 12297 /* Do we enter persists? */ 12298 if ((rack->rc_in_persist == 0) && 12299 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 12300 TCPS_HAVEESTABLISHED(tp->t_state) && 12301 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 12302 sbavail(&tptosocket(tp)->so_snd) && 12303 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 12304 /* 12305 * Here the rwnd is less than 12306 * the pacing size, we are established, 12307 * nothing is outstanding, and there is 12308 * data to send. Enter persists. 12309 */ 12310 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); 12311 } 12312 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 12313 m_freem(m); 12314 return (0); 12315 } 12316 /* 12317 * don't process the URG bit, ignore them drag 12318 * along the up. 12319 */ 12320 tp->rcv_up = tp->rcv_nxt; 12321 12322 /* 12323 * Process the segment text, merging it into the TCP sequencing 12324 * queue, and arranging for acknowledgment of receipt if necessary. 12325 * This process logically involves adjusting tp->rcv_wnd as data is 12326 * presented to the user (this happens in tcp_usrreq.c, case 12327 * PRU_RCVD). If a FIN has already been received on this connection 12328 * then we just ignore the text. 
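* (tfo_syn below lets a TFO server accept in-order SYN data while still in SYN_RECEIVED).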
12329 */ 12330 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 12331 (tp->t_flags & TF_FASTOPEN)); 12332 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 12333 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 12334 tcp_seq save_start = th->th_seq; 12335 tcp_seq save_rnxt = tp->rcv_nxt; 12336 int save_tlen = tlen; 12337 12338 m_adj(m, drop_hdrlen); /* delayed header drop */ 12339 /* 12340 * Insert segment which includes th into TCP reassembly 12341 * queue with control block tp. Set thflags to whether 12342 * reassembly now includes a segment with FIN. This handles 12343 * the common case inline (segment is the next to be 12344 * received on an established connection, and the queue is 12345 * empty), avoiding linkage into and removal from the queue 12346 * and repetition of various conversions. Set DELACK for 12347 * segments received in order, but ack immediately when 12348 * segments are out of order (so fast retransmit can work). 12349 */ 12350 if (th->th_seq == tp->rcv_nxt && 12351 SEGQ_EMPTY(tp) && 12352 (TCPS_HAVEESTABLISHED(tp->t_state) || 12353 tfo_syn)) { 12354 #ifdef NETFLIX_SB_LIMITS 12355 u_int mcnt, appended; 12356 12357 if (so->so_rcv.sb_shlim) { 12358 mcnt = m_memcnt(m); 12359 appended = 0; 12360 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 12361 CFO_NOSLEEP, NULL) == false) { 12362 counter_u64_add(tcp_sb_shlim_fails, 1); 12363 m_freem(m); 12364 return (0); 12365 } 12366 } 12367 #endif 12368 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); 12369 tp->rcv_nxt += tlen; 12370 if (tlen && 12371 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 12372 (tp->t_fbyte_in == 0)) { 12373 tp->t_fbyte_in = ticks; 12374 if (tp->t_fbyte_in == 0) 12375 tp->t_fbyte_in = 1; 12376 if (tp->t_fbyte_out && tp->t_fbyte_in) 12377 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 12378 } 12379 thflags = tcp_get_flags(th) & TH_FIN; 12380 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 12381 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 12382 SOCK_RECVBUF_LOCK(so); 12383 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 12384 m_freem(m); 12385 } else { 12386 int32_t newsize; 12387 12388 if (tlen > 0) { 12389 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 12390 if (newsize) 12391 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 12392 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 12393 } 12394 #ifdef NETFLIX_SB_LIMITS 12395 appended = 12396 #endif 12397 sbappendstream_locked(&so->so_rcv, m, 0); 12398 } 12399 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 12400 /* NB: sorwakeup_locked() does an implicit unlock. */ 12401 sorwakeup_locked(so); 12402 #ifdef NETFLIX_SB_LIMITS 12403 if (so->so_rcv.sb_shlim && appended != mcnt) 12404 counter_fo_release(so->so_rcv.sb_shlim, 12405 mcnt - appended); 12406 #endif 12407 } else { 12408 /* 12409 * XXX: Due to the header drop above "th" is 12410 * theoretically invalid by now. Fortunately 12411 * m_adj() doesn't actually frees any mbufs when 12412 * trimming from the head. 12413 */ 12414 tcp_seq temp = save_start; 12415 12416 thflags = tcp_reass(tp, th, &temp, &tlen, m); 12417 tp->t_flags |= TF_ACKNOW; 12418 if (tp->t_flags & TF_WAKESOR) { 12419 tp->t_flags &= ~TF_WAKESOR; 12420 /* NB: sorwakeup_locked() does an implicit unlock. */ 12421 sorwakeup_locked(so); 12422 } 12423 } 12424 if ((tp->t_flags & TF_SACK_PERMIT) && 12425 (save_tlen > 0) && 12426 TCPS_HAVEESTABLISHED(tp->t_state)) { 12427 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 12428 /* 12429 * DSACK actually handled in the fastpath 12430 * above. 
12431 */ 12432 tcp_update_sack_list(tp, save_start, 12433 save_start + save_tlen); 12434 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 12435 if ((tp->rcv_numsacks >= 1) && 12436 (tp->sackblks[0].end == save_start)) { 12437 /* 12438 * Partial overlap, recorded at todrop 12439 * above. 12440 */ 12441 tcp_update_sack_list(tp, 12442 tp->sackblks[0].start, 12443 tp->sackblks[0].end); 12444 } else { 12445 tcp_update_dsack_list(tp, save_start, 12446 save_start + save_tlen); 12447 } 12448 } else if (tlen >= save_tlen) { 12449 /* Update of sackblks. */ 12450 tcp_update_dsack_list(tp, save_start, 12451 save_start + save_tlen); 12452 } else if (tlen > 0) { 12453 tcp_update_dsack_list(tp, save_start, 12454 save_start + tlen); 12455 } 12456 } 12457 } else { 12458 m_freem(m); 12459 thflags &= ~TH_FIN; 12460 } 12461 12462 /* 12463 * If FIN is received ACK the FIN and let the user know that the 12464 * connection is closing. 12465 */ 12466 if (thflags & TH_FIN) { 12467 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 12468 /* The socket upcall is handled by socantrcvmore. */ 12469 socantrcvmore(so); 12470 /* 12471 * If connection is half-synchronized (ie NEEDSYN 12472 * flag on) then delay ACK, so it may be piggybacked 12473 * when SYN is sent. Otherwise, since we received a 12474 * FIN then no more input can be expected, send ACK 12475 * now. 12476 */ 12477 if (tp->t_flags & TF_NEEDSYN) { 12478 rack_timer_cancel(tp, rack, 12479 rack->r_ctl.rc_rcvtime, __LINE__); 12480 tp->t_flags |= TF_DELACK; 12481 } else { 12482 tp->t_flags |= TF_ACKNOW; 12483 } 12484 tp->rcv_nxt++; 12485 } 12486 switch (tp->t_state) { 12487 /* 12488 * In SYN_RECEIVED and ESTABLISHED STATES enter the 12489 * CLOSE_WAIT state. 12490 */ 12491 case TCPS_SYN_RECEIVED: 12492 tp->t_starttime = ticks; 12493 /* FALLTHROUGH */ 12494 case TCPS_ESTABLISHED: 12495 rack_timer_cancel(tp, rack, 12496 rack->r_ctl.rc_rcvtime, __LINE__); 12497 tcp_state_change(tp, TCPS_CLOSE_WAIT); 12498 break; 12499 12500 /* 12501 * If still in FIN_WAIT_1 STATE FIN has not been 12502 * acked so enter the CLOSING state. 12503 */ 12504 case TCPS_FIN_WAIT_1: 12505 rack_timer_cancel(tp, rack, 12506 rack->r_ctl.rc_rcvtime, __LINE__); 12507 tcp_state_change(tp, TCPS_CLOSING); 12508 break; 12509 12510 /* 12511 * In FIN_WAIT_2 state enter the TIME_WAIT state, 12512 * starting the time-wait timer, turning off the 12513 * other standard timers. 12514 */ 12515 case TCPS_FIN_WAIT_2: 12516 rack_timer_cancel(tp, rack, 12517 rack->r_ctl.rc_rcvtime, __LINE__); 12518 tcp_twstart(tp); 12519 return (1); 12520 } 12521 } 12522 /* 12523 * Return any desired output. 12524 */ 12525 if ((tp->t_flags & TF_ACKNOW) || 12526 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 12527 rack->r_wanted_output = 1; 12528 } 12529 return (0); 12530 } 12531 12532 /* 12533 * Here nothing is really faster, its just that we 12534 * have broken out the fast-data path also just like 12535 * the fast-ack. 12536 */ 12537 static int 12538 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 12539 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12540 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 12541 { 12542 int32_t nsegs; 12543 int32_t newsize = 0; /* automatic sockbuf scaling */ 12544 struct tcp_rack *rack; 12545 #ifdef NETFLIX_SB_LIMITS 12546 u_int mcnt, appended; 12547 #endif 12548 12549 /* 12550 * If last ACK falls within this segment's sequence numbers, record 12551 * the timestamp. 
NOTE that the test is modified according to the 12552 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 12553 */ 12554 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 12555 return (0); 12556 } 12557 if (tiwin && tiwin != tp->snd_wnd) { 12558 return (0); 12559 } 12560 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 12561 return (0); 12562 } 12563 if (__predict_false((to->to_flags & TOF_TS) && 12564 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 12565 return (0); 12566 } 12567 if (__predict_false((th->th_ack != tp->snd_una))) { 12568 return (0); 12569 } 12570 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 12571 return (0); 12572 } 12573 if ((to->to_flags & TOF_TS) != 0 && 12574 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 12575 tp->ts_recent_age = tcp_ts_getticks(); 12576 tp->ts_recent = to->to_tsval; 12577 } 12578 rack = (struct tcp_rack *)tp->t_fb_ptr; 12579 /* 12580 * This is a pure, in-sequence data packet with nothing on the 12581 * reassembly queue and we have enough buffer space to take it. 12582 */ 12583 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12584 12585 #ifdef NETFLIX_SB_LIMITS 12586 if (so->so_rcv.sb_shlim) { 12587 mcnt = m_memcnt(m); 12588 appended = 0; 12589 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 12590 CFO_NOSLEEP, NULL) == false) { 12591 counter_u64_add(tcp_sb_shlim_fails, 1); 12592 m_freem(m); 12593 return (1); 12594 } 12595 } 12596 #endif 12597 /* Clean receiver SACK report if present */ 12598 if (tp->rcv_numsacks) 12599 tcp_clean_sackreport(tp); 12600 KMOD_TCPSTAT_INC(tcps_preddat); 12601 tp->rcv_nxt += tlen; 12602 if (tlen && 12603 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 12604 (tp->t_fbyte_in == 0)) { 12605 tp->t_fbyte_in = ticks; 12606 if (tp->t_fbyte_in == 0) 12607 tp->t_fbyte_in = 1; 12608 if (tp->t_fbyte_out && tp->t_fbyte_in) 12609 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 12610 } 12611 /* 12612 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 12613 */ 12614 tp->snd_wl1 = th->th_seq; 12615 /* 12616 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 12617 */ 12618 tp->rcv_up = tp->rcv_nxt; 12619 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 12620 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 12621 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 12622 12623 /* Add data to socket buffer. */ 12624 SOCK_RECVBUF_LOCK(so); 12625 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 12626 m_freem(m); 12627 } else { 12628 /* 12629 * Set new socket buffer size. Give up when limit is 12630 * reached. 12631 */ 12632 if (newsize) 12633 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 12634 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 12635 m_adj(m, drop_hdrlen); /* delayed header drop */ 12636 #ifdef NETFLIX_SB_LIMITS 12637 appended = 12638 #endif 12639 sbappendstream_locked(&so->so_rcv, m, 0); 12640 ctf_calc_rwin(so, tp); 12641 } 12642 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 12643 /* NB: sorwakeup_locked() does an implicit unlock. */ 12644 sorwakeup_locked(so); 12645 #ifdef NETFLIX_SB_LIMITS 12646 if (so->so_rcv.sb_shlim && mcnt != appended) 12647 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 12648 #endif 12649 rack_handle_delayed_ack(tp, rack, tlen, 0); 12650 if (tp->snd_una == tp->snd_max) 12651 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 12652 return (1); 12653 } 12654 12655 /* 12656 * This subfunction is used to try to highly optimize the 12657 * fast path. We again allow window updates that are 12658 * in sequence to remain in the fast-path. We also add 12659 * in the __predict's to attempt to help the compiler. 
12660 * Note that if we return a 0, then we can *not* process 12661 * it and the caller should push the packet into the 12662 * slow-path. 12663 */ 12664 static int 12665 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 12666 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12667 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 12668 { 12669 int32_t acked; 12670 int32_t nsegs; 12671 int32_t under_pacing = 0; 12672 struct tcp_rack *rack; 12673 12674 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 12675 /* Old ack, behind (or duplicate to) the last one rcv'd */ 12676 return (0); 12677 } 12678 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 12679 /* Above what we have sent? */ 12680 return (0); 12681 } 12682 if (__predict_false(tiwin == 0)) { 12683 /* zero window */ 12684 return (0); 12685 } 12686 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 12687 /* We need a SYN or a FIN, unlikely.. */ 12688 return (0); 12689 } 12690 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 12691 /* Timestamp is behind .. old ack with seq wrap? */ 12692 return (0); 12693 } 12694 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 12695 /* Still recovering */ 12696 return (0); 12697 } 12698 rack = (struct tcp_rack *)tp->t_fb_ptr; 12699 if (rack->r_ctl.rc_sacked) { 12700 /* We have sack holes on our scoreboard */ 12701 return (0); 12702 } 12703 /* Ok if we reach here, we can process a fast-ack */ 12704 if (rack->gp_ready && 12705 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 12706 under_pacing = 1; 12707 } 12708 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12709 rack_log_ack(tp, to, th, 0, 0, NULL, NULL); 12710 /* Did the window get updated? */ 12711 if (tiwin != tp->snd_wnd) { 12712 tp->snd_wnd = tiwin; 12713 rack_validate_fo_sendwin_up(tp, rack); 12714 tp->snd_wl1 = th->th_seq; 12715 if (tp->snd_wnd > tp->max_sndwnd) 12716 tp->max_sndwnd = tp->snd_wnd; 12717 } 12718 /* Do we exit persists? */ 12719 if ((rack->rc_in_persist != 0) && 12720 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 12721 rack->r_ctl.rc_pace_min_segs))) { 12722 rack_exit_persist(tp, rack, cts); 12723 } 12724 /* Do we enter persists? */ 12725 if ((rack->rc_in_persist == 0) && 12726 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 12727 TCPS_HAVEESTABLISHED(tp->t_state) && 12728 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 12729 sbavail(&tptosocket(tp)->so_snd) && 12730 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 12731 /* 12732 * Here the rwnd is less than 12733 * the pacing size, we are established, 12734 * nothing is outstanding, and there is 12735 * data to send. Enter persists. 12736 */ 12737 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, th->th_ack); 12738 } 12739 /* 12740 * If last ACK falls within this segment's sequence numbers, record 12741 * the timestamp. NOTE that the test is modified according to the 12742 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 12743 */ 12744 if ((to->to_flags & TOF_TS) != 0 && 12745 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 12746 tp->ts_recent_age = tcp_ts_getticks(); 12747 tp->ts_recent = to->to_tsval; 12748 } 12749 /* 12750 * This is a pure ack for outstanding data. 12751 */ 12752 KMOD_TCPSTAT_INC(tcps_predack); 12753 12754 /* 12755 * "bad retransmit" recovery. 
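* Same TF_PREVVALID / t_badrxtwin check as in rack_process_ack() above.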
12756 */ 12757 if ((tp->t_flags & TF_PREVVALID) && 12758 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 12759 tp->t_flags &= ~TF_PREVVALID; 12760 if (tp->t_rxtshift == 1 && 12761 (int)(ticks - tp->t_badrxtwin) < 0) 12762 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 12763 } 12764 /* 12765 * Recalculate the transmit timer / rtt. 12766 * 12767 * Some boxes send broken timestamp replies during the SYN+ACK 12768 * phase, ignore timestamps of 0 or we could calculate a huge RTT 12769 * and blow up the retransmit timer. 12770 */ 12771 acked = BYTES_THIS_ACK(tp, th); 12772 12773 #ifdef TCP_HHOOK 12774 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 12775 hhook_run_tcp_est_in(tp, th, to); 12776 #endif 12777 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 12778 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 12779 if (acked) { 12780 struct mbuf *mfree; 12781 12782 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 12783 SOCK_SENDBUF_LOCK(so); 12784 mfree = sbcut_locked(&so->so_snd, acked); 12785 tp->snd_una = th->th_ack; 12786 /* Note we want to hold the sb lock through the sendmap adjust */ 12787 rack_adjust_sendmap_head(rack, &so->so_snd); 12788 /* Wake up the socket if we have room to write more */ 12789 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 12790 sowwakeup_locked(so); 12791 m_freem(mfree); 12792 tp->t_rxtshift = 0; 12793 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 12794 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 12795 rack->rc_tlp_in_progress = 0; 12796 rack->r_ctl.rc_tlp_cnt_out = 0; 12797 /* 12798 * If it is the RXT timer we want to 12799 * stop it, so we can restart a TLP. 12800 */ 12801 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 12802 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12803 12804 #ifdef TCP_REQUEST_TRK 12805 rack_req_check_for_comp(rack, th->th_ack); 12806 #endif 12807 } 12808 /* 12809 * Let the congestion control algorithm update congestion control 12810 * related information. This typically means increasing the 12811 * congestion window. 12812 */ 12813 if (tp->snd_wnd < ctf_outstanding(tp)) { 12814 /* The peer collapsed the window */ 12815 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); 12816 } else if (rack->rc_has_collapsed) 12817 rack_un_collapse_window(rack, __LINE__); 12818 if ((rack->r_collapse_point_valid) && 12819 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point))) 12820 rack->r_collapse_point_valid = 0; 12821 /* 12822 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 12823 */ 12824 tp->snd_wl2 = th->th_ack; 12825 tp->t_dupacks = 0; 12826 m_freem(m); 12827 /* ND6_HINT(tp); *//* Some progress has been made. */ 12828 12829 /* 12830 * If all outstanding data are acked, stop retransmit timer, 12831 * otherwise restart timer using current (possibly backed-off) 12832 * value. If process is waiting for space, wakeup/selwakeup/signal. 12833 * If data are ready to send, let tcp_output decide between more 12834 * output or persist. 
12835 */ 12836 if (under_pacing && 12837 (rack->use_fixed_rate == 0) && 12838 (rack->in_probe_rtt == 0) && 12839 rack->rc_gp_dyn_mul && 12840 rack->rc_always_pace) { 12841 /* Check if we are dragging bottom */ 12842 rack_check_bottom_drag(tp, rack, so); 12843 } 12844 if (tp->snd_una == tp->snd_max) { 12845 tp->t_flags &= ~TF_PREVVALID; 12846 rack->r_ctl.retran_during_recovery = 0; 12847 rack->rc_suspicious = 0; 12848 rack->r_ctl.dsack_byte_cnt = 0; 12849 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 12850 if (rack->r_ctl.rc_went_idle_time == 0) 12851 rack->r_ctl.rc_went_idle_time = 1; 12852 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 12853 if (sbavail(&tptosocket(tp)->so_snd) == 0) 12854 tp->t_acktime = 0; 12855 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12856 } 12857 if (acked && rack->r_fast_output) 12858 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 12859 if (sbavail(&so->so_snd)) { 12860 rack->r_wanted_output = 1; 12861 } 12862 return (1); 12863 } 12864 12865 /* 12866 * Return value of 1, the TCB is unlocked and most 12867 * likely gone, return value of 0, the TCP is still 12868 * locked. 12869 */ 12870 static int 12871 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 12872 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12873 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12874 { 12875 int32_t ret_val = 0; 12876 int32_t orig_tlen = tlen; 12877 int32_t todrop; 12878 int32_t ourfinisacked = 0; 12879 struct tcp_rack *rack; 12880 12881 INP_WLOCK_ASSERT(tptoinpcb(tp)); 12882 12883 ctf_calc_rwin(so, tp); 12884 /* 12885 * If the state is SYN_SENT: if seg contains an ACK, but not for our 12886 * SYN, drop the input. if seg contains a RST, then drop the 12887 * connection. if seg does not contain SYN, then drop it. Otherwise 12888 * this is an acceptable SYN segment initialize tp->rcv_nxt and 12889 * tp->irs if seg contains ack then advance tp->snd_una if seg 12890 * contains an ECE and ECN support is enabled, the stream is ECN 12891 * capable. if SYN has been acked change to ESTABLISHED else 12892 * SYN_RCVD state arrange for segment to be acked (eventually) 12893 * continue processing rest of data/controls. 12894 */ 12895 if ((thflags & TH_ACK) && 12896 (SEQ_LEQ(th->th_ack, tp->iss) || 12897 SEQ_GT(th->th_ack, tp->snd_max))) { 12898 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 12899 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12900 return (1); 12901 } 12902 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 12903 TCP_PROBE5(connect__refused, NULL, tp, 12904 mtod(m, const char *), tp, th); 12905 tp = tcp_drop(tp, ECONNREFUSED); 12906 ctf_do_drop(m, tp); 12907 return (1); 12908 } 12909 if (thflags & TH_RST) { 12910 ctf_do_drop(m, tp); 12911 return (1); 12912 } 12913 if (!(thflags & TH_SYN)) { 12914 ctf_do_drop(m, tp); 12915 return (1); 12916 } 12917 tp->irs = th->th_seq; 12918 tcp_rcvseqinit(tp); 12919 rack = (struct tcp_rack *)tp->t_fb_ptr; 12920 if (thflags & TH_ACK) { 12921 int tfo_partial = 0; 12922 12923 KMOD_TCPSTAT_INC(tcps_connects); 12924 soisconnected(so); 12925 #ifdef MAC 12926 mac_socketpeer_set_from_mbuf(m, so); 12927 #endif 12928 /* Do window scaling on this connection? 
*/ 12929 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 12930 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 12931 tp->rcv_scale = tp->request_r_scale; 12932 } 12933 tp->rcv_adv += min(tp->rcv_wnd, 12934 TCP_MAXWIN << tp->rcv_scale); 12935 /* 12936 * If not all the data that was sent in the TFO SYN 12937 * has been acked, resend the remainder right away. 12938 */ 12939 if ((tp->t_flags & TF_FASTOPEN) && 12940 (tp->snd_una != tp->snd_max)) { 12941 /* Was it a partial ack? */ 12942 if (SEQ_LT(th->th_ack, tp->snd_max)) 12943 tfo_partial = 1; 12944 } 12945 /* 12946 * If there's data, delay ACK; if there's also a FIN ACKNOW 12947 * will be turned on later. 12948 */ 12949 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 12950 rack_timer_cancel(tp, rack, 12951 rack->r_ctl.rc_rcvtime, __LINE__); 12952 tp->t_flags |= TF_DELACK; 12953 } else { 12954 rack->r_wanted_output = 1; 12955 tp->t_flags |= TF_ACKNOW; 12956 } 12957 12958 tcp_ecn_input_syn_sent(tp, thflags, iptos); 12959 12960 if (SEQ_GT(th->th_ack, tp->snd_una)) { 12961 /* 12962 * We advance snd_una for the 12963 * fast open case. If th_ack is 12964 * acknowledging data beyond 12965 * snd_una we can't just call 12966 * ack-processing since the 12967 * data stream in our send-map 12968 * will start at snd_una + 1 (one 12969 * beyond the SYN). If its just 12970 * equal we don't need to do that 12971 * and there is no send_map. 12972 */ 12973 tp->snd_una++; 12974 if (tfo_partial && (SEQ_GT(tp->snd_max, tp->snd_una))) { 12975 /* 12976 * We sent a SYN with data, and thus have a 12977 * sendmap entry with a SYN set. Lets find it 12978 * and take off the send bit and the byte and 12979 * set it up to be what we send (send it next). 12980 */ 12981 struct rack_sendmap *rsm; 12982 12983 rsm = tqhash_min(rack->r_ctl.tqh); 12984 if (rsm) { 12985 if (rsm->r_flags & RACK_HAS_SYN) { 12986 rsm->r_flags &= ~RACK_HAS_SYN; 12987 rsm->r_start++; 12988 } 12989 rack->r_ctl.rc_resend = rsm; 12990 } 12991 } 12992 } 12993 /* 12994 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 12995 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 12996 */ 12997 tp->t_starttime = ticks; 12998 if (tp->t_flags & TF_NEEDFIN) { 12999 tcp_state_change(tp, TCPS_FIN_WAIT_1); 13000 tp->t_flags &= ~TF_NEEDFIN; 13001 thflags &= ~TH_SYN; 13002 } else { 13003 tcp_state_change(tp, TCPS_ESTABLISHED); 13004 TCP_PROBE5(connect__established, NULL, tp, 13005 mtod(m, const char *), tp, th); 13006 rack_cc_conn_init(tp); 13007 } 13008 } else { 13009 /* 13010 * Received initial SYN in SYN-SENT[*] state => simultaneous 13011 * open. If segment contains CC option and there is a 13012 * cached CC, apply TAO test. If it succeeds, connection is * 13013 * half-synchronized. Otherwise, do 3-way handshake: 13014 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 13015 * there was no CC option, clear cached CC value. 13016 */ 13017 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); 13018 tcp_state_change(tp, TCPS_SYN_RECEIVED); 13019 } 13020 /* 13021 * Advance th->th_seq to correspond to first data byte. If data, 13022 * trim to stay within window, dropping FIN if necessary. 13023 */ 13024 th->th_seq++; 13025 if (tlen > tp->rcv_wnd) { 13026 todrop = tlen - tp->rcv_wnd; 13027 m_adj(m, -todrop); 13028 tlen = tp->rcv_wnd; 13029 thflags &= ~TH_FIN; 13030 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 13031 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 13032 } 13033 tp->snd_wl1 = th->th_seq - 1; 13034 tp->rcv_up = th->th_seq; 13035 /* 13036 * Client side of transaction: already sent SYN and data. 
If the 13037 * remote host used T/TCP to validate the SYN, our data will be 13038 * ACK'd; if so, enter normal data segment processing in the middle 13039 * of step 5, ack processing. Otherwise, goto step 6. 13040 */ 13041 if (thflags & TH_ACK) { 13042 /* For syn-sent we need to possibly update the rtt */ 13043 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 13044 uint32_t t, mcts; 13045 13046 mcts = tcp_ts_getticks(); 13047 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 13048 if (!tp->t_rttlow || tp->t_rttlow > t) 13049 tp->t_rttlow = t; 13050 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 13051 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 13052 tcp_rack_xmit_timer_commit(rack, tp); 13053 } 13054 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) 13055 return (ret_val); 13056 /* We may have changed to FIN_WAIT_1 above */ 13057 if (tp->t_state == TCPS_FIN_WAIT_1) { 13058 /* 13059 * In FIN_WAIT_1 STATE in addition to the processing 13060 * for the ESTABLISHED state if our FIN is now 13061 * acknowledged then enter FIN_WAIT_2. 13062 */ 13063 if (ourfinisacked) { 13064 /* 13065 * If we can't receive any more data, then 13066 * closing user can proceed. Starting the 13067 * timer is contrary to the specification, 13068 * but if we don't get a FIN we'll hang 13069 * forever. 13070 * 13071 * XXXjl: we should release the tp also, and 13072 * use a compressed state. 13073 */ 13074 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13075 soisdisconnected(so); 13076 tcp_timer_activate(tp, TT_2MSL, 13077 (tcp_fast_finwait2_recycle ? 13078 tcp_finwait2_timeout : 13079 TP_MAXIDLE(tp))); 13080 } 13081 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13082 } 13083 } 13084 } 13085 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13086 tiwin, thflags, nxt_pkt)); 13087 } 13088 13089 /* 13090 * Return value of 1, the TCB is unlocked and most 13091 * likely gone, return value of 0, the TCP is still 13092 * locked. 13093 */ 13094 static int 13095 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 13096 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13097 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13098 { 13099 struct tcp_rack *rack; 13100 int32_t orig_tlen = tlen; 13101 int32_t ret_val = 0; 13102 int32_t ourfinisacked = 0; 13103 13104 rack = (struct tcp_rack *)tp->t_fb_ptr; 13105 ctf_calc_rwin(so, tp); 13106 if ((thflags & TH_RST) || 13107 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13108 return (ctf_process_rst(m, th, so, tp)); 13109 if ((thflags & TH_ACK) && 13110 (SEQ_LEQ(th->th_ack, tp->snd_una) || 13111 SEQ_GT(th->th_ack, tp->snd_max))) { 13112 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13113 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13114 return (1); 13115 } 13116 if (tp->t_flags & TF_FASTOPEN) { 13117 /* 13118 * When a TFO connection is in SYN_RECEIVED, the 13119 * only valid packets are the initial SYN, a 13120 * retransmit/copy of the initial SYN (possibly with 13121 * a subset of the original data), a valid ACK, a 13122 * FIN, or a RST. 
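* A SYN|ACK here is answered with a reset; a bare retransmitted SYN is dropped only while a retransmit/TLP/RACK timer is pending.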
13123 */ 13124 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 13125 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13126 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13127 return (1); 13128 } else if (thflags & TH_SYN) { 13129 /* non-initial SYN is ignored */ 13130 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 13131 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 13132 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 13133 ctf_do_drop(m, NULL); 13134 return (0); 13135 } 13136 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 13137 ctf_do_drop(m, NULL); 13138 return (0); 13139 } 13140 } 13141 13142 /* 13143 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13144 * it's less than ts_recent, drop it. 13145 */ 13146 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13147 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13148 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13149 return (ret_val); 13150 } 13151 /* 13152 * In the SYN-RECEIVED state, validate that the packet belongs to 13153 * this connection before trimming the data to fit the receive 13154 * window. Check the sequence number versus IRS since we know the 13155 * sequence numbers haven't wrapped. This is a partial fix for the 13156 * "LAND" DoS attack. 13157 */ 13158 if (SEQ_LT(th->th_seq, tp->irs)) { 13159 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13160 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13161 return (1); 13162 } 13163 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13164 return (ret_val); 13165 } 13166 /* 13167 * If last ACK falls within this segment's sequence numbers, record 13168 * its timestamp. NOTE: 1) That the test incorporates suggestions 13169 * from the latest proposal of the tcplw@cray.com list (Braden 13170 * 1993/04/26). 2) That updating only on newer timestamps interferes 13171 * with our earlier PAWS tests, so this check should be solely 13172 * predicated on the sequence space of this segment. 3) That we 13173 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13174 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13175 * SEG.Len, This modified check allows us to overcome RFC1323's 13176 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13177 * p.869. In such cases, we can still calculate the RTT correctly 13178 * when RCV.NXT == Last.ACK.Sent. 13179 */ 13180 if ((to->to_flags & TOF_TS) != 0 && 13181 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13182 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13183 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13184 tp->ts_recent_age = tcp_ts_getticks(); 13185 tp->ts_recent = to->to_tsval; 13186 } 13187 tp->snd_wnd = tiwin; 13188 rack_validate_fo_sendwin_up(tp, rack); 13189 /* 13190 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13191 * is on (half-synchronized state), then queue data for later 13192 * processing; else drop segment and return. 13193 */ 13194 if ((thflags & TH_ACK) == 0) { 13195 if (tp->t_flags & TF_FASTOPEN) { 13196 rack_cc_conn_init(tp); 13197 } 13198 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13199 tiwin, thflags, nxt_pkt)); 13200 } 13201 KMOD_TCPSTAT_INC(tcps_connects); 13202 if (tp->t_flags & TF_SONOTCONN) { 13203 tp->t_flags &= ~TF_SONOTCONN; 13204 soisconnected(so); 13205 } 13206 /* Do window scaling? 
*/ 13207 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 13208 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 13209 tp->rcv_scale = tp->request_r_scale; 13210 } 13211 /* 13212 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 13213 * FIN-WAIT-1 13214 */ 13215 tp->t_starttime = ticks; 13216 if ((tp->t_flags & TF_FASTOPEN) && tp->t_tfo_pending) { 13217 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 13218 tp->t_tfo_pending = NULL; 13219 } 13220 if (tp->t_flags & TF_NEEDFIN) { 13221 tcp_state_change(tp, TCPS_FIN_WAIT_1); 13222 tp->t_flags &= ~TF_NEEDFIN; 13223 } else { 13224 tcp_state_change(tp, TCPS_ESTABLISHED); 13225 TCP_PROBE5(accept__established, NULL, tp, 13226 mtod(m, const char *), tp, th); 13227 /* 13228 * TFO connections call cc_conn_init() during SYN 13229 * processing. Calling it again here for such connections 13230 * is not harmless as it would undo the snd_cwnd reduction 13231 * that occurs when a TFO SYN|ACK is retransmitted. 13232 */ 13233 if (!(tp->t_flags & TF_FASTOPEN)) 13234 rack_cc_conn_init(tp); 13235 } 13236 /* 13237 * Account for the ACK of our SYN prior to 13238 * regular ACK processing below, except for 13239 * simultaneous SYN, which is handled later. 13240 */ 13241 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 13242 tp->snd_una++; 13243 /* 13244 * If segment contains data or ACK, will call tcp_reass() later; if 13245 * not, do so now to pass queued data to user. 13246 */ 13247 if (tlen == 0 && (thflags & TH_FIN) == 0) { 13248 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 13249 (struct mbuf *)0); 13250 if (tp->t_flags & TF_WAKESOR) { 13251 tp->t_flags &= ~TF_WAKESOR; 13252 /* NB: sorwakeup_locked() does an implicit unlock. */ 13253 sorwakeup_locked(so); 13254 } 13255 } 13256 tp->snd_wl1 = th->th_seq - 1; 13257 /* For syn-recv we need to possibly update the rtt */ 13258 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 13259 uint32_t t, mcts; 13260 13261 mcts = tcp_ts_getticks(); 13262 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 13263 if (!tp->t_rttlow || tp->t_rttlow > t) 13264 tp->t_rttlow = t; 13265 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); 13266 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 13267 tcp_rack_xmit_timer_commit(rack, tp); 13268 } 13269 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13270 return (ret_val); 13271 } 13272 if (tp->t_state == TCPS_FIN_WAIT_1) { 13273 /* We could have went to FIN_WAIT_1 (or EST) above */ 13274 /* 13275 * In FIN_WAIT_1 STATE in addition to the processing for the 13276 * ESTABLISHED state if our FIN is now acknowledged then 13277 * enter FIN_WAIT_2. 13278 */ 13279 if (ourfinisacked) { 13280 /* 13281 * If we can't receive any more data, then closing 13282 * user can proceed. Starting the timer is contrary 13283 * to the specification, but if we don't get a FIN 13284 * we'll hang forever. 13285 * 13286 * XXXjl: we should release the tp also, and use a 13287 * compressed state. 13288 */ 13289 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13290 soisdisconnected(so); 13291 tcp_timer_activate(tp, TT_2MSL, 13292 (tcp_fast_finwait2_recycle ? 
13293 tcp_finwait2_timeout : 13294 TP_MAXIDLE(tp))); 13295 } 13296 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13297 } 13298 } 13299 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13300 tiwin, thflags, nxt_pkt)); 13301 } 13302 13303 /* 13304 * Return value of 1, the TCB is unlocked and most 13305 * likely gone, return value of 0, the TCP is still 13306 * locked. 13307 */ 13308 static int 13309 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 13310 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13311 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13312 { 13313 int32_t ret_val = 0; 13314 int32_t orig_tlen = tlen; 13315 struct tcp_rack *rack; 13316 13317 /* 13318 * Header prediction: check for the two common cases of a 13319 * uni-directional data xfer. If the packet has no control flags, 13320 * is in-sequence, the window didn't change and we're not 13321 * retransmitting, it's a candidate. If the length is zero and the 13322 * ack moved forward, we're the sender side of the xfer. Just free 13323 * the data acked & wake any higher level process that was blocked 13324 * waiting for space. If the length is non-zero and the ack didn't 13325 * move, we're the receiver side. If we're getting packets in-order 13326 * (the reassembly queue is empty), add the data to the socket 13327 * buffer and note that we need a delayed ack. Make sure that the 13328 * hidden state-flags are also off. Since we check for 13329 * TCPS_ESTABLISHED first, it can only be TF_NEEDSYN. 13330 */ 13331 rack = (struct tcp_rack *)tp->t_fb_ptr; 13332 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 13333 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) && 13334 __predict_true(SEGQ_EMPTY(tp)) && 13335 __predict_true(th->th_seq == tp->rcv_nxt)) { 13336 if (tlen == 0) { 13337 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 13338 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 13339 return (0); 13340 } 13341 } else { 13342 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 13343 tiwin, nxt_pkt, iptos)) { 13344 return (0); 13345 } 13346 } 13347 } 13348 ctf_calc_rwin(so, tp); 13349 13350 if ((thflags & TH_RST) || 13351 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13352 return (ctf_process_rst(m, th, so, tp)); 13353 13354 /* 13355 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13356 * synchronized state. 13357 */ 13358 if (thflags & TH_SYN) { 13359 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13360 return (ret_val); 13361 } 13362 /* 13363 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13364 * it's less than ts_recent, drop it. 13365 */ 13366 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13367 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13368 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13369 return (ret_val); 13370 } 13371 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13372 return (ret_val); 13373 } 13374 /* 13375 * If last ACK falls within this segment's sequence numbers, record 13376 * its timestamp. NOTE: 1) That the test incorporates suggestions 13377 * from the latest proposal of the tcplw@cray.com list (Braden 13378 * 1993/04/26). 2) That updating only on newer timestamps interferes 13379 * with our earlier PAWS tests, so this check should be solely 13380 * predicated on the sequence space of this segment. 
3) That we 13381 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13382 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13383 * SEG.Len, This modified check allows us to overcome RFC1323's 13384 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13385 * p.869. In such cases, we can still calculate the RTT correctly 13386 * when RCV.NXT == Last.ACK.Sent. 13387 */ 13388 if ((to->to_flags & TOF_TS) != 0 && 13389 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13390 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13391 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13392 tp->ts_recent_age = tcp_ts_getticks(); 13393 tp->ts_recent = to->to_tsval; 13394 } 13395 /* 13396 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13397 * is on (half-synchronized state), then queue data for later 13398 * processing; else drop segment and return. 13399 */ 13400 if ((thflags & TH_ACK) == 0) { 13401 if (tp->t_flags & TF_NEEDSYN) { 13402 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13403 tiwin, thflags, nxt_pkt)); 13404 13405 } else if (tp->t_flags & TF_ACKNOW) { 13406 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13407 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13408 return (ret_val); 13409 } else { 13410 ctf_do_drop(m, NULL); 13411 return (0); 13412 } 13413 } 13414 /* 13415 * Ack processing. 13416 */ 13417 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val, orig_tlen)) { 13418 return (ret_val); 13419 } 13420 if (sbavail(&so->so_snd)) { 13421 if (ctf_progress_timeout_check(tp, true)) { 13422 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 13423 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13424 return (1); 13425 } 13426 } 13427 /* State changes only happen in rack_process_data() */ 13428 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13429 tiwin, thflags, nxt_pkt)); 13430 } 13431 13432 /* 13433 * Return value of 1, the TCB is unlocked and most 13434 * likely gone, return value of 0, the TCP is still 13435 * locked. 13436 */ 13437 static int 13438 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 13439 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13440 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13441 { 13442 int32_t ret_val = 0; 13443 int32_t orig_tlen = tlen; 13444 13445 ctf_calc_rwin(so, tp); 13446 if ((thflags & TH_RST) || 13447 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13448 return (ctf_process_rst(m, th, so, tp)); 13449 /* 13450 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13451 * synchronized state. 13452 */ 13453 if (thflags & TH_SYN) { 13454 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13455 return (ret_val); 13456 } 13457 /* 13458 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13459 * it's less than ts_recent, drop it. 13460 */ 13461 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13462 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13463 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13464 return (ret_val); 13465 } 13466 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13467 return (ret_val); 13468 } 13469 /* 13470 * If last ACK falls within this segment's sequence numbers, record 13471 * its timestamp. NOTE: 1) That the test incorporates suggestions 13472 * from the latest proposal of the tcplw@cray.com list (Braden 13473 * 1993/04/26). 
2) That updating only on newer timestamps interferes 13474 * with our earlier PAWS tests, so this check should be solely 13475 * predicated on the sequence space of this segment. 3) That we 13476 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13477 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13478 * SEG.Len, This modified check allows us to overcome RFC1323's 13479 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13480 * p.869. In such cases, we can still calculate the RTT correctly 13481 * when RCV.NXT == Last.ACK.Sent. 13482 */ 13483 if ((to->to_flags & TOF_TS) != 0 && 13484 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13485 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13486 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13487 tp->ts_recent_age = tcp_ts_getticks(); 13488 tp->ts_recent = to->to_tsval; 13489 } 13490 /* 13491 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13492 * is on (half-synchronized state), then queue data for later 13493 * processing; else drop segment and return. 13494 */ 13495 if ((thflags & TH_ACK) == 0) { 13496 if (tp->t_flags & TF_NEEDSYN) { 13497 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13498 tiwin, thflags, nxt_pkt)); 13499 13500 } else if (tp->t_flags & TF_ACKNOW) { 13501 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13502 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13503 return (ret_val); 13504 } else { 13505 ctf_do_drop(m, NULL); 13506 return (0); 13507 } 13508 } 13509 /* 13510 * Ack processing. 13511 */ 13512 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val, orig_tlen)) { 13513 return (ret_val); 13514 } 13515 if (sbavail(&so->so_snd)) { 13516 if (ctf_progress_timeout_check(tp, true)) { 13517 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13518 tp, tick, PROGRESS_DROP, __LINE__); 13519 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13520 return (1); 13521 } 13522 } 13523 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13524 tiwin, thflags, nxt_pkt)); 13525 } 13526 13527 static int 13528 rack_check_data_after_close(struct mbuf *m, 13529 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 13530 { 13531 struct tcp_rack *rack; 13532 13533 rack = (struct tcp_rack *)tp->t_fb_ptr; 13534 if (rack->rc_allow_data_af_clo == 0) { 13535 close_now: 13536 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 13537 /* tcp_close will kill the inp pre-log the Reset */ 13538 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 13539 tp = tcp_close(tp); 13540 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 13541 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 13542 return (1); 13543 } 13544 if (sbavail(&so->so_snd) == 0) 13545 goto close_now; 13546 /* Ok we allow data that is ignored and a followup reset */ 13547 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 13548 tp->rcv_nxt = th->th_seq + *tlen; 13549 tp->t_flags2 |= TF2_DROP_AF_DATA; 13550 rack->r_wanted_output = 1; 13551 *tlen = 0; 13552 return (0); 13553 } 13554 13555 /* 13556 * Return value of 1, the TCB is unlocked and most 13557 * likely gone, return value of 0, the TCP is still 13558 * locked. 
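* (All of the rack_do_* state handlers below follow this same
* convention; a caller is expected to treat it roughly as
*
*	if (rack_do_fin_wait_1(...) != 0)
*		return;		-- tp is unlocked and may be freed
*
* and to only keep using tp after a zero return.)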
13559 */ 13560 static int 13561 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 13562 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13563 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13564 { 13565 int32_t ret_val = 0; 13566 int32_t orig_tlen = tlen; 13567 int32_t ourfinisacked = 0; 13568 13569 ctf_calc_rwin(so, tp); 13570 13571 if ((thflags & TH_RST) || 13572 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13573 return (ctf_process_rst(m, th, so, tp)); 13574 /* 13575 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13576 * synchronized state. 13577 */ 13578 if (thflags & TH_SYN) { 13579 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13580 return (ret_val); 13581 } 13582 /* 13583 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13584 * it's less than ts_recent, drop it. 13585 */ 13586 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13587 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13588 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13589 return (ret_val); 13590 } 13591 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13592 return (ret_val); 13593 } 13594 /* 13595 * If new data are received on a connection after the user processes 13596 * are gone, then RST the other end. 13597 */ 13598 if ((tp->t_flags & TF_CLOSED) && tlen && 13599 rack_check_data_after_close(m, tp, &tlen, th, so)) 13600 return (1); 13601 /* 13602 * If last ACK falls within this segment's sequence numbers, record 13603 * its timestamp. NOTE: 1) That the test incorporates suggestions 13604 * from the latest proposal of the tcplw@cray.com list (Braden 13605 * 1993/04/26). 2) That updating only on newer timestamps interferes 13606 * with our earlier PAWS tests, so this check should be solely 13607 * predicated on the sequence space of this segment. 3) That we 13608 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13609 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13610 * SEG.Len, This modified check allows us to overcome RFC1323's 13611 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13612 * p.869. In such cases, we can still calculate the RTT correctly 13613 * when RCV.NXT == Last.ACK.Sent. 13614 */ 13615 if ((to->to_flags & TOF_TS) != 0 && 13616 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13617 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13618 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13619 tp->ts_recent_age = tcp_ts_getticks(); 13620 tp->ts_recent = to->to_tsval; 13621 } 13622 /* 13623 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13624 * is on (half-synchronized state), then queue data for later 13625 * processing; else drop segment and return. 13626 */ 13627 if ((thflags & TH_ACK) == 0) { 13628 if (tp->t_flags & TF_NEEDSYN) { 13629 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13630 tiwin, thflags, nxt_pkt)); 13631 } else if (tp->t_flags & TF_ACKNOW) { 13632 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13633 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13634 return (ret_val); 13635 } else { 13636 ctf_do_drop(m, NULL); 13637 return (0); 13638 } 13639 } 13640 /* 13641 * Ack processing. 13642 */ 13643 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13644 return (ret_val); 13645 } 13646 if (ourfinisacked) { 13647 /* 13648 * If we can't receive any more data, then closing user can 13649 * proceed. 
Starting the timer is contrary to the 13650 * specification, but if we don't get a FIN we'll hang 13651 * forever. 13652 * 13653 * XXXjl: we should release the tp also, and use a 13654 * compressed state. 13655 */ 13656 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13657 soisdisconnected(so); 13658 tcp_timer_activate(tp, TT_2MSL, 13659 (tcp_fast_finwait2_recycle ? 13660 tcp_finwait2_timeout : 13661 TP_MAXIDLE(tp))); 13662 } 13663 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13664 } 13665 if (sbavail(&so->so_snd)) { 13666 if (ctf_progress_timeout_check(tp, true)) { 13667 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13668 tp, tick, PROGRESS_DROP, __LINE__); 13669 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13670 return (1); 13671 } 13672 } 13673 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13674 tiwin, thflags, nxt_pkt)); 13675 } 13676 13677 /* 13678 * Return value of 1, the TCB is unlocked and most 13679 * likely gone, return value of 0, the TCP is still 13680 * locked. 13681 */ 13682 static int 13683 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 13684 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13685 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13686 { 13687 int32_t ret_val = 0; 13688 int32_t orig_tlen = tlen; 13689 int32_t ourfinisacked = 0; 13690 13691 ctf_calc_rwin(so, tp); 13692 13693 if ((thflags & TH_RST) || 13694 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13695 return (ctf_process_rst(m, th, so, tp)); 13696 /* 13697 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13698 * synchronized state. 13699 */ 13700 if (thflags & TH_SYN) { 13701 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13702 return (ret_val); 13703 } 13704 /* 13705 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13706 * it's less than ts_recent, drop it. 13707 */ 13708 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13709 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13710 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13711 return (ret_val); 13712 } 13713 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13714 return (ret_val); 13715 } 13716 /* 13717 * If last ACK falls within this segment's sequence numbers, record 13718 * its timestamp. NOTE: 1) That the test incorporates suggestions 13719 * from the latest proposal of the tcplw@cray.com list (Braden 13720 * 1993/04/26). 2) That updating only on newer timestamps interferes 13721 * with our earlier PAWS tests, so this check should be solely 13722 * predicated on the sequence space of this segment. 3) That we 13723 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13724 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13725 * SEG.Len, This modified check allows us to overcome RFC1323's 13726 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13727 * p.869. In such cases, we can still calculate the RTT correctly 13728 * when RCV.NXT == Last.ACK.Sent. 13729 */ 13730 if ((to->to_flags & TOF_TS) != 0 && 13731 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13732 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13733 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13734 tp->ts_recent_age = tcp_ts_getticks(); 13735 tp->ts_recent = to->to_tsval; 13736 } 13737 /* 13738 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13739 * is on (half-synchronized state), then queue data for later 13740 * processing; else drop segment and return. 
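* Concretely, the branches below give three outcomes: with
* TF_NEEDSYN still set (half-synchronized) the data is handed to
* rack_process_data(), with TF_ACKNOW set the segment is dropped
* but an ACK is generated and rack is asked for output, and
* otherwise the segment is simply dropped.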
13741 */ 13742 if ((thflags & TH_ACK) == 0) { 13743 if (tp->t_flags & TF_NEEDSYN) { 13744 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13745 tiwin, thflags, nxt_pkt)); 13746 } else if (tp->t_flags & TF_ACKNOW) { 13747 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13748 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13749 return (ret_val); 13750 } else { 13751 ctf_do_drop(m, NULL); 13752 return (0); 13753 } 13754 } 13755 /* 13756 * Ack processing. 13757 */ 13758 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13759 return (ret_val); 13760 } 13761 if (ourfinisacked) { 13762 tcp_twstart(tp); 13763 m_freem(m); 13764 return (1); 13765 } 13766 if (sbavail(&so->so_snd)) { 13767 if (ctf_progress_timeout_check(tp, true)) { 13768 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13769 tp, tick, PROGRESS_DROP, __LINE__); 13770 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13771 return (1); 13772 } 13773 } 13774 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13775 tiwin, thflags, nxt_pkt)); 13776 } 13777 13778 /* 13779 * Return value of 1, the TCB is unlocked and most 13780 * likely gone, return value of 0, the TCP is still 13781 * locked. 13782 */ 13783 static int 13784 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 13785 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13786 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13787 { 13788 int32_t ret_val = 0; 13789 int32_t orig_tlen; 13790 int32_t ourfinisacked = 0; 13791 13792 ctf_calc_rwin(so, tp); 13793 13794 if ((thflags & TH_RST) || 13795 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13796 return (ctf_process_rst(m, th, so, tp)); 13797 /* 13798 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13799 * synchronized state. 13800 */ 13801 if (thflags & TH_SYN) { 13802 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13803 return (ret_val); 13804 } 13805 /* 13806 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13807 * it's less than ts_recent, drop it. 13808 */ 13809 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13810 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13811 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13812 return (ret_val); 13813 } 13814 orig_tlen = tlen; 13815 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13816 return (ret_val); 13817 } 13818 /* 13819 * If last ACK falls within this segment's sequence numbers, record 13820 * its timestamp. NOTE: 1) That the test incorporates suggestions 13821 * from the latest proposal of the tcplw@cray.com list (Braden 13822 * 1993/04/26). 2) That updating only on newer timestamps interferes 13823 * with our earlier PAWS tests, so this check should be solely 13824 * predicated on the sequence space of this segment. 3) That we 13825 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13826 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13827 * SEG.Len, This modified check allows us to overcome RFC1323's 13828 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13829 * p.869. In such cases, we can still calculate the RTT correctly 13830 * when RCV.NXT == Last.ACK.Sent. 
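* As a concrete example, with Last.ACK.Sent == 1000 a pure ACK
* whose SEG.SEQ is 1000 and SEG.Len is 0 still updates ts_recent
* under the "<=" form used here, whereas the strict RFC1323 test
* (Last.ACK.Sent < SEG.SEQ + SEG.Len) would skip it.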
13831 */ 13832 if ((to->to_flags & TOF_TS) != 0 && 13833 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13834 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13835 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13836 tp->ts_recent_age = tcp_ts_getticks(); 13837 tp->ts_recent = to->to_tsval; 13838 } 13839 /* 13840 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13841 * is on (half-synchronized state), then queue data for later 13842 * processing; else drop segment and return. 13843 */ 13844 if ((thflags & TH_ACK) == 0) { 13845 if (tp->t_flags & TF_NEEDSYN) { 13846 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13847 tiwin, thflags, nxt_pkt)); 13848 } else if (tp->t_flags & TF_ACKNOW) { 13849 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13850 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13851 return (ret_val); 13852 } else { 13853 ctf_do_drop(m, NULL); 13854 return (0); 13855 } 13856 } 13857 /* 13858 * case TCPS_LAST_ACK: Ack processing. 13859 */ 13860 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13861 return (ret_val); 13862 } 13863 if (ourfinisacked) { 13864 tp = tcp_close(tp); 13865 ctf_do_drop(m, tp); 13866 return (1); 13867 } 13868 if (sbavail(&so->so_snd)) { 13869 if (ctf_progress_timeout_check(tp, true)) { 13870 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13871 tp, tick, PROGRESS_DROP, __LINE__); 13872 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13873 return (1); 13874 } 13875 } 13876 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13877 tiwin, thflags, nxt_pkt)); 13878 } 13879 13880 /* 13881 * Return value of 1, the TCB is unlocked and most 13882 * likely gone, return value of 0, the TCP is still 13883 * locked. 13884 */ 13885 static int 13886 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 13887 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13888 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13889 { 13890 int32_t ret_val = 0; 13891 int32_t orig_tlen = tlen; 13892 int32_t ourfinisacked = 0; 13893 13894 ctf_calc_rwin(so, tp); 13895 13896 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 13897 if ((thflags & TH_RST) || 13898 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13899 return (ctf_process_rst(m, th, so, tp)); 13900 /* 13901 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13902 * synchronized state. 13903 */ 13904 if (thflags & TH_SYN) { 13905 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13906 return (ret_val); 13907 } 13908 /* 13909 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13910 * it's less than ts_recent, drop it. 13911 */ 13912 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13913 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13914 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13915 return (ret_val); 13916 } 13917 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13918 return (ret_val); 13919 } 13920 /* 13921 * If new data are received on a connection after the user processes 13922 * are gone, then RST the other end. 13923 */ 13924 if ((tp->t_flags & TF_CLOSED) && tlen && 13925 rack_check_data_after_close(m, tp, &tlen, th, so)) 13926 return (1); 13927 /* 13928 * If last ACK falls within this segment's sequence numbers, record 13929 * its timestamp. 
NOTE: 1) That the test incorporates suggestions 13930 * from the latest proposal of the tcplw@cray.com list (Braden 13931 * 1993/04/26). 2) That updating only on newer timestamps interferes 13932 * with our earlier PAWS tests, so this check should be solely 13933 * predicated on the sequence space of this segment. 3) That we 13934 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13935 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13936 * SEG.Len, This modified check allows us to overcome RFC1323's 13937 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13938 * p.869. In such cases, we can still calculate the RTT correctly 13939 * when RCV.NXT == Last.ACK.Sent. 13940 */ 13941 if ((to->to_flags & TOF_TS) != 0 && 13942 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13943 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13944 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13945 tp->ts_recent_age = tcp_ts_getticks(); 13946 tp->ts_recent = to->to_tsval; 13947 } 13948 /* 13949 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13950 * is on (half-synchronized state), then queue data for later 13951 * processing; else drop segment and return. 13952 */ 13953 if ((thflags & TH_ACK) == 0) { 13954 if (tp->t_flags & TF_NEEDSYN) { 13955 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13956 tiwin, thflags, nxt_pkt)); 13957 } else if (tp->t_flags & TF_ACKNOW) { 13958 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13959 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13960 return (ret_val); 13961 } else { 13962 ctf_do_drop(m, NULL); 13963 return (0); 13964 } 13965 } 13966 /* 13967 * Ack processing. 13968 */ 13969 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13970 return (ret_val); 13971 } 13972 if (sbavail(&so->so_snd)) { 13973 if (ctf_progress_timeout_check(tp, true)) { 13974 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13975 tp, tick, PROGRESS_DROP, __LINE__); 13976 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13977 return (1); 13978 } 13979 } 13980 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13981 tiwin, thflags, nxt_pkt)); 13982 } 13983 13984 static void inline 13985 rack_clear_rate_sample(struct tcp_rack *rack) 13986 { 13987 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 13988 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 13989 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 13990 } 13991 13992 static void 13993 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 13994 { 13995 uint64_t bw_est, rate_wanted; 13996 int chged = 0; 13997 uint32_t user_max, orig_min, orig_max; 13998 13999 #ifdef TCP_REQUEST_TRK 14000 if (rack->rc_hybrid_mode && 14001 (rack->r_ctl.rc_pace_max_segs != 0) && 14002 (rack_hybrid_allow_set_maxseg == 1) && 14003 (rack->r_ctl.rc_last_sft != NULL)) { 14004 rack->r_ctl.rc_last_sft->hybrid_flags &= ~TCP_HYBRID_PACING_SETMSS; 14005 return; 14006 } 14007 #endif 14008 orig_min = rack->r_ctl.rc_pace_min_segs; 14009 orig_max = rack->r_ctl.rc_pace_max_segs; 14010 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 14011 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 14012 chged = 1; 14013 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 14014 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 14015 if (user_max != rack->r_ctl.rc_pace_max_segs) 14016 chged = 1; 14017 } 14018 if (rack->rc_force_max_seg) { 14019 rack->r_ctl.rc_pace_max_segs = user_max; 
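/*
* Forced max-seg: the operator supplied segment count
* (rc_user_set_max_segs * the fixed maxseg, i.e. user_max)
* is taken as-is; no bandwidth estimate is consulted.
*/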
14020 } else if (rack->use_fixed_rate) { 14021 bw_est = rack_get_bw(rack); 14022 if ((rack->r_ctl.crte == NULL) || 14023 (bw_est != rack->r_ctl.crte->rate)) { 14024 rack->r_ctl.rc_pace_max_segs = user_max; 14025 } else { 14026 /* We are pacing right at the hardware rate */ 14027 uint32_t segsiz, pace_one; 14028 14029 if (rack_pace_one_seg || 14030 (rack->r_ctl.rc_user_set_min_segs == 1)) 14031 pace_one = 1; 14032 else 14033 pace_one = 0; 14034 segsiz = min(ctf_fixed_maxseg(tp), 14035 rack->r_ctl.rc_pace_min_segs); 14036 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor( 14037 tp, bw_est, segsiz, pace_one, 14038 rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); 14039 } 14040 } else if (rack->rc_always_pace) { 14041 if (rack->r_ctl.gp_bw || 14042 rack->r_ctl.init_rate) { 14043 /* We have a rate of some sort set */ 14044 uint32_t orig; 14045 14046 bw_est = rack_get_bw(rack); 14047 orig = rack->r_ctl.rc_pace_max_segs; 14048 if (fill_override) 14049 rate_wanted = *fill_override; 14050 else 14051 rate_wanted = rack_get_gp_est(rack); 14052 if (rate_wanted) { 14053 /* We have something */ 14054 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 14055 rate_wanted, 14056 ctf_fixed_maxseg(rack->rc_tp)); 14057 } else 14058 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 14059 if (orig != rack->r_ctl.rc_pace_max_segs) 14060 chged = 1; 14061 } else if ((rack->r_ctl.gp_bw == 0) && 14062 (rack->r_ctl.rc_pace_max_segs == 0)) { 14063 /* 14064 * If we have nothing limit us to bursting 14065 * out IW sized pieces. 14066 */ 14067 chged = 1; 14068 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 14069 } 14070 } 14071 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 14072 chged = 1; 14073 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 14074 } 14075 if (chged) 14076 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 14077 } 14078 14079 14080 static void 14081 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack, int32_t flags) 14082 { 14083 #ifdef INET6 14084 struct ip6_hdr *ip6 = NULL; 14085 #endif 14086 #ifdef INET 14087 struct ip *ip = NULL; 14088 #endif 14089 struct udphdr *udp = NULL; 14090 14091 /* Ok lets fill in the fast block, it can only be used with no IP options! 
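* The fast send block (fsb) is a pre-built IPv4/IPv6 (+ optional
* UDP tunneling) + TCP header template, sized in rack_init_fsb(),
* that the fast output path can reuse instead of rebuilding
* headers for every segment -- presumably why a connection using
* IP options cannot use it, since the template length is fixed.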
*/ 14092 #ifdef INET6 14093 if (rack->r_is_v6) { 14094 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 14095 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 14096 if (tp->t_port) { 14097 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 14098 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 14099 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 14100 udp->uh_dport = tp->t_port; 14101 rack->r_ctl.fsb.udp = udp; 14102 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 14103 } else 14104 { 14105 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 14106 rack->r_ctl.fsb.udp = NULL; 14107 } 14108 tcpip_fillheaders(rack->rc_inp, 14109 tp->t_port, 14110 ip6, rack->r_ctl.fsb.th); 14111 rack->r_ctl.fsb.hoplimit = in6_selecthlim(rack->rc_inp, NULL); 14112 } else 14113 #endif /* INET6 */ 14114 #ifdef INET 14115 { 14116 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 14117 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 14118 if (tp->t_port) { 14119 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 14120 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 14121 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 14122 udp->uh_dport = tp->t_port; 14123 rack->r_ctl.fsb.udp = udp; 14124 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 14125 } else 14126 { 14127 rack->r_ctl.fsb.udp = NULL; 14128 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 14129 } 14130 tcpip_fillheaders(rack->rc_inp, 14131 tp->t_port, 14132 ip, rack->r_ctl.fsb.th); 14133 rack->r_ctl.fsb.hoplimit = tptoinpcb(tp)->inp_ip_ttl; 14134 } 14135 #endif 14136 rack->r_ctl.fsb.recwin = lmin(lmax(sbspace(&tptosocket(tp)->so_rcv), 0), 14137 (long)TCP_MAXWIN << tp->rcv_scale); 14138 rack->r_fsb_inited = 1; 14139 } 14140 14141 static int 14142 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 14143 { 14144 /* 14145 * Allocate the larger of spaces V6 if available else just 14146 * V4 and include udphdr (overbook) 14147 */ 14148 #ifdef INET6 14149 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 14150 #else 14151 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 14152 #endif 14153 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 14154 M_TCPFSB, M_NOWAIT|M_ZERO); 14155 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 14156 return (ENOMEM); 14157 } 14158 rack->r_fsb_inited = 0; 14159 return (0); 14160 } 14161 14162 static void 14163 rack_log_hystart_event(struct tcp_rack *rack, uint32_t high_seq, uint8_t mod) 14164 { 14165 /* 14166 * Types of logs (mod value) 14167 * 20 - Initial round setup 14168 * 21 - Rack declares a new round. 
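* In the BB log record built below, flex1 carries the current
* round count, flex2 the sequence number that ends the round,
* flex3 the high_seq argument, flex4 snd_max and flex8 the mod
* value listed above.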
14169 */ 14170 struct tcpcb *tp; 14171 14172 tp = rack->rc_tp; 14173 if (tcp_bblogging_on(tp)) { 14174 union tcp_log_stackspecific log; 14175 struct timeval tv; 14176 14177 memset(&log, 0, sizeof(log)); 14178 log.u_bbr.flex1 = rack->r_ctl.current_round; 14179 log.u_bbr.flex2 = rack->r_ctl.roundends; 14180 log.u_bbr.flex3 = high_seq; 14181 log.u_bbr.flex4 = tp->snd_max; 14182 log.u_bbr.flex8 = mod; 14183 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14184 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 14185 log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes; 14186 TCP_LOG_EVENTP(tp, NULL, 14187 &tptosocket(tp)->so_rcv, 14188 &tptosocket(tp)->so_snd, 14189 TCP_HYSTART, 0, 14190 0, &log, false, &tv); 14191 } 14192 } 14193 14194 static void 14195 rack_deferred_init(struct tcpcb *tp, struct tcp_rack *rack) 14196 { 14197 rack->rack_deferred_inited = 1; 14198 rack->r_ctl.roundends = tp->snd_max; 14199 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 14200 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 14201 } 14202 14203 static void 14204 rack_init_retransmit_value(struct tcp_rack *rack, int ctl) 14205 { 14206 /* Retransmit bit controls. 14207 * 14208 * The setting of these values control one of 14209 * three settings you can have and dictate 14210 * how rack does retransmissions. Note this 14211 * is in *any* mode i.e. pacing on or off DGP 14212 * fixed rate pacing, or just bursting rack. 14213 * 14214 * 1 - Use full sized retransmits i.e. limit 14215 * the size to whatever the pace_max_segments 14216 * size is. 14217 * 14218 * 2 - Use pacer min granularity as a guide to 14219 * the size combined with the current calculated 14220 * goodput b/w measurement. So for example if 14221 * the goodput is measured at 20Mbps we would 14222 * calculate 8125 (pacer minimum 250usec in 14223 * that b/w) and then round it up to the next 14224 * MSS i.e. for 1448 mss 6 MSS or 8688 bytes. 14225 * 14226 * 0 - The rack default 1 MSS (anything not 0/1/2 14227 * fall here too if we are setting via rack_init()). 
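* In terms of the flags set below: ctl == 1 sets full_size_rxt,
* ctl == 2 sets shape_rxt_to_pacing_min, and any other value
* clears both, leaving the default single-MSS retransmit
* behaviour.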
14228 * 14229 */ 14230 if (ctl == 1) { 14231 rack->full_size_rxt = 1; 14232 rack->shape_rxt_to_pacing_min = 0; 14233 } else if (ctl == 2) { 14234 rack->full_size_rxt = 0; 14235 rack->shape_rxt_to_pacing_min = 1; 14236 } else { 14237 rack->full_size_rxt = 0; 14238 rack->shape_rxt_to_pacing_min = 0; 14239 } 14240 } 14241 14242 static void 14243 rack_log_chg_info(struct tcpcb *tp, struct tcp_rack *rack, uint8_t mod, 14244 uint32_t flex1, 14245 uint32_t flex2, 14246 uint32_t flex3) 14247 { 14248 if (tcp_bblogging_on(rack->rc_tp)) { 14249 union tcp_log_stackspecific log; 14250 struct timeval tv; 14251 14252 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14253 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14254 log.u_bbr.flex8 = mod; 14255 log.u_bbr.flex1 = flex1; 14256 log.u_bbr.flex2 = flex2; 14257 log.u_bbr.flex3 = flex3; 14258 tcp_log_event(tp, NULL, NULL, NULL, TCP_CHG_QUERY, 0, 14259 0, &log, false, NULL, __func__, __LINE__, &tv); 14260 } 14261 } 14262 14263 static int 14264 rack_chg_query(struct tcpcb *tp, struct tcp_query_resp *reqr) 14265 { 14266 struct tcp_rack *rack; 14267 struct rack_sendmap *rsm; 14268 int i; 14269 14270 14271 rack = (struct tcp_rack *)tp->t_fb_ptr; 14272 switch (reqr->req) { 14273 case TCP_QUERY_SENDMAP: 14274 if ((reqr->req_param == tp->snd_max) || 14275 (tp->snd_max == tp->snd_una)){ 14276 /* Unlikely */ 14277 return (0); 14278 } 14279 rsm = tqhash_find(rack->r_ctl.tqh, reqr->req_param); 14280 if (rsm == NULL) { 14281 /* Can't find that seq -- unlikely */ 14282 return (0); 14283 } 14284 reqr->sendmap_start = rsm->r_start; 14285 reqr->sendmap_end = rsm->r_end; 14286 reqr->sendmap_send_cnt = rsm->r_rtr_cnt; 14287 reqr->sendmap_fas = rsm->r_fas; 14288 if (reqr->sendmap_send_cnt > SNDMAP_NRTX) 14289 reqr->sendmap_send_cnt = SNDMAP_NRTX; 14290 for(i=0; i<reqr->sendmap_send_cnt; i++) 14291 reqr->sendmap_time[i] = rsm->r_tim_lastsent[i]; 14292 reqr->sendmap_ack_arrival = rsm->r_ack_arrival; 14293 reqr->sendmap_flags = rsm->r_flags & SNDMAP_MASK; 14294 reqr->sendmap_r_rtr_bytes = rsm->r_rtr_bytes; 14295 reqr->sendmap_dupacks = rsm->r_dupack; 14296 rack_log_chg_info(tp, rack, 1, 14297 rsm->r_start, 14298 rsm->r_end, 14299 rsm->r_flags); 14300 return(1); 14301 break; 14302 case TCP_QUERY_TIMERS_UP: 14303 if (rack->r_ctl.rc_hpts_flags == 0) { 14304 /* no timers up */ 14305 return (0); 14306 } 14307 reqr->timer_hpts_flags = rack->r_ctl.rc_hpts_flags; 14308 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14309 reqr->timer_pacing_to = rack->r_ctl.rc_last_output_to; 14310 } 14311 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 14312 reqr->timer_timer_exp = rack->r_ctl.rc_timer_exp; 14313 } 14314 rack_log_chg_info(tp, rack, 2, 14315 rack->r_ctl.rc_hpts_flags, 14316 rack->r_ctl.rc_last_output_to, 14317 rack->r_ctl.rc_timer_exp); 14318 return (1); 14319 break; 14320 case TCP_QUERY_RACK_TIMES: 14321 /* Reordering items */ 14322 reqr->rack_num_dsacks = rack->r_ctl.num_dsack; 14323 reqr->rack_reorder_ts = rack->r_ctl.rc_reorder_ts; 14324 /* Timerstamps and timers */ 14325 reqr->rack_rxt_last_time = rack->r_ctl.rc_tlp_rxt_last_time; 14326 reqr->rack_min_rtt = rack->r_ctl.rc_rack_min_rtt; 14327 reqr->rack_rtt = rack->rc_rack_rtt; 14328 reqr->rack_tmit_time = rack->r_ctl.rc_rack_tmit_time; 14329 reqr->rack_srtt_measured = rack->rc_srtt_measure_made; 14330 /* PRR data */ 14331 reqr->rack_sacked = rack->r_ctl.rc_sacked; 14332 reqr->rack_holes_rxt = rack->r_ctl.rc_holes_rxt; 14333 reqr->rack_prr_delivered = rack->r_ctl.rc_prr_delivered; 14334 reqr->rack_prr_recovery_fs = 
rack->r_ctl.rc_prr_recovery_fs; 14335 reqr->rack_prr_sndcnt = rack->r_ctl.rc_prr_sndcnt; 14336 reqr->rack_prr_out = rack->r_ctl.rc_prr_out; 14337 /* TLP and persists info */ 14338 reqr->rack_tlp_out = rack->rc_tlp_in_progress; 14339 reqr->rack_tlp_cnt_out = rack->r_ctl.rc_tlp_cnt_out; 14340 if (rack->rc_in_persist) { 14341 reqr->rack_time_went_idle = rack->r_ctl.rc_went_idle_time; 14342 reqr->rack_in_persist = 1; 14343 } else { 14344 reqr->rack_time_went_idle = 0; 14345 reqr->rack_in_persist = 0; 14346 } 14347 if (rack->r_wanted_output) 14348 reqr->rack_wanted_output = 1; 14349 else 14350 reqr->rack_wanted_output = 0; 14351 return (1); 14352 break; 14353 default: 14354 return (-EINVAL); 14355 } 14356 } 14357 14358 static void 14359 rack_switch_failed(struct tcpcb *tp) 14360 { 14361 /* 14362 * This method gets called if a stack switch was 14363 * attempted and it failed. We are left 14364 * but our hpts timers were stopped and we 14365 * need to validate time units and t_flags2. 14366 */ 14367 struct tcp_rack *rack; 14368 struct timeval tv; 14369 uint32_t cts; 14370 uint32_t toval; 14371 struct hpts_diag diag; 14372 14373 rack = (struct tcp_rack *)tp->t_fb_ptr; 14374 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC); 14375 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 14376 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 14377 else 14378 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 14379 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 14380 tp->t_flags2 |= TF2_MBUF_ACKCMP; 14381 if (tp->t_in_hpts > IHPTS_NONE) { 14382 /* Strange */ 14383 return; 14384 } 14385 cts = tcp_get_usecs(&tv); 14386 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14387 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 14388 toval = rack->r_ctl.rc_last_output_to - cts; 14389 } else { 14390 /* one slot please */ 14391 toval = HPTS_TICKS_PER_SLOT; 14392 } 14393 } else if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 14394 if (TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 14395 toval = rack->r_ctl.rc_timer_exp - cts; 14396 } else { 14397 /* one slot please */ 14398 toval = HPTS_TICKS_PER_SLOT; 14399 } 14400 } else 14401 toval = HPTS_TICKS_PER_SLOT; 14402 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(toval), 14403 __LINE__, &diag); 14404 rack_log_hpts_diag(rack, cts, &diag, &tv); 14405 } 14406 14407 static int 14408 rack_init_outstanding(struct tcpcb *tp, struct tcp_rack *rack, uint32_t us_cts, void *ptr) 14409 { 14410 struct rack_sendmap *rsm, *ersm; 14411 int insret __diagused; 14412 /* 14413 * When initing outstanding, we must be quite careful 14414 * to not refer to tp->t_fb_ptr. This has the old rack 14415 * pointer in it, not the "new" one (when we are doing 14416 * a stack switch). 
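* That is why the freshly allocated control block is passed in via
* the 'ptr' argument (and freed through it on allocation failure)
* while all state is reached through the 'rack' argument rather
* than tp->t_fb_ptr.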
14417 */ 14418 14419 14420 if (tp->t_fb->tfb_chg_query == NULL) { 14421 /* Create a send map for the current outstanding data */ 14422 14423 rsm = rack_alloc(rack); 14424 if (rsm == NULL) { 14425 uma_zfree(rack_pcb_zone, ptr); 14426 return (ENOMEM); 14427 } 14428 rsm->r_no_rtt_allowed = 1; 14429 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 14430 rsm->r_rtr_cnt = 1; 14431 rsm->r_rtr_bytes = 0; 14432 if (tp->t_flags & TF_SENTFIN) 14433 rsm->r_flags |= RACK_HAS_FIN; 14434 rsm->r_end = tp->snd_max; 14435 if (tp->snd_una == tp->iss) { 14436 /* The data space is one beyond snd_una */ 14437 rsm->r_flags |= RACK_HAS_SYN; 14438 rsm->r_start = tp->iss; 14439 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); 14440 } else 14441 rsm->r_start = tp->snd_una; 14442 rsm->r_dupack = 0; 14443 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 14444 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 14445 if (rsm->m) { 14446 rsm->orig_m_len = rsm->m->m_len; 14447 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 14448 } else { 14449 rsm->orig_m_len = 0; 14450 rsm->orig_t_space = 0; 14451 } 14452 } else { 14453 /* 14454 * This can happen if we have a stand-alone FIN or 14455 * SYN. 14456 */ 14457 rsm->m = NULL; 14458 rsm->orig_m_len = 0; 14459 rsm->orig_t_space = 0; 14460 rsm->soff = 0; 14461 } 14462 #ifdef INVARIANTS 14463 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 14464 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p", 14465 insret, rack, rsm); 14466 } 14467 #else 14468 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 14469 #endif 14470 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 14471 rsm->r_in_tmap = 1; 14472 } else { 14473 /* We have a query mechanism, lets use it */ 14474 struct tcp_query_resp qr; 14475 int i; 14476 tcp_seq at; 14477 14478 at = tp->snd_una; 14479 while (at != tp->snd_max) { 14480 memset(&qr, 0, sizeof(qr)); 14481 qr.req = TCP_QUERY_SENDMAP; 14482 qr.req_param = at; 14483 if ((*tp->t_fb->tfb_chg_query)(tp, &qr) == 0) 14484 break; 14485 /* Move forward */ 14486 at = qr.sendmap_end; 14487 /* Now lets build the entry for this one */ 14488 rsm = rack_alloc(rack); 14489 if (rsm == NULL) { 14490 uma_zfree(rack_pcb_zone, ptr); 14491 return (ENOMEM); 14492 } 14493 memset(rsm, 0, sizeof(struct rack_sendmap)); 14494 /* Now configure the rsm and insert it */ 14495 rsm->r_dupack = qr.sendmap_dupacks; 14496 rsm->r_start = qr.sendmap_start; 14497 rsm->r_end = qr.sendmap_end; 14498 if (qr.sendmap_fas) 14499 rsm->r_fas = qr.sendmap_end; 14500 else 14501 rsm->r_fas = rsm->r_start - tp->snd_una; 14502 /* 14503 * We have carefully aligned the bits 14504 * so that all we have to do is copy over 14505 * the bits with the mask. 
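* In other words the SNDMAP_* flag values exported in
* struct tcp_query_resp are laid out in the same bit positions as
* the corresponding RACK_* sendmap flags, so a single AND with
* SNDMAP_MASK is enough to import them.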
14506 */ 14507 rsm->r_flags = qr.sendmap_flags & SNDMAP_MASK; 14508 rsm->r_rtr_bytes = qr.sendmap_r_rtr_bytes; 14509 rsm->r_rtr_cnt = qr.sendmap_send_cnt; 14510 rsm->r_ack_arrival = qr.sendmap_ack_arrival; 14511 for (i=0 ; i<rsm->r_rtr_cnt; i++) 14512 rsm->r_tim_lastsent[i] = qr.sendmap_time[i]; 14513 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 14514 (rsm->r_start - tp->snd_una), &rsm->soff); 14515 if (rsm->m) { 14516 rsm->orig_m_len = rsm->m->m_len; 14517 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 14518 } else { 14519 rsm->orig_m_len = 0; 14520 rsm->orig_t_space = 0; 14521 } 14522 #ifdef INVARIANTS 14523 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 14524 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p", 14525 insret, rack, rsm); 14526 } 14527 #else 14528 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 14529 #endif 14530 if ((rsm->r_flags & RACK_ACKED) == 0) { 14531 TAILQ_FOREACH(ersm, &rack->r_ctl.rc_tmap, r_tnext) { 14532 if (ersm->r_tim_lastsent[(ersm->r_rtr_cnt-1)] > 14533 rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) { 14534 /* 14535 * If the existing ersm was sent at 14536 * a later time than the new one, then 14537 * the new one should appear ahead of this 14538 * ersm. 14539 */ 14540 rsm->r_in_tmap = 1; 14541 TAILQ_INSERT_BEFORE(ersm, rsm, r_tnext); 14542 break; 14543 } 14544 } 14545 if (rsm->r_in_tmap == 0) { 14546 /* 14547 * Not found so shove it on the tail. 14548 */ 14549 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 14550 rsm->r_in_tmap = 1; 14551 } 14552 } else { 14553 if ((rack->r_ctl.rc_sacklast == NULL) || 14554 (SEQ_GT(rsm->r_end, rack->r_ctl.rc_sacklast->r_end))) { 14555 rack->r_ctl.rc_sacklast = rsm; 14556 } 14557 } 14558 rack_log_chg_info(tp, rack, 3, 14559 rsm->r_start, 14560 rsm->r_end, 14561 rsm->r_flags); 14562 } 14563 } 14564 return (0); 14565 } 14566 14567 14568 static int32_t 14569 rack_init(struct tcpcb *tp, void **ptr) 14570 { 14571 struct inpcb *inp = tptoinpcb(tp); 14572 struct tcp_rack *rack = NULL; 14573 uint32_t iwin, snt, us_cts; 14574 size_t sz; 14575 int err, no_query; 14576 14577 tcp_hpts_init(tp); 14578 14579 /* 14580 * First are we the initial or are we a switched stack? 14581 * If we are initing via tcp_newtcppcb the ptr passed 14582 * will be tp->t_fb_ptr. If its a stack switch that 14583 * has a previous stack we can query it will be a local 14584 * var that will in the end be set into t_fb_ptr. 14585 */ 14586 if (ptr == &tp->t_fb_ptr) 14587 no_query = 1; 14588 else 14589 no_query = 0; 14590 *ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 14591 if (*ptr == NULL) { 14592 /* 14593 * We need to allocate memory but cant. The INP and INP_INFO 14594 * locks and they are recursive (happens during setup. 
So a 14595 * scheme to drop the locks fails :( 14596 * 14597 */ 14598 return(ENOMEM); 14599 } 14600 memset(*ptr, 0, sizeof(struct tcp_rack)); 14601 rack = (struct tcp_rack *)*ptr; 14602 rack->r_ctl.tqh = malloc(sizeof(struct tailq_hash), M_TCPFSB, M_NOWAIT); 14603 if (rack->r_ctl.tqh == NULL) { 14604 uma_zfree(rack_pcb_zone, rack); 14605 return(ENOMEM); 14606 } 14607 tqhash_init(rack->r_ctl.tqh); 14608 TAILQ_INIT(&rack->r_ctl.rc_free); 14609 TAILQ_INIT(&rack->r_ctl.rc_tmap); 14610 rack->rc_tp = tp; 14611 rack->rc_inp = inp; 14612 /* Set the flag */ 14613 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0; 14614 /* Probably not needed but lets be sure */ 14615 rack_clear_rate_sample(rack); 14616 /* 14617 * Save off the default values, socket options will poke 14618 * at these if pacing is not on or we have not yet 14619 * reached where pacing is on (gp_ready/fixed enabled). 14620 * When they get set into the CC module (when gp_ready 14621 * is enabled or we enable fixed) then we will set these 14622 * values into the CC and place in here the old values 14623 * so we have a restoral. Then we will set the flag 14624 * rc_pacing_cc_set. That way whenever we turn off pacing 14625 * or switch off this stack, we will know to go restore 14626 * the saved values. 14627 * 14628 * We specifically put into the beta the ecn value for pacing. 14629 */ 14630 rack->rc_new_rnd_needed = 1; 14631 rack->r_ctl.rc_split_limit = V_tcp_map_split_limit; 14632 /* We want abe like behavior as well */ 14633 14634 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; 14635 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 14636 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 14637 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 14638 if (rack_fill_cw_state) 14639 rack->rc_pace_to_cwnd = 1; 14640 if (rack_pacing_min_seg) 14641 rack->r_ctl.rc_user_set_min_segs = rack_pacing_min_seg; 14642 if (use_rack_rr) 14643 rack->use_rack_rr = 1; 14644 if (rack_dnd_default) { 14645 rack->rc_pace_dnd = 1; 14646 } 14647 if (V_tcp_delack_enabled) 14648 tp->t_delayed_ack = 1; 14649 else 14650 tp->t_delayed_ack = 0; 14651 #ifdef TCP_ACCOUNTING 14652 if (rack_tcp_accounting) { 14653 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 14654 } 14655 #endif 14656 rack->r_ctl.pcm_i.cnt_alloc = RACK_DEFAULT_PCM_ARRAY; 14657 sz = (sizeof(struct rack_pcm_stats) * rack->r_ctl.pcm_i.cnt_alloc); 14658 rack->r_ctl.pcm_s = malloc(sz,M_TCPPCM, M_NOWAIT); 14659 if (rack->r_ctl.pcm_s == NULL) { 14660 rack->r_ctl.pcm_i.cnt_alloc = 0; 14661 } 14662 #ifdef NETFLIX_STATS 14663 rack->r_ctl.side_chan_dis_mask = tcp_sidechannel_disable_mask; 14664 #endif 14665 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss; 14666 rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_per_upper_bound_ca; 14667 if (rack_enable_shared_cwnd) 14668 rack->rack_enable_scwnd = 1; 14669 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; 14670 rack->rc_user_set_max_segs = rack_hptsi_segments; 14671 rack->r_ctl.max_reduction = rack_max_reduce; 14672 rack->rc_force_max_seg = 0; 14673 TAILQ_INIT(&rack->r_ctl.opt_list); 14674 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn; 14675 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn; 14676 if (rack_hibeta_setting) { 14677 rack->rack_hibeta = 1; 14678 if ((rack_hibeta_setting >= 50) && 14679 (rack_hibeta_setting <= 100)) { 14680 rack->r_ctl.rc_saved_beta.beta = rack_hibeta_setting; 14681 rack->r_ctl.saved_hibeta = rack_hibeta_setting; 14682 } 14683 } else { 14684 rack->r_ctl.saved_hibeta = 50; 
14685 } 14686 /* 14687 * We initialize to all ones so we never match 0 14688 * just in case the client sends in 0, it hopefully 14689 * will never have all 1's in ms :-) 14690 */ 14691 rack->r_ctl.last_tm_mark = 0xffffffffffffffff; 14692 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 14693 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 14694 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 14695 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 14696 rack->r_ctl.rc_highest_us_rtt = 0; 14697 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 14698 rack->pcm_enabled = rack_pcm_is_enabled; 14699 if (rack_fillcw_bw_cap) 14700 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 14701 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 14702 if (rack_use_cmp_acks) 14703 rack->r_use_cmp_ack = 1; 14704 if (rack_disable_prr) 14705 rack->rack_no_prr = 1; 14706 if (rack_gp_no_rec_chg) 14707 rack->rc_gp_no_rec_chg = 1; 14708 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 14709 rack->r_ctl.pacing_method |= RACK_REG_PACING; 14710 rack->rc_always_pace = 1; 14711 if (rack->rack_hibeta) 14712 rack_set_cc_pacing(rack); 14713 } else 14714 rack->rc_always_pace = 0; 14715 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 14716 rack->r_mbuf_queue = 1; 14717 else 14718 rack->r_mbuf_queue = 0; 14719 rack_set_pace_segments(tp, rack, __LINE__, NULL); 14720 if (rack_limits_scwnd) 14721 rack->r_limit_scw = 1; 14722 else 14723 rack->r_limit_scw = 0; 14724 rack_init_retransmit_value(rack, rack_rxt_controls); 14725 rack->rc_labc = V_tcp_abc_l_var; 14726 if (rack_honors_hpts_min_to) 14727 rack->r_use_hpts_min = 1; 14728 if (tp->snd_una != 0) { 14729 rack->rc_sendvars_notset = 0; 14730 /* 14731 * Make sure any TCP timers are not running. 14732 */ 14733 tcp_timer_stop(tp); 14734 } else { 14735 /* 14736 * Server side, we are called from the 14737 * syn-cache. This means none of the 14738 * snd_una/max are set yet so we have 14739 * to defer this until the first send. 
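* rc_sendvars_notset (set just below) records this, so anything
* that depends on real snd_una/snd_max values is deferred to the
* first send rather than being derived from the zero values we
* see here.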
14740 */ 14741 rack->rc_sendvars_notset = 1; 14742 } 14743 14744 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; 14745 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 14746 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 14747 rack->r_ctl.rc_min_to = rack_min_to; 14748 microuptime(&rack->r_ctl.act_rcv_time); 14749 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 14750 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; 14751 if (rack_hw_up_only) 14752 rack->r_up_only = 1; 14753 if (rack_do_dyn_mul) { 14754 /* When dynamic adjustment is on CA needs to start at 100% */ 14755 rack->rc_gp_dyn_mul = 1; 14756 if (rack_do_dyn_mul >= 100) 14757 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 14758 } else 14759 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 14760 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 14761 if (rack_timely_off) { 14762 rack->rc_skip_timely = 1; 14763 } 14764 if (rack->rc_skip_timely) { 14765 rack->r_ctl.rack_per_of_gp_rec = 90; 14766 rack->r_ctl.rack_per_of_gp_ca = 100; 14767 rack->r_ctl.rack_per_of_gp_ss = 250; 14768 } 14769 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 14770 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 14771 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 14772 14773 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 14774 rack_probertt_filter_life); 14775 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 14776 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 14777 rack->r_ctl.rc_time_of_last_probertt = us_cts; 14778 rack->r_ctl.rc_went_idle_time = us_cts; 14779 rack->r_ctl.rc_time_probertt_starts = 0; 14780 14781 rack->r_ctl.gp_rnd_thresh = rack_rnd_cnt_req & 0xff; 14782 if (rack_rnd_cnt_req & 0x10000) 14783 rack->r_ctl.gate_to_fs = 1; 14784 rack->r_ctl.gp_gain_req = rack_gp_gain_req; 14785 if ((rack_rnd_cnt_req & 0x100) > 0) { 14786 14787 } 14788 if (rack_dsack_std_based & 0x1) { 14789 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 14790 rack->rc_rack_tmr_std_based = 1; 14791 } 14792 if (rack_dsack_std_based & 0x2) { 14793 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 14794 rack->rc_rack_use_dsack = 1; 14795 } 14796 /* We require at least one measurement, even if the sysctl is 0 */ 14797 if (rack_req_measurements) 14798 rack->r_ctl.req_measurements = rack_req_measurements; 14799 else 14800 rack->r_ctl.req_measurements = 1; 14801 if (rack_enable_hw_pacing) 14802 rack->rack_hdw_pace_ena = 1; 14803 if (rack_hw_rate_caps) 14804 rack->r_rack_hw_rate_caps = 1; 14805 if (rack_non_rxt_use_cr) 14806 rack->rack_rec_nonrxt_use_cr = 1; 14807 /* Lets setup the fsb block */ 14808 err = rack_init_fsb(tp, rack); 14809 if (err) { 14810 uma_zfree(rack_pcb_zone, *ptr); 14811 *ptr = NULL; 14812 return (err); 14813 } 14814 if (rack_do_hystart) { 14815 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 14816 if (rack_do_hystart > 1) 14817 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 14818 if (rack_do_hystart > 2) 14819 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 14820 } 14821 /* Log what we will do with queries */ 14822 rack_log_chg_info(tp, rack, 7, 14823 no_query, 0, 0); 14824 if (rack_def_profile) 14825 rack_set_profile(rack, rack_def_profile); 14826 /* Cancel the GP measurement in progress */ 14827 tp->t_flags &= ~TF_GPUTINPROG; 14828 if ((tp->t_state != TCPS_CLOSED) && 14829 (tp->t_state != TCPS_TIME_WAIT)) { 14830 /* 14831 * We are already open, we may 14832 * 
need to adjust a few things. 14833 */ 14834 if (SEQ_GT(tp->snd_max, tp->iss)) 14835 snt = tp->snd_max - tp->iss; 14836 else 14837 snt = 0; 14838 iwin = rc_init_window(rack); 14839 if ((snt < iwin) && 14840 (no_query == 1)) { 14841 /* We are not past the initial window 14842 * on the first init (i.e. a stack switch 14843 * has not yet occurred) so we need to make 14844 * sure cwnd and ssthresh are correct. 14845 */ 14846 if (tp->snd_cwnd < iwin) 14847 tp->snd_cwnd = iwin; 14848 /* 14849 * If we are within the initial window 14850 * we want ssthresh to be unlimited. Setting 14851 * it to the rwnd (which the default stack does 14852 * and older racks) is not really a good idea 14853 * since we want to be in SS and grow both the 14854 * cwnd and the rwnd (via dynamic rwnd growth). If 14855 * we set it to the rwnd then as the peer grows its 14856 * rwnd we will be stuck in CA and never hit SS. 14857 * 14858 * It's far better to raise it up high (this takes the 14859 * risk that there has been a loss already, probably 14860 * we should have an indicator in all stacks of loss 14861 * but we don't), but considering the normal use this 14862 * is a risk worth taking. The consequences of not 14863 * hitting SS are far worse than going one more time 14864 * into it early on (before we have sent even an IW). 14865 * It is highly unlikely that we will have had a loss 14866 * before getting the IW out. 14867 */ 14868 tp->snd_ssthresh = 0xffffffff; 14869 } 14870 /* 14871 * Any init based on sequence numbers 14872 * should be done in the deferred init path 14873 * since we can be CLOSED and not have them 14874 * inited when rack_init() is called. We 14875 * are not closed so let's call it. 14876 */ 14877 rack_deferred_init(tp, rack); 14878 } 14879 if ((tp->t_state != TCPS_CLOSED) && 14880 (tp->t_state != TCPS_TIME_WAIT) && 14881 (no_query == 0) && 14882 (tp->snd_una != tp->snd_max)) { 14883 err = rack_init_outstanding(tp, rack, us_cts, *ptr); 14884 if (err) { 14885 *ptr = NULL; 14886 return(err); 14887 } 14888 } 14889 rack_stop_all_timers(tp, rack); 14890 /* Setup all the t_flags2 */ 14891 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 14892 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 14893 else 14894 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 14895 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 14896 tp->t_flags2 |= TF2_MBUF_ACKCMP; 14897 /* 14898 * Timers in Rack are kept in microseconds so let's 14899 * convert any initial incoming variables 14900 * from ticks into usecs. Note that we 14901 * also change the values of t_srtt and t_rttvar, if 14902 * they are non-zero. They are kept with a 5 14903 * bit decimal so we have to carefully convert 14904 * these to get the full precision.
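 * (For example, with a 5 bit fraction a raw t_srtt of 32 represents
 * 1.0 tick, which at hz=1000 is 1000 usec; a naive shift-and-multiply
 * would lose the fractional part, hence the careful conversion.)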
14905 */ 14906 rack_convert_rtts(tp); 14907 rack_log_hystart_event(rack, rack->r_ctl.roundends, 20); 14908 if ((tptoinpcb(tp)->inp_flags & INP_DROPPED) == 0) { 14909 /* We do not start any timers on DROPPED connections */ 14910 if (tp->t_fb->tfb_chg_query == NULL) { 14911 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 14912 } else { 14913 struct tcp_query_resp qr; 14914 int ret; 14915 14916 memset(&qr, 0, sizeof(qr)); 14917 14918 /* Get the misc time stamps and such for rack */ 14919 qr.req = TCP_QUERY_RACK_TIMES; 14920 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); 14921 if (ret == 1) { 14922 rack->r_ctl.rc_reorder_ts = qr.rack_reorder_ts; 14923 rack->r_ctl.num_dsack = qr.rack_num_dsacks; 14924 rack->r_ctl.rc_tlp_rxt_last_time = qr.rack_rxt_last_time; 14925 rack->r_ctl.rc_rack_min_rtt = qr.rack_min_rtt; 14926 rack->rc_rack_rtt = qr.rack_rtt; 14927 rack->r_ctl.rc_rack_tmit_time = qr.rack_tmit_time; 14928 rack->r_ctl.rc_sacked = qr.rack_sacked; 14929 rack->r_ctl.rc_holes_rxt = qr.rack_holes_rxt; 14930 rack->r_ctl.rc_prr_delivered = qr.rack_prr_delivered; 14931 rack->r_ctl.rc_prr_recovery_fs = qr.rack_prr_recovery_fs; 14932 rack->r_ctl.rc_prr_sndcnt = qr.rack_prr_sndcnt; 14933 rack->r_ctl.rc_prr_out = qr.rack_prr_out; 14934 if (qr.rack_tlp_out) { 14935 rack->rc_tlp_in_progress = 1; 14936 rack->r_ctl.rc_tlp_cnt_out = qr.rack_tlp_cnt_out; 14937 } else { 14938 rack->rc_tlp_in_progress = 0; 14939 rack->r_ctl.rc_tlp_cnt_out = 0; 14940 } 14941 if (qr.rack_srtt_measured) 14942 rack->rc_srtt_measure_made = 1; 14943 if (qr.rack_in_persist == 1) { 14944 rack->r_ctl.rc_went_idle_time = qr.rack_time_went_idle; 14945 #ifdef NETFLIX_SHARED_CWND 14946 if (rack->r_ctl.rc_scw) { 14947 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 14948 rack->rack_scwnd_is_idle = 1; 14949 } 14950 #endif 14951 rack->r_ctl.persist_lost_ends = 0; 14952 rack->probe_not_answered = 0; 14953 rack->forced_ack = 0; 14954 tp->t_rxtshift = 0; 14955 rack->rc_in_persist = 1; 14956 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 14957 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 14958 } 14959 if (qr.rack_wanted_output) 14960 rack->r_wanted_output = 1; 14961 rack_log_chg_info(tp, rack, 6, 14962 qr.rack_min_rtt, 14963 qr.rack_rtt, 14964 qr.rack_reorder_ts); 14965 } 14966 /* Get the old stack timers */ 14967 qr.req_param = 0; 14968 qr.req = TCP_QUERY_TIMERS_UP; 14969 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); 14970 if (ret) { 14971 /* 14972 * non-zero return means we have a timer('s) 14973 * to start. Zero means no timer (no keepalive 14974 * I suppose). 
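 * Below we compute 'tov', the usec offset from now to the pacing
 * deadline (or, when no pacing deadline is pending, to the timer
 * expiration), and use it to re-insert the connection into the hpts
 * wheel.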
14975 */ 14976 uint32_t tov = 0; 14977 14978 rack->r_ctl.rc_hpts_flags = qr.timer_hpts_flags; 14979 if (qr.timer_hpts_flags & PACE_PKT_OUTPUT) { 14980 rack->r_ctl.rc_last_output_to = qr.timer_pacing_to; 14981 if (TSTMP_GT(qr.timer_pacing_to, us_cts)) 14982 tov = qr.timer_pacing_to - us_cts; 14983 else 14984 tov = HPTS_TICKS_PER_SLOT; 14985 } 14986 if (qr.timer_hpts_flags & PACE_TMR_MASK) { 14987 rack->r_ctl.rc_timer_exp = qr.timer_timer_exp; 14988 if (tov == 0) { 14989 if (TSTMP_GT(qr.timer_timer_exp, us_cts)) 14990 tov = qr.timer_timer_exp - us_cts; 14991 else 14992 tov = HPTS_TICKS_PER_SLOT; 14993 } 14994 } 14995 rack_log_chg_info(tp, rack, 4, 14996 rack->r_ctl.rc_hpts_flags, 14997 rack->r_ctl.rc_last_output_to, 14998 rack->r_ctl.rc_timer_exp); 14999 if (tov) { 15000 struct hpts_diag diag; 15001 15002 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(tov), 15003 __LINE__, &diag); 15004 rack_log_hpts_diag(rack, us_cts, &diag, &rack->r_ctl.act_rcv_time); 15005 } 15006 } 15007 } 15008 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, 15009 __LINE__, RACK_RTTS_INIT); 15010 } 15011 return (0); 15012 } 15013 15014 static int 15015 rack_handoff_ok(struct tcpcb *tp) 15016 { 15017 if ((tp->t_state == TCPS_CLOSED) || 15018 (tp->t_state == TCPS_LISTEN)) { 15019 /* Sure no problem though it may not stick */ 15020 return (0); 15021 } 15022 if ((tp->t_state == TCPS_SYN_SENT) || 15023 (tp->t_state == TCPS_SYN_RECEIVED)) { 15024 /* 15025 * We really don't know if you support sack, 15026 * you have to get to ESTAB or beyond to tell. 15027 */ 15028 return (EAGAIN); 15029 } 15030 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { 15031 /* 15032 * Rack will only send a FIN after all data is acknowledged. 15033 * So in this case we have more data outstanding. We can't 15034 * switch stacks until either all data and only the FIN 15035 * is left (in which case rack_init() now knows how 15036 * to deal with that) <or> all is acknowledged and we 15037 * are only left with incoming data, though why you 15038 * would want to switch to rack after all data is acknowledged 15039 * I have no idea (rrs)! 15040 */ 15041 return (EAGAIN); 15042 } 15043 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 15044 return (0); 15045 } 15046 /* 15047 * If we reach here we don't do SACK on this connection so we can 15048 * never do rack. 
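 * To summarize the returns: 0 lets the handoff proceed, EAGAIN asks
 * the caller to retry later (once the handshake completes or the
 * outstanding data drains), and EINVAL means this connection can
 * never run rack.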
15049 */ 15050 return (EINVAL); 15051 } 15052 15053 static void 15054 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 15055 { 15056 15057 if (tp->t_fb_ptr) { 15058 uint32_t cnt_free = 0; 15059 struct tcp_rack *rack; 15060 struct rack_sendmap *rsm; 15061 15062 tcp_handle_orphaned_packets(tp); 15063 tp->t_flags &= ~TF_FORCEDATA; 15064 rack = (struct tcp_rack *)tp->t_fb_ptr; 15065 rack_log_pacing_delay_calc(rack, 15066 0, 15067 0, 15068 0, 15069 rack_get_gp_est(rack), /* delRate */ 15070 rack_get_lt_bw(rack), /* rttProp */ 15071 20, __LINE__, NULL, 0); 15072 #ifdef NETFLIX_SHARED_CWND 15073 if (rack->r_ctl.rc_scw) { 15074 uint32_t limit; 15075 15076 if (rack->r_limit_scw) 15077 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 15078 else 15079 limit = 0; 15080 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 15081 rack->r_ctl.rc_scw_index, 15082 limit); 15083 rack->r_ctl.rc_scw = NULL; 15084 } 15085 #endif 15086 if (rack->r_ctl.fsb.tcp_ip_hdr) { 15087 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); 15088 rack->r_ctl.fsb.tcp_ip_hdr = NULL; 15089 rack->r_ctl.fsb.th = NULL; 15090 } 15091 if (rack->rc_always_pace == 1) { 15092 rack_remove_pacing(rack); 15093 } 15094 /* Clean up any options if they were not applied */ 15095 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 15096 struct deferred_opt_list *dol; 15097 15098 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 15099 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 15100 free(dol, M_TCPDO); 15101 } 15102 /* rack does not use force data but other stacks may clear it */ 15103 if (rack->r_ctl.crte != NULL) { 15104 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 15105 rack->rack_hdrw_pacing = 0; 15106 rack->r_ctl.crte = NULL; 15107 } 15108 #ifdef TCP_BLACKBOX 15109 tcp_log_flowend(tp); 15110 #endif 15111 /* 15112 * Lets take a different approach to purging just 15113 * get each one and free it like a cum-ack would and 15114 * not use a foreach loop. 
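 * Two passes follow: first the tqhash of outstanding sendmap entries
 * is drained, then the rc_free cache; both decrement
 * rc_num_maps_alloced so the KASSERT below can verify that every map
 * was returned.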
15115 */ 15116 rsm = tqhash_min(rack->r_ctl.tqh); 15117 while (rsm) { 15118 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); 15119 rack->r_ctl.rc_num_maps_alloced--; 15120 uma_zfree(rack_zone, rsm); 15121 rsm = tqhash_min(rack->r_ctl.tqh); 15122 } 15123 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15124 while (rsm) { 15125 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 15126 rack->r_ctl.rc_num_maps_alloced--; 15127 rack->rc_free_cnt--; 15128 cnt_free++; 15129 uma_zfree(rack_zone, rsm); 15130 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15131 } 15132 if (rack->r_ctl.pcm_s != NULL) { 15133 free(rack->r_ctl.pcm_s, M_TCPPCM); 15134 rack->r_ctl.pcm_s = NULL; 15135 rack->r_ctl.pcm_i.cnt_alloc = 0; 15136 rack->r_ctl.pcm_i.cnt = 0; 15137 } 15138 if ((rack->r_ctl.rc_num_maps_alloced > 0) && 15139 (tcp_bblogging_on(tp))) { 15140 union tcp_log_stackspecific log; 15141 struct timeval tv; 15142 15143 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15144 log.u_bbr.flex8 = 10; 15145 log.u_bbr.flex1 = rack->r_ctl.rc_num_maps_alloced; 15146 log.u_bbr.flex2 = rack->rc_free_cnt; 15147 log.u_bbr.flex3 = cnt_free; 15148 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15149 rsm = tqhash_min(rack->r_ctl.tqh); 15150 log.u_bbr.delRate = (uintptr_t)rsm; 15151 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15152 log.u_bbr.cur_del_rate = (uintptr_t)rsm; 15153 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15154 log.u_bbr.pkt_epoch = __LINE__; 15155 (void)tcp_log_event(tp, NULL, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 15156 0, &log, false, NULL, NULL, 0, &tv); 15157 } 15158 KASSERT((rack->r_ctl.rc_num_maps_alloced == 0), 15159 ("rack:%p num_aloc:%u after freeing all?", 15160 rack, 15161 rack->r_ctl.rc_num_maps_alloced)); 15162 rack->rc_free_cnt = 0; 15163 free(rack->r_ctl.tqh, M_TCPFSB); 15164 rack->r_ctl.tqh = NULL; 15165 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 15166 tp->t_fb_ptr = NULL; 15167 } 15168 /* Make sure snd_nxt is correctly set */ 15169 tp->snd_nxt = tp->snd_max; 15170 } 15171 15172 static void 15173 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 15174 { 15175 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 15176 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0; 15177 } 15178 switch (tp->t_state) { 15179 case TCPS_SYN_SENT: 15180 rack->r_state = TCPS_SYN_SENT; 15181 rack->r_substate = rack_do_syn_sent; 15182 break; 15183 case TCPS_SYN_RECEIVED: 15184 rack->r_state = TCPS_SYN_RECEIVED; 15185 rack->r_substate = rack_do_syn_recv; 15186 break; 15187 case TCPS_ESTABLISHED: 15188 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15189 rack->r_state = TCPS_ESTABLISHED; 15190 rack->r_substate = rack_do_established; 15191 break; 15192 case TCPS_CLOSE_WAIT: 15193 rack->r_state = TCPS_CLOSE_WAIT; 15194 rack->r_substate = rack_do_close_wait; 15195 break; 15196 case TCPS_FIN_WAIT_1: 15197 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15198 rack->r_state = TCPS_FIN_WAIT_1; 15199 rack->r_substate = rack_do_fin_wait_1; 15200 break; 15201 case TCPS_CLOSING: 15202 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15203 rack->r_state = TCPS_CLOSING; 15204 rack->r_substate = rack_do_closing; 15205 break; 15206 case TCPS_LAST_ACK: 15207 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15208 rack->r_state = TCPS_LAST_ACK; 15209 rack->r_substate = rack_do_lastack; 15210 break; 15211 case TCPS_FIN_WAIT_2: 15212 rack->r_state = TCPS_FIN_WAIT_2; 15213 rack->r_substate = rack_do_fin_wait_2; 15214 break; 15215 case TCPS_LISTEN: 15216 case TCPS_CLOSED: 15217 case TCPS_TIME_WAIT: 
15218 default: 15219 break; 15220 }; 15221 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 15222 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 15223 15224 } 15225 15226 static void 15227 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 15228 { 15229 /* 15230 * We received an ack, and then did not 15231 * call send or were bounced out due to the 15232 * hpts was running. Now a timer is up as well, is 15233 * it the right timer? 15234 */ 15235 struct rack_sendmap *rsm; 15236 int tmr_up; 15237 15238 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 15239 if (tcp_in_hpts(rack->rc_tp) == 0) { 15240 /* 15241 * Ok we probably need some timer up, but no 15242 * matter what the mask we are not in hpts. We 15243 * may have received an old ack and thus did nothing. 15244 */ 15245 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 15246 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 15247 return; 15248 } 15249 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 15250 return; 15251 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 15252 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 15253 (tmr_up == PACE_TMR_RXT)) { 15254 /* Should be an RXT */ 15255 return; 15256 } 15257 if (rsm == NULL) { 15258 /* Nothing outstanding? */ 15259 if (tp->t_flags & TF_DELACK) { 15260 if (tmr_up == PACE_TMR_DELACK) 15261 /* We are supposed to have delayed ack up and we do */ 15262 return; 15263 } else if (sbavail(&tptosocket(tp)->so_snd) && (tmr_up == PACE_TMR_RXT)) { 15264 /* 15265 * if we hit enobufs then we would expect the possibility 15266 * of nothing outstanding and the RXT up (and the hptsi timer). 15267 */ 15268 return; 15269 } else if (((V_tcp_always_keepalive || 15270 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 15271 (tp->t_state <= TCPS_CLOSING)) && 15272 (tmr_up == PACE_TMR_KEEP) && 15273 (tp->snd_max == tp->snd_una)) { 15274 /* We should have keep alive up and we do */ 15275 return; 15276 } 15277 } 15278 if (SEQ_GT(tp->snd_max, tp->snd_una) && 15279 ((tmr_up == PACE_TMR_TLP) || 15280 (tmr_up == PACE_TMR_RACK) || 15281 (tmr_up == PACE_TMR_RXT))) { 15282 /* 15283 * Either a Rack, TLP or RXT is fine if we 15284 * have outstanding data. 15285 */ 15286 return; 15287 } else if (tmr_up == PACE_TMR_DELACK) { 15288 /* 15289 * If the delayed ack was going to go off 15290 * before the rtx/tlp/rack timer were going to 15291 * expire, then that would be the timer in control. 15292 * Note we don't check the time here trusting the 15293 * code is correct. 15294 */ 15295 return; 15296 } 15297 /* 15298 * Ok the timer originally started is not what we want now. 15299 * We will force the hpts to be stopped if any, and restart 15300 * with the slot set to what was in the saved slot. 
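 * If a pacing deadline (PACE_PKT_OUTPUT) is torn down before it was
 * reached, the unused time is added to rc_agg_early so the pacing
 * code can account for having been let out early.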
15301 */ 15302 if (tcp_in_hpts(rack->rc_tp)) { 15303 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 15304 uint32_t us_cts; 15305 15306 us_cts = tcp_get_usecs(NULL); 15307 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 15308 rack->r_early = 1; 15309 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 15310 } 15311 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 15312 } 15313 tcp_hpts_remove(rack->rc_tp); 15314 } 15315 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 15316 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 15317 } 15318 15319 15320 static void 15321 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts) 15322 { 15323 if ((SEQ_LT(tp->snd_wl1, seq) || 15324 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || 15325 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { 15326 /* keep track of pure window updates */ 15327 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) 15328 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 15329 tp->snd_wnd = tiwin; 15330 rack_validate_fo_sendwin_up(tp, rack); 15331 tp->snd_wl1 = seq; 15332 tp->snd_wl2 = ack; 15333 if (tp->snd_wnd > tp->max_sndwnd) 15334 tp->max_sndwnd = tp->snd_wnd; 15335 rack->r_wanted_output = 1; 15336 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { 15337 tp->snd_wnd = tiwin; 15338 rack_validate_fo_sendwin_up(tp, rack); 15339 tp->snd_wl1 = seq; 15340 tp->snd_wl2 = ack; 15341 } else { 15342 /* Not a valid win update */ 15343 return; 15344 } 15345 if (tp->snd_wnd > tp->max_sndwnd) 15346 tp->max_sndwnd = tp->snd_wnd; 15347 /* Do we exit persists? */ 15348 if ((rack->rc_in_persist != 0) && 15349 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 15350 rack->r_ctl.rc_pace_min_segs))) { 15351 rack_exit_persist(tp, rack, cts); 15352 } 15353 /* Do we enter persists? */ 15354 if ((rack->rc_in_persist == 0) && 15355 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 15356 TCPS_HAVEESTABLISHED(tp->t_state) && 15357 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 15358 sbavail(&tptosocket(tp)->so_snd) && 15359 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 15360 /* 15361 * Here the rwnd is less than 15362 * the pacing size, we are established, 15363 * nothing is outstanding, and there is 15364 * data to send. Enter persists. 
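 * (A collapsed window is treated the same as having nothing
 * outstanding for this check.)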
15365 */ 15366 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, ack); 15367 } 15368 } 15369 15370 static void 15371 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq) 15372 { 15373 15374 if (tcp_bblogging_on(rack->rc_tp)) { 15375 struct inpcb *inp = tptoinpcb(tp); 15376 union tcp_log_stackspecific log; 15377 struct timeval ltv; 15378 char tcp_hdr_buf[60]; 15379 struct tcphdr *th; 15380 struct timespec ts; 15381 uint32_t orig_snd_una; 15382 uint8_t xx = 0; 15383 15384 #ifdef TCP_REQUEST_TRK 15385 struct tcp_sendfile_track *tcp_req; 15386 15387 if (SEQ_GT(ae->ack, tp->snd_una)) { 15388 tcp_req = tcp_req_find_req_for_seq(tp, (ae->ack-1)); 15389 } else { 15390 tcp_req = tcp_req_find_req_for_seq(tp, ae->ack); 15391 } 15392 #endif 15393 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15394 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 15395 if (rack->rack_no_prr == 0) 15396 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 15397 else 15398 log.u_bbr.flex1 = 0; 15399 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 15400 log.u_bbr.use_lt_bw <<= 1; 15401 log.u_bbr.use_lt_bw |= rack->r_might_revert; 15402 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 15403 log.u_bbr.bbr_state = rack->rc_free_cnt; 15404 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 15405 log.u_bbr.pkts_out = tp->t_maxseg; 15406 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 15407 log.u_bbr.flex7 = 1; 15408 log.u_bbr.lost = ae->flags; 15409 log.u_bbr.cwnd_gain = ackval; 15410 log.u_bbr.pacing_gain = 0x2; 15411 if (ae->flags & TSTMP_HDWR) { 15412 /* Record the hardware timestamp if present */ 15413 log.u_bbr.flex3 = M_TSTMP; 15414 ts.tv_sec = ae->timestamp / 1000000000; 15415 ts.tv_nsec = ae->timestamp % 1000000000; 15416 ltv.tv_sec = ts.tv_sec; 15417 ltv.tv_usec = ts.tv_nsec / 1000; 15418 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 15419 } else if (ae->flags & TSTMP_LRO) { 15420 /* Record the LRO the arrival timestamp */ 15421 log.u_bbr.flex3 = M_TSTMP_LRO; 15422 ts.tv_sec = ae->timestamp / 1000000000; 15423 ts.tv_nsec = ae->timestamp % 1000000000; 15424 ltv.tv_sec = ts.tv_sec; 15425 ltv.tv_usec = ts.tv_nsec / 1000; 15426 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 15427 } 15428 log.u_bbr.timeStamp = tcp_get_usecs(<v); 15429 /* Log the rcv time */ 15430 log.u_bbr.delRate = ae->timestamp; 15431 #ifdef TCP_REQUEST_TRK 15432 log.u_bbr.applimited = tp->t_tcpreq_closed; 15433 log.u_bbr.applimited <<= 8; 15434 log.u_bbr.applimited |= tp->t_tcpreq_open; 15435 log.u_bbr.applimited <<= 8; 15436 log.u_bbr.applimited |= tp->t_tcpreq_req; 15437 if (tcp_req) { 15438 /* Copy out any client req info */ 15439 /* seconds */ 15440 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); 15441 /* useconds */ 15442 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); 15443 log.u_bbr.rttProp = tcp_req->timestamp; 15444 log.u_bbr.cur_del_rate = tcp_req->start; 15445 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { 15446 log.u_bbr.flex8 |= 1; 15447 } else { 15448 log.u_bbr.flex8 |= 2; 15449 log.u_bbr.bw_inuse = tcp_req->end; 15450 } 15451 log.u_bbr.flex6 = tcp_req->start_seq; 15452 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { 15453 log.u_bbr.flex8 |= 4; 15454 log.u_bbr.epoch = tcp_req->end_seq; 15455 } 15456 } 15457 #endif 15458 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf)); 15459 th = (struct tcphdr *)tcp_hdr_buf; 15460 th->th_seq = ae->seq; 15461 th->th_ack = ae->ack; 15462 th->th_win = ae->win; 15463 /* Now fill in the ports */ 15464 th->th_sport = inp->inp_fport; 
15465 th->th_dport = inp->inp_lport; 15466 tcp_set_flags(th, ae->flags); 15467 /* Now do we have a timestamp option? */ 15468 if (ae->flags & HAS_TSTMP) { 15469 u_char *cp; 15470 uint32_t val; 15471 15472 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); 15473 cp = (u_char *)(th + 1); 15474 *cp = TCPOPT_NOP; 15475 cp++; 15476 *cp = TCPOPT_NOP; 15477 cp++; 15478 *cp = TCPOPT_TIMESTAMP; 15479 cp++; 15480 *cp = TCPOLEN_TIMESTAMP; 15481 cp++; 15482 val = htonl(ae->ts_value); 15483 bcopy((char *)&val, 15484 (char *)cp, sizeof(uint32_t)); 15485 val = htonl(ae->ts_echo); 15486 bcopy((char *)&val, 15487 (char *)(cp + 4), sizeof(uint32_t)); 15488 } else 15489 th->th_off = (sizeof(struct tcphdr) >> 2); 15490 15491 /* 15492 * For sane logging we need to play a little trick. 15493 * If the ack were fully processed we would have moved 15494 * snd_una to high_seq, but since compressed acks are 15495 * processed in two phases, at this point (logging) snd_una 15496 * won't be advanced. So we would see multiple acks showing 15497 * the advancement. We can prevent that by "pretending" that 15498 * snd_una was advanced and then un-advancing it so that the 15499 * logging code has the right value for tlb_snd_una. 15500 */ 15501 if (tp->snd_una != high_seq) { 15502 orig_snd_una = tp->snd_una; 15503 tp->snd_una = high_seq; 15504 xx = 1; 15505 } else 15506 xx = 0; 15507 TCP_LOG_EVENTP(tp, th, 15508 &tptosocket(tp)->so_rcv, 15509 &tptosocket(tp)->so_snd, TCP_LOG_IN, 0, 15510 0, &log, true, <v); 15511 if (xx) { 15512 tp->snd_una = orig_snd_una; 15513 } 15514 } 15515 15516 } 15517 15518 static void 15519 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts) 15520 { 15521 uint32_t us_rtt; 15522 /* 15523 * A persist or keep-alive was forced out, update our 15524 * min rtt time. Note now worry about lost responses. 15525 * When a subsequent keep-alive or persist times out 15526 * and forced_ack is still on, then the last probe 15527 * was not responded to. In such cases we have a 15528 * sysctl that controls the behavior. Either we apply 15529 * the rtt but with reduced confidence (0). Or we just 15530 * plain don't apply the rtt estimate. Having data flow 15531 * will clear the probe_not_answered flag i.e. cum-ack 15532 * move forward <or> exiting and reentering persists. 15533 */ 15534 15535 rack->forced_ack = 0; 15536 rack->rc_tp->t_rxtshift = 0; 15537 if ((rack->rc_in_persist && 15538 (tiwin == rack->rc_tp->snd_wnd)) || 15539 (rack->rc_in_persist == 0)) { 15540 /* 15541 * In persists only apply the RTT update if this is 15542 * a response to our window probe. And that 15543 * means the rwnd sent must match the current 15544 * snd_wnd. If it does not, then we got a 15545 * window update ack instead. For keepalive 15546 * we allow the answer no matter what the window. 15547 * 15548 * Note that if the probe_not_answered is set then 15549 * the forced_ack_ts is the oldest one i.e. the first 15550 * probe sent that might have been lost. This assures 15551 * us that if we do calculate an RTT it is longer not 15552 * some short thing. 
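 * Below, an answered probe updates the RTT filter with full
 * confidence (3); if the probe may itself have been retransmitted the
 * sample is only applied, with confidence 0, when
 * rack_apply_rtt_with_reduced_conf is set.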
15553 */ 15554 if (rack->rc_in_persist) 15555 counter_u64_add(rack_persists_acks, 1); 15556 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 15557 if (us_rtt == 0) 15558 us_rtt = 1; 15559 if (rack->probe_not_answered == 0) { 15560 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 15561 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); 15562 } else { 15563 /* We have a retransmitted probe here too */ 15564 if (rack_apply_rtt_with_reduced_conf) { 15565 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 15566 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1); 15567 } 15568 } 15569 } 15570 } 15571 15572 static void 15573 rack_new_round_starts(struct tcpcb *tp, struct tcp_rack *rack, uint32_t high_seq) 15574 { 15575 /* 15576 * The next send has occurred mark the end of the round 15577 * as when that data gets acknowledged. We can 15578 * also do common things we might need to do when 15579 * a round begins. 15580 */ 15581 rack->r_ctl.roundends = tp->snd_max; 15582 rack->rc_new_rnd_needed = 0; 15583 rack_log_hystart_event(rack, tp->snd_max, 4); 15584 } 15585 15586 15587 static void 15588 rack_log_pcm(struct tcp_rack *rack, uint8_t mod, uint32_t flex1, uint32_t flex2, 15589 uint32_t flex3) 15590 { 15591 if (tcp_bblogging_on(rack->rc_tp)) { 15592 union tcp_log_stackspecific log; 15593 struct timeval tv; 15594 15595 (void)tcp_get_usecs(&tv); 15596 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15597 log.u_bbr.timeStamp = tcp_tv_to_usectick(&tv); 15598 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15599 log.u_bbr.flex8 = mod; 15600 log.u_bbr.flex1 = flex1; 15601 log.u_bbr.flex2 = flex2; 15602 log.u_bbr.flex3 = flex3; 15603 log.u_bbr.flex4 = rack_pcm_every_n_rounds; 15604 log.u_bbr.flex5 = rack->r_ctl.pcm_idle_rounds; 15605 log.u_bbr.bbr_substate = rack->pcm_needed; 15606 log.u_bbr.bbr_substate <<= 1; 15607 log.u_bbr.bbr_substate |= rack->pcm_in_progress; 15608 log.u_bbr.bbr_substate <<= 1; 15609 log.u_bbr.bbr_substate |= rack->pcm_enabled; /* bits are NIE for Needed, Inprogress, Enabled */ 15610 (void)tcp_log_event(rack->rc_tp, NULL, NULL, NULL, TCP_PCM_MEASURE, ERRNO_UNK, 15611 0, &log, false, NULL, NULL, 0, &tv); 15612 } 15613 } 15614 15615 static void 15616 rack_new_round_setup(struct tcpcb *tp, struct tcp_rack *rack, uint32_t high_seq) 15617 { 15618 /* 15619 * The round (current_round) has ended. We now 15620 * setup for the next round by incrementing the 15621 * round numnber and doing any round specific 15622 * things. 15623 */ 15624 rack_log_hystart_event(rack, high_seq, 21); 15625 rack->r_ctl.current_round++; 15626 /* New round (current_round) begins at next send */ 15627 rack->rc_new_rnd_needed = 1; 15628 if ((rack->pcm_enabled == 1) && 15629 (rack->pcm_needed == 0) && 15630 (rack->pcm_in_progress == 0)) { 15631 /* 15632 * If we have enabled PCM, then we need to 15633 * check if the round has adanced to the state 15634 * where one is required. 
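 * A measurement is flagged as needed once the number of rounds since
 * the last one, plus pcm_idle_rounds, reaches
 * rack_pcm_every_n_rounds.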
15635 */ 15636 int rnds; 15637 15638 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; 15639 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { 15640 rack->pcm_needed = 1; 15641 rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_round ); 15642 } else if (rack_verbose_logging) { 15643 rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_round ); 15644 } 15645 } 15646 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { 15647 /* We have hystart enabled send the round info in */ 15648 if (CC_ALGO(tp)->newround != NULL) { 15649 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); 15650 } 15651 } 15652 /* 15653 * For DGP an initial startup check. We want to validate 15654 * that we are not just pushing on slow-start and just 15655 * not gaining.. i.e. filling buffers without getting any 15656 * boost in b/w during the inital slow-start. 15657 */ 15658 if (rack->dgp_on && 15659 (rack->rc_initial_ss_comp == 0) && 15660 (tp->snd_cwnd < tp->snd_ssthresh) && 15661 (rack->r_ctl.num_measurements >= RACK_REQ_AVG) && 15662 (rack->r_ctl.gp_rnd_thresh > 0) && 15663 ((rack->r_ctl.current_round - rack->r_ctl.last_rnd_of_gp_rise) >= rack->r_ctl.gp_rnd_thresh)) { 15664 15665 /* 15666 * We are in the initial SS and we have hd rack_rnd_cnt_req rounds(def:5) where 15667 * we have not gained the required amount in the gp_est (120.0% aka 1200). Lets 15668 * exit SS. 15669 * 15670 * Pick up the flight size now as we enter slowstart (not the 15671 * cwnd which may be inflated). 15672 */ 15673 rack->rc_initial_ss_comp = 1; 15674 15675 if (tcp_bblogging_on(rack->rc_tp)) { 15676 union tcp_log_stackspecific log; 15677 struct timeval tv; 15678 15679 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15680 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15681 log.u_bbr.flex1 = rack->r_ctl.current_round; 15682 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; 15683 log.u_bbr.flex3 = rack->r_ctl.gp_rnd_thresh; 15684 log.u_bbr.flex4 = rack->r_ctl.gate_to_fs; 15685 log.u_bbr.flex5 = rack->r_ctl.ss_hi_fs; 15686 log.u_bbr.flex8 = 40; 15687 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 15688 0, &log, false, NULL, __func__, __LINE__,&tv); 15689 } 15690 if ((rack->r_ctl.gate_to_fs == 1) && 15691 (tp->snd_cwnd > rack->r_ctl.ss_hi_fs)) { 15692 tp->snd_cwnd = rack->r_ctl.ss_hi_fs; 15693 } 15694 tp->snd_ssthresh = tp->snd_cwnd - 1; 15695 /* Turn off any fast output running */ 15696 rack->r_fast_output = 0; 15697 } 15698 } 15699 15700 static int 15701 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) 15702 { 15703 /* 15704 * Handle a "special" compressed ack mbuf. Each incoming 15705 * ack has only four possible dispositions: 15706 * 15707 * A) It moves the cum-ack forward 15708 * B) It is behind the cum-ack. 15709 * C) It is a window-update ack. 15710 * D) It is a dup-ack. 15711 * 15712 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES 15713 * in the incoming mbuf. We also need to still pay attention 15714 * to nxt_pkt since there may be another packet after this 15715 * one. 
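 * In the classification below these map to ACK_CUMACK (case A),
 * ACK_BEHIND (case B), ACK_RWND (case C) and ACK_DUPACK (case D) in
 * ae->ack_val_set.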
15716 */ 15717 #ifdef TCP_ACCOUNTING 15718 uint64_t ts_val; 15719 uint64_t rdstc; 15720 #endif 15721 int segsiz; 15722 struct timespec ts; 15723 struct tcp_rack *rack; 15724 struct tcp_ackent *ae; 15725 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack; 15726 int cnt, i, did_out, ourfinisacked = 0; 15727 struct tcpopt to_holder, *to = NULL; 15728 #ifdef TCP_ACCOUNTING 15729 int win_up_req = 0; 15730 #endif 15731 int nsegs = 0; 15732 int under_pacing = 0; 15733 int post_recovery = 0; 15734 #ifdef TCP_ACCOUNTING 15735 sched_pin(); 15736 #endif 15737 rack = (struct tcp_rack *)tp->t_fb_ptr; 15738 if (rack->gp_ready && 15739 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) 15740 under_pacing = 1; 15741 15742 if (rack->r_state != tp->t_state) 15743 rack_set_state(tp, rack); 15744 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 15745 (tp->t_flags & TF_GPUTINPROG)) { 15746 /* 15747 * We have a goodput in progress 15748 * and we have entered a late state. 15749 * Do we have enough data in the sb 15750 * to handle the GPUT request? 15751 */ 15752 uint32_t bytes; 15753 15754 bytes = tp->gput_ack - tp->gput_seq; 15755 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 15756 bytes += tp->gput_seq - tp->snd_una; 15757 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 15758 /* 15759 * There are not enough bytes in the socket 15760 * buffer that have been sent to cover this 15761 * measurement. Cancel it. 15762 */ 15763 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 15764 rack->r_ctl.rc_gp_srtt /*flex1*/, 15765 tp->gput_seq, 15766 0, 0, 18, __LINE__, NULL, 0); 15767 tp->t_flags &= ~TF_GPUTINPROG; 15768 } 15769 } 15770 to = &to_holder; 15771 to->to_flags = 0; 15772 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 15773 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 15774 cnt = m->m_len / sizeof(struct tcp_ackent); 15775 counter_u64_add(rack_multi_single_eq, cnt); 15776 high_seq = tp->snd_una; 15777 the_win = tp->snd_wnd; 15778 win_seq = tp->snd_wl1; 15779 win_upd_ack = tp->snd_wl2; 15780 cts = tcp_tv_to_usectick(tv); 15781 ms_cts = tcp_tv_to_mssectick(tv); 15782 rack->r_ctl.rc_rcvtime = cts; 15783 segsiz = ctf_fixed_maxseg(tp); 15784 if ((rack->rc_gp_dyn_mul) && 15785 (rack->use_fixed_rate == 0) && 15786 (rack->rc_always_pace)) { 15787 /* Check in on probertt */ 15788 rack_check_probe_rtt(rack, cts); 15789 } 15790 for (i = 0; i < cnt; i++) { 15791 #ifdef TCP_ACCOUNTING 15792 ts_val = get_cyclecount(); 15793 #endif 15794 rack_clear_rate_sample(rack); 15795 ae = ((mtod(m, struct tcp_ackent *)) + i); 15796 if (ae->flags & TH_FIN) 15797 rack_log_pacing_delay_calc(rack, 15798 0, 15799 0, 15800 0, 15801 rack_get_gp_est(rack), /* delRate */ 15802 rack_get_lt_bw(rack), /* rttProp */ 15803 20, __LINE__, NULL, 0); 15804 /* Setup the window */ 15805 tiwin = ae->win << tp->snd_scale; 15806 if (tiwin > rack->r_ctl.rc_high_rwnd) 15807 rack->r_ctl.rc_high_rwnd = tiwin; 15808 /* figure out the type of ack */ 15809 if (SEQ_LT(ae->ack, high_seq)) { 15810 /* Case B*/ 15811 ae->ack_val_set = ACK_BEHIND; 15812 } else if (SEQ_GT(ae->ack, high_seq)) { 15813 /* Case A */ 15814 ae->ack_val_set = ACK_CUMACK; 15815 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ 15816 /* Case D */ 15817 ae->ack_val_set = ACK_DUPACK; 15818 } else { 15819 /* Case C */ 15820 ae->ack_val_set = ACK_RWND; 15821 } 15822 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__); 15823 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 15824 /* Validate timestamp */ 15825 if 
(ae->flags & HAS_TSTMP) { 15826 /* Setup for a timestamp */ 15827 to->to_flags = TOF_TS; 15828 ae->ts_echo -= tp->ts_offset; 15829 to->to_tsecr = ae->ts_echo; 15830 to->to_tsval = ae->ts_value; 15831 /* 15832 * If echoed timestamp is later than the current time, fall back to 15833 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 15834 * were used when this connection was established. 15835 */ 15836 if (TSTMP_GT(ae->ts_echo, ms_cts)) 15837 to->to_tsecr = 0; 15838 if (tp->ts_recent && 15839 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 15840 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 15841 #ifdef TCP_ACCOUNTING 15842 rdstc = get_cyclecount(); 15843 if (rdstc > ts_val) { 15844 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15845 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 15846 } 15847 } 15848 #endif 15849 continue; 15850 } 15851 } 15852 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 15853 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 15854 tp->ts_recent_age = tcp_ts_getticks(); 15855 tp->ts_recent = ae->ts_value; 15856 } 15857 } else { 15858 /* Setup for a no options */ 15859 to->to_flags = 0; 15860 } 15861 /* Update the rcv time and perform idle reduction possibly */ 15862 if (tp->t_idle_reduce && 15863 (tp->snd_max == tp->snd_una) && 15864 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 15865 counter_u64_add(rack_input_idle_reduces, 1); 15866 rack_cc_after_idle(rack, tp); 15867 } 15868 tp->t_rcvtime = ticks; 15869 /* Now what about ECN of a chain of pure ACKs? */ 15870 if (tcp_ecn_input_segment(tp, ae->flags, 0, 15871 tcp_packets_this_ack(tp, ae->ack), 15872 ae->codepoint)) 15873 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__); 15874 #ifdef TCP_ACCOUNTING 15875 /* Count for the specific type of ack in */ 15876 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15877 tp->tcp_cnt_counters[ae->ack_val_set]++; 15878 } 15879 #endif 15880 /* 15881 * Note how we could move up these in the determination 15882 * above, but we don't so that way the timestamp checks (and ECN) 15883 * is done first before we do any processing on the ACK. 15884 * The non-compressed path through the code has this 15885 * weakness (noted by @jtl) that it actually does some 15886 * processing before verifying the timestamp information. 15887 * We don't take that path here which is why we set 15888 * the ack_val_set first, do the timestamp and ecn 15889 * processing, and then look at what we have setup. 
15890 */ 15891 if (ae->ack_val_set == ACK_BEHIND) { 15892 /* 15893 * Case B: flag reordering if the window is not closed, 15894 * since otherwise it could just be a keep-alive or persist probe 15895 */ 15896 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { 15897 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 15898 if (rack->r_ctl.rc_reorder_ts == 0) 15899 rack->r_ctl.rc_reorder_ts = 1; 15900 } 15901 } else if (ae->ack_val_set == ACK_DUPACK) { 15902 /* Case D */ 15903 rack_strike_dupack(rack, ae->ack); 15904 } else if (ae->ack_val_set == ACK_RWND) { 15905 /* Case C */ 15906 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 15907 ts.tv_sec = ae->timestamp / 1000000000; 15908 ts.tv_nsec = ae->timestamp % 1000000000; 15909 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 15910 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 15911 } else { 15912 rack->r_ctl.act_rcv_time = *tv; 15913 } 15914 if (rack->forced_ack) { 15915 rack_handle_probe_response(rack, tiwin, 15916 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 15917 } 15918 #ifdef TCP_ACCOUNTING 15919 win_up_req = 1; 15920 #endif 15921 win_upd_ack = ae->ack; 15922 win_seq = ae->seq; 15923 the_win = tiwin; 15924 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts); 15925 } else { 15926 /* Case A */ 15927 if (SEQ_GT(ae->ack, tp->snd_max)) { 15928 /* 15929 * We just send an ack since the incoming 15930 * ack is beyond the largest seq we sent. 15931 */ 15932 if ((tp->t_flags & TF_ACKNOW) == 0) { 15933 ctf_ack_war_checks(tp); 15934 if (tp->t_flags & TF_ACKNOW) 15935 rack->r_wanted_output = 1; 15936 } 15937 } else { 15938 nsegs++; 15939 /* If the window changed, set up to update */ 15940 if (tiwin != tp->snd_wnd) { 15941 win_upd_ack = ae->ack; 15942 win_seq = ae->seq; 15943 the_win = tiwin; 15944 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts); 15945 } 15946 #ifdef TCP_ACCOUNTING 15947 /* Account for the acks */ 15948 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15949 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); 15950 } 15951 #endif 15952 high_seq = ae->ack; 15953 /* Setup our act_rcv_time */ 15954 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 15955 ts.tv_sec = ae->timestamp / 1000000000; 15956 ts.tv_nsec = ae->timestamp % 1000000000; 15957 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 15958 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 15959 } else { 15960 rack->r_ctl.act_rcv_time = *tv; 15961 } 15962 rack_process_to_cumack(tp, rack, ae->ack, cts, to, 15963 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); 15964 #ifdef TCP_REQUEST_TRK 15965 rack_req_check_for_comp(rack, high_seq); 15966 #endif 15967 if (rack->rc_dsack_round_seen) { 15968 /* Is the dsack round over?
*/ 15969 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { 15970 /* Yes it is */ 15971 rack->rc_dsack_round_seen = 0; 15972 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 15973 } 15974 } 15975 } 15976 } 15977 /* And lets be sure to commit the rtt measurements for this ack */ 15978 tcp_rack_xmit_timer_commit(rack, tp); 15979 #ifdef TCP_ACCOUNTING 15980 rdstc = get_cyclecount(); 15981 if (rdstc > ts_val) { 15982 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15983 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 15984 if (ae->ack_val_set == ACK_CUMACK) 15985 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); 15986 } 15987 } 15988 #endif 15989 } 15990 #ifdef TCP_ACCOUNTING 15991 ts_val = get_cyclecount(); 15992 #endif 15993 /* Tend to any collapsed window */ 15994 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) { 15995 /* The peer collapsed the window */ 15996 rack_collapsed_window(rack, (tp->snd_max - high_seq), high_seq, __LINE__); 15997 } else if (rack->rc_has_collapsed) 15998 rack_un_collapse_window(rack, __LINE__); 15999 if ((rack->r_collapse_point_valid) && 16000 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point))) 16001 rack->r_collapse_point_valid = 0; 16002 acked_amount = acked = (high_seq - tp->snd_una); 16003 if (acked) { 16004 /* 16005 * The draft (v3) calls for us to use SEQ_GEQ, but that 16006 * causes issues when we are just going app limited. Lets 16007 * instead use SEQ_GT <or> where its equal but more data 16008 * is outstanding. 16009 * 16010 * Also make sure we are on the last ack of a series. We 16011 * have to have all the ack's processed in queue to know 16012 * if there is something left outstanding. 16013 * 16014 */ 16015 if (SEQ_GEQ(high_seq, rack->r_ctl.roundends) && 16016 (rack->rc_new_rnd_needed == 0) && 16017 (nxt_pkt == 0)) { 16018 /* 16019 * We have crossed into a new round with 16020 * this th_ack value. 16021 */ 16022 rack_new_round_setup(tp, rack, high_seq); 16023 } 16024 /* 16025 * Clear the probe not answered flag 16026 * since cum-ack moved forward. 16027 */ 16028 rack->probe_not_answered = 0; 16029 if (tp->t_flags & TF_NEEDSYN) { 16030 /* 16031 * T/TCP: Connection was half-synchronized, and our SYN has 16032 * been ACK'd (so connection is now fully synchronized). Go 16033 * to non-starred state, increment snd_una for ACK of SYN, 16034 * and check if we can do window scaling. 16035 */ 16036 tp->t_flags &= ~TF_NEEDSYN; 16037 tp->snd_una++; 16038 acked_amount = acked = (high_seq - tp->snd_una); 16039 } 16040 if (acked > sbavail(&so->so_snd)) 16041 acked_amount = sbavail(&so->so_snd); 16042 if (IN_FASTRECOVERY(tp->t_flags) && 16043 (rack->rack_no_prr == 0)) 16044 rack_update_prr(tp, rack, acked_amount, high_seq); 16045 if (IN_RECOVERY(tp->t_flags)) { 16046 if (SEQ_LT(high_seq, tp->snd_recover) && 16047 (SEQ_LT(high_seq, tp->snd_max))) { 16048 tcp_rack_partialack(tp); 16049 } else { 16050 rack_post_recovery(tp, high_seq); 16051 post_recovery = 1; 16052 } 16053 } else if ((rack->rto_from_rec == 1) && 16054 SEQ_GEQ(high_seq, tp->snd_recover)) { 16055 /* 16056 * We were in recovery, hit a rxt timeout 16057 * and never re-entered recovery. The timeout(s) 16058 * made up all the lost data. In such a case 16059 * we need to clear the rto_from_rec flag. 
16060 */ 16061 rack->rto_from_rec = 0; 16062 } 16063 /* Handle the rack-log-ack part (sendmap) */ 16064 if ((sbused(&so->so_snd) == 0) && 16065 (acked > acked_amount) && 16066 (tp->t_state >= TCPS_FIN_WAIT_1) && 16067 (tp->t_flags & TF_SENTFIN)) { 16068 /* 16069 * We must be sure our fin 16070 * was sent and acked (we can be 16071 * in FIN_WAIT_1 without having 16072 * sent the fin). 16073 */ 16074 ourfinisacked = 1; 16075 /* 16076 * Lets make sure snd_una is updated 16077 * since most likely acked_amount = 0 (it 16078 * should be). 16079 */ 16080 tp->snd_una = high_seq; 16081 } 16082 /* Did we make a RTO error? */ 16083 if ((tp->t_flags & TF_PREVVALID) && 16084 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 16085 tp->t_flags &= ~TF_PREVVALID; 16086 if (tp->t_rxtshift == 1 && 16087 (int)(ticks - tp->t_badrxtwin) < 0) 16088 rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__); 16089 } 16090 /* Handle the data in the socket buffer */ 16091 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 16092 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 16093 if (acked_amount > 0) { 16094 uint32_t p_cwnd; 16095 struct mbuf *mfree; 16096 16097 if (post_recovery) { 16098 /* 16099 * Grab the segsiz, multiply by 2 and add the snd_cwnd 16100 * that is the max the CC should add if we are exiting 16101 * recovery and doing a late add. 16102 */ 16103 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16104 p_cwnd <<= 1; 16105 p_cwnd += tp->snd_cwnd; 16106 } 16107 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, post_recovery); 16108 if (post_recovery && (tp->snd_cwnd > p_cwnd)) { 16109 /* Must be non-newreno (cubic) getting too ahead of itself */ 16110 tp->snd_cwnd = p_cwnd; 16111 } 16112 SOCK_SENDBUF_LOCK(so); 16113 mfree = sbcut_locked(&so->so_snd, acked_amount); 16114 tp->snd_una = high_seq; 16115 /* Note we want to hold the sb lock through the sendmap adjust */ 16116 rack_adjust_sendmap_head(rack, &so->so_snd); 16117 /* Wake up the socket if we have room to write more */ 16118 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 16119 sowwakeup_locked(so); 16120 m_freem(mfree); 16121 } 16122 /* update progress */ 16123 tp->t_acktime = ticks; 16124 rack_log_progress_event(rack, tp, tp->t_acktime, 16125 PROGRESS_UPDATE, __LINE__); 16126 /* Clear out shifts and such */ 16127 tp->t_rxtshift = 0; 16128 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 16129 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 16130 rack->rc_tlp_in_progress = 0; 16131 rack->r_ctl.rc_tlp_cnt_out = 0; 16132 /* Send recover and snd_nxt must be dragged along */ 16133 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 16134 tp->snd_recover = tp->snd_una; 16135 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 16136 tp->snd_nxt = tp->snd_max; 16137 /* 16138 * If the RXT timer is running we want to 16139 * stop it, so we can restart a TLP (or new RXT). 
16140 */ 16141 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 16142 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16143 tp->snd_wl2 = high_seq; 16144 tp->t_dupacks = 0; 16145 if (under_pacing && 16146 (rack->use_fixed_rate == 0) && 16147 (rack->in_probe_rtt == 0) && 16148 rack->rc_gp_dyn_mul && 16149 rack->rc_always_pace) { 16150 /* Check if we are dragging bottom */ 16151 rack_check_bottom_drag(tp, rack, so); 16152 } 16153 if (tp->snd_una == tp->snd_max) { 16154 tp->t_flags &= ~TF_PREVVALID; 16155 rack->r_ctl.retran_during_recovery = 0; 16156 rack->rc_suspicious = 0; 16157 rack->r_ctl.dsack_byte_cnt = 0; 16158 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 16159 if (rack->r_ctl.rc_went_idle_time == 0) 16160 rack->r_ctl.rc_went_idle_time = 1; 16161 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 16162 if (sbavail(&tptosocket(tp)->so_snd) == 0) 16163 tp->t_acktime = 0; 16164 /* Set so we might enter persists... */ 16165 rack->r_wanted_output = 1; 16166 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16167 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 16168 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 16169 (sbavail(&so->so_snd) == 0) && 16170 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 16171 /* 16172 * The socket was gone and the 16173 * peer sent data (not now in the past), time to 16174 * reset him. 16175 */ 16176 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16177 /* tcp_close will kill the inp pre-log the Reset */ 16178 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 16179 #ifdef TCP_ACCOUNTING 16180 rdstc = get_cyclecount(); 16181 if (rdstc > ts_val) { 16182 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16183 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16184 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16185 } 16186 } 16187 #endif 16188 m_freem(m); 16189 tp = tcp_close(tp); 16190 if (tp == NULL) { 16191 #ifdef TCP_ACCOUNTING 16192 sched_unpin(); 16193 #endif 16194 return (1); 16195 } 16196 /* 16197 * We would normally do drop-with-reset which would 16198 * send back a reset. We can't since we don't have 16199 * all the needed bits. Instead lets arrange for 16200 * a call to tcp_output(). That way since we 16201 * are in the closed state we will generate a reset. 16202 * 16203 * Note if tcp_accounting is on we don't unpin since 16204 * we do that after the goto label. 16205 */ 16206 goto send_out_a_rst; 16207 } 16208 if ((sbused(&so->so_snd) == 0) && 16209 (tp->t_state >= TCPS_FIN_WAIT_1) && 16210 (tp->t_flags & TF_SENTFIN)) { 16211 /* 16212 * If we can't receive any more data, then closing user can 16213 * proceed. Starting the timer is contrary to the 16214 * specification, but if we don't get a FIN we'll hang 16215 * forever. 16216 * 16217 */ 16218 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 16219 soisdisconnected(so); 16220 tcp_timer_activate(tp, TT_2MSL, 16221 (tcp_fast_finwait2_recycle ? 16222 tcp_finwait2_timeout : 16223 TP_MAXIDLE(tp))); 16224 } 16225 if (ourfinisacked == 0) { 16226 /* 16227 * We don't change to fin-wait-2 if we have our fin acked 16228 * which means we are probably in TCPS_CLOSING. 
16229 */ 16230 tcp_state_change(tp, TCPS_FIN_WAIT_2); 16231 } 16232 } 16233 } 16234 /* Wake up the socket if we have room to write more */ 16235 if (sbavail(&so->so_snd)) { 16236 rack->r_wanted_output = 1; 16237 if (ctf_progress_timeout_check(tp, true)) { 16238 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 16239 tp, tick, PROGRESS_DROP, __LINE__); 16240 /* 16241 * We cheat here and don't send a RST, we should send one 16242 * when the pacer drops the connection. 16243 */ 16244 #ifdef TCP_ACCOUNTING 16245 rdstc = get_cyclecount(); 16246 if (rdstc > ts_val) { 16247 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16248 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16249 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16250 } 16251 } 16252 sched_unpin(); 16253 #endif 16254 (void)tcp_drop(tp, ETIMEDOUT); 16255 m_freem(m); 16256 return (1); 16257 } 16258 } 16259 if (ourfinisacked) { 16260 switch(tp->t_state) { 16261 case TCPS_CLOSING: 16262 #ifdef TCP_ACCOUNTING 16263 rdstc = get_cyclecount(); 16264 if (rdstc > ts_val) { 16265 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16266 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16267 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16268 } 16269 } 16270 sched_unpin(); 16271 #endif 16272 tcp_twstart(tp); 16273 m_freem(m); 16274 return (1); 16275 break; 16276 case TCPS_LAST_ACK: 16277 #ifdef TCP_ACCOUNTING 16278 rdstc = get_cyclecount(); 16279 if (rdstc > ts_val) { 16280 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16281 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16282 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16283 } 16284 } 16285 sched_unpin(); 16286 #endif 16287 tp = tcp_close(tp); 16288 ctf_do_drop(m, tp); 16289 return (1); 16290 break; 16291 case TCPS_FIN_WAIT_1: 16292 #ifdef TCP_ACCOUNTING 16293 rdstc = get_cyclecount(); 16294 if (rdstc > ts_val) { 16295 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16296 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16297 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16298 } 16299 } 16300 #endif 16301 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 16302 soisdisconnected(so); 16303 tcp_timer_activate(tp, TT_2MSL, 16304 (tcp_fast_finwait2_recycle ? 16305 tcp_finwait2_timeout : 16306 TP_MAXIDLE(tp))); 16307 } 16308 tcp_state_change(tp, TCPS_FIN_WAIT_2); 16309 break; 16310 default: 16311 break; 16312 } 16313 } 16314 if (rack->r_fast_output) { 16315 /* 16316 * We re doing fast output.. can we expand that? 
16317 */ 16318 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 16319 } 16320 #ifdef TCP_ACCOUNTING 16321 rdstc = get_cyclecount(); 16322 if (rdstc > ts_val) { 16323 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16324 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16325 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16326 } 16327 } 16328 16329 } else if (win_up_req) { 16330 rdstc = get_cyclecount(); 16331 if (rdstc > ts_val) { 16332 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16333 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 16334 } 16335 } 16336 #endif 16337 } 16338 /* Now is there a next packet, if so we are done */ 16339 m_freem(m); 16340 did_out = 0; 16341 if (nxt_pkt) { 16342 #ifdef TCP_ACCOUNTING 16343 sched_unpin(); 16344 #endif 16345 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 16346 return (0); 16347 } 16348 rack_handle_might_revert(tp, rack); 16349 ctf_calc_rwin(so, tp); 16350 if ((rack->r_wanted_output != 0) || 16351 (rack->r_fast_output != 0) || 16352 (tp->t_flags & TF_ACKNOW )) { 16353 send_out_a_rst: 16354 if (tcp_output(tp) < 0) { 16355 #ifdef TCP_ACCOUNTING 16356 sched_unpin(); 16357 #endif 16358 return (1); 16359 } 16360 did_out = 1; 16361 } 16362 if (tp->t_flags2 & TF2_HPTS_CALLS) 16363 tp->t_flags2 &= ~TF2_HPTS_CALLS; 16364 rack_free_trim(rack); 16365 #ifdef TCP_ACCOUNTING 16366 sched_unpin(); 16367 #endif 16368 rack_timer_audit(tp, rack, &so->so_snd); 16369 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 16370 return (0); 16371 } 16372 16373 #define TCP_LRO_TS_OPTION \ 16374 ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \ 16375 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP) 16376 16377 static int 16378 rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 16379 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, int32_t nxt_pkt, 16380 struct timeval *tv) 16381 { 16382 struct inpcb *inp = tptoinpcb(tp); 16383 struct socket *so = tptosocket(tp); 16384 #ifdef TCP_ACCOUNTING 16385 uint64_t ts_val; 16386 #endif 16387 int32_t thflags, retval, did_out = 0; 16388 int32_t way_out = 0; 16389 /* 16390 * cts - is the current time from tv (caller gets ts) in microseconds. 16391 * ms_cts - is the current time from tv in milliseconds. 16392 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. 16393 */ 16394 uint32_t cts, us_cts, ms_cts; 16395 uint32_t tiwin; 16396 struct timespec ts; 16397 struct tcpopt to; 16398 struct tcp_rack *rack; 16399 struct rack_sendmap *rsm; 16400 int32_t prev_state = 0; 16401 int no_output = 0; 16402 int slot_remaining = 0; 16403 #ifdef TCP_ACCOUNTING 16404 int ack_val_set = 0xf; 16405 #endif 16406 int nsegs; 16407 16408 NET_EPOCH_ASSERT(); 16409 INP_WLOCK_ASSERT(inp); 16410 16411 /* 16412 * tv passed from common code is from either M_TSTMP_LRO or 16413 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 16414 */ 16415 rack = (struct tcp_rack *)tp->t_fb_ptr; 16416 if (rack->rack_deferred_inited == 0) { 16417 /* 16418 * If we are the connecting socket we will 16419 * hit rack_init() when no sequence numbers 16420 * are setup. This makes it so we must defer 16421 * some initialization. Call that now. 16422 */ 16423 rack_deferred_init(tp, rack); 16424 } 16425 /* 16426 * Check to see if we need to skip any output plans. This 16427 * can happen in the non-LRO path where we are pacing and 16428 * must process the ack coming in but need to defer sending 16429 * anything becase a pacing timer is running. 
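 * Note that when TF2_DONT_SACK_QUEUE is not set, only segments whose
 * options are empty or a bare timestamp (the shape LRO would have
 * queued) are deferred, and any deferral is overridden when less than
 * the pacer's minimum granularity remains on the pacing timer.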
16430 */ 16431 us_cts = tcp_tv_to_usectick(tv); 16432 if (m->m_flags & M_ACKCMP) { 16433 /* 16434 * All compressed ack's are ack's by definition so 16435 * remove any ack required flag and then do the processing. 16436 */ 16437 rack->rc_ack_required = 0; 16438 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 16439 } 16440 thflags = tcp_get_flags(th); 16441 if ((rack->rc_always_pace == 1) && 16442 (rack->rc_ack_can_sendout_data == 0) && 16443 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16444 (TSTMP_LT(us_cts, rack->r_ctl.rc_last_output_to))) { 16445 /* 16446 * Ok conditions are right for queuing the packets 16447 * but we do have to check the flags in the inp, it 16448 * could be, if a sack is present, we want to be awoken and 16449 * so should process the packets. 16450 */ 16451 slot_remaining = rack->r_ctl.rc_last_output_to - us_cts; 16452 if (rack->rc_tp->t_flags2 & TF2_DONT_SACK_QUEUE) { 16453 no_output = 1; 16454 } else { 16455 /* 16456 * If there is no options, or just a 16457 * timestamp option, we will want to queue 16458 * the packets. This is the same that LRO does 16459 * and will need to change with accurate ECN. 16460 */ 16461 uint32_t *ts_ptr; 16462 int optlen; 16463 16464 optlen = (th->th_off << 2) - sizeof(struct tcphdr); 16465 ts_ptr = (uint32_t *)(th + 1); 16466 if ((optlen == 0) || 16467 ((optlen == TCPOLEN_TSTAMP_APPA) && 16468 (*ts_ptr == TCP_LRO_TS_OPTION))) 16469 no_output = 1; 16470 } 16471 if ((no_output == 1) && (slot_remaining < tcp_min_hptsi_time)) { 16472 /* 16473 * It is unrealistic to think we can pace in less than 16474 * the minimum granularity of the pacer (def:250usec). So 16475 * if we have less than that time remaining we should go 16476 * ahead and allow output to be "early". We will attempt to 16477 * make up for it in any pacing time we try to apply on 16478 * the outbound packet. 16479 */ 16480 no_output = 0; 16481 } 16482 } 16483 /* 16484 * If there is a RST or FIN lets dump out the bw 16485 * with a FIN the connection may go on but we 16486 * may not. 16487 */ 16488 if ((thflags & TH_FIN) || (thflags & TH_RST)) 16489 rack_log_pacing_delay_calc(rack, 16490 rack->r_ctl.gp_bw, 16491 0, 16492 0, 16493 rack_get_gp_est(rack), /* delRate */ 16494 rack_get_lt_bw(rack), /* rttProp */ 16495 20, __LINE__, NULL, 0); 16496 if (m->m_flags & M_ACKCMP) { 16497 panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp); 16498 } 16499 cts = tcp_tv_to_usectick(tv); 16500 ms_cts = tcp_tv_to_mssectick(tv); 16501 nsegs = m->m_pkthdr.lro_nsegs; 16502 counter_u64_add(rack_proc_non_comp_ack, 1); 16503 #ifdef TCP_ACCOUNTING 16504 sched_pin(); 16505 if (thflags & TH_ACK) 16506 ts_val = get_cyclecount(); 16507 #endif 16508 if ((m->m_flags & M_TSTMP) || 16509 (m->m_flags & M_TSTMP_LRO)) { 16510 mbuf_tstmp2timespec(m, &ts); 16511 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 16512 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 16513 } else 16514 rack->r_ctl.act_rcv_time = *tv; 16515 kern_prefetch(rack, &prev_state); 16516 prev_state = 0; 16517 /* 16518 * Unscale the window into a 32-bit value. For the SYN_SENT state 16519 * the scale is zero. 16520 */ 16521 tiwin = th->th_win << tp->snd_scale; 16522 #ifdef TCP_ACCOUNTING 16523 if (thflags & TH_ACK) { 16524 /* 16525 * We have a tradeoff here. We can either do what we are 16526 * doing i.e. pinning to this CPU and then doing the accounting 16527 * <or> we could do a critical enter, setup the rdtsc and cpu 16528 * as in below, and then validate we are on the same CPU on 16529 * exit. 
I have chosen to not do the critical enter since 16530 * that often will gain you a context switch, and instead lock 16531 * us (line above this if) to the same CPU with sched_pin(). This 16532 * means we may be context switched out for a higher priority 16533 * interrupt but we won't be moved to another CPU. 16534 * 16535 * If this occurs (which it won't very often since we most likely 16536 * are running this code in interrupt context and only a higher 16537 * priority will bump us ... clock?) we will falsely add in 16538 * to the measured time the interrupt processing time plus the ack processing 16539 * time. This is ok since its a rare event. 16540 */ 16541 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin, 16542 ctf_fixed_maxseg(tp)); 16543 } 16544 #endif 16545 /* 16546 * Parse options on any incoming segment. 16547 */ 16548 memset(&to, 0, sizeof(to)); 16549 tcp_dooptions(&to, (u_char *)(th + 1), 16550 (th->th_off << 2) - sizeof(struct tcphdr), 16551 (thflags & TH_SYN) ? TO_SYN : 0); 16552 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 16553 __func__)); 16554 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 16555 __func__)); 16556 if (tp->t_flags2 & TF2_PROC_SACK_PROHIBIT) { 16557 /* 16558 * We don't look at sack's from the 16559 * peer because the MSS is too small which 16560 * can subject us to an attack. 16561 */ 16562 to.to_flags &= ~TOF_SACK; 16563 } 16564 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 16565 (tp->t_flags & TF_GPUTINPROG)) { 16566 /* 16567 * We have a goodput in progress 16568 * and we have entered a late state. 16569 * Do we have enough data in the sb 16570 * to handle the GPUT request? 16571 */ 16572 uint32_t bytes; 16573 16574 bytes = tp->gput_ack - tp->gput_seq; 16575 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 16576 bytes += tp->gput_seq - tp->snd_una; 16577 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 16578 /* 16579 * There are not enough bytes in the socket 16580 * buffer that have been sent to cover this 16581 * measurement. Cancel it.
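 * (The rack_log_pacing_delay_calc() call that follows records the cancellation, tagged with method 18, before TF_GPUTINPROG is cleared.)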
16582 */ 16583 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 16584 rack->r_ctl.rc_gp_srtt /*flex1*/, 16585 tp->gput_seq, 16586 0, 0, 18, __LINE__, NULL, 0); 16587 tp->t_flags &= ~TF_GPUTINPROG; 16588 } 16589 } 16590 if (tcp_bblogging_on(rack->rc_tp)) { 16591 union tcp_log_stackspecific log; 16592 struct timeval ltv; 16593 #ifdef TCP_REQUEST_TRK 16594 struct tcp_sendfile_track *tcp_req; 16595 16596 if (SEQ_GT(th->th_ack, tp->snd_una)) { 16597 tcp_req = tcp_req_find_req_for_seq(tp, (th->th_ack-1)); 16598 } else { 16599 tcp_req = tcp_req_find_req_for_seq(tp, th->th_ack); 16600 } 16601 #endif 16602 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16603 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 16604 if (rack->rack_no_prr == 0) 16605 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16606 else 16607 log.u_bbr.flex1 = 0; 16608 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 16609 log.u_bbr.use_lt_bw <<= 1; 16610 log.u_bbr.use_lt_bw |= rack->r_might_revert; 16611 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 16612 log.u_bbr.bbr_state = rack->rc_free_cnt; 16613 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16614 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 16615 log.u_bbr.flex3 = m->m_flags; 16616 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 16617 log.u_bbr.lost = thflags; 16618 log.u_bbr.pacing_gain = 0x1; 16619 #ifdef TCP_ACCOUNTING 16620 log.u_bbr.cwnd_gain = ack_val_set; 16621 #endif 16622 log.u_bbr.flex7 = 2; 16623 if (m->m_flags & M_TSTMP) { 16624 /* Record the hardware timestamp if present */ 16625 mbuf_tstmp2timespec(m, &ts); 16626 ltv.tv_sec = ts.tv_sec; 16627 ltv.tv_usec = ts.tv_nsec / 1000; 16628 log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv); 16629 } else if (m->m_flags & M_TSTMP_LRO) { 16630 /* Record the LRO arrival timestamp */ 16631 mbuf_tstmp2timespec(m, &ts); 16632 ltv.tv_sec = ts.tv_sec; 16633 ltv.tv_usec = ts.tv_nsec / 1000; 16634 log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv); 16635 } 16636 log.u_bbr.timeStamp = tcp_get_usecs(&ltv); 16637 /* Log the rcv time */ 16638 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp; 16639 #ifdef TCP_REQUEST_TRK 16640 log.u_bbr.applimited = tp->t_tcpreq_closed; 16641 log.u_bbr.applimited <<= 8; 16642 log.u_bbr.applimited |= tp->t_tcpreq_open; 16643 log.u_bbr.applimited <<= 8; 16644 log.u_bbr.applimited |= tp->t_tcpreq_req; 16645 if (tcp_req) { 16646 /* Copy out any client req info */ 16647 /* seconds */ 16648 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); 16649 /* useconds */ 16650 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); 16651 log.u_bbr.rttProp = tcp_req->timestamp; 16652 log.u_bbr.cur_del_rate = tcp_req->start; 16653 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { 16654 log.u_bbr.flex8 |= 1; 16655 } else { 16656 log.u_bbr.flex8 |= 2; 16657 log.u_bbr.bw_inuse = tcp_req->end; 16658 } 16659 log.u_bbr.flex6 = tcp_req->start_seq; 16660 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { 16661 log.u_bbr.flex8 |= 4; 16662 log.u_bbr.epoch = tcp_req->end_seq; 16663 } 16664 } 16665 #endif 16666 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, 16667 tlen, &log, true, &ltv); 16668 } 16669 /* Remove ack required flag if set, we have one */ 16670 if (thflags & TH_ACK) 16671 rack->rc_ack_required = 0; 16672 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__); 16673 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) { 16674 way_out = 4; 16675 retval = 0; 16676 m_freem(m); 16677 goto done_with_input; 16678 } 16679 /* 16680 * If a segment with the ACK-bit set arrives in the
SYN-SENT state 16681 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9. 16682 */ 16683 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 16684 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 16685 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 16686 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 16687 #ifdef TCP_ACCOUNTING 16688 sched_unpin(); 16689 #endif 16690 return (1); 16691 } 16692 /* 16693 * If timestamps were negotiated during SYN/ACK and a 16694 * segment without a timestamp is received, silently drop 16695 * the segment, unless it is a RST segment or missing timestamps are 16696 * tolerated. 16697 * See section 3.2 of RFC 7323. 16698 */ 16699 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 16700 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 16701 way_out = 5; 16702 retval = 0; 16703 m_freem(m); 16704 goto done_with_input; 16705 } 16706 /* 16707 * Segment received on connection. Reset idle time and keep-alive 16708 * timer. XXX: This should be done after segment validation to 16709 * ignore broken/spoofed segs. 16710 */ 16711 if (tp->t_idle_reduce && 16712 (tp->snd_max == tp->snd_una) && 16713 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 16714 counter_u64_add(rack_input_idle_reduces, 1); 16715 rack_cc_after_idle(rack, tp); 16716 } 16717 tp->t_rcvtime = ticks; 16718 #ifdef STATS 16719 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 16720 #endif 16721 if (tiwin > rack->r_ctl.rc_high_rwnd) 16722 rack->r_ctl.rc_high_rwnd = tiwin; 16723 /* 16724 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 16725 * this to occur after we've validated the segment. 16726 */ 16727 if (tcp_ecn_input_segment(tp, thflags, tlen, 16728 tcp_packets_this_ack(tp, th->th_ack), 16729 iptos)) 16730 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__); 16731 16732 /* 16733 * If echoed timestamp is later than the current time, fall back to 16734 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 16735 * were used when this connection was established. 16736 */ 16737 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 16738 to.to_tsecr -= tp->ts_offset; 16739 if (TSTMP_GT(to.to_tsecr, ms_cts)) 16740 to.to_tsecr = 0; 16741 } 16742 if ((rack->r_rcvpath_rtt_up == 1) && 16743 (to.to_flags & TOF_TS) && 16744 (TSTMP_GEQ(to.to_tsecr, rack->r_ctl.last_rcv_tstmp_for_rtt))) { 16745 uint32_t rtt = 0; 16746 16747 /* 16748 * We are receiving only and thus not sending 16749 * data to do an RTT. We set a flag when we first 16750 * sent this TS to the peer. We now have it back 16751 * and have an RTT to share. We log it as a conf 16752 * 4, we are not so sure about it.. since we 16753 * may have lost an ack. 16754 */ 16755 if (TSTMP_GT(cts, rack->r_ctl.last_time_of_arm_rcv)) 16756 rtt = (cts - rack->r_ctl.last_time_of_arm_rcv); 16757 rack->r_rcvpath_rtt_up = 0; 16758 /* Submit and commit the timer */ 16759 if (rtt > 0) { 16760 tcp_rack_xmit_timer(rack, rtt, 0, rtt, 4, NULL, 1); 16761 tcp_rack_xmit_timer_commit(rack, tp); 16762 } 16763 } 16764 /* 16765 * If its the first time in we need to take care of options and 16766 * verify we can do SACK for rack! 16767 */ 16768 if (rack->r_state == 0) { 16769 /* Should be init'd by rack_init() */ 16770 KASSERT(rack->rc_inp != NULL, 16771 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 16772 if (rack->rc_inp == NULL) { 16773 rack->rc_inp = inp; 16774 } 16775 16776 /* 16777 * Process options only when we get SYN/ACK back. 
The SYN 16778 * case for incoming connections is handled in tcp_syncache. 16779 * According to RFC1323 the window field in a SYN (i.e., a 16780 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 16781 * this is traditional behavior, may need to be cleaned up. 16782 */ 16783 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 16784 /* Handle parallel SYN for ECN */ 16785 tcp_ecn_input_parallel_syn(tp, thflags, iptos); 16786 if ((to.to_flags & TOF_SCALE) && 16787 (tp->t_flags & TF_REQ_SCALE)) { 16788 tp->t_flags |= TF_RCVD_SCALE; 16789 tp->snd_scale = to.to_wscale; 16790 } else 16791 tp->t_flags &= ~TF_REQ_SCALE; 16792 /* 16793 * Initial send window. It will be updated with the 16794 * next incoming segment to the scaled value. 16795 */ 16796 tp->snd_wnd = th->th_win; 16797 rack_validate_fo_sendwin_up(tp, rack); 16798 if ((to.to_flags & TOF_TS) && 16799 (tp->t_flags & TF_REQ_TSTMP)) { 16800 tp->t_flags |= TF_RCVD_TSTMP; 16801 tp->ts_recent = to.to_tsval; 16802 tp->ts_recent_age = cts; 16803 } else 16804 tp->t_flags &= ~TF_REQ_TSTMP; 16805 if (to.to_flags & TOF_MSS) { 16806 tcp_mss(tp, to.to_mss); 16807 } 16808 if ((tp->t_flags & TF_SACK_PERMIT) && 16809 (to.to_flags & TOF_SACKPERM) == 0) 16810 tp->t_flags &= ~TF_SACK_PERMIT; 16811 if (tp->t_flags & TF_FASTOPEN) { 16812 if (to.to_flags & TOF_FASTOPEN) { 16813 uint16_t mss; 16814 16815 if (to.to_flags & TOF_MSS) 16816 mss = to.to_mss; 16817 else 16818 if ((inp->inp_vflag & INP_IPV6) != 0) 16819 mss = TCP6_MSS; 16820 else 16821 mss = TCP_MSS; 16822 tcp_fastopen_update_cache(tp, mss, 16823 to.to_tfo_len, to.to_tfo_cookie); 16824 } else 16825 tcp_fastopen_disable_path(tp); 16826 } 16827 } 16828 /* 16829 * At this point we are at the initial call. Here we decide 16830 * if we are doing RACK or not. We do this by seeing if 16831 * TF_SACK_PERMIT is set and the sack-not-required is clear. 16832 * The code now does do dup-ack counting so if you don't 16833 * switch back you won't get rack & TLP, but you will still 16834 * get this stack. 16835 */ 16836 16837 if ((rack_sack_not_required == 0) && 16838 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 16839 tcp_switch_back_to_default(tp); 16840 (*tp->t_fb->tfb_tcp_do_segment)(tp, m, th, drop_hdrlen, 16841 tlen, iptos); 16842 #ifdef TCP_ACCOUNTING 16843 sched_unpin(); 16844 #endif 16845 return (1); 16846 } 16847 tcp_set_hpts(tp); 16848 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 16849 } 16850 if (thflags & TH_FIN) 16851 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 16852 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 16853 if ((rack->rc_gp_dyn_mul) && 16854 (rack->use_fixed_rate == 0) && 16855 (rack->rc_always_pace)) { 16856 /* Check in on probertt */ 16857 rack_check_probe_rtt(rack, cts); 16858 } 16859 rack_clear_rate_sample(rack); 16860 if ((rack->forced_ack) && 16861 ((tcp_get_flags(th) & TH_RST) == 0)) { 16862 rack_handle_probe_response(rack, tiwin, us_cts); 16863 } 16864 /* 16865 * This is the one exception case where we set the rack state 16866 * always. All other times (timers etc) we must have a rack-state 16867 * set (so we assure we have done the checks above for SACK). 
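 * (rack_set_state() below re-syncs r_state with tp->t_state so that the r_substate handler invoked further down matches the connection state.)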
16868 */ 16869 rack->r_ctl.rc_rcvtime = cts; 16870 if (rack->r_state != tp->t_state) 16871 rack_set_state(tp, rack); 16872 if (SEQ_GT(th->th_ack, tp->snd_una) && 16873 (rsm = tqhash_min(rack->r_ctl.tqh)) != NULL) 16874 kern_prefetch(rsm, &prev_state); 16875 prev_state = rack->r_state; 16876 if ((thflags & TH_RST) && 16877 ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 16878 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || 16879 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq))) { 16880 /* The connection will be killed by a reset, check the tracepoint */ 16881 tcp_trace_point(rack->rc_tp, TCP_TP_RESET_RCV); 16882 } 16883 retval = (*rack->r_substate) (m, th, so, 16884 tp, &to, drop_hdrlen, 16885 tlen, tiwin, thflags, nxt_pkt, iptos); 16886 if (retval == 0) { 16887 /* 16888 * If retval is 1 the tcb is unlocked and most likely the tp 16889 * is gone. 16890 */ 16891 INP_WLOCK_ASSERT(inp); 16892 if ((rack->rc_gp_dyn_mul) && 16893 (rack->rc_always_pace) && 16894 (rack->use_fixed_rate == 0) && 16895 rack->in_probe_rtt && 16896 (rack->r_ctl.rc_time_probertt_starts == 0)) { 16897 /* 16898 * If we are going for target, lets recheck before 16899 * we output. 16900 */ 16901 rack_check_probe_rtt(rack, cts); 16902 } 16903 if (rack->set_pacing_done_a_iw == 0) { 16904 /* How much has been acked? */ 16905 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 16906 /* We have enough to set in the pacing segment size */ 16907 rack->set_pacing_done_a_iw = 1; 16908 rack_set_pace_segments(tp, rack, __LINE__, NULL); 16909 } 16910 } 16911 tcp_rack_xmit_timer_commit(rack, tp); 16912 #ifdef TCP_ACCOUNTING 16913 /* 16914 * If we set the ack_val_set to what ack processing we are doing 16915 * we also want to track how many cycles we burned. Note 16916 * the bits after tcp_output we let be "free". This is because 16917 * we are also tracking the tcp_output times as well. Note the 16918 * use of 0xf here since we only have 11 counters (0 - 0xa) and 16919 * 0xf cannot be returned and is what we initialize it to, to 16920 * indicate we are not doing the tabulations. 16921 */ 16922 if (ack_val_set != 0xf) { 16923 uint64_t crtsc; 16924 16925 crtsc = get_cyclecount(); 16926 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16927 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 16928 } 16929 } 16930 #endif 16931 if ((nxt_pkt == 0) && (no_output == 0)) { 16932 if ((rack->r_wanted_output != 0) || 16933 (tp->t_flags & TF_ACKNOW) || 16934 (rack->r_fast_output != 0)) { 16935 16936 do_output_now: 16937 if (tcp_output(tp) < 0) { 16938 #ifdef TCP_ACCOUNTING 16939 sched_unpin(); 16940 #endif 16941 return (1); 16942 } 16943 did_out = 1; 16944 } 16945 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16946 rack_free_trim(rack); 16947 } else if ((nxt_pkt == 0) && (tp->t_flags & TF_ACKNOW)) { 16948 goto do_output_now; 16949 } else if ((no_output == 1) && 16950 (nxt_pkt == 0) && 16951 (tcp_in_hpts(rack->rc_tp) == 0)) { 16952 /* 16953 * We are not in hpts and we had a pacing timer up. Use 16954 * the remaining time (slot_remaining) to restart the timer.
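 * (slot_remaining was computed above as rc_last_output_to - us_cts, the unexpired portion of the pacing timer.)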
16955 */ 16956 KASSERT ((slot_remaining != 0), ("slot remaining is zero for rack:%p tp:%p", rack, tp)); 16957 rack_start_hpts_timer(rack, tp, cts, slot_remaining, 0, 0); 16958 rack_free_trim(rack); 16959 } 16960 /* Clear the flag, it may have been cleared by output but we may not have */ 16961 if ((nxt_pkt == 0) && (tp->t_flags2 & TF2_HPTS_CALLS)) 16962 tp->t_flags2 &= ~TF2_HPTS_CALLS; 16963 /* 16964 * The draft (v3) calls for us to use SEQ_GEQ, but that 16965 * causes issues when we are just going app limited. Lets 16966 * instead use SEQ_GT <or> where its equal but more data 16967 * is outstanding. 16968 * 16969 * Also make sure we are on the last ack of a series. We 16970 * have to have all the ack's processed in queue to know 16971 * if there is something left outstanding. 16972 */ 16973 if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends) && 16974 (rack->rc_new_rnd_needed == 0) && 16975 (nxt_pkt == 0)) { 16976 /* 16977 * We have crossed into a new round with 16978 * the new snd_una. 16979 */ 16980 rack_new_round_setup(tp, rack, tp->snd_una); 16981 } 16982 if ((nxt_pkt == 0) && 16983 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 16984 (SEQ_GT(tp->snd_max, tp->snd_una) || 16985 (tp->t_flags & TF_DELACK) || 16986 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 16987 (tp->t_state <= TCPS_CLOSING)))) { 16988 /* We could not send (probably in the hpts but stopped the timer earlier)? */ 16989 if ((tp->snd_max == tp->snd_una) && 16990 ((tp->t_flags & TF_DELACK) == 0) && 16991 (tcp_in_hpts(rack->rc_tp)) && 16992 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 16993 /* keep alive not needed if we are awaiting hptsi output */ 16994 ; 16995 } else { 16996 int late = 0; 16997 if (tcp_in_hpts(tp)) { 16998 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 16999 us_cts = tcp_get_usecs(NULL); 17000 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 17001 rack->r_early = 1; 17002 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 17003 } else 17004 late = 1; 17005 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 17006 } 17007 tcp_hpts_remove(tp); 17008 } 17009 if (late && (did_out == 0)) { 17010 /* 17011 * We are late in the sending 17012 * and we did not call the output 17013 * (this probably should not happen). 17014 */ 17015 goto do_output_now; 17016 } 17017 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 17018 } 17019 way_out = 1; 17020 } else if (nxt_pkt == 0) { 17021 /* Do we have the correct timer running? */ 17022 rack_timer_audit(tp, rack, &so->so_snd); 17023 way_out = 2; 17024 } 17025 done_with_input: 17026 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); 17027 if (did_out) 17028 rack->r_wanted_output = 0; 17029 } 17030 17031 #ifdef TCP_ACCOUNTING 17032 sched_unpin(); 17033 #endif 17034 return (retval); 17035 } 17036 17037 static void 17038 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 17039 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 17040 { 17041 struct timeval tv; 17042 17043 /* First lets see if we have old packets */ 17044 if (!STAILQ_EMPTY(&tp->t_inqueue)) { 17045 if (ctf_do_queued_segments(tp, 1)) { 17046 m_freem(m); 17047 return; 17048 } 17049 } 17050 if (m->m_flags & M_TSTMP_LRO) { 17051 mbuf_tstmp2timeval(m, &tv); 17052 } else { 17053 /* Should not happen; should we KASSERT instead?
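 * Falling back to tcp_get_usecs() still gives the stack a usable arrival time for this segment.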
*/ 17054 tcp_get_usecs(&tv); 17055 } 17056 if (rack_do_segment_nounlock(tp, m, th, drop_hdrlen, tlen, iptos, 0, 17057 &tv) == 0) { 17058 INP_WUNLOCK(tptoinpcb(tp)); 17059 } 17060 } 17061 17062 struct rack_sendmap * 17063 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 17064 { 17065 struct rack_sendmap *rsm = NULL; 17066 int32_t idx; 17067 uint32_t srtt = 0, thresh = 0, ts_low = 0; 17068 17069 /* Return the next guy to be re-transmitted */ 17070 if (tqhash_empty(rack->r_ctl.tqh)) { 17071 return (NULL); 17072 } 17073 if (tp->t_flags & TF_SENTFIN) { 17074 /* retran the end FIN? */ 17075 return (NULL); 17076 } 17077 /* ok lets look at this one */ 17078 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 17079 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) { 17080 return (rsm); 17081 } 17082 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 17083 goto check_it; 17084 } 17085 rsm = rack_find_lowest_rsm(rack); 17086 if (rsm == NULL) { 17087 return (NULL); 17088 } 17089 check_it: 17090 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && 17091 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 17092 /* 17093 * No sack so we automatically do the 3 strikes and 17094 * retransmit (no rack timer would be started). 17095 */ 17096 return (rsm); 17097 } 17098 if (rsm->r_flags & RACK_ACKED) { 17099 return (NULL); 17100 } 17101 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 17102 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 17103 /* Its not yet ready */ 17104 return (NULL); 17105 } 17106 srtt = rack_grab_rtt(tp, rack); 17107 idx = rsm->r_rtr_cnt - 1; 17108 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 17109 thresh = rack_calc_thresh_rack(rack, srtt, tsused, __LINE__, 1); 17110 if ((tsused == ts_low) || 17111 (TSTMP_LT(tsused, ts_low))) { 17112 /* No time since sending */ 17113 return (NULL); 17114 } 17115 if ((tsused - ts_low) < thresh) { 17116 /* It has not been long enough yet */ 17117 return (NULL); 17118 } 17119 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 17120 ((rsm->r_flags & RACK_SACK_PASSED))) { 17121 /* 17122 * We have passed the dup-ack threshold <or> 17123 * a SACK has indicated this is missing. 17124 * Note that if you are a declared attacker 17125 * it is only the dup-ack threshold that 17126 * will cause retransmits. 17127 */ 17128 /* log retransmit reason */ 17129 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 17130 rack->r_fast_output = 0; 17131 return (rsm); 17132 } 17133 return (NULL); 17134 } 17135 17136 static void 17137 rack_log_pacing_delay_calc (struct tcp_rack *rack, uint32_t len, uint32_t slot, 17138 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 17139 int line, struct rack_sendmap *rsm, uint8_t quality) 17140 { 17141 if (tcp_bblogging_on(rack->rc_tp)) { 17142 union tcp_log_stackspecific log; 17143 struct timeval tv; 17144 17145 if (rack_verbose_logging == 0) { 17146 /* 17147 * We are not verbose screen out all but 17148 * ones we always want. 
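 * (Only methods 2, 3, 7, 14, 20 and 89 are logged unconditionally; everything else requires rack_verbose_logging.)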
17149 */ 17150 if ((method != 2) && 17151 (method != 3) && 17152 (method != 7) && 17153 (method != 89) && 17154 (method != 14) && 17155 (method != 20)) { 17156 return; 17157 } 17158 } 17159 memset(&log, 0, sizeof(log)); 17160 log.u_bbr.flex1 = slot; 17161 log.u_bbr.flex2 = len; 17162 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 17163 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 17164 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 17165 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 17166 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 17167 log.u_bbr.use_lt_bw <<= 1; 17168 log.u_bbr.use_lt_bw |= rack->r_late; 17169 log.u_bbr.use_lt_bw <<= 1; 17170 log.u_bbr.use_lt_bw |= rack->r_early; 17171 log.u_bbr.use_lt_bw <<= 1; 17172 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 17173 log.u_bbr.use_lt_bw <<= 1; 17174 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 17175 log.u_bbr.use_lt_bw <<= 1; 17176 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 17177 log.u_bbr.use_lt_bw <<= 1; 17178 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 17179 log.u_bbr.use_lt_bw <<= 1; 17180 log.u_bbr.use_lt_bw |= rack->gp_ready; 17181 log.u_bbr.pkt_epoch = line; 17182 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 17183 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 17184 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 17185 log.u_bbr.bw_inuse = bw_est; 17186 log.u_bbr.delRate = bw; 17187 if (rack->r_ctl.gp_bw == 0) 17188 log.u_bbr.cur_del_rate = 0; 17189 else 17190 log.u_bbr.cur_del_rate = rack_get_bw(rack); 17191 log.u_bbr.rttProp = len_time; 17192 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 17193 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 17194 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 17195 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 17196 /* We are in slow start */ 17197 log.u_bbr.flex7 = 1; 17198 } else { 17199 /* we are on congestion avoidance */ 17200 log.u_bbr.flex7 = 0; 17201 } 17202 log.u_bbr.flex8 = method; 17203 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 17204 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 17205 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 17206 log.u_bbr.cwnd_gain <<= 1; 17207 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 17208 log.u_bbr.cwnd_gain <<= 1; 17209 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 17210 log.u_bbr.bbr_substate = quality; 17211 log.u_bbr.bbr_state = rack->dgp_on; 17212 log.u_bbr.bbr_state <<= 1; 17213 log.u_bbr.bbr_state |= rack->rc_pace_to_cwnd; 17214 log.u_bbr.bbr_state <<= 2; 17215 TCP_LOG_EVENTP(rack->rc_tp, NULL, 17216 &rack->rc_inp->inp_socket->so_rcv, 17217 &rack->rc_inp->inp_socket->so_snd, 17218 BBR_LOG_HPTSI_CALC, 0, 17219 0, &log, false, &tv); 17220 } 17221 } 17222 17223 static uint32_t 17224 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 17225 { 17226 uint32_t new_tso, user_max, pace_one; 17227 17228 user_max = rack->rc_user_set_max_segs * mss; 17229 if (rack->rc_force_max_seg) { 17230 return (user_max); 17231 } 17232 if (rack->use_fixed_rate && 17233 ((rack->r_ctl.crte == NULL) || 17234 (bw != rack->r_ctl.crte->rate))) { 17235 /* Use the user mss since we are not exactly matched */ 17236 return (user_max); 17237 } 17238 if (rack_pace_one_seg || 17239 (rack->r_ctl.rc_user_set_min_segs == 1)) 17240 pace_one = 1; 17241 else 17242 pace_one = 0; 17243 17244 new_tso = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, bw, mss, 17245 pace_one, rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); 17246 if (new_tso > user_max) 17247 new_tso = user_max; 
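/* In hybrid mode a client-suggested segment count (client_suggested_maxseg) may raise the burst size above the value computed above, and a configured minimum segment count (rc_user_set_min_segs) enforces a floor. */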
17248 if (rack->rc_hybrid_mode && rack->r_ctl.client_suggested_maxseg) { 17249 if (((uint32_t)rack->r_ctl.client_suggested_maxseg * mss) > new_tso) 17250 new_tso = (uint32_t)rack->r_ctl.client_suggested_maxseg * mss; 17251 } 17252 if (rack->r_ctl.rc_user_set_min_segs && 17253 ((rack->r_ctl.rc_user_set_min_segs * mss) > new_tso)) 17254 new_tso = rack->r_ctl.rc_user_set_min_segs * mss; 17255 return (new_tso); 17256 } 17257 17258 static uint64_t 17259 rack_arrive_at_discounted_rate(struct tcp_rack *rack, uint64_t window_input, uint32_t *rate_set, uint32_t *gain_b) 17260 { 17261 uint64_t reduced_win; 17262 uint32_t gain; 17263 17264 if (window_input < rc_init_window(rack)) { 17265 /* 17266 * The cwnd is collapsed to 17267 * nearly zero, maybe because of a time-out? 17268 * Lets drop back to the lt-bw. 17269 */ 17270 reduced_win = rack_get_lt_bw(rack); 17271 /* Set the flag so the caller knows its a rate and not a reduced window */ 17272 *rate_set = 1; 17273 gain = 100; 17274 } else if (IN_RECOVERY(rack->rc_tp->t_flags)) { 17275 /* 17276 * If we are in recovery our cwnd needs to be less for 17277 * our pacing consideration. 17278 */ 17279 if (rack->rack_hibeta == 0) { 17280 reduced_win = window_input / 2; 17281 gain = 50; 17282 } else { 17283 reduced_win = window_input * rack->r_ctl.saved_hibeta; 17284 reduced_win /= 100; 17285 gain = rack->r_ctl.saved_hibeta; 17286 } 17287 } else { 17288 /* 17289 * Apply Timely factor to increase/decrease the 17290 * amount we are pacing at. 17291 */ 17292 gain = rack_get_output_gain(rack, NULL); 17293 if (gain > rack_gain_p5_ub) { 17294 gain = rack_gain_p5_ub; 17295 } 17296 reduced_win = window_input * gain; 17297 reduced_win /= 100; 17298 } 17299 if (gain_b != NULL) 17300 *gain_b = gain; 17301 /* 17302 * What is being returned here is a trimmed down 17303 * window value in all cases where rate_set is left 17304 * at 0. In one case we actually return the rate (lt_bw). 17305 * The "reduced_win" is returned as a slimmed down cwnd that 17306 * is then converted by the caller into a rate when rate_set 17307 * is 0. 17308 */ 17309 return (reduced_win); 17310 } 17311 17312 static int32_t 17313 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced) 17314 { 17315 uint64_t lentim, fill_bw; 17316 17317 rack->r_via_fill_cw = 0; 17318 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) 17319 return (slot); 17320 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) 17321 return (slot); 17322 if (rack->r_ctl.rc_last_us_rtt == 0) 17323 return (slot); 17324 if (rack->rc_pace_fill_if_rttin_range && 17325 (rack->r_ctl.rc_last_us_rtt >= 17326 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { 17327 /* The rtt is huge, N * smallest, lets not fill */ 17328 return (slot); 17329 } 17330 if (rack->r_ctl.fillcw_cap && *rate_wanted >= rack->r_ctl.fillcw_cap) 17331 return (slot); 17332 /* 17333 * first lets calculate the b/w based on the last us-rtt 17334 * and the smallest send window.
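 * For example, a 1,000,000 byte window with a 100,000 usec (100 ms) last RTT works out below to roughly 10,000,000 bytes/sec once scaled by HPTS_USEC_IN_SEC and divided by rc_last_us_rtt.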
17335 */ 17336 fill_bw = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); 17337 if (rack->rc_fillcw_apply_discount) { 17338 uint32_t rate_set = 0; 17339 17340 fill_bw = rack_arrive_at_discounted_rate(rack, fill_bw, &rate_set, NULL); 17341 if (rate_set) { 17342 goto at_lt_bw; 17343 } 17344 } 17345 /* Take the rwnd if its smaller */ 17346 if (fill_bw > rack->rc_tp->snd_wnd) 17347 fill_bw = rack->rc_tp->snd_wnd; 17348 /* Now lets make it into a b/w */ 17349 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 17350 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 17351 /* Adjust to any cap */ 17352 if (rack->r_ctl.fillcw_cap && fill_bw >= rack->r_ctl.fillcw_cap) 17353 fill_bw = rack->r_ctl.fillcw_cap; 17354 17355 at_lt_bw: 17356 if (rack_bw_multipler > 0) { 17357 /* 17358 * We want to limit fill-cw to some multiple 17359 * of the max(lt_bw, gp_est). The normal default 17360 * is 0 for off, so a sysctl has enabled it. 17361 */ 17362 uint64_t lt_bw, gp, rate; 17363 17364 gp = rack_get_gp_est(rack); 17365 lt_bw = rack_get_lt_bw(rack); 17366 if (lt_bw > gp) 17367 rate = lt_bw; 17368 else 17369 rate = gp; 17370 rate *= rack_bw_multipler; 17371 rate /= 100; 17372 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 17373 union tcp_log_stackspecific log; 17374 struct timeval tv; 17375 17376 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 17377 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 17378 log.u_bbr.flex1 = rack_bw_multipler; 17379 log.u_bbr.flex2 = len; 17380 log.u_bbr.cur_del_rate = gp; 17381 log.u_bbr.delRate = lt_bw; 17382 log.u_bbr.bw_inuse = rate; 17383 log.u_bbr.rttProp = fill_bw; 17384 log.u_bbr.flex8 = 44; 17385 tcp_log_event(rack->rc_tp, NULL, NULL, NULL, 17386 BBR_LOG_CWND, 0, 17387 0, &log, false, NULL, 17388 __func__, __LINE__, &tv); 17389 } 17390 if (fill_bw > rate) 17391 fill_bw = rate; 17392 } 17393 /* We are below the min b/w */ 17394 if (non_paced) 17395 *rate_wanted = fill_bw; 17396 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 17397 return (slot); 17398 rack->r_via_fill_cw = 1; 17399 if (rack->r_rack_hw_rate_caps && 17400 (rack->r_ctl.crte != NULL)) { 17401 uint64_t high_rate; 17402 17403 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 17404 if (fill_bw > high_rate) { 17405 /* We are capping bw at the highest rate table entry */ 17406 if (*rate_wanted > high_rate) { 17407 /* The original rate was also capped */ 17408 rack->r_via_fill_cw = 0; 17409 } 17410 rack_log_hdwr_pacing(rack, 17411 fill_bw, high_rate, __LINE__, 17412 0, 3); 17413 fill_bw = high_rate; 17414 if (capped) 17415 *capped = 1; 17416 } 17417 } else if ((rack->r_ctl.crte == NULL) && 17418 (rack->rack_hdrw_pacing == 0) && 17419 (rack->rack_hdw_pace_ena) && 17420 rack->r_rack_hw_rate_caps && 17421 (rack->rack_attempt_hdwr_pace == 0) && 17422 (rack->rc_inp->inp_route.ro_nh != NULL) && 17423 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 17424 /* 17425 * Ok we may have a first attempt that is greater than our top rate, 17426 * lets check.
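 * (tcp_hw_highest_rate_ifp() below queries the highest hardware pacing rate available on the outbound interface; if the fill rate exceeds it we clamp fill_bw and mark *capped.)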
17427 */ 17428 uint64_t high_rate; 17429 17430 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 17431 if (high_rate) { 17432 if (fill_bw > high_rate) { 17433 fill_bw = high_rate; 17434 if (capped) 17435 *capped = 1; 17436 } 17437 } 17438 } 17439 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) { 17440 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 17441 fill_bw, 0, 0, HYBRID_LOG_RATE_CAP, 2, NULL, __LINE__); 17442 fill_bw = rack->r_ctl.bw_rate_cap; 17443 } 17444 /* 17445 * Ok fill_bw holds our mythical b/w to fill the cwnd 17446 * in an rtt (unless it was capped), what does that 17447 * time wise equate to? 17448 */ 17449 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 17450 lentim /= fill_bw; 17451 *rate_wanted = fill_bw; 17452 if (non_paced || (lentim < slot)) { 17453 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, 17454 0, lentim, 12, __LINE__, NULL, 0); 17455 return ((int32_t)lentim); 17456 } else 17457 return (slot); 17458 } 17459 17460 static int32_t 17461 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz, int line) 17462 { 17463 uint64_t srtt; 17464 int32_t slot = 0; 17465 int32_t minslot = 0; 17466 int can_start_hw_pacing = 1; 17467 int err; 17468 int pace_one; 17469 17470 if (rack_pace_one_seg || 17471 (rack->r_ctl.rc_user_set_min_segs == 1)) 17472 pace_one = 1; 17473 else 17474 pace_one = 0; 17475 if (rack->rc_always_pace == 0) { 17476 /* 17477 * We use the most optimistic possible cwnd/srtt for 17478 * sending calculations. This will make our 17479 * calculation anticipate getting more through 17480 * quicker than possible. But thats ok we don't want 17481 * the peer to have a gap in data sending. 17482 */ 17483 uint64_t cwnd, tr_perms = 0; 17484 int32_t reduce = 0; 17485 17486 old_method: 17487 /* 17488 * We keep no precise pacing with the old method 17489 * instead we use the pacer to mitigate bursts. 17490 */ 17491 if (rack->r_ctl.rc_rack_min_rtt) 17492 srtt = rack->r_ctl.rc_rack_min_rtt; 17493 else 17494 srtt = max(tp->t_srtt, 1); 17495 if (rack->r_ctl.rc_rack_largest_cwnd) 17496 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 17497 else 17498 cwnd = rack->r_ctl.cwnd_to_use; 17499 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 17500 tr_perms = (cwnd * 1000) / srtt; 17501 if (tr_perms == 0) { 17502 tr_perms = ctf_fixed_maxseg(tp); 17503 } 17504 /* 17505 * Calculate how long this will take to drain, if 17506 * the calculation comes out to zero, thats ok we 17507 * will use send_a_lot to possibly spin around for 17508 * more increasing tot_len_this_send to the point 17509 * that its going to require a pace, or we hit the 17510 * cwnd. In which case we are just waiting for 17511 * an ACK. 17512 */ 17513 slot = len / tr_perms; 17514 /* Now do we reduce the time so we don't run dry?
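 * (rack_slot_reduction trims slot/rack_slot_reduction off the computed delay so the pacer runs slightly early; e.g. a divisor of 4 shaves 25%.)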
*/ 17515 if (slot && rack_slot_reduction) { 17516 reduce = (slot / rack_slot_reduction); 17517 if (reduce < slot) { 17518 slot -= reduce; 17519 } else 17520 slot = 0; 17521 } 17522 slot *= HPTS_USEC_IN_MSEC; 17523 if (rack->rc_pace_to_cwnd) { 17524 uint64_t rate_wanted = 0; 17525 17526 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); 17527 rack->rc_ack_can_sendout_data = 1; 17528 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); 17529 } else 17530 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); 17531 /*******************************************************/ 17532 /* RRS: We insert non-paced call to stats here for len */ 17533 /*******************************************************/ 17534 } else { 17535 uint64_t bw_est, res, lentim, rate_wanted; 17536 uint32_t segs, oh; 17537 int capped = 0; 17538 int prev_fill; 17539 17540 if ((rack->r_rr_config == 1) && rsm) { 17541 return (rack->r_ctl.rc_min_to); 17542 } 17543 if (rack->use_fixed_rate) { 17544 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 17545 } else if ((rack->r_ctl.init_rate == 0) && 17546 (rack->r_ctl.gp_bw == 0)) { 17547 /* no way to yet do an estimate */ 17548 bw_est = rate_wanted = 0; 17549 } else if (rack->dgp_on) { 17550 bw_est = rack_get_bw(rack); 17551 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 17552 } else { 17553 uint32_t gain, rate_set = 0; 17554 17555 rate_wanted = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); 17556 rate_wanted = rack_arrive_at_discounted_rate(rack, rate_wanted, &rate_set, &gain); 17557 if (rate_set == 0) { 17558 if (rate_wanted > rack->rc_tp->snd_wnd) 17559 rate_wanted = rack->rc_tp->snd_wnd; 17560 /* Now lets make it into a b/w */ 17561 rate_wanted *= (uint64_t)HPTS_USEC_IN_SEC; 17562 rate_wanted /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 17563 } 17564 bw_est = rate_wanted; 17565 rack_log_pacing_delay_calc(rack, rack->rc_tp->snd_cwnd, 17566 rack->r_ctl.cwnd_to_use, 17567 rate_wanted, bw_est, 17568 rack->r_ctl.rc_last_us_rtt, 17569 88, __LINE__, NULL, gain); 17570 } 17571 if ((bw_est == 0) || (rate_wanted == 0) || 17572 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) { 17573 /* 17574 * No way yet to make a b/w estimate or 17575 * our raise is set incorrectly. 17576 */ 17577 goto old_method; 17578 } 17579 rack_rate_cap_bw(rack, &rate_wanted, &capped); 17580 /* We need to account for all the overheads */ 17581 segs = (len + segsiz - 1) / segsiz; 17582 /* 17583 * We need the diff between 1514 bytes (e-mtu with e-hdr) 17584 * and how much data we put in each packet. Yes this 17585 * means we may be off if we are larger than 1500 bytes 17586 * or smaller. But this just makes us more conservative. 17587 */ 17588 17589 oh = (tp->t_maxseg - segsiz) + sizeof(struct tcphdr); 17590 if (rack->r_is_v6) { 17591 #ifdef INET6 17592 oh += sizeof(struct ip6_hdr); 17593 #endif 17594 } else { 17595 #ifdef INET 17596 oh += sizeof(struct ip); 17597 #endif 17598 } 17599 /* We add a fixed 14 for the ethernet header */ 17600 oh += 14; 17601 segs *= oh; 17602 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 17603 res = lentim / rate_wanted; 17604 slot = (uint32_t)res; 17605 if (rack_hw_rate_min && 17606 (rate_wanted < rack_hw_rate_min)) { 17607 can_start_hw_pacing = 0; 17608 if (rack->r_ctl.crte) { 17609 /* 17610 * Ok we need to release it, we 17611 * have fallen too low. 
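 * (can_start_hw_pacing was already cleared above for this pass; resetting rack_attempt_hdwr_pace below allows hardware pacing to be tried again on a later call.)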
17612 */ 17613 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17614 rack->r_ctl.crte = NULL; 17615 rack->rack_attempt_hdwr_pace = 0; 17616 rack->rack_hdrw_pacing = 0; 17617 } 17618 } 17619 if (rack->r_ctl.crte && 17620 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { 17621 /* 17622 * We want more than the hardware can give us, 17623 * don't start any hw pacing. 17624 */ 17625 can_start_hw_pacing = 0; 17626 if (rack->r_rack_hw_rate_caps == 0) { 17627 /* 17628 * Ok we need to release it, we 17629 * want more than the card can give us and 17630 * no rate cap is in place. Set it up so 17631 * when we want less we can retry. 17632 */ 17633 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17634 rack->r_ctl.crte = NULL; 17635 rack->rack_attempt_hdwr_pace = 0; 17636 rack->rack_hdrw_pacing = 0; 17637 } 17638 } 17639 if ((rack->r_ctl.crte != NULL) && (rack->rc_inp->inp_snd_tag == NULL)) { 17640 /* 17641 * We lost our rate somehow, this can happen 17642 * if the interface changed underneath us. 17643 */ 17644 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17645 rack->r_ctl.crte = NULL; 17646 /* Lets re-allow attempting to setup pacing */ 17647 rack->rack_hdrw_pacing = 0; 17648 rack->rack_attempt_hdwr_pace = 0; 17649 rack_log_hdwr_pacing(rack, 17650 rate_wanted, bw_est, __LINE__, 17651 0, 6); 17652 } 17653 prev_fill = rack->r_via_fill_cw; 17654 if ((rack->rc_pace_to_cwnd) && 17655 (capped == 0) && 17656 (rack->dgp_on == 1) && 17657 (rack->use_fixed_rate == 0) && 17658 (rack->in_probe_rtt == 0) && 17659 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 17660 /* 17661 * We want to pace at our rate *or* faster to 17662 * fill the cwnd to the max if its not full. 17663 */ 17664 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); 17665 /* Re-check to make sure we are not exceeding our max b/w */ 17666 if ((rack->r_ctl.crte != NULL) && 17667 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { 17668 /* 17669 * We want more than the hardware can give us, 17670 * don't start any hw pacing. 17671 */ 17672 can_start_hw_pacing = 0; 17673 if (rack->r_rack_hw_rate_caps == 0) { 17674 /* 17675 * Ok we need to release it, we 17676 * want more than the card can give us and 17677 * no rate cap is in place. Set it up so 17678 * when we want less we can retry. 17679 */ 17680 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17681 rack->r_ctl.crte = NULL; 17682 rack->rack_attempt_hdwr_pace = 0; 17683 rack->rack_hdrw_pacing = 0; 17684 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 17685 } 17686 } 17687 } 17688 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 17689 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 17690 if ((rack->rack_hdw_pace_ena) && 17691 (can_start_hw_pacing > 0) && 17692 (rack->rack_hdrw_pacing == 0) && 17693 (rack->rack_attempt_hdwr_pace == 0)) { 17694 /* 17695 * Lets attempt to turn on hardware pacing 17696 * if we can. 
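 * (rack_attempt_hdwr_pace is set below so the setup is only tried once, until a release path clears the flag again.)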
17697 */ 17698 rack->rack_attempt_hdwr_pace = 1; 17699 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 17700 rack->rc_inp->inp_route.ro_nh->nh_ifp, 17701 rate_wanted, 17702 RS_PACING_GEQ, 17703 &err, &rack->r_ctl.crte_prev_rate); 17704 if (rack->r_ctl.crte) { 17705 rack->rack_hdrw_pacing = 1; 17706 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, segsiz, 17707 pace_one, rack->r_ctl.crte, 17708 NULL, rack->r_ctl.pace_len_divisor); 17709 rack_log_hdwr_pacing(rack, 17710 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 17711 err, 0); 17712 rack->r_ctl.last_hw_bw_req = rate_wanted; 17713 } else { 17714 counter_u64_add(rack_hw_pace_init_fail, 1); 17715 } 17716 } else if (rack->rack_hdrw_pacing && 17717 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 17718 /* Do we need to adjust our rate? */ 17719 const struct tcp_hwrate_limit_table *nrte; 17720 17721 if (rack->r_up_only && 17722 (rate_wanted < rack->r_ctl.crte->rate)) { 17723 /** 17724 * We have four possible states here 17725 * having to do with the previous time 17726 * and this time. 17727 * previous | this-time 17728 * A) 0 | 0 -- fill_cw not in the picture 17729 * B) 1 | 0 -- we were doing a fill-cw but now are not 17730 * C) 1 | 1 -- all rates from fill_cw 17731 * D) 0 | 1 -- we were doing non-fill and now we are filling 17732 * 17733 * For case A, C and D we don't allow a drop. But for 17734 * case B, where we are now on our steady rate, we do 17735 * allow a drop. 17736 * 17737 */ 17738 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 17739 goto done_w_hdwr; 17740 } 17741 if ((rate_wanted > rack->r_ctl.crte->rate) || 17742 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 17743 if (rack_hw_rate_to_low && 17744 (bw_est < rack_hw_rate_to_low)) { 17745 /* 17746 * The pacing rate is too low for hardware, but 17747 * do allow hardware pacing to be restarted. 17748 */ 17749 rack_log_hdwr_pacing(rack, 17750 bw_est, rack->r_ctl.crte->rate, __LINE__, 17751 0, 5); 17752 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17753 rack->r_ctl.crte = NULL; 17754 rack->rack_attempt_hdwr_pace = 0; 17755 rack->rack_hdrw_pacing = 0; 17756 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 17757 goto done_w_hdwr; 17758 } 17759 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 17760 rack->rc_tp, 17761 rack->rc_inp->inp_route.ro_nh->nh_ifp, 17762 rate_wanted, 17763 RS_PACING_GEQ, 17764 &err, &rack->r_ctl.crte_prev_rate); 17765 if (nrte == NULL) { 17766 /* 17767 * Lost the rate, lets drop hardware pacing 17768 * period.
17769 */ 17770 rack->rack_hdrw_pacing = 0; 17771 rack->r_ctl.crte = NULL; 17772 rack_log_hdwr_pacing(rack, 17773 rate_wanted, 0, __LINE__, 17774 err, 1); 17775 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 17776 counter_u64_add(rack_hw_pace_lost, 1); 17777 } else if (nrte != rack->r_ctl.crte) { 17778 rack->r_ctl.crte = nrte; 17779 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, 17780 segsiz, pace_one, rack->r_ctl.crte, 17781 NULL, rack->r_ctl.pace_len_divisor); 17782 rack_log_hdwr_pacing(rack, 17783 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 17784 err, 2); 17785 rack->r_ctl.last_hw_bw_req = rate_wanted; 17786 } 17787 } else { 17788 /* We just need to adjust the segment size */ 17789 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 17790 rack_log_hdwr_pacing(rack, 17791 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 17792 0, 4); 17793 rack->r_ctl.last_hw_bw_req = rate_wanted; 17794 } 17795 } 17796 } 17797 if (minslot && (minslot > slot)) { 17798 rack_log_pacing_delay_calc(rack, minslot, slot, rack->r_ctl.crte->rate, bw_est, lentim, 17799 98, __LINE__, NULL, 0); 17800 slot = minslot; 17801 } 17802 done_w_hdwr: 17803 if (rack_limit_time_with_srtt && 17804 (rack->use_fixed_rate == 0) && 17805 (rack->rack_hdrw_pacing == 0)) { 17806 /* 17807 * Sanity check, we do not allow the pacing delay 17808 * to be longer than the SRTT of the path. If it is 17809 * a slow path, then adding a packet should increase 17810 * the RTT and compensate for this i.e. the srtt will 17811 * be greater so the allowed pacing time will be greater. 17812 * 17813 * Note this restriction is not for where a peak rate 17814 * is set, we are doing fixed pacing or hardware pacing. 17815 */ 17816 if (rack->rc_tp->t_srtt) 17817 srtt = rack->rc_tp->t_srtt; 17818 else 17819 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 17820 if (srtt < (uint64_t)slot) { 17821 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); 17822 slot = srtt; 17823 } 17824 } 17825 /*******************************************************************/ 17826 /* RRS: We insert paced call to stats here for len and rate_wanted */ 17827 /*******************************************************************/ 17828 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); 17829 } 17830 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 17831 /* 17832 * If this rate is seeing enobufs when it 17833 * goes to send then either the nic is out 17834 * of gas or we are mis-estimating the time 17835 * somehow and not letting the queue empty 17836 * completely. Lets add to the pacing time. 17837 */ 17838 int hw_boost_delay; 17839 17840 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 17841 if (hw_boost_delay > rack_enobuf_hw_max) 17842 hw_boost_delay = rack_enobuf_hw_max; 17843 else if (hw_boost_delay < rack_enobuf_hw_min) 17844 hw_boost_delay = rack_enobuf_hw_min; 17845 slot += hw_boost_delay; 17846 } 17847 return (slot); 17848 } 17849 17850 static void 17851 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 17852 tcp_seq startseq, uint32_t sb_offset) 17853 { 17854 struct rack_sendmap *my_rsm = NULL; 17855 17856 if (tp->t_state < TCPS_ESTABLISHED) { 17857 /* 17858 * We don't start any measurements if we are 17859 * not at least established. 
17860 */ 17861 return; 17862 } 17863 if (tp->t_state >= TCPS_FIN_WAIT_1) { 17864 /* 17865 * We will get no more data into the SB; 17866 * this means we need to have the data available 17867 * before we start a measurement. 17868 */ 17869 17870 if (sbavail(&tptosocket(tp)->so_snd) < 17871 max(rc_init_window(rack), 17872 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { 17873 /* Nope not enough data */ 17874 return; 17875 } 17876 } 17877 tp->t_flags |= TF_GPUTINPROG; 17878 rack->r_ctl.rc_gp_cumack_ts = 0; 17879 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 17880 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 17881 tp->gput_seq = startseq; 17882 rack->app_limited_needs_set = 0; 17883 if (rack->in_probe_rtt) 17884 rack->measure_saw_probe_rtt = 1; 17885 else if ((rack->measure_saw_probe_rtt) && 17886 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 17887 rack->measure_saw_probe_rtt = 0; 17888 if (rack->rc_gp_filled) 17889 tp->gput_ts = rack->r_ctl.last_cumack_advance; 17890 else { 17891 /* Special case initial measurement */ 17892 struct timeval tv; 17893 17894 tp->gput_ts = tcp_get_usecs(&tv); 17895 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 17896 } 17897 /* 17898 * We take a guess out into the future, 17899 * if we have no measurement and no 17900 * initial rate, we measure the first 17901 * initial-windows worth of data to 17902 * speed up getting some GP measurement and 17903 * thus start pacing. 17904 */ 17905 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 17906 rack->app_limited_needs_set = 1; 17907 tp->gput_ack = startseq + max(rc_init_window(rack), 17908 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 17909 rack_log_pacing_delay_calc(rack, 17910 tp->gput_seq, 17911 tp->gput_ack, 17912 0, 17913 tp->gput_ts, 17914 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 17915 9, 17916 __LINE__, NULL, 0); 17917 rack_tend_gp_marks(tp, rack); 17918 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 17919 return; 17920 } 17921 if (sb_offset) { 17922 /* 17923 * We are out somewhere in the sb, 17924 * can we use the already outstanding data? 17925 */ 17926 17927 if (rack->r_ctl.rc_app_limited_cnt == 0) { 17928 /* 17929 * Yes first one is good and in this case 17930 * the tp->gput_ts is correctly set based on 17931 * the last ack that arrived (no need to 17932 * set things up when an ack comes in). 17933 */ 17934 my_rsm = tqhash_min(rack->r_ctl.tqh); 17935 if ((my_rsm == NULL) || 17936 (my_rsm->r_rtr_cnt != 1)) { 17937 /* retransmission? */ 17938 goto use_latest; 17939 } 17940 } else { 17941 if (rack->r_ctl.rc_first_appl == NULL) { 17942 /* 17943 * If rc_first_appl is NULL 17944 * then the cnt should be 0. 17945 * This is probably an error, maybe 17946 * a KASSERT would be appropriate. 17947 */ 17948 goto use_latest; 17949 } 17950 /* 17951 * If we have a marker pointer to the last one that is 17952 * app limited we can use that, but we need to set 17953 * things up so that when it gets ack'ed we record 17954 * the ack time (if its not already acked). 17955 */ 17956 rack->app_limited_needs_set = 1; 17957 /* 17958 * We want to get to the rsm that is either 17959 * next with space i.e. over 1 MSS or the one 17960 * after that (after the app-limited).
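 * (If that rsm covers more than one MSS the measurement starts one MSS into it; otherwise we step forward to the next rsm in the hash.)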
17961 */ 17962 my_rsm = tqhash_next(rack->r_ctl.tqh, rack->r_ctl.rc_first_appl); 17963 if (my_rsm) { 17964 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 17965 /* Have to use the next one */ 17966 my_rsm = tqhash_next(rack->r_ctl.tqh, my_rsm); 17967 else { 17968 /* Use after the first MSS of it is acked */ 17969 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 17970 goto start_set; 17971 } 17972 } 17973 if ((my_rsm == NULL) || 17974 (my_rsm->r_rtr_cnt != 1)) { 17975 /* 17976 * Either its a retransmit or 17977 * the last is the app-limited one. 17978 */ 17979 goto use_latest; 17980 } 17981 } 17982 tp->gput_seq = my_rsm->r_start; 17983 start_set: 17984 if (my_rsm->r_flags & RACK_ACKED) { 17985 /* 17986 * This one has been acked, use the arrival ack time 17987 */ 17988 struct rack_sendmap *nrsm; 17989 17990 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 17991 rack->app_limited_needs_set = 0; 17992 /* 17993 * Ok in this path we need to use the r_end now 17994 * since this guy is the starting ack. 17995 */ 17996 tp->gput_seq = my_rsm->r_end; 17997 /* 17998 * We also need to adjust up the sendtime 17999 * to the send of the next data after my_rsm. 18000 */ 18001 nrsm = tqhash_next(rack->r_ctl.tqh, my_rsm); 18002 if (nrsm != NULL) 18003 my_rsm = nrsm; 18004 else { 18005 /* 18006 * The next has not been sent, that's the 18007 * case for using the latest. 18008 */ 18009 goto use_latest; 18010 } 18011 } 18012 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; 18013 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 18014 rack->r_ctl.rc_gp_cumack_ts = 0; 18015 if ((rack->r_ctl.cleared_app_ack == 1) && 18016 (SEQ_GEQ(rack->r_ctl.cleared_app_ack, tp->gput_seq))) { 18017 /* 18018 * We just cleared an application limited period 18019 * so the next seq out needs to skip the first 18020 * ack. 18021 */ 18022 rack->app_limited_needs_set = 1; 18023 rack->r_ctl.cleared_app_ack = 0; 18024 } 18025 rack_log_pacing_delay_calc(rack, 18026 tp->gput_seq, 18027 tp->gput_ack, 18028 (uintptr_t)my_rsm, 18029 tp->gput_ts, 18030 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 18031 9, 18032 __LINE__, my_rsm, 0); 18033 /* Now lets make sure all are marked as they should be */ 18034 rack_tend_gp_marks(tp, rack); 18035 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 18036 return; 18037 } 18038 18039 use_latest: 18040 /* 18041 * We don't know how long we may have been 18042 * idle or if this is the first-send. Lets 18043 * setup the flag so we will trim off 18044 * the first ack'd data so we get a true 18045 * measurement. 18046 */ 18047 rack->app_limited_needs_set = 1; 18048 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 18049 rack->r_ctl.rc_gp_cumack_ts = 0; 18050 /* Find this guy so we can pull the send time */ 18051 my_rsm = tqhash_find(rack->r_ctl.tqh, startseq); 18052 if (my_rsm) { 18053 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; 18054 if (my_rsm->r_flags & RACK_ACKED) { 18055 /* 18056 * Unlikely since its probably what was 18057 * just transmitted (but I am paranoid). 18058 */ 18059 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 18060 rack->app_limited_needs_set = 0; 18061 } 18062 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 18063 /* This also is unlikely */ 18064 tp->gput_seq = my_rsm->r_start; 18065 } 18066 } else { 18067 /* 18068 * TSNH (this should not happen) unless we have some send-map limit, 18069 * and even at that it should not be hitting 18070 * that limit (we should have stopped sending).
18071 */ 18072 struct timeval tv; 18073 18074 microuptime(&tv); 18075 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 18076 } 18077 rack_tend_gp_marks(tp, rack); 18078 rack_log_pacing_delay_calc(rack, 18079 tp->gput_seq, 18080 tp->gput_ack, 18081 (uintptr_t)my_rsm, 18082 tp->gput_ts, 18083 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 18084 9, __LINE__, NULL, 0); 18085 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 18086 } 18087 18088 static inline uint32_t 18089 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 18090 uint32_t avail, int32_t sb_offset) 18091 { 18092 uint32_t len; 18093 uint32_t sendwin; 18094 18095 if (tp->snd_wnd > cwnd_to_use) 18096 sendwin = cwnd_to_use; 18097 else 18098 sendwin = tp->snd_wnd; 18099 if (ctf_outstanding(tp) >= tp->snd_wnd) { 18100 /* We never want to go over our peers rcv-window */ 18101 len = 0; 18102 } else { 18103 uint32_t flight; 18104 18105 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 18106 if (flight >= sendwin) { 18107 /* 18108 * We have in flight what we are allowed by cwnd (if 18109 * it was rwnd blocking it would have hit above out 18110 * >= tp->snd_wnd). 18111 */ 18112 return (0); 18113 } 18114 len = sendwin - flight; 18115 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 18116 /* We would send too much (beyond the rwnd) */ 18117 len = tp->snd_wnd - ctf_outstanding(tp); 18118 } 18119 if ((len + sb_offset) > avail) { 18120 /* 18121 * We don't have that much in the SB, how much is 18122 * there? 18123 */ 18124 len = avail - sb_offset; 18125 } 18126 } 18127 return (len); 18128 } 18129 18130 static void 18131 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 18132 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 18133 int rsm_is_null, int optlen, int line, uint16_t mode) 18134 { 18135 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 18136 union tcp_log_stackspecific log; 18137 struct timeval tv; 18138 18139 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18140 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18141 log.u_bbr.flex1 = error; 18142 log.u_bbr.flex2 = flags; 18143 log.u_bbr.flex3 = rsm_is_null; 18144 log.u_bbr.flex4 = ipoptlen; 18145 log.u_bbr.flex5 = tp->rcv_numsacks; 18146 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18147 log.u_bbr.flex7 = optlen; 18148 log.u_bbr.flex8 = rack->r_fsb_inited; 18149 log.u_bbr.applimited = rack->r_fast_output; 18150 log.u_bbr.bw_inuse = rack_get_bw(rack); 18151 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 18152 log.u_bbr.cwnd_gain = mode; 18153 log.u_bbr.pkts_out = orig_len; 18154 log.u_bbr.lt_epoch = len; 18155 log.u_bbr.delivered = line; 18156 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 18157 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18158 tcp_log_event(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 18159 len, &log, false, NULL, __func__, __LINE__, &tv); 18160 } 18161 } 18162 18163 18164 static struct mbuf * 18165 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 18166 struct rack_fast_send_blk *fsb, 18167 int32_t seglimit, int32_t segsize, int hw_tls) 18168 { 18169 #ifdef KERN_TLS 18170 struct ktls_session *tls, *ntls; 18171 #ifdef INVARIANTS 18172 struct mbuf *start; 18173 #endif 18174 #endif 18175 struct mbuf *m, *n, **np, *smb; 18176 struct mbuf *top; 18177 int32_t off, soff; 18178 int32_t len = *plen; 18179 int32_t fragsize; 18180 int32_t len_cp = 0; 18181 uint32_t mlen, 
frags; 18182 18183 soff = off = the_off; 18184 smb = m = the_m; 18185 np = ⊤ 18186 top = NULL; 18187 #ifdef KERN_TLS 18188 if (hw_tls && (m->m_flags & M_EXTPG)) 18189 tls = m->m_epg_tls; 18190 else 18191 tls = NULL; 18192 #ifdef INVARIANTS 18193 start = m; 18194 #endif 18195 #endif 18196 while (len > 0) { 18197 if (m == NULL) { 18198 *plen = len_cp; 18199 break; 18200 } 18201 #ifdef KERN_TLS 18202 if (hw_tls) { 18203 if (m->m_flags & M_EXTPG) 18204 ntls = m->m_epg_tls; 18205 else 18206 ntls = NULL; 18207 18208 /* 18209 * Avoid mixing TLS records with handshake 18210 * data or TLS records from different 18211 * sessions. 18212 */ 18213 if (tls != ntls) { 18214 MPASS(m != start); 18215 *plen = len_cp; 18216 break; 18217 } 18218 } 18219 #endif 18220 mlen = min(len, m->m_len - off); 18221 if (seglimit) { 18222 /* 18223 * For M_EXTPG mbufs, add 3 segments 18224 * + 1 in case we are crossing page boundaries 18225 * + 2 in case the TLS hdr/trailer are used 18226 * It is cheaper to just add the segments 18227 * than it is to take the cache miss to look 18228 * at the mbuf ext_pgs state in detail. 18229 */ 18230 if (m->m_flags & M_EXTPG) { 18231 fragsize = min(segsize, PAGE_SIZE); 18232 frags = 3; 18233 } else { 18234 fragsize = segsize; 18235 frags = 0; 18236 } 18237 18238 /* Break if we really can't fit anymore. */ 18239 if ((frags + 1) >= seglimit) { 18240 *plen = len_cp; 18241 break; 18242 } 18243 18244 /* 18245 * Reduce size if you can't copy the whole 18246 * mbuf. If we can't copy the whole mbuf, also 18247 * adjust len so the loop will end after this 18248 * mbuf. 18249 */ 18250 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 18251 mlen = (seglimit - frags - 1) * fragsize; 18252 len = mlen; 18253 *plen = len_cp + len; 18254 } 18255 frags += howmany(mlen, fragsize); 18256 if (frags == 0) 18257 frags++; 18258 seglimit -= frags; 18259 KASSERT(seglimit > 0, 18260 ("%s: seglimit went too low", __func__)); 18261 } 18262 n = m_get(M_NOWAIT, m->m_type); 18263 *np = n; 18264 if (n == NULL) 18265 goto nospace; 18266 n->m_len = mlen; 18267 soff += mlen; 18268 len_cp += n->m_len; 18269 if (m->m_flags & (M_EXT | M_EXTPG)) { 18270 n->m_data = m->m_data + off; 18271 mb_dupcl(n, m); 18272 } else { 18273 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 18274 (u_int)n->m_len); 18275 } 18276 len -= n->m_len; 18277 off = 0; 18278 m = m->m_next; 18279 np = &n->m_next; 18280 if (len || (soff == smb->m_len)) { 18281 /* 18282 * We have more so we move forward or 18283 * we have consumed the entire mbuf and 18284 * len has fell to 0. 18285 */ 18286 soff = 0; 18287 smb = m; 18288 } 18289 18290 } 18291 if (fsb != NULL) { 18292 fsb->m = smb; 18293 fsb->off = soff; 18294 if (smb) { 18295 /* 18296 * Save off the size of the mbuf. We do 18297 * this so that we can recognize when it 18298 * has been trimmed by sbcut() as acks 18299 * come in. 18300 */ 18301 fsb->o_m_len = smb->m_len; 18302 fsb->o_t_len = M_TRAILINGROOM(smb); 18303 } else { 18304 /* 18305 * This is the case where the next mbuf went to NULL. This 18306 * means with this copy we have sent everything in the sb. 18307 * In theory we could clear the fast_output flag, but lets 18308 * not since its possible that we could get more added 18309 * and acks that call the extend function which would let 18310 * us send more. 
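*
* (When the walk ends because the next mbuf is NULL, the o_m_len and
* o_t_len snapshots below are simply zeroed; they are only meaningful
* while fsb->m still points at data in the socket buffer.)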
18311 */ 18312 fsb->o_m_len = 0; 18313 fsb->o_t_len = 0; 18314 } 18315 } 18316 return (top); 18317 nospace: 18318 if (top) 18319 m_freem(top); 18320 return (NULL); 18321 18322 } 18323 18324 /* 18325 * This is a copy of m_copym(), taking the TSO segment size/limit 18326 * constraints into account, and advancing the sndptr as it goes. 18327 */ 18328 static struct mbuf * 18329 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen, 18330 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff) 18331 { 18332 struct mbuf *m, *n; 18333 int32_t soff; 18334 18335 m = rack->r_ctl.fsb.m; 18336 if (M_TRAILINGROOM(m) != rack->r_ctl.fsb.o_t_len) { 18337 /* 18338 * The trailing space changed, mbufs can grow 18339 * at the tail but they can't shrink from 18340 * it, KASSERT that. Adjust the orig_m_len to 18341 * compensate for this change. 18342 */ 18343 KASSERT((rack->r_ctl.fsb.o_t_len > M_TRAILINGROOM(m)), 18344 ("mbuf:%p rack:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n", 18345 m, 18346 rack, 18347 (intmax_t)M_TRAILINGROOM(m), 18348 rack->r_ctl.fsb.o_t_len, 18349 rack->r_ctl.fsb.o_m_len, 18350 m->m_len)); 18351 rack->r_ctl.fsb.o_m_len += (rack->r_ctl.fsb.o_t_len - M_TRAILINGROOM(m)); 18352 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(m); 18353 } 18354 if (m->m_len < rack->r_ctl.fsb.o_m_len) { 18355 /* 18356 * Mbuf shrank, trimmed off the top by an ack, our 18357 * offset changes. 18358 */ 18359 KASSERT((rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len)), 18360 ("mbuf:%p len:%u rack:%p oml:%u soff:%u\n", 18361 m, m->m_len, 18362 rack, rack->r_ctl.fsb.o_m_len, 18363 rack->r_ctl.fsb.off)); 18364 18365 if (rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len- m->m_len)) 18366 rack->r_ctl.fsb.off -= (rack->r_ctl.fsb.o_m_len - m->m_len); 18367 else 18368 rack->r_ctl.fsb.off = 0; 18369 rack->r_ctl.fsb.o_m_len = m->m_len; 18370 #ifdef INVARIANTS 18371 } else if (m->m_len > rack->r_ctl.fsb.o_m_len) { 18372 panic("rack:%p m:%p m_len grew outside of t_space compensation", 18373 rack, m); 18374 #endif 18375 } 18376 soff = rack->r_ctl.fsb.off; 18377 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff)); 18378 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen)); 18379 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?", 18380 __FUNCTION__, 18381 rack, *plen, m, m->m_len)); 18382 /* Save off the right location before we copy and advance */ 18383 *s_soff = soff; 18384 *s_mb = rack->r_ctl.fsb.m; 18385 n = rack_fo_base_copym(m, soff, plen, 18386 &rack->r_ctl.fsb, 18387 seglimit, segsize, rack->r_ctl.fsb.hw_tls); 18388 return (n); 18389 } 18390 18391 /* Log the buffer level */ 18392 static void 18393 rack_log_queue_level(struct tcpcb *tp, struct tcp_rack *rack, 18394 int len, struct timeval *tv, 18395 uint32_t cts) 18396 { 18397 uint32_t p_rate = 0, p_queue = 0, err = 0; 18398 union tcp_log_stackspecific log; 18399 18400 #ifdef RATELIMIT 18401 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); 18402 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); 18403 #endif 18404 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18405 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18406 log.u_bbr.flex1 = p_rate; 18407 log.u_bbr.flex2 = p_queue; 18408 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; 18409 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; 18410 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; 18411 log.u_bbr.flex7 = 99; 18412 log.u_bbr.flex8 = 0; 18413 log.u_bbr.pkts_out = err; 18414 log.u_bbr.delRate = rack->r_ctl.crte->rate; 18415 log.u_bbr.timeStamp = 
cts;
18416 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
18417 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0,
18418 len, &log, false, NULL, __func__, __LINE__, tv);
18419
18420 }
18421
18422 static uint32_t
18423 rack_check_queue_level(struct tcp_rack *rack, struct tcpcb *tp,
18424 struct timeval *tv, uint32_t cts, int len, uint32_t segsiz)
18425 {
18426 uint64_t lentime = 0;
18427 #ifdef RATELIMIT
18428 uint32_t p_rate = 0, p_queue = 0, err;
18429 union tcp_log_stackspecific log;
18430 uint64_t bw;
18431
18432 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue);
18433 /* Failed or queue is zero */
18434 if (err || (p_queue == 0)) {
18435 lentime = 0;
18436 goto out;
18437 }
18438 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate);
18439 if (err) {
18440 lentime = 0;
18441 goto out;
18442 }
18443 /*
18444 * If we reach here we have some bytes in
18445 * the queue. The number returned is a value
18446 * between 0 and 0xffff where ffff is full
18447 * and 0 is empty. So how best to make this into
18448 * something usable?
18449 *
18450 * The "safer" way is to take the b/w obtained
18451 * from the query (which should be our b/w rate)
18452 * and pretend that a full send (our rc_pace_max_segs)
18453 * is outstanding. We factor it so it is as if a full
18454 * number of our MSS-sized segments, in terms of full
18455 * ethernet segments, were outstanding.
18456 */
18457 bw = p_rate / 8;
18458 if (bw) {
18459 lentime = (rack->r_ctl.rc_pace_max_segs / segsiz);
18460 lentime *= ETHERNET_SEGMENT_SIZE;
18461 lentime *= (uint64_t)HPTS_USEC_IN_SEC;
18462 lentime /= bw;
18463 } else {
18464 /* TSNH -- KASSERT? */
18465 lentime = 0;
18466 }
18467 out:
18468 if (tcp_bblogging_on(tp)) {
18469 memset(&log, 0, sizeof(log));
18470 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
18471 log.u_bbr.flex1 = p_rate;
18472 log.u_bbr.flex2 = p_queue;
18473 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using;
18474 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs;
18475 log.u_bbr.flex6 = rack->r_ctl.crte->time_between;
18476 log.u_bbr.flex7 = 99;
18477 log.u_bbr.flex8 = 0;
18478 log.u_bbr.pkts_out = err;
18479 log.u_bbr.delRate = rack->r_ctl.crte->rate;
18480 log.u_bbr.cur_del_rate = lentime;
18481 log.u_bbr.timeStamp = cts;
18482 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
18483 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0,
18484 len, &log, false, NULL, __func__, __LINE__, tv);
18485 }
18486 #endif
18487 return ((uint32_t)lentime);
18488 }
18489
18490 static int
18491 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm,
18492 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp)
18493 {
18494 /*
18495 * Enter the fast retransmit path. We are given that a sched_pin is
18496 * in place (if accounting is compiled in) and the cycle count taken
18497 * at the entry is in the ts_val. The concept here is that the rsm
18498 * now holds the mbuf offsets and such so we can directly transmit
18499 * without a lot of overhead; the len field is already set for
18500 * us to prohibit us from sending too much (usually it is 1 MSS).
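*
* In practice that means: rsm->m and rsm->soff give the data position,
* the prebuilt ip/tcp header template in rack->r_ctl.fsb is copied in
* as-is, and only the per-send fields (sequence numbers, window,
* options and checksums) are filled in below, which keeps this path
* much cheaper than the full rack_output() slow path.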
18501 */ 18502 struct ip *ip = NULL; 18503 struct udphdr *udp = NULL; 18504 struct tcphdr *th = NULL; 18505 struct mbuf *m = NULL; 18506 struct inpcb *inp; 18507 uint8_t *cpto; 18508 struct tcp_log_buffer *lgb; 18509 #ifdef TCP_ACCOUNTING 18510 uint64_t crtsc; 18511 int cnt_thru = 1; 18512 #endif 18513 struct tcpopt to; 18514 u_char opt[TCP_MAXOLEN]; 18515 uint32_t hdrlen, optlen; 18516 int32_t slot, segsiz, max_val, tso = 0, error = 0, ulen = 0; 18517 uint16_t flags; 18518 uint32_t if_hw_tsomaxsegcount = 0, startseq; 18519 uint32_t if_hw_tsomaxsegsize; 18520 int32_t ip_sendflag = IP_NO_SND_TAG_RL; 18521 18522 #ifdef INET6 18523 struct ip6_hdr *ip6 = NULL; 18524 18525 if (rack->r_is_v6) { 18526 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 18527 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 18528 } else 18529 #endif /* INET6 */ 18530 { 18531 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 18532 hdrlen = sizeof(struct tcpiphdr); 18533 } 18534 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 18535 goto failed; 18536 } 18537 if (doing_tlp) { 18538 /* Its a TLP add the flag, it may already be there but be sure */ 18539 rsm->r_flags |= RACK_TLP; 18540 } else { 18541 /* If it was a TLP it is not not on this retransmit */ 18542 rsm->r_flags &= ~RACK_TLP; 18543 } 18544 startseq = rsm->r_start; 18545 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 18546 inp = rack->rc_inp; 18547 to.to_flags = 0; 18548 flags = tcp_outflags[tp->t_state]; 18549 if (flags & (TH_SYN|TH_RST)) { 18550 goto failed; 18551 } 18552 if (rsm->r_flags & RACK_HAS_FIN) { 18553 /* We can't send a FIN here */ 18554 goto failed; 18555 } 18556 if (flags & TH_FIN) { 18557 /* We never send a FIN */ 18558 flags &= ~TH_FIN; 18559 } 18560 if (tp->t_flags & TF_RCVD_TSTMP) { 18561 to.to_tsval = ms_cts + tp->ts_offset; 18562 to.to_tsecr = tp->ts_recent; 18563 to.to_flags = TOF_TS; 18564 } 18565 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18566 /* TCP-MD5 (RFC2385). 
*/ 18567 if (tp->t_flags & TF_SIGNATURE) 18568 to.to_flags |= TOF_SIGNATURE; 18569 #endif 18570 optlen = tcp_addoptions(&to, opt); 18571 hdrlen += optlen; 18572 udp = rack->r_ctl.fsb.udp; 18573 if (udp) 18574 hdrlen += sizeof(struct udphdr); 18575 if (rack->r_ctl.rc_pace_max_segs) 18576 max_val = rack->r_ctl.rc_pace_max_segs; 18577 else if (rack->rc_user_set_max_segs) 18578 max_val = rack->rc_user_set_max_segs * segsiz; 18579 else 18580 max_val = len; 18581 if ((tp->t_flags & TF_TSO) && 18582 V_tcp_do_tso && 18583 (len > segsiz) && 18584 (tp->t_port == 0)) 18585 tso = 1; 18586 #ifdef INET6 18587 if (MHLEN < hdrlen + max_linkhdr) 18588 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 18589 else 18590 #endif 18591 m = m_gethdr(M_NOWAIT, MT_DATA); 18592 if (m == NULL) 18593 goto failed; 18594 m->m_data += max_linkhdr; 18595 m->m_len = hdrlen; 18596 th = rack->r_ctl.fsb.th; 18597 /* Establish the len to send */ 18598 if (len > max_val) 18599 len = max_val; 18600 if ((tso) && (len + optlen > segsiz)) { 18601 uint32_t if_hw_tsomax; 18602 int32_t max_len; 18603 18604 /* extract TSO information */ 18605 if_hw_tsomax = tp->t_tsomax; 18606 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 18607 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 18608 /* 18609 * Check if we should limit by maximum payload 18610 * length: 18611 */ 18612 if (if_hw_tsomax != 0) { 18613 /* compute maximum TSO length */ 18614 max_len = (if_hw_tsomax - hdrlen - 18615 max_linkhdr); 18616 if (max_len <= 0) { 18617 goto failed; 18618 } else if (len > max_len) { 18619 len = max_len; 18620 } 18621 } 18622 if (len <= segsiz) { 18623 /* 18624 * In case there are too many small fragments don't 18625 * use TSO: 18626 */ 18627 tso = 0; 18628 } 18629 } else { 18630 tso = 0; 18631 } 18632 if ((tso == 0) && (len > segsiz)) 18633 len = segsiz; 18634 (void)tcp_get_usecs(tv); 18635 if ((len == 0) || 18636 (len <= MHLEN - hdrlen - max_linkhdr)) { 18637 goto failed; 18638 } 18639 th->th_seq = htonl(rsm->r_start); 18640 th->th_ack = htonl(tp->rcv_nxt); 18641 /* 18642 * The PUSH bit should only be applied 18643 * if the full retransmission is made. If 18644 * we are sending less than this is the 18645 * left hand edge and should not have 18646 * the PUSH bit. 
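*
* (In other words PUSH is only set below when len covers the whole
* rsm, i.e. len == rsm->r_end - rsm->r_start; a partial retransmit of
* the left hand edge leaves it clear.)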
18647 */ 18648 if ((rsm->r_flags & RACK_HAD_PUSH) && 18649 (len == (rsm->r_end - rsm->r_start))) 18650 flags |= TH_PUSH; 18651 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 18652 if (th->th_win == 0) { 18653 tp->t_sndzerowin++; 18654 tp->t_flags |= TF_RXWIN0SENT; 18655 } else 18656 tp->t_flags &= ~TF_RXWIN0SENT; 18657 if (rsm->r_flags & RACK_TLP) { 18658 /* 18659 * TLP should not count in retran count, but 18660 * in its own bin 18661 */ 18662 counter_u64_add(rack_tlp_retran, 1); 18663 counter_u64_add(rack_tlp_retran_bytes, len); 18664 } else { 18665 tp->t_sndrexmitpack++; 18666 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 18667 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 18668 } 18669 #ifdef STATS 18670 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 18671 len); 18672 #endif 18673 if (rsm->m == NULL) 18674 goto failed; 18675 if (rsm->m && 18676 ((rsm->orig_m_len != rsm->m->m_len) || 18677 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { 18678 /* Fix up the orig_m_len and possibly the mbuf offset */ 18679 rack_adjust_orig_mlen(rsm); 18680 } 18681 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 18682 if (len <= segsiz) { 18683 /* 18684 * Must have ran out of mbufs for the copy 18685 * shorten it to no longer need tso. Lets 18686 * not put on sendalot since we are low on 18687 * mbufs. 18688 */ 18689 tso = 0; 18690 } 18691 if ((m->m_next == NULL) || (len <= 0)){ 18692 goto failed; 18693 } 18694 if (udp) { 18695 if (rack->r_is_v6) 18696 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18697 else 18698 ulen = hdrlen + len - sizeof(struct ip); 18699 udp->uh_ulen = htons(ulen); 18700 } 18701 m->m_pkthdr.rcvif = (struct ifnet *)0; 18702 if (TCPS_HAVERCVDSYN(tp->t_state) && 18703 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 18704 int ect = tcp_ecn_output_established(tp, &flags, len, true); 18705 if ((tp->t_state == TCPS_SYN_RECEIVED) && 18706 (tp->t_flags2 & TF2_ECN_SND_ECE)) 18707 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 18708 #ifdef INET6 18709 if (rack->r_is_v6) { 18710 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 18711 ip6->ip6_flow |= htonl(ect << 20); 18712 } 18713 else 18714 #endif 18715 { 18716 ip->ip_tos &= ~IPTOS_ECN_MASK; 18717 ip->ip_tos |= ect; 18718 } 18719 } 18720 if (rack->r_ctl.crte != NULL) { 18721 /* See if we can send via the hw queue */ 18722 slot = rack_check_queue_level(rack, tp, tv, cts, len, segsiz); 18723 /* If there is nothing in queue (no pacing time) we can send via the hw queue */ 18724 if (slot == 0) 18725 ip_sendflag = 0; 18726 } 18727 tcp_set_flags(th, flags); 18728 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 18729 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18730 if (to.to_flags & TOF_SIGNATURE) { 18731 /* 18732 * Calculate MD5 signature and put it into the place 18733 * determined before. 18734 * NOTE: since TCP options buffer doesn't point into 18735 * mbuf's data, calculate offset and use it. 18736 */ 18737 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 18738 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 18739 /* 18740 * Do not send segment if the calculation of MD5 18741 * digest has failed. 
18742 */ 18743 goto failed; 18744 } 18745 } 18746 #endif 18747 #ifdef INET6 18748 if (rack->r_is_v6) { 18749 if (tp->t_port) { 18750 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 18751 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18752 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 18753 th->th_sum = htons(0); 18754 UDPSTAT_INC(udps_opackets); 18755 } else { 18756 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 18757 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18758 th->th_sum = in6_cksum_pseudo(ip6, 18759 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 18760 0); 18761 } 18762 } 18763 #endif 18764 #if defined(INET6) && defined(INET) 18765 else 18766 #endif 18767 #ifdef INET 18768 { 18769 if (tp->t_port) { 18770 m->m_pkthdr.csum_flags = CSUM_UDP; 18771 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18772 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 18773 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 18774 th->th_sum = htons(0); 18775 UDPSTAT_INC(udps_opackets); 18776 } else { 18777 m->m_pkthdr.csum_flags = CSUM_TCP; 18778 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18779 th->th_sum = in_pseudo(ip->ip_src.s_addr, 18780 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 18781 IPPROTO_TCP + len + optlen)); 18782 } 18783 /* IP version must be set here for ipv4/ipv6 checking later */ 18784 KASSERT(ip->ip_v == IPVERSION, 18785 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 18786 } 18787 #endif 18788 if (tso) { 18789 /* 18790 * Here we use segsiz since we have no added options besides 18791 * any standard timestamp options (no DSACKs or SACKS are sent 18792 * via either fast-path). 18793 */ 18794 KASSERT(len > segsiz, 18795 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 18796 m->m_pkthdr.csum_flags |= CSUM_TSO; 18797 m->m_pkthdr.tso_segsz = segsiz; 18798 } 18799 #ifdef INET6 18800 if (rack->r_is_v6) { 18801 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 18802 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 18803 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 18804 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18805 else 18806 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18807 } 18808 #endif 18809 #if defined(INET) && defined(INET6) 18810 else 18811 #endif 18812 #ifdef INET 18813 { 18814 ip->ip_len = htons(m->m_pkthdr.len); 18815 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 18816 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 18817 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18818 if (tp->t_port == 0 || len < V_tcp_minmss) { 18819 ip->ip_off |= htons(IP_DF); 18820 } 18821 } else { 18822 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18823 } 18824 } 18825 #endif 18826 if (doing_tlp == 0) { 18827 /* Set we retransmitted */ 18828 rack->rc_gp_saw_rec = 1; 18829 } else { 18830 /* Its a TLP set ca or ss */ 18831 if (tp->snd_cwnd > tp->snd_ssthresh) { 18832 /* Set we sent in CA */ 18833 rack->rc_gp_saw_ca = 1; 18834 } else { 18835 /* Set we sent in SS */ 18836 rack->rc_gp_saw_ss = 1; 18837 } 18838 } 18839 /* Time to copy in our header */ 18840 cpto = mtod(m, uint8_t *); 18841 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 18842 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 18843 if (optlen) { 18844 bcopy(opt, th + 1, optlen); 18845 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 18846 } else { 18847 th->th_off = sizeof(struct tcphdr) >> 2; 18848 } 18849 if (tcp_bblogging_on(rack->rc_tp)) { 18850 union tcp_log_stackspecific log; 18851 18852 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 18853 
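/*
 * This retransmission covers data that was sent into a receive
 * window which later collapsed; log it and account for it in the
 * collapsed-window retransmit counters.
 */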
rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 18854 counter_u64_add(rack_collapsed_win_rxt, 1); 18855 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 18856 } 18857 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18858 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18859 if (rack->rack_no_prr) 18860 log.u_bbr.flex1 = 0; 18861 else 18862 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 18863 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 18864 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 18865 log.u_bbr.flex4 = max_val; 18866 /* Save off the early/late values */ 18867 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18868 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 18869 log.u_bbr.bw_inuse = rack_get_bw(rack); 18870 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 18871 if (doing_tlp == 0) 18872 log.u_bbr.flex8 = 1; 18873 else 18874 log.u_bbr.flex8 = 2; 18875 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 18876 log.u_bbr.flex7 = 55; 18877 log.u_bbr.pkts_out = tp->t_maxseg; 18878 log.u_bbr.timeStamp = cts; 18879 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18880 if (rsm && (rsm->r_rtr_cnt > 0)) { 18881 /* 18882 * When we have a retransmit we want to log the 18883 * burst at send and flight at send from before. 18884 */ 18885 log.u_bbr.flex5 = rsm->r_fas; 18886 log.u_bbr.bbr_substate = rsm->r_bas; 18887 } else { 18888 /* 18889 * This is currently unlikely until we do the 18890 * packet pair probes but I will add it for completeness. 18891 */ 18892 log.u_bbr.flex5 = log.u_bbr.inflight; 18893 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 18894 } 18895 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 18896 log.u_bbr.delivered = 0; 18897 log.u_bbr.rttProp = (uintptr_t)rsm; 18898 log.u_bbr.delRate = rsm->r_flags; 18899 log.u_bbr.delRate <<= 31; 18900 log.u_bbr.delRate |= rack->r_must_retran; 18901 log.u_bbr.delRate <<= 1; 18902 log.u_bbr.delRate |= 1; 18903 log.u_bbr.pkt_epoch = __LINE__; 18904 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 18905 len, &log, false, NULL, __func__, __LINE__, tv); 18906 } else 18907 lgb = NULL; 18908 if ((rack->r_ctl.crte != NULL) && 18909 tcp_bblogging_on(tp)) { 18910 rack_log_queue_level(tp, rack, len, tv, cts); 18911 } 18912 #ifdef INET6 18913 if (rack->r_is_v6) { 18914 error = ip6_output(m, inp->in6p_outputopts, 18915 &inp->inp_route6, 18916 ip_sendflag, NULL, NULL, inp); 18917 } 18918 else 18919 #endif 18920 #ifdef INET 18921 { 18922 error = ip_output(m, NULL, 18923 &inp->inp_route, 18924 ip_sendflag, 0, inp); 18925 } 18926 #endif 18927 m = NULL; 18928 if (lgb) { 18929 lgb->tlb_errno = error; 18930 lgb = NULL; 18931 } 18932 /* Move snd_nxt to snd_max so we don't have false retransmissions */ 18933 tp->snd_nxt = tp->snd_max; 18934 if (error) { 18935 goto failed; 18936 } else if (rack->rc_hw_nobuf && (ip_sendflag != IP_NO_SND_TAG_RL)) { 18937 rack->rc_hw_nobuf = 0; 18938 rack->r_ctl.rc_agg_delayed = 0; 18939 rack->r_early = 0; 18940 rack->r_late = 0; 18941 rack->r_ctl.rc_agg_early = 0; 18942 } 18943 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 18944 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls, segsiz); 18945 if (doing_tlp) { 18946 rack->rc_tlp_in_progress = 1; 18947 rack->r_ctl.rc_tlp_cnt_out++; 18948 } 18949 if (error == 0) { 18950 counter_u64_add(rack_total_bytes, len); 18951 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 18952 if (doing_tlp) { 18953 
rack->rc_last_sent_tlp_past_cumack = 0; 18954 rack->rc_last_sent_tlp_seq_valid = 1; 18955 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 18956 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 18957 } 18958 if (rack->r_ctl.rc_prr_sndcnt >= len) 18959 rack->r_ctl.rc_prr_sndcnt -= len; 18960 else 18961 rack->r_ctl.rc_prr_sndcnt = 0; 18962 } 18963 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 18964 rack->forced_ack = 0; /* If we send something zap the FA flag */ 18965 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 18966 rack->r_ctl.retran_during_recovery += len; 18967 { 18968 int idx; 18969 18970 idx = (len / segsiz) + 3; 18971 if (idx >= TCP_MSS_ACCT_ATIMER) 18972 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 18973 else 18974 counter_u64_add(rack_out_size[idx], 1); 18975 } 18976 if (tp->t_rtttime == 0) { 18977 tp->t_rtttime = ticks; 18978 tp->t_rtseq = startseq; 18979 KMOD_TCPSTAT_INC(tcps_segstimed); 18980 } 18981 counter_u64_add(rack_fto_rsm_send, 1); 18982 if (error && (error == ENOBUFS)) { 18983 if (rack->r_ctl.crte != NULL) { 18984 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); 18985 if (tcp_bblogging_on(rack->rc_tp)) 18986 rack_log_queue_level(tp, rack, len, tv, cts); 18987 } else 18988 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); 18989 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 18990 if (rack->rc_enobuf < 0x7f) 18991 rack->rc_enobuf++; 18992 if (slot < (10 * HPTS_USEC_IN_MSEC)) 18993 slot = 10 * HPTS_USEC_IN_MSEC; 18994 if (rack->r_ctl.crte != NULL) { 18995 counter_u64_add(rack_saw_enobuf_hw, 1); 18996 tcp_rl_log_enobuf(rack->r_ctl.crte); 18997 } 18998 counter_u64_add(rack_saw_enobuf, 1); 18999 } else { 19000 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz, __LINE__); 19001 } 19002 rack_start_hpts_timer(rack, tp, cts, slot, len, 0); 19003 #ifdef TCP_ACCOUNTING 19004 crtsc = get_cyclecount(); 19005 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19006 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 19007 } 19008 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19009 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 19010 } 19011 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19012 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); 19013 } 19014 sched_unpin(); 19015 #endif 19016 return (0); 19017 failed: 19018 if (m) 19019 m_free(m); 19020 return (-1); 19021 } 19022 19023 static void 19024 rack_sndbuf_autoscale(struct tcp_rack *rack) 19025 { 19026 /* 19027 * Automatic sizing of send socket buffer. Often the send buffer 19028 * size is not optimally adjusted to the actual network conditions 19029 * at hand (delay bandwidth product). Setting the buffer size too 19030 * small limits throughput on links with high bandwidth and high 19031 * delay (eg. trans-continental/oceanic links). Setting the 19032 * buffer size too big consumes too much real kernel memory, 19033 * especially with many connections on busy servers. 19034 * 19035 * The criteria to step up the send buffer one notch are: 19036 * 1. receive window of remote host is larger than send buffer 19037 * (with a fudge factor of 5/4th); 19038 * 2. send buffer is filled to 7/8th with data (so we actually 19039 * have data to make use of it); 19040 * 3. send buffer fill has not hit maximal automatic size; 19041 * 4. our send window (slow start and cogestion controlled) is 19042 * larger than sent but unacknowledged data in send buffer. 
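*
* As a purely illustrative example (hypothetical numbers): with
* rack_autosndbuf_inc set to 20 (percent) and a 64KB sb_hiwat, the
* code below computes scaleup = 20 * 64KB / 100 (about 13KB), raises
* it to at least V_tcp_autosndbuf_inc, adds the current sb_hiwat for
* a target of roughly 77KB, and finally clamps the result to
* V_tcp_autosndbuf_max before calling sbreserve_locked().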
19043 * 19044 * Note that the rack version moves things much faster since 19045 * we want to avoid hitting cache lines in the rack_fast_output() 19046 * path so this is called much less often and thus moves 19047 * the SB forward by a percentage. 19048 */ 19049 struct socket *so; 19050 struct tcpcb *tp; 19051 uint32_t sendwin, scaleup; 19052 19053 tp = rack->rc_tp; 19054 so = rack->rc_inp->inp_socket; 19055 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); 19056 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { 19057 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && 19058 sbused(&so->so_snd) >= 19059 (so->so_snd.sb_hiwat / 8 * 7) && 19060 sbused(&so->so_snd) < V_tcp_autosndbuf_max && 19061 sendwin >= (sbused(&so->so_snd) - 19062 (tp->snd_max - tp->snd_una))) { 19063 if (rack_autosndbuf_inc) 19064 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; 19065 else 19066 scaleup = V_tcp_autosndbuf_inc; 19067 if (scaleup < V_tcp_autosndbuf_inc) 19068 scaleup = V_tcp_autosndbuf_inc; 19069 scaleup += so->so_snd.sb_hiwat; 19070 if (scaleup > V_tcp_autosndbuf_max) 19071 scaleup = V_tcp_autosndbuf_max; 19072 if (!sbreserve_locked(so, SO_SND, scaleup, curthread)) 19073 so->so_snd.sb_flags &= ~SB_AUTOSIZE; 19074 } 19075 } 19076 } 19077 19078 static int 19079 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val, 19080 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err) 19081 { 19082 /* 19083 * Enter to do fast output. We are given that the sched_pin is 19084 * in place (if accounting is compiled in) and the cycle count taken 19085 * at entry is in place in ts_val. The idea here is that 19086 * we know how many more bytes needs to be sent (presumably either 19087 * during pacing or to fill the cwnd and that was greater than 19088 * the max-burst). We have how much to send and all the info we 19089 * need to just send. 
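*
* Concretely, the code below reuses the prebuilt header template in
* rack->r_ctl.fsb, pulls at most fsb.left_to_send new bytes out of
* the socket buffer with rack_fo_m_copym(), and loops via the again:
* label while at least a full segment remains (when TSO is not in
* use); anything it cannot handle makes it fail back to the regular
* rack_output() path.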
19090 */ 19091 #ifdef INET 19092 struct ip *ip = NULL; 19093 #endif 19094 struct udphdr *udp = NULL; 19095 struct tcphdr *th = NULL; 19096 struct mbuf *m, *s_mb; 19097 struct inpcb *inp; 19098 uint8_t *cpto; 19099 struct tcp_log_buffer *lgb; 19100 #ifdef TCP_ACCOUNTING 19101 uint64_t crtsc; 19102 #endif 19103 struct tcpopt to; 19104 u_char opt[TCP_MAXOLEN]; 19105 uint32_t hdrlen, optlen; 19106 #ifdef TCP_ACCOUNTING 19107 int cnt_thru = 1; 19108 #endif 19109 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0; 19110 uint16_t flags; 19111 uint32_t s_soff; 19112 uint32_t if_hw_tsomaxsegcount = 0, startseq; 19113 uint32_t if_hw_tsomaxsegsize; 19114 uint32_t add_flag = RACK_SENT_FP; 19115 #ifdef INET6 19116 struct ip6_hdr *ip6 = NULL; 19117 19118 if (rack->r_is_v6) { 19119 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 19120 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 19121 } else 19122 #endif /* INET6 */ 19123 { 19124 #ifdef INET 19125 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 19126 hdrlen = sizeof(struct tcpiphdr); 19127 #endif 19128 } 19129 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 19130 m = NULL; 19131 goto failed; 19132 } 19133 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 19134 startseq = tp->snd_max; 19135 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 19136 inp = rack->rc_inp; 19137 len = rack->r_ctl.fsb.left_to_send; 19138 to.to_flags = 0; 19139 flags = rack->r_ctl.fsb.tcp_flags; 19140 if (tp->t_flags & TF_RCVD_TSTMP) { 19141 to.to_tsval = ms_cts + tp->ts_offset; 19142 to.to_tsecr = tp->ts_recent; 19143 to.to_flags = TOF_TS; 19144 } 19145 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 19146 /* TCP-MD5 (RFC2385). */ 19147 if (tp->t_flags & TF_SIGNATURE) 19148 to.to_flags |= TOF_SIGNATURE; 19149 #endif 19150 optlen = tcp_addoptions(&to, opt); 19151 hdrlen += optlen; 19152 udp = rack->r_ctl.fsb.udp; 19153 if (udp) 19154 hdrlen += sizeof(struct udphdr); 19155 if (rack->r_ctl.rc_pace_max_segs) 19156 max_val = rack->r_ctl.rc_pace_max_segs; 19157 else if (rack->rc_user_set_max_segs) 19158 max_val = rack->rc_user_set_max_segs * segsiz; 19159 else 19160 max_val = len; 19161 if ((tp->t_flags & TF_TSO) && 19162 V_tcp_do_tso && 19163 (len > segsiz) && 19164 (tp->t_port == 0)) 19165 tso = 1; 19166 again: 19167 #ifdef INET6 19168 if (MHLEN < hdrlen + max_linkhdr) 19169 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 19170 else 19171 #endif 19172 m = m_gethdr(M_NOWAIT, MT_DATA); 19173 if (m == NULL) 19174 goto failed; 19175 m->m_data += max_linkhdr; 19176 m->m_len = hdrlen; 19177 th = rack->r_ctl.fsb.th; 19178 /* Establish the len to send */ 19179 if (len > max_val) 19180 len = max_val; 19181 if ((tso) && (len + optlen > segsiz)) { 19182 uint32_t if_hw_tsomax; 19183 int32_t max_len; 19184 19185 /* extract TSO information */ 19186 if_hw_tsomax = tp->t_tsomax; 19187 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 19188 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 19189 /* 19190 * Check if we should limit by maximum payload 19191 * length: 19192 */ 19193 if (if_hw_tsomax != 0) { 19194 /* compute maximum TSO length */ 19195 max_len = (if_hw_tsomax - hdrlen - 19196 max_linkhdr); 19197 if (max_len <= 0) { 19198 goto failed; 19199 } else if (len > max_len) { 19200 len = max_len; 19201 } 19202 } 19203 if (len <= segsiz) { 19204 /* 19205 * In case there are too many small fragments don't 19206 * use TSO: 19207 */ 19208 tso = 0; 19209 } 19210 } else { 19211 tso = 0; 19212 } 19213 if ((tso == 0) && (len > segsiz)) 19214 len = segsiz; 19215 
(void)tcp_get_usecs(tv); 19216 if ((len == 0) || 19217 (len <= MHLEN - hdrlen - max_linkhdr)) { 19218 goto failed; 19219 } 19220 sb_offset = tp->snd_max - tp->snd_una; 19221 th->th_seq = htonl(tp->snd_max); 19222 th->th_ack = htonl(tp->rcv_nxt); 19223 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 19224 if (th->th_win == 0) { 19225 tp->t_sndzerowin++; 19226 tp->t_flags |= TF_RXWIN0SENT; 19227 } else 19228 tp->t_flags &= ~TF_RXWIN0SENT; 19229 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 19230 KMOD_TCPSTAT_INC(tcps_sndpack); 19231 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 19232 #ifdef STATS 19233 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 19234 len); 19235 #endif 19236 if (rack->r_ctl.fsb.m == NULL) 19237 goto failed; 19238 19239 /* s_mb and s_soff are saved for rack_log_output */ 19240 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 19241 &s_mb, &s_soff); 19242 if (len <= segsiz) { 19243 /* 19244 * Must have ran out of mbufs for the copy 19245 * shorten it to no longer need tso. Lets 19246 * not put on sendalot since we are low on 19247 * mbufs. 19248 */ 19249 tso = 0; 19250 } 19251 if (rack->r_ctl.fsb.rfo_apply_push && 19252 (len == rack->r_ctl.fsb.left_to_send)) { 19253 tcp_set_flags(th, flags | TH_PUSH); 19254 add_flag |= RACK_HAD_PUSH; 19255 } 19256 if ((m->m_next == NULL) || (len <= 0)){ 19257 goto failed; 19258 } 19259 if (udp) { 19260 if (rack->r_is_v6) 19261 ulen = hdrlen + len - sizeof(struct ip6_hdr); 19262 else 19263 ulen = hdrlen + len - sizeof(struct ip); 19264 udp->uh_ulen = htons(ulen); 19265 } 19266 m->m_pkthdr.rcvif = (struct ifnet *)0; 19267 if (TCPS_HAVERCVDSYN(tp->t_state) && 19268 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 19269 int ect = tcp_ecn_output_established(tp, &flags, len, false); 19270 if ((tp->t_state == TCPS_SYN_RECEIVED) && 19271 (tp->t_flags2 & TF2_ECN_SND_ECE)) 19272 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 19273 #ifdef INET6 19274 if (rack->r_is_v6) { 19275 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 19276 ip6->ip6_flow |= htonl(ect << 20); 19277 } 19278 else 19279 #endif 19280 { 19281 #ifdef INET 19282 ip->ip_tos &= ~IPTOS_ECN_MASK; 19283 ip->ip_tos |= ect; 19284 #endif 19285 } 19286 } 19287 tcp_set_flags(th, flags); 19288 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 19289 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 19290 if (to.to_flags & TOF_SIGNATURE) { 19291 /* 19292 * Calculate MD5 signature and put it into the place 19293 * determined before. 19294 * NOTE: since TCP options buffer doesn't point into 19295 * mbuf's data, calculate offset and use it. 19296 */ 19297 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 19298 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 19299 /* 19300 * Do not send segment if the calculation of MD5 19301 * digest has failed. 
19302 */ 19303 goto failed; 19304 } 19305 } 19306 #endif 19307 #ifdef INET6 19308 if (rack->r_is_v6) { 19309 if (tp->t_port) { 19310 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 19311 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 19312 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 19313 th->th_sum = htons(0); 19314 UDPSTAT_INC(udps_opackets); 19315 } else { 19316 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 19317 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 19318 th->th_sum = in6_cksum_pseudo(ip6, 19319 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 19320 0); 19321 } 19322 } 19323 #endif 19324 #if defined(INET6) && defined(INET) 19325 else 19326 #endif 19327 #ifdef INET 19328 { 19329 if (tp->t_port) { 19330 m->m_pkthdr.csum_flags = CSUM_UDP; 19331 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 19332 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 19333 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 19334 th->th_sum = htons(0); 19335 UDPSTAT_INC(udps_opackets); 19336 } else { 19337 m->m_pkthdr.csum_flags = CSUM_TCP; 19338 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 19339 th->th_sum = in_pseudo(ip->ip_src.s_addr, 19340 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 19341 IPPROTO_TCP + len + optlen)); 19342 } 19343 /* IP version must be set here for ipv4/ipv6 checking later */ 19344 KASSERT(ip->ip_v == IPVERSION, 19345 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 19346 } 19347 #endif 19348 if (tso) { 19349 /* 19350 * Here we use segsiz since we have no added options besides 19351 * any standard timestamp options (no DSACKs or SACKS are sent 19352 * via either fast-path). 19353 */ 19354 KASSERT(len > segsiz, 19355 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 19356 m->m_pkthdr.csum_flags |= CSUM_TSO; 19357 m->m_pkthdr.tso_segsz = segsiz; 19358 } 19359 #ifdef INET6 19360 if (rack->r_is_v6) { 19361 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 19362 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 19363 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 19364 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 19365 else 19366 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 19367 } 19368 #endif 19369 #if defined(INET) && defined(INET6) 19370 else 19371 #endif 19372 #ifdef INET 19373 { 19374 ip->ip_len = htons(m->m_pkthdr.len); 19375 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 19376 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 19377 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 19378 if (tp->t_port == 0 || len < V_tcp_minmss) { 19379 ip->ip_off |= htons(IP_DF); 19380 } 19381 } else { 19382 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 19383 } 19384 } 19385 #endif 19386 if (tp->snd_cwnd > tp->snd_ssthresh) { 19387 /* Set we sent in CA */ 19388 rack->rc_gp_saw_ca = 1; 19389 } else { 19390 /* Set we sent in SS */ 19391 rack->rc_gp_saw_ss = 1; 19392 } 19393 /* Time to copy in our header */ 19394 cpto = mtod(m, uint8_t *); 19395 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 19396 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 19397 if (optlen) { 19398 bcopy(opt, th + 1, optlen); 19399 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 19400 } else { 19401 th->th_off = sizeof(struct tcphdr) >> 2; 19402 } 19403 if ((rack->r_ctl.crte != NULL) && 19404 tcp_bblogging_on(tp)) { 19405 rack_log_queue_level(tp, rack, len, tv, cts); 19406 } 19407 if (tcp_bblogging_on(rack->rc_tp)) { 19408 union tcp_log_stackspecific log; 19409 19410 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 19411 log.u_bbr.inhpts = 
tcp_in_hpts(rack->rc_tp); 19412 if (rack->rack_no_prr) 19413 log.u_bbr.flex1 = 0; 19414 else 19415 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 19416 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 19417 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 19418 log.u_bbr.flex4 = max_val; 19419 /* Save off the early/late values */ 19420 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 19421 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 19422 log.u_bbr.bw_inuse = rack_get_bw(rack); 19423 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 19424 log.u_bbr.flex8 = 0; 19425 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 19426 log.u_bbr.flex7 = 44; 19427 log.u_bbr.pkts_out = tp->t_maxseg; 19428 log.u_bbr.timeStamp = cts; 19429 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 19430 log.u_bbr.flex5 = log.u_bbr.inflight; 19431 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 19432 log.u_bbr.delivered = 0; 19433 log.u_bbr.rttProp = 0; 19434 log.u_bbr.delRate = rack->r_must_retran; 19435 log.u_bbr.delRate <<= 1; 19436 log.u_bbr.pkt_epoch = __LINE__; 19437 /* For fast output no retrans so just inflight and how many mss we send */ 19438 log.u_bbr.flex5 = log.u_bbr.inflight; 19439 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 19440 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 19441 len, &log, false, NULL, __func__, __LINE__, tv); 19442 } else 19443 lgb = NULL; 19444 #ifdef INET6 19445 if (rack->r_is_v6) { 19446 error = ip6_output(m, inp->in6p_outputopts, 19447 &inp->inp_route6, 19448 0, NULL, NULL, inp); 19449 } 19450 #endif 19451 #if defined(INET) && defined(INET6) 19452 else 19453 #endif 19454 #ifdef INET 19455 { 19456 error = ip_output(m, NULL, 19457 &inp->inp_route, 19458 0, 0, inp); 19459 } 19460 #endif 19461 if (lgb) { 19462 lgb->tlb_errno = error; 19463 lgb = NULL; 19464 } 19465 if (error) { 19466 *send_err = error; 19467 m = NULL; 19468 goto failed; 19469 } else if (rack->rc_hw_nobuf) { 19470 rack->rc_hw_nobuf = 0; 19471 rack->r_ctl.rc_agg_delayed = 0; 19472 rack->r_early = 0; 19473 rack->r_late = 0; 19474 rack->r_ctl.rc_agg_early = 0; 19475 } 19476 if ((error == 0) && (rack->lt_bw_up == 0)) { 19477 /* Unlikely */ 19478 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(tv); 19479 rack->r_ctl.lt_seq = tp->snd_una; 19480 rack->lt_bw_up = 1; 19481 } else if ((error == 0) && 19482 (((tp->snd_max + len) - rack->r_ctl.lt_seq) > 0x7fffffff)) { 19483 /* 19484 * Need to record what we have since we are 19485 * approaching seq wrap. 
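*
* (lt_bw_bytes and lt_bw_time accumulate the long-term bandwidth
* sample; folding them in before the unsigned difference from lt_seq
* can exceed 2^31 keeps the sequence-space arithmetic well defined.)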
19486 */ 19487 struct timeval tv; 19488 uint64_t tmark; 19489 19490 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); 19491 rack->r_ctl.lt_seq = tp->snd_una; 19492 tmark = tcp_get_u64_usecs(&tv); 19493 if (tmark > rack->r_ctl.lt_timemark) { 19494 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 19495 rack->r_ctl.lt_timemark = tmark; 19496 } 19497 } 19498 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 19499 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls, segsiz); 19500 m = NULL; 19501 if (tp->snd_una == tp->snd_max) { 19502 rack->r_ctl.rc_tlp_rxt_last_time = cts; 19503 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 19504 tp->t_acktime = ticks; 19505 } 19506 counter_u64_add(rack_total_bytes, len); 19507 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 19508 19509 rack->forced_ack = 0; /* If we send something zap the FA flag */ 19510 tot_len += len; 19511 if ((tp->t_flags & TF_GPUTINPROG) == 0) 19512 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 19513 tp->snd_max += len; 19514 tp->snd_nxt = tp->snd_max; 19515 if (rack->rc_new_rnd_needed) { 19516 rack_new_round_starts(tp, rack, tp->snd_max); 19517 } 19518 { 19519 int idx; 19520 19521 idx = (len / segsiz) + 3; 19522 if (idx >= TCP_MSS_ACCT_ATIMER) 19523 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 19524 else 19525 counter_u64_add(rack_out_size[idx], 1); 19526 } 19527 if (len <= rack->r_ctl.fsb.left_to_send) 19528 rack->r_ctl.fsb.left_to_send -= len; 19529 else 19530 rack->r_ctl.fsb.left_to_send = 0; 19531 if (rack->r_ctl.fsb.left_to_send < segsiz) { 19532 rack->r_fast_output = 0; 19533 rack->r_ctl.fsb.left_to_send = 0; 19534 /* At the end of fast_output scale up the sb */ 19535 SOCK_SENDBUF_LOCK(rack->rc_inp->inp_socket); 19536 rack_sndbuf_autoscale(rack); 19537 SOCK_SENDBUF_UNLOCK(rack->rc_inp->inp_socket); 19538 } 19539 if (tp->t_rtttime == 0) { 19540 tp->t_rtttime = ticks; 19541 tp->t_rtseq = startseq; 19542 KMOD_TCPSTAT_INC(tcps_segstimed); 19543 } 19544 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 19545 (max_val > len) && 19546 (tso == 0)) { 19547 max_val -= len; 19548 len = segsiz; 19549 th = rack->r_ctl.fsb.th; 19550 #ifdef TCP_ACCOUNTING 19551 cnt_thru++; 19552 #endif 19553 goto again; 19554 } 19555 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 19556 counter_u64_add(rack_fto_send, 1); 19557 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz, __LINE__); 19558 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0); 19559 #ifdef TCP_ACCOUNTING 19560 crtsc = get_cyclecount(); 19561 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19562 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 19563 } 19564 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19565 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 19566 } 19567 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19568 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz); 19569 } 19570 sched_unpin(); 19571 #endif 19572 return (0); 19573 failed: 19574 if (m) 19575 m_free(m); 19576 rack->r_fast_output = 0; 19577 return (-1); 19578 } 19579 19580 static inline void 19581 rack_setup_fast_output(struct tcpcb *tp, struct tcp_rack *rack, 19582 struct sockbuf *sb, 19583 int len, int orig_len, int segsiz, uint32_t pace_max_seg, 19584 bool hw_tls, 19585 uint16_t flags) 19586 { 19587 rack->r_fast_output = 1; 19588 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19589 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19590 
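/*
 * Snapshot the mbuf's length and trailing space; rack_fo_m_copym()
 * compares against these on later sends to detect data trimmed by
 * acks or appended into the trailing room and re-aligns fsb.off
 * accordingly.
 */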
rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); 19591 rack->r_ctl.fsb.tcp_flags = flags; 19592 rack->r_ctl.fsb.left_to_send = orig_len - len; 19593 if (rack->r_ctl.fsb.left_to_send < pace_max_seg) { 19594 /* Less than a full sized pace, lets not */ 19595 rack->r_fast_output = 0; 19596 return; 19597 } else { 19598 /* Round down to the nearest pace_max_seg */ 19599 rack->r_ctl.fsb.left_to_send = rounddown(rack->r_ctl.fsb.left_to_send, pace_max_seg); 19600 } 19601 if (hw_tls) 19602 rack->r_ctl.fsb.hw_tls = 1; 19603 else 19604 rack->r_ctl.fsb.hw_tls = 0; 19605 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19606 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19607 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19608 (tp->snd_max - tp->snd_una))); 19609 if (rack->r_ctl.fsb.left_to_send < segsiz) 19610 rack->r_fast_output = 0; 19611 else { 19612 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19613 rack->r_ctl.fsb.rfo_apply_push = 1; 19614 else 19615 rack->r_ctl.fsb.rfo_apply_push = 0; 19616 } 19617 } 19618 19619 static uint32_t 19620 rack_get_hpts_pacing_min_for_bw(struct tcp_rack *rack, int32_t segsiz) 19621 { 19622 uint64_t min_time; 19623 uint32_t maxlen; 19624 19625 min_time = (uint64_t)get_hpts_min_sleep_time(); 19626 maxlen = (uint32_t)((rack->r_ctl.gp_bw * min_time) / (uint64_t)HPTS_USEC_IN_SEC); 19627 maxlen = roundup(maxlen, segsiz); 19628 return (maxlen); 19629 } 19630 19631 static struct rack_sendmap * 19632 rack_check_collapsed(struct tcp_rack *rack, uint32_t cts) 19633 { 19634 struct rack_sendmap *rsm = NULL; 19635 int thresh; 19636 19637 restart: 19638 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); 19639 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) { 19640 /* Nothing, strange turn off validity */ 19641 rack->r_collapse_point_valid = 0; 19642 return (NULL); 19643 } 19644 /* Can we send it yet? */ 19645 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) { 19646 /* 19647 * Receiver window has not grown enough for 19648 * the segment to be put on the wire. 19649 */ 19650 return (NULL); 19651 } 19652 if (rsm->r_flags & RACK_ACKED) { 19653 /* 19654 * It has been sacked, lets move to the 19655 * next one if possible. 19656 */ 19657 rack->r_ctl.last_collapse_point = rsm->r_end; 19658 /* Are we done? */ 19659 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 19660 rack->r_ctl.high_collapse_point)) { 19661 rack->r_collapse_point_valid = 0; 19662 return (NULL); 19663 } 19664 goto restart; 19665 } 19666 /* Now has it been long enough ? 
*/ 19667 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts, __LINE__, 1); 19668 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) { 19669 rack_log_collapse(rack, rsm->r_start, 19670 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 19671 thresh, __LINE__, 6, rsm->r_flags, rsm); 19672 return (rsm); 19673 } 19674 /* Not enough time */ 19675 rack_log_collapse(rack, rsm->r_start, 19676 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 19677 thresh, __LINE__, 7, rsm->r_flags, rsm); 19678 return (NULL); 19679 } 19680 19681 static inline void 19682 rack_validate_sizes(struct tcp_rack *rack, int32_t *len, int32_t segsiz, uint32_t pace_max_seg) 19683 { 19684 if ((rack->full_size_rxt == 0) && 19685 (rack->shape_rxt_to_pacing_min == 0) && 19686 (*len >= segsiz)) { 19687 *len = segsiz; 19688 } else if (rack->shape_rxt_to_pacing_min && 19689 rack->gp_ready) { 19690 /* We use pacing min as shaping len req */ 19691 uint32_t maxlen; 19692 19693 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 19694 if (*len > maxlen) 19695 *len = maxlen; 19696 } else { 19697 /* 19698 * The else is full_size_rxt is on so send it all 19699 * note we do need to check this for exceeding 19700 * our max segment size due to the fact that 19701 * we do sometimes merge chunks together i.e. 19702 * we cannot just assume that we will never have 19703 * a chunk greater than pace_max_seg 19704 */ 19705 if (*len > pace_max_seg) 19706 *len = pace_max_seg; 19707 } 19708 } 19709 19710 static int 19711 rack_output(struct tcpcb *tp) 19712 { 19713 struct socket *so; 19714 uint32_t recwin; 19715 uint32_t sb_offset, s_moff = 0; 19716 int32_t len, error = 0; 19717 uint16_t flags; 19718 struct mbuf *m, *s_mb = NULL; 19719 struct mbuf *mb; 19720 uint32_t if_hw_tsomaxsegcount = 0; 19721 uint32_t if_hw_tsomaxsegsize; 19722 int32_t segsiz, minseg; 19723 long tot_len_this_send = 0; 19724 #ifdef INET 19725 struct ip *ip = NULL; 19726 #endif 19727 struct udphdr *udp = NULL; 19728 struct tcp_rack *rack; 19729 struct tcphdr *th; 19730 uint8_t pass = 0; 19731 uint8_t mark = 0; 19732 uint8_t check_done = 0; 19733 uint8_t wanted_cookie = 0; 19734 u_char opt[TCP_MAXOLEN]; 19735 unsigned ipoptlen, optlen, hdrlen, ulen=0; 19736 uint32_t rack_seq; 19737 19738 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 19739 unsigned ipsec_optlen = 0; 19740 19741 #endif 19742 int32_t idle, sendalot; 19743 uint32_t tot_idle; 19744 int32_t sub_from_prr = 0; 19745 volatile int32_t sack_rxmit; 19746 struct rack_sendmap *rsm = NULL; 19747 int32_t tso, mtu; 19748 struct tcpopt to; 19749 int32_t slot = 0; 19750 int32_t sup_rack = 0; 19751 uint32_t cts, ms_cts, delayed, early; 19752 uint32_t add_flag = RACK_SENT_SP; 19753 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */ 19754 uint8_t doing_tlp = 0; 19755 uint32_t cwnd_to_use, pace_max_seg; 19756 int32_t do_a_prefetch = 0; 19757 int32_t prefetch_rsm = 0; 19758 int32_t orig_len = 0; 19759 struct timeval tv; 19760 int32_t prefetch_so_done = 0; 19761 struct tcp_log_buffer *lgb; 19762 struct inpcb *inp = tptoinpcb(tp); 19763 struct sockbuf *sb; 19764 uint64_t ts_val = 0; 19765 #ifdef TCP_ACCOUNTING 19766 uint64_t crtsc; 19767 #endif 19768 #ifdef INET6 19769 struct ip6_hdr *ip6 = NULL; 19770 int32_t isipv6; 19771 #endif 19772 bool hpts_calling, hw_tls = false; 19773 19774 NET_EPOCH_ASSERT(); 19775 INP_WLOCK_ASSERT(inp); 19776 19777 /* setup and take the cache hits here */ 19778 rack = (struct tcp_rack *)tp->t_fb_ptr; 19779 #ifdef 
TCP_ACCOUNTING 19780 sched_pin(); 19781 ts_val = get_cyclecount(); 19782 #endif 19783 hpts_calling = !!(tp->t_flags2 & TF2_HPTS_CALLS); 19784 tp->t_flags2 &= ~TF2_HPTS_CALLS; 19785 #ifdef TCP_OFFLOAD 19786 if (tp->t_flags & TF_TOE) { 19787 #ifdef TCP_ACCOUNTING 19788 sched_unpin(); 19789 #endif 19790 return (tcp_offload_output(tp)); 19791 } 19792 #endif 19793 if (rack->rack_deferred_inited == 0) { 19794 /* 19795 * If we are the connecting socket we will 19796 * hit rack_init() when no sequence numbers 19797 * are setup. This makes it so we must defer 19798 * some initialization. Call that now. 19799 */ 19800 rack_deferred_init(tp, rack); 19801 } 19802 /* 19803 * For TFO connections in SYN_RECEIVED, only allow the initial 19804 * SYN|ACK and those sent by the retransmit timer. 19805 */ 19806 if ((tp->t_flags & TF_FASTOPEN) && 19807 (tp->t_state == TCPS_SYN_RECEIVED) && 19808 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 19809 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 19810 #ifdef TCP_ACCOUNTING 19811 sched_unpin(); 19812 #endif 19813 return (0); 19814 } 19815 #ifdef INET6 19816 if (rack->r_state) { 19817 /* Use the cache line loaded if possible */ 19818 isipv6 = rack->r_is_v6; 19819 } else { 19820 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 19821 } 19822 #endif 19823 early = 0; 19824 cts = tcp_get_usecs(&tv); 19825 ms_cts = tcp_tv_to_mssectick(&tv); 19826 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 19827 tcp_in_hpts(rack->rc_tp)) { 19828 /* 19829 * We are on the hpts for some timer but not hptsi output. 19830 * Remove from the hpts unconditionally. 19831 */ 19832 rack_timer_cancel(tp, rack, cts, __LINE__); 19833 } 19834 /* Are we pacing and late? */ 19835 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 19836 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 19837 /* We are delayed */ 19838 delayed = cts - rack->r_ctl.rc_last_output_to; 19839 } else { 19840 delayed = 0; 19841 } 19842 /* Do the timers, which may override the pacer */ 19843 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 19844 int retval; 19845 19846 retval = rack_process_timers(tp, rack, cts, hpts_calling, 19847 &doing_tlp); 19848 if (retval != 0) { 19849 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 19850 #ifdef TCP_ACCOUNTING 19851 sched_unpin(); 19852 #endif 19853 /* 19854 * If timers want tcp_drop(), then pass error out, 19855 * otherwise suppress it. 19856 */ 19857 return (retval < 0 ? retval : 0); 19858 } 19859 } 19860 if (rack->rc_in_persist) { 19861 if (tcp_in_hpts(rack->rc_tp) == 0) { 19862 /* Timer is not running */ 19863 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 19864 } 19865 #ifdef TCP_ACCOUNTING 19866 sched_unpin(); 19867 #endif 19868 return (0); 19869 } 19870 if ((rack->rc_ack_required == 1) && 19871 (rack->r_timer_override == 0)){ 19872 /* A timeout occurred and no ack has arrived */ 19873 if (tcp_in_hpts(rack->rc_tp) == 0) { 19874 /* Timer is not running */ 19875 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 19876 } 19877 #ifdef TCP_ACCOUNTING 19878 sched_unpin(); 19879 #endif 19880 return (0); 19881 } 19882 if ((rack->r_timer_override) || 19883 (rack->rc_ack_can_sendout_data) || 19884 (delayed) || 19885 (tp->t_state < TCPS_ESTABLISHED)) { 19886 rack->rc_ack_can_sendout_data = 0; 19887 if (tcp_in_hpts(rack->rc_tp)) 19888 tcp_hpts_remove(rack->rc_tp); 19889 } else if (tcp_in_hpts(rack->rc_tp)) { 19890 /* 19891 * On the hpts you can't pass even if ACKNOW is on, we will 19892 * when the hpts fires. 
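*
* Returning 0 here only defers the send; when the hpts timer fires
* it will drive the output path again for this connection.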
19893 */ 19894 #ifdef TCP_ACCOUNTING 19895 crtsc = get_cyclecount(); 19896 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19897 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 19898 } 19899 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19900 tp->tcp_cnt_counters[SND_BLOCKED]++; 19901 } 19902 sched_unpin(); 19903 #endif 19904 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 19905 return (0); 19906 } 19907 /* Finish out both pacing early and late accounting */ 19908 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 19909 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 19910 early = rack->r_ctl.rc_last_output_to - cts; 19911 } else 19912 early = 0; 19913 if (delayed && (rack->rc_always_pace == 1)) { 19914 rack->r_ctl.rc_agg_delayed += delayed; 19915 rack->r_late = 1; 19916 } else if (early && (rack->rc_always_pace == 1)) { 19917 rack->r_ctl.rc_agg_early += early; 19918 rack->r_early = 1; 19919 } else if (rack->rc_always_pace == 0) { 19920 /* Non-paced we are not late */ 19921 rack->r_ctl.rc_agg_delayed = rack->r_ctl.rc_agg_early = 0; 19922 rack->r_early = rack->r_late = 0; 19923 } 19924 /* Now that early/late accounting is done turn off the flag */ 19925 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 19926 rack->r_wanted_output = 0; 19927 rack->r_timer_override = 0; 19928 if ((tp->t_state != rack->r_state) && 19929 TCPS_HAVEESTABLISHED(tp->t_state)) { 19930 rack_set_state(tp, rack); 19931 } 19932 if ((rack->r_fast_output) && 19933 (doing_tlp == 0) && 19934 (tp->rcv_numsacks == 0)) { 19935 int ret; 19936 19937 error = 0; 19938 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 19939 if (ret >= 0) 19940 return(ret); 19941 else if (error) { 19942 inp = rack->rc_inp; 19943 so = inp->inp_socket; 19944 sb = &so->so_snd; 19945 goto nomore; 19946 } 19947 } 19948 inp = rack->rc_inp; 19949 /* 19950 * For TFO connections in SYN_SENT or SYN_RECEIVED, 19951 * only allow the initial SYN or SYN|ACK and those sent 19952 * by the retransmit timer. 19953 */ 19954 if ((tp->t_flags & TF_FASTOPEN) && 19955 ((tp->t_state == TCPS_SYN_RECEIVED) || 19956 (tp->t_state == TCPS_SYN_SENT)) && 19957 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 19958 (tp->t_rxtshift == 0)) { /* not a retransmit */ 19959 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 19960 so = inp->inp_socket; 19961 sb = &so->so_snd; 19962 goto just_return_nolock; 19963 } 19964 /* 19965 * Determine length of data that should be transmitted, and flags 19966 * that will be used. If there is some data or critical controls 19967 * (SYN, RST) to send, then transmit; otherwise, investigate 19968 * further. 
19969 */ 19970 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 19971 if (tp->t_idle_reduce) { 19972 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 19973 rack_cc_after_idle(rack, tp); 19974 } 19975 tp->t_flags &= ~TF_LASTIDLE; 19976 if (idle) { 19977 if (tp->t_flags & TF_MORETOCOME) { 19978 tp->t_flags |= TF_LASTIDLE; 19979 idle = 0; 19980 } 19981 } 19982 if ((tp->snd_una == tp->snd_max) && 19983 rack->r_ctl.rc_went_idle_time && 19984 (cts > rack->r_ctl.rc_went_idle_time)) { 19985 tot_idle = (cts - rack->r_ctl.rc_went_idle_time); 19986 if (tot_idle > rack_min_probertt_hold) { 19987 /* Count as a probe rtt */ 19988 if (rack->in_probe_rtt == 0) { 19989 rack->r_ctl.rc_lower_rtt_us_cts = cts; 19990 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 19991 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 19992 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 19993 } else { 19994 rack_exit_probertt(rack, cts); 19995 } 19996 } 19997 } else 19998 tot_idle = 0; 19999 if (rack_use_fsb && 20000 (rack->r_ctl.fsb.tcp_ip_hdr) && 20001 (rack->r_fsb_inited == 0) && 20002 (rack->r_state != TCPS_CLOSED)) 20003 rack_init_fsb_block(tp, rack, tcp_outflags[tp->t_state]); 20004 if (rack->rc_sendvars_notset == 1) { 20005 rack->rc_sendvars_notset = 0; 20006 /* 20007 * Make sure any TCP timers (keep-alive) is not running. 20008 */ 20009 tcp_timer_stop(tp); 20010 } 20011 if ((rack->rack_no_prr == 1) && 20012 (rack->rc_always_pace == 0)) { 20013 /* 20014 * Sanity check before sending, if we have 20015 * no-pacing enabled and prr is turned off that 20016 * is a logistics error. Correct this by turnning 20017 * prr back on. A user *must* set some form of 20018 * pacing in order to turn PRR off. We do this 20019 * in the output path so that we can avoid socket 20020 * option ordering issues that would occur if we 20021 * tried to do it while setting rack_no_prr on. 20022 */ 20023 rack->rack_no_prr = 0; 20024 } 20025 if ((rack->pcm_enabled == 1) && 20026 (rack->pcm_needed == 0) && 20027 (tot_idle > 0)) { 20028 /* 20029 * We have been idle some micro seconds. We need 20030 * to factor this in to see if a PCM is needed. 20031 */ 20032 uint32_t rtts_idle, rnds; 20033 20034 if (tp->t_srtt) 20035 rtts_idle = tot_idle / tp->t_srtt; 20036 else 20037 rtts_idle = 0; 20038 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; 20039 rack->r_ctl.pcm_idle_rounds += rtts_idle; 20040 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { 20041 rack->pcm_needed = 1; 20042 rack_log_pcm(rack, 8, rack->r_ctl.last_pcm_round, rtts_idle, rack->r_ctl.current_round ); 20043 } 20044 } 20045 again: 20046 sendalot = 0; 20047 cts = tcp_get_usecs(&tv); 20048 ms_cts = tcp_tv_to_mssectick(&tv); 20049 tso = 0; 20050 mtu = 0; 20051 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 20052 minseg = segsiz; 20053 if (rack->r_ctl.rc_pace_max_segs == 0) 20054 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 20055 else 20056 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 20057 if (TCPS_HAVEESTABLISHED(tp->t_state) && 20058 (rack->r_ctl.pcm_max_seg == 0)) { 20059 /* 20060 * We set in our first send so we know that the ctf_fixed_maxseg 20061 * has been fully set. If we do it in rack_init() we most likely 20062 * see 512 bytes so we end up at 5120, not desirable. 
20063 */ 20064 rack->r_ctl.pcm_max_seg = rc_init_window(rack); 20065 if (rack->r_ctl.pcm_max_seg < (ctf_fixed_maxseg(tp) * 10)) { 20066 /* 20067 * Assure our initial PCM probe is at least 10 MSS. 20068 */ 20069 rack->r_ctl.pcm_max_seg = ctf_fixed_maxseg(tp) * 10; 20070 } 20071 } 20072 if ((rack->r_ctl.pcm_max_seg != 0) && (rack->pcm_needed == 1)) { 20073 uint32_t rw_avail, cwa; 20074 20075 if (tp->snd_wnd > ctf_outstanding(tp)) 20076 rw_avail = tp->snd_wnd - ctf_outstanding(tp); 20077 else 20078 rw_avail = 0; 20079 if (tp->snd_cwnd > ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked)) 20080 cwa = tp->snd_cwnd -ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 20081 else 20082 cwa = 0; 20083 if ((cwa >= rack->r_ctl.pcm_max_seg) && 20084 (rw_avail > rack->r_ctl.pcm_max_seg)) { 20085 /* Raise up the max seg for this trip through */ 20086 pace_max_seg = rack->r_ctl.pcm_max_seg; 20087 /* Disable any fast output */ 20088 rack->r_fast_output = 0; 20089 } 20090 if (rack_verbose_logging) { 20091 rack_log_pcm(rack, 4, 20092 cwa, rack->r_ctl.pcm_max_seg, rw_avail); 20093 } 20094 } 20095 sb_offset = tp->snd_max - tp->snd_una; 20096 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 20097 flags = tcp_outflags[tp->t_state]; 20098 while (rack->rc_free_cnt < rack_free_cache) { 20099 rsm = rack_alloc(rack); 20100 if (rsm == NULL) { 20101 if (hpts_calling) 20102 /* Retry in a ms */ 20103 slot = (1 * HPTS_USEC_IN_MSEC); 20104 so = inp->inp_socket; 20105 sb = &so->so_snd; 20106 goto just_return_nolock; 20107 } 20108 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 20109 rack->rc_free_cnt++; 20110 rsm = NULL; 20111 } 20112 sack_rxmit = 0; 20113 len = 0; 20114 rsm = NULL; 20115 if (flags & TH_RST) { 20116 SOCK_SENDBUF_LOCK(inp->inp_socket); 20117 so = inp->inp_socket; 20118 sb = &so->so_snd; 20119 goto send; 20120 } 20121 if (rack->r_ctl.rc_resend) { 20122 /* Retransmit timer */ 20123 rsm = rack->r_ctl.rc_resend; 20124 rack->r_ctl.rc_resend = NULL; 20125 len = rsm->r_end - rsm->r_start; 20126 sack_rxmit = 1; 20127 sendalot = 0; 20128 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 20129 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 20130 __func__, __LINE__, 20131 rsm->r_start, tp->snd_una, tp, rack, rsm)); 20132 sb_offset = rsm->r_start - tp->snd_una; 20133 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 20134 } else if (rack->r_collapse_point_valid && 20135 ((rsm = rack_check_collapsed(rack, cts)) != NULL)) { 20136 /* 20137 * If an RSM is returned then enough time has passed 20138 * for us to retransmit it. Move up the collapse point, 20139 * since this rsm has its chance to retransmit now. 20140 */ 20141 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_RXT); 20142 rack->r_ctl.last_collapse_point = rsm->r_end; 20143 /* Are we done? 
*/
20144 if (SEQ_GEQ(rack->r_ctl.last_collapse_point,
20145 rack->r_ctl.high_collapse_point))
20146 rack->r_collapse_point_valid = 0;
20147 sack_rxmit = 1;
20148 /* We are not doing a TLP */
20149 doing_tlp = 0;
20150 len = rsm->r_end - rsm->r_start;
20151 sb_offset = rsm->r_start - tp->snd_una;
20152 sendalot = 0;
20153 rack_validate_sizes(rack, &len, segsiz, pace_max_seg);
20154 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) {
20155 /* We have a retransmit that takes precedence */
20156 if ((!IN_FASTRECOVERY(tp->t_flags)) &&
20157 ((rsm->r_flags & RACK_MUST_RXT) == 0) &&
20158 ((tp->t_flags & TF_WASFRECOVERY) == 0)) {
20159 /* Enter recovery if not induced by a time-out */
20160 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
20161 }
20162 #ifdef INVARIANTS
20163 if (SEQ_LT(rsm->r_start, tp->snd_una)) {
20164 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
20165 tp, rack, rsm, rsm->r_start, tp->snd_una);
20166 }
20167 #endif
20168 len = rsm->r_end - rsm->r_start;
20169 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
20170 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
20171 __func__, __LINE__,
20172 rsm->r_start, tp->snd_una, tp, rack, rsm));
20173 sb_offset = rsm->r_start - tp->snd_una;
20174 sendalot = 0;
20175 rack_validate_sizes(rack, &len, segsiz, pace_max_seg);
20176 if (len > 0) {
20177 sack_rxmit = 1;
20178 KMOD_TCPSTAT_INC(tcps_sack_rexmits);
20179 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes,
20180 min(len, segsiz));
20181 }
20182 } else if (rack->r_ctl.rc_tlpsend) {
20183 /* Tail loss probe */
20184 long cwin;
20185 long tlen;
20186 
20187 /*
20188 * Check if we can do a TLP with a RACK'd packet.
20189 * This can happen if we are not doing the rack
20190 * cheat and we skipped to a TLP and it
20191 * went off.
20192 */
20193 rsm = rack->r_ctl.rc_tlpsend;
20194 /* We are doing a TLP, make sure the flag is present */
20195 rsm->r_flags |= RACK_TLP;
20196 rack->r_ctl.rc_tlpsend = NULL;
20197 sack_rxmit = 1;
20198 tlen = rsm->r_end - rsm->r_start;
20199 if (tlen > segsiz)
20200 tlen = segsiz;
20201 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
20202 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
20203 __func__, __LINE__,
20204 rsm->r_start, tp->snd_una, tp, rack, rsm));
20205 sb_offset = rsm->r_start - tp->snd_una;
20206 cwin = min(tp->snd_wnd, tlen);
20207 len = cwin;
20208 }
20209 if (rack->r_must_retran &&
20210 (doing_tlp == 0) &&
20211 (SEQ_GT(tp->snd_max, tp->snd_una)) &&
20212 (rsm == NULL)) {
20213 /*
20214 * There are two different ways that we
20215 * can get into this block:
20216 * a) This is a non-sack connection, we had a time-out
20217 * and thus r_must_retran was set and everything
20218 * left outstanding has been marked for retransmit.
20219 * b) The MTU of the path shrank, so that everything
20220 * was marked to be retransmitted with the smaller
20221 * mtu and r_must_retran was set.
20222 *
20223 * This means that we expect the sendmap (outstanding)
20224 * to all be marked must. We can use the tmap to
20225 * look at them.
20226 *
20227 */
20228 int sendwin, flight;
20229 
20230 sendwin = min(tp->snd_wnd, tp->snd_cwnd);
20231 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto);
20232 if (flight >= sendwin) {
20233 /*
20234 * We can't send yet.
20235 */
20236 so = inp->inp_socket;
20237 sb = &so->so_snd;
20238 goto just_return_nolock;
20239 }
20240 /*
20241 * This is the case a/b mentioned above. All
20242 * outstanding/not-acked should be marked.
20243 * We can use the tmap to find them.
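 * The tmap is kept in transmit order, so TAILQ_FIRST() gives the
 * oldest outstanding rsm; if even that one no longer carries
 * RACK_MUST_RXT the forced-retransmit pass is finished and
 * r_must_retran can simply be cleared.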
20244 */ 20245 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 20246 if (rsm == NULL) { 20247 /* TSNH */ 20248 rack->r_must_retran = 0; 20249 rack->r_ctl.rc_out_at_rto = 0; 20250 so = inp->inp_socket; 20251 sb = &so->so_snd; 20252 goto just_return_nolock; 20253 } 20254 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { 20255 /* 20256 * The first one does not have the flag, did we collapse 20257 * further up in our list? 20258 */ 20259 rack->r_must_retran = 0; 20260 rack->r_ctl.rc_out_at_rto = 0; 20261 rsm = NULL; 20262 sack_rxmit = 0; 20263 } else { 20264 sack_rxmit = 1; 20265 len = rsm->r_end - rsm->r_start; 20266 sb_offset = rsm->r_start - tp->snd_una; 20267 sendalot = 0; 20268 if ((rack->full_size_rxt == 0) && 20269 (rack->shape_rxt_to_pacing_min == 0) && 20270 (len >= segsiz)) 20271 len = segsiz; 20272 else if (rack->shape_rxt_to_pacing_min && 20273 rack->gp_ready) { 20274 /* We use pacing min as shaping len req */ 20275 uint32_t maxlen; 20276 20277 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 20278 if (len > maxlen) 20279 len = maxlen; 20280 } 20281 /* 20282 * Delay removing the flag RACK_MUST_RXT so 20283 * that the fastpath for retransmit will 20284 * work with this rsm. 20285 */ 20286 } 20287 } 20288 /* 20289 * Enforce a connection sendmap count limit if set 20290 * as long as we are not retransmiting. 20291 */ 20292 if ((rsm == NULL) && 20293 (V_tcp_map_entries_limit > 0) && 20294 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 20295 counter_u64_add(rack_to_alloc_limited, 1); 20296 if (!rack->alloc_limit_reported) { 20297 rack->alloc_limit_reported = 1; 20298 counter_u64_add(rack_alloc_limited_conns, 1); 20299 } 20300 so = inp->inp_socket; 20301 sb = &so->so_snd; 20302 goto just_return_nolock; 20303 } 20304 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 20305 /* we are retransmitting the fin */ 20306 len--; 20307 if (len) { 20308 /* 20309 * When retransmitting data do *not* include the 20310 * FIN. This could happen from a TLP probe. 
20311 */ 20312 flags &= ~TH_FIN; 20313 } 20314 } 20315 if (rsm && rack->r_fsb_inited && 20316 rack_use_rsm_rfo && 20317 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 20318 int ret; 20319 20320 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); 20321 if (ret == 0) 20322 return (0); 20323 } 20324 so = inp->inp_socket; 20325 sb = &so->so_snd; 20326 if (do_a_prefetch == 0) { 20327 kern_prefetch(sb, &do_a_prefetch); 20328 do_a_prefetch = 1; 20329 } 20330 #ifdef NETFLIX_SHARED_CWND 20331 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 20332 rack->rack_enable_scwnd) { 20333 /* We are doing cwnd sharing */ 20334 if (rack->gp_ready && 20335 (rack->rack_attempted_scwnd == 0) && 20336 (rack->r_ctl.rc_scw == NULL) && 20337 tp->t_lib) { 20338 /* The pcbid is in, lets make an attempt */ 20339 counter_u64_add(rack_try_scwnd, 1); 20340 rack->rack_attempted_scwnd = 1; 20341 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 20342 &rack->r_ctl.rc_scw_index, 20343 segsiz); 20344 } 20345 if (rack->r_ctl.rc_scw && 20346 (rack->rack_scwnd_is_idle == 1) && 20347 sbavail(&so->so_snd)) { 20348 /* we are no longer out of data */ 20349 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 20350 rack->rack_scwnd_is_idle = 0; 20351 } 20352 if (rack->r_ctl.rc_scw) { 20353 /* First lets update and get the cwnd */ 20354 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 20355 rack->r_ctl.rc_scw_index, 20356 tp->snd_cwnd, tp->snd_wnd, segsiz); 20357 } 20358 } 20359 #endif 20360 /* 20361 * Get standard flags, and add SYN or FIN if requested by 'hidden' 20362 * state flags. 20363 */ 20364 if (tp->t_flags & TF_NEEDFIN) 20365 flags |= TH_FIN; 20366 if (tp->t_flags & TF_NEEDSYN) 20367 flags |= TH_SYN; 20368 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 20369 void *end_rsm; 20370 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 20371 if (end_rsm) 20372 kern_prefetch(end_rsm, &prefetch_rsm); 20373 prefetch_rsm = 1; 20374 } 20375 SOCK_SENDBUF_LOCK(so); 20376 if ((sack_rxmit == 0) && 20377 (TCPS_HAVEESTABLISHED(tp->t_state) || 20378 (tp->t_flags & TF_FASTOPEN))) { 20379 /* 20380 * We are not retransmitting (sack_rxmit is 0) so we 20381 * are sending new data. This is always based on snd_max. 20382 * Now in theory snd_max may be equal to snd_una, if so 20383 * then nothing is outstanding and the offset would be 0. 20384 */ 20385 uint32_t avail; 20386 20387 avail = sbavail(sb); 20388 if (SEQ_GT(tp->snd_max, tp->snd_una) && avail) 20389 sb_offset = tp->snd_max - tp->snd_una; 20390 else 20391 sb_offset = 0; 20392 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { 20393 if (rack->r_ctl.rc_tlp_new_data) { 20394 /* TLP is forcing out new data */ 20395 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 20396 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 20397 } 20398 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { 20399 if (tp->snd_wnd > sb_offset) 20400 len = tp->snd_wnd - sb_offset; 20401 else 20402 len = 0; 20403 } else { 20404 len = rack->r_ctl.rc_tlp_new_data; 20405 } 20406 rack->r_ctl.rc_tlp_new_data = 0; 20407 } else { 20408 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); 20409 } 20410 if ((rack->r_ctl.crte == NULL) && 20411 IN_FASTRECOVERY(tp->t_flags) && 20412 (rack->full_size_rxt == 0) && 20413 (rack->shape_rxt_to_pacing_min == 0) && 20414 (len > segsiz)) { 20415 /* 20416 * For prr=off, we need to send only 1 MSS 20417 * at a time. 
We do this because another sack could 20418 * be arriving that causes us to send retransmits and 20419 * we don't want to be on a long pace due to a larger send 20420 * that keeps us from sending out the retransmit. 20421 */ 20422 len = segsiz; 20423 } else if (rack->shape_rxt_to_pacing_min && 20424 rack->gp_ready) { 20425 /* We use pacing min as shaping len req */ 20426 uint32_t maxlen; 20427 20428 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 20429 if (len > maxlen) 20430 len = maxlen; 20431 }/* The else is full_size_rxt is on so send it all */ 20432 } else { 20433 uint32_t outstanding; 20434 /* 20435 * We are inside of a Fast recovery episode, this 20436 * is caused by a SACK or 3 dup acks. At this point 20437 * we have sent all the retransmissions and we rely 20438 * on PRR to dictate what we will send in the form of 20439 * new data. 20440 */ 20441 20442 outstanding = tp->snd_max - tp->snd_una; 20443 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 20444 if (tp->snd_wnd > outstanding) { 20445 len = tp->snd_wnd - outstanding; 20446 /* Check to see if we have the data */ 20447 if ((sb_offset + len) > avail) { 20448 /* It does not all fit */ 20449 if (avail > sb_offset) 20450 len = avail - sb_offset; 20451 else 20452 len = 0; 20453 } 20454 } else { 20455 len = 0; 20456 } 20457 } else if (avail > sb_offset) { 20458 len = avail - sb_offset; 20459 } else { 20460 len = 0; 20461 } 20462 if (len > 0) { 20463 if (len > rack->r_ctl.rc_prr_sndcnt) { 20464 len = rack->r_ctl.rc_prr_sndcnt; 20465 } 20466 if (len > 0) { 20467 sub_from_prr = 1; 20468 } 20469 } 20470 if (len > segsiz) { 20471 /* 20472 * We should never send more than a MSS when 20473 * retransmitting or sending new data in prr 20474 * mode unless the override flag is on. Most 20475 * likely the PRR algorithm is not going to 20476 * let us send a lot as well :-) 20477 */ 20478 if (rack->r_ctl.rc_prr_sendalot == 0) { 20479 len = segsiz; 20480 } 20481 } else if (len < segsiz) { 20482 /* 20483 * Do we send any? The idea here is if the 20484 * send empty's the socket buffer we want to 20485 * do it. However if not then lets just wait 20486 * for our prr_sndcnt to get bigger. 20487 */ 20488 long leftinsb; 20489 20490 leftinsb = sbavail(sb) - sb_offset; 20491 if (leftinsb > len) { 20492 /* This send does not empty the sb */ 20493 len = 0; 20494 } 20495 } 20496 } 20497 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 20498 /* 20499 * If you have not established 20500 * and are not doing FAST OPEN 20501 * no data please. 20502 */ 20503 if ((sack_rxmit == 0) && 20504 !(tp->t_flags & TF_FASTOPEN)) { 20505 len = 0; 20506 sb_offset = 0; 20507 } 20508 } 20509 if (prefetch_so_done == 0) { 20510 kern_prefetch(so, &prefetch_so_done); 20511 prefetch_so_done = 1; 20512 } 20513 orig_len = len; 20514 /* 20515 * Lop off SYN bit if it has already been sent. However, if this is 20516 * SYN-SENT state and if segment contains data and if we don't know 20517 * that foreign host supports TAO, suppress sending segment. 20518 */ 20519 if ((flags & TH_SYN) && 20520 SEQ_GT(tp->snd_max, tp->snd_una) && 20521 ((sack_rxmit == 0) && 20522 (tp->t_rxtshift == 0))) { 20523 /* 20524 * When sending additional segments following a TFO SYN|ACK, 20525 * do not include the SYN bit. 20526 */ 20527 if ((tp->t_flags & TF_FASTOPEN) && 20528 (tp->t_state == TCPS_SYN_RECEIVED)) 20529 flags &= ~TH_SYN; 20530 } 20531 /* 20532 * Be careful not to send data and/or FIN on SYN segments. 
This 20533 * measure is needed to prevent interoperability problems with not 20534 * fully conformant TCP implementations. 20535 */ 20536 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 20537 len = 0; 20538 flags &= ~TH_FIN; 20539 } 20540 /* 20541 * On TFO sockets, ensure no data is sent in the following cases: 20542 * 20543 * - When retransmitting SYN|ACK on a passively-created socket 20544 * 20545 * - When retransmitting SYN on an actively created socket 20546 * 20547 * - When sending a zero-length cookie (cookie request) on an 20548 * actively created socket 20549 * 20550 * - When the socket is in the CLOSED state (RST is being sent) 20551 */ 20552 if ((tp->t_flags & TF_FASTOPEN) && 20553 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 20554 ((tp->t_state == TCPS_SYN_SENT) && 20555 (tp->t_tfo_client_cookie_len == 0)) || 20556 (flags & TH_RST))) { 20557 sack_rxmit = 0; 20558 len = 0; 20559 } 20560 /* Without fast-open there should never be data sent on a SYN */ 20561 if ((flags & TH_SYN) && !(tp->t_flags & TF_FASTOPEN)) { 20562 len = 0; 20563 } 20564 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 20565 /* We only send 1 MSS if we have a DSACK block */ 20566 add_flag |= RACK_SENT_W_DSACK; 20567 len = segsiz; 20568 } 20569 if (len <= 0) { 20570 /* 20571 * We have nothing to send, or the window shrank, or 20572 * is closed, do we need to go into persists? 20573 */ 20574 len = 0; 20575 if ((tp->snd_wnd == 0) && 20576 (TCPS_HAVEESTABLISHED(tp->t_state)) && 20577 (tp->snd_una == tp->snd_max) && 20578 (sb_offset < (int)sbavail(sb))) { 20579 rack_enter_persist(tp, rack, cts, tp->snd_una); 20580 } 20581 } else if ((rsm == NULL) && 20582 (doing_tlp == 0) && 20583 (len < pace_max_seg)) { 20584 /* 20585 * We are not sending a maximum sized segment for 20586 * some reason. Should we not send anything (think 20587 * sws or persists)? 20588 */ 20589 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 20590 (TCPS_HAVEESTABLISHED(tp->t_state)) && 20591 (len < minseg) && 20592 (len < (int)(sbavail(sb) - sb_offset))) { 20593 /* 20594 * Here the rwnd is less than 20595 * the minimum pacing size, this is not a retransmit, 20596 * we are established and 20597 * the send is not the last in the socket buffer 20598 * we send nothing, and we may enter persists 20599 * if nothing is outstanding. 20600 */ 20601 len = 0; 20602 if (tp->snd_max == tp->snd_una) { 20603 /* 20604 * Nothing out we can 20605 * go into persists. 20606 */ 20607 rack_enter_persist(tp, rack, cts, tp->snd_una); 20608 } 20609 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 20610 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 20611 (len < (int)(sbavail(sb) - sb_offset)) && 20612 (len < minseg)) { 20613 /* 20614 * Here we are not retransmitting, and 20615 * the cwnd is not so small that we could 20616 * not send at least a min size (rxt timer 20617 * not having gone off), We have 2 segments or 20618 * more already in flight, its not the tail end 20619 * of the socket buffer and the cwnd is blocking 20620 * us from sending out a minimum pacing segment size. 20621 * Lets not send anything. 20622 */ 20623 len = 0; 20624 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 20625 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 20626 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 20627 (len < (int)(sbavail(sb) - sb_offset)) && 20628 (TCPS_HAVEESTABLISHED(tp->t_state))) { 20629 /* 20630 * Here we have a send window but we have 20631 * filled it up and we can't send another pacing segment. 
20632 * We also have in flight more than 2 segments 20633 * and we are not completing the sb i.e. we allow 20634 * the last bytes of the sb to go out even if 20635 * its not a full pacing segment. 20636 */ 20637 len = 0; 20638 } else if ((rack->r_ctl.crte != NULL) && 20639 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && 20640 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) && 20641 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && 20642 (len < (int)(sbavail(sb) - sb_offset))) { 20643 /* 20644 * Here we are doing hardware pacing, this is not a TLP, 20645 * we are not sending a pace max segment size, there is rwnd 20646 * room to send at least N pace_max_seg, the cwnd is greater 20647 * than or equal to a full pacing segments plus 4 mss and we have 2 or 20648 * more segments in flight and its not the tail of the socket buffer. 20649 * 20650 * We don't want to send instead we need to get more ack's in to 20651 * allow us to send a full pacing segment. Normally, if we are pacing 20652 * about the right speed, we should have finished our pacing 20653 * send as most of the acks have come back if we are at the 20654 * right rate. This is a bit fuzzy since return path delay 20655 * can delay the acks, which is why we want to make sure we 20656 * have cwnd space to have a bit more than a max pace segments in flight. 20657 * 20658 * If we have not gotten our acks back we are pacing at too high a 20659 * rate delaying will not hurt and will bring our GP estimate down by 20660 * injecting the delay. If we don't do this we will send 20661 * 2 MSS out in response to the acks being clocked in which 20662 * defeats the point of hw-pacing (i.e. to help us get 20663 * larger TSO's out). 20664 */ 20665 len = 0; 20666 } 20667 20668 } 20669 /* len will be >= 0 after this point. */ 20670 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 20671 rack_sndbuf_autoscale(rack); 20672 /* 20673 * Decide if we can use TCP Segmentation Offloading (if supported by 20674 * hardware). 20675 * 20676 * TSO may only be used if we are in a pure bulk sending state. The 20677 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 20678 * options prevent using TSO. With TSO the TCP header is the same 20679 * (except for the sequence number) for all generated packets. This 20680 * makes it impossible to transmit any options which vary per 20681 * generated segment or packet. 20682 * 20683 * IPv4 handling has a clear separation of ip options and ip header 20684 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 20685 * the right thing below to provide length of just ip options and thus 20686 * checking for ipoptlen is enough to decide if ip options are present. 20687 */ 20688 ipoptlen = 0; 20689 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 20690 /* 20691 * Pre-calculate here as we save another lookup into the darknesses 20692 * of IPsec that way and can actually decide if TSO is ok. 
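 * The computed ipsec_optlen is folded into ipoptlen just below, and a
 * non-zero ipoptlen in turn keeps TSO from being enabled for this
 * send.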
20693 */ 20694 #ifdef INET6 20695 if (isipv6 && IPSEC_ENABLED(ipv6)) 20696 ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp); 20697 #ifdef INET 20698 else 20699 #endif 20700 #endif /* INET6 */ 20701 #ifdef INET 20702 if (IPSEC_ENABLED(ipv4)) 20703 ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp); 20704 #endif /* INET */ 20705 #endif 20706 20707 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 20708 ipoptlen += ipsec_optlen; 20709 #endif 20710 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && 20711 (tp->t_port == 0) && 20712 ((tp->t_flags & TF_SIGNATURE) == 0) && 20713 sack_rxmit == 0 && 20714 ipoptlen == 0) 20715 tso = 1; 20716 { 20717 uint32_t outstanding __unused; 20718 20719 outstanding = tp->snd_max - tp->snd_una; 20720 if (tp->t_flags & TF_SENTFIN) { 20721 /* 20722 * If we sent a fin, snd_max is 1 higher than 20723 * snd_una 20724 */ 20725 outstanding--; 20726 } 20727 if (sack_rxmit) { 20728 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 20729 flags &= ~TH_FIN; 20730 } 20731 } 20732 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), 20733 (long)TCP_MAXWIN << tp->rcv_scale); 20734 20735 /* 20736 * Sender silly window avoidance. We transmit under the following 20737 * conditions when len is non-zero: 20738 * 20739 * - We have a full segment (or more with TSO) - This is the last 20740 * buffer in a write()/send() and we are either idle or running 20741 * NODELAY - we've timed out (e.g. persist timer) - we have more 20742 * then 1/2 the maximum send window's worth of data (receiver may be 20743 * limited the window size) - we need to retransmit 20744 */ 20745 if (len) { 20746 if (len >= segsiz) { 20747 goto send; 20748 } 20749 /* 20750 * NOTE! on localhost connections an 'ack' from the remote 20751 * end may occur synchronously with the output and cause us 20752 * to flush a buffer queued with moretocome. XXX 20753 * 20754 */ 20755 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 20756 (idle || (tp->t_flags & TF_NODELAY)) && 20757 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 20758 (tp->t_flags & TF_NOPUSH) == 0) { 20759 pass = 2; 20760 goto send; 20761 } 20762 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 20763 pass = 22; 20764 goto send; 20765 } 20766 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 20767 pass = 4; 20768 goto send; 20769 } 20770 if (sack_rxmit) { 20771 pass = 6; 20772 goto send; 20773 } 20774 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && 20775 (ctf_outstanding(tp) < (segsiz * 2))) { 20776 /* 20777 * We have less than two MSS outstanding (delayed ack) 20778 * and our rwnd will not let us send a full sized 20779 * MSS. Lets go ahead and let this small segment 20780 * out because we want to try to have at least two 20781 * packets inflight to not be caught by delayed ack. 20782 */ 20783 pass = 12; 20784 goto send; 20785 } 20786 } 20787 /* 20788 * Sending of standalone window updates. 20789 * 20790 * Window updates are important when we close our window due to a 20791 * full socket buffer and are opening it again after the application 20792 * reads data from it. Once the window has opened again and the 20793 * remote end starts to send again the ACK clock takes over and 20794 * provides the most current window information. 20795 * 20796 * We must avoid the silly window syndrome whereas every read from 20797 * the receive buffer, no matter how small, causes a window update 20798 * to be sent. 
We also should avoid sending a flurry of window 20799 * updates when the socket buffer had queued a lot of data and the 20800 * application is doing small reads. 20801 * 20802 * Prevent a flurry of pointless window updates by only sending an 20803 * update when we can increase the advertized window by more than 20804 * 1/4th of the socket buffer capacity. When the buffer is getting 20805 * full or is very small be more aggressive and send an update 20806 * whenever we can increase by two mss sized segments. In all other 20807 * situations the ACK's to new incoming data will carry further 20808 * window increases. 20809 * 20810 * Don't send an independent window update if a delayed ACK is 20811 * pending (it will get piggy-backed on it) or the remote side 20812 * already has done a half-close and won't send more data. Skip 20813 * this if the connection is in T/TCP half-open state. 20814 */ 20815 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 20816 !(tp->t_flags & TF_DELACK) && 20817 !TCPS_HAVERCVDFIN(tp->t_state)) { 20818 /* 20819 * "adv" is the amount we could increase the window, taking 20820 * into account that we are limited by TCP_MAXWIN << 20821 * tp->rcv_scale. 20822 */ 20823 int32_t adv; 20824 int oldwin; 20825 20826 adv = recwin; 20827 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 20828 oldwin = (tp->rcv_adv - tp->rcv_nxt); 20829 if (adv > oldwin) 20830 adv -= oldwin; 20831 else { 20832 /* We can't increase the window */ 20833 adv = 0; 20834 } 20835 } else 20836 oldwin = 0; 20837 20838 /* 20839 * If the new window size ends up being the same as or less 20840 * than the old size when it is scaled, then don't force 20841 * a window update. 20842 */ 20843 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 20844 goto dontupdate; 20845 20846 if (adv >= (int32_t)(2 * segsiz) && 20847 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 20848 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 20849 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 20850 pass = 7; 20851 goto send; 20852 } 20853 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 20854 pass = 23; 20855 goto send; 20856 } 20857 } 20858 dontupdate: 20859 20860 /* 20861 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 20862 * is also a catch-all for the retransmit timer timeout case. 20863 */ 20864 if (tp->t_flags & TF_ACKNOW) { 20865 pass = 8; 20866 goto send; 20867 } 20868 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 20869 pass = 9; 20870 goto send; 20871 } 20872 /* 20873 * If our state indicates that FIN should be sent and we have not 20874 * yet done so, then we need to send. 20875 */ 20876 if ((flags & TH_FIN) && 20877 (tp->snd_max == tp->snd_una)) { 20878 pass = 11; 20879 goto send; 20880 } 20881 /* 20882 * No reason to send a segment, just return. 20883 */ 20884 just_return: 20885 SOCK_SENDBUF_UNLOCK(so); 20886 just_return_nolock: 20887 { 20888 int app_limited = CTF_JR_SENT_DATA; 20889 20890 if ((tp->t_flags & TF_FASTOPEN) == 0 && 20891 (flags & TH_FIN) && 20892 (len == 0) && 20893 (sbused(sb) == (tp->snd_max - tp->snd_una)) && 20894 ((tp->snd_max - tp->snd_una) <= segsiz)) { 20895 /* 20896 * Ok less than or right at a MSS is 20897 * outstanding. The original FreeBSD stack would 20898 * have sent a FIN, which can speed things up for 20899 * a transactional application doing a MSG_WAITALL. 20900 * To speed things up since we do *not* send a FIN 20901 * if data is outstanding, we send a "challenge ack". 
20902 * The idea behind that is instead of having to have 20903 * the peer wait for the delayed-ack timer to run off 20904 * we send an ack that makes the peer send us an ack. 20905 */ 20906 rack_send_ack_challange(rack); 20907 } 20908 if (tot_len_this_send > 0) { 20909 rack->r_ctl.fsb.recwin = recwin; 20910 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz, __LINE__); 20911 if ((error == 0) && 20912 rack_use_rfo && 20913 ((flags & (TH_SYN|TH_FIN)) == 0) && 20914 (ipoptlen == 0) && 20915 rack->r_fsb_inited && 20916 TCPS_HAVEESTABLISHED(tp->t_state) && 20917 ((IN_RECOVERY(tp->t_flags)) == 0) && 20918 (rack->r_must_retran == 0) && 20919 ((tp->t_flags & TF_NEEDFIN) == 0) && 20920 (len > 0) && (orig_len > 0) && 20921 (orig_len > len) && 20922 ((orig_len - len) >= segsiz) && 20923 ((optlen == 0) || 20924 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 20925 /* We can send at least one more MSS using our fsb */ 20926 rack_setup_fast_output(tp, rack, sb, len, orig_len, 20927 segsiz, pace_max_seg, hw_tls, flags); 20928 } else 20929 rack->r_fast_output = 0; 20930 rack_log_fsb(rack, tp, so, flags, 20931 ipoptlen, orig_len, len, 0, 20932 1, optlen, __LINE__, 1); 20933 /* Assure when we leave that snd_nxt will point to top */ 20934 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 20935 tp->snd_nxt = tp->snd_max; 20936 } else { 20937 int end_window = 0; 20938 uint32_t seq = tp->gput_ack; 20939 20940 rsm = tqhash_max(rack->r_ctl.tqh); 20941 if (rsm) { 20942 /* 20943 * Mark the last sent that we just-returned (hinting 20944 * that delayed ack may play a role in any rtt measurement). 20945 */ 20946 rsm->r_just_ret = 1; 20947 } 20948 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 20949 rack->r_ctl.rc_agg_delayed = 0; 20950 rack->r_early = 0; 20951 rack->r_late = 0; 20952 rack->r_ctl.rc_agg_early = 0; 20953 if ((ctf_outstanding(tp) + 20954 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), 20955 minseg)) >= tp->snd_wnd) { 20956 /* We are limited by the rwnd */ 20957 app_limited = CTF_JR_RWND_LIMITED; 20958 if (IN_FASTRECOVERY(tp->t_flags)) 20959 rack->r_ctl.rc_prr_sndcnt = 0; 20960 } else if (ctf_outstanding(tp) >= sbavail(sb)) { 20961 /* We are limited by whats available -- app limited */ 20962 app_limited = CTF_JR_APP_LIMITED; 20963 if (IN_FASTRECOVERY(tp->t_flags)) 20964 rack->r_ctl.rc_prr_sndcnt = 0; 20965 } else if ((idle == 0) && 20966 ((tp->t_flags & TF_NODELAY) == 0) && 20967 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 20968 (len < segsiz)) { 20969 /* 20970 * No delay is not on and the 20971 * user is sending less than 1MSS. This 20972 * brings out SWS avoidance so we 20973 * don't send. Another app-limited case. 20974 */ 20975 app_limited = CTF_JR_APP_LIMITED; 20976 } else if (tp->t_flags & TF_NOPUSH) { 20977 /* 20978 * The user has requested no push of 20979 * the last segment and we are 20980 * at the last segment. Another app 20981 * limited case. 20982 */ 20983 app_limited = CTF_JR_APP_LIMITED; 20984 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) { 20985 /* Its the cwnd */ 20986 app_limited = CTF_JR_CWND_LIMITED; 20987 } else if (IN_FASTRECOVERY(tp->t_flags) && 20988 (rack->rack_no_prr == 0) && 20989 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { 20990 app_limited = CTF_JR_PRR; 20991 } else { 20992 /* Now why here are we not sending? 
*/
20993 #ifdef NOW
20994 #ifdef INVARIANTS
20995 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use);
20996 #endif
20997 #endif
20998 app_limited = CTF_JR_ASSESSING;
20999 }
21000 /*
21001 * App limited in some fashion, for our pacing GP
21002 * measurements we don't want any gap (even cwnd).
21003 * Close down the measurement window.
21004 */
21005 if (rack_cwnd_block_ends_measure &&
21006 ((app_limited == CTF_JR_CWND_LIMITED) ||
21007 (app_limited == CTF_JR_PRR))) {
21008 /*
21009 * The reason we are not sending is
21010 * the cwnd (or prr). We have been configured
21011 * to end the measurement window in
21012 * this case.
21013 */
21014 end_window = 1;
21015 } else if (rack_rwnd_block_ends_measure &&
21016 (app_limited == CTF_JR_RWND_LIMITED)) {
21017 /*
21018 * We are rwnd limited and have been
21019 * configured to end the measurement
21020 * window in this case.
21021 */
21022 end_window = 1;
21023 } else if (app_limited == CTF_JR_APP_LIMITED) {
21024 /*
21025 * A true application limited period, we have
21026 * run out of data.
21027 */
21028 end_window = 1;
21029 } else if (app_limited == CTF_JR_ASSESSING) {
21030 /*
21031 * In the assessing case we hit the end of
21032 * the if/else and had no known reason.
21033 * This will panic us under invariants.
21034 *
21035 * If we get this out in logs we need to
21036 * investigate which reason we missed.
21037 */
21038 end_window = 1;
21039 }
21040 if (end_window) {
21041 uint8_t log = 0;
21042 
21043 /* Adjust the Gput measurement */
21044 if ((tp->t_flags & TF_GPUTINPROG) &&
21045 SEQ_GT(tp->gput_ack, tp->snd_max)) {
21046 tp->gput_ack = tp->snd_max;
21047 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) {
21048 /*
21049 * There is not enough to measure.
21050 */
21051 tp->t_flags &= ~TF_GPUTINPROG;
21052 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
21053 rack->r_ctl.rc_gp_srtt /*flex1*/,
21054 tp->gput_seq,
21055 0, 0, 18, __LINE__, NULL, 0);
21056 } else
21057 log = 1;
21058 }
21059 /* Mark the last packet as app limited */
21060 rsm = tqhash_max(rack->r_ctl.tqh);
21061 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
21062 if (rack->r_ctl.rc_app_limited_cnt == 0)
21063 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
21064 else {
21065 /*
21066 * Go out to the end app limited and mark
21067 * this new one as next and move the end_appl up
21068 * to this guy.
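 * In other words the app-limited rsms form a chain: the previous
 * end's r_nseq_appl is pointed at this rsm's starting sequence,
 * rc_end_appl is advanced to this rsm, and rc_first_appl keeps
 * marking the head of the chain.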
21069 */ 21070 if (rack->r_ctl.rc_end_appl) 21071 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 21072 rack->r_ctl.rc_end_appl = rsm; 21073 } 21074 rsm->r_flags |= RACK_APP_LIMITED; 21075 rack->r_ctl.rc_app_limited_cnt++; 21076 } 21077 if (log) 21078 rack_log_pacing_delay_calc(rack, 21079 rack->r_ctl.rc_app_limited_cnt, seq, 21080 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); 21081 } 21082 } 21083 /* Check if we need to go into persists or not */ 21084 if ((tp->snd_max == tp->snd_una) && 21085 TCPS_HAVEESTABLISHED(tp->t_state) && 21086 sbavail(sb) && 21087 (sbavail(sb) > tp->snd_wnd) && 21088 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 21089 /* Yes lets make sure to move to persist before timer-start */ 21090 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); 21091 } 21092 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 21093 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use); 21094 } 21095 #ifdef NETFLIX_SHARED_CWND 21096 if ((sbavail(sb) == 0) && 21097 rack->r_ctl.rc_scw) { 21098 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 21099 rack->rack_scwnd_is_idle = 1; 21100 } 21101 #endif 21102 #ifdef TCP_ACCOUNTING 21103 if (tot_len_this_send > 0) { 21104 crtsc = get_cyclecount(); 21105 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21106 tp->tcp_cnt_counters[SND_OUT_DATA]++; 21107 } 21108 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21109 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 21110 } 21111 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21112 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 21113 } 21114 } else { 21115 crtsc = get_cyclecount(); 21116 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21117 tp->tcp_cnt_counters[SND_LIMITED]++; 21118 } 21119 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21120 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 21121 } 21122 } 21123 sched_unpin(); 21124 #endif 21125 return (0); 21126 21127 send: 21128 if ((rack->r_ctl.crte != NULL) && 21129 (rsm == NULL) && 21130 ((rack->rc_hw_nobuf == 1) || 21131 (rack_hw_check_queue && (check_done == 0)))) { 21132 /* 21133 * We only want to do this once with the hw_check_queue, 21134 * for the enobuf case we would only do it once if 21135 * we come around to again, the flag will be clear. 21136 */ 21137 check_done = 1; 21138 slot = rack_check_queue_level(rack, tp, &tv, cts, len, segsiz); 21139 if (slot) { 21140 rack->r_ctl.rc_agg_delayed = 0; 21141 rack->r_ctl.rc_agg_early = 0; 21142 rack->r_early = 0; 21143 rack->r_late = 0; 21144 SOCK_SENDBUF_UNLOCK(so); 21145 goto skip_all_send; 21146 } 21147 } 21148 if (rsm || sack_rxmit) 21149 counter_u64_add(rack_nfto_resend, 1); 21150 else 21151 counter_u64_add(rack_non_fto_send, 1); 21152 if ((flags & TH_FIN) && 21153 sbavail(sb)) { 21154 /* 21155 * We do not transmit a FIN 21156 * with data outstanding. We 21157 * need to make it so all data 21158 * is acked first. 21159 */ 21160 flags &= ~TH_FIN; 21161 if (TCPS_HAVEESTABLISHED(tp->t_state) && 21162 (sbused(sb) == (tp->snd_max - tp->snd_una)) && 21163 ((tp->snd_max - tp->snd_una) <= segsiz)) { 21164 /* 21165 * Ok less than or right at a MSS is 21166 * outstanding. The original FreeBSD stack would 21167 * have sent a FIN, which can speed things up for 21168 * a transactional application doing a MSG_WAITALL. 21169 * To speed things up since we do *not* send a FIN 21170 * if data is outstanding, we send a "challenge ack". 
21171 * The idea behind that is instead of having to have 21172 * the peer wait for the delayed-ack timer to run off 21173 * we send an ack that makes the peer send us an ack. 21174 */ 21175 rack_send_ack_challange(rack); 21176 } 21177 } 21178 /* Enforce stack imposed max seg size if we have one */ 21179 if (pace_max_seg && 21180 (len > pace_max_seg)) { 21181 mark = 1; 21182 len = pace_max_seg; 21183 } 21184 if ((rsm == NULL) && 21185 (rack->pcm_in_progress == 0) && 21186 (rack->r_ctl.pcm_max_seg > 0) && 21187 (len >= rack->r_ctl.pcm_max_seg)) { 21188 /* It is large enough for a measurement */ 21189 add_flag |= RACK_IS_PCM; 21190 rack_log_pcm(rack, 5, len, rack->r_ctl.pcm_max_seg, add_flag); 21191 } else if (rack_verbose_logging) { 21192 rack_log_pcm(rack, 6, len, rack->r_ctl.pcm_max_seg, add_flag); 21193 } 21194 21195 SOCKBUF_LOCK_ASSERT(sb); 21196 if (len > 0) { 21197 if (len >= segsiz) 21198 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 21199 else 21200 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 21201 } 21202 /* 21203 * Before ESTABLISHED, force sending of initial options unless TCP 21204 * set not to do any options. NOTE: we assume that the IP/TCP header 21205 * plus TCP options always fit in a single mbuf, leaving room for a 21206 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 21207 * + optlen <= MCLBYTES 21208 */ 21209 optlen = 0; 21210 #ifdef INET6 21211 if (isipv6) 21212 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 21213 else 21214 #endif 21215 hdrlen = sizeof(struct tcpiphdr); 21216 21217 /* 21218 * Ok what seq are we sending from. If we have 21219 * no rsm to use, then we look at various bits, 21220 * if we are putting out a SYN it will be ISS. 21221 * If we are retransmitting a FIN it will 21222 * be snd_max-1 else its snd_max. 21223 */ 21224 if (rsm == NULL) { 21225 if (flags & TH_SYN) 21226 rack_seq = tp->iss; 21227 else if ((flags & TH_FIN) && 21228 (tp->t_flags & TF_SENTFIN)) 21229 rack_seq = tp->snd_max - 1; 21230 else 21231 rack_seq = tp->snd_max; 21232 } else { 21233 rack_seq = rsm->r_start; 21234 } 21235 /* 21236 * Compute options for segment. We only have to care about SYN and 21237 * established connection segments. Options for SYN-ACK segments 21238 * are handled in TCP syncache. 21239 */ 21240 to.to_flags = 0; 21241 if ((tp->t_flags & TF_NOOPT) == 0) { 21242 /* Maximum segment size. */ 21243 if (flags & TH_SYN) { 21244 to.to_mss = tcp_mssopt(&inp->inp_inc); 21245 if (tp->t_port) 21246 to.to_mss -= V_tcp_udp_tunneling_overhead; 21247 to.to_flags |= TOF_MSS; 21248 21249 /* 21250 * On SYN or SYN|ACK transmits on TFO connections, 21251 * only include the TFO option if it is not a 21252 * retransmit, as the presence of the TFO option may 21253 * have caused the original SYN or SYN|ACK to have 21254 * been dropped by a middlebox. 21255 */ 21256 if ((tp->t_flags & TF_FASTOPEN) && 21257 (tp->t_rxtshift == 0)) { 21258 if (tp->t_state == TCPS_SYN_RECEIVED) { 21259 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 21260 to.to_tfo_cookie = 21261 (u_int8_t *)&tp->t_tfo_cookie.server; 21262 to.to_flags |= TOF_FASTOPEN; 21263 wanted_cookie = 1; 21264 } else if (tp->t_state == TCPS_SYN_SENT) { 21265 to.to_tfo_len = 21266 tp->t_tfo_client_cookie_len; 21267 to.to_tfo_cookie = 21268 tp->t_tfo_cookie.client; 21269 to.to_flags |= TOF_FASTOPEN; 21270 wanted_cookie = 1; 21271 /* 21272 * If we wind up having more data to 21273 * send with the SYN than can fit in 21274 * one segment, don't send any more 21275 * until the SYN|ACK comes back from 21276 * the other end. 
21277 */ 21278 sendalot = 0; 21279 } 21280 } 21281 } 21282 /* Window scaling. */ 21283 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 21284 to.to_wscale = tp->request_r_scale; 21285 to.to_flags |= TOF_SCALE; 21286 } 21287 /* Timestamps. */ 21288 if ((tp->t_flags & TF_RCVD_TSTMP) || 21289 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 21290 uint32_t ts_to_use; 21291 21292 if ((rack->r_rcvpath_rtt_up == 1) && 21293 (ms_cts == rack->r_ctl.last_rcv_tstmp_for_rtt)) { 21294 /* 21295 * When we are doing a rcv_rtt probe all 21296 * other timestamps use the next msec. This 21297 * is safe since our previous ack is in the 21298 * air and we will just have a few more 21299 * on the next ms. This assures that only 21300 * the one ack has the ms_cts that was on 21301 * our ack-probe. 21302 */ 21303 ts_to_use = ms_cts + 1; 21304 } else { 21305 ts_to_use = ms_cts; 21306 } 21307 to.to_tsval = ts_to_use + tp->ts_offset; 21308 to.to_tsecr = tp->ts_recent; 21309 to.to_flags |= TOF_TS; 21310 if ((len == 0) && 21311 (TCPS_HAVEESTABLISHED(tp->t_state)) && 21312 ((ms_cts - rack->r_ctl.last_rcv_tstmp_for_rtt) > RCV_PATH_RTT_MS) && 21313 (tp->snd_una == tp->snd_max) && 21314 (flags & TH_ACK) && 21315 (sbavail(sb) == 0) && 21316 (rack->r_ctl.current_round != 0) && 21317 ((flags & (TH_SYN|TH_FIN)) == 0) && 21318 (rack->r_rcvpath_rtt_up == 0)) { 21319 rack->r_ctl.last_rcv_tstmp_for_rtt = ms_cts; 21320 rack->r_ctl.last_time_of_arm_rcv = cts; 21321 rack->r_rcvpath_rtt_up = 1; 21322 /* Subtract 1 from seq to force a response */ 21323 rack_seq--; 21324 } 21325 } 21326 /* Set receive buffer autosizing timestamp. */ 21327 if (tp->rfbuf_ts == 0 && 21328 (so->so_rcv.sb_flags & SB_AUTOSIZE)) { 21329 tp->rfbuf_ts = ms_cts; 21330 } 21331 /* Selective ACK's. */ 21332 if (tp->t_flags & TF_SACK_PERMIT) { 21333 if (flags & TH_SYN) 21334 to.to_flags |= TOF_SACKPERM; 21335 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 21336 tp->rcv_numsacks > 0) { 21337 to.to_flags |= TOF_SACK; 21338 to.to_nsacks = tp->rcv_numsacks; 21339 to.to_sacks = (u_char *)tp->sackblks; 21340 } 21341 } 21342 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 21343 /* TCP-MD5 (RFC2385). */ 21344 if (tp->t_flags & TF_SIGNATURE) 21345 to.to_flags |= TOF_SIGNATURE; 21346 #endif 21347 21348 /* Processing the options. */ 21349 hdrlen += optlen = tcp_addoptions(&to, opt); 21350 /* 21351 * If we wanted a TFO option to be added, but it was unable 21352 * to fit, ensure no data is sent. 21353 */ 21354 if ((tp->t_flags & TF_FASTOPEN) && wanted_cookie && 21355 !(to.to_flags & TOF_FASTOPEN)) 21356 len = 0; 21357 } 21358 if (tp->t_port) { 21359 if (V_tcp_udp_tunneling_port == 0) { 21360 /* The port was removed?? 
*/ 21361 SOCK_SENDBUF_UNLOCK(so); 21362 #ifdef TCP_ACCOUNTING 21363 crtsc = get_cyclecount(); 21364 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21365 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 21366 } 21367 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21368 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 21369 } 21370 sched_unpin(); 21371 #endif 21372 return (EHOSTUNREACH); 21373 } 21374 hdrlen += sizeof(struct udphdr); 21375 } 21376 #ifdef INET6 21377 if (isipv6) 21378 ipoptlen = ip6_optlen(inp); 21379 else 21380 #endif 21381 if (inp->inp_options) 21382 ipoptlen = inp->inp_options->m_len - 21383 offsetof(struct ipoption, ipopt_list); 21384 else 21385 ipoptlen = 0; 21386 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 21387 ipoptlen += ipsec_optlen; 21388 #endif 21389 21390 /* 21391 * Adjust data length if insertion of options will bump the packet 21392 * length beyond the t_maxseg length. Clear the FIN bit because we 21393 * cut off the tail of the segment. 21394 */ 21395 if (len + optlen + ipoptlen > tp->t_maxseg) { 21396 if (tso) { 21397 uint32_t if_hw_tsomax; 21398 uint32_t moff; 21399 int32_t max_len; 21400 21401 /* extract TSO information */ 21402 if_hw_tsomax = tp->t_tsomax; 21403 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 21404 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 21405 KASSERT(ipoptlen == 0, 21406 ("%s: TSO can't do IP options", __func__)); 21407 21408 /* 21409 * Check if we should limit by maximum payload 21410 * length: 21411 */ 21412 if (if_hw_tsomax != 0) { 21413 /* compute maximum TSO length */ 21414 max_len = (if_hw_tsomax - hdrlen - 21415 max_linkhdr); 21416 if (max_len <= 0) { 21417 len = 0; 21418 } else if (len > max_len) { 21419 sendalot = 1; 21420 len = max_len; 21421 mark = 2; 21422 } 21423 } 21424 /* 21425 * Prevent the last segment from being fractional 21426 * unless the send sockbuf can be emptied: 21427 */ 21428 max_len = (tp->t_maxseg - optlen); 21429 if ((sb_offset + len) < sbavail(sb)) { 21430 moff = len % (u_int)max_len; 21431 if (moff != 0) { 21432 mark = 3; 21433 len -= moff; 21434 } 21435 } 21436 /* 21437 * In case there are too many small fragments don't 21438 * use TSO: 21439 */ 21440 if (len <= max_len) { 21441 mark = 4; 21442 tso = 0; 21443 } 21444 /* 21445 * Send the FIN in a separate segment after the bulk 21446 * sending is done. We don't trust the TSO 21447 * implementations to clear the FIN flag on all but 21448 * the last segment. 21449 */ 21450 if (tp->t_flags & TF_NEEDFIN) { 21451 sendalot = 4; 21452 } 21453 } else { 21454 mark = 5; 21455 if (optlen + ipoptlen >= tp->t_maxseg) { 21456 /* 21457 * Since we don't have enough space to put 21458 * the IP header chain and the TCP header in 21459 * one packet as required by RFC 7112, don't 21460 * send it. Also ensure that at least one 21461 * byte of the payload can be put into the 21462 * TCP segment. 21463 */ 21464 SOCK_SENDBUF_UNLOCK(so); 21465 error = EMSGSIZE; 21466 sack_rxmit = 0; 21467 goto out; 21468 } 21469 len = tp->t_maxseg - optlen - ipoptlen; 21470 sendalot = 5; 21471 } 21472 } else { 21473 tso = 0; 21474 mark = 6; 21475 } 21476 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 21477 ("%s: len > IP_MAXPACKET", __func__)); 21478 #ifdef DIAGNOSTIC 21479 #ifdef INET6 21480 if (max_linkhdr + hdrlen > MCLBYTES) 21481 #else 21482 if (max_linkhdr + hdrlen > MHLEN) 21483 #endif 21484 panic("tcphdr too big"); 21485 #endif 21486 21487 /* 21488 * This KASSERT is here to catch edge cases at a well defined place. 21489 * Before, those had triggered (random) panic conditions further 21490 * down. 
21491 */ 21492 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 21493 if ((len == 0) && 21494 (flags & TH_FIN) && 21495 (sbused(sb))) { 21496 /* 21497 * We have outstanding data, don't send a fin by itself!. 21498 * 21499 * Check to see if we need to send a challenge ack. 21500 */ 21501 if ((sbused(sb) == (tp->snd_max - tp->snd_una)) && 21502 ((tp->snd_max - tp->snd_una) <= segsiz)) { 21503 /* 21504 * Ok less than or right at a MSS is 21505 * outstanding. The original FreeBSD stack would 21506 * have sent a FIN, which can speed things up for 21507 * a transactional application doing a MSG_WAITALL. 21508 * To speed things up since we do *not* send a FIN 21509 * if data is outstanding, we send a "challenge ack". 21510 * The idea behind that is instead of having to have 21511 * the peer wait for the delayed-ack timer to run off 21512 * we send an ack that makes the peer send us an ack. 21513 */ 21514 rack_send_ack_challange(rack); 21515 } 21516 goto just_return; 21517 } 21518 /* 21519 * Grab a header mbuf, attaching a copy of data to be transmitted, 21520 * and initialize the header from the template for sends on this 21521 * connection. 21522 */ 21523 hw_tls = tp->t_nic_ktls_xmit != 0; 21524 if (len) { 21525 uint32_t max_val; 21526 uint32_t moff; 21527 21528 if (pace_max_seg) 21529 max_val = pace_max_seg; 21530 else 21531 max_val = len; 21532 /* 21533 * We allow a limit on sending with hptsi. 21534 */ 21535 if (len > max_val) { 21536 mark = 7; 21537 len = max_val; 21538 } 21539 #ifdef INET6 21540 if (MHLEN < hdrlen + max_linkhdr) 21541 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 21542 else 21543 #endif 21544 m = m_gethdr(M_NOWAIT, MT_DATA); 21545 21546 if (m == NULL) { 21547 SOCK_SENDBUF_UNLOCK(so); 21548 error = ENOBUFS; 21549 sack_rxmit = 0; 21550 goto out; 21551 } 21552 m->m_data += max_linkhdr; 21553 m->m_len = hdrlen; 21554 21555 /* 21556 * Start the m_copy functions from the closest mbuf to the 21557 * sb_offset in the socket buffer chain. 21558 */ 21559 mb = sbsndptr_noadv(sb, sb_offset, &moff); 21560 s_mb = mb; 21561 s_moff = moff; 21562 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 21563 m_copydata(mb, moff, (int)len, 21564 mtod(m, caddr_t)+hdrlen); 21565 /* 21566 * If we are not retransmitting advance the 21567 * sndptr to help remember the next place in 21568 * the sb. 21569 */ 21570 if (rsm == NULL) 21571 sbsndptr_adv(sb, mb, len); 21572 m->m_len += len; 21573 } else { 21574 struct sockbuf *msb; 21575 21576 /* 21577 * If we are not retransmitting pass in msb so 21578 * the socket buffer can be advanced. Otherwise 21579 * set it to NULL if its a retransmission since 21580 * we don't want to change the sb remembered 21581 * location. 21582 */ 21583 if (rsm == NULL) 21584 msb = sb; 21585 else 21586 msb = NULL; 21587 m->m_next = tcp_m_copym( 21588 mb, moff, &len, 21589 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 21590 ((rsm == NULL) ? hw_tls : 0) 21591 #ifdef NETFLIX_COPY_ARGS 21592 , &s_mb, &s_moff 21593 #endif 21594 ); 21595 if (len <= (tp->t_maxseg - optlen)) { 21596 /* 21597 * Must have ran out of mbufs for the copy 21598 * shorten it to no longer need tso. Lets 21599 * not put on sendalot since we are low on 21600 * mbufs. 
21601 */ 21602 tso = 0; 21603 } 21604 if (m->m_next == NULL) { 21605 SOCK_SENDBUF_UNLOCK(so); 21606 (void)m_free(m); 21607 error = ENOBUFS; 21608 sack_rxmit = 0; 21609 goto out; 21610 } 21611 } 21612 if (sack_rxmit) { 21613 if (rsm && (rsm->r_flags & RACK_TLP)) { 21614 /* 21615 * TLP should not count in retran count, but 21616 * in its own bin 21617 */ 21618 counter_u64_add(rack_tlp_retran, 1); 21619 counter_u64_add(rack_tlp_retran_bytes, len); 21620 } else { 21621 tp->t_sndrexmitpack++; 21622 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 21623 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 21624 } 21625 #ifdef STATS 21626 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 21627 len); 21628 #endif 21629 } else { 21630 KMOD_TCPSTAT_INC(tcps_sndpack); 21631 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 21632 #ifdef STATS 21633 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 21634 len); 21635 #endif 21636 } 21637 /* 21638 * If we're sending everything we've got, set PUSH. (This 21639 * will keep happy those implementations which only give 21640 * data to the user when a buffer fills or a PUSH comes in.) 21641 */ 21642 if (sb_offset + len == sbused(sb) && 21643 sbused(sb) && 21644 !(flags & TH_SYN)) { 21645 flags |= TH_PUSH; 21646 add_flag |= RACK_HAD_PUSH; 21647 } 21648 21649 SOCK_SENDBUF_UNLOCK(so); 21650 } else { 21651 SOCK_SENDBUF_UNLOCK(so); 21652 if (tp->t_flags & TF_ACKNOW) 21653 KMOD_TCPSTAT_INC(tcps_sndacks); 21654 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 21655 KMOD_TCPSTAT_INC(tcps_sndctrl); 21656 else 21657 KMOD_TCPSTAT_INC(tcps_sndwinup); 21658 21659 m = m_gethdr(M_NOWAIT, MT_DATA); 21660 if (m == NULL) { 21661 error = ENOBUFS; 21662 sack_rxmit = 0; 21663 goto out; 21664 } 21665 #ifdef INET6 21666 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 21667 MHLEN >= hdrlen) { 21668 M_ALIGN(m, hdrlen); 21669 } else 21670 #endif 21671 m->m_data += max_linkhdr; 21672 m->m_len = hdrlen; 21673 } 21674 SOCK_SENDBUF_UNLOCK_ASSERT(so); 21675 m->m_pkthdr.rcvif = (struct ifnet *)0; 21676 #ifdef MAC 21677 mac_inpcb_create_mbuf(inp, m); 21678 #endif 21679 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 21680 #ifdef INET6 21681 if (isipv6) 21682 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 21683 else 21684 #endif /* INET6 */ 21685 #ifdef INET 21686 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 21687 #endif 21688 th = rack->r_ctl.fsb.th; 21689 udp = rack->r_ctl.fsb.udp; 21690 if (udp) { 21691 #ifdef INET6 21692 if (isipv6) 21693 ulen = hdrlen + len - sizeof(struct ip6_hdr); 21694 else 21695 #endif /* INET6 */ 21696 ulen = hdrlen + len - sizeof(struct ip); 21697 udp->uh_ulen = htons(ulen); 21698 } 21699 } else { 21700 #ifdef INET6 21701 if (isipv6) { 21702 ip6 = mtod(m, struct ip6_hdr *); 21703 if (tp->t_port) { 21704 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 21705 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 21706 udp->uh_dport = tp->t_port; 21707 ulen = hdrlen + len - sizeof(struct ip6_hdr); 21708 udp->uh_ulen = htons(ulen); 21709 th = (struct tcphdr *)(udp + 1); 21710 } else 21711 th = (struct tcphdr *)(ip6 + 1); 21712 tcpip_fillheaders(inp, tp->t_port, ip6, th); 21713 } else 21714 #endif /* INET6 */ 21715 { 21716 #ifdef INET 21717 ip = mtod(m, struct ip *); 21718 if (tp->t_port) { 21719 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 21720 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 21721 udp->uh_dport = tp->t_port; 21722 ulen = hdrlen + len - sizeof(struct ip); 21723 udp->uh_ulen = htons(ulen); 21724 th = (struct tcphdr *)(udp + 1); 
21725 } else 21726 th = (struct tcphdr *)(ip + 1); 21727 tcpip_fillheaders(inp, tp->t_port, ip, th); 21728 #endif 21729 } 21730 } 21731 /* 21732 * If we are starting a connection, send ECN setup SYN packet. If we 21733 * are on a retransmit, we may resend those bits a number of times 21734 * as per RFC 3168. 21735 */ 21736 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { 21737 flags |= tcp_ecn_output_syn_sent(tp); 21738 } 21739 /* Also handle parallel SYN for ECN */ 21740 if (TCPS_HAVERCVDSYN(tp->t_state) && 21741 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 21742 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit); 21743 if ((tp->t_state == TCPS_SYN_RECEIVED) && 21744 (tp->t_flags2 & TF2_ECN_SND_ECE)) 21745 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 21746 #ifdef INET6 21747 if (isipv6) { 21748 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 21749 ip6->ip6_flow |= htonl(ect << 20); 21750 } 21751 else 21752 #endif 21753 { 21754 #ifdef INET 21755 ip->ip_tos &= ~IPTOS_ECN_MASK; 21756 ip->ip_tos |= ect; 21757 #endif 21758 } 21759 } 21760 th->th_seq = htonl(rack_seq); 21761 th->th_ack = htonl(tp->rcv_nxt); 21762 tcp_set_flags(th, flags); 21763 /* 21764 * Calculate receive window. Don't shrink window, but avoid silly 21765 * window syndrome. 21766 * If a RST segment is sent, advertise a window of zero. 21767 */ 21768 if (flags & TH_RST) { 21769 recwin = 0; 21770 } else { 21771 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 21772 recwin < (long)segsiz) { 21773 recwin = 0; 21774 } 21775 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 21776 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 21777 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 21778 } 21779 21780 /* 21781 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 21782 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 21783 * handled in syncache. 21784 */ 21785 if (flags & TH_SYN) 21786 th->th_win = htons((u_short) 21787 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 21788 else { 21789 /* Avoid shrinking window with window scaling. */ 21790 recwin = roundup2(recwin, 1 << tp->rcv_scale); 21791 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 21792 } 21793 /* 21794 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 21795 * window. This may cause the remote transmitter to stall. This 21796 * flag tells soreceive() to disable delayed acknowledgements when 21797 * draining the buffer. This can occur if the receiver is 21798 * attempting to read more data than can be buffered prior to 21799 * transmitting on the connection. 21800 */ 21801 if (th->th_win == 0) { 21802 tp->t_sndzerowin++; 21803 tp->t_flags |= TF_RXWIN0SENT; 21804 } else 21805 tp->t_flags &= ~TF_RXWIN0SENT; 21806 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 21807 /* Now are we using fsb?, if so copy the template data to the mbuf */ 21808 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 21809 uint8_t *cpto; 21810 21811 cpto = mtod(m, uint8_t *); 21812 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 21813 /* 21814 * We have just copied in: 21815 * IP/IP6 21816 * <optional udphdr> 21817 * tcphdr (no options) 21818 * 21819 * We need to grab the correct pointers into the mbuf 21820 * for both the tcp header, and possibly the udp header (if tunneling). 21821 * We do this by using the offset in the copy buffer and adding it 21822 * to the mbuf base pointer (cpto). 
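 * Note that the template only covers the network and transport headers
 * (rack->r_ctl.fsb.tcp_ip_hdr_len bytes); any TCP options for this
 * segment are appended separately below, right after the tcp header.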
21823 */ 21824 #ifdef INET6 21825 if (isipv6) 21826 ip6 = mtod(m, struct ip6_hdr *); 21827 else 21828 #endif /* INET6 */ 21829 #ifdef INET 21830 ip = mtod(m, struct ip *); 21831 #endif 21832 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 21833 /* If we have a udp header lets set it into the mbuf as well */ 21834 if (udp) 21835 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); 21836 } 21837 if (optlen) { 21838 bcopy(opt, th + 1, optlen); 21839 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 21840 } 21841 /* 21842 * Put TCP length in extended header, and then checksum extended 21843 * header and data. 21844 */ 21845 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 21846 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 21847 if (to.to_flags & TOF_SIGNATURE) { 21848 /* 21849 * Calculate MD5 signature and put it into the place 21850 * determined before. 21851 * NOTE: since TCP options buffer doesn't point into 21852 * mbuf's data, calculate offset and use it. 21853 */ 21854 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 21855 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 21856 /* 21857 * Do not send segment if the calculation of MD5 21858 * digest has failed. 21859 */ 21860 goto out; 21861 } 21862 } 21863 #endif 21864 #ifdef INET6 21865 if (isipv6) { 21866 /* 21867 * ip6_plen is not need to be filled now, and will be filled 21868 * in ip6_output. 21869 */ 21870 if (tp->t_port) { 21871 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 21872 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 21873 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 21874 th->th_sum = htons(0); 21875 UDPSTAT_INC(udps_opackets); 21876 } else { 21877 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 21878 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 21879 th->th_sum = in6_cksum_pseudo(ip6, 21880 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 21881 0); 21882 } 21883 } 21884 #endif 21885 #if defined(INET6) && defined(INET) 21886 else 21887 #endif 21888 #ifdef INET 21889 { 21890 if (tp->t_port) { 21891 m->m_pkthdr.csum_flags = CSUM_UDP; 21892 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 21893 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 21894 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 21895 th->th_sum = htons(0); 21896 UDPSTAT_INC(udps_opackets); 21897 } else { 21898 m->m_pkthdr.csum_flags = CSUM_TCP; 21899 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 21900 th->th_sum = in_pseudo(ip->ip_src.s_addr, 21901 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 21902 IPPROTO_TCP + len + optlen)); 21903 } 21904 /* IP version must be set here for ipv4/ipv6 checking later */ 21905 KASSERT(ip->ip_v == IPVERSION, 21906 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 21907 } 21908 #endif 21909 /* 21910 * Enable TSO and specify the size of the segments. The TCP pseudo 21911 * header checksum is always provided. XXX: Fixme: This is currently 21912 * not the case for IPv6. 21913 */ 21914 if (tso) { 21915 /* 21916 * Here we must use t_maxseg and the optlen since 21917 * the optlen may include SACK's (or DSACK). 
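 * The segment size handed to the hardware is t_maxseg - optlen so that
 * each segment the NIC generates still fits within the MSS once the
 * header (including options) is replicated onto it.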
21918 */ 21919 KASSERT(len > tp->t_maxseg - optlen, 21920 ("%s: len <= tso_segsz", __func__)); 21921 m->m_pkthdr.csum_flags |= CSUM_TSO; 21922 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 21923 } 21924 KASSERT(len + hdrlen == m_length(m, NULL), 21925 ("%s: mbuf chain different than expected: %d + %u != %u", 21926 __func__, len, hdrlen, m_length(m, NULL))); 21927 21928 #ifdef TCP_HHOOK 21929 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 21930 hhook_run_tcp_est_out(tp, th, &to, len, tso); 21931 #endif 21932 if ((rack->r_ctl.crte != NULL) && 21933 (rack->rc_hw_nobuf == 0) && 21934 tcp_bblogging_on(tp)) { 21935 rack_log_queue_level(tp, rack, len, &tv, cts); 21936 } 21937 /* We're getting ready to send; log now. */ 21938 if (tcp_bblogging_on(rack->rc_tp)) { 21939 union tcp_log_stackspecific log; 21940 21941 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 21942 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 21943 if (rack->rack_no_prr) 21944 log.u_bbr.flex1 = 0; 21945 else 21946 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 21947 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 21948 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 21949 log.u_bbr.flex4 = orig_len; 21950 /* Save off the early/late values */ 21951 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 21952 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 21953 log.u_bbr.bw_inuse = rack_get_bw(rack); 21954 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 21955 log.u_bbr.flex8 = 0; 21956 if (rsm) { 21957 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 21958 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 21959 counter_u64_add(rack_collapsed_win_rxt, 1); 21960 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 21961 } 21962 if (doing_tlp) 21963 log.u_bbr.flex8 = 2; 21964 else 21965 log.u_bbr.flex8 = 1; 21966 } else { 21967 if (doing_tlp) 21968 log.u_bbr.flex8 = 3; 21969 } 21970 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 21971 log.u_bbr.flex7 = mark; 21972 log.u_bbr.flex7 <<= 8; 21973 log.u_bbr.flex7 |= pass; 21974 log.u_bbr.pkts_out = tp->t_maxseg; 21975 log.u_bbr.timeStamp = cts; 21976 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 21977 if (rsm && (rsm->r_rtr_cnt > 0)) { 21978 /* 21979 * When we have a retransmit we want to log the 21980 * burst at send and flight at send from before. 21981 */ 21982 log.u_bbr.flex5 = rsm->r_fas; 21983 log.u_bbr.bbr_substate = rsm->r_bas; 21984 } else { 21985 /* 21986 * New transmits we log in flex5 the inflight again as 21987 * well as the number of segments in our send in the 21988 * substate field. 21989 */ 21990 log.u_bbr.flex5 = log.u_bbr.inflight; 21991 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 21992 } 21993 log.u_bbr.lt_epoch = cwnd_to_use; 21994 log.u_bbr.delivered = sendalot; 21995 log.u_bbr.rttProp = (uintptr_t)rsm; 21996 log.u_bbr.pkt_epoch = __LINE__; 21997 if (rsm) { 21998 log.u_bbr.delRate = rsm->r_flags; 21999 log.u_bbr.delRate <<= 31; 22000 log.u_bbr.delRate |= rack->r_must_retran; 22001 log.u_bbr.delRate <<= 1; 22002 log.u_bbr.delRate |= (sack_rxmit & 0x00000001); 22003 } else { 22004 log.u_bbr.delRate = rack->r_must_retran; 22005 log.u_bbr.delRate <<= 1; 22006 log.u_bbr.delRate |= (sack_rxmit & 0x00000001); 22007 } 22008 lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 22009 len, &log, false, NULL, __func__, __LINE__, &tv); 22010 } else 22011 lgb = NULL; 22012 22013 /* 22014 * Fill in IP length and desired time to live and send to IP level. 
22015 * There should be a better way to handle ttl and tos; we could keep 22016 * them in the template, but need a way to checksum without them. 22017 */ 22018 /* 22019 * m->m_pkthdr.len should have been set before cksum calculation, 22020 * because in6_cksum() needs it. 22021 */ 22022 #ifdef INET6 22023 if (isipv6) { 22024 /* 22025 * we separately set hoplimit for every segment, since the 22026 * user might want to change the value via setsockopt. Also, 22027 * desired default hop limit might be changed via Neighbor 22028 * Discovery. 22029 */ 22030 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); 22031 22032 /* 22033 * Set the packet size here for the benefit of DTrace 22034 * probes. ip6_output() will set it properly; it's supposed 22035 * to include the option header lengths as well. 22036 */ 22037 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 22038 22039 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 22040 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 22041 else 22042 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 22043 22044 if (tp->t_state == TCPS_SYN_SENT) 22045 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 22046 22047 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 22048 /* TODO: IPv6 IP6TOS_ECT bit on */ 22049 error = ip6_output(m, 22050 inp->in6p_outputopts, 22051 &inp->inp_route6, 22052 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 22053 NULL, NULL, inp); 22054 22055 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) 22056 mtu = inp->inp_route6.ro_nh->nh_mtu; 22057 } 22058 #endif /* INET6 */ 22059 #if defined(INET) && defined(INET6) 22060 else 22061 #endif 22062 #ifdef INET 22063 { 22064 ip->ip_len = htons(m->m_pkthdr.len); 22065 #ifdef INET6 22066 if (inp->inp_vflag & INP_IPV6PROTO) 22067 ip->ip_ttl = in6_selecthlim(inp, NULL); 22068 #endif /* INET6 */ 22069 rack->r_ctl.fsb.hoplimit = ip->ip_ttl; 22070 /* 22071 * If we do path MTU discovery, then we set DF on every 22072 * packet. This might not be the best thing to do according 22073 * to RFC3390 Section 2. However the tcp hostcache mitigates 22074 * the problem so it affects only the first tcp connection 22075 * with a host. 22076 * 22077 * NB: Don't set DF on small MTU/MSS to have a safe 22078 * fallback. 22079 */ 22080 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 22081 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 22082 if (tp->t_port == 0 || len < V_tcp_minmss) { 22083 ip->ip_off |= htons(IP_DF); 22084 } 22085 } else { 22086 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 22087 } 22088 22089 if (tp->t_state == TCPS_SYN_SENT) 22090 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 22091 22092 TCP_PROBE5(send, NULL, tp, ip, tp, th); 22093 22094 error = ip_output(m, 22095 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 22096 inp->inp_options, 22097 #else 22098 NULL, 22099 #endif 22100 &inp->inp_route, 22101 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0, 22102 inp); 22103 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) 22104 mtu = inp->inp_route.ro_nh->nh_mtu; 22105 } 22106 #endif /* INET */ 22107 if (lgb) { 22108 lgb->tlb_errno = error; 22109 lgb = NULL; 22110 } 22111 22112 out: 22113 /* 22114 * In transmit state, time the transmission and arrange for the 22115 * retransmit. In persist state, just set snd_max.
22116 */ 22117 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 22118 rack_to_usec_ts(&tv), 22119 rsm, add_flag, s_mb, s_moff, hw_tls, segsiz); 22120 if (error == 0) { 22121 if (add_flag & RACK_IS_PCM) { 22122 /* We just launched a PCM */ 22123 /* rrs here log */ 22124 rack->pcm_in_progress = 1; 22125 rack->pcm_needed = 0; 22126 rack_log_pcm(rack, 7, len, rack->r_ctl.pcm_max_seg, add_flag); 22127 } 22128 if (rsm == NULL) { 22129 if (rack->lt_bw_up == 0) { 22130 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv); 22131 rack->r_ctl.lt_seq = tp->snd_una; 22132 rack->lt_bw_up = 1; 22133 } else if (((rack_seq + len) - rack->r_ctl.lt_seq) > 0x7fffffff) { 22134 /* 22135 * Need to record what we have since we are 22136 * approaching seq wrap. 22137 */ 22138 uint64_t tmark; 22139 22140 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); 22141 rack->r_ctl.lt_seq = tp->snd_una; 22142 tmark = tcp_get_u64_usecs(&tv); 22143 if (tmark > rack->r_ctl.lt_timemark) { 22144 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 22145 rack->r_ctl.lt_timemark = tmark; 22146 } 22147 } 22148 } 22149 rack->forced_ack = 0; /* If we send something zap the FA flag */ 22150 counter_u64_add(rack_total_bytes, len); 22151 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 22152 if (rsm && doing_tlp) { 22153 rack->rc_last_sent_tlp_past_cumack = 0; 22154 rack->rc_last_sent_tlp_seq_valid = 1; 22155 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 22156 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 22157 } 22158 if (rack->rc_hw_nobuf) { 22159 rack->rc_hw_nobuf = 0; 22160 rack->r_ctl.rc_agg_delayed = 0; 22161 rack->r_early = 0; 22162 rack->r_late = 0; 22163 rack->r_ctl.rc_agg_early = 0; 22164 } 22165 if (rsm && (doing_tlp == 0)) { 22166 /* Set we retransmitted */ 22167 rack->rc_gp_saw_rec = 1; 22168 } else { 22169 if (cwnd_to_use > tp->snd_ssthresh) { 22170 /* Set we sent in CA */ 22171 rack->rc_gp_saw_ca = 1; 22172 } else { 22173 /* Set we sent in SS */ 22174 rack->rc_gp_saw_ss = 1; 22175 } 22176 } 22177 if (TCPS_HAVEESTABLISHED(tp->t_state) && 22178 (tp->t_flags & TF_SACK_PERMIT) && 22179 tp->rcv_numsacks > 0) 22180 tcp_clean_dsack_blocks(tp); 22181 tot_len_this_send += len; 22182 if (len == 0) { 22183 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 22184 } else { 22185 int idx; 22186 22187 idx = (len / segsiz) + 3; 22188 if (idx >= TCP_MSS_ACCT_ATIMER) 22189 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 22190 else 22191 counter_u64_add(rack_out_size[idx], 1); 22192 } 22193 } 22194 if ((rack->rack_no_prr == 0) && 22195 sub_from_prr && 22196 (error == 0)) { 22197 if (rack->r_ctl.rc_prr_sndcnt >= len) 22198 rack->r_ctl.rc_prr_sndcnt -= len; 22199 else 22200 rack->r_ctl.rc_prr_sndcnt = 0; 22201 } 22202 sub_from_prr = 0; 22203 if (doing_tlp) { 22204 /* Make sure the TLP is added */ 22205 add_flag |= RACK_TLP; 22206 } else if (rsm) { 22207 /* If its a resend without TLP then it must not have the flag */ 22208 rsm->r_flags &= ~RACK_TLP; 22209 } 22210 22211 22212 if ((error == 0) && 22213 (len > 0) && 22214 (tp->snd_una == tp->snd_max)) 22215 rack->r_ctl.rc_tlp_rxt_last_time = cts; 22216 22217 { 22218 /* 22219 * This block is not associated with the above error == 0 test. 22220 * It is used to advance snd_max if we have a new transmit. 
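 * Besides advancing snd_max this is also where SYN/FIN sequence space
 * is accounted for, where the RTT and goodput measurements are started
 * and where the fast-send-block state is moved forward.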
22221 */ 22222 tcp_seq startseq = tp->snd_max; 22223 22224 22225 if (rsm && (doing_tlp == 0)) 22226 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 22227 if (error) 22228 /* We don't log or do anything with errors */ 22229 goto nomore; 22230 if (doing_tlp == 0) { 22231 if (rsm == NULL) { 22232 /* 22233 * Not a retransmission of some 22234 * sort, new data is going out so 22235 * clear our TLP count and flag. 22236 */ 22237 rack->rc_tlp_in_progress = 0; 22238 rack->r_ctl.rc_tlp_cnt_out = 0; 22239 } 22240 } else { 22241 /* 22242 * We have just sent a TLP, mark that it is true 22243 * and make sure our in progress is set so we 22244 * continue to check the count. 22245 */ 22246 rack->rc_tlp_in_progress = 1; 22247 rack->r_ctl.rc_tlp_cnt_out++; 22248 } 22249 /* 22250 * If we are retransmitting we are done, snd_max 22251 * does not get updated. 22252 */ 22253 if (sack_rxmit) 22254 goto nomore; 22255 if ((tp->snd_una == tp->snd_max) && (len > 0)) { 22256 /* 22257 * Update the time we just added data since 22258 * nothing was outstanding. 22259 */ 22260 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 22261 tp->t_acktime = ticks; 22262 } 22263 /* 22264 * Now for special SYN/FIN handling. 22265 */ 22266 if (flags & (TH_SYN | TH_FIN)) { 22267 if ((flags & TH_SYN) && 22268 ((tp->t_flags & TF_SENTSYN) == 0)) { 22269 tp->snd_max++; 22270 tp->t_flags |= TF_SENTSYN; 22271 } 22272 if ((flags & TH_FIN) && 22273 ((tp->t_flags & TF_SENTFIN) == 0)) { 22274 tp->snd_max++; 22275 tp->t_flags |= TF_SENTFIN; 22276 } 22277 } 22278 tp->snd_max += len; 22279 if (rack->rc_new_rnd_needed) { 22280 rack_new_round_starts(tp, rack, tp->snd_max); 22281 } 22282 /* 22283 * Time this transmission if not a retransmission and 22284 * not currently timing anything. 22285 * This is only relevant in case of switching back to 22286 * the base stack. 22287 */ 22288 if (tp->t_rtttime == 0) { 22289 tp->t_rtttime = ticks; 22290 tp->t_rtseq = startseq; 22291 KMOD_TCPSTAT_INC(tcps_segstimed); 22292 } 22293 if (len && 22294 ((tp->t_flags & TF_GPUTINPROG) == 0)) 22295 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 22296 /* 22297 * If we are doing FO we need to update the mbuf position and subtract 22298 * this happens when the peer sends us duplicate information and 22299 * we thus want to send a DSACK. 22300 * 22301 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 22302 * turned off? If not then we are going to echo multiple DSACK blocks 22303 * out (with the TSO), which we should not be doing. 22304 */ 22305 if (rack->r_fast_output && len) { 22306 if (rack->r_ctl.fsb.left_to_send > len) 22307 rack->r_ctl.fsb.left_to_send -= len; 22308 else 22309 rack->r_ctl.fsb.left_to_send = 0; 22310 if (rack->r_ctl.fsb.left_to_send < segsiz) 22311 rack->r_fast_output = 0; 22312 if (rack->r_fast_output) { 22313 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 22314 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 22315 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); 22316 } 22317 } 22318 if (rack_pcm_blast == 0) { 22319 if ((orig_len > len) && 22320 (add_flag & RACK_IS_PCM) && 22321 (len < pace_max_seg) && 22322 ((pace_max_seg - len) > segsiz)) { 22323 /* 22324 * We are doing a PCM measurement and we did 22325 * not get enough data in the TSO to meet the 22326 * burst requirement. 
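 * Trim what was just sent from the lengths we are tracking, re-take
 * the send buffer lock and loop back to send to emit the rest of the
 * burst.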
22327 */ 22328 uint32_t n_len; 22329 22330 n_len = (orig_len - len); 22331 orig_len -= len; 22332 pace_max_seg -= len; 22333 len = n_len; 22334 sb_offset = tp->snd_max - tp->snd_una; 22335 /* Re-lock for the next spin */ 22336 SOCK_SENDBUF_LOCK(so); 22337 goto send; 22338 } 22339 } else { 22340 if ((orig_len > len) && 22341 (add_flag & RACK_IS_PCM) && 22342 ((orig_len - len) > segsiz)) { 22343 /* 22344 * We are doing a PCM measurement and we did 22345 * not get enough data in the TSO to meet the 22346 * burst requirement. 22347 */ 22348 uint32_t n_len; 22349 22350 n_len = (orig_len - len); 22351 orig_len -= len; 22352 len = n_len; 22353 sb_offset = tp->snd_max - tp->snd_una; 22354 /* Re-lock for the next spin */ 22355 SOCK_SENDBUF_LOCK(so); 22356 goto send; 22357 } 22358 } 22359 } 22360 nomore: 22361 if (error) { 22362 rack->r_ctl.rc_agg_delayed = 0; 22363 rack->r_early = 0; 22364 rack->r_late = 0; 22365 rack->r_ctl.rc_agg_early = 0; 22366 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 22367 /* 22368 * Failures do not advance the seq counter above. For the 22369 * case of ENOBUFS we will fall out and retry in 1ms with 22370 * the hpts. Everything else will just have to retransmit 22371 * with the timer. 22372 * 22373 * In any case, we do not want to loop around for another 22374 * send without a good reason. 22375 */ 22376 sendalot = 0; 22377 switch (error) { 22378 case EPERM: 22379 case EACCES: 22380 tp->t_softerror = error; 22381 #ifdef TCP_ACCOUNTING 22382 crtsc = get_cyclecount(); 22383 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22384 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22385 } 22386 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22387 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22388 } 22389 sched_unpin(); 22390 #endif 22391 return (error); 22392 case ENOBUFS: 22393 /* 22394 * Pace us right away to retry in some 22395 * time. 22396 */ 22397 if (rack->r_ctl.crte != NULL) { 22398 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); 22399 if (tcp_bblogging_on(rack->rc_tp)) 22400 rack_log_queue_level(tp, rack, len, &tv, cts); 22401 } else 22402 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); 22403 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 22404 if (rack->rc_enobuf < 0x7f) 22405 rack->rc_enobuf++; 22406 if (slot < (10 * HPTS_USEC_IN_MSEC)) 22407 slot = 10 * HPTS_USEC_IN_MSEC; 22408 if (rack->r_ctl.crte != NULL) { 22409 counter_u64_add(rack_saw_enobuf_hw, 1); 22410 tcp_rl_log_enobuf(rack->r_ctl.crte); 22411 } 22412 counter_u64_add(rack_saw_enobuf, 1); 22413 goto enobufs; 22414 case EMSGSIZE: 22415 /* 22416 * For some reason the interface we used initially 22417 * to send segments changed to another or lowered 22418 * its MTU. If TSO was active we either got an 22419 * interface without TSO capabilities or TSO was 22420 * turned off. If we obtained mtu from ip_output() 22421 * then update it and try again.
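 * tcp_mss_update() recomputes t_maxseg from the new mtu; if the segment
 * size shrank we jump back to again and re-segment, otherwise we simply
 * reschedule ourselves on the hpts.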
22422 */ 22423 if (tso) 22424 tp->t_flags &= ~TF_TSO; 22425 if (mtu != 0) { 22426 int saved_mtu; 22427 22428 saved_mtu = tp->t_maxseg; 22429 tcp_mss_update(tp, -1, mtu, NULL, NULL); 22430 if (saved_mtu > tp->t_maxseg) { 22431 goto again; 22432 } 22433 } 22434 slot = 10 * HPTS_USEC_IN_MSEC; 22435 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 22436 #ifdef TCP_ACCOUNTING 22437 crtsc = get_cyclecount(); 22438 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22439 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22440 } 22441 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22442 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22443 } 22444 sched_unpin(); 22445 #endif 22446 return (error); 22447 case ENETUNREACH: 22448 counter_u64_add(rack_saw_enetunreach, 1); 22449 /* FALLTHROUGH */ 22450 case EHOSTDOWN: 22451 case EHOSTUNREACH: 22452 case ENETDOWN: 22453 if (TCPS_HAVERCVDSYN(tp->t_state)) { 22454 tp->t_softerror = error; 22455 error = 0; 22456 } 22457 /* FALLTHROUGH */ 22458 default: 22459 slot = 10 * HPTS_USEC_IN_MSEC; 22460 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 22461 #ifdef TCP_ACCOUNTING 22462 crtsc = get_cyclecount(); 22463 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22464 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22465 } 22466 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22467 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22468 } 22469 sched_unpin(); 22470 #endif 22471 return (error); 22472 } 22473 } else { 22474 rack->rc_enobuf = 0; 22475 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 22476 rack->r_ctl.retran_during_recovery += len; 22477 } 22478 KMOD_TCPSTAT_INC(tcps_sndtotal); 22479 22480 /* 22481 * Data sent (as far as we can tell). If this advertises a larger 22482 * window than any other segment, then remember the size of the 22483 * advertised window. Any pending ACK has now been sent. 22484 */ 22485 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 22486 tp->rcv_adv = tp->rcv_nxt + recwin; 22487 22488 tp->last_ack_sent = tp->rcv_nxt; 22489 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 22490 enobufs: 22491 if (sendalot) { 22492 /* Do we need to turn off sendalot? */ 22493 if (pace_max_seg && 22494 (tot_len_this_send >= pace_max_seg)) { 22495 /* We hit our max. */ 22496 sendalot = 0; 22497 } 22498 } 22499 if ((error == 0) && (flags & TH_FIN)) 22500 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 22501 if (flags & TH_RST) { 22502 /* 22503 * We don't send again after sending a RST. 22504 */ 22505 slot = 0; 22506 sendalot = 0; 22507 if (error == 0) 22508 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 22509 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) { 22510 /* 22511 * Get our pacing rate, if an error 22512 * occurred in sending (ENOBUF) we would 22513 * hit the else if with slot preset. Other 22514 * errors return. 22515 */ 22516 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz, __LINE__); 22517 } 22518 /* We have sent clear the flag */ 22519 rack->r_ent_rec_ns = 0; 22520 if (rack->r_must_retran) { 22521 if (rsm) { 22522 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 22523 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 22524 /* 22525 * We have retransmitted all. 22526 */ 22527 rack->r_must_retran = 0; 22528 rack->r_ctl.rc_out_at_rto = 0; 22529 } 22530 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 22531 /* 22532 * Sending new data will also kill 22533 * the loop. 
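 * (Once snd_max has passed the point recorded at the RTO there is
 * nothing left of the forced retransmit window, so the must-retran
 * state can be cleared.)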
22534 */ 22535 rack->r_must_retran = 0; 22536 rack->r_ctl.rc_out_at_rto = 0; 22537 } 22538 } 22539 rack->r_ctl.fsb.recwin = recwin; 22540 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 22541 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 22542 /* 22543 * We hit an RTO and now have past snd_max at the RTO 22544 * clear all the WAS flags. 22545 */ 22546 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 22547 } 22548 if (slot) { 22549 /* set the rack tcb into the slot N */ 22550 if ((error == 0) && 22551 rack_use_rfo && 22552 ((flags & (TH_SYN|TH_FIN)) == 0) && 22553 (rsm == NULL) && 22554 (ipoptlen == 0) && 22555 rack->r_fsb_inited && 22556 TCPS_HAVEESTABLISHED(tp->t_state) && 22557 ((IN_RECOVERY(tp->t_flags)) == 0) && 22558 (rack->r_must_retran == 0) && 22559 ((tp->t_flags & TF_NEEDFIN) == 0) && 22560 (len > 0) && (orig_len > 0) && 22561 (orig_len > len) && 22562 ((orig_len - len) >= segsiz) && 22563 ((optlen == 0) || 22564 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 22565 /* We can send at least one more MSS using our fsb */ 22566 rack_setup_fast_output(tp, rack, sb, len, orig_len, 22567 segsiz, pace_max_seg, hw_tls, flags); 22568 } else 22569 rack->r_fast_output = 0; 22570 rack_log_fsb(rack, tp, so, flags, 22571 ipoptlen, orig_len, len, error, 22572 (rsm == NULL), optlen, __LINE__, 2); 22573 } else if (sendalot) { 22574 int ret; 22575 22576 sack_rxmit = 0; 22577 if ((error == 0) && 22578 rack_use_rfo && 22579 ((flags & (TH_SYN|TH_FIN)) == 0) && 22580 (rsm == NULL) && 22581 (ipoptlen == 0) && 22582 (rack->r_must_retran == 0) && 22583 rack->r_fsb_inited && 22584 TCPS_HAVEESTABLISHED(tp->t_state) && 22585 ((IN_RECOVERY(tp->t_flags)) == 0) && 22586 ((tp->t_flags & TF_NEEDFIN) == 0) && 22587 (len > 0) && (orig_len > 0) && 22588 (orig_len > len) && 22589 ((orig_len - len) >= segsiz) && 22590 ((optlen == 0) || 22591 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 22592 /* we can use fast_output for more */ 22593 rack_setup_fast_output(tp, rack, sb, len, orig_len, 22594 segsiz, pace_max_seg, hw_tls, flags); 22595 if (rack->r_fast_output) { 22596 error = 0; 22597 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 22598 if (ret >= 0) 22599 return (ret); 22600 else if (error) 22601 goto nomore; 22602 22603 } 22604 } 22605 goto again; 22606 } 22607 skip_all_send: 22608 /* Assure when we leave that snd_nxt will point to top */ 22609 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 22610 tp->snd_nxt = tp->snd_max; 22611 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 22612 #ifdef TCP_ACCOUNTING 22613 crtsc = get_cyclecount() - ts_val; 22614 if (tot_len_this_send) { 22615 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22616 tp->tcp_cnt_counters[SND_OUT_DATA]++; 22617 } 22618 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22619 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 22620 } 22621 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22622 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 22623 } 22624 } else { 22625 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22626 tp->tcp_cnt_counters[SND_OUT_ACK]++; 22627 } 22628 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22629 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 22630 } 22631 } 22632 sched_unpin(); 22633 #endif 22634 if (error == ENOBUFS) 22635 error = 0; 22636 return (error); 22637 } 22638 22639 static void 22640 rack_update_seg(struct tcp_rack *rack) 22641 { 22642 uint32_t orig_val; 22643 22644 orig_val = rack->r_ctl.rc_pace_max_segs; 22645 
rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 22646 if (orig_val != rack->r_ctl.rc_pace_max_segs) 22647 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); 22648 } 22649 22650 static void 22651 rack_mtu_change(struct tcpcb *tp) 22652 { 22653 /* 22654 * The MSS may have changed 22655 */ 22656 struct tcp_rack *rack; 22657 struct rack_sendmap *rsm; 22658 22659 rack = (struct tcp_rack *)tp->t_fb_ptr; 22660 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 22661 /* 22662 * The MTU has changed we need to resend everything 22663 * since all we have sent is lost. We first fix 22664 * up the mtu though. 22665 */ 22666 rack_set_pace_segments(tp, rack, __LINE__, NULL); 22667 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 22668 rack_remxt_tmr(tp); 22669 rack->r_fast_output = 0; 22670 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 22671 rack->r_ctl.rc_sacked); 22672 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 22673 rack->r_must_retran = 1; 22674 /* Mark all inflight to needing to be rxt'd */ 22675 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 22676 rsm->r_flags |= (RACK_MUST_RXT|RACK_PMTU_CHG); 22677 } 22678 } 22679 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 22680 /* We don't use snd_nxt to retransmit */ 22681 tp->snd_nxt = tp->snd_max; 22682 } 22683 22684 static int 22685 rack_set_dgp(struct tcp_rack *rack) 22686 { 22687 if (rack->dgp_on == 1) 22688 return(0); 22689 if ((rack->use_fixed_rate == 1) && 22690 (rack->rc_always_pace == 1)) { 22691 /* 22692 * We are already pacing another 22693 * way. 22694 */ 22695 return (EBUSY); 22696 } 22697 if (rack->rc_always_pace == 1) { 22698 rack_remove_pacing(rack); 22699 } 22700 if (tcp_incr_dgp_pacing_cnt() == 0) 22701 return (ENOSPC); 22702 rack->r_ctl.pacing_method |= RACK_DGP_PACING; 22703 rack->rc_fillcw_apply_discount = 0; 22704 rack->dgp_on = 1; 22705 rack->rc_always_pace = 1; 22706 rack->rc_pace_dnd = 1; 22707 rack->use_fixed_rate = 0; 22708 if (rack->gp_ready) 22709 rack_set_cc_pacing(rack); 22710 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 22711 rack->rack_attempt_hdwr_pace = 0; 22712 /* rxt settings */ 22713 rack->full_size_rxt = 1; 22714 rack->shape_rxt_to_pacing_min = 0; 22715 /* cmpack=1 */ 22716 rack->r_use_cmp_ack = 1; 22717 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 22718 rack->r_use_cmp_ack) 22719 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 22720 /* scwnd=1 */ 22721 rack->rack_enable_scwnd = 1; 22722 /* dynamic=100 */ 22723 rack->rc_gp_dyn_mul = 1; 22724 /* gp_inc_ca */ 22725 rack->r_ctl.rack_per_of_gp_ca = 100; 22726 /* rrr_conf=3 */ 22727 rack->r_rr_config = 3; 22728 /* npush=2 */ 22729 rack->r_ctl.rc_no_push_at_mrtt = 2; 22730 /* fillcw=1 */ 22731 rack->rc_pace_to_cwnd = 1; 22732 rack->rc_pace_fill_if_rttin_range = 0; 22733 rack->rtt_limit_mul = 0; 22734 /* noprr=1 */ 22735 rack->rack_no_prr = 1; 22736 /* lscwnd=1 */ 22737 rack->r_limit_scw = 1; 22738 /* gp_inc_rec */ 22739 rack->r_ctl.rack_per_of_gp_rec = 90; 22740 return (0); 22741 } 22742 22743 static int 22744 rack_set_profile(struct tcp_rack *rack, int prof) 22745 { 22746 int err = EINVAL; 22747 if (prof == 1) { 22748 /* 22749 * Profile 1 is "standard" DGP. It ignores 22750 * client buffer level. 
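 * All this does is call rack_set_dgp() below, which turns on DGP based
 * pacing with no fill-cw discount applied.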
22751 */ 22752 err = rack_set_dgp(rack); 22753 if (err) 22754 return (err); 22755 } else if (prof == 6) { 22756 err = rack_set_dgp(rack); 22757 if (err) 22758 return (err); 22759 /* 22760 * Profile 6 tweaks DGP so that it will apply to 22761 * fill-cw the same settings that profile5 does 22762 * to replace DGP. It gets then the max(dgp-rate, fillcw(discounted). 22763 */ 22764 rack->rc_fillcw_apply_discount = 1; 22765 } else if (prof == 0) { 22766 /* This changes things back to the default settings */ 22767 if (rack->rc_always_pace == 1) { 22768 rack_remove_pacing(rack); 22769 } else { 22770 /* Make sure any stray flags are off */ 22771 rack->dgp_on = 0; 22772 rack->rc_hybrid_mode = 0; 22773 rack->use_fixed_rate = 0; 22774 } 22775 err = 0; 22776 if (rack_fill_cw_state) 22777 rack->rc_pace_to_cwnd = 1; 22778 else 22779 rack->rc_pace_to_cwnd = 0; 22780 22781 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 22782 rack->r_ctl.pacing_method |= RACK_REG_PACING; 22783 rack->rc_always_pace = 1; 22784 if (rack->rack_hibeta) 22785 rack_set_cc_pacing(rack); 22786 } else 22787 rack->rc_always_pace = 0; 22788 if (rack_dsack_std_based & 0x1) { 22789 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 22790 rack->rc_rack_tmr_std_based = 1; 22791 } 22792 if (rack_dsack_std_based & 0x2) { 22793 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 22794 rack->rc_rack_use_dsack = 1; 22795 } 22796 if (rack_use_cmp_acks) 22797 rack->r_use_cmp_ack = 1; 22798 else 22799 rack->r_use_cmp_ack = 0; 22800 if (rack_disable_prr) 22801 rack->rack_no_prr = 1; 22802 else 22803 rack->rack_no_prr = 0; 22804 if (rack_gp_no_rec_chg) 22805 rack->rc_gp_no_rec_chg = 1; 22806 else 22807 rack->rc_gp_no_rec_chg = 0; 22808 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 22809 rack->r_mbuf_queue = 1; 22810 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 22811 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 22812 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 22813 } else { 22814 rack->r_mbuf_queue = 0; 22815 rack->rc_tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 22816 } 22817 if (rack_enable_shared_cwnd) 22818 rack->rack_enable_scwnd = 1; 22819 else 22820 rack->rack_enable_scwnd = 0; 22821 if (rack_do_dyn_mul) { 22822 /* When dynamic adjustment is on CA needs to start at 100% */ 22823 rack->rc_gp_dyn_mul = 1; 22824 if (rack_do_dyn_mul >= 100) 22825 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 22826 } else { 22827 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 22828 rack->rc_gp_dyn_mul = 0; 22829 } 22830 rack->r_rr_config = 0; 22831 rack->r_ctl.rc_no_push_at_mrtt = 0; 22832 rack->rc_pace_fill_if_rttin_range = 0; 22833 rack->rtt_limit_mul = 0; 22834 22835 if (rack_enable_hw_pacing) 22836 rack->rack_hdw_pace_ena = 1; 22837 else 22838 rack->rack_hdw_pace_ena = 0; 22839 if (rack_disable_prr) 22840 rack->rack_no_prr = 1; 22841 else 22842 rack->rack_no_prr = 0; 22843 if (rack_limits_scwnd) 22844 rack->r_limit_scw = 1; 22845 else 22846 rack->r_limit_scw = 0; 22847 rack_init_retransmit_value(rack, rack_rxt_controls); 22848 err = 0; 22849 } 22850 return (err); 22851 } 22852 22853 static int 22854 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 22855 { 22856 struct deferred_opt_list *dol; 22857 22858 dol = malloc(sizeof(struct deferred_opt_list), 22859 M_TCPDO, M_NOWAIT|M_ZERO); 22860 if (dol == NULL) { 22861 /* 22862 * No space yikes -- fail out.. 
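 * A return of 0 tells the caller the option could not be deferred,
 * while 1 means it was queued on the opt_list for later processing.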
22863 */ 22864 return (0); 22865 } 22866 dol->optname = sopt_name; 22867 dol->optval = loptval; 22868 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 22869 return (1); 22870 } 22871 22872 static int 22873 process_hybrid_pacing(struct tcp_rack *rack, struct tcp_hybrid_req *hybrid) 22874 { 22875 #ifdef TCP_REQUEST_TRK 22876 struct tcp_sendfile_track *sft; 22877 struct timeval tv; 22878 tcp_seq seq; 22879 int err; 22880 22881 microuptime(&tv); 22882 22883 /* Make sure no fixed rate is on */ 22884 rack->use_fixed_rate = 0; 22885 rack->r_ctl.rc_fixed_pacing_rate_rec = 0; 22886 rack->r_ctl.rc_fixed_pacing_rate_ca = 0; 22887 rack->r_ctl.rc_fixed_pacing_rate_ss = 0; 22888 /* Now allocate or find our entry that will have these settings */ 22889 sft = tcp_req_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusectick(&tv), 0); 22890 if (sft == NULL) { 22891 rack->rc_tp->tcp_hybrid_error++; 22892 /* no space, where would it have gone? */ 22893 seq = rack->rc_tp->snd_una + rack->rc_tp->t_inpcb.inp_socket->so_snd.sb_ccc; 22894 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_ROOM, __LINE__, 0); 22895 return (ENOSPC); 22896 } 22897 /* mask our internal flags */ 22898 hybrid->hybrid_flags &= TCP_HYBRID_PACING_USER_MASK; 22899 /* The seq will be snd_una + everything in the buffer */ 22900 seq = sft->start_seq; 22901 if ((hybrid->hybrid_flags & TCP_HYBRID_PACING_ENABLE) == 0) { 22902 /* Disabling hybrid pacing */ 22903 if (rack->rc_hybrid_mode) { 22904 rack_set_profile(rack, 0); 22905 rack->rc_tp->tcp_hybrid_stop++; 22906 } 22907 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_TURNED_OFF, __LINE__, 0); 22908 return (0); 22909 } 22910 if (rack->dgp_on == 0) { 22911 /* 22912 * If we have not yet turned DGP on, do so 22913 * now setting pure DGP mode, no buffer level 22914 * response. 22915 */ 22916 if ((err = rack_set_profile(rack, 1)) != 0){ 22917 /* Failed to turn pacing on */ 22918 rack->rc_tp->tcp_hybrid_error++; 22919 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_NO_PACING, __LINE__, 0); 22920 return (err); 22921 } 22922 } 22923 /* 22924 * Now we must switch to hybrid mode as well which also 22925 * means moving to regular pacing. 22926 */ 22927 if (rack->rc_hybrid_mode == 0) { 22928 /* First time */ 22929 if (tcp_can_enable_pacing()) { 22930 rack->r_ctl.pacing_method |= RACK_REG_PACING; 22931 rack->rc_hybrid_mode = 1; 22932 } else { 22933 return (ENOSPC); 22934 } 22935 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) { 22936 /* 22937 * This should be true. 
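 * Since the connection is moving from DGP accounting over to regular
 * pacing accounting, the DGP pacing count has to be released here.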
22938 */ 22939 tcp_dec_dgp_pacing_cnt(); 22940 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 22941 } 22942 } 22943 /* Now set in our flags */ 22944 sft->hybrid_flags = hybrid->hybrid_flags | TCP_HYBRID_PACING_WASSET; 22945 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_CSPR) 22946 sft->cspr = hybrid->cspr; 22947 else 22948 sft->cspr = 0; 22949 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_H_MS) 22950 sft->hint_maxseg = hybrid->hint_maxseg; 22951 else 22952 sft->hint_maxseg = 0; 22953 rack->rc_tp->tcp_hybrid_start++; 22954 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_RULES_SET, __LINE__,0); 22955 return (0); 22956 #else 22957 return (ENOTSUP); 22958 #endif 22959 } 22960 22961 static int 22962 rack_stack_information(struct tcpcb *tp, struct stack_specific_info *si) 22963 { 22964 /* We pulled a SSI info log out what was there */ 22965 si->bytes_transmitted = tp->t_sndbytes; 22966 si->bytes_retransmitted = tp->t_snd_rxt_bytes; 22967 return (0); 22968 } 22969 22970 static int 22971 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 22972 uint32_t optval, uint64_t loptval, struct tcp_hybrid_req *hybrid) 22973 22974 { 22975 struct epoch_tracker et; 22976 struct sockopt sopt; 22977 struct cc_newreno_opts opt; 22978 uint64_t val; 22979 int error = 0; 22980 uint16_t ca, ss; 22981 22982 switch (sopt_name) { 22983 case TCP_RACK_SET_RXT_OPTIONS: 22984 if (optval <= 2) { 22985 rack_init_retransmit_value(rack, optval); 22986 } else { 22987 /* 22988 * You must send in 0, 1 or 2 all else is 22989 * invalid. 22990 */ 22991 error = EINVAL; 22992 } 22993 break; 22994 case TCP_RACK_DSACK_OPT: 22995 RACK_OPTS_INC(tcp_rack_dsack_opt); 22996 if (optval & 0x1) { 22997 rack->rc_rack_tmr_std_based = 1; 22998 } else { 22999 rack->rc_rack_tmr_std_based = 0; 23000 } 23001 if (optval & 0x2) { 23002 rack->rc_rack_use_dsack = 1; 23003 } else { 23004 rack->rc_rack_use_dsack = 0; 23005 } 23006 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); 23007 break; 23008 case TCP_RACK_PACING_DIVISOR: 23009 RACK_OPTS_INC(tcp_rack_pacing_divisor); 23010 if (optval == 0) { 23011 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; 23012 } else { 23013 if (optval < RL_MIN_DIVISOR) 23014 rack->r_ctl.pace_len_divisor = RL_MIN_DIVISOR; 23015 else 23016 rack->r_ctl.pace_len_divisor = optval; 23017 } 23018 break; 23019 case TCP_RACK_HI_BETA: 23020 RACK_OPTS_INC(tcp_rack_hi_beta); 23021 if (optval > 0) { 23022 rack->rack_hibeta = 1; 23023 if ((optval >= 50) && 23024 (optval <= 100)) { 23025 /* 23026 * User wants to set a custom beta. 23027 */ 23028 rack->r_ctl.saved_hibeta = optval; 23029 if (rack->rc_pacing_cc_set) 23030 rack_undo_cc_pacing(rack); 23031 rack->r_ctl.rc_saved_beta.beta = optval; 23032 } 23033 if (rack->rc_pacing_cc_set == 0) 23034 rack_set_cc_pacing(rack); 23035 } else { 23036 rack->rack_hibeta = 0; 23037 if (rack->rc_pacing_cc_set) 23038 rack_undo_cc_pacing(rack); 23039 } 23040 break; 23041 case TCP_RACK_PACING_BETA: 23042 error = EINVAL; 23043 break; 23044 case TCP_RACK_TIMER_SLOP: 23045 RACK_OPTS_INC(tcp_rack_timer_slop); 23046 rack->r_ctl.timer_slop = optval; 23047 if (rack->rc_tp->t_srtt) { 23048 /* 23049 * If we have an SRTT lets update t_rxtcur 23050 * to have the new slop. 23051 */ 23052 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 23053 rack_rto_min, rack_rto_max, 23054 rack->r_ctl.timer_slop); 23055 } 23056 break; 23057 case TCP_RACK_PACING_BETA_ECN: 23058 RACK_OPTS_INC(tcp_rack_beta_ecn); 23059 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 23060 /* This only works for newreno. 
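 * The value is pushed into CC_NEWRENO_BETA_ECN, so any other congestion
 * control module gets EINVAL here.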
*/ 23061 error = EINVAL; 23062 break; 23063 } 23064 if (rack->rc_pacing_cc_set) { 23065 /* 23066 * Set them into the real CC module 23067 * whats in the rack pcb is the old values 23068 * to be used on restoral/ 23069 */ 23070 sopt.sopt_dir = SOPT_SET; 23071 opt.name = CC_NEWRENO_BETA_ECN; 23072 opt.val = optval; 23073 if (CC_ALGO(tp)->ctl_output != NULL) 23074 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 23075 else 23076 error = ENOENT; 23077 } else { 23078 /* 23079 * Not pacing yet so set it into our local 23080 * rack pcb storage. 23081 */ 23082 rack->r_ctl.rc_saved_beta.beta_ecn = optval; 23083 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED; 23084 } 23085 break; 23086 case TCP_DEFER_OPTIONS: 23087 RACK_OPTS_INC(tcp_defer_opt); 23088 if (optval) { 23089 if (rack->gp_ready) { 23090 /* Too late */ 23091 error = EINVAL; 23092 break; 23093 } 23094 rack->defer_options = 1; 23095 } else 23096 rack->defer_options = 0; 23097 break; 23098 case TCP_RACK_MEASURE_CNT: 23099 RACK_OPTS_INC(tcp_rack_measure_cnt); 23100 if (optval && (optval <= 0xff)) { 23101 rack->r_ctl.req_measurements = optval; 23102 } else 23103 error = EINVAL; 23104 break; 23105 case TCP_REC_ABC_VAL: 23106 RACK_OPTS_INC(tcp_rec_abc_val); 23107 if (optval > 0) 23108 rack->r_use_labc_for_rec = 1; 23109 else 23110 rack->r_use_labc_for_rec = 0; 23111 break; 23112 case TCP_RACK_ABC_VAL: 23113 RACK_OPTS_INC(tcp_rack_abc_val); 23114 if ((optval > 0) && (optval < 255)) 23115 rack->rc_labc = optval; 23116 else 23117 error = EINVAL; 23118 break; 23119 case TCP_HDWR_UP_ONLY: 23120 RACK_OPTS_INC(tcp_pacing_up_only); 23121 if (optval) 23122 rack->r_up_only = 1; 23123 else 23124 rack->r_up_only = 0; 23125 break; 23126 case TCP_FILLCW_RATE_CAP: /* URL:fillcw_cap */ 23127 RACK_OPTS_INC(tcp_fillcw_rate_cap); 23128 rack->r_ctl.fillcw_cap = loptval; 23129 break; 23130 case TCP_PACING_RATE_CAP: 23131 RACK_OPTS_INC(tcp_pacing_rate_cap); 23132 if ((rack->dgp_on == 1) && 23133 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { 23134 /* 23135 * If we are doing DGP we need to switch 23136 * to using the pacing limit. 23137 */ 23138 if (tcp_can_enable_pacing() == 0) { 23139 error = ENOSPC; 23140 break; 23141 } 23142 /* 23143 * Now change up the flags and counts to be correct. 23144 */ 23145 rack->r_ctl.pacing_method |= RACK_REG_PACING; 23146 tcp_dec_dgp_pacing_cnt(); 23147 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 23148 } 23149 rack->r_ctl.bw_rate_cap = loptval; 23150 break; 23151 case TCP_HYBRID_PACING: 23152 if (hybrid == NULL) { 23153 error = EINVAL; 23154 break; 23155 } 23156 if (rack->r_ctl.side_chan_dis_mask & HYBRID_DIS_MASK) { 23157 error = EPERM; 23158 break; 23159 } 23160 error = process_hybrid_pacing(rack, hybrid); 23161 break; 23162 case TCP_SIDECHAN_DIS: /* URL:scodm */ 23163 if (optval) 23164 rack->r_ctl.side_chan_dis_mask = optval; 23165 else 23166 rack->r_ctl.side_chan_dis_mask = 0; 23167 break; 23168 case TCP_RACK_PROFILE: 23169 RACK_OPTS_INC(tcp_profile); 23170 error = rack_set_profile(rack, optval); 23171 break; 23172 case TCP_USE_CMP_ACKS: 23173 RACK_OPTS_INC(tcp_use_cmp_acks); 23174 if ((optval == 0) && (tp->t_flags2 & TF2_MBUF_ACKCMP)) { 23175 /* You can't turn it off once its on! 
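 * Presumably because once TF2_MBUF_ACKCMP has been advertised,
 * compressed ack entries may already be queued for this connection.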
*/ 23176 error = EINVAL; 23177 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { 23178 rack->r_use_cmp_ack = 1; 23179 rack->r_mbuf_queue = 1; 23180 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23181 } 23182 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 23183 tp->t_flags2 |= TF2_MBUF_ACKCMP; 23184 break; 23185 case TCP_SHARED_CWND_TIME_LIMIT: 23186 RACK_OPTS_INC(tcp_lscwnd); 23187 if (optval) 23188 rack->r_limit_scw = 1; 23189 else 23190 rack->r_limit_scw = 0; 23191 break; 23192 case TCP_RACK_DGP_IN_REC: 23193 error = EINVAL; 23194 break; 23195 case TCP_RACK_PACE_TO_FILL: 23196 RACK_OPTS_INC(tcp_fillcw); 23197 if (optval == 0) 23198 rack->rc_pace_to_cwnd = 0; 23199 else { 23200 rack->rc_pace_to_cwnd = 1; 23201 } 23202 if ((optval >= rack_gp_rtt_maxmul) && 23203 rack_gp_rtt_maxmul && 23204 (optval < 0xf)) { 23205 rack->rc_pace_fill_if_rttin_range = 1; 23206 rack->rtt_limit_mul = optval; 23207 } else { 23208 rack->rc_pace_fill_if_rttin_range = 0; 23209 rack->rtt_limit_mul = 0; 23210 } 23211 break; 23212 case TCP_RACK_NO_PUSH_AT_MAX: 23213 RACK_OPTS_INC(tcp_npush); 23214 if (optval == 0) 23215 rack->r_ctl.rc_no_push_at_mrtt = 0; 23216 else if (optval < 0xff) 23217 rack->r_ctl.rc_no_push_at_mrtt = optval; 23218 else 23219 error = EINVAL; 23220 break; 23221 case TCP_SHARED_CWND_ENABLE: 23222 RACK_OPTS_INC(tcp_rack_scwnd); 23223 if (optval == 0) 23224 rack->rack_enable_scwnd = 0; 23225 else 23226 rack->rack_enable_scwnd = 1; 23227 break; 23228 case TCP_RACK_MBUF_QUEUE: 23229 /* Now do we use the LRO mbuf-queue feature */ 23230 RACK_OPTS_INC(tcp_rack_mbufq); 23231 if (optval || rack->r_use_cmp_ack) 23232 rack->r_mbuf_queue = 1; 23233 else 23234 rack->r_mbuf_queue = 0; 23235 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 23236 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23237 else 23238 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 23239 break; 23240 case TCP_RACK_NONRXT_CFG_RATE: 23241 RACK_OPTS_INC(tcp_rack_cfg_rate); 23242 if (optval == 0) 23243 rack->rack_rec_nonrxt_use_cr = 0; 23244 else 23245 rack->rack_rec_nonrxt_use_cr = 1; 23246 break; 23247 case TCP_NO_PRR: 23248 RACK_OPTS_INC(tcp_rack_noprr); 23249 if (optval == 0) 23250 rack->rack_no_prr = 0; 23251 else if (optval == 1) 23252 rack->rack_no_prr = 1; 23253 else if (optval == 2) 23254 rack->no_prr_addback = 1; 23255 else 23256 error = EINVAL; 23257 break; 23258 case RACK_CSPR_IS_FCC: /* URL:csprisfcc */ 23259 if (optval > 0) 23260 rack->cspr_is_fcc = 1; 23261 else 23262 rack->cspr_is_fcc = 0; 23263 break; 23264 case TCP_TIMELY_DYN_ADJ: 23265 RACK_OPTS_INC(tcp_timely_dyn); 23266 if (optval == 0) 23267 rack->rc_gp_dyn_mul = 0; 23268 else { 23269 rack->rc_gp_dyn_mul = 1; 23270 if (optval >= 100) { 23271 /* 23272 * If the user sets something 100 or more 23273 * its the gp_ca value. 23274 */ 23275 rack->r_ctl.rack_per_of_gp_ca = optval; 23276 } 23277 } 23278 break; 23279 case TCP_RACK_DO_DETECTION: 23280 error = EINVAL; 23281 break; 23282 case TCP_RACK_TLP_USE: 23283 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 23284 error = EINVAL; 23285 break; 23286 } 23287 RACK_OPTS_INC(tcp_tlp_use); 23288 rack->rack_tlp_threshold_use = optval; 23289 break; 23290 case TCP_RACK_TLP_REDUCE: 23291 /* RACK TLP cwnd reduction (bool) */ 23292 RACK_OPTS_INC(tcp_rack_tlp_reduce); 23293 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 23294 break; 23295 /* Pacing related ones */ 23296 case TCP_RACK_PACE_ALWAYS: 23297 /* 23298 * zero is old rack method, 1 is new 23299 * method using a pacing rate. 
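 * Enabling pacing consumes a pacing connection slot via
 * tcp_can_enable_pacing(); if none is available we fail with ENOSPC.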
23300 */ 23301 RACK_OPTS_INC(tcp_rack_pace_always); 23302 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 23303 error = EPERM; 23304 break; 23305 } 23306 if (optval > 0) { 23307 if (rack->rc_always_pace) { 23308 error = EALREADY; 23309 break; 23310 } else if (tcp_can_enable_pacing()) { 23311 rack->r_ctl.pacing_method |= RACK_REG_PACING; 23312 rack->rc_always_pace = 1; 23313 if (rack->rack_hibeta) 23314 rack_set_cc_pacing(rack); 23315 } 23316 else { 23317 error = ENOSPC; 23318 break; 23319 } 23320 } else { 23321 if (rack->rc_always_pace == 1) { 23322 rack_remove_pacing(rack); 23323 } 23324 } 23325 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 23326 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23327 else 23328 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 23329 /* A rate may be set irate or other, if so set seg size */ 23330 rack_update_seg(rack); 23331 break; 23332 case TCP_BBR_RACK_INIT_RATE: 23333 RACK_OPTS_INC(tcp_initial_rate); 23334 val = optval; 23335 /* Change from kbits per second to bytes per second */ 23336 val *= 1000; 23337 val /= 8; 23338 rack->r_ctl.init_rate = val; 23339 if (rack->rc_always_pace) 23340 rack_update_seg(rack); 23341 break; 23342 case TCP_BBR_IWINTSO: 23343 error = EINVAL; 23344 break; 23345 case TCP_RACK_FORCE_MSEG: 23346 RACK_OPTS_INC(tcp_rack_force_max_seg); 23347 if (optval) 23348 rack->rc_force_max_seg = 1; 23349 else 23350 rack->rc_force_max_seg = 0; 23351 break; 23352 case TCP_RACK_PACE_MIN_SEG: 23353 RACK_OPTS_INC(tcp_rack_min_seg); 23354 rack->r_ctl.rc_user_set_min_segs = (0x0000ffff & optval); 23355 rack_set_pace_segments(tp, rack, __LINE__, NULL); 23356 break; 23357 case TCP_RACK_PACE_MAX_SEG: 23358 /* Max segments size in a pace in bytes */ 23359 RACK_OPTS_INC(tcp_rack_max_seg); 23360 if ((rack->dgp_on == 1) && 23361 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { 23362 /* 23363 * If we set a max-seg and are doing DGP then 23364 * we now fall under the pacing limits not the 23365 * DGP ones. 23366 */ 23367 if (tcp_can_enable_pacing() == 0) { 23368 error = ENOSPC; 23369 break; 23370 } 23371 /* 23372 * Now change up the flags and counts to be correct. 23373 */ 23374 rack->r_ctl.pacing_method |= RACK_REG_PACING; 23375 tcp_dec_dgp_pacing_cnt(); 23376 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 23377 } 23378 if (optval <= MAX_USER_SET_SEG) 23379 rack->rc_user_set_max_segs = optval; 23380 else 23381 rack->rc_user_set_max_segs = MAX_USER_SET_SEG; 23382 rack_set_pace_segments(tp, rack, __LINE__, NULL); 23383 break; 23384 case TCP_RACK_PACE_RATE_REC: 23385 /* Set the fixed pacing rate in Bytes per second ca */ 23386 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 23387 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 23388 error = EPERM; 23389 break; 23390 } 23391 if (rack->dgp_on) { 23392 /* 23393 * We are already pacing another 23394 * way. 
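 * A fixed pacing rate cannot be combined with DGP; DGP has to be turned
 * off before any of the fixed rates can be set.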
23395 */ 23396 error = EBUSY; 23397 break; 23398 } 23399 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 23400 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 23401 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 23402 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 23403 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 23404 rack->use_fixed_rate = 1; 23405 if (rack->rack_hibeta) 23406 rack_set_cc_pacing(rack); 23407 rack_log_pacing_delay_calc(rack, 23408 rack->r_ctl.rc_fixed_pacing_rate_ss, 23409 rack->r_ctl.rc_fixed_pacing_rate_ca, 23410 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 23411 __LINE__, NULL,0); 23412 break; 23413 23414 case TCP_RACK_PACE_RATE_SS: 23415 /* Set the fixed pacing rate in Bytes per second ca */ 23416 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 23417 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 23418 error = EPERM; 23419 break; 23420 } 23421 if (rack->dgp_on) { 23422 /* 23423 * We are already pacing another 23424 * way. 23425 */ 23426 error = EBUSY; 23427 break; 23428 } 23429 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 23430 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 23431 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 23432 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 23433 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 23434 rack->use_fixed_rate = 1; 23435 if (rack->rack_hibeta) 23436 rack_set_cc_pacing(rack); 23437 rack_log_pacing_delay_calc(rack, 23438 rack->r_ctl.rc_fixed_pacing_rate_ss, 23439 rack->r_ctl.rc_fixed_pacing_rate_ca, 23440 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 23441 __LINE__, NULL, 0); 23442 break; 23443 23444 case TCP_RACK_PACE_RATE_CA: 23445 /* Set the fixed pacing rate in Bytes per second ca */ 23446 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 23447 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 23448 error = EPERM; 23449 break; 23450 } 23451 if (rack->dgp_on) { 23452 /* 23453 * We are already pacing another 23454 * way. 23455 */ 23456 error = EBUSY; 23457 break; 23458 } 23459 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 23460 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 23461 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 23462 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 23463 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 23464 rack->use_fixed_rate = 1; 23465 if (rack->rack_hibeta) 23466 rack_set_cc_pacing(rack); 23467 rack_log_pacing_delay_calc(rack, 23468 rack->r_ctl.rc_fixed_pacing_rate_ss, 23469 rack->r_ctl.rc_fixed_pacing_rate_ca, 23470 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 23471 __LINE__, NULL, 0); 23472 break; 23473 case TCP_RACK_GP_INCREASE_REC: 23474 RACK_OPTS_INC(tcp_gp_inc_rec); 23475 rack->r_ctl.rack_per_of_gp_rec = optval; 23476 rack_log_pacing_delay_calc(rack, 23477 rack->r_ctl.rack_per_of_gp_ss, 23478 rack->r_ctl.rack_per_of_gp_ca, 23479 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 23480 __LINE__, NULL, 0); 23481 break; 23482 case TCP_RACK_GP_INCREASE_CA: 23483 RACK_OPTS_INC(tcp_gp_inc_ca); 23484 ca = optval; 23485 if (ca < 100) { 23486 /* 23487 * We don't allow any reduction 23488 * over the GP b/w. 23489 */ 23490 error = EINVAL; 23491 break; 23492 } 23493 rack->r_ctl.rack_per_of_gp_ca = ca; 23494 rack_log_pacing_delay_calc(rack, 23495 rack->r_ctl.rack_per_of_gp_ss, 23496 rack->r_ctl.rack_per_of_gp_ca, 23497 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 23498 __LINE__, NULL, 0); 23499 break; 23500 case TCP_RACK_GP_INCREASE_SS: 23501 RACK_OPTS_INC(tcp_gp_inc_ss); 23502 ss = optval; 23503 if (ss < 100) { 23504 /* 23505 * We don't allow any reduction 23506 * over the GP b/w. 
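 * These values are percentages of the measured goodput, so anything
 * below 100 would actually slow the connection down.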
23507 */ 23508 error = EINVAL; 23509 break; 23510 } 23511 rack->r_ctl.rack_per_of_gp_ss = ss; 23512 rack_log_pacing_delay_calc(rack, 23513 rack->r_ctl.rack_per_of_gp_ss, 23514 rack->r_ctl.rack_per_of_gp_ca, 23515 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 23516 __LINE__, NULL, 0); 23517 break; 23518 case TCP_RACK_RR_CONF: 23519 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 23520 if (optval && optval <= 3) 23521 rack->r_rr_config = optval; 23522 else 23523 rack->r_rr_config = 0; 23524 break; 23525 case TCP_PACING_DND: /* URL:dnd */ 23526 if (optval > 0) 23527 rack->rc_pace_dnd = 1; 23528 else 23529 rack->rc_pace_dnd = 0; 23530 break; 23531 case TCP_HDWR_RATE_CAP: 23532 RACK_OPTS_INC(tcp_hdwr_rate_cap); 23533 if (optval) { 23534 if (rack->r_rack_hw_rate_caps == 0) 23535 rack->r_rack_hw_rate_caps = 1; 23536 else 23537 error = EALREADY; 23538 } else { 23539 rack->r_rack_hw_rate_caps = 0; 23540 } 23541 break; 23542 case TCP_DGP_UPPER_BOUNDS: 23543 { 23544 uint8_t val; 23545 val = optval & 0x0000ff; 23546 rack->r_ctl.rack_per_upper_bound_ca = val; 23547 val = (optval >> 16) & 0x0000ff; 23548 rack->r_ctl.rack_per_upper_bound_ss = val; 23549 break; 23550 } 23551 case TCP_SS_EEXIT: /* URL:eexit */ 23552 if (optval > 0) { 23553 rack->r_ctl.gp_rnd_thresh = optval & 0x0ff; 23554 if (optval & 0x10000) { 23555 rack->r_ctl.gate_to_fs = 1; 23556 } else { 23557 rack->r_ctl.gate_to_fs = 0; 23558 } 23559 if (optval & 0x20000) { 23560 rack->r_ctl.use_gp_not_last = 1; 23561 } else { 23562 rack->r_ctl.use_gp_not_last = 0; 23563 } 23564 if (optval & 0xfffc0000) { 23565 uint32_t v; 23566 23567 v = (optval >> 18) & 0x00003fff; 23568 if (v >= 1000) 23569 rack->r_ctl.gp_gain_req = v; 23570 } 23571 } else { 23572 /* We do not do ss early exit at all */ 23573 rack->rc_initial_ss_comp = 1; 23574 rack->r_ctl.gp_rnd_thresh = 0; 23575 } 23576 break; 23577 case TCP_RACK_SPLIT_LIMIT: 23578 RACK_OPTS_INC(tcp_split_limit); 23579 rack->r_ctl.rc_split_limit = optval; 23580 break; 23581 case TCP_BBR_HDWR_PACE: 23582 RACK_OPTS_INC(tcp_hdwr_pacing); 23583 if (optval){ 23584 if (rack->rack_hdrw_pacing == 0) { 23585 rack->rack_hdw_pace_ena = 1; 23586 rack->rack_attempt_hdwr_pace = 0; 23587 } else 23588 error = EALREADY; 23589 } else { 23590 rack->rack_hdw_pace_ena = 0; 23591 #ifdef RATELIMIT 23592 if (rack->r_ctl.crte != NULL) { 23593 rack->rack_hdrw_pacing = 0; 23594 rack->rack_attempt_hdwr_pace = 0; 23595 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 23596 rack->r_ctl.crte = NULL; 23597 } 23598 #endif 23599 } 23600 break; 23601 /* End Pacing related ones */ 23602 case TCP_RACK_PRR_SENDALOT: 23603 /* Allow PRR to send more than one seg */ 23604 RACK_OPTS_INC(tcp_rack_prr_sendalot); 23605 rack->r_ctl.rc_prr_sendalot = optval; 23606 break; 23607 case TCP_RACK_MIN_TO: 23608 /* Minimum time between rack t-o's in ms */ 23609 RACK_OPTS_INC(tcp_rack_min_to); 23610 rack->r_ctl.rc_min_to = optval; 23611 break; 23612 case TCP_RACK_EARLY_SEG: 23613 /* If early recovery max segments */ 23614 RACK_OPTS_INC(tcp_rack_early_seg); 23615 rack->r_ctl.rc_early_recovery_segs = optval; 23616 break; 23617 case TCP_RACK_ENABLE_HYSTART: 23618 { 23619 if (optval) { 23620 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 23621 if (rack_do_hystart > RACK_HYSTART_ON) 23622 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 23623 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 23624 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 23625 } else { 23626 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 23627 } 23628 } 23629 break; 23630 case 
TCP_RACK_REORD_THRESH: 23631 /* RACK reorder threshold (shift amount) */ 23632 RACK_OPTS_INC(tcp_rack_reord_thresh); 23633 if ((optval > 0) && (optval < 31)) 23634 rack->r_ctl.rc_reorder_shift = optval; 23635 else 23636 error = EINVAL; 23637 break; 23638 case TCP_RACK_REORD_FADE: 23639 /* Does reordering fade after ms time */ 23640 RACK_OPTS_INC(tcp_rack_reord_fade); 23641 rack->r_ctl.rc_reorder_fade = optval; 23642 break; 23643 case TCP_RACK_TLP_THRESH: 23644 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 23645 RACK_OPTS_INC(tcp_rack_tlp_thresh); 23646 if (optval) 23647 rack->r_ctl.rc_tlp_threshold = optval; 23648 else 23649 error = EINVAL; 23650 break; 23651 case TCP_BBR_USE_RACK_RR: 23652 RACK_OPTS_INC(tcp_rack_rr); 23653 if (optval) 23654 rack->use_rack_rr = 1; 23655 else 23656 rack->use_rack_rr = 0; 23657 break; 23658 case TCP_RACK_PKT_DELAY: 23659 /* RACK added ms i.e. rack-rtt + reord + N */ 23660 RACK_OPTS_INC(tcp_rack_pkt_delay); 23661 rack->r_ctl.rc_pkt_delay = optval; 23662 break; 23663 case TCP_DELACK: 23664 RACK_OPTS_INC(tcp_rack_delayed_ack); 23665 if (optval == 0) 23666 tp->t_delayed_ack = 0; 23667 else 23668 tp->t_delayed_ack = 1; 23669 if (tp->t_flags & TF_DELACK) { 23670 tp->t_flags &= ~TF_DELACK; 23671 tp->t_flags |= TF_ACKNOW; 23672 NET_EPOCH_ENTER(et); 23673 rack_output(tp); 23674 NET_EPOCH_EXIT(et); 23675 } 23676 break; 23677 23678 case TCP_BBR_RACK_RTT_USE: 23679 RACK_OPTS_INC(tcp_rack_rtt_use); 23680 if ((optval != USE_RTT_HIGH) && 23681 (optval != USE_RTT_LOW) && 23682 (optval != USE_RTT_AVG)) 23683 error = EINVAL; 23684 else 23685 rack->r_ctl.rc_rate_sample_method = optval; 23686 break; 23687 case TCP_HONOR_HPTS_MIN: 23688 RACK_OPTS_INC(tcp_honor_hpts); 23689 if (optval) { 23690 rack->r_use_hpts_min = 1; 23691 /* 23692 * Must be between 2 - 80% to be a reduction else 23693 * we keep the default (10%). 23694 */ 23695 if ((optval > 1) && (optval <= 80)) { 23696 rack->r_ctl.max_reduction = optval; 23697 } 23698 } else 23699 rack->r_use_hpts_min = 0; 23700 break; 23701 case TCP_REC_IS_DYN: /* URL:dynrec */ 23702 RACK_OPTS_INC(tcp_dyn_rec); 23703 if (optval) 23704 rack->rc_gp_no_rec_chg = 1; 23705 else 23706 rack->rc_gp_no_rec_chg = 0; 23707 break; 23708 case TCP_NO_TIMELY: 23709 RACK_OPTS_INC(tcp_notimely); 23710 if (optval) { 23711 rack->rc_skip_timely = 1; 23712 rack->r_ctl.rack_per_of_gp_rec = 90; 23713 rack->r_ctl.rack_per_of_gp_ca = 100; 23714 rack->r_ctl.rack_per_of_gp_ss = 250; 23715 } else { 23716 rack->rc_skip_timely = 0; 23717 } 23718 break; 23719 case TCP_GP_USE_LTBW: 23720 if (optval == 0) { 23721 rack->use_lesser_lt_bw = 0; 23722 rack->dis_lt_bw = 1; 23723 } else if (optval == 1) { 23724 rack->use_lesser_lt_bw = 1; 23725 rack->dis_lt_bw = 0; 23726 } else if (optval == 2) { 23727 rack->use_lesser_lt_bw = 0; 23728 rack->dis_lt_bw = 0; 23729 } 23730 break; 23731 case TCP_DATA_AFTER_CLOSE: 23732 RACK_OPTS_INC(tcp_data_after_close); 23733 if (optval) 23734 rack->rc_allow_data_af_clo = 1; 23735 else 23736 rack->rc_allow_data_af_clo = 0; 23737 break; 23738 default: 23739 break; 23740 } 23741 tcp_log_socket_option(tp, sopt_name, optval, error); 23742 return (error); 23743 } 23744 23745 static void 23746 rack_inherit(struct tcpcb *tp, struct inpcb *parent) 23747 { 23748 /* 23749 * A new connection has been created (tp) and 23750 * the parent is the inpcb given. 
We want to 23751 * apply a read-lock to the parent (we are already 23752 * holding a write lock on the tp) and copy anything 23753 * out of the rack specific data as long as its tfb is 23754 * the same as ours i.e. we are the same stack. Otherwise 23755 * we just return. 23756 */ 23757 struct tcpcb *par; 23758 struct tcp_rack *dest, *src; 23759 int cnt = 0; 23760 23761 par = intotcpcb(parent); 23762 if (par->t_fb != tp->t_fb) { 23763 /* Not the same stack */ 23764 tcp_log_socket_option(tp, 0, 0, 1); 23765 return; 23766 } 23767 /* Ok if we reach here lets setup the two rack pointers */ 23768 dest = (struct tcp_rack *)tp->t_fb_ptr; 23769 src = (struct tcp_rack *)par->t_fb_ptr; 23770 if ((src == NULL) || (dest == NULL)) { 23771 /* Huh? */ 23772 tcp_log_socket_option(tp, 0, 0, 2); 23773 return; 23774 } 23775 /* Now copy out anything we wish to inherit i.e. things in socket-options */ 23776 /* TCP_RACK_PROFILE we can't know but we can set DGP if its on */ 23777 if ((src->dgp_on) && (dest->dgp_on == 0)) { 23778 /* Profile 1 had to be set via sock opt */ 23779 rack_set_dgp(dest); 23780 cnt++; 23781 } 23782 /* TCP_RACK_SET_RXT_OPTIONS */ 23783 if (dest->full_size_rxt != src->full_size_rxt) { 23784 dest->full_size_rxt = src->full_size_rxt; 23785 cnt++; 23786 } 23787 if (dest->shape_rxt_to_pacing_min != src->shape_rxt_to_pacing_min) { 23788 dest->shape_rxt_to_pacing_min = src->shape_rxt_to_pacing_min; 23789 cnt++; 23790 } 23791 /* TCP_RACK_DSACK_OPT */ 23792 if (dest->rc_rack_tmr_std_based != src->rc_rack_tmr_std_based) { 23793 dest->rc_rack_tmr_std_based = src->rc_rack_tmr_std_based; 23794 cnt++; 23795 } 23796 if (dest->rc_rack_use_dsack != src->rc_rack_use_dsack) { 23797 dest->rc_rack_use_dsack = src->rc_rack_use_dsack; 23798 cnt++; 23799 } 23800 /* TCP_RACK_PACING_DIVISOR */ 23801 if (dest->r_ctl.pace_len_divisor != src->r_ctl.pace_len_divisor) { 23802 dest->r_ctl.pace_len_divisor = src->r_ctl.pace_len_divisor; 23803 cnt++; 23804 } 23805 /* TCP_RACK_HI_BETA */ 23806 if (src->rack_hibeta != dest->rack_hibeta) { 23807 cnt++; 23808 if (src->rack_hibeta) { 23809 dest->r_ctl.rc_saved_beta.beta = src->r_ctl.rc_saved_beta.beta; 23810 dest->rack_hibeta = 1; 23811 } else { 23812 dest->rack_hibeta = 0; 23813 } 23814 } 23815 /* TCP_RACK_TIMER_SLOP */ 23816 if (dest->r_ctl.timer_slop != src->r_ctl.timer_slop) { 23817 dest->r_ctl.timer_slop = src->r_ctl.timer_slop; 23818 cnt++; 23819 } 23820 /* TCP_RACK_PACING_BETA_ECN */ 23821 if (dest->r_ctl.rc_saved_beta.beta_ecn != src->r_ctl.rc_saved_beta.beta_ecn) { 23822 dest->r_ctl.rc_saved_beta.beta_ecn = src->r_ctl.rc_saved_beta.beta_ecn; 23823 cnt++; 23824 } 23825 if (dest->r_ctl.rc_saved_beta.newreno_flags != src->r_ctl.rc_saved_beta.newreno_flags) { 23826 dest->r_ctl.rc_saved_beta.newreno_flags = src->r_ctl.rc_saved_beta.newreno_flags; 23827 cnt++; 23828 } 23829 /* We do not do TCP_DEFER_OPTIONS */ 23830 /* TCP_RACK_MEASURE_CNT */ 23831 if (dest->r_ctl.req_measurements != src->r_ctl.req_measurements) { 23832 dest->r_ctl.req_measurements = src->r_ctl.req_measurements; 23833 cnt++; 23834 } 23835 /* TCP_HDWR_UP_ONLY */ 23836 if (dest->r_up_only != src->r_up_only) { 23837 dest->r_up_only = src->r_up_only; 23838 cnt++; 23839 } 23840 /* TCP_FILLCW_RATE_CAP */ 23841 if (dest->r_ctl.fillcw_cap != src->r_ctl.fillcw_cap) { 23842 dest->r_ctl.fillcw_cap = src->r_ctl.fillcw_cap; 23843 cnt++; 23844 } 23845 /* TCP_PACING_RATE_CAP */ 23846 if (dest->r_ctl.bw_rate_cap != src->r_ctl.bw_rate_cap) { 23847 dest->r_ctl.bw_rate_cap = src->r_ctl.bw_rate_cap; 23848 cnt++; 23849 } 23850 
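	/*
	 * Illustrative sketch (the userland fd names below are
	 * hypothetical and not part of this file).  The fields copied in
	 * this function mirror RACK socket options, so a server that
	 * configures its listening socket has those settings carried onto
	 * every accepted connection through this inherit path.  For
	 * example, to have children arrive here with bw_rate_cap already
	 * populated on the parent:
	 *
	 *	uint64_t cap = 12500000;	(~100 Mbit/s in bytes/sec)
	 *	struct tcp_function_set tfs = { 0 };
	 *
	 *	strlcpy(tfs.function_set_name, "rack",
	 *	    sizeof(tfs.function_set_name));
	 *	setsockopt(lfd, IPPROTO_TCP, TCP_FUNCTION_BLK, &tfs, sizeof(tfs));
	 *	setsockopt(lfd, IPPROTO_TCP, TCP_PACING_RATE_CAP, &cap, sizeof(cap));
	 *
	 * TCP_PACING_RATE_CAP is a 64-bit option (see rack_set_sockopt()
	 * below), hence the uint64_t.
	 */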
/* A listener can't set TCP_HYBRID_PACING */ 23851 /* TCP_SIDECHAN_DIS */ 23852 if (dest->r_ctl.side_chan_dis_mask != src->r_ctl.side_chan_dis_mask) { 23853 dest->r_ctl.side_chan_dis_mask = src->r_ctl.side_chan_dis_mask; 23854 cnt++; 23855 } 23856 /* TCP_SHARED_CWND_TIME_LIMIT */ 23857 if (dest->r_limit_scw != src->r_limit_scw) { 23858 dest->r_limit_scw = src->r_limit_scw; 23859 cnt++; 23860 } 23861 /* TCP_RACK_PACE_TO_FILL */ 23862 if (dest->rc_pace_to_cwnd != src->rc_pace_to_cwnd) { 23863 dest->rc_pace_to_cwnd = src->rc_pace_to_cwnd; 23864 cnt++; 23865 } 23866 if (dest->rc_pace_fill_if_rttin_range != src->rc_pace_fill_if_rttin_range) { 23867 dest->rc_pace_fill_if_rttin_range = src->rc_pace_fill_if_rttin_range; 23868 cnt++; 23869 } 23870 if (dest->rtt_limit_mul != src->rtt_limit_mul) { 23871 dest->rtt_limit_mul = src->rtt_limit_mul; 23872 cnt++; 23873 } 23874 /* TCP_RACK_NO_PUSH_AT_MAX */ 23875 if (dest->r_ctl.rc_no_push_at_mrtt != src->r_ctl.rc_no_push_at_mrtt) { 23876 dest->r_ctl.rc_no_push_at_mrtt = src->r_ctl.rc_no_push_at_mrtt; 23877 cnt++; 23878 } 23879 /* TCP_SHARED_CWND_ENABLE */ 23880 if (dest->rack_enable_scwnd != src->rack_enable_scwnd) { 23881 dest->rack_enable_scwnd = src->rack_enable_scwnd; 23882 cnt++; 23883 } 23884 /* TCP_USE_CMP_ACKS */ 23885 if (dest->r_use_cmp_ack != src->r_use_cmp_ack) { 23886 dest->r_use_cmp_ack = src->r_use_cmp_ack; 23887 cnt++; 23888 } 23889 23890 if (dest->r_mbuf_queue != src->r_mbuf_queue) { 23891 dest->r_mbuf_queue = src->r_mbuf_queue; 23892 cnt++; 23893 } 23894 /* TCP_RACK_MBUF_QUEUE */ 23895 if (dest->r_mbuf_queue != src->r_mbuf_queue) { 23896 dest->r_mbuf_queue = src->r_mbuf_queue; 23897 cnt++; 23898 } 23899 if (dest->r_mbuf_queue || dest->rc_always_pace || dest->r_use_cmp_ack) { 23900 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23901 } else { 23902 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 23903 } 23904 if (dest->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) { 23905 tp->t_flags2 |= TF2_MBUF_ACKCMP; 23906 } 23907 /* TCP_RACK_NONRXT_CFG_RATE */ 23908 if (dest->rack_rec_nonrxt_use_cr != src->rack_rec_nonrxt_use_cr) { 23909 dest->rack_rec_nonrxt_use_cr = src->rack_rec_nonrxt_use_cr; 23910 cnt++; 23911 } 23912 /* TCP_NO_PRR */ 23913 if (dest->rack_no_prr != src->rack_no_prr) { 23914 dest->rack_no_prr = src->rack_no_prr; 23915 cnt++; 23916 } 23917 if (dest->no_prr_addback != src->no_prr_addback) { 23918 dest->no_prr_addback = src->no_prr_addback; 23919 cnt++; 23920 } 23921 /* RACK_CSPR_IS_FCC */ 23922 if (dest->cspr_is_fcc != src->cspr_is_fcc) { 23923 dest->cspr_is_fcc = src->cspr_is_fcc; 23924 cnt++; 23925 } 23926 /* TCP_TIMELY_DYN_ADJ */ 23927 if (dest->rc_gp_dyn_mul != src->rc_gp_dyn_mul) { 23928 dest->rc_gp_dyn_mul = src->rc_gp_dyn_mul; 23929 cnt++; 23930 } 23931 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { 23932 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; 23933 cnt++; 23934 } 23935 /* TCP_RACK_TLP_USE */ 23936 if (dest->rack_tlp_threshold_use != src->rack_tlp_threshold_use) { 23937 dest->rack_tlp_threshold_use = src->rack_tlp_threshold_use; 23938 cnt++; 23939 } 23940 /* we don't allow inheritence of TCP_RACK_PACE_ALWAYS */ 23941 /* TCP_BBR_RACK_INIT_RATE */ 23942 if (dest->r_ctl.init_rate != src->r_ctl.init_rate) { 23943 dest->r_ctl.init_rate = src->r_ctl.init_rate; 23944 cnt++; 23945 } 23946 /* TCP_RACK_FORCE_MSEG */ 23947 if (dest->rc_force_max_seg != src->rc_force_max_seg) { 23948 dest->rc_force_max_seg = src->rc_force_max_seg; 23949 cnt++; 23950 } 23951 /* TCP_RACK_PACE_MIN_SEG */ 23952 if 
(dest->r_ctl.rc_user_set_min_segs != src->r_ctl.rc_user_set_min_segs) { 23953 dest->r_ctl.rc_user_set_min_segs = src->r_ctl.rc_user_set_min_segs; 23954 cnt++; 23955 } 23956 /* we don't allow TCP_RACK_PACE_MAX_SEG */ 23957 /* TCP_RACK_PACE_RATE_REC, TCP_RACK_PACE_RATE_SS, TCP_RACK_PACE_RATE_CA */ 23958 if (dest->r_ctl.rc_fixed_pacing_rate_ca != src->r_ctl.rc_fixed_pacing_rate_ca) { 23959 dest->r_ctl.rc_fixed_pacing_rate_ca = src->r_ctl.rc_fixed_pacing_rate_ca; 23960 cnt++; 23961 } 23962 if (dest->r_ctl.rc_fixed_pacing_rate_ss != src->r_ctl.rc_fixed_pacing_rate_ss) { 23963 dest->r_ctl.rc_fixed_pacing_rate_ss = src->r_ctl.rc_fixed_pacing_rate_ss; 23964 cnt++; 23965 } 23966 if (dest->r_ctl.rc_fixed_pacing_rate_rec != src->r_ctl.rc_fixed_pacing_rate_rec) { 23967 dest->r_ctl.rc_fixed_pacing_rate_rec = src->r_ctl.rc_fixed_pacing_rate_rec; 23968 cnt++; 23969 } 23970 /* TCP_RACK_GP_INCREASE_REC, TCP_RACK_GP_INCREASE_CA, TCP_RACK_GP_INCREASE_SS */ 23971 if (dest->r_ctl.rack_per_of_gp_rec != src->r_ctl.rack_per_of_gp_rec) { 23972 dest->r_ctl.rack_per_of_gp_rec = src->r_ctl.rack_per_of_gp_rec; 23973 cnt++; 23974 } 23975 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { 23976 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; 23977 cnt++; 23978 } 23979 23980 if (dest->r_ctl.rack_per_of_gp_ss != src->r_ctl.rack_per_of_gp_ss) { 23981 dest->r_ctl.rack_per_of_gp_ss = src->r_ctl.rack_per_of_gp_ss; 23982 cnt++; 23983 } 23984 /* TCP_RACK_RR_CONF */ 23985 if (dest->r_rr_config != src->r_rr_config) { 23986 dest->r_rr_config = src->r_rr_config; 23987 cnt++; 23988 } 23989 /* TCP_PACING_DND */ 23990 if (dest->rc_pace_dnd != src->rc_pace_dnd) { 23991 dest->rc_pace_dnd = src->rc_pace_dnd; 23992 cnt++; 23993 } 23994 /* TCP_HDWR_RATE_CAP */ 23995 if (dest->r_rack_hw_rate_caps != src->r_rack_hw_rate_caps) { 23996 dest->r_rack_hw_rate_caps = src->r_rack_hw_rate_caps; 23997 cnt++; 23998 } 23999 /* TCP_DGP_UPPER_BOUNDS */ 24000 if (dest->r_ctl.rack_per_upper_bound_ca != src->r_ctl.rack_per_upper_bound_ca) { 24001 dest->r_ctl.rack_per_upper_bound_ca = src->r_ctl.rack_per_upper_bound_ca; 24002 cnt++; 24003 } 24004 if (dest->r_ctl.rack_per_upper_bound_ss != src->r_ctl.rack_per_upper_bound_ss) { 24005 dest->r_ctl.rack_per_upper_bound_ss = src->r_ctl.rack_per_upper_bound_ss; 24006 cnt++; 24007 } 24008 /* TCP_SS_EEXIT */ 24009 if (dest->r_ctl.gp_rnd_thresh != src->r_ctl.gp_rnd_thresh) { 24010 dest->r_ctl.gp_rnd_thresh = src->r_ctl.gp_rnd_thresh; 24011 cnt++; 24012 } 24013 if (dest->r_ctl.gate_to_fs != src->r_ctl.gate_to_fs) { 24014 dest->r_ctl.gate_to_fs = src->r_ctl.gate_to_fs; 24015 cnt++; 24016 } 24017 if (dest->r_ctl.use_gp_not_last != src->r_ctl.use_gp_not_last) { 24018 dest->r_ctl.use_gp_not_last = src->r_ctl.use_gp_not_last; 24019 cnt++; 24020 } 24021 if (dest->r_ctl.gp_gain_req != src->r_ctl.gp_gain_req) { 24022 dest->r_ctl.gp_gain_req = src->r_ctl.gp_gain_req; 24023 cnt++; 24024 } 24025 /* TCP_BBR_HDWR_PACE */ 24026 if (dest->rack_hdw_pace_ena != src->rack_hdw_pace_ena) { 24027 dest->rack_hdw_pace_ena = src->rack_hdw_pace_ena; 24028 cnt++; 24029 } 24030 if (dest->rack_attempt_hdwr_pace != src->rack_attempt_hdwr_pace) { 24031 dest->rack_attempt_hdwr_pace = src->rack_attempt_hdwr_pace; 24032 cnt++; 24033 } 24034 /* TCP_RACK_PRR_SENDALOT */ 24035 if (dest->r_ctl.rc_prr_sendalot != src->r_ctl.rc_prr_sendalot) { 24036 dest->r_ctl.rc_prr_sendalot = src->r_ctl.rc_prr_sendalot; 24037 cnt++; 24038 } 24039 /* TCP_RACK_MIN_TO */ 24040 if (dest->r_ctl.rc_min_to != src->r_ctl.rc_min_to) { 24041 
dest->r_ctl.rc_min_to = src->r_ctl.rc_min_to; 24042 cnt++; 24043 } 24044 /* TCP_RACK_EARLY_SEG */ 24045 if (dest->r_ctl.rc_early_recovery_segs != src->r_ctl.rc_early_recovery_segs) { 24046 dest->r_ctl.rc_early_recovery_segs = src->r_ctl.rc_early_recovery_segs; 24047 cnt++; 24048 } 24049 /* TCP_RACK_ENABLE_HYSTART */ 24050 if (par->t_ccv.flags != tp->t_ccv.flags) { 24051 cnt++; 24052 if (par->t_ccv.flags & CCF_HYSTART_ALLOWED) { 24053 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 24054 if (rack_do_hystart > RACK_HYSTART_ON) 24055 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 24056 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 24057 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 24058 } else { 24059 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 24060 } 24061 } 24062 /* TCP_RACK_REORD_THRESH */ 24063 if (dest->r_ctl.rc_reorder_shift != src->r_ctl.rc_reorder_shift) { 24064 dest->r_ctl.rc_reorder_shift = src->r_ctl.rc_reorder_shift; 24065 cnt++; 24066 } 24067 /* TCP_RACK_REORD_FADE */ 24068 if (dest->r_ctl.rc_reorder_fade != src->r_ctl.rc_reorder_fade) { 24069 dest->r_ctl.rc_reorder_fade = src->r_ctl.rc_reorder_fade; 24070 cnt++; 24071 } 24072 /* TCP_RACK_TLP_THRESH */ 24073 if (dest->r_ctl.rc_tlp_threshold != src->r_ctl.rc_tlp_threshold) { 24074 dest->r_ctl.rc_tlp_threshold = src->r_ctl.rc_tlp_threshold; 24075 cnt++; 24076 } 24077 /* TCP_BBR_USE_RACK_RR */ 24078 if (dest->use_rack_rr != src->use_rack_rr) { 24079 dest->use_rack_rr = src->use_rack_rr; 24080 cnt++; 24081 } 24082 /* TCP_RACK_PKT_DELAY */ 24083 if (dest->r_ctl.rc_pkt_delay != src->r_ctl.rc_pkt_delay) { 24084 dest->r_ctl.rc_pkt_delay = src->r_ctl.rc_pkt_delay; 24085 cnt++; 24086 } 24087 /* TCP_DELACK will get copied via the main code if applicable */ 24088 /* TCP_BBR_RACK_RTT_USE */ 24089 if (dest->r_ctl.rc_rate_sample_method != src->r_ctl.rc_rate_sample_method) { 24090 dest->r_ctl.rc_rate_sample_method = src->r_ctl.rc_rate_sample_method; 24091 cnt++; 24092 } 24093 /* TCP_HONOR_HPTS_MIN */ 24094 if (dest->r_use_hpts_min != src->r_use_hpts_min) { 24095 dest->r_use_hpts_min = src->r_use_hpts_min; 24096 cnt++; 24097 } 24098 if (dest->r_ctl.max_reduction != src->r_ctl.max_reduction) { 24099 dest->r_ctl.max_reduction = src->r_ctl.max_reduction; 24100 cnt++; 24101 } 24102 /* TCP_REC_IS_DYN */ 24103 if (dest->rc_gp_no_rec_chg != src->rc_gp_no_rec_chg) { 24104 dest->rc_gp_no_rec_chg = src->rc_gp_no_rec_chg; 24105 cnt++; 24106 } 24107 if (dest->rc_skip_timely != src->rc_skip_timely) { 24108 dest->rc_skip_timely = src->rc_skip_timely; 24109 cnt++; 24110 } 24111 /* TCP_DATA_AFTER_CLOSE */ 24112 if (dest->rc_allow_data_af_clo != src->rc_allow_data_af_clo) { 24113 dest->rc_allow_data_af_clo = src->rc_allow_data_af_clo; 24114 cnt++; 24115 } 24116 /* TCP_GP_USE_LTBW */ 24117 if (src->use_lesser_lt_bw != dest->use_lesser_lt_bw) { 24118 dest->use_lesser_lt_bw = src->use_lesser_lt_bw; 24119 cnt++; 24120 } 24121 if (dest->dis_lt_bw != src->dis_lt_bw) { 24122 dest->dis_lt_bw = src->dis_lt_bw; 24123 cnt++; 24124 } 24125 tcp_log_socket_option(tp, 0, cnt, 0); 24126 } 24127 24128 24129 static void 24130 rack_apply_deferred_options(struct tcp_rack *rack) 24131 { 24132 struct deferred_opt_list *dol, *sdol; 24133 uint32_t s_optval; 24134 24135 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) { 24136 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 24137 /* Disadvantage of deferal is you loose the error return */ 24138 s_optval = (uint32_t)dol->optval; 24139 (void)rack_process_option(rack->rc_tp, rack, dol->optname, 
s_optval, dol->optval, NULL); 24140 free(dol, M_TCPDO); 24141 } 24142 } 24143 24144 static void 24145 rack_hw_tls_change(struct tcpcb *tp, int chg) 24146 { 24147 /* Update HW tls state */ 24148 struct tcp_rack *rack; 24149 24150 rack = (struct tcp_rack *)tp->t_fb_ptr; 24151 if (chg) 24152 rack->r_ctl.fsb.hw_tls = 1; 24153 else 24154 rack->r_ctl.fsb.hw_tls = 0; 24155 } 24156 24157 static int 24158 rack_pru_options(struct tcpcb *tp, int flags) 24159 { 24160 if (flags & PRUS_OOB) 24161 return (EOPNOTSUPP); 24162 return (0); 24163 } 24164 24165 static bool 24166 rack_wake_check(struct tcpcb *tp) 24167 { 24168 struct tcp_rack *rack; 24169 struct timeval tv; 24170 uint32_t cts; 24171 24172 rack = (struct tcp_rack *)tp->t_fb_ptr; 24173 if (rack->r_ctl.rc_hpts_flags) { 24174 cts = tcp_get_usecs(&tv); 24175 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == PACE_PKT_OUTPUT){ 24176 /* 24177 * Pacing timer is up, check if we are ready. 24178 */ 24179 if (TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) 24180 return (true); 24181 } else if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) != 0) { 24182 /* 24183 * A timer is up, check if we are ready. 24184 */ 24185 if (TSTMP_GEQ(cts, rack->r_ctl.rc_timer_exp)) 24186 return (true); 24187 } 24188 } 24189 return (false); 24190 } 24191 24192 static struct tcp_function_block __tcp_rack = { 24193 .tfb_tcp_block_name = __XSTRING(STACKNAME), 24194 .tfb_tcp_output = rack_output, 24195 .tfb_do_queued_segments = ctf_do_queued_segments, 24196 .tfb_do_segment_nounlock = rack_do_segment_nounlock, 24197 .tfb_tcp_do_segment = rack_do_segment, 24198 .tfb_tcp_ctloutput = rack_ctloutput, 24199 .tfb_tcp_fb_init = rack_init, 24200 .tfb_tcp_fb_fini = rack_fini, 24201 .tfb_tcp_timer_stop_all = rack_stopall, 24202 .tfb_tcp_rexmit_tmr = rack_remxt_tmr, 24203 .tfb_tcp_handoff_ok = rack_handoff_ok, 24204 .tfb_tcp_mtu_chg = rack_mtu_change, 24205 .tfb_pru_options = rack_pru_options, 24206 .tfb_hwtls_change = rack_hw_tls_change, 24207 .tfb_chg_query = rack_chg_query, 24208 .tfb_switch_failed = rack_switch_failed, 24209 .tfb_early_wake_check = rack_wake_check, 24210 .tfb_compute_pipe = rack_compute_pipe, 24211 .tfb_stack_info = rack_stack_information, 24212 .tfb_inherit = rack_inherit, 24213 .tfb_flags = TCP_FUNC_OUTPUT_CANDROP | TCP_FUNC_DEFAULT_OK, 24214 24215 }; 24216 24217 /* 24218 * rack_ctloutput() must drop the inpcb lock before performing copyin on 24219 * socket option arguments. When it re-acquires the lock after the copy, it 24220 * has to revalidate that the connection is still valid for the socket 24221 * option. 
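 *
 * Concretely, rack_set_sockopt() below follows roughly this ordering
 * (a sketch, not a verbatim excerpt):
 *
 *	INP_WUNLOCK(inp);
 *	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
 *	if (error)
 *		return (error);
 *	INP_WLOCK(inp);
 *	if (tp->t_fb != &__tcp_rack) {
 *		INP_WUNLOCK(inp);
 *		return (ENOPROTOOPT);
 *	}
 *
 * i.e. the copyin (which may fault) runs unlocked, and the function
 * block is re-checked, since the connection may have switched stacks
 * while the lock was dropped, before any RACK state is modified.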
24222 */ 24223 static int 24224 rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt) 24225 { 24226 struct inpcb *inp = tptoinpcb(tp); 24227 #ifdef INET 24228 struct ip *ip; 24229 #endif 24230 struct tcp_rack *rack; 24231 struct tcp_hybrid_req hybrid; 24232 uint64_t loptval; 24233 int32_t error = 0, optval; 24234 24235 rack = (struct tcp_rack *)tp->t_fb_ptr; 24236 if (rack == NULL) { 24237 INP_WUNLOCK(inp); 24238 return (EINVAL); 24239 } 24240 #ifdef INET 24241 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 24242 #endif 24243 24244 switch (sopt->sopt_level) { 24245 #ifdef INET6 24246 case IPPROTO_IPV6: 24247 MPASS(inp->inp_vflag & INP_IPV6PROTO); 24248 switch (sopt->sopt_name) { 24249 case IPV6_USE_MIN_MTU: 24250 tcp6_use_min_mtu(tp); 24251 break; 24252 } 24253 INP_WUNLOCK(inp); 24254 return (0); 24255 #endif 24256 #ifdef INET 24257 case IPPROTO_IP: 24258 switch (sopt->sopt_name) { 24259 case IP_TOS: 24260 /* 24261 * The DSCP codepoint has changed, update the fsb. 24262 */ 24263 ip->ip_tos = rack->rc_inp->inp_ip_tos; 24264 break; 24265 case IP_TTL: 24266 /* 24267 * The TTL has changed, update the fsb. 24268 */ 24269 ip->ip_ttl = rack->rc_inp->inp_ip_ttl; 24270 break; 24271 } 24272 INP_WUNLOCK(inp); 24273 return (0); 24274 #endif 24275 #ifdef SO_PEERPRIO 24276 case SOL_SOCKET: 24277 switch (sopt->sopt_name) { 24278 case SO_PEERPRIO: /* SC-URL:bs */ 24279 /* Already read in and sanity checked in sosetopt(). */ 24280 if (inp->inp_socket) { 24281 rack->client_bufferlvl = inp->inp_socket->so_peerprio; 24282 } 24283 break; 24284 } 24285 INP_WUNLOCK(inp); 24286 return (0); 24287 #endif 24288 case IPPROTO_TCP: 24289 switch (sopt->sopt_name) { 24290 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */ 24291 /* Pacing related ones */ 24292 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */ 24293 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */ 24294 case TCP_RACK_PACE_MIN_SEG: /* URL:pace_min_seg */ 24295 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */ 24296 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */ 24297 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */ 24298 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss*/ 24299 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */ 24300 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */ 24301 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */ 24302 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */ 24303 case TCP_RACK_RR_CONF: /* URL:rrr_conf */ 24304 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */ 24305 case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */ 24306 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */ 24307 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */ 24308 case TCP_FILLCW_RATE_CAP: /* URL:fillcw_cap */ 24309 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */ 24310 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */ 24311 /* End pacing related */ 24312 case TCP_DELACK: /* URL:delack (in base TCP i.e. 
tcp_hints along with cc etc ) */ 24313 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */ 24314 case TCP_RACK_MIN_TO: /* URL:min_to */ 24315 case TCP_RACK_EARLY_SEG: /* URL:early_seg */ 24316 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */ 24317 case TCP_RACK_REORD_FADE: /* URL:reord_fade */ 24318 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */ 24319 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */ 24320 case TCP_RACK_TLP_USE: /* URL:tlp_use */ 24321 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */ 24322 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */ 24323 case TCP_NO_PRR: /* URL:noprr */ 24324 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */ 24325 case TCP_DATA_AFTER_CLOSE: /* no URL */ 24326 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */ 24327 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */ 24328 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */ 24329 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */ 24330 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */ 24331 case TCP_RACK_PROFILE: /* URL:profile */ 24332 case TCP_SIDECHAN_DIS: /* URL:scodm */ 24333 case TCP_HYBRID_PACING: /* URL:pacing=hybrid */ 24334 case TCP_USE_CMP_ACKS: /* URL:cmpack */ 24335 case TCP_RACK_ABC_VAL: /* URL:labc */ 24336 case TCP_REC_ABC_VAL: /* URL:reclabc */ 24337 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */ 24338 case TCP_DEFER_OPTIONS: /* URL:defer */ 24339 case TCP_RACK_DSACK_OPT: /* URL:dsack */ 24340 case TCP_RACK_TIMER_SLOP: /* URL:timer_slop */ 24341 case TCP_RACK_ENABLE_HYSTART: /* URL:hystart */ 24342 case TCP_RACK_SET_RXT_OPTIONS: /* URL:rxtsz */ 24343 case TCP_RACK_HI_BETA: /* URL:hibeta */ 24344 case TCP_RACK_SPLIT_LIMIT: /* URL:split */ 24345 case TCP_SS_EEXIT: /* URL:eexit */ 24346 case TCP_DGP_UPPER_BOUNDS: /* URL:upper */ 24347 case TCP_RACK_PACING_DIVISOR: /* URL:divisor */ 24348 case TCP_PACING_DND: /* URL:dnd */ 24349 case TCP_NO_TIMELY: /* URL:notimely */ 24350 case RACK_CSPR_IS_FCC: /* URL:csprisfcc */ 24351 case TCP_HONOR_HPTS_MIN: /* URL:hptsmin */ 24352 case TCP_REC_IS_DYN: /* URL:dynrec */ 24353 case TCP_GP_USE_LTBW: /* URL:useltbw */ 24354 goto process_opt; 24355 break; 24356 default: 24357 /* Filter off all unknown options to the base stack */ 24358 return (tcp_default_ctloutput(tp, sopt)); 24359 break; 24360 } 24361 default: 24362 INP_WUNLOCK(inp); 24363 return (0); 24364 } 24365 process_opt: 24366 INP_WUNLOCK(inp); 24367 if ((sopt->sopt_name == TCP_PACING_RATE_CAP) || 24368 (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) { 24369 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval)); 24370 /* 24371 * We truncate it down to 32 bits for the socket-option trace this 24372 * means rates > 34Gbps won't show right, but thats probably ok. 
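 * (Arithmetic: the traced optval is a uint32_t in bytes per second, and
 * 2^32 bytes/sec is roughly 4.29e9 B/s, i.e. about 34.4 Gbit/s, which is
 * where the 34Gbps figure comes from.  The full 64-bit loptval is still
 * what gets handed on to rack_process_option().)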
24373 */ 24374 optval = (uint32_t)loptval; 24375 } else if (sopt->sopt_name == TCP_HYBRID_PACING) { 24376 error = sooptcopyin(sopt, &hybrid, sizeof(hybrid), sizeof(hybrid)); 24377 } else { 24378 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 24379 /* Save it in 64 bit form too */ 24380 loptval = optval; 24381 } 24382 if (error) 24383 return (error); 24384 INP_WLOCK(inp); 24385 if (tp->t_fb != &__tcp_rack) { 24386 INP_WUNLOCK(inp); 24387 return (ENOPROTOOPT); 24388 } 24389 if (rack->defer_options && (rack->gp_ready == 0) && 24390 (sopt->sopt_name != TCP_DEFER_OPTIONS) && 24391 (sopt->sopt_name != TCP_HYBRID_PACING) && 24392 (sopt->sopt_name != TCP_RACK_SET_RXT_OPTIONS) && 24393 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) && 24394 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) { 24395 /* Options are being deferred */ 24396 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { 24397 INP_WUNLOCK(inp); 24398 return (0); 24399 } else { 24400 /* No memory to defer, fail */ 24401 INP_WUNLOCK(inp); 24402 return (ENOMEM); 24403 } 24404 } 24405 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval, &hybrid); 24406 INP_WUNLOCK(inp); 24407 return (error); 24408 } 24409 24410 static void 24411 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti) 24412 { 24413 24414 INP_WLOCK_ASSERT(tptoinpcb(tp)); 24415 bzero(ti, sizeof(*ti)); 24416 24417 ti->tcpi_state = tp->t_state; 24418 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP)) 24419 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; 24420 if (tp->t_flags & TF_SACK_PERMIT) 24421 ti->tcpi_options |= TCPI_OPT_SACK; 24422 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) { 24423 ti->tcpi_options |= TCPI_OPT_WSCALE; 24424 ti->tcpi_snd_wscale = tp->snd_scale; 24425 ti->tcpi_rcv_wscale = tp->rcv_scale; 24426 } 24427 if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT)) 24428 ti->tcpi_options |= TCPI_OPT_ECN; 24429 if (tp->t_flags & TF_FASTOPEN) 24430 ti->tcpi_options |= TCPI_OPT_TFO; 24431 /* still kept in ticks is t_rcvtime */ 24432 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick; 24433 /* Since we hold everything in precise useconds this is easy */ 24434 ti->tcpi_rtt = tp->t_srtt; 24435 ti->tcpi_rttvar = tp->t_rttvar; 24436 ti->tcpi_rto = tp->t_rxtcur; 24437 ti->tcpi_snd_ssthresh = tp->snd_ssthresh; 24438 ti->tcpi_snd_cwnd = tp->snd_cwnd; 24439 /* 24440 * FreeBSD-specific extension fields for tcp_info. 24441 */ 24442 ti->tcpi_rcv_space = tp->rcv_wnd; 24443 ti->tcpi_rcv_nxt = tp->rcv_nxt; 24444 ti->tcpi_snd_wnd = tp->snd_wnd; 24445 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. 
*/ 24446 ti->tcpi_snd_nxt = tp->snd_nxt; 24447 ti->tcpi_snd_mss = tp->t_maxseg; 24448 ti->tcpi_rcv_mss = tp->t_maxseg; 24449 ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack; 24450 ti->tcpi_rcv_ooopack = tp->t_rcvoopack; 24451 ti->tcpi_snd_zerowin = tp->t_sndzerowin; 24452 ti->tcpi_total_tlp = tp->t_sndtlppack; 24453 ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte; 24454 ti->tcpi_rttmin = tp->t_rttlow; 24455 #ifdef NETFLIX_STATS 24456 memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo)); 24457 #endif 24458 #ifdef TCP_OFFLOAD 24459 if (tp->t_flags & TF_TOE) { 24460 ti->tcpi_options |= TCPI_OPT_TOE; 24461 tcp_offload_tcp_info(tp, ti); 24462 } 24463 #endif 24464 } 24465 24466 static int 24467 rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt) 24468 { 24469 struct inpcb *inp = tptoinpcb(tp); 24470 struct tcp_rack *rack; 24471 int32_t error, optval; 24472 uint64_t val, loptval; 24473 struct tcp_info ti; 24474 /* 24475 * Because all our options are either boolean or an int, we can just 24476 * pull everything into optval and then unlock and copy. If we ever 24477 * add a option that is not a int, then this will have quite an 24478 * impact to this routine. 24479 */ 24480 error = 0; 24481 rack = (struct tcp_rack *)tp->t_fb_ptr; 24482 if (rack == NULL) { 24483 INP_WUNLOCK(inp); 24484 return (EINVAL); 24485 } 24486 switch (sopt->sopt_name) { 24487 case TCP_INFO: 24488 /* First get the info filled */ 24489 rack_fill_info(tp, &ti); 24490 /* Fix up the rtt related fields if needed */ 24491 INP_WUNLOCK(inp); 24492 error = sooptcopyout(sopt, &ti, sizeof ti); 24493 return (error); 24494 /* 24495 * Beta is the congestion control value for NewReno that influences how 24496 * much of a backoff happens when loss is detected. It is normally set 24497 * to 50 for 50% i.e. the cwnd is reduced to 50% of its previous value 24498 * when you exit recovery. 24499 */ 24500 case TCP_RACK_PACING_BETA: 24501 break; 24502 /* 24503 * Beta_ecn is the congestion control value for NewReno that influences how 24504 * much of a backoff happens when a ECN mark is detected. It is normally set 24505 * to 80 for 80% i.e. the cwnd is reduced by 20% of its previous value when 24506 * you exit recovery. Note that classic ECN has a beta of 50, it is only 24507 * ABE Ecn that uses this "less" value, but we do too with pacing :) 24508 */ 24509 24510 case TCP_RACK_PACING_BETA_ECN: 24511 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) 24512 error = EINVAL; 24513 else if (rack->rc_pacing_cc_set == 0) 24514 optval = rack->r_ctl.rc_saved_beta.beta_ecn; 24515 else { 24516 /* 24517 * Reach out into the CC data and report back what 24518 * I have previously set. Yeah it looks hackish but 24519 * we don't want to report the saved values. 
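 *
 * Hypothetical example: a NewReno connection that previously set
 * TCP_RACK_PACING_BETA_ECN to 80, and has had pacing push that value
 * into the live cc_data, reads back 80 here; per the description above,
 * that beta_ecn means an ECN-signalled reduction takes cwnd to 80% of
 * its prior value (e.g. 100 -> 80 segments), versus 50% with the
 * classic beta of 50.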
24520 */ 24521 if (tp->t_ccv.cc_data) 24522 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn; 24523 else 24524 error = EINVAL; 24525 } 24526 break; 24527 case TCP_RACK_DSACK_OPT: 24528 optval = 0; 24529 if (rack->rc_rack_tmr_std_based) { 24530 optval |= 1; 24531 } 24532 if (rack->rc_rack_use_dsack) { 24533 optval |= 2; 24534 } 24535 break; 24536 case TCP_RACK_ENABLE_HYSTART: 24537 { 24538 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { 24539 optval = RACK_HYSTART_ON; 24540 if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND) 24541 optval = RACK_HYSTART_ON_W_SC; 24542 if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH) 24543 optval = RACK_HYSTART_ON_W_SC_C; 24544 } else { 24545 optval = RACK_HYSTART_OFF; 24546 } 24547 } 24548 break; 24549 case TCP_RACK_DGP_IN_REC: 24550 error = EINVAL; 24551 break; 24552 case TCP_RACK_HI_BETA: 24553 optval = rack->rack_hibeta; 24554 break; 24555 case TCP_DEFER_OPTIONS: 24556 optval = rack->defer_options; 24557 break; 24558 case TCP_RACK_MEASURE_CNT: 24559 optval = rack->r_ctl.req_measurements; 24560 break; 24561 case TCP_REC_ABC_VAL: 24562 optval = rack->r_use_labc_for_rec; 24563 break; 24564 case TCP_RACK_ABC_VAL: 24565 optval = rack->rc_labc; 24566 break; 24567 case TCP_HDWR_UP_ONLY: 24568 optval= rack->r_up_only; 24569 break; 24570 case TCP_FILLCW_RATE_CAP: 24571 loptval = rack->r_ctl.fillcw_cap; 24572 break; 24573 case TCP_PACING_RATE_CAP: 24574 loptval = rack->r_ctl.bw_rate_cap; 24575 break; 24576 case TCP_RACK_PROFILE: 24577 /* You cannot retrieve a profile, its write only */ 24578 error = EINVAL; 24579 break; 24580 case TCP_SIDECHAN_DIS: 24581 optval = rack->r_ctl.side_chan_dis_mask; 24582 break; 24583 case TCP_HYBRID_PACING: 24584 /* You cannot retrieve hybrid pacing information, its write only */ 24585 error = EINVAL; 24586 break; 24587 case TCP_USE_CMP_ACKS: 24588 optval = rack->r_use_cmp_ack; 24589 break; 24590 case TCP_RACK_PACE_TO_FILL: 24591 optval = rack->rc_pace_to_cwnd; 24592 break; 24593 case TCP_RACK_NO_PUSH_AT_MAX: 24594 optval = rack->r_ctl.rc_no_push_at_mrtt; 24595 break; 24596 case TCP_SHARED_CWND_ENABLE: 24597 optval = rack->rack_enable_scwnd; 24598 break; 24599 case TCP_RACK_NONRXT_CFG_RATE: 24600 optval = rack->rack_rec_nonrxt_use_cr; 24601 break; 24602 case TCP_NO_PRR: 24603 if (rack->rack_no_prr == 1) 24604 optval = 1; 24605 else if (rack->no_prr_addback == 1) 24606 optval = 2; 24607 else 24608 optval = 0; 24609 break; 24610 case TCP_GP_USE_LTBW: 24611 if (rack->dis_lt_bw) { 24612 /* It is not used */ 24613 optval = 0; 24614 } else if (rack->use_lesser_lt_bw) { 24615 /* we use min() */ 24616 optval = 1; 24617 } else { 24618 /* we use max() */ 24619 optval = 2; 24620 } 24621 break; 24622 case TCP_RACK_DO_DETECTION: 24623 error = EINVAL; 24624 break; 24625 case TCP_RACK_MBUF_QUEUE: 24626 /* Now do we use the LRO mbuf-queue feature */ 24627 optval = rack->r_mbuf_queue; 24628 break; 24629 case RACK_CSPR_IS_FCC: 24630 optval = rack->cspr_is_fcc; 24631 break; 24632 case TCP_TIMELY_DYN_ADJ: 24633 optval = rack->rc_gp_dyn_mul; 24634 break; 24635 case TCP_BBR_IWINTSO: 24636 error = EINVAL; 24637 break; 24638 case TCP_RACK_TLP_REDUCE: 24639 /* RACK TLP cwnd reduction (bool) */ 24640 optval = rack->r_ctl.rc_tlp_cwnd_reduce; 24641 break; 24642 case TCP_BBR_RACK_INIT_RATE: 24643 val = rack->r_ctl.init_rate; 24644 /* convert to kbits per sec */ 24645 val *= 8; 24646 val /= 1000; 24647 optval = (uint32_t)val; 24648 break; 24649 case TCP_RACK_FORCE_MSEG: 24650 optval = rack->rc_force_max_seg; 24651 break; 24652 case TCP_RACK_PACE_MIN_SEG: 24653 optval = 
rack->r_ctl.rc_user_set_min_segs; 24654 break; 24655 case TCP_RACK_PACE_MAX_SEG: 24656 /* Max segments in a pace */ 24657 optval = rack->rc_user_set_max_segs; 24658 break; 24659 case TCP_RACK_PACE_ALWAYS: 24660 /* Use the always pace method */ 24661 optval = rack->rc_always_pace; 24662 break; 24663 case TCP_RACK_PRR_SENDALOT: 24664 /* Allow PRR to send more than one seg */ 24665 optval = rack->r_ctl.rc_prr_sendalot; 24666 break; 24667 case TCP_RACK_MIN_TO: 24668 /* Minimum time between rack t-o's in ms */ 24669 optval = rack->r_ctl.rc_min_to; 24670 break; 24671 case TCP_RACK_SPLIT_LIMIT: 24672 optval = rack->r_ctl.rc_split_limit; 24673 break; 24674 case TCP_RACK_EARLY_SEG: 24675 /* If early recovery max segments */ 24676 optval = rack->r_ctl.rc_early_recovery_segs; 24677 break; 24678 case TCP_RACK_REORD_THRESH: 24679 /* RACK reorder threshold (shift amount) */ 24680 optval = rack->r_ctl.rc_reorder_shift; 24681 break; 24682 case TCP_SS_EEXIT: 24683 if (rack->r_ctl.gp_rnd_thresh) { 24684 uint32_t v; 24685 24686 v = rack->r_ctl.gp_gain_req; 24687 v <<= 17; 24688 optval = v | (rack->r_ctl.gp_rnd_thresh & 0xff); 24689 if (rack->r_ctl.gate_to_fs == 1) 24690 optval |= 0x10000; 24691 } else 24692 optval = 0; 24693 break; 24694 case TCP_RACK_REORD_FADE: 24695 /* Does reordering fade after ms time */ 24696 optval = rack->r_ctl.rc_reorder_fade; 24697 break; 24698 case TCP_BBR_USE_RACK_RR: 24699 /* Do we use the rack cheat for rxt */ 24700 optval = rack->use_rack_rr; 24701 break; 24702 case TCP_RACK_RR_CONF: 24703 optval = rack->r_rr_config; 24704 break; 24705 case TCP_HDWR_RATE_CAP: 24706 optval = rack->r_rack_hw_rate_caps; 24707 break; 24708 case TCP_BBR_HDWR_PACE: 24709 optval = rack->rack_hdw_pace_ena; 24710 break; 24711 case TCP_RACK_TLP_THRESH: 24712 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 24713 optval = rack->r_ctl.rc_tlp_threshold; 24714 break; 24715 case TCP_RACK_PKT_DELAY: 24716 /* RACK added ms i.e. 
rack-rtt + reord + N */ 24717 optval = rack->r_ctl.rc_pkt_delay; 24718 break; 24719 case TCP_RACK_TLP_USE: 24720 optval = rack->rack_tlp_threshold_use; 24721 break; 24722 case TCP_PACING_DND: 24723 optval = rack->rc_pace_dnd; 24724 break; 24725 case TCP_RACK_PACE_RATE_CA: 24726 optval = rack->r_ctl.rc_fixed_pacing_rate_ca; 24727 break; 24728 case TCP_RACK_PACE_RATE_SS: 24729 optval = rack->r_ctl.rc_fixed_pacing_rate_ss; 24730 break; 24731 case TCP_RACK_PACE_RATE_REC: 24732 optval = rack->r_ctl.rc_fixed_pacing_rate_rec; 24733 break; 24734 case TCP_DGP_UPPER_BOUNDS: 24735 optval = rack->r_ctl.rack_per_upper_bound_ss; 24736 optval <<= 16; 24737 optval |= rack->r_ctl.rack_per_upper_bound_ca; 24738 break; 24739 case TCP_RACK_GP_INCREASE_SS: 24740 optval = rack->r_ctl.rack_per_of_gp_ca; 24741 break; 24742 case TCP_RACK_GP_INCREASE_CA: 24743 optval = rack->r_ctl.rack_per_of_gp_ss; 24744 break; 24745 case TCP_RACK_PACING_DIVISOR: 24746 optval = rack->r_ctl.pace_len_divisor; 24747 break; 24748 case TCP_BBR_RACK_RTT_USE: 24749 optval = rack->r_ctl.rc_rate_sample_method; 24750 break; 24751 case TCP_DELACK: 24752 optval = tp->t_delayed_ack; 24753 break; 24754 case TCP_DATA_AFTER_CLOSE: 24755 optval = rack->rc_allow_data_af_clo; 24756 break; 24757 case TCP_SHARED_CWND_TIME_LIMIT: 24758 optval = rack->r_limit_scw; 24759 break; 24760 case TCP_HONOR_HPTS_MIN: 24761 if (rack->r_use_hpts_min) 24762 optval = rack->r_ctl.max_reduction; 24763 else 24764 optval = 0; 24765 break; 24766 case TCP_REC_IS_DYN: 24767 optval = rack->rc_gp_no_rec_chg; 24768 break; 24769 case TCP_NO_TIMELY: 24770 optval = rack->rc_skip_timely; 24771 break; 24772 case TCP_RACK_TIMER_SLOP: 24773 optval = rack->r_ctl.timer_slop; 24774 break; 24775 default: 24776 return (tcp_default_ctloutput(tp, sopt)); 24777 break; 24778 } 24779 INP_WUNLOCK(inp); 24780 if (error == 0) { 24781 if ((sopt->sopt_name == TCP_PACING_RATE_CAP) || 24782 (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) 24783 error = sooptcopyout(sopt, &loptval, sizeof loptval); 24784 else 24785 error = sooptcopyout(sopt, &optval, sizeof optval); 24786 } 24787 return (error); 24788 } 24789 24790 static int 24791 rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt) 24792 { 24793 if (sopt->sopt_dir == SOPT_SET) { 24794 return (rack_set_sockopt(tp, sopt)); 24795 } else if (sopt->sopt_dir == SOPT_GET) { 24796 return (rack_get_sockopt(tp, sopt)); 24797 } else { 24798 panic("%s: sopt_dir $%d", __func__, sopt->sopt_dir); 24799 } 24800 } 24801 24802 static const char *rack_stack_names[] = { 24803 __XSTRING(STACKNAME), 24804 #ifdef STACKALIAS 24805 __XSTRING(STACKALIAS), 24806 #endif 24807 }; 24808 24809 static int 24810 rack_ctor(void *mem, int32_t size, void *arg, int32_t how) 24811 { 24812 memset(mem, 0, size); 24813 return (0); 24814 } 24815 24816 static void 24817 rack_dtor(void *mem, int32_t size, void *arg) 24818 { 24819 24820 } 24821 24822 static bool rack_mod_inited = false; 24823 24824 static int 24825 tcp_addrack(module_t mod, int32_t type, void *data) 24826 { 24827 int32_t err = 0; 24828 int num_stacks; 24829 24830 switch (type) { 24831 case MOD_LOAD: 24832 rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map", 24833 sizeof(struct rack_sendmap), 24834 rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0); 24835 24836 rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb", 24837 sizeof(struct tcp_rack), 24838 rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); 24839 24840 sysctl_ctx_init(&rack_sysctl_ctx); 24841 rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 24842 
SYSCTL_STATIC_CHILDREN(_net_inet_tcp), 24843 OID_AUTO, 24844 #ifdef STACKALIAS 24845 __XSTRING(STACKALIAS), 24846 #else 24847 __XSTRING(STACKNAME), 24848 #endif 24849 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 24850 ""); 24851 if (rack_sysctl_root == NULL) { 24852 printf("Failed to add sysctl node\n"); 24853 err = EFAULT; 24854 goto free_uma; 24855 } 24856 rack_init_sysctls(); 24857 num_stacks = nitems(rack_stack_names); 24858 err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK, 24859 rack_stack_names, &num_stacks); 24860 if (err) { 24861 printf("Failed to register %s stack name for " 24862 "%s module\n", rack_stack_names[num_stacks], 24863 __XSTRING(MODNAME)); 24864 sysctl_ctx_free(&rack_sysctl_ctx); 24865 free_uma: 24866 uma_zdestroy(rack_zone); 24867 uma_zdestroy(rack_pcb_zone); 24868 rack_counter_destroy(); 24869 printf("Failed to register rack module -- err:%d\n", err); 24870 return (err); 24871 } 24872 tcp_lro_reg_mbufq(); 24873 rack_mod_inited = true; 24874 break; 24875 case MOD_QUIESCE: 24876 err = deregister_tcp_functions(&__tcp_rack, true, false); 24877 break; 24878 case MOD_UNLOAD: 24879 err = deregister_tcp_functions(&__tcp_rack, false, true); 24880 if (err == EBUSY) 24881 break; 24882 if (rack_mod_inited) { 24883 uma_zdestroy(rack_zone); 24884 uma_zdestroy(rack_pcb_zone); 24885 sysctl_ctx_free(&rack_sysctl_ctx); 24886 rack_counter_destroy(); 24887 rack_mod_inited = false; 24888 } 24889 tcp_lro_dereg_mbufq(); 24890 err = 0; 24891 break; 24892 default: 24893 return (EOPNOTSUPP); 24894 } 24895 return (err); 24896 } 24897 24898 static moduledata_t tcp_rack = { 24899 .name = __XSTRING(MODNAME), 24900 .evhand = tcp_addrack, 24901 .priv = 0 24902 }; 24903 24904 MODULE_VERSION(MODNAME, 1); 24905 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY); 24906 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1); 24907 24908 #endif /* #if !defined(INET) && !defined(INET6) */ 24909
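
/*
 * Usage sketch (illustrative only; the exact names depend on how the
 * module is built, i.e. on MODNAME/STACKNAME/STACKALIAS above).  Once
 * loaded, the stack registers itself under the names in
 * rack_stack_names[] and its sysctl tree hangs off net.inet.tcp:
 *
 *	kldload tcp_rack
 *	sysctl net.inet.tcp.functions_available   (the new stack is listed)
 *	sysctl net.inet.tcp.functions_default=rack
 *
 * or, per socket, before the connection is established:
 *
 *	struct tcp_function_set tfs = { 0 };
 *
 *	strlcpy(tfs.function_set_name, "rack", sizeof(tfs.function_set_name));
 *	setsockopt(fd, IPPROTO_TCP, TCP_FUNCTION_BLK, &tfs, sizeof(tfs));
 */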