/*-
 * Copyright (c) 2016-2020 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_ratelimit.h"
#include "opt_kern_tls.h"
#if defined(INET) || defined(INET6)
#include <sys/param.h>
#include <sys/arb.h>
#include <sys/module.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#ifdef STATS
#include <sys/qmath.h>
#include <sys/tree.h>
#include <sys/stats.h>		/* Must come after qmath.h and tree.h */
#else
#include <sys/tree.h>
#endif
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/tim_filter.h>
#include <sys/smp.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>
#include <sys/protosw.h>
#ifdef TCP_ACCOUNTING
#include <sys/sched.h>
#include <machine/cpu.h>
#endif
#include <vm/uma.h>

#include <net/route.h>
#include <net/route/nhop.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define	TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_syncache.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_ratelimit.h>
#include <netinet/tcp_accounting.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_newreno.h>
#include <netinet/tcp_fastopen.h>
#include <netinet/tcp_lro.h>
#ifdef NETFLIX_SHARED_CWND
#include <netinet/tcp_shared_cwnd.h>
#endif
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcp_ecn.h>

#include <netipsec/ipsec_support.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif				/* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
#include "sack_filter.h"
#include "tcp_rack.h"
#include "tailq_hash.h"
#include "rack_bbr_common.h"

uma_zone_t rack_zone;
uma_zone_t rack_pcb_zone;

#ifndef TICKS2SBT
#define	TICKS2SBT(__t)	(tick_sbt * ((sbintime_t)(__t)))
#endif

VNET_DECLARE(uint32_t, newreno_beta);
VNET_DECLARE(uint32_t, newreno_beta_ecn);
#define V_newreno_beta VNET(newreno_beta)
#define V_newreno_beta_ecn VNET(newreno_beta_ecn)

#define	M_TCPFSB	__CONCAT(M_TCPFSB, STACKNAME)
#define	M_TCPDO		__CONCAT(M_TCPDO, STACKNAME)

MALLOC_DEFINE(M_TCPFSB, "tcp_fsb_" __XSTRING(STACKNAME), "TCP fast send block");
MALLOC_DEFINE(M_TCPDO, "tcp_do_" __XSTRING(STACKNAME), "TCP deferred options");
MALLOC_DEFINE(M_TCPPCM, "tcp_pcm_" __XSTRING(STACKNAME), "TCP PCM measurement information");

struct sysctl_ctx_list rack_sysctl_ctx;
struct sysctl_oid *rack_sysctl_root;

#define CUM_ACKED 1
#define SACKED 2

/*
 * The RACK module incorporates a number of
 * TCP ideas that have been put out into the IETF
 * over the last few years:
 * - Matt Mathis's Rate Halving which slowly drops
 *    the congestion window so that the ack clock can
 *    be maintained during a recovery.
 * - Yuchung Cheng's RACK TCP (for which it is named) that
 *    will stop us using the number of dup acks and instead
 *    use time as the gauge of when we retransmit.
 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
 *    of Dukkipati et al.
 * RACK depends on SACK, so if an endpoint arrives that
 * cannot do SACK the state machine below will shuttle the
 * connection back to using the "default" TCP stack that is
 * in FreeBSD.
 *
 * To implement RACK the original TCP stack was first decomposed
 * into a functional state machine with individual states
 * for each of the possible TCP connection states. The do_segment
 * function's role in life is to mandate that the connection supports SACK
 * initially and then assure that the RACK state matches the connection
 * state before calling the state's do_segment function. Each
 * state is simplified due to the fact that the original do_segment
 * has been decomposed and we *know* what state we are in (no
 * switches on the state) and all tests for SACK are gone. This
 * greatly simplifies what each state does.
 *
 * TCP output is also overwritten with a new version since it
 * must maintain the new rack scoreboard.
 *
 */
static int32_t rack_tlp_thresh = 1;
static int32_t rack_tlp_limit = 2;	/* No more than 2 TLPs w-out new data */
static int32_t rack_tlp_use_greater = 1;
static int32_t rack_reorder_thresh = 2;
static int32_t rack_reorder_fade = 60000000;	/* 0 - never fade, def 60,000,000
						 * - 60 seconds */
static uint16_t rack_policer_rxt_thresh = 0;	/* 499 = 49.9%, 0 is off */
static uint8_t rack_policer_avg_thresh = 0;	/* 3.2 */
static uint8_t rack_policer_med_thresh = 0;	/* 1 - 16 */
static uint16_t rack_policer_bucket_reserve = 20; /* How much % is reserved in the bucket */
static uint64_t rack_pol_min_bw = 125000;	/* 1mbps in Bytes per sec */
static uint32_t rack_policer_data_thresh = 64000;	/* 64,000 bytes must be sent before we engage */
static uint32_t rack_policing_do_bw_comp = 1;
static uint32_t rack_pcm_every_n_rounds = 100;
static uint32_t rack_pcm_blast = 0;
static uint32_t rack_pcm_is_enabled = 1;
static uint8_t rack_req_del_mss = 18;	/* How many segments need to be sent in a recovery episode to do policer_detection */
static uint8_t rack_ssthresh_rest_rto_rec = 0;	/* Do we restore ssthresh when we have rec -> rto -> rec */

static uint32_t rack_gp_gain_req = 1200;	/* Amount, percent wise, required to gain to record a round as "gaining" */
static uint32_t rack_rnd_cnt_req = 0x10005;	/* Default number of rounds if we are below rack_gp_gain_req where we exit ss */


static int32_t rack_rxt_scoreboard_clear_thresh = 2;
static int32_t rack_dnd_default = 0;	/* For rr_conf = 3, what is the default for dnd */
static int32_t rack_rxt_controls = 0;
static int32_t rack_fill_cw_state = 0;
static uint8_t rack_req_measurements = 1;
/* Attack threshold detections */
static uint32_t rack_highest_sack_thresh_seen = 0;
static uint32_t rack_highest_move_thresh_seen = 0;
static uint32_t rack_merge_out_sacks_on_attack = 0;
static int32_t rack_enable_hw_pacing = 0;	/* Due to CCSP keep it off by default */
static int32_t rack_hw_pace_extra_slots = 0;	/* 2 extra MSS time betweens */
static int32_t rack_hw_rate_caps = 0;	/* 1; */
static int32_t rack_hw_rate_cap_per = 0;	/* 0 -- off */
static int32_t rack_hw_rate_min = 0;	/* 1500000; */
static int32_t rack_hw_rate_to_low = 0;	/* 1200000; */
static int32_t rack_hw_up_only = 0;
static int32_t rack_stats_gets_ms_rtt = 1;
static int32_t rack_prr_addbackmax = 2;
static int32_t rack_do_hystart = 0;
static int32_t rack_apply_rtt_with_reduced_conf = 0;
static int32_t rack_hibeta_setting = 0;
static int32_t rack_default_pacing_divisor = 250;
static uint16_t rack_pacing_min_seg = 0;
static int32_t rack_timely_off = 0;

static uint32_t sad_seg_size_per = 800;	/* 80.0 % */
static int32_t rack_pkt_delay = 1000;
static int32_t rack_send_a_lot_in_prr = 1;
static int32_t rack_min_to = 1000;	/* Number of microseconds min timeout */
static int32_t rack_verbose_logging = 0;
static int32_t rack_ignore_data_after_close = 1;
static int32_t rack_enable_shared_cwnd = 1;
static int32_t rack_use_cmp_acks = 1;
static int32_t rack_use_fsb = 1;
static int32_t rack_use_rfo = 1;
static int32_t rack_use_rsm_rfo = 1;
static int32_t rack_max_abc_post_recovery = 2;
static int32_t rack_client_low_buf = 0;
static int32_t rack_dsack_std_based = 0x3;	/* bit field bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */
static int32_t
rack_bw_multipler = 0; /* Limit on fill cw's jump up to be this x gp_est */ 253 #ifdef TCP_ACCOUNTING 254 static int32_t rack_tcp_accounting = 0; 255 #endif 256 static int32_t rack_limits_scwnd = 1; 257 static int32_t rack_enable_mqueue_for_nonpaced = 0; 258 static int32_t rack_hybrid_allow_set_maxseg = 0; 259 static int32_t rack_disable_prr = 0; 260 static int32_t use_rack_rr = 1; 261 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/ca)? */ 262 static int32_t rack_persist_min = 250000; /* 250usec */ 263 static int32_t rack_persist_max = 2000000; /* 2 Second in usec's */ 264 static int32_t rack_honors_hpts_min_to = 1; /* Do we honor the hpts minimum time out for pacing timers */ 265 static uint32_t rack_max_reduce = 10; /* Percent we can reduce slot by */ 266 static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */ 267 static int32_t rack_limit_time_with_srtt = 0; 268 static int32_t rack_autosndbuf_inc = 20; /* In percentage form */ 269 static int32_t rack_enobuf_hw_boost_mult = 0; /* How many times the hw rate we boost slot using time_between */ 270 static int32_t rack_enobuf_hw_max = 12000; /* 12 ms in usecs */ 271 static int32_t rack_enobuf_hw_min = 10000; /* 10 ms in usecs */ 272 static int32_t rack_hw_rwnd_factor = 2; /* How many max_segs the rwnd must be before we hold off sending */ 273 static int32_t rack_hw_check_queue = 0; /* Do we always pre-check queue depth of a hw queue */ 274 static int32_t rack_full_buffer_discount = 10; 275 /* 276 * Currently regular tcp has a rto_min of 30ms 277 * the backoff goes 12 times so that ends up 278 * being a total of 122.850 seconds before a 279 * connection is killed. 280 */ 281 static uint32_t rack_def_data_window = 20; 282 static uint32_t rack_goal_bdp = 2; 283 static uint32_t rack_min_srtts = 1; 284 static uint32_t rack_min_measure_usec = 0; 285 static int32_t rack_tlp_min = 10000; /* 10ms */ 286 static int32_t rack_rto_min = 30000; /* 30,000 usec same as main freebsd */ 287 static int32_t rack_rto_max = 4000000; /* 4 seconds in usec's */ 288 static const int32_t rack_free_cache = 2; 289 static int32_t rack_hptsi_segments = 40; 290 static int32_t rack_rate_sample_method = USE_RTT_LOW; 291 static int32_t rack_pace_every_seg = 0; 292 static int32_t rack_delayed_ack_time = 40000; /* 40ms in usecs */ 293 static int32_t rack_slot_reduction = 4; 294 static int32_t rack_wma_divisor = 8; /* For WMA calculation */ 295 static int32_t rack_cwnd_block_ends_measure = 0; 296 static int32_t rack_rwnd_block_ends_measure = 0; 297 static int32_t rack_def_profile = 0; 298 299 static int32_t rack_lower_cwnd_at_tlp = 0; 300 static int32_t rack_always_send_oldest = 0; 301 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE; 302 303 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */ 304 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */ 305 static uint16_t rack_per_of_gp_rec = 200; /* 200 % of bw */ 306 307 /* Probertt */ 308 static uint16_t rack_per_of_gp_probertt = 60; /* 60% of bw */ 309 static uint16_t rack_per_of_gp_lowthresh = 40; /* 40% is bottom */ 310 static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */ 311 static uint16_t rack_atexit_prtt_hbp = 130; /* Clamp to 130% on exit prtt if highly buffered path */ 312 static uint16_t rack_atexit_prtt = 130; /* Clamp to 100% on exit prtt if non highly buffered path */ 313 314 static uint32_t rack_max_drain_wait = 2; /* How man gp srtt's before we give up draining */ 315 
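
/*
 * Editor's illustrative sketch -- not part of the original stack.  The
 * rack_per_of_gp_* tunables above are plain percentages applied to the
 * goodput (GP) estimate when choosing a pacing rate, per their comments
 * (e.g. 250 means "pace at 250% of the measured goodput" in slow start,
 * and probe-RTT drops the percentage toward rack_per_of_gp_lowthresh).
 * The hypothetical helper below only shows that arithmetic; the real
 * calculation (see the "Timely information" comment further down and
 * rack_get_output_gain()) folds in many more factors.
 */
static inline uint64_t
rack_example_scaled_bw(uint64_t gp_bw_bps, uint16_t gain_per)
{
	/* A 10 Mbps estimate with gain_per = 250 yields a 25 Mbps target. */
	return ((gp_bw_bps * (uint64_t)gain_per) / 100);
}
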
static uint32_t rack_must_drain = 1; /* How many GP srtt's we *must* wait */ 316 static uint32_t rack_probertt_use_min_rtt_entry = 1; /* Use the min to calculate the goal else gp_srtt */ 317 static uint32_t rack_probertt_use_min_rtt_exit = 0; 318 static uint32_t rack_probe_rtt_sets_cwnd = 0; 319 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */ 320 static uint32_t rack_time_between_probertt = 9600000; /* 9.6 sec in usecs */ 321 static uint32_t rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt periods does probe-rtt last top fraction */ 322 static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last bottom fraction */ 323 static uint32_t rack_min_probertt_hold = 40000; /* Equal to delayed ack time */ 324 static uint32_t rack_probertt_filter_life = 10000000; 325 static uint32_t rack_probertt_lower_within = 10; 326 static uint32_t rack_min_rtt_movement = 250000; /* Must move at least 250ms (in microseconds) to count as a lowering */ 327 static int32_t rack_pace_one_seg = 0; /* Shall we pace for less than 1.4Meg 1MSS at a time */ 328 static int32_t rack_probertt_clear_is = 1; 329 static int32_t rack_max_drain_hbp = 1; /* Extra drain times gpsrtt for highly buffered paths */ 330 static int32_t rack_hbp_thresh = 3; /* what is the divisor max_rtt/min_rtt to decided a hbp */ 331 332 /* Part of pacing */ 333 static int32_t rack_max_per_above = 30; /* When we go to increment stop if above 100+this% */ 334 335 /* Timely information: 336 * 337 * Here we have various control parameters on how 338 * timely may change the multiplier. rack_gain_p5_ub 339 * is associated with timely but not directly influencing 340 * the rate decision like the other variables. It controls 341 * the way fill-cw interacts with timely and caps how much 342 * timely can boost the fill-cw b/w. 343 * 344 * The other values are various boost/shrink numbers as well 345 * as potential caps when adjustments are made to the timely 346 * gain (returned by rack_get_output_gain(). Remember too that 347 * the gain returned can be overriden by other factors such as 348 * probeRTT as well as fixed-rate-pacing. 349 */ 350 static int32_t rack_gain_p5_ub = 250; 351 static int32_t rack_gp_per_bw_mul_up = 2; /* 2% */ 352 static int32_t rack_gp_per_bw_mul_down = 4; /* 4% */ 353 static int32_t rack_gp_rtt_maxmul = 3; /* 3 x maxmin */ 354 static int32_t rack_gp_rtt_minmul = 1; /* minrtt + (minrtt/mindiv) is lower rtt */ 355 static int32_t rack_gp_rtt_mindiv = 4; /* minrtt + (minrtt * minmul/mindiv) is lower rtt */ 356 static int32_t rack_gp_decrease_per = 80; /* Beta value of timely decrease (.8) = 80 */ 357 static int32_t rack_gp_increase_per = 2; /* 2% increase in multiplier */ 358 static int32_t rack_per_lower_bound = 50; /* Don't allow to drop below this multiplier */ 359 static int32_t rack_per_upper_bound_ss = 0; /* Don't allow SS to grow above this */ 360 static int32_t rack_per_upper_bound_ca = 0; /* Don't allow CA to grow above this */ 361 static int32_t rack_do_dyn_mul = 0; /* Are the rack gp multipliers dynamic */ 362 static int32_t rack_gp_no_rec_chg = 1; /* Prohibit recovery from reducing it's multiplier */ 363 static int32_t rack_timely_dec_clear = 6; /* Do we clear decrement count at a value (6)? 
*/ 364 static int32_t rack_timely_max_push_rise = 3; /* One round of pushing */ 365 static int32_t rack_timely_max_push_drop = 3; /* Three round of pushing */ 366 static int32_t rack_timely_min_segs = 4; /* 4 segment minimum */ 367 static int32_t rack_use_max_for_nobackoff = 0; 368 static int32_t rack_timely_int_timely_only = 0; /* do interim timely's only use the timely algo (no b/w changes)? */ 369 static int32_t rack_timely_no_stopping = 0; 370 static int32_t rack_down_raise_thresh = 100; 371 static int32_t rack_req_segs = 1; 372 static uint64_t rack_bw_rate_cap = 0; 373 static uint64_t rack_fillcw_bw_cap = 3750000; /* Cap fillcw at 30Mbps */ 374 375 376 /* Rack specific counters */ 377 counter_u64_t rack_saw_enobuf; 378 counter_u64_t rack_saw_enobuf_hw; 379 counter_u64_t rack_saw_enetunreach; 380 counter_u64_t rack_persists_sends; 381 counter_u64_t rack_persists_acks; 382 counter_u64_t rack_persists_loss; 383 counter_u64_t rack_persists_lost_ends; 384 counter_u64_t rack_total_bytes; 385 #ifdef INVARIANTS 386 counter_u64_t rack_adjust_map_bw; 387 #endif 388 /* Tail loss probe counters */ 389 counter_u64_t rack_tlp_tot; 390 counter_u64_t rack_tlp_newdata; 391 counter_u64_t rack_tlp_retran; 392 counter_u64_t rack_tlp_retran_bytes; 393 counter_u64_t rack_to_tot; 394 counter_u64_t rack_hot_alloc; 395 counter_u64_t tcp_policer_detected; 396 counter_u64_t rack_to_alloc; 397 counter_u64_t rack_to_alloc_hard; 398 counter_u64_t rack_to_alloc_emerg; 399 counter_u64_t rack_to_alloc_limited; 400 counter_u64_t rack_alloc_limited_conns; 401 counter_u64_t rack_split_limited; 402 counter_u64_t rack_rxt_clamps_cwnd; 403 counter_u64_t rack_rxt_clamps_cwnd_uniq; 404 405 counter_u64_t rack_multi_single_eq; 406 counter_u64_t rack_proc_non_comp_ack; 407 408 counter_u64_t rack_fto_send; 409 counter_u64_t rack_fto_rsm_send; 410 counter_u64_t rack_nfto_resend; 411 counter_u64_t rack_non_fto_send; 412 counter_u64_t rack_extended_rfo; 413 414 counter_u64_t rack_sack_proc_all; 415 counter_u64_t rack_sack_proc_short; 416 counter_u64_t rack_sack_proc_restart; 417 counter_u64_t rack_sack_attacks_detected; 418 counter_u64_t rack_sack_attacks_reversed; 419 counter_u64_t rack_sack_attacks_suspect; 420 counter_u64_t rack_sack_used_next_merge; 421 counter_u64_t rack_sack_splits; 422 counter_u64_t rack_sack_used_prev_merge; 423 counter_u64_t rack_sack_skipped_acked; 424 counter_u64_t rack_ack_total; 425 counter_u64_t rack_express_sack; 426 counter_u64_t rack_sack_total; 427 counter_u64_t rack_move_none; 428 counter_u64_t rack_move_some; 429 430 counter_u64_t rack_input_idle_reduces; 431 counter_u64_t rack_collapsed_win; 432 counter_u64_t rack_collapsed_win_seen; 433 counter_u64_t rack_collapsed_win_rxt; 434 counter_u64_t rack_collapsed_win_rxt_bytes; 435 counter_u64_t rack_try_scwnd; 436 counter_u64_t rack_hw_pace_init_fail; 437 counter_u64_t rack_hw_pace_lost; 438 439 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE]; 440 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE]; 441 442 443 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2))) 444 445 #define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do { \ 446 (tv) = (value) + slop; \ 447 if ((u_long)(tv) < (u_long)(tvmin)) \ 448 (tv) = (tvmin); \ 449 if ((u_long)(tv) > (u_long)(tvmax)) \ 450 (tv) = (tvmax); \ 451 } while (0) 452 453 static void 454 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line); 455 456 static int 457 rack_process_ack(struct mbuf *m, struct tcphdr *th, 458 struct socket *so, 
struct tcpcb *tp, struct tcpopt *to, 459 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val, int32_t orig_tlen); 460 static int 461 rack_process_data(struct mbuf *m, struct tcphdr *th, 462 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 463 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 464 static void 465 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, 466 uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery); 467 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack); 468 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack, 469 uint8_t limit_type); 470 static struct rack_sendmap * 471 rack_check_recovery_mode(struct tcpcb *tp, 472 uint32_t tsused); 473 static uint32_t 474 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack); 475 static void 476 rack_cong_signal(struct tcpcb *tp, 477 uint32_t type, uint32_t ack, int ); 478 static void rack_counter_destroy(void); 479 static int 480 rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt); 481 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how); 482 static void 483 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override); 484 static void 485 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 486 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos); 487 static void rack_dtor(void *mem, int32_t size, void *arg); 488 static void 489 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 490 uint32_t flex1, uint32_t flex2, 491 uint32_t flex3, uint32_t flex4, 492 uint32_t flex5, uint32_t flex6, 493 uint16_t flex7, uint8_t mod); 494 495 static void 496 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 497 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line, 498 struct rack_sendmap *rsm, uint8_t quality); 499 static struct rack_sendmap * 500 rack_find_high_nonack(struct tcp_rack *rack, 501 struct rack_sendmap *rsm); 502 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack); 503 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm); 504 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged); 505 static int rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt); 506 static void 507 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 508 tcp_seq th_ack, int line, uint8_t quality); 509 static void 510 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm); 511 512 static uint32_t 513 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss); 514 static int32_t rack_handoff_ok(struct tcpcb *tp); 515 static int32_t rack_init(struct tcpcb *tp, void **ptr); 516 static void rack_init_sysctls(void); 517 518 static void 519 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, 520 struct tcphdr *th, int entered_rec, int dup_ack_struck, 521 int *dsack_seen, int *sacks_seen); 522 static void 523 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 524 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t ts, 525 struct rack_sendmap *hintrsm, uint32_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls, int segsiz); 526 527 static uint64_t rack_get_gp_est(struct tcp_rack *rack); 528 529 530 static void 531 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack, 532 struct rack_sendmap *rsm, uint32_t cts); 533 static void rack_log_to_event(struct tcp_rack *rack, int32_t 
to_num, struct rack_sendmap *rsm); 534 static int32_t rack_output(struct tcpcb *tp); 535 536 static uint32_t 537 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, 538 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm, 539 uint32_t cts, uint32_t segsiz); 540 static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq); 541 static void rack_remxt_tmr(struct tcpcb *tp); 542 static int rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt); 543 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack); 544 static int32_t rack_stopall(struct tcpcb *tp); 545 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line); 546 static uint32_t 547 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 548 struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint32_t add_flag, int segsiz); 549 static void 550 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 551 struct rack_sendmap *rsm, uint64_t ts, uint32_t add_flag, int segsiz); 552 static int 553 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 554 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack); 555 static int32_t tcp_addrack(module_t mod, int32_t type, void *data); 556 static int 557 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, 558 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 559 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 560 561 static void 562 rack_peg_rxt(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t segsiz); 563 564 static int 565 rack_do_closing(struct mbuf *m, struct tcphdr *th, 566 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 567 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 568 static int 569 rack_do_established(struct mbuf *m, struct tcphdr *th, 570 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 571 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 572 static int 573 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, 574 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 575 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos); 576 static int 577 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, 578 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 579 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 580 static int 581 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, 582 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 583 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 584 static int 585 rack_do_lastack(struct mbuf *m, struct tcphdr *th, 586 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 587 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 588 static int 589 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, 590 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 591 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 592 static int 593 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, 594 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 595 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 596 static void rack_chk_req_and_hybrid_on_out(struct 
tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts); 597 struct rack_sendmap * 598 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, 599 uint32_t tsused); 600 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, 601 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt); 602 static void 603 tcp_rack_partialack(struct tcpcb *tp); 604 static int 605 rack_set_profile(struct tcp_rack *rack, int prof); 606 static void 607 rack_apply_deferred_options(struct tcp_rack *rack); 608 609 int32_t rack_clear_counter=0; 610 611 static uint64_t 612 rack_get_lt_bw(struct tcp_rack *rack) 613 { 614 struct timeval tv; 615 uint64_t tim, bytes; 616 617 tim = rack->r_ctl.lt_bw_time; 618 bytes = rack->r_ctl.lt_bw_bytes; 619 if (rack->lt_bw_up) { 620 /* Include all the current bytes too */ 621 microuptime(&tv); 622 bytes += (rack->rc_tp->snd_una - rack->r_ctl.lt_seq); 623 tim += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark); 624 } 625 if ((bytes != 0) && (tim != 0)) 626 return ((bytes * (uint64_t)1000000) / tim); 627 else 628 return (0); 629 } 630 631 static void 632 rack_swap_beta_values(struct tcp_rack *rack, uint8_t flex8) 633 { 634 struct sockopt sopt; 635 struct cc_newreno_opts opt; 636 struct newreno old; 637 struct tcpcb *tp; 638 int error, failed = 0; 639 640 tp = rack->rc_tp; 641 if (tp->t_cc == NULL) { 642 /* Tcb is leaving */ 643 return; 644 } 645 rack->rc_pacing_cc_set = 1; 646 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 647 /* Not new-reno we can't play games with beta! */ 648 failed = 1; 649 goto out; 650 651 } 652 if (CC_ALGO(tp)->ctl_output == NULL) { 653 /* Huh, not using new-reno so no swaps.? */ 654 failed = 2; 655 goto out; 656 } 657 /* Get the current values out */ 658 sopt.sopt_valsize = sizeof(struct cc_newreno_opts); 659 sopt.sopt_dir = SOPT_GET; 660 opt.name = CC_NEWRENO_BETA; 661 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 662 if (error) { 663 failed = 3; 664 goto out; 665 } 666 old.beta = opt.val; 667 opt.name = CC_NEWRENO_BETA_ECN; 668 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 669 if (error) { 670 failed = 4; 671 goto out; 672 } 673 old.beta_ecn = opt.val; 674 675 /* Now lets set in the values we have stored */ 676 sopt.sopt_dir = SOPT_SET; 677 opt.name = CC_NEWRENO_BETA; 678 opt.val = rack->r_ctl.rc_saved_beta.beta; 679 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 680 if (error) { 681 failed = 5; 682 goto out; 683 } 684 opt.name = CC_NEWRENO_BETA_ECN; 685 opt.val = rack->r_ctl.rc_saved_beta.beta_ecn; 686 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 687 if (error) { 688 failed = 6; 689 goto out; 690 } 691 /* Save off the values for restoral */ 692 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno)); 693 out: 694 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 695 union tcp_log_stackspecific log; 696 struct timeval tv; 697 struct newreno *ptr; 698 699 ptr = ((struct newreno *)tp->t_ccv.cc_data); 700 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 701 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 702 log.u_bbr.flex1 = ptr->beta; 703 log.u_bbr.flex2 = ptr->beta_ecn; 704 log.u_bbr.flex3 = ptr->newreno_flags; 705 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta; 706 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn; 707 log.u_bbr.flex6 = failed; 708 log.u_bbr.flex7 = rack->gp_ready; 709 log.u_bbr.flex7 <<= 1; 710 log.u_bbr.flex7 |= rack->use_fixed_rate; 711 log.u_bbr.flex7 <<= 1; 712 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; 713 
log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 714 log.u_bbr.flex8 = flex8; 715 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, error, 716 0, &log, false, NULL, NULL, 0, &tv); 717 } 718 } 719 720 static void 721 rack_set_cc_pacing(struct tcp_rack *rack) 722 { 723 if (rack->rc_pacing_cc_set) 724 return; 725 /* 726 * Use the swap utility placing in 3 for flex8 to id a 727 * set of a new set of values. 728 */ 729 rack->rc_pacing_cc_set = 1; 730 rack_swap_beta_values(rack, 3); 731 } 732 733 static void 734 rack_undo_cc_pacing(struct tcp_rack *rack) 735 { 736 if (rack->rc_pacing_cc_set == 0) 737 return; 738 /* 739 * Use the swap utility placing in 4 for flex8 to id a 740 * restoral of the old values. 741 */ 742 rack->rc_pacing_cc_set = 0; 743 rack_swap_beta_values(rack, 4); 744 } 745 746 static void 747 rack_remove_pacing(struct tcp_rack *rack) 748 { 749 if (rack->rc_pacing_cc_set) 750 rack_undo_cc_pacing(rack); 751 if (rack->r_ctl.pacing_method & RACK_REG_PACING) 752 tcp_decrement_paced_conn(); 753 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) 754 tcp_dec_dgp_pacing_cnt(); 755 rack->rc_always_pace = 0; 756 rack->r_ctl.pacing_method = RACK_PACING_NONE; 757 rack->dgp_on = 0; 758 rack->rc_hybrid_mode = 0; 759 rack->use_fixed_rate = 0; 760 } 761 762 static void 763 rack_log_gpset(struct tcp_rack *rack, uint32_t seq_end, uint32_t ack_end_t, 764 uint32_t send_end_t, int line, uint8_t mode, struct rack_sendmap *rsm) 765 { 766 if (tcp_bblogging_on(rack->rc_tp) && (rack_verbose_logging != 0)) { 767 union tcp_log_stackspecific log; 768 struct timeval tv; 769 770 memset(&log, 0, sizeof(log)); 771 log.u_bbr.flex1 = seq_end; 772 log.u_bbr.flex2 = rack->rc_tp->gput_seq; 773 log.u_bbr.flex3 = ack_end_t; 774 log.u_bbr.flex4 = rack->rc_tp->gput_ts; 775 log.u_bbr.flex5 = send_end_t; 776 log.u_bbr.flex6 = rack->rc_tp->gput_ack; 777 log.u_bbr.flex7 = mode; 778 log.u_bbr.flex8 = 69; 779 log.u_bbr.rttProp = rack->r_ctl.rc_gp_cumack_ts; 780 log.u_bbr.delRate = rack->r_ctl.rc_gp_output_ts; 781 log.u_bbr.pkts_out = line; 782 log.u_bbr.cwnd_gain = rack->app_limited_needs_set; 783 log.u_bbr.pkt_epoch = rack->r_ctl.rc_app_limited_cnt; 784 log.u_bbr.epoch = rack->r_ctl.current_round; 785 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 786 if (rsm != NULL) { 787 log.u_bbr.applimited = rsm->r_start; 788 log.u_bbr.delivered = rsm->r_end; 789 log.u_bbr.epoch = rsm->r_flags; 790 } 791 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 792 TCP_LOG_EVENTP(rack->rc_tp, NULL, 793 &rack->rc_inp->inp_socket->so_rcv, 794 &rack->rc_inp->inp_socket->so_snd, 795 BBR_LOG_HPTSI_CALC, 0, 796 0, &log, false, &tv); 797 } 798 } 799 800 static int 801 sysctl_rack_clear(SYSCTL_HANDLER_ARGS) 802 { 803 uint32_t stat; 804 int32_t error; 805 806 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t)); 807 if (error || req->newptr == NULL) 808 return error; 809 810 error = SYSCTL_IN(req, &stat, sizeof(uint32_t)); 811 if (error) 812 return (error); 813 if (stat == 1) { 814 #ifdef INVARIANTS 815 printf("Clearing RACK counters\n"); 816 #endif 817 counter_u64_zero(rack_tlp_tot); 818 counter_u64_zero(rack_tlp_newdata); 819 counter_u64_zero(rack_tlp_retran); 820 counter_u64_zero(rack_tlp_retran_bytes); 821 counter_u64_zero(rack_to_tot); 822 counter_u64_zero(rack_saw_enobuf); 823 counter_u64_zero(rack_saw_enobuf_hw); 824 counter_u64_zero(rack_saw_enetunreach); 825 counter_u64_zero(rack_persists_sends); 826 counter_u64_zero(rack_total_bytes); 827 counter_u64_zero(rack_persists_acks); 828 counter_u64_zero(rack_persists_loss); 829 
counter_u64_zero(rack_persists_lost_ends); 830 #ifdef INVARIANTS 831 counter_u64_zero(rack_adjust_map_bw); 832 #endif 833 counter_u64_zero(rack_to_alloc_hard); 834 counter_u64_zero(rack_to_alloc_emerg); 835 counter_u64_zero(rack_sack_proc_all); 836 counter_u64_zero(rack_fto_send); 837 counter_u64_zero(rack_fto_rsm_send); 838 counter_u64_zero(rack_extended_rfo); 839 counter_u64_zero(rack_hw_pace_init_fail); 840 counter_u64_zero(rack_hw_pace_lost); 841 counter_u64_zero(rack_non_fto_send); 842 counter_u64_zero(rack_nfto_resend); 843 counter_u64_zero(rack_sack_proc_short); 844 counter_u64_zero(rack_sack_proc_restart); 845 counter_u64_zero(rack_to_alloc); 846 counter_u64_zero(rack_to_alloc_limited); 847 counter_u64_zero(rack_alloc_limited_conns); 848 counter_u64_zero(rack_split_limited); 849 counter_u64_zero(rack_rxt_clamps_cwnd); 850 counter_u64_zero(rack_rxt_clamps_cwnd_uniq); 851 counter_u64_zero(rack_multi_single_eq); 852 counter_u64_zero(rack_proc_non_comp_ack); 853 counter_u64_zero(rack_sack_attacks_detected); 854 counter_u64_zero(rack_sack_attacks_reversed); 855 counter_u64_zero(rack_sack_attacks_suspect); 856 counter_u64_zero(rack_sack_used_next_merge); 857 counter_u64_zero(rack_sack_used_prev_merge); 858 counter_u64_zero(rack_sack_splits); 859 counter_u64_zero(rack_sack_skipped_acked); 860 counter_u64_zero(rack_ack_total); 861 counter_u64_zero(rack_express_sack); 862 counter_u64_zero(rack_sack_total); 863 counter_u64_zero(rack_move_none); 864 counter_u64_zero(rack_move_some); 865 counter_u64_zero(rack_try_scwnd); 866 counter_u64_zero(rack_collapsed_win); 867 counter_u64_zero(rack_collapsed_win_rxt); 868 counter_u64_zero(rack_collapsed_win_seen); 869 counter_u64_zero(rack_collapsed_win_rxt_bytes); 870 } else if (stat == 2) { 871 #ifdef INVARIANTS 872 printf("Clearing RACK option array\n"); 873 #endif 874 COUNTER_ARRAY_ZERO(rack_opts_arry, RACK_OPTS_SIZE); 875 } else if (stat == 3) { 876 printf("Rack has no stats counters to clear (use 1 to clear all stats in sysctl node)\n"); 877 } else if (stat == 4) { 878 #ifdef INVARIANTS 879 printf("Clearing RACK out size array\n"); 880 #endif 881 COUNTER_ARRAY_ZERO(rack_out_size, TCP_MSS_ACCT_SIZE); 882 } 883 rack_clear_counter = 0; 884 return (0); 885 } 886 887 static void 888 rack_init_sysctls(void) 889 { 890 struct sysctl_oid *rack_counters; 891 struct sysctl_oid *rack_attack; 892 struct sysctl_oid *rack_pacing; 893 struct sysctl_oid *rack_timely; 894 struct sysctl_oid *rack_timers; 895 struct sysctl_oid *rack_tlp; 896 struct sysctl_oid *rack_misc; 897 struct sysctl_oid *rack_features; 898 struct sysctl_oid *rack_measure; 899 struct sysctl_oid *rack_probertt; 900 struct sysctl_oid *rack_hw_pacing; 901 struct sysctl_oid *rack_policing; 902 903 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 904 SYSCTL_CHILDREN(rack_sysctl_root), 905 OID_AUTO, 906 "sack_attack", 907 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 908 "Rack Sack Attack Counters and Controls"); 909 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 910 SYSCTL_CHILDREN(rack_sysctl_root), 911 OID_AUTO, 912 "stats", 913 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 914 "Rack Counters"); 915 SYSCTL_ADD_S32(&rack_sysctl_ctx, 916 SYSCTL_CHILDREN(rack_sysctl_root), 917 OID_AUTO, "rate_sample_method", CTLFLAG_RW, 918 &rack_rate_sample_method , USE_RTT_LOW, 919 "What method should we use for rate sampling 0=high, 1=low "); 920 /* Probe rtt related controls */ 921 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 922 SYSCTL_CHILDREN(rack_sysctl_root), 923 OID_AUTO, 924 "probertt", 925 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 926 
"ProbeRTT related Controls"); 927 SYSCTL_ADD_U16(&rack_sysctl_ctx, 928 SYSCTL_CHILDREN(rack_probertt), 929 OID_AUTO, "exit_per_hpb", CTLFLAG_RW, 930 &rack_atexit_prtt_hbp, 130, 931 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%"); 932 SYSCTL_ADD_U16(&rack_sysctl_ctx, 933 SYSCTL_CHILDREN(rack_probertt), 934 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW, 935 &rack_atexit_prtt, 130, 936 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%"); 937 SYSCTL_ADD_U16(&rack_sysctl_ctx, 938 SYSCTL_CHILDREN(rack_probertt), 939 OID_AUTO, "gp_per_mul", CTLFLAG_RW, 940 &rack_per_of_gp_probertt, 60, 941 "What percentage of goodput do we pace at in probertt"); 942 SYSCTL_ADD_U16(&rack_sysctl_ctx, 943 SYSCTL_CHILDREN(rack_probertt), 944 OID_AUTO, "gp_per_reduce", CTLFLAG_RW, 945 &rack_per_of_gp_probertt_reduce, 10, 946 "What percentage of goodput do we reduce every gp_srtt"); 947 SYSCTL_ADD_U16(&rack_sysctl_ctx, 948 SYSCTL_CHILDREN(rack_probertt), 949 OID_AUTO, "gp_per_low", CTLFLAG_RW, 950 &rack_per_of_gp_lowthresh, 40, 951 "What percentage of goodput do we allow the multiplier to fall to"); 952 SYSCTL_ADD_U32(&rack_sysctl_ctx, 953 SYSCTL_CHILDREN(rack_probertt), 954 OID_AUTO, "time_between", CTLFLAG_RW, 955 & rack_time_between_probertt, 96000000, 956 "How many useconds between the lowest rtt falling must past before we enter probertt"); 957 SYSCTL_ADD_U32(&rack_sysctl_ctx, 958 SYSCTL_CHILDREN(rack_probertt), 959 OID_AUTO, "safety", CTLFLAG_RW, 960 &rack_probe_rtt_safety_val, 2000000, 961 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)"); 962 SYSCTL_ADD_U32(&rack_sysctl_ctx, 963 SYSCTL_CHILDREN(rack_probertt), 964 OID_AUTO, "sets_cwnd", CTLFLAG_RW, 965 &rack_probe_rtt_sets_cwnd, 0, 966 "Do we set the cwnd too (if always_lower is on)"); 967 SYSCTL_ADD_U32(&rack_sysctl_ctx, 968 SYSCTL_CHILDREN(rack_probertt), 969 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW, 970 &rack_max_drain_wait, 2, 971 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal"); 972 SYSCTL_ADD_U32(&rack_sysctl_ctx, 973 SYSCTL_CHILDREN(rack_probertt), 974 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW, 975 &rack_must_drain, 1, 976 "We must drain this many gp_srtt's waiting for flight to reach goal"); 977 SYSCTL_ADD_U32(&rack_sysctl_ctx, 978 SYSCTL_CHILDREN(rack_probertt), 979 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW, 980 &rack_probertt_use_min_rtt_entry, 1, 981 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); 982 SYSCTL_ADD_U32(&rack_sysctl_ctx, 983 SYSCTL_CHILDREN(rack_probertt), 984 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW, 985 &rack_probertt_use_min_rtt_exit, 0, 986 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); 987 SYSCTL_ADD_U32(&rack_sysctl_ctx, 988 SYSCTL_CHILDREN(rack_probertt), 989 OID_AUTO, "length_div", CTLFLAG_RW, 990 &rack_probertt_gpsrtt_cnt_div, 0, 991 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)"); 992 SYSCTL_ADD_U32(&rack_sysctl_ctx, 993 SYSCTL_CHILDREN(rack_probertt), 994 OID_AUTO, "length_mul", CTLFLAG_RW, 995 &rack_probertt_gpsrtt_cnt_mul, 0, 996 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)"); 997 SYSCTL_ADD_U32(&rack_sysctl_ctx, 998 SYSCTL_CHILDREN(rack_probertt), 999 OID_AUTO, "holdtim_at_target", CTLFLAG_RW, 1000 &rack_min_probertt_hold, 200000, 1001 "What is the minimum time we hold probertt at target"); 1002 
SYSCTL_ADD_U32(&rack_sysctl_ctx, 1003 SYSCTL_CHILDREN(rack_probertt), 1004 OID_AUTO, "filter_life", CTLFLAG_RW, 1005 &rack_probertt_filter_life, 10000000, 1006 "What is the time for the filters life in useconds"); 1007 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1008 SYSCTL_CHILDREN(rack_probertt), 1009 OID_AUTO, "lower_within", CTLFLAG_RW, 1010 &rack_probertt_lower_within, 10, 1011 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); 1012 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1013 SYSCTL_CHILDREN(rack_probertt), 1014 OID_AUTO, "must_move", CTLFLAG_RW, 1015 &rack_min_rtt_movement, 250, 1016 "How much is the minimum movement in rtt to count as a drop for probertt purposes"); 1017 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1018 SYSCTL_CHILDREN(rack_probertt), 1019 OID_AUTO, "clear_is_cnts", CTLFLAG_RW, 1020 &rack_probertt_clear_is, 1, 1021 "Do we clear I/S counts on exiting probe-rtt"); 1022 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1023 SYSCTL_CHILDREN(rack_probertt), 1024 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW, 1025 &rack_max_drain_hbp, 1, 1026 "How many extra drain gpsrtt's do we get in highly buffered paths"); 1027 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1028 SYSCTL_CHILDREN(rack_probertt), 1029 OID_AUTO, "hbp_threshold", CTLFLAG_RW, 1030 &rack_hbp_thresh, 3, 1031 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); 1032 /* Pacing related sysctls */ 1033 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1034 SYSCTL_CHILDREN(rack_sysctl_root), 1035 OID_AUTO, 1036 "pacing", 1037 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1038 "Pacing related Controls"); 1039 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1040 SYSCTL_CHILDREN(rack_pacing), 1041 OID_AUTO, "pcm_enabled", CTLFLAG_RW, 1042 &rack_pcm_is_enabled, 1, 1043 "Do we by default do PCM measurements?"); 1044 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1045 SYSCTL_CHILDREN(rack_pacing), 1046 OID_AUTO, "pcm_rnds", CTLFLAG_RW, 1047 &rack_pcm_every_n_rounds, 100, 1048 "How many rounds before we need to do a PCM measurement"); 1049 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1050 SYSCTL_CHILDREN(rack_pacing), 1051 OID_AUTO, "pcm_blast", CTLFLAG_RW, 1052 &rack_pcm_blast, 0, 1053 "Blast out the full cwnd/rwnd when doing a PCM measurement"); 1054 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1055 SYSCTL_CHILDREN(rack_pacing), 1056 OID_AUTO, "rnd_gp_gain", CTLFLAG_RW, 1057 &rack_gp_gain_req, 1200, 1058 "How much do we have to increase the GP to record the round 1200 = 120.0"); 1059 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1060 SYSCTL_CHILDREN(rack_pacing), 1061 OID_AUTO, "dgp_out_of_ss_at", CTLFLAG_RW, 1062 &rack_rnd_cnt_req, 0x10005, 1063 "How many rounds less than rnd_gp_gain will drop us out of SS"); 1064 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1065 SYSCTL_CHILDREN(rack_pacing), 1066 OID_AUTO, "no_timely", CTLFLAG_RW, 1067 &rack_timely_off, 0, 1068 "Do we not use timely in DGP?"); 1069 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1070 SYSCTL_CHILDREN(rack_pacing), 1071 OID_AUTO, "fullbufdisc", CTLFLAG_RW, 1072 &rack_full_buffer_discount, 10, 1073 "What percentage b/w reduction over the GP estimate for a full buffer (default=0 off)?"); 1074 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1075 SYSCTL_CHILDREN(rack_pacing), 1076 OID_AUTO, "fillcw", CTLFLAG_RW, 1077 &rack_fill_cw_state, 0, 1078 "Enable fillcw on new connections (default=0 off)?"); 1079 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1080 SYSCTL_CHILDREN(rack_pacing), 1081 OID_AUTO, "min_burst", CTLFLAG_RW, 1082 &rack_pacing_min_seg, 0, 1083 "What is the min burst size for pacing (0 disables)?"); 1084 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1085 SYSCTL_CHILDREN(rack_pacing), 1086 OID_AUTO, 
"divisor", CTLFLAG_RW, 1087 &rack_default_pacing_divisor, 250, 1088 "What is the default divisor given to the rl code?"); 1089 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1090 SYSCTL_CHILDREN(rack_pacing), 1091 OID_AUTO, "fillcw_max_mult", CTLFLAG_RW, 1092 &rack_bw_multipler, 0, 1093 "What is the limit multiplier of the current gp_est that fillcw can increase the b/w too, 200 == 200% (0 = off)?"); 1094 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1095 SYSCTL_CHILDREN(rack_pacing), 1096 OID_AUTO, "max_pace_over", CTLFLAG_RW, 1097 &rack_max_per_above, 30, 1098 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)"); 1099 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1100 SYSCTL_CHILDREN(rack_pacing), 1101 OID_AUTO, "allow1mss", CTLFLAG_RW, 1102 &rack_pace_one_seg, 0, 1103 "Do we allow low b/w pacing of 1MSS instead of two (1.2Meg and less)?"); 1104 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1105 SYSCTL_CHILDREN(rack_pacing), 1106 OID_AUTO, "limit_wsrtt", CTLFLAG_RW, 1107 &rack_limit_time_with_srtt, 0, 1108 "Do we limit pacing time based on srtt"); 1109 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1110 SYSCTL_CHILDREN(rack_pacing), 1111 OID_AUTO, "gp_per_ss", CTLFLAG_RW, 1112 &rack_per_of_gp_ss, 250, 1113 "If non zero, what percentage of goodput to pace at in slow start"); 1114 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1115 SYSCTL_CHILDREN(rack_pacing), 1116 OID_AUTO, "gp_per_ca", CTLFLAG_RW, 1117 &rack_per_of_gp_ca, 150, 1118 "If non zero, what percentage of goodput to pace at in congestion avoidance"); 1119 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1120 SYSCTL_CHILDREN(rack_pacing), 1121 OID_AUTO, "gp_per_rec", CTLFLAG_RW, 1122 &rack_per_of_gp_rec, 200, 1123 "If non zero, what percentage of goodput to pace at in recovery"); 1124 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1125 SYSCTL_CHILDREN(rack_pacing), 1126 OID_AUTO, "pace_max_seg", CTLFLAG_RW, 1127 &rack_hptsi_segments, 40, 1128 "What size is the max for TSO segments in pacing and burst mitigation"); 1129 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1130 SYSCTL_CHILDREN(rack_pacing), 1131 OID_AUTO, "burst_reduces", CTLFLAG_RW, 1132 &rack_slot_reduction, 4, 1133 "When doing only burst mitigation what is the reduce divisor"); 1134 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1135 SYSCTL_CHILDREN(rack_sysctl_root), 1136 OID_AUTO, "use_pacing", CTLFLAG_RW, 1137 &rack_pace_every_seg, 0, 1138 "If set we use pacing, if clear we use only the original burst mitigation"); 1139 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1140 SYSCTL_CHILDREN(rack_pacing), 1141 OID_AUTO, "rate_cap", CTLFLAG_RW, 1142 &rack_bw_rate_cap, 0, 1143 "If set we apply this value to the absolute rate cap used by pacing"); 1144 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1145 SYSCTL_CHILDREN(rack_pacing), 1146 OID_AUTO, "fillcw_cap", CTLFLAG_RW, 1147 &rack_fillcw_bw_cap, 3750000, 1148 "Do we have an absolute cap on the amount of b/w fillcw can specify (0 = no)?"); 1149 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1150 SYSCTL_CHILDREN(rack_sysctl_root), 1151 OID_AUTO, "req_measure_cnt", CTLFLAG_RW, 1152 &rack_req_measurements, 1, 1153 "If doing dynamic pacing, how many measurements must be in before we start pacing?"); 1154 /* Hardware pacing */ 1155 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1156 SYSCTL_CHILDREN(rack_sysctl_root), 1157 OID_AUTO, 1158 "hdwr_pacing", 1159 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1160 "Pacing related Controls"); 1161 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1162 SYSCTL_CHILDREN(rack_hw_pacing), 1163 OID_AUTO, "rwnd_factor", CTLFLAG_RW, 1164 &rack_hw_rwnd_factor, 2, 1165 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and 
get more acks?"); 1166 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1167 SYSCTL_CHILDREN(rack_hw_pacing), 1168 OID_AUTO, "precheck", CTLFLAG_RW, 1169 &rack_hw_check_queue, 0, 1170 "Do we always precheck the hdwr pacing queue to avoid ENOBUF's?"); 1171 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1172 SYSCTL_CHILDREN(rack_hw_pacing), 1173 OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW, 1174 &rack_enobuf_hw_boost_mult, 0, 1175 "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?"); 1176 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1177 SYSCTL_CHILDREN(rack_hw_pacing), 1178 OID_AUTO, "pace_enobuf_max", CTLFLAG_RW, 1179 &rack_enobuf_hw_max, 2, 1180 "What is the max boost the pacing time if we see a ENOBUFS?"); 1181 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1182 SYSCTL_CHILDREN(rack_hw_pacing), 1183 OID_AUTO, "pace_enobuf_min", CTLFLAG_RW, 1184 &rack_enobuf_hw_min, 2, 1185 "What is the min boost the pacing time if we see a ENOBUFS?"); 1186 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1187 SYSCTL_CHILDREN(rack_hw_pacing), 1188 OID_AUTO, "enable", CTLFLAG_RW, 1189 &rack_enable_hw_pacing, 0, 1190 "Should RACK attempt to use hw pacing?"); 1191 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1192 SYSCTL_CHILDREN(rack_hw_pacing), 1193 OID_AUTO, "rate_cap", CTLFLAG_RW, 1194 &rack_hw_rate_caps, 0, 1195 "Does the highest hardware pacing rate cap the rate we will send at??"); 1196 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1197 SYSCTL_CHILDREN(rack_hw_pacing), 1198 OID_AUTO, "uncap_per", CTLFLAG_RW, 1199 &rack_hw_rate_cap_per, 0, 1200 "If you go over b/w by this amount you will be uncapped (0 = never)"); 1201 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1202 SYSCTL_CHILDREN(rack_hw_pacing), 1203 OID_AUTO, "rate_min", CTLFLAG_RW, 1204 &rack_hw_rate_min, 0, 1205 "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?"); 1206 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1207 SYSCTL_CHILDREN(rack_hw_pacing), 1208 OID_AUTO, "rate_to_low", CTLFLAG_RW, 1209 &rack_hw_rate_to_low, 0, 1210 "If we fall below this rate, dis-engage hw pacing?"); 1211 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1212 SYSCTL_CHILDREN(rack_hw_pacing), 1213 OID_AUTO, "up_only", CTLFLAG_RW, 1214 &rack_hw_up_only, 0, 1215 "Do we allow hw pacing to lower the rate selected?"); 1216 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1217 SYSCTL_CHILDREN(rack_hw_pacing), 1218 OID_AUTO, "extra_mss_precise", CTLFLAG_RW, 1219 &rack_hw_pace_extra_slots, 0, 1220 "If the rates between software and hardware match precisely how many extra time_betweens do we get?"); 1221 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1222 SYSCTL_CHILDREN(rack_sysctl_root), 1223 OID_AUTO, 1224 "timely", 1225 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1226 "Rack Timely RTT Controls"); 1227 /* Timely based GP dynmics */ 1228 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1229 SYSCTL_CHILDREN(rack_timely), 1230 OID_AUTO, "upper", CTLFLAG_RW, 1231 &rack_gp_per_bw_mul_up, 2, 1232 "Rack timely upper range for equal b/w (in percentage)"); 1233 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1234 SYSCTL_CHILDREN(rack_timely), 1235 OID_AUTO, "lower", CTLFLAG_RW, 1236 &rack_gp_per_bw_mul_down, 4, 1237 "Rack timely lower range for equal b/w (in percentage)"); 1238 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1239 SYSCTL_CHILDREN(rack_timely), 1240 OID_AUTO, "rtt_max_mul", CTLFLAG_RW, 1241 &rack_gp_rtt_maxmul, 3, 1242 "Rack timely multiplier of lowest rtt for rtt_max"); 1243 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1244 SYSCTL_CHILDREN(rack_timely), 1245 OID_AUTO, "rtt_min_div", CTLFLAG_RW, 1246 &rack_gp_rtt_mindiv, 4, 1247 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1248 
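	/*
	 * Editor's note, illustrative only (derived from the descriptions
	 * above and below): with rtt_max_mul = 3 the "too much queueing"
	 * threshold is three times the lowest RTT seen, while the lower-RTT
	 * check is rtt + (rtt * rtt_min_mul / rtt_min_div), i.e. 1.25x the
	 * lowest RTT with the defaults of 1 and 4.  A 20 ms minimum RTT thus
	 * gives roughly a 25 ms "good" bound and a 60 ms "bad" bound; the
	 * decrease/increase knobs then shrink the GP multiplier by the beta
	 * value (80 => multiply by 0.8) or grow it by 2% per adjustment.
	 */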
SYSCTL_ADD_S32(&rack_sysctl_ctx, 1249 SYSCTL_CHILDREN(rack_timely), 1250 OID_AUTO, "rtt_min_mul", CTLFLAG_RW, 1251 &rack_gp_rtt_minmul, 1, 1252 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1253 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1254 SYSCTL_CHILDREN(rack_timely), 1255 OID_AUTO, "decrease", CTLFLAG_RW, 1256 &rack_gp_decrease_per, 80, 1257 "Rack timely Beta value 80 = .8 (scaled by 100)"); 1258 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1259 SYSCTL_CHILDREN(rack_timely), 1260 OID_AUTO, "increase", CTLFLAG_RW, 1261 &rack_gp_increase_per, 2, 1262 "Rack timely increase perentage of our GP multiplication factor"); 1263 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1264 SYSCTL_CHILDREN(rack_timely), 1265 OID_AUTO, "lowerbound", CTLFLAG_RW, 1266 &rack_per_lower_bound, 50, 1267 "Rack timely lowest percentage we allow GP multiplier to fall to"); 1268 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1269 SYSCTL_CHILDREN(rack_timely), 1270 OID_AUTO, "p5_upper", CTLFLAG_RW, 1271 &rack_gain_p5_ub, 250, 1272 "Profile 5 upper bound to timely gain"); 1273 1274 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1275 SYSCTL_CHILDREN(rack_timely), 1276 OID_AUTO, "upperboundss", CTLFLAG_RW, 1277 &rack_per_upper_bound_ss, 0, 1278 "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)"); 1279 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1280 SYSCTL_CHILDREN(rack_timely), 1281 OID_AUTO, "upperboundca", CTLFLAG_RW, 1282 &rack_per_upper_bound_ca, 0, 1283 "Rack timely highest percentage we allow GP multiplier to CA raise to (0 is no upperbound)"); 1284 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1285 SYSCTL_CHILDREN(rack_timely), 1286 OID_AUTO, "dynamicgp", CTLFLAG_RW, 1287 &rack_do_dyn_mul, 0, 1288 "Rack timely do we enable dynmaic timely goodput by default"); 1289 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1290 SYSCTL_CHILDREN(rack_timely), 1291 OID_AUTO, "no_rec_red", CTLFLAG_RW, 1292 &rack_gp_no_rec_chg, 1, 1293 "Rack timely do we prohibit the recovery multiplier from being lowered"); 1294 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1295 SYSCTL_CHILDREN(rack_timely), 1296 OID_AUTO, "red_clear_cnt", CTLFLAG_RW, 1297 &rack_timely_dec_clear, 6, 1298 "Rack timely what threshold do we count to before another boost during b/w decent"); 1299 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1300 SYSCTL_CHILDREN(rack_timely), 1301 OID_AUTO, "max_push_rise", CTLFLAG_RW, 1302 &rack_timely_max_push_rise, 3, 1303 "Rack timely how many times do we push up with b/w increase"); 1304 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1305 SYSCTL_CHILDREN(rack_timely), 1306 OID_AUTO, "max_push_drop", CTLFLAG_RW, 1307 &rack_timely_max_push_drop, 3, 1308 "Rack timely how many times do we push back on b/w decent"); 1309 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1310 SYSCTL_CHILDREN(rack_timely), 1311 OID_AUTO, "min_segs", CTLFLAG_RW, 1312 &rack_timely_min_segs, 4, 1313 "Rack timely when setting the cwnd what is the min num segments"); 1314 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1315 SYSCTL_CHILDREN(rack_timely), 1316 OID_AUTO, "noback_max", CTLFLAG_RW, 1317 &rack_use_max_for_nobackoff, 0, 1318 "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min"); 1319 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1320 SYSCTL_CHILDREN(rack_timely), 1321 OID_AUTO, "interim_timely_only", CTLFLAG_RW, 1322 &rack_timely_int_timely_only, 0, 1323 "Rack timely when doing interim timely's do we only do timely (no b/w consideration)"); 1324 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1325 SYSCTL_CHILDREN(rack_timely), 1326 OID_AUTO, "nonstop", CTLFLAG_RW, 1327 &rack_timely_no_stopping, 0, 1328 "Rack timely don't stop 
increase"); 1329 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1330 SYSCTL_CHILDREN(rack_timely), 1331 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW, 1332 &rack_down_raise_thresh, 100, 1333 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)"); 1334 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1335 SYSCTL_CHILDREN(rack_timely), 1336 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW, 1337 &rack_req_segs, 1, 1338 "Bottom dragging if not these many segments outstanding and room"); 1339 1340 /* TLP and Rack related parameters */ 1341 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1342 SYSCTL_CHILDREN(rack_sysctl_root), 1343 OID_AUTO, 1344 "tlp", 1345 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1346 "TLP and Rack related Controls"); 1347 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1348 SYSCTL_CHILDREN(rack_tlp), 1349 OID_AUTO, "use_rrr", CTLFLAG_RW, 1350 &use_rack_rr, 1, 1351 "Do we use Rack Rapid Recovery"); 1352 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1353 SYSCTL_CHILDREN(rack_tlp), 1354 OID_AUTO, "post_rec_labc", CTLFLAG_RW, 1355 &rack_max_abc_post_recovery, 2, 1356 "Since we do early recovery, do we override the l_abc to a value, if so what?"); 1357 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1358 SYSCTL_CHILDREN(rack_tlp), 1359 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW, 1360 &rack_non_rxt_use_cr, 0, 1361 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk"); 1362 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1363 SYSCTL_CHILDREN(rack_tlp), 1364 OID_AUTO, "tlpmethod", CTLFLAG_RW, 1365 &rack_tlp_threshold_use, TLP_USE_TWO_ONE, 1366 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); 1367 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1368 SYSCTL_CHILDREN(rack_tlp), 1369 OID_AUTO, "limit", CTLFLAG_RW, 1370 &rack_tlp_limit, 2, 1371 "How many TLP's can be sent without sending new data"); 1372 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1373 SYSCTL_CHILDREN(rack_tlp), 1374 OID_AUTO, "use_greater", CTLFLAG_RW, 1375 &rack_tlp_use_greater, 1, 1376 "Should we use the rack_rtt time if its greater than srtt"); 1377 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1378 SYSCTL_CHILDREN(rack_tlp), 1379 OID_AUTO, "tlpminto", CTLFLAG_RW, 1380 &rack_tlp_min, 10000, 1381 "TLP minimum timeout per the specification (in microseconds)"); 1382 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1383 SYSCTL_CHILDREN(rack_tlp), 1384 OID_AUTO, "send_oldest", CTLFLAG_RW, 1385 &rack_always_send_oldest, 0, 1386 "Should we always send the oldest TLP and RACK-TLP"); 1387 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1388 SYSCTL_CHILDREN(rack_tlp), 1389 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW, 1390 &rack_lower_cwnd_at_tlp, 0, 1391 "When a TLP completes a retran should we enter recovery"); 1392 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1393 SYSCTL_CHILDREN(rack_tlp), 1394 OID_AUTO, "reorder_thresh", CTLFLAG_RW, 1395 &rack_reorder_thresh, 2, 1396 "What factor for rack will be added when seeing reordering (shift right)"); 1397 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1398 SYSCTL_CHILDREN(rack_tlp), 1399 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW, 1400 &rack_tlp_thresh, 1, 1401 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); 1402 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1403 SYSCTL_CHILDREN(rack_tlp), 1404 OID_AUTO, "reorder_fade", CTLFLAG_RW, 1405 &rack_reorder_fade, 60000000, 1406 "Does reorder detection fade, if so how many microseconds (0 means never)"); 1407 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1408 SYSCTL_CHILDREN(rack_tlp), 1409 OID_AUTO, "pktdelay", CTLFLAG_RW, 1410 &rack_pkt_delay, 1000, 1411 "Extra RACK time (in microseconds) besides reordering thresh"); 1412 1413 /* Timer related controls */ 1414 rack_timers = 
SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1415 SYSCTL_CHILDREN(rack_sysctl_root), 1416 OID_AUTO, 1417 "timers", 1418 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1419 "Timer related controls"); 1420 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1421 SYSCTL_CHILDREN(rack_timers), 1422 OID_AUTO, "reset_ssth_rec_rto", CTLFLAG_RW, 1423 &rack_ssthresh_rest_rto_rec, 0, 1424 "When doing recovery -> rto -> recovery do we reset SSthresh?"); 1425 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1426 SYSCTL_CHILDREN(rack_timers), 1427 OID_AUTO, "scoreboard_thresh", CTLFLAG_RW, 1428 &rack_rxt_scoreboard_clear_thresh, 2, 1429 "How many RTO's are allowed before we clear the scoreboard"); 1430 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1431 SYSCTL_CHILDREN(rack_timers), 1432 OID_AUTO, "honor_hpts_min", CTLFLAG_RW, 1433 &rack_honors_hpts_min_to, 1, 1434 "Do rack pacing timers honor hpts min timeout"); 1435 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1436 SYSCTL_CHILDREN(rack_timers), 1437 OID_AUTO, "hpts_max_reduce", CTLFLAG_RW, 1438 &rack_max_reduce, 10, 1439 "Max percentage we will reduce slot by for pacing when we are behind"); 1440 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1441 SYSCTL_CHILDREN(rack_timers), 1442 OID_AUTO, "persmin", CTLFLAG_RW, 1443 &rack_persist_min, 250000, 1444 "What is the minimum time in microseconds between persists"); 1445 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1446 SYSCTL_CHILDREN(rack_timers), 1447 OID_AUTO, "persmax", CTLFLAG_RW, 1448 &rack_persist_max, 2000000, 1449 "What is the largest delay in microseconds between persists"); 1450 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1451 SYSCTL_CHILDREN(rack_timers), 1452 OID_AUTO, "delayed_ack", CTLFLAG_RW, 1453 &rack_delayed_ack_time, 40000, 1454 "Delayed ack time (40ms in microseconds)"); 1455 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1456 SYSCTL_CHILDREN(rack_timers), 1457 OID_AUTO, "minrto", CTLFLAG_RW, 1458 &rack_rto_min, 30000, 1459 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP"); 1460 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1461 SYSCTL_CHILDREN(rack_timers), 1462 OID_AUTO, "maxrto", CTLFLAG_RW, 1463 &rack_rto_max, 4000000, 1464 "Maximum RTO in microseconds -- should be at least as large as min_rto"); 1465 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1466 SYSCTL_CHILDREN(rack_timers), 1467 OID_AUTO, "minto", CTLFLAG_RW, 1468 &rack_min_to, 1000, 1469 "Minimum rack timeout in microseconds"); 1470 /* Measure controls */ 1471 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1472 SYSCTL_CHILDREN(rack_sysctl_root), 1473 OID_AUTO, 1474 "measure", 1475 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1476 "Measure related controls"); 1477 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1478 SYSCTL_CHILDREN(rack_measure), 1479 OID_AUTO, "wma_divisor", CTLFLAG_RW, 1480 &rack_wma_divisor, 8, 1481 "When doing b/w calculation what is the divisor for the WMA"); 1482 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1483 SYSCTL_CHILDREN(rack_measure), 1484 OID_AUTO, "end_cwnd", CTLFLAG_RW, 1485 &rack_cwnd_block_ends_measure, 0, 1486 "Does a cwnd just-return end the measurement window (app limited)"); 1487 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1488 SYSCTL_CHILDREN(rack_measure), 1489 OID_AUTO, "end_rwnd", CTLFLAG_RW, 1490 &rack_rwnd_block_ends_measure, 0, 1491 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); 1492 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1493 SYSCTL_CHILDREN(rack_measure), 1494 OID_AUTO, "min_target", CTLFLAG_RW, 1495 &rack_def_data_window, 20, 1496 "What is the minimum target window (in mss) for a GP measurements"); 1497 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1498 SYSCTL_CHILDREN(rack_measure), 1499 OID_AUTO, "goal_bdp", CTLFLAG_RW, 1500 
&rack_goal_bdp, 2, 1501 "What is the goal BDP to measure"); 1502 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1503 SYSCTL_CHILDREN(rack_measure), 1504 OID_AUTO, "min_srtts", CTLFLAG_RW, 1505 &rack_min_srtts, 1, 1506 "What is the goal BDP to measure"); 1507 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1508 SYSCTL_CHILDREN(rack_measure), 1509 OID_AUTO, "min_measure_tim", CTLFLAG_RW, 1510 &rack_min_measure_usec, 0, 1511 "What is the Minimum time time for a measurement if 0, this is off"); 1512 /* Features */ 1513 rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1514 SYSCTL_CHILDREN(rack_sysctl_root), 1515 OID_AUTO, 1516 "features", 1517 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1518 "Feature controls"); 1519 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1520 SYSCTL_CHILDREN(rack_features), 1521 OID_AUTO, "hybrid_set_maxseg", CTLFLAG_RW, 1522 &rack_hybrid_allow_set_maxseg, 0, 1523 "Should hybrid pacing allow the setmss command"); 1524 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1525 SYSCTL_CHILDREN(rack_features), 1526 OID_AUTO, "cmpack", CTLFLAG_RW, 1527 &rack_use_cmp_acks, 1, 1528 "Should RACK have LRO send compressed acks"); 1529 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1530 SYSCTL_CHILDREN(rack_features), 1531 OID_AUTO, "fsb", CTLFLAG_RW, 1532 &rack_use_fsb, 1, 1533 "Should RACK use the fast send block?"); 1534 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1535 SYSCTL_CHILDREN(rack_features), 1536 OID_AUTO, "rfo", CTLFLAG_RW, 1537 &rack_use_rfo, 1, 1538 "Should RACK use rack_fast_output()?"); 1539 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1540 SYSCTL_CHILDREN(rack_features), 1541 OID_AUTO, "rsmrfo", CTLFLAG_RW, 1542 &rack_use_rsm_rfo, 1, 1543 "Should RACK use rack_fast_rsm_output()?"); 1544 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1545 SYSCTL_CHILDREN(rack_features), 1546 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW, 1547 &rack_enable_mqueue_for_nonpaced, 0, 1548 "Should RACK use mbuf queuing for non-paced connections"); 1549 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1550 SYSCTL_CHILDREN(rack_features), 1551 OID_AUTO, "hystartplusplus", CTLFLAG_RW, 1552 &rack_do_hystart, 0, 1553 "Should RACK enable HyStart++ on connections?"); 1554 /* Policer detection */ 1555 rack_policing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1556 SYSCTL_CHILDREN(rack_sysctl_root), 1557 OID_AUTO, 1558 "policing", 1559 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1560 "policer detection"); 1561 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1562 SYSCTL_CHILDREN(rack_policing), 1563 OID_AUTO, "rxt_thresh", CTLFLAG_RW, 1564 &rack_policer_rxt_thresh, 0, 1565 "Percentage of retransmits we need to be a possible policer (499 = 49.9 percent)"); 1566 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1567 SYSCTL_CHILDREN(rack_policing), 1568 OID_AUTO, "avg_thresh", CTLFLAG_RW, 1569 &rack_policer_avg_thresh, 0, 1570 "What threshold of average retransmits needed to recover a lost packet (1 - 169 aka 21 = 2.1)?"); 1571 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1572 SYSCTL_CHILDREN(rack_policing), 1573 OID_AUTO, "med_thresh", CTLFLAG_RW, 1574 &rack_policer_med_thresh, 0, 1575 "What threshold of Median retransmits needed to recover a lost packet (1 - 16)?"); 1576 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1577 SYSCTL_CHILDREN(rack_policing), 1578 OID_AUTO, "data_thresh", CTLFLAG_RW, 1579 &rack_policer_data_thresh, 64000, 1580 "How many bytes must have gotten through before we can start doing policer detection?"); 1581 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1582 SYSCTL_CHILDREN(rack_policing), 1583 OID_AUTO, "bwcomp", CTLFLAG_RW, 1584 &rack_policing_do_bw_comp, 1, 1585 "Do we raise up low b/w so that at least pace_max_seg can be sent in the srtt?"); 1586 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1587 
SYSCTL_CHILDREN(rack_policing), 1588 OID_AUTO, "recmss", CTLFLAG_RW, 1589 &rack_req_del_mss, 18, 1590 "How many MSS must be delivered during recovery to engage policer detection?"); 1591 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1592 SYSCTL_CHILDREN(rack_policing), 1593 OID_AUTO, "res_div", CTLFLAG_RW, 1594 &rack_policer_bucket_reserve, 20, 1595 "What percentage is reserved in the policer bucket?"); 1596 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1597 SYSCTL_CHILDREN(rack_policing), 1598 OID_AUTO, "min_comp_bw", CTLFLAG_RW, 1599 &rack_pol_min_bw, 125000, 1600 "Do we have a min b/w for b/w compensation (0 = no)?"); 1601 /* Misc rack controls */ 1602 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1603 SYSCTL_CHILDREN(rack_sysctl_root), 1604 OID_AUTO, 1605 "misc", 1606 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1607 "Misc related controls"); 1608 #ifdef TCP_ACCOUNTING 1609 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1610 SYSCTL_CHILDREN(rack_misc), 1611 OID_AUTO, "tcp_acct", CTLFLAG_RW, 1612 &rack_tcp_accounting, 0, 1613 "Should we turn on TCP accounting for all rack sessions?"); 1614 #endif 1615 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1616 SYSCTL_CHILDREN(rack_misc), 1617 OID_AUTO, "dnd", CTLFLAG_RW, 1618 &rack_dnd_default, 0, 1619 "Do not disturb default for rack_rrr = 3"); 1620 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1621 SYSCTL_CHILDREN(rack_misc), 1622 OID_AUTO, "sad_seg_per", CTLFLAG_RW, 1623 &sad_seg_size_per, 800, 1624 "Percentage of segment size needed in a sack 800 = 80.0?"); 1625 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1626 SYSCTL_CHILDREN(rack_misc), 1627 OID_AUTO, "rxt_controls", CTLFLAG_RW, 1628 &rack_rxt_controls, 0, 1629 "Retransmit sending size controls (valid values 0, 1, 2 default=1)?"); 1630 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1631 SYSCTL_CHILDREN(rack_misc), 1632 OID_AUTO, "rack_hibeta", CTLFLAG_RW, 1633 &rack_hibeta_setting, 0, 1634 "Do we ue a high beta (80 instead of 50)?"); 1635 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1636 SYSCTL_CHILDREN(rack_misc), 1637 OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW, 1638 &rack_apply_rtt_with_reduced_conf, 0, 1639 "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?"); 1640 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1641 SYSCTL_CHILDREN(rack_misc), 1642 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW, 1643 &rack_dsack_std_based, 3, 1644 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?"); 1645 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1646 SYSCTL_CHILDREN(rack_misc), 1647 OID_AUTO, "prr_addback_max", CTLFLAG_RW, 1648 &rack_prr_addbackmax, 2, 1649 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?"); 1650 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1651 SYSCTL_CHILDREN(rack_misc), 1652 OID_AUTO, "stats_gets_ms", CTLFLAG_RW, 1653 &rack_stats_gets_ms_rtt, 1, 1654 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); 1655 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1656 SYSCTL_CHILDREN(rack_misc), 1657 OID_AUTO, "clientlowbuf", CTLFLAG_RW, 1658 &rack_client_low_buf, 0, 1659 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?"); 1660 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1661 SYSCTL_CHILDREN(rack_misc), 1662 OID_AUTO, "defprofile", CTLFLAG_RW, 1663 &rack_def_profile, 0, 1664 "Should RACK use a default profile (0=no, num == profile num)?"); 1665 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1666 SYSCTL_CHILDREN(rack_misc), 1667 OID_AUTO, "shared_cwnd", CTLFLAG_RW, 1668 &rack_enable_shared_cwnd, 1, 1669 "Should RACK try to use the shared cwnd on connections 
where allowed"); 1670 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1671 SYSCTL_CHILDREN(rack_misc), 1672 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW, 1673 &rack_limits_scwnd, 1, 1674 "Should RACK place low end time limits on the shared cwnd feature"); 1675 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1676 SYSCTL_CHILDREN(rack_misc), 1677 OID_AUTO, "no_prr", CTLFLAG_RW, 1678 &rack_disable_prr, 0, 1679 "Should RACK not use prr and only pace (must have pacing on)"); 1680 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1681 SYSCTL_CHILDREN(rack_misc), 1682 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1683 &rack_verbose_logging, 0, 1684 "Should RACK black box logging be verbose"); 1685 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1686 SYSCTL_CHILDREN(rack_misc), 1687 OID_AUTO, "data_after_close", CTLFLAG_RW, 1688 &rack_ignore_data_after_close, 1, 1689 "Do we hold off sending a RST until all pending data is ack'd"); 1690 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1691 SYSCTL_CHILDREN(rack_misc), 1692 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1693 &rack_sack_not_required, 1, 1694 "Do we allow rack to run on connections not supporting SACK"); 1695 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1696 SYSCTL_CHILDREN(rack_misc), 1697 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 1698 &rack_send_a_lot_in_prr, 1, 1699 "Send a lot in prr"); 1700 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1701 SYSCTL_CHILDREN(rack_misc), 1702 OID_AUTO, "autoscale", CTLFLAG_RW, 1703 &rack_autosndbuf_inc, 20, 1704 "What percentage should rack scale up its snd buffer by?"); 1705 1706 1707 /* Sack Attacker detection stuff */ 1708 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1709 SYSCTL_CHILDREN(rack_attack), 1710 OID_AUTO, "merge_out", CTLFLAG_RW, 1711 &rack_merge_out_sacks_on_attack, 0, 1712 "Do we merge the sendmap when we decide we are being attacked?"); 1713 1714 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1715 SYSCTL_CHILDREN(rack_attack), 1716 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 1717 &rack_highest_sack_thresh_seen, 0, 1718 "Highest sack to ack ratio seen"); 1719 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1720 SYSCTL_CHILDREN(rack_attack), 1721 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW, 1722 &rack_highest_move_thresh_seen, 0, 1723 "Highest move to non-move ratio seen"); 1724 rack_ack_total = counter_u64_alloc(M_WAITOK); 1725 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1726 SYSCTL_CHILDREN(rack_attack), 1727 OID_AUTO, "acktotal", CTLFLAG_RD, 1728 &rack_ack_total, 1729 "Total number of Ack's"); 1730 rack_express_sack = counter_u64_alloc(M_WAITOK); 1731 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1732 SYSCTL_CHILDREN(rack_attack), 1733 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1734 &rack_express_sack, 1735 "Total expresss number of Sack's"); 1736 rack_sack_total = counter_u64_alloc(M_WAITOK); 1737 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1738 SYSCTL_CHILDREN(rack_attack), 1739 OID_AUTO, "sacktotal", CTLFLAG_RD, 1740 &rack_sack_total, 1741 "Total number of SACKs"); 1742 rack_move_none = counter_u64_alloc(M_WAITOK); 1743 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1744 SYSCTL_CHILDREN(rack_attack), 1745 OID_AUTO, "move_none", CTLFLAG_RD, 1746 &rack_move_none, 1747 "Total number of SACK index reuse of positions under threshold"); 1748 rack_move_some = counter_u64_alloc(M_WAITOK); 1749 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1750 SYSCTL_CHILDREN(rack_attack), 1751 OID_AUTO, "move_some", CTLFLAG_RD, 1752 &rack_move_some, 1753 "Total number of SACK index reuse of positions over threshold"); 1754 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 1755 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1756 SYSCTL_CHILDREN(rack_attack), 1757 OID_AUTO, "attacks", 
CTLFLAG_RD, 1758 &rack_sack_attacks_detected, 1759 "Total number of SACK attackers that had sack disabled"); 1760 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 1761 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1762 SYSCTL_CHILDREN(rack_attack), 1763 OID_AUTO, "reversed", CTLFLAG_RD, 1764 &rack_sack_attacks_reversed, 1765 "Total number of SACK attackers that were later determined false positive"); 1766 rack_sack_attacks_suspect = counter_u64_alloc(M_WAITOK); 1767 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1768 SYSCTL_CHILDREN(rack_attack), 1769 OID_AUTO, "suspect", CTLFLAG_RD, 1770 &rack_sack_attacks_suspect, 1771 "Total number of SACKs that triggered early detection"); 1772 1773 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 1774 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1775 SYSCTL_CHILDREN(rack_attack), 1776 OID_AUTO, "nextmerge", CTLFLAG_RD, 1777 &rack_sack_used_next_merge, 1778 "Total number of times we used the next merge"); 1779 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 1780 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1781 SYSCTL_CHILDREN(rack_attack), 1782 OID_AUTO, "prevmerge", CTLFLAG_RD, 1783 &rack_sack_used_prev_merge, 1784 "Total number of times we used the prev merge"); 1785 /* Counters */ 1786 rack_total_bytes = counter_u64_alloc(M_WAITOK); 1787 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1788 SYSCTL_CHILDREN(rack_counters), 1789 OID_AUTO, "totalbytes", CTLFLAG_RD, 1790 &rack_total_bytes, 1791 "Total number of bytes sent"); 1792 rack_fto_send = counter_u64_alloc(M_WAITOK); 1793 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1794 SYSCTL_CHILDREN(rack_counters), 1795 OID_AUTO, "fto_send", CTLFLAG_RD, 1796 &rack_fto_send, "Total number of rack_fast_output sends"); 1797 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK); 1798 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1799 SYSCTL_CHILDREN(rack_counters), 1800 OID_AUTO, "fto_rsm_send", CTLFLAG_RD, 1801 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends"); 1802 rack_nfto_resend = counter_u64_alloc(M_WAITOK); 1803 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1804 SYSCTL_CHILDREN(rack_counters), 1805 OID_AUTO, "nfto_resend", CTLFLAG_RD, 1806 &rack_nfto_resend, "Total number of rack_output retransmissions"); 1807 rack_non_fto_send = counter_u64_alloc(M_WAITOK); 1808 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1809 SYSCTL_CHILDREN(rack_counters), 1810 OID_AUTO, "nfto_send", CTLFLAG_RD, 1811 &rack_non_fto_send, "Total number of rack_output first sends"); 1812 rack_extended_rfo = counter_u64_alloc(M_WAITOK); 1813 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1814 SYSCTL_CHILDREN(rack_counters), 1815 OID_AUTO, "rfo_extended", CTLFLAG_RD, 1816 &rack_extended_rfo, "Total number of times we extended rfo"); 1817 1818 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK); 1819 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1820 SYSCTL_CHILDREN(rack_counters), 1821 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD, 1822 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing"); 1823 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK); 1824 1825 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1826 SYSCTL_CHILDREN(rack_counters), 1827 OID_AUTO, "hwpace_lost", CTLFLAG_RD, 1828 &rack_hw_pace_lost, "Total number of times we failed to initialize hw pacing"); 1829 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 1830 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1831 SYSCTL_CHILDREN(rack_counters), 1832 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 1833 &rack_tlp_tot, 1834 "Total number of tail loss probe expirations"); 1835 rack_tlp_newdata = 
counter_u64_alloc(M_WAITOK); 1836 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1837 SYSCTL_CHILDREN(rack_counters), 1838 OID_AUTO, "tlp_new", CTLFLAG_RD, 1839 &rack_tlp_newdata, 1840 "Total number of tail loss probes sending new data"); 1841 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 1842 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1843 SYSCTL_CHILDREN(rack_counters), 1844 OID_AUTO, "tlp_retran", CTLFLAG_RD, 1845 &rack_tlp_retran, 1846 "Total number of tail loss probes sending retransmitted data"); 1847 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 1848 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1849 SYSCTL_CHILDREN(rack_counters), 1850 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 1851 &rack_tlp_retran_bytes, 1852 "Total bytes of tail loss probes sending retransmitted data"); 1853 rack_to_tot = counter_u64_alloc(M_WAITOK); 1854 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1855 SYSCTL_CHILDREN(rack_counters), 1856 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 1857 &rack_to_tot, 1858 "Total number of times the rack timeout expired"); 1859 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 1860 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1861 SYSCTL_CHILDREN(rack_counters), 1862 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 1863 &rack_saw_enobuf, 1864 "Total number of times a send returned ENOBUFS for non-hdwr paced connections"); 1865 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK); 1866 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1867 SYSCTL_CHILDREN(rack_counters), 1868 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD, 1869 &rack_saw_enobuf_hw, 1870 "Total number of times a send returned ENOBUFS for hdwr paced connections"); 1871 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 1872 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1873 SYSCTL_CHILDREN(rack_counters), 1874 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 1875 &rack_saw_enetunreach, 1876 "Total number of times a send received an ENETUNREACH"); 1877 rack_hot_alloc = counter_u64_alloc(M_WAITOK); 1878 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1879 SYSCTL_CHILDREN(rack_counters), 1880 OID_AUTO, "alloc_hot", CTLFLAG_RD, 1881 &rack_hot_alloc, 1882 "Total allocations from the top of our list"); 1883 tcp_policer_detected = counter_u64_alloc(M_WAITOK); 1884 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1885 SYSCTL_CHILDREN(rack_counters), 1886 OID_AUTO, "policer_detected", CTLFLAG_RD, 1887 &tcp_policer_detected, 1888 "Total number of policer detections"); 1889 1890 rack_to_alloc = counter_u64_alloc(M_WAITOK); 1891 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1892 SYSCTL_CHILDREN(rack_counters), 1893 OID_AUTO, "allocs", CTLFLAG_RD, 1894 &rack_to_alloc, 1895 "Total allocations of tracking structures"); 1896 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 1897 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1898 SYSCTL_CHILDREN(rack_counters), 1899 OID_AUTO, "allochard", CTLFLAG_RD, 1900 &rack_to_alloc_hard, 1901 "Total allocations done with sleeping the hard way"); 1902 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 1903 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1904 SYSCTL_CHILDREN(rack_counters), 1905 OID_AUTO, "allocemerg", CTLFLAG_RD, 1906 &rack_to_alloc_emerg, 1907 "Total allocations done from emergency cache"); 1908 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 1909 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1910 SYSCTL_CHILDREN(rack_counters), 1911 OID_AUTO, "alloc_limited", CTLFLAG_RD, 1912 &rack_to_alloc_limited, 1913 "Total allocations dropped due to limit"); 1914 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 1915 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1916 SYSCTL_CHILDREN(rack_counters), 
1917 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 1918 &rack_alloc_limited_conns, 1919 "Connections with allocations dropped due to limit"); 1920 rack_split_limited = counter_u64_alloc(M_WAITOK); 1921 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1922 SYSCTL_CHILDREN(rack_counters), 1923 OID_AUTO, "split_limited", CTLFLAG_RD, 1924 &rack_split_limited, 1925 "Split allocations dropped due to limit"); 1926 rack_rxt_clamps_cwnd = counter_u64_alloc(M_WAITOK); 1927 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1928 SYSCTL_CHILDREN(rack_counters), 1929 OID_AUTO, "rxt_clamps_cwnd", CTLFLAG_RD, 1930 &rack_rxt_clamps_cwnd, 1931 "Number of times that excessive rxt clamped the cwnd down"); 1932 rack_rxt_clamps_cwnd_uniq = counter_u64_alloc(M_WAITOK); 1933 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1934 SYSCTL_CHILDREN(rack_counters), 1935 OID_AUTO, "rxt_clamps_cwnd_uniq", CTLFLAG_RD, 1936 &rack_rxt_clamps_cwnd_uniq, 1937 "Number of connections that have had excessive rxt clamped the cwnd down"); 1938 rack_persists_sends = counter_u64_alloc(M_WAITOK); 1939 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1940 SYSCTL_CHILDREN(rack_counters), 1941 OID_AUTO, "persist_sends", CTLFLAG_RD, 1942 &rack_persists_sends, 1943 "Number of times we sent a persist probe"); 1944 rack_persists_acks = counter_u64_alloc(M_WAITOK); 1945 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1946 SYSCTL_CHILDREN(rack_counters), 1947 OID_AUTO, "persist_acks", CTLFLAG_RD, 1948 &rack_persists_acks, 1949 "Number of times a persist probe was acked"); 1950 rack_persists_loss = counter_u64_alloc(M_WAITOK); 1951 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1952 SYSCTL_CHILDREN(rack_counters), 1953 OID_AUTO, "persist_loss", CTLFLAG_RD, 1954 &rack_persists_loss, 1955 "Number of times we detected a lost persist probe (no ack)"); 1956 rack_persists_lost_ends = counter_u64_alloc(M_WAITOK); 1957 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1958 SYSCTL_CHILDREN(rack_counters), 1959 OID_AUTO, "persist_loss_ends", CTLFLAG_RD, 1960 &rack_persists_lost_ends, 1961 "Number of lost persist probe (no ack) that the run ended with a PERSIST abort"); 1962 #ifdef INVARIANTS 1963 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK); 1964 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1965 SYSCTL_CHILDREN(rack_counters), 1966 OID_AUTO, "map_adjust_req", CTLFLAG_RD, 1967 &rack_adjust_map_bw, 1968 "Number of times we hit the case where the sb went up and down on a sendmap entry"); 1969 #endif 1970 rack_multi_single_eq = counter_u64_alloc(M_WAITOK); 1971 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1972 SYSCTL_CHILDREN(rack_counters), 1973 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD, 1974 &rack_multi_single_eq, 1975 "Number of compressed acks total represented"); 1976 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK); 1977 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1978 SYSCTL_CHILDREN(rack_counters), 1979 OID_AUTO, "cmp_ack_not", CTLFLAG_RD, 1980 &rack_proc_non_comp_ack, 1981 "Number of non compresseds acks that we processed"); 1982 1983 1984 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 1985 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1986 SYSCTL_CHILDREN(rack_counters), 1987 OID_AUTO, "sack_long", CTLFLAG_RD, 1988 &rack_sack_proc_all, 1989 "Total times we had to walk whole list for sack processing"); 1990 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 1991 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1992 SYSCTL_CHILDREN(rack_counters), 1993 OID_AUTO, "sack_restart", CTLFLAG_RD, 1994 &rack_sack_proc_restart, 1995 "Total times we had to walk whole list due to a restart"); 1996 rack_sack_proc_short = 
counter_u64_alloc(M_WAITOK); 1997 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1998 SYSCTL_CHILDREN(rack_counters), 1999 OID_AUTO, "sack_short", CTLFLAG_RD, 2000 &rack_sack_proc_short, 2001 "Total times we took shortcut for sack processing"); 2002 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK); 2003 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2004 SYSCTL_CHILDREN(rack_attack), 2005 OID_AUTO, "skipacked", CTLFLAG_RD, 2006 &rack_sack_skipped_acked, 2007 "Total number of times we skipped previously sacked"); 2008 rack_sack_splits = counter_u64_alloc(M_WAITOK); 2009 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2010 SYSCTL_CHILDREN(rack_attack), 2011 OID_AUTO, "ofsplit", CTLFLAG_RD, 2012 &rack_sack_splits, 2013 "Total number of times we did the old fashion tree split"); 2014 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 2015 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2016 SYSCTL_CHILDREN(rack_counters), 2017 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 2018 &rack_input_idle_reduces, 2019 "Total number of idle reductions on input"); 2020 rack_collapsed_win_seen = counter_u64_alloc(M_WAITOK); 2021 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2022 SYSCTL_CHILDREN(rack_counters), 2023 OID_AUTO, "collapsed_win_seen", CTLFLAG_RD, 2024 &rack_collapsed_win_seen, 2025 "Total number of collapsed window events seen (where our window shrinks)"); 2026 2027 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 2028 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2029 SYSCTL_CHILDREN(rack_counters), 2030 OID_AUTO, "collapsed_win", CTLFLAG_RD, 2031 &rack_collapsed_win, 2032 "Total number of collapsed window events where we mark packets"); 2033 rack_collapsed_win_rxt = counter_u64_alloc(M_WAITOK); 2034 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2035 SYSCTL_CHILDREN(rack_counters), 2036 OID_AUTO, "collapsed_win_rxt", CTLFLAG_RD, 2037 &rack_collapsed_win_rxt, 2038 "Total number of packets that were retransmitted"); 2039 rack_collapsed_win_rxt_bytes = counter_u64_alloc(M_WAITOK); 2040 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2041 SYSCTL_CHILDREN(rack_counters), 2042 OID_AUTO, "collapsed_win_bytes", CTLFLAG_RD, 2043 &rack_collapsed_win_rxt_bytes, 2044 "Total number of bytes that were retransmitted"); 2045 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 2046 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2047 SYSCTL_CHILDREN(rack_counters), 2048 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 2049 &rack_try_scwnd, 2050 "Total number of scwnd attempts"); 2051 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 2052 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 2053 OID_AUTO, "outsize", CTLFLAG_RD, 2054 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 2055 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 2056 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 2057 OID_AUTO, "opts", CTLFLAG_RD, 2058 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 2059 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 2060 SYSCTL_CHILDREN(rack_sysctl_root), 2061 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 2062 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 2063 } 2064 2065 static uint32_t 2066 rc_init_window(struct tcp_rack *rack) 2067 { 2068 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 2069 2070 } 2071 2072 static uint64_t 2073 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 2074 { 2075 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 2076 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 2077 else if (rack->r_ctl.cwnd_to_use < 
rack->rc_tp->snd_ssthresh) 2078 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 2079 else 2080 return (rack->r_ctl.rc_fixed_pacing_rate_ca); 2081 } 2082 2083 static void 2084 rack_log_hybrid_bw(struct tcp_rack *rack, uint32_t seq, uint64_t cbw, uint64_t tim, 2085 uint64_t data, uint8_t mod, uint16_t aux, 2086 struct tcp_sendfile_track *cur, int line) 2087 { 2088 #ifdef TCP_REQUEST_TRK 2089 int do_log = 0; 2090 2091 /* 2092 * The rate cap one is noisy and only should come out when normal BB logging 2093 * is enabled, the other logs (not RATE_CAP and NOT CAP_CALC) only come out 2094 * once per chunk and make up the BBpoint that can be turned on by the client. 2095 */ 2096 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) { 2097 /* 2098 * The very noisy two need to only come out when 2099 * we have verbose logging on. 2100 */ 2101 if (rack_verbose_logging != 0) 2102 do_log = tcp_bblogging_on(rack->rc_tp); 2103 else 2104 do_log = 0; 2105 } else if (mod != HYBRID_LOG_BW_MEASURE) { 2106 /* 2107 * All other less noisy logs here except the measure which 2108 * also needs to come out on the point and the log. 2109 */ 2110 do_log = tcp_bblogging_on(rack->rc_tp); 2111 } else { 2112 do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING); 2113 } 2114 2115 if (do_log) { 2116 union tcp_log_stackspecific log; 2117 struct timeval tv; 2118 uint64_t lt_bw; 2119 2120 /* Convert our ms to a microsecond */ 2121 memset(&log, 0, sizeof(log)); 2122 2123 log.u_bbr.cwnd_gain = line; 2124 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2125 log.u_bbr.rttProp = tim; 2126 log.u_bbr.bw_inuse = cbw; 2127 log.u_bbr.delRate = rack_get_gp_est(rack); 2128 lt_bw = rack_get_lt_bw(rack); 2129 log.u_bbr.flex1 = seq; 2130 log.u_bbr.pacing_gain = aux; 2131 /* lt_bw = < flex3 | flex2 > */ 2132 log.u_bbr.flex2 = (uint32_t)(lt_bw & 0x00000000ffffffff); 2133 log.u_bbr.flex3 = (uint32_t)((lt_bw >> 32) & 0x00000000ffffffff); 2134 /* Record the last obtained us rtt in inflight */ 2135 if (cur == NULL) { 2136 /* Make sure we are looking at the right log if an overide comes in */ 2137 cur = rack->r_ctl.rc_last_sft; 2138 } 2139 if (rack->r_ctl.rack_rs.rs_flags != RACK_RTT_EMPTY) 2140 log.u_bbr.inflight = rack->r_ctl.rack_rs.rs_us_rtt; 2141 else { 2142 /* Use the last known rtt i.e. 
the rack-rtt */ 2143 log.u_bbr.inflight = rack->rc_rack_rtt; 2144 } 2145 if (cur != NULL) { 2146 uint64_t off; 2147 2148 log.u_bbr.cur_del_rate = cur->deadline; 2149 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) { 2150 /* start = < lost | pkt_epoch > */ 2151 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); 2152 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2153 log.u_bbr.flex6 = cur->start_seq; 2154 log.u_bbr.pkts_out = cur->end_seq; 2155 } else { 2156 /* start = < lost | pkt_epoch > */ 2157 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); 2158 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2159 /* end = < pkts_out | flex6 > */ 2160 log.u_bbr.flex6 = (uint32_t)(cur->end & 0x00000000ffffffff); 2161 log.u_bbr.pkts_out = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); 2162 } 2163 /* first_send = <lt_epoch | epoch> */ 2164 log.u_bbr.epoch = (uint32_t)(cur->first_send & 0x00000000ffffffff); 2165 log.u_bbr.lt_epoch = (uint32_t)((cur->first_send >> 32) & 0x00000000ffffffff); 2166 /* localtime = <delivered | applimited>*/ 2167 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); 2168 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 2169 #ifdef TCP_REQUEST_TRK 2170 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 2171 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 2172 #endif 2173 log.u_bbr.inhpts = 1; 2174 log.u_bbr.flex4 = (uint32_t)(rack->rc_tp->t_sndbytes - cur->sent_at_fs); 2175 log.u_bbr.flex5 = (uint32_t)(rack->rc_tp->t_snd_rxt_bytes - cur->rxt_at_fs); 2176 log.u_bbr.flex7 = (uint16_t)cur->hybrid_flags; 2177 } else { 2178 log.u_bbr.flex7 = 0xffff; 2179 log.u_bbr.cur_del_rate = 0xffffffffffffffff; 2180 } 2181 /* 2182 * Compose bbr_state to be a bit wise 0000ADHF 2183 * where A is the always_pace flag 2184 * where D is the dgp_on flag 2185 * where H is the hybrid_mode on flag 2186 * where F is the use_fixed_rate flag. 2187 */ 2188 log.u_bbr.bbr_state = rack->rc_always_pace; 2189 log.u_bbr.bbr_state <<= 1; 2190 log.u_bbr.bbr_state |= rack->dgp_on; 2191 log.u_bbr.bbr_state <<= 1; 2192 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 2193 log.u_bbr.bbr_state <<= 1; 2194 log.u_bbr.bbr_state |= rack->use_fixed_rate; 2195 log.u_bbr.flex8 = mod; 2196 tcp_log_event(rack->rc_tp, NULL, 2197 &rack->rc_inp->inp_socket->so_rcv, 2198 &rack->rc_inp->inp_socket->so_snd, 2199 TCP_HYBRID_PACING_LOG, 0, 2200 0, &log, false, NULL, __func__, __LINE__, &tv); 2201 2202 } 2203 #endif 2204 } 2205 2206 #ifdef TCP_REQUEST_TRK 2207 static void 2208 rack_log_hybrid_sends(struct tcp_rack *rack, struct tcp_sendfile_track *cur, int line) 2209 { 2210 if (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) { 2211 union tcp_log_stackspecific log; 2212 struct timeval tv; 2213 uint64_t off; 2214 2215 /* Convert our ms to a microsecond */ 2216 memset(&log, 0, sizeof(log)); 2217 2218 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2219 log.u_bbr.delRate = cur->sent_at_fs; 2220 2221 if ((cur->flags & TCP_TRK_TRACK_FLG_LSND) == 0) { 2222 /* 2223 * We did not get a new Rules Applied to set so 2224 * no overlapping send occured, this means the 2225 * current byte counts are correct. 2226 */ 2227 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 2228 log.u_bbr.rttProp = rack->rc_tp->t_snd_rxt_bytes; 2229 } else { 2230 /* 2231 * Overlapping send case, we switched to a new 2232 * send and did a rules applied. 
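 * In that overlapping case we log the counts snapshotted when the rules were applied (cur->sent_at_ls and cur->rxt_at_ls, used just below) rather than the connection-wide totals.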
2233 */ 2234 log.u_bbr.cur_del_rate = cur->sent_at_ls; 2235 log.u_bbr.rttProp = cur->rxt_at_ls; 2236 } 2237 log.u_bbr.bw_inuse = cur->rxt_at_fs; 2238 log.u_bbr.cwnd_gain = line; 2239 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 2240 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 2241 /* start = < flex1 | flex2 > */ 2242 log.u_bbr.flex2 = (uint32_t)(cur->start & 0x00000000ffffffff); 2243 log.u_bbr.flex1 = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2244 /* end = < flex3 | flex4 > */ 2245 log.u_bbr.flex4 = (uint32_t)(cur->end & 0x00000000ffffffff); 2246 log.u_bbr.flex3 = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); 2247 2248 /* localtime = <delivered | applimited>*/ 2249 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); 2250 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 2251 /* client timestamp = <lt_epoch | epoch>*/ 2252 log.u_bbr.epoch = (uint32_t)(cur->timestamp & 0x00000000ffffffff); 2253 log.u_bbr.lt_epoch = (uint32_t)((cur->timestamp >> 32) & 0x00000000ffffffff); 2254 /* now set all the flags in */ 2255 log.u_bbr.pkts_out = cur->hybrid_flags; 2256 log.u_bbr.lost = cur->playout_ms; 2257 log.u_bbr.flex6 = cur->flags; 2258 /* 2259 * Last send time = <flex5 | pkt_epoch> note we do not distinguish cases 2260 * where a false retransmit occurred so first_send <-> lastsend may 2261 * include longer time then it actually took if we have a false rxt. 2262 */ 2263 log.u_bbr.pkt_epoch = (uint32_t)(rack->r_ctl.last_tmit_time_acked & 0x00000000ffffffff); 2264 log.u_bbr.flex5 = (uint32_t)((rack->r_ctl.last_tmit_time_acked >> 32) & 0x00000000ffffffff); 2265 /* 2266 * Compose bbr_state to be a bit wise 0000ADHF 2267 * where A is the always_pace flag 2268 * where D is the dgp_on flag 2269 * where H is the hybrid_mode on flag 2270 * where F is the use_fixed_rate flag. 
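 * For example, always_pace=1, dgp_on=0, hybrid_mode=1 and use_fixed_rate=0 compose to 0b1010 (0xa).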
2271 */ 2272 log.u_bbr.bbr_state = rack->rc_always_pace; 2273 log.u_bbr.bbr_state <<= 1; 2274 log.u_bbr.bbr_state |= rack->dgp_on; 2275 log.u_bbr.bbr_state <<= 1; 2276 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 2277 log.u_bbr.bbr_state <<= 1; 2278 log.u_bbr.bbr_state |= rack->use_fixed_rate; 2279 2280 log.u_bbr.flex8 = HYBRID_LOG_SENT_LOST; 2281 tcp_log_event(rack->rc_tp, NULL, 2282 &rack->rc_inp->inp_socket->so_rcv, 2283 &rack->rc_inp->inp_socket->so_snd, 2284 TCP_HYBRID_PACING_LOG, 0, 2285 0, &log, false, NULL, __func__, __LINE__, &tv); 2286 } 2287 } 2288 #endif 2289 2290 static inline uint64_t 2291 rack_compensate_for_linerate(struct tcp_rack *rack, uint64_t bw) 2292 { 2293 uint64_t ret_bw, ether; 2294 uint64_t u_segsiz; 2295 2296 ether = rack->rc_tp->t_maxseg + sizeof(struct tcphdr); 2297 if (rack->r_is_v6){ 2298 #ifdef INET6 2299 ether += sizeof(struct ip6_hdr); 2300 #endif 2301 ether += 14; /* eheader size 6+6+2 */ 2302 } else { 2303 #ifdef INET 2304 ether += sizeof(struct ip); 2305 #endif 2306 ether += 14; /* eheader size 6+6+2 */ 2307 } 2308 u_segsiz = (uint64_t)min(ctf_fixed_maxseg(rack->rc_tp), rack->r_ctl.rc_pace_min_segs); 2309 ret_bw = bw; 2310 ret_bw *= ether; 2311 ret_bw /= u_segsiz; 2312 return (ret_bw); 2313 } 2314 2315 static void 2316 rack_rate_cap_bw(struct tcp_rack *rack, uint64_t *bw, int *capped) 2317 { 2318 #ifdef TCP_REQUEST_TRK 2319 struct timeval tv; 2320 uint64_t timenow, timeleft, lenleft, lengone, calcbw; 2321 #endif 2322 2323 if (rack->r_ctl.bw_rate_cap == 0) 2324 return; 2325 #ifdef TCP_REQUEST_TRK 2326 if (rack->rc_catch_up && rack->rc_hybrid_mode && 2327 (rack->r_ctl.rc_last_sft != NULL)) { 2328 /* 2329 * We have a dynamic cap. The original target 2330 * is in bw_rate_cap, but we need to look at 2331 * how long it is until we hit the deadline. 2332 */ 2333 struct tcp_sendfile_track *ent; 2334 2335 ent = rack->r_ctl.rc_last_sft; 2336 microuptime(&tv); 2337 timenow = tcp_tv_to_lusectick(&tv); 2338 if (timenow >= ent->deadline) { 2339 /* No time left we do DGP only */ 2340 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2341 0, 0, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__); 2342 rack->r_ctl.bw_rate_cap = 0; 2343 return; 2344 } 2345 /* We have the time */ 2346 timeleft = rack->r_ctl.rc_last_sft->deadline - timenow; 2347 if (timeleft < HPTS_MSEC_IN_SEC) { 2348 /* If there is less than a ms left just use DGPs rate */ 2349 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2350 0, timeleft, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__); 2351 rack->r_ctl.bw_rate_cap = 0; 2352 return; 2353 } 2354 /* 2355 * Now lets find the amount of data left to send. 2356 * 2357 * Now ideally we want to use the end_seq to figure out how much more 2358 * but it might not be possible (only if we have the TRACK_FG_COMP on the entry.. 2359 */ 2360 if (ent->flags & TCP_TRK_TRACK_FLG_COMP) { 2361 if (SEQ_GT(ent->end_seq, rack->rc_tp->snd_una)) 2362 lenleft = ent->end_seq - rack->rc_tp->snd_una; 2363 else { 2364 /* TSNH, we should catch it at the send */ 2365 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2366 0, timeleft, 0, HYBRID_LOG_CAPERROR, 0, ent, __LINE__); 2367 rack->r_ctl.bw_rate_cap = 0; 2368 return; 2369 } 2370 } else { 2371 /* 2372 * The hard way, figure out how much is gone and then 2373 * take that away from the total the client asked for 2374 * (thats off by tls overhead if this is tls). 
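 * That is, lengone = snd_una - start_seq (once snd_una has passed start_seq) and lenleft = (end - start) - lengone, as computed below.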
2375 */ 2376 if (SEQ_GT(rack->rc_tp->snd_una, ent->start_seq)) 2377 lengone = rack->rc_tp->snd_una - ent->start_seq; 2378 else 2379 lengone = 0; 2380 if (lengone < (ent->end - ent->start)) 2381 lenleft = (ent->end - ent->start) - lengone; 2382 else { 2383 /* TSNH, we should catch it at the send */ 2384 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2385 0, timeleft, lengone, HYBRID_LOG_CAPERROR, 0, ent, __LINE__); 2386 rack->r_ctl.bw_rate_cap = 0; 2387 return; 2388 } 2389 } 2390 if (lenleft == 0) { 2391 /* We have it all sent */ 2392 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2393 0, timeleft, lenleft, HYBRID_LOG_ALLSENT, 0, ent, __LINE__); 2394 if (rack->r_ctl.bw_rate_cap) 2395 goto normal_ratecap; 2396 else 2397 return; 2398 } 2399 calcbw = lenleft * HPTS_USEC_IN_SEC; 2400 calcbw /= timeleft; 2401 /* Now we must compensate for IP/TCP overhead */ 2402 calcbw = rack_compensate_for_linerate(rack, calcbw); 2403 /* Update the bit rate cap */ 2404 rack->r_ctl.bw_rate_cap = calcbw; 2405 if ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && 2406 (rack_hybrid_allow_set_maxseg == 1) && 2407 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { 2408 /* Lets set in a smaller mss possibly here to match our rate-cap */ 2409 uint32_t orig_max; 2410 2411 orig_max = rack->r_ctl.rc_pace_max_segs; 2412 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; 2413 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, calcbw, ctf_fixed_maxseg(rack->rc_tp)); 2414 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5); 2415 } 2416 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2417 calcbw, timeleft, lenleft, HYBRID_LOG_CAP_CALC, 0, ent, __LINE__); 2418 if ((calcbw > 0) && (*bw > calcbw)) { 2419 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2420 *bw, ent->deadline, lenleft, HYBRID_LOG_RATE_CAP, 0, ent, __LINE__); 2421 *capped = 1; 2422 *bw = calcbw; 2423 } 2424 return; 2425 } 2426 normal_ratecap: 2427 #endif 2428 if ((rack->r_ctl.bw_rate_cap > 0) && (*bw > rack->r_ctl.bw_rate_cap)) { 2429 #ifdef TCP_REQUEST_TRK 2430 if (rack->rc_hybrid_mode && 2431 rack->rc_catch_up && 2432 (rack->r_ctl.rc_last_sft != NULL) && 2433 (rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && 2434 (rack_hybrid_allow_set_maxseg == 1) && 2435 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { 2436 /* Lets set in a smaller mss possibly here to match our rate-cap */ 2437 uint32_t orig_max; 2438 2439 orig_max = rack->r_ctl.rc_pace_max_segs; 2440 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; 2441 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, rack->r_ctl.bw_rate_cap, ctf_fixed_maxseg(rack->rc_tp)); 2442 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5); 2443 } 2444 #endif 2445 *capped = 1; 2446 *bw = rack->r_ctl.bw_rate_cap; 2447 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2448 *bw, 0, 0, 2449 HYBRID_LOG_RATE_CAP, 1, NULL, __LINE__); 2450 } 2451 } 2452 2453 static uint64_t 2454 rack_get_gp_est(struct tcp_rack *rack) 2455 { 2456 uint64_t bw, lt_bw, ret_bw; 2457 2458 if (rack->rc_gp_filled == 0) { 2459 /* 2460 * We have yet no b/w measurement, 2461 * if we have a user set initial bw 2462 * return it. If we don't have that and 2463 * we have an srtt, use the tcp IW (10) to 2464 * calculate a fictional b/w over the SRTT 2465 * which is more or less a guess. 
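 * (Roughly bw = initwnd_bytes * USECS_IN_SECOND / t_srtt, as computed below.)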
Note 2466 * we don't use our IW from rack on purpose 2467 * so if we have like IW=30, we are not 2468 * calculating a "huge" b/w. 2469 */ 2470 uint64_t srtt; 2471 2472 if (rack->dis_lt_bw == 1) 2473 lt_bw = 0; 2474 else 2475 lt_bw = rack_get_lt_bw(rack); 2476 if (lt_bw) { 2477 /* 2478 * No goodput bw but a long-term b/w does exist 2479 * lets use that. 2480 */ 2481 ret_bw = lt_bw; 2482 goto compensate; 2483 } 2484 if (rack->r_ctl.init_rate) 2485 return (rack->r_ctl.init_rate); 2486 2487 /* Ok lets come up with the IW guess, if we have a srtt */ 2488 if (rack->rc_tp->t_srtt == 0) { 2489 /* 2490 * Go with old pacing method 2491 * i.e. burst mitigation only. 2492 */ 2493 return (0); 2494 } 2495 /* Ok lets get the initial TCP win (not racks) */ 2496 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 2497 srtt = (uint64_t)rack->rc_tp->t_srtt; 2498 bw *= (uint64_t)USECS_IN_SECOND; 2499 bw /= srtt; 2500 ret_bw = bw; 2501 goto compensate; 2502 2503 } 2504 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 2505 /* Averaging is done, we can return the value */ 2506 bw = rack->r_ctl.gp_bw; 2507 } else { 2508 /* Still doing initial average must calculate */ 2509 bw = rack->r_ctl.gp_bw / max(rack->r_ctl.num_measurements, 1); 2510 } 2511 if (rack->dis_lt_bw) { 2512 /* We are not using lt-bw */ 2513 ret_bw = bw; 2514 goto compensate; 2515 } 2516 lt_bw = rack_get_lt_bw(rack); 2517 if (lt_bw == 0) { 2518 /* If we don't have one then equate it to the gp_bw */ 2519 lt_bw = rack->r_ctl.gp_bw; 2520 } 2521 if (rack->use_lesser_lt_bw) { 2522 if (lt_bw < bw) 2523 ret_bw = lt_bw; 2524 else 2525 ret_bw = bw; 2526 } else { 2527 if (lt_bw > bw) 2528 ret_bw = lt_bw; 2529 else 2530 ret_bw = bw; 2531 } 2532 /* 2533 * Now lets compensate based on the TCP/IP overhead. Our 2534 * Goodput estimate does not include this so we must pace out 2535 * a bit faster since our pacing calculations do. The pacing 2536 * calculations use the base ETHERNET_SEGMENT_SIZE and the segsiz 2537 * we are using to do this, so we do that here in the opposite 2538 * direction as well. This means that if we are tunneled and the 2539 * segsiz is say 1200 bytes we will get quite a boost, but its 2540 * compensated for in the pacing time the opposite way. 2541 */ 2542 compensate: 2543 ret_bw = rack_compensate_for_linerate(rack, ret_bw); 2544 return(ret_bw); 2545 } 2546 2547 2548 static uint64_t 2549 rack_get_bw(struct tcp_rack *rack) 2550 { 2551 uint64_t bw; 2552 2553 if (rack->use_fixed_rate) { 2554 /* Return the fixed pacing rate */ 2555 return (rack_get_fixed_pacing_bw(rack)); 2556 } 2557 bw = rack_get_gp_est(rack); 2558 return (bw); 2559 } 2560 2561 static uint16_t 2562 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 2563 { 2564 if (rack->use_fixed_rate) { 2565 return (100); 2566 } else if (rack->in_probe_rtt && (rsm == NULL)) 2567 return (rack->r_ctl.rack_per_of_gp_probertt); 2568 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 2569 rack->r_ctl.rack_per_of_gp_rec)) { 2570 if (rsm) { 2571 /* a retransmission always use the recovery rate */ 2572 return (rack->r_ctl.rack_per_of_gp_rec); 2573 } else if (rack->rack_rec_nonrxt_use_cr) { 2574 /* Directed to use the configured rate */ 2575 goto configured_rate; 2576 } else if (rack->rack_no_prr && 2577 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 2578 /* No PRR, lets just use the b/w estimate only */ 2579 return (100); 2580 } else { 2581 /* 2582 * Here we may have a non-retransmit but we 2583 * have no overrides, so just use the recovery 2584 * rate (prr is in effect). 
2585 */ 2586 return (rack->r_ctl.rack_per_of_gp_rec); 2587 } 2588 } 2589 configured_rate: 2590 /* For the configured rate we look at our cwnd vs the ssthresh */ 2591 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 2592 return (rack->r_ctl.rack_per_of_gp_ss); 2593 else 2594 return (rack->r_ctl.rack_per_of_gp_ca); 2595 } 2596 2597 static void 2598 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6) 2599 { 2600 /* 2601 * Types of logs (mod value) 2602 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. 2603 * 2 = a dsack round begins, persist is reset to 16. 2604 * 3 = a dsack round ends 2605 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh 2606 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack 2607 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. 2608 */ 2609 if (tcp_bblogging_on(rack->rc_tp)) { 2610 union tcp_log_stackspecific log; 2611 struct timeval tv; 2612 2613 memset(&log, 0, sizeof(log)); 2614 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; 2615 log.u_bbr.flex1 <<= 1; 2616 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; 2617 log.u_bbr.flex1 <<= 1; 2618 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; 2619 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; 2620 log.u_bbr.flex3 = rack->r_ctl.num_dsack; 2621 log.u_bbr.flex4 = flex4; 2622 log.u_bbr.flex5 = flex5; 2623 log.u_bbr.flex6 = flex6; 2624 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; 2625 log.u_bbr.flex8 = mod; 2626 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2627 log.u_bbr.epoch = rack->r_ctl.current_round; 2628 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2629 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2630 &rack->rc_inp->inp_socket->so_rcv, 2631 &rack->rc_inp->inp_socket->so_snd, 2632 RACK_DSACK_HANDLING, 0, 2633 0, &log, false, &tv); 2634 } 2635 } 2636 2637 static void 2638 rack_log_hdwr_pacing(struct tcp_rack *rack, 2639 uint64_t rate, uint64_t hw_rate, int line, 2640 int error, uint16_t mod) 2641 { 2642 if (tcp_bblogging_on(rack->rc_tp)) { 2643 union tcp_log_stackspecific log; 2644 struct timeval tv; 2645 const struct ifnet *ifp; 2646 uint64_t ifp64; 2647 2648 memset(&log, 0, sizeof(log)); 2649 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 2650 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 2651 if (rack->r_ctl.crte) { 2652 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 2653 } else if (rack->rc_inp->inp_route.ro_nh && 2654 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 2655 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 2656 } else 2657 ifp = NULL; 2658 if (ifp) { 2659 ifp64 = (uintptr_t)ifp; 2660 log.u_bbr.flex3 = ((ifp64 >> 32) & 0x00000000ffffffff); 2661 log.u_bbr.flex4 = (ifp64 & 0x00000000ffffffff); 2662 } 2663 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2664 log.u_bbr.bw_inuse = rate; 2665 log.u_bbr.flex5 = line; 2666 log.u_bbr.flex6 = error; 2667 log.u_bbr.flex7 = mod; 2668 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 2669 log.u_bbr.flex8 = rack->use_fixed_rate; 2670 log.u_bbr.flex8 <<= 1; 2671 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 2672 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 2673 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 2674 if (rack->r_ctl.crte) 2675 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 2676 else 2677 log.u_bbr.cur_del_rate = 0; 2678 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 2679 log.u_bbr.epoch = rack->r_ctl.current_round; 2680 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2681 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2682 
&rack->rc_inp->inp_socket->so_rcv, 2683 &rack->rc_inp->inp_socket->so_snd, 2684 BBR_LOG_HDWR_PACE, 0, 2685 0, &log, false, &tv); 2686 } 2687 } 2688 2689 static uint64_t 2690 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) 2691 { 2692 /* 2693 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 2694 */ 2695 uint64_t bw_est, high_rate; 2696 uint64_t gain; 2697 2698 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2699 bw_est = bw * gain; 2700 bw_est /= (uint64_t)100; 2701 /* Never fall below the minimum (def 64kbps) */ 2702 if (bw_est < RACK_MIN_BW) 2703 bw_est = RACK_MIN_BW; 2704 if (rack->r_rack_hw_rate_caps) { 2705 /* Rate caps are in place */ 2706 if (rack->r_ctl.crte != NULL) { 2707 /* We have a hdwr rate already */ 2708 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2709 if (bw_est >= high_rate) { 2710 /* We are capping bw at the highest rate table entry */ 2711 if (rack_hw_rate_cap_per && 2712 (((high_rate * (100 + rack_hw_rate_cap_per)) / 100) < bw_est)) { 2713 rack->r_rack_hw_rate_caps = 0; 2714 goto done; 2715 } 2716 rack_log_hdwr_pacing(rack, 2717 bw_est, high_rate, __LINE__, 2718 0, 3); 2719 bw_est = high_rate; 2720 if (capped) 2721 *capped = 1; 2722 } 2723 } else if ((rack->rack_hdrw_pacing == 0) && 2724 (rack->rack_hdw_pace_ena) && 2725 (rack->rack_attempt_hdwr_pace == 0) && 2726 (rack->rc_inp->inp_route.ro_nh != NULL) && 2727 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2728 /* 2729 * Special case, we have not yet attempted hardware 2730 * pacing, and yet we may, when we do, find out if we are 2731 * above the highest rate. We need to know the maxbw for the interface 2732 * in question (if it supports ratelimiting). We get back 2733 * a 0, if the interface is not found in the RL lists. 2734 */ 2735 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2736 if (high_rate) { 2737 /* Yep, we have a rate is it above this rate? */ 2738 if (bw_est > high_rate) { 2739 bw_est = high_rate; 2740 if (capped) 2741 *capped = 1; 2742 } 2743 } 2744 } 2745 } 2746 done: 2747 return (bw_est); 2748 } 2749 2750 static void 2751 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2752 { 2753 if (tcp_bblogging_on(rack->rc_tp)) { 2754 union tcp_log_stackspecific log; 2755 struct timeval tv; 2756 2757 if ((mod != 1) && (rack_verbose_logging == 0)) { 2758 /* 2759 * We get 3 values currently for mod 2760 * 1 - We are retransmitting and this tells the reason. 2761 * 2 - We are clearing a dup-ack count. 2762 * 3 - We are incrementing a dup-ack count. 2763 * 2764 * The clear/increment are only logged 2765 * if you have BBverbose on. 
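 * Mod 1 (the retransmit reason) is always logged whenever BB logging is on; the mod value itself is recorded in flex8 below.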
2766 */ 2767 return; 2768 } 2769 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2770 log.u_bbr.flex1 = tsused; 2771 log.u_bbr.flex2 = thresh; 2772 log.u_bbr.flex3 = rsm->r_flags; 2773 log.u_bbr.flex4 = rsm->r_dupack; 2774 log.u_bbr.flex5 = rsm->r_start; 2775 log.u_bbr.flex6 = rsm->r_end; 2776 log.u_bbr.flex8 = mod; 2777 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2778 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2779 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2780 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2781 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2782 log.u_bbr.pacing_gain = rack->r_must_retran; 2783 log.u_bbr.epoch = rack->r_ctl.current_round; 2784 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2785 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2786 &rack->rc_inp->inp_socket->so_rcv, 2787 &rack->rc_inp->inp_socket->so_snd, 2788 BBR_LOG_SETTINGS_CHG, 0, 2789 0, &log, false, &tv); 2790 } 2791 } 2792 2793 static void 2794 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2795 { 2796 if (tcp_bblogging_on(rack->rc_tp)) { 2797 union tcp_log_stackspecific log; 2798 struct timeval tv; 2799 2800 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2801 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2802 log.u_bbr.flex2 = to; 2803 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2804 log.u_bbr.flex4 = slot; 2805 log.u_bbr.flex5 = rack->rc_tp->t_hpts_slot; 2806 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2807 log.u_bbr.flex7 = rack->rc_in_persist; 2808 log.u_bbr.flex8 = which; 2809 if (rack->rack_no_prr) 2810 log.u_bbr.pkts_out = 0; 2811 else 2812 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2813 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2814 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2815 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2816 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2817 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2818 log.u_bbr.pacing_gain = rack->r_must_retran; 2819 log.u_bbr.cwnd_gain = rack->rack_deferred_inited; 2820 log.u_bbr.pkt_epoch = rack->rc_has_collapsed; 2821 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2822 log.u_bbr.lost = rack_rto_min; 2823 log.u_bbr.epoch = rack->r_ctl.roundends; 2824 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2825 log.u_bbr.bw_inuse <<= 32; 2826 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2827 log.u_bbr.applimited = rack->rc_tp->t_flags2; 2828 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2829 &rack->rc_inp->inp_socket->so_rcv, 2830 &rack->rc_inp->inp_socket->so_snd, 2831 BBR_LOG_TIMERSTAR, 0, 2832 0, &log, false, &tv); 2833 } 2834 } 2835 2836 static void 2837 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2838 { 2839 if (tcp_bblogging_on(rack->rc_tp)) { 2840 union tcp_log_stackspecific log; 2841 struct timeval tv; 2842 2843 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2844 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2845 log.u_bbr.flex8 = to_num; 2846 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2847 log.u_bbr.flex2 = rack->rc_rack_rtt; 2848 if (rsm == NULL) 2849 log.u_bbr.flex3 = 0; 2850 else 2851 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2852 if (rack->rack_no_prr) 2853 log.u_bbr.flex5 = 0; 2854 else 2855 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2856 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2857 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2858 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2859 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2860 
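/* bw_inuse below packs <current_round | rc_considered_lost> into one 64-bit value. */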
log.u_bbr.pacing_gain = rack->r_must_retran; 2861 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2862 log.u_bbr.bw_inuse <<= 32; 2863 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2864 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2865 &rack->rc_inp->inp_socket->so_rcv, 2866 &rack->rc_inp->inp_socket->so_snd, 2867 BBR_LOG_RTO, 0, 2868 0, &log, false, &tv); 2869 } 2870 } 2871 2872 static void 2873 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, 2874 struct rack_sendmap *prev, 2875 struct rack_sendmap *rsm, 2876 struct rack_sendmap *next, 2877 int flag, uint32_t th_ack, int line) 2878 { 2879 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2880 union tcp_log_stackspecific log; 2881 struct timeval tv; 2882 2883 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2884 log.u_bbr.flex8 = flag; 2885 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2886 log.u_bbr.cur_del_rate = (uintptr_t)prev; 2887 log.u_bbr.delRate = (uintptr_t)rsm; 2888 log.u_bbr.rttProp = (uintptr_t)next; 2889 log.u_bbr.flex7 = 0; 2890 if (prev) { 2891 log.u_bbr.flex1 = prev->r_start; 2892 log.u_bbr.flex2 = prev->r_end; 2893 log.u_bbr.flex7 |= 0x4; 2894 } 2895 if (rsm) { 2896 log.u_bbr.flex3 = rsm->r_start; 2897 log.u_bbr.flex4 = rsm->r_end; 2898 log.u_bbr.flex7 |= 0x2; 2899 } 2900 if (next) { 2901 log.u_bbr.flex5 = next->r_start; 2902 log.u_bbr.flex6 = next->r_end; 2903 log.u_bbr.flex7 |= 0x1; 2904 } 2905 log.u_bbr.applimited = line; 2906 log.u_bbr.pkts_out = th_ack; 2907 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2908 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2909 if (rack->rack_no_prr) 2910 log.u_bbr.lost = 0; 2911 else 2912 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2913 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2914 log.u_bbr.bw_inuse <<= 32; 2915 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2916 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2917 &rack->rc_inp->inp_socket->so_rcv, 2918 &rack->rc_inp->inp_socket->so_snd, 2919 TCP_LOG_MAPCHG, 0, 2920 0, &log, false, &tv); 2921 } 2922 } 2923 2924 static void 2925 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2926 struct rack_sendmap *rsm, int conf) 2927 { 2928 if (tcp_bblogging_on(tp)) { 2929 union tcp_log_stackspecific log; 2930 struct timeval tv; 2931 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2932 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2933 log.u_bbr.flex1 = t; 2934 log.u_bbr.flex2 = len; 2935 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2936 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2937 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2938 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2939 log.u_bbr.flex7 = conf; 2940 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2941 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2942 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2943 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2944 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2945 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2946 if (rsm) { 2947 log.u_bbr.pkt_epoch = rsm->r_start; 2948 log.u_bbr.lost = rsm->r_end; 2949 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2950 /* We loose any upper of the 24 bits */ 2951 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; 2952 } else { 2953 /* Its a SYN */ 2954 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2955 log.u_bbr.lost = 0; 2956 log.u_bbr.cwnd_gain = 0; 2957 log.u_bbr.pacing_gain = 0; 2958 } 2959 /* Write out general bits of interest rrs here */ 2960 log.u_bbr.use_lt_bw = 
rack->rc_highly_buffered; 2961 log.u_bbr.use_lt_bw <<= 1; 2962 log.u_bbr.use_lt_bw |= rack->forced_ack; 2963 log.u_bbr.use_lt_bw <<= 1; 2964 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2965 log.u_bbr.use_lt_bw <<= 1; 2966 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2967 log.u_bbr.use_lt_bw <<= 1; 2968 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2969 log.u_bbr.use_lt_bw <<= 1; 2970 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 2971 log.u_bbr.use_lt_bw <<= 1; 2972 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2973 log.u_bbr.use_lt_bw <<= 1; 2974 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; 2975 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; 2976 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; 2977 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; 2978 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; 2979 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; 2980 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 2981 log.u_bbr.bw_inuse <<= 32; 2982 if (rsm) 2983 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 2984 TCP_LOG_EVENTP(tp, NULL, 2985 &rack->rc_inp->inp_socket->so_rcv, 2986 &rack->rc_inp->inp_socket->so_snd, 2987 BBR_LOG_BBRRTT, 0, 2988 0, &log, false, &tv); 2989 2990 2991 } 2992 } 2993 2994 static void 2995 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 2996 { 2997 /* 2998 * Log the rtt sample we are 2999 * applying to the srtt algorithm in 3000 * useconds. 3001 */ 3002 if (tcp_bblogging_on(rack->rc_tp)) { 3003 union tcp_log_stackspecific log; 3004 struct timeval tv; 3005 3006 /* Convert our ms to a microsecond */ 3007 memset(&log, 0, sizeof(log)); 3008 log.u_bbr.flex1 = rtt; 3009 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 3010 log.u_bbr.flex7 = 1; 3011 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3012 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3013 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3014 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3015 log.u_bbr.pacing_gain = rack->r_must_retran; 3016 /* 3017 * We capture in delRate the upper 32 bits as 3018 * the confidence level we had declared, and the 3019 * lower 32 bits as the actual RTT using the arrival 3020 * timestamp. 
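		 * (i.e. delRate = ((uint64_t)confidence << 32) | rs_us_rtt)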
3021 */ 3022 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 3023 log.u_bbr.delRate <<= 32; 3024 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 3025 /* Lets capture all the things that make up t_rtxcur */ 3026 log.u_bbr.applimited = rack_rto_min; 3027 log.u_bbr.epoch = rack_rto_max; 3028 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 3029 log.u_bbr.lost = rack_rto_min; 3030 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 3031 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 3032 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 3033 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 3034 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 3035 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3036 &rack->rc_inp->inp_socket->so_rcv, 3037 &rack->rc_inp->inp_socket->so_snd, 3038 TCP_LOG_RTT, 0, 3039 0, &log, false, &tv); 3040 } 3041 } 3042 3043 static void 3044 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 3045 { 3046 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3047 union tcp_log_stackspecific log; 3048 struct timeval tv; 3049 3050 /* Convert our ms to a microsecond */ 3051 memset(&log, 0, sizeof(log)); 3052 log.u_bbr.flex1 = rtt; 3053 log.u_bbr.flex2 = send_time; 3054 log.u_bbr.flex3 = ack_time; 3055 log.u_bbr.flex4 = where; 3056 log.u_bbr.flex7 = 2; 3057 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3058 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3059 log.u_bbr.bw_inuse <<= 32; 3060 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3061 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3062 &rack->rc_inp->inp_socket->so_rcv, 3063 &rack->rc_inp->inp_socket->so_snd, 3064 TCP_LOG_RTT, 0, 3065 0, &log, false, &tv); 3066 } 3067 } 3068 3069 3070 static void 3071 rack_log_rtt_sendmap(struct tcp_rack *rack, uint32_t idx, uint64_t tsv, uint32_t tsecho) 3072 { 3073 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3074 union tcp_log_stackspecific log; 3075 struct timeval tv; 3076 3077 /* Convert our ms to a microsecond */ 3078 memset(&log, 0, sizeof(log)); 3079 log.u_bbr.flex1 = idx; 3080 log.u_bbr.flex2 = rack_ts_to_msec(tsv); 3081 log.u_bbr.flex3 = tsecho; 3082 log.u_bbr.flex7 = 3; 3083 log.u_bbr.rttProp = tsv; 3084 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3085 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3086 log.u_bbr.bw_inuse <<= 32; 3087 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3088 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3089 &rack->rc_inp->inp_socket->so_rcv, 3090 &rack->rc_inp->inp_socket->so_snd, 3091 TCP_LOG_RTT, 0, 3092 0, &log, false, &tv); 3093 } 3094 } 3095 3096 3097 static inline void 3098 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 3099 { 3100 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3101 union tcp_log_stackspecific log; 3102 struct timeval tv; 3103 3104 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3105 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3106 log.u_bbr.flex1 = line; 3107 log.u_bbr.flex2 = tick; 3108 log.u_bbr.flex3 = tp->t_maxunacktime; 3109 log.u_bbr.flex4 = tp->t_acktime; 3110 log.u_bbr.flex8 = event; 3111 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3112 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3113 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3114 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3115 log.u_bbr.pacing_gain = rack->r_must_retran; 3116 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3117 log.u_bbr.bw_inuse <<= 32; 3118 log.u_bbr.bw_inuse |= 
rack->r_ctl.rc_considered_lost; 3119 TCP_LOG_EVENTP(tp, NULL, 3120 &rack->rc_inp->inp_socket->so_rcv, 3121 &rack->rc_inp->inp_socket->so_snd, 3122 BBR_LOG_PROGRESS, 0, 3123 0, &log, false, &tv); 3124 } 3125 } 3126 3127 static void 3128 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv, int line) 3129 { 3130 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3131 union tcp_log_stackspecific log; 3132 3133 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3134 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3135 log.u_bbr.flex1 = slot; 3136 if (rack->rack_no_prr) 3137 log.u_bbr.flex2 = 0; 3138 else 3139 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 3140 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3141 log.u_bbr.flex6 = line; 3142 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 3143 log.u_bbr.flex8 = rack->rc_in_persist; 3144 log.u_bbr.timeStamp = cts; 3145 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3146 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3147 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3148 log.u_bbr.pacing_gain = rack->r_must_retran; 3149 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3150 &rack->rc_inp->inp_socket->so_rcv, 3151 &rack->rc_inp->inp_socket->so_snd, 3152 BBR_LOG_BBRSND, 0, 3153 0, &log, false, tv); 3154 } 3155 } 3156 3157 static void 3158 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 3159 { 3160 if (tcp_bblogging_on(rack->rc_tp)) { 3161 union tcp_log_stackspecific log; 3162 struct timeval tv; 3163 3164 memset(&log, 0, sizeof(log)); 3165 log.u_bbr.flex1 = did_out; 3166 log.u_bbr.flex2 = nxt_pkt; 3167 log.u_bbr.flex3 = way_out; 3168 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3169 if (rack->rack_no_prr) 3170 log.u_bbr.flex5 = 0; 3171 else 3172 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3173 log.u_bbr.flex6 = nsegs; 3174 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 3175 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 3176 log.u_bbr.flex7 <<= 1; 3177 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 3178 log.u_bbr.flex7 <<= 1; 3179 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 3180 log.u_bbr.flex8 = rack->rc_in_persist; 3181 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3182 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3183 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3184 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 3185 log.u_bbr.use_lt_bw <<= 1; 3186 log.u_bbr.use_lt_bw |= rack->r_might_revert; 3187 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3188 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3189 log.u_bbr.pacing_gain = rack->r_must_retran; 3190 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3191 log.u_bbr.bw_inuse <<= 32; 3192 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3193 log.u_bbr.epoch = rack->rc_inp->inp_socket->so_snd.sb_hiwat; 3194 log.u_bbr.lt_epoch = rack->rc_inp->inp_socket->so_rcv.sb_hiwat; 3195 log.u_bbr.lost = rack->rc_tp->t_srtt; 3196 log.u_bbr.pkt_epoch = rack->rc_tp->rfbuf_cnt; 3197 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3198 &rack->rc_inp->inp_socket->so_rcv, 3199 &rack->rc_inp->inp_socket->so_snd, 3200 BBR_LOG_DOSEG_DONE, 0, 3201 0, &log, false, &tv); 3202 } 3203 } 3204 3205 static void 3206 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 3207 { 3208 if (tcp_bblogging_on(rack->rc_tp)) { 
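		/*
		 * flex1/flex3 carry the current min/max pace segment
		 * sizes, flex4-flex6 the caller supplied args, flex7
		 * the user set min segs and flex8 identifies the
		 * caller (frm).
		 */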
3209 union tcp_log_stackspecific log; 3210 struct timeval tv; 3211 3212 memset(&log, 0, sizeof(log)); 3213 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 3214 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 3215 log.u_bbr.flex4 = arg1; 3216 log.u_bbr.flex5 = arg2; 3217 log.u_bbr.flex7 = rack->r_ctl.rc_user_set_min_segs; 3218 log.u_bbr.flex6 = arg3; 3219 log.u_bbr.flex8 = frm; 3220 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3221 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3222 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3223 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 3224 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3225 log.u_bbr.pacing_gain = rack->r_must_retran; 3226 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv, 3227 &tptosocket(tp)->so_snd, 3228 TCP_HDWR_PACE_SIZE, 0, 0, &log, false, &tv); 3229 } 3230 } 3231 3232 static void 3233 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, 3234 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 3235 { 3236 if (tcp_bblogging_on(rack->rc_tp)) { 3237 union tcp_log_stackspecific log; 3238 struct timeval tv; 3239 3240 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3241 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3242 log.u_bbr.flex1 = slot; 3243 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 3244 log.u_bbr.flex4 = reason; 3245 if (rack->rack_no_prr) 3246 log.u_bbr.flex5 = 0; 3247 else 3248 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3249 log.u_bbr.flex7 = hpts_calling; 3250 log.u_bbr.flex8 = rack->rc_in_persist; 3251 log.u_bbr.lt_epoch = cwnd_to_use; 3252 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3253 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3254 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3255 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3256 log.u_bbr.pacing_gain = rack->r_must_retran; 3257 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 3258 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3259 log.u_bbr.bw_inuse <<= 32; 3260 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3261 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3262 &rack->rc_inp->inp_socket->so_rcv, 3263 &rack->rc_inp->inp_socket->so_snd, 3264 BBR_LOG_JUSTRET, 0, 3265 tlen, &log, false, &tv); 3266 } 3267 } 3268 3269 static void 3270 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 3271 struct timeval *tv, uint32_t flags_on_entry) 3272 { 3273 if (tcp_bblogging_on(rack->rc_tp)) { 3274 union tcp_log_stackspecific log; 3275 3276 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3277 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3278 log.u_bbr.flex1 = line; 3279 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 3280 log.u_bbr.flex3 = flags_on_entry; 3281 log.u_bbr.flex4 = us_cts; 3282 if (rack->rack_no_prr) 3283 log.u_bbr.flex5 = 0; 3284 else 3285 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3286 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 3287 log.u_bbr.flex7 = hpts_removed; 3288 log.u_bbr.flex8 = 1; 3289 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 3290 log.u_bbr.timeStamp = us_cts; 3291 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3292 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3293 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3294 log.u_bbr.pacing_gain = rack->r_must_retran; 3295 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3296 log.u_bbr.bw_inuse <<= 32; 3297 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3298 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3299 
&rack->rc_inp->inp_socket->so_rcv, 3300 &rack->rc_inp->inp_socket->so_snd, 3301 BBR_LOG_TIMERCANC, 0, 3302 0, &log, false, tv); 3303 } 3304 } 3305 3306 static void 3307 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 3308 uint32_t flex1, uint32_t flex2, 3309 uint32_t flex3, uint32_t flex4, 3310 uint32_t flex5, uint32_t flex6, 3311 uint16_t flex7, uint8_t mod) 3312 { 3313 if (tcp_bblogging_on(rack->rc_tp)) { 3314 union tcp_log_stackspecific log; 3315 struct timeval tv; 3316 3317 if (mod == 1) { 3318 /* No you can't use 1, its for the real to cancel */ 3319 return; 3320 } 3321 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3322 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3323 log.u_bbr.flex1 = flex1; 3324 log.u_bbr.flex2 = flex2; 3325 log.u_bbr.flex3 = flex3; 3326 log.u_bbr.flex4 = flex4; 3327 log.u_bbr.flex5 = flex5; 3328 log.u_bbr.flex6 = flex6; 3329 log.u_bbr.flex7 = flex7; 3330 log.u_bbr.flex8 = mod; 3331 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3332 &rack->rc_inp->inp_socket->so_rcv, 3333 &rack->rc_inp->inp_socket->so_snd, 3334 BBR_LOG_TIMERCANC, 0, 3335 0, &log, false, &tv); 3336 } 3337 } 3338 3339 static void 3340 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 3341 { 3342 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3343 union tcp_log_stackspecific log; 3344 struct timeval tv; 3345 3346 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3347 log.u_bbr.flex1 = timers; 3348 log.u_bbr.flex2 = ret; 3349 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 3350 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3351 log.u_bbr.flex5 = cts; 3352 if (rack->rack_no_prr) 3353 log.u_bbr.flex6 = 0; 3354 else 3355 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; 3356 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3357 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3358 log.u_bbr.pacing_gain = rack->r_must_retran; 3359 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3360 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3361 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3362 &rack->rc_inp->inp_socket->so_rcv, 3363 &rack->rc_inp->inp_socket->so_snd, 3364 BBR_LOG_TO_PROCESS, 0, 3365 0, &log, false, &tv); 3366 } 3367 } 3368 3369 static void 3370 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line) 3371 { 3372 if (tcp_bblogging_on(rack->rc_tp)) { 3373 union tcp_log_stackspecific log; 3374 struct timeval tv; 3375 3376 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3377 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 3378 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 3379 if (rack->rack_no_prr) 3380 log.u_bbr.flex3 = 0; 3381 else 3382 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 3383 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 3384 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 3385 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 3386 log.u_bbr.flex7 = line; 3387 log.u_bbr.flex8 = frm; 3388 log.u_bbr.pkts_out = orig_cwnd; 3389 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3390 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3391 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 3392 log.u_bbr.use_lt_bw <<= 1; 3393 log.u_bbr.use_lt_bw |= rack->r_might_revert; 3394 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3395 &rack->rc_inp->inp_socket->so_rcv, 3396 &rack->rc_inp->inp_socket->so_snd, 3397 BBR_LOG_BBRUPD, 0, 3398 0, &log, false, &tv); 3399 } 3400 } 3401 3402 static void 3403 rack_counter_destroy(void) 3404 { 3405 counter_u64_free(rack_total_bytes); 3406 counter_u64_free(rack_fto_send); 3407 counter_u64_free(rack_fto_rsm_send); 3408 
counter_u64_free(rack_nfto_resend); 3409 counter_u64_free(rack_hw_pace_init_fail); 3410 counter_u64_free(rack_hw_pace_lost); 3411 counter_u64_free(rack_non_fto_send); 3412 counter_u64_free(rack_extended_rfo); 3413 counter_u64_free(rack_ack_total); 3414 counter_u64_free(rack_express_sack); 3415 counter_u64_free(rack_sack_total); 3416 counter_u64_free(rack_move_none); 3417 counter_u64_free(rack_move_some); 3418 counter_u64_free(rack_sack_attacks_detected); 3419 counter_u64_free(rack_sack_attacks_reversed); 3420 counter_u64_free(rack_sack_attacks_suspect); 3421 counter_u64_free(rack_sack_used_next_merge); 3422 counter_u64_free(rack_sack_used_prev_merge); 3423 counter_u64_free(rack_tlp_tot); 3424 counter_u64_free(rack_tlp_newdata); 3425 counter_u64_free(rack_tlp_retran); 3426 counter_u64_free(rack_tlp_retran_bytes); 3427 counter_u64_free(rack_to_tot); 3428 counter_u64_free(rack_saw_enobuf); 3429 counter_u64_free(rack_saw_enobuf_hw); 3430 counter_u64_free(rack_saw_enetunreach); 3431 counter_u64_free(rack_hot_alloc); 3432 counter_u64_free(tcp_policer_detected); 3433 counter_u64_free(rack_to_alloc); 3434 counter_u64_free(rack_to_alloc_hard); 3435 counter_u64_free(rack_to_alloc_emerg); 3436 counter_u64_free(rack_to_alloc_limited); 3437 counter_u64_free(rack_alloc_limited_conns); 3438 counter_u64_free(rack_split_limited); 3439 counter_u64_free(rack_multi_single_eq); 3440 counter_u64_free(rack_rxt_clamps_cwnd); 3441 counter_u64_free(rack_rxt_clamps_cwnd_uniq); 3442 counter_u64_free(rack_proc_non_comp_ack); 3443 counter_u64_free(rack_sack_proc_all); 3444 counter_u64_free(rack_sack_proc_restart); 3445 counter_u64_free(rack_sack_proc_short); 3446 counter_u64_free(rack_sack_skipped_acked); 3447 counter_u64_free(rack_sack_splits); 3448 counter_u64_free(rack_input_idle_reduces); 3449 counter_u64_free(rack_collapsed_win); 3450 counter_u64_free(rack_collapsed_win_rxt); 3451 counter_u64_free(rack_collapsed_win_rxt_bytes); 3452 counter_u64_free(rack_collapsed_win_seen); 3453 counter_u64_free(rack_try_scwnd); 3454 counter_u64_free(rack_persists_sends); 3455 counter_u64_free(rack_persists_acks); 3456 counter_u64_free(rack_persists_loss); 3457 counter_u64_free(rack_persists_lost_ends); 3458 #ifdef INVARIANTS 3459 counter_u64_free(rack_adjust_map_bw); 3460 #endif 3461 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 3462 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 3463 } 3464 3465 static struct rack_sendmap * 3466 rack_alloc(struct tcp_rack *rack) 3467 { 3468 struct rack_sendmap *rsm; 3469 3470 /* 3471 * First get the top of the list it in 3472 * theory is the "hottest" rsm we have, 3473 * possibly just freed by ack processing. 3474 */ 3475 if (rack->rc_free_cnt > rack_free_cache) { 3476 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 3477 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3478 counter_u64_add(rack_hot_alloc, 1); 3479 rack->rc_free_cnt--; 3480 return (rsm); 3481 } 3482 /* 3483 * Once we get under our free cache we probably 3484 * no longer have a "hot" one available. Lets 3485 * get one from UMA. 3486 */ 3487 rsm = uma_zalloc(rack_zone, M_NOWAIT); 3488 if (rsm) { 3489 rack->r_ctl.rc_num_maps_alloced++; 3490 counter_u64_add(rack_to_alloc, 1); 3491 return (rsm); 3492 } 3493 /* 3494 * Dig in to our aux rsm's (the last two) since 3495 * UMA failed to get us one. 
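	 * This dips into the small reserve we normally keep on the
	 * free list (the entries at or below rack_free_cache).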
3496 */ 3497 if (rack->rc_free_cnt) { 3498 counter_u64_add(rack_to_alloc_emerg, 1); 3499 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 3500 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3501 rack->rc_free_cnt--; 3502 return (rsm); 3503 } 3504 return (NULL); 3505 } 3506 3507 static struct rack_sendmap * 3508 rack_alloc_full_limit(struct tcp_rack *rack) 3509 { 3510 if ((V_tcp_map_entries_limit > 0) && 3511 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 3512 counter_u64_add(rack_to_alloc_limited, 1); 3513 if (!rack->alloc_limit_reported) { 3514 rack->alloc_limit_reported = 1; 3515 counter_u64_add(rack_alloc_limited_conns, 1); 3516 } 3517 return (NULL); 3518 } 3519 return (rack_alloc(rack)); 3520 } 3521 3522 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 3523 static struct rack_sendmap * 3524 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 3525 { 3526 struct rack_sendmap *rsm; 3527 3528 if (limit_type) { 3529 /* currently there is only one limit type */ 3530 if (rack->r_ctl.rc_split_limit > 0 && 3531 rack->r_ctl.rc_num_split_allocs >= rack->r_ctl.rc_split_limit) { 3532 counter_u64_add(rack_split_limited, 1); 3533 if (!rack->alloc_limit_reported) { 3534 rack->alloc_limit_reported = 1; 3535 counter_u64_add(rack_alloc_limited_conns, 1); 3536 } 3537 return (NULL); 3538 } 3539 } 3540 3541 /* allocate and mark in the limit type, if set */ 3542 rsm = rack_alloc(rack); 3543 if (rsm != NULL && limit_type) { 3544 rsm->r_limit_type = limit_type; 3545 rack->r_ctl.rc_num_split_allocs++; 3546 } 3547 return (rsm); 3548 } 3549 3550 static void 3551 rack_free_trim(struct tcp_rack *rack) 3552 { 3553 struct rack_sendmap *rsm; 3554 3555 /* 3556 * Free up all the tail entries until 3557 * we get our list down to the limit. 
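	 * (The trim stops once only rack_free_cache entries remain;
	 * those are kept as an emergency reserve for rack_alloc().)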
	 */
	while (rack->rc_free_cnt > rack_free_cache) {
		rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head);
		TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
		rack->rc_free_cnt--;
		rack->r_ctl.rc_num_maps_alloced--;
		uma_zfree(rack_zone, rsm);
	}
}

static void
rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm)
{
	if (rsm->r_flags & RACK_APP_LIMITED) {
		if (rack->r_ctl.rc_app_limited_cnt > 0) {
			rack->r_ctl.rc_app_limited_cnt--;
		}
	}
	if (rsm->r_limit_type) {
		/* currently there is only one limit type */
		rack->r_ctl.rc_num_split_allocs--;
	}
	if (rsm == rack->r_ctl.rc_first_appl) {
		rack->r_ctl.cleared_app_ack_seq = rsm->r_start + (rsm->r_end - rsm->r_start);
		rack->r_ctl.cleared_app_ack = 1;
		if (rack->r_ctl.rc_app_limited_cnt == 0)
			rack->r_ctl.rc_first_appl = NULL;
		else
			rack->r_ctl.rc_first_appl = tqhash_find(rack->r_ctl.tqh, rsm->r_nseq_appl);
	}
	if (rsm == rack->r_ctl.rc_resend)
		rack->r_ctl.rc_resend = NULL;
	if (rsm == rack->r_ctl.rc_end_appl)
		rack->r_ctl.rc_end_appl = NULL;
	if (rack->r_ctl.rc_tlpsend == rsm)
		rack->r_ctl.rc_tlpsend = NULL;
	if (rack->r_ctl.rc_sacklast == rsm)
		rack->r_ctl.rc_sacklast = NULL;
	memset(rsm, 0, sizeof(struct rack_sendmap));
	/* Make sure we are not going to overrun our count limit of 0xff */
	if ((rack->rc_free_cnt + 1) > RACK_FREE_CNT_MAX) {
		rack_free_trim(rack);
	}
	TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext);
	rack->rc_free_cnt++;
}

static uint32_t
rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack)
{
	uint64_t srtt, bw, len, tim;
	uint32_t segsiz, def_len, minl;

	segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
	def_len = rack_def_data_window * segsiz;
	if (rack->rc_gp_filled == 0) {
		/*
		 * We have no measurement (IW is in flight?) so
		 * we can only guess using our data_window sysctl
		 * value (usually 20MSS).
		 */
		return (def_len);
	}
	/*
	 * Now we have a number of factors to consider.
	 *
	 * 1) We have a desired BDP which is usually
	 *    at least 2.
	 * 2) We have a minimum number of rtt's, usually 1 SRTT,
	 *    but we allow it to be more.
	 * 3) We want to make sure a measurement lasts at least N
	 *    microseconds (if we have set rack_min_measure_usec).
	 *
	 * We handle the first concern here by trying to create a data
	 * window of max(rack_def_data_window, DesiredBDP). The
	 * second concern we handle in not letting the measurement
	 * window end normally until at least the required SRTT's
	 * have gone by, which is done further below in
	 * rack_enough_for_measurement(). Finally the third concern
	 * we also handle here by calculating how long that time
	 * would take at the current BW and then returning the
	 * max of our first calculation and that length. Note
	 * that if rack_min_measure_usec is 0, we don't deal
	 * with concern 3. Also for both Concern 1 and 3 an
	 * application limited period could end the measurement
	 * earlier.
	 *
	 * So let's calculate the BDP with the "known" b/w using
	 * the SRTT as our rtt and then multiply it by the
	 * goal.
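	 *
	 * As a purely illustrative example (round numbers, not from
	 * any measurement): at a b/w of 1,500,000 bytes/sec and a
	 * 50,000 usec SRTT, len = 1500000 * 50000 / 1000000 = 75000
	 * bytes; with rack_goal_bdp = 2 that becomes 150000 bytes,
	 * which is then rounded up to a multiple of segsiz.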
	 */
	bw = rack_get_bw(rack);
	srtt = (uint64_t)tp->t_srtt;
	len = bw * srtt;
	len /= (uint64_t)HPTS_USEC_IN_SEC;
	len *= max(1, rack_goal_bdp);
	/* Now we need to round up to the nearest MSS */
	len = roundup(len, segsiz);
	if (rack_min_measure_usec) {
		/* Now calculate our min length for this b/w */
		tim = rack_min_measure_usec;
		minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC;
		if (minl == 0)
			minl = 1;
		minl = roundup(minl, segsiz);
		if (len < minl)
			len = minl;
	}
	/*
	 * Now if we have a very small window we want
	 * to attempt to get the window that is
	 * as small as possible. This happens on
	 * low b/w connections and we don't want to
	 * span huge numbers of rtt's between measurements.
	 *
	 * We basically include 2 over our "MIN window" so
	 * that the measurement can be shortened (possibly) by
	 * an ack'ed packet.
	 */
	if (len < def_len)
		return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz)));
	else
		return (max((uint32_t)len, def_len));

}

static int
rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality)
{
	uint32_t tim, srtts, segsiz;

	/*
	 * Has enough time passed for the GP measurement to be valid?
	 */
	if (SEQ_LT(th_ack, tp->gput_seq)) {
		/* Not enough bytes yet */
		return (0);
	}
	if ((tp->snd_max == tp->snd_una) ||
	    (th_ack == tp->snd_max)){
		/*
		 * All is acked. The quality of an all-acked measurement
		 * is usually low or medium, but we in theory could split
		 * all acked into two cases, where you got
		 * a significant amount of your window and
		 * where you did not. For now we leave it
		 * but it is something to contemplate in the
		 * future. The danger here is that delayed ack
		 * is affecting the last byte (which is a 50:50 chance).
		 */
		*quality = RACK_QUALITY_ALLACKED;
		return (1);
	}
	if (SEQ_GEQ(th_ack, tp->gput_ack)) {
		/*
		 * We obtained our entire window of data we wanted,
		 * so no matter if we are in recovery or not it's
		 * ok, since expanding the window does not
		 * make things fuzzy (or at least not as much).
		 */
		*quality = RACK_QUALITY_HIGH;
		return (1);
	}
	segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
	if (SEQ_LT(th_ack, tp->gput_ack) &&
	    ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
		/* Not enough bytes yet */
		return (0);
	}
	if (rack->r_ctl.rc_first_appl &&
	    (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) {
		/*
		 * We are up to the app limited send point,
		 * so we have to measure irrespective of the time.
		 */
		*quality = RACK_QUALITY_APPLIMITED;
		return (1);
	}
	/* Now what about time? */
	srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts);
	tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts;
	if ((tim >= srtts) && (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) {
		/*
		 * We do not allow a measurement if we are in recovery,
		 * since that would shrink the goodput window we wanted.
		 * This is to prevent cloudiness about when the last send
		 * was actually made.
		 */
		*quality = RACK_QUALITY_HIGH;
		return (1);
	}
	/* Nope, not even a full SRTT has passed */
	return (0);
}

static void
rack_log_timely(struct tcp_rack *rack,
		uint32_t logged, uint64_t cur_bw, uint64_t low_bnd,
		uint64_t up_bnd, int line, uint8_t method)
{
	if (tcp_bblogging_on(rack->rc_tp)) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		memset(&log, 0, sizeof(log));
		log.u_bbr.flex1 = logged;
		log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt;
		log.u_bbr.flex2 <<= 4;
		log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt;
		log.u_bbr.flex2 <<= 4;
		log.u_bbr.flex2 |= rack->rc_gp_incr;
		log.u_bbr.flex2 <<= 4;
		log.u_bbr.flex2 |= rack->rc_gp_bwred;
		log.u_bbr.flex3 = rack->rc_gp_incr;
		log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss;
		log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca;
		log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec;
		log.u_bbr.flex7 = rack->rc_gp_bwred;
		log.u_bbr.flex8 = method;
		log.u_bbr.cur_del_rate = cur_bw;
		log.u_bbr.delRate = low_bnd;
		log.u_bbr.bw_inuse = up_bnd;
		log.u_bbr.rttProp = rack_get_bw(rack);
		log.u_bbr.pkt_epoch = line;
		log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff;
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
		log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt;
		log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt;
		log.u_bbr.cwnd_gain = rack->rc_dragged_bottom;
		log.u_bbr.cwnd_gain <<= 1;
		log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec;
		log.u_bbr.cwnd_gain <<= 1;
		log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss;
		log.u_bbr.cwnd_gain <<= 1;
		log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca;
		log.u_bbr.lost = rack->r_ctl.rc_loss_count;
		TCP_LOG_EVENTP(rack->rc_tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    TCP_TIMELY_WORK, 0,
		    0, &log, false, &tv);
	}
}

static int
rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult)
{
	/*
	 * Before we increase we need to know if
	 * the estimate just made was less than
	 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est)
	 *
	 * If we already are pacing at a fast enough
	 * rate to push us faster there is no sense in
	 * increasing.
	 *
	 * We first calculate our actual pacing rate (ss or ca multiplier
	 * times our cur_bw).
	 *
	 * Then we take the last measured rate and multiply it by our
	 * maximum pacing overage to give us a max allowable rate.
	 *
	 * If our act_rate is smaller than our max_allowable rate
	 * then we should increase. Else we should hold steady.
	 *
	 */
	uint64_t act_rate, max_allow_rate;

	if (rack_timely_no_stopping)
		return (1);

	if ((cur_bw == 0) || (last_bw_est == 0)) {
		/*
		 * Initial startup case or
		 * everything is acked case.
		 */
		rack_log_timely(rack, mult, cur_bw, 0, 0,
		    __LINE__, 9);
		return (1);
	}
	if (mult <= 100) {
		/*
		 * We can always pace at or slightly above our rate.
		 */
		rack_log_timely(rack, mult, cur_bw, 0, 0,
		    __LINE__, 9);
		return (1);
	}
	act_rate = cur_bw * (uint64_t)mult;
	act_rate /= 100;
	max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100);
	max_allow_rate /= 100;
	if (act_rate < max_allow_rate) {
		/*
		 * Here the rate we are actually pacing at
		 * is smaller than 10% above our last measurement.
		 * This means we are pacing below what we would
		 * like to try to achieve (plus some wiggle room).
		 */
		rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
		    __LINE__, 9);
		return (1);
	} else {
		/*
		 * Here we are already pacing at least rack_max_per_above (10%)
		 * above what we are getting back. This indicates most likely
		 * that we are being limited (cwnd/rwnd/app) and can't
		 * get any more b/w. There is no sense in trying to
		 * raise the pacing rate; it's not speeding us up
		 * and we already are pacing faster than we are getting.
		 */
		rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
		    __LINE__, 8);
		return (0);
	}
}

static void
rack_validate_multipliers_at_or_above100(struct tcp_rack *rack)
{
	/*
	 * When we drag bottom, we want to assure
	 * that no multiplier is below 1.0; if one is,
	 * we want to restore it to at least that.
	 */
	if (rack->r_ctl.rack_per_of_gp_rec < 100) {
		/* This is unlikely, we usually do not touch recovery */
		rack->r_ctl.rack_per_of_gp_rec = 100;
	}
	if (rack->r_ctl.rack_per_of_gp_ca < 100) {
		rack->r_ctl.rack_per_of_gp_ca = 100;
	}
	if (rack->r_ctl.rack_per_of_gp_ss < 100) {
		rack->r_ctl.rack_per_of_gp_ss = 100;
	}
}

static void
rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack)
{
	if (rack->r_ctl.rack_per_of_gp_ca > 100) {
		rack->r_ctl.rack_per_of_gp_ca = 100;
	}
	if (rack->r_ctl.rack_per_of_gp_ss > 100) {
		rack->r_ctl.rack_per_of_gp_ss = 100;
	}
}

static void
rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override)
{
	int32_t calc, logged, plus;

	logged = 0;

	if (rack->rc_skip_timely)
		return;
	if (override) {
		/*
		 * override is passed when we are
		 * losing b/w and making one last
		 * gasp at trying to not lose out
		 * to a new-reno flow.
		 */
		goto extra_boost;
	}
	/* In classic timely we boost by 5x if we have 5 increases in a row, let's not */
	if (rack->rc_gp_incr &&
	    ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) {
		/*
		 * Reset and get 5 strokes more before the boost. Note
		 * that the count is 0 based so we have to add one.
3931 */ 3932 extra_boost: 3933 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3934 rack->rc_gp_timely_inc_cnt = 0; 3935 } else 3936 plus = (uint32_t)rack_gp_increase_per; 3937 /* Must be at least 1% increase for true timely increases */ 3938 if ((plus < 1) && 3939 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3940 plus = 1; 3941 if (rack->rc_gp_saw_rec && 3942 (rack->rc_gp_no_rec_chg == 0) && 3943 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3944 rack->r_ctl.rack_per_of_gp_rec)) { 3945 /* We have been in recovery ding it too */ 3946 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 3947 if (calc > 0xffff) 3948 calc = 0xffff; 3949 logged |= 1; 3950 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 3951 if (rack->r_ctl.rack_per_upper_bound_ca && 3952 (rack->rc_dragged_bottom == 0) && 3953 (rack->r_ctl.rack_per_of_gp_rec > rack->r_ctl.rack_per_upper_bound_ca)) 3954 rack->r_ctl.rack_per_of_gp_rec = rack->r_ctl.rack_per_upper_bound_ca; 3955 } 3956 if (rack->rc_gp_saw_ca && 3957 (rack->rc_gp_saw_ss == 0) && 3958 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3959 rack->r_ctl.rack_per_of_gp_ca)) { 3960 /* In CA */ 3961 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 3962 if (calc > 0xffff) 3963 calc = 0xffff; 3964 logged |= 2; 3965 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 3966 if (rack->r_ctl.rack_per_upper_bound_ca && 3967 (rack->rc_dragged_bottom == 0) && 3968 (rack->r_ctl.rack_per_of_gp_ca > rack->r_ctl.rack_per_upper_bound_ca)) 3969 rack->r_ctl.rack_per_of_gp_ca = rack->r_ctl.rack_per_upper_bound_ca; 3970 } 3971 if (rack->rc_gp_saw_ss && 3972 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3973 rack->r_ctl.rack_per_of_gp_ss)) { 3974 /* In SS */ 3975 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 3976 if (calc > 0xffff) 3977 calc = 0xffff; 3978 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 3979 if (rack->r_ctl.rack_per_upper_bound_ss && 3980 (rack->rc_dragged_bottom == 0) && 3981 (rack->r_ctl.rack_per_of_gp_ss > rack->r_ctl.rack_per_upper_bound_ss)) 3982 rack->r_ctl.rack_per_of_gp_ss = rack->r_ctl.rack_per_upper_bound_ss; 3983 logged |= 4; 3984 } 3985 if (logged && 3986 (rack->rc_gp_incr == 0)){ 3987 /* Go into increment mode */ 3988 rack->rc_gp_incr = 1; 3989 rack->rc_gp_timely_inc_cnt = 0; 3990 } 3991 if (rack->rc_gp_incr && 3992 logged && 3993 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 3994 rack->rc_gp_timely_inc_cnt++; 3995 } 3996 rack_log_timely(rack, logged, plus, 0, 0, 3997 __LINE__, 1); 3998 } 3999 4000 static uint32_t 4001 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 4002 { 4003 /*- 4004 * norm_grad = rtt_diff / minrtt; 4005 * new_per = curper * (1 - B * norm_grad) 4006 * 4007 * B = rack_gp_decrease_per (default 80%) 4008 * rtt_dif = input var current rtt-diff 4009 * curper = input var current percentage 4010 * minrtt = from rack filter 4011 * 4012 * In order to do the floating point calculations above we 4013 * do an integer conversion. 
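	 * For example, with purely illustrative numbers (not taken
	 * from any trace): rtt_diff = 5000 usec, minrtt = 25000 usec
	 * and B = 80 give norm_grad = 0.2, so a curper of 200 becomes
	 * 200 * (1 - 0.8 * 0.2) = 168.
	 *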
The code looks confusing so let me 4014 * translate it into something that use more variables and 4015 * is clearer for us humans :) 4016 * 4017 * uint64_t norm_grad, inverse, reduce_by, final_result; 4018 * uint32_t perf; 4019 * 4020 * norm_grad = (((uint64_t)rtt_diff * 1000000) / 4021 * (uint64_t)get_filter_small(&rack->r_ctl.rc_gp_min_rtt)); 4022 * inverse = ((uint64_t)rack_gp_decrease * (uint64_t)1000000) * norm_grad; 4023 * inverse /= 1000000; 4024 * reduce_by = (1000000 - inverse); 4025 * final_result = (cur_per * reduce_by) / 1000000; 4026 * perf = (uint32_t)final_result; 4027 */ 4028 uint64_t perf; 4029 4030 perf = (((uint64_t)curper * ((uint64_t)1000000 - 4031 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 4032 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 4033 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 4034 (uint64_t)1000000)) / 4035 (uint64_t)1000000); 4036 if (perf > curper) { 4037 /* TSNH */ 4038 perf = curper - 1; 4039 } 4040 return ((uint32_t)perf); 4041 } 4042 4043 static uint32_t 4044 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 4045 { 4046 /* 4047 * highrttthresh 4048 * result = curper * (1 - (B * ( 1 - ------ )) 4049 * gp_srtt 4050 * 4051 * B = rack_gp_decrease_per (default .8 i.e. 80) 4052 * highrttthresh = filter_min * rack_gp_rtt_maxmul 4053 */ 4054 uint64_t perf; 4055 uint32_t highrttthresh; 4056 4057 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 4058 4059 perf = (((uint64_t)curper * ((uint64_t)1000000 - 4060 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 4061 ((uint64_t)highrttthresh * (uint64_t)1000000) / 4062 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 4063 if (tcp_bblogging_on(rack->rc_tp)) { 4064 uint64_t log1; 4065 4066 log1 = rtt; 4067 log1 <<= 32; 4068 log1 |= highrttthresh; 4069 rack_log_timely(rack, 4070 rack_gp_decrease_per, 4071 (uint64_t)curper, 4072 log1, 4073 perf, 4074 __LINE__, 4075 15); 4076 } 4077 return (perf); 4078 } 4079 4080 static void 4081 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 4082 { 4083 uint64_t logvar, logvar2, logvar3; 4084 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 4085 4086 if (rack->rc_skip_timely) 4087 return; 4088 if (rack->rc_gp_incr) { 4089 /* Turn off increment counting */ 4090 rack->rc_gp_incr = 0; 4091 rack->rc_gp_timely_inc_cnt = 0; 4092 } 4093 ss_red = ca_red = rec_red = 0; 4094 logged = 0; 4095 /* Calculate the reduction value */ 4096 if (rtt_diff < 0) { 4097 rtt_diff *= -1; 4098 } 4099 /* Must be at least 1% reduction */ 4100 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 4101 /* We have been in recovery ding it too */ 4102 if (timely_says == 2) { 4103 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 4104 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 4105 if (alt < new_per) 4106 val = alt; 4107 else 4108 val = new_per; 4109 } else 4110 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 4111 if (rack->r_ctl.rack_per_of_gp_rec > val) { 4112 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 4113 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 4114 } else { 4115 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 4116 rec_red = 0; 4117 } 4118 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 4119 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 4120 logged |= 1; 4121 } 4122 if (rack->rc_gp_saw_ss) { 4123 /* Sent in SS */ 4124 if 
(timely_says == 2) { 4125 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 4126 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 4127 if (alt < new_per) 4128 val = alt; 4129 else 4130 val = new_per; 4131 } else 4132 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 4133 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 4134 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 4135 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 4136 } else { 4137 ss_red = new_per; 4138 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 4139 logvar = new_per; 4140 logvar <<= 32; 4141 logvar |= alt; 4142 logvar2 = (uint32_t)rtt; 4143 logvar2 <<= 32; 4144 logvar2 |= (uint32_t)rtt_diff; 4145 logvar3 = rack_gp_rtt_maxmul; 4146 logvar3 <<= 32; 4147 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4148 rack_log_timely(rack, timely_says, 4149 logvar2, logvar3, 4150 logvar, __LINE__, 10); 4151 } 4152 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 4153 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 4154 logged |= 4; 4155 } else if (rack->rc_gp_saw_ca) { 4156 /* Sent in CA */ 4157 if (timely_says == 2) { 4158 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 4159 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 4160 if (alt < new_per) 4161 val = alt; 4162 else 4163 val = new_per; 4164 } else 4165 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 4166 if (rack->r_ctl.rack_per_of_gp_ca > val) { 4167 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 4168 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val; 4169 } else { 4170 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 4171 ca_red = 0; 4172 logvar = new_per; 4173 logvar <<= 32; 4174 logvar |= alt; 4175 logvar2 = (uint32_t)rtt; 4176 logvar2 <<= 32; 4177 logvar2 |= (uint32_t)rtt_diff; 4178 logvar3 = rack_gp_rtt_maxmul; 4179 logvar3 <<= 32; 4180 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4181 rack_log_timely(rack, timely_says, 4182 logvar2, logvar3, 4183 logvar, __LINE__, 10); 4184 } 4185 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 4186 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 4187 logged |= 2; 4188 } 4189 if (rack->rc_gp_timely_dec_cnt < 0x7) { 4190 rack->rc_gp_timely_dec_cnt++; 4191 if (rack_timely_dec_clear && 4192 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 4193 rack->rc_gp_timely_dec_cnt = 0; 4194 } 4195 logvar = ss_red; 4196 logvar <<= 32; 4197 logvar |= ca_red; 4198 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 4199 __LINE__, 2); 4200 } 4201 4202 static void 4203 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 4204 uint32_t rtt, uint32_t line, uint8_t reas) 4205 { 4206 if (tcp_bblogging_on(rack->rc_tp)) { 4207 union tcp_log_stackspecific log; 4208 struct timeval tv; 4209 4210 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4211 log.u_bbr.flex1 = line; 4212 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 4213 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 4214 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 4215 log.u_bbr.flex5 = rtt; 4216 log.u_bbr.flex6 = rack->rc_highly_buffered; 4217 log.u_bbr.flex6 <<= 1; 4218 log.u_bbr.flex6 |= rack->forced_ack; 4219 log.u_bbr.flex6 <<= 1; 4220 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 4221 log.u_bbr.flex6 <<= 1; 4222 log.u_bbr.flex6 |= rack->in_probe_rtt; 4223 log.u_bbr.flex6 <<= 1; 4224 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 
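		/*
		 * flex6 now carries five state bits, MSB to LSB:
		 * rc_highly_buffered, forced_ack, rc_gp_dyn_mul,
		 * in_probe_rtt and measure_saw_probe_rtt.
		 */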
4225 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; 4226 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 4227 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 4228 log.u_bbr.flex8 = reas; 4229 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4230 log.u_bbr.delRate = rack_get_bw(rack); 4231 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 4232 log.u_bbr.cur_del_rate <<= 32; 4233 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 4234 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 4235 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 4236 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 4237 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 4238 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 4239 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 4240 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 4241 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4242 log.u_bbr.rttProp = us_cts; 4243 log.u_bbr.rttProp <<= 32; 4244 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 4245 TCP_LOG_EVENTP(rack->rc_tp, NULL, 4246 &rack->rc_inp->inp_socket->so_rcv, 4247 &rack->rc_inp->inp_socket->so_snd, 4248 BBR_LOG_RTT_SHRINKS, 0, 4249 0, &log, false, &rack->r_ctl.act_rcv_time); 4250 } 4251 } 4252 4253 static void 4254 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 4255 { 4256 uint64_t bwdp; 4257 4258 bwdp = rack_get_bw(rack); 4259 bwdp *= (uint64_t)rtt; 4260 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 4261 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 4262 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) { 4263 /* 4264 * A window protocol must be able to have 4 packets 4265 * outstanding as the floor in order to function 4266 * (especially considering delayed ack :D). 4267 */ 4268 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 4269 } 4270 } 4271 4272 static void 4273 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 4274 { 4275 /** 4276 * ProbeRTT is a bit different in rack_pacing than in 4277 * BBR. It is like BBR in that it uses the lowering of 4278 * the RTT as a signal that we saw something new and 4279 * counts from there for how long between. But it is 4280 * different in that its quite simple. It does not 4281 * play with the cwnd and wait until we get down 4282 * to N segments outstanding and hold that for 4283 * 200ms. Instead it just sets the pacing reduction 4284 * rate to a set percentage (70 by default) and hold 4285 * that for a number of recent GP Srtt's. 4286 */ 4287 uint32_t segsiz; 4288 4289 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 4290 if (rack->rc_gp_dyn_mul == 0) 4291 return; 4292 4293 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 4294 /* We are idle */ 4295 return; 4296 } 4297 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 4298 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 4299 /* 4300 * Stop the goodput now, the idea here is 4301 * that future measurements with in_probe_rtt 4302 * won't register if they are not greater so 4303 * we want to get what info (if any) is available 4304 * now. 
4305 */ 4306 rack_do_goodput_measurement(rack->rc_tp, rack, 4307 rack->rc_tp->snd_una, __LINE__, 4308 RACK_QUALITY_PROBERTT); 4309 } 4310 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 4311 rack->r_ctl.rc_time_probertt_entered = us_cts; 4312 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 4313 rack->r_ctl.rc_pace_min_segs); 4314 rack->in_probe_rtt = 1; 4315 rack->measure_saw_probe_rtt = 1; 4316 rack->r_ctl.rc_time_probertt_starts = 0; 4317 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 4318 if (rack_probertt_use_min_rtt_entry) 4319 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 4320 else 4321 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 4322 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4323 __LINE__, RACK_RTTS_ENTERPROBE); 4324 } 4325 4326 static void 4327 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 4328 { 4329 struct rack_sendmap *rsm; 4330 uint32_t segsiz; 4331 4332 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 4333 rack->r_ctl.rc_pace_min_segs); 4334 rack->in_probe_rtt = 0; 4335 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 4336 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 4337 /* 4338 * Stop the goodput now, the idea here is 4339 * that future measurements with in_probe_rtt 4340 * won't register if they are not greater so 4341 * we want to get what info (if any) is available 4342 * now. 4343 */ 4344 rack_do_goodput_measurement(rack->rc_tp, rack, 4345 rack->rc_tp->snd_una, __LINE__, 4346 RACK_QUALITY_PROBERTT); 4347 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 4348 /* 4349 * We don't have enough data to make a measurement. 4350 * So lets just stop and start here after exiting 4351 * probe-rtt. We probably are not interested in 4352 * the results anyway. 4353 */ 4354 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 4355 } 4356 /* 4357 * Measurements through the current snd_max are going 4358 * to be limited by the slower pacing rate. 4359 * 4360 * We need to mark these as app-limited so we 4361 * don't collapse the b/w. 4362 */ 4363 rsm = tqhash_max(rack->r_ctl.tqh); 4364 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 4365 if (rack->r_ctl.rc_app_limited_cnt == 0) 4366 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 4367 else { 4368 /* 4369 * Go out to the end app limited and mark 4370 * this new one as next and move the end_appl up 4371 * to this guy. 4372 */ 4373 if (rack->r_ctl.rc_end_appl) 4374 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 4375 rack->r_ctl.rc_end_appl = rsm; 4376 } 4377 rsm->r_flags |= RACK_APP_LIMITED; 4378 rack->r_ctl.rc_app_limited_cnt++; 4379 } 4380 /* 4381 * Now, we need to examine our pacing rate multipliers. 4382 * If its under 100%, we need to kick it back up to 4383 * 100%. We also don't let it be over our "max" above 4384 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 4385 * Note setting clamp_atexit_prtt to 0 has the effect 4386 * of setting CA/SS to 100% always at exit (which is 4387 * the default behavior). 4388 */ 4389 if (rack_probertt_clear_is) { 4390 rack->rc_gp_incr = 0; 4391 rack->rc_gp_bwred = 0; 4392 rack->rc_gp_timely_inc_cnt = 0; 4393 rack->rc_gp_timely_dec_cnt = 0; 4394 } 4395 /* Do we do any clamping at exit? 
*/ 4396 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 4397 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 4398 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 4399 } 4400 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 4401 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 4402 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 4403 } 4404 /* 4405 * Lets set rtt_diff to 0, so that we will get a "boost" 4406 * after exiting. 4407 */ 4408 rack->r_ctl.rc_rtt_diff = 0; 4409 4410 /* Clear all flags so we start fresh */ 4411 rack->rc_tp->t_bytes_acked = 0; 4412 rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 4413 /* 4414 * If configured to, set the cwnd and ssthresh to 4415 * our targets. 4416 */ 4417 if (rack_probe_rtt_sets_cwnd) { 4418 uint64_t ebdp; 4419 uint32_t setto; 4420 4421 /* Set ssthresh so we get into CA once we hit our target */ 4422 if (rack_probertt_use_min_rtt_exit == 1) { 4423 /* Set to min rtt */ 4424 rack_set_prtt_target(rack, segsiz, 4425 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 4426 } else if (rack_probertt_use_min_rtt_exit == 2) { 4427 /* Set to current gp rtt */ 4428 rack_set_prtt_target(rack, segsiz, 4429 rack->r_ctl.rc_gp_srtt); 4430 } else if (rack_probertt_use_min_rtt_exit == 3) { 4431 /* Set to entry gp rtt */ 4432 rack_set_prtt_target(rack, segsiz, 4433 rack->r_ctl.rc_entry_gp_rtt); 4434 } else { 4435 uint64_t sum; 4436 uint32_t setval; 4437 4438 sum = rack->r_ctl.rc_entry_gp_rtt; 4439 sum *= 10; 4440 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 4441 if (sum >= 20) { 4442 /* 4443 * A highly buffered path needs 4444 * cwnd space for timely to work. 4445 * Lets set things up as if 4446 * we are heading back here again. 4447 */ 4448 setval = rack->r_ctl.rc_entry_gp_rtt; 4449 } else if (sum >= 15) { 4450 /* 4451 * Lets take the smaller of the 4452 * two since we are just somewhat 4453 * buffered. 4454 */ 4455 setval = rack->r_ctl.rc_gp_srtt; 4456 if (setval > rack->r_ctl.rc_entry_gp_rtt) 4457 setval = rack->r_ctl.rc_entry_gp_rtt; 4458 } else { 4459 /* 4460 * Here we are not highly buffered 4461 * and should pick the min we can to 4462 * keep from causing loss. 
4463 */ 4464 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4465 } 4466 rack_set_prtt_target(rack, segsiz, 4467 setval); 4468 } 4469 if (rack_probe_rtt_sets_cwnd > 1) { 4470 /* There is a percentage here to boost */ 4471 ebdp = rack->r_ctl.rc_target_probertt_flight; 4472 ebdp *= rack_probe_rtt_sets_cwnd; 4473 ebdp /= 100; 4474 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 4475 } else 4476 setto = rack->r_ctl.rc_target_probertt_flight; 4477 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 4478 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 4479 /* Enforce a min */ 4480 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 4481 } 4482 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 4483 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 4484 } 4485 rack_log_rtt_shrinks(rack, us_cts, 4486 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4487 __LINE__, RACK_RTTS_EXITPROBE); 4488 /* Clear times last so log has all the info */ 4489 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 4490 rack->r_ctl.rc_time_probertt_entered = us_cts; 4491 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 4492 rack->r_ctl.rc_time_of_last_probertt = us_cts; 4493 } 4494 4495 static void 4496 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 4497 { 4498 /* Check in on probe-rtt */ 4499 4500 if (rack->rc_gp_filled == 0) { 4501 /* We do not do p-rtt unless we have gp measurements */ 4502 return; 4503 } 4504 if (rack->in_probe_rtt) { 4505 uint64_t no_overflow; 4506 uint32_t endtime, must_stay; 4507 4508 if (rack->r_ctl.rc_went_idle_time && 4509 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 4510 /* 4511 * We went idle during prtt, just exit now. 4512 */ 4513 rack_exit_probertt(rack, us_cts); 4514 } else if (rack_probe_rtt_safety_val && 4515 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 4516 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 4517 /* 4518 * Probe RTT safety value triggered! 4519 */ 4520 rack_log_rtt_shrinks(rack, us_cts, 4521 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4522 __LINE__, RACK_RTTS_SAFETY); 4523 rack_exit_probertt(rack, us_cts); 4524 } 4525 /* Calculate the max we will wait */ 4526 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 4527 if (rack->rc_highly_buffered) 4528 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 4529 /* Calculate the min we must wait */ 4530 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 4531 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 4532 TSTMP_LT(us_cts, endtime)) { 4533 uint32_t calc; 4534 /* Do we lower more? 
*/ 4535 no_exit: 4536 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 4537 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 4538 else 4539 calc = 0; 4540 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 4541 if (calc) { 4542 /* Maybe */ 4543 calc *= rack_per_of_gp_probertt_reduce; 4544 if (calc > rack_per_of_gp_probertt) 4545 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 4546 else 4547 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 4548 /* Limit it too */ 4549 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 4550 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 4551 } 4552 /* We must reach target or the time set */ 4553 return; 4554 } 4555 if (rack->r_ctl.rc_time_probertt_starts == 0) { 4556 if ((TSTMP_LT(us_cts, must_stay) && 4557 rack->rc_highly_buffered) || 4558 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 4559 rack->r_ctl.rc_target_probertt_flight)) { 4560 /* We are not past the must_stay time */ 4561 goto no_exit; 4562 } 4563 rack_log_rtt_shrinks(rack, us_cts, 4564 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4565 __LINE__, RACK_RTTS_REACHTARGET); 4566 rack->r_ctl.rc_time_probertt_starts = us_cts; 4567 if (rack->r_ctl.rc_time_probertt_starts == 0) 4568 rack->r_ctl.rc_time_probertt_starts = 1; 4569 /* Restore back to our rate we want to pace at in prtt */ 4570 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 4571 } 4572 /* 4573 * Setup our end time, some number of gp_srtts plus 200ms. 4574 */ 4575 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 4576 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 4577 if (rack_probertt_gpsrtt_cnt_div) 4578 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 4579 else 4580 endtime = 0; 4581 endtime += rack_min_probertt_hold; 4582 endtime += rack->r_ctl.rc_time_probertt_starts; 4583 if (TSTMP_GEQ(us_cts, endtime)) { 4584 /* yes, exit probertt */ 4585 rack_exit_probertt(rack, us_cts); 4586 } 4587 4588 } else if ((rack->rc_skip_timely == 0) && 4589 (TSTMP_GT(us_cts, rack->r_ctl.rc_lower_rtt_us_cts)) && 4590 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt)) { 4591 /* Go into probertt, its been too long since we went lower */ 4592 rack_enter_probertt(rack, us_cts); 4593 } 4594 } 4595 4596 static void 4597 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 4598 uint32_t rtt, int32_t rtt_diff) 4599 { 4600 uint64_t cur_bw, up_bnd, low_bnd, subfr; 4601 uint32_t losses; 4602 4603 if ((rack->rc_gp_dyn_mul == 0) || 4604 (rack->use_fixed_rate) || 4605 (rack->in_probe_rtt) || 4606 (rack->rc_always_pace == 0)) { 4607 /* No dynamic GP multiplier in play */ 4608 return; 4609 } 4610 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 4611 cur_bw = rack_get_bw(rack); 4612 /* Calculate our up and down range */ 4613 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 4614 up_bnd /= 100; 4615 up_bnd += rack->r_ctl.last_gp_comp_bw; 4616 4617 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 4618 subfr /= 100; 4619 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 4620 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 4621 /* 4622 * This is the case where our RTT is above 4623 * the max target and we have been configured 4624 * to just do timely no bonus up stuff in that case. 4625 * 4626 * There are two configurations, set to 1, and we 4627 * just do timely if we are over our max. 
If its 4628 * set above 1 then we slam the multipliers down 4629 * to 100 and then decrement per timely. 4630 */ 4631 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4632 __LINE__, 3); 4633 if (rack->r_ctl.rc_no_push_at_mrtt > 1) 4634 rack_validate_multipliers_at_or_below_100(rack); 4635 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4636 } else if ((timely_says != 0) && (last_bw_est < low_bnd) && !losses) { 4637 /* 4638 * We are decreasing this is a bit complicated this 4639 * means we are loosing ground. This could be 4640 * because another flow entered and we are competing 4641 * for b/w with it. This will push the RTT up which 4642 * makes timely unusable unless we want to get shoved 4643 * into a corner and just be backed off (the age 4644 * old problem with delay based CC). 4645 * 4646 * On the other hand if it was a route change we 4647 * would like to stay somewhat contained and not 4648 * blow out the buffers. 4649 */ 4650 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4651 __LINE__, 3); 4652 rack->r_ctl.last_gp_comp_bw = cur_bw; 4653 if (rack->rc_gp_bwred == 0) { 4654 /* Go into reduction counting */ 4655 rack->rc_gp_bwred = 1; 4656 rack->rc_gp_timely_dec_cnt = 0; 4657 } 4658 if (rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) { 4659 /* 4660 * Push another time with a faster pacing 4661 * to try to gain back (we include override to 4662 * get a full raise factor). 4663 */ 4664 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || 4665 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || 4666 (timely_says == 0) || 4667 (rack_down_raise_thresh == 0)) { 4668 /* 4669 * Do an override up in b/w if we were 4670 * below the threshold or if the threshold 4671 * is zero we always do the raise. 4672 */ 4673 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1); 4674 } else { 4675 /* Log it stays the same */ 4676 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0, 4677 __LINE__, 11); 4678 } 4679 rack->rc_gp_timely_dec_cnt++; 4680 /* We are not incrementing really no-count */ 4681 rack->rc_gp_incr = 0; 4682 rack->rc_gp_timely_inc_cnt = 0; 4683 } else { 4684 /* 4685 * Lets just use the RTT 4686 * information and give up 4687 * pushing. 4688 */ 4689 goto use_timely; 4690 } 4691 } else if ((timely_says != 2) && 4692 !losses && 4693 (last_bw_est > up_bnd)) { 4694 /* 4695 * We are increasing b/w lets keep going, updating 4696 * our b/w and ignoring any timely input, unless 4697 * of course we are at our max raise (if there is one). 4698 */ 4699 4700 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4701 __LINE__, 3); 4702 rack->r_ctl.last_gp_comp_bw = cur_bw; 4703 if (rack->rc_gp_saw_ss && 4704 rack->r_ctl.rack_per_upper_bound_ss && 4705 (rack->r_ctl.rack_per_of_gp_ss == rack->r_ctl.rack_per_upper_bound_ss)) { 4706 /* 4707 * In cases where we can't go higher 4708 * we should just use timely. 4709 */ 4710 goto use_timely; 4711 } 4712 if (rack->rc_gp_saw_ca && 4713 rack->r_ctl.rack_per_upper_bound_ca && 4714 (rack->r_ctl.rack_per_of_gp_ca == rack->r_ctl.rack_per_upper_bound_ca)) { 4715 /* 4716 * In cases where we can't go higher 4717 * we should just use timely. 
4718 */
4719 goto use_timely;
4720 }
4721 rack->rc_gp_bwred = 0;
4722 rack->rc_gp_timely_dec_cnt = 0;
4723 /* You get a set number of pushes if timely is trying to reduce */
4724 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) {
4725 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
4726 } else {
4727 /* Log it stays the same */
4728 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0,
4729 __LINE__, 12);
4730 }
4731 return;
4732 } else {
4733 /*
4734 * We are staying between the lower and upper range bounds
4735 * so use timely to decide.
4736 */
4737 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
4738 __LINE__, 3);
4739 use_timely:
4740 if (timely_says) {
4741 rack->rc_gp_incr = 0;
4742 rack->rc_gp_timely_inc_cnt = 0;
4743 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) &&
4744 !losses &&
4745 (last_bw_est < low_bnd)) {
4746 /* We are losing ground */
4747 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
4748 rack->rc_gp_timely_dec_cnt++;
4749 /* We are not incrementing really no-count */
4750 rack->rc_gp_incr = 0;
4751 rack->rc_gp_timely_inc_cnt = 0;
4752 } else
4753 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
4754 } else {
4755 rack->rc_gp_bwred = 0;
4756 rack->rc_gp_timely_dec_cnt = 0;
4757 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
4758 }
4759 }
4760 }
4761
4762 static int32_t
4763 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt)
4764 {
4765 int32_t timely_says;
4766 uint64_t log_mult, log_rtt_a_diff;
4767
4768 log_rtt_a_diff = rtt;
4769 log_rtt_a_diff <<= 32;
4770 log_rtt_a_diff |= (uint32_t)rtt_diff;
4771 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) *
4772 rack_gp_rtt_maxmul)) {
4773 /* Reduce the b/w multiplier */
4774 timely_says = 2;
4775 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
4776 log_mult <<= 32;
4777 log_mult |= prev_rtt;
4778 rack_log_timely(rack, timely_says, log_mult,
4779 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4780 log_rtt_a_diff, __LINE__, 4);
4781 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
4782 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
4783 max(rack_gp_rtt_mindiv , 1)))) {
4784 /* Increase the b/w multiplier */
4785 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
4786 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
4787 max(rack_gp_rtt_mindiv , 1));
4788 log_mult <<= 32;
4789 log_mult |= prev_rtt;
4790 timely_says = 0;
4791 rack_log_timely(rack, timely_says, log_mult ,
4792 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
4793 log_rtt_a_diff, __LINE__, 5);
4794 } else {
4795 /*
4796 * Use a gradient to decide; the timely gradient
4797 * is:
4798 * grad = rc_rtt_diff / min_rtt;
4799 *
4800 * anything below or equal to 0 will be
4801 * an increase indication. Anything above
4802 * zero is a decrease. Note we take care
4803 * of the actual gradient calculation
4804 * in the reduction (it's not needed for
4805 * increase).
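 *
 * Illustrative example (hypothetical values, not the sysctl defaults):
 * with a filtered min_rtt of 10ms, rack_gp_rtt_maxmul of 3 and a
 * minmul/mindiv ratio of 1/4, an rtt at or above 30ms returns 2
 * (reduce), an rtt at or below 12.5ms returns 0 (increase), and
 * anything in between is decided by the sign of rtt_diff here.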
4806 */ 4807 log_mult = prev_rtt; 4808 if (rtt_diff <= 0) { 4809 /* 4810 * Rttdiff is less than zero, increase the 4811 * b/w multiplier (its 0 or negative) 4812 */ 4813 timely_says = 0; 4814 rack_log_timely(rack, timely_says, log_mult, 4815 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4816 } else { 4817 /* Reduce the b/w multiplier */ 4818 timely_says = 1; 4819 rack_log_timely(rack, timely_says, log_mult, 4820 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4821 } 4822 } 4823 return (timely_says); 4824 } 4825 4826 static __inline int 4827 rack_in_gp_window(struct tcpcb *tp, struct rack_sendmap *rsm) 4828 { 4829 if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && 4830 SEQ_LEQ(rsm->r_end, tp->gput_ack)) { 4831 /** 4832 * This covers the case that the 4833 * resent is completely inside 4834 * the gp range or up to it. 4835 * |----------------| 4836 * |-----| <or> 4837 * |----| 4838 * <or> |---| 4839 */ 4840 return (1); 4841 } else if (SEQ_LT(rsm->r_start, tp->gput_seq) && 4842 SEQ_GT(rsm->r_end, tp->gput_seq)){ 4843 /** 4844 * This covers the case of 4845 * |--------------| 4846 * |-------->| 4847 */ 4848 return (1); 4849 } else if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && 4850 SEQ_LT(rsm->r_start, tp->gput_ack) && 4851 SEQ_GEQ(rsm->r_end, tp->gput_ack)) { 4852 4853 /** 4854 * This covers the case of 4855 * |--------------| 4856 * |-------->| 4857 */ 4858 return (1); 4859 } 4860 return (0); 4861 } 4862 4863 static __inline void 4864 rack_mark_in_gp_win(struct tcpcb *tp, struct rack_sendmap *rsm) 4865 { 4866 4867 if ((tp->t_flags & TF_GPUTINPROG) == 0) 4868 return; 4869 /* 4870 * We have a Goodput measurement in progress. Mark 4871 * the send if its within the window. If its not 4872 * in the window make sure it does not have the mark. 4873 */ 4874 if (rack_in_gp_window(tp, rsm)) 4875 rsm->r_flags |= RACK_IN_GP_WIN; 4876 else 4877 rsm->r_flags &= ~RACK_IN_GP_WIN; 4878 } 4879 4880 static __inline void 4881 rack_clear_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) 4882 { 4883 /* A GP measurement is ending, clear all marks on the send map*/ 4884 struct rack_sendmap *rsm = NULL; 4885 4886 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 4887 if (rsm == NULL) { 4888 rsm = tqhash_min(rack->r_ctl.tqh); 4889 } 4890 /* Nothing left? */ 4891 while ((rsm != NULL) && (SEQ_GEQ(tp->gput_ack, rsm->r_start))){ 4892 rsm->r_flags &= ~RACK_IN_GP_WIN; 4893 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4894 } 4895 } 4896 4897 4898 static __inline void 4899 rack_tend_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) 4900 { 4901 struct rack_sendmap *rsm = NULL; 4902 4903 if (tp->snd_una == tp->snd_max) { 4904 /* Nothing outstanding yet, nothing to do here */ 4905 return; 4906 } 4907 if (SEQ_GT(tp->gput_seq, tp->snd_una)) { 4908 /* 4909 * We are measuring ahead of some outstanding 4910 * data. We need to walk through up until we get 4911 * to gp_seq marking so that no rsm is set incorrectly 4912 * with RACK_IN_GP_WIN. 4913 */ 4914 rsm = tqhash_min(rack->r_ctl.tqh); 4915 while (rsm != NULL) { 4916 rack_mark_in_gp_win(tp, rsm); 4917 if (SEQ_GEQ(rsm->r_end, tp->gput_seq)) 4918 break; 4919 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4920 } 4921 } 4922 if (rsm == NULL) { 4923 /* 4924 * Need to find the GP seq, if rsm is 4925 * set we stopped as we hit it. 
4926 */ 4927 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 4928 if (rsm == NULL) 4929 return; 4930 rack_mark_in_gp_win(tp, rsm); 4931 } 4932 /* 4933 * Now we may need to mark already sent rsm, ahead of 4934 * gput_seq in the window since they may have been sent 4935 * *before* we started our measurment. The rsm, if non-null 4936 * has been marked (note if rsm would have been NULL we would have 4937 * returned in the previous block). So we go to the next, and continue 4938 * until we run out of entries or we exceed the gp_ack value. 4939 */ 4940 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4941 while (rsm) { 4942 rack_mark_in_gp_win(tp, rsm); 4943 if (SEQ_GT(rsm->r_end, tp->gput_ack)) 4944 break; 4945 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4946 } 4947 } 4948 4949 static void 4950 rack_log_gp_calc(struct tcp_rack *rack, uint32_t add_part, uint32_t sub_part, uint32_t srtt, uint64_t meas_bw, uint64_t utim, uint8_t meth, uint32_t line) 4951 { 4952 if (tcp_bblogging_on(rack->rc_tp)) { 4953 union tcp_log_stackspecific log; 4954 struct timeval tv; 4955 4956 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4957 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4958 log.u_bbr.flex1 = add_part; 4959 log.u_bbr.flex2 = sub_part; 4960 log.u_bbr.flex3 = rack_wma_divisor; 4961 log.u_bbr.flex4 = srtt; 4962 log.u_bbr.flex7 = (uint16_t)line; 4963 log.u_bbr.flex8 = meth; 4964 log.u_bbr.delRate = rack->r_ctl.gp_bw; 4965 log.u_bbr.cur_del_rate = meas_bw; 4966 log.u_bbr.rttProp = utim; 4967 TCP_LOG_EVENTP(rack->rc_tp, NULL, 4968 &rack->rc_inp->inp_socket->so_rcv, 4969 &rack->rc_inp->inp_socket->so_snd, 4970 BBR_LOG_THRESH_CALC, 0, 4971 0, &log, false, &rack->r_ctl.act_rcv_time); 4972 } 4973 } 4974 4975 static void 4976 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 4977 tcp_seq th_ack, int line, uint8_t quality) 4978 { 4979 uint64_t tim, bytes_ps, stim, utim; 4980 uint32_t segsiz, bytes, reqbytes, us_cts; 4981 int32_t gput, new_rtt_diff, timely_says; 4982 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 4983 int did_add = 0; 4984 4985 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4986 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4987 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 4988 tim = us_cts - tp->gput_ts; 4989 else 4990 tim = 0; 4991 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 4992 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 4993 else 4994 stim = 0; 4995 /* 4996 * Use the larger of the send time or ack time. This prevents us 4997 * from being influenced by ack artifacts to come up with too 4998 * high of measurement. Note that since we are spanning over many more 4999 * bytes in most of our measurements hopefully that is less likely to 5000 * occur. 5001 */ 5002 if (tim > stim) 5003 utim = max(tim, 1); 5004 else 5005 utim = max(stim, 1); 5006 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 5007 rack_log_gpset(rack, th_ack, us_cts, rack->r_ctl.rc_gp_cumack_ts, __LINE__, 3, NULL); 5008 if ((tim == 0) && (stim == 0)) { 5009 /* 5010 * Invalid measurement time, maybe 5011 * all on one ack/one send? 5012 */ 5013 bytes = 0; 5014 bytes_ps = 0; 5015 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5016 0, 0, 0, 10, __LINE__, NULL, quality); 5017 goto skip_measurement; 5018 } 5019 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 5020 /* We never made a us_rtt measurement? 
*/ 5021 bytes = 0; 5022 bytes_ps = 0; 5023 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5024 0, 0, 0, 10, __LINE__, NULL, quality); 5025 goto skip_measurement; 5026 } 5027 /* 5028 * Calculate the maximum possible b/w this connection 5029 * could have. We base our calculation on the lowest 5030 * rtt we have seen during the measurement and the 5031 * largest rwnd the client has given us in that time. This 5032 * forms a BDP that is the maximum that we could ever 5033 * get to the client. Anything larger is not valid. 5034 * 5035 * I originally had code here that rejected measurements 5036 * where the time was less than 1/2 the latest us_rtt. 5037 * But after thinking on that I realized its wrong since 5038 * say you had a 150Mbps or even 1Gbps link, and you 5039 * were a long way away.. example I am in Europe (100ms rtt) 5040 * talking to my 1Gbps link in S.C. Now measuring say 150,000 5041 * bytes my time would be 1.2ms, and yet my rtt would say 5042 * the measurement was invalid the time was < 50ms. The 5043 * same thing is true for 150Mb (8ms of time). 5044 * 5045 * A better way I realized is to look at what the maximum 5046 * the connection could possibly do. This is gated on 5047 * the lowest RTT we have seen and the highest rwnd. 5048 * We should in theory never exceed that, if we are 5049 * then something on the path is storing up packets 5050 * and then feeding them all at once to our endpoint 5051 * messing up our measurement. 5052 */ 5053 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 5054 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 5055 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 5056 if (SEQ_LT(th_ack, tp->gput_seq)) { 5057 /* No measurement can be made */ 5058 bytes = 0; 5059 bytes_ps = 0; 5060 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5061 0, 0, 0, 10, __LINE__, NULL, quality); 5062 goto skip_measurement; 5063 } else 5064 bytes = (th_ack - tp->gput_seq); 5065 bytes_ps = (uint64_t)bytes; 5066 /* 5067 * Don't measure a b/w for pacing unless we have gotten at least 5068 * an initial windows worth of data in this measurement interval. 5069 * 5070 * Small numbers of bytes get badly influenced by delayed ack and 5071 * other artifacts. Note we take the initial window or our 5072 * defined minimum GP (defaulting to 10 which hopefully is the 5073 * IW). 5074 */ 5075 if (rack->rc_gp_filled == 0) { 5076 /* 5077 * The initial estimate is special. We 5078 * have blasted out an IW worth of packets 5079 * without a real valid ack ts results. We 5080 * then setup the app_limited_needs_set flag, 5081 * this should get the first ack in (probably 2 5082 * MSS worth) to be recorded as the timestamp. 5083 * We thus allow a smaller number of bytes i.e. 5084 * IW - 2MSS. 5085 */ 5086 reqbytes -= (2 * segsiz); 5087 /* Also lets fill previous for our first measurement to be neutral */ 5088 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 5089 } 5090 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 5091 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5092 rack->r_ctl.rc_app_limited_cnt, 5093 0, 0, 10, __LINE__, NULL, quality); 5094 goto skip_measurement; 5095 } 5096 /* 5097 * We now need to calculate the Timely like status so 5098 * we can update (possibly) the b/w multipliers. 
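 * (The rc_rtt_diff maintained below is an EWMA: outside of probe-rtt
 * each measurement folds in 1/8 of the new gp_srtt minus prev_gp_srtt
 * difference, i.e. rc_rtt_diff = 7/8 * rc_rtt_diff + 1/8 * new_rtt_diff.)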
5099 */ 5100 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 5101 if (rack->rc_gp_filled == 0) { 5102 /* No previous reading */ 5103 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 5104 } else { 5105 if (rack->measure_saw_probe_rtt == 0) { 5106 /* 5107 * We don't want a probertt to be counted 5108 * since it will be negative incorrectly. We 5109 * expect to be reducing the RTT when we 5110 * pace at a slower rate. 5111 */ 5112 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 5113 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 5114 } 5115 } 5116 timely_says = rack_make_timely_judgement(rack, 5117 rack->r_ctl.rc_gp_srtt, 5118 rack->r_ctl.rc_rtt_diff, 5119 rack->r_ctl.rc_prev_gp_srtt 5120 ); 5121 bytes_ps *= HPTS_USEC_IN_SEC; 5122 bytes_ps /= utim; 5123 if (bytes_ps > rack->r_ctl.last_max_bw) { 5124 /* 5125 * Something is on path playing 5126 * since this b/w is not possible based 5127 * on our BDP (highest rwnd and lowest rtt 5128 * we saw in the measurement window). 5129 * 5130 * Another option here would be to 5131 * instead skip the measurement. 5132 */ 5133 rack_log_pacing_delay_calc(rack, bytes, reqbytes, 5134 bytes_ps, rack->r_ctl.last_max_bw, 0, 5135 11, __LINE__, NULL, quality); 5136 bytes_ps = rack->r_ctl.last_max_bw; 5137 } 5138 /* We store gp for b/w in bytes per second */ 5139 if (rack->rc_gp_filled == 0) { 5140 /* Initial measurement */ 5141 if (bytes_ps) { 5142 rack->r_ctl.gp_bw = bytes_ps; 5143 rack->rc_gp_filled = 1; 5144 rack->r_ctl.num_measurements = 1; 5145 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 5146 } else { 5147 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5148 rack->r_ctl.rc_app_limited_cnt, 5149 0, 0, 10, __LINE__, NULL, quality); 5150 } 5151 if (tcp_in_hpts(rack->rc_tp) && 5152 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 5153 /* 5154 * Ok we can't trust the pacer in this case 5155 * where we transition from un-paced to paced. 5156 * Or for that matter when the burst mitigation 5157 * was making a wild guess and got it wrong. 5158 * Stop the pacer and clear up all the aggregate 5159 * delays etc. 5160 */ 5161 tcp_hpts_remove(rack->rc_tp); 5162 rack->r_ctl.rc_hpts_flags = 0; 5163 rack->r_ctl.rc_last_output_to = 0; 5164 } 5165 did_add = 2; 5166 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { 5167 /* Still a small number run an average */ 5168 rack->r_ctl.gp_bw += bytes_ps; 5169 addpart = rack->r_ctl.num_measurements; 5170 rack->r_ctl.num_measurements++; 5171 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 5172 /* We have collected enough to move forward */ 5173 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; 5174 } 5175 rack_set_pace_segments(tp, rack, __LINE__, NULL); 5176 did_add = 3; 5177 } else { 5178 /* 5179 * We want to take 1/wma of the goodput and add in to 7/8th 5180 * of the old value weighted by the srtt. So if your measurement 5181 * period is say 2 SRTT's long you would get 1/4 as the 5182 * value, if it was like 1/2 SRTT then you would get 1/16th. 5183 * 5184 * But we must be careful not to take too much i.e. if the 5185 * srtt is say 20ms and the measurement is taken over 5186 * 400ms our weight would be 400/20 i.e. 20. On the 5187 * other hand if we get a measurement over 1ms with a 5188 * 10ms rtt we only want to take a much smaller portion. 
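 *
 * Worked example for the non-dynamic branch below (illustrative
 * numbers only): with gp_bw = 100, a new measurement bytes_ps = 120,
 * utim = 2ms and srtt = 10ms, subpart = 100 * 2 / (10 * 8) = 2 and
 * addpart = 120 * 2 / (10 * 8) = 3, so gp_bw moves from 100 to
 * 100 - 2 + 3 = 101; a short measurement only nudges the average.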
5189 */
5190 uint8_t meth;
5191
5192 if (rack->r_ctl.num_measurements < 0xff) {
5193 rack->r_ctl.num_measurements++;
5194 }
5195 srtt = (uint64_t)tp->t_srtt;
5196 if (srtt == 0) {
5197 /*
5198 * Strange, why did t_srtt go back to zero?
5199 */
5200 if (rack->r_ctl.rc_rack_min_rtt)
5201 srtt = rack->r_ctl.rc_rack_min_rtt;
5202 else
5203 srtt = HPTS_USEC_IN_MSEC;
5204 }
5205 /*
5206 * XXXrrs: Note for reviewers, in playing with
5207 * dynamic pacing I discovered that this GP calculation,
5208 * as done originally, leads to some undesired results.
5209 * Basically you can get longer measurements contributing
5210 * too much to the WMA. Thus I changed it so that, if you are doing
5211 * dynamic adjustments, we only do the apportioned adjustment
5212 * if we have a very small (time wise) measurement. Longer
5213 * measurements just get their weight (defaulting to 1/8)
5214 * added to the WMA. We may want to think about changing
5215 * this to always do that for both sides i.e. dynamic
5216 * and non-dynamic... but considering lots of folks
5217 * were playing with this I did not want to change the
5218 * calculation per se without your thoughts... Lawrence?
5219 * Peter??
5220 */
5221 if (rack->rc_gp_dyn_mul == 0) {
5222 subpart = rack->r_ctl.gp_bw * utim;
5223 subpart /= (srtt * 8);
5224 if (subpart < (rack->r_ctl.gp_bw / 2)) {
5225 /*
5226 * The b/w update takes no more
5227 * away than 1/2 our running total
5228 * so factor it in.
5229 */
5230 addpart = bytes_ps * utim;
5231 addpart /= (srtt * 8);
5232 meth = 1;
5233 } else {
5234 /*
5235 * Don't allow a single measurement
5236 * to account for more than 1/2 of the
5237 * WMA. This could happen on a retransmission
5238 * where utim becomes huge compared to
5239 * srtt (multiple retransmissions when using
5240 * the sending rate which factors in all the
5241 * transmissions from the first one).
5242 */
5243 subpart = rack->r_ctl.gp_bw / 2;
5244 addpart = bytes_ps / 2;
5245 meth = 2;
5246 }
5247 rack_log_gp_calc(rack, addpart, subpart, srtt, bytes_ps, utim, meth, __LINE__);
5248 resid_bw = rack->r_ctl.gp_bw - subpart;
5249 rack->r_ctl.gp_bw = resid_bw + addpart;
5250 did_add = 1;
5251 } else {
5252 if ((utim / srtt) <= 1) {
5253 /*
5254 * The b/w update was over a small period
5255 * of time. The idea here is to prevent a small
5256 * measurement time period from counting
5257 * too much. So we scale it based on the
5258 * time so it attributes less than 1/rack_wma_divisor
5259 * of its measurement.
5260 */
5261 subpart = rack->r_ctl.gp_bw * utim;
5262 subpart /= (srtt * rack_wma_divisor);
5263 addpart = bytes_ps * utim;
5264 addpart /= (srtt * rack_wma_divisor);
5265 meth = 3;
5266 } else {
5267 /*
5268 * The scaled measurement was long
5269 * enough so let's just add in the
5270 * portion of the measurement i.e. 1/rack_wma_divisor
5271 */
5272 subpart = rack->r_ctl.gp_bw / rack_wma_divisor;
5273 addpart = bytes_ps / rack_wma_divisor;
5274 meth = 4;
5275 }
5276 if ((rack->measure_saw_probe_rtt == 0) ||
5277 (bytes_ps > rack->r_ctl.gp_bw)) {
5278 /*
5279 * For probe-rtt we only add it in
5280 * if it's larger, all others we just
5281 * add in.
5282 */
5283 did_add = 1;
5284 rack_log_gp_calc(rack, addpart, subpart, srtt, bytes_ps, utim, meth, __LINE__);
5285 resid_bw = rack->r_ctl.gp_bw - subpart;
5286 rack->r_ctl.gp_bw = resid_bw + addpart;
5287 }
5288 }
5289 rack_set_pace_segments(tp, rack, __LINE__, NULL);
5290 }
5291 /*
5292 * We only watch the growth of the GP during the initial startup
5293 * or first-slowstart that ensues.
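 * (For reference, the rise test further down scales the new estimate
 * as gp_est * 1000 / last_gpest and only records a new rise round when
 * that per-mille ratio exceeds gp_gain_req.)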
If we ever needed to watch 5294 * growth of gp outside of that period all we need to do is 5295 * remove the first clause of this if (rc_initial_ss_comp). 5296 */ 5297 if ((rack->rc_initial_ss_comp == 0) && 5298 (rack->r_ctl.num_measurements >= RACK_REQ_AVG)) { 5299 uint64_t gp_est; 5300 5301 gp_est = bytes_ps; 5302 if (tcp_bblogging_on(rack->rc_tp)) { 5303 union tcp_log_stackspecific log; 5304 struct timeval tv; 5305 5306 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5307 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5308 log.u_bbr.flex1 = rack->r_ctl.current_round; 5309 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; 5310 log.u_bbr.delRate = gp_est; 5311 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; 5312 log.u_bbr.flex8 = 41; 5313 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5314 0, &log, false, NULL, __func__, __LINE__,&tv); 5315 } 5316 if ((rack->r_ctl.num_measurements == RACK_REQ_AVG) || 5317 (rack->r_ctl.last_gpest == 0)) { 5318 /* 5319 * The round we get our measurement averaging going 5320 * is the base round so it always is the source point 5321 * for when we had our first increment. From there on 5322 * we only record the round that had a rise. 5323 */ 5324 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; 5325 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; 5326 } else if (gp_est >= rack->r_ctl.last_gpest) { 5327 /* 5328 * Test to see if its gone up enough 5329 * to set the round count up to now. Note 5330 * that on the seeding of the 4th measurement we 5331 */ 5332 gp_est *= 1000; 5333 gp_est /= rack->r_ctl.last_gpest; 5334 if ((uint32_t)gp_est > rack->r_ctl.gp_gain_req) { 5335 /* 5336 * We went up enough to record the round. 5337 */ 5338 if (tcp_bblogging_on(rack->rc_tp)) { 5339 union tcp_log_stackspecific log; 5340 struct timeval tv; 5341 5342 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5343 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5344 log.u_bbr.flex1 = rack->r_ctl.current_round; 5345 log.u_bbr.flex2 = (uint32_t)gp_est; 5346 log.u_bbr.flex3 = rack->r_ctl.gp_gain_req; 5347 log.u_bbr.delRate = gp_est; 5348 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; 5349 log.u_bbr.flex8 = 42; 5350 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5351 0, &log, false, NULL, __func__, __LINE__,&tv); 5352 } 5353 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; 5354 if (rack->r_ctl.use_gp_not_last == 1) 5355 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; 5356 else 5357 rack->r_ctl.last_gpest = bytes_ps; 5358 } 5359 } 5360 } 5361 if ((rack->gp_ready == 0) && 5362 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 5363 /* We have enough measurements now */ 5364 rack->gp_ready = 1; 5365 if (rack->dgp_on || 5366 rack->rack_hibeta) 5367 rack_set_cc_pacing(rack); 5368 if (rack->defer_options) 5369 rack_apply_deferred_options(rack); 5370 } 5371 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 5372 rack_get_bw(rack), 22, did_add, NULL, quality); 5373 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 5374 5375 if ((rack->measure_saw_probe_rtt == 0) && 5376 rack->rc_gp_rtt_set) { 5377 if (rack->rc_skip_timely == 0) { 5378 rack_update_multiplier(rack, timely_says, bytes_ps, 5379 rack->r_ctl.rc_gp_srtt, 5380 rack->r_ctl.rc_rtt_diff); 5381 } 5382 } 5383 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 5384 rack_get_bw(rack), 3, line, NULL, quality); 5385 rack_log_pacing_delay_calc(rack, 5386 bytes, /* flex2 */ 5387 tim, /* flex1 */ 5388 bytes_ps, /* bw_inuse */ 5389 rack->r_ctl.gp_bw, /* delRate */ 
5390 rack_get_lt_bw(rack), /* rttProp */ 5391 20, line, NULL, 0); 5392 /* reset the gp srtt and setup the new prev */ 5393 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 5394 /* Record the lost count for the next measurement */ 5395 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 5396 skip_measurement: 5397 /* 5398 * We restart our diffs based on the gpsrtt in the 5399 * measurement window. 5400 */ 5401 rack->rc_gp_rtt_set = 0; 5402 rack->rc_gp_saw_rec = 0; 5403 rack->rc_gp_saw_ca = 0; 5404 rack->rc_gp_saw_ss = 0; 5405 rack->rc_dragged_bottom = 0; 5406 if (quality == RACK_QUALITY_HIGH) { 5407 /* 5408 * Gput in the stats world is in kbps where bytes_ps is 5409 * bytes per second so we do ((x * 8)/ 1000). 5410 */ 5411 gput = (int32_t)((bytes_ps << 3) / (uint64_t)1000); 5412 #ifdef STATS 5413 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 5414 gput); 5415 /* 5416 * XXXLAS: This is a temporary hack, and should be 5417 * chained off VOI_TCP_GPUT when stats(9) grows an 5418 * API to deal with chained VOIs. 5419 */ 5420 if (tp->t_stats_gput_prev > 0) 5421 stats_voi_update_abs_s32(tp->t_stats, 5422 VOI_TCP_GPUT_ND, 5423 ((gput - tp->t_stats_gput_prev) * 100) / 5424 tp->t_stats_gput_prev); 5425 #endif 5426 tp->t_stats_gput_prev = gput; 5427 } 5428 tp->t_flags &= ~TF_GPUTINPROG; 5429 /* 5430 * Now are we app limited now and there is space from where we 5431 * were to where we want to go? 5432 * 5433 * We don't do the other case i.e. non-applimited here since 5434 * the next send will trigger us picking up the missing data. 5435 */ 5436 if (rack->r_ctl.rc_first_appl && 5437 TCPS_HAVEESTABLISHED(tp->t_state) && 5438 rack->r_ctl.rc_app_limited_cnt && 5439 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 5440 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > 5441 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 5442 /* 5443 * Yep there is enough outstanding to make a measurement here. 5444 */ 5445 struct rack_sendmap *rsm; 5446 5447 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 5448 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 5449 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 5450 rack->app_limited_needs_set = 0; 5451 tp->gput_seq = th_ack; 5452 if (rack->in_probe_rtt) 5453 rack->measure_saw_probe_rtt = 1; 5454 else if ((rack->measure_saw_probe_rtt) && 5455 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 5456 rack->measure_saw_probe_rtt = 0; 5457 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { 5458 /* There is a full window to gain info from */ 5459 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 5460 } else { 5461 /* We can only measure up to the applimited point */ 5462 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); 5463 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 5464 /* 5465 * We don't have enough to make a measurement. 5466 */ 5467 tp->t_flags &= ~TF_GPUTINPROG; 5468 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 5469 0, 0, 0, 6, __LINE__, NULL, quality); 5470 return; 5471 } 5472 } 5473 if (tp->t_state >= TCPS_FIN_WAIT_1) { 5474 /* 5475 * We will get no more data into the SB 5476 * this means we need to have the data available 5477 * before we start a measurement. 5478 */ 5479 if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) { 5480 /* Nope not enough data. 
*/ 5481 return; 5482 } 5483 } 5484 tp->t_flags |= TF_GPUTINPROG; 5485 /* 5486 * Now we need to find the timestamp of the send at tp->gput_seq 5487 * for the send based measurement. 5488 */ 5489 rack->r_ctl.rc_gp_cumack_ts = 0; 5490 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 5491 if (rsm) { 5492 /* Ok send-based limit is set */ 5493 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 5494 /* 5495 * Move back to include the earlier part 5496 * so our ack time lines up right (this may 5497 * make an overlapping measurement but thats 5498 * ok). 5499 */ 5500 tp->gput_seq = rsm->r_start; 5501 } 5502 if (rsm->r_flags & RACK_ACKED) { 5503 struct rack_sendmap *nrsm; 5504 5505 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 5506 tp->gput_seq = rsm->r_end; 5507 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); 5508 if (nrsm) 5509 rsm = nrsm; 5510 else { 5511 rack->app_limited_needs_set = 1; 5512 } 5513 } else 5514 rack->app_limited_needs_set = 1; 5515 /* We always go from the first send */ 5516 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; 5517 } else { 5518 /* 5519 * If we don't find the rsm due to some 5520 * send-limit set the current time, which 5521 * basically disables the send-limit. 5522 */ 5523 struct timeval tv; 5524 5525 microuptime(&tv); 5526 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 5527 } 5528 rack_tend_gp_marks(tp, rack); 5529 rack_log_pacing_delay_calc(rack, 5530 tp->gput_seq, 5531 tp->gput_ack, 5532 (uintptr_t)rsm, 5533 tp->gput_ts, 5534 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 5535 9, 5536 __LINE__, rsm, quality); 5537 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 5538 } else { 5539 /* 5540 * To make sure proper timestamp merging occurs, we need to clear 5541 * all GP marks if we don't start a measurement. 5542 */ 5543 rack_clear_gp_marks(tp, rack); 5544 } 5545 } 5546 5547 /* 5548 * CC wrapper hook functions 5549 */ 5550 static void 5551 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 5552 uint16_t type, int32_t post_recovery) 5553 { 5554 uint32_t prior_cwnd, acked; 5555 struct tcp_log_buffer *lgb = NULL; 5556 uint8_t labc_to_use, quality; 5557 5558 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5559 tp->t_ccv.nsegs = nsegs; 5560 acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una); 5561 if ((post_recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 5562 uint32_t max; 5563 5564 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 5565 if (tp->t_ccv.bytes_this_ack > max) { 5566 tp->t_ccv.bytes_this_ack = max; 5567 } 5568 } 5569 #ifdef STATS 5570 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 5571 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 5572 #endif 5573 if ((th_ack == tp->snd_max) && rack->lt_bw_up) { 5574 /* 5575 * We will ack all the data, time to end any 5576 * lt_bw_up we have running until something 5577 * new is sent. Note we need to use the actual 5578 * ack_rcv_time which with pacing may be different. 
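 * (Sketch of the bookkeeping below: the long-term sample grows by
 * snd_max - lt_seq bytes and by tmark - lt_timemark in time, so the
 * lt_bw estimate consumed elsewhere is effectively
 * lt_bw_bytes / lt_bw_time.)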
5579 */ 5580 uint64_t tmark; 5581 5582 rack->r_ctl.lt_bw_bytes += (tp->snd_max - rack->r_ctl.lt_seq); 5583 rack->r_ctl.lt_seq = tp->snd_max; 5584 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 5585 if (tmark >= rack->r_ctl.lt_timemark) { 5586 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 5587 } 5588 rack->r_ctl.lt_timemark = tmark; 5589 rack->lt_bw_up = 0; 5590 } 5591 quality = RACK_QUALITY_NONE; 5592 if ((tp->t_flags & TF_GPUTINPROG) && 5593 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { 5594 /* Measure the Goodput */ 5595 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); 5596 } 5597 /* Which way our we limited, if not cwnd limited no advance in CA */ 5598 if (tp->snd_cwnd <= tp->snd_wnd) 5599 tp->t_ccv.flags |= CCF_CWND_LIMITED; 5600 else 5601 tp->t_ccv.flags &= ~CCF_CWND_LIMITED; 5602 if (tp->snd_cwnd > tp->snd_ssthresh) { 5603 tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack, 5604 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 5605 /* For the setting of a window past use the actual scwnd we are using */ 5606 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 5607 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 5608 tp->t_ccv.flags |= CCF_ABC_SENTAWND; 5609 } 5610 } else { 5611 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 5612 tp->t_bytes_acked = 0; 5613 } 5614 prior_cwnd = tp->snd_cwnd; 5615 if ((post_recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 5616 (rack_client_low_buf && rack->client_bufferlvl && 5617 (rack->client_bufferlvl < rack_client_low_buf))) 5618 labc_to_use = rack->rc_labc; 5619 else 5620 labc_to_use = rack_max_abc_post_recovery; 5621 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5622 union tcp_log_stackspecific log; 5623 struct timeval tv; 5624 5625 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5626 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5627 log.u_bbr.flex1 = th_ack; 5628 log.u_bbr.flex2 = tp->t_ccv.flags; 5629 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 5630 log.u_bbr.flex4 = tp->t_ccv.nsegs; 5631 log.u_bbr.flex5 = labc_to_use; 5632 log.u_bbr.flex6 = prior_cwnd; 5633 log.u_bbr.flex7 = V_tcp_do_newsack; 5634 log.u_bbr.flex8 = 1; 5635 lgb = tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5636 0, &log, false, NULL, __func__, __LINE__,&tv); 5637 } 5638 if (CC_ALGO(tp)->ack_received != NULL) { 5639 /* XXXLAS: Find a way to live without this */ 5640 tp->t_ccv.curack = th_ack; 5641 tp->t_ccv.labc = labc_to_use; 5642 tp->t_ccv.flags |= CCF_USE_LOCAL_ABC; 5643 CC_ALGO(tp)->ack_received(&tp->t_ccv, type); 5644 } 5645 if (lgb) { 5646 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; 5647 } 5648 if (rack->r_must_retran) { 5649 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { 5650 /* 5651 * We now are beyond the rxt point so lets disable 5652 * the flag. 5653 */ 5654 rack->r_ctl.rc_out_at_rto = 0; 5655 rack->r_must_retran = 0; 5656 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { 5657 /* 5658 * Only decrement the rc_out_at_rto if the cwnd advances 5659 * at least a whole segment. Otherwise next time the peer 5660 * acks, we won't be able to send this generaly happens 5661 * when we are in Congestion Avoidance. 
5662 */ 5663 if (acked <= rack->r_ctl.rc_out_at_rto){ 5664 rack->r_ctl.rc_out_at_rto -= acked; 5665 } else { 5666 rack->r_ctl.rc_out_at_rto = 0; 5667 } 5668 } 5669 } 5670 #ifdef STATS 5671 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); 5672 #endif 5673 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { 5674 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; 5675 } 5676 if ((rack->rc_initial_ss_comp == 0) && 5677 (tp->snd_cwnd >= tp->snd_ssthresh)) { 5678 /* 5679 * The cwnd has grown beyond ssthresh we have 5680 * entered ca and completed our first Slowstart. 5681 */ 5682 rack->rc_initial_ss_comp = 1; 5683 } 5684 } 5685 5686 static void 5687 tcp_rack_partialack(struct tcpcb *tp) 5688 { 5689 struct tcp_rack *rack; 5690 5691 rack = (struct tcp_rack *)tp->t_fb_ptr; 5692 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5693 /* 5694 * If we are doing PRR and have enough 5695 * room to send <or> we are pacing and prr 5696 * is disabled we will want to see if we 5697 * can send data (by setting r_wanted_output to 5698 * true). 5699 */ 5700 if ((rack->r_ctl.rc_prr_sndcnt > 0) || 5701 rack->rack_no_prr) 5702 rack->r_wanted_output = 1; 5703 } 5704 5705 static inline uint64_t 5706 rack_get_rxt_per(uint64_t snds, uint64_t rxts) 5707 { 5708 uint64_t rxt_per; 5709 5710 if (snds > 0) { 5711 rxt_per = rxts * 1000; 5712 rxt_per /= snds; 5713 } else { 5714 /* This is an unlikely path */ 5715 if (rxts) { 5716 /* Its the max it was all re-transmits */ 5717 rxt_per = 0xffffffffffffffff; 5718 } else { 5719 rxt_per = 0; 5720 } 5721 } 5722 return (rxt_per); 5723 } 5724 5725 static void 5726 policer_detection_log(struct tcp_rack *rack, uint32_t flex1, uint32_t flex2, uint32_t flex3, uint32_t flex4, uint8_t flex8) 5727 { 5728 if (tcp_bblogging_on(rack->rc_tp)) { 5729 union tcp_log_stackspecific log; 5730 struct timeval tv; 5731 5732 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5733 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5734 log.u_bbr.flex1 = flex1; 5735 log.u_bbr.flex2 = flex2; 5736 log.u_bbr.flex3 = flex3; 5737 log.u_bbr.flex4 = flex4; 5738 log.u_bbr.flex5 = rack->r_ctl.current_policer_bucket; 5739 log.u_bbr.flex6 = rack->r_ctl.policer_bucket_size; 5740 log.u_bbr.flex7 = 0; 5741 log.u_bbr.flex8 = flex8; 5742 log.u_bbr.bw_inuse = rack->r_ctl.policer_bw; 5743 log.u_bbr.applimited = rack->r_ctl.current_round; 5744 log.u_bbr.epoch = rack->r_ctl.policer_max_seg; 5745 log.u_bbr.delivered = (uint32_t)rack->r_ctl.bytes_acked_in_recovery; 5746 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 5747 log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes; 5748 log.u_bbr.rttProp = rack->r_ctl.gp_bw; 5749 log.u_bbr.bbr_state = rack->rc_policer_detected; 5750 log.u_bbr.bbr_substate = 0; 5751 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 5752 log.u_bbr.use_lt_bw = rack->policer_detect_on; 5753 log.u_bbr.lt_epoch = 0; 5754 log.u_bbr.pkts_out = 0; 5755 tcp_log_event(rack->rc_tp, NULL, NULL, NULL, TCP_POLICER_DET, 0, 5756 0, &log, false, NULL, NULL, 0, &tv); 5757 } 5758 5759 } 5760 5761 static void 5762 policer_detection(struct tcpcb *tp, struct tcp_rack *rack, int post_recovery) 5763 { 5764 /* 5765 * Rack excess rxt accounting is turned on. If we 5766 * are above a threshold of rxt's in at least N 5767 * rounds, then back off the cwnd and ssthresh 5768 * to fit into the long-term b/w. 
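 *
 * For scale (illustrative numbers): rack_get_rxt_per() above reports
 * retransmitted bytes per-mille of bytes sent, so 50,000 rxt bytes
 * against 1,000,000 sent bytes gives rxt_per = 50, i.e. 5%.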
5769 */
5770
5771 uint32_t pkts, mid, med, alt_med, avg, segsiz, tot_retran_pkt_count = 0;
5772 uint32_t cnt_of_mape_rxt = 0;
5773 uint64_t snds, rxts, rxt_per, tim, del, del_bw;
5774 int i;
5775 struct timeval tv;
5776
5777
5778 /*
5779 * First, are there enough packets delivered during recovery to make
5780 * a determination of b/w?
5781 */
5782 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
5783 if ((rack->rc_policer_detected == 0) &&
5784 (rack->r_ctl.policer_del_mss > 0) &&
5785 ((uint32_t)rack->r_ctl.policer_del_mss > ((rack->r_ctl.bytes_acked_in_recovery + segsiz - 1)/segsiz))) {
5786 /*
5787 * Not enough data sent in recovery for initial detection. Once
5788 * we have detected a policer we allow less than the threshold (policer_del_mss)
5789 * amount of data in a recovery to let us fall through and double check
5790 * our policer settings and possibly expand or collapse the bucket size and
5791 * the policer b/w.
5792 *
5793 * Once you are declared to be policed, this block of code cannot be
5794 * reached; instead, blocks further down will re-check the policer detection
5795 * triggers and possibly reset the measurements if somehow we have let the
5796 * policer bucket size grow too large.
5797 */
5798 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
5799 policer_detection_log(rack, rack->r_ctl.policer_del_mss,
5800 ((rack->r_ctl.bytes_acked_in_recovery + segsiz - 1)/segsiz),
5801 rack->r_ctl.bytes_acked_in_recovery, segsiz, 18);
5802 }
5803 return;
5804 }
5805 tcp_get_usecs(&tv);
5806 tim = tcp_tv_to_lusectick(&tv) - rack->r_ctl.time_entered_recovery;
5807 del = rack->r_ctl.bytes_acked_in_recovery;
5808 if (tim > 0)
5809 del_bw = (del * (uint64_t)1000000) / tim;
5810 else
5811 del_bw = 0;
5812 /* B/W compensation? */
5813
5814 if (rack->r_ctl.pol_bw_comp && ((rack->r_ctl.policer_bw > 0) ||
5815 (del_bw > 0))) {
5816 /*
5817 * Sanity check now that the data is in. How long does it
5818 * take for us to pace out two of our policer_max_seg's?
5819 *
5820 * If it is longer than the RTT then we are set
5821 * too slow, maybe because of not enough data
5822 * sent during recovery.
5823 */
5824 uint64_t lentime, res, srtt, max_delbw, alt_bw;
5825
5826 srtt = (uint64_t)rack_grab_rtt(tp, rack);
5827 if ((tp->t_srtt > 0) && (srtt > tp->t_srtt))
5828 srtt = tp->t_srtt;
5829 lentime = rack->r_ctl.policer_max_seg * (uint64_t)HPTS_USEC_IN_SEC * 2;
5830 if (del_bw > rack->r_ctl.policer_bw) {
5831 max_delbw = del_bw;
5832 } else {
5833 max_delbw = rack->r_ctl.policer_bw;
5834 }
5835 res = lentime / max_delbw;
5836 if ((srtt > 0) && (res > srtt)) {
5837 /*
5838 * At this rate we cannot get two policer_maxsegs
5839 * out before the ack arrives back.
5840 *
5841 * Let's at least get it raised up so that
5842 * we can be a bit faster than that if possible.
5843 */
5844 lentime = (rack->r_ctl.policer_max_seg * 2);
5845 tim = srtt;
5846 alt_bw = (lentime * (uint64_t)HPTS_USEC_IN_SEC) / tim;
5847 if (alt_bw > max_delbw) {
5848 uint64_t cap_alt_bw;
5849
5850 cap_alt_bw = (max_delbw + (max_delbw * rack->r_ctl.pol_bw_comp));
5851 if ((rack_pol_min_bw > 0) && (cap_alt_bw < rack_pol_min_bw)) {
5852 /* We place a min on the cap which defaults to 1Mbps */
5853 cap_alt_bw = rack_pol_min_bw;
5854 }
5855 if (alt_bw <= cap_alt_bw) {
5856 /* It should be */
5857 del_bw = alt_bw;
5858 policer_detection_log(rack,
5859 (uint32_t)tim,
5860 rack->r_ctl.policer_max_seg,
5861 0,
5862 0,
5863 16);
5864 } else {
5865 /*
5866 * This is an odd case where likely the RTT is very very
5867 * low.
And yet it is still being policed. We don't want 5868 * to get more than (rack_policing_do_bw_comp+1) x del-rate 5869 * where del-rate is what we got in recovery for either the 5870 * first Policer Detection(PD) or this PD we are on now. 5871 */ 5872 del_bw = cap_alt_bw; 5873 policer_detection_log(rack, 5874 (uint32_t)tim, 5875 rack->r_ctl.policer_max_seg, 5876 (uint32_t)max_delbw, 5877 (rack->r_ctl.pol_bw_comp + 1), 5878 16); 5879 } 5880 } 5881 } 5882 } 5883 snds = tp->t_sndbytes - rack->r_ctl.last_policer_sndbytes; 5884 rxts = tp->t_snd_rxt_bytes - rack->r_ctl.last_policer_snd_rxt_bytes; 5885 rxt_per = rack_get_rxt_per(snds, rxts); 5886 /* Figure up the average and median */ 5887 for(i = 0; i < RETRAN_CNT_SIZE; i++) { 5888 if (rack->r_ctl.rc_cnt_of_retran[i] > 0) { 5889 tot_retran_pkt_count += (i + 1) * rack->r_ctl.rc_cnt_of_retran[i]; 5890 cnt_of_mape_rxt += rack->r_ctl.rc_cnt_of_retran[i]; 5891 } 5892 } 5893 if (cnt_of_mape_rxt) 5894 avg = (tot_retran_pkt_count * 10)/cnt_of_mape_rxt; 5895 else 5896 avg = 0; 5897 alt_med = med = 0; 5898 mid = tot_retran_pkt_count/2; 5899 for(i = 0; i < RETRAN_CNT_SIZE; i++) { 5900 pkts = (i + 1) * rack->r_ctl.rc_cnt_of_retran[i]; 5901 if (mid > pkts) { 5902 mid -= pkts; 5903 continue; 5904 } 5905 med = (i + 1); 5906 break; 5907 } 5908 mid = cnt_of_mape_rxt / 2; 5909 for(i = 0; i < RETRAN_CNT_SIZE; i++) { 5910 if (mid > rack->r_ctl.rc_cnt_of_retran[i]) { 5911 mid -= rack->r_ctl.rc_cnt_of_retran[i]; 5912 continue; 5913 } 5914 alt_med = (i + 1); 5915 break; 5916 } 5917 if (rack->r_ctl.policer_alt_median) { 5918 /* Swap the medians */ 5919 uint32_t swap; 5920 5921 swap = med; 5922 med = alt_med; 5923 alt_med = swap; 5924 } 5925 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5926 union tcp_log_stackspecific log; 5927 struct timeval tv; 5928 5929 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5930 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5931 log.u_bbr.flex1 = avg; 5932 log.u_bbr.flex2 = med; 5933 log.u_bbr.flex3 = (uint32_t)rxt_per; 5934 log.u_bbr.flex4 = rack->r_ctl.policer_avg_threshold; 5935 log.u_bbr.flex5 = rack->r_ctl.policer_med_threshold; 5936 log.u_bbr.flex6 = rack->r_ctl.policer_rxt_threshold; 5937 log.u_bbr.flex7 = rack->r_ctl.policer_alt_median; 5938 log.u_bbr.flex8 = 1; 5939 log.u_bbr.delivered = rack->r_ctl.policer_bucket_size; 5940 log.u_bbr.applimited = rack->r_ctl.current_round; 5941 log.u_bbr.epoch = rack->r_ctl.policer_max_seg; 5942 log.u_bbr.bw_inuse = del_bw; 5943 log.u_bbr.cur_del_rate = rxts; 5944 log.u_bbr.delRate = snds; 5945 log.u_bbr.rttProp = rack->r_ctl.gp_bw; 5946 log.u_bbr.bbr_state = rack->rc_policer_detected; 5947 log.u_bbr.bbr_substate = 0; 5948 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 5949 log.u_bbr.use_lt_bw = rack->policer_detect_on; 5950 log.u_bbr.lt_epoch = (uint32_t)tim; 5951 log.u_bbr.pkts_out = rack->r_ctl.bytes_acked_in_recovery; 5952 tcp_log_event(tp, NULL, NULL, NULL, TCP_POLICER_DET, 0, 5953 0, &log, false, NULL, NULL, 0, &tv); 5954 } 5955 if (med == RETRAN_CNT_SIZE) { 5956 /* 5957 * If the median is the maximum, then what we 5958 * likely have here is a network breakage. Either that 5959 * or we are so unlucky that all of our traffic is being 5960 * dropped and having to be retransmitted the maximum times 5961 * and this just is not how a policer works. 5962 * 5963 * If it is truely a policer eventually we will come 5964 * through and it won't be the maximum. 5965 */ 5966 return; 5967 } 5968 /* Has enough rounds progressed for us to re-measure? 
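 * (The three gates below are the per-mille retransmit rate, the
 * average retransmission count scaled by 10, and the median
 * retransmission count taken from the rc_cnt_of_retran histogram.)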
*/
5969 if ((rxt_per >= (uint64_t)rack->r_ctl.policer_rxt_threshold) &&
5970 (avg >= rack->r_ctl.policer_avg_threshold) &&
5971 (med >= rack->r_ctl.policer_med_threshold)) {
5972 /*
5973 * We hit all thresholds that indicate we are
5974 * being policed. Now we may be doing this from a rack timeout
5975 * which then means the rest of recovery will hopefully go
5976 * smoother as we pace. At the end of recovery we will
5977 * fall back in here and reset the values using the
5978 * results of the entire recovery episode (we could also
5979 * hit this as we exit recovery as well which means only
5980 * one time in here).
5981 *
5982 * This is done explicitly so that if we hit the thresholds
5983 * again in a second recovery we overwrite the values. We do
5984 * that because over time, as we pace, the policer_bucket_size may
5985 * continue to grow. This then provides more and more times when
5986 * we are not pacing to the policer rate. This lets us compensate
5987 * for when we hit a false positive and those flows continue to
5988 * increase. However, if it's a real policer we will then get over its
5989 * limit, over time, again and thus end up back here hitting the
5990 * thresholds again.
5991 *
5992 * The alternative would be that, whenever we pace due to
5993 * policing in rack_policed_sending, we add the amount (len) paced to the
5994 * idle_snd_una value (which decreases the amount in last_amount_before_rec
5995 * since that is always [th_ack - idle_snd_una]). This would then prevent
5996 * the policer_bucket_size from growing in additional recovery episodes,
5997 * which would then mean false positives would be pretty much stuck
5998 * after things got back to normal (assuming that what caused the
5999 * false positive was a small network outage).
6000 *
6001 */
6002 tcp_trace_point(rack->rc_tp, TCP_TP_POLICER_DET);
6003 if (rack->rc_policer_detected == 0) {
6004 /*
6005 * Increment the stat that tells us we identified
6006 * a policer only once. Note that if we ever allow
6007 * the flag to be cleared (reverted) then we need
6008 * to adjust this to not do multi-counting.
6009 */ 6010 counter_u64_add(tcp_policer_detected, 1); 6011 } 6012 rack->r_ctl.last_policer_sndbytes = tp->t_sndbytes; 6013 rack->r_ctl.last_policer_snd_rxt_bytes = tp->t_snd_rxt_bytes; 6014 rack->r_ctl.policer_bw = del_bw; 6015 rack->r_ctl.policer_max_seg = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, 6016 rack->r_ctl.policer_bw, 6017 min(ctf_fixed_maxseg(rack->rc_tp), 6018 rack->r_ctl.rc_pace_min_segs), 6019 0, NULL, 6020 NULL, rack->r_ctl.pace_len_divisor); 6021 /* Now what about the policer bucket size */ 6022 rack->r_ctl.policer_bucket_size = rack->r_ctl.last_amount_before_rec; 6023 if (rack->r_ctl.policer_bucket_size < rack->r_ctl.policer_max_seg) { 6024 /* We must be able to send our max-seg or else chaos ensues */ 6025 rack->r_ctl.policer_bucket_size = rack->r_ctl.policer_max_seg * 2; 6026 } 6027 if (rack->rc_policer_detected == 0) 6028 rack->r_ctl.current_policer_bucket = 0; 6029 if (tcp_bblogging_on(rack->rc_tp)) { 6030 union tcp_log_stackspecific log; 6031 struct timeval tv; 6032 6033 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 6034 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 6035 log.u_bbr.flex1 = avg; 6036 log.u_bbr.flex2 = med; 6037 log.u_bbr.flex3 = rxt_per; 6038 log.u_bbr.flex4 = rack->r_ctl.policer_avg_threshold; 6039 log.u_bbr.flex5 = rack->r_ctl.policer_med_threshold; 6040 log.u_bbr.flex6 = rack->r_ctl.policer_rxt_threshold; 6041 log.u_bbr.flex7 = rack->r_ctl.policer_alt_median; 6042 log.u_bbr.flex8 = 2; 6043 log.u_bbr.applimited = rack->r_ctl.current_round; 6044 log.u_bbr.bw_inuse = del_bw; 6045 log.u_bbr.delivered = rack->r_ctl.policer_bucket_size; 6046 log.u_bbr.cur_del_rate = rxts; 6047 log.u_bbr.delRate = snds; 6048 log.u_bbr.rttProp = rack->r_ctl.gp_bw; 6049 log.u_bbr.bbr_state = rack->rc_policer_detected; 6050 log.u_bbr.bbr_substate = 0; 6051 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 6052 log.u_bbr.use_lt_bw = rack->policer_detect_on; 6053 log.u_bbr.epoch = rack->r_ctl.policer_max_seg; 6054 log.u_bbr.lt_epoch = (uint32_t)tim; 6055 log.u_bbr.pkts_out = rack->r_ctl.bytes_acked_in_recovery; 6056 tcp_log_event(tp, NULL, NULL, NULL, TCP_POLICER_DET, 0, 6057 0, &log, false, NULL, NULL, 0, &tv); 6058 /* 6059 * Put out an added log, 19, for the sole purpose 6060 * of getting the txt/rxt so that we can benchmark 6061 * in read-bbrlog the ongoing rxt rate after our 6062 * policer invocation in the HYSTART announcments. 6063 */ 6064 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 6065 log.u_bbr.timeStamp = tcp_tv_to_usectick(&tv); 6066 log.u_bbr.flex1 = alt_med; 6067 log.u_bbr.flex8 = 19; 6068 log.u_bbr.cur_del_rate = tp->t_sndbytes; 6069 log.u_bbr.delRate = tp->t_snd_rxt_bytes; 6070 tcp_log_event(tp, NULL, NULL, NULL, TCP_POLICER_DET, 0, 6071 0, &log, false, NULL, NULL, 0, &tv); 6072 } 6073 /* Turn off any fast output, thats ended */ 6074 rack->r_fast_output = 0; 6075 /* Mark the time for credits */ 6076 rack->r_ctl.last_sendtime = tcp_get_u64_usecs(NULL); 6077 if (rack->r_rr_config < 2) { 6078 /* 6079 * We need to be stricter on the RR config so 6080 * the pacing has priority. 6081 */ 6082 rack->r_rr_config = 2; 6083 } 6084 policer_detection_log(rack, 6085 rack->r_ctl.idle_snd_una, 6086 rack->r_ctl.ack_for_idle, 6087 0, 6088 (uint32_t)tim, 6089 14); 6090 rack->rc_policer_detected = 1; 6091 } else if ((rack->rc_policer_detected == 1) && 6092 (post_recovery == 1)) { 6093 /* 6094 * If we are exiting recovery and have already detected 6095 * we need to possibly update the values. 6096 * 6097 * First: Update the idle -> recovery sent value. 
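 * (In short: grow policer_bucket_size up to last_amount_before_rec if
 * that was larger and then, provided the recovery spanned at least an
 * srtt, raise policer_bw and recompute policer_max_seg when the
 * delivered b/w has grown.)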
6098 */ 6099 uint32_t srtt; 6100 6101 if (rack->r_ctl.last_amount_before_rec > rack->r_ctl.policer_bucket_size) { 6102 rack->r_ctl.policer_bucket_size = rack->r_ctl.last_amount_before_rec; 6103 } 6104 srtt = (uint64_t)rack_grab_rtt(tp, rack); 6105 if ((tp->t_srtt > 0) && (srtt > tp->t_srtt)) 6106 srtt = tp->t_srtt; 6107 if ((srtt != 0) && 6108 (tim < (uint64_t)srtt)) { 6109 /* 6110 * Not long enough. 6111 */ 6112 if (rack_verbose_logging) 6113 policer_detection_log(rack, 6114 (uint32_t)tim, 6115 0, 6116 0, 6117 0, 6118 15); 6119 return; 6120 } 6121 /* 6122 * Finally update the b/w if its grown. 6123 */ 6124 if (del_bw > rack->r_ctl.policer_bw) { 6125 rack->r_ctl.policer_bw = del_bw; 6126 rack->r_ctl.policer_max_seg = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, 6127 rack->r_ctl.policer_bw, 6128 min(ctf_fixed_maxseg(rack->rc_tp), 6129 rack->r_ctl.rc_pace_min_segs), 6130 0, NULL, 6131 NULL, rack->r_ctl.pace_len_divisor); 6132 if (rack->r_ctl.policer_bucket_size < rack->r_ctl.policer_max_seg) { 6133 /* We must be able to send our max-seg or else chaos ensues */ 6134 rack->r_ctl.policer_bucket_size = rack->r_ctl.policer_max_seg * 2; 6135 } 6136 } 6137 policer_detection_log(rack, 6138 rack->r_ctl.idle_snd_una, 6139 rack->r_ctl.ack_for_idle, 6140 0, 6141 (uint32_t)tim, 6142 3); 6143 } 6144 } 6145 6146 static void 6147 rack_exit_recovery(struct tcpcb *tp, struct tcp_rack *rack, int how) 6148 { 6149 /* now check with the policer if on */ 6150 if (rack->policer_detect_on == 1) { 6151 policer_detection(tp, rack, 1); 6152 } 6153 /* 6154 * Now exit recovery, note we must do the idle set after the policer_detection 6155 * to get the amount acked prior to recovery correct. 6156 */ 6157 rack->r_ctl.idle_snd_una = tp->snd_una; 6158 EXIT_RECOVERY(tp->t_flags); 6159 } 6160 6161 static void 6162 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack) 6163 { 6164 struct tcp_rack *rack; 6165 uint32_t orig_cwnd; 6166 6167 orig_cwnd = tp->snd_cwnd; 6168 INP_WLOCK_ASSERT(tptoinpcb(tp)); 6169 rack = (struct tcp_rack *)tp->t_fb_ptr; 6170 /* only alert CC if we alerted when we entered */ 6171 if (CC_ALGO(tp)->post_recovery != NULL) { 6172 tp->t_ccv.curack = th_ack; 6173 CC_ALGO(tp)->post_recovery(&tp->t_ccv); 6174 if (tp->snd_cwnd < tp->snd_ssthresh) { 6175 /* 6176 * Rack has burst control and pacing 6177 * so lets not set this any lower than 6178 * snd_ssthresh per RFC-6582 (option 2). 6179 */ 6180 tp->snd_cwnd = tp->snd_ssthresh; 6181 } 6182 } 6183 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 6184 union tcp_log_stackspecific log; 6185 struct timeval tv; 6186 6187 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 6188 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 6189 log.u_bbr.flex1 = th_ack; 6190 log.u_bbr.flex2 = tp->t_ccv.flags; 6191 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 6192 log.u_bbr.flex4 = tp->t_ccv.nsegs; 6193 log.u_bbr.flex5 = V_tcp_abc_l_var; 6194 log.u_bbr.flex6 = orig_cwnd; 6195 log.u_bbr.flex7 = V_tcp_do_newsack; 6196 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 6197 log.u_bbr.flex8 = 2; 6198 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 6199 0, &log, false, NULL, __func__, __LINE__, &tv); 6200 } 6201 if ((rack->rack_no_prr == 0) && 6202 (rack->no_prr_addback == 0) && 6203 (rack->r_ctl.rc_prr_sndcnt > 0)) { 6204 /* 6205 * Suck the next prr cnt back into cwnd, but 6206 * only do that if we are not application limited. 
6207 */ 6208 if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) { 6209 /* 6210 * We are allowed to add back to the cwnd the amount we did 6211 * not get out if: 6212 * a) no_prr_addback is off. 6213 * b) we are not app limited 6214 * c) we are doing prr 6215 * <and> 6216 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 6217 */ 6218 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 6219 rack->r_ctl.rc_prr_sndcnt); 6220 } 6221 rack->r_ctl.rc_prr_sndcnt = 0; 6222 rack_log_to_prr(rack, 1, 0, __LINE__); 6223 } 6224 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 6225 tp->snd_recover = tp->snd_una; 6226 if (rack->r_ctl.dsack_persist) { 6227 rack->r_ctl.dsack_persist--; 6228 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6229 rack->r_ctl.num_dsack = 0; 6230 } 6231 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6232 } 6233 if (rack->rto_from_rec == 1) { 6234 rack->rto_from_rec = 0; 6235 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) 6236 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; 6237 } 6238 rack_exit_recovery(tp, rack, 1); 6239 } 6240 6241 static void 6242 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line) 6243 { 6244 struct tcp_rack *rack; 6245 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 6246 6247 INP_WLOCK_ASSERT(tptoinpcb(tp)); 6248 #ifdef STATS 6249 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 6250 #endif 6251 if (IN_RECOVERY(tp->t_flags) == 0) { 6252 in_rec_at_entry = 0; 6253 ssthresh_enter = tp->snd_ssthresh; 6254 cwnd_enter = tp->snd_cwnd; 6255 } else 6256 in_rec_at_entry = 1; 6257 rack = (struct tcp_rack *)tp->t_fb_ptr; 6258 switch (type) { 6259 case CC_NDUPACK: 6260 tp->t_flags &= ~TF_WASFRECOVERY; 6261 tp->t_flags &= ~TF_WASCRECOVERY; 6262 if (!IN_FASTRECOVERY(tp->t_flags)) { 6263 struct rack_sendmap *rsm; 6264 struct timeval tv; 6265 uint32_t segsiz; 6266 6267 /* Check if this is the end of the initial Start-up i.e. initial slow-start */ 6268 if (rack->rc_initial_ss_comp == 0) { 6269 /* Yep it is the end of the initial slowstart */ 6270 rack->rc_initial_ss_comp = 1; 6271 } 6272 microuptime(&tv); 6273 rack->r_ctl.time_entered_recovery = tcp_tv_to_lusectick(&tv); 6274 if (SEQ_GEQ(ack, tp->snd_una)) { 6275 /* 6276 * The ack is above snd_una. Lets see 6277 * if we can establish a postive distance from 6278 * our idle mark. 6279 */ 6280 rack->r_ctl.ack_for_idle = ack; 6281 if (SEQ_GT(ack, rack->r_ctl.idle_snd_una)) { 6282 rack->r_ctl.last_amount_before_rec = ack - rack->r_ctl.idle_snd_una; 6283 } else { 6284 /* No data thru yet */ 6285 rack->r_ctl.last_amount_before_rec = 0; 6286 } 6287 } else if (SEQ_GT(tp->snd_una, rack->r_ctl.idle_snd_una)) { 6288 /* 6289 * The ack is out of order and behind the snd_una. It may 6290 * have contained SACK information which we processed else 6291 * we would have rejected it. 6292 */ 6293 rack->r_ctl.ack_for_idle = tp->snd_una; 6294 rack->r_ctl.last_amount_before_rec = tp->snd_una - rack->r_ctl.idle_snd_una; 6295 } else { 6296 rack->r_ctl.ack_for_idle = ack; 6297 rack->r_ctl.last_amount_before_rec = 0; 6298 } 6299 if (rack->rc_policer_detected) { 6300 /* 6301 * If we are being policed and we have a loss, it 6302 * means our bucket is now empty. This can happen 6303 * where some other flow on the same host sends 6304 * that this connection is not aware of. 
6305 */ 6306 rack->r_ctl.current_policer_bucket = 0; 6307 if (rack_verbose_logging) 6308 policer_detection_log(rack, rack->r_ctl.last_amount_before_rec, 0, 0, 0, 4); 6309 if (rack->r_ctl.last_amount_before_rec > rack->r_ctl.policer_bucket_size) { 6310 rack->r_ctl.policer_bucket_size = rack->r_ctl.last_amount_before_rec; 6311 } 6312 } 6313 memset(rack->r_ctl.rc_cnt_of_retran, 0, sizeof(rack->r_ctl.rc_cnt_of_retran)); 6314 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 6315 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 6316 /* 6317 * Go through the outstanding and re-peg 6318 * any that should have been left in the 6319 * retransmit list (on a double recovery). 6320 */ 6321 if (rsm->r_act_rxt_cnt > 0) { 6322 rack_peg_rxt(rack, rsm, segsiz); 6323 } 6324 } 6325 rack->r_ctl.bytes_acked_in_recovery = 0; 6326 rack->r_ctl.rc_prr_delivered = 0; 6327 rack->r_ctl.rc_prr_out = 0; 6328 rack->r_fast_output = 0; 6329 if (rack->rack_no_prr == 0) { 6330 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 6331 rack_log_to_prr(rack, 2, in_rec_at_entry, line); 6332 } 6333 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 6334 tp->snd_recover = tp->snd_max; 6335 if (tp->t_flags2 & TF2_ECN_PERMIT) 6336 tp->t_flags2 |= TF2_ECN_SND_CWR; 6337 } 6338 break; 6339 case CC_ECN: 6340 if (!IN_CONGRECOVERY(tp->t_flags) || 6341 /* 6342 * Allow ECN reaction on ACK to CWR, if 6343 * that data segment was also CE marked. 6344 */ 6345 SEQ_GEQ(ack, tp->snd_recover)) { 6346 EXIT_CONGRECOVERY(tp->t_flags); 6347 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 6348 rack->r_fast_output = 0; 6349 tp->snd_recover = tp->snd_max + 1; 6350 if (tp->t_flags2 & TF2_ECN_PERMIT) 6351 tp->t_flags2 |= TF2_ECN_SND_CWR; 6352 } 6353 break; 6354 case CC_RTO: 6355 tp->t_dupacks = 0; 6356 tp->t_bytes_acked = 0; 6357 rack->r_fast_output = 0; 6358 if (IN_RECOVERY(tp->t_flags)) 6359 rack_exit_recovery(tp, rack, 2); 6360 rack->r_ctl.bytes_acked_in_recovery = 0; 6361 rack->r_ctl.time_entered_recovery = 0; 6362 orig_cwnd = tp->snd_cwnd; 6363 rack_log_to_prr(rack, 16, orig_cwnd, line); 6364 if (CC_ALGO(tp)->cong_signal == NULL) { 6365 /* TSNH */ 6366 tp->snd_ssthresh = max(2, 6367 min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 6368 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 6369 tp->snd_cwnd = ctf_fixed_maxseg(tp); 6370 } 6371 if (tp->t_flags2 & TF2_ECN_PERMIT) 6372 tp->t_flags2 |= TF2_ECN_SND_CWR; 6373 break; 6374 case CC_RTO_ERR: 6375 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 6376 /* RTO was unnecessary, so reset everything. 
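 * That is, the cwnd, ssthresh and recovery point saved before the
 * spurious RTO are restored below, and TF_WASFRECOVERY/TF_WASCRECOVERY
 * put the connection back into whichever recovery state it was in
 * when the bad RTO fired.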
*/ 6377 tp->snd_cwnd = tp->snd_cwnd_prev; 6378 tp->snd_ssthresh = tp->snd_ssthresh_prev; 6379 tp->snd_recover = tp->snd_recover_prev; 6380 if (tp->t_flags & TF_WASFRECOVERY) { 6381 ENTER_FASTRECOVERY(tp->t_flags); 6382 tp->t_flags &= ~TF_WASFRECOVERY; 6383 } 6384 if (tp->t_flags & TF_WASCRECOVERY) { 6385 ENTER_CONGRECOVERY(tp->t_flags); 6386 tp->t_flags &= ~TF_WASCRECOVERY; 6387 } 6388 tp->snd_nxt = tp->snd_max; 6389 tp->t_badrxtwin = 0; 6390 break; 6391 } 6392 if ((CC_ALGO(tp)->cong_signal != NULL) && 6393 (type != CC_RTO)){ 6394 tp->t_ccv.curack = ack; 6395 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type); 6396 } 6397 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 6398 rack_log_to_prr(rack, 15, cwnd_enter, line); 6399 rack->r_ctl.dsack_byte_cnt = 0; 6400 rack->r_ctl.retran_during_recovery = 0; 6401 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 6402 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 6403 rack->r_ent_rec_ns = 1; 6404 } 6405 } 6406 6407 static inline void 6408 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 6409 { 6410 uint32_t i_cwnd; 6411 6412 INP_WLOCK_ASSERT(tptoinpcb(tp)); 6413 6414 if (CC_ALGO(tp)->after_idle != NULL) 6415 CC_ALGO(tp)->after_idle(&tp->t_ccv); 6416 6417 if (tp->snd_cwnd == 1) 6418 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 6419 else 6420 i_cwnd = rc_init_window(rack); 6421 6422 /* 6423 * Being idle is no different than the initial window. If the cc 6424 * clamps it down below the initial window raise it to the initial 6425 * window. 6426 */ 6427 if (tp->snd_cwnd < i_cwnd) { 6428 tp->snd_cwnd = i_cwnd; 6429 } 6430 } 6431 6432 /* 6433 * Indicate whether this ack should be delayed. We can delay the ack if 6434 * following conditions are met: 6435 * - There is no delayed ack timer in progress. 6436 * - Our last ack wasn't a 0-sized window. We never want to delay 6437 * the ack that opens up a 0-sized window. 6438 * - LRO wasn't used for this segment. We make sure by checking that the 6439 * segment size is not larger than the MSS. 6440 * - Delayed acks are enabled or this is a half-synchronized T/TCP 6441 * connection. 6442 */ 6443 #define DELAY_ACK(tp, tlen) \ 6444 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 6445 ((tp->t_flags & TF_DELACK) == 0) && \ 6446 (tlen <= tp->t_maxseg) && \ 6447 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 6448 6449 static struct rack_sendmap * 6450 rack_find_lowest_rsm(struct tcp_rack *rack) 6451 { 6452 struct rack_sendmap *rsm; 6453 6454 /* 6455 * Walk the time-order transmitted list looking for an rsm that is 6456 * not acked. This will be the one that was sent the longest time 6457 * ago that is still outstanding. 6458 */ 6459 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 6460 if (rsm->r_flags & RACK_ACKED) { 6461 continue; 6462 } 6463 goto finish; 6464 } 6465 finish: 6466 return (rsm); 6467 } 6468 6469 static struct rack_sendmap * 6470 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm) 6471 { 6472 struct rack_sendmap *prsm; 6473 6474 /* 6475 * Walk the sequence order list backward until we hit and arrive at 6476 * the highest seq not acked. In theory when this is called it 6477 * should be the last segment (which it was not). 
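 * For example, if the three highest rsm's are all RACK_ACKED the
 * reverse walk below returns the fourth from the end (provided it is
 * not itself acked or a FIN); if every rsm is acked or carries a FIN
 * we return NULL.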
*/ 6479 prsm = rsm; 6480 6481 TQHASH_FOREACH_REVERSE_FROM(prsm, rack->r_ctl.tqh) { 6482 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) { 6483 continue; 6484 } 6485 return (prsm); 6486 } 6487 return (NULL); 6488 } 6489 6490 static uint32_t 6491 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts, int line, int log_allowed) 6492 { 6493 int32_t lro; 6494 uint32_t thresh; 6495 6496 /* 6497 * lro is the flag we use to determine if we have seen reordering. 6498 * If it gets set we have seen reordering. The reorder logic either 6499 * works in one of two ways: 6500 * 6501 * If reorder-fade is configured, then we track the last time we saw 6502 * re-ordering occur. If we reach the point where enough time has 6503 * passed we no longer consider reordering to be occurring. 6504 * 6505 * Or if reorder-fade is 0, then once we see reordering we consider 6506 * the connection to always be subject to reordering and just set lro 6507 * to 1. 6508 * 6509 * In the end if lro is non-zero we add the extra time for 6510 * reordering in. 6511 */ 6512 if (srtt == 0) 6513 srtt = 1; 6514 if (rack->r_ctl.rc_reorder_ts) { 6515 if (rack->r_ctl.rc_reorder_fade) { 6516 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) { 6517 lro = cts - rack->r_ctl.rc_reorder_ts; 6518 if (lro == 0) { 6519 /* 6520 * No time has passed since the last 6521 * reorder, mark it as reordering. 6522 */ 6523 lro = 1; 6524 } 6525 } else { 6526 /* Negative time? */ 6527 lro = 0; 6528 } 6529 if (lro > rack->r_ctl.rc_reorder_fade) { 6530 /* Turn off reordering seen too */ 6531 rack->r_ctl.rc_reorder_ts = 0; 6532 lro = 0; 6533 } 6534 } else { 6535 /* Reordering does not fade */ 6536 lro = 1; 6537 } 6538 } else { 6539 lro = 0; 6540 } 6541 if (rack->rc_rack_tmr_std_based == 0) { 6542 thresh = srtt + rack->r_ctl.rc_pkt_delay; 6543 } else { 6544 /* Standards based pkt-delay is 1/4 srtt */ 6545 thresh = srtt + (srtt >> 2); 6546 } 6547 if (lro && (rack->rc_rack_tmr_std_based == 0)) { 6548 /* It must be set, if not you get 1/4 rtt */ 6549 if (rack->r_ctl.rc_reorder_shift) 6550 thresh += (srtt >> rack->r_ctl.rc_reorder_shift); 6551 else 6552 thresh += (srtt >> 2); 6553 } 6554 if (rack->rc_rack_use_dsack && 6555 lro && 6556 (rack->r_ctl.num_dsack > 0)) { 6557 /* 6558 * We only increase the reordering window if we 6559 * have seen reordering <and> we have a DSACK count.
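 *
 * Illustration (assumed values): with srtt = 40000 usecs and
 * num_dsack = 2, the line below adds 2 * (srtt >> 2) = 20000 usecs
 * to thresh; the result is still subject to the 2 * srtt ceiling
 * and the rack_rto_max cap applied further down.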
6560 */ 6561 thresh += rack->r_ctl.num_dsack * (srtt >> 2); 6562 if (log_allowed) 6563 rack_log_dsack_event(rack, 4, line, srtt, thresh); 6564 } 6565 /* SRTT * 2 is the ceiling */ 6566 if (thresh > (srtt * 2)) { 6567 thresh = srtt * 2; 6568 } 6569 /* And we don't want it above the RTO max either */ 6570 if (thresh > rack_rto_max) { 6571 thresh = rack_rto_max; 6572 } 6573 if (log_allowed) 6574 rack_log_dsack_event(rack, 6, line, srtt, thresh); 6575 return (thresh); 6576 } 6577 6578 static uint32_t 6579 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 6580 struct rack_sendmap *rsm, uint32_t srtt) 6581 { 6582 struct rack_sendmap *prsm; 6583 uint32_t thresh, len; 6584 int segsiz; 6585 6586 if (srtt == 0) 6587 srtt = 1; 6588 if (rack->r_ctl.rc_tlp_threshold) 6589 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 6590 else 6591 thresh = (srtt * 2); 6592 6593 /* Get the previous sent packet, if any */ 6594 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 6595 len = rsm->r_end - rsm->r_start; 6596 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 6597 /* Exactly like the ID */ 6598 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { 6599 uint32_t alt_thresh; 6600 /* 6601 * Compensate for delayed-ack with the d-ack time. 6602 */ 6603 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6604 if (alt_thresh > thresh) 6605 thresh = alt_thresh; 6606 } 6607 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 6608 /* 2.1 behavior */ 6609 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 6610 if (prsm && (len <= segsiz)) { 6611 /* 6612 * Two packets outstanding, thresh should be (2*srtt) + 6613 * possible inter-packet delay (if any). 6614 */ 6615 uint32_t inter_gap = 0; 6616 int idx, nidx; 6617 6618 idx = rsm->r_rtr_cnt - 1; 6619 nidx = prsm->r_rtr_cnt - 1; 6620 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 6621 /* Yes it was sent later (or at the same time) */ 6622 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 6623 } 6624 thresh += inter_gap; 6625 } else if (len <= segsiz) { 6626 /* 6627 * Possibly compensate for delayed-ack. 6628 */ 6629 uint32_t alt_thresh; 6630 6631 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6632 if (alt_thresh > thresh) 6633 thresh = alt_thresh; 6634 } 6635 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 6636 /* 2.2 behavior */ 6637 if (len <= segsiz) { 6638 uint32_t alt_thresh; 6639 /* 6640 * Compensate for delayed-ack with the d-ack time. 6641 */ 6642 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6643 if (alt_thresh > thresh) 6644 thresh = alt_thresh; 6645 } 6646 } 6647 /* Not above an RTO */ 6648 if (thresh > tp->t_rxtcur) { 6649 thresh = tp->t_rxtcur; 6650 } 6651 /* Not above a RTO max */ 6652 if (thresh > rack_rto_max) { 6653 thresh = rack_rto_max; 6654 } 6655 /* Apply user supplied min TLP */ 6656 if (thresh < rack_tlp_min) { 6657 thresh = rack_tlp_min; 6658 } 6659 return (thresh); 6660 } 6661 6662 static uint32_t 6663 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 6664 { 6665 /* 6666 * We want the rack_rtt which is the 6667 * last rtt we measured. However if that 6668 * does not exist we fallback to the srtt (which 6669 * we probably will never do) and then as a last 6670 * resort we use RACK_INITIAL_RTO if no srtt is 6671 * yet set. 
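 * For example, a connection with no RACK measurement yet but a
 * t_srtt of 50000 usecs returns 50000, while a brand new connection
 * with neither returns RACK_INITIAL_RTO.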
6672 */ 6673 if (rack->rc_rack_rtt) 6674 return (rack->rc_rack_rtt); 6675 else if (tp->t_srtt == 0) 6676 return (RACK_INITIAL_RTO); 6677 return (tp->t_srtt); 6678 } 6679 6680 static struct rack_sendmap * 6681 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 6682 { 6683 /* 6684 * Check to see that we don't need to fall into recovery. We will 6685 * need to do so if our oldest transmit is past the time we should 6686 * have had an ack. 6687 */ 6688 struct tcp_rack *rack; 6689 struct rack_sendmap *rsm; 6690 int32_t idx; 6691 uint32_t srtt, thresh; 6692 6693 rack = (struct tcp_rack *)tp->t_fb_ptr; 6694 if (tqhash_empty(rack->r_ctl.tqh)) { 6695 return (NULL); 6696 } 6697 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6698 if (rsm == NULL) 6699 return (NULL); 6700 6701 6702 if (rsm->r_flags & RACK_ACKED) { 6703 rsm = rack_find_lowest_rsm(rack); 6704 if (rsm == NULL) 6705 return (NULL); 6706 } 6707 idx = rsm->r_rtr_cnt - 1; 6708 srtt = rack_grab_rtt(tp, rack); 6709 thresh = rack_calc_thresh_rack(rack, srtt, tsused, __LINE__, 1); 6710 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 6711 return (NULL); 6712 } 6713 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 6714 return (NULL); 6715 } 6716 /* Ok if we reach here we are over-due and this guy can be sent */ 6717 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 6718 return (rsm); 6719 } 6720 6721 static uint32_t 6722 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 6723 { 6724 int32_t t; 6725 int32_t tt; 6726 uint32_t ret_val; 6727 6728 t = (tp->t_srtt + (tp->t_rttvar << 2)); 6729 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 6730 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 6731 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 6732 ret_val = (uint32_t)tt; 6733 return (ret_val); 6734 } 6735 6736 static uint32_t 6737 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 6738 { 6739 /* 6740 * Start the FR timer, we do this based on getting the first one in 6741 * the rc_tmap. Note that if its NULL we must stop the timer. in all 6742 * events we need to stop the running timer (if its running) before 6743 * starting the new one. 6744 */ 6745 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 6746 uint32_t srtt_cur; 6747 int32_t idx; 6748 int32_t is_tlp_timer = 0; 6749 struct rack_sendmap *rsm; 6750 6751 if (rack->t_timers_stopped) { 6752 /* All timers have been stopped none are to run */ 6753 return (0); 6754 } 6755 if (rack->rc_in_persist) { 6756 /* We can't start any timer in persists */ 6757 return (rack_get_persists_timer_val(tp, rack)); 6758 } 6759 rack->rc_on_min_to = 0; 6760 if ((tp->t_state < TCPS_ESTABLISHED) || 6761 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 6762 goto activate_rxt; 6763 } 6764 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6765 if ((rsm == NULL) || sup_rack) { 6766 /* Nothing on the send map or no rack */ 6767 activate_rxt: 6768 time_since_sent = 0; 6769 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6770 if (rsm) { 6771 /* 6772 * Should we discount the RTX timer any? 6773 * 6774 * We want to discount it the smallest amount. 6775 * If a timer (Rack/TLP or RXT) has gone off more 6776 * recently thats the discount we want to use (now - timer time). 6777 * If the retransmit of the oldest packet was more recent then 6778 * we want to use that (now - oldest-packet-last_transmit_time). 
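 * Illustration (assumed values): if the last RXT/TLP timer fired
 * 30 ms ago but the oldest outstanding packet was (re)sent 10 ms
 * ago, the later of the two timestamps wins, time_since_sent
 * becomes 10 ms, and the pending RXT timeout below is shortened by
 * that amount.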
6779 * 6780 */ 6781 idx = rsm->r_rtr_cnt - 1; 6782 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 6783 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 6784 else 6785 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 6786 if (TSTMP_GT(cts, tstmp_touse)) 6787 time_since_sent = cts - tstmp_touse; 6788 } 6789 if (SEQ_LT(tp->snd_una, tp->snd_max) || 6790 sbavail(&tptosocket(tp)->so_snd)) { 6791 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 6792 to = tp->t_rxtcur; 6793 if (to > time_since_sent) 6794 to -= time_since_sent; 6795 else 6796 to = rack->r_ctl.rc_min_to; 6797 if (to == 0) 6798 to = 1; 6799 /* Special case for KEEPINIT */ 6800 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6801 (TP_KEEPINIT(tp) != 0) && 6802 rsm) { 6803 /* 6804 * We have to put a ceiling on the rxt timer 6805 * of the keep-init timeout. 6806 */ 6807 uint32_t max_time, red; 6808 6809 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 6810 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 6811 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 6812 if (red < max_time) 6813 max_time -= red; 6814 else 6815 max_time = 1; 6816 } 6817 /* Reduce timeout to the keep value if needed */ 6818 if (max_time < to) 6819 to = max_time; 6820 } 6821 return (to); 6822 } 6823 return (0); 6824 } 6825 if (rsm->r_flags & RACK_ACKED) { 6826 rsm = rack_find_lowest_rsm(rack); 6827 if (rsm == NULL) { 6828 /* No lowest? */ 6829 goto activate_rxt; 6830 } 6831 } 6832 /* Convert from ms to usecs */ 6833 if ((rsm->r_flags & RACK_SACK_PASSED) || 6834 (rsm->r_flags & RACK_RWND_COLLAPSED) || 6835 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 6836 if ((tp->t_flags & TF_SENTFIN) && 6837 ((tp->snd_max - tp->snd_una) == 1) && 6838 (rsm->r_flags & RACK_HAS_FIN)) { 6839 /* 6840 * We don't start a rack timer if all we have is a 6841 * FIN outstanding. 6842 */ 6843 goto activate_rxt; 6844 } 6845 if ((rack->use_rack_rr == 0) && 6846 (IN_FASTRECOVERY(tp->t_flags)) && 6847 (rack->rack_no_prr == 0) && 6848 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 6849 /* 6850 * We are not cheating, in recovery and 6851 * not enough ack's to yet get our next 6852 * retransmission out. 6853 * 6854 * Note that classified attackers do not 6855 * get to use the rack-cheat. 6856 */ 6857 goto activate_tlp; 6858 } 6859 srtt = rack_grab_rtt(tp, rack); 6860 thresh = rack_calc_thresh_rack(rack, srtt, cts, __LINE__, 1); 6861 idx = rsm->r_rtr_cnt - 1; 6862 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 6863 if (SEQ_GEQ(exp, cts)) { 6864 to = exp - cts; 6865 if (to < rack->r_ctl.rc_min_to) { 6866 to = rack->r_ctl.rc_min_to; 6867 if (rack->r_rr_config == 3) 6868 rack->rc_on_min_to = 1; 6869 } 6870 } else { 6871 to = rack->r_ctl.rc_min_to; 6872 if (rack->r_rr_config == 3) 6873 rack->rc_on_min_to = 1; 6874 } 6875 } else { 6876 /* Ok we need to do a TLP not RACK */ 6877 activate_tlp: 6878 if ((rack->rc_tlp_in_progress != 0) && 6879 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 6880 /* 6881 * The previous send was a TLP and we have sent 6882 * N TLP's without sending new data. 6883 */ 6884 goto activate_rxt; 6885 } 6886 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 6887 if (rsm == NULL) { 6888 /* We found no rsm to TLP with. 
*/ 6889 goto activate_rxt; 6890 } 6891 if (rsm->r_flags & RACK_HAS_FIN) { 6892 /* If its a FIN we dont do TLP */ 6893 rsm = NULL; 6894 goto activate_rxt; 6895 } 6896 idx = rsm->r_rtr_cnt - 1; 6897 time_since_sent = 0; 6898 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 6899 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 6900 else 6901 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 6902 if (TSTMP_GT(cts, tstmp_touse)) 6903 time_since_sent = cts - tstmp_touse; 6904 is_tlp_timer = 1; 6905 if (tp->t_srtt) { 6906 if ((rack->rc_srtt_measure_made == 0) && 6907 (tp->t_srtt == 1)) { 6908 /* 6909 * If another stack as run and set srtt to 1, 6910 * then the srtt was 0, so lets use the initial. 6911 */ 6912 srtt = RACK_INITIAL_RTO; 6913 } else { 6914 srtt_cur = tp->t_srtt; 6915 srtt = srtt_cur; 6916 } 6917 } else 6918 srtt = RACK_INITIAL_RTO; 6919 /* 6920 * If the SRTT is not keeping up and the 6921 * rack RTT has spiked we want to use 6922 * the last RTT not the smoothed one. 6923 */ 6924 if (rack_tlp_use_greater && 6925 tp->t_srtt && 6926 (srtt < rack_grab_rtt(tp, rack))) { 6927 srtt = rack_grab_rtt(tp, rack); 6928 } 6929 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 6930 if (thresh > time_since_sent) { 6931 to = thresh - time_since_sent; 6932 } else { 6933 to = rack->r_ctl.rc_min_to; 6934 rack_log_alt_to_to_cancel(rack, 6935 thresh, /* flex1 */ 6936 time_since_sent, /* flex2 */ 6937 tstmp_touse, /* flex3 */ 6938 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 6939 (uint32_t)rsm->r_tim_lastsent[idx], 6940 srtt, 6941 idx, 99); 6942 } 6943 if (to < rack_tlp_min) { 6944 to = rack_tlp_min; 6945 } 6946 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) { 6947 /* 6948 * If the TLP time works out to larger than the max 6949 * RTO lets not do TLP.. just RTO. 6950 */ 6951 goto activate_rxt; 6952 } 6953 } 6954 if (is_tlp_timer == 0) { 6955 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 6956 } else { 6957 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 6958 } 6959 if (to == 0) 6960 to = 1; 6961 return (to); 6962 } 6963 6964 static void 6965 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, tcp_seq snd_una) 6966 { 6967 if (rack->rc_in_persist == 0) { 6968 if (tp->t_flags & TF_GPUTINPROG) { 6969 /* 6970 * Stop the goodput now, the calling of the 6971 * measurement function clears the flag. 
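 * (rack_do_goodput_measurement() is handed RACK_QUALITY_PERSIST
 * here, which presumably tags the ended sample as having been cut
 * short by the move into persists rather than completing normally.)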
6972 */ 6973 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, 6974 RACK_QUALITY_PERSIST); 6975 } 6976 #ifdef NETFLIX_SHARED_CWND 6977 if (rack->r_ctl.rc_scw) { 6978 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 6979 rack->rack_scwnd_is_idle = 1; 6980 } 6981 #endif 6982 rack->r_ctl.rc_went_idle_time = cts; 6983 if (rack->r_ctl.rc_went_idle_time == 0) 6984 rack->r_ctl.rc_went_idle_time = 1; 6985 if (rack->lt_bw_up) { 6986 /* Suspend our LT BW measurement */ 6987 uint64_t tmark; 6988 6989 rack->r_ctl.lt_bw_bytes += (snd_una - rack->r_ctl.lt_seq); 6990 rack->r_ctl.lt_seq = snd_una; 6991 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 6992 if (tmark >= rack->r_ctl.lt_timemark) { 6993 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 6994 } 6995 rack->r_ctl.lt_timemark = tmark; 6996 rack->lt_bw_up = 0; 6997 rack->r_persist_lt_bw_off = 1; 6998 } 6999 rack_timer_cancel(tp, rack, cts, __LINE__); 7000 rack->r_ctl.persist_lost_ends = 0; 7001 rack->probe_not_answered = 0; 7002 rack->forced_ack = 0; 7003 tp->t_rxtshift = 0; 7004 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7005 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 7006 rack->rc_in_persist = 1; 7007 } 7008 } 7009 7010 static void 7011 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7012 { 7013 if (tcp_in_hpts(rack->rc_tp)) { 7014 tcp_hpts_remove(rack->rc_tp); 7015 rack->r_ctl.rc_hpts_flags = 0; 7016 } 7017 #ifdef NETFLIX_SHARED_CWND 7018 if (rack->r_ctl.rc_scw) { 7019 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 7020 rack->rack_scwnd_is_idle = 0; 7021 } 7022 #endif 7023 if (rack->rc_gp_dyn_mul && 7024 (rack->use_fixed_rate == 0) && 7025 (rack->rc_always_pace)) { 7026 /* 7027 * Do we count this as if a probe-rtt just 7028 * finished? 7029 */ 7030 uint32_t time_idle, idle_min; 7031 7032 time_idle = cts - rack->r_ctl.rc_went_idle_time; 7033 idle_min = rack_min_probertt_hold; 7034 if (rack_probertt_gpsrtt_cnt_div) { 7035 uint64_t extra; 7036 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 7037 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 7038 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 7039 idle_min += (uint32_t)extra; 7040 } 7041 if (time_idle >= idle_min) { 7042 /* Yes, we count it as a probe-rtt. 
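 * Illustration (assumed values): with rack_min_probertt_hold at
 * 200000 usecs, rc_gp_srtt = 40000 usecs, rack_probertt_gpsrtt_cnt_mul
 * = 4 and rack_probertt_gpsrtt_cnt_div = 1, idle_min above works out
 * to 200000 + 160000 = 360000 usecs; an idle period at least that
 * long is treated as a completed probe-rtt here.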
*/ 7043 uint32_t us_cts; 7044 7045 us_cts = tcp_get_usecs(NULL); 7046 if (rack->in_probe_rtt == 0) { 7047 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 7048 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 7049 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 7050 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 7051 } else { 7052 rack_exit_probertt(rack, us_cts); 7053 } 7054 } 7055 } 7056 if (rack->r_persist_lt_bw_off) { 7057 /* Continue where we left off */ 7058 rack->r_ctl.lt_timemark = tcp_get_u64_usecs(NULL); 7059 rack->lt_bw_up = 1; 7060 rack->r_persist_lt_bw_off = 0; 7061 } 7062 rack->r_ctl.idle_snd_una = tp->snd_una; 7063 rack->rc_in_persist = 0; 7064 rack->r_ctl.rc_went_idle_time = 0; 7065 tp->t_rxtshift = 0; 7066 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7067 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 7068 rack->r_ctl.rc_agg_delayed = 0; 7069 rack->r_early = 0; 7070 rack->r_late = 0; 7071 rack->r_ctl.rc_agg_early = 0; 7072 } 7073 7074 static void 7075 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 7076 struct hpts_diag *diag, struct timeval *tv) 7077 { 7078 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 7079 union tcp_log_stackspecific log; 7080 7081 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 7082 log.u_bbr.flex1 = diag->p_nxt_slot; 7083 log.u_bbr.flex2 = diag->p_cur_slot; 7084 log.u_bbr.flex3 = diag->slot_req; 7085 log.u_bbr.flex4 = diag->inp_hptsslot; 7086 log.u_bbr.flex5 = diag->slot_remaining; 7087 log.u_bbr.flex6 = diag->need_new_to; 7088 log.u_bbr.flex7 = diag->p_hpts_active; 7089 log.u_bbr.flex8 = diag->p_on_min_sleep; 7090 /* Hijack other fields as needed */ 7091 log.u_bbr.epoch = diag->have_slept; 7092 log.u_bbr.lt_epoch = diag->yet_to_sleep; 7093 log.u_bbr.pkts_out = diag->co_ret; 7094 log.u_bbr.applimited = diag->hpts_sleep_time; 7095 log.u_bbr.delivered = diag->p_prev_slot; 7096 log.u_bbr.inflight = diag->p_runningslot; 7097 log.u_bbr.bw_inuse = diag->wheel_slot; 7098 log.u_bbr.rttProp = diag->wheel_cts; 7099 log.u_bbr.timeStamp = cts; 7100 log.u_bbr.delRate = diag->maxslots; 7101 log.u_bbr.cur_del_rate = diag->p_curtick; 7102 log.u_bbr.cur_del_rate <<= 32; 7103 log.u_bbr.cur_del_rate |= diag->p_lasttick; 7104 TCP_LOG_EVENTP(rack->rc_tp, NULL, 7105 &rack->rc_inp->inp_socket->so_rcv, 7106 &rack->rc_inp->inp_socket->so_snd, 7107 BBR_LOG_HPTSDIAG, 0, 7108 0, &log, false, tv); 7109 } 7110 7111 } 7112 7113 static void 7114 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 7115 { 7116 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 7117 union tcp_log_stackspecific log; 7118 struct timeval tv; 7119 7120 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 7121 log.u_bbr.flex1 = sb->sb_flags; 7122 log.u_bbr.flex2 = len; 7123 log.u_bbr.flex3 = sb->sb_state; 7124 log.u_bbr.flex8 = type; 7125 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 7126 TCP_LOG_EVENTP(rack->rc_tp, NULL, 7127 &rack->rc_inp->inp_socket->so_rcv, 7128 &rack->rc_inp->inp_socket->so_snd, 7129 TCP_LOG_SB_WAKE, 0, 7130 len, &log, false, &tv); 7131 } 7132 } 7133 7134 static void 7135 rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 7136 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 7137 { 7138 struct hpts_diag diag; 7139 struct inpcb *inp = tptoinpcb(tp); 7140 struct timeval tv; 7141 uint32_t delayed_ack = 0; 7142 uint32_t hpts_timeout; 7143 uint32_t entry_slot = slot; 7144 uint8_t stopped; 7145 uint32_t left = 0; 7146 
uint32_t us_cts; 7147 7148 if ((tp->t_state == TCPS_CLOSED) || 7149 (tp->t_state == TCPS_LISTEN)) { 7150 return; 7151 } 7152 if (tcp_in_hpts(tp)) { 7153 /* Already on the pacer */ 7154 return; 7155 } 7156 stopped = rack->rc_tmr_stopped; 7157 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 7158 left = rack->r_ctl.rc_timer_exp - cts; 7159 } 7160 rack->r_ctl.rc_timer_exp = 0; 7161 rack->r_ctl.rc_hpts_flags = 0; 7162 us_cts = tcp_get_usecs(&tv); 7163 /* Now early/late accounting */ 7164 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0); 7165 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { 7166 /* 7167 * We have a early carry over set, 7168 * we can always add more time so we 7169 * can always make this compensation. 7170 * 7171 * Note if ack's are allowed to wake us do not 7172 * penalize the next timer for being awoke 7173 * by an ack aka the rc_agg_early (non-paced mode). 7174 */ 7175 slot += rack->r_ctl.rc_agg_early; 7176 rack->r_early = 0; 7177 rack->r_ctl.rc_agg_early = 0; 7178 } 7179 if ((rack->r_late) && 7180 ((rack->r_use_hpts_min == 0) || (rack->dgp_on == 0))) { 7181 /* 7182 * This is harder, we can 7183 * compensate some but it 7184 * really depends on what 7185 * the current pacing time is. 7186 */ 7187 if (rack->r_ctl.rc_agg_delayed >= slot) { 7188 /* 7189 * We can't compensate for it all. 7190 * And we have to have some time 7191 * on the clock. We always have a min 7192 * 10 slots (10 x 10 i.e. 100 usecs). 7193 */ 7194 if (slot <= HPTS_TICKS_PER_SLOT) { 7195 /* We gain delay */ 7196 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); 7197 slot = HPTS_TICKS_PER_SLOT; 7198 } else { 7199 /* We take off some */ 7200 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); 7201 slot = HPTS_TICKS_PER_SLOT; 7202 } 7203 } else { 7204 slot -= rack->r_ctl.rc_agg_delayed; 7205 rack->r_ctl.rc_agg_delayed = 0; 7206 /* Make sure we have 100 useconds at minimum */ 7207 if (slot < HPTS_TICKS_PER_SLOT) { 7208 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; 7209 slot = HPTS_TICKS_PER_SLOT; 7210 } 7211 if (rack->r_ctl.rc_agg_delayed == 0) 7212 rack->r_late = 0; 7213 } 7214 } else if (rack->r_late) { 7215 /* r_use_hpts_min is on and so is DGP */ 7216 uint32_t max_red; 7217 7218 max_red = (slot * rack->r_ctl.max_reduction) / 100; 7219 if (max_red >= rack->r_ctl.rc_agg_delayed) { 7220 slot -= rack->r_ctl.rc_agg_delayed; 7221 rack->r_ctl.rc_agg_delayed = 0; 7222 } else { 7223 slot -= max_red; 7224 rack->r_ctl.rc_agg_delayed -= max_red; 7225 } 7226 } 7227 if ((rack->r_use_hpts_min == 1) && 7228 (slot > 0) && 7229 (rack->dgp_on == 1)) { 7230 /* 7231 * We are enforcing a min pacing timer 7232 * based on our hpts min timeout. 7233 */ 7234 uint32_t min; 7235 7236 min = get_hpts_min_sleep_time(); 7237 if (min > slot) { 7238 slot = min; 7239 } 7240 } 7241 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 7242 if (tp->t_flags & TF_DELACK) { 7243 delayed_ack = TICKS_2_USEC(tcp_delacktime); 7244 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 7245 } 7246 if (delayed_ack && ((hpts_timeout == 0) || 7247 (delayed_ack < hpts_timeout))) 7248 hpts_timeout = delayed_ack; 7249 else 7250 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 7251 /* 7252 * If no timers are going to run and we will fall off the hptsi 7253 * wheel, we resort to a keep-alive timer if its configured. 
7254 */ 7255 if ((hpts_timeout == 0) && 7256 (slot == 0)) { 7257 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 7258 (tp->t_state <= TCPS_CLOSING)) { 7259 /* 7260 * Ok we have no timer (persists, rack, tlp, rxt or 7261 * del-ack), we don't have segments being paced. So 7262 * all that is left is the keepalive timer. 7263 */ 7264 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 7265 /* Get the established keep-alive time */ 7266 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 7267 } else { 7268 /* 7269 * Get the initial setup keep-alive time, 7270 * note that this is probably not going to 7271 * happen, since rack will be running a rxt timer 7272 * if a SYN of some sort is outstanding. It is 7273 * actually handled in rack_timeout_rxt(). 7274 */ 7275 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 7276 } 7277 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 7278 if (rack->in_probe_rtt) { 7279 /* 7280 * We want to instead not wake up a long time from 7281 * now but to wake up about the time we would 7282 * exit probe-rtt and initiate a keep-alive ack. 7283 * This will get us out of probe-rtt and update 7284 * our min-rtt. 7285 */ 7286 hpts_timeout = rack_min_probertt_hold; 7287 } 7288 } 7289 } 7290 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 7291 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 7292 /* 7293 * RACK, TLP, persists and RXT timers all are restartable 7294 * based on actions input .. i.e we received a packet (ack 7295 * or sack) and that changes things (rw, or snd_una etc). 7296 * Thus we can restart them with a new value. For 7297 * keep-alive, delayed_ack we keep track of what was left 7298 * and restart the timer with a smaller value. 7299 */ 7300 if (left < hpts_timeout) 7301 hpts_timeout = left; 7302 } 7303 if (hpts_timeout) { 7304 /* 7305 * Hack alert for now we can't time-out over 2,147,483 7306 * seconds (a bit more than 596 hours), which is probably ok 7307 * :). 7308 */ 7309 if (hpts_timeout > 0x7ffffffe) 7310 hpts_timeout = 0x7ffffffe; 7311 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 7312 } 7313 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); 7314 if ((rack->gp_ready == 0) && 7315 (rack->use_fixed_rate == 0) && 7316 (hpts_timeout < slot) && 7317 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 7318 /* 7319 * We have no good estimate yet for the 7320 * old clunky burst mitigation or the 7321 * real pacing. And the tlp or rxt is smaller 7322 * than the pacing calculation. Lets not 7323 * pace that long since we know the calculation 7324 * so far is not accurate. 7325 */ 7326 slot = hpts_timeout; 7327 } 7328 /** 7329 * Turn off all the flags for queuing by default. The 7330 * flags have important meanings to what happens when 7331 * LRO interacts with the transport. Most likely (by default now) 7332 * mbuf_queueing and ack compression are on. So the transport 7333 * has a couple of flags that control what happens (if those 7334 * are not on then these flags won't have any effect since it 7335 * won't go through the queuing LRO path). 7336 * 7337 * TF2_MBUF_QUEUE_READY - This flags says that I am busy 7338 * pacing output, so don't disturb. But 7339 * it also means LRO can wake me if there 7340 * is a SACK arrival. 7341 * 7342 * TF2_DONT_SACK_QUEUE - This flag is used in conjunction 7343 * with the above flag (QUEUE_READY) and 7344 * when present it says don't even wake me 7345 * if a SACK arrives. 
7346 * 7347 * The idea behind these flags is that if we are pacing we 7348 * set the MBUF_QUEUE_READY and only get woken up if 7349 * a SACK arrives (which could change things) or if 7350 * our pacing timer expires. If, however, we have a rack 7351 * timer running, then we don't even want a sack to wake 7352 * us since the rack timer has to expire before we can send. 7353 * 7354 * Other cases should usually have none of the flags set 7355 * so LRO can call into us. 7356 */ 7357 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE|TF2_MBUF_QUEUE_READY); 7358 if (slot) { 7359 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 7360 rack->r_ctl.rc_last_output_to = us_cts + slot; 7361 /* 7362 * A pacing timer (slot) is being set, in 7363 * such a case we cannot send (we are blocked by 7364 * the timer). So lets tell LRO that it should not 7365 * wake us unless there is a SACK. Note this only 7366 * will be effective if mbuf queueing is on or 7367 * compressed acks are being processed. 7368 */ 7369 tp->t_flags2 |= TF2_MBUF_QUEUE_READY; 7370 /* 7371 * But wait if we have a Rack timer running 7372 * even a SACK should not disturb us (with 7373 * the exception of r_rr_config 3). 7374 */ 7375 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) || 7376 (IN_RECOVERY(tp->t_flags))) { 7377 if (rack->r_rr_config != 3) 7378 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; 7379 else if (rack->rc_pace_dnd) { 7380 /* 7381 * When DND is on, we only let a sack 7382 * interrupt us if we are not in recovery. 7383 * 7384 * If DND is off, then we never hit here 7385 * and let all sacks wake us up. 7386 * 7387 */ 7388 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; 7389 } 7390 } 7391 if (rack->rc_ack_can_sendout_data) { 7392 /* 7393 * Ahh but wait, this is that special case 7394 * where the pacing timer can be disturbed 7395 * backout the changes (used for non-paced 7396 * burst limiting). 7397 */ 7398 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE | 7399 TF2_MBUF_QUEUE_READY); 7400 } 7401 if ((rack->use_rack_rr) && 7402 (rack->r_rr_config < 2) && 7403 ((hpts_timeout) && (hpts_timeout < slot))) { 7404 /* 7405 * Arrange for the hpts to kick back in after the 7406 * t-o if the t-o does not cause a send. 7407 */ 7408 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout), 7409 __LINE__, &diag); 7410 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 7411 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 7412 } else { 7413 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(slot), 7414 __LINE__, &diag); 7415 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 7416 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 7417 } 7418 } else if (hpts_timeout) { 7419 /* 7420 * With respect to t_flags2(?) here, lets let any new acks wake 7421 * us up here. Since we are not pacing (no pacing timer), output 7422 * can happen so we should let it. If its a Rack timer, then any inbound 7423 * packet probably won't change the sending (we will be blocked) 7424 * but it may change the prr stats so letting it in (the set defaults 7425 * at the start of this block) are good enough. 
7426 */ 7427 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 7428 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout), 7429 __LINE__, &diag); 7430 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 7431 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 7432 } else { 7433 /* No timer starting */ 7434 #ifdef INVARIANTS 7435 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 7436 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 7437 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 7438 } 7439 #endif 7440 } 7441 rack->rc_tmr_stopped = 0; 7442 if (slot) 7443 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv, __LINE__); 7444 } 7445 7446 static void 7447 rack_mark_lost(struct tcpcb *tp, 7448 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts) 7449 { 7450 struct rack_sendmap *nrsm; 7451 uint32_t thresh, exp; 7452 7453 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(tp, rack), cts, __LINE__, 0); 7454 nrsm = rsm; 7455 TAILQ_FOREACH_FROM(nrsm, &rack->r_ctl.rc_tmap, r_tnext) { 7456 if ((nrsm->r_flags & RACK_SACK_PASSED) == 0) { 7457 /* Got up to all that were marked sack-passed */ 7458 break; 7459 } 7460 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { 7461 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; 7462 if (TSTMP_LT(exp, cts) || (exp == cts)) { 7463 /* We now consider it lost */ 7464 nrsm->r_flags |= RACK_WAS_LOST; 7465 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; 7466 } else { 7467 /* Past here it won't be lost so stop */ 7468 break; 7469 } 7470 } 7471 } 7472 } 7473 7474 /* 7475 * RACK Timer, here we simply do logging and house keeping. 7476 * the normal rack_output() function will call the 7477 * appropriate thing to check if we need to do a RACK retransmit. 7478 * We return 1, saying don't proceed with rack_output only 7479 * when all timers have been stopped (destroyed PCB?). 7480 */ 7481 static int 7482 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7483 { 7484 /* 7485 * This timer simply provides an internal trigger to send out data. 7486 * The check_recovery_mode call will see if there are needed 7487 * retransmissions, if so we will enter fast-recovery. The output 7488 * call may or may not do the same thing depending on sysctl 7489 * settings. 7490 */ 7491 struct rack_sendmap *rsm; 7492 7493 counter_u64_add(rack_to_tot, 1); 7494 if (rack->r_state && (rack->r_state != tp->t_state)) 7495 rack_set_state(tp, rack); 7496 rack->rc_on_min_to = 0; 7497 rsm = rack_check_recovery_mode(tp, cts); 7498 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 7499 if (rsm) { 7500 /* We need to stroke any lost that are now declared as lost */ 7501 rack_mark_lost(tp, rack, rsm, cts); 7502 rack->r_ctl.rc_resend = rsm; 7503 rack->r_timer_override = 1; 7504 if (rack->use_rack_rr) { 7505 /* 7506 * Don't accumulate extra pacing delay 7507 * we are allowing the rack timer to 7508 * over-ride pacing i.e. rrr takes precedence 7509 * if the pacing interval is longer than the rrr 7510 * time (in other words we get the min pacing 7511 * time versus rrr pacing time). 
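 * For example, if the pacing calculation would not release the
 * retransmission for another 3 ms but rack-rr allows it now,
 * clearing PACE_PKT_OUTPUT below lets rack_output() send on the rrr
 * schedule instead of waiting out the full pacing delay.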
7512 */ 7513 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 7514 } 7515 } 7516 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 7517 if (rsm == NULL) { 7518 /* restart a timer and return 1 */ 7519 rack_start_hpts_timer(rack, tp, cts, 7520 0, 0, 0); 7521 return (1); 7522 } 7523 if ((rack->policer_detect_on == 1) && 7524 (rack->rc_policer_detected == 0)) { 7525 /* 7526 * We do this early if we have not 7527 * deteceted to attempt to detect 7528 * quicker. Normally we want to do this 7529 * as recovery exits (and we will again). 7530 */ 7531 policer_detection(tp, rack, 0); 7532 } 7533 return (0); 7534 } 7535 7536 7537 7538 static void 7539 rack_adjust_orig_mlen(struct rack_sendmap *rsm) 7540 { 7541 7542 if ((M_TRAILINGROOM(rsm->m) != rsm->orig_t_space)) { 7543 /* 7544 * The trailing space changed, mbufs can grow 7545 * at the tail but they can't shrink from 7546 * it, KASSERT that. Adjust the orig_m_len to 7547 * compensate for this change. 7548 */ 7549 KASSERT((rsm->orig_t_space > M_TRAILINGROOM(rsm->m)), 7550 ("mbuf:%p rsm:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n", 7551 rsm->m, 7552 rsm, 7553 (intmax_t)M_TRAILINGROOM(rsm->m), 7554 rsm->orig_t_space, 7555 rsm->orig_m_len, 7556 rsm->m->m_len)); 7557 rsm->orig_m_len += (rsm->orig_t_space - M_TRAILINGROOM(rsm->m)); 7558 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7559 } 7560 if (rsm->m->m_len < rsm->orig_m_len) { 7561 /* 7562 * Mbuf shrank, trimmed off the top by an ack, our 7563 * offset changes. 7564 */ 7565 KASSERT((rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)), 7566 ("mbuf:%p len:%u rsm:%p oml:%u soff:%u\n", 7567 rsm->m, rsm->m->m_len, 7568 rsm, rsm->orig_m_len, 7569 rsm->soff)); 7570 if (rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)) 7571 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); 7572 else 7573 rsm->soff = 0; 7574 rsm->orig_m_len = rsm->m->m_len; 7575 #ifdef INVARIANTS 7576 } else if (rsm->m->m_len > rsm->orig_m_len) { 7577 panic("rsm:%p m:%p m_len grew outside of t_space compensation", 7578 rsm, rsm->m); 7579 #endif 7580 } 7581 } 7582 7583 static void 7584 rack_setup_offset_for_rsm(struct tcp_rack *rack, struct rack_sendmap *src_rsm, struct rack_sendmap *rsm) 7585 { 7586 struct mbuf *m; 7587 uint32_t soff; 7588 7589 if (src_rsm->m && 7590 ((src_rsm->orig_m_len != src_rsm->m->m_len) || 7591 (M_TRAILINGROOM(src_rsm->m) != src_rsm->orig_t_space))) { 7592 /* Fix up the orig_m_len and possibly the mbuf offset */ 7593 rack_adjust_orig_mlen(src_rsm); 7594 } 7595 m = src_rsm->m; 7596 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); 7597 while (soff >= m->m_len) { 7598 /* Move out past this mbuf */ 7599 soff -= m->m_len; 7600 m = m->m_next; 7601 KASSERT((m != NULL), 7602 ("rsm:%p nrsm:%p hit at soff:%u null m", 7603 src_rsm, rsm, soff)); 7604 if (m == NULL) { 7605 /* This should *not* happen which is why there is a kassert */ 7606 src_rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 7607 (src_rsm->r_start - rack->rc_tp->snd_una), 7608 &src_rsm->soff); 7609 src_rsm->orig_m_len = src_rsm->m->m_len; 7610 src_rsm->orig_t_space = M_TRAILINGROOM(src_rsm->m); 7611 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 7612 (rsm->r_start - rack->rc_tp->snd_una), 7613 &rsm->soff); 7614 rsm->orig_m_len = rsm->m->m_len; 7615 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7616 return; 7617 } 7618 } 7619 rsm->m = m; 7620 rsm->soff = soff; 7621 rsm->orig_m_len = m->m_len; 7622 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7623 } 7624 7625 static __inline void 7626 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 7627 struct 
rack_sendmap *rsm, uint32_t start) 7628 { 7629 int idx; 7630 7631 nrsm->r_start = start; 7632 nrsm->r_end = rsm->r_end; 7633 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 7634 nrsm->r_act_rxt_cnt = rsm->r_act_rxt_cnt; 7635 nrsm->r_flags = rsm->r_flags; 7636 nrsm->r_dupack = rsm->r_dupack; 7637 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; 7638 nrsm->r_rtr_bytes = 0; 7639 nrsm->r_fas = rsm->r_fas; 7640 nrsm->r_bas = rsm->r_bas; 7641 tqhash_update_end(rack->r_ctl.tqh, rsm, nrsm->r_start); 7642 nrsm->r_just_ret = rsm->r_just_ret; 7643 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 7644 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 7645 } 7646 /* Now if we have SYN flag we keep it on the left edge */ 7647 if (nrsm->r_flags & RACK_HAS_SYN) 7648 nrsm->r_flags &= ~RACK_HAS_SYN; 7649 /* Now if we have a FIN flag we keep it on the right edge */ 7650 if (rsm->r_flags & RACK_HAS_FIN) 7651 rsm->r_flags &= ~RACK_HAS_FIN; 7652 /* Push bit must go to the right edge as well */ 7653 if (rsm->r_flags & RACK_HAD_PUSH) 7654 rsm->r_flags &= ~RACK_HAD_PUSH; 7655 /* Clone over the state of the hw_tls flag */ 7656 nrsm->r_hw_tls = rsm->r_hw_tls; 7657 /* 7658 * Now we need to find nrsm's new location in the mbuf chain 7659 * we basically calculate a new offset, which is soff + 7660 * how much is left in original rsm. Then we walk out the mbuf 7661 * chain to find the righ position, it may be the same mbuf 7662 * or maybe not. 7663 */ 7664 KASSERT(((rsm->m != NULL) || 7665 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 7666 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 7667 if (rsm->m) 7668 rack_setup_offset_for_rsm(rack, rsm, nrsm); 7669 } 7670 7671 static struct rack_sendmap * 7672 rack_merge_rsm(struct tcp_rack *rack, 7673 struct rack_sendmap *l_rsm, 7674 struct rack_sendmap *r_rsm) 7675 { 7676 /* 7677 * We are merging two ack'd RSM's, 7678 * the l_rsm is on the left (lower seq 7679 * values) and the r_rsm is on the right 7680 * (higher seq value). The simplest way 7681 * to merge these is to move the right 7682 * one into the left. I don't think there 7683 * is any reason we need to try to find 7684 * the oldest (or last oldest retransmitted). 7685 */ 7686 rack_log_map_chg(rack->rc_tp, rack, NULL, 7687 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 7688 tqhash_update_end(rack->r_ctl.tqh, l_rsm, r_rsm->r_end); 7689 if (l_rsm->r_dupack < r_rsm->r_dupack) 7690 l_rsm->r_dupack = r_rsm->r_dupack; 7691 if (r_rsm->r_rtr_bytes) 7692 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 7693 if (r_rsm->r_in_tmap) { 7694 /* This really should not happen */ 7695 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 7696 r_rsm->r_in_tmap = 0; 7697 } 7698 7699 /* Now the flags */ 7700 if (r_rsm->r_flags & RACK_HAS_FIN) 7701 l_rsm->r_flags |= RACK_HAS_FIN; 7702 if (r_rsm->r_flags & RACK_TLP) 7703 l_rsm->r_flags |= RACK_TLP; 7704 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 7705 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 7706 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 7707 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 7708 /* 7709 * If both are app-limited then let the 7710 * free lower the count. If right is app 7711 * limited and left is not, transfer. 7712 */ 7713 l_rsm->r_flags |= RACK_APP_LIMITED; 7714 r_rsm->r_flags &= ~RACK_APP_LIMITED; 7715 if (r_rsm == rack->r_ctl.rc_first_appl) 7716 rack->r_ctl.rc_first_appl = l_rsm; 7717 } 7718 tqhash_remove(rack->r_ctl.tqh, r_rsm, REMOVE_TYPE_MERGE); 7719 /* 7720 * We keep the largest value, which is the newest 7721 * send. 
We do this in case a segment that is 7722 * joined together and not part of a GP estimate 7723 * later gets expanded into the GP estimate. 7724 * 7725 * We prohibit the merging of unlike kinds i.e. 7726 * all pieces that are in the GP estimate can be 7727 * merged and all pieces that are not in a GP estimate 7728 * can be merged, but not disimilar pieces. Combine 7729 * this with taking the highest here and we should 7730 * be ok unless of course the client reneges. Then 7731 * all bets are off. 7732 */ 7733 if(l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] < 7734 r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]) { 7735 l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] = r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]; 7736 } 7737 /* 7738 * When merging two RSM's we also need to consider the ack time and keep 7739 * newest. If the ack gets merged into a measurement then that is the 7740 * one we will want to be using. 7741 */ 7742 if(l_rsm->r_ack_arrival < r_rsm->r_ack_arrival) 7743 l_rsm->r_ack_arrival = r_rsm->r_ack_arrival; 7744 7745 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 7746 /* Transfer the split limit to the map we free */ 7747 r_rsm->r_limit_type = l_rsm->r_limit_type; 7748 l_rsm->r_limit_type = 0; 7749 } 7750 rack_free(rack, r_rsm); 7751 l_rsm->r_flags |= RACK_MERGED; 7752 return (l_rsm); 7753 } 7754 7755 /* 7756 * TLP Timer, here we simply setup what segment we want to 7757 * have the TLP expire on, the normal rack_output() will then 7758 * send it out. 7759 * 7760 * We return 1, saying don't proceed with rack_output only 7761 * when all timers have been stopped (destroyed PCB?). 7762 */ 7763 static int 7764 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp) 7765 { 7766 /* 7767 * Tail Loss Probe. 7768 */ 7769 struct rack_sendmap *rsm = NULL; 7770 int insret __diagused; 7771 struct socket *so = tptosocket(tp); 7772 uint32_t amm; 7773 uint32_t out, avail; 7774 int collapsed_win = 0; 7775 7776 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 7777 /* Its not time yet */ 7778 return (0); 7779 } 7780 if (ctf_progress_timeout_check(tp, true)) { 7781 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7782 return (-ETIMEDOUT); /* tcp_drop() */ 7783 } 7784 /* 7785 * A TLP timer has expired. We have been idle for 2 rtts. So we now 7786 * need to figure out how to force a full MSS segment out. 7787 */ 7788 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); 7789 rack->r_ctl.retran_during_recovery = 0; 7790 rack->r_might_revert = 0; 7791 rack->r_ctl.dsack_byte_cnt = 0; 7792 counter_u64_add(rack_tlp_tot, 1); 7793 if (rack->r_state && (rack->r_state != tp->t_state)) 7794 rack_set_state(tp, rack); 7795 avail = sbavail(&so->so_snd); 7796 out = tp->snd_max - tp->snd_una; 7797 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) { 7798 /* special case, we need a retransmission */ 7799 collapsed_win = 1; 7800 goto need_retran; 7801 } 7802 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { 7803 rack->r_ctl.dsack_persist--; 7804 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 7805 rack->r_ctl.num_dsack = 0; 7806 } 7807 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 7808 } 7809 if ((tp->t_flags & TF_GPUTINPROG) && 7810 (rack->r_ctl.rc_tlp_cnt_out == 1)) { 7811 /* 7812 * If this is the second in a row 7813 * TLP and we are doing a measurement 7814 * its time to abandon the measurement. 7815 * Something is likely broken on 7816 * the clients network and measuring a 7817 * broken network does us no good. 
7818 */ 7819 tp->t_flags &= ~TF_GPUTINPROG; 7820 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7821 rack->r_ctl.rc_gp_srtt /*flex1*/, 7822 tp->gput_seq, 7823 0, 0, 18, __LINE__, NULL, 0); 7824 } 7825 /* 7826 * Check our send oldest always settings, and if 7827 * there is an oldest to send jump to the need_retran. 7828 */ 7829 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 7830 goto need_retran; 7831 7832 if (avail > out) { 7833 /* New data is available */ 7834 amm = avail - out; 7835 if (amm > ctf_fixed_maxseg(tp)) { 7836 amm = ctf_fixed_maxseg(tp); 7837 if ((amm + out) > tp->snd_wnd) { 7838 /* We are rwnd limited */ 7839 goto need_retran; 7840 } 7841 } else if (amm < ctf_fixed_maxseg(tp)) { 7842 /* not enough to fill a MTU */ 7843 goto need_retran; 7844 } 7845 if (IN_FASTRECOVERY(tp->t_flags)) { 7846 /* Unlikely */ 7847 if (rack->rack_no_prr == 0) { 7848 if (out + amm <= tp->snd_wnd) { 7849 rack->r_ctl.rc_prr_sndcnt = amm; 7850 rack->r_ctl.rc_tlp_new_data = amm; 7851 rack_log_to_prr(rack, 4, 0, __LINE__); 7852 } 7853 } else 7854 goto need_retran; 7855 } else { 7856 /* Set the send-new override */ 7857 if (out + amm <= tp->snd_wnd) 7858 rack->r_ctl.rc_tlp_new_data = amm; 7859 else 7860 goto need_retran; 7861 } 7862 rack->r_ctl.rc_tlpsend = NULL; 7863 counter_u64_add(rack_tlp_newdata, 1); 7864 goto send; 7865 } 7866 need_retran: 7867 /* 7868 * Ok we need to arrange the last un-acked segment to be re-sent, or 7869 * optionally the first un-acked segment. 7870 */ 7871 if (collapsed_win == 0) { 7872 if (rack_always_send_oldest) 7873 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 7874 else { 7875 rsm = tqhash_max(rack->r_ctl.tqh); 7876 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 7877 rsm = rack_find_high_nonack(rack, rsm); 7878 } 7879 } 7880 if (rsm == NULL) { 7881 #ifdef TCP_BLACKBOX 7882 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 7883 #endif 7884 goto out; 7885 } 7886 } else { 7887 /* 7888 * We had a collapsed window, lets find 7889 * the point before the collapse. 7890 */ 7891 if (SEQ_GT((rack->r_ctl.last_collapse_point - 1), rack->rc_tp->snd_una)) 7892 rsm = tqhash_find(rack->r_ctl.tqh, (rack->r_ctl.last_collapse_point - 1)); 7893 else { 7894 rsm = tqhash_min(rack->r_ctl.tqh); 7895 } 7896 if (rsm == NULL) { 7897 /* Huh */ 7898 goto out; 7899 } 7900 } 7901 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 7902 /* 7903 * We need to split this the last segment in two. 7904 */ 7905 struct rack_sendmap *nrsm; 7906 7907 nrsm = rack_alloc_full_limit(rack); 7908 if (nrsm == NULL) { 7909 /* 7910 * No memory to split, we will just exit and punt 7911 * off to the RXT timer. 
7912 */ 7913 goto out; 7914 } 7915 rack_clone_rsm(rack, nrsm, rsm, 7916 (rsm->r_end - ctf_fixed_maxseg(tp))); 7917 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7918 #ifndef INVARIANTS 7919 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 7920 #else 7921 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 7922 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 7923 nrsm, insret, rack, rsm); 7924 } 7925 #endif 7926 if (rsm->r_in_tmap) { 7927 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7928 nrsm->r_in_tmap = 1; 7929 } 7930 rsm = nrsm; 7931 } 7932 rack->r_ctl.rc_tlpsend = rsm; 7933 send: 7934 /* Make sure output path knows we are doing a TLP */ 7935 *doing_tlp = 1; 7936 rack->r_timer_override = 1; 7937 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 7938 return (0); 7939 out: 7940 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 7941 return (0); 7942 } 7943 7944 /* 7945 * Delayed ack Timer, here we simply need to setup the 7946 * ACK_NOW flag and remove the DELACK flag. From there 7947 * the output routine will send the ack out. 7948 * 7949 * We only return 1, saying don't proceed, if all timers 7950 * are stopped (destroyed PCB?). 7951 */ 7952 static int 7953 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7954 { 7955 7956 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 7957 tp->t_flags &= ~TF_DELACK; 7958 tp->t_flags |= TF_ACKNOW; 7959 KMOD_TCPSTAT_INC(tcps_delack); 7960 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 7961 return (0); 7962 } 7963 7964 static inline int 7965 rack_send_ack_challange(struct tcp_rack *rack) 7966 { 7967 struct tcptemp *t_template; 7968 7969 t_template = tcpip_maketemplate(rack->rc_inp); 7970 if (t_template) { 7971 if (rack->forced_ack == 0) { 7972 rack->forced_ack = 1; 7973 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 7974 } else { 7975 rack->probe_not_answered = 1; 7976 } 7977 tcp_respond(rack->rc_tp, t_template->tt_ipgen, 7978 &t_template->tt_t, (struct mbuf *)NULL, 7979 rack->rc_tp->rcv_nxt, rack->rc_tp->snd_una - 1, 0); 7980 free(t_template, M_TEMP); 7981 /* This does send an ack so kill any D-ack timer */ 7982 if (rack->rc_tp->t_flags & TF_DELACK) 7983 rack->rc_tp->t_flags &= ~TF_DELACK; 7984 return(1); 7985 } else 7986 return (0); 7987 7988 } 7989 7990 /* 7991 * Persists timer, here we simply send the 7992 * same thing as a keepalive will. 7993 * the one byte send. 7994 * 7995 * We only return 1, saying don't proceed, if all timers 7996 * are stopped (destroyed PCB?). 7997 */ 7998 static int 7999 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 8000 { 8001 int32_t retval = 1; 8002 8003 if (rack->rc_in_persist == 0) 8004 return (0); 8005 if (ctf_progress_timeout_check(tp, false)) { 8006 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 8007 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 8008 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 8009 return (-ETIMEDOUT); /* tcp_drop() */ 8010 } 8011 /* 8012 * Persistence timer into zero window. Force a byte to be output, if 8013 * possible. 8014 */ 8015 KMOD_TCPSTAT_INC(tcps_persisttimeo); 8016 /* 8017 * Hack: if the peer is dead/unreachable, we do not time out if the 8018 * window is closed. After a full backoff, drop the connection if 8019 * the idle time (no responses to probes) reaches the maximum 8020 * backoff that we would use if retransmitting. 
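 * In other words the connection is dropped only once t_rxtshift has
 * reached V_tcp_retries and the quiet time has grown to either
 * tcp_maxpersistidle or RACK_REXMTVAL(tp) * tcp_totbackoff,
 * whichever is hit first.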
8021 */ 8022 if (tp->t_rxtshift >= V_tcp_retries && 8023 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 8024 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 8025 KMOD_TCPSTAT_INC(tcps_persistdrop); 8026 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 8027 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 8028 retval = -ETIMEDOUT; /* tcp_drop() */ 8029 goto out; 8030 } 8031 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 8032 tp->snd_una == tp->snd_max) 8033 rack_exit_persist(tp, rack, cts); 8034 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 8035 /* 8036 * If the user has closed the socket then drop a persisting 8037 * connection after a much reduced timeout. 8038 */ 8039 if (tp->t_state > TCPS_CLOSE_WAIT && 8040 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 8041 KMOD_TCPSTAT_INC(tcps_persistdrop); 8042 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 8043 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 8044 retval = -ETIMEDOUT; /* tcp_drop() */ 8045 goto out; 8046 } 8047 if (rack_send_ack_challange(rack)) { 8048 /* only set it if we were answered */ 8049 if (rack->probe_not_answered) { 8050 counter_u64_add(rack_persists_loss, 1); 8051 rack->r_ctl.persist_lost_ends++; 8052 } 8053 counter_u64_add(rack_persists_sends, 1); 8054 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 8055 } 8056 if (tp->t_rxtshift < V_tcp_retries) 8057 tp->t_rxtshift++; 8058 out: 8059 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 8060 rack_start_hpts_timer(rack, tp, cts, 8061 0, 0, 0); 8062 return (retval); 8063 } 8064 8065 /* 8066 * If a keepalive goes off, we had no other timers 8067 * happening. We always return 1 here since this 8068 * routine either drops the connection or sends 8069 * out a segment with respond. 8070 */ 8071 static int 8072 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 8073 { 8074 struct inpcb *inp = tptoinpcb(tp); 8075 8076 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 8077 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 8078 /* 8079 * Keep-alive timer went off; send something or drop connection if 8080 * idle for too long. 8081 */ 8082 KMOD_TCPSTAT_INC(tcps_keeptimeo); 8083 if (tp->t_state < TCPS_ESTABLISHED) 8084 goto dropit; 8085 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 8086 tp->t_state <= TCPS_CLOSING) { 8087 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 8088 goto dropit; 8089 /* 8090 * Send a packet designed to force a response if the peer is 8091 * up and reachable: either an ACK if the connection is 8092 * still alive, or an RST if the peer has closed the 8093 * connection due to timeout or reboot. Using sequence 8094 * number tp->snd_una-1 causes the transmitted zero-length 8095 * segment to lie outside the receive window; by the 8096 * protocol spec, this requires the correspondent TCP to 8097 * respond. 8098 */ 8099 KMOD_TCPSTAT_INC(tcps_keepprobe); 8100 rack_send_ack_challange(rack); 8101 } 8102 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 8103 return (1); 8104 dropit: 8105 KMOD_TCPSTAT_INC(tcps_keepdrops); 8106 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 8107 return (-ETIMEDOUT); /* tcp_drop() */ 8108 } 8109 8110 /* 8111 * Retransmit helper function, clear up all the ack 8112 * flags and take care of important book keeping. 8113 */ 8114 static void 8115 rack_remxt_tmr(struct tcpcb *tp) 8116 { 8117 /* 8118 * The retransmit timer went off, all sack'd blocks must be 8119 * un-acked. 
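 * In other words, after an RTO the SACK scoreboard can no longer be
 * trusted (the peer may have reneged), so once t_rxtshift exceeds
 * rack_rxt_scoreboard_clear_thresh the loop below strips every
 * block's ACKED/SACK state and flags it RACK_MUST_RXT.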
8120 */ 8121 struct rack_sendmap *rsm, *trsm = NULL; 8122 struct tcp_rack *rack; 8123 8124 rack = (struct tcp_rack *)tp->t_fb_ptr; 8125 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 8126 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 8127 rack->r_timer_override = 1; 8128 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 8129 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 8130 rack->r_late = 0; 8131 rack->r_early = 0; 8132 rack->r_ctl.rc_agg_delayed = 0; 8133 rack->r_ctl.rc_agg_early = 0; 8134 if (rack->r_state && (rack->r_state != tp->t_state)) 8135 rack_set_state(tp, rack); 8136 if (tp->t_rxtshift <= rack_rxt_scoreboard_clear_thresh) { 8137 /* 8138 * We do not clear the scoreboard until we have had 8139 * more than rack_rxt_scoreboard_clear_thresh time-outs. 8140 */ 8141 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 8142 if (rack->r_ctl.rc_resend != NULL) 8143 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 8144 8145 return; 8146 } 8147 /* 8148 * Ideally we would like to be able to 8149 * mark SACK-PASS on anything not acked here. 8150 * 8151 * However, if we do that we would burst out 8152 * all that data 1ms apart. This would be unwise, 8153 * so for now we will just let the normal rxt timer 8154 * and tlp timer take care of it. 8155 * 8156 * Also we really need to stick them back in sequence 8157 * order. This way we send in the proper order and any 8158 * sacks that come floating in will "re-ack" the data. 8159 * To do this we zap the tmap with an INIT and then 8160 * walk through and place every rsm in the tail queue 8161 * hash table back in its seq ordered place. 8162 */ 8163 TAILQ_INIT(&rack->r_ctl.rc_tmap); 8164 8165 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 8166 rsm->r_dupack = 0; 8167 if (rack_verbose_logging) 8168 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8169 /* We must re-add it back to the tlist */ 8170 if (trsm == NULL) { 8171 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8172 } else { 8173 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 8174 } 8175 rsm->r_in_tmap = 1; 8176 trsm = rsm; 8177 if (rsm->r_flags & RACK_ACKED) 8178 rsm->r_flags |= RACK_WAS_ACKED; 8179 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED | RACK_WAS_LOST); 8180 rsm->r_flags |= RACK_MUST_RXT; 8181 } 8182 /* zero the lost since it's all gone */ 8183 rack->r_ctl.rc_considered_lost = 0; 8184 /* Clear the count (we just un-acked them) */ 8185 rack->r_ctl.rc_sacked = 0; 8186 rack->r_ctl.rc_sacklast = NULL; 8187 /* Clear the tlp rtx mark */ 8188 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); 8189 if (rack->r_ctl.rc_resend != NULL) 8190 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 8191 rack->r_ctl.rc_prr_sndcnt = 0; 8192 rack_log_to_prr(rack, 6, 0, __LINE__); 8193 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); 8194 if (rack->r_ctl.rc_resend != NULL) 8195 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 8196 if (((tp->t_flags & TF_SACK_PERMIT) == 0) && 8197 ((tp->t_flags & TF_SENTFIN) == 0)) { 8198 /* 8199 * For non-sack customers new data 8200 * needs to go out as retransmits until 8201 * we retransmit up to snd_max. 
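 * For example, if 20 MSS worth of data is outstanding when the timer
 * fires, rc_out_at_rto starts at that flight size and r_must_retran is
 * set; rack_update_rsm() then decrements rc_out_at_rto as each
 * RACK_MUST_RXT segment is re-sent and clears r_must_retran once the
 * retransmissions have reached rc_snd_max_at_rto.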
8202 */ 8203 rack->r_must_retran = 1; 8204 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 8205 rack->r_ctl.rc_sacked); 8206 } 8207 } 8208 8209 static void 8210 rack_convert_rtts(struct tcpcb *tp) 8211 { 8212 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC); 8213 tp->t_rxtcur = RACK_REXMTVAL(tp); 8214 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 8215 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 8216 } 8217 if (tp->t_rxtcur > rack_rto_max) { 8218 tp->t_rxtcur = rack_rto_max; 8219 } 8220 } 8221 8222 static void 8223 rack_cc_conn_init(struct tcpcb *tp) 8224 { 8225 struct tcp_rack *rack; 8226 uint32_t srtt; 8227 8228 rack = (struct tcp_rack *)tp->t_fb_ptr; 8229 srtt = tp->t_srtt; 8230 cc_conn_init(tp); 8231 /* 8232 * Now convert to rack's internal format, 8233 * if required. 8234 */ 8235 if ((srtt == 0) && (tp->t_srtt != 0)) 8236 rack_convert_rtts(tp); 8237 /* 8238 * We want a chance to stay in slowstart as 8239 * we create a connection. TCP spec says that 8240 * initially ssthresh is infinite. For our 8241 * purposes that is the snd_wnd. 8242 */ 8243 if (tp->snd_ssthresh < tp->snd_wnd) { 8244 tp->snd_ssthresh = tp->snd_wnd; 8245 } 8246 /* 8247 * We also want to assure a IW worth of 8248 * data can get inflight. 8249 */ 8250 if (rc_init_window(rack) < tp->snd_cwnd) 8251 tp->snd_cwnd = rc_init_window(rack); 8252 } 8253 8254 /* 8255 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 8256 * we will setup to retransmit the lowest seq number outstanding. 8257 */ 8258 static int 8259 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 8260 { 8261 struct inpcb *inp = tptoinpcb(tp); 8262 int32_t rexmt; 8263 int32_t retval = 0; 8264 bool isipv6; 8265 8266 if ((tp->t_flags & TF_GPUTINPROG) && 8267 (tp->t_rxtshift)) { 8268 /* 8269 * We have had a second timeout 8270 * measurements on successive rxt's are not profitable. 8271 * It is unlikely to be of any use (the network is 8272 * broken or the client went away). 8273 */ 8274 tp->t_flags &= ~TF_GPUTINPROG; 8275 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 8276 rack->r_ctl.rc_gp_srtt /*flex1*/, 8277 tp->gput_seq, 8278 0, 0, 18, __LINE__, NULL, 0); 8279 } 8280 if (ctf_progress_timeout_check(tp, false)) { 8281 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 8282 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 8283 return (-ETIMEDOUT); /* tcp_drop() */ 8284 } 8285 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 8286 rack->r_ctl.retran_during_recovery = 0; 8287 rack->rc_ack_required = 1; 8288 rack->r_ctl.dsack_byte_cnt = 0; 8289 if (IN_RECOVERY(tp->t_flags) && 8290 (rack->rto_from_rec == 0)) { 8291 /* 8292 * Mark that we had a rto while in recovery 8293 * and save the ssthresh so if we go back 8294 * into recovery we will have a chance 8295 * to slowstart back to the level. 8296 */ 8297 rack->rto_from_rec = 1; 8298 rack->r_ctl.rto_ssthresh = tp->snd_ssthresh; 8299 } 8300 if (IN_FASTRECOVERY(tp->t_flags)) 8301 tp->t_flags |= TF_WASFRECOVERY; 8302 else 8303 tp->t_flags &= ~TF_WASFRECOVERY; 8304 if (IN_CONGRECOVERY(tp->t_flags)) 8305 tp->t_flags |= TF_WASCRECOVERY; 8306 else 8307 tp->t_flags &= ~TF_WASCRECOVERY; 8308 if (TCPS_HAVEESTABLISHED(tp->t_state) && 8309 (tp->snd_una == tp->snd_max)) { 8310 /* Nothing outstanding .. 
nothing to do */ 8311 return (0); 8312 } 8313 if (rack->r_ctl.dsack_persist) { 8314 rack->r_ctl.dsack_persist--; 8315 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 8316 rack->r_ctl.num_dsack = 0; 8317 } 8318 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 8319 } 8320 /* 8321 * Rack can only run one timer at a time, so we cannot 8322 * run a KEEPINIT (gating SYN sending) and a retransmit 8323 * timer for the SYN. So if we are in a front state and 8324 * have a KEEPINIT timer we need to check the first transmit 8325 * against now to see if we have exceeded the KEEPINIT time 8326 * (if one is set). 8327 */ 8328 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 8329 (TP_KEEPINIT(tp) != 0)) { 8330 struct rack_sendmap *rsm; 8331 8332 rsm = tqhash_min(rack->r_ctl.tqh); 8333 if (rsm) { 8334 /* Ok we have something outstanding to test keepinit with */ 8335 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 8336 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 8337 /* We have exceeded the KEEPINIT time */ 8338 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 8339 goto drop_it; 8340 } 8341 } 8342 } 8343 /* 8344 * Retransmission timer went off. Message has not been acked within 8345 * retransmit interval. Back off to a longer retransmit interval 8346 * and retransmit one segment. 8347 */ 8348 if ((rack->r_ctl.rc_resend == NULL) || 8349 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 8350 /* 8351 * If the rwnd collapsed on 8352 * the one we are retransmitting 8353 * it does not count against the 8354 * rxt count. 8355 */ 8356 tp->t_rxtshift++; 8357 } 8358 rack_remxt_tmr(tp); 8359 if (tp->t_rxtshift > V_tcp_retries) { 8360 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 8361 drop_it: 8362 tp->t_rxtshift = V_tcp_retries; 8363 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 8364 /* XXXGL: previously t_softerror was casted to uint16_t */ 8365 MPASS(tp->t_softerror >= 0); 8366 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; 8367 goto out; /* tcp_drop() */ 8368 } 8369 if (tp->t_state == TCPS_SYN_SENT) { 8370 /* 8371 * If the SYN was retransmitted, indicate CWND to be limited 8372 * to 1 segment in cc_conn_init(). 8373 */ 8374 tp->snd_cwnd = 1; 8375 } else if (tp->t_rxtshift == 1) { 8376 /* 8377 * first retransmit; record ssthresh and cwnd so they can be 8378 * recovered if this turns out to be a "bad" retransmit. A 8379 * retransmit is considered "bad" if an ACK for this segment 8380 * is received within RTT/2 interval; the assumption here is 8381 * that the ACK was already in flight. See "On Estimating 8382 * End-to-End Network Path Properties" by Allman and Paxson 8383 * for more details. 
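 * Illustration with assumed numbers: if t_srtt is 40000 us the window
 * set below is ticks + 20 ms; an ACK for this sequence arriving inside
 * that window almost certainly acknowledges the original transmission
 * rather than the retransmit, so the snd_cwnd_prev/snd_ssthresh_prev/
 * snd_recover_prev values saved here can be used to undo the
 * congestion response.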
8384 */ 8385 tp->snd_cwnd_prev = tp->snd_cwnd; 8386 tp->snd_ssthresh_prev = tp->snd_ssthresh; 8387 tp->snd_recover_prev = tp->snd_recover; 8388 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 8389 tp->t_flags |= TF_PREVVALID; 8390 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 8391 tp->t_flags &= ~TF_PREVVALID; 8392 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 8393 if ((tp->t_state == TCPS_SYN_SENT) || 8394 (tp->t_state == TCPS_SYN_RECEIVED)) 8395 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 8396 else 8397 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 8398 8399 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 8400 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 8401 /* 8402 * We enter the path for PLMTUD if connection is established or, if 8403 * connection is FIN_WAIT_1 status, reason for the last is that if 8404 * amount of data we send is very small, we could send it in couple 8405 * of packets and process straight to FIN. In that case we won't 8406 * catch ESTABLISHED state. 8407 */ 8408 #ifdef INET6 8409 isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false; 8410 #else 8411 isipv6 = false; 8412 #endif 8413 if (((V_tcp_pmtud_blackhole_detect == 1) || 8414 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 8415 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 8416 ((tp->t_state == TCPS_ESTABLISHED) || 8417 (tp->t_state == TCPS_FIN_WAIT_1))) { 8418 /* 8419 * Idea here is that at each stage of mtu probe (usually, 8420 * 1448 -> 1188 -> 524) should be given 2 chances to recover 8421 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 8422 * should take care of that. 8423 */ 8424 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 8425 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 8426 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 8427 tp->t_rxtshift % 2 == 0)) { 8428 /* 8429 * Enter Path MTU Black-hole Detection mechanism: - 8430 * Disable Path MTU Discovery (IP "DF" bit). - 8431 * Reduce MTU to lower value than what we negotiated 8432 * with peer. 8433 */ 8434 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 8435 /* Record that we may have found a black hole. */ 8436 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 8437 /* Keep track of previous MSS. */ 8438 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 8439 } 8440 8441 /* 8442 * Reduce the MSS to blackhole value or to the 8443 * default in an attempt to retransmit. 8444 */ 8445 #ifdef INET6 8446 if (isipv6 && 8447 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 8448 /* Use the sysctl tuneable blackhole MSS. */ 8449 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 8450 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 8451 } else if (isipv6) { 8452 /* Use the default MSS. */ 8453 tp->t_maxseg = V_tcp_v6mssdflt; 8454 /* 8455 * Disable Path MTU Discovery when we switch 8456 * to minmss. 8457 */ 8458 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 8459 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 8460 } 8461 #endif 8462 #if defined(INET6) && defined(INET) 8463 else 8464 #endif 8465 #ifdef INET 8466 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 8467 /* Use the sysctl tuneable blackhole MSS. */ 8468 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 8469 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 8470 } else { 8471 /* Use the default MSS. */ 8472 tp->t_maxseg = V_tcp_mssdflt; 8473 /* 8474 * Disable Path MTU Discovery when we switch 8475 * to minmss. 
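 * Once we are down to V_tcp_mssdflt (a sysctl, commonly 536 bytes)
 * there is nothing smaller left to probe, so PMTUD is turned off
 * entirely rather than clamped again.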
8476 */ 8477 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 8478 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 8479 } 8480 #endif 8481 } else { 8482 /* 8483 * If further retransmissions are still unsuccessful 8484 * with a lowered MTU, maybe this isn't a blackhole 8485 * and we restore the previous MSS and blackhole 8486 * detection flags. The limit '6' is determined by 8487 * giving each probe stage (1448, 1188, 524) 2 8488 * chances to recover. 8489 */ 8490 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 8491 (tp->t_rxtshift >= 6)) { 8492 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 8493 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 8494 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 8495 if (tp->t_maxseg < V_tcp_mssdflt) { 8496 /* 8497 * The MSS is so small we should not 8498 * process incoming SACK's since we are 8499 * subject to attack in such a case. 8500 */ 8501 tp->t_flags2 |= TF2_PROC_SACK_PROHIBIT; 8502 } else { 8503 tp->t_flags2 &= ~TF2_PROC_SACK_PROHIBIT; 8504 } 8505 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 8506 } 8507 } 8508 } 8509 /* 8510 * Disable RFC1323 and SACK if we haven't got any response to 8511 * our third SYN to work-around some broken terminal servers 8512 * (most of which have hopefully been retired) that have bad VJ 8513 * header compression code which trashes TCP segments containing 8514 * unknown-to-them TCP options. 8515 */ 8516 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 8517 (tp->t_rxtshift == 3)) 8518 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 8519 /* 8520 * If we backed off this far, our srtt estimate is probably bogus. 8521 * Clobber it so we'll take the next rtt measurement as our srtt; 8522 * move the current srtt into rttvar to keep the current retransmit 8523 * times until then. 8524 */ 8525 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 8526 #ifdef INET6 8527 if ((inp->inp_vflag & INP_IPV6) != 0) 8528 in6_losing(inp); 8529 else 8530 #endif 8531 in_losing(inp); 8532 tp->t_rttvar += tp->t_srtt; 8533 tp->t_srtt = 0; 8534 } 8535 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 8536 tp->snd_recover = tp->snd_max; 8537 tp->t_flags |= TF_ACKNOW; 8538 tp->t_rtttime = 0; 8539 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__); 8540 out: 8541 return (retval); 8542 } 8543 8544 static int 8545 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp) 8546 { 8547 int32_t ret = 0; 8548 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 8549 8550 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 8551 (tp->t_flags & TF_GPUTINPROG)) { 8552 /* 8553 * We have a goodput in progress 8554 * and we have entered a late state. 8555 * Do we have enough data in the sb 8556 * to handle the GPUT request? 8557 */ 8558 uint32_t bytes; 8559 8560 bytes = tp->gput_ack - tp->gput_seq; 8561 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 8562 bytes += tp->gput_seq - tp->snd_una; 8563 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 8564 /* 8565 * There are not enough bytes in the socket 8566 * buffer that have been sent to cover this 8567 * measurement. Cancel it. 
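 * Worked example (sequence numbers assumed for illustration): with
 * snd_una = 500, gput_seq = 1000 and gput_ack = 60000 the measurement
 * still needs 59000 bytes past gput_seq plus the 500 un-acked bytes in
 * front of it, 59500 in all; if sbavail() on the send buffer is smaller
 * than that the goodput sample can never complete, so we cancel it.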
8568 */ 8569 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 8570 rack->r_ctl.rc_gp_srtt /*flex1*/, 8571 tp->gput_seq, 8572 0, 0, 18, __LINE__, NULL, 0); 8573 tp->t_flags &= ~TF_GPUTINPROG; 8574 } 8575 } 8576 if (timers == 0) { 8577 return (0); 8578 } 8579 if (tp->t_state == TCPS_LISTEN) { 8580 /* no timers on listen sockets */ 8581 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 8582 return (0); 8583 return (1); 8584 } 8585 if ((timers & PACE_TMR_RACK) && 8586 rack->rc_on_min_to) { 8587 /* 8588 * For the rack timer when we 8589 * are on a min-timeout (which means rrr_conf = 3) 8590 * we don't want to check the timer. It may 8591 * be going off for a pace and thats ok we 8592 * want to send the retransmit (if its ready). 8593 * 8594 * If its on a normal rack timer (non-min) then 8595 * we will check if its expired. 8596 */ 8597 goto skip_time_check; 8598 } 8599 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 8600 uint32_t left; 8601 8602 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 8603 ret = -1; 8604 rack_log_to_processing(rack, cts, ret, 0); 8605 return (0); 8606 } 8607 if (hpts_calling == 0) { 8608 /* 8609 * A user send or queued mbuf (sack) has called us? We 8610 * return 0 and let the pacing guards 8611 * deal with it if they should or 8612 * should not cause a send. 8613 */ 8614 ret = -2; 8615 rack_log_to_processing(rack, cts, ret, 0); 8616 return (0); 8617 } 8618 /* 8619 * Ok our timer went off early and we are not paced false 8620 * alarm, go back to sleep. We make sure we don't have 8621 * no-sack wakeup on since we no longer have a PKT_OUTPUT 8622 * flag in place. 8623 */ 8624 rack->rc_tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE; 8625 ret = -3; 8626 left = rack->r_ctl.rc_timer_exp - cts; 8627 tcp_hpts_insert(tp, HPTS_MS_TO_SLOTS(left)); 8628 rack_log_to_processing(rack, cts, ret, left); 8629 return (1); 8630 } 8631 skip_time_check: 8632 rack->rc_tmr_stopped = 0; 8633 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 8634 if (timers & PACE_TMR_DELACK) { 8635 ret = rack_timeout_delack(tp, rack, cts); 8636 } else if (timers & PACE_TMR_RACK) { 8637 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8638 rack->r_fast_output = 0; 8639 ret = rack_timeout_rack(tp, rack, cts); 8640 } else if (timers & PACE_TMR_TLP) { 8641 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8642 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); 8643 } else if (timers & PACE_TMR_RXT) { 8644 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8645 rack->r_fast_output = 0; 8646 ret = rack_timeout_rxt(tp, rack, cts); 8647 } else if (timers & PACE_TMR_PERSIT) { 8648 ret = rack_timeout_persist(tp, rack, cts); 8649 } else if (timers & PACE_TMR_KEEP) { 8650 ret = rack_timeout_keepalive(tp, rack, cts); 8651 } 8652 rack_log_to_processing(rack, cts, ret, timers); 8653 return (ret); 8654 } 8655 8656 static void 8657 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 8658 { 8659 struct timeval tv; 8660 uint32_t us_cts, flags_on_entry; 8661 uint8_t hpts_removed = 0; 8662 8663 flags_on_entry = rack->r_ctl.rc_hpts_flags; 8664 us_cts = tcp_get_usecs(&tv); 8665 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 8666 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 8667 ((tp->snd_max - tp->snd_una) == 0))) { 8668 tcp_hpts_remove(rack->rc_tp); 8669 hpts_removed = 1; 8670 /* If we were not delayed cancel out the flag. 
*/ 8671 if ((tp->snd_max - tp->snd_una) == 0) 8672 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 8673 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 8674 } 8675 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 8676 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 8677 if (tcp_in_hpts(rack->rc_tp) && 8678 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 8679 /* 8680 * Canceling timer's when we have no output being 8681 * paced. We also must remove ourselves from the 8682 * hpts. 8683 */ 8684 tcp_hpts_remove(rack->rc_tp); 8685 hpts_removed = 1; 8686 } 8687 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 8688 } 8689 if (hpts_removed == 0) 8690 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 8691 } 8692 8693 static int 8694 rack_stopall(struct tcpcb *tp) 8695 { 8696 struct tcp_rack *rack; 8697 8698 rack = (struct tcp_rack *)tp->t_fb_ptr; 8699 rack->t_timers_stopped = 1; 8700 8701 tcp_hpts_remove(tp); 8702 8703 return (0); 8704 } 8705 8706 static void 8707 rack_stop_all_timers(struct tcpcb *tp, struct tcp_rack *rack) 8708 { 8709 /* 8710 * Assure no timers are running. 8711 */ 8712 if (tcp_timer_active(tp, TT_PERSIST)) { 8713 /* We enter in persists, set the flag appropriately */ 8714 rack->rc_in_persist = 1; 8715 } 8716 if (tcp_in_hpts(rack->rc_tp)) { 8717 tcp_hpts_remove(rack->rc_tp); 8718 } 8719 } 8720 8721 /* 8722 * We maintain an array fo 16 (RETRAN_CNT_SIZE) entries. This 8723 * array is zeroed at the start of recovery. Each time a segment 8724 * is retransmitted, we translate that into a number of packets 8725 * (based on segsiz) and based on how many times its been retransmitted 8726 * increment by the number of packets the counter that represents 8727 * retansmitted N times. Index 0 is retransmitted 1 time, index 1 8728 * is retransmitted 2 times etc. 8729 * 8730 * So for example when we send a 4344 byte transmission with a 1448 8731 * byte segsize, and its the third time we have retransmitted this 8732 * segment, we would add to the rc_cnt_of_retran[2] the value of 8733 * 3. That represents 3 MSS were retransmitted 3 times (index is 8734 * the number of times retranmitted minus 1). 8735 */ 8736 static void 8737 rack_peg_rxt(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t segsiz) 8738 { 8739 int idx; 8740 uint32_t peg; 8741 8742 peg = ((rsm->r_end - rsm->r_start) + segsiz) - 1; 8743 peg /= segsiz; 8744 idx = rsm->r_act_rxt_cnt - 1; 8745 if (idx >= RETRAN_CNT_SIZE) 8746 idx = RETRAN_CNT_SIZE - 1; 8747 /* Max of a uint16_t retransmits in a bucket */ 8748 if ((rack->r_ctl.rc_cnt_of_retran[idx] + peg) < 0xffff) 8749 rack->r_ctl.rc_cnt_of_retran[idx] += peg; 8750 else 8751 rack->r_ctl.rc_cnt_of_retran[idx] = 0xffff; 8752 } 8753 8754 /* 8755 * We maintain an array fo 16 (RETRAN_CNT_SIZE) entries. This 8756 * array is zeroed at the start of recovery. Each time a segment 8757 * is retransmitted, we translate that into a number of packets 8758 * (based on segsiz) and based on how many times its been retransmitted 8759 * increment by the number of packets the counter that represents 8760 * retansmitted N times. Index 0 is retransmitted 1 time, index 1 8761 * is retransmitted 2 times etc. 8762 * 8763 * The rack_unpeg_rxt is used when we go to retransmit a segment 8764 * again. 
Basically, if the segment had previously been retransmitted
8765  * say 3 times (as in the example illustrated in the comment
8766  * above rack_peg_rxt()), then prior to calling that and incrementing
8767  * r_act_rxt_cnt we would have called rack_unpeg_rxt(), which
8768  * subtracts back the previous add from its last rxt (in this
8769  * example r_act_rxt_cnt would have been 2 for 2 retransmissions),
8770  * so we would have subtracted 3 from rc_cnt_of_retran[1] to remove
8771  * those 3 segments. You will see this in the rack_update_rsm()
8772  * below where we do:
8773  * if (rsm->r_act_rxt_cnt > 0) {
8774  *     rack_unpeg_rxt(rack, rsm, segsiz);
8775  * }
8776  * rsm->r_act_rxt_cnt++;
8777  * rack_peg_rxt(rack, rsm, segsiz);
8778  *
8779  * This effectively moves the count from rc_cnt_of_retran[1] to
8780  * rc_cnt_of_retran[2].
8781  */
8782 static void
8783 rack_unpeg_rxt(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t segsiz)
8784 {
8785 	int idx;
8786 	uint32_t peg;
8787 
8788 	idx = rsm->r_act_rxt_cnt - 1;
8789 	if (idx >= RETRAN_CNT_SIZE)
8790 		idx = RETRAN_CNT_SIZE - 1;
8791 	peg = ((rsm->r_end - rsm->r_start) + segsiz) - 1;
8792 	peg /= segsiz;
8793 	if (peg < rack->r_ctl.rc_cnt_of_retran[idx])
8794 		rack->r_ctl.rc_cnt_of_retran[idx] -= peg;
8795 	else {
8796 		/* TSNH */
8797 		rack->r_ctl.rc_cnt_of_retran[idx] = 0;
8798 	}
8799 }
8800 
8801 static void
8802 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
8803     struct rack_sendmap *rsm, uint64_t ts, uint32_t add_flag, int segsiz)
8804 {
8805 	int32_t idx;
8806 
8807 	rsm->r_rtr_cnt++;
8808 	if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) {
8809 		rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS;
8810 		rsm->r_flags |= RACK_OVERMAX;
8811 	}
8812 	if (rsm->r_act_rxt_cnt > 0) {
8813 		/* Drop the count back for this, it's retransmitting again */
8814 		rack_unpeg_rxt(rack, rsm, segsiz);
8815 	}
8816 	rsm->r_act_rxt_cnt++;
8817 	/* Peg the count/index */
8818 	rack_peg_rxt(rack, rsm, segsiz);
8819 	rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
8820 	rsm->r_dupack = 0;
8821 	if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) {
8822 		rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start);
8823 		rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start);
8824 	}
8825 	if (rsm->r_flags & RACK_WAS_LOST) {
8826 		/*
8827 		 * We retransmitted it, putting it back in flight;
8828 		 * remove the lost designation and reduce the
8829 		 * bytes considered lost.
8830 		 */
8831 		rsm->r_flags &= ~RACK_WAS_LOST;
8832 		KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)),
8833 			("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
8834 		if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start))
8835 			rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start;
8836 		else
8837 			rack->r_ctl.rc_considered_lost = 0;
8838 	}
8839 	idx = rsm->r_rtr_cnt - 1;
8840 	rsm->r_tim_lastsent[idx] = ts;
8841 	/*
8842 	 * Here we don't add in the len of send, since it's already
8843 	 * in snd_una <-> snd_max.
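 * (Contrast this with the new-send path in rack_log_output(), where
 * the length being sent is not yet reflected in snd_una <-> snd_max
 * and so is added on top of the flight size when r_fas is recorded.)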
8844 */ 8845 rsm->r_fas = ctf_flight_size(rack->rc_tp, 8846 rack->r_ctl.rc_sacked); 8847 if (rsm->r_flags & RACK_ACKED) { 8848 /* Problably MTU discovery messing with us */ 8849 rsm->r_flags &= ~RACK_ACKED; 8850 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8851 } 8852 if (rsm->r_in_tmap) { 8853 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8854 rsm->r_in_tmap = 0; 8855 } 8856 /* Lets make sure it really is in or not the GP window */ 8857 rack_mark_in_gp_win(tp, rsm); 8858 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8859 rsm->r_in_tmap = 1; 8860 rsm->r_bas = (uint8_t)(((rsm->r_end - rsm->r_start) + segsiz - 1) / segsiz); 8861 /* Take off the must retransmit flag, if its on */ 8862 if (rsm->r_flags & RACK_MUST_RXT) { 8863 if (rack->r_must_retran) 8864 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 8865 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 8866 /* 8867 * We have retransmitted all we need. Clear 8868 * any must retransmit flags. 8869 */ 8870 rack->r_must_retran = 0; 8871 rack->r_ctl.rc_out_at_rto = 0; 8872 } 8873 rsm->r_flags &= ~RACK_MUST_RXT; 8874 } 8875 /* Remove any collapsed flag */ 8876 rsm->r_flags &= ~RACK_RWND_COLLAPSED; 8877 if (rsm->r_flags & RACK_SACK_PASSED) { 8878 /* We have retransmitted due to the SACK pass */ 8879 rsm->r_flags &= ~RACK_SACK_PASSED; 8880 rsm->r_flags |= RACK_WAS_SACKPASS; 8881 } 8882 } 8883 8884 static uint32_t 8885 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 8886 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint32_t add_flag, int segsiz) 8887 { 8888 /* 8889 * We (re-)transmitted starting at rsm->r_start for some length 8890 * (possibly less than r_end. 8891 */ 8892 struct rack_sendmap *nrsm; 8893 int insret __diagused; 8894 uint32_t c_end; 8895 int32_t len; 8896 8897 len = *lenp; 8898 c_end = rsm->r_start + len; 8899 if (SEQ_GEQ(c_end, rsm->r_end)) { 8900 /* 8901 * We retransmitted the whole piece or more than the whole 8902 * slopping into the next rsm. 8903 */ 8904 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); 8905 if (c_end == rsm->r_end) { 8906 *lenp = 0; 8907 return (0); 8908 } else { 8909 int32_t act_len; 8910 8911 /* Hangs over the end return whats left */ 8912 act_len = rsm->r_end - rsm->r_start; 8913 *lenp = (len - act_len); 8914 return (rsm->r_end); 8915 } 8916 /* We don't get out of this block. */ 8917 } 8918 /* 8919 * Here we retransmitted less than the whole thing which means we 8920 * have to split this into what was transmitted and what was not. 8921 */ 8922 nrsm = rack_alloc_full_limit(rack); 8923 if (nrsm == NULL) { 8924 /* 8925 * We can't get memory, so lets not proceed. 8926 */ 8927 *lenp = 0; 8928 return (0); 8929 } 8930 /* 8931 * So here we are going to take the original rsm and make it what we 8932 * retransmitted. nrsm will be the tail portion we did not 8933 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 8934 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 8935 * 1, 6 and the new piece will be 6, 11. 
8936 */ 8937 rack_clone_rsm(rack, nrsm, rsm, c_end); 8938 nrsm->r_dupack = 0; 8939 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 8940 #ifndef INVARIANTS 8941 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 8942 #else 8943 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 8944 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 8945 nrsm, insret, rack, rsm); 8946 } 8947 #endif 8948 if (rsm->r_in_tmap) { 8949 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8950 nrsm->r_in_tmap = 1; 8951 } 8952 rsm->r_flags &= (~RACK_HAS_FIN); 8953 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); 8954 /* Log a split of rsm into rsm and nrsm */ 8955 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 8956 *lenp = 0; 8957 return (0); 8958 } 8959 8960 static void 8961 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 8962 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts, 8963 struct rack_sendmap *hintrsm, uint32_t add_flag, struct mbuf *s_mb, 8964 uint32_t s_moff, int hw_tls, int segsiz) 8965 { 8966 struct tcp_rack *rack; 8967 struct rack_sendmap *rsm, *nrsm; 8968 int insret __diagused; 8969 8970 register uint32_t snd_max, snd_una; 8971 8972 /* 8973 * Add to the RACK log of packets in flight or retransmitted. If 8974 * there is a TS option we will use the TS echoed, if not we will 8975 * grab a TS. 8976 * 8977 * Retransmissions will increment the count and move the ts to its 8978 * proper place. Note that if options do not include TS's then we 8979 * won't be able to effectively use the ACK for an RTT on a retran. 8980 * 8981 * Notes about r_start and r_end. Lets consider a send starting at 8982 * sequence 1 for 10 bytes. In such an example the r_start would be 8983 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 8984 * This means that r_end is actually the first sequence for the next 8985 * slot (11). 8986 * 8987 */ 8988 /* 8989 * If err is set what do we do XXXrrs? should we not add the thing? 8990 * -- i.e. return if err != 0 or should we pretend we sent it? -- 8991 * i.e. proceed with add ** do this for now. 8992 */ 8993 INP_WLOCK_ASSERT(tptoinpcb(tp)); 8994 if (err) 8995 /* 8996 * We don't log errors -- we could but snd_max does not 8997 * advance in this case either. 8998 */ 8999 return; 9000 9001 if (th_flags & TH_RST) { 9002 /* 9003 * We don't log resets and we return immediately from 9004 * sending 9005 */ 9006 return; 9007 } 9008 rack = (struct tcp_rack *)tp->t_fb_ptr; 9009 snd_una = tp->snd_una; 9010 snd_max = tp->snd_max; 9011 if (th_flags & (TH_SYN | TH_FIN)) { 9012 /* 9013 * The call to rack_log_output is made before bumping 9014 * snd_max. This means we can record one extra byte on a SYN 9015 * or FIN if seq_out is adding more on and a FIN is present 9016 * (and we are not resending). 9017 */ 9018 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 9019 len++; 9020 if (th_flags & TH_FIN) 9021 len++; 9022 } 9023 if (SEQ_LEQ((seq_out + len), snd_una)) { 9024 /* Are sending an old segment to induce an ack (keep-alive)? */ 9025 return; 9026 } 9027 if (SEQ_LT(seq_out, snd_una)) { 9028 /* huh? should we panic? */ 9029 uint32_t end; 9030 9031 end = seq_out + len; 9032 seq_out = snd_una; 9033 if (SEQ_GEQ(end, seq_out)) 9034 len = end - seq_out; 9035 else 9036 len = 0; 9037 } 9038 if (len == 0) { 9039 /* We don't log zero window probes */ 9040 return; 9041 } 9042 if (IN_FASTRECOVERY(tp->t_flags)) { 9043 rack->r_ctl.rc_prr_out += len; 9044 } 9045 /* First question is it a retransmission or new? 
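 * seq_out == snd_max means this is new data that simply extends the
 * map; anything below snd_max has been sent before and has to be
 * located (and possibly split) among the existing sendmap entries.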
*/ 9046 if (seq_out == snd_max) { 9047 /* Its new */ 9048 rack_chk_req_and_hybrid_on_out(rack, seq_out, len, cts); 9049 again: 9050 rsm = rack_alloc(rack); 9051 if (rsm == NULL) { 9052 /* 9053 * Hmm out of memory and the tcb got destroyed while 9054 * we tried to wait. 9055 */ 9056 return; 9057 } 9058 if (th_flags & TH_FIN) { 9059 rsm->r_flags = RACK_HAS_FIN|add_flag; 9060 } else { 9061 rsm->r_flags = add_flag; 9062 } 9063 if (hw_tls) 9064 rsm->r_hw_tls = 1; 9065 rsm->r_tim_lastsent[0] = cts; 9066 rsm->r_rtr_cnt = 1; 9067 rsm->r_act_rxt_cnt = 0; 9068 rsm->r_rtr_bytes = 0; 9069 if (th_flags & TH_SYN) { 9070 /* The data space is one beyond snd_una */ 9071 rsm->r_flags |= RACK_HAS_SYN; 9072 } 9073 rsm->r_start = seq_out; 9074 rsm->r_end = rsm->r_start + len; 9075 rack_mark_in_gp_win(tp, rsm); 9076 rsm->r_dupack = 0; 9077 /* 9078 * save off the mbuf location that 9079 * sndmbuf_noadv returned (which is 9080 * where we started copying from).. 9081 */ 9082 rsm->m = s_mb; 9083 rsm->soff = s_moff; 9084 /* 9085 * Here we do add in the len of send, since its not yet 9086 * reflected in in snduna <->snd_max 9087 */ 9088 rsm->r_fas = (ctf_flight_size(rack->rc_tp, 9089 rack->r_ctl.rc_sacked) + 9090 (rsm->r_end - rsm->r_start)); 9091 if ((rack->rc_initial_ss_comp == 0) && 9092 (rack->r_ctl.ss_hi_fs < rsm->r_fas)) { 9093 rack->r_ctl.ss_hi_fs = rsm->r_fas; 9094 } 9095 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ 9096 if (rsm->m) { 9097 if (rsm->m->m_len <= rsm->soff) { 9098 /* 9099 * XXXrrs Question, will this happen? 9100 * 9101 * If sbsndptr is set at the correct place 9102 * then s_moff should always be somewhere 9103 * within rsm->m. But if the sbsndptr was 9104 * off then that won't be true. If it occurs 9105 * we need to walkout to the correct location. 9106 */ 9107 struct mbuf *lm; 9108 9109 lm = rsm->m; 9110 while (lm->m_len <= rsm->soff) { 9111 rsm->soff -= lm->m_len; 9112 lm = lm->m_next; 9113 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", 9114 __func__, rack, s_moff, s_mb, rsm->soff)); 9115 } 9116 rsm->m = lm; 9117 } 9118 rsm->orig_m_len = rsm->m->m_len; 9119 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 9120 } else { 9121 rsm->orig_m_len = 0; 9122 rsm->orig_t_space = 0; 9123 } 9124 rsm->r_bas = (uint8_t)((len + segsiz - 1) / segsiz); 9125 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9126 /* Log a new rsm */ 9127 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); 9128 #ifndef INVARIANTS 9129 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 9130 #else 9131 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 9132 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 9133 nrsm, insret, rack, rsm); 9134 } 9135 #endif 9136 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 9137 rsm->r_in_tmap = 1; 9138 if (rsm->r_flags & RACK_IS_PCM) { 9139 rack->r_ctl.pcm_i.send_time = cts; 9140 rack->r_ctl.pcm_i.eseq = rsm->r_end; 9141 /* First time through we set the start too */ 9142 if (rack->pcm_in_progress == 0) 9143 rack->r_ctl.pcm_i.sseq = rsm->r_start; 9144 } 9145 /* 9146 * Special case detection, is there just a single 9147 * packet outstanding when we are not in recovery? 9148 * 9149 * If this is true mark it so. 
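 * The r_one_out_nr mark is consulted later in tcp_rack_xmit_timer():
 * an RTT sample taken from such a lone, most likely application-limited
 * send is given low confidence for buffer-delay estimation.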
9150 */ 9151 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 9152 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 9153 struct rack_sendmap *prsm; 9154 9155 prsm = tqhash_prev(rack->r_ctl.tqh, rsm); 9156 if (prsm) 9157 prsm->r_one_out_nr = 1; 9158 } 9159 return; 9160 } 9161 /* 9162 * If we reach here its a retransmission and we need to find it. 9163 */ 9164 more: 9165 if (hintrsm && (hintrsm->r_start == seq_out)) { 9166 rsm = hintrsm; 9167 hintrsm = NULL; 9168 } else { 9169 /* No hints sorry */ 9170 rsm = NULL; 9171 } 9172 if ((rsm) && (rsm->r_start == seq_out)) { 9173 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); 9174 if (len == 0) { 9175 return; 9176 } else { 9177 goto more; 9178 } 9179 } 9180 /* Ok it was not the last pointer go through it the hard way. */ 9181 refind: 9182 rsm = tqhash_find(rack->r_ctl.tqh, seq_out); 9183 if (rsm) { 9184 if (rsm->r_start == seq_out) { 9185 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); 9186 if (len == 0) { 9187 return; 9188 } else { 9189 goto refind; 9190 } 9191 } 9192 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 9193 /* Transmitted within this piece */ 9194 /* 9195 * Ok we must split off the front and then let the 9196 * update do the rest 9197 */ 9198 nrsm = rack_alloc_full_limit(rack); 9199 if (nrsm == NULL) { 9200 rack_update_rsm(tp, rack, rsm, cts, add_flag, segsiz); 9201 return; 9202 } 9203 /* 9204 * copy rsm to nrsm and then trim the front of rsm 9205 * to not include this part. 9206 */ 9207 rack_clone_rsm(rack, nrsm, rsm, seq_out); 9208 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 9209 #ifndef INVARIANTS 9210 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 9211 #else 9212 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 9213 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 9214 nrsm, insret, rack, rsm); 9215 } 9216 #endif 9217 if (rsm->r_in_tmap) { 9218 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 9219 nrsm->r_in_tmap = 1; 9220 } 9221 rsm->r_flags &= (~RACK_HAS_FIN); 9222 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag, segsiz); 9223 if (len == 0) { 9224 return; 9225 } else if (len > 0) 9226 goto refind; 9227 } 9228 } 9229 /* 9230 * Hmm not found in map did they retransmit both old and on into the 9231 * new? 9232 */ 9233 if (seq_out == tp->snd_max) { 9234 goto again; 9235 } else if (SEQ_LT(seq_out, tp->snd_max)) { 9236 #ifdef INVARIANTS 9237 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 9238 seq_out, len, tp->snd_una, tp->snd_max); 9239 printf("Starting Dump of all rack entries\n"); 9240 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 9241 printf("rsm:%p start:%u end:%u\n", 9242 rsm, rsm->r_start, rsm->r_end); 9243 } 9244 printf("Dump complete\n"); 9245 panic("seq_out not found rack:%p tp:%p", 9246 rack, tp); 9247 #endif 9248 } else { 9249 #ifdef INVARIANTS 9250 /* 9251 * Hmm beyond sndmax? (only if we are using the new rtt-pack 9252 * flag) 9253 */ 9254 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 9255 seq_out, len, tp->snd_max, tp); 9256 #endif 9257 } 9258 } 9259 9260 /* 9261 * Record one of the RTT updates from an ack into 9262 * our sample structure. 
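 * A single ACK can newly cover several rsm's, so this may be called
 * more than once per ACK: rs_rtt_lowest/rs_rtt_highest track the
 * extremes, rs_rtt_tot and rs_rtt_cnt feed the average, and the best
 * us_rtt together with its confidence is kept for
 * tcp_rack_xmit_timer_commit() to fold into the srtt state.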
9263  */
9264 
9265 static void
9266 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt,
9267     int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt)
9268 {
9269 	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
9270 	    (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) {
9271 		rack->r_ctl.rack_rs.rs_rtt_lowest = rtt;
9272 	}
9273 	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
9274 	    (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) {
9275 		rack->r_ctl.rack_rs.rs_rtt_highest = rtt;
9276 	}
9277 	if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
9278 		if (us_rtt < rack->r_ctl.rc_gp_lowrtt)
9279 			rack->r_ctl.rc_gp_lowrtt = us_rtt;
9280 		if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd)
9281 			rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
9282 	}
9283 	if ((confidence == 1) &&
9284 	    ((rsm == NULL) ||
9285 	     (rsm->r_just_ret) ||
9286 	     (rsm->r_one_out_nr &&
9287 	      len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) {
9288 		/*
9289 		 * If the rsm had a just-return
9290 		 * hit it then we can't trust the
9291 		 * rtt measurement for buffer determination.
9292 		 * Note that a confidence of 2 indicates
9293 		 * SACK'd, which overrides the r_just_ret or
9294 		 * the r_one_out_nr. If it was a CUM-ACK and
9295 		 * we had only two outstanding but get an
9296 		 * ack for only 1, then that also lowers our
9297 		 * confidence.
9298 		 */
9299 		confidence = 0;
9300 	}
9301 	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
9302 	    (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) {
9303 		if (rack->r_ctl.rack_rs.confidence == 0) {
9304 			/*
9305 			 * We take anything with no current confidence
9306 			 * saved.
9307 			 */
9308 			rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
9309 			rack->r_ctl.rack_rs.confidence = confidence;
9310 			rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
9311 		} else if (confidence != 0) {
9312 			/*
9313 			 * Once we have a confident number,
9314 			 * we can update it with a smaller
9315 			 * value since this confident number
9316 			 * may include the DSACK time until
9317 			 * the next segment (the second one) arrived.
9318 			 */
9319 			rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
9320 			rack->r_ctl.rack_rs.confidence = confidence;
9321 			rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
9322 		}
9323 	}
9324 	rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence);
9325 	rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID;
9326 	rack->r_ctl.rack_rs.rs_rtt_tot += rtt;
9327 	rack->r_ctl.rack_rs.rs_rtt_cnt++;
9328 }
9329 
9330 /*
9331  * Collect new round-trip time estimate
9332  * and update averages and current timeout.
9333 */ 9334 static void 9335 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 9336 { 9337 int32_t delta; 9338 int32_t rtt; 9339 9340 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 9341 /* No valid sample */ 9342 return; 9343 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 9344 /* We are to use the lowest RTT seen in a single ack */ 9345 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 9346 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 9347 /* We are to use the highest RTT seen in a single ack */ 9348 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 9349 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 9350 /* We are to use the average RTT seen in a single ack */ 9351 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 9352 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 9353 } else { 9354 #ifdef INVARIANTS 9355 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 9356 #endif 9357 return; 9358 } 9359 if (rtt == 0) 9360 rtt = 1; 9361 if (rack->rc_gp_rtt_set == 0) { 9362 /* 9363 * With no RTT we have to accept 9364 * even one we are not confident of. 9365 */ 9366 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 9367 rack->rc_gp_rtt_set = 1; 9368 } else if (rack->r_ctl.rack_rs.confidence) { 9369 /* update the running gp srtt */ 9370 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 9371 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 9372 } 9373 if (rack->r_ctl.rack_rs.confidence) { 9374 /* 9375 * record the low and high for highly buffered path computation, 9376 * we only do this if we are confident (not a retransmission). 9377 */ 9378 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 9379 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 9380 } 9381 if (rack->rc_highly_buffered == 0) { 9382 /* 9383 * Currently once we declare a path has 9384 * highly buffered there is no going 9385 * back, which may be a problem... 9386 */ 9387 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 9388 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 9389 rack->r_ctl.rc_highest_us_rtt, 9390 rack->r_ctl.rc_lowest_us_rtt, 9391 RACK_RTTS_SEEHBP); 9392 rack->rc_highly_buffered = 1; 9393 } 9394 } 9395 } 9396 if ((rack->r_ctl.rack_rs.confidence) || 9397 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 9398 /* 9399 * If we are highly confident of it <or> it was 9400 * never retransmitted we accept it as the last us_rtt. 9401 */ 9402 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 9403 /* The lowest rtt can be set if its was not retransmited */ 9404 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 9405 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 9406 if (rack->r_ctl.rc_lowest_us_rtt == 0) 9407 rack->r_ctl.rc_lowest_us_rtt = 1; 9408 } 9409 } 9410 rack = (struct tcp_rack *)tp->t_fb_ptr; 9411 if (tp->t_srtt != 0) { 9412 /* 9413 * We keep a simple srtt in microseconds, like our rtt 9414 * measurement. We don't need to do any tricks with shifting 9415 * etc. Instead we just add in 1/8th of the new measurement 9416 * and subtract out 1/8 of the old srtt. We do the same with 9417 * the variance after finding the absolute value of the 9418 * difference between this sample and the current srtt. 
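 * Worked example (numbers assumed): with t_srtt = 40000 us,
 * t_rttvar = 8000 us and a new sample rtt = 48000 us, the code below
 * yields t_srtt = 40000 - 5000 + 6000 = 41000 us and, with
 * |delta| = 8000, t_rttvar = 8000 - 1000 + 1000 = 8000 us.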
9419 */ 9420 delta = tp->t_srtt - rtt; 9421 /* Take off 1/8th of the current sRTT */ 9422 tp->t_srtt -= (tp->t_srtt >> 3); 9423 /* Add in 1/8th of the new RTT just measured */ 9424 tp->t_srtt += (rtt >> 3); 9425 if (tp->t_srtt <= 0) 9426 tp->t_srtt = 1; 9427 /* Now lets make the absolute value of the variance */ 9428 if (delta < 0) 9429 delta = -delta; 9430 /* Subtract out 1/8th */ 9431 tp->t_rttvar -= (tp->t_rttvar >> 3); 9432 /* Add in 1/8th of the new variance we just saw */ 9433 tp->t_rttvar += (delta >> 3); 9434 if (tp->t_rttvar <= 0) 9435 tp->t_rttvar = 1; 9436 } else { 9437 /* 9438 * No rtt measurement yet - use the unsmoothed rtt. Set the 9439 * variance to half the rtt (so our first retransmit happens 9440 * at 3*rtt). 9441 */ 9442 tp->t_srtt = rtt; 9443 tp->t_rttvar = rtt >> 1; 9444 } 9445 rack->rc_srtt_measure_made = 1; 9446 KMOD_TCPSTAT_INC(tcps_rttupdated); 9447 if (tp->t_rttupdated < UCHAR_MAX) 9448 tp->t_rttupdated++; 9449 #ifdef STATS 9450 if (rack_stats_gets_ms_rtt == 0) { 9451 /* Send in the microsecond rtt used for rxt timeout purposes */ 9452 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 9453 } else if (rack_stats_gets_ms_rtt == 1) { 9454 /* Send in the millisecond rtt used for rxt timeout purposes */ 9455 int32_t ms_rtt; 9456 9457 /* Round up */ 9458 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 9459 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 9460 } else if (rack_stats_gets_ms_rtt == 2) { 9461 /* Send in the millisecond rtt has close to the path RTT as we can get */ 9462 int32_t ms_rtt; 9463 9464 /* Round up */ 9465 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 9466 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 9467 } else { 9468 /* Send in the microsecond rtt has close to the path RTT as we can get */ 9469 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 9470 } 9471 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_PATHRTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 9472 #endif 9473 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 9474 /* 9475 * the retransmit should happen at rtt + 4 * rttvar. Because of the 9476 * way we do the smoothing, srtt and rttvar will each average +1/2 9477 * tick of bias. When we compute the retransmit timer, we want 1/2 9478 * tick of rounding and 1 extra tick because of +-1/2 tick 9479 * uncertainty in the firing of the timer. The bias will give us 9480 * exactly the 1.5 tick we need. But, because the bias is 9481 * statistical, we have to test that we don't drop below the minimum 9482 * feasible timer (which is 2 ticks). 9483 */ 9484 tp->t_rxtshift = 0; 9485 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 9486 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 9487 rack_log_rtt_sample(rack, rtt); 9488 tp->t_softerror = 0; 9489 } 9490 9491 9492 static void 9493 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 9494 { 9495 /* 9496 * Apply to filter the inbound us-rtt at us_cts. 
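 * rc_gp_min_rtt is a time-windowed minimum filter (sys/tim_filter.h);
 * apply_filter_min_small() enters the new sample at time us_cts, so
 * comparing the filter's old value with us_rtt below is how we notice
 * that a genuinely lower path RTT has just been observed.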
9497 */ 9498 uint32_t old_rtt; 9499 9500 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 9501 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 9502 us_rtt, us_cts); 9503 if (old_rtt > us_rtt) { 9504 /* We just hit a new lower rtt time */ 9505 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 9506 __LINE__, RACK_RTTS_NEWRTT); 9507 /* 9508 * Only count it if its lower than what we saw within our 9509 * calculated range. 9510 */ 9511 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 9512 if (rack_probertt_lower_within && 9513 rack->rc_gp_dyn_mul && 9514 (rack->use_fixed_rate == 0) && 9515 (rack->rc_always_pace)) { 9516 /* 9517 * We are seeing a new lower rtt very close 9518 * to the time that we would have entered probe-rtt. 9519 * This is probably due to the fact that a peer flow 9520 * has entered probe-rtt. Lets go in now too. 9521 */ 9522 uint32_t val; 9523 9524 val = rack_probertt_lower_within * rack_time_between_probertt; 9525 val /= 100; 9526 if ((rack->in_probe_rtt == 0) && 9527 (rack->rc_skip_timely == 0) && 9528 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 9529 rack_enter_probertt(rack, us_cts); 9530 } 9531 } 9532 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 9533 } 9534 } 9535 } 9536 9537 static int 9538 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 9539 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 9540 { 9541 uint32_t us_rtt; 9542 int32_t i, all; 9543 uint32_t t, len_acked; 9544 9545 if ((rsm->r_flags & RACK_ACKED) || 9546 (rsm->r_flags & RACK_WAS_ACKED)) 9547 /* Already done */ 9548 return (0); 9549 if (rsm->r_no_rtt_allowed) { 9550 /* Not allowed */ 9551 return (0); 9552 } 9553 if (ack_type == CUM_ACKED) { 9554 if (SEQ_GT(th_ack, rsm->r_end)) { 9555 len_acked = rsm->r_end - rsm->r_start; 9556 all = 1; 9557 } else { 9558 len_acked = th_ack - rsm->r_start; 9559 all = 0; 9560 } 9561 } else { 9562 len_acked = rsm->r_end - rsm->r_start; 9563 all = 0; 9564 } 9565 if (rsm->r_rtr_cnt == 1) { 9566 9567 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 9568 if ((int)t <= 0) 9569 t = 1; 9570 if (!tp->t_rttlow || tp->t_rttlow > t) 9571 tp->t_rttlow = t; 9572 if (!rack->r_ctl.rc_rack_min_rtt || 9573 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9574 rack->r_ctl.rc_rack_min_rtt = t; 9575 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9576 rack->r_ctl.rc_rack_min_rtt = 1; 9577 } 9578 } 9579 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 9580 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9581 else 9582 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9583 if (us_rtt == 0) 9584 us_rtt = 1; 9585 if (CC_ALGO(tp)->rttsample != NULL) { 9586 /* Kick the RTT to the CC */ 9587 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 9588 } 9589 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 9590 if (ack_type == SACKED) { 9591 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 9592 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 9593 } else { 9594 /* 9595 * We need to setup what our confidence 9596 * is in this ack. 9597 * 9598 * If the rsm was app limited and it is 9599 * less than a mss in length (the end 9600 * of the send) then we have a gap. 
If we 9601 * were app limited but say we were sending 9602 * multiple MSS's then we are more confident 9603 * int it. 9604 * 9605 * When we are not app-limited then we see if 9606 * the rsm is being included in the current 9607 * measurement, we tell this by the app_limited_needs_set 9608 * flag. 9609 * 9610 * Note that being cwnd blocked is not applimited 9611 * as well as the pacing delay between packets which 9612 * are sending only 1 or 2 MSS's also will show up 9613 * in the RTT. We probably need to examine this algorithm 9614 * a bit more and enhance it to account for the delay 9615 * between rsm's. We could do that by saving off the 9616 * pacing delay of each rsm (in an rsm) and then 9617 * factoring that in somehow though for now I am 9618 * not sure how :) 9619 */ 9620 int calc_conf = 0; 9621 9622 if (rsm->r_flags & RACK_APP_LIMITED) { 9623 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 9624 calc_conf = 0; 9625 else 9626 calc_conf = 1; 9627 } else if (rack->app_limited_needs_set == 0) { 9628 calc_conf = 1; 9629 } else { 9630 calc_conf = 0; 9631 } 9632 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 9633 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 9634 calc_conf, rsm, rsm->r_rtr_cnt); 9635 } 9636 if ((rsm->r_flags & RACK_TLP) && 9637 (!IN_FASTRECOVERY(tp->t_flags))) { 9638 /* Segment was a TLP and our retrans matched */ 9639 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 9640 rack_cong_signal(tp, CC_NDUPACK, th_ack, __LINE__); 9641 } 9642 } 9643 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9644 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9645 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { 9646 /* New more recent rack_tmit_time */ 9647 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 9648 if (rack->r_ctl.rc_rack_tmit_time == 0) 9649 rack->r_ctl.rc_rack_tmit_time = 1; 9650 rack->rc_rack_rtt = t; 9651 } 9652 return (1); 9653 } 9654 /* 9655 * We clear the soft/rxtshift since we got an ack. 9656 * There is no assurance we will call the commit() function 9657 * so we need to clear these to avoid incorrect handling. 9658 */ 9659 tp->t_rxtshift = 0; 9660 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 9661 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 9662 tp->t_softerror = 0; 9663 if (to && (to->to_flags & TOF_TS) && 9664 (ack_type == CUM_ACKED) && 9665 (to->to_tsecr) && 9666 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 9667 /* 9668 * Now which timestamp does it match? In this block the ACK 9669 * must be coming from a previous transmission. 9670 */ 9671 for (i = 0; i < rsm->r_rtr_cnt; i++) { 9672 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 9673 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 9674 if ((int)t <= 0) 9675 t = 1; 9676 if (CC_ALGO(tp)->rttsample != NULL) { 9677 /* 9678 * Kick the RTT to the CC, here 9679 * we lie a bit in that we know the 9680 * retransmission is correct even though 9681 * we retransmitted. This is because 9682 * we match the timestamps. 9683 */ 9684 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) 9685 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; 9686 else 9687 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; 9688 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 9689 } 9690 if ((i + 1) < rsm->r_rtr_cnt) { 9691 /* 9692 * The peer ack'd from our previous 9693 * transmission. 
We have a spurious 9694 * retransmission and thus we dont 9695 * want to update our rack_rtt. 9696 * 9697 * Hmm should there be a CC revert here? 9698 * 9699 */ 9700 return (0); 9701 } 9702 if (!tp->t_rttlow || tp->t_rttlow > t) 9703 tp->t_rttlow = t; 9704 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9705 rack->r_ctl.rc_rack_min_rtt = t; 9706 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9707 rack->r_ctl.rc_rack_min_rtt = 1; 9708 } 9709 } 9710 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9711 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9712 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { 9713 /* New more recent rack_tmit_time */ 9714 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 9715 if (rack->r_ctl.rc_rack_tmit_time == 0) 9716 rack->r_ctl.rc_rack_tmit_time = 1; 9717 rack->rc_rack_rtt = t; 9718 } 9719 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); 9720 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, 9721 rsm->r_rtr_cnt); 9722 return (1); 9723 } 9724 } 9725 /* If we are logging log out the sendmap */ 9726 if (tcp_bblogging_on(rack->rc_tp)) { 9727 for (i = 0; i < rsm->r_rtr_cnt; i++) { 9728 rack_log_rtt_sendmap(rack, i, rsm->r_tim_lastsent[i], to->to_tsecr); 9729 } 9730 } 9731 goto ts_not_found; 9732 } else { 9733 /* 9734 * Ok its a SACK block that we retransmitted. or a windows 9735 * machine without timestamps. We can tell nothing from the 9736 * time-stamp since its not there or the time the peer last 9737 * received a segment that moved forward its cum-ack point. 9738 */ 9739 ts_not_found: 9740 i = rsm->r_rtr_cnt - 1; 9741 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 9742 if ((int)t <= 0) 9743 t = 1; 9744 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9745 /* 9746 * We retransmitted and the ack came back in less 9747 * than the smallest rtt we have observed. We most 9748 * likely did an improper retransmit as outlined in 9749 * 6.2 Step 2 point 2 in the rack-draft so we 9750 * don't want to update our rack_rtt. We in 9751 * theory (in future) might want to think about reverting our 9752 * cwnd state but we won't for now. 9753 */ 9754 return (0); 9755 } else if (rack->r_ctl.rc_rack_min_rtt) { 9756 /* 9757 * We retransmitted it and the retransmit did the 9758 * job. 9759 */ 9760 if (!rack->r_ctl.rc_rack_min_rtt || 9761 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9762 rack->r_ctl.rc_rack_min_rtt = t; 9763 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9764 rack->r_ctl.rc_rack_min_rtt = 1; 9765 } 9766 } 9767 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9768 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9769 (uint32_t)rsm->r_tim_lastsent[i]))) { 9770 /* New more recent rack_tmit_time */ 9771 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; 9772 if (rack->r_ctl.rc_rack_tmit_time == 0) 9773 rack->r_ctl.rc_rack_tmit_time = 1; 9774 rack->rc_rack_rtt = t; 9775 } 9776 return (1); 9777 } 9778 } 9779 return (0); 9780 } 9781 9782 /* 9783 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 
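 * That is, starting from the newly SACKed rsm we walk the transmit map
 * backwards in send order and tag every older, still-unacked entry
 * with RACK_SACK_PASSED; if the SACKed segment's last transmit time
 * plus the RACK threshold has already elapsed, those older entries are
 * also marked RACK_WAS_LOST and added to rc_considered_lost.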
9784 */ 9785 static void 9786 rack_log_sack_passed(struct tcpcb *tp, 9787 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts) 9788 { 9789 struct rack_sendmap *nrsm; 9790 uint32_t thresh; 9791 9792 /* Get our rxt threshold for lost consideration */ 9793 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(tp, rack), cts, __LINE__, 0); 9794 /* Now start looking at rsm's */ 9795 nrsm = rsm; 9796 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 9797 rack_head, r_tnext) { 9798 if (nrsm == rsm) { 9799 /* Skip original segment he is acked */ 9800 continue; 9801 } 9802 if (nrsm->r_flags & RACK_ACKED) { 9803 /* 9804 * Skip ack'd segments, though we 9805 * should not see these, since tmap 9806 * should not have ack'd segments. 9807 */ 9808 continue; 9809 } 9810 if (nrsm->r_flags & RACK_RWND_COLLAPSED) { 9811 /* 9812 * If the peer dropped the rwnd on 9813 * these then we don't worry about them. 9814 */ 9815 continue; 9816 } 9817 /* Check lost state */ 9818 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { 9819 uint32_t exp; 9820 9821 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; 9822 if (TSTMP_LT(exp, cts) || (exp == cts)) { 9823 /* We consider it lost */ 9824 nrsm->r_flags |= RACK_WAS_LOST; 9825 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; 9826 } 9827 } 9828 if (nrsm->r_flags & RACK_SACK_PASSED) { 9829 /* 9830 * We found one that is already marked 9831 * passed, we have been here before and 9832 * so all others below this are marked. 9833 */ 9834 break; 9835 } 9836 nrsm->r_flags |= RACK_SACK_PASSED; 9837 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 9838 } 9839 } 9840 9841 static void 9842 rack_need_set_test(struct tcpcb *tp, 9843 struct tcp_rack *rack, 9844 struct rack_sendmap *rsm, 9845 tcp_seq th_ack, 9846 int line, 9847 int use_which) 9848 { 9849 struct rack_sendmap *s_rsm; 9850 9851 if ((tp->t_flags & TF_GPUTINPROG) && 9852 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 9853 /* 9854 * We were app limited, and this ack 9855 * butts up or goes beyond the point where we want 9856 * to start our next measurement. We need 9857 * to record the new gput_ts as here and 9858 * possibly update the start sequence. 9859 */ 9860 uint32_t seq, ts; 9861 9862 if (rsm->r_rtr_cnt > 1) { 9863 /* 9864 * This is a retransmit, can we 9865 * really make any assessment at this 9866 * point? We are not really sure of 9867 * the timestamp, is it this or the 9868 * previous transmission? 9869 * 9870 * Lets wait for something better that 9871 * is not retransmitted. 9872 */ 9873 return; 9874 } 9875 seq = tp->gput_seq; 9876 ts = tp->gput_ts; 9877 rack->app_limited_needs_set = 0; 9878 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 9879 /* Do we start at a new end? */ 9880 if ((use_which == RACK_USE_BEG) && 9881 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { 9882 /* 9883 * When we get an ACK that just eats 9884 * up some of the rsm, we set RACK_USE_BEG 9885 * since whats at r_start (i.e. th_ack) 9886 * is left unacked and thats where the 9887 * measurement now starts. 9888 */ 9889 tp->gput_seq = rsm->r_start; 9890 } 9891 if ((use_which == RACK_USE_END) && 9892 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 9893 /* 9894 * We use the end when the cumack 9895 * is moving forward and completely 9896 * deleting the rsm passed so basically 9897 * r_end holds th_ack. 9898 * 9899 * For SACK's we also want to use the end 9900 * since this piece just got sacked and 9901 * we want to target anything after that 9902 * in our measurement. 
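 *
 * For example (sequence numbers made up): if this rsm covers
 * [1000, 2000) and that whole range was just sacked or cum-acked
 * away, the next measurement should begin at 2000, which is
 * exactly what gput_seq = rsm->r_end below gives us.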
9903 */ 9904 tp->gput_seq = rsm->r_end; 9905 } 9906 if (use_which == RACK_USE_END_OR_THACK) { 9907 /* 9908 * special case for ack moving forward, 9909 * not a sack, we need to move all the 9910 * way up to where this ack cum-ack moves 9911 * to. 9912 */ 9913 if (SEQ_GT(th_ack, rsm->r_end)) 9914 tp->gput_seq = th_ack; 9915 else 9916 tp->gput_seq = rsm->r_end; 9917 } 9918 if (SEQ_LT(tp->gput_seq, tp->snd_max)) 9919 s_rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 9920 else 9921 s_rsm = NULL; 9922 /* 9923 * Pick up the correct send time if we can the rsm passed in 9924 * may be equal to s_rsm if the RACK_USE_BEG was set. For the other 9925 * two cases (RACK_USE_THACK or RACK_USE_END) most likely we will 9926 * find a different seq i.e. the next send up. 9927 * 9928 * If that has not been sent, s_rsm will be NULL and we must 9929 * arrange it so this function will get called again by setting 9930 * app_limited_needs_set. 9931 */ 9932 if (s_rsm) 9933 rack->r_ctl.rc_gp_output_ts = s_rsm->r_tim_lastsent[0]; 9934 else { 9935 /* If we hit here we have to have *not* sent tp->gput_seq */ 9936 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; 9937 /* Set it up so we will go through here again */ 9938 rack->app_limited_needs_set = 1; 9939 } 9940 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { 9941 /* 9942 * We moved beyond this guy's range, re-calculate 9943 * the new end point. 9944 */ 9945 if (rack->rc_gp_filled == 0) { 9946 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 9947 } else { 9948 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 9949 } 9950 } 9951 /* 9952 * We are moving the goal post, we may be able to clear the 9953 * measure_saw_probe_rtt flag. 9954 */ 9955 if ((rack->in_probe_rtt == 0) && 9956 (rack->measure_saw_probe_rtt) && 9957 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 9958 rack->measure_saw_probe_rtt = 0; 9959 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 9960 seq, tp->gput_seq, 9961 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | 9962 (uint64_t)rack->r_ctl.rc_gp_output_ts), 9963 5, line, NULL, 0); 9964 if (rack->rc_gp_filled && 9965 ((tp->gput_ack - tp->gput_seq) < 9966 max(rc_init_window(rack), (MIN_GP_WIN * 9967 ctf_fixed_maxseg(tp))))) { 9968 uint32_t ideal_amount; 9969 9970 ideal_amount = rack_get_measure_window(tp, rack); 9971 if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) { 9972 /* 9973 * There is no sense of continuing this measurement 9974 * because its too small to gain us anything we 9975 * trust. Skip it and that way we can start a new 9976 * measurement quicker. 9977 */ 9978 tp->t_flags &= ~TF_GPUTINPROG; 9979 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 9980 0, 0, 9981 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | 9982 (uint64_t)rack->r_ctl.rc_gp_output_ts), 9983 6, __LINE__, NULL, 0); 9984 } else { 9985 /* 9986 * Reset the window further out. 
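 * That is, we keep the measurement alive but push its end out to
 * gput_seq + ideal_amount (what rack_get_measure_window() wants);
 * we only do this because the socket buffer holds at least that
 * much data, so the larger window can actually be filled.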
9987 */ 9988 tp->gput_ack = tp->gput_seq + ideal_amount; 9989 } 9990 } 9991 rack_tend_gp_marks(tp, rack); 9992 rack_log_gpset(rack, tp->gput_ack, 0, 0, line, 2, rsm); 9993 } 9994 } 9995 9996 static inline int 9997 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) 9998 { 9999 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { 10000 /* Behind our TLP definition or right at */ 10001 return (0); 10002 } 10003 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { 10004 /* The start is beyond or right at our end of TLP definition */ 10005 return (0); 10006 } 10007 /* It has to be a sub-part of the original TLP recorded */ 10008 return (1); 10009 } 10010 10011 static uint32_t 10012 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 10013 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, 10014 uint32_t segsiz) 10015 { 10016 uint32_t start, end, changed = 0; 10017 struct rack_sendmap stack_map; 10018 struct rack_sendmap *rsm, *nrsm, *prev, *next; 10019 int insret __diagused; 10020 int32_t used_ref = 1; 10021 int can_use_hookery = 0; 10022 10023 start = sack->start; 10024 end = sack->end; 10025 rsm = *prsm; 10026 10027 do_rest_ofb: 10028 if ((rsm == NULL) || 10029 (SEQ_LT(end, rsm->r_start)) || 10030 (SEQ_GEQ(start, rsm->r_end)) || 10031 (SEQ_LT(start, rsm->r_start))) { 10032 /* 10033 * We are not in the right spot, 10034 * find the correct spot in the tree. 10035 */ 10036 used_ref = 0; 10037 rsm = tqhash_find(rack->r_ctl.tqh, start); 10038 } 10039 if (rsm == NULL) { 10040 /* TSNH */ 10041 goto out; 10042 } 10043 /* Ok we have an ACK for some piece of this rsm */ 10044 if (rsm->r_start != start) { 10045 if ((rsm->r_flags & RACK_ACKED) == 0) { 10046 /* 10047 * Before any splitting or hookery is 10048 * done is it a TLP of interest i.e. rxt? 10049 */ 10050 if ((rsm->r_flags & RACK_TLP) && 10051 (rsm->r_rtr_cnt > 1)) { 10052 /* 10053 * We are splitting a rxt TLP, check 10054 * if we need to save off the start/end 10055 */ 10056 if (rack->rc_last_tlp_acked_set && 10057 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 10058 /* 10059 * We already turned this on since we are inside 10060 * the previous one was a partially sack now we 10061 * are getting another one (maybe all of it). 10062 * 10063 */ 10064 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 10065 /* 10066 * Lets make sure we have all of it though. 10067 */ 10068 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 10069 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10070 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10071 rack->r_ctl.last_tlp_acked_end); 10072 } 10073 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 10074 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10075 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10076 rack->r_ctl.last_tlp_acked_end); 10077 } 10078 } else { 10079 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10080 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10081 rack->rc_last_tlp_past_cumack = 0; 10082 rack->rc_last_tlp_acked_set = 1; 10083 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 10084 } 10085 } 10086 /** 10087 * Need to split this in two pieces the before and after, 10088 * the before remains in the map, the after must be 10089 * added. 
In other words we have: 10090 * rsm |--------------| 10091 * sackblk |-------> 10092 * rsm will become 10093 * rsm |---| 10094 * and nrsm will be the sacked piece 10095 * nrsm |----------| 10096 * 10097 * But before we start down that path lets 10098 * see if the sack spans over on top of 10099 * the next guy and it is already sacked. 10100 * 10101 */ 10102 /* 10103 * Hookery can only be used if the two entries 10104 * are in the same bucket and neither one of 10105 * them staddle the bucket line. 10106 */ 10107 next = tqhash_next(rack->r_ctl.tqh, rsm); 10108 if (next && 10109 (rsm->bindex == next->bindex) && 10110 ((rsm->r_flags & RACK_STRADDLE) == 0) && 10111 ((next->r_flags & RACK_STRADDLE) == 0) && 10112 ((rsm->r_flags & RACK_IS_PCM) == 0) && 10113 ((next->r_flags & RACK_IS_PCM) == 0) && 10114 (rsm->r_flags & RACK_IN_GP_WIN) && 10115 (next->r_flags & RACK_IN_GP_WIN)) 10116 can_use_hookery = 1; 10117 else 10118 can_use_hookery = 0; 10119 if (next && can_use_hookery && 10120 (next->r_flags & RACK_ACKED) && 10121 SEQ_GEQ(end, next->r_start)) { 10122 /** 10123 * So the next one is already acked, and 10124 * we can thus by hookery use our stack_map 10125 * to reflect the piece being sacked and 10126 * then adjust the two tree entries moving 10127 * the start and ends around. So we start like: 10128 * rsm |------------| (not-acked) 10129 * next |-----------| (acked) 10130 * sackblk |--------> 10131 * We want to end like so: 10132 * rsm |------| (not-acked) 10133 * next |-----------------| (acked) 10134 * nrsm |-----| 10135 * Where nrsm is a temporary stack piece we 10136 * use to update all the gizmos. 10137 */ 10138 /* Copy up our fudge block */ 10139 nrsm = &stack_map; 10140 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 10141 /* Now adjust our tree blocks */ 10142 tqhash_update_end(rack->r_ctl.tqh, rsm, start); 10143 next->r_start = start; 10144 rsm->r_flags |= RACK_SHUFFLED; 10145 next->r_flags |= RACK_SHUFFLED; 10146 /* Now we must adjust back where next->m is */ 10147 rack_setup_offset_for_rsm(rack, rsm, next); 10148 /* 10149 * Which timestamp do we keep? It is rather 10150 * important in GP measurements to have the 10151 * accurate end of the send window. 10152 * 10153 * We keep the largest value, which is the newest 10154 * send. We do this in case a segment that is 10155 * joined together and not part of a GP estimate 10156 * later gets expanded into the GP estimate. 10157 * 10158 * We prohibit the merging of unlike kinds i.e. 10159 * all pieces that are in the GP estimate can be 10160 * merged and all pieces that are not in a GP estimate 10161 * can be merged, but not disimilar pieces. Combine 10162 * this with taking the highest here and we should 10163 * be ok unless of course the client reneges. Then 10164 * all bets are off. 10165 */ 10166 if (next->r_tim_lastsent[(next->r_rtr_cnt-1)] < 10167 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) 10168 next->r_tim_lastsent[(next->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]; 10169 /* 10170 * And we must keep the newest ack arrival time. 
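 * (Same policy as the send timestamp above: the surviving entry
 * keeps the most recent value, i.e. next->r_ack_arrival is raised
 * below to this packet's receive time if that is newer.)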
10171 */ 10172 if (next->r_ack_arrival < 10173 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) 10174 next->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10175 10176 10177 /* We don't need to adjust rsm, it did not change */ 10178 /* Clear out the dup ack count of the remainder */ 10179 rsm->r_dupack = 0; 10180 rsm->r_just_ret = 0; 10181 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 10182 /* Now lets make sure our fudge block is right */ 10183 nrsm->r_start = start; 10184 /* Now lets update all the stats and such */ 10185 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 10186 if (rack->app_limited_needs_set) 10187 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 10188 changed += (nrsm->r_end - nrsm->r_start); 10189 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 10190 if (rsm->r_flags & RACK_WAS_LOST) { 10191 int my_chg; 10192 10193 my_chg = (nrsm->r_end - nrsm->r_start); 10194 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 10195 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 10196 if (my_chg <= rack->r_ctl.rc_considered_lost) 10197 rack->r_ctl.rc_considered_lost -= my_chg; 10198 else 10199 rack->r_ctl.rc_considered_lost = 0; 10200 } 10201 if (nrsm->r_flags & RACK_SACK_PASSED) { 10202 rack->r_ctl.rc_reorder_ts = cts; 10203 if (rack->r_ctl.rc_reorder_ts == 0) 10204 rack->r_ctl.rc_reorder_ts = 1; 10205 } 10206 /* 10207 * Now we want to go up from rsm (the 10208 * one left un-acked) to the next one 10209 * in the tmap. We do this so when 10210 * we walk backwards we include marking 10211 * sack-passed on rsm (The one passed in 10212 * is skipped since it is generally called 10213 * on something sacked before removing it 10214 * from the tmap). 10215 */ 10216 if (rsm->r_in_tmap) { 10217 nrsm = TAILQ_NEXT(rsm, r_tnext); 10218 /* 10219 * Now that we have the next 10220 * one walk backwards from there. 10221 */ 10222 if (nrsm && nrsm->r_in_tmap) 10223 rack_log_sack_passed(tp, rack, nrsm, cts); 10224 } 10225 /* Now are we done? */ 10226 if (SEQ_LT(end, next->r_end) || 10227 (end == next->r_end)) { 10228 /* Done with block */ 10229 goto out; 10230 } 10231 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); 10232 counter_u64_add(rack_sack_used_next_merge, 1); 10233 /* Postion for the next block */ 10234 start = next->r_end; 10235 rsm = tqhash_next(rack->r_ctl.tqh, next); 10236 if (rsm == NULL) 10237 goto out; 10238 } else { 10239 /** 10240 * We can't use any hookery here, so we 10241 * need to split the map. We enter like 10242 * so: 10243 * rsm |--------| 10244 * sackblk |-----> 10245 * We will add the new block nrsm and 10246 * that will be the new portion, and then 10247 * fall through after reseting rsm. So we 10248 * split and look like this: 10249 * rsm |----| 10250 * sackblk |-----> 10251 * nrsm |---| 10252 * We then fall through reseting 10253 * rsm to nrsm, so the next block 10254 * picks it up. 10255 */ 10256 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 10257 if (nrsm == NULL) { 10258 /* 10259 * failed XXXrrs what can we do but loose the sack 10260 * info? 
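 * Nothing, really; we drop this sack block on the floor and bail,
 * leaving the scoreboard untouched for this range. Peers normally
 * repeat their SACK blocks on later acks, so the information is
 * usually re-learned then (an expectation, not a guarantee).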
10261 */ 10262 goto out; 10263 } 10264 counter_u64_add(rack_sack_splits, 1); 10265 rack_clone_rsm(rack, nrsm, rsm, start); 10266 rsm->r_just_ret = 0; 10267 #ifndef INVARIANTS 10268 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 10269 #else 10270 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 10271 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 10272 nrsm, insret, rack, rsm); 10273 } 10274 #endif 10275 if (rsm->r_in_tmap) { 10276 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 10277 nrsm->r_in_tmap = 1; 10278 } 10279 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 10280 rsm->r_flags &= (~RACK_HAS_FIN); 10281 /* Position us to point to the new nrsm that starts the sack blk */ 10282 rsm = nrsm; 10283 } 10284 } else { 10285 /* Already sacked this piece */ 10286 counter_u64_add(rack_sack_skipped_acked, 1); 10287 if (end == rsm->r_end) { 10288 /* Done with block */ 10289 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 10290 goto out; 10291 } else if (SEQ_LT(end, rsm->r_end)) { 10292 /* A partial sack to a already sacked block */ 10293 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 10294 goto out; 10295 } else { 10296 /* 10297 * The end goes beyond this guy 10298 * reposition the start to the 10299 * next block. 10300 */ 10301 start = rsm->r_end; 10302 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 10303 if (rsm == NULL) 10304 goto out; 10305 } 10306 } 10307 } 10308 if (SEQ_GEQ(end, rsm->r_end)) { 10309 /** 10310 * The end of this block is either beyond this guy or right 10311 * at this guy. I.e.: 10312 * rsm --- |-----| 10313 * end |-----| 10314 * <or> 10315 * end |---------| 10316 */ 10317 if ((rsm->r_flags & RACK_ACKED) == 0) { 10318 /* 10319 * Is it a TLP of interest? 10320 */ 10321 if ((rsm->r_flags & RACK_TLP) && 10322 (rsm->r_rtr_cnt > 1)) { 10323 /* 10324 * We are splitting a rxt TLP, check 10325 * if we need to save off the start/end 10326 */ 10327 if (rack->rc_last_tlp_acked_set && 10328 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 10329 /* 10330 * We already turned this on since we are inside 10331 * the previous one was a partially sack now we 10332 * are getting another one (maybe all of it). 10333 */ 10334 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 10335 /* 10336 * Lets make sure we have all of it though. 
10337 */ 10338 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 10339 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10340 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10341 rack->r_ctl.last_tlp_acked_end); 10342 } 10343 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 10344 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10345 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10346 rack->r_ctl.last_tlp_acked_end); 10347 } 10348 } else { 10349 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10350 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10351 rack->rc_last_tlp_past_cumack = 0; 10352 rack->rc_last_tlp_acked_set = 1; 10353 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 10354 } 10355 } 10356 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 10357 changed += (rsm->r_end - rsm->r_start); 10358 /* You get a count for acking a whole segment or more */ 10359 if (rsm->r_flags & RACK_WAS_LOST) { 10360 int my_chg; 10361 10362 my_chg = (rsm->r_end - rsm->r_start); 10363 rsm->r_flags &= ~RACK_WAS_LOST; 10364 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 10365 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 10366 if (my_chg <= rack->r_ctl.rc_considered_lost) 10367 rack->r_ctl.rc_considered_lost -= my_chg; 10368 else 10369 rack->r_ctl.rc_considered_lost = 0; 10370 } 10371 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 10372 if (rsm->r_in_tmap) /* should be true */ 10373 rack_log_sack_passed(tp, rack, rsm, cts); 10374 /* Is Reordering occuring? */ 10375 if (rsm->r_flags & RACK_SACK_PASSED) { 10376 rsm->r_flags &= ~RACK_SACK_PASSED; 10377 rack->r_ctl.rc_reorder_ts = cts; 10378 if (rack->r_ctl.rc_reorder_ts == 0) 10379 rack->r_ctl.rc_reorder_ts = 1; 10380 } 10381 if (rack->app_limited_needs_set) 10382 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 10383 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10384 rsm->r_flags |= RACK_ACKED; 10385 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); 10386 if (rsm->r_in_tmap) { 10387 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10388 rsm->r_in_tmap = 0; 10389 } 10390 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); 10391 } else { 10392 counter_u64_add(rack_sack_skipped_acked, 1); 10393 } 10394 if (end == rsm->r_end) { 10395 /* This block only - done, setup for next */ 10396 goto out; 10397 } 10398 /* 10399 * There is more not coverend by this rsm move on 10400 * to the next block in the tail queue hash table. 10401 */ 10402 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); 10403 start = rsm->r_end; 10404 rsm = nrsm; 10405 if (rsm == NULL) 10406 goto out; 10407 goto do_rest_ofb; 10408 } 10409 /** 10410 * The end of this sack block is smaller than 10411 * our rsm i.e.: 10412 * rsm --- |-----| 10413 * end |--| 10414 */ 10415 if ((rsm->r_flags & RACK_ACKED) == 0) { 10416 /* 10417 * Is it a TLP of interest? 10418 */ 10419 if ((rsm->r_flags & RACK_TLP) && 10420 (rsm->r_rtr_cnt > 1)) { 10421 /* 10422 * We are splitting a rxt TLP, check 10423 * if we need to save off the start/end 10424 */ 10425 if (rack->rc_last_tlp_acked_set && 10426 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 10427 /* 10428 * We already turned this on since we are inside 10429 * the previous one was a partially sack now we 10430 * are getting another one (maybe all of it). 
10431 */ 10432 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 10433 /* 10434 * Lets make sure we have all of it though. 10435 */ 10436 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 10437 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10438 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10439 rack->r_ctl.last_tlp_acked_end); 10440 } 10441 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 10442 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10443 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10444 rack->r_ctl.last_tlp_acked_end); 10445 } 10446 } else { 10447 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10448 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10449 rack->rc_last_tlp_past_cumack = 0; 10450 rack->rc_last_tlp_acked_set = 1; 10451 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 10452 } 10453 } 10454 /* 10455 * Hookery can only be used if the two entries 10456 * are in the same bucket and neither one of 10457 * them staddle the bucket line. 10458 */ 10459 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10460 if (prev && 10461 (rsm->bindex == prev->bindex) && 10462 ((rsm->r_flags & RACK_STRADDLE) == 0) && 10463 ((prev->r_flags & RACK_STRADDLE) == 0) && 10464 ((rsm->r_flags & RACK_IS_PCM) == 0) && 10465 ((prev->r_flags & RACK_IS_PCM) == 0) && 10466 (rsm->r_flags & RACK_IN_GP_WIN) && 10467 (prev->r_flags & RACK_IN_GP_WIN)) 10468 can_use_hookery = 1; 10469 else 10470 can_use_hookery = 0; 10471 if (prev && can_use_hookery && 10472 (prev->r_flags & RACK_ACKED)) { 10473 /** 10474 * Goal, we want the right remainder of rsm to shrink 10475 * in place and span from (rsm->r_start = end) to rsm->r_end. 10476 * We want to expand prev to go all the way 10477 * to prev->r_end <- end. 10478 * so in the tree we have before: 10479 * prev |--------| (acked) 10480 * rsm |-------| (non-acked) 10481 * sackblk |-| 10482 * We churn it so we end up with 10483 * prev |----------| (acked) 10484 * rsm |-----| (non-acked) 10485 * nrsm |-| (temporary) 10486 * 10487 * Note if either prev/rsm is a TLP we don't 10488 * do this. 10489 */ 10490 nrsm = &stack_map; 10491 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 10492 tqhash_update_end(rack->r_ctl.tqh, prev, end); 10493 rsm->r_start = end; 10494 rsm->r_flags |= RACK_SHUFFLED; 10495 prev->r_flags |= RACK_SHUFFLED; 10496 /* Now adjust nrsm (stack copy) to be 10497 * the one that is the small 10498 * piece that was "sacked". 10499 */ 10500 nrsm->r_end = end; 10501 rsm->r_dupack = 0; 10502 /* 10503 * Which timestamp do we keep? It is rather 10504 * important in GP measurements to have the 10505 * accurate end of the send window. 10506 * 10507 * We keep the largest value, which is the newest 10508 * send. We do this in case a segment that is 10509 * joined together and not part of a GP estimate 10510 * later gets expanded into the GP estimate. 10511 * 10512 * We prohibit the merging of unlike kinds i.e. 10513 * all pieces that are in the GP estimate can be 10514 * merged and all pieces that are not in a GP estimate 10515 * can be merged, but not disimilar pieces. Combine 10516 * this with taking the highest here and we should 10517 * be ok unless of course the client reneges. Then 10518 * all bets are off. 
10519 */ 10520 if(prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] < 10521 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) { 10522 prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 10523 } 10524 /* 10525 * And we must keep the newest ack arrival time. 10526 */ 10527 10528 if(prev->r_ack_arrival < 10529 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) 10530 prev->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10531 10532 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 10533 /* 10534 * Now that the rsm has had its start moved forward 10535 * lets go ahead and get its new place in the world. 10536 */ 10537 rack_setup_offset_for_rsm(rack, prev, rsm); 10538 /* 10539 * Now nrsm is our new little piece 10540 * that is acked (which was merged 10541 * to prev). Update the rtt and changed 10542 * based on that. Also check for reordering. 10543 */ 10544 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 10545 if (rack->app_limited_needs_set) 10546 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 10547 changed += (nrsm->r_end - nrsm->r_start); 10548 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 10549 if (rsm->r_flags & RACK_WAS_LOST) { 10550 int my_chg; 10551 10552 my_chg = (nrsm->r_end - nrsm->r_start); 10553 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 10554 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 10555 if (my_chg <= rack->r_ctl.rc_considered_lost) 10556 rack->r_ctl.rc_considered_lost -= my_chg; 10557 else 10558 rack->r_ctl.rc_considered_lost = 0; 10559 } 10560 if (nrsm->r_flags & RACK_SACK_PASSED) { 10561 rack->r_ctl.rc_reorder_ts = cts; 10562 if (rack->r_ctl.rc_reorder_ts == 0) 10563 rack->r_ctl.rc_reorder_ts = 1; 10564 } 10565 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); 10566 rsm = prev; 10567 counter_u64_add(rack_sack_used_prev_merge, 1); 10568 } else { 10569 /** 10570 * This is the case where our previous 10571 * block is not acked either, so we must 10572 * split the block in two. 10573 */ 10574 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 10575 if (nrsm == NULL) { 10576 /* failed rrs what can we do but loose the sack info? */ 10577 goto out; 10578 } 10579 if ((rsm->r_flags & RACK_TLP) && 10580 (rsm->r_rtr_cnt > 1)) { 10581 /* 10582 * We are splitting a rxt TLP, check 10583 * if we need to save off the start/end 10584 */ 10585 if (rack->rc_last_tlp_acked_set && 10586 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 10587 /* 10588 * We already turned this on since this block is inside 10589 * the previous one was a partially sack now we 10590 * are getting another one (maybe all of it). 10591 */ 10592 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 10593 /* 10594 * Lets make sure we have all of it though. 
10595 */ 10596 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 10597 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10598 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10599 rack->r_ctl.last_tlp_acked_end); 10600 } 10601 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 10602 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10603 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10604 rack->r_ctl.last_tlp_acked_end); 10605 } 10606 } else { 10607 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10608 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10609 rack->rc_last_tlp_acked_set = 1; 10610 rack->rc_last_tlp_past_cumack = 0; 10611 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 10612 } 10613 } 10614 /** 10615 * In this case nrsm becomes 10616 * nrsm->r_start = end; 10617 * nrsm->r_end = rsm->r_end; 10618 * which is un-acked. 10619 * <and> 10620 * rsm->r_end = nrsm->r_start; 10621 * i.e. the remaining un-acked 10622 * piece is left on the left 10623 * hand side. 10624 * 10625 * So we start like this 10626 * rsm |----------| (not acked) 10627 * sackblk |---| 10628 * build it so we have 10629 * rsm |---| (acked) 10630 * nrsm |------| (not acked) 10631 */ 10632 counter_u64_add(rack_sack_splits, 1); 10633 rack_clone_rsm(rack, nrsm, rsm, end); 10634 rsm->r_flags &= (~RACK_HAS_FIN); 10635 rsm->r_just_ret = 0; 10636 #ifndef INVARIANTS 10637 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 10638 #else 10639 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 10640 panic("Insert in tailq_hash of %p fails ret:% rack:%p rsm:%p", 10641 nrsm, insret, rack, rsm); 10642 } 10643 #endif 10644 if (rsm->r_in_tmap) { 10645 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 10646 nrsm->r_in_tmap = 1; 10647 } 10648 nrsm->r_dupack = 0; 10649 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 10650 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 10651 changed += (rsm->r_end - rsm->r_start); 10652 if (rsm->r_flags & RACK_WAS_LOST) { 10653 int my_chg; 10654 10655 my_chg = (rsm->r_end - rsm->r_start); 10656 rsm->r_flags &= ~RACK_WAS_LOST; 10657 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 10658 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 10659 if (my_chg <= rack->r_ctl.rc_considered_lost) 10660 rack->r_ctl.rc_considered_lost -= my_chg; 10661 else 10662 rack->r_ctl.rc_considered_lost = 0; 10663 } 10664 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 10665 10666 if (rsm->r_in_tmap) /* should be true */ 10667 rack_log_sack_passed(tp, rack, rsm, cts); 10668 /* Is Reordering occuring? */ 10669 if (rsm->r_flags & RACK_SACK_PASSED) { 10670 rsm->r_flags &= ~RACK_SACK_PASSED; 10671 rack->r_ctl.rc_reorder_ts = cts; 10672 if (rack->r_ctl.rc_reorder_ts == 0) 10673 rack->r_ctl.rc_reorder_ts = 1; 10674 } 10675 if (rack->app_limited_needs_set) 10676 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 10677 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10678 rsm->r_flags |= RACK_ACKED; 10679 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); 10680 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__); 10681 if (rsm->r_in_tmap) { 10682 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10683 rsm->r_in_tmap = 0; 10684 } 10685 } 10686 } else if (start != end){ 10687 /* 10688 * The block was already acked. 
10689 */ 10690 counter_u64_add(rack_sack_skipped_acked, 1); 10691 } 10692 out: 10693 if (rsm && 10694 ((rsm->r_flags & RACK_TLP) == 0) && 10695 (rsm->r_flags & RACK_ACKED)) { 10696 /* 10697 * Now can we merge where we worked 10698 * with either the previous or 10699 * next block? 10700 */ 10701 next = tqhash_next(rack->r_ctl.tqh, rsm); 10702 while (next) { 10703 if (next->r_flags & RACK_TLP) 10704 break; 10705 /* Only allow merges between ones in or out of GP window */ 10706 if ((next->r_flags & RACK_IN_GP_WIN) && 10707 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { 10708 break; 10709 } 10710 if ((rsm->r_flags & RACK_IN_GP_WIN) && 10711 ((next->r_flags & RACK_IN_GP_WIN) == 0)) { 10712 break; 10713 } 10714 if (rsm->bindex != next->bindex) 10715 break; 10716 if (rsm->r_flags & RACK_STRADDLE) 10717 break; 10718 if (rsm->r_flags & RACK_IS_PCM) 10719 break; 10720 if (next->r_flags & RACK_STRADDLE) 10721 break; 10722 if (next->r_flags & RACK_IS_PCM) 10723 break; 10724 if (next->r_flags & RACK_ACKED) { 10725 /* yep this and next can be merged */ 10726 rsm = rack_merge_rsm(rack, rsm, next); 10727 next = tqhash_next(rack->r_ctl.tqh, rsm); 10728 } else 10729 break; 10730 } 10731 /* Now what about the previous? */ 10732 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10733 while (prev) { 10734 if (prev->r_flags & RACK_TLP) 10735 break; 10736 /* Only allow merges between ones in or out of GP window */ 10737 if ((prev->r_flags & RACK_IN_GP_WIN) && 10738 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { 10739 break; 10740 } 10741 if ((rsm->r_flags & RACK_IN_GP_WIN) && 10742 ((prev->r_flags & RACK_IN_GP_WIN) == 0)) { 10743 break; 10744 } 10745 if (rsm->bindex != prev->bindex) 10746 break; 10747 if (rsm->r_flags & RACK_STRADDLE) 10748 break; 10749 if (rsm->r_flags & RACK_IS_PCM) 10750 break; 10751 if (prev->r_flags & RACK_STRADDLE) 10752 break; 10753 if (prev->r_flags & RACK_IS_PCM) 10754 break; 10755 if (prev->r_flags & RACK_ACKED) { 10756 /* yep the previous and this can be merged */ 10757 rsm = rack_merge_rsm(rack, prev, rsm); 10758 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10759 } else 10760 break; 10761 } 10762 } 10763 if (used_ref == 0) { 10764 counter_u64_add(rack_sack_proc_all, 1); 10765 } else { 10766 counter_u64_add(rack_sack_proc_short, 1); 10767 } 10768 /* Save off the next one for quick reference. 
*/ 10769 nrsm = tqhash_find(rack->r_ctl.tqh, end); 10770 *prsm = rack->r_ctl.rc_sacklast = nrsm; 10771 if (IN_RECOVERY(tp->t_flags)) { 10772 rack->r_ctl.bytes_acked_in_recovery += changed; 10773 } 10774 return (changed); 10775 } 10776 10777 static void inline 10778 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 10779 { 10780 struct rack_sendmap *tmap; 10781 10782 tmap = NULL; 10783 while (rsm && (rsm->r_flags & RACK_ACKED)) { 10784 /* Its no longer sacked, mark it so */ 10785 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 10786 #ifdef INVARIANTS 10787 if (rsm->r_in_tmap) { 10788 panic("rack:%p rsm:%p flags:0x%x in tmap?", 10789 rack, rsm, rsm->r_flags); 10790 } 10791 #endif 10792 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 10793 /* Rebuild it into our tmap */ 10794 if (tmap == NULL) { 10795 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10796 tmap = rsm; 10797 } else { 10798 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 10799 tmap = rsm; 10800 } 10801 tmap->r_in_tmap = 1; 10802 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 10803 } 10804 /* 10805 * Now lets possibly clear the sack filter so we start 10806 * recognizing sacks that cover this area. 10807 */ 10808 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 10809 10810 } 10811 10812 10813 static void inline 10814 rack_rsm_sender_update(struct tcp_rack *rack, struct tcpcb *tp, struct rack_sendmap *rsm, uint8_t from) 10815 { 10816 /* 10817 * We look at advancing the end send time for our GP 10818 * measurement tracking only as the cumulative acknowledgment 10819 * moves forward. You might wonder about this, why not 10820 * at every transmission or retransmission within the 10821 * GP window update the rc_gp_cumack_ts? Well its rather 10822 * nuanced but basically the GP window *may* expand (as 10823 * it does below) or worse and harder to track it may shrink. 10824 * 10825 * This last makes it impossible to track at the time of 10826 * the send, since you may set forward your rc_gp_cumack_ts 10827 * when you send, because that send *is* in your currently 10828 * "guessed" window, but then it shrinks. Now which was 10829 * the send time of the last bytes in the window, by the 10830 * time you ask that question that part of the sendmap 10831 * is freed. So you don't know and you will have too 10832 * long of send window. Instead by updating the time 10833 * marker only when the cumack advances this assures us 10834 * that we will have only the sends in the window of our 10835 * GP measurement. 10836 * 10837 * Another complication from this is the 10838 * merging of sendmap entries. During SACK processing this 10839 * can happen to conserve the sendmap size. That breaks 10840 * everything down in tracking the send window of the GP 10841 * estimate. So to prevent that and keep it working with 10842 * a tiny bit more limited merging, we only allow like 10843 * types to be merged. I.e. if two sends are in the GP window 10844 * then its ok to merge them together. If two sends are not 10845 * in the GP window its ok to merge them together too. Though 10846 * one send in and one send out cannot be merged. We combine 10847 * this with never allowing the shrinking of the GP window when 10848 * we are in recovery so that we can properly calculate the 10849 * sending times. 10850 * 10851 * This all of course seems complicated, because it is.. :) 10852 * 10853 * The cum-ack is being advanced upon the sendmap. 
10854 * If we are not doing a GP estimate don't 10855 * proceed. 10856 */ 10857 uint64_t ts; 10858 10859 if ((tp->t_flags & TF_GPUTINPROG) == 0) 10860 return; 10861 /* 10862 * If this sendmap entry is going 10863 * beyond the measurement window we had picked, 10864 * expand the measurement window by that much. 10865 */ 10866 if (SEQ_GT(rsm->r_end, tp->gput_ack)) { 10867 tp->gput_ack = rsm->r_end; 10868 } 10869 /* 10870 * If we have not setup a ack, then we 10871 * have no idea if the newly acked pieces 10872 * will be "in our seq measurement range". If 10873 * it is when we clear the app_limited_needs_set 10874 * flag the timestamp will be updated. 10875 */ 10876 if (rack->app_limited_needs_set) 10877 return; 10878 /* 10879 * Finally, we grab out the latest timestamp 10880 * that this packet was sent and then see 10881 * if: 10882 * a) The packet touches are newly defined GP range. 10883 * b) The time is greater than (newer) than the 10884 * one we currently have. If so we update 10885 * our sending end time window. 10886 * 10887 * Note we *do not* do this at send time. The reason 10888 * is that if you do you *may* pick up a newer timestamp 10889 * for a range you are not going to measure. We project 10890 * out how far and then sometimes modify that to be 10891 * smaller. If that occurs then you will have a send 10892 * that does not belong to the range included. 10893 */ 10894 if ((ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) <= 10895 rack->r_ctl.rc_gp_cumack_ts) 10896 return; 10897 if (rack_in_gp_window(tp, rsm)) { 10898 rack->r_ctl.rc_gp_cumack_ts = ts; 10899 rack_log_gpset(rack, tp->gput_ack, (uint32_t)ts, rsm->r_end, 10900 __LINE__, from, rsm); 10901 } 10902 } 10903 10904 static void 10905 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to, uint64_t acktime) 10906 { 10907 struct rack_sendmap *rsm; 10908 /* 10909 * The ACK point is advancing to th_ack, we must drop off 10910 * the packets in the rack log and calculate any eligble 10911 * RTT's. 10912 */ 10913 10914 if (sack_filter_blks_used(&rack->r_ctl.rack_sf)) { 10915 /* 10916 * If we have some sack blocks in the filter 10917 * lets prune them out by calling sfb with no blocks. 10918 */ 10919 sack_filter_blks(tp, &rack->r_ctl.rack_sf, NULL, 0, th_ack); 10920 } 10921 if (SEQ_GT(th_ack, tp->snd_una)) { 10922 /* Clear any app ack remembered settings */ 10923 rack->r_ctl.cleared_app_ack = 0; 10924 } 10925 rack->r_wanted_output = 1; 10926 if (SEQ_GT(th_ack, tp->snd_una)) 10927 rack->r_ctl.last_cumack_advance = acktime; 10928 10929 /* Tend any TLP that has been marked for 1/2 the seq space (its old) */ 10930 if ((rack->rc_last_tlp_acked_set == 1)&& 10931 (rack->rc_last_tlp_past_cumack == 1) && 10932 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { 10933 /* 10934 * We have reached the point where our last rack 10935 * tlp retransmit sequence is ahead of the cum-ack. 10936 * This can only happen when the cum-ack moves all 10937 * the way around (its been a full 2^^31+1 bytes 10938 * or more since we sent a retransmitted TLP). Lets 10939 * turn off the valid flag since its not really valid. 10940 * 10941 * Note since sack's also turn on this event we have 10942 * a complication, we have to wait to age it out until 10943 * the cum-ack is by the TLP before checking which is 10944 * what the next else clause does. 
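 * (SEQ_GT() is serial-number arithmetic, so last_tlp_acked_start
 * can only compare as "ahead of" th_ack again after roughly 2^31
 * bytes of new data have been cumulatively acked since that TLP,
 * which is why this is treated as the wrap-around aging case.)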
10945 */ 10946 rack_log_dsack_event(rack, 9, __LINE__, 10947 rack->r_ctl.last_tlp_acked_start, 10948 rack->r_ctl.last_tlp_acked_end); 10949 rack->rc_last_tlp_acked_set = 0; 10950 rack->rc_last_tlp_past_cumack = 0; 10951 } else if ((rack->rc_last_tlp_acked_set == 1) && 10952 (rack->rc_last_tlp_past_cumack == 0) && 10953 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { 10954 /* 10955 * It is safe to start aging TLP's out. 10956 */ 10957 rack->rc_last_tlp_past_cumack = 1; 10958 } 10959 /* We do the same for the tlp send seq as well */ 10960 if ((rack->rc_last_sent_tlp_seq_valid == 1) && 10961 (rack->rc_last_sent_tlp_past_cumack == 1) && 10962 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { 10963 rack_log_dsack_event(rack, 9, __LINE__, 10964 rack->r_ctl.last_sent_tlp_seq, 10965 (rack->r_ctl.last_sent_tlp_seq + 10966 rack->r_ctl.last_sent_tlp_len)); 10967 rack->rc_last_sent_tlp_seq_valid = 0; 10968 rack->rc_last_sent_tlp_past_cumack = 0; 10969 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && 10970 (rack->rc_last_sent_tlp_past_cumack == 0) && 10971 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { 10972 /* 10973 * It is safe to start aging TLP's send. 10974 */ 10975 rack->rc_last_sent_tlp_past_cumack = 1; 10976 } 10977 more: 10978 rsm = tqhash_min(rack->r_ctl.tqh); 10979 if (rsm == NULL) { 10980 if ((th_ack - 1) == tp->iss) { 10981 /* 10982 * For the SYN incoming case we will not 10983 * have called tcp_output for the sending of 10984 * the SYN, so there will be no map. All 10985 * other cases should probably be a panic. 10986 */ 10987 return; 10988 } 10989 if (tp->t_flags & TF_SENTFIN) { 10990 /* if we sent a FIN we often will not have map */ 10991 return; 10992 } 10993 #ifdef INVARIANTS 10994 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u\n", 10995 tp, 10996 tp->t_state, th_ack, rack, 10997 tp->snd_una, tp->snd_max); 10998 #endif 10999 return; 11000 } 11001 if (SEQ_LT(th_ack, rsm->r_start)) { 11002 /* Huh map is missing this */ 11003 #ifdef INVARIANTS 11004 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 11005 rsm->r_start, 11006 th_ack, tp->t_state, rack->r_state); 11007 #endif 11008 return; 11009 } 11010 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); 11011 11012 /* Now was it a retransmitted TLP? */ 11013 if ((rsm->r_flags & RACK_TLP) && 11014 (rsm->r_rtr_cnt > 1)) { 11015 /* 11016 * Yes, this rsm was a TLP and retransmitted, remember that 11017 * since if a DSACK comes back on this we don't want 11018 * to think of it as a reordered segment. This may 11019 * get updated again with possibly even other TLPs 11020 * in flight, but thats ok. Only when we don't send 11021 * a retransmitted TLP for 1/2 the sequences space 11022 * will it get turned off (above). 11023 */ 11024 if (rack->rc_last_tlp_acked_set && 11025 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 11026 /* 11027 * We already turned this on since the end matches, 11028 * the previous one was a partially ack now we 11029 * are getting another one (maybe all of it). 11030 */ 11031 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 11032 /* 11033 * Lets make sure we have all of it though. 
11034 */ 11035 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 11036 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 11037 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 11038 rack->r_ctl.last_tlp_acked_end); 11039 } 11040 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 11041 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 11042 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 11043 rack->r_ctl.last_tlp_acked_end); 11044 } 11045 } else { 11046 rack->rc_last_tlp_past_cumack = 1; 11047 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 11048 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 11049 rack->rc_last_tlp_acked_set = 1; 11050 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 11051 } 11052 } 11053 /* Now do we consume the whole thing? */ 11054 rack->r_ctl.last_tmit_time_acked = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 11055 if (SEQ_GEQ(th_ack, rsm->r_end)) { 11056 /* Its all consumed. */ 11057 uint32_t left; 11058 uint8_t newly_acked; 11059 11060 if (rsm->r_flags & RACK_WAS_LOST) { 11061 /* 11062 * This can happen when we marked it as lost 11063 * and yet before retransmitting we get an ack 11064 * which can happen due to reordering. 11065 */ 11066 rsm->r_flags &= ~RACK_WAS_LOST; 11067 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), 11068 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 11069 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) 11070 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; 11071 else 11072 rack->r_ctl.rc_considered_lost = 0; 11073 } 11074 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 11075 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 11076 rsm->r_rtr_bytes = 0; 11077 /* 11078 * Record the time of highest cumack sent if its in our measurement 11079 * window and possibly bump out the end. 11080 */ 11081 rack_rsm_sender_update(rack, tp, rsm, 4); 11082 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); 11083 if (rsm->r_in_tmap) { 11084 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 11085 rsm->r_in_tmap = 0; 11086 } 11087 newly_acked = 1; 11088 if (((rsm->r_flags & RACK_ACKED) == 0) && 11089 (IN_RECOVERY(tp->t_flags))) { 11090 rack->r_ctl.bytes_acked_in_recovery += (rsm->r_end - rsm->r_start); 11091 } 11092 if (rsm->r_flags & RACK_ACKED) { 11093 /* 11094 * It was acked on the scoreboard -- remove 11095 * it from total 11096 */ 11097 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 11098 newly_acked = 0; 11099 } else if (rsm->r_flags & RACK_SACK_PASSED) { 11100 /* 11101 * There are segments ACKED on the 11102 * scoreboard further up. We are seeing 11103 * reordering. 11104 */ 11105 rsm->r_flags &= ~RACK_SACK_PASSED; 11106 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 11107 rsm->r_flags |= RACK_ACKED; 11108 rack->r_ctl.rc_reorder_ts = cts; 11109 if (rack->r_ctl.rc_reorder_ts == 0) 11110 rack->r_ctl.rc_reorder_ts = 1; 11111 if (rack->r_ent_rec_ns) { 11112 /* 11113 * We have sent no more, and we saw an sack 11114 * then ack arrive. 
11115 */ 11116 rack->r_might_revert = 1; 11117 } 11118 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); 11119 } else { 11120 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); 11121 } 11122 if ((rsm->r_flags & RACK_TO_REXT) && 11123 (tp->t_flags & TF_RCVD_TSTMP) && 11124 (to->to_flags & TOF_TS) && 11125 (to->to_tsecr != 0) && 11126 (tp->t_flags & TF_PREVVALID)) { 11127 /* 11128 * We can use the timestamp to see 11129 * if this retransmission was from the 11130 * first transmit. If so we made a mistake. 11131 */ 11132 tp->t_flags &= ~TF_PREVVALID; 11133 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 11134 /* The first transmit is what this ack is for */ 11135 rack_cong_signal(tp, CC_RTO_ERR, th_ack, __LINE__); 11136 } 11137 } 11138 left = th_ack - rsm->r_end; 11139 if (rack->app_limited_needs_set && newly_acked) 11140 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 11141 /* Free back to zone */ 11142 rack_free(rack, rsm); 11143 if (left) { 11144 goto more; 11145 } 11146 /* Check for reneging */ 11147 rsm = tqhash_min(rack->r_ctl.tqh); 11148 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 11149 /* 11150 * The peer has moved snd_una up to 11151 * the edge of this send, i.e. one 11152 * that it had previously acked. The only 11153 * way that can be true if the peer threw 11154 * away data (space issues) that it had 11155 * previously sacked (else it would have 11156 * given us snd_una up to (rsm->r_end). 11157 * We need to undo the acked markings here. 11158 * 11159 * Note we have to look to make sure th_ack is 11160 * our rsm->r_start in case we get an old ack 11161 * where th_ack is behind snd_una. 11162 */ 11163 rack_peer_reneges(rack, rsm, th_ack); 11164 } 11165 return; 11166 } 11167 if (rsm->r_flags & RACK_ACKED) { 11168 /* 11169 * It was acked on the scoreboard -- remove it from 11170 * total for the part being cum-acked. 11171 */ 11172 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 11173 } else { 11174 if (((rsm->r_flags & RACK_ACKED) == 0) && 11175 (IN_RECOVERY(tp->t_flags))) { 11176 rack->r_ctl.bytes_acked_in_recovery += (th_ack - rsm->r_start); 11177 } 11178 rack_update_pcm_ack(rack, 1, rsm->r_start, th_ack); 11179 } 11180 /* And what about the lost flag? */ 11181 if (rsm->r_flags & RACK_WAS_LOST) { 11182 /* 11183 * This can happen when we marked it as lost 11184 * and yet before retransmitting we get an ack 11185 * which can happen due to reordering. In this 11186 * case its only a partial ack of the send. 11187 */ 11188 KASSERT((rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)), 11189 ("rsm:%p rack:%p rc_considered_lost goes negative th_ack:%u", rsm, rack, th_ack)); 11190 if (rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)) 11191 rack->r_ctl.rc_considered_lost -= th_ack - rsm->r_start; 11192 else 11193 rack->r_ctl.rc_considered_lost = 0; 11194 } 11195 /* 11196 * Clear the dup ack count for 11197 * the piece that remains. 11198 */ 11199 rsm->r_dupack = 0; 11200 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 11201 if (rsm->r_rtr_bytes) { 11202 /* 11203 * It was retransmitted adjust the 11204 * sack holes for what was acked. 11205 */ 11206 int ack_am; 11207 11208 ack_am = (th_ack - rsm->r_start); 11209 if (ack_am >= rsm->r_rtr_bytes) { 11210 rack->r_ctl.rc_holes_rxt -= ack_am; 11211 rsm->r_rtr_bytes -= ack_am; 11212 } 11213 } 11214 /* 11215 * Update where the piece starts and record 11216 * the time of send of highest cumack sent if 11217 * its in our GP range. 
11218 */ 11219 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 11220 /* Now we need to move our offset forward too */ 11221 if (rsm->m && 11222 ((rsm->orig_m_len != rsm->m->m_len) || 11223 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { 11224 /* Fix up the orig_m_len and possibly the mbuf offset */ 11225 rack_adjust_orig_mlen(rsm); 11226 } 11227 rsm->soff += (th_ack - rsm->r_start); 11228 rack_rsm_sender_update(rack, tp, rsm, 5); 11229 /* The trim will move th_ack into r_start for us */ 11230 tqhash_trim(rack->r_ctl.tqh, th_ack); 11231 /* Now do we need to move the mbuf fwd too? */ 11232 { 11233 struct mbuf *m; 11234 uint32_t soff; 11235 11236 m = rsm->m; 11237 soff = rsm->soff; 11238 if (m) { 11239 while (soff >= m->m_len) { 11240 soff -= m->m_len; 11241 KASSERT((m->m_next != NULL), 11242 (" rsm:%p off:%u soff:%u m:%p", 11243 rsm, rsm->soff, soff, m)); 11244 m = m->m_next; 11245 if (m == NULL) { 11246 /* 11247 * This is a fall-back that prevents a panic. In reality 11248 * we should be able to walk the mbuf's and find our place. 11249 * At this point snd_una has not been updated with the sbcut() yet 11250 * but tqhash_trim did update rsm->r_start so the offset calcuation 11251 * should work fine. This is undesirable since we will take cache 11252 * hits to access the socket buffer. And even more puzzling is that 11253 * it happens occasionally. It should not :( 11254 */ 11255 m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 11256 (rsm->r_start - tp->snd_una), 11257 &soff); 11258 break; 11259 } 11260 } 11261 /* 11262 * Now save in our updated values. 11263 */ 11264 rsm->m = m; 11265 rsm->soff = soff; 11266 rsm->orig_m_len = rsm->m->m_len; 11267 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 11268 } 11269 } 11270 if (rack->app_limited_needs_set && 11271 SEQ_GEQ(th_ack, tp->gput_seq)) 11272 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 11273 } 11274 11275 static void 11276 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 11277 { 11278 struct rack_sendmap *rsm; 11279 int sack_pass_fnd = 0; 11280 11281 if (rack->r_might_revert) { 11282 /* 11283 * Ok we have reordering, have not sent anything, we 11284 * might want to revert the congestion state if nothing 11285 * further has SACK_PASSED on it. Lets check. 11286 * 11287 * We also get here when we have DSACKs come in for 11288 * all the data that we FR'd. Note that a rxt or tlp 11289 * timer clears this from happening. 11290 */ 11291 11292 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 11293 if (rsm->r_flags & RACK_SACK_PASSED) { 11294 sack_pass_fnd = 1; 11295 break; 11296 } 11297 } 11298 if (sack_pass_fnd == 0) { 11299 /* 11300 * We went into recovery 11301 * incorrectly due to reordering! 11302 */ 11303 int orig_cwnd; 11304 11305 rack->r_ent_rec_ns = 0; 11306 orig_cwnd = tp->snd_cwnd; 11307 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 11308 tp->snd_recover = tp->snd_una; 11309 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 11310 if (IN_RECOVERY(tp->t_flags)) { 11311 rack_exit_recovery(tp, rack, 3); 11312 if ((rack->rto_from_rec == 1) && (rack_ssthresh_rest_rto_rec != 0) ){ 11313 /* 11314 * We were in recovery, had an RTO 11315 * and then re-entered recovery (more sack's arrived) 11316 * and we have properly recorded the old ssthresh from 11317 * the first recovery. We want to be able to slow-start 11318 * back to this level. The ssthresh from the timeout 11319 * and then back into recovery will end up most likely 11320 * to be min(cwnd=1mss, 2mss). 
Which makes it basically 11321 * so we get no slow-start after our RTO. 11322 */ 11323 rack->rto_from_rec = 0; 11324 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) 11325 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; 11326 } 11327 } 11328 rack->r_ctl.bytes_acked_in_recovery = 0; 11329 rack->r_ctl.time_entered_recovery = 0; 11330 } 11331 rack->r_might_revert = 0; 11332 } 11333 } 11334 11335 11336 static int 11337 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 11338 { 11339 11340 uint32_t am, l_end; 11341 int was_tlp = 0; 11342 11343 if (SEQ_GT(end, start)) 11344 am = end - start; 11345 else 11346 am = 0; 11347 if ((rack->rc_last_tlp_acked_set ) && 11348 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && 11349 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { 11350 /* 11351 * The DSACK is because of a TLP which we don't 11352 * do anything with the reordering window over since 11353 * it was not reordering that caused the DSACK but 11354 * our previous retransmit TLP. 11355 */ 11356 rack_log_dsack_event(rack, 7, __LINE__, start, end); 11357 was_tlp = 1; 11358 goto skip_dsack_round; 11359 } 11360 if (rack->rc_last_sent_tlp_seq_valid) { 11361 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; 11362 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && 11363 (SEQ_LEQ(end, l_end))) { 11364 /* 11365 * This dsack is from the last sent TLP, ignore it 11366 * for reordering purposes. 11367 */ 11368 rack_log_dsack_event(rack, 7, __LINE__, start, end); 11369 was_tlp = 1; 11370 goto skip_dsack_round; 11371 } 11372 } 11373 if (rack->rc_dsack_round_seen == 0) { 11374 rack->rc_dsack_round_seen = 1; 11375 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; 11376 rack->r_ctl.num_dsack++; 11377 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ 11378 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); 11379 } 11380 skip_dsack_round: 11381 /* 11382 * We keep track of how many DSACK blocks we get 11383 * after a recovery incident. 11384 */ 11385 rack->r_ctl.dsack_byte_cnt += am; 11386 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 11387 rack->r_ctl.retran_during_recovery && 11388 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 11389 /* 11390 * False recovery most likely culprit is reordering. If 11391 * nothing else is missing we need to revert. 11392 */ 11393 rack->r_might_revert = 1; 11394 rack_handle_might_revert(rack->rc_tp, rack); 11395 rack->r_might_revert = 0; 11396 rack->r_ctl.retran_during_recovery = 0; 11397 rack->r_ctl.dsack_byte_cnt = 0; 11398 } 11399 return (was_tlp); 11400 } 11401 11402 static uint32_t 11403 do_rack_compute_pipe(struct tcpcb *tp, struct tcp_rack *rack, uint32_t snd_una) 11404 { 11405 return (((tp->snd_max - snd_una) - 11406 (rack->r_ctl.rc_sacked + rack->r_ctl.rc_considered_lost)) + rack->r_ctl.rc_holes_rxt); 11407 } 11408 11409 static int32_t 11410 rack_compute_pipe(struct tcpcb *tp) 11411 { 11412 return ((int32_t)do_rack_compute_pipe(tp, 11413 (struct tcp_rack *)tp->t_fb_ptr, 11414 tp->snd_una)); 11415 } 11416 11417 static void 11418 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 11419 { 11420 /* Deal with changed and PRR here (in recovery only) */ 11421 uint32_t pipe, snd_una; 11422 11423 rack->r_ctl.rc_prr_delivered += changed; 11424 11425 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 11426 /* 11427 * It is all outstanding, we are application limited 11428 * and thus we don't need more room to send anything. 
11429 * Note we use tp->snd_una here and not th_ack because 11430 * the data as yet not been cut from the sb. 11431 */ 11432 rack->r_ctl.rc_prr_sndcnt = 0; 11433 return; 11434 } 11435 /* Compute prr_sndcnt */ 11436 if (SEQ_GT(tp->snd_una, th_ack)) { 11437 snd_una = tp->snd_una; 11438 } else { 11439 snd_una = th_ack; 11440 } 11441 pipe = do_rack_compute_pipe(tp, rack, snd_una); 11442 if (pipe > tp->snd_ssthresh) { 11443 long sndcnt; 11444 11445 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 11446 if (rack->r_ctl.rc_prr_recovery_fs > 0) 11447 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 11448 else { 11449 rack->r_ctl.rc_prr_sndcnt = 0; 11450 rack_log_to_prr(rack, 9, 0, __LINE__); 11451 sndcnt = 0; 11452 } 11453 sndcnt++; 11454 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 11455 sndcnt -= rack->r_ctl.rc_prr_out; 11456 else 11457 sndcnt = 0; 11458 rack->r_ctl.rc_prr_sndcnt = sndcnt; 11459 rack_log_to_prr(rack, 10, 0, __LINE__); 11460 } else { 11461 uint32_t limit; 11462 11463 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 11464 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 11465 else 11466 limit = 0; 11467 if (changed > limit) 11468 limit = changed; 11469 limit += ctf_fixed_maxseg(tp); 11470 if (tp->snd_ssthresh > pipe) { 11471 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 11472 rack_log_to_prr(rack, 11, 0, __LINE__); 11473 } else { 11474 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 11475 rack_log_to_prr(rack, 12, 0, __LINE__); 11476 } 11477 } 11478 } 11479 11480 static void 11481 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck, 11482 int *dsack_seen, int *sacks_seen) 11483 { 11484 uint32_t changed; 11485 struct tcp_rack *rack; 11486 struct rack_sendmap *rsm; 11487 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 11488 register uint32_t th_ack; 11489 int32_t i, j, k, num_sack_blks = 0; 11490 uint32_t cts, acked, ack_point; 11491 int loop_start = 0; 11492 uint32_t tsused; 11493 uint32_t segsiz; 11494 11495 11496 INP_WLOCK_ASSERT(tptoinpcb(tp)); 11497 if (tcp_get_flags(th) & TH_RST) { 11498 /* We don't log resets */ 11499 return; 11500 } 11501 rack = (struct tcp_rack *)tp->t_fb_ptr; 11502 cts = tcp_get_usecs(NULL); 11503 rsm = tqhash_min(rack->r_ctl.tqh); 11504 changed = 0; 11505 th_ack = th->th_ack; 11506 segsiz = ctf_fixed_maxseg(rack->rc_tp); 11507 if (BYTES_THIS_ACK(tp, th) >= segsiz) { 11508 /* 11509 * You only get credit for 11510 * MSS and greater (and you get extra 11511 * credit for larger cum-ack moves). 11512 */ 11513 int ac; 11514 11515 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 11516 counter_u64_add(rack_ack_total, ac); 11517 } 11518 if (SEQ_GT(th_ack, tp->snd_una)) { 11519 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 11520 tp->t_acktime = ticks; 11521 } 11522 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 11523 changed = th_ack - rsm->r_start; 11524 if (changed) { 11525 rack_process_to_cumack(tp, rack, th_ack, cts, to, 11526 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); 11527 } 11528 if ((to->to_flags & TOF_SACK) == 0) { 11529 /* We are done nothing left and no sack. */ 11530 rack_handle_might_revert(tp, rack); 11531 /* 11532 * For cases where we struck a dup-ack 11533 * with no SACK, add to the changes so 11534 * PRR will work right. 
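 * (We credit one segment worth of "changed" so the PRR
 * bookkeeping still releases data on a SACK-less duplicate
 * ack.)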
11535 */ 11536 if (dup_ack_struck && (changed == 0)) { 11537 changed += ctf_fixed_maxseg(rack->rc_tp); 11538 } 11539 goto out; 11540 } 11541 /* Sack block processing */ 11542 if (SEQ_GT(th_ack, tp->snd_una)) 11543 ack_point = th_ack; 11544 else 11545 ack_point = tp->snd_una; 11546 for (i = 0; i < to->to_nsacks; i++) { 11547 bcopy((to->to_sacks + i * TCPOLEN_SACK), 11548 &sack, sizeof(sack)); 11549 sack.start = ntohl(sack.start); 11550 sack.end = ntohl(sack.end); 11551 if (SEQ_GT(sack.end, sack.start) && 11552 SEQ_GT(sack.start, ack_point) && 11553 SEQ_LT(sack.start, tp->snd_max) && 11554 SEQ_GT(sack.end, ack_point) && 11555 SEQ_LEQ(sack.end, tp->snd_max)) { 11556 sack_blocks[num_sack_blks] = sack; 11557 num_sack_blks++; 11558 } else if (SEQ_LEQ(sack.start, th_ack) && 11559 SEQ_LEQ(sack.end, th_ack)) { 11560 int was_tlp; 11561 11562 if (dsack_seen != NULL) 11563 *dsack_seen = 1; 11564 was_tlp = rack_note_dsack(rack, sack.start, sack.end); 11565 /* 11566 * Its a D-SACK block. 11567 */ 11568 tcp_record_dsack(tp, sack.start, sack.end, was_tlp); 11569 } 11570 } 11571 if (rack->rc_dsack_round_seen) { 11572 /* Is the dsack roound over? */ 11573 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { 11574 /* Yes it is */ 11575 rack->rc_dsack_round_seen = 0; 11576 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 11577 } 11578 } 11579 /* 11580 * Sort the SACK blocks so we can update the rack scoreboard with 11581 * just one pass. 11582 */ 11583 num_sack_blks = sack_filter_blks(tp, &rack->r_ctl.rack_sf, sack_blocks, 11584 num_sack_blks, th->th_ack); 11585 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 11586 if (sacks_seen != NULL) 11587 *sacks_seen = num_sack_blks; 11588 if (num_sack_blks == 0) { 11589 /* Nothing to sack, but we need to update counts */ 11590 goto out_with_totals; 11591 } 11592 /* Its a sack of some sort */ 11593 if (num_sack_blks < 2) { 11594 /* Only one, we don't need to sort */ 11595 goto do_sack_work; 11596 } 11597 /* Sort the sacks */ 11598 for (i = 0; i < num_sack_blks; i++) { 11599 for (j = i + 1; j < num_sack_blks; j++) { 11600 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 11601 sack = sack_blocks[i]; 11602 sack_blocks[i] = sack_blocks[j]; 11603 sack_blocks[j] = sack; 11604 } 11605 } 11606 } 11607 /* 11608 * Now are any of the sack block ends the same (yes some 11609 * implementations send these)? 11610 */ 11611 again: 11612 if (num_sack_blks == 0) 11613 goto out_with_totals; 11614 if (num_sack_blks > 1) { 11615 for (i = 0; i < num_sack_blks; i++) { 11616 for (j = i + 1; j < num_sack_blks; j++) { 11617 if (sack_blocks[i].end == sack_blocks[j].end) { 11618 /* 11619 * Ok these two have the same end we 11620 * want the smallest end and then 11621 * throw away the larger and start 11622 * again. 11623 */ 11624 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 11625 /* 11626 * The second block covers 11627 * more area use that 11628 */ 11629 sack_blocks[i].start = sack_blocks[j].start; 11630 } 11631 /* 11632 * Now collapse out the dup-sack and 11633 * lower the count 11634 */ 11635 for (k = (j + 1); k < num_sack_blks; k++) { 11636 sack_blocks[j].start = sack_blocks[k].start; 11637 sack_blocks[j].end = sack_blocks[k].end; 11638 j++; 11639 } 11640 num_sack_blks--; 11641 goto again; 11642 } 11643 } 11644 } 11645 } 11646 do_sack_work: 11647 /* 11648 * First lets look to see if 11649 * we have retransmitted and 11650 * can use the transmit next? 
11651 */ 11652 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 11653 if (rsm && 11654 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 11655 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 11656 /* 11657 * We probably did the FR and the next 11658 * SACK in continues as we would expect. 11659 */ 11660 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, segsiz); 11661 if (acked) { 11662 rack->r_wanted_output = 1; 11663 changed += acked; 11664 } 11665 if (num_sack_blks == 1) { 11666 /* 11667 * This is what we would expect from 11668 * a normal implementation to happen 11669 * after we have retransmitted the FR, 11670 * i.e the sack-filter pushes down 11671 * to 1 block and the next to be retransmitted 11672 * is the sequence in the sack block (has more 11673 * are acked). Count this as ACK'd data to boost 11674 * up the chances of recovering any false positives. 11675 */ 11676 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 11677 counter_u64_add(rack_express_sack, 1); 11678 goto out_with_totals; 11679 } else { 11680 /* 11681 * Start the loop through the 11682 * rest of blocks, past the first block. 11683 */ 11684 loop_start = 1; 11685 } 11686 } 11687 counter_u64_add(rack_sack_total, 1); 11688 rsm = rack->r_ctl.rc_sacklast; 11689 for (i = loop_start; i < num_sack_blks; i++) { 11690 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, segsiz); 11691 if (acked) { 11692 rack->r_wanted_output = 1; 11693 changed += acked; 11694 } 11695 } 11696 out_with_totals: 11697 if (num_sack_blks > 1) { 11698 /* 11699 * You get an extra stroke if 11700 * you have more than one sack-blk, this 11701 * could be where we are skipping forward 11702 * and the sack-filter is still working, or 11703 * it could be an attacker constantly 11704 * moving us. 11705 */ 11706 counter_u64_add(rack_move_some, 1); 11707 } 11708 out: 11709 if (changed) { 11710 /* Something changed cancel the rack timer */ 11711 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11712 } 11713 tsused = tcp_get_usecs(NULL); 11714 rsm = tcp_rack_output(tp, rack, tsused); 11715 if ((!IN_FASTRECOVERY(tp->t_flags)) && 11716 rsm && 11717 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { 11718 /* Enter recovery */ 11719 entered_recovery = 1; 11720 rack_cong_signal(tp, CC_NDUPACK, th_ack, __LINE__); 11721 /* 11722 * When we enter recovery we need to assure we send 11723 * one packet. 11724 */ 11725 if (rack->rack_no_prr == 0) { 11726 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 11727 rack_log_to_prr(rack, 8, 0, __LINE__); 11728 } 11729 rack->r_timer_override = 1; 11730 rack->r_early = 0; 11731 rack->r_ctl.rc_agg_early = 0; 11732 } else if (IN_FASTRECOVERY(tp->t_flags) && 11733 rsm && 11734 (rack->r_rr_config == 3)) { 11735 /* 11736 * Assure we can output and we get no 11737 * remembered pace time except the retransmit. 11738 */ 11739 rack->r_timer_override = 1; 11740 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 11741 rack->r_ctl.rc_resend = rsm; 11742 } 11743 if (IN_FASTRECOVERY(tp->t_flags) && 11744 (rack->rack_no_prr == 0) && 11745 (entered_recovery == 0)) { 11746 rack_update_prr(tp, rack, changed, th_ack); 11747 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 11748 ((tcp_in_hpts(rack->rc_tp) == 0) && 11749 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 11750 /* 11751 * If you are pacing output you don't want 11752 * to override. 
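 * (We only force output here when PRR has at least a full
 * segment to release and no pacer run is already scheduled.)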
11753 */ 11754 rack->r_early = 0; 11755 rack->r_ctl.rc_agg_early = 0; 11756 rack->r_timer_override = 1; 11757 } 11758 } 11759 } 11760 11761 static void 11762 rack_strike_dupack(struct tcp_rack *rack, tcp_seq th_ack) 11763 { 11764 struct rack_sendmap *rsm; 11765 11766 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 11767 while (rsm) { 11768 /* 11769 * We need to skip anything already set 11770 * to be retransmitted. 11771 */ 11772 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 11773 (rsm->r_flags & RACK_MUST_RXT)) { 11774 rsm = TAILQ_NEXT(rsm, r_tnext); 11775 continue; 11776 } 11777 break; 11778 } 11779 if (rsm && (rsm->r_dupack < 0xff)) { 11780 rsm->r_dupack++; 11781 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 11782 struct timeval tv; 11783 uint32_t cts; 11784 /* 11785 * Here we see if we need to retransmit. For 11786 * a SACK type connection if enough time has passed 11787 * we will get a return of the rsm. For a non-sack 11788 * connection we will get the rsm returned if the 11789 * dupack value is 3 or more. 11790 */ 11791 cts = tcp_get_usecs(&tv); 11792 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); 11793 if (rack->r_ctl.rc_resend != NULL) { 11794 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { 11795 rack_cong_signal(rack->rc_tp, CC_NDUPACK, 11796 th_ack, __LINE__); 11797 } 11798 rack->r_wanted_output = 1; 11799 rack->r_timer_override = 1; 11800 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 11801 } 11802 } else { 11803 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 11804 } 11805 } 11806 } 11807 11808 static void 11809 rack_check_bottom_drag(struct tcpcb *tp, 11810 struct tcp_rack *rack, 11811 struct socket *so) 11812 { 11813 /* 11814 * So what is dragging bottom? 11815 * 11816 * Dragging bottom means you were under pacing and had a 11817 * delay in processing inbound acks waiting on our pacing 11818 * timer to expire. While you were waiting all of the acknowledgments 11819 * for the packets you sent have arrived. This means we are pacing 11820 * way underneath the bottleneck to the point where our Goodput 11821 * measurements stop working, since they require more than one 11822 * ack (usually at least 8 packets worth with multiple acks so we can 11823 * gauge the inter-ack times). If that occurs we have a real problem 11824 * since we are stuck in a hole that we can't get out of without 11825 * something speeding us up. 11826 * 11827 * We also check to see if we are widdling down to just one segment 11828 * outstanding. If this occurs and we have room to send in our cwnd/rwnd 11829 * then we are adding the delayed ack interval into our measurments and 11830 * we need to speed up slightly. 11831 */ 11832 uint32_t segsiz, minseg; 11833 11834 segsiz = ctf_fixed_maxseg(tp); 11835 minseg = segsiz; 11836 if (tp->snd_max == tp->snd_una) { 11837 /* 11838 * We are doing dynamic pacing and we are way 11839 * under. Basically everything got acked while 11840 * we were still waiting on the pacer to expire. 11841 * 11842 * This means we need to boost the b/w in 11843 * addition to any earlier boosting of 11844 * the multiplier. 11845 */ 11846 uint64_t lt_bw; 11847 11848 tcp_trace_point(rack->rc_tp, TCP_TP_PACED_BOTTOM); 11849 lt_bw = rack_get_lt_bw(rack); 11850 rack->rc_dragged_bottom = 1; 11851 rack_validate_multipliers_at_or_above100(rack); 11852 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && 11853 (rack->dis_lt_bw == 0) && 11854 (rack->use_lesser_lt_bw == 0) && 11855 (lt_bw > 0)) { 11856 /* 11857 * Lets use the long-term b/w we have 11858 * been getting as a base. 
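 * If no goodput estimate has been filled yet we seed gp_bw
 * from it (capped to a conservative value below); otherwise we
 * only replace gp_bw when the long-term estimate exceeds what
 * we already have, and failing that we just bump the pacing
 * multiplier.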
11859 */ 11860 if (rack->rc_gp_filled == 0) { 11861 if (lt_bw > ONE_POINT_TWO_MEG) { 11862 /* 11863 * If we have no measurement 11864 * don't let us set in more than 11865 * 1.2Mbps. If we are still too 11866 * low after pacing with this we 11867 * will hopefully have a max b/w 11868 * available to sanity check things. 11869 */ 11870 lt_bw = ONE_POINT_TWO_MEG; 11871 } 11872 rack->r_ctl.rc_rtt_diff = 0; 11873 rack->r_ctl.gp_bw = lt_bw; 11874 rack->rc_gp_filled = 1; 11875 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 11876 rack->r_ctl.num_measurements = RACK_REQ_AVG; 11877 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 11878 } else if (lt_bw > rack->r_ctl.gp_bw) { 11879 rack->r_ctl.rc_rtt_diff = 0; 11880 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 11881 rack->r_ctl.num_measurements = RACK_REQ_AVG; 11882 rack->r_ctl.gp_bw = lt_bw; 11883 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 11884 } else 11885 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11886 if ((rack->gp_ready == 0) && 11887 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 11888 /* We have enough measurements now */ 11889 rack->gp_ready = 1; 11890 if (rack->dgp_on || 11891 rack->rack_hibeta) 11892 rack_set_cc_pacing(rack); 11893 if (rack->defer_options) 11894 rack_apply_deferred_options(rack); 11895 } 11896 } else { 11897 /* 11898 * zero rtt possibly?, settle for just an old increase. 11899 */ 11900 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11901 } 11902 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 11903 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), 11904 minseg)) && 11905 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && 11906 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && 11907 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= 11908 (segsiz * rack_req_segs))) { 11909 /* 11910 * We are doing dynamic GP pacing and 11911 * we have everything except 1MSS or less 11912 * bytes left out. We are still pacing away. 11913 * And there is data that could be sent, This 11914 * means we are inserting delayed ack time in 11915 * our measurements because we are pacing too slow. 
11916 */ 11917 rack_validate_multipliers_at_or_above100(rack); 11918 rack->rc_dragged_bottom = 1; 11919 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11920 } 11921 } 11922 11923 #ifdef TCP_REQUEST_TRK 11924 static void 11925 rack_log_hybrid(struct tcp_rack *rack, uint32_t seq, 11926 struct tcp_sendfile_track *cur, uint8_t mod, int line, int err) 11927 { 11928 int do_log; 11929 11930 do_log = tcp_bblogging_on(rack->rc_tp); 11931 if (do_log == 0) { 11932 if ((do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) )== 0) 11933 return; 11934 /* We only allow the three below with point logging on */ 11935 if ((mod != HYBRID_LOG_RULES_APP) && 11936 (mod != HYBRID_LOG_RULES_SET) && 11937 (mod != HYBRID_LOG_REQ_COMP)) 11938 return; 11939 11940 } 11941 if (do_log) { 11942 union tcp_log_stackspecific log; 11943 struct timeval tv; 11944 11945 /* Convert our ms to a microsecond */ 11946 memset(&log, 0, sizeof(log)); 11947 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 11948 log.u_bbr.flex1 = seq; 11949 log.u_bbr.cwnd_gain = line; 11950 if (cur != NULL) { 11951 uint64_t off; 11952 11953 log.u_bbr.flex2 = cur->start_seq; 11954 log.u_bbr.flex3 = cur->end_seq; 11955 log.u_bbr.flex4 = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 11956 log.u_bbr.flex5 = (uint32_t)(cur->localtime & 0x00000000ffffffff); 11957 log.u_bbr.flex6 = cur->flags; 11958 log.u_bbr.pkts_out = cur->hybrid_flags; 11959 log.u_bbr.rttProp = cur->timestamp; 11960 log.u_bbr.cur_del_rate = cur->cspr; 11961 log.u_bbr.bw_inuse = cur->start; 11962 log.u_bbr.applimited = (uint32_t)(cur->end & 0x00000000ffffffff); 11963 log.u_bbr.delivered = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff) ; 11964 log.u_bbr.epoch = (uint32_t)(cur->deadline & 0x00000000ffffffff); 11965 log.u_bbr.lt_epoch = (uint32_t)((cur->deadline >> 32) & 0x00000000ffffffff) ; 11966 log.u_bbr.inhpts = 1; 11967 #ifdef TCP_REQUEST_TRK 11968 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 11969 log.u_bbr.use_lt_bw = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 11970 #endif 11971 } else { 11972 log.u_bbr.flex2 = err; 11973 } 11974 /* 11975 * Fill in flex7 to be CHD (catchup|hybrid|DGP) 11976 */ 11977 log.u_bbr.flex7 = rack->rc_catch_up; 11978 log.u_bbr.flex7 <<= 1; 11979 log.u_bbr.flex7 |= rack->rc_hybrid_mode; 11980 log.u_bbr.flex7 <<= 1; 11981 log.u_bbr.flex7 |= rack->dgp_on; 11982 /* 11983 * Compose bbr_state to be a bit wise 0000ADHF 11984 * where A is the always_pace flag 11985 * where D is the dgp_on flag 11986 * where H is the hybrid_mode on flag 11987 * where F is the use_fixed_rate flag. 
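 * For example a bbr_state of 0x9 (binary 1001) means
 * always_pace and use_fixed_rate are set while DGP and hybrid
 * mode are off.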
11988 */ 11989 log.u_bbr.bbr_state = rack->rc_always_pace; 11990 log.u_bbr.bbr_state <<= 1; 11991 log.u_bbr.bbr_state |= rack->dgp_on; 11992 log.u_bbr.bbr_state <<= 1; 11993 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 11994 log.u_bbr.bbr_state <<= 1; 11995 log.u_bbr.bbr_state |= rack->use_fixed_rate; 11996 log.u_bbr.flex8 = mod; 11997 log.u_bbr.delRate = rack->r_ctl.bw_rate_cap; 11998 log.u_bbr.bbr_substate = rack->r_ctl.client_suggested_maxseg; 11999 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 12000 log.u_bbr.pkt_epoch = rack->rc_tp->tcp_hybrid_start; 12001 log.u_bbr.lost = rack->rc_tp->tcp_hybrid_error; 12002 log.u_bbr.pacing_gain = (uint16_t)rack->rc_tp->tcp_hybrid_stop; 12003 tcp_log_event(rack->rc_tp, NULL, 12004 &rack->rc_inp->inp_socket->so_rcv, 12005 &rack->rc_inp->inp_socket->so_snd, 12006 TCP_HYBRID_PACING_LOG, 0, 12007 0, &log, false, NULL, __func__, __LINE__, &tv); 12008 } 12009 } 12010 #endif 12011 12012 #ifdef TCP_REQUEST_TRK 12013 static void 12014 rack_set_dgp_hybrid_mode(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts) 12015 { 12016 struct tcp_sendfile_track *rc_cur, *orig_ent; 12017 struct tcpcb *tp; 12018 int err = 0; 12019 12020 orig_ent = rack->r_ctl.rc_last_sft; 12021 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, seq); 12022 if (rc_cur == NULL) { 12023 /* If not in the beginning what about the end piece */ 12024 if (rack->rc_hybrid_mode) 12025 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); 12026 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, (seq + len - 1)); 12027 } else { 12028 err = 12345; 12029 } 12030 /* If we find no parameters we are in straight DGP mode */ 12031 if(rc_cur == NULL) { 12032 /* None found for this seq, just DGP for now */ 12033 if (rack->rc_hybrid_mode) { 12034 rack->r_ctl.client_suggested_maxseg = 0; 12035 rack->rc_catch_up = 0; 12036 if (rack->cspr_is_fcc == 0) 12037 rack->r_ctl.bw_rate_cap = 0; 12038 else 12039 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 12040 } 12041 if (rack->rc_hybrid_mode) { 12042 rack_log_hybrid(rack, (seq + len - 1), NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); 12043 } 12044 if (rack->r_ctl.rc_last_sft) { 12045 rack->r_ctl.rc_last_sft = NULL; 12046 } 12047 return; 12048 } 12049 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_WASSET) == 0) { 12050 /* This entry was never setup for hybrid pacing on/off etc */ 12051 if (rack->rc_hybrid_mode) { 12052 rack->r_ctl.client_suggested_maxseg = 0; 12053 rack->rc_catch_up = 0; 12054 rack->r_ctl.bw_rate_cap = 0; 12055 } 12056 if (rack->r_ctl.rc_last_sft) { 12057 rack->r_ctl.rc_last_sft = NULL; 12058 } 12059 if ((rc_cur->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { 12060 rc_cur->flags |= TCP_TRK_TRACK_FLG_FSND; 12061 rc_cur->first_send = cts; 12062 rc_cur->sent_at_fs = rack->rc_tp->t_sndbytes; 12063 rc_cur->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; 12064 } 12065 return; 12066 } 12067 /* 12068 * Ok if we have a new entry *or* have never 12069 * set up an entry we need to proceed. If 12070 * we have already set it up this entry we 12071 * just continue along with what we already 12072 * setup. 
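 * (The already-set-up case is the rc_last_sft == rc_cur check
 * just below.)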
12073 */ 12074 tp = rack->rc_tp; 12075 if ((rack->r_ctl.rc_last_sft != NULL) && 12076 (rack->r_ctl.rc_last_sft == rc_cur)) { 12077 /* Its already in place */ 12078 if (rack->rc_hybrid_mode) 12079 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_ISSAME, __LINE__, 0); 12080 return; 12081 } 12082 if (rack->rc_hybrid_mode == 0) { 12083 rack->r_ctl.rc_last_sft = rc_cur; 12084 if (orig_ent) { 12085 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; 12086 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; 12087 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; 12088 } 12089 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0); 12090 return; 12091 } 12092 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CSPR) && rc_cur->cspr){ 12093 /* Compensate for all the header overhead's */ 12094 if (rack->cspr_is_fcc == 0) 12095 rack->r_ctl.bw_rate_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); 12096 else 12097 rack->r_ctl.fillcw_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); 12098 } else { 12099 if (rack->rc_hybrid_mode) { 12100 if (rack->cspr_is_fcc == 0) 12101 rack->r_ctl.bw_rate_cap = 0; 12102 else 12103 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 12104 } 12105 } 12106 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_H_MS) 12107 rack->r_ctl.client_suggested_maxseg = rc_cur->hint_maxseg; 12108 else 12109 rack->r_ctl.client_suggested_maxseg = 0; 12110 if (rc_cur->timestamp == rack->r_ctl.last_tm_mark) { 12111 /* 12112 * It is the same timestamp as the previous one 12113 * add the hybrid flag that will indicate we use 12114 * sendtime not arrival time for catch-up mode. 12115 */ 12116 rc_cur->hybrid_flags |= TCP_HYBRID_PACING_SENDTIME; 12117 } 12118 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CU) && 12119 (rc_cur->cspr > 0)) { 12120 uint64_t len; 12121 12122 rack->rc_catch_up = 1; 12123 /* 12124 * Calculate the deadline time, first set the 12125 * time to when the request arrived. 12126 */ 12127 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_SENDTIME) { 12128 /* 12129 * For cases where its a duplicate tm (we received more 12130 * than one request for a tm) we want to use now, the point 12131 * where we are just sending the first bit of the request. 12132 */ 12133 rc_cur->deadline = cts; 12134 } else { 12135 /* 12136 * Here we have a different tm from the last request 12137 * so we want to use arrival time as our base. 12138 */ 12139 rc_cur->deadline = rc_cur->localtime; 12140 } 12141 /* 12142 * Next calculate the length and compensate for 12143 * TLS if need be. 12144 */ 12145 len = rc_cur->end - rc_cur->start; 12146 if (tp->t_inpcb.inp_socket->so_snd.sb_tls_info) { 12147 /* 12148 * This session is doing TLS. Take a swag guess 12149 * at the overhead. 12150 */ 12151 len += tcp_estimate_tls_overhead(tp->t_inpcb.inp_socket, len); 12152 } 12153 /* 12154 * Now considering the size, and the cspr, what is the time that 12155 * would be required at the cspr rate. Here we use the raw 12156 * cspr value since the client only looks at the raw data. We 12157 * do use len which includes TLS overhead, but not the TCP/IP etc. 12158 * That will get made up for in the CU pacing rate set. 12159 */ 12160 len *= HPTS_USEC_IN_SEC; 12161 len /= rc_cur->cspr; 12162 rc_cur->deadline += len; 12163 } else { 12164 rack->rc_catch_up = 0; 12165 rc_cur->deadline = 0; 12166 } 12167 if (rack->r_ctl.client_suggested_maxseg != 0) { 12168 /* 12169 * We need to reset the max pace segs if we have a 12170 * client_suggested_maxseg. 
12171 */ 12172 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12173 } 12174 if (orig_ent) { 12175 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; 12176 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; 12177 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; 12178 } 12179 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0); 12180 /* Remember it for next time and for CU mode */ 12181 rack->r_ctl.rc_last_sft = rc_cur; 12182 rack->r_ctl.last_tm_mark = rc_cur->timestamp; 12183 } 12184 #endif 12185 12186 static void 12187 rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts) 12188 { 12189 #ifdef TCP_REQUEST_TRK 12190 struct tcp_sendfile_track *ent; 12191 12192 ent = rack->r_ctl.rc_last_sft; 12193 if ((ent == NULL) || 12194 (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) || 12195 (SEQ_GEQ(seq, ent->end_seq))) { 12196 /* Time to update the track. */ 12197 rack_set_dgp_hybrid_mode(rack, seq, len, cts); 12198 ent = rack->r_ctl.rc_last_sft; 12199 } 12200 /* Out of all */ 12201 if (ent == NULL) { 12202 return; 12203 } 12204 if (SEQ_LT(ent->end_seq, (seq + len))) { 12205 /* 12206 * This is the case where our end_seq guess 12207 * was wrong. This is usually due to TLS having 12208 * more bytes then our guess. It could also be the 12209 * case that the client sent in two requests closely 12210 * and the SB is full of both so we are sending part 12211 * of each (end|beg). In such a case lets move this 12212 * guys end to match the end of this send. That 12213 * way it will complete when all of it is acked. 12214 */ 12215 ent->end_seq = (seq + len); 12216 if (rack->rc_hybrid_mode) 12217 rack_log_hybrid_bw(rack, seq, len, 0, 0, HYBRID_LOG_EXTEND, 0, ent, __LINE__); 12218 } 12219 /* Now validate we have set the send time of this one */ 12220 if ((ent->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { 12221 ent->flags |= TCP_TRK_TRACK_FLG_FSND; 12222 ent->first_send = cts; 12223 ent->sent_at_fs = rack->rc_tp->t_sndbytes; 12224 ent->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; 12225 } 12226 #endif 12227 } 12228 12229 static void 12230 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount) 12231 { 12232 /* 12233 * The fast output path is enabled and we 12234 * have moved the cumack forward. Lets see if 12235 * we can expand forward the fast path length by 12236 * that amount. What we would ideally like to 12237 * do is increase the number of bytes in the 12238 * fast path block (left_to_send) by the 12239 * acked amount. However we have to gate that 12240 * by two factors: 12241 * 1) The amount outstanding and the rwnd of the peer 12242 * (i.e. we don't want to exceed the rwnd of the peer). 12243 * <and> 12244 * 2) The amount of data left in the socket buffer (i.e. 12245 * we can't send beyond what is in the buffer). 12246 * 12247 * Note that this does not take into account any increase 12248 * in the cwnd. We will only extend the fast path by 12249 * what was acked. 
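 * In short, left_to_send is only grown while the new total
 * stays within both the un-sent data in the socket buffer and
 * the remaining space in the peer's advertised window.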
12250 */ 12251 uint32_t new_total, gating_val; 12252 12253 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; 12254 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), 12255 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); 12256 if (new_total <= gating_val) { 12257 /* We can increase left_to_send by the acked amount */ 12258 counter_u64_add(rack_extended_rfo, 1); 12259 rack->r_ctl.fsb.left_to_send = new_total; 12260 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))), 12261 ("rack:%p left_to_send:%u sbavail:%u out:%u", 12262 rack, rack->r_ctl.fsb.left_to_send, 12263 sbavail(&rack->rc_inp->inp_socket->so_snd), 12264 (tp->snd_max - tp->snd_una))); 12265 12266 } 12267 } 12268 12269 static void 12270 rack_adjust_sendmap_head(struct tcp_rack *rack, struct sockbuf *sb) 12271 { 12272 /* 12273 * Here any sendmap entry that points to the 12274 * beginning mbuf must be adjusted to the correct 12275 * offset. This must be called with: 12276 * 1) The socket buffer locked 12277 * 2) snd_una adjusted to its new position. 12278 * 12279 * Note that (2) implies rack_ack_received has also 12280 * been called and all the sbcut's have been done. 12281 * 12282 * We grab the first mbuf in the socket buffer and 12283 * then go through the front of the sendmap, recalculating 12284 * the stored offset for any sendmap entry that has 12285 * that mbuf. We must use the sb functions to do this 12286 * since its possible an add was done has well as 12287 * the subtraction we may have just completed. This should 12288 * not be a penalty though, since we just referenced the sb 12289 * to go in and trim off the mbufs that we freed (of course 12290 * there will be a penalty for the sendmap references though). 12291 * 12292 * Note also with INVARIANT on, we validate with a KASSERT 12293 * that the first sendmap entry has a soff of 0. 12294 * 12295 */ 12296 struct mbuf *m; 12297 struct rack_sendmap *rsm; 12298 tcp_seq snd_una; 12299 #ifdef INVARIANTS 12300 int first_processed = 0; 12301 #endif 12302 12303 snd_una = rack->rc_tp->snd_una; 12304 SOCKBUF_LOCK_ASSERT(sb); 12305 m = sb->sb_mb; 12306 rsm = tqhash_min(rack->r_ctl.tqh); 12307 if ((rsm == NULL) || (m == NULL)) { 12308 /* Nothing outstanding */ 12309 return; 12310 } 12311 /* The very first RSM's mbuf must point to the head mbuf in the sb */ 12312 KASSERT((rsm->m == m), 12313 ("Rack:%p sb:%p rsm:%p -- first rsm mbuf not aligned to sb", 12314 rack, sb, rsm)); 12315 while (rsm->m && (rsm->m == m)) { 12316 /* one to adjust */ 12317 #ifdef INVARIANTS 12318 struct mbuf *tm; 12319 uint32_t soff; 12320 12321 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); 12322 if ((rsm->orig_m_len != m->m_len) || 12323 (rsm->orig_t_space != M_TRAILINGROOM(m))){ 12324 rack_adjust_orig_mlen(rsm); 12325 } 12326 if (first_processed == 0) { 12327 KASSERT((rsm->soff == 0), 12328 ("Rack:%p rsm:%p -- rsm at head but soff not zero", 12329 rack, rsm)); 12330 first_processed = 1; 12331 } 12332 if ((rsm->soff != soff) || (rsm->m != tm)) { 12333 /* 12334 * This is not a fatal error, we anticipate it 12335 * might happen (the else code), so we count it here 12336 * so that under invariant we can see that it really 12337 * does happen. 
12338 */ 12339 counter_u64_add(rack_adjust_map_bw, 1); 12340 } 12341 rsm->m = tm; 12342 rsm->soff = soff; 12343 if (tm) { 12344 rsm->orig_m_len = rsm->m->m_len; 12345 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 12346 } else { 12347 rsm->orig_m_len = 0; 12348 rsm->orig_t_space = 0; 12349 } 12350 #else 12351 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); 12352 if (rsm->m) { 12353 rsm->orig_m_len = rsm->m->m_len; 12354 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 12355 } else { 12356 rsm->orig_m_len = 0; 12357 rsm->orig_t_space = 0; 12358 } 12359 #endif 12360 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 12361 if (rsm == NULL) 12362 break; 12363 } 12364 } 12365 12366 #ifdef TCP_REQUEST_TRK 12367 static inline void 12368 rack_req_check_for_comp(struct tcp_rack *rack, tcp_seq th_ack) 12369 { 12370 struct tcp_sendfile_track *ent; 12371 int i; 12372 12373 if ((rack->rc_hybrid_mode == 0) && 12374 (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) == 0)) { 12375 /* 12376 * Just do normal completions hybrid pacing is not on 12377 * and CLDL is off as well. 12378 */ 12379 tcp_req_check_for_comp(rack->rc_tp, th_ack); 12380 return; 12381 } 12382 /* 12383 * Originally I was just going to find the th_ack associated 12384 * with an entry. But then I realized a large strech ack could 12385 * in theory ack two or more requests at once. So instead we 12386 * need to find all entries that are completed by th_ack not 12387 * just a single entry and do our logging. 12388 */ 12389 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); 12390 while (ent != NULL) { 12391 /* 12392 * We may be doing hybrid pacing or CLDL and need more details possibly 12393 * so we do it manually instead of calling 12394 * tcp_req_check_for_comp() 12395 */ 12396 uint64_t laa, tim, data, cbw, ftim; 12397 12398 /* Ok this ack frees it */ 12399 rack_log_hybrid(rack, th_ack, 12400 ent, HYBRID_LOG_REQ_COMP, __LINE__, 0); 12401 rack_log_hybrid_sends(rack, ent, __LINE__); 12402 /* calculate the time based on the ack arrival */ 12403 data = ent->end - ent->start; 12404 laa = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 12405 if (ent->flags & TCP_TRK_TRACK_FLG_FSND) { 12406 if (ent->first_send > ent->localtime) 12407 ftim = ent->first_send; 12408 else 12409 ftim = ent->localtime; 12410 } else { 12411 /* TSNH */ 12412 ftim = ent->localtime; 12413 } 12414 if (laa > ent->localtime) 12415 tim = laa - ftim; 12416 else 12417 tim = 0; 12418 cbw = data * HPTS_USEC_IN_SEC; 12419 if (tim > 0) 12420 cbw /= tim; 12421 else 12422 cbw = 0; 12423 rack_log_hybrid_bw(rack, th_ack, cbw, tim, data, HYBRID_LOG_BW_MEASURE, 0, ent, __LINE__); 12424 /* 12425 * Check to see if we are freeing what we are pointing to send wise 12426 * if so be sure to NULL the pointer so we know we are no longer 12427 * set to anything. 
12428 */ 12429 if (ent == rack->r_ctl.rc_last_sft) { 12430 rack->r_ctl.rc_last_sft = NULL; 12431 if (rack->rc_hybrid_mode) { 12432 rack->rc_catch_up = 0; 12433 if (rack->cspr_is_fcc == 0) 12434 rack->r_ctl.bw_rate_cap = 0; 12435 else 12436 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 12437 rack->r_ctl.client_suggested_maxseg = 0; 12438 } 12439 } 12440 /* Generate the log that the tcp_netflix call would have */ 12441 tcp_req_log_req_info(rack->rc_tp, ent, 12442 i, TCP_TRK_REQ_LOG_FREED, 0, 0); 12443 /* Free it and see if there is another one */ 12444 tcp_req_free_a_slot(rack->rc_tp, ent); 12445 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); 12446 } 12447 } 12448 #endif 12449 12450 12451 /* 12452 * Return value of 1, we do not need to call rack_process_data(). 12453 * return value of 0, rack_process_data can be called. 12454 * For ret_val if its 0 the TCP is locked, if its non-zero 12455 * its unlocked and probably unsafe to touch the TCB. 12456 */ 12457 static int 12458 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 12459 struct tcpcb *tp, struct tcpopt *to, 12460 uint32_t tiwin, int32_t tlen, 12461 int32_t * ofia, int32_t thflags, int32_t *ret_val, int32_t orig_tlen) 12462 { 12463 int32_t ourfinisacked = 0; 12464 int32_t nsegs, acked_amount; 12465 int32_t acked; 12466 struct mbuf *mfree; 12467 struct tcp_rack *rack; 12468 int32_t under_pacing = 0; 12469 int32_t post_recovery = 0; 12470 uint32_t p_cwnd; 12471 12472 INP_WLOCK_ASSERT(tptoinpcb(tp)); 12473 12474 rack = (struct tcp_rack *)tp->t_fb_ptr; 12475 if (SEQ_GT(th->th_ack, tp->snd_max)) { 12476 __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val, 12477 &rack->r_ctl.challenge_ack_ts, 12478 &rack->r_ctl.challenge_ack_cnt); 12479 rack->r_wanted_output = 1; 12480 return (1); 12481 } 12482 if (rack->gp_ready && 12483 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 12484 under_pacing = 1; 12485 } 12486 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 12487 int in_rec, dup_ack_struck = 0; 12488 int dsack_seen = 0, sacks_seen = 0; 12489 12490 in_rec = IN_FASTRECOVERY(tp->t_flags); 12491 if (rack->rc_in_persist) { 12492 tp->t_rxtshift = 0; 12493 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 12494 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 12495 } 12496 12497 if ((th->th_ack == tp->snd_una) && 12498 (tiwin == tp->snd_wnd) && 12499 (orig_tlen == 0) && 12500 ((to->to_flags & TOF_SACK) == 0)) { 12501 rack_strike_dupack(rack, th->th_ack); 12502 dup_ack_struck = 1; 12503 } 12504 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), 12505 dup_ack_struck, &dsack_seen, &sacks_seen); 12506 12507 } 12508 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 12509 /* 12510 * Old ack, behind (or duplicate to) the last one rcv'd 12511 * Note: We mark reordering is occuring if its 12512 * less than and we have not closed our window. 12513 */ 12514 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { 12515 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 12516 if (rack->r_ctl.rc_reorder_ts == 0) 12517 rack->r_ctl.rc_reorder_ts = 1; 12518 } 12519 return (0); 12520 } 12521 /* 12522 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 12523 * something we sent. 12524 */ 12525 if (tp->t_flags & TF_NEEDSYN) { 12526 /* 12527 * T/TCP: Connection was half-synchronized, and our SYN has 12528 * been ACK'd (so connection is now fully synchronized). 
Go 12529 * to non-starred state, increment snd_una for ACK of SYN, 12530 * and check if we can do window scaling. 12531 */ 12532 tp->t_flags &= ~TF_NEEDSYN; 12533 tp->snd_una++; 12534 /* Do window scaling? */ 12535 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 12536 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 12537 tp->rcv_scale = tp->request_r_scale; 12538 /* Send window already scaled. */ 12539 } 12540 } 12541 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12542 12543 acked = BYTES_THIS_ACK(tp, th); 12544 if (acked) { 12545 /* 12546 * Any time we move the cum-ack forward clear 12547 * keep-alive tied probe-not-answered. The 12548 * persists clears its own on entry. 12549 */ 12550 rack->probe_not_answered = 0; 12551 } 12552 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 12553 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 12554 /* 12555 * If we just performed our first retransmit, and the ACK arrives 12556 * within our recovery window, then it was a mistake to do the 12557 * retransmit in the first place. Recover our original cwnd and 12558 * ssthresh, and proceed to transmit where we left off. 12559 */ 12560 if ((tp->t_flags & TF_PREVVALID) && 12561 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 12562 tp->t_flags &= ~TF_PREVVALID; 12563 if (tp->t_rxtshift == 1 && 12564 (int)(ticks - tp->t_badrxtwin) < 0) 12565 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 12566 } 12567 if (acked) { 12568 /* assure we are not backed off */ 12569 tp->t_rxtshift = 0; 12570 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 12571 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 12572 rack->rc_tlp_in_progress = 0; 12573 rack->r_ctl.rc_tlp_cnt_out = 0; 12574 /* 12575 * If it is the RXT timer we want to 12576 * stop it, so we can restart a TLP. 12577 */ 12578 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 12579 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12580 #ifdef TCP_REQUEST_TRK 12581 rack_req_check_for_comp(rack, th->th_ack); 12582 #endif 12583 } 12584 /* 12585 * If we have a timestamp reply, update smoothed round trip time. If 12586 * no timestamp is present but transmit timer is running and timed 12587 * sequence number was acked, update smoothed round trip time. Since 12588 * we now have an rtt measurement, cancel the timer backoff (cf., 12589 * Phil Karn's retransmit alg.). Recompute the initial retransmit 12590 * timer. 12591 * 12592 * Some boxes send broken timestamp replies during the SYN+ACK 12593 * phase, ignore timestamps of 0 or we could calculate a huge RTT 12594 * and blow up the retransmit timer. 12595 */ 12596 /* 12597 * If all outstanding data is acked, stop retransmit timer and 12598 * remember to restart (more output or persist). If there is more 12599 * data to be acked, restart retransmit timer, using current 12600 * (possibly backed-off) value. 12601 */ 12602 if (acked == 0) { 12603 if (ofia) 12604 *ofia = ourfinisacked; 12605 return (0); 12606 } 12607 if (IN_RECOVERY(tp->t_flags)) { 12608 if (SEQ_LT(th->th_ack, tp->snd_recover) && 12609 (SEQ_LT(th->th_ack, tp->snd_max))) { 12610 tcp_rack_partialack(tp); 12611 } else { 12612 rack_post_recovery(tp, th->th_ack); 12613 post_recovery = 1; 12614 /* 12615 * Grab the segsiz, multiply by 2 and add the snd_cwnd 12616 * that is the max the CC should add if we are exiting 12617 * recovery and doing a late add. 
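 * In other words p_cwnd bounds how far the CC module may push
 * snd_cwnd on this ack: at most two segments (the lesser of
 * the MSS and the minimum pacing size) beyond the cwnd we
 * exited recovery with.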
12618 */ 12619 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 12620 p_cwnd <<= 1; 12621 p_cwnd += tp->snd_cwnd; 12622 } 12623 } else if ((rack->rto_from_rec == 1) && 12624 SEQ_GEQ(th->th_ack, tp->snd_recover)) { 12625 /* 12626 * We were in recovery, hit a rxt timeout 12627 * and never re-entered recovery. The timeout(s) 12628 * made up all the lost data. In such a case 12629 * we need to clear the rto_from_rec flag. 12630 */ 12631 rack->rto_from_rec = 0; 12632 } 12633 /* 12634 * Let the congestion control algorithm update congestion control 12635 * related information. This typically means increasing the 12636 * congestion window. 12637 */ 12638 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, post_recovery); 12639 if (post_recovery && 12640 (tp->snd_cwnd > p_cwnd)) { 12641 /* Must be non-newreno (cubic) getting too ahead of itself */ 12642 tp->snd_cwnd = p_cwnd; 12643 } 12644 SOCKBUF_LOCK(&so->so_snd); 12645 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 12646 tp->snd_wnd -= acked_amount; 12647 mfree = sbcut_locked(&so->so_snd, acked_amount); 12648 if ((sbused(&so->so_snd) == 0) && 12649 (acked > acked_amount) && 12650 (tp->t_state >= TCPS_FIN_WAIT_1) && 12651 (tp->t_flags & TF_SENTFIN)) { 12652 /* 12653 * We must be sure our fin 12654 * was sent and acked (we can be 12655 * in FIN_WAIT_1 without having 12656 * sent the fin). 12657 */ 12658 ourfinisacked = 1; 12659 } 12660 tp->snd_una = th->th_ack; 12661 /* wakeups? */ 12662 if (acked_amount && sbavail(&so->so_snd)) 12663 rack_adjust_sendmap_head(rack, &so->so_snd); 12664 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 12665 /* NB: sowwakeup_locked() does an implicit unlock. */ 12666 sowwakeup_locked(so); 12667 m_freem(mfree); 12668 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 12669 tp->snd_recover = tp->snd_una; 12670 12671 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { 12672 tp->snd_nxt = tp->snd_max; 12673 } 12674 if (under_pacing && 12675 (rack->use_fixed_rate == 0) && 12676 (rack->in_probe_rtt == 0) && 12677 rack->rc_gp_dyn_mul && 12678 rack->rc_always_pace) { 12679 /* Check if we are dragging bottom */ 12680 rack_check_bottom_drag(tp, rack, so); 12681 } 12682 if (tp->snd_una == tp->snd_max) { 12683 /* Nothing left outstanding */ 12684 tp->t_flags &= ~TF_PREVVALID; 12685 rack->r_ctl.idle_snd_una = tp->snd_una; 12686 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 12687 if (rack->r_ctl.rc_went_idle_time == 0) 12688 rack->r_ctl.rc_went_idle_time = 1; 12689 rack->r_ctl.retran_during_recovery = 0; 12690 rack->r_ctl.dsack_byte_cnt = 0; 12691 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 12692 if (sbavail(&tptosocket(tp)->so_snd) == 0) 12693 tp->t_acktime = 0; 12694 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12695 rack->rc_suspicious = 0; 12696 /* Set need output so persist might get set */ 12697 rack->r_wanted_output = 1; 12698 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 12699 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 12700 (sbavail(&so->so_snd) == 0) && 12701 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 12702 /* 12703 * The socket was gone and the 12704 * peer sent data (now or in the past), time to 12705 * reset him. 
12706 */ 12707 *ret_val = 1; 12708 /* tcp_close will kill the inp pre-log the Reset */ 12709 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 12710 tp = tcp_close(tp); 12711 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 12712 return (1); 12713 } 12714 } 12715 if (ofia) 12716 *ofia = ourfinisacked; 12717 return (0); 12718 } 12719 12720 12721 static void 12722 rack_log_collapse(struct tcp_rack *rack, uint32_t cnt, uint32_t split, uint32_t out, int line, 12723 int dir, uint32_t flags, struct rack_sendmap *rsm) 12724 { 12725 if (tcp_bblogging_on(rack->rc_tp)) { 12726 union tcp_log_stackspecific log; 12727 struct timeval tv; 12728 12729 memset(&log, 0, sizeof(log)); 12730 log.u_bbr.flex1 = cnt; 12731 log.u_bbr.flex2 = split; 12732 log.u_bbr.flex3 = out; 12733 log.u_bbr.flex4 = line; 12734 log.u_bbr.flex5 = rack->r_must_retran; 12735 log.u_bbr.flex6 = flags; 12736 log.u_bbr.flex7 = rack->rc_has_collapsed; 12737 log.u_bbr.flex8 = dir; /* 12738 * 1 is collapsed, 0 is uncollapsed, 12739 * 2 is log of a rsm being marked, 3 is a split. 12740 */ 12741 if (rsm == NULL) 12742 log.u_bbr.rttProp = 0; 12743 else 12744 log.u_bbr.rttProp = (uintptr_t)rsm; 12745 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 12746 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 12747 TCP_LOG_EVENTP(rack->rc_tp, NULL, 12748 &rack->rc_inp->inp_socket->so_rcv, 12749 &rack->rc_inp->inp_socket->so_snd, 12750 TCP_RACK_LOG_COLLAPSE, 0, 12751 0, &log, false, &tv); 12752 } 12753 } 12754 12755 static void 12756 rack_collapsed_window(struct tcp_rack *rack, uint32_t out, tcp_seq th_ack, int line) 12757 { 12758 /* 12759 * Here all we do is mark the collapsed point and set the flag. 12760 * This may happen again and again, but there is no 12761 * sense splitting our map until we know where the 12762 * peer finally lands in the collapse. 12763 */ 12764 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); 12765 if ((rack->rc_has_collapsed == 0) || 12766 (rack->r_ctl.last_collapse_point != (th_ack + rack->rc_tp->snd_wnd))) 12767 counter_u64_add(rack_collapsed_win_seen, 1); 12768 rack->r_ctl.last_collapse_point = th_ack + rack->rc_tp->snd_wnd; 12769 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max; 12770 rack->rc_has_collapsed = 1; 12771 rack->r_collapse_point_valid = 1; 12772 rack_log_collapse(rack, 0, th_ack, rack->r_ctl.last_collapse_point, line, 1, 0, NULL); 12773 } 12774 12775 static void 12776 rack_un_collapse_window(struct tcp_rack *rack, int line) 12777 { 12778 struct rack_sendmap *nrsm, *rsm; 12779 int cnt = 0, split = 0; 12780 int insret __diagused; 12781 12782 12783 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); 12784 rack->rc_has_collapsed = 0; 12785 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); 12786 if (rsm == NULL) { 12787 /* Nothing to do maybe the peer ack'ed it all */ 12788 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 12789 return; 12790 } 12791 /* Now do we need to split this one? */ 12792 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) { 12793 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 12794 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm); 12795 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 12796 if (nrsm == NULL) { 12797 /* We can't get a rsm, mark all? 
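 * Fall back to marking from this rsm onward without splitting.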
*/ 12798 nrsm = rsm; 12799 goto no_split; 12800 } 12801 /* Clone it */ 12802 split = 1; 12803 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point); 12804 #ifndef INVARIANTS 12805 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 12806 #else 12807 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 12808 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 12809 nrsm, insret, rack, rsm); 12810 } 12811 #endif 12812 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 12813 rack->r_ctl.last_collapse_point, __LINE__); 12814 if (rsm->r_in_tmap) { 12815 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 12816 nrsm->r_in_tmap = 1; 12817 } 12818 /* 12819 * Set in the new RSM as the 12820 * collapsed starting point 12821 */ 12822 rsm = nrsm; 12823 } 12824 12825 no_split: 12826 TQHASH_FOREACH_FROM(nrsm, rack->r_ctl.tqh, rsm) { 12827 cnt++; 12828 nrsm->r_flags |= RACK_RWND_COLLAPSED; 12829 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm); 12830 cnt++; 12831 } 12832 if (cnt) { 12833 counter_u64_add(rack_collapsed_win, 1); 12834 } 12835 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 12836 } 12837 12838 static void 12839 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 12840 int32_t tlen, int32_t tfo_syn) 12841 { 12842 if (DELAY_ACK(tp, tlen) || tfo_syn) { 12843 rack_timer_cancel(tp, rack, 12844 rack->r_ctl.rc_rcvtime, __LINE__); 12845 tp->t_flags |= TF_DELACK; 12846 } else { 12847 rack->r_wanted_output = 1; 12848 tp->t_flags |= TF_ACKNOW; 12849 } 12850 } 12851 12852 static void 12853 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack) 12854 { 12855 /* 12856 * If fast output is in progress, lets validate that 12857 * the new window did not shrink on us and make it 12858 * so fast output should end. 12859 */ 12860 if (rack->r_fast_output) { 12861 uint32_t out; 12862 12863 /* 12864 * Calculate what we will send if left as is 12865 * and compare that to our send window. 12866 */ 12867 out = ctf_outstanding(tp); 12868 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 12869 /* ok we have an issue */ 12870 if (out >= tp->snd_wnd) { 12871 /* Turn off fast output the window is met or collapsed */ 12872 rack->r_fast_output = 0; 12873 } else { 12874 /* we have some room left */ 12875 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 12876 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 12877 /* If not at least 1 full segment never mind */ 12878 rack->r_fast_output = 0; 12879 } 12880 } 12881 } 12882 } 12883 } 12884 12885 /* 12886 * Return value of 1, the TCB is unlocked and most 12887 * likely gone, return value of 0, the TCP is still 12888 * locked. 12889 */ 12890 static int 12891 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 12892 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 12893 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 12894 { 12895 /* 12896 * Update window information. Don't look at window if no ACK: TAC's 12897 * send garbage on first SYN. 
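 * The tests below are the classic snd_wl1/snd_wl2 checks: a
 * window update is only taken from a segment that is at least
 * as recent as the one that last updated the window.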
12898 */ 12899 int32_t nsegs; 12900 int32_t tfo_syn; 12901 struct tcp_rack *rack; 12902 12903 INP_WLOCK_ASSERT(tptoinpcb(tp)); 12904 12905 rack = (struct tcp_rack *)tp->t_fb_ptr; 12906 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12907 if ((thflags & TH_ACK) && 12908 (SEQ_LT(tp->snd_wl1, th->th_seq) || 12909 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 12910 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 12911 /* keep track of pure window updates */ 12912 if (tlen == 0 && 12913 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 12914 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 12915 tp->snd_wnd = tiwin; 12916 rack_validate_fo_sendwin_up(tp, rack); 12917 tp->snd_wl1 = th->th_seq; 12918 tp->snd_wl2 = th->th_ack; 12919 if (tp->snd_wnd > tp->max_sndwnd) 12920 tp->max_sndwnd = tp->snd_wnd; 12921 rack->r_wanted_output = 1; 12922 } else if (thflags & TH_ACK) { 12923 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 12924 tp->snd_wnd = tiwin; 12925 rack_validate_fo_sendwin_up(tp, rack); 12926 tp->snd_wl1 = th->th_seq; 12927 tp->snd_wl2 = th->th_ack; 12928 } 12929 } 12930 if (tp->snd_wnd < ctf_outstanding(tp)) 12931 /* The peer collapsed the window */ 12932 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); 12933 else if (rack->rc_has_collapsed) 12934 rack_un_collapse_window(rack, __LINE__); 12935 if ((rack->r_collapse_point_valid) && 12936 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point))) 12937 rack->r_collapse_point_valid = 0; 12938 /* Was persist timer active and now we have window space? */ 12939 if ((rack->rc_in_persist != 0) && 12940 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 12941 rack->r_ctl.rc_pace_min_segs))) { 12942 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 12943 tp->snd_nxt = tp->snd_max; 12944 /* Make sure we output to start the timer */ 12945 rack->r_wanted_output = 1; 12946 } 12947 /* Do we enter persists? */ 12948 if ((rack->rc_in_persist == 0) && 12949 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 12950 TCPS_HAVEESTABLISHED(tp->t_state) && 12951 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 12952 sbavail(&tptosocket(tp)->so_snd) && 12953 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 12954 /* 12955 * Here the rwnd is less than 12956 * the pacing size, we are established, 12957 * nothing is outstanding, and there is 12958 * data to send. Enter persists. 12959 */ 12960 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); 12961 } 12962 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 12963 m_freem(m); 12964 return (0); 12965 } 12966 /* 12967 * don't process the URG bit, ignore them drag 12968 * along the up. 12969 */ 12970 tp->rcv_up = tp->rcv_nxt; 12971 12972 /* 12973 * Process the segment text, merging it into the TCP sequencing 12974 * queue, and arranging for acknowledgment of receipt if necessary. 12975 * This process logically involves adjusting tp->rcv_wnd as data is 12976 * presented to the user (this happens in tcp_usrreq.c, case 12977 * PRU_RCVD). If a FIN has already been received on this connection 12978 * then we just ignore the text. 
12979 */ 12980 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 12981 (tp->t_flags & TF_FASTOPEN)); 12982 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 12983 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 12984 tcp_seq save_start = th->th_seq; 12985 tcp_seq save_rnxt = tp->rcv_nxt; 12986 int save_tlen = tlen; 12987 12988 m_adj(m, drop_hdrlen); /* delayed header drop */ 12989 /* 12990 * Insert segment which includes th into TCP reassembly 12991 * queue with control block tp. Set thflags to whether 12992 * reassembly now includes a segment with FIN. This handles 12993 * the common case inline (segment is the next to be 12994 * received on an established connection, and the queue is 12995 * empty), avoiding linkage into and removal from the queue 12996 * and repetition of various conversions. Set DELACK for 12997 * segments received in order, but ack immediately when 12998 * segments are out of order (so fast retransmit can work). 12999 */ 13000 if (th->th_seq == tp->rcv_nxt && 13001 SEGQ_EMPTY(tp) && 13002 (TCPS_HAVEESTABLISHED(tp->t_state) || 13003 tfo_syn)) { 13004 #ifdef NETFLIX_SB_LIMITS 13005 u_int mcnt, appended; 13006 13007 if (so->so_rcv.sb_shlim) { 13008 mcnt = m_memcnt(m); 13009 appended = 0; 13010 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 13011 CFO_NOSLEEP, NULL) == false) { 13012 counter_u64_add(tcp_sb_shlim_fails, 1); 13013 m_freem(m); 13014 return (0); 13015 } 13016 } 13017 #endif 13018 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); 13019 tp->rcv_nxt += tlen; 13020 if (tlen && 13021 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 13022 (tp->t_fbyte_in == 0)) { 13023 tp->t_fbyte_in = ticks; 13024 if (tp->t_fbyte_in == 0) 13025 tp->t_fbyte_in = 1; 13026 if (tp->t_fbyte_out && tp->t_fbyte_in) 13027 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 13028 } 13029 thflags = tcp_get_flags(th) & TH_FIN; 13030 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 13031 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 13032 SOCKBUF_LOCK(&so->so_rcv); 13033 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13034 m_freem(m); 13035 } else { 13036 int32_t newsize; 13037 13038 if (tlen > 0) { 13039 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 13040 if (newsize) 13041 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 13042 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 13043 } 13044 #ifdef NETFLIX_SB_LIMITS 13045 appended = 13046 #endif 13047 sbappendstream_locked(&so->so_rcv, m, 0); 13048 } 13049 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 13050 /* NB: sorwakeup_locked() does an implicit unlock. */ 13051 sorwakeup_locked(so); 13052 #ifdef NETFLIX_SB_LIMITS 13053 if (so->so_rcv.sb_shlim && appended != mcnt) 13054 counter_fo_release(so->so_rcv.sb_shlim, 13055 mcnt - appended); 13056 #endif 13057 } else { 13058 /* 13059 * XXX: Due to the header drop above "th" is 13060 * theoretically invalid by now. Fortunately 13061 * m_adj() doesn't actually frees any mbufs when 13062 * trimming from the head. 13063 */ 13064 tcp_seq temp = save_start; 13065 13066 thflags = tcp_reass(tp, th, &temp, &tlen, m); 13067 tp->t_flags |= TF_ACKNOW; 13068 if (tp->t_flags & TF_WAKESOR) { 13069 tp->t_flags &= ~TF_WAKESOR; 13070 /* NB: sorwakeup_locked() does an implicit unlock. */ 13071 sorwakeup_locked(so); 13072 } 13073 } 13074 if ((tp->t_flags & TF_SACK_PERMIT) && 13075 (save_tlen > 0) && 13076 TCPS_HAVEESTABLISHED(tp->t_state)) { 13077 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 13078 /* 13079 * DSACK actually handled in the fastpath 13080 * above. 
13081 */ 13082 tcp_update_sack_list(tp, save_start, 13083 save_start + save_tlen); 13084 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 13085 if ((tp->rcv_numsacks >= 1) && 13086 (tp->sackblks[0].end == save_start)) { 13087 /* 13088 * Partial overlap, recorded at todrop 13089 * above. 13090 */ 13091 tcp_update_sack_list(tp, 13092 tp->sackblks[0].start, 13093 tp->sackblks[0].end); 13094 } else { 13095 tcp_update_dsack_list(tp, save_start, 13096 save_start + save_tlen); 13097 } 13098 } else if (tlen >= save_tlen) { 13099 /* Update of sackblks. */ 13100 tcp_update_dsack_list(tp, save_start, 13101 save_start + save_tlen); 13102 } else if (tlen > 0) { 13103 tcp_update_dsack_list(tp, save_start, 13104 save_start + tlen); 13105 } 13106 } 13107 } else { 13108 m_freem(m); 13109 thflags &= ~TH_FIN; 13110 } 13111 13112 /* 13113 * If FIN is received ACK the FIN and let the user know that the 13114 * connection is closing. 13115 */ 13116 if (thflags & TH_FIN) { 13117 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 13118 /* The socket upcall is handled by socantrcvmore. */ 13119 socantrcvmore(so); 13120 /* 13121 * If connection is half-synchronized (ie NEEDSYN 13122 * flag on) then delay ACK, so it may be piggybacked 13123 * when SYN is sent. Otherwise, since we received a 13124 * FIN then no more input can be expected, send ACK 13125 * now. 13126 */ 13127 if (tp->t_flags & TF_NEEDSYN) { 13128 rack_timer_cancel(tp, rack, 13129 rack->r_ctl.rc_rcvtime, __LINE__); 13130 tp->t_flags |= TF_DELACK; 13131 } else { 13132 tp->t_flags |= TF_ACKNOW; 13133 } 13134 tp->rcv_nxt++; 13135 } 13136 switch (tp->t_state) { 13137 /* 13138 * In SYN_RECEIVED and ESTABLISHED STATES enter the 13139 * CLOSE_WAIT state. 13140 */ 13141 case TCPS_SYN_RECEIVED: 13142 tp->t_starttime = ticks; 13143 /* FALLTHROUGH */ 13144 case TCPS_ESTABLISHED: 13145 rack_timer_cancel(tp, rack, 13146 rack->r_ctl.rc_rcvtime, __LINE__); 13147 tcp_state_change(tp, TCPS_CLOSE_WAIT); 13148 break; 13149 13150 /* 13151 * If still in FIN_WAIT_1 STATE FIN has not been 13152 * acked so enter the CLOSING state. 13153 */ 13154 case TCPS_FIN_WAIT_1: 13155 rack_timer_cancel(tp, rack, 13156 rack->r_ctl.rc_rcvtime, __LINE__); 13157 tcp_state_change(tp, TCPS_CLOSING); 13158 break; 13159 13160 /* 13161 * In FIN_WAIT_2 state enter the TIME_WAIT state, 13162 * starting the time-wait timer, turning off the 13163 * other standard timers. 13164 */ 13165 case TCPS_FIN_WAIT_2: 13166 rack_timer_cancel(tp, rack, 13167 rack->r_ctl.rc_rcvtime, __LINE__); 13168 tcp_twstart(tp); 13169 return (1); 13170 } 13171 } 13172 /* 13173 * Return any desired output. 13174 */ 13175 if ((tp->t_flags & TF_ACKNOW) || 13176 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 13177 rack->r_wanted_output = 1; 13178 } 13179 return (0); 13180 } 13181 13182 /* 13183 * Here nothing is really faster, its just that we 13184 * have broken out the fast-data path also just like 13185 * the fast-ack. 13186 */ 13187 static int 13188 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 13189 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13190 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 13191 { 13192 int32_t nsegs; 13193 int32_t newsize = 0; /* automatic sockbuf scaling */ 13194 struct tcp_rack *rack; 13195 #ifdef NETFLIX_SB_LIMITS 13196 u_int mcnt, appended; 13197 #endif 13198 13199 /* 13200 * If last ACK falls within this segment's sequence numbers, record 13201 * the timestamp. 
NOTE that the test is modified according to the 13202 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 13203 */ 13204 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 13205 return (0); 13206 } 13207 if (tiwin && tiwin != tp->snd_wnd) { 13208 return (0); 13209 } 13210 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 13211 return (0); 13212 } 13213 if (__predict_false((to->to_flags & TOF_TS) && 13214 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 13215 return (0); 13216 } 13217 if (__predict_false((th->th_ack != tp->snd_una))) { 13218 return (0); 13219 } 13220 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 13221 return (0); 13222 } 13223 if ((to->to_flags & TOF_TS) != 0 && 13224 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 13225 tp->ts_recent_age = tcp_ts_getticks(); 13226 tp->ts_recent = to->to_tsval; 13227 } 13228 rack = (struct tcp_rack *)tp->t_fb_ptr; 13229 /* 13230 * This is a pure, in-sequence data packet with nothing on the 13231 * reassembly queue and we have enough buffer space to take it. 13232 */ 13233 nsegs = max(1, m->m_pkthdr.lro_nsegs); 13234 13235 #ifdef NETFLIX_SB_LIMITS 13236 if (so->so_rcv.sb_shlim) { 13237 mcnt = m_memcnt(m); 13238 appended = 0; 13239 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 13240 CFO_NOSLEEP, NULL) == false) { 13241 counter_u64_add(tcp_sb_shlim_fails, 1); 13242 m_freem(m); 13243 return (1); 13244 } 13245 } 13246 #endif 13247 /* Clean receiver SACK report if present */ 13248 if (tp->rcv_numsacks) 13249 tcp_clean_sackreport(tp); 13250 KMOD_TCPSTAT_INC(tcps_preddat); 13251 tp->rcv_nxt += tlen; 13252 if (tlen && 13253 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 13254 (tp->t_fbyte_in == 0)) { 13255 tp->t_fbyte_in = ticks; 13256 if (tp->t_fbyte_in == 0) 13257 tp->t_fbyte_in = 1; 13258 if (tp->t_fbyte_out && tp->t_fbyte_in) 13259 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 13260 } 13261 /* 13262 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 13263 */ 13264 tp->snd_wl1 = th->th_seq; 13265 /* 13266 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 13267 */ 13268 tp->rcv_up = tp->rcv_nxt; 13269 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 13270 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 13271 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 13272 13273 /* Add data to socket buffer. */ 13274 SOCKBUF_LOCK(&so->so_rcv); 13275 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13276 m_freem(m); 13277 } else { 13278 /* 13279 * Set new socket buffer size. Give up when limit is 13280 * reached. 13281 */ 13282 if (newsize) 13283 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 13284 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 13285 m_adj(m, drop_hdrlen); /* delayed header drop */ 13286 #ifdef NETFLIX_SB_LIMITS 13287 appended = 13288 #endif 13289 sbappendstream_locked(&so->so_rcv, m, 0); 13290 ctf_calc_rwin(so, tp); 13291 } 13292 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 13293 /* NB: sorwakeup_locked() does an implicit unlock. */ 13294 sorwakeup_locked(so); 13295 #ifdef NETFLIX_SB_LIMITS 13296 if (so->so_rcv.sb_shlim && mcnt != appended) 13297 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 13298 #endif 13299 rack_handle_delayed_ack(tp, rack, tlen, 0); 13300 if (tp->snd_una == tp->snd_max) 13301 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 13302 return (1); 13303 } 13304 13305 /* 13306 * This subfunction is used to try to highly optimize the 13307 * fast path. We again allow window updates that are 13308 * in sequence to remain in the fast-path. We also add 13309 * in the __predict's to attempt to help the compiler. 
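 * On top of the caller's screening, the ack itself must qualify: it
 * has to be strictly newer than snd_una but not beyond snd_max, the
 * advertised window must be non-zero, no SYN or FIN may be pending,
 * any timestamp must not be older than ts_recent, the connection
 * must not be in recovery, and the scoreboard must hold no SACKed
 * data.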
13310 * Note that if we return a 0, then we can *not* process 13311 * it and the caller should push the packet into the 13312 * slow-path. 13313 */ 13314 static int 13315 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 13316 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13317 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 13318 { 13319 int32_t acked; 13320 int32_t nsegs; 13321 int32_t under_pacing = 0; 13322 struct tcp_rack *rack; 13323 13324 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 13325 /* Old ack, behind (or duplicate to) the last one rcv'd */ 13326 return (0); 13327 } 13328 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 13329 /* Above what we have sent? */ 13330 return (0); 13331 } 13332 if (__predict_false(tiwin == 0)) { 13333 /* zero window */ 13334 return (0); 13335 } 13336 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 13337 /* We need a SYN or a FIN, unlikely.. */ 13338 return (0); 13339 } 13340 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 13341 /* Timestamp is behind .. old ack with seq wrap? */ 13342 return (0); 13343 } 13344 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 13345 /* Still recovering */ 13346 return (0); 13347 } 13348 rack = (struct tcp_rack *)tp->t_fb_ptr; 13349 if (rack->r_ctl.rc_sacked) { 13350 /* We have sack holes on our scoreboard */ 13351 return (0); 13352 } 13353 /* Ok if we reach here, we can process a fast-ack */ 13354 if (rack->gp_ready && 13355 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 13356 under_pacing = 1; 13357 } 13358 nsegs = max(1, m->m_pkthdr.lro_nsegs); 13359 rack_log_ack(tp, to, th, 0, 0, NULL, NULL); 13360 /* Did the window get updated? */ 13361 if (tiwin != tp->snd_wnd) { 13362 tp->snd_wnd = tiwin; 13363 rack_validate_fo_sendwin_up(tp, rack); 13364 tp->snd_wl1 = th->th_seq; 13365 if (tp->snd_wnd > tp->max_sndwnd) 13366 tp->max_sndwnd = tp->snd_wnd; 13367 } 13368 /* Do we exit persists? */ 13369 if ((rack->rc_in_persist != 0) && 13370 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 13371 rack->r_ctl.rc_pace_min_segs))) { 13372 rack_exit_persist(tp, rack, cts); 13373 } 13374 /* Do we enter persists? */ 13375 if ((rack->rc_in_persist == 0) && 13376 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 13377 TCPS_HAVEESTABLISHED(tp->t_state) && 13378 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 13379 sbavail(&tptosocket(tp)->so_snd) && 13380 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 13381 /* 13382 * Here the rwnd is less than 13383 * the pacing size, we are established, 13384 * nothing is outstanding, and there is 13385 * data to send. Enter persists. 13386 */ 13387 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, th->th_ack); 13388 } 13389 /* 13390 * If last ACK falls within this segment's sequence numbers, record 13391 * the timestamp. NOTE that the test is modified according to the 13392 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 13393 */ 13394 if ((to->to_flags & TOF_TS) != 0 && 13395 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 13396 tp->ts_recent_age = tcp_ts_getticks(); 13397 tp->ts_recent = to->to_tsval; 13398 } 13399 /* 13400 * This is a pure ack for outstanding data. 13401 */ 13402 KMOD_TCPSTAT_INC(tcps_predack); 13403 13404 /* 13405 * "bad retransmit" recovery. 
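 *
 * If the ack arrives while ticks is still inside t_badrxtwin after a
 * single retransmission timeout, the RTO is judged spurious (the
 * original transmission was merely delayed, not lost) and CC_RTO_ERR
 * is signalled so the congestion response can be unwound.  This path
 * covers the no-timestamp case; with timestamps the equivalent check
 * is keyed off the echoed TSecr during regular ack processing.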
13406 */ 13407 if ((tp->t_flags & TF_PREVVALID) && 13408 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 13409 tp->t_flags &= ~TF_PREVVALID; 13410 if (tp->t_rxtshift == 1 && 13411 (int)(ticks - tp->t_badrxtwin) < 0) 13412 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 13413 } 13414 /* 13415 * Recalculate the transmit timer / rtt. 13416 * 13417 * Some boxes send broken timestamp replies during the SYN+ACK 13418 * phase, ignore timestamps of 0 or we could calculate a huge RTT 13419 * and blow up the retransmit timer. 13420 */ 13421 acked = BYTES_THIS_ACK(tp, th); 13422 13423 #ifdef TCP_HHOOK 13424 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 13425 hhook_run_tcp_est_in(tp, th, to); 13426 #endif 13427 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 13428 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 13429 if (acked) { 13430 struct mbuf *mfree; 13431 13432 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 13433 SOCKBUF_LOCK(&so->so_snd); 13434 mfree = sbcut_locked(&so->so_snd, acked); 13435 tp->snd_una = th->th_ack; 13436 /* Note we want to hold the sb lock through the sendmap adjust */ 13437 rack_adjust_sendmap_head(rack, &so->so_snd); 13438 /* Wake up the socket if we have room to write more */ 13439 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 13440 sowwakeup_locked(so); 13441 m_freem(mfree); 13442 tp->t_rxtshift = 0; 13443 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 13444 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 13445 rack->rc_tlp_in_progress = 0; 13446 rack->r_ctl.rc_tlp_cnt_out = 0; 13447 /* 13448 * If it is the RXT timer we want to 13449 * stop it, so we can restart a TLP. 13450 */ 13451 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 13452 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13453 13454 #ifdef TCP_REQUEST_TRK 13455 rack_req_check_for_comp(rack, th->th_ack); 13456 #endif 13457 } 13458 /* 13459 * Let the congestion control algorithm update congestion control 13460 * related information. This typically means increasing the 13461 * congestion window. 13462 */ 13463 if (tp->snd_wnd < ctf_outstanding(tp)) { 13464 /* The peer collapsed the window */ 13465 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); 13466 } else if (rack->rc_has_collapsed) 13467 rack_un_collapse_window(rack, __LINE__); 13468 if ((rack->r_collapse_point_valid) && 13469 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point))) 13470 rack->r_collapse_point_valid = 0; 13471 /* 13472 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 13473 */ 13474 tp->snd_wl2 = th->th_ack; 13475 tp->t_dupacks = 0; 13476 m_freem(m); 13477 /* ND6_HINT(tp); *//* Some progress has been made. */ 13478 13479 /* 13480 * If all outstanding data are acked, stop retransmit timer, 13481 * otherwise restart timer using current (possibly backed-off) 13482 * value. If process is waiting for space, wakeup/selwakeup/signal. 13483 * If data are ready to send, let tcp_output decide between more 13484 * output or persist. 
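 *
 * In addition, if this ack arrived while we were actively pacing
 * under the dynamic goodput multiplier (not fixed-rate, not in
 * probe-RTT), rack_check_bottom_drag() is consulted to detect the
 * connection dragging along the bottom (running out of queued data),
 * and once snd_una catches up with snd_max the idle bookkeeping
 * below records when the connection went quiet and cancels any
 * pending timer.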
13485 */ 13486 if (under_pacing && 13487 (rack->use_fixed_rate == 0) && 13488 (rack->in_probe_rtt == 0) && 13489 rack->rc_gp_dyn_mul && 13490 rack->rc_always_pace) { 13491 /* Check if we are dragging bottom */ 13492 rack_check_bottom_drag(tp, rack, so); 13493 } 13494 if (tp->snd_una == tp->snd_max) { 13495 tp->t_flags &= ~TF_PREVVALID; 13496 rack->r_ctl.retran_during_recovery = 0; 13497 rack->rc_suspicious = 0; 13498 rack->r_ctl.dsack_byte_cnt = 0; 13499 rack->r_ctl.idle_snd_una = tp->snd_una; 13500 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 13501 if (rack->r_ctl.rc_went_idle_time == 0) 13502 rack->r_ctl.rc_went_idle_time = 1; 13503 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 13504 if (sbavail(&tptosocket(tp)->so_snd) == 0) 13505 tp->t_acktime = 0; 13506 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13507 } 13508 if (acked && rack->r_fast_output) 13509 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 13510 if (sbavail(&so->so_snd)) { 13511 rack->r_wanted_output = 1; 13512 } 13513 return (1); 13514 } 13515 13516 /* 13517 * Return value of 1, the TCB is unlocked and most 13518 * likely gone, return value of 0, the TCP is still 13519 * locked. 13520 */ 13521 static int 13522 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 13523 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13524 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13525 { 13526 int32_t ret_val = 0; 13527 int32_t orig_tlen = tlen; 13528 int32_t todrop; 13529 int32_t ourfinisacked = 0; 13530 struct tcp_rack *rack; 13531 13532 INP_WLOCK_ASSERT(tptoinpcb(tp)); 13533 13534 ctf_calc_rwin(so, tp); 13535 /* 13536 * If the state is SYN_SENT: if seg contains an ACK, but not for our 13537 * SYN, drop the input. if seg contains a RST, then drop the 13538 * connection. if seg does not contain SYN, then drop it. Otherwise 13539 * this is an acceptable SYN segment initialize tp->rcv_nxt and 13540 * tp->irs if seg contains ack then advance tp->snd_una if seg 13541 * contains an ECE and ECN support is enabled, the stream is ECN 13542 * capable. if SYN has been acked change to ESTABLISHED else 13543 * SYN_RCVD state arrange for segment to be acked (eventually) 13544 * continue processing rest of data/controls. 13545 */ 13546 if ((thflags & TH_ACK) && 13547 (SEQ_LEQ(th->th_ack, tp->iss) || 13548 SEQ_GT(th->th_ack, tp->snd_max))) { 13549 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13550 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13551 return (1); 13552 } 13553 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 13554 TCP_PROBE5(connect__refused, NULL, tp, 13555 mtod(m, const char *), tp, th); 13556 tp = tcp_drop(tp, ECONNREFUSED); 13557 ctf_do_drop(m, tp); 13558 return (1); 13559 } 13560 if (thflags & TH_RST) { 13561 ctf_do_drop(m, tp); 13562 return (1); 13563 } 13564 if (!(thflags & TH_SYN)) { 13565 ctf_do_drop(m, tp); 13566 return (1); 13567 } 13568 tp->irs = th->th_seq; 13569 tcp_rcvseqinit(tp); 13570 rack = (struct tcp_rack *)tp->t_fb_ptr; 13571 if (thflags & TH_ACK) { 13572 int tfo_partial = 0; 13573 13574 KMOD_TCPSTAT_INC(tcps_connects); 13575 soisconnected(so); 13576 #ifdef MAC 13577 mac_socketpeer_set_from_mbuf(m, so); 13578 #endif 13579 /* Do window scaling on this connection? 
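 * Window scaling is applied only when both TF_REQ_SCALE (we offered
 * the option in our SYN) and TF_RCVD_SCALE (the peer's SYN|ACK
 * carried it) are set; only then does rcv_scale move from 0 to the
 * request_r_scale we asked for.  For example, a negotiated
 * request_r_scale of 7 lets us advertise receive windows up to
 * TCP_MAXWIN << 7, roughly 8 MB.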
*/ 13580 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 13581 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 13582 tp->rcv_scale = tp->request_r_scale; 13583 } 13584 tp->rcv_adv += min(tp->rcv_wnd, 13585 TCP_MAXWIN << tp->rcv_scale); 13586 /* 13587 * If not all the data that was sent in the TFO SYN 13588 * has been acked, resend the remainder right away. 13589 */ 13590 if ((tp->t_flags & TF_FASTOPEN) && 13591 (tp->snd_una != tp->snd_max)) { 13592 /* Was it a partial ack? */ 13593 if (SEQ_LT(th->th_ack, tp->snd_max)) 13594 tfo_partial = 1; 13595 } 13596 /* 13597 * If there's data, delay ACK; if there's also a FIN ACKNOW 13598 * will be turned on later. 13599 */ 13600 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 13601 rack_timer_cancel(tp, rack, 13602 rack->r_ctl.rc_rcvtime, __LINE__); 13603 tp->t_flags |= TF_DELACK; 13604 } else { 13605 rack->r_wanted_output = 1; 13606 tp->t_flags |= TF_ACKNOW; 13607 } 13608 13609 tcp_ecn_input_syn_sent(tp, thflags, iptos); 13610 13611 if (SEQ_GT(th->th_ack, tp->snd_una)) { 13612 /* 13613 * We advance snd_una for the 13614 * fast open case. If th_ack is 13615 * acknowledging data beyond 13616 * snd_una we can't just call 13617 * ack-processing since the 13618 * data stream in our send-map 13619 * will start at snd_una + 1 (one 13620 * beyond the SYN). If its just 13621 * equal we don't need to do that 13622 * and there is no send_map. 13623 */ 13624 tp->snd_una++; 13625 if (tfo_partial && (SEQ_GT(tp->snd_max, tp->snd_una))) { 13626 /* 13627 * We sent a SYN with data, and thus have a 13628 * sendmap entry with a SYN set. Lets find it 13629 * and take off the send bit and the byte and 13630 * set it up to be what we send (send it next). 13631 */ 13632 struct rack_sendmap *rsm; 13633 13634 rsm = tqhash_min(rack->r_ctl.tqh); 13635 if (rsm) { 13636 if (rsm->r_flags & RACK_HAS_SYN) { 13637 rsm->r_flags &= ~RACK_HAS_SYN; 13638 rsm->r_start++; 13639 } 13640 rack->r_ctl.rc_resend = rsm; 13641 } 13642 } 13643 } 13644 /* 13645 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 13646 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 13647 */ 13648 tp->t_starttime = ticks; 13649 if (tp->t_flags & TF_NEEDFIN) { 13650 tcp_state_change(tp, TCPS_FIN_WAIT_1); 13651 tp->t_flags &= ~TF_NEEDFIN; 13652 thflags &= ~TH_SYN; 13653 } else { 13654 tcp_state_change(tp, TCPS_ESTABLISHED); 13655 TCP_PROBE5(connect__established, NULL, tp, 13656 mtod(m, const char *), tp, th); 13657 rack_cc_conn_init(tp); 13658 } 13659 } else { 13660 /* 13661 * Received initial SYN in SYN-SENT[*] state => simultaneous 13662 * open. If segment contains CC option and there is a 13663 * cached CC, apply TAO test. If it succeeds, connection is * 13664 * half-synchronized. Otherwise, do 3-way handshake: 13665 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 13666 * there was no CC option, clear cached CC value. 13667 */ 13668 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); 13669 tcp_state_change(tp, TCPS_SYN_RECEIVED); 13670 } 13671 /* 13672 * Advance th->th_seq to correspond to first data byte. If data, 13673 * trim to stay within window, dropping FIN if necessary. 13674 */ 13675 th->th_seq++; 13676 if (tlen > tp->rcv_wnd) { 13677 todrop = tlen - tp->rcv_wnd; 13678 m_adj(m, -todrop); 13679 tlen = tp->rcv_wnd; 13680 thflags &= ~TH_FIN; 13681 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 13682 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 13683 } 13684 tp->snd_wl1 = th->th_seq - 1; 13685 tp->rcv_up = th->th_seq; 13686 /* 13687 * Client side of transaction: already sent SYN and data. 
If the 13688 * remote host used T/TCP to validate the SYN, our data will be 13689 * ACK'd; if so, enter normal data segment processing in the middle 13690 * of step 5, ack processing. Otherwise, goto step 6. 13691 */ 13692 if (thflags & TH_ACK) { 13693 /* For syn-sent we need to possibly update the rtt */ 13694 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 13695 uint32_t t, mcts; 13696 13697 mcts = tcp_ts_getticks(); 13698 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 13699 if (!tp->t_rttlow || tp->t_rttlow > t) 13700 tp->t_rttlow = t; 13701 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 13702 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 13703 tcp_rack_xmit_timer_commit(rack, tp); 13704 } 13705 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) 13706 return (ret_val); 13707 /* We may have changed to FIN_WAIT_1 above */ 13708 if (tp->t_state == TCPS_FIN_WAIT_1) { 13709 /* 13710 * In FIN_WAIT_1 STATE in addition to the processing 13711 * for the ESTABLISHED state if our FIN is now 13712 * acknowledged then enter FIN_WAIT_2. 13713 */ 13714 if (ourfinisacked) { 13715 /* 13716 * If we can't receive any more data, then 13717 * closing user can proceed. Starting the 13718 * timer is contrary to the specification, 13719 * but if we don't get a FIN we'll hang 13720 * forever. 13721 * 13722 * XXXjl: we should release the tp also, and 13723 * use a compressed state. 13724 */ 13725 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13726 soisdisconnected(so); 13727 tcp_timer_activate(tp, TT_2MSL, 13728 (tcp_fast_finwait2_recycle ? 13729 tcp_finwait2_timeout : 13730 TP_MAXIDLE(tp))); 13731 } 13732 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13733 } 13734 } 13735 } 13736 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13737 tiwin, thflags, nxt_pkt)); 13738 } 13739 13740 /* 13741 * Return value of 1, the TCB is unlocked and most 13742 * likely gone, return value of 0, the TCP is still 13743 * locked. 13744 */ 13745 static int 13746 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 13747 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13748 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13749 { 13750 struct tcp_rack *rack; 13751 int32_t orig_tlen = tlen; 13752 int32_t ret_val = 0; 13753 int32_t ourfinisacked = 0; 13754 13755 rack = (struct tcp_rack *)tp->t_fb_ptr; 13756 ctf_calc_rwin(so, tp); 13757 if ((thflags & TH_RST) || 13758 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13759 return (__ctf_process_rst(m, th, so, tp, 13760 &rack->r_ctl.challenge_ack_ts, 13761 &rack->r_ctl.challenge_ack_cnt)); 13762 if ((thflags & TH_ACK) && 13763 (SEQ_LEQ(th->th_ack, tp->snd_una) || 13764 SEQ_GT(th->th_ack, tp->snd_max))) { 13765 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13766 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13767 return (1); 13768 } 13769 if (tp->t_flags & TF_FASTOPEN) { 13770 /* 13771 * When a TFO connection is in SYN_RECEIVED, the 13772 * only valid packets are the initial SYN, a 13773 * retransmit/copy of the initial SYN (possibly with 13774 * a subset of the original data), a valid ACK, a 13775 * FIN, or a RST. 
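 *
 * Anything else is rejected: a segment carrying both SYN and ACK is
 * answered with a reset, a retransmitted bare SYN is silently
 * dropped while one of our retransmit/TLP/RACK timers is pending,
 * and a segment with none of ACK, FIN or RST set is discarded.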
13776 */ 13777 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 13778 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13779 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13780 return (1); 13781 } else if (thflags & TH_SYN) { 13782 /* non-initial SYN is ignored */ 13783 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 13784 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 13785 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 13786 ctf_do_drop(m, NULL); 13787 return (0); 13788 } 13789 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 13790 ctf_do_drop(m, NULL); 13791 return (0); 13792 } 13793 } 13794 13795 /* 13796 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13797 * it's less than ts_recent, drop it. 13798 */ 13799 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13800 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13801 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13802 return (ret_val); 13803 } 13804 /* 13805 * In the SYN-RECEIVED state, validate that the packet belongs to 13806 * this connection before trimming the data to fit the receive 13807 * window. Check the sequence number versus IRS since we know the 13808 * sequence numbers haven't wrapped. This is a partial fix for the 13809 * "LAND" DoS attack. 13810 */ 13811 if (SEQ_LT(th->th_seq, tp->irs)) { 13812 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13813 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13814 return (1); 13815 } 13816 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 13817 &rack->r_ctl.challenge_ack_ts, 13818 &rack->r_ctl.challenge_ack_cnt)) { 13819 return (ret_val); 13820 } 13821 /* 13822 * If last ACK falls within this segment's sequence numbers, record 13823 * its timestamp. NOTE: 1) That the test incorporates suggestions 13824 * from the latest proposal of the tcplw@cray.com list (Braden 13825 * 1993/04/26). 2) That updating only on newer timestamps interferes 13826 * with our earlier PAWS tests, so this check should be solely 13827 * predicated on the sequence space of this segment. 3) That we 13828 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13829 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13830 * SEG.Len, This modified check allows us to overcome RFC1323's 13831 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13832 * p.869. In such cases, we can still calculate the RTT correctly 13833 * when RCV.NXT == Last.ACK.Sent. 13834 */ 13835 if ((to->to_flags & TOF_TS) != 0 && 13836 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13837 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13838 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13839 tp->ts_recent_age = tcp_ts_getticks(); 13840 tp->ts_recent = to->to_tsval; 13841 } 13842 tp->snd_wnd = tiwin; 13843 rack_validate_fo_sendwin_up(tp, rack); 13844 /* 13845 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13846 * is on (half-synchronized state), then queue data for later 13847 * processing; else drop segment and return. 13848 */ 13849 if ((thflags & TH_ACK) == 0) { 13850 if (tp->t_flags & TF_FASTOPEN) { 13851 rack_cc_conn_init(tp); 13852 } 13853 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13854 tiwin, thflags, nxt_pkt)); 13855 } 13856 KMOD_TCPSTAT_INC(tcps_connects); 13857 if (tp->t_flags & TF_SONOTCONN) { 13858 tp->t_flags &= ~TF_SONOTCONN; 13859 soisconnected(so); 13860 } 13861 /* Do window scaling? 
*/ 13862 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 13863 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 13864 tp->rcv_scale = tp->request_r_scale; 13865 } 13866 /* 13867 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 13868 * FIN-WAIT-1 13869 */ 13870 tp->t_starttime = ticks; 13871 if ((tp->t_flags & TF_FASTOPEN) && tp->t_tfo_pending) { 13872 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 13873 tp->t_tfo_pending = NULL; 13874 } 13875 if (tp->t_flags & TF_NEEDFIN) { 13876 tcp_state_change(tp, TCPS_FIN_WAIT_1); 13877 tp->t_flags &= ~TF_NEEDFIN; 13878 } else { 13879 tcp_state_change(tp, TCPS_ESTABLISHED); 13880 TCP_PROBE5(accept__established, NULL, tp, 13881 mtod(m, const char *), tp, th); 13882 /* 13883 * TFO connections call cc_conn_init() during SYN 13884 * processing. Calling it again here for such connections 13885 * is not harmless as it would undo the snd_cwnd reduction 13886 * that occurs when a TFO SYN|ACK is retransmitted. 13887 */ 13888 if (!(tp->t_flags & TF_FASTOPEN)) 13889 rack_cc_conn_init(tp); 13890 } 13891 /* 13892 * Account for the ACK of our SYN prior to 13893 * regular ACK processing below, except for 13894 * simultaneous SYN, which is handled later. 13895 */ 13896 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 13897 tp->snd_una++; 13898 /* 13899 * If segment contains data or ACK, will call tcp_reass() later; if 13900 * not, do so now to pass queued data to user. 13901 */ 13902 if (tlen == 0 && (thflags & TH_FIN) == 0) { 13903 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 13904 (struct mbuf *)0); 13905 if (tp->t_flags & TF_WAKESOR) { 13906 tp->t_flags &= ~TF_WAKESOR; 13907 /* NB: sorwakeup_locked() does an implicit unlock. */ 13908 sorwakeup_locked(so); 13909 } 13910 } 13911 tp->snd_wl1 = th->th_seq - 1; 13912 /* For syn-recv we need to possibly update the rtt */ 13913 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 13914 uint32_t t, mcts; 13915 13916 mcts = tcp_ts_getticks(); 13917 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 13918 if (!tp->t_rttlow || tp->t_rttlow > t) 13919 tp->t_rttlow = t; 13920 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); 13921 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 13922 tcp_rack_xmit_timer_commit(rack, tp); 13923 } 13924 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13925 return (ret_val); 13926 } 13927 if (tp->t_state == TCPS_FIN_WAIT_1) { 13928 /* We could have went to FIN_WAIT_1 (or EST) above */ 13929 /* 13930 * In FIN_WAIT_1 STATE in addition to the processing for the 13931 * ESTABLISHED state if our FIN is now acknowledged then 13932 * enter FIN_WAIT_2. 13933 */ 13934 if (ourfinisacked) { 13935 /* 13936 * If we can't receive any more data, then closing 13937 * user can proceed. Starting the timer is contrary 13938 * to the specification, but if we don't get a FIN 13939 * we'll hang forever. 13940 * 13941 * XXXjl: we should release the tp also, and use a 13942 * compressed state. 13943 */ 13944 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13945 soisdisconnected(so); 13946 tcp_timer_activate(tp, TT_2MSL, 13947 (tcp_fast_finwait2_recycle ? 
13948 tcp_finwait2_timeout : 13949 TP_MAXIDLE(tp))); 13950 } 13951 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13952 } 13953 } 13954 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13955 tiwin, thflags, nxt_pkt)); 13956 } 13957 13958 /* 13959 * Return value of 1, the TCB is unlocked and most 13960 * likely gone, return value of 0, the TCP is still 13961 * locked. 13962 */ 13963 static int 13964 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 13965 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13966 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13967 { 13968 int32_t ret_val = 0; 13969 int32_t orig_tlen = tlen; 13970 struct tcp_rack *rack; 13971 13972 /* 13973 * Header prediction: check for the two common cases of a 13974 * uni-directional data xfer. If the packet has no control flags, 13975 * is in-sequence, the window didn't change and we're not 13976 * retransmitting, it's a candidate. If the length is zero and the 13977 * ack moved forward, we're the sender side of the xfer. Just free 13978 * the data acked & wake any higher level process that was blocked 13979 * waiting for space. If the length is non-zero and the ack didn't 13980 * move, we're the receiver side. If we're getting packets in-order 13981 * (the reassembly queue is empty), add the data toc The socket 13982 * buffer and note that we need a delayed ack. Make sure that the 13983 * hidden state-flags are also off. Since we check for 13984 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 13985 */ 13986 rack = (struct tcp_rack *)tp->t_fb_ptr; 13987 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 13988 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) && 13989 __predict_true(SEGQ_EMPTY(tp)) && 13990 __predict_true(th->th_seq == tp->rcv_nxt)) { 13991 if (tlen == 0) { 13992 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 13993 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 13994 return (0); 13995 } 13996 } else { 13997 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 13998 tiwin, nxt_pkt, iptos)) { 13999 return (0); 14000 } 14001 } 14002 } 14003 ctf_calc_rwin(so, tp); 14004 14005 if ((thflags & TH_RST) || 14006 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14007 return (__ctf_process_rst(m, th, so, tp, 14008 &rack->r_ctl.challenge_ack_ts, 14009 &rack->r_ctl.challenge_ack_cnt)); 14010 14011 /* 14012 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14013 * synchronized state. 14014 */ 14015 if (thflags & TH_SYN) { 14016 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14017 return (ret_val); 14018 } 14019 /* 14020 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14021 * it's less than ts_recent, drop it. 14022 */ 14023 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14024 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14025 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14026 return (ret_val); 14027 } 14028 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14029 &rack->r_ctl.challenge_ack_ts, 14030 &rack->r_ctl.challenge_ack_cnt)) { 14031 return (ret_val); 14032 } 14033 /* 14034 * If last ACK falls within this segment's sequence numbers, record 14035 * its timestamp. NOTE: 1) That the test incorporates suggestions 14036 * from the latest proposal of the tcplw@cray.com list (Braden 14037 * 1993/04/26). 
2) That updating only on newer timestamps interferes 14038 * with our earlier PAWS tests, so this check should be solely 14039 * predicated on the sequence space of this segment. 3) That we 14040 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14041 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14042 * SEG.Len, This modified check allows us to overcome RFC1323's 14043 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14044 * p.869. In such cases, we can still calculate the RTT correctly 14045 * when RCV.NXT == Last.ACK.Sent. 14046 */ 14047 if ((to->to_flags & TOF_TS) != 0 && 14048 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14049 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14050 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14051 tp->ts_recent_age = tcp_ts_getticks(); 14052 tp->ts_recent = to->to_tsval; 14053 } 14054 /* 14055 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14056 * is on (half-synchronized state), then queue data for later 14057 * processing; else drop segment and return. 14058 */ 14059 if ((thflags & TH_ACK) == 0) { 14060 if (tp->t_flags & TF_NEEDSYN) { 14061 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14062 tiwin, thflags, nxt_pkt)); 14063 14064 } else if (tp->t_flags & TF_ACKNOW) { 14065 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 14066 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 14067 return (ret_val); 14068 } else { 14069 ctf_do_drop(m, NULL); 14070 return (0); 14071 } 14072 } 14073 /* 14074 * Ack processing. 14075 */ 14076 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val, orig_tlen)) { 14077 return (ret_val); 14078 } 14079 if (sbavail(&so->so_snd)) { 14080 if (ctf_progress_timeout_check(tp, true)) { 14081 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 14082 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14083 return (1); 14084 } 14085 } 14086 /* State changes only happen in rack_process_data() */ 14087 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14088 tiwin, thflags, nxt_pkt)); 14089 } 14090 14091 /* 14092 * Return value of 1, the TCB is unlocked and most 14093 * likely gone, return value of 0, the TCP is still 14094 * locked. 14095 */ 14096 static int 14097 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 14098 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14099 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14100 { 14101 int32_t ret_val = 0; 14102 int32_t orig_tlen = tlen; 14103 struct tcp_rack *rack; 14104 14105 rack = (struct tcp_rack *)tp->t_fb_ptr; 14106 ctf_calc_rwin(so, tp); 14107 if ((thflags & TH_RST) || 14108 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14109 return (__ctf_process_rst(m, th, so, tp, 14110 &rack->r_ctl.challenge_ack_ts, 14111 &rack->r_ctl.challenge_ack_cnt)); 14112 /* 14113 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14114 * synchronized state. 14115 */ 14116 if (thflags & TH_SYN) { 14117 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14118 return (ret_val); 14119 } 14120 /* 14121 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14122 * it's less than ts_recent, drop it. 
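 * ctf_ts_check() also implements the usual PAWS escape: if ts_recent
 * is older than the PAWS idle limit (roughly 24 days) it is
 * invalidated and the segment is processed normally, since the peer
 * may have rebooted or its timestamp clock may have wrapped.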
14123 */ 14124 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14125 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14126 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14127 return (ret_val); 14128 } 14129 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14130 &rack->r_ctl.challenge_ack_ts, 14131 &rack->r_ctl.challenge_ack_cnt)) { 14132 return (ret_val); 14133 } 14134 /* 14135 * If last ACK falls within this segment's sequence numbers, record 14136 * its timestamp. NOTE: 1) That the test incorporates suggestions 14137 * from the latest proposal of the tcplw@cray.com list (Braden 14138 * 1993/04/26). 2) That updating only on newer timestamps interferes 14139 * with our earlier PAWS tests, so this check should be solely 14140 * predicated on the sequence space of this segment. 3) That we 14141 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14142 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14143 * SEG.Len, This modified check allows us to overcome RFC1323's 14144 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14145 * p.869. In such cases, we can still calculate the RTT correctly 14146 * when RCV.NXT == Last.ACK.Sent. 14147 */ 14148 if ((to->to_flags & TOF_TS) != 0 && 14149 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14150 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14151 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14152 tp->ts_recent_age = tcp_ts_getticks(); 14153 tp->ts_recent = to->to_tsval; 14154 } 14155 /* 14156 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14157 * is on (half-synchronized state), then queue data for later 14158 * processing; else drop segment and return. 14159 */ 14160 if ((thflags & TH_ACK) == 0) { 14161 if (tp->t_flags & TF_NEEDSYN) { 14162 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14163 tiwin, thflags, nxt_pkt)); 14164 14165 } else if (tp->t_flags & TF_ACKNOW) { 14166 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 14167 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 14168 return (ret_val); 14169 } else { 14170 ctf_do_drop(m, NULL); 14171 return (0); 14172 } 14173 } 14174 /* 14175 * Ack processing. 
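 * rack_process_ack() returns non-zero once it has fully consumed the
 * segment (possibly dropping the connection along the way); in that
 * case ret_val is handed straight back and no further processing of
 * this segment is attempted.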
14176 */ 14177 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val, orig_tlen)) { 14178 return (ret_val); 14179 } 14180 if (sbavail(&so->so_snd)) { 14181 if (ctf_progress_timeout_check(tp, true)) { 14182 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 14183 tp, tick, PROGRESS_DROP, __LINE__); 14184 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14185 return (1); 14186 } 14187 } 14188 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14189 tiwin, thflags, nxt_pkt)); 14190 } 14191 14192 static int 14193 rack_check_data_after_close(struct mbuf *m, 14194 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 14195 { 14196 struct tcp_rack *rack; 14197 14198 rack = (struct tcp_rack *)tp->t_fb_ptr; 14199 if (rack->rc_allow_data_af_clo == 0) { 14200 close_now: 14201 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 14202 /* tcp_close will kill the inp pre-log the Reset */ 14203 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 14204 tp = tcp_close(tp); 14205 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 14206 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 14207 return (1); 14208 } 14209 if (sbavail(&so->so_snd) == 0) 14210 goto close_now; 14211 /* Ok we allow data that is ignored and a followup reset */ 14212 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 14213 tp->rcv_nxt = th->th_seq + *tlen; 14214 tp->t_flags2 |= TF2_DROP_AF_DATA; 14215 rack->r_wanted_output = 1; 14216 *tlen = 0; 14217 return (0); 14218 } 14219 14220 /* 14221 * Return value of 1, the TCB is unlocked and most 14222 * likely gone, return value of 0, the TCP is still 14223 * locked. 14224 */ 14225 static int 14226 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 14227 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14228 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14229 { 14230 int32_t ret_val = 0; 14231 int32_t orig_tlen = tlen; 14232 int32_t ourfinisacked = 0; 14233 struct tcp_rack *rack; 14234 14235 rack = (struct tcp_rack *)tp->t_fb_ptr; 14236 ctf_calc_rwin(so, tp); 14237 14238 if ((thflags & TH_RST) || 14239 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14240 return (__ctf_process_rst(m, th, so, tp, 14241 &rack->r_ctl.challenge_ack_ts, 14242 &rack->r_ctl.challenge_ack_cnt)); 14243 /* 14244 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14245 * synchronized state. 14246 */ 14247 if (thflags & TH_SYN) { 14248 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14249 return (ret_val); 14250 } 14251 /* 14252 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14253 * it's less than ts_recent, drop it. 14254 */ 14255 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14256 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14257 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14258 return (ret_val); 14259 } 14260 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14261 &rack->r_ctl.challenge_ack_ts, 14262 &rack->r_ctl.challenge_ack_cnt)) { 14263 return (ret_val); 14264 } 14265 /* 14266 * If new data are received on a connection after the user processes 14267 * are gone, then RST the other end. 14268 */ 14269 if ((tp->t_flags & TF_CLOSED) && tlen && 14270 rack_check_data_after_close(m, tp, &tlen, th, so)) 14271 return (1); 14272 /* 14273 * If last ACK falls within this segment's sequence numbers, record 14274 * its timestamp. 
NOTE: 1) That the test incorporates suggestions 14275 * from the latest proposal of the tcplw@cray.com list (Braden 14276 * 1993/04/26). 2) That updating only on newer timestamps interferes 14277 * with our earlier PAWS tests, so this check should be solely 14278 * predicated on the sequence space of this segment. 3) That we 14279 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14280 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14281 * SEG.Len, This modified check allows us to overcome RFC1323's 14282 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14283 * p.869. In such cases, we can still calculate the RTT correctly 14284 * when RCV.NXT == Last.ACK.Sent. 14285 */ 14286 if ((to->to_flags & TOF_TS) != 0 && 14287 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14288 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14289 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14290 tp->ts_recent_age = tcp_ts_getticks(); 14291 tp->ts_recent = to->to_tsval; 14292 } 14293 /* 14294 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14295 * is on (half-synchronized state), then queue data for later 14296 * processing; else drop segment and return. 14297 */ 14298 if ((thflags & TH_ACK) == 0) { 14299 if (tp->t_flags & TF_NEEDSYN) { 14300 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14301 tiwin, thflags, nxt_pkt)); 14302 } else if (tp->t_flags & TF_ACKNOW) { 14303 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 14304 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 14305 return (ret_val); 14306 } else { 14307 ctf_do_drop(m, NULL); 14308 return (0); 14309 } 14310 } 14311 /* 14312 * Ack processing. 14313 */ 14314 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 14315 return (ret_val); 14316 } 14317 if (ourfinisacked) { 14318 /* 14319 * If we can't receive any more data, then closing user can 14320 * proceed. Starting the timer is contrary to the 14321 * specification, but if we don't get a FIN we'll hang 14322 * forever. 14323 * 14324 * XXXjl: we should release the tp also, and use a 14325 * compressed state. 14326 */ 14327 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 14328 soisdisconnected(so); 14329 tcp_timer_activate(tp, TT_2MSL, 14330 (tcp_fast_finwait2_recycle ? 14331 tcp_finwait2_timeout : 14332 TP_MAXIDLE(tp))); 14333 } 14334 tcp_state_change(tp, TCPS_FIN_WAIT_2); 14335 } 14336 if (sbavail(&so->so_snd)) { 14337 if (ctf_progress_timeout_check(tp, true)) { 14338 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 14339 tp, tick, PROGRESS_DROP, __LINE__); 14340 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14341 return (1); 14342 } 14343 } 14344 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14345 tiwin, thflags, nxt_pkt)); 14346 } 14347 14348 /* 14349 * Return value of 1, the TCB is unlocked and most 14350 * likely gone, return value of 0, the TCP is still 14351 * locked. 
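 *
 * Callers therefore must not touch tp, or release the inpcb lock
 * themselves, after a return of 1; only a return of 0 guarantees the
 * write lock is still held and the connection still exists.  An
 * illustrative caller, assuming the usual dispatch through
 * rack->r_substate:
 *
 *	retval = (*rack->r_substate)(m, th, so, tp, &to, ...);
 *	if (retval == 1)
 *		return;			(tp must not be touched again)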
14352 */ 14353 static int 14354 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 14355 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14356 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14357 { 14358 int32_t ret_val = 0; 14359 int32_t orig_tlen = tlen; 14360 int32_t ourfinisacked = 0; 14361 struct tcp_rack *rack; 14362 14363 rack = (struct tcp_rack *)tp->t_fb_ptr; 14364 ctf_calc_rwin(so, tp); 14365 14366 if ((thflags & TH_RST) || 14367 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14368 return (__ctf_process_rst(m, th, so, tp, 14369 &rack->r_ctl.challenge_ack_ts, 14370 &rack->r_ctl.challenge_ack_cnt)); 14371 /* 14372 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14373 * synchronized state. 14374 */ 14375 if (thflags & TH_SYN) { 14376 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14377 return (ret_val); 14378 } 14379 /* 14380 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14381 * it's less than ts_recent, drop it. 14382 */ 14383 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14384 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14385 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14386 return (ret_val); 14387 } 14388 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14389 &rack->r_ctl.challenge_ack_ts, 14390 &rack->r_ctl.challenge_ack_cnt)) { 14391 return (ret_val); 14392 } 14393 /* 14394 * If last ACK falls within this segment's sequence numbers, record 14395 * its timestamp. NOTE: 1) That the test incorporates suggestions 14396 * from the latest proposal of the tcplw@cray.com list (Braden 14397 * 1993/04/26). 2) That updating only on newer timestamps interferes 14398 * with our earlier PAWS tests, so this check should be solely 14399 * predicated on the sequence space of this segment. 3) That we 14400 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14401 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14402 * SEG.Len, This modified check allows us to overcome RFC1323's 14403 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14404 * p.869. In such cases, we can still calculate the RTT correctly 14405 * when RCV.NXT == Last.ACK.Sent. 14406 */ 14407 if ((to->to_flags & TOF_TS) != 0 && 14408 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14409 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14410 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14411 tp->ts_recent_age = tcp_ts_getticks(); 14412 tp->ts_recent = to->to_tsval; 14413 } 14414 /* 14415 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14416 * is on (half-synchronized state), then queue data for later 14417 * processing; else drop segment and return. 14418 */ 14419 if ((thflags & TH_ACK) == 0) { 14420 if (tp->t_flags & TF_NEEDSYN) { 14421 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14422 tiwin, thflags, nxt_pkt)); 14423 } else if (tp->t_flags & TF_ACKNOW) { 14424 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 14425 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 14426 return (ret_val); 14427 } else { 14428 ctf_do_drop(m, NULL); 14429 return (0); 14430 } 14431 } 14432 /* 14433 * Ack processing. 
14434 */ 14435 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 14436 return (ret_val); 14437 } 14438 if (ourfinisacked) { 14439 tcp_twstart(tp); 14440 m_freem(m); 14441 return (1); 14442 } 14443 if (sbavail(&so->so_snd)) { 14444 if (ctf_progress_timeout_check(tp, true)) { 14445 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 14446 tp, tick, PROGRESS_DROP, __LINE__); 14447 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14448 return (1); 14449 } 14450 } 14451 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14452 tiwin, thflags, nxt_pkt)); 14453 } 14454 14455 /* 14456 * Return value of 1, the TCB is unlocked and most 14457 * likely gone, return value of 0, the TCP is still 14458 * locked. 14459 */ 14460 static int 14461 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 14462 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14463 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14464 { 14465 int32_t ret_val = 0; 14466 int32_t orig_tlen; 14467 int32_t ourfinisacked = 0; 14468 struct tcp_rack *rack; 14469 14470 rack = (struct tcp_rack *)tp->t_fb_ptr; 14471 ctf_calc_rwin(so, tp); 14472 14473 if ((thflags & TH_RST) || 14474 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14475 return (__ctf_process_rst(m, th, so, tp, 14476 &rack->r_ctl.challenge_ack_ts, 14477 &rack->r_ctl.challenge_ack_cnt)); 14478 /* 14479 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14480 * synchronized state. 14481 */ 14482 if (thflags & TH_SYN) { 14483 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14484 return (ret_val); 14485 } 14486 /* 14487 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14488 * it's less than ts_recent, drop it. 14489 */ 14490 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14491 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14492 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14493 return (ret_val); 14494 } 14495 orig_tlen = tlen; 14496 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14497 &rack->r_ctl.challenge_ack_ts, 14498 &rack->r_ctl.challenge_ack_cnt)) { 14499 return (ret_val); 14500 } 14501 /* 14502 * If last ACK falls within this segment's sequence numbers, record 14503 * its timestamp. NOTE: 1) That the test incorporates suggestions 14504 * from the latest proposal of the tcplw@cray.com list (Braden 14505 * 1993/04/26). 2) That updating only on newer timestamps interferes 14506 * with our earlier PAWS tests, so this check should be solely 14507 * predicated on the sequence space of this segment. 3) That we 14508 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14509 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14510 * SEG.Len, This modified check allows us to overcome RFC1323's 14511 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14512 * p.869. In such cases, we can still calculate the RTT correctly 14513 * when RCV.NXT == Last.ACK.Sent. 14514 */ 14515 if ((to->to_flags & TOF_TS) != 0 && 14516 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14517 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14518 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14519 tp->ts_recent_age = tcp_ts_getticks(); 14520 tp->ts_recent = to->to_tsval; 14521 } 14522 /* 14523 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14524 * is on (half-synchronized state), then queue data for later 14525 * processing; else drop segment and return. 
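 * (In LAST_ACK the only event we are waiting for is the ACK of our
 * FIN, so a segment without ACK is useful here only if TF_NEEDSYN is
 * still outstanding.)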
14526 */ 14527 if ((thflags & TH_ACK) == 0) { 14528 if (tp->t_flags & TF_NEEDSYN) { 14529 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14530 tiwin, thflags, nxt_pkt)); 14531 } else if (tp->t_flags & TF_ACKNOW) { 14532 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 14533 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 14534 return (ret_val); 14535 } else { 14536 ctf_do_drop(m, NULL); 14537 return (0); 14538 } 14539 } 14540 /* 14541 * case TCPS_LAST_ACK: Ack processing. 14542 */ 14543 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 14544 return (ret_val); 14545 } 14546 if (ourfinisacked) { 14547 tp = tcp_close(tp); 14548 ctf_do_drop(m, tp); 14549 return (1); 14550 } 14551 if (sbavail(&so->so_snd)) { 14552 if (ctf_progress_timeout_check(tp, true)) { 14553 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 14554 tp, tick, PROGRESS_DROP, __LINE__); 14555 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14556 return (1); 14557 } 14558 } 14559 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14560 tiwin, thflags, nxt_pkt)); 14561 } 14562 14563 /* 14564 * Return value of 1, the TCB is unlocked and most 14565 * likely gone, return value of 0, the TCP is still 14566 * locked. 14567 */ 14568 static int 14569 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 14570 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14571 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14572 { 14573 int32_t ret_val = 0; 14574 int32_t orig_tlen = tlen; 14575 int32_t ourfinisacked = 0; 14576 struct tcp_rack *rack; 14577 14578 rack = (struct tcp_rack *)tp->t_fb_ptr; 14579 ctf_calc_rwin(so, tp); 14580 14581 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 14582 if ((thflags & TH_RST) || 14583 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14584 return (__ctf_process_rst(m, th, so, tp, 14585 &rack->r_ctl.challenge_ack_ts, 14586 &rack->r_ctl.challenge_ack_cnt)); 14587 /* 14588 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14589 * synchronized state. 14590 */ 14591 if (thflags & TH_SYN) { 14592 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14593 return (ret_val); 14594 } 14595 /* 14596 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14597 * it's less than ts_recent, drop it. 14598 */ 14599 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14600 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14601 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14602 return (ret_val); 14603 } 14604 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14605 &rack->r_ctl.challenge_ack_ts, 14606 &rack->r_ctl.challenge_ack_cnt)) { 14607 return (ret_val); 14608 } 14609 /* 14610 * If new data are received on a connection after the user processes 14611 * are gone, then RST the other end. 14612 */ 14613 if ((tp->t_flags & TF_CLOSED) && tlen && 14614 rack_check_data_after_close(m, tp, &tlen, th, so)) 14615 return (1); 14616 /* 14617 * If last ACK falls within this segment's sequence numbers, record 14618 * its timestamp. NOTE: 1) That the test incorporates suggestions 14619 * from the latest proposal of the tcplw@cray.com list (Braden 14620 * 1993/04/26). 2) That updating only on newer timestamps interferes 14621 * with our earlier PAWS tests, so this check should be solely 14622 * predicated on the sequence space of this segment. 
3) That we 14623 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14624 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14625 * SEG.Len, This modified check allows us to overcome RFC1323's 14626 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14627 * p.869. In such cases, we can still calculate the RTT correctly 14628 * when RCV.NXT == Last.ACK.Sent. 14629 */ 14630 if ((to->to_flags & TOF_TS) != 0 && 14631 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14632 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14633 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14634 tp->ts_recent_age = tcp_ts_getticks(); 14635 tp->ts_recent = to->to_tsval; 14636 } 14637 /* 14638 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14639 * is on (half-synchronized state), then queue data for later 14640 * processing; else drop segment and return. 14641 */ 14642 if ((thflags & TH_ACK) == 0) { 14643 if (tp->t_flags & TF_NEEDSYN) { 14644 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14645 tiwin, thflags, nxt_pkt)); 14646 } else if (tp->t_flags & TF_ACKNOW) { 14647 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 14648 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 14649 return (ret_val); 14650 } else { 14651 ctf_do_drop(m, NULL); 14652 return (0); 14653 } 14654 } 14655 /* 14656 * Ack processing. 14657 */ 14658 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 14659 return (ret_val); 14660 } 14661 if (sbavail(&so->so_snd)) { 14662 if (ctf_progress_timeout_check(tp, true)) { 14663 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 14664 tp, tick, PROGRESS_DROP, __LINE__); 14665 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14666 return (1); 14667 } 14668 } 14669 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14670 tiwin, thflags, nxt_pkt)); 14671 } 14672 14673 static void inline 14674 rack_clear_rate_sample(struct tcp_rack *rack) 14675 { 14676 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 14677 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 14678 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 14679 } 14680 14681 static void 14682 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 14683 { 14684 uint64_t bw_est, rate_wanted; 14685 int chged = 0; 14686 uint32_t user_max, orig_min, orig_max; 14687 14688 #ifdef TCP_REQUEST_TRK 14689 if (rack->rc_hybrid_mode && 14690 (rack->r_ctl.rc_pace_max_segs != 0) && 14691 (rack_hybrid_allow_set_maxseg == 1) && 14692 (rack->r_ctl.rc_last_sft != NULL)) { 14693 rack->r_ctl.rc_last_sft->hybrid_flags &= ~TCP_HYBRID_PACING_SETMSS; 14694 return; 14695 } 14696 #endif 14697 orig_min = rack->r_ctl.rc_pace_min_segs; 14698 orig_max = rack->r_ctl.rc_pace_max_segs; 14699 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 14700 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 14701 chged = 1; 14702 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 14703 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 14704 if (user_max != rack->r_ctl.rc_pace_max_segs) 14705 chged = 1; 14706 } 14707 if (rack->rc_force_max_seg) { 14708 rack->r_ctl.rc_pace_max_segs = user_max; 14709 } else if (rack->use_fixed_rate) { 14710 bw_est = rack_get_bw(rack); 14711 if ((rack->r_ctl.crte == NULL) || 14712 (bw_est != rack->r_ctl.crte->rate)) { 14713 rack->r_ctl.rc_pace_max_segs = user_max; 14714 } else { 14715 /* We are pacing right at the hardware rate */ 14716 uint32_t segsiz, pace_one; 14717 14718 
if (rack_pace_one_seg || 14719 (rack->r_ctl.rc_user_set_min_segs == 1)) 14720 pace_one = 1; 14721 else 14722 pace_one = 0; 14723 segsiz = min(ctf_fixed_maxseg(tp), 14724 rack->r_ctl.rc_pace_min_segs); 14725 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor( 14726 tp, bw_est, segsiz, pace_one, 14727 rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); 14728 } 14729 } else if (rack->rc_always_pace) { 14730 if (rack->r_ctl.gp_bw || 14731 rack->r_ctl.init_rate) { 14732 /* We have a rate of some sort set */ 14733 uint32_t orig; 14734 14735 bw_est = rack_get_bw(rack); 14736 orig = rack->r_ctl.rc_pace_max_segs; 14737 if (fill_override) 14738 rate_wanted = *fill_override; 14739 else 14740 rate_wanted = rack_get_gp_est(rack); 14741 if (rate_wanted) { 14742 /* We have something */ 14743 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 14744 rate_wanted, 14745 ctf_fixed_maxseg(rack->rc_tp)); 14746 } else 14747 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 14748 if (orig != rack->r_ctl.rc_pace_max_segs) 14749 chged = 1; 14750 } else if ((rack->r_ctl.gp_bw == 0) && 14751 (rack->r_ctl.rc_pace_max_segs == 0)) { 14752 /* 14753 * If we have nothing limit us to bursting 14754 * out IW sized pieces. 14755 */ 14756 chged = 1; 14757 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 14758 } 14759 } 14760 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 14761 chged = 1; 14762 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 14763 } 14764 if (chged) 14765 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 14766 } 14767 14768 14769 static void 14770 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack, int32_t flags) 14771 { 14772 #ifdef INET6 14773 struct ip6_hdr *ip6 = NULL; 14774 #endif 14775 #ifdef INET 14776 struct ip *ip = NULL; 14777 #endif 14778 struct udphdr *udp = NULL; 14779 14780 /* Ok lets fill in the fast block, it can only be used with no IP options! 
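	 * A note on layout (summarizing the code below): the fsb keeps a
	 * prebuilt header template of [ip6_hdr or ip][udphdr, only when
	 * tp->t_port is set for UDP tunneling][tcphdr], so the fast output
	 * path can copy headers instead of rebuilding them for every send.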
*/ 14781 #ifdef INET6 14782 if (rack->r_is_v6) { 14783 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 14784 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 14785 if (tp->t_port) { 14786 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 14787 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 14788 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 14789 udp->uh_dport = tp->t_port; 14790 rack->r_ctl.fsb.udp = udp; 14791 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 14792 } else 14793 { 14794 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 14795 rack->r_ctl.fsb.udp = NULL; 14796 } 14797 tcpip_fillheaders(rack->rc_inp, 14798 tp->t_port, 14799 ip6, rack->r_ctl.fsb.th); 14800 rack->r_ctl.fsb.hoplimit = in6_selecthlim(rack->rc_inp, NULL); 14801 } else 14802 #endif /* INET6 */ 14803 #ifdef INET 14804 { 14805 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 14806 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 14807 if (tp->t_port) { 14808 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 14809 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 14810 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 14811 udp->uh_dport = tp->t_port; 14812 rack->r_ctl.fsb.udp = udp; 14813 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 14814 } else 14815 { 14816 rack->r_ctl.fsb.udp = NULL; 14817 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 14818 } 14819 tcpip_fillheaders(rack->rc_inp, 14820 tp->t_port, 14821 ip, rack->r_ctl.fsb.th); 14822 rack->r_ctl.fsb.hoplimit = tptoinpcb(tp)->inp_ip_ttl; 14823 } 14824 #endif 14825 rack->r_ctl.fsb.recwin = lmin(lmax(sbspace(&tptosocket(tp)->so_rcv), 0), 14826 (long)TCP_MAXWIN << tp->rcv_scale); 14827 rack->r_fsb_inited = 1; 14828 } 14829 14830 static int 14831 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 14832 { 14833 /* 14834 * Allocate the larger of spaces V6 if available else just 14835 * V4 and include udphdr (overbook) 14836 */ 14837 #ifdef INET6 14838 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 14839 #else 14840 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 14841 #endif 14842 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 14843 M_TCPFSB, M_NOWAIT|M_ZERO); 14844 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 14845 return (ENOMEM); 14846 } 14847 rack->r_fsb_inited = 0; 14848 return (0); 14849 } 14850 14851 static void 14852 rack_log_hystart_event(struct tcp_rack *rack, uint32_t high_seq, uint8_t mod) 14853 { 14854 /* 14855 * Types of logs (mod value) 14856 * 20 - Initial round setup 14857 * 21 - Rack declares a new round. 
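	 * In both cases the log point below records flex1 = current_round,
	 * flex2 = roundends, flex3 = the high_seq passed in, flex4 = snd_max
	 * and flex8 = the mod value.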
14858 */ 14859 struct tcpcb *tp; 14860 14861 tp = rack->rc_tp; 14862 if (tcp_bblogging_on(tp)) { 14863 union tcp_log_stackspecific log; 14864 struct timeval tv; 14865 14866 memset(&log, 0, sizeof(log)); 14867 log.u_bbr.flex1 = rack->r_ctl.current_round; 14868 log.u_bbr.flex2 = rack->r_ctl.roundends; 14869 log.u_bbr.flex3 = high_seq; 14870 log.u_bbr.flex4 = tp->snd_max; 14871 log.u_bbr.flex8 = mod; 14872 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14873 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 14874 log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes; 14875 TCP_LOG_EVENTP(tp, NULL, 14876 &tptosocket(tp)->so_rcv, 14877 &tptosocket(tp)->so_snd, 14878 TCP_HYSTART, 0, 14879 0, &log, false, &tv); 14880 } 14881 } 14882 14883 static void 14884 rack_deferred_init(struct tcpcb *tp, struct tcp_rack *rack) 14885 { 14886 rack->rack_deferred_inited = 1; 14887 rack->r_ctl.roundends = tp->snd_max; 14888 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 14889 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 14890 } 14891 14892 static void 14893 rack_init_retransmit_value(struct tcp_rack *rack, int ctl) 14894 { 14895 /* Retransmit bit controls. 14896 * 14897 * The setting of these values control one of 14898 * three settings you can have and dictate 14899 * how rack does retransmissions. Note this 14900 * is in *any* mode i.e. pacing on or off DGP 14901 * fixed rate pacing, or just bursting rack. 14902 * 14903 * 1 - Use full sized retransmits i.e. limit 14904 * the size to whatever the pace_max_segments 14905 * size is. 14906 * 14907 * 2 - Use pacer min granularity as a guide to 14908 * the size combined with the current calculated 14909 * goodput b/w measurement. So for example if 14910 * the goodput is measured at 20Mbps we would 14911 * calculate 8125 (pacer minimum 250usec in 14912 * that b/w) and then round it up to the next 14913 * MSS i.e. for 1448 mss 6 MSS or 8688 bytes. 14914 * 14915 * 0 - The rack default 1 MSS (anything not 0/1/2 14916 * fall here too if we are setting via rack_init()). 
14917 * 14918 */ 14919 if (ctl == 1) { 14920 rack->full_size_rxt = 1; 14921 rack->shape_rxt_to_pacing_min = 0; 14922 } else if (ctl == 2) { 14923 rack->full_size_rxt = 0; 14924 rack->shape_rxt_to_pacing_min = 1; 14925 } else { 14926 rack->full_size_rxt = 0; 14927 rack->shape_rxt_to_pacing_min = 0; 14928 } 14929 } 14930 14931 static void 14932 rack_log_chg_info(struct tcpcb *tp, struct tcp_rack *rack, uint8_t mod, 14933 uint32_t flex1, 14934 uint32_t flex2, 14935 uint32_t flex3) 14936 { 14937 if (tcp_bblogging_on(rack->rc_tp)) { 14938 union tcp_log_stackspecific log; 14939 struct timeval tv; 14940 14941 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14942 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14943 log.u_bbr.flex8 = mod; 14944 log.u_bbr.flex1 = flex1; 14945 log.u_bbr.flex2 = flex2; 14946 log.u_bbr.flex3 = flex3; 14947 tcp_log_event(tp, NULL, NULL, NULL, TCP_CHG_QUERY, 0, 14948 0, &log, false, NULL, __func__, __LINE__, &tv); 14949 } 14950 } 14951 14952 static int 14953 rack_chg_query(struct tcpcb *tp, struct tcp_query_resp *reqr) 14954 { 14955 struct tcp_rack *rack; 14956 struct rack_sendmap *rsm; 14957 int i; 14958 14959 14960 rack = (struct tcp_rack *)tp->t_fb_ptr; 14961 switch (reqr->req) { 14962 case TCP_QUERY_SENDMAP: 14963 if ((reqr->req_param == tp->snd_max) || 14964 (tp->snd_max == tp->snd_una)){ 14965 /* Unlikely */ 14966 return (0); 14967 } 14968 rsm = tqhash_find(rack->r_ctl.tqh, reqr->req_param); 14969 if (rsm == NULL) { 14970 /* Can't find that seq -- unlikely */ 14971 return (0); 14972 } 14973 reqr->sendmap_start = rsm->r_start; 14974 reqr->sendmap_end = rsm->r_end; 14975 reqr->sendmap_send_cnt = rsm->r_rtr_cnt; 14976 reqr->sendmap_fas = rsm->r_fas; 14977 if (reqr->sendmap_send_cnt > SNDMAP_NRTX) 14978 reqr->sendmap_send_cnt = SNDMAP_NRTX; 14979 for(i=0; i<reqr->sendmap_send_cnt; i++) 14980 reqr->sendmap_time[i] = rsm->r_tim_lastsent[i]; 14981 reqr->sendmap_ack_arrival = rsm->r_ack_arrival; 14982 reqr->sendmap_flags = rsm->r_flags & SNDMAP_MASK; 14983 reqr->sendmap_r_rtr_bytes = rsm->r_rtr_bytes; 14984 reqr->sendmap_dupacks = rsm->r_dupack; 14985 rack_log_chg_info(tp, rack, 1, 14986 rsm->r_start, 14987 rsm->r_end, 14988 rsm->r_flags); 14989 return(1); 14990 break; 14991 case TCP_QUERY_TIMERS_UP: 14992 if (rack->r_ctl.rc_hpts_flags == 0) { 14993 /* no timers up */ 14994 return (0); 14995 } 14996 reqr->timer_hpts_flags = rack->r_ctl.rc_hpts_flags; 14997 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14998 reqr->timer_pacing_to = rack->r_ctl.rc_last_output_to; 14999 } 15000 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 15001 reqr->timer_timer_exp = rack->r_ctl.rc_timer_exp; 15002 } 15003 rack_log_chg_info(tp, rack, 2, 15004 rack->r_ctl.rc_hpts_flags, 15005 rack->r_ctl.rc_last_output_to, 15006 rack->r_ctl.rc_timer_exp); 15007 return (1); 15008 break; 15009 case TCP_QUERY_RACK_TIMES: 15010 /* Reordering items */ 15011 reqr->rack_num_dsacks = rack->r_ctl.num_dsack; 15012 reqr->rack_reorder_ts = rack->r_ctl.rc_reorder_ts; 15013 /* Timerstamps and timers */ 15014 reqr->rack_rxt_last_time = rack->r_ctl.rc_tlp_rxt_last_time; 15015 reqr->rack_min_rtt = rack->r_ctl.rc_rack_min_rtt; 15016 reqr->rack_rtt = rack->rc_rack_rtt; 15017 reqr->rack_tmit_time = rack->r_ctl.rc_rack_tmit_time; 15018 reqr->rack_srtt_measured = rack->rc_srtt_measure_made; 15019 /* PRR data */ 15020 reqr->rack_sacked = rack->r_ctl.rc_sacked; 15021 reqr->rack_holes_rxt = rack->r_ctl.rc_holes_rxt; 15022 reqr->rack_prr_delivered = rack->r_ctl.rc_prr_delivered; 15023 reqr->rack_prr_recovery_fs = 
rack->r_ctl.rc_prr_recovery_fs; 15024 reqr->rack_prr_sndcnt = rack->r_ctl.rc_prr_sndcnt; 15025 reqr->rack_prr_out = rack->r_ctl.rc_prr_out; 15026 /* TLP and persists info */ 15027 reqr->rack_tlp_out = rack->rc_tlp_in_progress; 15028 reqr->rack_tlp_cnt_out = rack->r_ctl.rc_tlp_cnt_out; 15029 if (rack->rc_in_persist) { 15030 reqr->rack_time_went_idle = rack->r_ctl.rc_went_idle_time; 15031 reqr->rack_in_persist = 1; 15032 } else { 15033 reqr->rack_time_went_idle = 0; 15034 reqr->rack_in_persist = 0; 15035 } 15036 if (rack->r_wanted_output) 15037 reqr->rack_wanted_output = 1; 15038 else 15039 reqr->rack_wanted_output = 0; 15040 return (1); 15041 break; 15042 default: 15043 return (-EINVAL); 15044 } 15045 } 15046 15047 static void 15048 rack_switch_failed(struct tcpcb *tp) 15049 { 15050 /* 15051 * This method gets called if a stack switch was 15052 * attempted and it failed. We are left 15053 * but our hpts timers were stopped and we 15054 * need to validate time units and t_flags2. 15055 */ 15056 struct tcp_rack *rack; 15057 struct timeval tv; 15058 uint32_t cts; 15059 uint32_t toval; 15060 struct hpts_diag diag; 15061 15062 rack = (struct tcp_rack *)tp->t_fb_ptr; 15063 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC); 15064 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 15065 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 15066 else 15067 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 15068 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 15069 tp->t_flags2 |= TF2_MBUF_ACKCMP; 15070 if (tp->t_in_hpts > IHPTS_NONE) { 15071 /* Strange */ 15072 return; 15073 } 15074 cts = tcp_get_usecs(&tv); 15075 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 15076 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 15077 toval = rack->r_ctl.rc_last_output_to - cts; 15078 } else { 15079 /* one slot please */ 15080 toval = HPTS_TICKS_PER_SLOT; 15081 } 15082 } else if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 15083 if (TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 15084 toval = rack->r_ctl.rc_timer_exp - cts; 15085 } else { 15086 /* one slot please */ 15087 toval = HPTS_TICKS_PER_SLOT; 15088 } 15089 } else 15090 toval = HPTS_TICKS_PER_SLOT; 15091 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(toval), 15092 __LINE__, &diag); 15093 rack_log_hpts_diag(rack, cts, &diag, &tv); 15094 } 15095 15096 static int 15097 rack_init_outstanding(struct tcpcb *tp, struct tcp_rack *rack, uint32_t us_cts, void *ptr) 15098 { 15099 struct rack_sendmap *rsm, *ersm; 15100 int insret __diagused; 15101 /* 15102 * When initing outstanding, we must be quite careful 15103 * to not refer to tp->t_fb_ptr. This has the old rack 15104 * pointer in it, not the "new" one (when we are doing 15105 * a stack switch). 
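	 * That is also why the 'ptr' argument (the newly allocated rack
	 * state) is what gets freed on an allocation failure below, never
	 * tp->t_fb_ptr.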
15106 */ 15107 15108 15109 if (tp->t_fb->tfb_chg_query == NULL) { 15110 /* Create a send map for the current outstanding data */ 15111 15112 rsm = rack_alloc(rack); 15113 if (rsm == NULL) { 15114 uma_zfree(rack_pcb_zone, ptr); 15115 return (ENOMEM); 15116 } 15117 rsm->r_no_rtt_allowed = 1; 15118 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 15119 rsm->r_rtr_cnt = 1; 15120 rsm->r_rtr_bytes = 0; 15121 if (tp->t_flags & TF_SENTFIN) 15122 rsm->r_flags |= RACK_HAS_FIN; 15123 rsm->r_end = tp->snd_max; 15124 if (tp->snd_una == tp->iss) { 15125 /* The data space is one beyond snd_una */ 15126 rsm->r_flags |= RACK_HAS_SYN; 15127 rsm->r_start = tp->iss; 15128 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); 15129 } else 15130 rsm->r_start = tp->snd_una; 15131 rsm->r_dupack = 0; 15132 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 15133 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 15134 if (rsm->m) { 15135 rsm->orig_m_len = rsm->m->m_len; 15136 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 15137 } else { 15138 rsm->orig_m_len = 0; 15139 rsm->orig_t_space = 0; 15140 } 15141 } else { 15142 /* 15143 * This can happen if we have a stand-alone FIN or 15144 * SYN. 15145 */ 15146 rsm->m = NULL; 15147 rsm->orig_m_len = 0; 15148 rsm->orig_t_space = 0; 15149 rsm->soff = 0; 15150 } 15151 #ifdef INVARIANTS 15152 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 15153 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p", 15154 insret, rack, rsm); 15155 } 15156 #else 15157 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 15158 #endif 15159 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 15160 rsm->r_in_tmap = 1; 15161 } else { 15162 /* We have a query mechanism, lets use it */ 15163 struct tcp_query_resp qr; 15164 int i; 15165 tcp_seq at; 15166 15167 at = tp->snd_una; 15168 while (at != tp->snd_max) { 15169 memset(&qr, 0, sizeof(qr)); 15170 qr.req = TCP_QUERY_SENDMAP; 15171 qr.req_param = at; 15172 if ((*tp->t_fb->tfb_chg_query)(tp, &qr) == 0) 15173 break; 15174 /* Move forward */ 15175 at = qr.sendmap_end; 15176 /* Now lets build the entry for this one */ 15177 rsm = rack_alloc(rack); 15178 if (rsm == NULL) { 15179 uma_zfree(rack_pcb_zone, ptr); 15180 return (ENOMEM); 15181 } 15182 memset(rsm, 0, sizeof(struct rack_sendmap)); 15183 /* Now configure the rsm and insert it */ 15184 rsm->r_dupack = qr.sendmap_dupacks; 15185 rsm->r_start = qr.sendmap_start; 15186 rsm->r_end = qr.sendmap_end; 15187 if (qr.sendmap_fas) 15188 rsm->r_fas = qr.sendmap_end; 15189 else 15190 rsm->r_fas = rsm->r_start - tp->snd_una; 15191 /* 15192 * We have carefully aligned the bits 15193 * so that all we have to do is copy over 15194 * the bits with the mask. 
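			 * i.e. qr.sendmap_flags uses the same bit positions as
			 * rsm->r_flags, so the single mask with SNDMAP_MASK
			 * below is all that is required.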
15195 */ 15196 rsm->r_flags = qr.sendmap_flags & SNDMAP_MASK; 15197 rsm->r_rtr_bytes = qr.sendmap_r_rtr_bytes; 15198 rsm->r_rtr_cnt = qr.sendmap_send_cnt; 15199 rsm->r_ack_arrival = qr.sendmap_ack_arrival; 15200 for (i=0 ; i<rsm->r_rtr_cnt; i++) 15201 rsm->r_tim_lastsent[i] = qr.sendmap_time[i]; 15202 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 15203 (rsm->r_start - tp->snd_una), &rsm->soff); 15204 if (rsm->m) { 15205 rsm->orig_m_len = rsm->m->m_len; 15206 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 15207 } else { 15208 rsm->orig_m_len = 0; 15209 rsm->orig_t_space = 0; 15210 } 15211 #ifdef INVARIANTS 15212 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 15213 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p", 15214 insret, rack, rsm); 15215 } 15216 #else 15217 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 15218 #endif 15219 if ((rsm->r_flags & RACK_ACKED) == 0) { 15220 TAILQ_FOREACH(ersm, &rack->r_ctl.rc_tmap, r_tnext) { 15221 if (ersm->r_tim_lastsent[(ersm->r_rtr_cnt-1)] > 15222 rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) { 15223 /* 15224 * If the existing ersm was sent at 15225 * a later time than the new one, then 15226 * the new one should appear ahead of this 15227 * ersm. 15228 */ 15229 rsm->r_in_tmap = 1; 15230 TAILQ_INSERT_BEFORE(ersm, rsm, r_tnext); 15231 break; 15232 } 15233 } 15234 if (rsm->r_in_tmap == 0) { 15235 /* 15236 * Not found so shove it on the tail. 15237 */ 15238 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 15239 rsm->r_in_tmap = 1; 15240 } 15241 } else { 15242 if ((rack->r_ctl.rc_sacklast == NULL) || 15243 (SEQ_GT(rsm->r_end, rack->r_ctl.rc_sacklast->r_end))) { 15244 rack->r_ctl.rc_sacklast = rsm; 15245 } 15246 } 15247 rack_log_chg_info(tp, rack, 3, 15248 rsm->r_start, 15249 rsm->r_end, 15250 rsm->r_flags); 15251 } 15252 } 15253 return (0); 15254 } 15255 15256 static void 15257 rack_translate_policer_detect(struct tcp_rack *rack, uint32_t optval) 15258 { 15259 /* 15260 * P = Percent of retransmits 499 = 49.9% 15261 * A = Average number 1 (.1%) -> 169 (16.9%) 15262 * M = Median number of retrans 1 - 16 15263 * MMMM MMMM AAAA AAAA PPPP PPPP PPPP PPPP 15264 * 15265 */ 15266 uint16_t per, upp; 15267 15268 per = optval & 0x0000ffff; 15269 rack->r_ctl.policer_rxt_threshold = (uint32_t)(per & 0xffff); 15270 upp = ((optval & 0xffff0000) >> 16); 15271 rack->r_ctl.policer_avg_threshold = (0x00ff & upp); 15272 rack->r_ctl.policer_med_threshold = ((upp >> 8) & 0x00ff); 15273 if ((rack->r_ctl.policer_rxt_threshold > 0) && 15274 (rack->r_ctl.policer_avg_threshold > 0) && 15275 (rack->r_ctl.policer_med_threshold > 0)) { 15276 rack->policer_detect_on = 1; 15277 } else { 15278 rack->policer_detect_on = 0; 15279 } 15280 rack->r_ctl.saved_policer_val = optval; 15281 policer_detection_log(rack, optval, 15282 rack->r_ctl.policer_avg_threshold, 15283 rack->r_ctl.policer_med_threshold, 15284 rack->r_ctl.policer_rxt_threshold, 11); 15285 } 15286 15287 static int32_t 15288 rack_init(struct tcpcb *tp, void **ptr) 15289 { 15290 struct inpcb *inp = tptoinpcb(tp); 15291 struct tcp_rack *rack = NULL; 15292 uint32_t iwin, snt, us_cts; 15293 size_t sz; 15294 int err, no_query; 15295 15296 tcp_hpts_init(tp); 15297 15298 /* 15299 * First are we the initial or are we a switched stack? 15300 * If we are initing via tcp_newtcppcb the ptr passed 15301 * will be tp->t_fb_ptr. If its a stack switch that 15302 * has a previous stack we can query it will be a local 15303 * var that will in the end be set into t_fb_ptr. 
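	 * In short: ptr == &tp->t_fb_ptr means a fresh init with no previous
	 * stack to query, anything else is a stack switch where the old
	 * stack can be interrogated through its tfb_chg_query handler; that
	 * is what the no_query flag set below captures.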
15304 */ 15305 if (ptr == &tp->t_fb_ptr) 15306 no_query = 1; 15307 else 15308 no_query = 0; 15309 *ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 15310 if (*ptr == NULL) { 15311 /* 15312 * We need to allocate memory but cant. The INP and INP_INFO 15313 * locks and they are recursive (happens during setup. So a 15314 * scheme to drop the locks fails :( 15315 * 15316 */ 15317 return(ENOMEM); 15318 } 15319 memset(*ptr, 0, sizeof(struct tcp_rack)); 15320 rack = (struct tcp_rack *)*ptr; 15321 rack->r_ctl.tqh = malloc(sizeof(struct tailq_hash), M_TCPFSB, M_NOWAIT); 15322 if (rack->r_ctl.tqh == NULL) { 15323 uma_zfree(rack_pcb_zone, rack); 15324 return(ENOMEM); 15325 } 15326 tqhash_init(rack->r_ctl.tqh); 15327 TAILQ_INIT(&rack->r_ctl.rc_free); 15328 TAILQ_INIT(&rack->r_ctl.rc_tmap); 15329 rack->rc_tp = tp; 15330 rack->rc_inp = inp; 15331 /* Set the flag */ 15332 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0; 15333 /* Probably not needed but lets be sure */ 15334 rack_clear_rate_sample(rack); 15335 /* 15336 * Save off the default values, socket options will poke 15337 * at these if pacing is not on or we have not yet 15338 * reached where pacing is on (gp_ready/fixed enabled). 15339 * When they get set into the CC module (when gp_ready 15340 * is enabled or we enable fixed) then we will set these 15341 * values into the CC and place in here the old values 15342 * so we have a restoral. Then we will set the flag 15343 * rc_pacing_cc_set. That way whenever we turn off pacing 15344 * or switch off this stack, we will know to go restore 15345 * the saved values. 15346 * 15347 * We specifically put into the beta the ecn value for pacing. 15348 */ 15349 rack->rc_new_rnd_needed = 1; 15350 rack->r_ctl.rc_split_limit = V_tcp_map_split_limit; 15351 /* We want abe like behavior as well */ 15352 15353 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; 15354 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 15355 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 15356 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 15357 rack->r_ctl.policer_del_mss = rack_req_del_mss; 15358 if ((rack_policer_rxt_thresh > 0) && 15359 (rack_policer_avg_thresh > 0) && 15360 (rack_policer_med_thresh > 0)) { 15361 rack->r_ctl.policer_rxt_threshold = rack_policer_rxt_thresh; 15362 rack->r_ctl.policer_avg_threshold = rack_policer_avg_thresh; 15363 rack->r_ctl.policer_med_threshold = rack_policer_med_thresh; 15364 rack->policer_detect_on = 1; 15365 } else { 15366 rack->policer_detect_on = 0; 15367 } 15368 if (rack_fill_cw_state) 15369 rack->rc_pace_to_cwnd = 1; 15370 if (rack_pacing_min_seg) 15371 rack->r_ctl.rc_user_set_min_segs = rack_pacing_min_seg; 15372 if (use_rack_rr) 15373 rack->use_rack_rr = 1; 15374 if (rack_dnd_default) { 15375 rack->rc_pace_dnd = 1; 15376 } 15377 if (V_tcp_delack_enabled) 15378 tp->t_delayed_ack = 1; 15379 else 15380 tp->t_delayed_ack = 0; 15381 #ifdef TCP_ACCOUNTING 15382 if (rack_tcp_accounting) { 15383 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 15384 } 15385 #endif 15386 rack->r_ctl.pcm_i.cnt_alloc = RACK_DEFAULT_PCM_ARRAY; 15387 sz = (sizeof(struct rack_pcm_stats) * rack->r_ctl.pcm_i.cnt_alloc); 15388 rack->r_ctl.pcm_s = malloc(sz,M_TCPPCM, M_NOWAIT); 15389 if (rack->r_ctl.pcm_s == NULL) { 15390 rack->r_ctl.pcm_i.cnt_alloc = 0; 15391 } 15392 #ifdef NETFLIX_STATS 15393 rack->r_ctl.side_chan_dis_mask = tcp_sidechannel_disable_mask; 15394 #endif 15395 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss; 15396 rack->r_ctl.rack_per_upper_bound_ca = 
(uint8_t)rack_per_upper_bound_ca; 15397 if (rack_enable_shared_cwnd) 15398 rack->rack_enable_scwnd = 1; 15399 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; 15400 rack->rc_user_set_max_segs = rack_hptsi_segments; 15401 rack->r_ctl.max_reduction = rack_max_reduce; 15402 rack->rc_force_max_seg = 0; 15403 TAILQ_INIT(&rack->r_ctl.opt_list); 15404 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn; 15405 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn; 15406 if (rack_hibeta_setting) { 15407 rack->rack_hibeta = 1; 15408 if ((rack_hibeta_setting >= 50) && 15409 (rack_hibeta_setting <= 100)) { 15410 rack->r_ctl.rc_saved_beta.beta = rack_hibeta_setting; 15411 rack->r_ctl.saved_hibeta = rack_hibeta_setting; 15412 } 15413 } else { 15414 rack->r_ctl.saved_hibeta = 50; 15415 } 15416 /* 15417 * We initialize to all ones so we never match 0 15418 * just in case the client sends in 0, it hopefully 15419 * will never have all 1's in ms :-) 15420 */ 15421 rack->r_ctl.last_tm_mark = 0xffffffffffffffff; 15422 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 15423 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 15424 rack->r_ctl.pol_bw_comp = rack_policing_do_bw_comp; 15425 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 15426 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 15427 rack->r_ctl.rc_highest_us_rtt = 0; 15428 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 15429 rack->pcm_enabled = rack_pcm_is_enabled; 15430 if (rack_fillcw_bw_cap) 15431 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 15432 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 15433 if (rack_use_cmp_acks) 15434 rack->r_use_cmp_ack = 1; 15435 if (rack_disable_prr) 15436 rack->rack_no_prr = 1; 15437 if (rack_gp_no_rec_chg) 15438 rack->rc_gp_no_rec_chg = 1; 15439 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 15440 rack->r_ctl.pacing_method |= RACK_REG_PACING; 15441 rack->rc_always_pace = 1; 15442 if (rack->rack_hibeta) 15443 rack_set_cc_pacing(rack); 15444 } else 15445 rack->rc_always_pace = 0; 15446 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 15447 rack->r_mbuf_queue = 1; 15448 else 15449 rack->r_mbuf_queue = 0; 15450 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15451 if (rack_limits_scwnd) 15452 rack->r_limit_scw = 1; 15453 else 15454 rack->r_limit_scw = 0; 15455 rack_init_retransmit_value(rack, rack_rxt_controls); 15456 rack->rc_labc = V_tcp_abc_l_var; 15457 if (rack_honors_hpts_min_to) 15458 rack->r_use_hpts_min = 1; 15459 if (tp->snd_una != 0) { 15460 rack->r_ctl.idle_snd_una = tp->snd_una; 15461 rack->rc_sendvars_notset = 0; 15462 /* 15463 * Make sure any TCP timers are not running. 15464 */ 15465 tcp_timer_stop(tp); 15466 } else { 15467 /* 15468 * Server side, we are called from the 15469 * syn-cache. This means none of the 15470 * snd_una/max are set yet so we have 15471 * to defer this until the first send. 
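		 * The rc_sendvars_notset flag set just below records that
		 * deferral so the sequence based setup can be completed on
		 * the first send.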
15472 */ 15473 rack->rc_sendvars_notset = 1; 15474 } 15475 15476 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; 15477 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 15478 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 15479 rack->r_ctl.rc_min_to = rack_min_to; 15480 microuptime(&rack->r_ctl.act_rcv_time); 15481 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 15482 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; 15483 if (rack_hw_up_only) 15484 rack->r_up_only = 1; 15485 if (rack_do_dyn_mul) { 15486 /* When dynamic adjustment is on CA needs to start at 100% */ 15487 rack->rc_gp_dyn_mul = 1; 15488 if (rack_do_dyn_mul >= 100) 15489 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 15490 } else 15491 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 15492 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 15493 if (rack_timely_off) { 15494 rack->rc_skip_timely = 1; 15495 } 15496 if (rack->rc_skip_timely) { 15497 rack->r_ctl.rack_per_of_gp_rec = 90; 15498 rack->r_ctl.rack_per_of_gp_ca = 100; 15499 rack->r_ctl.rack_per_of_gp_ss = 250; 15500 } 15501 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 15502 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 15503 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 15504 15505 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 15506 rack_probertt_filter_life); 15507 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 15508 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 15509 rack->r_ctl.rc_time_of_last_probertt = us_cts; 15510 rack->r_ctl.rc_went_idle_time = us_cts; 15511 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks() - (tcp_ack_war_time_window + 1); 15512 rack->r_ctl.rc_time_probertt_starts = 0; 15513 15514 rack->r_ctl.gp_rnd_thresh = rack_rnd_cnt_req & 0xff; 15515 if (rack_rnd_cnt_req & 0x10000) 15516 rack->r_ctl.gate_to_fs = 1; 15517 rack->r_ctl.gp_gain_req = rack_gp_gain_req; 15518 if ((rack_rnd_cnt_req & 0x100) > 0) { 15519 15520 } 15521 if (rack_dsack_std_based & 0x1) { 15522 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 15523 rack->rc_rack_tmr_std_based = 1; 15524 } 15525 if (rack_dsack_std_based & 0x2) { 15526 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 15527 rack->rc_rack_use_dsack = 1; 15528 } 15529 /* We require at least one measurement, even if the sysctl is 0 */ 15530 if (rack_req_measurements) 15531 rack->r_ctl.req_measurements = rack_req_measurements; 15532 else 15533 rack->r_ctl.req_measurements = 1; 15534 if (rack_enable_hw_pacing) 15535 rack->rack_hdw_pace_ena = 1; 15536 if (rack_hw_rate_caps) 15537 rack->r_rack_hw_rate_caps = 1; 15538 if (rack_non_rxt_use_cr) 15539 rack->rack_rec_nonrxt_use_cr = 1; 15540 /* Lets setup the fsb block */ 15541 err = rack_init_fsb(tp, rack); 15542 if (err) { 15543 uma_zfree(rack_pcb_zone, *ptr); 15544 *ptr = NULL; 15545 return (err); 15546 } 15547 if (rack_do_hystart) { 15548 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 15549 if (rack_do_hystart > 1) 15550 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 15551 if (rack_do_hystart > 2) 15552 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 15553 } 15554 /* Log what we will do with queries */ 15555 rack_log_chg_info(tp, rack, 7, 15556 no_query, 0, 0); 15557 if (rack_def_profile) 15558 rack_set_profile(rack, rack_def_profile); 15559 /* Cancel the GP measurement in progress */ 15560 tp->t_flags &= ~TF_GPUTINPROG; 15561 if ((tp->t_state != TCPS_CLOSED) && 15562 
(tp->t_state != TCPS_TIME_WAIT)) { 15563 /* 15564 * We are already open, we may 15565 * need to adjust a few things. 15566 */ 15567 if (SEQ_GT(tp->snd_max, tp->iss)) 15568 snt = tp->snd_max - tp->iss; 15569 else 15570 snt = 0; 15571 iwin = rc_init_window(rack); 15572 if ((snt < iwin) && 15573 (no_query == 1)) { 15574 /* We are not past the initial window 15575 * on the first init (i.e. a stack switch 15576 * has not yet occured) so we need to make 15577 * sure cwnd and ssthresh is correct. 15578 */ 15579 if (tp->snd_cwnd < iwin) 15580 tp->snd_cwnd = iwin; 15581 /* 15582 * If we are within the initial window 15583 * we want ssthresh to be unlimited. Setting 15584 * it to the rwnd (which the default stack does 15585 * and older racks) is not really a good idea 15586 * since we want to be in SS and grow both the 15587 * cwnd and the rwnd (via dynamic rwnd growth). If 15588 * we set it to the rwnd then as the peer grows its 15589 * rwnd we will be stuck in CA and never hit SS. 15590 * 15591 * Its far better to raise it up high (this takes the 15592 * risk that there as been a loss already, probably 15593 * we should have an indicator in all stacks of loss 15594 * but we don't), but considering the normal use this 15595 * is a risk worth taking. The consequences of not 15596 * hitting SS are far worse than going one more time 15597 * into it early on (before we have sent even a IW). 15598 * It is highly unlikely that we will have had a loss 15599 * before getting the IW out. 15600 */ 15601 tp->snd_ssthresh = 0xffffffff; 15602 } 15603 /* 15604 * Any init based on sequence numbers 15605 * should be done in the deferred init path 15606 * since we can be CLOSED and not have them 15607 * inited when rack_init() is called. We 15608 * are not closed so lets call it. 15609 */ 15610 rack_deferred_init(tp, rack); 15611 } 15612 if ((tp->t_state != TCPS_CLOSED) && 15613 (tp->t_state != TCPS_TIME_WAIT) && 15614 (no_query == 0) && 15615 (tp->snd_una != tp->snd_max)) { 15616 err = rack_init_outstanding(tp, rack, us_cts, *ptr); 15617 if (err) { 15618 *ptr = NULL; 15619 return(err); 15620 } 15621 } 15622 rack_stop_all_timers(tp, rack); 15623 /* Setup all the t_flags2 */ 15624 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 15625 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 15626 else 15627 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 15628 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 15629 tp->t_flags2 |= TF2_MBUF_ACKCMP; 15630 /* 15631 * Timers in Rack are kept in microseconds so lets 15632 * convert any initial incoming variables 15633 * from ticks into usecs. Note that we 15634 * also change the values of t_srtt and t_rttvar, if 15635 * they are non-zero. They are kept with a 5 15636 * bit decimal so we have to carefully convert 15637 * these to get the full precision. 
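	 * As a rough sketch of the conversion (using the 5 bit fixed point
	 * mentioned above): srtt_usec ~= (t_srtt * usecs_per_tick) >> 5.
	 * rack_convert_rtts() below does the real work.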
15638 */ 15639 rack_convert_rtts(tp); 15640 rack_log_hystart_event(rack, rack->r_ctl.roundends, 20); 15641 if ((tptoinpcb(tp)->inp_flags & INP_DROPPED) == 0) { 15642 /* We do not start any timers on DROPPED connections */ 15643 if (tp->t_fb->tfb_chg_query == NULL) { 15644 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 15645 } else { 15646 struct tcp_query_resp qr; 15647 int ret; 15648 15649 memset(&qr, 0, sizeof(qr)); 15650 15651 /* Get the misc time stamps and such for rack */ 15652 qr.req = TCP_QUERY_RACK_TIMES; 15653 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); 15654 if (ret == 1) { 15655 rack->r_ctl.rc_reorder_ts = qr.rack_reorder_ts; 15656 rack->r_ctl.num_dsack = qr.rack_num_dsacks; 15657 rack->r_ctl.rc_tlp_rxt_last_time = qr.rack_rxt_last_time; 15658 rack->r_ctl.rc_rack_min_rtt = qr.rack_min_rtt; 15659 rack->rc_rack_rtt = qr.rack_rtt; 15660 rack->r_ctl.rc_rack_tmit_time = qr.rack_tmit_time; 15661 rack->r_ctl.rc_sacked = qr.rack_sacked; 15662 rack->r_ctl.rc_holes_rxt = qr.rack_holes_rxt; 15663 rack->r_ctl.rc_prr_delivered = qr.rack_prr_delivered; 15664 rack->r_ctl.rc_prr_recovery_fs = qr.rack_prr_recovery_fs; 15665 rack->r_ctl.rc_prr_sndcnt = qr.rack_prr_sndcnt; 15666 rack->r_ctl.rc_prr_out = qr.rack_prr_out; 15667 if (qr.rack_tlp_out) { 15668 rack->rc_tlp_in_progress = 1; 15669 rack->r_ctl.rc_tlp_cnt_out = qr.rack_tlp_cnt_out; 15670 } else { 15671 rack->rc_tlp_in_progress = 0; 15672 rack->r_ctl.rc_tlp_cnt_out = 0; 15673 } 15674 if (qr.rack_srtt_measured) 15675 rack->rc_srtt_measure_made = 1; 15676 if (qr.rack_in_persist == 1) { 15677 rack->r_ctl.rc_went_idle_time = qr.rack_time_went_idle; 15678 #ifdef NETFLIX_SHARED_CWND 15679 if (rack->r_ctl.rc_scw) { 15680 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 15681 rack->rack_scwnd_is_idle = 1; 15682 } 15683 #endif 15684 rack->r_ctl.persist_lost_ends = 0; 15685 rack->probe_not_answered = 0; 15686 rack->forced_ack = 0; 15687 tp->t_rxtshift = 0; 15688 rack->rc_in_persist = 1; 15689 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 15690 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 15691 } 15692 if (qr.rack_wanted_output) 15693 rack->r_wanted_output = 1; 15694 rack_log_chg_info(tp, rack, 6, 15695 qr.rack_min_rtt, 15696 qr.rack_rtt, 15697 qr.rack_reorder_ts); 15698 } 15699 /* Get the old stack timers */ 15700 qr.req_param = 0; 15701 qr.req = TCP_QUERY_TIMERS_UP; 15702 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); 15703 if (ret) { 15704 /* 15705 * non-zero return means we have a timer('s) 15706 * to start. Zero means no timer (no keepalive 15707 * I suppose). 
15708 */ 15709 uint32_t tov = 0; 15710 15711 rack->r_ctl.rc_hpts_flags = qr.timer_hpts_flags; 15712 if (qr.timer_hpts_flags & PACE_PKT_OUTPUT) { 15713 rack->r_ctl.rc_last_output_to = qr.timer_pacing_to; 15714 if (TSTMP_GT(qr.timer_pacing_to, us_cts)) 15715 tov = qr.timer_pacing_to - us_cts; 15716 else 15717 tov = HPTS_TICKS_PER_SLOT; 15718 } 15719 if (qr.timer_hpts_flags & PACE_TMR_MASK) { 15720 rack->r_ctl.rc_timer_exp = qr.timer_timer_exp; 15721 if (tov == 0) { 15722 if (TSTMP_GT(qr.timer_timer_exp, us_cts)) 15723 tov = qr.timer_timer_exp - us_cts; 15724 else 15725 tov = HPTS_TICKS_PER_SLOT; 15726 } 15727 } 15728 rack_log_chg_info(tp, rack, 4, 15729 rack->r_ctl.rc_hpts_flags, 15730 rack->r_ctl.rc_last_output_to, 15731 rack->r_ctl.rc_timer_exp); 15732 if (tov) { 15733 struct hpts_diag diag; 15734 15735 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(tov), 15736 __LINE__, &diag); 15737 rack_log_hpts_diag(rack, us_cts, &diag, &rack->r_ctl.act_rcv_time); 15738 } 15739 } 15740 } 15741 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, 15742 __LINE__, RACK_RTTS_INIT); 15743 } 15744 return (0); 15745 } 15746 15747 static int 15748 rack_handoff_ok(struct tcpcb *tp) 15749 { 15750 if ((tp->t_state == TCPS_CLOSED) || 15751 (tp->t_state == TCPS_LISTEN)) { 15752 /* Sure no problem though it may not stick */ 15753 return (0); 15754 } 15755 if ((tp->t_state == TCPS_SYN_SENT) || 15756 (tp->t_state == TCPS_SYN_RECEIVED)) { 15757 /* 15758 * We really don't know if you support sack, 15759 * you have to get to ESTAB or beyond to tell. 15760 */ 15761 return (EAGAIN); 15762 } 15763 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { 15764 /* 15765 * Rack will only send a FIN after all data is acknowledged. 15766 * So in this case we have more data outstanding. We can't 15767 * switch stacks until either all data and only the FIN 15768 * is left (in which case rack_init() now knows how 15769 * to deal with that) <or> all is acknowledged and we 15770 * are only left with incoming data, though why you 15771 * would want to switch to rack after all data is acknowledged 15772 * I have no idea (rrs)! 15773 */ 15774 return (EAGAIN); 15775 } 15776 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 15777 return (0); 15778 } 15779 /* 15780 * If we reach here we don't do SACK on this connection so we can 15781 * never do rack. 
15782 */ 15783 return (EINVAL); 15784 } 15785 15786 static void 15787 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 15788 { 15789 15790 if (tp->t_fb_ptr) { 15791 uint32_t cnt_free = 0; 15792 struct tcp_rack *rack; 15793 struct rack_sendmap *rsm; 15794 15795 tcp_handle_orphaned_packets(tp); 15796 tp->t_flags &= ~TF_FORCEDATA; 15797 rack = (struct tcp_rack *)tp->t_fb_ptr; 15798 rack_log_pacing_delay_calc(rack, 15799 0, 15800 0, 15801 0, 15802 rack_get_gp_est(rack), /* delRate */ 15803 rack_get_lt_bw(rack), /* rttProp */ 15804 20, __LINE__, NULL, 0); 15805 #ifdef NETFLIX_SHARED_CWND 15806 if (rack->r_ctl.rc_scw) { 15807 uint32_t limit; 15808 15809 if (rack->r_limit_scw) 15810 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 15811 else 15812 limit = 0; 15813 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 15814 rack->r_ctl.rc_scw_index, 15815 limit); 15816 rack->r_ctl.rc_scw = NULL; 15817 } 15818 #endif 15819 if (rack->r_ctl.fsb.tcp_ip_hdr) { 15820 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); 15821 rack->r_ctl.fsb.tcp_ip_hdr = NULL; 15822 rack->r_ctl.fsb.th = NULL; 15823 } 15824 if (rack->rc_always_pace == 1) { 15825 rack_remove_pacing(rack); 15826 } 15827 /* Clean up any options if they were not applied */ 15828 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 15829 struct deferred_opt_list *dol; 15830 15831 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 15832 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 15833 free(dol, M_TCPDO); 15834 } 15835 /* rack does not use force data but other stacks may clear it */ 15836 if (rack->r_ctl.crte != NULL) { 15837 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 15838 rack->rack_hdrw_pacing = 0; 15839 rack->r_ctl.crte = NULL; 15840 } 15841 #ifdef TCP_BLACKBOX 15842 tcp_log_flowend(tp); 15843 #endif 15844 /* 15845 * Lets take a different approach to purging just 15846 * get each one and free it like a cum-ack would and 15847 * not use a foreach loop. 
15848 */ 15849 rsm = tqhash_min(rack->r_ctl.tqh); 15850 while (rsm) { 15851 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); 15852 rack->r_ctl.rc_num_maps_alloced--; 15853 uma_zfree(rack_zone, rsm); 15854 rsm = tqhash_min(rack->r_ctl.tqh); 15855 } 15856 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15857 while (rsm) { 15858 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 15859 rack->r_ctl.rc_num_maps_alloced--; 15860 rack->rc_free_cnt--; 15861 cnt_free++; 15862 uma_zfree(rack_zone, rsm); 15863 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15864 } 15865 if (rack->r_ctl.pcm_s != NULL) { 15866 free(rack->r_ctl.pcm_s, M_TCPPCM); 15867 rack->r_ctl.pcm_s = NULL; 15868 rack->r_ctl.pcm_i.cnt_alloc = 0; 15869 rack->r_ctl.pcm_i.cnt = 0; 15870 } 15871 if ((rack->r_ctl.rc_num_maps_alloced > 0) && 15872 (tcp_bblogging_on(tp))) { 15873 union tcp_log_stackspecific log; 15874 struct timeval tv; 15875 15876 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15877 log.u_bbr.flex8 = 10; 15878 log.u_bbr.flex1 = rack->r_ctl.rc_num_maps_alloced; 15879 log.u_bbr.flex2 = rack->rc_free_cnt; 15880 log.u_bbr.flex3 = cnt_free; 15881 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15882 rsm = tqhash_min(rack->r_ctl.tqh); 15883 log.u_bbr.delRate = (uintptr_t)rsm; 15884 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15885 log.u_bbr.cur_del_rate = (uintptr_t)rsm; 15886 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15887 log.u_bbr.pkt_epoch = __LINE__; 15888 (void)tcp_log_event(tp, NULL, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 15889 0, &log, false, NULL, NULL, 0, &tv); 15890 } 15891 KASSERT((rack->r_ctl.rc_num_maps_alloced == 0), 15892 ("rack:%p num_aloc:%u after freeing all?", 15893 rack, 15894 rack->r_ctl.rc_num_maps_alloced)); 15895 rack->rc_free_cnt = 0; 15896 free(rack->r_ctl.tqh, M_TCPFSB); 15897 rack->r_ctl.tqh = NULL; 15898 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 15899 tp->t_fb_ptr = NULL; 15900 } 15901 /* Make sure snd_nxt is correctly set */ 15902 tp->snd_nxt = tp->snd_max; 15903 } 15904 15905 static void 15906 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 15907 { 15908 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 15909 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0; 15910 } 15911 switch (tp->t_state) { 15912 case TCPS_SYN_SENT: 15913 rack->r_state = TCPS_SYN_SENT; 15914 rack->r_substate = rack_do_syn_sent; 15915 break; 15916 case TCPS_SYN_RECEIVED: 15917 rack->r_state = TCPS_SYN_RECEIVED; 15918 rack->r_substate = rack_do_syn_recv; 15919 break; 15920 case TCPS_ESTABLISHED: 15921 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15922 rack->r_state = TCPS_ESTABLISHED; 15923 rack->r_substate = rack_do_established; 15924 break; 15925 case TCPS_CLOSE_WAIT: 15926 rack->r_state = TCPS_CLOSE_WAIT; 15927 rack->r_substate = rack_do_close_wait; 15928 break; 15929 case TCPS_FIN_WAIT_1: 15930 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15931 rack->r_state = TCPS_FIN_WAIT_1; 15932 rack->r_substate = rack_do_fin_wait_1; 15933 break; 15934 case TCPS_CLOSING: 15935 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15936 rack->r_state = TCPS_CLOSING; 15937 rack->r_substate = rack_do_closing; 15938 break; 15939 case TCPS_LAST_ACK: 15940 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15941 rack->r_state = TCPS_LAST_ACK; 15942 rack->r_substate = rack_do_lastack; 15943 break; 15944 case TCPS_FIN_WAIT_2: 15945 rack->r_state = TCPS_FIN_WAIT_2; 15946 rack->r_substate = rack_do_fin_wait_2; 15947 break; 15948 case TCPS_LISTEN: 15949 case TCPS_CLOSED: 15950 case TCPS_TIME_WAIT: 
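		/*
		 * LISTEN, CLOSED and TIME_WAIT fall through to the default
		 * case below and leave the current substate untouched.
		 */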
15951 default: 15952 break; 15953 }; 15954 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 15955 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 15956 15957 } 15958 15959 static void 15960 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 15961 { 15962 /* 15963 * We received an ack, and then did not 15964 * call send or were bounced out due to the 15965 * hpts was running. Now a timer is up as well, is 15966 * it the right timer? 15967 */ 15968 struct rack_sendmap *rsm; 15969 int tmr_up; 15970 15971 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 15972 if (tcp_in_hpts(rack->rc_tp) == 0) { 15973 /* 15974 * Ok we probably need some timer up, but no 15975 * matter what the mask we are not in hpts. We 15976 * may have received an old ack and thus did nothing. 15977 */ 15978 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 15979 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 15980 return; 15981 } 15982 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 15983 return; 15984 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 15985 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 15986 (tmr_up == PACE_TMR_RXT)) { 15987 /* Should be an RXT */ 15988 return; 15989 } 15990 if (rsm == NULL) { 15991 /* Nothing outstanding? */ 15992 if (tp->t_flags & TF_DELACK) { 15993 if (tmr_up == PACE_TMR_DELACK) 15994 /* We are supposed to have delayed ack up and we do */ 15995 return; 15996 } else if (sbavail(&tptosocket(tp)->so_snd) && (tmr_up == PACE_TMR_RXT)) { 15997 /* 15998 * if we hit enobufs then we would expect the possibility 15999 * of nothing outstanding and the RXT up (and the hptsi timer). 16000 */ 16001 return; 16002 } else if (((V_tcp_always_keepalive || 16003 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 16004 (tp->t_state <= TCPS_CLOSING)) && 16005 (tmr_up == PACE_TMR_KEEP) && 16006 (tp->snd_max == tp->snd_una)) { 16007 /* We should have keep alive up and we do */ 16008 return; 16009 } 16010 } 16011 if (SEQ_GT(tp->snd_max, tp->snd_una) && 16012 ((tmr_up == PACE_TMR_TLP) || 16013 (tmr_up == PACE_TMR_RACK) || 16014 (tmr_up == PACE_TMR_RXT))) { 16015 /* 16016 * Either a Rack, TLP or RXT is fine if we 16017 * have outstanding data. 16018 */ 16019 return; 16020 } else if (tmr_up == PACE_TMR_DELACK) { 16021 /* 16022 * If the delayed ack was going to go off 16023 * before the rtx/tlp/rack timer were going to 16024 * expire, then that would be the timer in control. 16025 * Note we don't check the time here trusting the 16026 * code is correct. 16027 */ 16028 return; 16029 } 16030 /* 16031 * Ok the timer originally started is not what we want now. 16032 * We will force the hpts to be stopped if any, and restart 16033 * with the slot set to what was in the saved slot. 
16034 */ 16035 if (tcp_in_hpts(rack->rc_tp)) { 16036 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 16037 uint32_t us_cts; 16038 16039 us_cts = tcp_get_usecs(NULL); 16040 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 16041 rack->r_early = 1; 16042 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 16043 } 16044 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 16045 } 16046 tcp_hpts_remove(rack->rc_tp); 16047 } 16048 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16049 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 16050 } 16051 16052 16053 static void 16054 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts) 16055 { 16056 if ((SEQ_LT(tp->snd_wl1, seq) || 16057 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || 16058 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { 16059 /* keep track of pure window updates */ 16060 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) 16061 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 16062 tp->snd_wnd = tiwin; 16063 rack_validate_fo_sendwin_up(tp, rack); 16064 tp->snd_wl1 = seq; 16065 tp->snd_wl2 = ack; 16066 if (tp->snd_wnd > tp->max_sndwnd) 16067 tp->max_sndwnd = tp->snd_wnd; 16068 rack->r_wanted_output = 1; 16069 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { 16070 tp->snd_wnd = tiwin; 16071 rack_validate_fo_sendwin_up(tp, rack); 16072 tp->snd_wl1 = seq; 16073 tp->snd_wl2 = ack; 16074 } else { 16075 /* Not a valid win update */ 16076 return; 16077 } 16078 if (tp->snd_wnd > tp->max_sndwnd) 16079 tp->max_sndwnd = tp->snd_wnd; 16080 /* Do we exit persists? */ 16081 if ((rack->rc_in_persist != 0) && 16082 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 16083 rack->r_ctl.rc_pace_min_segs))) { 16084 rack_exit_persist(tp, rack, cts); 16085 } 16086 /* Do we enter persists? */ 16087 if ((rack->rc_in_persist == 0) && 16088 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 16089 TCPS_HAVEESTABLISHED(tp->t_state) && 16090 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 16091 sbavail(&tptosocket(tp)->so_snd) && 16092 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 16093 /* 16094 * Here the rwnd is less than 16095 * the pacing size, we are established, 16096 * nothing is outstanding, and there is 16097 * data to send. Enter persists. 
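		 * The exit check above uses the same min(rc_high_rwnd/2,
		 * rc_pace_min_segs) threshold, so the two transitions stay
		 * symmetric.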
16098 */ 16099 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, ack); 16100 } 16101 } 16102 16103 static void 16104 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq) 16105 { 16106 16107 if (tcp_bblogging_on(rack->rc_tp)) { 16108 struct inpcb *inp = tptoinpcb(tp); 16109 union tcp_log_stackspecific log; 16110 struct timeval ltv; 16111 char tcp_hdr_buf[60]; 16112 struct tcphdr *th; 16113 struct timespec ts; 16114 uint32_t orig_snd_una; 16115 uint8_t xx = 0; 16116 16117 #ifdef TCP_REQUEST_TRK 16118 struct tcp_sendfile_track *tcp_req; 16119 16120 if (SEQ_GT(ae->ack, tp->snd_una)) { 16121 tcp_req = tcp_req_find_req_for_seq(tp, (ae->ack-1)); 16122 } else { 16123 tcp_req = tcp_req_find_req_for_seq(tp, ae->ack); 16124 } 16125 #endif 16126 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16127 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 16128 if (rack->rack_no_prr == 0) 16129 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16130 else 16131 log.u_bbr.flex1 = 0; 16132 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 16133 log.u_bbr.use_lt_bw <<= 1; 16134 log.u_bbr.use_lt_bw |= rack->r_might_revert; 16135 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 16136 log.u_bbr.bbr_state = rack->rc_free_cnt; 16137 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 16138 log.u_bbr.pkts_out = tp->t_maxseg; 16139 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 16140 log.u_bbr.flex7 = 1; 16141 log.u_bbr.lost = ae->flags; 16142 log.u_bbr.cwnd_gain = ackval; 16143 log.u_bbr.pacing_gain = 0x2; 16144 if (ae->flags & TSTMP_HDWR) { 16145 /* Record the hardware timestamp if present */ 16146 log.u_bbr.flex3 = M_TSTMP; 16147 ts.tv_sec = ae->timestamp / 1000000000; 16148 ts.tv_nsec = ae->timestamp % 1000000000; 16149 ltv.tv_sec = ts.tv_sec; 16150 ltv.tv_usec = ts.tv_nsec / 1000; 16151 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 16152 } else if (ae->flags & TSTMP_LRO) { 16153 /* Record the LRO the arrival timestamp */ 16154 log.u_bbr.flex3 = M_TSTMP_LRO; 16155 ts.tv_sec = ae->timestamp / 1000000000; 16156 ts.tv_nsec = ae->timestamp % 1000000000; 16157 ltv.tv_sec = ts.tv_sec; 16158 ltv.tv_usec = ts.tv_nsec / 1000; 16159 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 16160 } 16161 log.u_bbr.timeStamp = tcp_get_usecs(<v); 16162 /* Log the rcv time */ 16163 log.u_bbr.delRate = ae->timestamp; 16164 #ifdef TCP_REQUEST_TRK 16165 log.u_bbr.applimited = tp->t_tcpreq_closed; 16166 log.u_bbr.applimited <<= 8; 16167 log.u_bbr.applimited |= tp->t_tcpreq_open; 16168 log.u_bbr.applimited <<= 8; 16169 log.u_bbr.applimited |= tp->t_tcpreq_req; 16170 if (tcp_req) { 16171 /* Copy out any client req info */ 16172 /* seconds */ 16173 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); 16174 /* useconds */ 16175 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); 16176 log.u_bbr.rttProp = tcp_req->timestamp; 16177 log.u_bbr.cur_del_rate = tcp_req->start; 16178 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { 16179 log.u_bbr.flex8 |= 1; 16180 } else { 16181 log.u_bbr.flex8 |= 2; 16182 log.u_bbr.bw_inuse = tcp_req->end; 16183 } 16184 log.u_bbr.flex6 = tcp_req->start_seq; 16185 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { 16186 log.u_bbr.flex8 |= 4; 16187 log.u_bbr.epoch = tcp_req->end_seq; 16188 } 16189 } 16190 #endif 16191 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf)); 16192 th = (struct tcphdr *)tcp_hdr_buf; 16193 th->th_seq = ae->seq; 16194 th->th_ack = ae->ack; 16195 th->th_win = ae->win; 16196 /* Now fill in the ports */ 16197 th->th_sport = inp->inp_fport; 
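		/*
		 * This is an inbound segment reconstructed purely for logging,
		 * so the peer's (foreign) port is the source and our local
		 * port is the destination.
		 */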
16198 th->th_dport = inp->inp_lport; 16199 tcp_set_flags(th, ae->flags); 16200 /* Now do we have a timestamp option? */ 16201 if (ae->flags & HAS_TSTMP) { 16202 u_char *cp; 16203 uint32_t val; 16204 16205 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); 16206 cp = (u_char *)(th + 1); 16207 *cp = TCPOPT_NOP; 16208 cp++; 16209 *cp = TCPOPT_NOP; 16210 cp++; 16211 *cp = TCPOPT_TIMESTAMP; 16212 cp++; 16213 *cp = TCPOLEN_TIMESTAMP; 16214 cp++; 16215 val = htonl(ae->ts_value); 16216 bcopy((char *)&val, 16217 (char *)cp, sizeof(uint32_t)); 16218 val = htonl(ae->ts_echo); 16219 bcopy((char *)&val, 16220 (char *)(cp + 4), sizeof(uint32_t)); 16221 } else 16222 th->th_off = (sizeof(struct tcphdr) >> 2); 16223 16224 /* 16225 * For sane logging we need to play a little trick. 16226 * If the ack were fully processed we would have moved 16227 * snd_una to high_seq, but since compressed acks are 16228 * processed in two phases, at this point (logging) snd_una 16229 * won't be advanced. So we would see multiple acks showing 16230 * the advancement. We can prevent that by "pretending" that 16231 * snd_una was advanced and then un-advancing it so that the 16232 * logging code has the right value for tlb_snd_una. 16233 */ 16234 if (tp->snd_una != high_seq) { 16235 orig_snd_una = tp->snd_una; 16236 tp->snd_una = high_seq; 16237 xx = 1; 16238 } else 16239 xx = 0; 16240 TCP_LOG_EVENTP(tp, th, 16241 &tptosocket(tp)->so_rcv, 16242 &tptosocket(tp)->so_snd, TCP_LOG_IN, 0, 16243 0, &log, true, <v); 16244 if (xx) { 16245 tp->snd_una = orig_snd_una; 16246 } 16247 } 16248 16249 } 16250 16251 static void 16252 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts) 16253 { 16254 uint32_t us_rtt; 16255 /* 16256 * A persist or keep-alive was forced out, update our 16257 * min rtt time. Note now worry about lost responses. 16258 * When a subsequent keep-alive or persist times out 16259 * and forced_ack is still on, then the last probe 16260 * was not responded to. In such cases we have a 16261 * sysctl that controls the behavior. Either we apply 16262 * the rtt but with reduced confidence (0). Or we just 16263 * plain don't apply the rtt estimate. Having data flow 16264 * will clear the probe_not_answered flag i.e. cum-ack 16265 * move forward <or> exiting and reentering persists. 16266 */ 16267 16268 rack->forced_ack = 0; 16269 rack->rc_tp->t_rxtshift = 0; 16270 if ((rack->rc_in_persist && 16271 (tiwin == rack->rc_tp->snd_wnd)) || 16272 (rack->rc_in_persist == 0)) { 16273 /* 16274 * In persists only apply the RTT update if this is 16275 * a response to our window probe. And that 16276 * means the rwnd sent must match the current 16277 * snd_wnd. If it does not, then we got a 16278 * window update ack instead. For keepalive 16279 * we allow the answer no matter what the window. 16280 * 16281 * Note that if the probe_not_answered is set then 16282 * the forced_ack_ts is the oldest one i.e. the first 16283 * probe sent that might have been lost. This assures 16284 * us that if we do calculate an RTT it is longer not 16285 * some short thing. 
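		 * The two behaviors map onto rack_apply_rtt_with_reduced_conf
		 * below: when it is set, the RTT from a retransmitted probe is
		 * still applied but with confidence 0; when it is clear the
		 * sample is discarded.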
16286 */ 16287 if (rack->rc_in_persist) 16288 counter_u64_add(rack_persists_acks, 1); 16289 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 16290 if (us_rtt == 0) 16291 us_rtt = 1; 16292 if (rack->probe_not_answered == 0) { 16293 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 16294 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); 16295 } else { 16296 /* We have a retransmitted probe here too */ 16297 if (rack_apply_rtt_with_reduced_conf) { 16298 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 16299 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1); 16300 } 16301 } 16302 } 16303 } 16304 16305 static void 16306 rack_new_round_starts(struct tcpcb *tp, struct tcp_rack *rack, uint32_t high_seq) 16307 { 16308 /* 16309 * The next send has occurred mark the end of the round 16310 * as when that data gets acknowledged. We can 16311 * also do common things we might need to do when 16312 * a round begins. 16313 */ 16314 rack->r_ctl.roundends = tp->snd_max; 16315 rack->rc_new_rnd_needed = 0; 16316 rack_log_hystart_event(rack, tp->snd_max, 4); 16317 } 16318 16319 16320 static void 16321 rack_log_pcm(struct tcp_rack *rack, uint8_t mod, uint32_t flex1, uint32_t flex2, 16322 uint32_t flex3) 16323 { 16324 if (tcp_bblogging_on(rack->rc_tp)) { 16325 union tcp_log_stackspecific log; 16326 struct timeval tv; 16327 16328 (void)tcp_get_usecs(&tv); 16329 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16330 log.u_bbr.timeStamp = tcp_tv_to_usectick(&tv); 16331 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16332 log.u_bbr.flex8 = mod; 16333 log.u_bbr.flex1 = flex1; 16334 log.u_bbr.flex2 = flex2; 16335 log.u_bbr.flex3 = flex3; 16336 log.u_bbr.flex4 = rack_pcm_every_n_rounds; 16337 log.u_bbr.flex5 = rack->r_ctl.pcm_idle_rounds; 16338 log.u_bbr.bbr_substate = rack->pcm_needed; 16339 log.u_bbr.bbr_substate <<= 1; 16340 log.u_bbr.bbr_substate |= rack->pcm_in_progress; 16341 log.u_bbr.bbr_substate <<= 1; 16342 log.u_bbr.bbr_substate |= rack->pcm_enabled; /* bits are NIE for Needed, Inprogress, Enabled */ 16343 (void)tcp_log_event(rack->rc_tp, NULL, NULL, NULL, TCP_PCM_MEASURE, ERRNO_UNK, 16344 0, &log, false, NULL, NULL, 0, &tv); 16345 } 16346 } 16347 16348 static void 16349 rack_new_round_setup(struct tcpcb *tp, struct tcp_rack *rack, uint32_t high_seq) 16350 { 16351 /* 16352 * The round (current_round) has ended. We now 16353 * setup for the next round by incrementing the 16354 * round numnber and doing any round specific 16355 * things. 16356 */ 16357 rack_log_hystart_event(rack, high_seq, 21); 16358 rack->r_ctl.current_round++; 16359 /* New round (current_round) begins at next send */ 16360 rack->rc_new_rnd_needed = 1; 16361 if ((rack->pcm_enabled == 1) && 16362 (rack->pcm_needed == 0) && 16363 (rack->pcm_in_progress == 0)) { 16364 /* 16365 * If we have enabled PCM, then we need to 16366 * check if the round has adanced to the state 16367 * where one is required. 
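		 * Concretely, per the check below, a measurement becomes due
		 * once (current_round - last_pcm_round) + pcm_idle_rounds >=
		 * rack_pcm_every_n_rounds.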
16368 */ 16369 int rnds; 16370 16371 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; 16372 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { 16373 rack->pcm_needed = 1; 16374 rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_round ); 16375 } else if (rack_verbose_logging) { 16376 rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_round ); 16377 } 16378 } 16379 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { 16380 /* We have hystart enabled send the round info in */ 16381 if (CC_ALGO(tp)->newround != NULL) { 16382 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); 16383 } 16384 } 16385 /* 16386 * For DGP an initial startup check. We want to validate 16387 * that we are not just pushing on slow-start and just 16388 * not gaining.. i.e. filling buffers without getting any 16389 * boost in b/w during the inital slow-start. 16390 */ 16391 if (rack->dgp_on && 16392 (rack->rc_initial_ss_comp == 0) && 16393 (tp->snd_cwnd < tp->snd_ssthresh) && 16394 (rack->r_ctl.num_measurements >= RACK_REQ_AVG) && 16395 (rack->r_ctl.gp_rnd_thresh > 0) && 16396 ((rack->r_ctl.current_round - rack->r_ctl.last_rnd_of_gp_rise) >= rack->r_ctl.gp_rnd_thresh)) { 16397 16398 /* 16399 * We are in the initial SS and we have hd rack_rnd_cnt_req rounds(def:5) where 16400 * we have not gained the required amount in the gp_est (120.0% aka 1200). Lets 16401 * exit SS. 16402 * 16403 * Pick up the flight size now as we enter slowstart (not the 16404 * cwnd which may be inflated). 16405 */ 16406 rack->rc_initial_ss_comp = 1; 16407 16408 if (tcp_bblogging_on(rack->rc_tp)) { 16409 union tcp_log_stackspecific log; 16410 struct timeval tv; 16411 16412 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16413 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 16414 log.u_bbr.flex1 = rack->r_ctl.current_round; 16415 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; 16416 log.u_bbr.flex3 = rack->r_ctl.gp_rnd_thresh; 16417 log.u_bbr.flex4 = rack->r_ctl.gate_to_fs; 16418 log.u_bbr.flex5 = rack->r_ctl.ss_hi_fs; 16419 log.u_bbr.flex8 = 40; 16420 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 16421 0, &log, false, NULL, __func__, __LINE__,&tv); 16422 } 16423 if ((rack->r_ctl.gate_to_fs == 1) && 16424 (tp->snd_cwnd > rack->r_ctl.ss_hi_fs)) { 16425 tp->snd_cwnd = rack->r_ctl.ss_hi_fs; 16426 } 16427 tp->snd_ssthresh = tp->snd_cwnd - 1; 16428 /* Turn off any fast output running */ 16429 rack->r_fast_output = 0; 16430 } 16431 } 16432 16433 static int 16434 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) 16435 { 16436 /* 16437 * Handle a "special" compressed ack mbuf. Each incoming 16438 * ack has only four possible dispositions: 16439 * 16440 * A) It moves the cum-ack forward 16441 * B) It is behind the cum-ack. 16442 * C) It is a window-update ack. 16443 * D) It is a dup-ack. 16444 * 16445 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES 16446 * in the incoming mbuf. We also need to still pay attention 16447 * to nxt_pkt since there may be another packet after this 16448 * one. 
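 * In the classification below, an ack below high_seq is case B (ACK_BEHIND), one above it is case A (ACK_CUMACK), an equal ack with an unchanged window while not in persist is case D (ACK_DUPACK), and the remaining equal-ack cases are case C (ACK_RWND).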
16449 */ 16450 #ifdef TCP_ACCOUNTING 16451 uint64_t ts_val; 16452 uint64_t rdstc; 16453 #endif 16454 int segsiz; 16455 struct timespec ts; 16456 struct tcp_rack *rack; 16457 struct tcp_ackent *ae; 16458 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack; 16459 int cnt, i, did_out, ourfinisacked = 0; 16460 struct tcpopt to_holder, *to = NULL; 16461 #ifdef TCP_ACCOUNTING 16462 int win_up_req = 0; 16463 #endif 16464 int nsegs = 0; 16465 int under_pacing = 0; 16466 int post_recovery = 0; 16467 #ifdef TCP_ACCOUNTING 16468 sched_pin(); 16469 #endif 16470 rack = (struct tcp_rack *)tp->t_fb_ptr; 16471 if (rack->gp_ready && 16472 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) 16473 under_pacing = 1; 16474 16475 if (rack->r_state != tp->t_state) 16476 rack_set_state(tp, rack); 16477 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 16478 (tp->t_flags & TF_GPUTINPROG)) { 16479 /* 16480 * We have a goodput in progress 16481 * and we have entered a late state. 16482 * Do we have enough data in the sb 16483 * to handle the GPUT request? 16484 */ 16485 uint32_t bytes; 16486 16487 bytes = tp->gput_ack - tp->gput_seq; 16488 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 16489 bytes += tp->gput_seq - tp->snd_una; 16490 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 16491 /* 16492 * There are not enough bytes in the socket 16493 * buffer that have been sent to cover this 16494 * measurement. Cancel it. 16495 */ 16496 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 16497 rack->r_ctl.rc_gp_srtt /*flex1*/, 16498 tp->gput_seq, 16499 0, 0, 18, __LINE__, NULL, 0); 16500 tp->t_flags &= ~TF_GPUTINPROG; 16501 } 16502 } 16503 to = &to_holder; 16504 to->to_flags = 0; 16505 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 16506 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 16507 cnt = m->m_len / sizeof(struct tcp_ackent); 16508 counter_u64_add(rack_multi_single_eq, cnt); 16509 high_seq = tp->snd_una; 16510 the_win = tp->snd_wnd; 16511 win_seq = tp->snd_wl1; 16512 win_upd_ack = tp->snd_wl2; 16513 cts = tcp_tv_to_usectick(tv); 16514 ms_cts = tcp_tv_to_mssectick(tv); 16515 rack->r_ctl.rc_rcvtime = cts; 16516 segsiz = ctf_fixed_maxseg(tp); 16517 if ((rack->rc_gp_dyn_mul) && 16518 (rack->use_fixed_rate == 0) && 16519 (rack->rc_always_pace)) { 16520 /* Check in on probertt */ 16521 rack_check_probe_rtt(rack, cts); 16522 } 16523 for (i = 0; i < cnt; i++) { 16524 #ifdef TCP_ACCOUNTING 16525 ts_val = get_cyclecount(); 16526 #endif 16527 rack_clear_rate_sample(rack); 16528 ae = ((mtod(m, struct tcp_ackent *)) + i); 16529 if (ae->flags & TH_FIN) 16530 rack_log_pacing_delay_calc(rack, 16531 0, 16532 0, 16533 0, 16534 rack_get_gp_est(rack), /* delRate */ 16535 rack_get_lt_bw(rack), /* rttProp */ 16536 20, __LINE__, NULL, 0); 16537 /* Setup the window */ 16538 tiwin = ae->win << tp->snd_scale; 16539 if (tiwin > rack->r_ctl.rc_high_rwnd) 16540 rack->r_ctl.rc_high_rwnd = tiwin; 16541 /* figure out the type of ack */ 16542 if (SEQ_LT(ae->ack, high_seq)) { 16543 /* Case B*/ 16544 ae->ack_val_set = ACK_BEHIND; 16545 } else if (SEQ_GT(ae->ack, high_seq)) { 16546 /* Case A */ 16547 ae->ack_val_set = ACK_CUMACK; 16548 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ 16549 /* Case D */ 16550 ae->ack_val_set = ACK_DUPACK; 16551 } else { 16552 /* Case C */ 16553 ae->ack_val_set = ACK_RWND; 16554 } 16555 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__); 16556 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 16557 /* Validate timestamp */ 16558 if 
(ae->flags & HAS_TSTMP) { 16559 /* Setup for a timestamp */ 16560 to->to_flags = TOF_TS; 16561 ae->ts_echo -= tp->ts_offset; 16562 to->to_tsecr = ae->ts_echo; 16563 to->to_tsval = ae->ts_value; 16564 /* 16565 * If echoed timestamp is later than the current time, fall back to 16566 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 16567 * were used when this connection was established. 16568 */ 16569 if (TSTMP_GT(ae->ts_echo, ms_cts)) 16570 to->to_tsecr = 0; 16571 if (tp->ts_recent && 16572 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 16573 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 16574 #ifdef TCP_ACCOUNTING 16575 rdstc = get_cyclecount(); 16576 if (rdstc > ts_val) { 16577 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16578 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 16579 } 16580 } 16581 #endif 16582 continue; 16583 } 16584 } 16585 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 16586 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 16587 tp->ts_recent_age = tcp_ts_getticks(); 16588 tp->ts_recent = ae->ts_value; 16589 } 16590 } else { 16591 /* Setup for a no options */ 16592 to->to_flags = 0; 16593 } 16594 /* Update the rcv time and perform idle reduction possibly */ 16595 if (tp->t_idle_reduce && 16596 (tp->snd_max == tp->snd_una) && 16597 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 16598 counter_u64_add(rack_input_idle_reduces, 1); 16599 rack_cc_after_idle(rack, tp); 16600 } 16601 tp->t_rcvtime = ticks; 16602 /* Now what about ECN of a chain of pure ACKs? */ 16603 if (tcp_ecn_input_segment(tp, ae->flags, 0, 16604 tcp_packets_this_ack(tp, ae->ack), 16605 ae->codepoint)) 16606 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__); 16607 #ifdef TCP_ACCOUNTING 16608 /* Count for the specific type of ack in */ 16609 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16610 tp->tcp_cnt_counters[ae->ack_val_set]++; 16611 } 16612 #endif 16613 /* 16614 * Note how we could move up these in the determination 16615 * above, but we don't so that way the timestamp checks (and ECN) 16616 * is done first before we do any processing on the ACK. 16617 * The non-compressed path through the code has this 16618 * weakness (noted by @jtl) that it actually does some 16619 * processing before verifying the timestamp information. 16620 * We don't take that path here which is why we set 16621 * the ack_val_set first, do the timestamp and ecn 16622 * processing, and then look at what we have setup. 
16623 */ 16624 if (ae->ack_val_set == ACK_BEHIND) { 16625 /* 16626 * Case B: flag reordering if the window is not closed, 16627 * or it could be a keep-alive or persist probe. 16628 */ 16629 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { 16630 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 16631 if (rack->r_ctl.rc_reorder_ts == 0) 16632 rack->r_ctl.rc_reorder_ts = 1; 16633 } 16634 } else if (ae->ack_val_set == ACK_DUPACK) { 16635 /* Case D */ 16636 rack_strike_dupack(rack, ae->ack); 16637 } else if (ae->ack_val_set == ACK_RWND) { 16638 /* Case C */ 16639 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 16640 ts.tv_sec = ae->timestamp / 1000000000; 16641 ts.tv_nsec = ae->timestamp % 1000000000; 16642 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 16643 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 16644 } else { 16645 rack->r_ctl.act_rcv_time = *tv; 16646 } 16647 if (rack->forced_ack) { 16648 rack_handle_probe_response(rack, tiwin, 16649 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 16650 } 16651 #ifdef TCP_ACCOUNTING 16652 win_up_req = 1; 16653 #endif 16654 win_upd_ack = ae->ack; 16655 win_seq = ae->seq; 16656 the_win = tiwin; 16657 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts); 16658 } else { 16659 /* Case A */ 16660 if (SEQ_GT(ae->ack, tp->snd_max)) { 16661 /* 16662 * We just send an ack since the incoming 16663 * ack is beyond the largest seq we sent. 16664 */ 16665 if ((tp->t_flags & TF_ACKNOW) == 0) { 16666 ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt); 16667 if (tp->t_flags & TF_ACKNOW) 16668 rack->r_wanted_output = 1; 16669 } 16670 } else { 16671 nsegs++; 16672 /* If the window changed setup to update */ 16673 if (tiwin != tp->snd_wnd) { 16674 win_upd_ack = ae->ack; 16675 win_seq = ae->seq; 16676 the_win = tiwin; 16677 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts); 16678 } 16679 #ifdef TCP_ACCOUNTING 16680 /* Account for the acks */ 16681 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16682 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); 16683 } 16684 #endif 16685 high_seq = ae->ack; 16686 /* Setup our act_rcv_time */ 16687 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 16688 ts.tv_sec = ae->timestamp / 1000000000; 16689 ts.tv_nsec = ae->timestamp % 1000000000; 16690 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 16691 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 16692 } else { 16693 rack->r_ctl.act_rcv_time = *tv; 16694 } 16695 rack_process_to_cumack(tp, rack, ae->ack, cts, to, 16696 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); 16697 #ifdef TCP_REQUEST_TRK 16698 rack_req_check_for_comp(rack, high_seq); 16699 #endif 16700 if (rack->rc_dsack_round_seen) { 16701 /* Is the dsack round over?
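 * (the round is over once the cumulative ack reaches dsack_round_end)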
*/ 16702 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { 16703 /* Yes it is */ 16704 rack->rc_dsack_round_seen = 0; 16705 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 16706 } 16707 } 16708 } 16709 } 16710 /* And lets be sure to commit the rtt measurements for this ack */ 16711 tcp_rack_xmit_timer_commit(rack, tp); 16712 #ifdef TCP_ACCOUNTING 16713 rdstc = get_cyclecount(); 16714 if (rdstc > ts_val) { 16715 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16716 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 16717 if (ae->ack_val_set == ACK_CUMACK) 16718 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); 16719 } 16720 } 16721 #endif 16722 } 16723 #ifdef TCP_ACCOUNTING 16724 ts_val = get_cyclecount(); 16725 #endif 16726 /* Tend to any collapsed window */ 16727 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) { 16728 /* The peer collapsed the window */ 16729 rack_collapsed_window(rack, (tp->snd_max - high_seq), high_seq, __LINE__); 16730 } else if (rack->rc_has_collapsed) 16731 rack_un_collapse_window(rack, __LINE__); 16732 if ((rack->r_collapse_point_valid) && 16733 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point))) 16734 rack->r_collapse_point_valid = 0; 16735 acked_amount = acked = (high_seq - tp->snd_una); 16736 if (acked) { 16737 /* 16738 * The draft (v3) calls for us to use SEQ_GEQ, but that 16739 * causes issues when we are just going app limited. Lets 16740 * instead use SEQ_GT <or> where its equal but more data 16741 * is outstanding. 16742 * 16743 * Also make sure we are on the last ack of a series. We 16744 * have to have all the ack's processed in queue to know 16745 * if there is something left outstanding. 16746 * 16747 */ 16748 if (SEQ_GEQ(high_seq, rack->r_ctl.roundends) && 16749 (rack->rc_new_rnd_needed == 0) && 16750 (nxt_pkt == 0)) { 16751 /* 16752 * We have crossed into a new round with 16753 * this th_ack value. 16754 */ 16755 rack_new_round_setup(tp, rack, high_seq); 16756 } 16757 /* 16758 * Clear the probe not answered flag 16759 * since cum-ack moved forward. 16760 */ 16761 rack->probe_not_answered = 0; 16762 if (tp->t_flags & TF_NEEDSYN) { 16763 /* 16764 * T/TCP: Connection was half-synchronized, and our SYN has 16765 * been ACK'd (so connection is now fully synchronized). Go 16766 * to non-starred state, increment snd_una for ACK of SYN, 16767 * and check if we can do window scaling. 16768 */ 16769 tp->t_flags &= ~TF_NEEDSYN; 16770 tp->snd_una++; 16771 acked_amount = acked = (high_seq - tp->snd_una); 16772 } 16773 if (acked > sbavail(&so->so_snd)) 16774 acked_amount = sbavail(&so->so_snd); 16775 if (IN_FASTRECOVERY(tp->t_flags) && 16776 (rack->rack_no_prr == 0)) 16777 rack_update_prr(tp, rack, acked_amount, high_seq); 16778 if (IN_RECOVERY(tp->t_flags)) { 16779 if (SEQ_LT(high_seq, tp->snd_recover) && 16780 (SEQ_LT(high_seq, tp->snd_max))) { 16781 tcp_rack_partialack(tp); 16782 } else { 16783 rack_post_recovery(tp, high_seq); 16784 post_recovery = 1; 16785 } 16786 } else if ((rack->rto_from_rec == 1) && 16787 SEQ_GEQ(high_seq, tp->snd_recover)) { 16788 /* 16789 * We were in recovery, hit a rxt timeout 16790 * and never re-entered recovery. The timeout(s) 16791 * made up all the lost data. In such a case 16792 * we need to clear the rto_from_rec flag. 
16793 */ 16794 rack->rto_from_rec = 0; 16795 } 16796 /* Handle the rack-log-ack part (sendmap) */ 16797 if ((sbused(&so->so_snd) == 0) && 16798 (acked > acked_amount) && 16799 (tp->t_state >= TCPS_FIN_WAIT_1) && 16800 (tp->t_flags & TF_SENTFIN)) { 16801 /* 16802 * We must be sure our fin 16803 * was sent and acked (we can be 16804 * in FIN_WAIT_1 without having 16805 * sent the fin). 16806 */ 16807 ourfinisacked = 1; 16808 /* 16809 * Lets make sure snd_una is updated 16810 * since most likely acked_amount = 0 (it 16811 * should be). 16812 */ 16813 tp->snd_una = high_seq; 16814 } 16815 /* Did we make a RTO error? */ 16816 if ((tp->t_flags & TF_PREVVALID) && 16817 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 16818 tp->t_flags &= ~TF_PREVVALID; 16819 if (tp->t_rxtshift == 1 && 16820 (int)(ticks - tp->t_badrxtwin) < 0) 16821 rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__); 16822 } 16823 /* Handle the data in the socket buffer */ 16824 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 16825 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 16826 if (acked_amount > 0) { 16827 uint32_t p_cwnd; 16828 struct mbuf *mfree; 16829 16830 if (post_recovery) { 16831 /* 16832 * Grab the segsiz, multiply by 2 and add the snd_cwnd 16833 * that is the max the CC should add if we are exiting 16834 * recovery and doing a late add. 16835 */ 16836 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16837 p_cwnd <<= 1; 16838 p_cwnd += tp->snd_cwnd; 16839 } 16840 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, post_recovery); 16841 if (post_recovery && (tp->snd_cwnd > p_cwnd)) { 16842 /* Must be non-newreno (cubic) getting too ahead of itself */ 16843 tp->snd_cwnd = p_cwnd; 16844 } 16845 SOCKBUF_LOCK(&so->so_snd); 16846 mfree = sbcut_locked(&so->so_snd, acked_amount); 16847 tp->snd_una = high_seq; 16848 /* Note we want to hold the sb lock through the sendmap adjust */ 16849 rack_adjust_sendmap_head(rack, &so->so_snd); 16850 /* Wake up the socket if we have room to write more */ 16851 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 16852 sowwakeup_locked(so); 16853 m_freem(mfree); 16854 } 16855 /* update progress */ 16856 tp->t_acktime = ticks; 16857 rack_log_progress_event(rack, tp, tp->t_acktime, 16858 PROGRESS_UPDATE, __LINE__); 16859 /* Clear out shifts and such */ 16860 tp->t_rxtshift = 0; 16861 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 16862 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 16863 rack->rc_tlp_in_progress = 0; 16864 rack->r_ctl.rc_tlp_cnt_out = 0; 16865 /* Send recover and snd_nxt must be dragged along */ 16866 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 16867 tp->snd_recover = tp->snd_una; 16868 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 16869 tp->snd_nxt = tp->snd_max; 16870 /* 16871 * If the RXT timer is running we want to 16872 * stop it, so we can restart a TLP (or new RXT). 
16873 */ 16874 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 16875 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16876 tp->snd_wl2 = high_seq; 16877 tp->t_dupacks = 0; 16878 if (under_pacing && 16879 (rack->use_fixed_rate == 0) && 16880 (rack->in_probe_rtt == 0) && 16881 rack->rc_gp_dyn_mul && 16882 rack->rc_always_pace) { 16883 /* Check if we are dragging bottom */ 16884 rack_check_bottom_drag(tp, rack, so); 16885 } 16886 if (tp->snd_una == tp->snd_max) { 16887 tp->t_flags &= ~TF_PREVVALID; 16888 rack->r_ctl.retran_during_recovery = 0; 16889 rack->rc_suspicious = 0; 16890 rack->r_ctl.dsack_byte_cnt = 0; 16891 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 16892 if (rack->r_ctl.rc_went_idle_time == 0) 16893 rack->r_ctl.rc_went_idle_time = 1; 16894 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 16895 if (sbavail(&tptosocket(tp)->so_snd) == 0) 16896 tp->t_acktime = 0; 16897 /* Set so we might enter persists... */ 16898 rack->r_wanted_output = 1; 16899 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16900 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 16901 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 16902 (sbavail(&so->so_snd) == 0) && 16903 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 16904 /* 16905 * The socket was gone and the 16906 * peer sent data (not now in the past), time to 16907 * reset him. 16908 */ 16909 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16910 /* tcp_close will kill the inp pre-log the Reset */ 16911 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 16912 #ifdef TCP_ACCOUNTING 16913 rdstc = get_cyclecount(); 16914 if (rdstc > ts_val) { 16915 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16916 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16917 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16918 } 16919 } 16920 #endif 16921 m_freem(m); 16922 tp = tcp_close(tp); 16923 if (tp == NULL) { 16924 #ifdef TCP_ACCOUNTING 16925 sched_unpin(); 16926 #endif 16927 return (1); 16928 } 16929 /* 16930 * We would normally do drop-with-reset which would 16931 * send back a reset. We can't since we don't have 16932 * all the needed bits. Instead lets arrange for 16933 * a call to tcp_output(). That way since we 16934 * are in the closed state we will generate a reset. 16935 * 16936 * Note if tcp_accounting is on we don't unpin since 16937 * we do that after the goto label. 16938 */ 16939 goto send_out_a_rst; 16940 } 16941 if ((sbused(&so->so_snd) == 0) && 16942 (tp->t_state >= TCPS_FIN_WAIT_1) && 16943 (tp->t_flags & TF_SENTFIN)) { 16944 /* 16945 * If we can't receive any more data, then closing user can 16946 * proceed. Starting the timer is contrary to the 16947 * specification, but if we don't get a FIN we'll hang 16948 * forever. 16949 * 16950 */ 16951 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 16952 soisdisconnected(so); 16953 tcp_timer_activate(tp, TT_2MSL, 16954 (tcp_fast_finwait2_recycle ? 16955 tcp_finwait2_timeout : 16956 TP_MAXIDLE(tp))); 16957 } 16958 if (ourfinisacked == 0) { 16959 /* 16960 * We don't change to fin-wait-2 if we have our fin acked 16961 * which means we are probably in TCPS_CLOSING. 
16962 */ 16963 tcp_state_change(tp, TCPS_FIN_WAIT_2); 16964 } 16965 } 16966 } 16967 /* Wake up the socket if we have room to write more */ 16968 if (sbavail(&so->so_snd)) { 16969 rack->r_wanted_output = 1; 16970 if (ctf_progress_timeout_check(tp, true)) { 16971 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 16972 tp, tick, PROGRESS_DROP, __LINE__); 16973 /* 16974 * We cheat here and don't send a RST, we should send one 16975 * when the pacer drops the connection. 16976 */ 16977 #ifdef TCP_ACCOUNTING 16978 rdstc = get_cyclecount(); 16979 if (rdstc > ts_val) { 16980 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16981 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16982 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16983 } 16984 } 16985 sched_unpin(); 16986 #endif 16987 (void)tcp_drop(tp, ETIMEDOUT); 16988 m_freem(m); 16989 return (1); 16990 } 16991 } 16992 if (ourfinisacked) { 16993 switch(tp->t_state) { 16994 case TCPS_CLOSING: 16995 #ifdef TCP_ACCOUNTING 16996 rdstc = get_cyclecount(); 16997 if (rdstc > ts_val) { 16998 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16999 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 17000 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 17001 } 17002 } 17003 sched_unpin(); 17004 #endif 17005 tcp_twstart(tp); 17006 m_freem(m); 17007 return (1); 17008 break; 17009 case TCPS_LAST_ACK: 17010 #ifdef TCP_ACCOUNTING 17011 rdstc = get_cyclecount(); 17012 if (rdstc > ts_val) { 17013 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17014 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 17015 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 17016 } 17017 } 17018 sched_unpin(); 17019 #endif 17020 tp = tcp_close(tp); 17021 ctf_do_drop(m, tp); 17022 return (1); 17023 break; 17024 case TCPS_FIN_WAIT_1: 17025 #ifdef TCP_ACCOUNTING 17026 rdstc = get_cyclecount(); 17027 if (rdstc > ts_val) { 17028 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17029 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 17030 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 17031 } 17032 } 17033 #endif 17034 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 17035 soisdisconnected(so); 17036 tcp_timer_activate(tp, TT_2MSL, 17037 (tcp_fast_finwait2_recycle ? 17038 tcp_finwait2_timeout : 17039 TP_MAXIDLE(tp))); 17040 } 17041 tcp_state_change(tp, TCPS_FIN_WAIT_2); 17042 break; 17043 default: 17044 break; 17045 } 17046 } 17047 if (rack->r_fast_output) { 17048 /* 17049 * We re doing fast output.. can we expand that? 
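 * i.e. can we grow the prepared fast-send block by the bytes just acked (rack_gain_for_fastoutput() below).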
17050 */ 17051 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 17052 } 17053 #ifdef TCP_ACCOUNTING 17054 rdstc = get_cyclecount(); 17055 if (rdstc > ts_val) { 17056 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17057 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 17058 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 17059 } 17060 } 17061 17062 } else if (win_up_req) { 17063 rdstc = get_cyclecount(); 17064 if (rdstc > ts_val) { 17065 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17066 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 17067 } 17068 } 17069 #endif 17070 } 17071 /* Now is there a next packet, if so we are done */ 17072 m_freem(m); 17073 did_out = 0; 17074 if (nxt_pkt) { 17075 #ifdef TCP_ACCOUNTING 17076 sched_unpin(); 17077 #endif 17078 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 17079 return (0); 17080 } 17081 rack_handle_might_revert(tp, rack); 17082 ctf_calc_rwin(so, tp); 17083 if ((rack->r_wanted_output != 0) || 17084 (rack->r_fast_output != 0) || 17085 (tp->t_flags & TF_ACKNOW )) { 17086 send_out_a_rst: 17087 if (tcp_output(tp) < 0) { 17088 #ifdef TCP_ACCOUNTING 17089 sched_unpin(); 17090 #endif 17091 return (1); 17092 } 17093 did_out = 1; 17094 } 17095 if (tp->t_flags2 & TF2_HPTS_CALLS) 17096 tp->t_flags2 &= ~TF2_HPTS_CALLS; 17097 rack_free_trim(rack); 17098 #ifdef TCP_ACCOUNTING 17099 sched_unpin(); 17100 #endif 17101 rack_timer_audit(tp, rack, &so->so_snd); 17102 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 17103 return (0); 17104 } 17105 17106 #define TCP_LRO_TS_OPTION \ 17107 ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \ 17108 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP) 17109 17110 static int 17111 rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 17112 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, int32_t nxt_pkt, 17113 struct timeval *tv) 17114 { 17115 struct inpcb *inp = tptoinpcb(tp); 17116 struct socket *so = tptosocket(tp); 17117 #ifdef TCP_ACCOUNTING 17118 uint64_t ts_val; 17119 #endif 17120 int32_t thflags, retval, did_out = 0; 17121 int32_t way_out = 0; 17122 /* 17123 * cts - is the current time from tv (caller gets ts) in microseconds. 17124 * ms_cts - is the current time from tv in milliseconds. 17125 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. 17126 */ 17127 uint32_t cts, us_cts, ms_cts; 17128 uint32_t tiwin; 17129 struct timespec ts; 17130 struct tcpopt to; 17131 struct tcp_rack *rack; 17132 struct rack_sendmap *rsm; 17133 int32_t prev_state = 0; 17134 int no_output = 0; 17135 int slot_remaining = 0; 17136 #ifdef TCP_ACCOUNTING 17137 int ack_val_set = 0xf; 17138 #endif 17139 int nsegs; 17140 17141 NET_EPOCH_ASSERT(); 17142 INP_WLOCK_ASSERT(inp); 17143 17144 /* 17145 * tv passed from common code is from either M_TSTMP_LRO or 17146 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 17147 */ 17148 rack = (struct tcp_rack *)tp->t_fb_ptr; 17149 if (rack->rack_deferred_inited == 0) { 17150 /* 17151 * If we are the connecting socket we will 17152 * hit rack_init() when no sequence numbers 17153 * are setup. This makes it so we must defer 17154 * some initialization. Call that now. 17155 */ 17156 rack_deferred_init(tp, rack); 17157 } 17158 /* 17159 * Check to see if we need to skip any output plans. This 17160 * can happen in the non-LRO path where we are pacing and 17161 * must process the ack coming in but need to defer sending 17162 * anything becase a pacing timer is running. 
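 * In that case the ack is still processed here, but tcp_output() is deferred until the pacing timer (rc_last_output_to) would have fired, unless less than the pacer's minimum granularity remains.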
17163 */ 17164 us_cts = tcp_tv_to_usectick(tv); 17165 if (m->m_flags & M_ACKCMP) { 17166 /* 17167 * All compressed ack's are ack's by definition so 17168 * remove any ack required flag and then do the processing. 17169 */ 17170 rack->rc_ack_required = 0; 17171 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 17172 } 17173 thflags = tcp_get_flags(th); 17174 if ((rack->rc_always_pace == 1) && 17175 (rack->rc_ack_can_sendout_data == 0) && 17176 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 17177 (TSTMP_LT(us_cts, rack->r_ctl.rc_last_output_to))) { 17178 /* 17179 * Ok conditions are right for queuing the packets 17180 * but we do have to check the flags in the inp, it 17181 * could be, if a sack is present, we want to be awoken and 17182 * so should process the packets. 17183 */ 17184 slot_remaining = rack->r_ctl.rc_last_output_to - us_cts; 17185 if (rack->rc_tp->t_flags2 & TF2_DONT_SACK_QUEUE) { 17186 no_output = 1; 17187 } else { 17188 /* 17189 * If there is no options, or just a 17190 * timestamp option, we will want to queue 17191 * the packets. This is the same that LRO does 17192 * and will need to change with accurate ECN. 17193 */ 17194 uint32_t *ts_ptr; 17195 int optlen; 17196 17197 optlen = (th->th_off << 2) - sizeof(struct tcphdr); 17198 ts_ptr = (uint32_t *)(th + 1); 17199 if ((optlen == 0) || 17200 ((optlen == TCPOLEN_TSTAMP_APPA) && 17201 (*ts_ptr == TCP_LRO_TS_OPTION))) 17202 no_output = 1; 17203 } 17204 if ((no_output == 1) && (slot_remaining < tcp_min_hptsi_time)) { 17205 /* 17206 * It is unrealistic to think we can pace in less than 17207 * the minimum granularity of the pacer (def:250usec). So 17208 * if we have less than that time remaining we should go 17209 * ahead and allow output to be "early". We will attempt to 17210 * make up for it in any pacing time we try to apply on 17211 * the outbound packet. 17212 */ 17213 no_output = 0; 17214 } 17215 } 17216 /* 17217 * If there is a RST or FIN lets dump out the bw 17218 * with a FIN the connection may go on but we 17219 * may not. 17220 */ 17221 if ((thflags & TH_FIN) || (thflags & TH_RST)) 17222 rack_log_pacing_delay_calc(rack, 17223 rack->r_ctl.gp_bw, 17224 0, 17225 0, 17226 rack_get_gp_est(rack), /* delRate */ 17227 rack_get_lt_bw(rack), /* rttProp */ 17228 20, __LINE__, NULL, 0); 17229 if (m->m_flags & M_ACKCMP) { 17230 panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp); 17231 } 17232 cts = tcp_tv_to_usectick(tv); 17233 ms_cts = tcp_tv_to_mssectick(tv); 17234 nsegs = m->m_pkthdr.lro_nsegs; 17235 counter_u64_add(rack_proc_non_comp_ack, 1); 17236 #ifdef TCP_ACCOUNTING 17237 sched_pin(); 17238 if (thflags & TH_ACK) 17239 ts_val = get_cyclecount(); 17240 #endif 17241 if ((m->m_flags & M_TSTMP) || 17242 (m->m_flags & M_TSTMP_LRO)) { 17243 mbuf_tstmp2timespec(m, &ts); 17244 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 17245 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 17246 } else 17247 rack->r_ctl.act_rcv_time = *tv; 17248 kern_prefetch(rack, &prev_state); 17249 prev_state = 0; 17250 /* 17251 * Unscale the window into a 32-bit value. For the SYN_SENT state 17252 * the scale is zero. 17253 */ 17254 tiwin = th->th_win << tp->snd_scale; 17255 #ifdef TCP_ACCOUNTING 17256 if (thflags & TH_ACK) { 17257 /* 17258 * We have a tradeoff here. We can either do what we are 17259 * doing i.e. pinning to this CPU and then doing the accounting 17260 * <or> we could do a critical enter, setup the rdtsc and cpu 17261 * as in below, and then validate we are on the same CPU on 17262 * exit. 
I have chosen to not do the critical enter since 17263 * that often will gain you a context switch, and instead lock 17264 * us (line above this if) to the same CPU with sched_pin(). This 17265 * means we may be context switched out for a higher priority 17266 * interrupt but we won't be moved to another CPU. 17267 * 17268 * If this occurs (which it won't very often since we most likely 17269 * are running this code in interrupt context and only a higher 17270 * priority will bump us ... clock?) we will falsely add the 17271 * interrupt processing time in on top of the ack processing 17272 * time. This is ok since it is a rare event. 17273 */ 17274 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin, 17275 ctf_fixed_maxseg(tp)); 17276 } 17277 #endif 17278 /* 17279 * Parse options on any incoming segment. 17280 */ 17281 memset(&to, 0, sizeof(to)); 17282 tcp_dooptions(&to, (u_char *)(th + 1), 17283 (th->th_off << 2) - sizeof(struct tcphdr), 17284 (thflags & TH_SYN) ? TO_SYN : 0); 17285 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 17286 __func__)); 17287 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 17288 __func__)); 17289 if (tp->t_flags2 & TF2_PROC_SACK_PROHIBIT) { 17290 /* 17291 * We don't look at SACKs from the 17292 * peer because the MSS is too small which 17293 * can subject us to an attack. 17294 */ 17295 to.to_flags &= ~TOF_SACK; 17296 } 17297 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 17298 (tp->t_flags & TF_GPUTINPROG)) { 17299 /* 17300 * We have a goodput in progress 17301 * and we have entered a late state. 17302 * Do we have enough data in the sb 17303 * to handle the GPUT request? 17304 */ 17305 uint32_t bytes; 17306 17307 bytes = tp->gput_ack - tp->gput_seq; 17308 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 17309 bytes += tp->gput_seq - tp->snd_una; 17310 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 17311 /* 17312 * There are not enough bytes in the socket 17313 * buffer that have been sent to cover this 17314 * measurement. Cancel it.
17315 */ 17316 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 17317 rack->r_ctl.rc_gp_srtt /*flex1*/, 17318 tp->gput_seq, 17319 0, 0, 18, __LINE__, NULL, 0); 17320 tp->t_flags &= ~TF_GPUTINPROG; 17321 } 17322 } 17323 if (tcp_bblogging_on(rack->rc_tp)) { 17324 union tcp_log_stackspecific log; 17325 struct timeval ltv; 17326 #ifdef TCP_REQUEST_TRK 17327 struct tcp_sendfile_track *tcp_req; 17328 17329 if (SEQ_GT(th->th_ack, tp->snd_una)) { 17330 tcp_req = tcp_req_find_req_for_seq(tp, (th->th_ack-1)); 17331 } else { 17332 tcp_req = tcp_req_find_req_for_seq(tp, th->th_ack); 17333 } 17334 #endif 17335 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 17336 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 17337 if (rack->rack_no_prr == 0) 17338 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 17339 else 17340 log.u_bbr.flex1 = 0; 17341 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 17342 log.u_bbr.use_lt_bw <<= 1; 17343 log.u_bbr.use_lt_bw |= rack->r_might_revert; 17344 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 17345 log.u_bbr.bbr_state = rack->rc_free_cnt; 17346 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 17347 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 17348 log.u_bbr.flex3 = m->m_flags; 17349 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 17350 log.u_bbr.lost = thflags; 17351 log.u_bbr.pacing_gain = 0x1; 17352 #ifdef TCP_ACCOUNTING 17353 log.u_bbr.cwnd_gain = ack_val_set; 17354 #endif 17355 log.u_bbr.flex7 = 2; 17356 if (m->m_flags & M_TSTMP) { 17357 /* Record the hardware timestamp if present */ 17358 mbuf_tstmp2timespec(m, &ts); 17359 ltv.tv_sec = ts.tv_sec; 17360 ltv.tv_usec = ts.tv_nsec / 1000; 17361 log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv); 17362 } else if (m->m_flags & M_TSTMP_LRO) { 17363 /* Record the LRO arrival timestamp */ 17364 mbuf_tstmp2timespec(m, &ts); 17365 ltv.tv_sec = ts.tv_sec; 17366 ltv.tv_usec = ts.tv_nsec / 1000; 17367 log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv); 17368 } 17369 log.u_bbr.timeStamp = tcp_get_usecs(&ltv); 17370 /* Log the rcv time */ 17371 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp; 17372 #ifdef TCP_REQUEST_TRK 17373 log.u_bbr.applimited = tp->t_tcpreq_closed; 17374 log.u_bbr.applimited <<= 8; 17375 log.u_bbr.applimited |= tp->t_tcpreq_open; 17376 log.u_bbr.applimited <<= 8; 17377 log.u_bbr.applimited |= tp->t_tcpreq_req; 17378 if (tcp_req) { 17379 /* Copy out any client req info */ 17380 /* seconds */ 17381 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); 17382 /* useconds */ 17383 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); 17384 log.u_bbr.rttProp = tcp_req->timestamp; 17385 log.u_bbr.cur_del_rate = tcp_req->start; 17386 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { 17387 log.u_bbr.flex8 |= 1; 17388 } else { 17389 log.u_bbr.flex8 |= 2; 17390 log.u_bbr.bw_inuse = tcp_req->end; 17391 } 17392 log.u_bbr.flex6 = tcp_req->start_seq; 17393 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { 17394 log.u_bbr.flex8 |= 4; 17395 log.u_bbr.epoch = tcp_req->end_seq; 17396 } 17397 } 17398 #endif 17399 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, 17400 tlen, &log, true, &ltv); 17401 } 17402 /* Remove ack required flag if set, we have one */ 17403 if (thflags & TH_ACK) 17404 rack->rc_ack_required = 0; 17405 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__); 17406 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) { 17407 way_out = 4; 17408 retval = 0; 17409 m_freem(m); 17410 goto done_with_input; 17411 } 17412 /* 17413 * If a segment with the ACK-bit set arrives in the
SYN-SENT state 17414 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9. 17415 */ 17416 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 17417 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 17418 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 17419 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 17420 #ifdef TCP_ACCOUNTING 17421 sched_unpin(); 17422 #endif 17423 return (1); 17424 } 17425 /* 17426 * If timestamps were negotiated during SYN/ACK and a 17427 * segment without a timestamp is received, silently drop 17428 * the segment, unless it is a RST segment or missing timestamps are 17429 * tolerated. 17430 * See section 3.2 of RFC 7323. 17431 */ 17432 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 17433 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 17434 way_out = 5; 17435 retval = 0; 17436 m_freem(m); 17437 goto done_with_input; 17438 } 17439 /* 17440 * Segment received on connection. Reset idle time and keep-alive 17441 * timer. XXX: This should be done after segment validation to 17442 * ignore broken/spoofed segs. 17443 */ 17444 if (tp->t_idle_reduce && 17445 (tp->snd_max == tp->snd_una) && 17446 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 17447 counter_u64_add(rack_input_idle_reduces, 1); 17448 rack_cc_after_idle(rack, tp); 17449 } 17450 tp->t_rcvtime = ticks; 17451 #ifdef STATS 17452 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 17453 #endif 17454 if (tiwin > rack->r_ctl.rc_high_rwnd) 17455 rack->r_ctl.rc_high_rwnd = tiwin; 17456 /* 17457 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 17458 * this to occur after we've validated the segment. 17459 */ 17460 if (tcp_ecn_input_segment(tp, thflags, tlen, 17461 tcp_packets_this_ack(tp, th->th_ack), 17462 iptos)) 17463 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__); 17464 17465 /* 17466 * If echoed timestamp is later than the current time, fall back to 17467 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 17468 * were used when this connection was established. 17469 */ 17470 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 17471 to.to_tsecr -= tp->ts_offset; 17472 if (TSTMP_GT(to.to_tsecr, ms_cts)) 17473 to.to_tsecr = 0; 17474 } 17475 if ((rack->r_rcvpath_rtt_up == 1) && 17476 (to.to_flags & TOF_TS) && 17477 (TSTMP_GEQ(to.to_tsecr, rack->r_ctl.last_rcv_tstmp_for_rtt))) { 17478 uint32_t rtt = 0; 17479 17480 /* 17481 * We are receiving only and thus not sending 17482 * data to do an RTT. We set a flag when we first 17483 * sent this TS to the peer. We now have it back 17484 * and have an RTT to share. We log it as a conf 17485 * 4, we are not so sure about it.. since we 17486 * may have lost an ack. 17487 */ 17488 if (TSTMP_GT(cts, rack->r_ctl.last_time_of_arm_rcv)) 17489 rtt = (cts - rack->r_ctl.last_time_of_arm_rcv); 17490 rack->r_rcvpath_rtt_up = 0; 17491 /* Submit and commit the timer */ 17492 if (rtt > 0) { 17493 tcp_rack_xmit_timer(rack, rtt, 0, rtt, 4, NULL, 1); 17494 tcp_rack_xmit_timer_commit(rack, tp); 17495 } 17496 } 17497 /* 17498 * If its the first time in we need to take care of options and 17499 * verify we can do SACK for rack! 17500 */ 17501 if (rack->r_state == 0) { 17502 /* Should be init'd by rack_init() */ 17503 KASSERT(rack->rc_inp != NULL, 17504 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 17505 if (rack->rc_inp == NULL) { 17506 rack->rc_inp = inp; 17507 } 17508 17509 /* 17510 * Process options only when we get SYN/ACK back. 
The SYN 17511 * case for incoming connections is handled in tcp_syncache. 17512 * According to RFC1323 the window field in a SYN (i.e., a 17513 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 17514 * this is traditional behavior, may need to be cleaned up. 17515 */ 17516 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 17517 /* Handle parallel SYN for ECN */ 17518 tcp_ecn_input_parallel_syn(tp, thflags, iptos); 17519 if ((to.to_flags & TOF_SCALE) && 17520 (tp->t_flags & TF_REQ_SCALE)) { 17521 tp->t_flags |= TF_RCVD_SCALE; 17522 tp->snd_scale = to.to_wscale; 17523 } else 17524 tp->t_flags &= ~TF_REQ_SCALE; 17525 /* 17526 * Initial send window. It will be updated with the 17527 * next incoming segment to the scaled value. 17528 */ 17529 tp->snd_wnd = th->th_win; 17530 rack_validate_fo_sendwin_up(tp, rack); 17531 if ((to.to_flags & TOF_TS) && 17532 (tp->t_flags & TF_REQ_TSTMP)) { 17533 tp->t_flags |= TF_RCVD_TSTMP; 17534 tp->ts_recent = to.to_tsval; 17535 tp->ts_recent_age = cts; 17536 } else 17537 tp->t_flags &= ~TF_REQ_TSTMP; 17538 if (to.to_flags & TOF_MSS) { 17539 tcp_mss(tp, to.to_mss); 17540 } 17541 if ((tp->t_flags & TF_SACK_PERMIT) && 17542 (to.to_flags & TOF_SACKPERM) == 0) 17543 tp->t_flags &= ~TF_SACK_PERMIT; 17544 if (tp->t_flags & TF_FASTOPEN) { 17545 if (to.to_flags & TOF_FASTOPEN) { 17546 uint16_t mss; 17547 17548 if (to.to_flags & TOF_MSS) 17549 mss = to.to_mss; 17550 else 17551 if ((inp->inp_vflag & INP_IPV6) != 0) 17552 mss = TCP6_MSS; 17553 else 17554 mss = TCP_MSS; 17555 tcp_fastopen_update_cache(tp, mss, 17556 to.to_tfo_len, to.to_tfo_cookie); 17557 } else 17558 tcp_fastopen_disable_path(tp); 17559 } 17560 } 17561 /* 17562 * At this point we are at the initial call. Here we decide 17563 * if we are doing RACK or not. We do this by seeing if 17564 * TF_SACK_PERMIT is set and the sack-not-required is clear. 17565 * The code now does do dup-ack counting so if you don't 17566 * switch back you won't get rack & TLP, but you will still 17567 * get this stack. 17568 */ 17569 17570 if ((rack_sack_not_required == 0) && 17571 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 17572 tcp_switch_back_to_default(tp); 17573 (*tp->t_fb->tfb_tcp_do_segment)(tp, m, th, drop_hdrlen, 17574 tlen, iptos); 17575 #ifdef TCP_ACCOUNTING 17576 sched_unpin(); 17577 #endif 17578 return (1); 17579 } 17580 tcp_set_hpts(tp); 17581 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 17582 } 17583 if (thflags & TH_FIN) 17584 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 17585 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 17586 if ((rack->rc_gp_dyn_mul) && 17587 (rack->use_fixed_rate == 0) && 17588 (rack->rc_always_pace)) { 17589 /* Check in on probertt */ 17590 rack_check_probe_rtt(rack, cts); 17591 } 17592 rack_clear_rate_sample(rack); 17593 if ((rack->forced_ack) && 17594 ((tcp_get_flags(th) & TH_RST) == 0)) { 17595 rack_handle_probe_response(rack, tiwin, us_cts); 17596 } 17597 /* 17598 * This is the one exception case where we set the rack state 17599 * always. All other times (timers etc) we must have a rack-state 17600 * set (so we assure we have done the checks above for SACK). 
17601 */ 17602 rack->r_ctl.rc_rcvtime = cts; 17603 if (rack->r_state != tp->t_state) 17604 rack_set_state(tp, rack); 17605 if (SEQ_GT(th->th_ack, tp->snd_una) && 17606 (rsm = tqhash_min(rack->r_ctl.tqh)) != NULL) 17607 kern_prefetch(rsm, &prev_state); 17608 prev_state = rack->r_state; 17609 if ((thflags & TH_RST) && 17610 ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 17611 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || 17612 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq))) { 17613 /* The connection will be killed by a reset check the tracepoint */ 17614 tcp_trace_point(rack->rc_tp, TCP_TP_RESET_RCV); 17615 } 17616 retval = (*rack->r_substate) (m, th, so, 17617 tp, &to, drop_hdrlen, 17618 tlen, tiwin, thflags, nxt_pkt, iptos); 17619 if (retval == 0) { 17620 /* 17621 * If retval is 1 the tcb is unlocked and most likely the tp 17622 * is gone. 17623 */ 17624 INP_WLOCK_ASSERT(inp); 17625 if ((rack->rc_gp_dyn_mul) && 17626 (rack->rc_always_pace) && 17627 (rack->use_fixed_rate == 0) && 17628 rack->in_probe_rtt && 17629 (rack->r_ctl.rc_time_probertt_starts == 0)) { 17630 /* 17631 * If we are going for target, lets recheck before 17632 * we output. 17633 */ 17634 rack_check_probe_rtt(rack, cts); 17635 } 17636 if (rack->set_pacing_done_a_iw == 0) { 17637 /* How much has been acked? */ 17638 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 17639 /* We have enough to set in the pacing segment size */ 17640 rack->set_pacing_done_a_iw = 1; 17641 rack_set_pace_segments(tp, rack, __LINE__, NULL); 17642 } 17643 } 17644 tcp_rack_xmit_timer_commit(rack, tp); 17645 #ifdef TCP_ACCOUNTING 17646 /* 17647 * If we set the ack_val_se to what ack processing we are doing 17648 * we also want to track how many cycles we burned. Note 17649 * the bits after tcp_output we let be "free". This is because 17650 * we are also tracking the tcp_output times as well. Note the 17651 * use of 0xf here since we only have 11 counter (0 - 0xa) and 17652 * 0xf cannot be returned and is what we initialize it too to 17653 * indicate we are not doing the tabulations. 17654 */ 17655 if (ack_val_set != 0xf) { 17656 uint64_t crtsc; 17657 17658 crtsc = get_cyclecount(); 17659 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17660 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 17661 } 17662 } 17663 #endif 17664 if ((nxt_pkt == 0) && (no_output == 0)) { 17665 if ((rack->r_wanted_output != 0) || 17666 (tp->t_flags & TF_ACKNOW) || 17667 (rack->r_fast_output != 0)) { 17668 17669 do_output_now: 17670 if (tcp_output(tp) < 0) { 17671 #ifdef TCP_ACCOUNTING 17672 sched_unpin(); 17673 #endif 17674 return (1); 17675 } 17676 did_out = 1; 17677 } 17678 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 17679 rack_free_trim(rack); 17680 } else if ((nxt_pkt == 0) && (tp->t_flags & TF_ACKNOW)) { 17681 goto do_output_now; 17682 } else if ((no_output == 1) && 17683 (nxt_pkt == 0) && 17684 (tcp_in_hpts(rack->rc_tp) == 0)) { 17685 /* 17686 * We are not in hpts and we had a pacing timer up. Use 17687 * the remaining time (slot_remaining) to restart the timer. 
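 * (slot_remaining was computed earlier, when we decided to defer output behind the pacer, as rc_last_output_to - us_cts)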
17688 */ 17689 KASSERT ((slot_remaining != 0), ("slot remaining is zero for rack:%p tp:%p", rack, tp)); 17690 rack_start_hpts_timer(rack, tp, cts, slot_remaining, 0, 0); 17691 rack_free_trim(rack); 17692 } 17693 /* Clear the flag, it may have been cleared by output but we may not have */ 17694 if ((nxt_pkt == 0) && (tp->t_flags2 & TF2_HPTS_CALLS)) 17695 tp->t_flags2 &= ~TF2_HPTS_CALLS; 17696 /* 17697 * The draft (v3) calls for us to use SEQ_GEQ, but that 17698 * causes issues when we are just going app limited. Lets 17699 * instead use SEQ_GT <or> where its equal but more data 17700 * is outstanding. 17701 * 17702 * Also make sure we are on the last ack of a series. We 17703 * have to have all the ack's processed in queue to know 17704 * if there is something left outstanding. 17705 */ 17706 if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends) && 17707 (rack->rc_new_rnd_needed == 0) && 17708 (nxt_pkt == 0)) { 17709 /* 17710 * We have crossed into a new round with 17711 * the new snd_unae. 17712 */ 17713 rack_new_round_setup(tp, rack, tp->snd_una); 17714 } 17715 if ((nxt_pkt == 0) && 17716 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 17717 (SEQ_GT(tp->snd_max, tp->snd_una) || 17718 (tp->t_flags & TF_DELACK) || 17719 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 17720 (tp->t_state <= TCPS_CLOSING)))) { 17721 /* We could not send (probably in the hpts but stopped the timer earlier)? */ 17722 if ((tp->snd_max == tp->snd_una) && 17723 ((tp->t_flags & TF_DELACK) == 0) && 17724 (tcp_in_hpts(rack->rc_tp)) && 17725 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 17726 /* keep alive not needed if we are hptsi output yet */ 17727 ; 17728 } else { 17729 int late = 0; 17730 if (tcp_in_hpts(tp)) { 17731 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 17732 us_cts = tcp_get_usecs(NULL); 17733 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 17734 rack->r_early = 1; 17735 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 17736 } else 17737 late = 1; 17738 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 17739 } 17740 tcp_hpts_remove(tp); 17741 } 17742 if (late && (did_out == 0)) { 17743 /* 17744 * We are late in the sending 17745 * and we did not call the output 17746 * (this probably should not happen). 17747 */ 17748 goto do_output_now; 17749 } 17750 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 17751 } 17752 way_out = 1; 17753 } else if (nxt_pkt == 0) { 17754 /* Do we have the correct timer running? */ 17755 rack_timer_audit(tp, rack, &so->so_snd); 17756 way_out = 2; 17757 } 17758 done_with_input: 17759 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); 17760 if (did_out) 17761 rack->r_wanted_output = 0; 17762 } 17763 17764 #ifdef TCP_ACCOUNTING 17765 sched_unpin(); 17766 #endif 17767 return (retval); 17768 } 17769 17770 static void 17771 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 17772 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 17773 { 17774 struct timeval tv; 17775 17776 /* First lets see if we have old packets */ 17777 if (!STAILQ_EMPTY(&tp->t_inqueue)) { 17778 if (ctf_do_queued_segments(tp, 1)) { 17779 m_freem(m); 17780 return; 17781 } 17782 } 17783 if (m->m_flags & M_TSTMP_LRO) { 17784 mbuf_tstmp2timeval(m, &tv); 17785 } else { 17786 /* Should not be should we kassert instead? 
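 * An LRO or hardware timestamp is normally expected on this path; fall back to the current time.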
*/ 17787 tcp_get_usecs(&tv); 17788 } 17789 if (rack_do_segment_nounlock(tp, m, th, drop_hdrlen, tlen, iptos, 0, 17790 &tv) == 0) { 17791 INP_WUNLOCK(tptoinpcb(tp)); 17792 } 17793 } 17794 17795 struct rack_sendmap * 17796 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 17797 { 17798 struct rack_sendmap *rsm = NULL; 17799 int32_t idx; 17800 uint32_t srtt = 0, thresh = 0, ts_low = 0; 17801 17802 /* Return the next guy to be re-transmitted */ 17803 if (tqhash_empty(rack->r_ctl.tqh)) { 17804 return (NULL); 17805 } 17806 if (tp->t_flags & TF_SENTFIN) { 17807 /* retran the end FIN? */ 17808 return (NULL); 17809 } 17810 /* ok lets look at this one */ 17811 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 17812 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) { 17813 return (rsm); 17814 } 17815 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 17816 goto check_it; 17817 } 17818 rsm = rack_find_lowest_rsm(rack); 17819 if (rsm == NULL) { 17820 return (NULL); 17821 } 17822 check_it: 17823 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && 17824 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 17825 /* 17826 * No sack so we automatically do the 3 strikes and 17827 * retransmit (no rack timer would be started). 17828 */ 17829 return (rsm); 17830 } 17831 if (rsm->r_flags & RACK_ACKED) { 17832 return (NULL); 17833 } 17834 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 17835 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 17836 /* Its not yet ready */ 17837 return (NULL); 17838 } 17839 srtt = rack_grab_rtt(tp, rack); 17840 idx = rsm->r_rtr_cnt - 1; 17841 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 17842 thresh = rack_calc_thresh_rack(rack, srtt, tsused, __LINE__, 1); 17843 if ((tsused == ts_low) || 17844 (TSTMP_LT(tsused, ts_low))) { 17845 /* No time since sending */ 17846 return (NULL); 17847 } 17848 if ((tsused - ts_low) < thresh) { 17849 /* It has not been long enough yet */ 17850 return (NULL); 17851 } 17852 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 17853 ((rsm->r_flags & RACK_SACK_PASSED))) { 17854 /* 17855 * We have passed the dup-ack threshold <or> 17856 * a SACK has indicated this is missing. 17857 * Note that if you are a declared attacker 17858 * it is only the dup-ack threshold that 17859 * will cause retransmits. 17860 */ 17861 /* log retransmit reason */ 17862 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 17863 rack->r_fast_output = 0; 17864 return (rsm); 17865 } 17866 return (NULL); 17867 } 17868 17869 static void 17870 rack_log_pacing_delay_calc (struct tcp_rack *rack, uint32_t len, uint32_t slot, 17871 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 17872 int line, struct rack_sendmap *rsm, uint8_t quality) 17873 { 17874 if (tcp_bblogging_on(rack->rc_tp)) { 17875 union tcp_log_stackspecific log; 17876 struct timeval tv; 17877 17878 if (rack_verbose_logging == 0) { 17879 /* 17880 * We are not verbose screen out all but 17881 * ones we always want. 
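 * (methods 2, 3, 7, 14, 20 and 89 are always kept; everything else is skipped unless verbose logging is enabled)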
17882 */ 17883 if ((method != 2) && 17884 (method != 3) && 17885 (method != 7) && 17886 (method != 89) && 17887 (method != 14) && 17888 (method != 20)) { 17889 return; 17890 } 17891 } 17892 memset(&log, 0, sizeof(log)); 17893 log.u_bbr.flex1 = slot; 17894 log.u_bbr.flex2 = len; 17895 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 17896 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 17897 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 17898 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 17899 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 17900 log.u_bbr.use_lt_bw <<= 1; 17901 log.u_bbr.use_lt_bw |= rack->r_late; 17902 log.u_bbr.use_lt_bw <<= 1; 17903 log.u_bbr.use_lt_bw |= rack->r_early; 17904 log.u_bbr.use_lt_bw <<= 1; 17905 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 17906 log.u_bbr.use_lt_bw <<= 1; 17907 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 17908 log.u_bbr.use_lt_bw <<= 1; 17909 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 17910 log.u_bbr.use_lt_bw <<= 1; 17911 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 17912 log.u_bbr.use_lt_bw <<= 1; 17913 log.u_bbr.use_lt_bw |= rack->gp_ready; 17914 log.u_bbr.pkt_epoch = line; 17915 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 17916 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 17917 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 17918 log.u_bbr.bw_inuse = bw_est; 17919 log.u_bbr.delRate = bw; 17920 if (rack->r_ctl.gp_bw == 0) 17921 log.u_bbr.cur_del_rate = 0; 17922 else 17923 log.u_bbr.cur_del_rate = rack_get_bw(rack); 17924 log.u_bbr.rttProp = len_time; 17925 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 17926 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 17927 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 17928 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 17929 /* We are in slow start */ 17930 log.u_bbr.flex7 = 1; 17931 } else { 17932 /* we are on congestion avoidance */ 17933 log.u_bbr.flex7 = 0; 17934 } 17935 log.u_bbr.flex8 = method; 17936 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 17937 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 17938 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 17939 log.u_bbr.cwnd_gain <<= 1; 17940 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 17941 log.u_bbr.cwnd_gain <<= 1; 17942 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 17943 log.u_bbr.bbr_substate = quality; 17944 log.u_bbr.bbr_state = rack->dgp_on; 17945 log.u_bbr.bbr_state <<= 1; 17946 log.u_bbr.bbr_state |= rack->rc_pace_to_cwnd; 17947 log.u_bbr.bbr_state <<= 2; 17948 TCP_LOG_EVENTP(rack->rc_tp, NULL, 17949 &rack->rc_inp->inp_socket->so_rcv, 17950 &rack->rc_inp->inp_socket->so_snd, 17951 BBR_LOG_HPTSI_CALC, 0, 17952 0, &log, false, &tv); 17953 } 17954 } 17955 17956 static uint32_t 17957 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 17958 { 17959 uint32_t new_tso, user_max, pace_one; 17960 17961 user_max = rack->rc_user_set_max_segs * mss; 17962 if (rack->rc_force_max_seg) { 17963 return (user_max); 17964 } 17965 if (rack->use_fixed_rate && 17966 ((rack->r_ctl.crte == NULL) || 17967 (bw != rack->r_ctl.crte->rate))) { 17968 /* Use the user mss since we are not exactly matched */ 17969 return (user_max); 17970 } 17971 if (rack_pace_one_seg || 17972 (rack->r_ctl.rc_user_set_min_segs == 1)) 17973 pace_one = 1; 17974 else 17975 pace_one = 0; 17976 17977 new_tso = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, bw, mss, 17978 pace_one, rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); 17979 if (new_tso > user_max) 17980 new_tso = user_max; 
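	/* At this point new_tso is capped at user_max; the checks below may only raise it again, to honor a hybrid client_suggested_maxseg or the user-set minimum segment count. */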
17981 if (rack->rc_hybrid_mode && rack->r_ctl.client_suggested_maxseg) { 17982 if (((uint32_t)rack->r_ctl.client_suggested_maxseg * mss) > new_tso) 17983 new_tso = (uint32_t)rack->r_ctl.client_suggested_maxseg * mss; 17984 } 17985 if (rack->r_ctl.rc_user_set_min_segs && 17986 ((rack->r_ctl.rc_user_set_min_segs * mss) > new_tso)) 17987 new_tso = rack->r_ctl.rc_user_set_min_segs * mss; 17988 return (new_tso); 17989 } 17990 17991 static uint64_t 17992 rack_arrive_at_discounted_rate(struct tcp_rack *rack, uint64_t window_input, uint32_t *rate_set, uint32_t *gain_b) 17993 { 17994 uint64_t reduced_win; 17995 uint32_t gain; 17996 17997 if (window_input < rc_init_window(rack)) { 17998 /* 17999 * The cwnd is collapsed to 18000 * nearly zero, maybe because of a time-out? 18001 * Lets drop back to the lt-bw. 18002 */ 18003 reduced_win = rack_get_lt_bw(rack); 18004 /* Set the flag so the caller knows its a rate and not a reduced window */ 18005 *rate_set = 1; 18006 gain = 100; 18007 } else if (IN_RECOVERY(rack->rc_tp->t_flags)) { 18008 /* 18009 * If we are in recover our cwnd needs to be less for 18010 * our pacing consideration. 18011 */ 18012 if (rack->rack_hibeta == 0) { 18013 reduced_win = window_input / 2; 18014 gain = 50; 18015 } else { 18016 reduced_win = window_input * rack->r_ctl.saved_hibeta; 18017 reduced_win /= 100; 18018 gain = rack->r_ctl.saved_hibeta; 18019 } 18020 } else { 18021 /* 18022 * Apply Timely factor to increase/decrease the 18023 * amount we are pacing at. 18024 */ 18025 gain = rack_get_output_gain(rack, NULL); 18026 if (gain > rack_gain_p5_ub) { 18027 gain = rack_gain_p5_ub; 18028 } 18029 reduced_win = window_input * gain; 18030 reduced_win /= 100; 18031 } 18032 if (gain_b != NULL) 18033 *gain_b = gain; 18034 /* 18035 * What is being returned here is a trimmed down 18036 * window values in all cases where rate_set is left 18037 * at 0. In one case we actually return the rate (lt_bw). 18038 * the "reduced_win" is returned as a slimmed down cwnd that 18039 * is then calculated by the caller into a rate when rate_set 18040 * is 0. 18041 */ 18042 return (reduced_win); 18043 } 18044 18045 static int32_t 18046 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced) 18047 { 18048 uint64_t lentim, fill_bw; 18049 18050 rack->r_via_fill_cw = 0; 18051 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) 18052 return (slot); 18053 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) 18054 return (slot); 18055 if (rack->r_ctl.rc_last_us_rtt == 0) 18056 return (slot); 18057 if (rack->rc_pace_fill_if_rttin_range && 18058 (rack->r_ctl.rc_last_us_rtt >= 18059 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { 18060 /* The rtt is huge, N * smallest, lets not fill */ 18061 return (slot); 18062 } 18063 if (rack->r_ctl.fillcw_cap && *rate_wanted >= rack->r_ctl.fillcw_cap) 18064 return (slot); 18065 /* 18066 * first lets calculate the b/w based on the last us-rtt 18067 * and the the smallest send window. 
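 * For example (illustrative numbers): with 100000 bytes of usable window and a last us-rtt of 20000 usec, fill_bw works out to 100000 * 1000000 / 20000 = 5000000 bytes/sec; further down, a 14480 byte send at that rate yields a pacing time (lentim) of roughly 2896 usec.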
18068 */ 18069 fill_bw = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); 18070 if (rack->rc_fillcw_apply_discount) { 18071 uint32_t rate_set = 0; 18072 18073 fill_bw = rack_arrive_at_discounted_rate(rack, fill_bw, &rate_set, NULL); 18074 if (rate_set) { 18075 goto at_lt_bw; 18076 } 18077 } 18078 /* Take the rwnd if its smaller */ 18079 if (fill_bw > rack->rc_tp->snd_wnd) 18080 fill_bw = rack->rc_tp->snd_wnd; 18081 /* Now lets make it into a b/w */ 18082 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 18083 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 18084 /* Adjust to any cap */ 18085 if (rack->r_ctl.fillcw_cap && fill_bw >= rack->r_ctl.fillcw_cap) 18086 fill_bw = rack->r_ctl.fillcw_cap; 18087 18088 at_lt_bw: 18089 if (rack_bw_multipler > 0) { 18090 /* 18091 * We want to limit fill-cw to the some multiplier 18092 * of the max(lt_bw, gp_est). The normal default 18093 * is 0 for off, so a sysctl has enabled it. 18094 */ 18095 uint64_t lt_bw, gp, rate; 18096 18097 gp = rack_get_gp_est(rack); 18098 lt_bw = rack_get_lt_bw(rack); 18099 if (lt_bw > gp) 18100 rate = lt_bw; 18101 else 18102 rate = gp; 18103 rate *= rack_bw_multipler; 18104 rate /= 100; 18105 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 18106 union tcp_log_stackspecific log; 18107 struct timeval tv; 18108 18109 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18110 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 18111 log.u_bbr.flex1 = rack_bw_multipler; 18112 log.u_bbr.flex2 = len; 18113 log.u_bbr.cur_del_rate = gp; 18114 log.u_bbr.delRate = lt_bw; 18115 log.u_bbr.bw_inuse = rate; 18116 log.u_bbr.rttProp = fill_bw; 18117 log.u_bbr.flex8 = 44; 18118 tcp_log_event(rack->rc_tp, NULL, NULL, NULL, 18119 BBR_LOG_CWND, 0, 18120 0, &log, false, NULL, 18121 __func__, __LINE__, &tv); 18122 } 18123 if (fill_bw > rate) 18124 fill_bw = rate; 18125 } 18126 /* We are below the min b/w */ 18127 if (non_paced) 18128 *rate_wanted = fill_bw; 18129 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 18130 return (slot); 18131 rack->r_via_fill_cw = 1; 18132 if (rack->r_rack_hw_rate_caps && 18133 (rack->r_ctl.crte != NULL)) { 18134 uint64_t high_rate; 18135 18136 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 18137 if (fill_bw > high_rate) { 18138 /* We are capping bw at the highest rate table entry */ 18139 if (*rate_wanted > high_rate) { 18140 /* The original rate was also capped */ 18141 rack->r_via_fill_cw = 0; 18142 } 18143 rack_log_hdwr_pacing(rack, 18144 fill_bw, high_rate, __LINE__, 18145 0, 3); 18146 fill_bw = high_rate; 18147 if (capped) 18148 *capped = 1; 18149 } 18150 } else if ((rack->r_ctl.crte == NULL) && 18151 (rack->rack_hdrw_pacing == 0) && 18152 (rack->rack_hdw_pace_ena) && 18153 rack->r_rack_hw_rate_caps && 18154 (rack->rack_attempt_hdwr_pace == 0) && 18155 (rack->rc_inp->inp_route.ro_nh != NULL) && 18156 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 18157 /* 18158 * Ok we may have a first attempt that is greater than our top rate 18159 * lets check. 
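	 * For example (hypothetical): if the interface's rate-limit table tops
	 * out at 5 Gbps and the fill-cw computation above asked for 7 Gbps, the
	 * code below clamps fill_bw to the 5 Gbps table entry and sets *capped
	 * so the caller knows the requested rate was not achievable.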
18160 */ 18161 uint64_t high_rate; 18162 18163 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 18164 if (high_rate) { 18165 if (fill_bw > high_rate) { 18166 fill_bw = high_rate; 18167 if (capped) 18168 *capped = 1; 18169 } 18170 } 18171 } 18172 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) { 18173 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 18174 fill_bw, 0, 0, HYBRID_LOG_RATE_CAP, 2, NULL, __LINE__); 18175 fill_bw = rack->r_ctl.bw_rate_cap; 18176 } 18177 /* 18178 * Ok fill_bw holds our mythical b/w to fill the cwnd 18179 * in an rtt (unless it was capped), what does that 18180 * time wise equate too? 18181 */ 18182 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 18183 lentim /= fill_bw; 18184 *rate_wanted = fill_bw; 18185 if (non_paced || (lentim < slot)) { 18186 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, 18187 0, lentim, 12, __LINE__, NULL, 0); 18188 return ((int32_t)lentim); 18189 } else 18190 return (slot); 18191 } 18192 18193 static uint32_t 18194 rack_policer_check_send(struct tcp_rack *rack, uint32_t len, uint32_t segsiz, uint32_t *needs) 18195 { 18196 uint64_t calc; 18197 18198 rack->rc_policer_should_pace = 0; 18199 calc = rack_policer_bucket_reserve * rack->r_ctl.policer_bucket_size; 18200 calc /= 100; 18201 /* 18202 * Now lets look at if we want more than is in the bucket <or> 18203 * we want more than is reserved in the bucket. 18204 */ 18205 if (rack_verbose_logging > 0) 18206 policer_detection_log(rack, len, segsiz, calc, rack->r_ctl.current_policer_bucket, 8); 18207 if ((calc > rack->r_ctl.current_policer_bucket) || 18208 (len >= (rack->r_ctl.current_policer_bucket - calc))) { 18209 /* 18210 * We may want to pace depending on if we are going 18211 * into the reserve or not. 18212 */ 18213 uint32_t newlen; 18214 18215 if (calc > rack->r_ctl.current_policer_bucket) { 18216 /* 18217 * This will eat into the reserve if we 18218 * don't have room at all some lines 18219 * below will catch it. 18220 */ 18221 newlen = rack->r_ctl.policer_max_seg; 18222 rack->rc_policer_should_pace = 1; 18223 } else { 18224 /* 18225 * We have all of the reserve plus something in the bucket 18226 * that we can give out. 18227 */ 18228 newlen = rack->r_ctl.current_policer_bucket - calc; 18229 if (newlen < rack->r_ctl.policer_max_seg) { 18230 /* 18231 * Into the reserve to get a full policer_max_seg 18232 * so we set the len to that and eat into 18233 * the reserve. If we go over the code 18234 * below will make us wait. 18235 */ 18236 newlen = rack->r_ctl.policer_max_seg; 18237 rack->rc_policer_should_pace = 1; 18238 } 18239 } 18240 if (newlen > rack->r_ctl.current_policer_bucket) { 18241 /* We have to wait some */ 18242 *needs = newlen - rack->r_ctl.current_policer_bucket; 18243 return (0); 18244 } 18245 if (rack_verbose_logging > 0) 18246 policer_detection_log(rack, len, segsiz, newlen, 0, 9); 18247 len = newlen; 18248 } /* else we have all len available above the reserve */ 18249 if (rack_verbose_logging > 0) 18250 policer_detection_log(rack, len, segsiz, calc, 0, 10); 18251 return (len); 18252 } 18253 18254 static uint32_t 18255 rack_policed_sending(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, uint32_t segsiz, int call_line) 18256 { 18257 /* 18258 * Given a send of len, and a token bucket set at current_policer_bucket_size 18259 * are we close enough to the end of the bucket that we need to pace? If so 18260 * calculate out a time and return it. Otherwise subtract the tokens from 18261 * the bucket. 
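	 * Worked example (hypothetical numbers): with policer_bw = 1250000
	 * bytes/sec (a 10 Mbps policer), policer_bucket_size = 64000 and
	 * rack_policer_bucket_reserve = 20, the reserve is 12800 bytes. A
	 * 14480-byte send that would leave the bucket at or below that
	 * reserve is paced for
	 *
	 *	slot = 14480 * HPTS_USEC_IN_SEC / 1250000 ~= 11584 usec
	 *
	 * otherwise the tokens are simply debited and no extra delay is added.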
18262 */ 18263 uint64_t calc; 18264 18265 if ((rack->r_ctl.policer_bw == 0) || 18266 (rack->r_ctl.policer_bucket_size < segsiz)) { 18267 /* 18268 * We should have an estimate here... 18269 */ 18270 return (0); 18271 } 18272 calc = (uint64_t)rack_policer_bucket_reserve * (uint64_t)rack->r_ctl.policer_bucket_size; 18273 calc /= 100; 18274 if ((rack->r_ctl.current_policer_bucket < len) || 18275 (rack->rc_policer_should_pace == 1) || 18276 ((rack->r_ctl.current_policer_bucket - len) <= (uint32_t)calc)) { 18277 /* we need to pace */ 18278 uint64_t lentim, res; 18279 uint32_t slot; 18280 18281 lentim = (uint64_t)len * (uint64_t)HPTS_USEC_IN_SEC; 18282 res = lentim / rack->r_ctl.policer_bw; 18283 slot = (uint32_t)res; 18284 if (rack->r_ctl.current_policer_bucket > len) 18285 rack->r_ctl.current_policer_bucket -= len; 18286 else 18287 rack->r_ctl.current_policer_bucket = 0; 18288 policer_detection_log(rack, len, slot, (uint32_t)rack_policer_bucket_reserve, call_line, 5); 18289 rack->rc_policer_should_pace = 0; 18290 return(slot); 18291 } 18292 /* Just take tokens out of the bucket and let rack do whatever it would have */ 18293 policer_detection_log(rack, len, 0, (uint32_t)rack_policer_bucket_reserve, call_line, 6); 18294 if (len < rack->r_ctl.current_policer_bucket) { 18295 rack->r_ctl.current_policer_bucket -= len; 18296 } else { 18297 rack->r_ctl.current_policer_bucket = 0; 18298 } 18299 return (0); 18300 } 18301 18302 18303 static int32_t 18304 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz, int line) 18305 { 18306 uint64_t srtt; 18307 int32_t slot = 0; 18308 int32_t minslot = 0; 18309 int can_start_hw_pacing = 1; 18310 int err; 18311 int pace_one; 18312 18313 if (rack_pace_one_seg || 18314 (rack->r_ctl.rc_user_set_min_segs == 1)) 18315 pace_one = 1; 18316 else 18317 pace_one = 0; 18318 if (rack->rc_policer_detected == 1) { 18319 /* 18320 * A policer has been detected and we 18321 * have all of our data (policer-bw and 18322 * policer bucket size) calculated. Call 18323 * into the function to find out if we are 18324 * overriding the time. 18325 */ 18326 slot = rack_policed_sending(rack, tp, len, segsiz, line); 18327 if (slot) { 18328 uint64_t logbw; 18329 18330 logbw = rack->r_ctl.current_policer_bucket; 18331 logbw <<= 32; 18332 logbw |= rack->r_ctl.policer_bucket_size; 18333 rack_log_pacing_delay_calc(rack, len, slot, rack->r_ctl.policer_bw, logbw, 0, 89, __LINE__, NULL, 0); 18334 return(slot); 18335 } 18336 } 18337 if (rack->rc_always_pace == 0) { 18338 /* 18339 * We use the most optimistic possible cwnd/srtt for 18340 * sending calculations. This will make our 18341 * calculation anticipate getting more through 18342 * quicker then possible. But thats ok we don't want 18343 * the peer to have a gap in data sending. 18344 */ 18345 uint64_t cwnd, tr_perms = 0; 18346 int32_t reduce = 0; 18347 18348 old_method: 18349 /* 18350 * We keep no precise pacing with the old method 18351 * instead we use the pacer to mitigate bursts. 
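		 * For example (hypothetical numbers): with cwnd = 64000 bytes and
		 * srtt = 32000 usec,
		 *
		 *	tr_perms = (64000 * 1000) / 32000 = 2000 bytes per msec
		 *
		 * so a 16000-byte send computes to an 8 msec slot, which is then
		 * trimmed by rack_slot_reduction and converted to usec.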
18352 */ 18353 if (rack->r_ctl.rc_rack_min_rtt) 18354 srtt = rack->r_ctl.rc_rack_min_rtt; 18355 else 18356 srtt = max(tp->t_srtt, 1); 18357 if (rack->r_ctl.rc_rack_largest_cwnd) 18358 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 18359 else 18360 cwnd = rack->r_ctl.cwnd_to_use; 18361 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 18362 tr_perms = (cwnd * 1000) / srtt; 18363 if (tr_perms == 0) { 18364 tr_perms = ctf_fixed_maxseg(tp); 18365 } 18366 /* 18367 * Calculate how long this will take to drain, if 18368 * the calculation comes out to zero, thats ok we 18369 * will use send_a_lot to possibly spin around for 18370 * more increasing tot_len_this_send to the point 18371 * that its going to require a pace, or we hit the 18372 * cwnd. Which in that case we are just waiting for 18373 * a ACK. 18374 */ 18375 slot = len / tr_perms; 18376 /* Now do we reduce the time so we don't run dry? */ 18377 if (slot && rack_slot_reduction) { 18378 reduce = (slot / rack_slot_reduction); 18379 if (reduce < slot) { 18380 slot -= reduce; 18381 } else 18382 slot = 0; 18383 } 18384 slot *= HPTS_USEC_IN_MSEC; 18385 if (rack->rc_pace_to_cwnd) { 18386 uint64_t rate_wanted = 0; 18387 18388 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); 18389 rack->rc_ack_can_sendout_data = 1; 18390 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); 18391 } else 18392 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); 18393 /*******************************************************/ 18394 /* RRS: We insert non-paced call to stats here for len */ 18395 /*******************************************************/ 18396 } else { 18397 uint64_t bw_est, res, lentim, rate_wanted; 18398 uint32_t segs, oh; 18399 int capped = 0; 18400 int prev_fill; 18401 18402 if ((rack->r_rr_config == 1) && rsm) { 18403 return (rack->r_ctl.rc_min_to); 18404 } 18405 if (rack->use_fixed_rate) { 18406 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 18407 } else if ((rack->r_ctl.init_rate == 0) && 18408 (rack->r_ctl.gp_bw == 0)) { 18409 /* no way to yet do an estimate */ 18410 bw_est = rate_wanted = 0; 18411 } else if (rack->dgp_on) { 18412 bw_est = rack_get_bw(rack); 18413 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 18414 } else { 18415 uint32_t gain, rate_set = 0; 18416 18417 rate_wanted = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); 18418 rate_wanted = rack_arrive_at_discounted_rate(rack, rate_wanted, &rate_set, &gain); 18419 if (rate_set == 0) { 18420 if (rate_wanted > rack->rc_tp->snd_wnd) 18421 rate_wanted = rack->rc_tp->snd_wnd; 18422 /* Now lets make it into a b/w */ 18423 rate_wanted *= (uint64_t)HPTS_USEC_IN_SEC; 18424 rate_wanted /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 18425 } 18426 bw_est = rate_wanted; 18427 rack_log_pacing_delay_calc(rack, rack->rc_tp->snd_cwnd, 18428 rack->r_ctl.cwnd_to_use, 18429 rate_wanted, bw_est, 18430 rack->r_ctl.rc_last_us_rtt, 18431 88, __LINE__, NULL, gain); 18432 } 18433 if ((bw_est == 0) || (rate_wanted == 0) || 18434 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) { 18435 /* 18436 * No way yet to make a b/w estimate or 18437 * our raise is set incorrectly. 18438 */ 18439 goto old_method; 18440 } 18441 rack_rate_cap_bw(rack, &rate_wanted, &capped); 18442 /* We need to account for all the overheads */ 18443 segs = (len + segsiz - 1) / segsiz; 18444 /* 18445 * We need the diff between 1514 bytes (e-mtu with e-hdr) 18446 * and how much data we put in each packet. 
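		 * (Worked example, assuming IPv4 with timestamp options: t_maxseg
		 * 1460 and segsiz 1448 give oh = (1460 - 1448) + 20 + 20 + 14 = 66
		 * bytes of overhead per segment, exactly the 1514 - 1448 difference.)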
Yes this 18447 * means we may be off if we are larger than 1500 bytes 18448 * or smaller. But this just makes us more conservative. 18449 */ 18450 18451 oh = (tp->t_maxseg - segsiz) + sizeof(struct tcphdr); 18452 if (rack->r_is_v6) { 18453 #ifdef INET6 18454 oh += sizeof(struct ip6_hdr); 18455 #endif 18456 } else { 18457 #ifdef INET 18458 oh += sizeof(struct ip); 18459 #endif 18460 } 18461 /* We add a fixed 14 for the ethernet header */ 18462 oh += 14; 18463 segs *= oh; 18464 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 18465 res = lentim / rate_wanted; 18466 slot = (uint32_t)res; 18467 if (rack_hw_rate_min && 18468 (rate_wanted < rack_hw_rate_min)) { 18469 can_start_hw_pacing = 0; 18470 if (rack->r_ctl.crte) { 18471 /* 18472 * Ok we need to release it, we 18473 * have fallen too low. 18474 */ 18475 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 18476 rack->r_ctl.crte = NULL; 18477 rack->rack_attempt_hdwr_pace = 0; 18478 rack->rack_hdrw_pacing = 0; 18479 } 18480 } 18481 if (rack->r_ctl.crte && 18482 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { 18483 /* 18484 * We want more than the hardware can give us, 18485 * don't start any hw pacing. 18486 */ 18487 can_start_hw_pacing = 0; 18488 if (rack->r_rack_hw_rate_caps == 0) { 18489 /* 18490 * Ok we need to release it, we 18491 * want more than the card can give us and 18492 * no rate cap is in place. Set it up so 18493 * when we want less we can retry. 18494 */ 18495 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 18496 rack->r_ctl.crte = NULL; 18497 rack->rack_attempt_hdwr_pace = 0; 18498 rack->rack_hdrw_pacing = 0; 18499 } 18500 } 18501 if ((rack->r_ctl.crte != NULL) && (rack->rc_inp->inp_snd_tag == NULL)) { 18502 /* 18503 * We lost our rate somehow, this can happen 18504 * if the interface changed underneath us. 18505 */ 18506 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 18507 rack->r_ctl.crte = NULL; 18508 /* Lets re-allow attempting to setup pacing */ 18509 rack->rack_hdrw_pacing = 0; 18510 rack->rack_attempt_hdwr_pace = 0; 18511 rack_log_hdwr_pacing(rack, 18512 rate_wanted, bw_est, __LINE__, 18513 0, 6); 18514 } 18515 prev_fill = rack->r_via_fill_cw; 18516 if ((rack->rc_pace_to_cwnd) && 18517 (capped == 0) && 18518 (rack->dgp_on == 1) && 18519 (rack->use_fixed_rate == 0) && 18520 (rack->in_probe_rtt == 0) && 18521 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 18522 /* 18523 * We want to pace at our rate *or* faster to 18524 * fill the cwnd to the max if its not full. 18525 */ 18526 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); 18527 /* Re-check to make sure we are not exceeding our max b/w */ 18528 if ((rack->r_ctl.crte != NULL) && 18529 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { 18530 /* 18531 * We want more than the hardware can give us, 18532 * don't start any hw pacing. 18533 */ 18534 can_start_hw_pacing = 0; 18535 if (rack->r_rack_hw_rate_caps == 0) { 18536 /* 18537 * Ok we need to release it, we 18538 * want more than the card can give us and 18539 * no rate cap is in place. Set it up so 18540 * when we want less we can retry. 
18541 */ 18542 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 18543 rack->r_ctl.crte = NULL; 18544 rack->rack_attempt_hdwr_pace = 0; 18545 rack->rack_hdrw_pacing = 0; 18546 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 18547 } 18548 } 18549 } 18550 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 18551 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 18552 if ((rack->rack_hdw_pace_ena) && 18553 (can_start_hw_pacing > 0) && 18554 (rack->rack_hdrw_pacing == 0) && 18555 (rack->rack_attempt_hdwr_pace == 0)) { 18556 /* 18557 * Lets attempt to turn on hardware pacing 18558 * if we can. 18559 */ 18560 rack->rack_attempt_hdwr_pace = 1; 18561 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 18562 rack->rc_inp->inp_route.ro_nh->nh_ifp, 18563 rate_wanted, 18564 RS_PACING_GEQ, 18565 &err, &rack->r_ctl.crte_prev_rate); 18566 if (rack->r_ctl.crte) { 18567 rack->rack_hdrw_pacing = 1; 18568 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, segsiz, 18569 pace_one, rack->r_ctl.crte, 18570 NULL, rack->r_ctl.pace_len_divisor); 18571 rack_log_hdwr_pacing(rack, 18572 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 18573 err, 0); 18574 rack->r_ctl.last_hw_bw_req = rate_wanted; 18575 } else { 18576 counter_u64_add(rack_hw_pace_init_fail, 1); 18577 } 18578 } else if (rack->rack_hdrw_pacing && 18579 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 18580 /* Do we need to adjust our rate? */ 18581 const struct tcp_hwrate_limit_table *nrte; 18582 18583 if (rack->r_up_only && 18584 (rate_wanted < rack->r_ctl.crte->rate)) { 18585 /** 18586 * We have four possible states here 18587 * having to do with the previous time 18588 * and this time. 18589 * previous | this-time 18590 * A) 0 | 0 -- fill_cw not in the picture 18591 * B) 1 | 0 -- we were doing a fill-cw but now are not 18592 * C) 1 | 1 -- all rates from fill_cw 18593 * D) 0 | 1 -- we were doing non-fill and now we are filling 18594 * 18595 * For case A, C and D we don't allow a drop. But for 18596 * case B where we now our on our steady rate we do 18597 * allow a drop. 18598 * 18599 */ 18600 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 18601 goto done_w_hdwr; 18602 } 18603 if ((rate_wanted > rack->r_ctl.crte->rate) || 18604 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 18605 if (rack_hw_rate_to_low && 18606 (bw_est < rack_hw_rate_to_low)) { 18607 /* 18608 * The pacing rate is too low for hardware, but 18609 * do allow hardware pacing to be restarted. 18610 */ 18611 rack_log_hdwr_pacing(rack, 18612 bw_est, rack->r_ctl.crte->rate, __LINE__, 18613 0, 5); 18614 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 18615 rack->r_ctl.crte = NULL; 18616 rack->rack_attempt_hdwr_pace = 0; 18617 rack->rack_hdrw_pacing = 0; 18618 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 18619 goto done_w_hdwr; 18620 } 18621 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 18622 rack->rc_tp, 18623 rack->rc_inp->inp_route.ro_nh->nh_ifp, 18624 rate_wanted, 18625 RS_PACING_GEQ, 18626 &err, &rack->r_ctl.crte_prev_rate); 18627 if (nrte == NULL) { 18628 /* 18629 * Lost the rate, lets drop hardware pacing 18630 * period. 
18631 */ 18632 rack->rack_hdrw_pacing = 0; 18633 rack->r_ctl.crte = NULL; 18634 rack_log_hdwr_pacing(rack, 18635 rate_wanted, 0, __LINE__, 18636 err, 1); 18637 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 18638 counter_u64_add(rack_hw_pace_lost, 1); 18639 } else if (nrte != rack->r_ctl.crte) { 18640 rack->r_ctl.crte = nrte; 18641 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, 18642 segsiz, pace_one, rack->r_ctl.crte, 18643 NULL, rack->r_ctl.pace_len_divisor); 18644 rack_log_hdwr_pacing(rack, 18645 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 18646 err, 2); 18647 rack->r_ctl.last_hw_bw_req = rate_wanted; 18648 } 18649 } else { 18650 /* We just need to adjust the segment size */ 18651 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 18652 rack_log_hdwr_pacing(rack, 18653 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 18654 0, 4); 18655 rack->r_ctl.last_hw_bw_req = rate_wanted; 18656 } 18657 } 18658 } 18659 if (minslot && (minslot > slot)) { 18660 rack_log_pacing_delay_calc(rack, minslot, slot, rack->r_ctl.crte->rate, bw_est, lentim, 18661 98, __LINE__, NULL, 0); 18662 slot = minslot; 18663 } 18664 done_w_hdwr: 18665 if (rack_limit_time_with_srtt && 18666 (rack->use_fixed_rate == 0) && 18667 (rack->rack_hdrw_pacing == 0)) { 18668 /* 18669 * Sanity check, we do not allow the pacing delay 18670 * to be longer than the SRTT of the path. If it is 18671 * a slow path, then adding a packet should increase 18672 * the RTT and compensate for this i.e. the srtt will 18673 * be greater so the allowed pacing time will be greater. 18674 * 18675 * Note this restriction is not for where a peak rate 18676 * is set, we are doing fixed pacing or hardware pacing. 18677 */ 18678 if (rack->rc_tp->t_srtt) 18679 srtt = rack->rc_tp->t_srtt; 18680 else 18681 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 18682 if (srtt < (uint64_t)slot) { 18683 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); 18684 slot = srtt; 18685 } 18686 } 18687 /*******************************************************************/ 18688 /* RRS: We insert paced call to stats here for len and rate_wanted */ 18689 /*******************************************************************/ 18690 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); 18691 } 18692 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 18693 /* 18694 * If this rate is seeing enobufs when it 18695 * goes to send then either the nic is out 18696 * of gas or we are mis-estimating the time 18697 * somehow and not letting the queue empty 18698 * completely. Lets add to the pacing time. 18699 */ 18700 int hw_boost_delay; 18701 18702 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 18703 if (hw_boost_delay > rack_enobuf_hw_max) 18704 hw_boost_delay = rack_enobuf_hw_max; 18705 else if (hw_boost_delay < rack_enobuf_hw_min) 18706 hw_boost_delay = rack_enobuf_hw_min; 18707 slot += hw_boost_delay; 18708 } 18709 return (slot); 18710 } 18711 18712 static void 18713 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 18714 tcp_seq startseq, uint32_t sb_offset) 18715 { 18716 struct rack_sendmap *my_rsm = NULL; 18717 18718 if (tp->t_state < TCPS_ESTABLISHED) { 18719 /* 18720 * We don't start any measurements if we are 18721 * not at least established. 
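	 * (A goodput measurement is, roughly, a window of sequence space
	 * gput_seq..gput_ack divided by the time it took to get that data
	 * acked; e.g. 145000 bytes covered in 50000 usec is about a
	 * 2.9 MB/sec sample. Before ESTABLISHED there is nothing meaningful
	 * to sample.)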
18722 */ 18723 return; 18724 } 18725 if (tp->t_state >= TCPS_FIN_WAIT_1) { 18726 /* 18727 * We will get no more data into the SB 18728 * this means we need to have the data available 18729 * before we start a measurement. 18730 */ 18731 18732 if (sbavail(&tptosocket(tp)->so_snd) < 18733 max(rc_init_window(rack), 18734 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { 18735 /* Nope not enough data */ 18736 return; 18737 } 18738 } 18739 tp->t_flags |= TF_GPUTINPROG; 18740 rack->r_ctl.rc_gp_cumack_ts = 0; 18741 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 18742 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 18743 tp->gput_seq = startseq; 18744 rack->app_limited_needs_set = 0; 18745 if (rack->in_probe_rtt) 18746 rack->measure_saw_probe_rtt = 1; 18747 else if ((rack->measure_saw_probe_rtt) && 18748 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 18749 rack->measure_saw_probe_rtt = 0; 18750 if (rack->rc_gp_filled) 18751 tp->gput_ts = rack->r_ctl.last_cumack_advance; 18752 else { 18753 /* Special case initial measurement */ 18754 struct timeval tv; 18755 18756 tp->gput_ts = tcp_get_usecs(&tv); 18757 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 18758 } 18759 /* 18760 * We take a guess out into the future, 18761 * if we have no measurement and no 18762 * initial rate, we measure the first 18763 * initial-windows worth of data to 18764 * speed up getting some GP measurement and 18765 * thus start pacing. 18766 */ 18767 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 18768 rack->app_limited_needs_set = 1; 18769 tp->gput_ack = startseq + max(rc_init_window(rack), 18770 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 18771 rack_log_pacing_delay_calc(rack, 18772 tp->gput_seq, 18773 tp->gput_ack, 18774 0, 18775 tp->gput_ts, 18776 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 18777 9, 18778 __LINE__, NULL, 0); 18779 rack_tend_gp_marks(tp, rack); 18780 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 18781 return; 18782 } 18783 if (sb_offset) { 18784 /* 18785 * We are out somewhere in the sb 18786 * can we use the already outstanding data? 18787 */ 18788 18789 if (rack->r_ctl.rc_app_limited_cnt == 0) { 18790 /* 18791 * Yes first one is good and in this case 18792 * the tp->gput_ts is correctly set based on 18793 * the last ack that arrived (no need to 18794 * set things up when an ack comes in). 18795 */ 18796 my_rsm = tqhash_min(rack->r_ctl.tqh); 18797 if ((my_rsm == NULL) || 18798 (my_rsm->r_rtr_cnt != 1)) { 18799 /* retransmission? */ 18800 goto use_latest; 18801 } 18802 } else { 18803 if (rack->r_ctl.rc_first_appl == NULL) { 18804 /* 18805 * If rc_first_appl is NULL 18806 * then the cnt should be 0. 18807 * This is probably an error, maybe 18808 * a KASSERT would be approprate. 18809 */ 18810 goto use_latest; 18811 } 18812 /* 18813 * If we have a marker pointer to the last one that is 18814 * app limited we can use that, but we need to set 18815 * things up so that when it gets ack'ed we record 18816 * the ack time (if its not already acked). 18817 */ 18818 rack->app_limited_needs_set = 1; 18819 /* 18820 * We want to get to the rsm that is either 18821 * next with space i.e. over 1 MSS or the one 18822 * after that (after the app-limited). 
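			 * For example (hypothetical sequence numbers, 1448-byte MSS): if
			 * the rsm following the app-limited marker covers only 2000..2800
			 * we step to the rsm after it; if it covers 2000..6000 we instead
			 * start the measurement one MSS into it, i.e.
			 * gput_seq = 2000 + 1448 = 3448.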
18823 */ 18824 my_rsm = tqhash_next(rack->r_ctl.tqh, rack->r_ctl.rc_first_appl); 18825 if (my_rsm) { 18826 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 18827 /* Have to use the next one */ 18828 my_rsm = tqhash_next(rack->r_ctl.tqh, my_rsm); 18829 else { 18830 /* Use after the first MSS of it is acked */ 18831 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 18832 goto start_set; 18833 } 18834 } 18835 if ((my_rsm == NULL) || 18836 (my_rsm->r_rtr_cnt != 1)) { 18837 /* 18838 * Either its a retransmit or 18839 * the last is the app-limited one. 18840 */ 18841 goto use_latest; 18842 } 18843 } 18844 tp->gput_seq = my_rsm->r_start; 18845 start_set: 18846 if (my_rsm->r_flags & RACK_ACKED) { 18847 /* 18848 * This one has been acked use the arrival ack time 18849 */ 18850 struct rack_sendmap *nrsm; 18851 18852 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 18853 rack->app_limited_needs_set = 0; 18854 /* 18855 * Ok in this path we need to use the r_end now 18856 * since this guy is the starting ack. 18857 */ 18858 tp->gput_seq = my_rsm->r_end; 18859 /* 18860 * We also need to adjust up the sendtime 18861 * to the send of the next data after my_rsm. 18862 */ 18863 nrsm = tqhash_next(rack->r_ctl.tqh, my_rsm); 18864 if (nrsm != NULL) 18865 my_rsm = nrsm; 18866 else { 18867 /* 18868 * The next as not been sent, thats the 18869 * case for using the latest. 18870 */ 18871 goto use_latest; 18872 } 18873 } 18874 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; 18875 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 18876 rack->r_ctl.rc_gp_cumack_ts = 0; 18877 if ((rack->r_ctl.cleared_app_ack == 1) && 18878 (SEQ_GEQ(rack->r_ctl.cleared_app_ack, tp->gput_seq))) { 18879 /* 18880 * We just cleared an application limited period 18881 * so the next seq out needs to skip the first 18882 * ack. 18883 */ 18884 rack->app_limited_needs_set = 1; 18885 rack->r_ctl.cleared_app_ack = 0; 18886 } 18887 rack_log_pacing_delay_calc(rack, 18888 tp->gput_seq, 18889 tp->gput_ack, 18890 (uintptr_t)my_rsm, 18891 tp->gput_ts, 18892 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 18893 9, 18894 __LINE__, my_rsm, 0); 18895 /* Now lets make sure all are marked as they should be */ 18896 rack_tend_gp_marks(tp, rack); 18897 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 18898 return; 18899 } 18900 18901 use_latest: 18902 /* 18903 * We don't know how long we may have been 18904 * idle or if this is the first-send. Lets 18905 * setup the flag so we will trim off 18906 * the first ack'd data so we get a true 18907 * measurement. 18908 */ 18909 rack->app_limited_needs_set = 1; 18910 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 18911 rack->r_ctl.rc_gp_cumack_ts = 0; 18912 /* Find this guy so we can pull the send time */ 18913 my_rsm = tqhash_find(rack->r_ctl.tqh, startseq); 18914 if (my_rsm) { 18915 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; 18916 if (my_rsm->r_flags & RACK_ACKED) { 18917 /* 18918 * Unlikely since its probably what was 18919 * just transmitted (but I am paranoid). 18920 */ 18921 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 18922 rack->app_limited_needs_set = 0; 18923 } 18924 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 18925 /* This also is unlikely */ 18926 tp->gput_seq = my_rsm->r_start; 18927 } 18928 } else { 18929 /* 18930 * TSNH unless we have some send-map limit, 18931 * and even at that it should not be hitting 18932 * that limit (we should have stopped sending). 
18933 */ 18934 struct timeval tv; 18935 18936 microuptime(&tv); 18937 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 18938 } 18939 rack_tend_gp_marks(tp, rack); 18940 rack_log_pacing_delay_calc(rack, 18941 tp->gput_seq, 18942 tp->gput_ack, 18943 (uintptr_t)my_rsm, 18944 tp->gput_ts, 18945 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 18946 9, __LINE__, NULL, 0); 18947 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 18948 } 18949 18950 static inline uint32_t 18951 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 18952 uint32_t avail, int32_t sb_offset) 18953 { 18954 uint32_t len; 18955 uint32_t sendwin; 18956 18957 if (tp->snd_wnd > cwnd_to_use) 18958 sendwin = cwnd_to_use; 18959 else 18960 sendwin = tp->snd_wnd; 18961 if (ctf_outstanding(tp) >= tp->snd_wnd) { 18962 /* We never want to go over our peers rcv-window */ 18963 len = 0; 18964 } else { 18965 uint32_t flight; 18966 18967 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 18968 if (flight >= sendwin) { 18969 /* 18970 * We have in flight what we are allowed by cwnd (if 18971 * it was rwnd blocking it would have hit above out 18972 * >= tp->snd_wnd). 18973 */ 18974 return (0); 18975 } 18976 len = sendwin - flight; 18977 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 18978 /* We would send too much (beyond the rwnd) */ 18979 len = tp->snd_wnd - ctf_outstanding(tp); 18980 } 18981 if ((len + sb_offset) > avail) { 18982 /* 18983 * We don't have that much in the SB, how much is 18984 * there? 18985 */ 18986 len = avail - sb_offset; 18987 } 18988 } 18989 return (len); 18990 } 18991 18992 static void 18993 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 18994 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 18995 int rsm_is_null, int optlen, int line, uint16_t mode) 18996 { 18997 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 18998 union tcp_log_stackspecific log; 18999 struct timeval tv; 19000 19001 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 19002 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 19003 log.u_bbr.flex1 = error; 19004 log.u_bbr.flex2 = flags; 19005 log.u_bbr.flex3 = rsm_is_null; 19006 log.u_bbr.flex4 = ipoptlen; 19007 log.u_bbr.flex5 = tp->rcv_numsacks; 19008 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 19009 log.u_bbr.flex7 = optlen; 19010 log.u_bbr.flex8 = rack->r_fsb_inited; 19011 log.u_bbr.applimited = rack->r_fast_output; 19012 log.u_bbr.bw_inuse = rack_get_bw(rack); 19013 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 19014 log.u_bbr.cwnd_gain = mode; 19015 log.u_bbr.pkts_out = orig_len; 19016 log.u_bbr.lt_epoch = len; 19017 log.u_bbr.delivered = line; 19018 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 19019 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 19020 tcp_log_event(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 19021 len, &log, false, NULL, __func__, __LINE__, &tv); 19022 } 19023 } 19024 19025 19026 static struct mbuf * 19027 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 19028 struct rack_fast_send_blk *fsb, 19029 int32_t seglimit, int32_t segsize, int hw_tls) 19030 { 19031 #ifdef KERN_TLS 19032 struct ktls_session *tls, *ntls; 19033 #ifdef INVARIANTS 19034 struct mbuf *start; 19035 #endif 19036 #endif 19037 struct mbuf *m, *n, **np, *smb; 19038 struct mbuf *top; 19039 int32_t off, soff; 19040 int32_t len = *plen; 19041 int32_t fragsize; 19042 int32_t len_cp = 0; 19043 uint32_t mlen, 
frags; 19044 19045 soff = off = the_off; 19046 smb = m = the_m; 19047 np = ⊤ 19048 top = NULL; 19049 #ifdef KERN_TLS 19050 if (hw_tls && (m->m_flags & M_EXTPG)) 19051 tls = m->m_epg_tls; 19052 else 19053 tls = NULL; 19054 #ifdef INVARIANTS 19055 start = m; 19056 #endif 19057 #endif 19058 while (len > 0) { 19059 if (m == NULL) { 19060 *plen = len_cp; 19061 break; 19062 } 19063 #ifdef KERN_TLS 19064 if (hw_tls) { 19065 if (m->m_flags & M_EXTPG) 19066 ntls = m->m_epg_tls; 19067 else 19068 ntls = NULL; 19069 19070 /* 19071 * Avoid mixing TLS records with handshake 19072 * data or TLS records from different 19073 * sessions. 19074 */ 19075 if (tls != ntls) { 19076 MPASS(m != start); 19077 *plen = len_cp; 19078 break; 19079 } 19080 } 19081 #endif 19082 mlen = min(len, m->m_len - off); 19083 if (seglimit) { 19084 /* 19085 * For M_EXTPG mbufs, add 3 segments 19086 * + 1 in case we are crossing page boundaries 19087 * + 2 in case the TLS hdr/trailer are used 19088 * It is cheaper to just add the segments 19089 * than it is to take the cache miss to look 19090 * at the mbuf ext_pgs state in detail. 19091 */ 19092 if (m->m_flags & M_EXTPG) { 19093 fragsize = min(segsize, PAGE_SIZE); 19094 frags = 3; 19095 } else { 19096 fragsize = segsize; 19097 frags = 0; 19098 } 19099 19100 /* Break if we really can't fit anymore. */ 19101 if ((frags + 1) >= seglimit) { 19102 *plen = len_cp; 19103 break; 19104 } 19105 19106 /* 19107 * Reduce size if you can't copy the whole 19108 * mbuf. If we can't copy the whole mbuf, also 19109 * adjust len so the loop will end after this 19110 * mbuf. 19111 */ 19112 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 19113 mlen = (seglimit - frags - 1) * fragsize; 19114 len = mlen; 19115 *plen = len_cp + len; 19116 } 19117 frags += howmany(mlen, fragsize); 19118 if (frags == 0) 19119 frags++; 19120 seglimit -= frags; 19121 KASSERT(seglimit > 0, 19122 ("%s: seglimit went too low", __func__)); 19123 } 19124 n = m_get(M_NOWAIT, m->m_type); 19125 *np = n; 19126 if (n == NULL) 19127 goto nospace; 19128 n->m_len = mlen; 19129 soff += mlen; 19130 len_cp += n->m_len; 19131 if (m->m_flags & (M_EXT | M_EXTPG)) { 19132 n->m_data = m->m_data + off; 19133 mb_dupcl(n, m); 19134 } else { 19135 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 19136 (u_int)n->m_len); 19137 } 19138 len -= n->m_len; 19139 off = 0; 19140 m = m->m_next; 19141 np = &n->m_next; 19142 if (len || (soff == smb->m_len)) { 19143 /* 19144 * We have more so we move forward or 19145 * we have consumed the entire mbuf and 19146 * len has fell to 0. 19147 */ 19148 soff = 0; 19149 smb = m; 19150 } 19151 19152 } 19153 if (fsb != NULL) { 19154 fsb->m = smb; 19155 fsb->off = soff; 19156 if (smb) { 19157 /* 19158 * Save off the size of the mbuf. We do 19159 * this so that we can recognize when it 19160 * has been trimmed by sbcut() as acks 19161 * come in. 19162 */ 19163 fsb->o_m_len = smb->m_len; 19164 fsb->o_t_len = M_TRAILINGROOM(smb); 19165 } else { 19166 /* 19167 * This is the case where the next mbuf went to NULL. This 19168 * means with this copy we have sent everything in the sb. 19169 * In theory we could clear the fast_output flag, but lets 19170 * not since its possible that we could get more added 19171 * and acks that call the extend function which would let 19172 * us send more. 
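			 * The o_m_len/o_t_len values saved above are what rack_fo_m_copym()
			 * later compares against: if acks trim the front of the mbuf, or
			 * data is appended at its tail, the stored lengths let it recompute
			 * the correct starting offset rather than resending or skipping
			 * bytes.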
19173 */ 19174 fsb->o_m_len = 0; 19175 fsb->o_t_len = 0; 19176 } 19177 } 19178 return (top); 19179 nospace: 19180 if (top) 19181 m_freem(top); 19182 return (NULL); 19183 19184 } 19185 19186 /* 19187 * This is a copy of m_copym(), taking the TSO segment size/limit 19188 * constraints into account, and advancing the sndptr as it goes. 19189 */ 19190 static struct mbuf * 19191 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen, 19192 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff) 19193 { 19194 struct mbuf *m, *n; 19195 int32_t soff; 19196 19197 m = rack->r_ctl.fsb.m; 19198 if (M_TRAILINGROOM(m) != rack->r_ctl.fsb.o_t_len) { 19199 /* 19200 * The trailing space changed, mbufs can grow 19201 * at the tail but they can't shrink from 19202 * it, KASSERT that. Adjust the orig_m_len to 19203 * compensate for this change. 19204 */ 19205 KASSERT((rack->r_ctl.fsb.o_t_len > M_TRAILINGROOM(m)), 19206 ("mbuf:%p rack:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n", 19207 m, 19208 rack, 19209 (intmax_t)M_TRAILINGROOM(m), 19210 rack->r_ctl.fsb.o_t_len, 19211 rack->r_ctl.fsb.o_m_len, 19212 m->m_len)); 19213 rack->r_ctl.fsb.o_m_len += (rack->r_ctl.fsb.o_t_len - M_TRAILINGROOM(m)); 19214 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(m); 19215 } 19216 if (m->m_len < rack->r_ctl.fsb.o_m_len) { 19217 /* 19218 * Mbuf shrank, trimmed off the top by an ack, our 19219 * offset changes. 19220 */ 19221 KASSERT((rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len)), 19222 ("mbuf:%p len:%u rack:%p oml:%u soff:%u\n", 19223 m, m->m_len, 19224 rack, rack->r_ctl.fsb.o_m_len, 19225 rack->r_ctl.fsb.off)); 19226 19227 if (rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len- m->m_len)) 19228 rack->r_ctl.fsb.off -= (rack->r_ctl.fsb.o_m_len - m->m_len); 19229 else 19230 rack->r_ctl.fsb.off = 0; 19231 rack->r_ctl.fsb.o_m_len = m->m_len; 19232 #ifdef INVARIANTS 19233 } else if (m->m_len > rack->r_ctl.fsb.o_m_len) { 19234 panic("rack:%p m:%p m_len grew outside of t_space compensation", 19235 rack, m); 19236 #endif 19237 } 19238 soff = rack->r_ctl.fsb.off; 19239 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff)); 19240 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen)); 19241 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?", 19242 __FUNCTION__, 19243 rack, *plen, m, m->m_len)); 19244 /* Save off the right location before we copy and advance */ 19245 *s_soff = soff; 19246 *s_mb = rack->r_ctl.fsb.m; 19247 n = rack_fo_base_copym(m, soff, plen, 19248 &rack->r_ctl.fsb, 19249 seglimit, segsize, rack->r_ctl.fsb.hw_tls); 19250 return (n); 19251 } 19252 19253 /* Log the buffer level */ 19254 static void 19255 rack_log_queue_level(struct tcpcb *tp, struct tcp_rack *rack, 19256 int len, struct timeval *tv, 19257 uint32_t cts) 19258 { 19259 uint32_t p_rate = 0, p_queue = 0, err = 0; 19260 union tcp_log_stackspecific log; 19261 19262 #ifdef RATELIMIT 19263 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); 19264 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); 19265 #endif 19266 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 19267 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 19268 log.u_bbr.flex1 = p_rate; 19269 log.u_bbr.flex2 = p_queue; 19270 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; 19271 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; 19272 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; 19273 log.u_bbr.flex7 = 99; 19274 log.u_bbr.flex8 = 0; 19275 log.u_bbr.pkts_out = err; 19276 log.u_bbr.delRate = rack->r_ctl.crte->rate; 19277 log.u_bbr.timeStamp = 
cts; 19278 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 19279 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0, 19280 len, &log, false, NULL, __func__, __LINE__, tv); 19281 19282 } 19283 19284 static uint32_t 19285 rack_check_queue_level(struct tcp_rack *rack, struct tcpcb *tp, 19286 struct timeval *tv, uint32_t cts, int len, uint32_t segsiz) 19287 { 19288 uint64_t lentime = 0; 19289 #ifdef RATELIMIT 19290 uint32_t p_rate = 0, p_queue = 0, err; 19291 union tcp_log_stackspecific log; 19292 uint64_t bw; 19293 19294 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); 19295 /* Failed or queue is zero */ 19296 if (err || (p_queue == 0)) { 19297 lentime = 0; 19298 goto out; 19299 } 19300 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); 19301 if (err) { 19302 lentime = 0; 19303 goto out; 19304 } 19305 /* 19306 * If we reach here we have some bytes in 19307 * the queue. The number returned is a value 19308 * between 0 and 0xffff where ffff is full 19309 * and 0 is empty. So how best to make this into 19310 * something usable? 19311 * 19312 * The "safer" way is lets take the b/w gotten 19313 * from the query (which should be our b/w rate) 19314 * and pretend that a full send (our rc_pace_max_segs) 19315 * is outstanding. We factor it so its as if a full 19316 * number of our MSS segment is terms of full 19317 * ethernet segments are outstanding. 19318 */ 19319 bw = p_rate / 8; 19320 if (bw) { 19321 lentime = (rack->r_ctl.rc_pace_max_segs / segsiz); 19322 lentime *= ETHERNET_SEGMENT_SIZE; 19323 lentime *= (uint64_t)HPTS_USEC_IN_SEC; 19324 lentime /= bw; 19325 } else { 19326 /* TSNH -- KASSERT? */ 19327 lentime = 0; 19328 } 19329 out: 19330 if (tcp_bblogging_on(tp)) { 19331 memset(&log, 0, sizeof(log)); 19332 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 19333 log.u_bbr.flex1 = p_rate; 19334 log.u_bbr.flex2 = p_queue; 19335 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; 19336 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; 19337 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; 19338 log.u_bbr.flex7 = 99; 19339 log.u_bbr.flex8 = 0; 19340 log.u_bbr.pkts_out = err; 19341 log.u_bbr.delRate = rack->r_ctl.crte->rate; 19342 log.u_bbr.cur_del_rate = lentime; 19343 log.u_bbr.timeStamp = cts; 19344 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 19345 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0, 19346 len, &log, false, NULL, __func__, __LINE__,tv); 19347 } 19348 #endif 19349 return ((uint32_t)lentime); 19350 } 19351 19352 static int 19353 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm, 19354 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp) 19355 { 19356 /* 19357 * Enter the fast retransmit path. We are given that a sched_pin is 19358 * in place (if accounting is compliled in) and the cycle count taken 19359 * at the entry is in the ts_val. The concept her is that the rsm 19360 * now holds the mbuf offsets and such so we can directly transmit 19361 * without a lot of overhead, the len field is already set for 19362 * us to prohibit us from sending too much (usually its 1MSS). 
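	 * As a rough sketch (illustrative only, the details follow below):
	 * validate that the connection state allows a data retransmit, build
	 * the TCP/IP header from the pre-formed fsb template, attach the
	 * payload by referencing rsm->m at rsm->soff, hand the result to
	 * ip_output()/ip6_output(), log the send, and arm the pacing timer.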
19363 */ 19364 struct ip *ip = NULL; 19365 struct udphdr *udp = NULL; 19366 struct tcphdr *th = NULL; 19367 struct mbuf *m = NULL; 19368 struct inpcb *inp; 19369 uint8_t *cpto; 19370 struct tcp_log_buffer *lgb; 19371 #ifdef TCP_ACCOUNTING 19372 uint64_t crtsc; 19373 int cnt_thru = 1; 19374 #endif 19375 struct tcpopt to; 19376 u_char opt[TCP_MAXOLEN]; 19377 uint32_t hdrlen, optlen; 19378 int32_t slot, segsiz, max_val, tso = 0, error = 0, ulen = 0; 19379 uint16_t flags; 19380 uint32_t if_hw_tsomaxsegcount = 0, startseq; 19381 uint32_t if_hw_tsomaxsegsize; 19382 int32_t ip_sendflag = IP_NO_SND_TAG_RL; 19383 19384 #ifdef INET6 19385 struct ip6_hdr *ip6 = NULL; 19386 19387 if (rack->r_is_v6) { 19388 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 19389 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 19390 } else 19391 #endif /* INET6 */ 19392 { 19393 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 19394 hdrlen = sizeof(struct tcpiphdr); 19395 } 19396 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 19397 goto failed; 19398 } 19399 if (doing_tlp) { 19400 /* It's a TLP; add the flag, it may already be there but be sure */ 19401 rsm->r_flags |= RACK_TLP; 19402 } else { 19403 /* If it was a TLP it is not on this retransmit */ 19404 rsm->r_flags &= ~RACK_TLP; 19405 } 19406 startseq = rsm->r_start; 19407 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 19408 inp = rack->rc_inp; 19409 to.to_flags = 0; 19410 flags = tcp_outflags[tp->t_state]; 19411 if (flags & (TH_SYN|TH_RST)) { 19412 goto failed; 19413 } 19414 if (rsm->r_flags & RACK_HAS_FIN) { 19415 /* We can't send a FIN here */ 19416 goto failed; 19417 } 19418 if (flags & TH_FIN) { 19419 /* We never send a FIN */ 19420 flags &= ~TH_FIN; 19421 } 19422 if (tp->t_flags & TF_RCVD_TSTMP) { 19423 to.to_tsval = ms_cts + tp->ts_offset; 19424 to.to_tsecr = tp->ts_recent; 19425 to.to_flags = TOF_TS; 19426 } 19427 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 19428 /* TCP-MD5 (RFC2385).
*/ 19429 if (tp->t_flags & TF_SIGNATURE) 19430 to.to_flags |= TOF_SIGNATURE; 19431 #endif 19432 optlen = tcp_addoptions(&to, opt); 19433 hdrlen += optlen; 19434 udp = rack->r_ctl.fsb.udp; 19435 if (udp) 19436 hdrlen += sizeof(struct udphdr); 19437 if (rack->r_ctl.rc_pace_max_segs) 19438 max_val = rack->r_ctl.rc_pace_max_segs; 19439 else if (rack->rc_user_set_max_segs) 19440 max_val = rack->rc_user_set_max_segs * segsiz; 19441 else 19442 max_val = len; 19443 if ((tp->t_flags & TF_TSO) && 19444 V_tcp_do_tso && 19445 (len > segsiz) && 19446 (tp->t_port == 0)) 19447 tso = 1; 19448 #ifdef INET6 19449 if (MHLEN < hdrlen + max_linkhdr) 19450 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 19451 else 19452 #endif 19453 m = m_gethdr(M_NOWAIT, MT_DATA); 19454 if (m == NULL) 19455 goto failed; 19456 m->m_data += max_linkhdr; 19457 m->m_len = hdrlen; 19458 th = rack->r_ctl.fsb.th; 19459 /* Establish the len to send */ 19460 if (len > max_val) 19461 len = max_val; 19462 if ((tso) && (len + optlen > segsiz)) { 19463 uint32_t if_hw_tsomax; 19464 int32_t max_len; 19465 19466 /* extract TSO information */ 19467 if_hw_tsomax = tp->t_tsomax; 19468 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 19469 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 19470 /* 19471 * Check if we should limit by maximum payload 19472 * length: 19473 */ 19474 if (if_hw_tsomax != 0) { 19475 /* compute maximum TSO length */ 19476 max_len = (if_hw_tsomax - hdrlen - 19477 max_linkhdr); 19478 if (max_len <= 0) { 19479 goto failed; 19480 } else if (len > max_len) { 19481 len = max_len; 19482 } 19483 } 19484 if (len <= segsiz) { 19485 /* 19486 * In case there are too many small fragments don't 19487 * use TSO: 19488 */ 19489 tso = 0; 19490 } 19491 } else { 19492 tso = 0; 19493 } 19494 if ((tso == 0) && (len > segsiz)) 19495 len = segsiz; 19496 (void)tcp_get_usecs(tv); 19497 if ((len == 0) || 19498 (len <= MHLEN - hdrlen - max_linkhdr)) { 19499 goto failed; 19500 } 19501 th->th_seq = htonl(rsm->r_start); 19502 th->th_ack = htonl(tp->rcv_nxt); 19503 /* 19504 * The PUSH bit should only be applied 19505 * if the full retransmission is made. If 19506 * we are sending less than this is the 19507 * left hand edge and should not have 19508 * the PUSH bit. 
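	 * For example, if the rsm spans 1000..3896 (2896 bytes) but this pass
	 * can only retransmit 1448 of them, the PUSH bit is withheld; it is
	 * set below only when RACK_HAD_PUSH was recorded for the original
	 * send and len equals r_end - r_start.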
19509 */ 19510 if ((rsm->r_flags & RACK_HAD_PUSH) && 19511 (len == (rsm->r_end - rsm->r_start))) 19512 flags |= TH_PUSH; 19513 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 19514 if (th->th_win == 0) { 19515 tp->t_sndzerowin++; 19516 tp->t_flags |= TF_RXWIN0SENT; 19517 } else 19518 tp->t_flags &= ~TF_RXWIN0SENT; 19519 if (rsm->r_flags & RACK_TLP) { 19520 /* 19521 * TLP should not count in retran count, but 19522 * in its own bin 19523 */ 19524 counter_u64_add(rack_tlp_retran, 1); 19525 counter_u64_add(rack_tlp_retran_bytes, len); 19526 } else { 19527 tp->t_sndrexmitpack++; 19528 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 19529 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 19530 } 19531 #ifdef STATS 19532 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 19533 len); 19534 #endif 19535 if (rsm->m == NULL) 19536 goto failed; 19537 if (rsm->m && 19538 ((rsm->orig_m_len != rsm->m->m_len) || 19539 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { 19540 /* Fix up the orig_m_len and possibly the mbuf offset */ 19541 rack_adjust_orig_mlen(rsm); 19542 } 19543 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 19544 if (len <= segsiz) { 19545 /* 19546 * Must have ran out of mbufs for the copy 19547 * shorten it to no longer need tso. Lets 19548 * not put on sendalot since we are low on 19549 * mbufs. 19550 */ 19551 tso = 0; 19552 } 19553 if ((m->m_next == NULL) || (len <= 0)){ 19554 goto failed; 19555 } 19556 if (udp) { 19557 if (rack->r_is_v6) 19558 ulen = hdrlen + len - sizeof(struct ip6_hdr); 19559 else 19560 ulen = hdrlen + len - sizeof(struct ip); 19561 udp->uh_ulen = htons(ulen); 19562 } 19563 m->m_pkthdr.rcvif = (struct ifnet *)0; 19564 if (TCPS_HAVERCVDSYN(tp->t_state) && 19565 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 19566 int ect = tcp_ecn_output_established(tp, &flags, len, true); 19567 if ((tp->t_state == TCPS_SYN_RECEIVED) && 19568 (tp->t_flags2 & TF2_ECN_SND_ECE)) 19569 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 19570 #ifdef INET6 19571 if (rack->r_is_v6) { 19572 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 19573 ip6->ip6_flow |= htonl(ect << 20); 19574 } 19575 else 19576 #endif 19577 { 19578 ip->ip_tos &= ~IPTOS_ECN_MASK; 19579 ip->ip_tos |= ect; 19580 } 19581 } 19582 if (rack->r_ctl.crte != NULL) { 19583 /* See if we can send via the hw queue */ 19584 slot = rack_check_queue_level(rack, tp, tv, cts, len, segsiz); 19585 /* If there is nothing in queue (no pacing time) we can send via the hw queue */ 19586 if (slot == 0) 19587 ip_sendflag = 0; 19588 } 19589 tcp_set_flags(th, flags); 19590 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 19591 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 19592 if (to.to_flags & TOF_SIGNATURE) { 19593 /* 19594 * Calculate MD5 signature and put it into the place 19595 * determined before. 19596 * NOTE: since TCP options buffer doesn't point into 19597 * mbuf's data, calculate offset and use it. 19598 */ 19599 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 19600 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 19601 /* 19602 * Do not send segment if the calculation of MD5 19603 * digest has failed. 
19604 */ 19605 goto failed; 19606 } 19607 } 19608 #endif 19609 #ifdef INET6 19610 if (rack->r_is_v6) { 19611 if (tp->t_port) { 19612 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 19613 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 19614 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 19615 th->th_sum = htons(0); 19616 UDPSTAT_INC(udps_opackets); 19617 } else { 19618 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 19619 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 19620 th->th_sum = in6_cksum_pseudo(ip6, 19621 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 19622 0); 19623 } 19624 } 19625 #endif 19626 #if defined(INET6) && defined(INET) 19627 else 19628 #endif 19629 #ifdef INET 19630 { 19631 if (tp->t_port) { 19632 m->m_pkthdr.csum_flags = CSUM_UDP; 19633 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 19634 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 19635 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 19636 th->th_sum = htons(0); 19637 UDPSTAT_INC(udps_opackets); 19638 } else { 19639 m->m_pkthdr.csum_flags = CSUM_TCP; 19640 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 19641 th->th_sum = in_pseudo(ip->ip_src.s_addr, 19642 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 19643 IPPROTO_TCP + len + optlen)); 19644 } 19645 /* IP version must be set here for ipv4/ipv6 checking later */ 19646 KASSERT(ip->ip_v == IPVERSION, 19647 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 19648 } 19649 #endif 19650 if (tso) { 19651 /* 19652 * Here we use segsiz since we have no added options besides 19653 * any standard timestamp options (no DSACKs or SACKS are sent 19654 * via either fast-path). 19655 */ 19656 KASSERT(len > segsiz, 19657 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 19658 m->m_pkthdr.csum_flags |= CSUM_TSO; 19659 m->m_pkthdr.tso_segsz = segsiz; 19660 } 19661 #ifdef INET6 19662 if (rack->r_is_v6) { 19663 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 19664 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 19665 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 19666 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 19667 else 19668 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 19669 } 19670 #endif 19671 #if defined(INET) && defined(INET6) 19672 else 19673 #endif 19674 #ifdef INET 19675 { 19676 ip->ip_len = htons(m->m_pkthdr.len); 19677 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 19678 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 19679 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 19680 if (tp->t_port == 0 || len < V_tcp_minmss) { 19681 ip->ip_off |= htons(IP_DF); 19682 } 19683 } else { 19684 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 19685 } 19686 } 19687 #endif 19688 if (doing_tlp == 0) { 19689 /* Set we retransmitted */ 19690 rack->rc_gp_saw_rec = 1; 19691 } else { 19692 /* Its a TLP set ca or ss */ 19693 if (tp->snd_cwnd > tp->snd_ssthresh) { 19694 /* Set we sent in CA */ 19695 rack->rc_gp_saw_ca = 1; 19696 } else { 19697 /* Set we sent in SS */ 19698 rack->rc_gp_saw_ss = 1; 19699 } 19700 } 19701 /* Time to copy in our header */ 19702 cpto = mtod(m, uint8_t *); 19703 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 19704 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 19705 if (optlen) { 19706 bcopy(opt, th + 1, optlen); 19707 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 19708 } else { 19709 th->th_off = sizeof(struct tcphdr) >> 2; 19710 } 19711 if (tcp_bblogging_on(rack->rc_tp)) { 19712 union tcp_log_stackspecific log; 19713 19714 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 19715 
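			/*
			 * This retransmit covers data that had been inside a
			 * collapsed (shrunk) receive window; log it and bump the
			 * collapsed-window retransmit counters separately.
			 */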
rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 19716 counter_u64_add(rack_collapsed_win_rxt, 1); 19717 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 19718 } 19719 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 19720 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 19721 if (rack->rack_no_prr) 19722 log.u_bbr.flex1 = 0; 19723 else 19724 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 19725 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 19726 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 19727 log.u_bbr.flex4 = max_val; 19728 /* Save off the early/late values */ 19729 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 19730 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 19731 log.u_bbr.bw_inuse = rack_get_bw(rack); 19732 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 19733 if (doing_tlp == 0) 19734 log.u_bbr.flex8 = 1; 19735 else 19736 log.u_bbr.flex8 = 2; 19737 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 19738 log.u_bbr.flex7 = 55; 19739 log.u_bbr.pkts_out = tp->t_maxseg; 19740 log.u_bbr.timeStamp = cts; 19741 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 19742 if (rsm && (rsm->r_rtr_cnt > 0)) { 19743 /* 19744 * When we have a retransmit we want to log the 19745 * burst at send and flight at send from before. 19746 */ 19747 log.u_bbr.flex5 = rsm->r_fas; 19748 log.u_bbr.bbr_substate = rsm->r_bas; 19749 } else { 19750 /* 19751 * This is currently unlikely until we do the 19752 * packet pair probes but I will add it for completeness. 19753 */ 19754 log.u_bbr.flex5 = log.u_bbr.inflight; 19755 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 19756 } 19757 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 19758 log.u_bbr.delivered = 0; 19759 log.u_bbr.rttProp = (uintptr_t)rsm; 19760 log.u_bbr.delRate = rsm->r_flags; 19761 log.u_bbr.delRate <<= 31; 19762 log.u_bbr.delRate |= rack->r_must_retran; 19763 log.u_bbr.delRate <<= 1; 19764 log.u_bbr.delRate |= 1; 19765 log.u_bbr.pkt_epoch = __LINE__; 19766 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 19767 len, &log, false, NULL, __func__, __LINE__, tv); 19768 } else 19769 lgb = NULL; 19770 if ((rack->r_ctl.crte != NULL) && 19771 tcp_bblogging_on(tp)) { 19772 rack_log_queue_level(tp, rack, len, tv, cts); 19773 } 19774 #ifdef INET6 19775 if (rack->r_is_v6) { 19776 error = ip6_output(m, inp->in6p_outputopts, 19777 &inp->inp_route6, 19778 ip_sendflag, NULL, NULL, inp); 19779 } 19780 else 19781 #endif 19782 #ifdef INET 19783 { 19784 error = ip_output(m, NULL, 19785 &inp->inp_route, 19786 ip_sendflag, 0, inp); 19787 } 19788 #endif 19789 m = NULL; 19790 if (lgb) { 19791 lgb->tlb_errno = error; 19792 lgb = NULL; 19793 } 19794 /* Move snd_nxt to snd_max so we don't have false retransmissions */ 19795 tp->snd_nxt = tp->snd_max; 19796 if (error) { 19797 goto failed; 19798 } else if (rack->rc_hw_nobuf && (ip_sendflag != IP_NO_SND_TAG_RL)) { 19799 rack->rc_hw_nobuf = 0; 19800 rack->r_ctl.rc_agg_delayed = 0; 19801 rack->r_early = 0; 19802 rack->r_late = 0; 19803 rack->r_ctl.rc_agg_early = 0; 19804 } 19805 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 19806 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls, segsiz); 19807 if (doing_tlp) { 19808 rack->rc_tlp_in_progress = 1; 19809 rack->r_ctl.rc_tlp_cnt_out++; 19810 } 19811 if (error == 0) { 19812 counter_u64_add(rack_total_bytes, len); 19813 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 19814 if (doing_tlp) { 19815 
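			/*
			 * Remember exactly which sequence range this TLP covered so
			 * that later ack/SACK processing can recognize the probe and
			 * judge whether it was actually needed.
			 */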
rack->rc_last_sent_tlp_past_cumack = 0; 19816 rack->rc_last_sent_tlp_seq_valid = 1; 19817 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 19818 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 19819 } 19820 if (rack->r_ctl.rc_prr_sndcnt >= len) 19821 rack->r_ctl.rc_prr_sndcnt -= len; 19822 else 19823 rack->r_ctl.rc_prr_sndcnt = 0; 19824 } 19825 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 19826 rack->forced_ack = 0; /* If we send something zap the FA flag */ 19827 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 19828 rack->r_ctl.retran_during_recovery += len; 19829 { 19830 int idx; 19831 19832 idx = (len / segsiz) + 3; 19833 if (idx >= TCP_MSS_ACCT_ATIMER) 19834 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 19835 else 19836 counter_u64_add(rack_out_size[idx], 1); 19837 } 19838 if (tp->t_rtttime == 0) { 19839 tp->t_rtttime = ticks; 19840 tp->t_rtseq = startseq; 19841 KMOD_TCPSTAT_INC(tcps_segstimed); 19842 } 19843 counter_u64_add(rack_fto_rsm_send, 1); 19844 if (error && (error == ENOBUFS)) { 19845 if (rack->r_ctl.crte != NULL) { 19846 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); 19847 if (tcp_bblogging_on(rack->rc_tp)) 19848 rack_log_queue_level(tp, rack, len, tv, cts); 19849 } else 19850 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); 19851 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 19852 if (rack->rc_enobuf < 0x7f) 19853 rack->rc_enobuf++; 19854 if (slot < (10 * HPTS_USEC_IN_MSEC)) 19855 slot = 10 * HPTS_USEC_IN_MSEC; 19856 if (rack->r_ctl.crte != NULL) { 19857 counter_u64_add(rack_saw_enobuf_hw, 1); 19858 tcp_rl_log_enobuf(rack->r_ctl.crte); 19859 } 19860 counter_u64_add(rack_saw_enobuf, 1); 19861 } else { 19862 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz, __LINE__); 19863 } 19864 rack_start_hpts_timer(rack, tp, cts, slot, len, 0); 19865 #ifdef TCP_ACCOUNTING 19866 crtsc = get_cyclecount(); 19867 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19868 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 19869 } 19870 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19871 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 19872 } 19873 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19874 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); 19875 } 19876 sched_unpin(); 19877 #endif 19878 return (0); 19879 failed: 19880 if (m) 19881 m_free(m); 19882 return (-1); 19883 } 19884 19885 static void 19886 rack_sndbuf_autoscale(struct tcp_rack *rack) 19887 { 19888 /* 19889 * Automatic sizing of send socket buffer. Often the send buffer 19890 * size is not optimally adjusted to the actual network conditions 19891 * at hand (delay bandwidth product). Setting the buffer size too 19892 * small limits throughput on links with high bandwidth and high 19893 * delay (eg. trans-continental/oceanic links). Setting the 19894 * buffer size too big consumes too much real kernel memory, 19895 * especially with many connections on busy servers. 19896 * 19897 * The criteria to step up the send buffer one notch are: 19898 * 1. receive window of remote host is larger than send buffer 19899 * (with a fudge factor of 5/4th); 19900 * 2. send buffer is filled to 7/8th with data (so we actually 19901 * have data to make use of it); 19902 * 3. send buffer fill has not hit maximal automatic size; 19903 * 4. our send window (slow start and cogestion controlled) is 19904 * larger than sent but unacknowledged data in send buffer. 
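 *
 * A hedged sketch of the step-up arithmetic below (real field and
 * sysctl names, no particular sizes implied):
 *
 *   scaleup = rack_autosndbuf_inc ?
 *       (rack_autosndbuf_inc * sb_hiwat) / 100 : V_tcp_autosndbuf_inc;
 *   scaleup = max(scaleup, V_tcp_autosndbuf_inc);
 *   scaleup += sb_hiwat;
 *   scaleup = min(scaleup, V_tcp_autosndbuf_max);
 *
 * and the result is what gets reserved via sbreserve_locked().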
19905 * 19906 * Note that the rack version moves things much faster since 19907 * we want to avoid hitting cache lines in the rack_fast_output() 19908 * path so this is called much less often and thus moves 19909 * the SB forward by a percentage. 19910 */ 19911 struct socket *so; 19912 struct tcpcb *tp; 19913 uint32_t sendwin, scaleup; 19914 19915 tp = rack->rc_tp; 19916 so = rack->rc_inp->inp_socket; 19917 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); 19918 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { 19919 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && 19920 sbused(&so->so_snd) >= 19921 (so->so_snd.sb_hiwat / 8 * 7) && 19922 sbused(&so->so_snd) < V_tcp_autosndbuf_max && 19923 sendwin >= (sbused(&so->so_snd) - 19924 (tp->snd_max - tp->snd_una))) { 19925 if (rack_autosndbuf_inc) 19926 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; 19927 else 19928 scaleup = V_tcp_autosndbuf_inc; 19929 if (scaleup < V_tcp_autosndbuf_inc) 19930 scaleup = V_tcp_autosndbuf_inc; 19931 scaleup += so->so_snd.sb_hiwat; 19932 if (scaleup > V_tcp_autosndbuf_max) 19933 scaleup = V_tcp_autosndbuf_max; 19934 if (!sbreserve_locked(so, SO_SND, scaleup, curthread)) 19935 so->so_snd.sb_flags &= ~SB_AUTOSIZE; 19936 } 19937 } 19938 } 19939 19940 static int 19941 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val, 19942 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err) 19943 { 19944 /* 19945 * Enter to do fast output. We are given that the sched_pin is 19946 * in place (if accounting is compiled in) and the cycle count taken 19947 * at entry is in place in ts_val. The idea here is that 19948 * we know how many more bytes needs to be sent (presumably either 19949 * during pacing or to fill the cwnd and that was greater than 19950 * the max-burst). We have how much to send and all the info we 19951 * need to just send. 
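 * (Put differently: the fast send block in r_ctl.fsb must already hold
 * a prebuilt TCP/IP header template and a left_to_send count; that
 * state is armed elsewhere, see rack_setup_fast_output() and
 * rack_init_fsb_block().)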
19952 */ 19953 #ifdef INET 19954 struct ip *ip = NULL; 19955 #endif 19956 struct udphdr *udp = NULL; 19957 struct tcphdr *th = NULL; 19958 struct mbuf *m, *s_mb; 19959 struct inpcb *inp; 19960 uint8_t *cpto; 19961 struct tcp_log_buffer *lgb; 19962 #ifdef TCP_ACCOUNTING 19963 uint64_t crtsc; 19964 #endif 19965 struct tcpopt to; 19966 u_char opt[TCP_MAXOLEN]; 19967 uint32_t hdrlen, optlen; 19968 #ifdef TCP_ACCOUNTING 19969 int cnt_thru = 1; 19970 #endif 19971 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0; 19972 uint16_t flags; 19973 uint32_t s_soff; 19974 uint32_t if_hw_tsomaxsegcount = 0, startseq; 19975 uint32_t if_hw_tsomaxsegsize; 19976 uint32_t add_flag = RACK_SENT_FP; 19977 #ifdef INET6 19978 struct ip6_hdr *ip6 = NULL; 19979 19980 if (rack->r_is_v6) { 19981 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 19982 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 19983 } else 19984 #endif /* INET6 */ 19985 { 19986 #ifdef INET 19987 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 19988 hdrlen = sizeof(struct tcpiphdr); 19989 #endif 19990 } 19991 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 19992 m = NULL; 19993 goto failed; 19994 } 19995 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 19996 startseq = tp->snd_max; 19997 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 19998 inp = rack->rc_inp; 19999 len = rack->r_ctl.fsb.left_to_send; 20000 to.to_flags = 0; 20001 flags = rack->r_ctl.fsb.tcp_flags; 20002 if (tp->t_flags & TF_RCVD_TSTMP) { 20003 to.to_tsval = ms_cts + tp->ts_offset; 20004 to.to_tsecr = tp->ts_recent; 20005 to.to_flags = TOF_TS; 20006 } 20007 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 20008 /* TCP-MD5 (RFC2385). */ 20009 if (tp->t_flags & TF_SIGNATURE) 20010 to.to_flags |= TOF_SIGNATURE; 20011 #endif 20012 optlen = tcp_addoptions(&to, opt); 20013 hdrlen += optlen; 20014 udp = rack->r_ctl.fsb.udp; 20015 if (udp) 20016 hdrlen += sizeof(struct udphdr); 20017 if (rack->r_ctl.rc_pace_max_segs) 20018 max_val = rack->r_ctl.rc_pace_max_segs; 20019 else if (rack->rc_user_set_max_segs) 20020 max_val = rack->rc_user_set_max_segs * segsiz; 20021 else 20022 max_val = len; 20023 if ((tp->t_flags & TF_TSO) && 20024 V_tcp_do_tso && 20025 (len > segsiz) && 20026 (tp->t_port == 0)) 20027 tso = 1; 20028 again: 20029 #ifdef INET6 20030 if (MHLEN < hdrlen + max_linkhdr) 20031 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 20032 else 20033 #endif 20034 m = m_gethdr(M_NOWAIT, MT_DATA); 20035 if (m == NULL) 20036 goto failed; 20037 m->m_data += max_linkhdr; 20038 m->m_len = hdrlen; 20039 th = rack->r_ctl.fsb.th; 20040 /* Establish the len to send */ 20041 if (len > max_val) 20042 len = max_val; 20043 if ((tso) && (len + optlen > segsiz)) { 20044 uint32_t if_hw_tsomax; 20045 int32_t max_len; 20046 20047 /* extract TSO information */ 20048 if_hw_tsomax = tp->t_tsomax; 20049 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 20050 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 20051 /* 20052 * Check if we should limit by maximum payload 20053 * length: 20054 */ 20055 if (if_hw_tsomax != 0) { 20056 /* compute maximum TSO length */ 20057 max_len = (if_hw_tsomax - hdrlen - 20058 max_linkhdr); 20059 if (max_len <= 0) { 20060 goto failed; 20061 } else if (len > max_len) { 20062 len = max_len; 20063 } 20064 } 20065 if (len <= segsiz) { 20066 /* 20067 * In case there are too many small fragments don't 20068 * use TSO: 20069 */ 20070 tso = 0; 20071 } 20072 } else { 20073 tso = 0; 20074 } 20075 if ((tso == 0) && (len > segsiz)) 20076 len = segsiz; 20077 
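	/*
	 * At this point either tso is still set and len may span multiple
	 * segments (clamped above to the interface's if_hw_tsomax budget),
	 * or tso has been cleared and len fits within a single segment.
	 */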
(void)tcp_get_usecs(tv); 20078 if ((len == 0) || 20079 (len <= MHLEN - hdrlen - max_linkhdr)) { 20080 goto failed; 20081 } 20082 sb_offset = tp->snd_max - tp->snd_una; 20083 th->th_seq = htonl(tp->snd_max); 20084 th->th_ack = htonl(tp->rcv_nxt); 20085 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 20086 if (th->th_win == 0) { 20087 tp->t_sndzerowin++; 20088 tp->t_flags |= TF_RXWIN0SENT; 20089 } else 20090 tp->t_flags &= ~TF_RXWIN0SENT; 20091 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 20092 KMOD_TCPSTAT_INC(tcps_sndpack); 20093 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 20094 #ifdef STATS 20095 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 20096 len); 20097 #endif 20098 if (rack->r_ctl.fsb.m == NULL) 20099 goto failed; 20100 20101 /* s_mb and s_soff are saved for rack_log_output */ 20102 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 20103 &s_mb, &s_soff); 20104 if (len <= segsiz) { 20105 /* 20106 * Must have ran out of mbufs for the copy 20107 * shorten it to no longer need tso. Lets 20108 * not put on sendalot since we are low on 20109 * mbufs. 20110 */ 20111 tso = 0; 20112 } 20113 if (rack->r_ctl.fsb.rfo_apply_push && 20114 (len == rack->r_ctl.fsb.left_to_send)) { 20115 tcp_set_flags(th, flags | TH_PUSH); 20116 add_flag |= RACK_HAD_PUSH; 20117 } 20118 if ((m->m_next == NULL) || (len <= 0)){ 20119 goto failed; 20120 } 20121 if (udp) { 20122 if (rack->r_is_v6) 20123 ulen = hdrlen + len - sizeof(struct ip6_hdr); 20124 else 20125 ulen = hdrlen + len - sizeof(struct ip); 20126 udp->uh_ulen = htons(ulen); 20127 } 20128 m->m_pkthdr.rcvif = (struct ifnet *)0; 20129 if (TCPS_HAVERCVDSYN(tp->t_state) && 20130 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 20131 int ect = tcp_ecn_output_established(tp, &flags, len, false); 20132 if ((tp->t_state == TCPS_SYN_RECEIVED) && 20133 (tp->t_flags2 & TF2_ECN_SND_ECE)) 20134 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 20135 #ifdef INET6 20136 if (rack->r_is_v6) { 20137 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 20138 ip6->ip6_flow |= htonl(ect << 20); 20139 } 20140 else 20141 #endif 20142 { 20143 #ifdef INET 20144 ip->ip_tos &= ~IPTOS_ECN_MASK; 20145 ip->ip_tos |= ect; 20146 #endif 20147 } 20148 } 20149 tcp_set_flags(th, flags); 20150 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 20151 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 20152 if (to.to_flags & TOF_SIGNATURE) { 20153 /* 20154 * Calculate MD5 signature and put it into the place 20155 * determined before. 20156 * NOTE: since TCP options buffer doesn't point into 20157 * mbuf's data, calculate offset and use it. 20158 */ 20159 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 20160 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 20161 /* 20162 * Do not send segment if the calculation of MD5 20163 * digest has failed. 
20164 */ 20165 goto failed; 20166 } 20167 } 20168 #endif 20169 #ifdef INET6 20170 if (rack->r_is_v6) { 20171 if (tp->t_port) { 20172 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 20173 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 20174 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 20175 th->th_sum = htons(0); 20176 UDPSTAT_INC(udps_opackets); 20177 } else { 20178 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 20179 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 20180 th->th_sum = in6_cksum_pseudo(ip6, 20181 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 20182 0); 20183 } 20184 } 20185 #endif 20186 #if defined(INET6) && defined(INET) 20187 else 20188 #endif 20189 #ifdef INET 20190 { 20191 if (tp->t_port) { 20192 m->m_pkthdr.csum_flags = CSUM_UDP; 20193 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 20194 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 20195 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 20196 th->th_sum = htons(0); 20197 UDPSTAT_INC(udps_opackets); 20198 } else { 20199 m->m_pkthdr.csum_flags = CSUM_TCP; 20200 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 20201 th->th_sum = in_pseudo(ip->ip_src.s_addr, 20202 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 20203 IPPROTO_TCP + len + optlen)); 20204 } 20205 /* IP version must be set here for ipv4/ipv6 checking later */ 20206 KASSERT(ip->ip_v == IPVERSION, 20207 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 20208 } 20209 #endif 20210 if (tso) { 20211 /* 20212 * Here we use segsiz since we have no added options besides 20213 * any standard timestamp options (no DSACKs or SACKS are sent 20214 * via either fast-path). 20215 */ 20216 KASSERT(len > segsiz, 20217 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 20218 m->m_pkthdr.csum_flags |= CSUM_TSO; 20219 m->m_pkthdr.tso_segsz = segsiz; 20220 } 20221 #ifdef INET6 20222 if (rack->r_is_v6) { 20223 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 20224 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 20225 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 20226 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 20227 else 20228 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 20229 } 20230 #endif 20231 #if defined(INET) && defined(INET6) 20232 else 20233 #endif 20234 #ifdef INET 20235 { 20236 ip->ip_len = htons(m->m_pkthdr.len); 20237 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 20238 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 20239 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 20240 if (tp->t_port == 0 || len < V_tcp_minmss) { 20241 ip->ip_off |= htons(IP_DF); 20242 } 20243 } else { 20244 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 20245 } 20246 } 20247 #endif 20248 if (tp->snd_cwnd > tp->snd_ssthresh) { 20249 /* Set we sent in CA */ 20250 rack->rc_gp_saw_ca = 1; 20251 } else { 20252 /* Set we sent in SS */ 20253 rack->rc_gp_saw_ss = 1; 20254 } 20255 /* Time to copy in our header */ 20256 cpto = mtod(m, uint8_t *); 20257 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 20258 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 20259 if (optlen) { 20260 bcopy(opt, th + 1, optlen); 20261 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 20262 } else { 20263 th->th_off = sizeof(struct tcphdr) >> 2; 20264 } 20265 if ((rack->r_ctl.crte != NULL) && 20266 tcp_bblogging_on(tp)) { 20267 rack_log_queue_level(tp, rack, len, tv, cts); 20268 } 20269 if (tcp_bblogging_on(rack->rc_tp)) { 20270 union tcp_log_stackspecific log; 20271 20272 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 20273 log.u_bbr.inhpts = 
tcp_in_hpts(rack->rc_tp); 20274 if (rack->rack_no_prr) 20275 log.u_bbr.flex1 = 0; 20276 else 20277 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 20278 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 20279 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 20280 log.u_bbr.flex4 = max_val; 20281 /* Save off the early/late values */ 20282 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 20283 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 20284 log.u_bbr.bw_inuse = rack_get_bw(rack); 20285 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 20286 log.u_bbr.flex8 = 0; 20287 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 20288 log.u_bbr.flex7 = 44; 20289 log.u_bbr.pkts_out = tp->t_maxseg; 20290 log.u_bbr.timeStamp = cts; 20291 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 20292 log.u_bbr.flex5 = log.u_bbr.inflight; 20293 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 20294 log.u_bbr.delivered = 0; 20295 log.u_bbr.rttProp = 0; 20296 log.u_bbr.delRate = rack->r_must_retran; 20297 log.u_bbr.delRate <<= 1; 20298 log.u_bbr.pkt_epoch = __LINE__; 20299 /* For fast output no retrans so just inflight and how many mss we send */ 20300 log.u_bbr.flex5 = log.u_bbr.inflight; 20301 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 20302 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 20303 len, &log, false, NULL, __func__, __LINE__, tv); 20304 } else 20305 lgb = NULL; 20306 #ifdef INET6 20307 if (rack->r_is_v6) { 20308 error = ip6_output(m, inp->in6p_outputopts, 20309 &inp->inp_route6, 20310 0, NULL, NULL, inp); 20311 } 20312 #endif 20313 #if defined(INET) && defined(INET6) 20314 else 20315 #endif 20316 #ifdef INET 20317 { 20318 error = ip_output(m, NULL, 20319 &inp->inp_route, 20320 0, 0, inp); 20321 } 20322 #endif 20323 if (lgb) { 20324 lgb->tlb_errno = error; 20325 lgb = NULL; 20326 } 20327 if (error) { 20328 *send_err = error; 20329 m = NULL; 20330 goto failed; 20331 } else if (rack->rc_hw_nobuf) { 20332 rack->rc_hw_nobuf = 0; 20333 rack->r_ctl.rc_agg_delayed = 0; 20334 rack->r_early = 0; 20335 rack->r_late = 0; 20336 rack->r_ctl.rc_agg_early = 0; 20337 } 20338 if ((error == 0) && (rack->lt_bw_up == 0)) { 20339 /* Unlikely */ 20340 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(tv); 20341 rack->r_ctl.lt_seq = tp->snd_una; 20342 rack->lt_bw_up = 1; 20343 } else if ((error == 0) && 20344 (((tp->snd_max + len) - rack->r_ctl.lt_seq) > 0x7fffffff)) { 20345 /* 20346 * Need to record what we have since we are 20347 * approaching seq wrap. 
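 * (lt_bw_bytes and lt_bw_time accumulate the long-term send rate; we
 * fold in the bytes covered since lt_seq and the elapsed time now so
 * the 32-bit distance from lt_seq never becomes ambiguous.)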
20348 */ 20349 struct timeval tv; 20350 uint64_t tmark; 20351 20352 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); 20353 rack->r_ctl.lt_seq = tp->snd_una; 20354 tmark = tcp_get_u64_usecs(&tv); 20355 if (tmark > rack->r_ctl.lt_timemark) { 20356 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 20357 rack->r_ctl.lt_timemark = tmark; 20358 } 20359 } 20360 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 20361 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls, segsiz); 20362 m = NULL; 20363 if (tp->snd_una == tp->snd_max) { 20364 rack->r_ctl.rc_tlp_rxt_last_time = cts; 20365 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 20366 tp->t_acktime = ticks; 20367 } 20368 counter_u64_add(rack_total_bytes, len); 20369 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 20370 20371 rack->forced_ack = 0; /* If we send something zap the FA flag */ 20372 tot_len += len; 20373 if ((tp->t_flags & TF_GPUTINPROG) == 0) 20374 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 20375 tp->snd_max += len; 20376 tp->snd_nxt = tp->snd_max; 20377 if (rack->rc_new_rnd_needed) { 20378 rack_new_round_starts(tp, rack, tp->snd_max); 20379 } 20380 { 20381 int idx; 20382 20383 idx = (len / segsiz) + 3; 20384 if (idx >= TCP_MSS_ACCT_ATIMER) 20385 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 20386 else 20387 counter_u64_add(rack_out_size[idx], 1); 20388 } 20389 if (len <= rack->r_ctl.fsb.left_to_send) 20390 rack->r_ctl.fsb.left_to_send -= len; 20391 else 20392 rack->r_ctl.fsb.left_to_send = 0; 20393 if (rack->r_ctl.fsb.left_to_send < segsiz) { 20394 rack->r_fast_output = 0; 20395 rack->r_ctl.fsb.left_to_send = 0; 20396 /* At the end of fast_output scale up the sb */ 20397 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd); 20398 rack_sndbuf_autoscale(rack); 20399 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd); 20400 } 20401 if (tp->t_rtttime == 0) { 20402 tp->t_rtttime = ticks; 20403 tp->t_rtseq = startseq; 20404 KMOD_TCPSTAT_INC(tcps_segstimed); 20405 } 20406 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 20407 (max_val > len) && 20408 (tso == 0)) { 20409 max_val -= len; 20410 len = segsiz; 20411 th = rack->r_ctl.fsb.th; 20412 #ifdef TCP_ACCOUNTING 20413 cnt_thru++; 20414 #endif 20415 goto again; 20416 } 20417 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 20418 counter_u64_add(rack_fto_send, 1); 20419 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz, __LINE__); 20420 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0); 20421 #ifdef TCP_ACCOUNTING 20422 crtsc = get_cyclecount(); 20423 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 20424 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 20425 } 20426 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 20427 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 20428 } 20429 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 20430 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz); 20431 } 20432 sched_unpin(); 20433 #endif 20434 return (0); 20435 failed: 20436 if (m) 20437 m_free(m); 20438 rack->r_fast_output = 0; 20439 return (-1); 20440 } 20441 20442 static inline void 20443 rack_setup_fast_output(struct tcpcb *tp, struct tcp_rack *rack, 20444 struct sockbuf *sb, 20445 int len, int orig_len, int segsiz, uint32_t pace_max_seg, 20446 bool hw_tls, 20447 uint16_t flags) 20448 { 20449 rack->r_fast_output = 1; 20450 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 20451 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 20452 
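	/*
	 * Snapshot the tail mbuf's current length (and, just below, its
	 * trailing room) so later fast sends can detect whether more data
	 * was appended into this same mbuf after the block was set up.
	 */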
rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); 20453 rack->r_ctl.fsb.tcp_flags = flags; 20454 rack->r_ctl.fsb.left_to_send = orig_len - len; 20455 if (rack->r_ctl.fsb.left_to_send < pace_max_seg) { 20456 /* Less than a full sized pace, lets not */ 20457 rack->r_fast_output = 0; 20458 return; 20459 } else { 20460 /* Round down to the nearest pace_max_seg */ 20461 rack->r_ctl.fsb.left_to_send = rounddown(rack->r_ctl.fsb.left_to_send, pace_max_seg); 20462 } 20463 if (hw_tls) 20464 rack->r_ctl.fsb.hw_tls = 1; 20465 else 20466 rack->r_ctl.fsb.hw_tls = 0; 20467 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 20468 ("rack:%p left_to_send:%u sbavail:%u out:%u", 20469 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 20470 (tp->snd_max - tp->snd_una))); 20471 if (rack->r_ctl.fsb.left_to_send < segsiz) 20472 rack->r_fast_output = 0; 20473 else { 20474 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 20475 rack->r_ctl.fsb.rfo_apply_push = 1; 20476 else 20477 rack->r_ctl.fsb.rfo_apply_push = 0; 20478 } 20479 } 20480 20481 static uint32_t 20482 rack_get_hpts_pacing_min_for_bw(struct tcp_rack *rack, int32_t segsiz) 20483 { 20484 uint64_t min_time; 20485 uint32_t maxlen; 20486 20487 min_time = (uint64_t)get_hpts_min_sleep_time(); 20488 maxlen = (uint32_t)((rack->r_ctl.gp_bw * min_time) / (uint64_t)HPTS_USEC_IN_SEC); 20489 maxlen = roundup(maxlen, segsiz); 20490 return (maxlen); 20491 } 20492 20493 static struct rack_sendmap * 20494 rack_check_collapsed(struct tcp_rack *rack, uint32_t cts) 20495 { 20496 struct rack_sendmap *rsm = NULL; 20497 int thresh; 20498 20499 restart: 20500 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); 20501 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) { 20502 /* Nothing, strange turn off validity */ 20503 rack->r_collapse_point_valid = 0; 20504 return (NULL); 20505 } 20506 /* Can we send it yet? */ 20507 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) { 20508 /* 20509 * Receiver window has not grown enough for 20510 * the segment to be put on the wire. 20511 */ 20512 return (NULL); 20513 } 20514 if (rsm->r_flags & RACK_ACKED) { 20515 /* 20516 * It has been sacked, lets move to the 20517 * next one if possible. 20518 */ 20519 rack->r_ctl.last_collapse_point = rsm->r_end; 20520 /* Are we done? */ 20521 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 20522 rack->r_ctl.high_collapse_point)) { 20523 rack->r_collapse_point_valid = 0; 20524 return (NULL); 20525 } 20526 goto restart; 20527 } 20528 /* Now has it been long enough ? */ 20529 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts, __LINE__, 1); 20530 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) { 20531 rack_log_collapse(rack, rsm->r_start, 20532 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 20533 thresh, __LINE__, 6, rsm->r_flags, rsm); 20534 return (rsm); 20535 } 20536 /* Not enough time */ 20537 rack_log_collapse(rack, rsm->r_start, 20538 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 20539 thresh, __LINE__, 7, rsm->r_flags, rsm); 20540 return (NULL); 20541 } 20542 20543 static void 20544 rack_credit_back_policer_idle_time(struct tcp_rack *rack, uint64_t idle_t, int line) 20545 { 20546 /* 20547 * We were idle some time (idle_t) and so our policer bucket 20548 * needs to grow. It can go no higher than policer_bucket_size. 
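 *
 * The credit is simply the idle time scaled by the estimated policer
 * rate, roughly:
 *
 *   len = (idle_t * policer_bw) / HPTS_USEC_IN_SEC;
 *
 * i.e. the bytes the policer's bucket would have refilled while we
 * were quiet, capped at policer_bucket_size.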
20549 */ 20550 uint64_t len; 20551 20552 len = idle_t * rack->r_ctl.policer_bw; 20553 len /= HPTS_USEC_IN_SEC; 20554 rack->r_ctl.current_policer_bucket += (uint32_t)len; 20555 if (rack->r_ctl.policer_bucket_size < rack->r_ctl.current_policer_bucket) { 20556 rack->r_ctl.current_policer_bucket = rack->r_ctl.policer_bucket_size; 20557 } 20558 if (rack_verbose_logging > 0) 20559 policer_detection_log(rack, (uint32_t)len, line, (uint32_t)idle_t, 0, 7); 20560 } 20561 20562 static inline void 20563 rack_validate_sizes(struct tcp_rack *rack, int32_t *len, int32_t segsiz, uint32_t pace_max_seg) 20564 { 20565 if ((rack->full_size_rxt == 0) && 20566 (rack->shape_rxt_to_pacing_min == 0) && 20567 (*len >= segsiz)) { 20568 *len = segsiz; 20569 } else if (rack->shape_rxt_to_pacing_min && 20570 rack->gp_ready) { 20571 /* We use pacing min as shaping len req */ 20572 uint32_t maxlen; 20573 20574 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 20575 if (*len > maxlen) 20576 *len = maxlen; 20577 } else { 20578 /* 20579 * The else is full_size_rxt is on so send it all 20580 * note we do need to check this for exceeding 20581 * our max segment size due to the fact that 20582 * we do sometimes merge chunks together i.e. 20583 * we cannot just assume that we will never have 20584 * a chunk greater than pace_max_seg 20585 */ 20586 if (*len > pace_max_seg) 20587 *len = pace_max_seg; 20588 } 20589 } 20590 20591 static int 20592 rack_output(struct tcpcb *tp) 20593 { 20594 struct socket *so; 20595 uint32_t recwin; 20596 uint32_t sb_offset, s_moff = 0; 20597 int32_t len, error = 0; 20598 uint16_t flags; 20599 struct mbuf *m, *s_mb = NULL; 20600 struct mbuf *mb; 20601 uint32_t if_hw_tsomaxsegcount = 0; 20602 uint32_t if_hw_tsomaxsegsize; 20603 int32_t segsiz, minseg; 20604 long tot_len_this_send = 0; 20605 #ifdef INET 20606 struct ip *ip = NULL; 20607 #endif 20608 struct udphdr *udp = NULL; 20609 struct tcp_rack *rack; 20610 struct tcphdr *th; 20611 uint8_t pass = 0; 20612 uint8_t mark = 0; 20613 uint8_t check_done = 0; 20614 uint8_t wanted_cookie = 0; 20615 u_char opt[TCP_MAXOLEN]; 20616 unsigned ipoptlen, optlen, hdrlen, ulen=0; 20617 uint32_t rack_seq; 20618 20619 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 20620 unsigned ipsec_optlen = 0; 20621 20622 #endif 20623 int32_t idle, sendalot; 20624 uint32_t tot_idle; 20625 int32_t sub_from_prr = 0; 20626 volatile int32_t sack_rxmit; 20627 struct rack_sendmap *rsm = NULL; 20628 int32_t tso, mtu; 20629 struct tcpopt to; 20630 int32_t slot = 0; 20631 int32_t sup_rack = 0; 20632 uint32_t cts, ms_cts, delayed, early; 20633 uint32_t add_flag = RACK_SENT_SP; 20634 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */ 20635 uint8_t doing_tlp = 0; 20636 uint32_t cwnd_to_use, pace_max_seg; 20637 int32_t do_a_prefetch = 0; 20638 int32_t prefetch_rsm = 0; 20639 int32_t orig_len = 0; 20640 struct timeval tv; 20641 int32_t prefetch_so_done = 0; 20642 struct tcp_log_buffer *lgb; 20643 struct inpcb *inp = tptoinpcb(tp); 20644 struct sockbuf *sb; 20645 uint64_t ts_val = 0; 20646 #ifdef TCP_ACCOUNTING 20647 uint64_t crtsc; 20648 #endif 20649 #ifdef INET6 20650 struct ip6_hdr *ip6 = NULL; 20651 int32_t isipv6; 20652 #endif 20653 bool hpts_calling, hw_tls = false; 20654 20655 NET_EPOCH_ASSERT(); 20656 INP_WLOCK_ASSERT(inp); 20657 20658 /* setup and take the cache hits here */ 20659 rack = (struct tcp_rack *)tp->t_fb_ptr; 20660 #ifdef TCP_ACCOUNTING 20661 sched_pin(); 20662 ts_val = get_cyclecount(); 20663 #endif 20664 hpts_calling = !!(tp->t_flags2 & 
TF2_HPTS_CALLS); 20665 tp->t_flags2 &= ~TF2_HPTS_CALLS; 20666 #ifdef TCP_OFFLOAD 20667 if (tp->t_flags & TF_TOE) { 20668 #ifdef TCP_ACCOUNTING 20669 sched_unpin(); 20670 #endif 20671 return (tcp_offload_output(tp)); 20672 } 20673 #endif 20674 if (rack->rack_deferred_inited == 0) { 20675 /* 20676 * If we are the connecting socket we will 20677 * hit rack_init() when no sequence numbers 20678 * are setup. This makes it so we must defer 20679 * some initialization. Call that now. 20680 */ 20681 rack_deferred_init(tp, rack); 20682 } 20683 /* 20684 * For TFO connections in SYN_RECEIVED, only allow the initial 20685 * SYN|ACK and those sent by the retransmit timer. 20686 */ 20687 if ((tp->t_flags & TF_FASTOPEN) && 20688 (tp->t_state == TCPS_SYN_RECEIVED) && 20689 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 20690 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 20691 #ifdef TCP_ACCOUNTING 20692 sched_unpin(); 20693 #endif 20694 return (0); 20695 } 20696 #ifdef INET6 20697 if (rack->r_state) { 20698 /* Use the cache line loaded if possible */ 20699 isipv6 = rack->r_is_v6; 20700 } else { 20701 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 20702 } 20703 #endif 20704 early = 0; 20705 cts = tcp_get_usecs(&tv); 20706 ms_cts = tcp_tv_to_mssectick(&tv); 20707 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 20708 tcp_in_hpts(rack->rc_tp)) { 20709 /* 20710 * We are on the hpts for some timer but not hptsi output. 20711 * Remove from the hpts unconditionally. 20712 */ 20713 rack_timer_cancel(tp, rack, cts, __LINE__); 20714 } 20715 /* Are we pacing and late? */ 20716 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 20717 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 20718 /* We are delayed */ 20719 delayed = cts - rack->r_ctl.rc_last_output_to; 20720 } else { 20721 delayed = 0; 20722 } 20723 /* Do the timers, which may override the pacer */ 20724 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 20725 int retval; 20726 20727 retval = rack_process_timers(tp, rack, cts, hpts_calling, 20728 &doing_tlp); 20729 if (retval != 0) { 20730 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 20731 #ifdef TCP_ACCOUNTING 20732 sched_unpin(); 20733 #endif 20734 /* 20735 * If timers want tcp_drop(), then pass error out, 20736 * otherwise suppress it. 20737 */ 20738 return (retval < 0 ? retval : 0); 20739 } 20740 } 20741 if (rack->rc_in_persist) { 20742 if (tcp_in_hpts(rack->rc_tp) == 0) { 20743 /* Timer is not running */ 20744 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 20745 } 20746 #ifdef TCP_ACCOUNTING 20747 sched_unpin(); 20748 #endif 20749 return (0); 20750 } 20751 if ((rack->rc_ack_required == 1) && 20752 (rack->r_timer_override == 0)){ 20753 /* A timeout occurred and no ack has arrived */ 20754 if (tcp_in_hpts(rack->rc_tp) == 0) { 20755 /* Timer is not running */ 20756 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 20757 } 20758 #ifdef TCP_ACCOUNTING 20759 sched_unpin(); 20760 #endif 20761 return (0); 20762 } 20763 if ((rack->r_timer_override) || 20764 (rack->rc_ack_can_sendout_data) || 20765 (delayed) || 20766 (tp->t_state < TCPS_ESTABLISHED)) { 20767 rack->rc_ack_can_sendout_data = 0; 20768 if (tcp_in_hpts(rack->rc_tp)) 20769 tcp_hpts_remove(rack->rc_tp); 20770 } else if (tcp_in_hpts(rack->rc_tp)) { 20771 /* 20772 * On the hpts you can't pass even if ACKNOW is on, we will 20773 * when the hpts fires. 
20774 */ 20775 #ifdef TCP_ACCOUNTING 20776 crtsc = get_cyclecount(); 20777 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 20778 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 20779 } 20780 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 20781 tp->tcp_cnt_counters[SND_BLOCKED]++; 20782 } 20783 sched_unpin(); 20784 #endif 20785 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 20786 return (0); 20787 } 20788 /* Finish out both pacing early and late accounting */ 20789 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 20790 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 20791 early = rack->r_ctl.rc_last_output_to - cts; 20792 } else 20793 early = 0; 20794 if (delayed && (rack->rc_always_pace == 1)) { 20795 rack->r_ctl.rc_agg_delayed += delayed; 20796 rack->r_late = 1; 20797 } else if (early && (rack->rc_always_pace == 1)) { 20798 rack->r_ctl.rc_agg_early += early; 20799 rack->r_early = 1; 20800 } else if (rack->rc_always_pace == 0) { 20801 /* Non-paced we are not late */ 20802 rack->r_ctl.rc_agg_delayed = rack->r_ctl.rc_agg_early = 0; 20803 rack->r_early = rack->r_late = 0; 20804 } 20805 /* Now that early/late accounting is done turn off the flag */ 20806 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 20807 rack->r_wanted_output = 0; 20808 rack->r_timer_override = 0; 20809 if ((tp->t_state != rack->r_state) && 20810 TCPS_HAVEESTABLISHED(tp->t_state)) { 20811 rack_set_state(tp, rack); 20812 } 20813 if ((rack->r_fast_output) && 20814 (doing_tlp == 0) && 20815 (tp->rcv_numsacks == 0)) { 20816 int ret; 20817 20818 error = 0; 20819 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 20820 if (ret >= 0) 20821 return(ret); 20822 else if (error) { 20823 inp = rack->rc_inp; 20824 so = inp->inp_socket; 20825 sb = &so->so_snd; 20826 goto nomore; 20827 } 20828 } 20829 inp = rack->rc_inp; 20830 /* 20831 * For TFO connections in SYN_SENT or SYN_RECEIVED, 20832 * only allow the initial SYN or SYN|ACK and those sent 20833 * by the retransmit timer. 20834 */ 20835 if ((tp->t_flags & TF_FASTOPEN) && 20836 ((tp->t_state == TCPS_SYN_RECEIVED) || 20837 (tp->t_state == TCPS_SYN_SENT)) && 20838 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 20839 (tp->t_rxtshift == 0)) { /* not a retransmit */ 20840 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 20841 so = inp->inp_socket; 20842 sb = &so->so_snd; 20843 goto just_return_nolock; 20844 } 20845 /* 20846 * Determine length of data that should be transmitted, and flags 20847 * that will be used. If there is some data or critical controls 20848 * (SYN, RST) to send, then transmit; otherwise, investigate 20849 * further. 
20850 */ 20851 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 20852 if (tp->t_idle_reduce) { 20853 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 20854 rack_cc_after_idle(rack, tp); 20855 } 20856 tp->t_flags &= ~TF_LASTIDLE; 20857 if (idle) { 20858 if (tp->t_flags & TF_MORETOCOME) { 20859 tp->t_flags |= TF_LASTIDLE; 20860 idle = 0; 20861 } 20862 } 20863 if ((tp->snd_una == tp->snd_max) && 20864 rack->r_ctl.rc_went_idle_time && 20865 (cts > rack->r_ctl.rc_went_idle_time)) { 20866 tot_idle = (cts - rack->r_ctl.rc_went_idle_time); 20867 if (tot_idle > rack_min_probertt_hold) { 20868 /* Count as a probe rtt */ 20869 if (rack->in_probe_rtt == 0) { 20870 rack->r_ctl.rc_lower_rtt_us_cts = cts; 20871 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 20872 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 20873 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 20874 } else { 20875 rack_exit_probertt(rack, cts); 20876 } 20877 } 20878 } 20879 if(rack->policer_detect_on) { 20880 /* 20881 * If we are doing policer detetion we at a minium 20882 * record the time but if possible add back to 20883 * the bucket based on the idle time. 20884 */ 20885 uint64_t idle_t, u64_cts; 20886 20887 segsiz = min(ctf_fixed_maxseg(tp), 20888 rack->r_ctl.rc_pace_min_segs); 20889 u64_cts = tcp_tv_to_lusectick(&tv); 20890 if ((rack->rc_policer_detected == 1) && 20891 (rack->r_ctl.policer_bucket_size > segsiz) && 20892 (rack->r_ctl.policer_bw > 0) && 20893 (u64_cts > rack->r_ctl.last_sendtime)) { 20894 /* We are being policed add back the time */ 20895 idle_t = u64_cts - rack->r_ctl.last_sendtime; 20896 rack_credit_back_policer_idle_time(rack, idle_t, __LINE__); 20897 } 20898 rack->r_ctl.last_sendtime = u64_cts; 20899 } 20900 if (rack_use_fsb && 20901 (rack->r_ctl.fsb.tcp_ip_hdr) && 20902 (rack->r_fsb_inited == 0) && 20903 (rack->r_state != TCPS_CLOSED)) 20904 rack_init_fsb_block(tp, rack, tcp_outflags[tp->t_state]); 20905 if (rack->rc_sendvars_notset == 1) { 20906 rack->r_ctl.idle_snd_una = tp->snd_una; 20907 rack->rc_sendvars_notset = 0; 20908 /* 20909 * Make sure any TCP timers (keep-alive) is not running. 20910 */ 20911 tcp_timer_stop(tp); 20912 } 20913 if ((rack->rack_no_prr == 1) && 20914 (rack->rc_always_pace == 0)) { 20915 /* 20916 * Sanity check before sending, if we have 20917 * no-pacing enabled and prr is turned off that 20918 * is a logistics error. Correct this by turnning 20919 * prr back on. A user *must* set some form of 20920 * pacing in order to turn PRR off. We do this 20921 * in the output path so that we can avoid socket 20922 * option ordering issues that would occur if we 20923 * tried to do it while setting rack_no_prr on. 20924 */ 20925 rack->rack_no_prr = 0; 20926 } 20927 if ((rack->pcm_enabled == 1) && 20928 (rack->pcm_needed == 0) && 20929 (tot_idle > 0)) { 20930 /* 20931 * We have been idle some micro seconds. We need 20932 * to factor this in to see if a PCM is needed. 
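 * Roughly: the idle time is converted into round-trips worth of idle
 * by dividing by srtt, added to pcm_idle_rounds, and once those idle
 * rounds plus the rounds that actually elapsed reach
 * rack_pcm_every_n_rounds a new PCM probe is flagged as needed.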
20933 */ 20934 uint32_t rtts_idle, rnds; 20935 20936 if (tp->t_srtt) 20937 rtts_idle = tot_idle / tp->t_srtt; 20938 else 20939 rtts_idle = 0; 20940 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; 20941 rack->r_ctl.pcm_idle_rounds += rtts_idle; 20942 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { 20943 rack->pcm_needed = 1; 20944 rack_log_pcm(rack, 8, rack->r_ctl.last_pcm_round, rtts_idle, rack->r_ctl.current_round ); 20945 } 20946 } 20947 again: 20948 sendalot = 0; 20949 cts = tcp_get_usecs(&tv); 20950 ms_cts = tcp_tv_to_mssectick(&tv); 20951 tso = 0; 20952 mtu = 0; 20953 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 20954 minseg = segsiz; 20955 if (rack->r_ctl.rc_pace_max_segs == 0) 20956 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 20957 else 20958 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 20959 if (TCPS_HAVEESTABLISHED(tp->t_state) && 20960 (rack->r_ctl.pcm_max_seg == 0)) { 20961 /* 20962 * We set in our first send so we know that the ctf_fixed_maxseg 20963 * has been fully set. If we do it in rack_init() we most likely 20964 * see 512 bytes so we end up at 5120, not desirable. 20965 */ 20966 rack->r_ctl.pcm_max_seg = rc_init_window(rack); 20967 if (rack->r_ctl.pcm_max_seg < (ctf_fixed_maxseg(tp) * 10)) { 20968 /* 20969 * Assure our initial PCM probe is at least 10 MSS. 20970 */ 20971 rack->r_ctl.pcm_max_seg = ctf_fixed_maxseg(tp) * 10; 20972 } 20973 } 20974 if ((rack->r_ctl.pcm_max_seg != 0) && (rack->pcm_needed == 1)) { 20975 uint32_t rw_avail, cwa; 20976 20977 if (tp->snd_wnd > ctf_outstanding(tp)) 20978 rw_avail = tp->snd_wnd - ctf_outstanding(tp); 20979 else 20980 rw_avail = 0; 20981 if (tp->snd_cwnd > ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked)) 20982 cwa = tp->snd_cwnd -ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 20983 else 20984 cwa = 0; 20985 if ((cwa >= rack->r_ctl.pcm_max_seg) && 20986 (rw_avail > rack->r_ctl.pcm_max_seg)) { 20987 /* Raise up the max seg for this trip through */ 20988 pace_max_seg = rack->r_ctl.pcm_max_seg; 20989 /* Disable any fast output */ 20990 rack->r_fast_output = 0; 20991 } 20992 if (rack_verbose_logging) { 20993 rack_log_pcm(rack, 4, 20994 cwa, rack->r_ctl.pcm_max_seg, rw_avail); 20995 } 20996 } 20997 sb_offset = tp->snd_max - tp->snd_una; 20998 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 20999 flags = tcp_outflags[tp->t_state]; 21000 while (rack->rc_free_cnt < rack_free_cache) { 21001 rsm = rack_alloc(rack); 21002 if (rsm == NULL) { 21003 if (hpts_calling) 21004 /* Retry in a ms */ 21005 slot = (1 * HPTS_USEC_IN_MSEC); 21006 so = inp->inp_socket; 21007 sb = &so->so_snd; 21008 goto just_return_nolock; 21009 } 21010 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 21011 rack->rc_free_cnt++; 21012 rsm = NULL; 21013 } 21014 sack_rxmit = 0; 21015 len = 0; 21016 rsm = NULL; 21017 if (flags & TH_RST) { 21018 SOCKBUF_LOCK(&inp->inp_socket->so_snd); 21019 so = inp->inp_socket; 21020 sb = &so->so_snd; 21021 goto send; 21022 } 21023 if (rack->r_ctl.rc_resend) { 21024 /* Retransmit timer */ 21025 rsm = rack->r_ctl.rc_resend; 21026 rack->r_ctl.rc_resend = NULL; 21027 len = rsm->r_end - rsm->r_start; 21028 sack_rxmit = 1; 21029 sendalot = 0; 21030 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 21031 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 21032 __func__, __LINE__, 21033 rsm->r_start, tp->snd_una, tp, rack, rsm)); 21034 sb_offset = rsm->r_start - tp->snd_una; 21035 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 21036 } else if 
(rack->r_collapse_point_valid && 21037 ((rsm = rack_check_collapsed(rack, cts)) != NULL)) { 21038 /* 21039 * If an RSM is returned then enough time has passed 21040 * for us to retransmit it. Move up the collapse point, 21041 * since this rsm has its chance to retransmit now. 21042 */ 21043 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_RXT); 21044 rack->r_ctl.last_collapse_point = rsm->r_end; 21045 /* Are we done? */ 21046 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 21047 rack->r_ctl.high_collapse_point)) 21048 rack->r_collapse_point_valid = 0; 21049 sack_rxmit = 1; 21050 /* We are not doing a TLP */ 21051 doing_tlp = 0; 21052 len = rsm->r_end - rsm->r_start; 21053 sb_offset = rsm->r_start - tp->snd_una; 21054 sendalot = 0; 21055 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 21056 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) { 21057 /* We have a retransmit that takes precedence */ 21058 if ((!IN_FASTRECOVERY(tp->t_flags)) && 21059 ((rsm->r_flags & RACK_MUST_RXT) == 0) && 21060 ((tp->t_flags & TF_WASFRECOVERY) == 0)) { 21061 /* Enter recovery if not induced by a time-out */ 21062 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 21063 } 21064 #ifdef INVARIANTS 21065 if (SEQ_LT(rsm->r_start, tp->snd_una)) { 21066 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n", 21067 tp, rack, rsm, rsm->r_start, tp->snd_una); 21068 } 21069 #endif 21070 len = rsm->r_end - rsm->r_start; 21071 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 21072 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 21073 __func__, __LINE__, 21074 rsm->r_start, tp->snd_una, tp, rack, rsm)); 21075 sb_offset = rsm->r_start - tp->snd_una; 21076 sendalot = 0; 21077 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 21078 if (len > 0) { 21079 sack_rxmit = 1; 21080 KMOD_TCPSTAT_INC(tcps_sack_rexmits); 21081 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes, 21082 min(len, segsiz)); 21083 } 21084 } else if (rack->r_ctl.rc_tlpsend) { 21085 /* Tail loss probe */ 21086 long cwin; 21087 long tlen; 21088 21089 /* 21090 * Check if we can do a TLP with a RACK'd packet 21091 * this can happen if we are not doing the rack 21092 * cheat and we skipped to a TLP and it 21093 * went off. 21094 */ 21095 rsm = rack->r_ctl.rc_tlpsend; 21096 /* We are doing a TLP make sure the flag is preent */ 21097 rsm->r_flags |= RACK_TLP; 21098 rack->r_ctl.rc_tlpsend = NULL; 21099 sack_rxmit = 1; 21100 tlen = rsm->r_end - rsm->r_start; 21101 if (tlen > segsiz) 21102 tlen = segsiz; 21103 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 21104 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 21105 __func__, __LINE__, 21106 rsm->r_start, tp->snd_una, tp, rack, rsm)); 21107 sb_offset = rsm->r_start - tp->snd_una; 21108 cwin = min(tp->snd_wnd, tlen); 21109 len = cwin; 21110 } 21111 if (rack->r_must_retran && 21112 (doing_tlp == 0) && 21113 (SEQ_GT(tp->snd_max, tp->snd_una)) && 21114 (rsm == NULL)) { 21115 /* 21116 * There are two different ways that we 21117 * can get into this block: 21118 * a) This is a non-sack connection, we had a time-out 21119 * and thus r_must_retran was set and everything 21120 * left outstanding as been marked for retransmit. 21121 * b) The MTU of the path shrank, so that everything 21122 * was marked to be retransmitted with the smaller 21123 * mtu and r_must_retran was set. 21124 * 21125 * This means that we expect the sendmap (outstanding) 21126 * to all be marked must. We can use the tmap to 21127 * look at them. 
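 * (The code below refuses to send until the flight size drops under
 * min(snd_wnd, snd_cwnd), and then simply takes the head of the tmap,
 * which should still carry RACK_MUST_RXT.)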
21128 * 21129 */ 21130 int sendwin, flight; 21131 21132 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 21133 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto); 21134 if (flight >= sendwin) { 21135 /* 21136 * We can't send yet. 21137 */ 21138 so = inp->inp_socket; 21139 sb = &so->so_snd; 21140 goto just_return_nolock; 21141 } 21142 /* 21143 * This is the case a/b mentioned above. All 21144 * outstanding/not-acked should be marked. 21145 * We can use the tmap to find them. 21146 */ 21147 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 21148 if (rsm == NULL) { 21149 /* TSNH */ 21150 rack->r_must_retran = 0; 21151 rack->r_ctl.rc_out_at_rto = 0; 21152 so = inp->inp_socket; 21153 sb = &so->so_snd; 21154 goto just_return_nolock; 21155 } 21156 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { 21157 /* 21158 * The first one does not have the flag, did we collapse 21159 * further up in our list? 21160 */ 21161 rack->r_must_retran = 0; 21162 rack->r_ctl.rc_out_at_rto = 0; 21163 rsm = NULL; 21164 sack_rxmit = 0; 21165 } else { 21166 sack_rxmit = 1; 21167 len = rsm->r_end - rsm->r_start; 21168 sb_offset = rsm->r_start - tp->snd_una; 21169 sendalot = 0; 21170 if ((rack->full_size_rxt == 0) && 21171 (rack->shape_rxt_to_pacing_min == 0) && 21172 (len >= segsiz)) 21173 len = segsiz; 21174 else if (rack->shape_rxt_to_pacing_min && 21175 rack->gp_ready) { 21176 /* We use pacing min as shaping len req */ 21177 uint32_t maxlen; 21178 21179 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 21180 if (len > maxlen) 21181 len = maxlen; 21182 } 21183 /* 21184 * Delay removing the flag RACK_MUST_RXT so 21185 * that the fastpath for retransmit will 21186 * work with this rsm. 21187 */ 21188 } 21189 } 21190 /* 21191 * Enforce a connection sendmap count limit if set 21192 * as long as we are not retransmiting. 21193 */ 21194 if ((rsm == NULL) && 21195 (V_tcp_map_entries_limit > 0) && 21196 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 21197 counter_u64_add(rack_to_alloc_limited, 1); 21198 if (!rack->alloc_limit_reported) { 21199 rack->alloc_limit_reported = 1; 21200 counter_u64_add(rack_alloc_limited_conns, 1); 21201 } 21202 so = inp->inp_socket; 21203 sb = &so->so_snd; 21204 goto just_return_nolock; 21205 } 21206 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 21207 /* we are retransmitting the fin */ 21208 len--; 21209 if (len) { 21210 /* 21211 * When retransmitting data do *not* include the 21212 * FIN. This could happen from a TLP probe. 
21213 */ 21214 flags &= ~TH_FIN; 21215 } 21216 } 21217 if (rsm && rack->r_fsb_inited && 21218 rack_use_rsm_rfo && 21219 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 21220 int ret; 21221 21222 if ((rack->rc_policer_detected == 1) && 21223 (rack->r_ctl.policer_bucket_size > segsiz) && 21224 (rack->r_ctl.policer_bw > 0)) { 21225 /* Check to see if there is room */ 21226 if (rack->r_ctl.current_policer_bucket < len) { 21227 goto skip_fast_output; 21228 } 21229 } 21230 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); 21231 if (ret == 0) 21232 return (0); 21233 } 21234 skip_fast_output: 21235 so = inp->inp_socket; 21236 sb = &so->so_snd; 21237 if (do_a_prefetch == 0) { 21238 kern_prefetch(sb, &do_a_prefetch); 21239 do_a_prefetch = 1; 21240 } 21241 #ifdef NETFLIX_SHARED_CWND 21242 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 21243 rack->rack_enable_scwnd) { 21244 /* We are doing cwnd sharing */ 21245 if (rack->gp_ready && 21246 (rack->rack_attempted_scwnd == 0) && 21247 (rack->r_ctl.rc_scw == NULL) && 21248 tp->t_lib) { 21249 /* The pcbid is in, lets make an attempt */ 21250 counter_u64_add(rack_try_scwnd, 1); 21251 rack->rack_attempted_scwnd = 1; 21252 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 21253 &rack->r_ctl.rc_scw_index, 21254 segsiz); 21255 } 21256 if (rack->r_ctl.rc_scw && 21257 (rack->rack_scwnd_is_idle == 1) && 21258 sbavail(&so->so_snd)) { 21259 /* we are no longer out of data */ 21260 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 21261 rack->rack_scwnd_is_idle = 0; 21262 } 21263 if (rack->r_ctl.rc_scw) { 21264 /* First lets update and get the cwnd */ 21265 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 21266 rack->r_ctl.rc_scw_index, 21267 tp->snd_cwnd, tp->snd_wnd, segsiz); 21268 } 21269 } 21270 #endif 21271 /* 21272 * Get standard flags, and add SYN or FIN if requested by 'hidden' 21273 * state flags. 21274 */ 21275 if (tp->t_flags & TF_NEEDFIN) 21276 flags |= TH_FIN; 21277 if (tp->t_flags & TF_NEEDSYN) 21278 flags |= TH_SYN; 21279 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 21280 void *end_rsm; 21281 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 21282 if (end_rsm) 21283 kern_prefetch(end_rsm, &prefetch_rsm); 21284 prefetch_rsm = 1; 21285 } 21286 SOCKBUF_LOCK(sb); 21287 if ((sack_rxmit == 0) && 21288 (TCPS_HAVEESTABLISHED(tp->t_state) || 21289 (tp->t_flags & TF_FASTOPEN))) { 21290 /* 21291 * We are not retransmitting (sack_rxmit is 0) so we 21292 * are sending new data. This is always based on snd_max. 21293 * Now in theory snd_max may be equal to snd_una, if so 21294 * then nothing is outstanding and the offset would be 0. 
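 * (So sb_offset computed below is snd_max - snd_una, i.e. the part of
 * the socket buffer that is already in flight.)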
21295 */ 21296 uint32_t avail; 21297 21298 avail = sbavail(sb); 21299 if (SEQ_GT(tp->snd_max, tp->snd_una) && avail) 21300 sb_offset = tp->snd_max - tp->snd_una; 21301 else 21302 sb_offset = 0; 21303 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { 21304 if (rack->r_ctl.rc_tlp_new_data) { 21305 /* TLP is forcing out new data */ 21306 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 21307 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 21308 } 21309 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { 21310 if (tp->snd_wnd > sb_offset) 21311 len = tp->snd_wnd - sb_offset; 21312 else 21313 len = 0; 21314 } else { 21315 len = rack->r_ctl.rc_tlp_new_data; 21316 } 21317 rack->r_ctl.rc_tlp_new_data = 0; 21318 } else { 21319 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); 21320 } 21321 if ((rack->r_ctl.crte == NULL) && 21322 IN_FASTRECOVERY(tp->t_flags) && 21323 (rack->full_size_rxt == 0) && 21324 (rack->shape_rxt_to_pacing_min == 0) && 21325 (len > segsiz)) { 21326 /* 21327 * For prr=off, we need to send only 1 MSS 21328 * at a time. We do this because another sack could 21329 * be arriving that causes us to send retransmits and 21330 * we don't want to be on a long pace due to a larger send 21331 * that keeps us from sending out the retransmit. 21332 */ 21333 len = segsiz; 21334 } else if (rack->shape_rxt_to_pacing_min && 21335 rack->gp_ready) { 21336 /* We use pacing min as shaping len req */ 21337 uint32_t maxlen; 21338 21339 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 21340 if (len > maxlen) 21341 len = maxlen; 21342 }/* The else is full_size_rxt is on so send it all */ 21343 } else { 21344 uint32_t outstanding; 21345 /* 21346 * We are inside of a Fast recovery episode, this 21347 * is caused by a SACK or 3 dup acks. At this point 21348 * we have sent all the retransmissions and we rely 21349 * on PRR to dictate what we will send in the form of 21350 * new data. 21351 */ 21352 21353 outstanding = tp->snd_max - tp->snd_una; 21354 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 21355 if (tp->snd_wnd > outstanding) { 21356 len = tp->snd_wnd - outstanding; 21357 /* Check to see if we have the data */ 21358 if ((sb_offset + len) > avail) { 21359 /* It does not all fit */ 21360 if (avail > sb_offset) 21361 len = avail - sb_offset; 21362 else 21363 len = 0; 21364 } 21365 } else { 21366 len = 0; 21367 } 21368 } else if (avail > sb_offset) { 21369 len = avail - sb_offset; 21370 } else { 21371 len = 0; 21372 } 21373 if (len > 0) { 21374 if (len > rack->r_ctl.rc_prr_sndcnt) { 21375 len = rack->r_ctl.rc_prr_sndcnt; 21376 } 21377 if (len > 0) { 21378 sub_from_prr = 1; 21379 } 21380 } 21381 if (len > segsiz) { 21382 /* 21383 * We should never send more than a MSS when 21384 * retransmitting or sending new data in prr 21385 * mode unless the override flag is on. Most 21386 * likely the PRR algorithm is not going to 21387 * let us send a lot as well :-) 21388 */ 21389 if (rack->r_ctl.rc_prr_sendalot == 0) { 21390 len = segsiz; 21391 } 21392 } else if (len < segsiz) { 21393 /* 21394 * Do we send any? The idea here is if the 21395 * send empty's the socket buffer we want to 21396 * do it. However if not then lets just wait 21397 * for our prr_sndcnt to get bigger. 
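 * (Concretely: a sub-MSS send is only allowed when it would drain the
 * socket buffer completely; otherwise len is zeroed and we wait for
 * PRR to grant more.)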
21398 */ 21399 long leftinsb; 21400 21401 leftinsb = sbavail(sb) - sb_offset; 21402 if (leftinsb > len) { 21403 /* This send does not empty the sb */ 21404 len = 0; 21405 } 21406 } 21407 } 21408 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 21409 /* 21410 * If you have not established 21411 * and are not doing FAST OPEN 21412 * no data please. 21413 */ 21414 if ((sack_rxmit == 0) && 21415 !(tp->t_flags & TF_FASTOPEN)) { 21416 len = 0; 21417 sb_offset = 0; 21418 } 21419 } 21420 if (prefetch_so_done == 0) { 21421 kern_prefetch(so, &prefetch_so_done); 21422 prefetch_so_done = 1; 21423 } 21424 orig_len = len; 21425 if ((rack->rc_policer_detected == 1) && 21426 (rack->r_ctl.policer_bucket_size > segsiz) && 21427 (rack->r_ctl.policer_bw > 0) && 21428 (len > 0)) { 21429 /* 21430 * Ok we believe we have a policer watching 21431 * what we send, can we send len? If not can 21432 * we tune it down to a smaller value? 21433 */ 21434 uint32_t plen, buck_needs; 21435 21436 plen = rack_policer_check_send(rack, len, segsiz, &buck_needs); 21437 if (plen == 0) { 21438 /* 21439 * We are not allowed to send. How long 21440 * do we need to pace for i.e. how long 21441 * before len is available to send? 21442 */ 21443 uint64_t lentime; 21444 21445 lentime = buck_needs; 21446 lentime *= HPTS_USEC_IN_SEC; 21447 lentime /= rack->r_ctl.policer_bw; 21448 slot = (uint32_t)lentime; 21449 tot_len_this_send = 0; 21450 SOCKBUF_UNLOCK(sb); 21451 if (rack_verbose_logging > 0) 21452 policer_detection_log(rack, len, slot, buck_needs, 0, 12); 21453 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 21454 rack_log_type_just_return(rack, cts, 0, slot, hpts_calling, 0, cwnd_to_use); 21455 goto just_return_clean; 21456 } 21457 if (plen < len) { 21458 sendalot = 0; 21459 len = plen; 21460 } 21461 } 21462 /* 21463 * Lop off SYN bit if it has already been sent. However, if this is 21464 * SYN-SENT state and if segment contains data and if we don't know 21465 * that foreign host supports TAO, suppress sending segment. 21466 */ 21467 if ((flags & TH_SYN) && 21468 SEQ_GT(tp->snd_max, tp->snd_una) && 21469 ((sack_rxmit == 0) && 21470 (tp->t_rxtshift == 0))) { 21471 /* 21472 * When sending additional segments following a TFO SYN|ACK, 21473 * do not include the SYN bit. 21474 */ 21475 if ((tp->t_flags & TF_FASTOPEN) && 21476 (tp->t_state == TCPS_SYN_RECEIVED)) 21477 flags &= ~TH_SYN; 21478 } 21479 /* 21480 * Be careful not to send data and/or FIN on SYN segments. This 21481 * measure is needed to prevent interoperability problems with not 21482 * fully conformant TCP implementations. 
21483 */ 21484 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 21485 len = 0; 21486 flags &= ~TH_FIN; 21487 } 21488 /* 21489 * On TFO sockets, ensure no data is sent in the following cases: 21490 * 21491 * - When retransmitting SYN|ACK on a passively-created socket 21492 * 21493 * - When retransmitting SYN on an actively created socket 21494 * 21495 * - When sending a zero-length cookie (cookie request) on an 21496 * actively created socket 21497 * 21498 * - When the socket is in the CLOSED state (RST is being sent) 21499 */ 21500 if ((tp->t_flags & TF_FASTOPEN) && 21501 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 21502 ((tp->t_state == TCPS_SYN_SENT) && 21503 (tp->t_tfo_client_cookie_len == 0)) || 21504 (flags & TH_RST))) { 21505 sack_rxmit = 0; 21506 len = 0; 21507 } 21508 /* Without fast-open there should never be data sent on a SYN */ 21509 if ((flags & TH_SYN) && !(tp->t_flags & TF_FASTOPEN)) { 21510 len = 0; 21511 } 21512 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 21513 /* We only send 1 MSS if we have a DSACK block */ 21514 add_flag |= RACK_SENT_W_DSACK; 21515 len = segsiz; 21516 } 21517 if (len <= 0) { 21518 /* 21519 * We have nothing to send, or the window shrank, or 21520 * is closed, do we need to go into persists? 21521 */ 21522 len = 0; 21523 if ((tp->snd_wnd == 0) && 21524 (TCPS_HAVEESTABLISHED(tp->t_state)) && 21525 (tp->snd_una == tp->snd_max) && 21526 (sb_offset < (int)sbavail(sb))) { 21527 rack_enter_persist(tp, rack, cts, tp->snd_una); 21528 } 21529 } else if ((rsm == NULL) && 21530 (doing_tlp == 0) && 21531 (len < pace_max_seg)) { 21532 /* 21533 * We are not sending a maximum sized segment for 21534 * some reason. Should we not send anything (think 21535 * sws or persists)? 21536 */ 21537 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 21538 (TCPS_HAVEESTABLISHED(tp->t_state)) && 21539 (len < minseg) && 21540 (len < (int)(sbavail(sb) - sb_offset))) { 21541 /* 21542 * Here the rwnd is less than 21543 * the minimum pacing size, this is not a retransmit, 21544 * we are established and 21545 * the send is not the last in the socket buffer 21546 * we send nothing, and we may enter persists 21547 * if nothing is outstanding. 21548 */ 21549 len = 0; 21550 if (tp->snd_max == tp->snd_una) { 21551 /* 21552 * Nothing out we can 21553 * go into persists. 21554 */ 21555 rack_enter_persist(tp, rack, cts, tp->snd_una); 21556 } 21557 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 21558 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 21559 (len < (int)(sbavail(sb) - sb_offset)) && 21560 (len < minseg)) { 21561 /* 21562 * Here we are not retransmitting, and 21563 * the cwnd is not so small that we could 21564 * not send at least a min size (rxt timer 21565 * not having gone off), We have 2 segments or 21566 * more already in flight, its not the tail end 21567 * of the socket buffer and the cwnd is blocking 21568 * us from sending out a minimum pacing segment size. 21569 * Lets not send anything. 21570 */ 21571 len = 0; 21572 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 21573 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 21574 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 21575 (len < (int)(sbavail(sb) - sb_offset)) && 21576 (TCPS_HAVEESTABLISHED(tp->t_state))) { 21577 /* 21578 * Here we have a send window but we have 21579 * filled it up and we can't send another pacing segment. 21580 * We also have in flight more than 2 segments 21581 * and we are not completing the sb i.e. 
we allow
21582 * the last bytes of the sb to go out even if
21583 * it's not a full pacing segment.
21584 */
21585 len = 0;
21586 } else if ((rack->r_ctl.crte != NULL) &&
21587 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) &&
21588 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) &&
21589 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) &&
21590 (len < (int)(sbavail(sb) - sb_offset))) {
21591 /*
21592 * Here we are doing hardware pacing, this is not a TLP,
21593 * we are not sending a pace max segment size, there is rwnd
21594 * room to send at least N pace_max_seg, the cwnd is greater
21595 * than or equal to a full pacing segment plus 4 MSS and we have 2 or
21596 * more segments in flight and it's not the tail of the socket buffer.
21597 *
21598 * We don't want to send; instead we need to get more acks in to
21599 * allow us to send a full pacing segment. Normally, if we are pacing
21600 * about the right speed, we should have finished our pacing
21601 * send as most of the acks have come back if we are at the
21602 * right rate. This is a bit fuzzy since return path delay
21603 * can delay the acks, which is why we want to make sure we
21604 * have cwnd space to have a bit more than a max pace segment in flight.
21605 *
21606 * If we have not gotten our acks back, we are pacing at too high a
21607 * rate; delaying will not hurt and will bring our GP estimate down by
21608 * injecting the delay. If we don't do this we will send
21609 * 2 MSS out in response to the acks being clocked in which
21610 * defeats the point of hw-pacing (i.e. to help us get
21611 * larger TSO's out).
21612 */
21613 len = 0;
21614 }
21615
21616 }
21617 /* len will be >= 0 after this point. */
21618 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
21619 rack_sndbuf_autoscale(rack);
21620 /*
21621 * Decide if we can use TCP Segmentation Offloading (if supported by
21622 * hardware).
21623 *
21624 * TSO may only be used if we are in a pure bulk sending state. The
21625 * presence of TCP-MD5, SACK retransmits, SACK advertisements and IP
21626 * options prevent using TSO. With TSO the TCP header is the same
21627 * (except for the sequence number) for all generated packets. This
21628 * makes it impossible to transmit any options which vary per
21629 * generated segment or packet.
21630 *
21631 * IPv4 handling has a clear separation of ip options and ip header
21632 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
21633 * the right thing below to provide length of just ip options and thus
21634 * checking for ipoptlen is enough to decide if ip options are present.
21635 */
21636 ipoptlen = 0;
21637 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
21638 /*
21639 * Pre-calculate here as we save another lookup into the darknesses
21640 * of IPsec that way and can actually decide if TSO is ok.
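 *
 * The result feeds the TSO decision just below: TSO is only
 * enabled when ipoptlen (at this point just the IPsec overhead)
 * is zero, there is no UDP tunneling port, no TCP-MD5 signature,
 * no SACK blocks queued to advertise and no SACK retransmission
 * in progress.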
21641 */ 21642 #ifdef INET6 21643 if (isipv6 && IPSEC_ENABLED(ipv6)) 21644 ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp); 21645 #ifdef INET 21646 else 21647 #endif 21648 #endif /* INET6 */ 21649 #ifdef INET 21650 if (IPSEC_ENABLED(ipv4)) 21651 ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp); 21652 #endif /* INET */ 21653 #endif 21654 21655 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 21656 ipoptlen += ipsec_optlen; 21657 #endif 21658 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && 21659 (tp->t_port == 0) && 21660 ((tp->t_flags & TF_SIGNATURE) == 0) && 21661 tp->rcv_numsacks == 0 && sack_rxmit == 0 && 21662 ipoptlen == 0) 21663 tso = 1; 21664 { 21665 uint32_t outstanding __unused; 21666 21667 outstanding = tp->snd_max - tp->snd_una; 21668 if (tp->t_flags & TF_SENTFIN) { 21669 /* 21670 * If we sent a fin, snd_max is 1 higher than 21671 * snd_una 21672 */ 21673 outstanding--; 21674 } 21675 if (sack_rxmit) { 21676 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 21677 flags &= ~TH_FIN; 21678 } 21679 } 21680 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), 21681 (long)TCP_MAXWIN << tp->rcv_scale); 21682 21683 /* 21684 * Sender silly window avoidance. We transmit under the following 21685 * conditions when len is non-zero: 21686 * 21687 * - We have a full segment (or more with TSO) - This is the last 21688 * buffer in a write()/send() and we are either idle or running 21689 * NODELAY - we've timed out (e.g. persist timer) - we have more 21690 * then 1/2 the maximum send window's worth of data (receiver may be 21691 * limited the window size) - we need to retransmit 21692 */ 21693 if (len) { 21694 if (len >= segsiz) { 21695 goto send; 21696 } 21697 /* 21698 * NOTE! on localhost connections an 'ack' from the remote 21699 * end may occur synchronously with the output and cause us 21700 * to flush a buffer queued with moretocome. XXX 21701 * 21702 */ 21703 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 21704 (idle || (tp->t_flags & TF_NODELAY)) && 21705 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 21706 (tp->t_flags & TF_NOPUSH) == 0) { 21707 pass = 2; 21708 goto send; 21709 } 21710 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 21711 pass = 22; 21712 goto send; 21713 } 21714 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 21715 pass = 4; 21716 goto send; 21717 } 21718 if (sack_rxmit) { 21719 pass = 6; 21720 goto send; 21721 } 21722 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && 21723 (ctf_outstanding(tp) < (segsiz * 2))) { 21724 /* 21725 * We have less than two MSS outstanding (delayed ack) 21726 * and our rwnd will not let us send a full sized 21727 * MSS. Lets go ahead and let this small segment 21728 * out because we want to try to have at least two 21729 * packets inflight to not be caught by delayed ack. 21730 */ 21731 pass = 12; 21732 goto send; 21733 } 21734 } 21735 /* 21736 * Sending of standalone window updates. 21737 * 21738 * Window updates are important when we close our window due to a 21739 * full socket buffer and are opening it again after the application 21740 * reads data from it. Once the window has opened again and the 21741 * remote end starts to send again the ACK clock takes over and 21742 * provides the most current window information. 21743 * 21744 * We must avoid the silly window syndrome whereas every read from 21745 * the receive buffer, no matter how small, causes a window update 21746 * to be sent. 
We also should avoid sending a flurry of window 21747 * updates when the socket buffer had queued a lot of data and the 21748 * application is doing small reads. 21749 * 21750 * Prevent a flurry of pointless window updates by only sending an 21751 * update when we can increase the advertized window by more than 21752 * 1/4th of the socket buffer capacity. When the buffer is getting 21753 * full or is very small be more aggressive and send an update 21754 * whenever we can increase by two mss sized segments. In all other 21755 * situations the ACK's to new incoming data will carry further 21756 * window increases. 21757 * 21758 * Don't send an independent window update if a delayed ACK is 21759 * pending (it will get piggy-backed on it) or the remote side 21760 * already has done a half-close and won't send more data. Skip 21761 * this if the connection is in T/TCP half-open state. 21762 */ 21763 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 21764 !(tp->t_flags & TF_DELACK) && 21765 !TCPS_HAVERCVDFIN(tp->t_state)) { 21766 /* 21767 * "adv" is the amount we could increase the window, taking 21768 * into account that we are limited by TCP_MAXWIN << 21769 * tp->rcv_scale. 21770 */ 21771 int32_t adv; 21772 int oldwin; 21773 21774 adv = recwin; 21775 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 21776 oldwin = (tp->rcv_adv - tp->rcv_nxt); 21777 if (adv > oldwin) 21778 adv -= oldwin; 21779 else { 21780 /* We can't increase the window */ 21781 adv = 0; 21782 } 21783 } else 21784 oldwin = 0; 21785 21786 /* 21787 * If the new window size ends up being the same as or less 21788 * than the old size when it is scaled, then don't force 21789 * a window update. 21790 */ 21791 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 21792 goto dontupdate; 21793 21794 if (adv >= (int32_t)(2 * segsiz) && 21795 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 21796 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 21797 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 21798 pass = 7; 21799 goto send; 21800 } 21801 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 21802 pass = 23; 21803 goto send; 21804 } 21805 } 21806 dontupdate: 21807 21808 /* 21809 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 21810 * is also a catch-all for the retransmit timer timeout case. 21811 */ 21812 if (tp->t_flags & TF_ACKNOW) { 21813 pass = 8; 21814 goto send; 21815 } 21816 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 21817 pass = 9; 21818 goto send; 21819 } 21820 /* 21821 * If our state indicates that FIN should be sent and we have not 21822 * yet done so, then we need to send. 21823 */ 21824 if ((flags & TH_FIN) && 21825 (tp->snd_max == tp->snd_una)) { 21826 pass = 11; 21827 goto send; 21828 } 21829 /* 21830 * No reason to send a segment, just return. 21831 */ 21832 just_return: 21833 SOCKBUF_UNLOCK(sb); 21834 just_return_nolock: 21835 { 21836 int app_limited = CTF_JR_SENT_DATA; 21837 21838 if ((tp->t_flags & TF_FASTOPEN) == 0 && 21839 (flags & TH_FIN) && 21840 (len == 0) && 21841 (sbused(sb) == (tp->snd_max - tp->snd_una)) && 21842 ((tp->snd_max - tp->snd_una) <= segsiz)) { 21843 /* 21844 * Ok less than or right at a MSS is 21845 * outstanding. The original FreeBSD stack would 21846 * have sent a FIN, which can speed things up for 21847 * a transactional application doing a MSG_WAITALL. 21848 * To speed things up since we do *not* send a FIN 21849 * if data is outstanding, we send a "challenge ack". 
21850 * The idea behind that is instead of having to have 21851 * the peer wait for the delayed-ack timer to run off 21852 * we send an ack that makes the peer send us an ack. 21853 */ 21854 rack_send_ack_challange(rack); 21855 } 21856 if (tot_len_this_send > 0) { 21857 rack->r_ctl.fsb.recwin = recwin; 21858 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz, __LINE__); 21859 if ((error == 0) && 21860 (rack->rc_policer_detected == 0) && 21861 rack_use_rfo && 21862 ((flags & (TH_SYN|TH_FIN)) == 0) && 21863 (ipoptlen == 0) && 21864 (tp->rcv_numsacks == 0) && 21865 rack->r_fsb_inited && 21866 TCPS_HAVEESTABLISHED(tp->t_state) && 21867 ((IN_RECOVERY(tp->t_flags)) == 0) && 21868 (rack->r_must_retran == 0) && 21869 ((tp->t_flags & TF_NEEDFIN) == 0) && 21870 (len > 0) && (orig_len > 0) && 21871 (orig_len > len) && 21872 ((orig_len - len) >= segsiz) && 21873 ((optlen == 0) || 21874 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 21875 /* We can send at least one more MSS using our fsb */ 21876 rack_setup_fast_output(tp, rack, sb, len, orig_len, 21877 segsiz, pace_max_seg, hw_tls, flags); 21878 } else 21879 rack->r_fast_output = 0; 21880 rack_log_fsb(rack, tp, so, flags, 21881 ipoptlen, orig_len, len, 0, 21882 1, optlen, __LINE__, 1); 21883 /* Assure when we leave that snd_nxt will point to top */ 21884 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 21885 tp->snd_nxt = tp->snd_max; 21886 } else { 21887 int end_window = 0; 21888 uint32_t seq = tp->gput_ack; 21889 21890 rsm = tqhash_max(rack->r_ctl.tqh); 21891 if (rsm) { 21892 /* 21893 * Mark the last sent that we just-returned (hinting 21894 * that delayed ack may play a role in any rtt measurement). 21895 */ 21896 rsm->r_just_ret = 1; 21897 } 21898 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 21899 rack->r_ctl.rc_agg_delayed = 0; 21900 rack->r_early = 0; 21901 rack->r_late = 0; 21902 rack->r_ctl.rc_agg_early = 0; 21903 if ((ctf_outstanding(tp) + 21904 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), 21905 minseg)) >= tp->snd_wnd) { 21906 /* We are limited by the rwnd */ 21907 app_limited = CTF_JR_RWND_LIMITED; 21908 if (IN_FASTRECOVERY(tp->t_flags)) 21909 rack->r_ctl.rc_prr_sndcnt = 0; 21910 } else if (ctf_outstanding(tp) >= sbavail(sb)) { 21911 /* We are limited by whats available -- app limited */ 21912 app_limited = CTF_JR_APP_LIMITED; 21913 if (IN_FASTRECOVERY(tp->t_flags)) 21914 rack->r_ctl.rc_prr_sndcnt = 0; 21915 } else if ((idle == 0) && 21916 ((tp->t_flags & TF_NODELAY) == 0) && 21917 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 21918 (len < segsiz)) { 21919 /* 21920 * No delay is not on and the 21921 * user is sending less than 1MSS. This 21922 * brings out SWS avoidance so we 21923 * don't send. Another app-limited case. 21924 */ 21925 app_limited = CTF_JR_APP_LIMITED; 21926 } else if (tp->t_flags & TF_NOPUSH) { 21927 /* 21928 * The user has requested no push of 21929 * the last segment and we are 21930 * at the last segment. Another app 21931 * limited case. 21932 */ 21933 app_limited = CTF_JR_APP_LIMITED; 21934 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) { 21935 /* Its the cwnd */ 21936 app_limited = CTF_JR_CWND_LIMITED; 21937 } else if (IN_FASTRECOVERY(tp->t_flags) && 21938 (rack->rack_no_prr == 0) && 21939 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { 21940 app_limited = CTF_JR_PRR; 21941 } else { 21942 /* Now why here are we not sending? 
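 * None of the recognized limits (rwnd, available data, SWS
 * avoidance, TF_NOPUSH, cwnd, PRR) matched above, so we fall
 * into the catch-all assessing case below.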
*/
21943 #ifdef NOW
21944 #ifdef INVARIANTS
21945 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use);
21946 #endif
21947 #endif
21948 app_limited = CTF_JR_ASSESSING;
21949 }
21950 /*
21951 * App limited in some fashion, for our pacing GP
21952 * measurements we don't want any gap (even cwnd).
21953 * Close down the measurement window.
21954 */
21955 if (rack_cwnd_block_ends_measure &&
21956 ((app_limited == CTF_JR_CWND_LIMITED) ||
21957 (app_limited == CTF_JR_PRR))) {
21958 /*
21959 * The reason we are not sending is
21960 * the cwnd (or prr). We have been configured
21961 * to end the measurement window in
21962 * this case.
21963 */
21964 end_window = 1;
21965 } else if (rack_rwnd_block_ends_measure &&
21966 (app_limited == CTF_JR_RWND_LIMITED)) {
21967 /*
21968 * We are rwnd limited and have been
21969 * configured to end the measurement
21970 * window in this case.
21971 */
21972 end_window = 1;
21973 } else if (app_limited == CTF_JR_APP_LIMITED) {
21974 /*
21975 * A true application limited period, we have
21976 * run out of data.
21977 */
21978 end_window = 1;
21979 } else if (app_limited == CTF_JR_ASSESSING) {
21980 /*
21981 * In the assessing case we hit the end of
21982 * the if/else and had no known reason.
21983 * This will panic us under INVARIANTS.
21984 *
21985 * If we get this out in logs we need to
21986 * investigate which reason we missed.
21987 */
21988 end_window = 1;
21989 }
21990 if (end_window) {
21991 uint8_t log = 0;
21992
21993 /* Adjust the Gput measurement */
21994 if ((tp->t_flags & TF_GPUTINPROG) &&
21995 SEQ_GT(tp->gput_ack, tp->snd_max)) {
21996 tp->gput_ack = tp->snd_max;
21997 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) {
21998 /*
21999 * There is not enough to measure.
22000 */
22001 tp->t_flags &= ~TF_GPUTINPROG;
22002 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
22003 rack->r_ctl.rc_gp_srtt /*flex1*/,
22004 tp->gput_seq,
22005 0, 0, 18, __LINE__, NULL, 0);
22006 } else
22007 log = 1;
22008 }
22009 /* Mark the last packet as app limited */
22010 rsm = tqhash_max(rack->r_ctl.tqh);
22011 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
22012 if (rack->r_ctl.rc_app_limited_cnt == 0)
22013 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
22014 else {
22015 /*
22016 * Go out to the end app limited and mark
22017 * this new one as next and move the end_appl up
22018 * to this guy.
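 *
 * The app-limited marks form a chain through the scoreboard:
 * rc_first_appl points at the oldest marked rsm, each marked
 * rsm records the start of the next one in r_nseq_appl, and
 * rc_end_appl tracks the newest so a new mark can be appended
 * without walking the chain.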
22019 */ 22020 if (rack->r_ctl.rc_end_appl) 22021 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 22022 rack->r_ctl.rc_end_appl = rsm; 22023 } 22024 rsm->r_flags |= RACK_APP_LIMITED; 22025 rack->r_ctl.rc_app_limited_cnt++; 22026 } 22027 if (log) 22028 rack_log_pacing_delay_calc(rack, 22029 rack->r_ctl.rc_app_limited_cnt, seq, 22030 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); 22031 } 22032 } 22033 /* Check if we need to go into persists or not */ 22034 if ((tp->snd_max == tp->snd_una) && 22035 TCPS_HAVEESTABLISHED(tp->t_state) && 22036 sbavail(sb) && 22037 (sbavail(sb) > tp->snd_wnd) && 22038 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 22039 /* Yes lets make sure to move to persist before timer-start */ 22040 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); 22041 } 22042 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 22043 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use); 22044 } 22045 just_return_clean: 22046 #ifdef NETFLIX_SHARED_CWND 22047 if ((sbavail(sb) == 0) && 22048 rack->r_ctl.rc_scw) { 22049 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 22050 rack->rack_scwnd_is_idle = 1; 22051 } 22052 #endif 22053 #ifdef TCP_ACCOUNTING 22054 if (tot_len_this_send > 0) { 22055 crtsc = get_cyclecount(); 22056 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22057 tp->tcp_cnt_counters[SND_OUT_DATA]++; 22058 } 22059 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22060 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 22061 } 22062 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22063 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 22064 } 22065 } else { 22066 crtsc = get_cyclecount(); 22067 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22068 tp->tcp_cnt_counters[SND_LIMITED]++; 22069 } 22070 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22071 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 22072 } 22073 } 22074 sched_unpin(); 22075 #endif 22076 return (0); 22077 22078 send: 22079 if ((rack->r_ctl.crte != NULL) && 22080 (rsm == NULL) && 22081 ((rack->rc_hw_nobuf == 1) || 22082 (rack_hw_check_queue && (check_done == 0)))) { 22083 /* 22084 * We only want to do this once with the hw_check_queue, 22085 * for the enobuf case we would only do it once if 22086 * we come around to again, the flag will be clear. 22087 */ 22088 check_done = 1; 22089 slot = rack_check_queue_level(rack, tp, &tv, cts, len, segsiz); 22090 if (slot) { 22091 rack->r_ctl.rc_agg_delayed = 0; 22092 rack->r_ctl.rc_agg_early = 0; 22093 rack->r_early = 0; 22094 rack->r_late = 0; 22095 SOCKBUF_UNLOCK(&so->so_snd); 22096 goto skip_all_send; 22097 } 22098 } 22099 if (rsm || sack_rxmit) 22100 counter_u64_add(rack_nfto_resend, 1); 22101 else 22102 counter_u64_add(rack_non_fto_send, 1); 22103 if ((flags & TH_FIN) && 22104 sbavail(sb)) { 22105 /* 22106 * We do not transmit a FIN 22107 * with data outstanding. We 22108 * need to make it so all data 22109 * is acked first. 22110 */ 22111 flags &= ~TH_FIN; 22112 if (TCPS_HAVEESTABLISHED(tp->t_state) && 22113 (sbused(sb) == (tp->snd_max - tp->snd_una)) && 22114 ((tp->snd_max - tp->snd_una) <= segsiz)) { 22115 /* 22116 * Ok less than or right at a MSS is 22117 * outstanding. The original FreeBSD stack would 22118 * have sent a FIN, which can speed things up for 22119 * a transactional application doing a MSG_WAITALL. 22120 * To speed things up since we do *not* send a FIN 22121 * if data is outstanding, we send a "challenge ack". 
22122 * The idea behind that is instead of having to have 22123 * the peer wait for the delayed-ack timer to run off 22124 * we send an ack that makes the peer send us an ack. 22125 */ 22126 rack_send_ack_challange(rack); 22127 } 22128 } 22129 /* Enforce stack imposed max seg size if we have one */ 22130 if (pace_max_seg && 22131 (len > pace_max_seg)) { 22132 mark = 1; 22133 len = pace_max_seg; 22134 } 22135 if ((rsm == NULL) && 22136 (rack->pcm_in_progress == 0) && 22137 (rack->r_ctl.pcm_max_seg > 0) && 22138 (len >= rack->r_ctl.pcm_max_seg)) { 22139 /* It is large enough for a measurement */ 22140 add_flag |= RACK_IS_PCM; 22141 rack_log_pcm(rack, 5, len, rack->r_ctl.pcm_max_seg, add_flag); 22142 } else if (rack_verbose_logging) { 22143 rack_log_pcm(rack, 6, len, rack->r_ctl.pcm_max_seg, add_flag); 22144 } 22145 22146 SOCKBUF_LOCK_ASSERT(sb); 22147 if (len > 0) { 22148 if (len >= segsiz) 22149 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 22150 else 22151 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 22152 } 22153 /* 22154 * Before ESTABLISHED, force sending of initial options unless TCP 22155 * set not to do any options. NOTE: we assume that the IP/TCP header 22156 * plus TCP options always fit in a single mbuf, leaving room for a 22157 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 22158 * + optlen <= MCLBYTES 22159 */ 22160 optlen = 0; 22161 #ifdef INET6 22162 if (isipv6) 22163 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 22164 else 22165 #endif 22166 hdrlen = sizeof(struct tcpiphdr); 22167 22168 /* 22169 * Ok what seq are we sending from. If we have 22170 * no rsm to use, then we look at various bits, 22171 * if we are putting out a SYN it will be ISS. 22172 * If we are retransmitting a FIN it will 22173 * be snd_max-1 else its snd_max. 22174 */ 22175 if (rsm == NULL) { 22176 if (flags & TH_SYN) 22177 rack_seq = tp->iss; 22178 else if ((flags & TH_FIN) && 22179 (tp->t_flags & TF_SENTFIN)) 22180 rack_seq = tp->snd_max - 1; 22181 else 22182 rack_seq = tp->snd_max; 22183 } else { 22184 rack_seq = rsm->r_start; 22185 } 22186 /* 22187 * Compute options for segment. We only have to care about SYN and 22188 * established connection segments. Options for SYN-ACK segments 22189 * are handled in TCP syncache. 22190 */ 22191 to.to_flags = 0; 22192 if ((tp->t_flags & TF_NOOPT) == 0) { 22193 /* Maximum segment size. */ 22194 if (flags & TH_SYN) { 22195 to.to_mss = tcp_mssopt(&inp->inp_inc); 22196 if (tp->t_port) 22197 to.to_mss -= V_tcp_udp_tunneling_overhead; 22198 to.to_flags |= TOF_MSS; 22199 22200 /* 22201 * On SYN or SYN|ACK transmits on TFO connections, 22202 * only include the TFO option if it is not a 22203 * retransmit, as the presence of the TFO option may 22204 * have caused the original SYN or SYN|ACK to have 22205 * been dropped by a middlebox. 22206 */ 22207 if ((tp->t_flags & TF_FASTOPEN) && 22208 (tp->t_rxtshift == 0)) { 22209 if (tp->t_state == TCPS_SYN_RECEIVED) { 22210 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 22211 to.to_tfo_cookie = 22212 (u_int8_t *)&tp->t_tfo_cookie.server; 22213 to.to_flags |= TOF_FASTOPEN; 22214 wanted_cookie = 1; 22215 } else if (tp->t_state == TCPS_SYN_SENT) { 22216 to.to_tfo_len = 22217 tp->t_tfo_client_cookie_len; 22218 to.to_tfo_cookie = 22219 tp->t_tfo_cookie.client; 22220 to.to_flags |= TOF_FASTOPEN; 22221 wanted_cookie = 1; 22222 /* 22223 * If we wind up having more data to 22224 * send with the SYN than can fit in 22225 * one segment, don't send any more 22226 * until the SYN|ACK comes back from 22227 * the other end. 
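 *
 * Clearing sendalot below keeps us from looping back and
 * emitting more segments on this pass.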
22228 */ 22229 sendalot = 0; 22230 } 22231 } 22232 } 22233 /* Window scaling. */ 22234 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 22235 to.to_wscale = tp->request_r_scale; 22236 to.to_flags |= TOF_SCALE; 22237 } 22238 /* Timestamps. */ 22239 if ((tp->t_flags & TF_RCVD_TSTMP) || 22240 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 22241 uint32_t ts_to_use; 22242 22243 if ((rack->r_rcvpath_rtt_up == 1) && 22244 (ms_cts == rack->r_ctl.last_rcv_tstmp_for_rtt)) { 22245 /* 22246 * When we are doing a rcv_rtt probe all 22247 * other timestamps use the next msec. This 22248 * is safe since our previous ack is in the 22249 * air and we will just have a few more 22250 * on the next ms. This assures that only 22251 * the one ack has the ms_cts that was on 22252 * our ack-probe. 22253 */ 22254 ts_to_use = ms_cts + 1; 22255 } else { 22256 ts_to_use = ms_cts; 22257 } 22258 to.to_tsval = ts_to_use + tp->ts_offset; 22259 to.to_tsecr = tp->ts_recent; 22260 to.to_flags |= TOF_TS; 22261 if ((len == 0) && 22262 (TCPS_HAVEESTABLISHED(tp->t_state)) && 22263 ((ms_cts - rack->r_ctl.last_rcv_tstmp_for_rtt) > RCV_PATH_RTT_MS) && 22264 (tp->snd_una == tp->snd_max) && 22265 (flags & TH_ACK) && 22266 (sbavail(sb) == 0) && 22267 (rack->r_ctl.current_round != 0) && 22268 ((flags & (TH_SYN|TH_FIN)) == 0) && 22269 (rack->r_rcvpath_rtt_up == 0)) { 22270 rack->r_ctl.last_rcv_tstmp_for_rtt = ms_cts; 22271 rack->r_ctl.last_time_of_arm_rcv = cts; 22272 rack->r_rcvpath_rtt_up = 1; 22273 /* Subtract 1 from seq to force a response */ 22274 rack_seq--; 22275 } 22276 } 22277 /* Set receive buffer autosizing timestamp. */ 22278 if (tp->rfbuf_ts == 0 && 22279 (so->so_rcv.sb_flags & SB_AUTOSIZE)) { 22280 tp->rfbuf_ts = ms_cts; 22281 } 22282 /* Selective ACK's. */ 22283 if (tp->t_flags & TF_SACK_PERMIT) { 22284 if (flags & TH_SYN) 22285 to.to_flags |= TOF_SACKPERM; 22286 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 22287 tp->rcv_numsacks > 0) { 22288 to.to_flags |= TOF_SACK; 22289 to.to_nsacks = tp->rcv_numsacks; 22290 to.to_sacks = (u_char *)tp->sackblks; 22291 } 22292 } 22293 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 22294 /* TCP-MD5 (RFC2385). */ 22295 if (tp->t_flags & TF_SIGNATURE) 22296 to.to_flags |= TOF_SIGNATURE; 22297 #endif 22298 22299 /* Processing the options. */ 22300 hdrlen += optlen = tcp_addoptions(&to, opt); 22301 /* 22302 * If we wanted a TFO option to be added, but it was unable 22303 * to fit, ensure no data is sent. 22304 */ 22305 if ((tp->t_flags & TF_FASTOPEN) && wanted_cookie && 22306 !(to.to_flags & TOF_FASTOPEN)) 22307 len = 0; 22308 } 22309 if (tp->t_port) { 22310 if (V_tcp_udp_tunneling_port == 0) { 22311 /* The port was removed?? 
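 *
 * This can only happen if the UDP tunneling port setting
 * (V_tcp_udp_tunneling_port) was cleared while this connection
 * still has t_port set; fail the send with EHOSTUNREACH below
 * rather than build a segment we cannot encapsulate.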
*/ 22312 SOCKBUF_UNLOCK(&so->so_snd); 22313 #ifdef TCP_ACCOUNTING 22314 crtsc = get_cyclecount(); 22315 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22316 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22317 } 22318 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22319 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22320 } 22321 sched_unpin(); 22322 #endif 22323 return (EHOSTUNREACH); 22324 } 22325 hdrlen += sizeof(struct udphdr); 22326 } 22327 #ifdef INET6 22328 if (isipv6) 22329 ipoptlen = ip6_optlen(inp); 22330 else 22331 #endif 22332 if (inp->inp_options) 22333 ipoptlen = inp->inp_options->m_len - 22334 offsetof(struct ipoption, ipopt_list); 22335 else 22336 ipoptlen = 0; 22337 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 22338 ipoptlen += ipsec_optlen; 22339 #endif 22340 22341 /* 22342 * Adjust data length if insertion of options will bump the packet 22343 * length beyond the t_maxseg length. Clear the FIN bit because we 22344 * cut off the tail of the segment. 22345 */ 22346 if (len + optlen + ipoptlen > tp->t_maxseg) { 22347 if (tso) { 22348 uint32_t if_hw_tsomax; 22349 uint32_t moff; 22350 int32_t max_len; 22351 22352 /* extract TSO information */ 22353 if_hw_tsomax = tp->t_tsomax; 22354 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 22355 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 22356 KASSERT(ipoptlen == 0, 22357 ("%s: TSO can't do IP options", __func__)); 22358 22359 /* 22360 * Check if we should limit by maximum payload 22361 * length: 22362 */ 22363 if (if_hw_tsomax != 0) { 22364 /* compute maximum TSO length */ 22365 max_len = (if_hw_tsomax - hdrlen - 22366 max_linkhdr); 22367 if (max_len <= 0) { 22368 len = 0; 22369 } else if (len > max_len) { 22370 sendalot = 1; 22371 len = max_len; 22372 mark = 2; 22373 } 22374 } 22375 /* 22376 * Prevent the last segment from being fractional 22377 * unless the send sockbuf can be emptied: 22378 */ 22379 max_len = (tp->t_maxseg - optlen); 22380 if ((sb_offset + len) < sbavail(sb)) { 22381 moff = len % (u_int)max_len; 22382 if (moff != 0) { 22383 mark = 3; 22384 len -= moff; 22385 } 22386 } 22387 /* 22388 * In case there are too many small fragments don't 22389 * use TSO: 22390 */ 22391 if (len <= max_len) { 22392 mark = 4; 22393 tso = 0; 22394 } 22395 /* 22396 * Send the FIN in a separate segment after the bulk 22397 * sending is done. We don't trust the TSO 22398 * implementations to clear the FIN flag on all but 22399 * the last segment. 22400 */ 22401 if (tp->t_flags & TF_NEEDFIN) { 22402 sendalot = 4; 22403 } 22404 } else { 22405 mark = 5; 22406 if (optlen + ipoptlen >= tp->t_maxseg) { 22407 /* 22408 * Since we don't have enough space to put 22409 * the IP header chain and the TCP header in 22410 * one packet as required by RFC 7112, don't 22411 * send it. Also ensure that at least one 22412 * byte of the payload can be put into the 22413 * TCP segment. 22414 */ 22415 SOCKBUF_UNLOCK(&so->so_snd); 22416 error = EMSGSIZE; 22417 sack_rxmit = 0; 22418 goto out; 22419 } 22420 len = tp->t_maxseg - optlen - ipoptlen; 22421 sendalot = 5; 22422 } 22423 } else { 22424 tso = 0; 22425 mark = 6; 22426 } 22427 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 22428 ("%s: len > IP_MAXPACKET", __func__)); 22429 #ifdef DIAGNOSTIC 22430 #ifdef INET6 22431 if (max_linkhdr + hdrlen > MCLBYTES) 22432 #else 22433 if (max_linkhdr + hdrlen > MHLEN) 22434 #endif 22435 panic("tcphdr too big"); 22436 #endif 22437 22438 /* 22439 * This KASSERT is here to catch edge cases at a well defined place. 22440 * Before, those had triggered (random) panic conditions further 22441 * down. 
22442 */ 22443 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 22444 if ((len == 0) && 22445 (flags & TH_FIN) && 22446 (sbused(sb))) { 22447 /* 22448 * We have outstanding data, don't send a fin by itself!. 22449 * 22450 * Check to see if we need to send a challenge ack. 22451 */ 22452 if ((sbused(sb) == (tp->snd_max - tp->snd_una)) && 22453 ((tp->snd_max - tp->snd_una) <= segsiz)) { 22454 /* 22455 * Ok less than or right at a MSS is 22456 * outstanding. The original FreeBSD stack would 22457 * have sent a FIN, which can speed things up for 22458 * a transactional application doing a MSG_WAITALL. 22459 * To speed things up since we do *not* send a FIN 22460 * if data is outstanding, we send a "challenge ack". 22461 * The idea behind that is instead of having to have 22462 * the peer wait for the delayed-ack timer to run off 22463 * we send an ack that makes the peer send us an ack. 22464 */ 22465 rack_send_ack_challange(rack); 22466 } 22467 goto just_return; 22468 } 22469 /* 22470 * Grab a header mbuf, attaching a copy of data to be transmitted, 22471 * and initialize the header from the template for sends on this 22472 * connection. 22473 */ 22474 hw_tls = tp->t_nic_ktls_xmit != 0; 22475 if (len) { 22476 uint32_t max_val; 22477 uint32_t moff; 22478 22479 if (pace_max_seg) 22480 max_val = pace_max_seg; 22481 else 22482 max_val = len; 22483 /* 22484 * We allow a limit on sending with hptsi. 22485 */ 22486 if (len > max_val) { 22487 mark = 7; 22488 len = max_val; 22489 } 22490 #ifdef INET6 22491 if (MHLEN < hdrlen + max_linkhdr) 22492 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 22493 else 22494 #endif 22495 m = m_gethdr(M_NOWAIT, MT_DATA); 22496 22497 if (m == NULL) { 22498 SOCKBUF_UNLOCK(sb); 22499 error = ENOBUFS; 22500 sack_rxmit = 0; 22501 goto out; 22502 } 22503 m->m_data += max_linkhdr; 22504 m->m_len = hdrlen; 22505 22506 /* 22507 * Start the m_copy functions from the closest mbuf to the 22508 * sb_offset in the socket buffer chain. 22509 */ 22510 mb = sbsndptr_noadv(sb, sb_offset, &moff); 22511 s_mb = mb; 22512 s_moff = moff; 22513 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 22514 m_copydata(mb, moff, (int)len, 22515 mtod(m, caddr_t)+hdrlen); 22516 /* 22517 * If we are not retransmitting advance the 22518 * sndptr to help remember the next place in 22519 * the sb. 22520 */ 22521 if (rsm == NULL) 22522 sbsndptr_adv(sb, mb, len); 22523 m->m_len += len; 22524 } else { 22525 struct sockbuf *msb; 22526 22527 /* 22528 * If we are not retransmitting pass in msb so 22529 * the socket buffer can be advanced. Otherwise 22530 * set it to NULL if its a retransmission since 22531 * we don't want to change the sb remembered 22532 * location. 22533 */ 22534 if (rsm == NULL) 22535 msb = sb; 22536 else 22537 msb = NULL; 22538 m->m_next = tcp_m_copym( 22539 mb, moff, &len, 22540 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 22541 ((rsm == NULL) ? hw_tls : 0) 22542 #ifdef NETFLIX_COPY_ARGS 22543 , &s_mb, &s_moff 22544 #endif 22545 ); 22546 if (len <= (tp->t_maxseg - optlen)) { 22547 /* 22548 * Must have ran out of mbufs for the copy 22549 * shorten it to no longer need tso. Lets 22550 * not put on sendalot since we are low on 22551 * mbufs. 
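 *
 * tcp_m_copym() may shorten len to what it could actually
 * chain up; once what remains fits in a single segment there
 * is no benefit to TSO, so it is switched off below.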
22552 */ 22553 tso = 0; 22554 } 22555 if (m->m_next == NULL) { 22556 SOCKBUF_UNLOCK(sb); 22557 (void)m_free(m); 22558 error = ENOBUFS; 22559 sack_rxmit = 0; 22560 goto out; 22561 } 22562 } 22563 if (sack_rxmit) { 22564 if (rsm && (rsm->r_flags & RACK_TLP)) { 22565 /* 22566 * TLP should not count in retran count, but 22567 * in its own bin 22568 */ 22569 counter_u64_add(rack_tlp_retran, 1); 22570 counter_u64_add(rack_tlp_retran_bytes, len); 22571 } else { 22572 tp->t_sndrexmitpack++; 22573 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 22574 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 22575 } 22576 #ifdef STATS 22577 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 22578 len); 22579 #endif 22580 } else { 22581 KMOD_TCPSTAT_INC(tcps_sndpack); 22582 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 22583 #ifdef STATS 22584 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 22585 len); 22586 #endif 22587 } 22588 /* 22589 * If we're sending everything we've got, set PUSH. (This 22590 * will keep happy those implementations which only give 22591 * data to the user when a buffer fills or a PUSH comes in.) 22592 */ 22593 if (sb_offset + len == sbused(sb) && 22594 sbused(sb) && 22595 !(flags & TH_SYN)) { 22596 flags |= TH_PUSH; 22597 add_flag |= RACK_HAD_PUSH; 22598 } 22599 22600 SOCKBUF_UNLOCK(sb); 22601 } else { 22602 SOCKBUF_UNLOCK(sb); 22603 if (tp->t_flags & TF_ACKNOW) 22604 KMOD_TCPSTAT_INC(tcps_sndacks); 22605 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 22606 KMOD_TCPSTAT_INC(tcps_sndctrl); 22607 else 22608 KMOD_TCPSTAT_INC(tcps_sndwinup); 22609 22610 m = m_gethdr(M_NOWAIT, MT_DATA); 22611 if (m == NULL) { 22612 error = ENOBUFS; 22613 sack_rxmit = 0; 22614 goto out; 22615 } 22616 #ifdef INET6 22617 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 22618 MHLEN >= hdrlen) { 22619 M_ALIGN(m, hdrlen); 22620 } else 22621 #endif 22622 m->m_data += max_linkhdr; 22623 m->m_len = hdrlen; 22624 } 22625 SOCKBUF_UNLOCK_ASSERT(sb); 22626 m->m_pkthdr.rcvif = (struct ifnet *)0; 22627 #ifdef MAC 22628 mac_inpcb_create_mbuf(inp, m); 22629 #endif 22630 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 22631 #ifdef INET6 22632 if (isipv6) 22633 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 22634 else 22635 #endif /* INET6 */ 22636 #ifdef INET 22637 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 22638 #endif 22639 th = rack->r_ctl.fsb.th; 22640 udp = rack->r_ctl.fsb.udp; 22641 if (udp) { 22642 #ifdef INET6 22643 if (isipv6) 22644 ulen = hdrlen + len - sizeof(struct ip6_hdr); 22645 else 22646 #endif /* INET6 */ 22647 ulen = hdrlen + len - sizeof(struct ip); 22648 udp->uh_ulen = htons(ulen); 22649 } 22650 } else { 22651 #ifdef INET6 22652 if (isipv6) { 22653 ip6 = mtod(m, struct ip6_hdr *); 22654 if (tp->t_port) { 22655 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 22656 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 22657 udp->uh_dport = tp->t_port; 22658 ulen = hdrlen + len - sizeof(struct ip6_hdr); 22659 udp->uh_ulen = htons(ulen); 22660 th = (struct tcphdr *)(udp + 1); 22661 } else 22662 th = (struct tcphdr *)(ip6 + 1); 22663 tcpip_fillheaders(inp, tp->t_port, ip6, th); 22664 } else 22665 #endif /* INET6 */ 22666 { 22667 #ifdef INET 22668 ip = mtod(m, struct ip *); 22669 if (tp->t_port) { 22670 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 22671 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 22672 udp->uh_dport = tp->t_port; 22673 ulen = hdrlen + len - sizeof(struct ip); 22674 udp->uh_ulen = htons(ulen); 22675 th = (struct tcphdr *)(udp + 1); 22676 } else 22677 
th = (struct tcphdr *)(ip + 1); 22678 tcpip_fillheaders(inp, tp->t_port, ip, th); 22679 #endif 22680 } 22681 } 22682 /* 22683 * If we are starting a connection, send ECN setup SYN packet. If we 22684 * are on a retransmit, we may resend those bits a number of times 22685 * as per RFC 3168. 22686 */ 22687 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { 22688 flags |= tcp_ecn_output_syn_sent(tp); 22689 } 22690 /* Also handle parallel SYN for ECN */ 22691 if (TCPS_HAVERCVDSYN(tp->t_state) && 22692 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 22693 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit); 22694 if ((tp->t_state == TCPS_SYN_RECEIVED) && 22695 (tp->t_flags2 & TF2_ECN_SND_ECE)) 22696 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 22697 #ifdef INET6 22698 if (isipv6) { 22699 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 22700 ip6->ip6_flow |= htonl(ect << 20); 22701 } 22702 else 22703 #endif 22704 { 22705 #ifdef INET 22706 ip->ip_tos &= ~IPTOS_ECN_MASK; 22707 ip->ip_tos |= ect; 22708 #endif 22709 } 22710 } 22711 th->th_seq = htonl(rack_seq); 22712 th->th_ack = htonl(tp->rcv_nxt); 22713 tcp_set_flags(th, flags); 22714 /* 22715 * Calculate receive window. Don't shrink window, but avoid silly 22716 * window syndrome. 22717 * If a RST segment is sent, advertise a window of zero. 22718 */ 22719 if (flags & TH_RST) { 22720 recwin = 0; 22721 } else { 22722 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 22723 recwin < (long)segsiz) { 22724 recwin = 0; 22725 } 22726 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 22727 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 22728 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 22729 } 22730 22731 /* 22732 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 22733 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 22734 * handled in syncache. 22735 */ 22736 if (flags & TH_SYN) 22737 th->th_win = htons((u_short) 22738 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 22739 else { 22740 /* Avoid shrinking window with window scaling. */ 22741 recwin = roundup2(recwin, 1 << tp->rcv_scale); 22742 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 22743 } 22744 /* 22745 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 22746 * window. This may cause the remote transmitter to stall. This 22747 * flag tells soreceive() to disable delayed acknowledgements when 22748 * draining the buffer. This can occur if the receiver is 22749 * attempting to read more data than can be buffered prior to 22750 * transmitting on the connection. 22751 */ 22752 if (th->th_win == 0) { 22753 tp->t_sndzerowin++; 22754 tp->t_flags |= TF_RXWIN0SENT; 22755 } else 22756 tp->t_flags &= ~TF_RXWIN0SENT; 22757 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 22758 /* Now are we using fsb?, if so copy the template data to the mbuf */ 22759 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 22760 uint8_t *cpto; 22761 22762 cpto = mtod(m, uint8_t *); 22763 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 22764 /* 22765 * We have just copied in: 22766 * IP/IP6 22767 * <optional udphdr> 22768 * tcphdr (no options) 22769 * 22770 * We need to grab the correct pointers into the mbuf 22771 * for both the tcp header, and possibly the udp header (if tunneling). 22772 * We do this by using the offset in the copy buffer and adding it 22773 * to the mbuf base pointer (cpto). 
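 *
 * For example, for plain IPv4 with no UDP encapsulation the
 * TCP header should land at cpto + sizeof(struct ip), since
 * that is its offset within the prebuilt template.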
22774 */ 22775 #ifdef INET6 22776 if (isipv6) 22777 ip6 = mtod(m, struct ip6_hdr *); 22778 else 22779 #endif /* INET6 */ 22780 #ifdef INET 22781 ip = mtod(m, struct ip *); 22782 #endif 22783 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 22784 /* If we have a udp header lets set it into the mbuf as well */ 22785 if (udp) 22786 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); 22787 } 22788 if (optlen) { 22789 bcopy(opt, th + 1, optlen); 22790 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 22791 } 22792 /* 22793 * Put TCP length in extended header, and then checksum extended 22794 * header and data. 22795 */ 22796 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 22797 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 22798 if (to.to_flags & TOF_SIGNATURE) { 22799 /* 22800 * Calculate MD5 signature and put it into the place 22801 * determined before. 22802 * NOTE: since TCP options buffer doesn't point into 22803 * mbuf's data, calculate offset and use it. 22804 */ 22805 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 22806 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 22807 /* 22808 * Do not send segment if the calculation of MD5 22809 * digest has failed. 22810 */ 22811 goto out; 22812 } 22813 } 22814 #endif 22815 #ifdef INET6 22816 if (isipv6) { 22817 /* 22818 * ip6_plen is not need to be filled now, and will be filled 22819 * in ip6_output. 22820 */ 22821 if (tp->t_port) { 22822 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 22823 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 22824 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 22825 th->th_sum = htons(0); 22826 UDPSTAT_INC(udps_opackets); 22827 } else { 22828 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 22829 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 22830 th->th_sum = in6_cksum_pseudo(ip6, 22831 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 22832 0); 22833 } 22834 } 22835 #endif 22836 #if defined(INET6) && defined(INET) 22837 else 22838 #endif 22839 #ifdef INET 22840 { 22841 if (tp->t_port) { 22842 m->m_pkthdr.csum_flags = CSUM_UDP; 22843 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 22844 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 22845 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 22846 th->th_sum = htons(0); 22847 UDPSTAT_INC(udps_opackets); 22848 } else { 22849 m->m_pkthdr.csum_flags = CSUM_TCP; 22850 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 22851 th->th_sum = in_pseudo(ip->ip_src.s_addr, 22852 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 22853 IPPROTO_TCP + len + optlen)); 22854 } 22855 /* IP version must be set here for ipv4/ipv6 checking later */ 22856 KASSERT(ip->ip_v == IPVERSION, 22857 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 22858 } 22859 #endif 22860 /* 22861 * Enable TSO and specify the size of the segments. The TCP pseudo 22862 * header checksum is always provided. XXX: Fixme: This is currently 22863 * not the case for IPv6. 22864 */ 22865 if (tso) { 22866 /* 22867 * Here we must use t_maxseg and the optlen since 22868 * the optlen may include SACK's (or DSACK). 
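 *
 * For example, with a 1460 byte t_maxseg and only the 12 byte
 * timestamp option in play, the hardware is asked to cut
 * 1448 byte (1460 - 12) segments.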
22869 */ 22870 KASSERT(len > tp->t_maxseg - optlen, 22871 ("%s: len <= tso_segsz", __func__)); 22872 m->m_pkthdr.csum_flags |= CSUM_TSO; 22873 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 22874 } 22875 KASSERT(len + hdrlen == m_length(m, NULL), 22876 ("%s: mbuf chain different than expected: %d + %u != %u", 22877 __func__, len, hdrlen, m_length(m, NULL))); 22878 22879 #ifdef TCP_HHOOK 22880 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 22881 hhook_run_tcp_est_out(tp, th, &to, len, tso); 22882 #endif 22883 if ((rack->r_ctl.crte != NULL) && 22884 (rack->rc_hw_nobuf == 0) && 22885 tcp_bblogging_on(tp)) { 22886 rack_log_queue_level(tp, rack, len, &tv, cts); 22887 } 22888 /* We're getting ready to send; log now. */ 22889 if (tcp_bblogging_on(rack->rc_tp)) { 22890 union tcp_log_stackspecific log; 22891 22892 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 22893 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 22894 if (rack->rack_no_prr) 22895 log.u_bbr.flex1 = 0; 22896 else 22897 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 22898 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 22899 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 22900 log.u_bbr.flex4 = orig_len; 22901 /* Save off the early/late values */ 22902 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 22903 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 22904 log.u_bbr.bw_inuse = rack_get_bw(rack); 22905 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 22906 log.u_bbr.flex8 = 0; 22907 if (rsm) { 22908 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 22909 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 22910 counter_u64_add(rack_collapsed_win_rxt, 1); 22911 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 22912 } 22913 if (doing_tlp) 22914 log.u_bbr.flex8 = 2; 22915 else 22916 log.u_bbr.flex8 = 1; 22917 } else { 22918 if (doing_tlp) 22919 log.u_bbr.flex8 = 3; 22920 } 22921 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 22922 log.u_bbr.flex7 = mark; 22923 log.u_bbr.flex7 <<= 8; 22924 log.u_bbr.flex7 |= pass; 22925 log.u_bbr.pkts_out = tp->t_maxseg; 22926 log.u_bbr.timeStamp = cts; 22927 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 22928 if (rsm && (rsm->r_rtr_cnt > 0)) { 22929 /* 22930 * When we have a retransmit we want to log the 22931 * burst at send and flight at send from before. 22932 */ 22933 log.u_bbr.flex5 = rsm->r_fas; 22934 log.u_bbr.bbr_substate = rsm->r_bas; 22935 } else { 22936 /* 22937 * New transmits we log in flex5 the inflight again as 22938 * well as the number of segments in our send in the 22939 * substate field. 22940 */ 22941 log.u_bbr.flex5 = log.u_bbr.inflight; 22942 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 22943 } 22944 log.u_bbr.lt_epoch = cwnd_to_use; 22945 log.u_bbr.delivered = sendalot; 22946 log.u_bbr.rttProp = (uintptr_t)rsm; 22947 log.u_bbr.pkt_epoch = __LINE__; 22948 if (rsm) { 22949 log.u_bbr.delRate = rsm->r_flags; 22950 log.u_bbr.delRate <<= 31; 22951 log.u_bbr.delRate |= rack->r_must_retran; 22952 log.u_bbr.delRate <<= 1; 22953 log.u_bbr.delRate |= (sack_rxmit & 0x00000001); 22954 } else { 22955 log.u_bbr.delRate = rack->r_must_retran; 22956 log.u_bbr.delRate <<= 1; 22957 log.u_bbr.delRate |= (sack_rxmit & 0x00000001); 22958 } 22959 lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 22960 len, &log, false, NULL, __func__, __LINE__, &tv); 22961 } else 22962 lgb = NULL; 22963 22964 /* 22965 * Fill in IP length and desired time to live and send to IP level. 
22966 * There should be a better way to handle ttl and tos; we could keep 22967 * them in the template, but need a way to checksum without them. 22968 */ 22969 /* 22970 * m->m_pkthdr.len should have been set before cksum calcuration, 22971 * because in6_cksum() need it. 22972 */ 22973 #ifdef INET6 22974 if (isipv6) { 22975 /* 22976 * we separately set hoplimit for every segment, since the 22977 * user might want to change the value via setsockopt. Also, 22978 * desired default hop limit might be changed via Neighbor 22979 * Discovery. 22980 */ 22981 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); 22982 22983 /* 22984 * Set the packet size here for the benefit of DTrace 22985 * probes. ip6_output() will set it properly; it's supposed 22986 * to include the option header lengths as well. 22987 */ 22988 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 22989 22990 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 22991 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 22992 else 22993 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 22994 22995 if (tp->t_state == TCPS_SYN_SENT) 22996 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 22997 22998 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 22999 /* TODO: IPv6 IP6TOS_ECT bit on */ 23000 error = ip6_output(m, 23001 inp->in6p_outputopts, 23002 &inp->inp_route6, 23003 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 23004 NULL, NULL, inp); 23005 23006 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) 23007 mtu = inp->inp_route6.ro_nh->nh_mtu; 23008 } 23009 #endif /* INET6 */ 23010 #if defined(INET) && defined(INET6) 23011 else 23012 #endif 23013 #ifdef INET 23014 { 23015 ip->ip_len = htons(m->m_pkthdr.len); 23016 #ifdef INET6 23017 if (inp->inp_vflag & INP_IPV6PROTO) 23018 ip->ip_ttl = in6_selecthlim(inp, NULL); 23019 #endif /* INET6 */ 23020 rack->r_ctl.fsb.hoplimit = ip->ip_ttl; 23021 /* 23022 * If we do path MTU discovery, then we set DF on every 23023 * packet. This might not be the best thing to do according 23024 * to RFC3390 Section 2. However the tcp hostcache migitates 23025 * the problem so it affects only the first tcp connection 23026 * with a host. 23027 * 23028 * NB: Don't set DF on small MTU/MSS to have a safe 23029 * fallback. 23030 */ 23031 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 23032 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 23033 if (tp->t_port == 0 || len < V_tcp_minmss) { 23034 ip->ip_off |= htons(IP_DF); 23035 } 23036 } else { 23037 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 23038 } 23039 23040 if (tp->t_state == TCPS_SYN_SENT) 23041 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 23042 23043 TCP_PROBE5(send, NULL, tp, ip, tp, th); 23044 23045 error = ip_output(m, 23046 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 23047 inp->inp_options, 23048 #else 23049 NULL, 23050 #endif 23051 &inp->inp_route, 23052 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0, 23053 inp); 23054 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) 23055 mtu = inp->inp_route.ro_nh->nh_mtu; 23056 } 23057 #endif /* INET */ 23058 if (lgb) { 23059 lgb->tlb_errno = error; 23060 lgb = NULL; 23061 } 23062 23063 out: 23064 /* 23065 * In transmit state, time the transmission and arrange for the 23066 * retransmit. In persist state, just set snd_max. 
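 *
 * When the send succeeds and it carried new data (rsm == NULL),
 * the error == 0 block below also maintains the long term
 * bandwidth (lt_bw) accounting, flushing it as the sequence
 * space approaches a wrap.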
23067 */ 23068 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 23069 rack_to_usec_ts(&tv), 23070 rsm, add_flag, s_mb, s_moff, hw_tls, segsiz); 23071 if (error == 0) { 23072 if (add_flag & RACK_IS_PCM) { 23073 /* We just launched a PCM */ 23074 /* rrs here log */ 23075 rack->pcm_in_progress = 1; 23076 rack->pcm_needed = 0; 23077 rack_log_pcm(rack, 7, len, rack->r_ctl.pcm_max_seg, add_flag); 23078 } 23079 if (rsm == NULL) { 23080 if (rack->lt_bw_up == 0) { 23081 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv); 23082 rack->r_ctl.lt_seq = tp->snd_una; 23083 rack->lt_bw_up = 1; 23084 } else if (((rack_seq + len) - rack->r_ctl.lt_seq) > 0x7fffffff) { 23085 /* 23086 * Need to record what we have since we are 23087 * approaching seq wrap. 23088 */ 23089 uint64_t tmark; 23090 23091 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); 23092 rack->r_ctl.lt_seq = tp->snd_una; 23093 tmark = tcp_get_u64_usecs(&tv); 23094 if (tmark > rack->r_ctl.lt_timemark) { 23095 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 23096 rack->r_ctl.lt_timemark = tmark; 23097 } 23098 } 23099 } 23100 rack->forced_ack = 0; /* If we send something zap the FA flag */ 23101 counter_u64_add(rack_total_bytes, len); 23102 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 23103 if (rsm && doing_tlp) { 23104 rack->rc_last_sent_tlp_past_cumack = 0; 23105 rack->rc_last_sent_tlp_seq_valid = 1; 23106 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 23107 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 23108 } 23109 if (rack->rc_hw_nobuf) { 23110 rack->rc_hw_nobuf = 0; 23111 rack->r_ctl.rc_agg_delayed = 0; 23112 rack->r_early = 0; 23113 rack->r_late = 0; 23114 rack->r_ctl.rc_agg_early = 0; 23115 } 23116 if (rsm && (doing_tlp == 0)) { 23117 /* Set we retransmitted */ 23118 rack->rc_gp_saw_rec = 1; 23119 } else { 23120 if (cwnd_to_use > tp->snd_ssthresh) { 23121 /* Set we sent in CA */ 23122 rack->rc_gp_saw_ca = 1; 23123 } else { 23124 /* Set we sent in SS */ 23125 rack->rc_gp_saw_ss = 1; 23126 } 23127 } 23128 if (TCPS_HAVEESTABLISHED(tp->t_state) && 23129 (tp->t_flags & TF_SACK_PERMIT) && 23130 tp->rcv_numsacks > 0) 23131 tcp_clean_dsack_blocks(tp); 23132 tot_len_this_send += len; 23133 if (len == 0) { 23134 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 23135 } else { 23136 int idx; 23137 23138 idx = (len / segsiz) + 3; 23139 if (idx >= TCP_MSS_ACCT_ATIMER) 23140 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 23141 else 23142 counter_u64_add(rack_out_size[idx], 1); 23143 } 23144 } 23145 if ((rack->rack_no_prr == 0) && 23146 sub_from_prr && 23147 (error == 0)) { 23148 if (rack->r_ctl.rc_prr_sndcnt >= len) 23149 rack->r_ctl.rc_prr_sndcnt -= len; 23150 else 23151 rack->r_ctl.rc_prr_sndcnt = 0; 23152 } 23153 sub_from_prr = 0; 23154 if (doing_tlp) { 23155 /* Make sure the TLP is added */ 23156 add_flag |= RACK_TLP; 23157 } else if (rsm) { 23158 /* If its a resend without TLP then it must not have the flag */ 23159 rsm->r_flags &= ~RACK_TLP; 23160 } 23161 23162 23163 if ((error == 0) && 23164 (len > 0) && 23165 (tp->snd_una == tp->snd_max)) 23166 rack->r_ctl.rc_tlp_rxt_last_time = cts; 23167 23168 { 23169 /* 23170 * This block is not associated with the above error == 0 test. 23171 * It is used to advance snd_max if we have a new transmit. 
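 *
 * Note that a SYN or FIN being sent for the first time consumes
 * a sequence number of its own, which is why snd_max is bumped
 * for TF_SENTSYN/TF_SENTFIN before len is added.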
23172 */ 23173 tcp_seq startseq = tp->snd_max; 23174 23175 23176 if (rsm && (doing_tlp == 0)) 23177 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 23178 if (error) 23179 /* We don't log or do anything with errors */ 23180 goto nomore; 23181 if (doing_tlp == 0) { 23182 if (rsm == NULL) { 23183 /* 23184 * Not a retransmission of some 23185 * sort, new data is going out so 23186 * clear our TLP count and flag. 23187 */ 23188 rack->rc_tlp_in_progress = 0; 23189 rack->r_ctl.rc_tlp_cnt_out = 0; 23190 } 23191 } else { 23192 /* 23193 * We have just sent a TLP, mark that it is true 23194 * and make sure our in progress is set so we 23195 * continue to check the count. 23196 */ 23197 rack->rc_tlp_in_progress = 1; 23198 rack->r_ctl.rc_tlp_cnt_out++; 23199 } 23200 /* 23201 * If we are retransmitting we are done, snd_max 23202 * does not get updated. 23203 */ 23204 if (sack_rxmit) 23205 goto nomore; 23206 if ((tp->snd_una == tp->snd_max) && (len > 0)) { 23207 /* 23208 * Update the time we just added data since 23209 * nothing was outstanding. 23210 */ 23211 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 23212 tp->t_acktime = ticks; 23213 } 23214 /* 23215 * Now for special SYN/FIN handling. 23216 */ 23217 if (flags & (TH_SYN | TH_FIN)) { 23218 if ((flags & TH_SYN) && 23219 ((tp->t_flags & TF_SENTSYN) == 0)) { 23220 tp->snd_max++; 23221 tp->t_flags |= TF_SENTSYN; 23222 } 23223 if ((flags & TH_FIN) && 23224 ((tp->t_flags & TF_SENTFIN) == 0)) { 23225 tp->snd_max++; 23226 tp->t_flags |= TF_SENTFIN; 23227 } 23228 } 23229 tp->snd_max += len; 23230 if (rack->rc_new_rnd_needed) { 23231 rack_new_round_starts(tp, rack, tp->snd_max); 23232 } 23233 /* 23234 * Time this transmission if not a retransmission and 23235 * not currently timing anything. 23236 * This is only relevant in case of switching back to 23237 * the base stack. 23238 */ 23239 if (tp->t_rtttime == 0) { 23240 tp->t_rtttime = ticks; 23241 tp->t_rtseq = startseq; 23242 KMOD_TCPSTAT_INC(tcps_segstimed); 23243 } 23244 if (len && 23245 ((tp->t_flags & TF_GPUTINPROG) == 0)) 23246 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 23247 /* 23248 * If we are doing FO we need to update the mbuf position and subtract 23249 * this happens when the peer sends us duplicate information and 23250 * we thus want to send a DSACK. 23251 * 23252 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 23253 * turned off? If not then we are going to echo multiple DSACK blocks 23254 * out (with the TSO), which we should not be doing. 23255 */ 23256 if (rack->r_fast_output && len) { 23257 if (rack->r_ctl.fsb.left_to_send > len) 23258 rack->r_ctl.fsb.left_to_send -= len; 23259 else 23260 rack->r_ctl.fsb.left_to_send = 0; 23261 if (rack->r_ctl.fsb.left_to_send < segsiz) 23262 rack->r_fast_output = 0; 23263 if (rack->r_fast_output) { 23264 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 23265 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 23266 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); 23267 } 23268 } 23269 if (rack_pcm_blast == 0) { 23270 if ((orig_len > len) && 23271 (add_flag & RACK_IS_PCM) && 23272 (len < pace_max_seg) && 23273 ((pace_max_seg - len) > segsiz)) { 23274 /* 23275 * We are doing a PCM measurement and we did 23276 * not get enough data in the TSO to meet the 23277 * burst requirement. 
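 *
 * Shrink the bookkeeping by what was just sent and loop back to
 * the send label so the remainder of the PCM burst goes out in
 * this same call.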
23278 */ 23279 uint32_t n_len; 23280 23281 n_len = (orig_len - len); 23282 orig_len -= len; 23283 pace_max_seg -= len; 23284 len = n_len; 23285 sb_offset = tp->snd_max - tp->snd_una; 23286 /* Re-lock for the next spin */ 23287 SOCKBUF_LOCK(sb); 23288 goto send; 23289 } 23290 } else { 23291 if ((orig_len > len) && 23292 (add_flag & RACK_IS_PCM) && 23293 ((orig_len - len) > segsiz)) { 23294 /* 23295 * We are doing a PCM measurement and we did 23296 * not get enough data in the TSO to meet the 23297 * burst requirement. 23298 */ 23299 uint32_t n_len; 23300 23301 n_len = (orig_len - len); 23302 orig_len -= len; 23303 len = n_len; 23304 sb_offset = tp->snd_max - tp->snd_una; 23305 /* Re-lock for the next spin */ 23306 SOCKBUF_LOCK(sb); 23307 goto send; 23308 } 23309 } 23310 } 23311 nomore: 23312 if (error) { 23313 rack->r_ctl.rc_agg_delayed = 0; 23314 rack->r_early = 0; 23315 rack->r_late = 0; 23316 rack->r_ctl.rc_agg_early = 0; 23317 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 23318 /* 23319 * Failures do not advance the seq counter above. For the 23320 * case of ENOBUFS we will fall out and retry in 1ms with 23321 * the hpts. Everything else will just have to retransmit 23322 * with the timer. 23323 * 23324 * In any case, we do not want to loop around for another 23325 * send without a good reason. 23326 */ 23327 sendalot = 0; 23328 switch (error) { 23329 case EPERM: 23330 case EACCES: 23331 tp->t_softerror = error; 23332 #ifdef TCP_ACCOUNTING 23333 crtsc = get_cyclecount(); 23334 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23335 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 23336 } 23337 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23338 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 23339 } 23340 sched_unpin(); 23341 #endif 23342 return (error); 23343 case ENOBUFS: 23344 /* 23345 * Pace us right away to retry in a some 23346 * time 23347 */ 23348 if (rack->r_ctl.crte != NULL) { 23349 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); 23350 if (tcp_bblogging_on(rack->rc_tp)) 23351 rack_log_queue_level(tp, rack, len, &tv, cts); 23352 } else 23353 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); 23354 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 23355 if (rack->rc_enobuf < 0x7f) 23356 rack->rc_enobuf++; 23357 if (slot < (10 * HPTS_USEC_IN_MSEC)) 23358 slot = 10 * HPTS_USEC_IN_MSEC; 23359 if (rack->r_ctl.crte != NULL) { 23360 counter_u64_add(rack_saw_enobuf_hw, 1); 23361 tcp_rl_log_enobuf(rack->r_ctl.crte); 23362 } 23363 counter_u64_add(rack_saw_enobuf, 1); 23364 goto enobufs; 23365 case EMSGSIZE: 23366 /* 23367 * For some reason the interface we used initially 23368 * to send segments changed to another or lowered 23369 * its MTU. If TSO was active we either got an 23370 * interface without TSO capabilits or TSO was 23371 * turned off. If we obtained mtu from ip_output() 23372 * then update it and try again. 
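 *
 * tcp_mss_update() below recomputes t_maxseg from the reported
 * mtu; we only loop back to try again if that actually shrank
 * the segment size, otherwise we rearm the pacing timer for
 * 10ms and return.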
23373 */ 23374 if (tso) 23375 tp->t_flags &= ~TF_TSO; 23376 if (mtu != 0) { 23377 int saved_mtu; 23378 23379 saved_mtu = tp->t_maxseg; 23380 tcp_mss_update(tp, -1, mtu, NULL, NULL); 23381 if (saved_mtu > tp->t_maxseg) { 23382 goto again; 23383 } 23384 } 23385 slot = 10 * HPTS_USEC_IN_MSEC; 23386 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 23387 #ifdef TCP_ACCOUNTING 23388 crtsc = get_cyclecount(); 23389 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23390 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 23391 } 23392 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23393 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 23394 } 23395 sched_unpin(); 23396 #endif 23397 return (error); 23398 case ENETUNREACH: 23399 counter_u64_add(rack_saw_enetunreach, 1); 23400 case EHOSTDOWN: 23401 case EHOSTUNREACH: 23402 case ENETDOWN: 23403 if (TCPS_HAVERCVDSYN(tp->t_state)) { 23404 tp->t_softerror = error; 23405 } 23406 /* FALLTHROUGH */ 23407 default: 23408 slot = 10 * HPTS_USEC_IN_MSEC; 23409 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 23410 #ifdef TCP_ACCOUNTING 23411 crtsc = get_cyclecount(); 23412 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23413 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 23414 } 23415 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23416 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 23417 } 23418 sched_unpin(); 23419 #endif 23420 return (error); 23421 } 23422 } else { 23423 rack->rc_enobuf = 0; 23424 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 23425 rack->r_ctl.retran_during_recovery += len; 23426 } 23427 KMOD_TCPSTAT_INC(tcps_sndtotal); 23428 23429 /* 23430 * Data sent (as far as we can tell). If this advertises a larger 23431 * window than any other segment, then remember the size of the 23432 * advertised window. Any pending ACK has now been sent. 23433 */ 23434 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 23435 tp->rcv_adv = tp->rcv_nxt + recwin; 23436 23437 tp->last_ack_sent = tp->rcv_nxt; 23438 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 23439 enobufs: 23440 if (sendalot) { 23441 /* Do we need to turn off sendalot? */ 23442 if (pace_max_seg && 23443 (tot_len_this_send >= pace_max_seg)) { 23444 /* We hit our max. */ 23445 sendalot = 0; 23446 } 23447 } 23448 if ((error == 0) && (flags & TH_FIN)) 23449 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 23450 if (flags & TH_RST) { 23451 /* 23452 * We don't send again after sending a RST. 23453 */ 23454 slot = 0; 23455 sendalot = 0; 23456 if (error == 0) 23457 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 23458 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) { 23459 /* 23460 * Get our pacing rate, if an error 23461 * occurred in sending (ENOBUF) we would 23462 * hit the else if with slot preset. Other 23463 * errors return. 23464 */ 23465 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz, __LINE__); 23466 } 23467 /* We have sent clear the flag */ 23468 rack->r_ent_rec_ns = 0; 23469 if (rack->r_must_retran) { 23470 if (rsm) { 23471 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 23472 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 23473 /* 23474 * We have retransmitted all. 23475 */ 23476 rack->r_must_retran = 0; 23477 rack->r_ctl.rc_out_at_rto = 0; 23478 } 23479 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 23480 /* 23481 * Sending new data will also kill 23482 * the loop. 
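 * That is, once snd_max reaches the point recorded at the RTO
 * (rc_snd_max_at_rto) nothing that must be retransmitted remains,
 * so clear the flag and the outstanding-at-RTO count.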
23483 */ 23484 rack->r_must_retran = 0; 23485 rack->r_ctl.rc_out_at_rto = 0; 23486 } 23487 } 23488 rack->r_ctl.fsb.recwin = recwin; 23489 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 23490 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 23491 /* 23492 * We hit an RTO and now have past snd_max at the RTO 23493 * clear all the WAS flags. 23494 */ 23495 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 23496 } 23497 if (slot) { 23498 /* set the rack tcb into the slot N */ 23499 if ((error == 0) && 23500 rack_use_rfo && 23501 ((flags & (TH_SYN|TH_FIN)) == 0) && 23502 (rsm == NULL) && 23503 (ipoptlen == 0) && 23504 (tp->rcv_numsacks == 0) && 23505 (rack->rc_policer_detected == 0) && 23506 rack->r_fsb_inited && 23507 TCPS_HAVEESTABLISHED(tp->t_state) && 23508 ((IN_RECOVERY(tp->t_flags)) == 0) && 23509 (rack->r_must_retran == 0) && 23510 ((tp->t_flags & TF_NEEDFIN) == 0) && 23511 (len > 0) && (orig_len > 0) && 23512 (orig_len > len) && 23513 ((orig_len - len) >= segsiz) && 23514 ((optlen == 0) || 23515 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 23516 /* We can send at least one more MSS using our fsb */ 23517 rack_setup_fast_output(tp, rack, sb, len, orig_len, 23518 segsiz, pace_max_seg, hw_tls, flags); 23519 } else 23520 rack->r_fast_output = 0; 23521 rack_log_fsb(rack, tp, so, flags, 23522 ipoptlen, orig_len, len, error, 23523 (rsm == NULL), optlen, __LINE__, 2); 23524 } else if (sendalot) { 23525 int ret; 23526 23527 sack_rxmit = 0; 23528 if ((error == 0) && 23529 rack_use_rfo && 23530 ((flags & (TH_SYN|TH_FIN)) == 0) && 23531 (rsm == NULL) && 23532 (ipoptlen == 0) && 23533 (tp->rcv_numsacks == 0) && 23534 (rack->r_must_retran == 0) && 23535 rack->r_fsb_inited && 23536 TCPS_HAVEESTABLISHED(tp->t_state) && 23537 ((IN_RECOVERY(tp->t_flags)) == 0) && 23538 ((tp->t_flags & TF_NEEDFIN) == 0) && 23539 (len > 0) && (orig_len > 0) && 23540 (orig_len > len) && 23541 ((orig_len - len) >= segsiz) && 23542 ((optlen == 0) || 23543 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 23544 /* we can use fast_output for more */ 23545 rack_setup_fast_output(tp, rack, sb, len, orig_len, 23546 segsiz, pace_max_seg, hw_tls, flags); 23547 if (rack->r_fast_output) { 23548 error = 0; 23549 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 23550 if (ret >= 0) 23551 return (ret); 23552 else if (error) 23553 goto nomore; 23554 23555 } 23556 } 23557 goto again; 23558 } 23559 skip_all_send: 23560 /* Assure when we leave that snd_nxt will point to top */ 23561 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 23562 tp->snd_nxt = tp->snd_max; 23563 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 23564 #ifdef TCP_ACCOUNTING 23565 crtsc = get_cyclecount() - ts_val; 23566 if (tot_len_this_send) { 23567 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23568 tp->tcp_cnt_counters[SND_OUT_DATA]++; 23569 } 23570 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23571 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 23572 } 23573 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23574 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 23575 } 23576 } else { 23577 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23578 tp->tcp_cnt_counters[SND_OUT_ACK]++; 23579 } 23580 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23581 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 23582 } 23583 } 23584 sched_unpin(); 23585 #endif 23586 if (error == ENOBUFS) 23587 error = 0; 23588 return (error); 23589 } 23590 23591 static void 23592 rack_update_seg(struct tcp_rack *rack) 
23593 { 23594 uint32_t orig_val; 23595 23596 orig_val = rack->r_ctl.rc_pace_max_segs; 23597 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 23598 if (orig_val != rack->r_ctl.rc_pace_max_segs) 23599 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); 23600 } 23601 23602 static void 23603 rack_mtu_change(struct tcpcb *tp) 23604 { 23605 /* 23606 * The MSS may have changed 23607 */ 23608 struct tcp_rack *rack; 23609 struct rack_sendmap *rsm; 23610 23611 rack = (struct tcp_rack *)tp->t_fb_ptr; 23612 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 23613 /* 23614 * The MTU has changed we need to resend everything 23615 * since all we have sent is lost. We first fix 23616 * up the mtu though. 23617 */ 23618 rack_set_pace_segments(tp, rack, __LINE__, NULL); 23619 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 23620 rack_remxt_tmr(tp); 23621 rack->r_fast_output = 0; 23622 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 23623 rack->r_ctl.rc_sacked); 23624 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 23625 rack->r_must_retran = 1; 23626 /* Mark all inflight to needing to be rxt'd */ 23627 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 23628 rsm->r_flags |= (RACK_MUST_RXT|RACK_PMTU_CHG); 23629 } 23630 } 23631 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 23632 /* We don't use snd_nxt to retransmit */ 23633 tp->snd_nxt = tp->snd_max; 23634 } 23635 23636 static int 23637 rack_set_dgp(struct tcp_rack *rack) 23638 { 23639 if (rack->dgp_on == 1) 23640 return(0); 23641 if ((rack->use_fixed_rate == 1) && 23642 (rack->rc_always_pace == 1)) { 23643 /* 23644 * We are already pacing another 23645 * way. 23646 */ 23647 return (EBUSY); 23648 } 23649 if (rack->rc_always_pace == 1) { 23650 rack_remove_pacing(rack); 23651 } 23652 if (tcp_incr_dgp_pacing_cnt() == 0) 23653 return (ENOSPC); 23654 rack->r_ctl.pacing_method |= RACK_DGP_PACING; 23655 rack->rc_fillcw_apply_discount = 0; 23656 rack->dgp_on = 1; 23657 rack->rc_always_pace = 1; 23658 rack->rc_pace_dnd = 1; 23659 rack->use_fixed_rate = 0; 23660 if (rack->gp_ready) 23661 rack_set_cc_pacing(rack); 23662 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23663 rack->rack_attempt_hdwr_pace = 0; 23664 /* rxt settings */ 23665 rack->full_size_rxt = 1; 23666 rack->shape_rxt_to_pacing_min = 0; 23667 /* cmpack=1 */ 23668 rack->r_use_cmp_ack = 1; 23669 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 23670 rack->r_use_cmp_ack) 23671 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 23672 /* scwnd=1 */ 23673 rack->rack_enable_scwnd = 1; 23674 /* dynamic=100 */ 23675 rack->rc_gp_dyn_mul = 1; 23676 /* gp_inc_ca */ 23677 rack->r_ctl.rack_per_of_gp_ca = 100; 23678 /* rrr_conf=3 */ 23679 rack->r_rr_config = 3; 23680 /* npush=2 */ 23681 rack->r_ctl.rc_no_push_at_mrtt = 2; 23682 /* fillcw=1 */ 23683 rack->rc_pace_to_cwnd = 1; 23684 rack->rc_pace_fill_if_rttin_range = 0; 23685 rack->rtt_limit_mul = 0; 23686 /* noprr=1 */ 23687 rack->rack_no_prr = 1; 23688 /* lscwnd=1 */ 23689 rack->r_limit_scw = 1; 23690 /* gp_inc_rec */ 23691 rack->r_ctl.rack_per_of_gp_rec = 90; 23692 return (0); 23693 } 23694 23695 static int 23696 rack_set_profile(struct tcp_rack *rack, int prof) 23697 { 23698 int err = EINVAL; 23699 if (prof == 1) { 23700 /* 23701 * Profile 1 is "standard" DGP. It ignores 23702 * client buffer level. 
23703 */ 23704 err = rack_set_dgp(rack); 23705 if (err) 23706 return (err); 23707 } else if (prof == 6) { 23708 err = rack_set_dgp(rack); 23709 if (err) 23710 return (err); 23711 /* 23712 * Profile 6 tweaks DGP so that it will apply to 23713 * fill-cw the same settings that profile5 does 23714 * to replace DGP. It gets then the max(dgp-rate, fillcw(discounted). 23715 */ 23716 rack->rc_fillcw_apply_discount = 1; 23717 } else if (prof == 0) { 23718 /* This changes things back to the default settings */ 23719 if (rack->rc_always_pace == 1) { 23720 rack_remove_pacing(rack); 23721 } else { 23722 /* Make sure any stray flags are off */ 23723 rack->dgp_on = 0; 23724 rack->rc_hybrid_mode = 0; 23725 rack->use_fixed_rate = 0; 23726 } 23727 err = 0; 23728 if (rack_fill_cw_state) 23729 rack->rc_pace_to_cwnd = 1; 23730 else 23731 rack->rc_pace_to_cwnd = 0; 23732 23733 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 23734 rack->r_ctl.pacing_method |= RACK_REG_PACING; 23735 rack->rc_always_pace = 1; 23736 if (rack->rack_hibeta) 23737 rack_set_cc_pacing(rack); 23738 } else 23739 rack->rc_always_pace = 0; 23740 if (rack_dsack_std_based & 0x1) { 23741 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 23742 rack->rc_rack_tmr_std_based = 1; 23743 } 23744 if (rack_dsack_std_based & 0x2) { 23745 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 23746 rack->rc_rack_use_dsack = 1; 23747 } 23748 if (rack_use_cmp_acks) 23749 rack->r_use_cmp_ack = 1; 23750 else 23751 rack->r_use_cmp_ack = 0; 23752 if (rack_disable_prr) 23753 rack->rack_no_prr = 1; 23754 else 23755 rack->rack_no_prr = 0; 23756 if (rack_gp_no_rec_chg) 23757 rack->rc_gp_no_rec_chg = 1; 23758 else 23759 rack->rc_gp_no_rec_chg = 0; 23760 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 23761 rack->r_mbuf_queue = 1; 23762 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 23763 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 23764 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23765 } else { 23766 rack->r_mbuf_queue = 0; 23767 rack->rc_tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 23768 } 23769 if (rack_enable_shared_cwnd) 23770 rack->rack_enable_scwnd = 1; 23771 else 23772 rack->rack_enable_scwnd = 0; 23773 if (rack_do_dyn_mul) { 23774 /* When dynamic adjustment is on CA needs to start at 100% */ 23775 rack->rc_gp_dyn_mul = 1; 23776 if (rack_do_dyn_mul >= 100) 23777 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 23778 } else { 23779 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 23780 rack->rc_gp_dyn_mul = 0; 23781 } 23782 rack->r_rr_config = 0; 23783 rack->r_ctl.rc_no_push_at_mrtt = 0; 23784 rack->rc_pace_fill_if_rttin_range = 0; 23785 rack->rtt_limit_mul = 0; 23786 23787 if (rack_enable_hw_pacing) 23788 rack->rack_hdw_pace_ena = 1; 23789 else 23790 rack->rack_hdw_pace_ena = 0; 23791 if (rack_disable_prr) 23792 rack->rack_no_prr = 1; 23793 else 23794 rack->rack_no_prr = 0; 23795 if (rack_limits_scwnd) 23796 rack->r_limit_scw = 1; 23797 else 23798 rack->r_limit_scw = 0; 23799 rack_init_retransmit_value(rack, rack_rxt_controls); 23800 err = 0; 23801 } 23802 return (err); 23803 } 23804 23805 static int 23806 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 23807 { 23808 struct deferred_opt_list *dol; 23809 23810 dol = malloc(sizeof(struct deferred_opt_list), 23811 M_TCPDO, M_NOWAIT|M_ZERO); 23812 if (dol == NULL) { 23813 /* 23814 * No space yikes -- fail out.. 
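 * Returning 0 tells the caller the option could not be queued;
 * a return of 1 below means it was queued for
 * rack_apply_deferred_options() to replay later.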
23815 */ 23816 return (0); 23817 } 23818 dol->optname = sopt_name; 23819 dol->optval = loptval; 23820 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 23821 return (1); 23822 } 23823 23824 static int 23825 process_hybrid_pacing(struct tcp_rack *rack, struct tcp_hybrid_req *hybrid) 23826 { 23827 #ifdef TCP_REQUEST_TRK 23828 struct tcp_sendfile_track *sft; 23829 struct timeval tv; 23830 tcp_seq seq; 23831 int err; 23832 23833 microuptime(&tv); 23834 23835 /* Make sure no fixed rate is on */ 23836 rack->use_fixed_rate = 0; 23837 rack->r_ctl.rc_fixed_pacing_rate_rec = 0; 23838 rack->r_ctl.rc_fixed_pacing_rate_ca = 0; 23839 rack->r_ctl.rc_fixed_pacing_rate_ss = 0; 23840 /* Now allocate or find our entry that will have these settings */ 23841 sft = tcp_req_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusectick(&tv), 0); 23842 if (sft == NULL) { 23843 rack->rc_tp->tcp_hybrid_error++; 23844 /* no space, where would it have gone? */ 23845 seq = rack->rc_tp->snd_una + rack->rc_tp->t_inpcb.inp_socket->so_snd.sb_ccc; 23846 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_ROOM, __LINE__, 0); 23847 return (ENOSPC); 23848 } 23849 /* mask our internal flags */ 23850 hybrid->hybrid_flags &= TCP_HYBRID_PACING_USER_MASK; 23851 /* The seq will be snd_una + everything in the buffer */ 23852 seq = sft->start_seq; 23853 if ((hybrid->hybrid_flags & TCP_HYBRID_PACING_ENABLE) == 0) { 23854 /* Disabling hybrid pacing */ 23855 if (rack->rc_hybrid_mode) { 23856 rack_set_profile(rack, 0); 23857 rack->rc_tp->tcp_hybrid_stop++; 23858 } 23859 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_TURNED_OFF, __LINE__, 0); 23860 return (0); 23861 } 23862 if (rack->dgp_on == 0) { 23863 /* 23864 * If we have not yet turned DGP on, do so 23865 * now setting pure DGP mode, no buffer level 23866 * response. 23867 */ 23868 if ((err = rack_set_profile(rack, 1)) != 0){ 23869 /* Failed to turn pacing on */ 23870 rack->rc_tp->tcp_hybrid_error++; 23871 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_NO_PACING, __LINE__, 0); 23872 return (err); 23873 } 23874 } 23875 /* 23876 * Now we must switch to hybrid mode as well which also 23877 * means moving to regular pacing. 23878 */ 23879 if (rack->rc_hybrid_mode == 0) { 23880 /* First time */ 23881 if (tcp_can_enable_pacing()) { 23882 rack->r_ctl.pacing_method |= RACK_REG_PACING; 23883 rack->rc_hybrid_mode = 1; 23884 } else { 23885 return (ENOSPC); 23886 } 23887 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) { 23888 /* 23889 * This should be true. 23890 */ 23891 tcp_dec_dgp_pacing_cnt(); 23892 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 23893 } 23894 } 23895 /* Now set in our flags */ 23896 sft->hybrid_flags = hybrid->hybrid_flags | TCP_HYBRID_PACING_WASSET; 23897 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_CSPR) 23898 sft->cspr = hybrid->cspr; 23899 else 23900 sft->cspr = 0; 23901 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_H_MS) 23902 sft->hint_maxseg = hybrid->hint_maxseg; 23903 else 23904 sft->hint_maxseg = 0; 23905 rack->rc_tp->tcp_hybrid_start++; 23906 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_RULES_SET, __LINE__,0); 23907 return (0); 23908 #else 23909 return (ENOTSUP); 23910 #endif 23911 } 23912 23913 static int 23914 rack_stack_information(struct tcpcb *tp, struct stack_specific_info *si) 23915 { 23916 /* 23917 * Gather rack specific information. 
23918 */ 23919 struct tcp_rack *rack; 23920 23921 rack = (struct tcp_rack *)tp->t_fb_ptr; 23922 /* We pulled a SSI info log out what was there */ 23923 policer_detection_log(rack, rack->rc_highly_buffered, 0, 0, 0, 20); 23924 if (rack->policer_detect_on) { 23925 si->policer_detection_enabled = 1; 23926 if (rack->rc_policer_detected) { 23927 si->policer_detected = 1; 23928 si->policer_bucket_size = rack->r_ctl.policer_bucket_size; 23929 si->policer_last_bw = rack->r_ctl.policer_bw; 23930 } else { 23931 si->policer_detected = 0; 23932 si->policer_bucket_size = 0; 23933 si->policer_last_bw = 0; 23934 } 23935 si->current_round = rack->r_ctl.current_round; 23936 si->highly_buffered = rack->rc_highly_buffered; 23937 } 23938 si->bytes_transmitted = tp->t_sndbytes; 23939 si->bytes_retransmitted = tp->t_snd_rxt_bytes; 23940 return (0); 23941 } 23942 23943 static int 23944 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 23945 uint32_t optval, uint64_t loptval, struct tcp_hybrid_req *hybrid) 23946 23947 { 23948 struct epoch_tracker et; 23949 struct sockopt sopt; 23950 struct cc_newreno_opts opt; 23951 uint64_t val; 23952 int error = 0; 23953 uint16_t ca, ss; 23954 23955 switch (sopt_name) { 23956 case TCP_RACK_SET_RXT_OPTIONS: 23957 if ((optval >= 0) && (optval <= 2)) { 23958 rack_init_retransmit_value(rack, optval); 23959 } else { 23960 /* 23961 * You must send in 0, 1 or 2 all else is 23962 * invalid. 23963 */ 23964 error = EINVAL; 23965 } 23966 break; 23967 case TCP_RACK_DSACK_OPT: 23968 RACK_OPTS_INC(tcp_rack_dsack_opt); 23969 if (optval & 0x1) { 23970 rack->rc_rack_tmr_std_based = 1; 23971 } else { 23972 rack->rc_rack_tmr_std_based = 0; 23973 } 23974 if (optval & 0x2) { 23975 rack->rc_rack_use_dsack = 1; 23976 } else { 23977 rack->rc_rack_use_dsack = 0; 23978 } 23979 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); 23980 break; 23981 case TCP_RACK_PACING_DIVISOR: 23982 RACK_OPTS_INC(tcp_rack_pacing_divisor); 23983 if (optval == 0) { 23984 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; 23985 } else { 23986 if (optval < RL_MIN_DIVISOR) 23987 rack->r_ctl.pace_len_divisor = RL_MIN_DIVISOR; 23988 else 23989 rack->r_ctl.pace_len_divisor = optval; 23990 } 23991 break; 23992 case TCP_RACK_HI_BETA: 23993 RACK_OPTS_INC(tcp_rack_hi_beta); 23994 if (optval > 0) { 23995 rack->rack_hibeta = 1; 23996 if ((optval >= 50) && 23997 (optval <= 100)) { 23998 /* 23999 * User wants to set a custom beta. 24000 */ 24001 rack->r_ctl.saved_hibeta = optval; 24002 if (rack->rc_pacing_cc_set) 24003 rack_undo_cc_pacing(rack); 24004 rack->r_ctl.rc_saved_beta.beta = optval; 24005 } 24006 if (rack->rc_pacing_cc_set == 0) 24007 rack_set_cc_pacing(rack); 24008 } else { 24009 rack->rack_hibeta = 0; 24010 if (rack->rc_pacing_cc_set) 24011 rack_undo_cc_pacing(rack); 24012 } 24013 break; 24014 case TCP_RACK_PACING_BETA: 24015 error = EINVAL; 24016 break; 24017 case TCP_RACK_TIMER_SLOP: 24018 RACK_OPTS_INC(tcp_rack_timer_slop); 24019 rack->r_ctl.timer_slop = optval; 24020 if (rack->rc_tp->t_srtt) { 24021 /* 24022 * If we have an SRTT lets update t_rxtcur 24023 * to have the new slop. 24024 */ 24025 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 24026 rack_rto_min, rack_rto_max, 24027 rack->r_ctl.timer_slop); 24028 } 24029 break; 24030 case TCP_RACK_PACING_BETA_ECN: 24031 RACK_OPTS_INC(tcp_rack_beta_ecn); 24032 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 24033 /* This only works for newreno. 
*/
24034 error = EINVAL;
24035 break;
24036 }
24037 if (rack->rc_pacing_cc_set) {
24038 /*
24039 * Set them into the real CC module
24040 * what's in the rack pcb are the old values
24041 * to be used on restoral.
24042 */
24043 sopt.sopt_dir = SOPT_SET;
24044 opt.name = CC_NEWRENO_BETA_ECN;
24045 opt.val = optval;
24046 if (CC_ALGO(tp)->ctl_output != NULL)
24047 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
24048 else
24049 error = ENOENT;
24050 } else {
24051 /*
24052 * Not pacing yet so set it into our local
24053 * rack pcb storage.
24054 */
24055 rack->r_ctl.rc_saved_beta.beta_ecn = optval;
24056 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED;
24057 }
24058 break;
24059 case TCP_DEFER_OPTIONS:
24060 RACK_OPTS_INC(tcp_defer_opt);
24061 if (optval) {
24062 if (rack->gp_ready) {
24063 /* Too late */
24064 error = EINVAL;
24065 break;
24066 }
24067 rack->defer_options = 1;
24068 } else
24069 rack->defer_options = 0;
24070 break;
24071 case TCP_RACK_MEASURE_CNT:
24072 RACK_OPTS_INC(tcp_rack_measure_cnt);
24073 if (optval && (optval <= 0xff)) {
24074 rack->r_ctl.req_measurements = optval;
24075 } else
24076 error = EINVAL;
24077 break;
24078 case TCP_REC_ABC_VAL:
24079 RACK_OPTS_INC(tcp_rec_abc_val);
24080 if (optval > 0)
24081 rack->r_use_labc_for_rec = 1;
24082 else
24083 rack->r_use_labc_for_rec = 0;
24084 break;
24085 case TCP_RACK_ABC_VAL:
24086 RACK_OPTS_INC(tcp_rack_abc_val);
24087 if ((optval > 0) && (optval < 255))
24088 rack->rc_labc = optval;
24089 else
24090 error = EINVAL;
24091 break;
24092 case TCP_HDWR_UP_ONLY:
24093 RACK_OPTS_INC(tcp_pacing_up_only);
24094 if (optval)
24095 rack->r_up_only = 1;
24096 else
24097 rack->r_up_only = 0;
24098 break;
24099 case TCP_FILLCW_RATE_CAP: /* URL:fillcw_cap */
24100 RACK_OPTS_INC(tcp_fillcw_rate_cap);
24101 rack->r_ctl.fillcw_cap = loptval;
24102 break;
24103 case TCP_PACING_RATE_CAP:
24104 RACK_OPTS_INC(tcp_pacing_rate_cap);
24105 if ((rack->dgp_on == 1) &&
24106 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) {
24107 /*
24108 * If we are doing DGP we need to switch
24109 * to using the pacing limit.
24110 */
24111 if (tcp_can_enable_pacing() == 0) {
24112 error = ENOSPC;
24113 break;
24114 }
24115 /*
24116 * Now change up the flags and counts to be correct.
24117 */
24118 rack->r_ctl.pacing_method |= RACK_REG_PACING;
24119 tcp_dec_dgp_pacing_cnt();
24120 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING;
24121 }
24122 rack->r_ctl.bw_rate_cap = loptval;
24123 break;
24124 case TCP_HYBRID_PACING:
24125 if (hybrid == NULL) {
24126 error = EINVAL;
24127 break;
24128 }
24129 if (rack->r_ctl.side_chan_dis_mask & HYBRID_DIS_MASK) {
24130 error = EPERM;
24131 break;
24132 }
24133 error = process_hybrid_pacing(rack, hybrid);
24134 break;
24135 case TCP_SIDECHAN_DIS: /* URL:scodm */
24136 if (optval)
24137 rack->r_ctl.side_chan_dis_mask = optval;
24138 else
24139 rack->r_ctl.side_chan_dis_mask = 0;
24140 break;
24141 case TCP_RACK_PROFILE:
24142 RACK_OPTS_INC(tcp_profile);
24143 error = rack_set_profile(rack, optval);
24144 break;
24145 case TCP_USE_CMP_ACKS:
24146 RACK_OPTS_INC(tcp_use_cmp_acks);
24147 if ((optval == 0) && (tp->t_flags2 & TF2_MBUF_ACKCMP)) { /* You can't turn it off once it's on!
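 * Presumably because compressed-ack mbufs may already be queued for
 * this connection once TF2_MBUF_ACKCMP is set, so clearing the flag
 * here would not be safe.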
*/
24149 error = EINVAL;
24150 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) {
24151 rack->r_use_cmp_ack = 1;
24152 rack->r_mbuf_queue = 1;
24153 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
24154 }
24155 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
24156 tp->t_flags2 |= TF2_MBUF_ACKCMP;
24157 break;
24158 case TCP_SHARED_CWND_TIME_LIMIT:
24159 RACK_OPTS_INC(tcp_lscwnd);
24160 if (optval)
24161 rack->r_limit_scw = 1;
24162 else
24163 rack->r_limit_scw = 0;
24164 break;
24165 case TCP_RACK_DGP_IN_REC:
24166 error = EINVAL;
24167 break;
24168 case TCP_POLICER_DETECT: /* URL:pol_det */
24169 RACK_OPTS_INC(tcp_pol_detect);
24170 rack_translate_policer_detect(rack, optval);
24171 break;
24172 case TCP_POLICER_MSS:
24173 RACK_OPTS_INC(tcp_pol_mss);
24174 rack->r_ctl.policer_del_mss = (uint8_t)optval;
24175 if (optval & 0x00000100) {
24176 /*
24177 * Value is setup like so:
24178 * VVVV VVVV VVVV VVVV VVVV VVAI MMMM MMMM
24179 * Where MMMM MMMM is the MSS setting,
24180 * I (9th bit) is the positive value that
24181 * says it is being set (if it is 0 then the
24182 * upper bits 11 - 32 have no meaning).
24183 * This allows setting it off with
24184 * 0x000001MM.
24185 *
24186 * The 10th bit (A) is used to turn on the
24187 * alternate median (not the expanded one).
24188 *
24189 */
24190 rack->r_ctl.pol_bw_comp = (optval >> 10);
24191 }
24192 if (optval & 0x00000200) {
24193 rack->r_ctl.policer_alt_median = 1;
24194 } else {
24195 rack->r_ctl.policer_alt_median = 0;
24196 }
24197 break;
24198 case TCP_RACK_PACE_TO_FILL:
24199 RACK_OPTS_INC(tcp_fillcw);
24200 if (optval == 0)
24201 rack->rc_pace_to_cwnd = 0;
24202 else {
24203 rack->rc_pace_to_cwnd = 1;
24204 }
24205 if ((optval >= rack_gp_rtt_maxmul) &&
24206 rack_gp_rtt_maxmul &&
24207 (optval < 0xf)) {
24208 rack->rc_pace_fill_if_rttin_range = 1;
24209 rack->rtt_limit_mul = optval;
24210 } else {
24211 rack->rc_pace_fill_if_rttin_range = 0;
24212 rack->rtt_limit_mul = 0;
24213 }
24214 break;
24215 case TCP_RACK_NO_PUSH_AT_MAX:
24216 RACK_OPTS_INC(tcp_npush);
24217 if (optval == 0)
24218 rack->r_ctl.rc_no_push_at_mrtt = 0;
24219 else if (optval < 0xff)
24220 rack->r_ctl.rc_no_push_at_mrtt = optval;
24221 else
24222 error = EINVAL;
24223 break;
24224 case TCP_SHARED_CWND_ENABLE:
24225 RACK_OPTS_INC(tcp_rack_scwnd);
24226 if (optval == 0)
24227 rack->rack_enable_scwnd = 0;
24228 else
24229 rack->rack_enable_scwnd = 1;
24230 break;
24231 case TCP_RACK_MBUF_QUEUE:
24232 /* Do we now use the LRO mbuf-queue feature? */
24233 RACK_OPTS_INC(tcp_rack_mbufq);
24234 if (optval || rack->r_use_cmp_ack)
24235 rack->r_mbuf_queue = 1;
24236 else
24237 rack->r_mbuf_queue = 0;
24238 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
24239 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
24240 else
24241 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
24242 break;
24243 case TCP_RACK_NONRXT_CFG_RATE:
24244 RACK_OPTS_INC(tcp_rack_cfg_rate);
24245 if (optval == 0)
24246 rack->rack_rec_nonrxt_use_cr = 0;
24247 else
24248 rack->rack_rec_nonrxt_use_cr = 1;
24249 break;
24250 case TCP_NO_PRR:
24251 RACK_OPTS_INC(tcp_rack_noprr);
24252 if (optval == 0)
24253 rack->rack_no_prr = 0;
24254 else if (optval == 1)
24255 rack->rack_no_prr = 1;
24256 else if (optval == 2)
24257 rack->no_prr_addback = 1;
24258 else
24259 error = EINVAL;
24260 break;
24261 case RACK_CSPR_IS_FCC: /* URL:csprisfcc */
24262 if (optval > 0)
24263 rack->cspr_is_fcc = 1;
24264 else
24265 rack->cspr_is_fcc = 0;
24266 break;
24267 case TCP_TIMELY_DYN_ADJ:
24268 RACK_OPTS_INC(tcp_timely_dyn);
24269 if
(optval == 0) 24270 rack->rc_gp_dyn_mul = 0; 24271 else { 24272 rack->rc_gp_dyn_mul = 1; 24273 if (optval >= 100) { 24274 /* 24275 * If the user sets something 100 or more 24276 * its the gp_ca value. 24277 */ 24278 rack->r_ctl.rack_per_of_gp_ca = optval; 24279 } 24280 } 24281 break; 24282 case TCP_RACK_DO_DETECTION: 24283 error = EINVAL; 24284 break; 24285 case TCP_RACK_TLP_USE: 24286 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 24287 error = EINVAL; 24288 break; 24289 } 24290 RACK_OPTS_INC(tcp_tlp_use); 24291 rack->rack_tlp_threshold_use = optval; 24292 break; 24293 case TCP_RACK_TLP_REDUCE: 24294 /* RACK TLP cwnd reduction (bool) */ 24295 RACK_OPTS_INC(tcp_rack_tlp_reduce); 24296 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 24297 break; 24298 /* Pacing related ones */ 24299 case TCP_RACK_PACE_ALWAYS: 24300 /* 24301 * zero is old rack method, 1 is new 24302 * method using a pacing rate. 24303 */ 24304 RACK_OPTS_INC(tcp_rack_pace_always); 24305 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 24306 error = EPERM; 24307 break; 24308 } 24309 if (optval > 0) { 24310 if (rack->rc_always_pace) { 24311 error = EALREADY; 24312 break; 24313 } else if (tcp_can_enable_pacing()) { 24314 rack->r_ctl.pacing_method |= RACK_REG_PACING; 24315 rack->rc_always_pace = 1; 24316 if (rack->rack_hibeta) 24317 rack_set_cc_pacing(rack); 24318 } 24319 else { 24320 error = ENOSPC; 24321 break; 24322 } 24323 } else { 24324 if (rack->rc_always_pace == 1) { 24325 rack_remove_pacing(rack); 24326 } 24327 } 24328 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 24329 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 24330 else 24331 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 24332 /* A rate may be set irate or other, if so set seg size */ 24333 rack_update_seg(rack); 24334 break; 24335 case TCP_BBR_RACK_INIT_RATE: 24336 RACK_OPTS_INC(tcp_initial_rate); 24337 val = optval; 24338 /* Change from kbits per second to bytes per second */ 24339 val *= 1000; 24340 val /= 8; 24341 rack->r_ctl.init_rate = val; 24342 if (rack->rc_always_pace) 24343 rack_update_seg(rack); 24344 break; 24345 case TCP_BBR_IWINTSO: 24346 error = EINVAL; 24347 break; 24348 case TCP_RACK_FORCE_MSEG: 24349 RACK_OPTS_INC(tcp_rack_force_max_seg); 24350 if (optval) 24351 rack->rc_force_max_seg = 1; 24352 else 24353 rack->rc_force_max_seg = 0; 24354 break; 24355 case TCP_RACK_PACE_MIN_SEG: 24356 RACK_OPTS_INC(tcp_rack_min_seg); 24357 rack->r_ctl.rc_user_set_min_segs = (0x0000ffff & optval); 24358 rack_set_pace_segments(tp, rack, __LINE__, NULL); 24359 break; 24360 case TCP_RACK_PACE_MAX_SEG: 24361 /* Max segments size in a pace in bytes */ 24362 RACK_OPTS_INC(tcp_rack_max_seg); 24363 if ((rack->dgp_on == 1) && 24364 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { 24365 /* 24366 * If we set a max-seg and are doing DGP then 24367 * we now fall under the pacing limits not the 24368 * DGP ones. 24369 */ 24370 if (tcp_can_enable_pacing() == 0) { 24371 error = ENOSPC; 24372 break; 24373 } 24374 /* 24375 * Now change up the flags and counts to be correct. 
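 * i.e. account this connection under the regular pacing counter
 * (RACK_REG_PACING) and release the slot it held in the DGP pacer
 * count.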
24376 */ 24377 rack->r_ctl.pacing_method |= RACK_REG_PACING; 24378 tcp_dec_dgp_pacing_cnt(); 24379 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 24380 } 24381 if (optval <= MAX_USER_SET_SEG) 24382 rack->rc_user_set_max_segs = optval; 24383 else 24384 rack->rc_user_set_max_segs = MAX_USER_SET_SEG; 24385 rack_set_pace_segments(tp, rack, __LINE__, NULL); 24386 break; 24387 case TCP_RACK_PACE_RATE_REC: 24388 /* Set the fixed pacing rate in Bytes per second ca */ 24389 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 24390 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 24391 error = EPERM; 24392 break; 24393 } 24394 if (rack->dgp_on) { 24395 /* 24396 * We are already pacing another 24397 * way. 24398 */ 24399 error = EBUSY; 24400 break; 24401 } 24402 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 24403 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 24404 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 24405 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 24406 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 24407 rack->use_fixed_rate = 1; 24408 if (rack->rack_hibeta) 24409 rack_set_cc_pacing(rack); 24410 rack_log_pacing_delay_calc(rack, 24411 rack->r_ctl.rc_fixed_pacing_rate_ss, 24412 rack->r_ctl.rc_fixed_pacing_rate_ca, 24413 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 24414 __LINE__, NULL,0); 24415 break; 24416 24417 case TCP_RACK_PACE_RATE_SS: 24418 /* Set the fixed pacing rate in Bytes per second ca */ 24419 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 24420 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 24421 error = EPERM; 24422 break; 24423 } 24424 if (rack->dgp_on) { 24425 /* 24426 * We are already pacing another 24427 * way. 24428 */ 24429 error = EBUSY; 24430 break; 24431 } 24432 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 24433 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 24434 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 24435 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 24436 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 24437 rack->use_fixed_rate = 1; 24438 if (rack->rack_hibeta) 24439 rack_set_cc_pacing(rack); 24440 rack_log_pacing_delay_calc(rack, 24441 rack->r_ctl.rc_fixed_pacing_rate_ss, 24442 rack->r_ctl.rc_fixed_pacing_rate_ca, 24443 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 24444 __LINE__, NULL, 0); 24445 break; 24446 24447 case TCP_RACK_PACE_RATE_CA: 24448 /* Set the fixed pacing rate in Bytes per second ca */ 24449 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 24450 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 24451 error = EPERM; 24452 break; 24453 } 24454 if (rack->dgp_on) { 24455 /* 24456 * We are already pacing another 24457 * way. 
24458 */ 24459 error = EBUSY; 24460 break; 24461 } 24462 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 24463 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 24464 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 24465 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 24466 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 24467 rack->use_fixed_rate = 1; 24468 if (rack->rack_hibeta) 24469 rack_set_cc_pacing(rack); 24470 rack_log_pacing_delay_calc(rack, 24471 rack->r_ctl.rc_fixed_pacing_rate_ss, 24472 rack->r_ctl.rc_fixed_pacing_rate_ca, 24473 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 24474 __LINE__, NULL, 0); 24475 break; 24476 case TCP_RACK_GP_INCREASE_REC: 24477 RACK_OPTS_INC(tcp_gp_inc_rec); 24478 rack->r_ctl.rack_per_of_gp_rec = optval; 24479 rack_log_pacing_delay_calc(rack, 24480 rack->r_ctl.rack_per_of_gp_ss, 24481 rack->r_ctl.rack_per_of_gp_ca, 24482 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 24483 __LINE__, NULL, 0); 24484 break; 24485 case TCP_RACK_GP_INCREASE_CA: 24486 RACK_OPTS_INC(tcp_gp_inc_ca); 24487 ca = optval; 24488 if (ca < 100) { 24489 /* 24490 * We don't allow any reduction 24491 * over the GP b/w. 24492 */ 24493 error = EINVAL; 24494 break; 24495 } 24496 rack->r_ctl.rack_per_of_gp_ca = ca; 24497 rack_log_pacing_delay_calc(rack, 24498 rack->r_ctl.rack_per_of_gp_ss, 24499 rack->r_ctl.rack_per_of_gp_ca, 24500 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 24501 __LINE__, NULL, 0); 24502 break; 24503 case TCP_RACK_GP_INCREASE_SS: 24504 RACK_OPTS_INC(tcp_gp_inc_ss); 24505 ss = optval; 24506 if (ss < 100) { 24507 /* 24508 * We don't allow any reduction 24509 * over the GP b/w. 24510 */ 24511 error = EINVAL; 24512 break; 24513 } 24514 rack->r_ctl.rack_per_of_gp_ss = ss; 24515 rack_log_pacing_delay_calc(rack, 24516 rack->r_ctl.rack_per_of_gp_ss, 24517 rack->r_ctl.rack_per_of_gp_ca, 24518 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 24519 __LINE__, NULL, 0); 24520 break; 24521 case TCP_RACK_RR_CONF: 24522 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 24523 if (optval && optval <= 3) 24524 rack->r_rr_config = optval; 24525 else 24526 rack->r_rr_config = 0; 24527 break; 24528 case TCP_PACING_DND: /* URL:dnd */ 24529 if (optval > 0) 24530 rack->rc_pace_dnd = 1; 24531 else 24532 rack->rc_pace_dnd = 0; 24533 break; 24534 case TCP_HDWR_RATE_CAP: 24535 RACK_OPTS_INC(tcp_hdwr_rate_cap); 24536 if (optval) { 24537 if (rack->r_rack_hw_rate_caps == 0) 24538 rack->r_rack_hw_rate_caps = 1; 24539 else 24540 error = EALREADY; 24541 } else { 24542 rack->r_rack_hw_rate_caps = 0; 24543 } 24544 break; 24545 case TCP_DGP_UPPER_BOUNDS: 24546 { 24547 uint8_t val; 24548 val = optval & 0x0000ff; 24549 rack->r_ctl.rack_per_upper_bound_ca = val; 24550 val = (optval >> 16) & 0x0000ff; 24551 rack->r_ctl.rack_per_upper_bound_ss = val; 24552 break; 24553 } 24554 case TCP_SS_EEXIT: /* URL:eexit */ 24555 if (optval > 0) { 24556 rack->r_ctl.gp_rnd_thresh = optval & 0x0ff; 24557 if (optval & 0x10000) { 24558 rack->r_ctl.gate_to_fs = 1; 24559 } else { 24560 rack->r_ctl.gate_to_fs = 0; 24561 } 24562 if (optval & 0x20000) { 24563 rack->r_ctl.use_gp_not_last = 1; 24564 } else { 24565 rack->r_ctl.use_gp_not_last = 0; 24566 } 24567 if (optval & 0xfffc0000) { 24568 uint32_t v; 24569 24570 v = (optval >> 18) & 0x00003fff; 24571 if (v >= 1000) 24572 rack->r_ctl.gp_gain_req = v; 24573 } 24574 } else { 24575 /* We do not do ss early exit at all */ 24576 rack->rc_initial_ss_comp = 1; 24577 rack->r_ctl.gp_rnd_thresh = 0; 24578 } 24579 break; 24580 case TCP_RACK_SPLIT_LIMIT: 24581 RACK_OPTS_INC(tcp_split_limit); 24582 rack->r_ctl.rc_split_limit = 
optval; 24583 break; 24584 case TCP_BBR_HDWR_PACE: 24585 RACK_OPTS_INC(tcp_hdwr_pacing); 24586 if (optval){ 24587 if (rack->rack_hdrw_pacing == 0) { 24588 rack->rack_hdw_pace_ena = 1; 24589 rack->rack_attempt_hdwr_pace = 0; 24590 } else 24591 error = EALREADY; 24592 } else { 24593 rack->rack_hdw_pace_ena = 0; 24594 #ifdef RATELIMIT 24595 if (rack->r_ctl.crte != NULL) { 24596 rack->rack_hdrw_pacing = 0; 24597 rack->rack_attempt_hdwr_pace = 0; 24598 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 24599 rack->r_ctl.crte = NULL; 24600 } 24601 #endif 24602 } 24603 break; 24604 /* End Pacing related ones */ 24605 case TCP_RACK_PRR_SENDALOT: 24606 /* Allow PRR to send more than one seg */ 24607 RACK_OPTS_INC(tcp_rack_prr_sendalot); 24608 rack->r_ctl.rc_prr_sendalot = optval; 24609 break; 24610 case TCP_RACK_MIN_TO: 24611 /* Minimum time between rack t-o's in ms */ 24612 RACK_OPTS_INC(tcp_rack_min_to); 24613 rack->r_ctl.rc_min_to = optval; 24614 break; 24615 case TCP_RACK_EARLY_SEG: 24616 /* If early recovery max segments */ 24617 RACK_OPTS_INC(tcp_rack_early_seg); 24618 rack->r_ctl.rc_early_recovery_segs = optval; 24619 break; 24620 case TCP_RACK_ENABLE_HYSTART: 24621 { 24622 if (optval) { 24623 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 24624 if (rack_do_hystart > RACK_HYSTART_ON) 24625 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 24626 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 24627 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 24628 } else { 24629 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 24630 } 24631 } 24632 break; 24633 case TCP_RACK_REORD_THRESH: 24634 /* RACK reorder threshold (shift amount) */ 24635 RACK_OPTS_INC(tcp_rack_reord_thresh); 24636 if ((optval > 0) && (optval < 31)) 24637 rack->r_ctl.rc_reorder_shift = optval; 24638 else 24639 error = EINVAL; 24640 break; 24641 case TCP_RACK_REORD_FADE: 24642 /* Does reordering fade after ms time */ 24643 RACK_OPTS_INC(tcp_rack_reord_fade); 24644 rack->r_ctl.rc_reorder_fade = optval; 24645 break; 24646 case TCP_RACK_TLP_THRESH: 24647 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 24648 RACK_OPTS_INC(tcp_rack_tlp_thresh); 24649 if (optval) 24650 rack->r_ctl.rc_tlp_threshold = optval; 24651 else 24652 error = EINVAL; 24653 break; 24654 case TCP_BBR_USE_RACK_RR: 24655 RACK_OPTS_INC(tcp_rack_rr); 24656 if (optval) 24657 rack->use_rack_rr = 1; 24658 else 24659 rack->use_rack_rr = 0; 24660 break; 24661 case TCP_RACK_PKT_DELAY: 24662 /* RACK added ms i.e. rack-rtt + reord + N */ 24663 RACK_OPTS_INC(tcp_rack_pkt_delay); 24664 rack->r_ctl.rc_pkt_delay = optval; 24665 break; 24666 case TCP_DELACK: 24667 RACK_OPTS_INC(tcp_rack_delayed_ack); 24668 if (optval == 0) 24669 tp->t_delayed_ack = 0; 24670 else 24671 tp->t_delayed_ack = 1; 24672 if (tp->t_flags & TF_DELACK) { 24673 tp->t_flags &= ~TF_DELACK; 24674 tp->t_flags |= TF_ACKNOW; 24675 NET_EPOCH_ENTER(et); 24676 rack_output(tp); 24677 NET_EPOCH_EXIT(et); 24678 } 24679 break; 24680 24681 case TCP_BBR_RACK_RTT_USE: 24682 RACK_OPTS_INC(tcp_rack_rtt_use); 24683 if ((optval != USE_RTT_HIGH) && 24684 (optval != USE_RTT_LOW) && 24685 (optval != USE_RTT_AVG)) 24686 error = EINVAL; 24687 else 24688 rack->r_ctl.rc_rate_sample_method = optval; 24689 break; 24690 case TCP_HONOR_HPTS_MIN: 24691 RACK_OPTS_INC(tcp_honor_hpts); 24692 if (optval) { 24693 rack->r_use_hpts_min = 1; 24694 /* 24695 * Must be between 2 - 80% to be a reduction else 24696 * we keep the default (10%). 
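 * e.g. setting the option to 25 (setsockopt(s, IPPROTO_TCP,
 * TCP_HONOR_HPTS_MIN, ...)) caps max_reduction at 25%; values
 * outside 2-80 leave the default in place.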
24697 */ 24698 if ((optval > 1) && (optval <= 80)) { 24699 rack->r_ctl.max_reduction = optval; 24700 } 24701 } else 24702 rack->r_use_hpts_min = 0; 24703 break; 24704 case TCP_REC_IS_DYN: /* URL:dynrec */ 24705 RACK_OPTS_INC(tcp_dyn_rec); 24706 if (optval) 24707 rack->rc_gp_no_rec_chg = 1; 24708 else 24709 rack->rc_gp_no_rec_chg = 0; 24710 break; 24711 case TCP_NO_TIMELY: 24712 RACK_OPTS_INC(tcp_notimely); 24713 if (optval) { 24714 rack->rc_skip_timely = 1; 24715 rack->r_ctl.rack_per_of_gp_rec = 90; 24716 rack->r_ctl.rack_per_of_gp_ca = 100; 24717 rack->r_ctl.rack_per_of_gp_ss = 250; 24718 } else { 24719 rack->rc_skip_timely = 0; 24720 } 24721 break; 24722 case TCP_GP_USE_LTBW: 24723 if (optval == 0) { 24724 rack->use_lesser_lt_bw = 0; 24725 rack->dis_lt_bw = 1; 24726 } else if (optval == 1) { 24727 rack->use_lesser_lt_bw = 1; 24728 rack->dis_lt_bw = 0; 24729 } else if (optval == 2) { 24730 rack->use_lesser_lt_bw = 0; 24731 rack->dis_lt_bw = 0; 24732 } 24733 break; 24734 case TCP_DATA_AFTER_CLOSE: 24735 RACK_OPTS_INC(tcp_data_after_close); 24736 if (optval) 24737 rack->rc_allow_data_af_clo = 1; 24738 else 24739 rack->rc_allow_data_af_clo = 0; 24740 break; 24741 default: 24742 break; 24743 } 24744 tcp_log_socket_option(tp, sopt_name, optval, error); 24745 return (error); 24746 } 24747 24748 static void 24749 rack_inherit(struct tcpcb *tp, struct inpcb *parent) 24750 { 24751 /* 24752 * A new connection has been created (tp) and 24753 * the parent is the inpcb given. We want to 24754 * apply a read-lock to the parent (we are already 24755 * holding a write lock on the tp) and copy anything 24756 * out of the rack specific data as long as its tfb is 24757 * the same as ours i.e. we are the same stack. Otherwise 24758 * we just return. 24759 */ 24760 struct tcpcb *par; 24761 struct tcp_rack *dest, *src; 24762 int cnt = 0; 24763 24764 par = intotcpcb(parent); 24765 if (par->t_fb != tp->t_fb) { 24766 /* Not the same stack */ 24767 tcp_log_socket_option(tp, 0, 0, 1); 24768 return; 24769 } 24770 /* Ok if we reach here lets setup the two rack pointers */ 24771 dest = (struct tcp_rack *)tp->t_fb_ptr; 24772 src = (struct tcp_rack *)par->t_fb_ptr; 24773 if ((src == NULL) || (dest == NULL)) { 24774 /* Huh? */ 24775 tcp_log_socket_option(tp, 0, 0, 2); 24776 return; 24777 } 24778 /* Now copy out anything we wish to inherit i.e. 
things in socket-options */ 24779 /* TCP_RACK_PROFILE we can't know but we can set DGP if its on */ 24780 if ((src->dgp_on) && (dest->dgp_on == 0)) { 24781 /* Profile 1 had to be set via sock opt */ 24782 rack_set_dgp(dest); 24783 cnt++; 24784 } 24785 /* TCP_RACK_SET_RXT_OPTIONS */ 24786 if (dest->full_size_rxt != src->full_size_rxt) { 24787 dest->full_size_rxt = src->full_size_rxt; 24788 cnt++; 24789 } 24790 if (dest->shape_rxt_to_pacing_min != src->shape_rxt_to_pacing_min) { 24791 dest->shape_rxt_to_pacing_min = src->shape_rxt_to_pacing_min; 24792 cnt++; 24793 } 24794 /* TCP_RACK_DSACK_OPT */ 24795 if (dest->rc_rack_tmr_std_based != src->rc_rack_tmr_std_based) { 24796 dest->rc_rack_tmr_std_based = src->rc_rack_tmr_std_based; 24797 cnt++; 24798 } 24799 if (dest->rc_rack_use_dsack != src->rc_rack_use_dsack) { 24800 dest->rc_rack_use_dsack = src->rc_rack_use_dsack; 24801 cnt++; 24802 } 24803 /* TCP_RACK_PACING_DIVISOR */ 24804 if (dest->r_ctl.pace_len_divisor != src->r_ctl.pace_len_divisor) { 24805 dest->r_ctl.pace_len_divisor = src->r_ctl.pace_len_divisor; 24806 cnt++; 24807 } 24808 /* TCP_RACK_HI_BETA */ 24809 if (src->rack_hibeta != dest->rack_hibeta) { 24810 cnt++; 24811 if (src->rack_hibeta) { 24812 dest->r_ctl.rc_saved_beta.beta = src->r_ctl.rc_saved_beta.beta; 24813 dest->rack_hibeta = 1; 24814 } else { 24815 dest->rack_hibeta = 0; 24816 } 24817 } 24818 /* TCP_RACK_TIMER_SLOP */ 24819 if (dest->r_ctl.timer_slop != src->r_ctl.timer_slop) { 24820 dest->r_ctl.timer_slop = src->r_ctl.timer_slop; 24821 cnt++; 24822 } 24823 /* TCP_RACK_PACING_BETA_ECN */ 24824 if (dest->r_ctl.rc_saved_beta.beta_ecn != src->r_ctl.rc_saved_beta.beta_ecn) { 24825 dest->r_ctl.rc_saved_beta.beta_ecn = src->r_ctl.rc_saved_beta.beta_ecn; 24826 cnt++; 24827 } 24828 if (dest->r_ctl.rc_saved_beta.newreno_flags != src->r_ctl.rc_saved_beta.newreno_flags) { 24829 dest->r_ctl.rc_saved_beta.newreno_flags = src->r_ctl.rc_saved_beta.newreno_flags; 24830 cnt++; 24831 } 24832 /* We do not do TCP_DEFER_OPTIONS */ 24833 /* TCP_RACK_MEASURE_CNT */ 24834 if (dest->r_ctl.req_measurements != src->r_ctl.req_measurements) { 24835 dest->r_ctl.req_measurements = src->r_ctl.req_measurements; 24836 cnt++; 24837 } 24838 /* TCP_HDWR_UP_ONLY */ 24839 if (dest->r_up_only != src->r_up_only) { 24840 dest->r_up_only = src->r_up_only; 24841 cnt++; 24842 } 24843 /* TCP_FILLCW_RATE_CAP */ 24844 if (dest->r_ctl.fillcw_cap != src->r_ctl.fillcw_cap) { 24845 dest->r_ctl.fillcw_cap = src->r_ctl.fillcw_cap; 24846 cnt++; 24847 } 24848 /* TCP_PACING_RATE_CAP */ 24849 if (dest->r_ctl.bw_rate_cap != src->r_ctl.bw_rate_cap) { 24850 dest->r_ctl.bw_rate_cap = src->r_ctl.bw_rate_cap; 24851 cnt++; 24852 } 24853 /* A listener can't set TCP_HYBRID_PACING */ 24854 /* TCP_SIDECHAN_DIS */ 24855 if (dest->r_ctl.side_chan_dis_mask != src->r_ctl.side_chan_dis_mask) { 24856 dest->r_ctl.side_chan_dis_mask = src->r_ctl.side_chan_dis_mask; 24857 cnt++; 24858 } 24859 /* TCP_SHARED_CWND_TIME_LIMIT */ 24860 if (dest->r_limit_scw != src->r_limit_scw) { 24861 dest->r_limit_scw = src->r_limit_scw; 24862 cnt++; 24863 } 24864 /* TCP_POLICER_DETECT */ 24865 if (dest->r_ctl.policer_rxt_threshold != src->r_ctl.policer_rxt_threshold) { 24866 dest->r_ctl.policer_rxt_threshold = src->r_ctl.policer_rxt_threshold; 24867 cnt++; 24868 } 24869 if (dest->r_ctl.policer_avg_threshold != src->r_ctl.policer_avg_threshold) { 24870 dest->r_ctl.policer_avg_threshold = src->r_ctl.policer_avg_threshold; 24871 cnt++; 24872 } 24873 if (dest->r_ctl.policer_med_threshold != 
src->r_ctl.policer_med_threshold) { 24874 dest->r_ctl.policer_med_threshold = src->r_ctl.policer_med_threshold; 24875 cnt++; 24876 } 24877 if (dest->policer_detect_on != src->policer_detect_on) { 24878 dest->policer_detect_on = src->policer_detect_on; 24879 cnt++; 24880 } 24881 24882 if (dest->r_ctl.saved_policer_val != src->r_ctl.saved_policer_val) { 24883 dest->r_ctl.saved_policer_val = src->r_ctl.saved_policer_val; 24884 cnt++; 24885 } 24886 /* TCP_POLICER_MSS */ 24887 if (dest->r_ctl.policer_del_mss != src->r_ctl.policer_del_mss) { 24888 dest->r_ctl.policer_del_mss = src->r_ctl.policer_del_mss; 24889 cnt++; 24890 } 24891 24892 if (dest->r_ctl.pol_bw_comp != src->r_ctl.pol_bw_comp) { 24893 dest->r_ctl.pol_bw_comp = src->r_ctl.pol_bw_comp; 24894 cnt++; 24895 } 24896 24897 if (dest->r_ctl.policer_alt_median != src->r_ctl.policer_alt_median) { 24898 dest->r_ctl.policer_alt_median = src->r_ctl.policer_alt_median; 24899 cnt++; 24900 } 24901 /* TCP_RACK_PACE_TO_FILL */ 24902 if (dest->rc_pace_to_cwnd != src->rc_pace_to_cwnd) { 24903 dest->rc_pace_to_cwnd = src->rc_pace_to_cwnd; 24904 cnt++; 24905 } 24906 if (dest->rc_pace_fill_if_rttin_range != src->rc_pace_fill_if_rttin_range) { 24907 dest->rc_pace_fill_if_rttin_range = src->rc_pace_fill_if_rttin_range; 24908 cnt++; 24909 } 24910 if (dest->rtt_limit_mul != src->rtt_limit_mul) { 24911 dest->rtt_limit_mul = src->rtt_limit_mul; 24912 cnt++; 24913 } 24914 /* TCP_RACK_NO_PUSH_AT_MAX */ 24915 if (dest->r_ctl.rc_no_push_at_mrtt != src->r_ctl.rc_no_push_at_mrtt) { 24916 dest->r_ctl.rc_no_push_at_mrtt = src->r_ctl.rc_no_push_at_mrtt; 24917 cnt++; 24918 } 24919 /* TCP_SHARED_CWND_ENABLE */ 24920 if (dest->rack_enable_scwnd != src->rack_enable_scwnd) { 24921 dest->rack_enable_scwnd = src->rack_enable_scwnd; 24922 cnt++; 24923 } 24924 /* TCP_USE_CMP_ACKS */ 24925 if (dest->r_use_cmp_ack != src->r_use_cmp_ack) { 24926 dest->r_use_cmp_ack = src->r_use_cmp_ack; 24927 cnt++; 24928 } 24929 24930 if (dest->r_mbuf_queue != src->r_mbuf_queue) { 24931 dest->r_mbuf_queue = src->r_mbuf_queue; 24932 cnt++; 24933 } 24934 /* TCP_RACK_MBUF_QUEUE */ 24935 if (dest->r_mbuf_queue != src->r_mbuf_queue) { 24936 dest->r_mbuf_queue = src->r_mbuf_queue; 24937 cnt++; 24938 } 24939 if (dest->r_mbuf_queue || dest->rc_always_pace || dest->r_use_cmp_ack) { 24940 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 24941 } else { 24942 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 24943 } 24944 if (dest->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) { 24945 tp->t_flags2 |= TF2_MBUF_ACKCMP; 24946 } 24947 /* TCP_RACK_NONRXT_CFG_RATE */ 24948 if (dest->rack_rec_nonrxt_use_cr != src->rack_rec_nonrxt_use_cr) { 24949 dest->rack_rec_nonrxt_use_cr = src->rack_rec_nonrxt_use_cr; 24950 cnt++; 24951 } 24952 /* TCP_NO_PRR */ 24953 if (dest->rack_no_prr != src->rack_no_prr) { 24954 dest->rack_no_prr = src->rack_no_prr; 24955 cnt++; 24956 } 24957 if (dest->no_prr_addback != src->no_prr_addback) { 24958 dest->no_prr_addback = src->no_prr_addback; 24959 cnt++; 24960 } 24961 /* RACK_CSPR_IS_FCC */ 24962 if (dest->cspr_is_fcc != src->cspr_is_fcc) { 24963 dest->cspr_is_fcc = src->cspr_is_fcc; 24964 cnt++; 24965 } 24966 /* TCP_TIMELY_DYN_ADJ */ 24967 if (dest->rc_gp_dyn_mul != src->rc_gp_dyn_mul) { 24968 dest->rc_gp_dyn_mul = src->rc_gp_dyn_mul; 24969 cnt++; 24970 } 24971 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { 24972 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; 24973 cnt++; 24974 } 24975 /* TCP_RACK_TLP_USE */ 24976 if (dest->rack_tlp_threshold_use != 
src->rack_tlp_threshold_use) { 24977 dest->rack_tlp_threshold_use = src->rack_tlp_threshold_use; 24978 cnt++; 24979 } 24980 /* we don't allow inheritence of TCP_RACK_PACE_ALWAYS */ 24981 /* TCP_BBR_RACK_INIT_RATE */ 24982 if (dest->r_ctl.init_rate != src->r_ctl.init_rate) { 24983 dest->r_ctl.init_rate = src->r_ctl.init_rate; 24984 cnt++; 24985 } 24986 /* TCP_RACK_FORCE_MSEG */ 24987 if (dest->rc_force_max_seg != src->rc_force_max_seg) { 24988 dest->rc_force_max_seg = src->rc_force_max_seg; 24989 cnt++; 24990 } 24991 /* TCP_RACK_PACE_MIN_SEG */ 24992 if (dest->r_ctl.rc_user_set_min_segs != src->r_ctl.rc_user_set_min_segs) { 24993 dest->r_ctl.rc_user_set_min_segs = src->r_ctl.rc_user_set_min_segs; 24994 cnt++; 24995 } 24996 /* we don't allow TCP_RACK_PACE_MAX_SEG */ 24997 /* TCP_RACK_PACE_RATE_REC, TCP_RACK_PACE_RATE_SS, TCP_RACK_PACE_RATE_CA */ 24998 if (dest->r_ctl.rc_fixed_pacing_rate_ca != src->r_ctl.rc_fixed_pacing_rate_ca) { 24999 dest->r_ctl.rc_fixed_pacing_rate_ca = src->r_ctl.rc_fixed_pacing_rate_ca; 25000 cnt++; 25001 } 25002 if (dest->r_ctl.rc_fixed_pacing_rate_ss != src->r_ctl.rc_fixed_pacing_rate_ss) { 25003 dest->r_ctl.rc_fixed_pacing_rate_ss = src->r_ctl.rc_fixed_pacing_rate_ss; 25004 cnt++; 25005 } 25006 if (dest->r_ctl.rc_fixed_pacing_rate_rec != src->r_ctl.rc_fixed_pacing_rate_rec) { 25007 dest->r_ctl.rc_fixed_pacing_rate_rec = src->r_ctl.rc_fixed_pacing_rate_rec; 25008 cnt++; 25009 } 25010 /* TCP_RACK_GP_INCREASE_REC, TCP_RACK_GP_INCREASE_CA, TCP_RACK_GP_INCREASE_SS */ 25011 if (dest->r_ctl.rack_per_of_gp_rec != src->r_ctl.rack_per_of_gp_rec) { 25012 dest->r_ctl.rack_per_of_gp_rec = src->r_ctl.rack_per_of_gp_rec; 25013 cnt++; 25014 } 25015 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { 25016 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; 25017 cnt++; 25018 } 25019 25020 if (dest->r_ctl.rack_per_of_gp_ss != src->r_ctl.rack_per_of_gp_ss) { 25021 dest->r_ctl.rack_per_of_gp_ss = src->r_ctl.rack_per_of_gp_ss; 25022 cnt++; 25023 } 25024 /* TCP_RACK_RR_CONF */ 25025 if (dest->r_rr_config != src->r_rr_config) { 25026 dest->r_rr_config = src->r_rr_config; 25027 cnt++; 25028 } 25029 /* TCP_PACING_DND */ 25030 if (dest->rc_pace_dnd != src->rc_pace_dnd) { 25031 dest->rc_pace_dnd = src->rc_pace_dnd; 25032 cnt++; 25033 } 25034 /* TCP_HDWR_RATE_CAP */ 25035 if (dest->r_rack_hw_rate_caps != src->r_rack_hw_rate_caps) { 25036 dest->r_rack_hw_rate_caps = src->r_rack_hw_rate_caps; 25037 cnt++; 25038 } 25039 /* TCP_DGP_UPPER_BOUNDS */ 25040 if (dest->r_ctl.rack_per_upper_bound_ca != src->r_ctl.rack_per_upper_bound_ca) { 25041 dest->r_ctl.rack_per_upper_bound_ca = src->r_ctl.rack_per_upper_bound_ca; 25042 cnt++; 25043 } 25044 if (dest->r_ctl.rack_per_upper_bound_ss != src->r_ctl.rack_per_upper_bound_ss) { 25045 dest->r_ctl.rack_per_upper_bound_ss = src->r_ctl.rack_per_upper_bound_ss; 25046 cnt++; 25047 } 25048 /* TCP_SS_EEXIT */ 25049 if (dest->r_ctl.gp_rnd_thresh != src->r_ctl.gp_rnd_thresh) { 25050 dest->r_ctl.gp_rnd_thresh = src->r_ctl.gp_rnd_thresh; 25051 cnt++; 25052 } 25053 if (dest->r_ctl.gate_to_fs != src->r_ctl.gate_to_fs) { 25054 dest->r_ctl.gate_to_fs = src->r_ctl.gate_to_fs; 25055 cnt++; 25056 } 25057 if (dest->r_ctl.use_gp_not_last != src->r_ctl.use_gp_not_last) { 25058 dest->r_ctl.use_gp_not_last = src->r_ctl.use_gp_not_last; 25059 cnt++; 25060 } 25061 if (dest->r_ctl.gp_gain_req != src->r_ctl.gp_gain_req) { 25062 dest->r_ctl.gp_gain_req = src->r_ctl.gp_gain_req; 25063 cnt++; 25064 } 25065 /* TCP_BBR_HDWR_PACE */ 25066 if 
(dest->rack_hdw_pace_ena != src->rack_hdw_pace_ena) { 25067 dest->rack_hdw_pace_ena = src->rack_hdw_pace_ena; 25068 cnt++; 25069 } 25070 if (dest->rack_attempt_hdwr_pace != src->rack_attempt_hdwr_pace) { 25071 dest->rack_attempt_hdwr_pace = src->rack_attempt_hdwr_pace; 25072 cnt++; 25073 } 25074 /* TCP_RACK_PRR_SENDALOT */ 25075 if (dest->r_ctl.rc_prr_sendalot != src->r_ctl.rc_prr_sendalot) { 25076 dest->r_ctl.rc_prr_sendalot = src->r_ctl.rc_prr_sendalot; 25077 cnt++; 25078 } 25079 /* TCP_RACK_MIN_TO */ 25080 if (dest->r_ctl.rc_min_to != src->r_ctl.rc_min_to) { 25081 dest->r_ctl.rc_min_to = src->r_ctl.rc_min_to; 25082 cnt++; 25083 } 25084 /* TCP_RACK_EARLY_SEG */ 25085 if (dest->r_ctl.rc_early_recovery_segs != src->r_ctl.rc_early_recovery_segs) { 25086 dest->r_ctl.rc_early_recovery_segs = src->r_ctl.rc_early_recovery_segs; 25087 cnt++; 25088 } 25089 /* TCP_RACK_ENABLE_HYSTART */ 25090 if (par->t_ccv.flags != tp->t_ccv.flags) { 25091 cnt++; 25092 if (par->t_ccv.flags & CCF_HYSTART_ALLOWED) { 25093 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 25094 if (rack_do_hystart > RACK_HYSTART_ON) 25095 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 25096 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 25097 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 25098 } else { 25099 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 25100 } 25101 } 25102 /* TCP_RACK_REORD_THRESH */ 25103 if (dest->r_ctl.rc_reorder_shift != src->r_ctl.rc_reorder_shift) { 25104 dest->r_ctl.rc_reorder_shift = src->r_ctl.rc_reorder_shift; 25105 cnt++; 25106 } 25107 /* TCP_RACK_REORD_FADE */ 25108 if (dest->r_ctl.rc_reorder_fade != src->r_ctl.rc_reorder_fade) { 25109 dest->r_ctl.rc_reorder_fade = src->r_ctl.rc_reorder_fade; 25110 cnt++; 25111 } 25112 /* TCP_RACK_TLP_THRESH */ 25113 if (dest->r_ctl.rc_tlp_threshold != src->r_ctl.rc_tlp_threshold) { 25114 dest->r_ctl.rc_tlp_threshold = src->r_ctl.rc_tlp_threshold; 25115 cnt++; 25116 } 25117 /* TCP_BBR_USE_RACK_RR */ 25118 if (dest->use_rack_rr != src->use_rack_rr) { 25119 dest->use_rack_rr = src->use_rack_rr; 25120 cnt++; 25121 } 25122 /* TCP_RACK_PKT_DELAY */ 25123 if (dest->r_ctl.rc_pkt_delay != src->r_ctl.rc_pkt_delay) { 25124 dest->r_ctl.rc_pkt_delay = src->r_ctl.rc_pkt_delay; 25125 cnt++; 25126 } 25127 /* TCP_DELACK will get copied via the main code if applicable */ 25128 /* TCP_BBR_RACK_RTT_USE */ 25129 if (dest->r_ctl.rc_rate_sample_method != src->r_ctl.rc_rate_sample_method) { 25130 dest->r_ctl.rc_rate_sample_method = src->r_ctl.rc_rate_sample_method; 25131 cnt++; 25132 } 25133 /* TCP_HONOR_HPTS_MIN */ 25134 if (dest->r_use_hpts_min != src->r_use_hpts_min) { 25135 dest->r_use_hpts_min = src->r_use_hpts_min; 25136 cnt++; 25137 } 25138 if (dest->r_ctl.max_reduction != src->r_ctl.max_reduction) { 25139 dest->r_ctl.max_reduction = src->r_ctl.max_reduction; 25140 cnt++; 25141 } 25142 /* TCP_REC_IS_DYN */ 25143 if (dest->rc_gp_no_rec_chg != src->rc_gp_no_rec_chg) { 25144 dest->rc_gp_no_rec_chg = src->rc_gp_no_rec_chg; 25145 cnt++; 25146 } 25147 if (dest->rc_skip_timely != src->rc_skip_timely) { 25148 dest->rc_skip_timely = src->rc_skip_timely; 25149 cnt++; 25150 } 25151 /* TCP_DATA_AFTER_CLOSE */ 25152 if (dest->rc_allow_data_af_clo != src->rc_allow_data_af_clo) { 25153 dest->rc_allow_data_af_clo = src->rc_allow_data_af_clo; 25154 cnt++; 25155 } 25156 /* TCP_GP_USE_LTBW */ 25157 if (src->use_lesser_lt_bw != dest->use_lesser_lt_bw) { 25158 dest->use_lesser_lt_bw = src->use_lesser_lt_bw; 25159 cnt++; 25160 } 25161 if (dest->dis_lt_bw != src->dis_lt_bw) { 25162 
dest->dis_lt_bw = src->dis_lt_bw;
25163 cnt++;
25164 }
25165 tcp_log_socket_option(tp, 0, cnt, 0);
25166 }
25167
25168
25169 static void
25170 rack_apply_deferred_options(struct tcp_rack *rack)
25171 {
25172 struct deferred_opt_list *dol, *sdol;
25173 uint32_t s_optval;
25174
25175 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) {
25176 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
25177 /* The disadvantage of deferral is that you lose the error return */
25178 s_optval = (uint32_t)dol->optval;
25179 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval, NULL);
25180 free(dol, M_TCPDO);
25181 }
25182 }
25183
25184 static void
25185 rack_hw_tls_change(struct tcpcb *tp, int chg)
25186 {
25187 /* Update HW TLS state */
25188 struct tcp_rack *rack;
25189
25190 rack = (struct tcp_rack *)tp->t_fb_ptr;
25191 if (chg)
25192 rack->r_ctl.fsb.hw_tls = 1;
25193 else
25194 rack->r_ctl.fsb.hw_tls = 0;
25195 }
25196
25197 static int
25198 rack_pru_options(struct tcpcb *tp, int flags)
25199 {
25200 if (flags & PRUS_OOB)
25201 return (EOPNOTSUPP);
25202 return (0);
25203 }
25204
25205 static bool
25206 rack_wake_check(struct tcpcb *tp)
25207 {
25208 struct tcp_rack *rack;
25209 struct timeval tv;
25210 uint32_t cts;
25211
25212 rack = (struct tcp_rack *)tp->t_fb_ptr;
25213 if (rack->r_ctl.rc_hpts_flags) {
25214 cts = tcp_get_usecs(&tv);
25215 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == PACE_PKT_OUTPUT){
25216 /*
25217 * Pacing timer is up, check if we are ready.
25218 */
25219 if (TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to))
25220 return (true);
25221 } else if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) != 0) {
25222 /*
25223 * A timer is up, check if we are ready.
25224 */
25225 if (TSTMP_GEQ(cts, rack->r_ctl.rc_timer_exp))
25226 return (true);
25227 }
25228 }
25229 return (false);
25230 }
25231
25232 static struct tcp_function_block __tcp_rack = {
25233 .tfb_tcp_block_name = __XSTRING(STACKNAME),
25234 .tfb_tcp_output = rack_output,
25235 .tfb_do_queued_segments = ctf_do_queued_segments,
25236 .tfb_do_segment_nounlock = rack_do_segment_nounlock,
25237 .tfb_tcp_do_segment = rack_do_segment,
25238 .tfb_tcp_ctloutput = rack_ctloutput,
25239 .tfb_tcp_fb_init = rack_init,
25240 .tfb_tcp_fb_fini = rack_fini,
25241 .tfb_tcp_timer_stop_all = rack_stopall,
25242 .tfb_tcp_rexmit_tmr = rack_remxt_tmr,
25243 .tfb_tcp_handoff_ok = rack_handoff_ok,
25244 .tfb_tcp_mtu_chg = rack_mtu_change,
25245 .tfb_pru_options = rack_pru_options,
25246 .tfb_hwtls_change = rack_hw_tls_change,
25247 .tfb_chg_query = rack_chg_query,
25248 .tfb_switch_failed = rack_switch_failed,
25249 .tfb_early_wake_check = rack_wake_check,
25250 .tfb_compute_pipe = rack_compute_pipe,
25251 .tfb_stack_info = rack_stack_information,
25252 .tfb_inherit = rack_inherit,
25253 .tfb_flags = TCP_FUNC_OUTPUT_CANDROP,
25254
25255 };
25256
25257 /*
25258 * rack_ctloutput() must drop the inpcb lock before performing copyin on
25259 * socket option arguments. When it re-acquires the lock after the copy, it
25260 * has to revalidate that the connection is still valid for the socket
25261 * option.
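 * rack_set_sockopt() below follows the same rule: it drops the INP
 * lock at process_opt before the sooptcopyin() and must not rely on
 * any connection state cached before that point.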
static int
rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt)
{
	struct inpcb *inp = tptoinpcb(tp);
#ifdef INET
	struct ip *ip;
#endif
	struct tcp_rack *rack;
	struct tcp_hybrid_req hybrid;
	uint64_t loptval;
	int32_t error = 0, optval;

	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack == NULL) {
		INP_WUNLOCK(inp);
		return (EINVAL);
	}
#ifdef INET
	ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
#endif

	switch (sopt->sopt_level) {
#ifdef INET6
	case IPPROTO_IPV6:
		MPASS(inp->inp_vflag & INP_IPV6PROTO);
		switch (sopt->sopt_name) {
		case IPV6_USE_MIN_MTU:
			tcp6_use_min_mtu(tp);
			break;
		}
		INP_WUNLOCK(inp);
		return (0);
#endif
#ifdef INET
	case IPPROTO_IP:
		switch (sopt->sopt_name) {
		case IP_TOS:
			/*
			 * The DSCP codepoint has changed, update the fsb.
			 */
			ip->ip_tos = rack->rc_inp->inp_ip_tos;
			break;
		case IP_TTL:
			/*
			 * The TTL has changed, update the fsb.
			 */
			ip->ip_ttl = rack->rc_inp->inp_ip_ttl;
			break;
		}
		INP_WUNLOCK(inp);
		return (0);
#endif
#ifdef SO_PEERPRIO
	case SOL_SOCKET:
		switch (sopt->sopt_name) {
		case SO_PEERPRIO:			/* SC-URL:bs */
			/* Already read in and sanity checked in sosetopt(). */
			if (inp->inp_socket) {
				rack->client_bufferlvl = inp->inp_socket->so_peerprio;
			}
			break;
		}
		INP_WUNLOCK(inp);
		return (0);
#endif
	case IPPROTO_TCP:
		switch (sopt->sopt_name) {
		case TCP_RACK_TLP_REDUCE:		/* URL:tlp_reduce */
		/* Pacing related ones */
		case TCP_RACK_PACE_ALWAYS:		/* URL:pace_always */
		case TCP_BBR_RACK_INIT_RATE:		/* URL:irate */
		case TCP_RACK_PACE_MIN_SEG:		/* URL:pace_min_seg */
		case TCP_RACK_PACE_MAX_SEG:		/* URL:pace_max_seg */
		case TCP_RACK_FORCE_MSEG:		/* URL:force_max_seg */
		case TCP_RACK_PACE_RATE_CA:		/* URL:pr_ca */
		case TCP_RACK_PACE_RATE_SS:		/* URL:pr_ss */
		case TCP_RACK_PACE_RATE_REC:		/* URL:pr_rec */
		case TCP_RACK_GP_INCREASE_CA:		/* URL:gp_inc_ca */
		case TCP_RACK_GP_INCREASE_SS:		/* URL:gp_inc_ss */
		case TCP_RACK_GP_INCREASE_REC:		/* URL:gp_inc_rec */
		case TCP_RACK_RR_CONF:			/* URL:rrr_conf */
		case TCP_BBR_HDWR_PACE:			/* URL:hdwrpace */
		case TCP_HDWR_RATE_CAP:			/* URL:hdwrcap boolean */
		case TCP_PACING_RATE_CAP:		/* URL:cap -- used by side-channel */
		case TCP_HDWR_UP_ONLY:			/* URL:uponly -- hardware pacing boolean */
		case TCP_FILLCW_RATE_CAP:		/* URL:fillcw_cap */
		case TCP_RACK_PACING_BETA_ECN:		/* URL:pacing_beta_ecn */
		case TCP_RACK_PACE_TO_FILL:		/* URL:fillcw */
		/* End pacing related */
		case TCP_POLICER_DETECT:		/* URL:pol_det */
		case TCP_POLICER_MSS:			/* URL:pol_mss */
		case TCP_DELACK:			/* URL:delack (in base TCP i.e. tcp_hints along with cc etc) */
		case TCP_RACK_PRR_SENDALOT:		/* URL:prr_sendalot */
		case TCP_RACK_MIN_TO:			/* URL:min_to */
		case TCP_RACK_EARLY_SEG:		/* URL:early_seg */
		case TCP_RACK_REORD_THRESH:		/* URL:reord_thresh */
		case TCP_RACK_REORD_FADE:		/* URL:reord_fade */
		case TCP_RACK_TLP_THRESH:		/* URL:tlp_thresh */
		case TCP_RACK_PKT_DELAY:		/* URL:pkt_delay */
		case TCP_RACK_TLP_USE:			/* URL:tlp_use */
		case TCP_BBR_RACK_RTT_USE:		/* URL:rttuse */
		case TCP_BBR_USE_RACK_RR:		/* URL:rackrr */
		case TCP_NO_PRR:			/* URL:noprr */
		case TCP_TIMELY_DYN_ADJ:		/* URL:dynamic */
		case TCP_DATA_AFTER_CLOSE:		/* no URL */
		case TCP_RACK_NONRXT_CFG_RATE:		/* URL:nonrxtcr */
		case TCP_SHARED_CWND_ENABLE:		/* URL:scwnd */
		case TCP_RACK_MBUF_QUEUE:		/* URL:mqueue */
		case TCP_RACK_NO_PUSH_AT_MAX:		/* URL:npush */
		case TCP_SHARED_CWND_TIME_LIMIT:	/* URL:lscwnd */
		case TCP_RACK_PROFILE:			/* URL:profile */
		case TCP_SIDECHAN_DIS:			/* URL:scodm */
		case TCP_HYBRID_PACING:			/* URL:pacing=hybrid */
		case TCP_USE_CMP_ACKS:			/* URL:cmpack */
		case TCP_RACK_ABC_VAL:			/* URL:labc */
		case TCP_REC_ABC_VAL:			/* URL:reclabc */
		case TCP_RACK_MEASURE_CNT:		/* URL:measurecnt */
		case TCP_DEFER_OPTIONS:			/* URL:defer */
		case TCP_RACK_DSACK_OPT:		/* URL:dsack */
		case TCP_RACK_TIMER_SLOP:		/* URL:timer_slop */
		case TCP_RACK_ENABLE_HYSTART:		/* URL:hystart */
		case TCP_RACK_SET_RXT_OPTIONS:		/* URL:rxtsz */
		case TCP_RACK_HI_BETA:			/* URL:hibeta */
		case TCP_RACK_SPLIT_LIMIT:		/* URL:split */
		case TCP_SS_EEXIT:			/* URL:eexit */
		case TCP_DGP_UPPER_BOUNDS:		/* URL:upper */
		case TCP_RACK_PACING_DIVISOR:		/* URL:divisor */
		case TCP_PACING_DND:			/* URL:dnd */
		case TCP_NO_TIMELY:			/* URL:notimely */
		case RACK_CSPR_IS_FCC:			/* URL:csprisfcc */
		case TCP_HONOR_HPTS_MIN:		/* URL:hptsmin */
		case TCP_REC_IS_DYN:			/* URL:dynrec */
		case TCP_GP_USE_LTBW:			/* URL:useltbw */
			goto process_opt;
			break;
		default:
			/* Filter off all unknown options to the base stack */
			return (tcp_default_ctloutput(tp, sopt));
			break;
		}
	default:
		INP_WUNLOCK(inp);
		return (0);
	}
process_opt:
	INP_WUNLOCK(inp);
	if ((sopt->sopt_name == TCP_PACING_RATE_CAP) ||
	    (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) {
		error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval));
		/*
		 * We truncate it down to 32 bits for the socket-option trace;
		 * this means rates > 34Gbps won't show right, but that's
		 * probably ok.
		 */
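		/*
		 * Illustrative arithmetic: if the cap is counted in bytes per
		 * second (which the figure above suggests), a 32-bit value
		 * tops out at 2^32 B/s, i.e. roughly 34.4 Gbit/s.
		 */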
		optval = (uint32_t)loptval;
	} else if (sopt->sopt_name == TCP_HYBRID_PACING) {
		error = sooptcopyin(sopt, &hybrid, sizeof(hybrid), sizeof(hybrid));
	} else {
		error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
		/* Save it in 64 bit form too */
		loptval = optval;
	}
	if (error)
		return (error);
	INP_WLOCK(inp);
	if (tp->t_fb != &__tcp_rack) {
		INP_WUNLOCK(inp);
		return (ENOPROTOOPT);
	}
	if (rack->defer_options && (rack->gp_ready == 0) &&
	    (sopt->sopt_name != TCP_DEFER_OPTIONS) &&
	    (sopt->sopt_name != TCP_HYBRID_PACING) &&
	    (sopt->sopt_name != TCP_RACK_SET_RXT_OPTIONS) &&
	    (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) &&
	    (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) {
		/* Options are being deferred */
		if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) {
			INP_WUNLOCK(inp);
			return (0);
		} else {
			/* No memory to defer, fail */
			INP_WUNLOCK(inp);
			return (ENOMEM);
		}
	}
	error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval, &hybrid);
	INP_WUNLOCK(inp);
	return (error);
}

static void
rack_fill_info(struct tcpcb *tp, struct tcp_info *ti)
{

	INP_WLOCK_ASSERT(tptoinpcb(tp));
	bzero(ti, sizeof(*ti));

	ti->tcpi_state = tp->t_state;
	if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
		ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tp->t_flags & TF_SACK_PERMIT)
		ti->tcpi_options |= TCPI_OPT_SACK;
	if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
		ti->tcpi_options |= TCPI_OPT_WSCALE;
		ti->tcpi_snd_wscale = tp->snd_scale;
		ti->tcpi_rcv_wscale = tp->rcv_scale;
	}
	if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))
		ti->tcpi_options |= TCPI_OPT_ECN;
	if (tp->t_flags & TF_FASTOPEN)
		ti->tcpi_options |= TCPI_OPT_TFO;
	/* t_rcvtime is still kept in ticks */
	ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick;
	/* Since we hold everything in precise useconds this is easy */
	ti->tcpi_rtt = tp->t_srtt;
	ti->tcpi_rttvar = tp->t_rttvar;
	ti->tcpi_rto = tp->t_rxtcur;
	ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
	ti->tcpi_snd_cwnd = tp->snd_cwnd;
	/*
	 * FreeBSD-specific extension fields for tcp_info.
	 */
	ti->tcpi_rcv_space = tp->rcv_wnd;
	ti->tcpi_rcv_nxt = tp->rcv_nxt;
	ti->tcpi_snd_wnd = tp->snd_wnd;
	ti->tcpi_snd_bwnd = 0;		/* Unused, kept for compat. */
	ti->tcpi_snd_nxt = tp->snd_nxt;
	ti->tcpi_snd_mss = tp->t_maxseg;
	ti->tcpi_rcv_mss = tp->t_maxseg;
	ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
	ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
	ti->tcpi_snd_zerowin = tp->t_sndzerowin;
	ti->tcpi_total_tlp = tp->t_sndtlppack;
	ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte;
	ti->tcpi_rttmin = tp->t_rttlow;
#ifdef NETFLIX_STATS
	memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo));
#endif
#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE) {
		ti->tcpi_options |= TCPI_OPT_TOE;
		tcp_offload_tcp_info(tp, ti);
	}
#endif
}

static int
rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt)
{
	struct inpcb *inp = tptoinpcb(tp);
	struct tcp_rack *rack;
	int32_t error, optval;
	uint64_t val, loptval;
	struct tcp_info ti;
	/*
	 * Because all our options are either boolean or an int, we can just
	 * pull everything into optval and then unlock and copy. If we ever
	 * add an option that is not an int, then this will have quite an
	 * impact on this routine.
	 */
	error = 0;
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack == NULL) {
		INP_WUNLOCK(inp);
		return (EINVAL);
	}
	switch (sopt->sopt_name) {
	case TCP_INFO:
		/* First get the info filled */
		rack_fill_info(tp, &ti);
		/* Fix up the rtt related fields if needed */
		INP_WUNLOCK(inp);
		error = sooptcopyout(sopt, &ti, sizeof ti);
		return (error);
	/*
	 * Beta is the congestion control value for NewReno that influences how
	 * much of a backoff happens when loss is detected. It is normally set
	 * to 50 for 50%, i.e. the cwnd is reduced to 50% of its previous value
	 * when you exit recovery. For example, with beta = 50 a cwnd of 100
	 * segments becomes 50 segments on exit from recovery.
	 */
	case TCP_RACK_PACING_BETA:
		break;
	/*
	 * Beta_ecn is the congestion control value for NewReno that influences
	 * how much of a backoff happens when an ECN mark is detected. It is
	 * normally set to 80 for 80%, i.e. the cwnd is reduced by 20% of its
	 * previous value when you exit recovery. Note that classic ECN has a
	 * beta of 50; it is only ABE ECN that uses this lesser value, but we
	 * do too with pacing :)
	 */
	case TCP_RACK_PACING_BETA_ECN:
		if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0)
			error = EINVAL;
		else if (rack->rc_pacing_cc_set == 0)
			optval = rack->r_ctl.rc_saved_beta.beta_ecn;
		else {
			/*
			 * Reach out into the CC data and report back what
			 * I have previously set. Yeah it looks hackish but
			 * we don't want to report the saved values.
			 */
			if (tp->t_ccv.cc_data)
				optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn;
			else
				error = EINVAL;
		}
		break;
	case TCP_RACK_DSACK_OPT:
		optval = 0;
		if (rack->rc_rack_tmr_std_based) {
			optval |= 1;
		}
		if (rack->rc_rack_use_dsack) {
			optval |= 2;
		}
		break;
	case TCP_RACK_ENABLE_HYSTART:
	{
		if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) {
			optval = RACK_HYSTART_ON;
			if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND)
				optval = RACK_HYSTART_ON_W_SC;
			if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH)
				optval = RACK_HYSTART_ON_W_SC_C;
		} else {
			optval = RACK_HYSTART_OFF;
		}
	}
		break;
	case TCP_RACK_DGP_IN_REC:
		error = EINVAL;
		break;
	case TCP_RACK_HI_BETA:
		optval = rack->rack_hibeta;
		break;
	case TCP_POLICER_MSS:
		optval = rack->r_ctl.policer_del_mss;
		break;
	case TCP_POLICER_DETECT:
		optval = rack->r_ctl.saved_policer_val;
		break;
	case TCP_DEFER_OPTIONS:
		optval = rack->defer_options;
		break;
	case TCP_RACK_MEASURE_CNT:
		optval = rack->r_ctl.req_measurements;
		break;
	case TCP_REC_ABC_VAL:
		optval = rack->r_use_labc_for_rec;
		break;
	case TCP_RACK_ABC_VAL:
		optval = rack->rc_labc;
		break;
	case TCP_HDWR_UP_ONLY:
		optval = rack->r_up_only;
		break;
	case TCP_FILLCW_RATE_CAP:
		loptval = rack->r_ctl.fillcw_cap;
		break;
	case TCP_PACING_RATE_CAP:
		loptval = rack->r_ctl.bw_rate_cap;
		break;
	case TCP_RACK_PROFILE:
		/* You cannot retrieve a profile, it is write only */
		error = EINVAL;
		break;
	case TCP_SIDECHAN_DIS:
		optval = rack->r_ctl.side_chan_dis_mask;
		break;
	case TCP_HYBRID_PACING:
		/* You cannot retrieve hybrid pacing information, it is write only */
		error = EINVAL;
		break;
	case TCP_USE_CMP_ACKS:
		optval = rack->r_use_cmp_ack;
		break;
	case TCP_RACK_PACE_TO_FILL:
		optval = rack->rc_pace_to_cwnd;
		break;
	case TCP_RACK_NO_PUSH_AT_MAX:
		optval = rack->r_ctl.rc_no_push_at_mrtt;
		break;
	case TCP_SHARED_CWND_ENABLE:
		optval = rack->rack_enable_scwnd;
		break;
	case TCP_RACK_NONRXT_CFG_RATE:
		optval = rack->rack_rec_nonrxt_use_cr;
		break;
	case TCP_NO_PRR:
		if (rack->rack_no_prr == 1)
			optval = 1;
		else if (rack->no_prr_addback == 1)
			optval = 2;
		else
			optval = 0;
		break;
	case TCP_GP_USE_LTBW:
		if (rack->dis_lt_bw) {
			/* It is not used */
			optval = 0;
		} else if (rack->use_lesser_lt_bw) {
			/* we use min() */
			optval = 1;
		} else {
			/* we use max() */
			optval = 2;
		}
		break;
	case TCP_RACK_DO_DETECTION:
		error = EINVAL;
		break;
	case TCP_RACK_MBUF_QUEUE:
		/* Now do we use the LRO mbuf-queue feature */
		optval = rack->r_mbuf_queue;
		break;
	case RACK_CSPR_IS_FCC:
		optval = rack->cspr_is_fcc;
		break;
	case TCP_TIMELY_DYN_ADJ:
		optval = rack->rc_gp_dyn_mul;
		break;
	case TCP_BBR_IWINTSO:
		error = EINVAL;
		break;
	case TCP_RACK_TLP_REDUCE:
		/* RACK TLP cwnd reduction (bool) */
		optval = rack->r_ctl.rc_tlp_cwnd_reduce;
		break;
	case TCP_BBR_RACK_INIT_RATE:
		val = rack->r_ctl.init_rate;
		/* convert to kbits per sec */
		val *= 8;
		val /= 1000;
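		/*
		 * Worked example (illustrative): an init_rate of 1,250,000
		 * bytes/sec becomes 1,250,000 * 8 / 1000 = 10,000 kbits/sec,
		 * i.e. 10 Mbit/s, before being narrowed to 32 bits below.
		 */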
		optval = (uint32_t)val;
		break;
	case TCP_RACK_FORCE_MSEG:
		optval = rack->rc_force_max_seg;
		break;
	case TCP_RACK_PACE_MIN_SEG:
		optval = rack->r_ctl.rc_user_set_min_segs;
		break;
	case TCP_RACK_PACE_MAX_SEG:
		/* Max segments in a pace */
		optval = rack->rc_user_set_max_segs;
		break;
	case TCP_RACK_PACE_ALWAYS:
		/* Use the always pace method */
		optval = rack->rc_always_pace;
		break;
	case TCP_RACK_PRR_SENDALOT:
		/* Allow PRR to send more than one seg */
		optval = rack->r_ctl.rc_prr_sendalot;
		break;
	case TCP_RACK_MIN_TO:
		/* Minimum time between rack t-o's in ms */
		optval = rack->r_ctl.rc_min_to;
		break;
	case TCP_RACK_SPLIT_LIMIT:
		optval = rack->r_ctl.rc_split_limit;
		break;
	case TCP_RACK_EARLY_SEG:
		/* If early recovery max segments */
		optval = rack->r_ctl.rc_early_recovery_segs;
		break;
	case TCP_RACK_REORD_THRESH:
		/* RACK reorder threshold (shift amount) */
		optval = rack->r_ctl.rc_reorder_shift;
		break;
	case TCP_SS_EEXIT:
		if (rack->r_ctl.gp_rnd_thresh) {
			uint32_t v;

			v = rack->r_ctl.gp_gain_req;
			v <<= 17;
			optval = v | (rack->r_ctl.gp_rnd_thresh & 0xff);
			if (rack->r_ctl.gate_to_fs == 1)
				optval |= 0x10000;
		} else
			optval = 0;
		break;
	case TCP_RACK_REORD_FADE:
		/* Does reordering fade after ms time */
		optval = rack->r_ctl.rc_reorder_fade;
		break;
	case TCP_BBR_USE_RACK_RR:
		/* Do we use the rack cheat for rxt */
		optval = rack->use_rack_rr;
		break;
	case TCP_RACK_RR_CONF:
		optval = rack->r_rr_config;
		break;
	case TCP_HDWR_RATE_CAP:
		optval = rack->r_rack_hw_rate_caps;
		break;
	case TCP_BBR_HDWR_PACE:
		optval = rack->rack_hdw_pace_ena;
		break;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
		optval = rack->r_ctl.rc_tlp_threshold;
		break;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms i.e. rack-rtt + reord + N */
		optval = rack->r_ctl.rc_pkt_delay;
		break;
	case TCP_RACK_TLP_USE:
		optval = rack->rack_tlp_threshold_use;
		break;
	case TCP_PACING_DND:
		optval = rack->rc_pace_dnd;
		break;
	case TCP_RACK_PACE_RATE_CA:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
		break;
	case TCP_RACK_PACE_RATE_SS:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
		break;
	case TCP_RACK_PACE_RATE_REC:
		optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
		break;
	case TCP_DGP_UPPER_BOUNDS:
		optval = rack->r_ctl.rack_per_upper_bound_ss;
		optval <<= 16;
		optval |= rack->r_ctl.rack_per_upper_bound_ca;
		break;
	case TCP_RACK_GP_INCREASE_SS:
		optval = rack->r_ctl.rack_per_of_gp_ca;
		break;
	case TCP_RACK_GP_INCREASE_CA:
		optval = rack->r_ctl.rack_per_of_gp_ss;
		break;
	case TCP_RACK_PACING_DIVISOR:
		optval = rack->r_ctl.pace_len_divisor;
		break;
	case TCP_BBR_RACK_RTT_USE:
		optval = rack->r_ctl.rc_rate_sample_method;
		break;
	case TCP_DELACK:
		optval = tp->t_delayed_ack;
		break;
	case TCP_DATA_AFTER_CLOSE:
		optval = rack->rc_allow_data_af_clo;
		break;
	case TCP_SHARED_CWND_TIME_LIMIT:
		optval = rack->r_limit_scw;
		break;
	case TCP_HONOR_HPTS_MIN:
		if (rack->r_use_hpts_min)
			optval = rack->r_ctl.max_reduction;
		else
			optval = 0;
		break;
	case TCP_REC_IS_DYN:
		optval = rack->rc_gp_no_rec_chg;
		break;
	case TCP_NO_TIMELY:
		optval = rack->rc_skip_timely;
		break;
	case TCP_RACK_TIMER_SLOP:
		optval = rack->r_ctl.timer_slop;
		break;
	default:
		return (tcp_default_ctloutput(tp, sopt));
		break;
	}
	INP_WUNLOCK(inp);
	if (error == 0) {
		if ((sopt->sopt_name == TCP_PACING_RATE_CAP) ||
		    (sopt->sopt_name == TCP_FILLCW_RATE_CAP))
			error = sooptcopyout(sopt, &loptval, sizeof loptval);
		else
			error = sooptcopyout(sopt, &optval, sizeof optval);
	}
	return (error);
}

static int
rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt)
{
	if (sopt->sopt_dir == SOPT_SET) {
		return (rack_set_sockopt(tp, sopt));
	} else if (sopt->sopt_dir == SOPT_GET) {
		return (rack_get_sockopt(tp, sopt));
	} else {
		panic("%s: sopt_dir $%d", __func__, sopt->sopt_dir);
	}
}

static const char *rack_stack_names[] = {
	__XSTRING(STACKNAME),
#ifdef STACKALIAS
	__XSTRING(STACKALIAS),
#endif
};

static int
rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
{
	memset(mem, 0, size);
	return (0);
}

static void
rack_dtor(void *mem, int32_t size, void *arg)
{

}

static bool rack_mod_inited = false;

static int
tcp_addrack(module_t mod, int32_t type, void *data)
{
	int32_t err = 0;
	int num_stacks;

	switch (type) {
	case MOD_LOAD:
		rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
		    sizeof(struct rack_sendmap),
		    rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);

		rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
		    sizeof(struct tcp_rack),
		    rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);

		sysctl_ctx_init(&rack_sysctl_ctx);
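		/*
		 * Create the stack's sysctl root under net.inet.tcp (named
		 * after STACKALIAS when that is defined, otherwise after
		 * STACKNAME); rack_init_sysctls() below then attaches all of
		 * the stack's tunables to this node.
		 */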
		rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
		    OID_AUTO,
#ifdef STACKALIAS
		    __XSTRING(STACKALIAS),
#else
		    __XSTRING(STACKNAME),
#endif
		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
		    "");
		if (rack_sysctl_root == NULL) {
			printf("Failed to add sysctl node\n");
			err = EFAULT;
			goto free_uma;
		}
		rack_init_sysctls();
		num_stacks = nitems(rack_stack_names);
		err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
		    rack_stack_names, &num_stacks);
		if (err) {
			printf("Failed to register %s stack name for "
			    "%s module\n", rack_stack_names[num_stacks],
			    __XSTRING(MODNAME));
			sysctl_ctx_free(&rack_sysctl_ctx);
free_uma:
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			rack_counter_destroy();
			printf("Failed to register rack module -- err:%d\n", err);
			return (err);
		}
		tcp_lro_reg_mbufq();
		rack_mod_inited = true;
		break;
	case MOD_QUIESCE:
		err = deregister_tcp_functions(&__tcp_rack, true, false);
		break;
	case MOD_UNLOAD:
		err = deregister_tcp_functions(&__tcp_rack, false, true);
		if (err == EBUSY)
			break;
		if (rack_mod_inited) {
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			sysctl_ctx_free(&rack_sysctl_ctx);
			rack_counter_destroy();
			rack_mod_inited = false;
		}
		tcp_lro_dereg_mbufq();
		err = 0;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (err);
}

static moduledata_t tcp_rack = {
	.name = __XSTRING(MODNAME),
	.evhand = tcp_addrack,
	.priv = 0
};

MODULE_VERSION(MODNAME, 1);
DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);

#endif /* #if defined(INET) || defined(INET6) */
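/*
 * Usage note (editorial): on a typical FreeBSD system this module is loaded
 * with "kldload tcp_rack" and the stack can then be selected globally, e.g.
 *
 *	sysctl net.inet.tcp.functions_default=rack
 *
 * or per connection with the TCP_FUNCTION_BLK socket option and a
 * struct tcp_function_set naming "rack". The exact module and stack names
 * depend on how MODNAME, STACKNAME and STACKALIAS are defined for the build.
 */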