1 /*- 2 * Copyright (c) 2016-2020 Netflix, Inc. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 */ 26 27 #include <sys/cdefs.h> 28 #include "opt_inet.h" 29 #include "opt_inet6.h" 30 #include "opt_ipsec.h" 31 #include "opt_ratelimit.h" 32 #include "opt_kern_tls.h" 33 #if defined(INET) || defined(INET6) 34 #include <sys/param.h> 35 #include <sys/arb.h> 36 #include <sys/module.h> 37 #include <sys/kernel.h> 38 #ifdef TCP_HHOOK 39 #include <sys/hhook.h> 40 #endif 41 #include <sys/lock.h> 42 #include <sys/malloc.h> 43 #include <sys/lock.h> 44 #include <sys/mutex.h> 45 #include <sys/mbuf.h> 46 #include <sys/proc.h> /* for proc0 declaration */ 47 #include <sys/socket.h> 48 #include <sys/socketvar.h> 49 #include <sys/sysctl.h> 50 #include <sys/systm.h> 51 #ifdef STATS 52 #include <sys/qmath.h> 53 #include <sys/tree.h> 54 #include <sys/stats.h> /* Must come after qmath.h and tree.h */ 55 #else 56 #include <sys/tree.h> 57 #endif 58 #include <sys/refcount.h> 59 #include <sys/queue.h> 60 #include <sys/tim_filter.h> 61 #include <sys/smp.h> 62 #include <sys/kthread.h> 63 #include <sys/kern_prefetch.h> 64 #include <sys/protosw.h> 65 #ifdef TCP_ACCOUNTING 66 #include <sys/sched.h> 67 #include <machine/cpu.h> 68 #endif 69 #include <vm/uma.h> 70 71 #include <net/route.h> 72 #include <net/route/nhop.h> 73 #include <net/vnet.h> 74 75 #define TCPSTATES /* for logging */ 76 77 #include <netinet/in.h> 78 #include <netinet/in_kdtrace.h> 79 #include <netinet/in_pcb.h> 80 #include <netinet/ip.h> 81 #include <netinet/ip_icmp.h> /* required for icmp_var.h */ 82 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */ 83 #include <netinet/ip_var.h> 84 #include <netinet/ip6.h> 85 #include <netinet6/in6_pcb.h> 86 #include <netinet6/ip6_var.h> 87 #include <netinet/tcp.h> 88 #define TCPOUTFLAGS 89 #include <netinet/tcp_fsm.h> 90 #include <netinet/tcp_seq.h> 91 #include <netinet/tcp_timer.h> 92 #include <netinet/tcp_var.h> 93 #include <netinet/tcp_log_buf.h> 94 #include <netinet/tcp_syncache.h> 95 #include <netinet/tcp_hpts.h> 96 #include <netinet/tcp_ratelimit.h> 97 #include <netinet/tcp_accounting.h> 98 #include <netinet/tcpip.h> 99 #include <netinet/cc/cc.h> 100 #include <netinet/cc/cc_newreno.h> 101 #include <netinet/tcp_fastopen.h> 102 #include <netinet/tcp_lro.h> 103 
#ifdef NETFLIX_SHARED_CWND 104 #include <netinet/tcp_shared_cwnd.h> 105 #endif 106 #ifdef TCP_OFFLOAD 107 #include <netinet/tcp_offload.h> 108 #endif 109 #ifdef INET6 110 #include <netinet6/tcp6_var.h> 111 #endif 112 #include <netinet/tcp_ecn.h> 113 114 #include <netipsec/ipsec_support.h> 115 116 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 117 #include <netipsec/ipsec.h> 118 #include <netipsec/ipsec6.h> 119 #endif /* IPSEC */ 120 121 #include <netinet/udp.h> 122 #include <netinet/udp_var.h> 123 #include <machine/in_cksum.h> 124 125 #ifdef MAC 126 #include <security/mac/mac_framework.h> 127 #endif 128 #include "sack_filter.h" 129 #include "tcp_rack.h" 130 #include "tailq_hash.h" 131 #include "rack_bbr_common.h" 132 133 uma_zone_t rack_zone; 134 uma_zone_t rack_pcb_zone; 135 136 #ifndef TICKS2SBT 137 #define TICKS2SBT(__t) (tick_sbt * ((sbintime_t)(__t))) 138 #endif 139 140 VNET_DECLARE(uint32_t, newreno_beta); 141 VNET_DECLARE(uint32_t, newreno_beta_ecn); 142 #define V_newreno_beta VNET(newreno_beta) 143 #define V_newreno_beta_ecn VNET(newreno_beta_ecn) 144 145 #define M_TCPFSB __CONCAT(M_TCPFSB, STACKNAME) 146 #define M_TCPDO __CONCAT(M_TCPDO, STACKNAME) 147 148 MALLOC_DEFINE(M_TCPFSB, "tcp_fsb_" __XSTRING(STACKNAME), "TCP fast send block"); 149 MALLOC_DEFINE(M_TCPDO, "tcp_do_" __XSTRING(STACKNAME), "TCP deferred options"); 150 MALLOC_DEFINE(M_TCPPCM, "tcp_pcm_" __XSTRING(STACKNAME), "TCP PCM measurement information"); 151 152 struct sysctl_ctx_list rack_sysctl_ctx; 153 struct sysctl_oid *rack_sysctl_root; 154 155 #define CUM_ACKED 1 156 #define SACKED 2 157 158 /* 159 * The RACK module incorporates a number of 160 * TCP ideas that have been put out into the IETF 161 * over the last few years: 162 * - Matt Mathis's Rate Halving which slowly drops 163 * the congestion window so that the ack clock can 164 * be maintained during a recovery. 165 * - Yuchung Cheng's RACK TCP (for which it's named) that 166 * will stop us from using the number of dup acks and instead 167 * use time as the gauge of when we retransmit. 168 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft 169 * of Dukkipati et al. 170 * RACK depends on SACK, so if an endpoint arrives that 171 * cannot do SACK the state machine below will shuttle the 172 * connection back to using the "default" TCP stack that is 173 * in FreeBSD. 174 * 175 * To implement RACK the original TCP stack was first decomposed 176 * into a functional state machine with individual states 177 * for each of the possible TCP connection states. The do_segment 178 * function's role in life is to mandate that the connection supports SACK 179 * initially and then assure that the RACK state matches the connection 180 * state before calling the state's do_segment function. Each 181 * state is simplified due to the fact that the original do_segment 182 * has been decomposed and we *know* what state we are in (no 183 * switches on the state) and all tests for SACK are gone. This 184 * greatly simplifies what each state does. 185 * 186 * TCP output is also overwritten with a new version since it 187 * must maintain the new rack scoreboard.
188 * 189 */ 190 static int32_t rack_tlp_thresh = 1; 191 static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs w-out new data */ 192 static int32_t rack_tlp_use_greater = 1; 193 static int32_t rack_reorder_thresh = 2; 194 static int32_t rack_reorder_fade = 60000000; /* 0 - never fade, def 60,000,000 195 * - 60 seconds */ 196 static uint16_t rack_policer_rxt_thresh= 0; /* 499 = 49.9%, 0 is off */ 197 static uint8_t rack_policer_avg_thresh = 0; /* 3.2 */ 198 static uint8_t rack_policer_med_thresh = 0; /* 1 - 16 */ 199 static uint16_t rack_policer_bucket_reserve = 20; /* How much % is reserved in the bucket */ 200 static uint64_t rack_pol_min_bw = 125000; /* 1mbps in Bytes per sec */ 201 static uint32_t rack_policer_data_thresh = 64000; /* 64,000 bytes must be sent before we engage */ 202 static uint32_t rack_policing_do_bw_comp = 1; 203 static uint32_t rack_pcm_every_n_rounds = 100; 204 static uint32_t rack_pcm_blast = 0; 205 static uint32_t rack_pcm_is_enabled = 1; 206 static uint8_t rack_req_del_mss = 18; /* How many segments need to be sent in a recovery episode to do policer_detection */ 207 static uint8_t rack_ssthresh_rest_rto_rec = 0; /* Do we restore ssthresh when we have rec -> rto -> rec */ 208 209 static uint32_t rack_gp_gain_req = 1200; /* Amount percent wise required to gain to record a round has "gaining" */ 210 static uint32_t rack_rnd_cnt_req = 0x10005; /* Default number of rounds if we are below rack_gp_gain_req where we exit ss */ 211 212 213 static int32_t rack_rxt_scoreboard_clear_thresh = 2; 214 static int32_t rack_dnd_default = 0; /* For rr_conf = 3, what is the default for dnd */ 215 static int32_t rack_rxt_controls = 0; 216 static int32_t rack_fill_cw_state = 0; 217 static uint8_t rack_req_measurements = 1; 218 /* Attack threshold detections */ 219 static uint32_t rack_highest_sack_thresh_seen = 0; 220 static uint32_t rack_highest_move_thresh_seen = 0; 221 static uint32_t rack_merge_out_sacks_on_attack = 0; 222 static int32_t rack_enable_hw_pacing = 0; /* Due to CCSP keep it off by default */ 223 static int32_t rack_hw_pace_extra_slots = 0; /* 2 extra MSS time betweens */ 224 static int32_t rack_hw_rate_caps = 0; /* 1; */ 225 static int32_t rack_hw_rate_cap_per = 0; /* 0 -- off */ 226 static int32_t rack_hw_rate_min = 0; /* 1500000;*/ 227 static int32_t rack_hw_rate_to_low = 0; /* 1200000; */ 228 static int32_t rack_hw_up_only = 0; 229 static int32_t rack_stats_gets_ms_rtt = 1; 230 static int32_t rack_prr_addbackmax = 2; 231 static int32_t rack_do_hystart = 0; 232 static int32_t rack_apply_rtt_with_reduced_conf = 0; 233 static int32_t rack_hibeta_setting = 0; 234 static int32_t rack_default_pacing_divisor = 250; 235 static uint16_t rack_pacing_min_seg = 0; 236 static int32_t rack_timely_off = 0; 237 238 static uint32_t sad_seg_size_per = 800; /* 80.0 % */ 239 static int32_t rack_pkt_delay = 1000; 240 static int32_t rack_send_a_lot_in_prr = 1; 241 static int32_t rack_min_to = 1000; /* Number of microsecond min timeout */ 242 static int32_t rack_verbose_logging = 0; 243 static int32_t rack_ignore_data_after_close = 1; 244 static int32_t rack_enable_shared_cwnd = 1; 245 static int32_t rack_use_cmp_acks = 1; 246 static int32_t rack_use_fsb = 1; 247 static int32_t rack_use_rfo = 1; 248 static int32_t rack_use_rsm_rfo = 1; 249 static int32_t rack_max_abc_post_recovery = 2; 250 static int32_t rack_client_low_buf = 0; 251 static int32_t rack_dsack_std_based = 0x3; /* bit field bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */ 252 static int32_t 
rack_bw_multipler = 0; /* Limit on fill cw's jump up to be this x gp_est */ 253 #ifdef TCP_ACCOUNTING 254 static int32_t rack_tcp_accounting = 0; 255 #endif 256 static int32_t rack_limits_scwnd = 1; 257 static int32_t rack_enable_mqueue_for_nonpaced = 0; 258 static int32_t rack_hybrid_allow_set_maxseg = 0; 259 static int32_t rack_disable_prr = 0; 260 static int32_t use_rack_rr = 1; 261 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/ca)? */ 262 static int32_t rack_persist_min = 250000; /* 250usec */ 263 static int32_t rack_persist_max = 2000000; /* 2 Second in usec's */ 264 static int32_t rack_honors_hpts_min_to = 1; /* Do we honor the hpts minimum time out for pacing timers */ 265 static uint32_t rack_max_reduce = 10; /* Percent we can reduce slot by */ 266 static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */ 267 static int32_t rack_limit_time_with_srtt = 0; 268 static int32_t rack_autosndbuf_inc = 20; /* In percentage form */ 269 static int32_t rack_enobuf_hw_boost_mult = 0; /* How many times the hw rate we boost slot using time_between */ 270 static int32_t rack_enobuf_hw_max = 12000; /* 12 ms in usecs */ 271 static int32_t rack_enobuf_hw_min = 10000; /* 10 ms in usecs */ 272 static int32_t rack_hw_rwnd_factor = 2; /* How many max_segs the rwnd must be before we hold off sending */ 273 static int32_t rack_hw_check_queue = 0; /* Do we always pre-check queue depth of a hw queue */ 274 static int32_t rack_full_buffer_discount = 10; 275 /* 276 * Currently regular tcp has a rto_min of 30ms 277 * the backoff goes 12 times so that ends up 278 * being a total of 122.850 seconds before a 279 * connection is killed. 280 */ 281 static uint32_t rack_def_data_window = 20; 282 static uint32_t rack_goal_bdp = 2; 283 static uint32_t rack_min_srtts = 1; 284 static uint32_t rack_min_measure_usec = 0; 285 static int32_t rack_tlp_min = 10000; /* 10ms */ 286 static int32_t rack_rto_min = 30000; /* 30,000 usec same as main freebsd */ 287 static int32_t rack_rto_max = 4000000; /* 4 seconds in usec's */ 288 static const int32_t rack_free_cache = 2; 289 static int32_t rack_hptsi_segments = 40; 290 static int32_t rack_rate_sample_method = USE_RTT_LOW; 291 static int32_t rack_pace_every_seg = 0; 292 static int32_t rack_delayed_ack_time = 40000; /* 40ms in usecs */ 293 static int32_t rack_slot_reduction = 4; 294 static int32_t rack_wma_divisor = 8; /* For WMA calculation */ 295 static int32_t rack_cwnd_block_ends_measure = 0; 296 static int32_t rack_rwnd_block_ends_measure = 0; 297 static int32_t rack_def_profile = 0; 298 299 static int32_t rack_lower_cwnd_at_tlp = 0; 300 static int32_t rack_always_send_oldest = 0; 301 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE; 302 303 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */ 304 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */ 305 static uint16_t rack_per_of_gp_rec = 200; /* 200 % of bw */ 306 307 /* Probertt */ 308 static uint16_t rack_per_of_gp_probertt = 60; /* 60% of bw */ 309 static uint16_t rack_per_of_gp_lowthresh = 40; /* 40% is bottom */ 310 static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */ 311 static uint16_t rack_atexit_prtt_hbp = 130; /* Clamp to 130% on exit prtt if highly buffered path */ 312 static uint16_t rack_atexit_prtt = 130; /* Clamp to 100% on exit prtt if non highly buffered path */ 313 314 static uint32_t rack_max_drain_wait = 2; /* How man gp srtt's before we give up draining */ 315 
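/*
 * Probe-rtt drain bookkeeping: rack_must_drain (below) is how many gp_srtt
 * periods we always spend draining toward the goal flight size, while
 * rack_max_drain_wait (above) bounds how many gp_srtt periods we are willing
 * to hold in drain waiting for flight to reach the goal; rack_max_drain_hbp
 * grants extra drain periods on highly buffered paths (see the probertt
 * sysctl descriptions in rack_init_sysctls()).
 */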
static uint32_t rack_must_drain = 1; /* How many GP srtt's we *must* wait */ 316 static uint32_t rack_probertt_use_min_rtt_entry = 1; /* Use the min to calculate the goal else gp_srtt */ 317 static uint32_t rack_probertt_use_min_rtt_exit = 0; 318 static uint32_t rack_probe_rtt_sets_cwnd = 0; 319 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */ 320 static uint32_t rack_time_between_probertt = 9600000; /* 9.6 sec in usecs */ 321 static uint32_t rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt periods does probe-rtt last top fraction */ 322 static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last bottom fraction */ 323 static uint32_t rack_min_probertt_hold = 40000; /* Equal to delayed ack time */ 324 static uint32_t rack_probertt_filter_life = 10000000; 325 static uint32_t rack_probertt_lower_within = 10; 326 static uint32_t rack_min_rtt_movement = 250000; /* Must move at least 250ms (in microseconds) to count as a lowering */ 327 static int32_t rack_pace_one_seg = 0; /* Shall we pace for less than 1.4Meg 1MSS at a time */ 328 static int32_t rack_probertt_clear_is = 1; 329 static int32_t rack_max_drain_hbp = 1; /* Extra drain times gpsrtt for highly buffered paths */ 330 static int32_t rack_hbp_thresh = 3; /* what is the divisor max_rtt/min_rtt to decided a hbp */ 331 332 /* Part of pacing */ 333 static int32_t rack_max_per_above = 30; /* When we go to increment stop if above 100+this% */ 334 335 /* Timely information: 336 * 337 * Here we have various control parameters on how 338 * timely may change the multiplier. rack_gain_p5_ub 339 * is associated with timely but not directly influencing 340 * the rate decision like the other variables. It controls 341 * the way fill-cw interacts with timely and caps how much 342 * timely can boost the fill-cw b/w. 343 * 344 * The other values are various boost/shrink numbers as well 345 * as potential caps when adjustments are made to the timely 346 * gain (returned by rack_get_output_gain(). Remember too that 347 * the gain returned can be overriden by other factors such as 348 * probeRTT as well as fixed-rate-pacing. 349 */ 350 static int32_t rack_gain_p5_ub = 250; 351 static int32_t rack_gp_per_bw_mul_up = 2; /* 2% */ 352 static int32_t rack_gp_per_bw_mul_down = 4; /* 4% */ 353 static int32_t rack_gp_rtt_maxmul = 3; /* 3 x maxmin */ 354 static int32_t rack_gp_rtt_minmul = 1; /* minrtt + (minrtt/mindiv) is lower rtt */ 355 static int32_t rack_gp_rtt_mindiv = 4; /* minrtt + (minrtt * minmul/mindiv) is lower rtt */ 356 static int32_t rack_gp_decrease_per = 80; /* Beta value of timely decrease (.8) = 80 */ 357 static int32_t rack_gp_increase_per = 2; /* 2% increase in multiplier */ 358 static int32_t rack_per_lower_bound = 50; /* Don't allow to drop below this multiplier */ 359 static int32_t rack_per_upper_bound_ss = 0; /* Don't allow SS to grow above this */ 360 static int32_t rack_per_upper_bound_ca = 0; /* Don't allow CA to grow above this */ 361 static int32_t rack_do_dyn_mul = 0; /* Are the rack gp multipliers dynamic */ 362 static int32_t rack_gp_no_rec_chg = 1; /* Prohibit recovery from reducing it's multiplier */ 363 static int32_t rack_timely_dec_clear = 6; /* Do we clear decrement count at a value (6)? 
*/ 364 static int32_t rack_timely_max_push_rise = 3; /* One round of pushing */ 365 static int32_t rack_timely_max_push_drop = 3; /* Three round of pushing */ 366 static int32_t rack_timely_min_segs = 4; /* 4 segment minimum */ 367 static int32_t rack_use_max_for_nobackoff = 0; 368 static int32_t rack_timely_int_timely_only = 0; /* do interim timely's only use the timely algo (no b/w changes)? */ 369 static int32_t rack_timely_no_stopping = 0; 370 static int32_t rack_down_raise_thresh = 100; 371 static int32_t rack_req_segs = 1; 372 static uint64_t rack_bw_rate_cap = 0; 373 static uint64_t rack_fillcw_bw_cap = 3750000; /* Cap fillcw at 30Mbps */ 374 375 376 /* Rack specific counters */ 377 counter_u64_t rack_saw_enobuf; 378 counter_u64_t rack_saw_enobuf_hw; 379 counter_u64_t rack_saw_enetunreach; 380 counter_u64_t rack_persists_sends; 381 counter_u64_t rack_persists_acks; 382 counter_u64_t rack_persists_loss; 383 counter_u64_t rack_persists_lost_ends; 384 counter_u64_t rack_total_bytes; 385 #ifdef INVARIANTS 386 counter_u64_t rack_adjust_map_bw; 387 #endif 388 /* Tail loss probe counters */ 389 counter_u64_t rack_tlp_tot; 390 counter_u64_t rack_tlp_newdata; 391 counter_u64_t rack_tlp_retran; 392 counter_u64_t rack_tlp_retran_bytes; 393 counter_u64_t rack_to_tot; 394 counter_u64_t rack_hot_alloc; 395 counter_u64_t tcp_policer_detected; 396 counter_u64_t rack_to_alloc; 397 counter_u64_t rack_to_alloc_hard; 398 counter_u64_t rack_to_alloc_emerg; 399 counter_u64_t rack_to_alloc_limited; 400 counter_u64_t rack_alloc_limited_conns; 401 counter_u64_t rack_split_limited; 402 counter_u64_t rack_rxt_clamps_cwnd; 403 counter_u64_t rack_rxt_clamps_cwnd_uniq; 404 405 counter_u64_t rack_multi_single_eq; 406 counter_u64_t rack_proc_non_comp_ack; 407 408 counter_u64_t rack_fto_send; 409 counter_u64_t rack_fto_rsm_send; 410 counter_u64_t rack_nfto_resend; 411 counter_u64_t rack_non_fto_send; 412 counter_u64_t rack_extended_rfo; 413 414 counter_u64_t rack_sack_proc_all; 415 counter_u64_t rack_sack_proc_short; 416 counter_u64_t rack_sack_proc_restart; 417 counter_u64_t rack_sack_attacks_detected; 418 counter_u64_t rack_sack_attacks_reversed; 419 counter_u64_t rack_sack_attacks_suspect; 420 counter_u64_t rack_sack_used_next_merge; 421 counter_u64_t rack_sack_splits; 422 counter_u64_t rack_sack_used_prev_merge; 423 counter_u64_t rack_sack_skipped_acked; 424 counter_u64_t rack_ack_total; 425 counter_u64_t rack_express_sack; 426 counter_u64_t rack_sack_total; 427 counter_u64_t rack_move_none; 428 counter_u64_t rack_move_some; 429 430 counter_u64_t rack_input_idle_reduces; 431 counter_u64_t rack_collapsed_win; 432 counter_u64_t rack_collapsed_win_seen; 433 counter_u64_t rack_collapsed_win_rxt; 434 counter_u64_t rack_collapsed_win_rxt_bytes; 435 counter_u64_t rack_try_scwnd; 436 counter_u64_t rack_hw_pace_init_fail; 437 counter_u64_t rack_hw_pace_lost; 438 439 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE]; 440 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE]; 441 442 443 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2))) 444 445 #define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do { \ 446 (tv) = (value) + slop; \ 447 if ((u_long)(tv) < (u_long)(tvmin)) \ 448 (tv) = (tvmin); \ 449 if ((u_long)(tv) > (u_long)(tvmax)) \ 450 (tv) = (tvmax); \ 451 } while (0) 452 453 static void 454 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line); 455 456 static int 457 rack_process_ack(struct mbuf *m, struct tcphdr *th, 458 struct socket *so, 
struct tcpcb *tp, struct tcpopt *to, 459 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val, int32_t orig_tlen); 460 static int 461 rack_process_data(struct mbuf *m, struct tcphdr *th, 462 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 463 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 464 static void 465 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, 466 uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery); 467 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack); 468 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack, 469 uint8_t limit_type); 470 static struct rack_sendmap * 471 rack_check_recovery_mode(struct tcpcb *tp, 472 uint32_t tsused); 473 static uint32_t 474 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack); 475 static void 476 rack_cong_signal(struct tcpcb *tp, 477 uint32_t type, uint32_t ack, int ); 478 static void rack_counter_destroy(void); 479 static int 480 rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt); 481 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how); 482 static void 483 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override); 484 static void 485 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 486 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos); 487 static void rack_dtor(void *mem, int32_t size, void *arg); 488 static void 489 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 490 uint32_t flex1, uint32_t flex2, 491 uint32_t flex3, uint32_t flex4, 492 uint32_t flex5, uint32_t flex6, 493 uint16_t flex7, uint8_t mod); 494 495 static void 496 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 497 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line, 498 struct rack_sendmap *rsm, uint8_t quality); 499 static struct rack_sendmap * 500 rack_find_high_nonack(struct tcp_rack *rack, 501 struct rack_sendmap *rsm); 502 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack); 503 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm); 504 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged); 505 static int rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt); 506 static void 507 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 508 tcp_seq th_ack, int line, uint8_t quality); 509 static void 510 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm); 511 512 static uint32_t 513 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss); 514 static int32_t rack_handoff_ok(struct tcpcb *tp); 515 static int32_t rack_init(struct tcpcb *tp, void **ptr); 516 static void rack_init_sysctls(void); 517 518 static void 519 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, 520 struct tcphdr *th, int entered_rec, int dup_ack_struck, 521 int *dsack_seen, int *sacks_seen); 522 static void 523 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 524 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t ts, 525 struct rack_sendmap *hintrsm, uint32_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls, int segsiz); 526 527 static uint64_t rack_get_gp_est(struct tcp_rack *rack); 528 529 530 static void 531 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack, 532 struct rack_sendmap *rsm, uint32_t cts); 533 static void rack_log_to_event(struct tcp_rack *rack, int32_t 
to_num, struct rack_sendmap *rsm); 534 static int32_t rack_output(struct tcpcb *tp); 535 536 static uint32_t 537 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, 538 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm, 539 uint32_t cts, uint32_t segsiz); 540 static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq); 541 static void rack_remxt_tmr(struct tcpcb *tp); 542 static int rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt); 543 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack); 544 static int32_t rack_stopall(struct tcpcb *tp); 545 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line); 546 static uint32_t 547 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 548 struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint32_t add_flag, int segsiz); 549 static void 550 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 551 struct rack_sendmap *rsm, uint64_t ts, uint32_t add_flag, int segsiz); 552 static int 553 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 554 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack); 555 static int32_t tcp_addrack(module_t mod, int32_t type, void *data); 556 static int 557 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, 558 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 559 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 560 561 static void 562 rack_peg_rxt(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t segsiz); 563 564 static int 565 rack_do_closing(struct mbuf *m, struct tcphdr *th, 566 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 567 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 568 static int 569 rack_do_established(struct mbuf *m, struct tcphdr *th, 570 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 571 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 572 static int 573 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, 574 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 575 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos); 576 static int 577 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, 578 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 579 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 580 static int 581 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, 582 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 583 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 584 static int 585 rack_do_lastack(struct mbuf *m, struct tcphdr *th, 586 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 587 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 588 static int 589 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, 590 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 591 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 592 static int 593 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, 594 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 595 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 596 static void rack_chk_req_and_hybrid_on_out(struct 
tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts); 597 struct rack_sendmap * 598 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, 599 uint32_t tsused); 600 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, 601 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt); 602 static void 603 tcp_rack_partialack(struct tcpcb *tp); 604 static int 605 rack_set_profile(struct tcp_rack *rack, int prof); 606 static void 607 rack_apply_deferred_options(struct tcp_rack *rack); 608 609 int32_t rack_clear_counter=0; 610 611 static uint64_t 612 rack_get_lt_bw(struct tcp_rack *rack) 613 { 614 struct timeval tv; 615 uint64_t tim, bytes; 616 617 tim = rack->r_ctl.lt_bw_time; 618 bytes = rack->r_ctl.lt_bw_bytes; 619 if (rack->lt_bw_up) { 620 /* Include all the current bytes too */ 621 microuptime(&tv); 622 bytes += (rack->rc_tp->snd_una - rack->r_ctl.lt_seq); 623 tim += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark); 624 } 625 if ((bytes != 0) && (tim != 0)) 626 return ((bytes * (uint64_t)1000000) / tim); 627 else 628 return (0); 629 } 630 631 static void 632 rack_swap_beta_values(struct tcp_rack *rack, uint8_t flex8) 633 { 634 struct sockopt sopt; 635 struct cc_newreno_opts opt; 636 struct newreno old; 637 struct tcpcb *tp; 638 int error, failed = 0; 639 640 tp = rack->rc_tp; 641 if (tp->t_cc == NULL) { 642 /* Tcb is leaving */ 643 return; 644 } 645 rack->rc_pacing_cc_set = 1; 646 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 647 /* Not new-reno we can't play games with beta! */ 648 failed = 1; 649 goto out; 650 651 } 652 if (CC_ALGO(tp)->ctl_output == NULL) { 653 /* Huh, not using new-reno so no swaps.? */ 654 failed = 2; 655 goto out; 656 } 657 /* Get the current values out */ 658 sopt.sopt_valsize = sizeof(struct cc_newreno_opts); 659 sopt.sopt_dir = SOPT_GET; 660 opt.name = CC_NEWRENO_BETA; 661 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 662 if (error) { 663 failed = 3; 664 goto out; 665 } 666 old.beta = opt.val; 667 opt.name = CC_NEWRENO_BETA_ECN; 668 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 669 if (error) { 670 failed = 4; 671 goto out; 672 } 673 old.beta_ecn = opt.val; 674 675 /* Now lets set in the values we have stored */ 676 sopt.sopt_dir = SOPT_SET; 677 opt.name = CC_NEWRENO_BETA; 678 opt.val = rack->r_ctl.rc_saved_beta.beta; 679 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 680 if (error) { 681 failed = 5; 682 goto out; 683 } 684 opt.name = CC_NEWRENO_BETA_ECN; 685 opt.val = rack->r_ctl.rc_saved_beta.beta_ecn; 686 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 687 if (error) { 688 failed = 6; 689 goto out; 690 } 691 /* Save off the values for restoral */ 692 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno)); 693 out: 694 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 695 union tcp_log_stackspecific log; 696 struct timeval tv; 697 struct newreno *ptr; 698 699 ptr = ((struct newreno *)tp->t_ccv.cc_data); 700 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 701 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 702 log.u_bbr.flex1 = ptr->beta; 703 log.u_bbr.flex2 = ptr->beta_ecn; 704 log.u_bbr.flex3 = ptr->newreno_flags; 705 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta; 706 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn; 707 log.u_bbr.flex6 = failed; 708 log.u_bbr.flex7 = rack->gp_ready; 709 log.u_bbr.flex7 <<= 1; 710 log.u_bbr.flex7 |= rack->use_fixed_rate; 711 log.u_bbr.flex7 <<= 1; 712 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; 713 
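/*
 * At this point flex7 packs three state bits for the log:
 * bit 2 = gp_ready, bit 1 = use_fixed_rate, bit 0 = rc_pacing_cc_set.
 */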
log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 714 log.u_bbr.flex8 = flex8; 715 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, error, 716 0, &log, false, NULL, NULL, 0, &tv); 717 } 718 } 719 720 static void 721 rack_set_cc_pacing(struct tcp_rack *rack) 722 { 723 if (rack->rc_pacing_cc_set) 724 return; 725 /* 726 * Use the swap utility placing in 3 for flex8 to id a 727 * set of a new set of values. 728 */ 729 rack->rc_pacing_cc_set = 1; 730 rack_swap_beta_values(rack, 3); 731 } 732 733 static void 734 rack_undo_cc_pacing(struct tcp_rack *rack) 735 { 736 if (rack->rc_pacing_cc_set == 0) 737 return; 738 /* 739 * Use the swap utility placing in 4 for flex8 to id a 740 * restoral of the old values. 741 */ 742 rack->rc_pacing_cc_set = 0; 743 rack_swap_beta_values(rack, 4); 744 } 745 746 static void 747 rack_remove_pacing(struct tcp_rack *rack) 748 { 749 if (rack->rc_pacing_cc_set) 750 rack_undo_cc_pacing(rack); 751 if (rack->r_ctl.pacing_method & RACK_REG_PACING) 752 tcp_decrement_paced_conn(); 753 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) 754 tcp_dec_dgp_pacing_cnt(); 755 rack->rc_always_pace = 0; 756 rack->r_ctl.pacing_method = RACK_PACING_NONE; 757 rack->dgp_on = 0; 758 rack->rc_hybrid_mode = 0; 759 rack->use_fixed_rate = 0; 760 } 761 762 static void 763 rack_log_gpset(struct tcp_rack *rack, uint32_t seq_end, uint32_t ack_end_t, 764 uint32_t send_end_t, int line, uint8_t mode, struct rack_sendmap *rsm) 765 { 766 if (tcp_bblogging_on(rack->rc_tp) && (rack_verbose_logging != 0)) { 767 union tcp_log_stackspecific log; 768 struct timeval tv; 769 770 memset(&log, 0, sizeof(log)); 771 log.u_bbr.flex1 = seq_end; 772 log.u_bbr.flex2 = rack->rc_tp->gput_seq; 773 log.u_bbr.flex3 = ack_end_t; 774 log.u_bbr.flex4 = rack->rc_tp->gput_ts; 775 log.u_bbr.flex5 = send_end_t; 776 log.u_bbr.flex6 = rack->rc_tp->gput_ack; 777 log.u_bbr.flex7 = mode; 778 log.u_bbr.flex8 = 69; 779 log.u_bbr.rttProp = rack->r_ctl.rc_gp_cumack_ts; 780 log.u_bbr.delRate = rack->r_ctl.rc_gp_output_ts; 781 log.u_bbr.pkts_out = line; 782 log.u_bbr.cwnd_gain = rack->app_limited_needs_set; 783 log.u_bbr.pkt_epoch = rack->r_ctl.rc_app_limited_cnt; 784 log.u_bbr.epoch = rack->r_ctl.current_round; 785 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 786 if (rsm != NULL) { 787 log.u_bbr.applimited = rsm->r_start; 788 log.u_bbr.delivered = rsm->r_end; 789 log.u_bbr.epoch = rsm->r_flags; 790 } 791 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 792 TCP_LOG_EVENTP(rack->rc_tp, NULL, 793 &rack->rc_inp->inp_socket->so_rcv, 794 &rack->rc_inp->inp_socket->so_snd, 795 BBR_LOG_HPTSI_CALC, 0, 796 0, &log, false, &tv); 797 } 798 } 799 800 static int 801 sysctl_rack_clear(SYSCTL_HANDLER_ARGS) 802 { 803 uint32_t stat; 804 int32_t error; 805 806 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t)); 807 if (error || req->newptr == NULL) 808 return error; 809 810 error = SYSCTL_IN(req, &stat, sizeof(uint32_t)); 811 if (error) 812 return (error); 813 if (stat == 1) { 814 #ifdef INVARIANTS 815 printf("Clearing RACK counters\n"); 816 #endif 817 counter_u64_zero(rack_tlp_tot); 818 counter_u64_zero(rack_tlp_newdata); 819 counter_u64_zero(rack_tlp_retran); 820 counter_u64_zero(rack_tlp_retran_bytes); 821 counter_u64_zero(rack_to_tot); 822 counter_u64_zero(rack_saw_enobuf); 823 counter_u64_zero(rack_saw_enobuf_hw); 824 counter_u64_zero(rack_saw_enetunreach); 825 counter_u64_zero(rack_persists_sends); 826 counter_u64_zero(rack_total_bytes); 827 counter_u64_zero(rack_persists_acks); 828 counter_u64_zero(rack_persists_loss); 829 
counter_u64_zero(rack_persists_lost_ends); 830 #ifdef INVARIANTS 831 counter_u64_zero(rack_adjust_map_bw); 832 #endif 833 counter_u64_zero(rack_to_alloc_hard); 834 counter_u64_zero(rack_to_alloc_emerg); 835 counter_u64_zero(rack_sack_proc_all); 836 counter_u64_zero(rack_fto_send); 837 counter_u64_zero(rack_fto_rsm_send); 838 counter_u64_zero(rack_extended_rfo); 839 counter_u64_zero(rack_hw_pace_init_fail); 840 counter_u64_zero(rack_hw_pace_lost); 841 counter_u64_zero(rack_non_fto_send); 842 counter_u64_zero(rack_nfto_resend); 843 counter_u64_zero(rack_sack_proc_short); 844 counter_u64_zero(rack_sack_proc_restart); 845 counter_u64_zero(rack_to_alloc); 846 counter_u64_zero(rack_to_alloc_limited); 847 counter_u64_zero(rack_alloc_limited_conns); 848 counter_u64_zero(rack_split_limited); 849 counter_u64_zero(rack_rxt_clamps_cwnd); 850 counter_u64_zero(rack_rxt_clamps_cwnd_uniq); 851 counter_u64_zero(rack_multi_single_eq); 852 counter_u64_zero(rack_proc_non_comp_ack); 853 counter_u64_zero(rack_sack_attacks_detected); 854 counter_u64_zero(rack_sack_attacks_reversed); 855 counter_u64_zero(rack_sack_attacks_suspect); 856 counter_u64_zero(rack_sack_used_next_merge); 857 counter_u64_zero(rack_sack_used_prev_merge); 858 counter_u64_zero(rack_sack_splits); 859 counter_u64_zero(rack_sack_skipped_acked); 860 counter_u64_zero(rack_ack_total); 861 counter_u64_zero(rack_express_sack); 862 counter_u64_zero(rack_sack_total); 863 counter_u64_zero(rack_move_none); 864 counter_u64_zero(rack_move_some); 865 counter_u64_zero(rack_try_scwnd); 866 counter_u64_zero(rack_collapsed_win); 867 counter_u64_zero(rack_collapsed_win_rxt); 868 counter_u64_zero(rack_collapsed_win_seen); 869 counter_u64_zero(rack_collapsed_win_rxt_bytes); 870 } else if (stat == 2) { 871 #ifdef INVARIANTS 872 printf("Clearing RACK option array\n"); 873 #endif 874 COUNTER_ARRAY_ZERO(rack_opts_arry, RACK_OPTS_SIZE); 875 } else if (stat == 3) { 876 printf("Rack has no stats counters to clear (use 1 to clear all stats in sysctl node)\n"); 877 } else if (stat == 4) { 878 #ifdef INVARIANTS 879 printf("Clearing RACK out size array\n"); 880 #endif 881 COUNTER_ARRAY_ZERO(rack_out_size, TCP_MSS_ACCT_SIZE); 882 } 883 rack_clear_counter = 0; 884 return (0); 885 } 886 887 static void 888 rack_init_sysctls(void) 889 { 890 struct sysctl_oid *rack_counters; 891 struct sysctl_oid *rack_attack; 892 struct sysctl_oid *rack_pacing; 893 struct sysctl_oid *rack_timely; 894 struct sysctl_oid *rack_timers; 895 struct sysctl_oid *rack_tlp; 896 struct sysctl_oid *rack_misc; 897 struct sysctl_oid *rack_features; 898 struct sysctl_oid *rack_measure; 899 struct sysctl_oid *rack_probertt; 900 struct sysctl_oid *rack_hw_pacing; 901 struct sysctl_oid *rack_policing; 902 903 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 904 SYSCTL_CHILDREN(rack_sysctl_root), 905 OID_AUTO, 906 "sack_attack", 907 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 908 "Rack Sack Attack Counters and Controls"); 909 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 910 SYSCTL_CHILDREN(rack_sysctl_root), 911 OID_AUTO, 912 "stats", 913 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 914 "Rack Counters"); 915 SYSCTL_ADD_S32(&rack_sysctl_ctx, 916 SYSCTL_CHILDREN(rack_sysctl_root), 917 OID_AUTO, "rate_sample_method", CTLFLAG_RW, 918 &rack_rate_sample_method , USE_RTT_LOW, 919 "What method should we use for rate sampling 0=high, 1=low "); 920 /* Probe rtt related controls */ 921 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 922 SYSCTL_CHILDREN(rack_sysctl_root), 923 OID_AUTO, 924 "probertt", 925 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 926 
"ProbeRTT related Controls"); 927 SYSCTL_ADD_U16(&rack_sysctl_ctx, 928 SYSCTL_CHILDREN(rack_probertt), 929 OID_AUTO, "exit_per_hpb", CTLFLAG_RW, 930 &rack_atexit_prtt_hbp, 130, 931 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%"); 932 SYSCTL_ADD_U16(&rack_sysctl_ctx, 933 SYSCTL_CHILDREN(rack_probertt), 934 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW, 935 &rack_atexit_prtt, 130, 936 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%"); 937 SYSCTL_ADD_U16(&rack_sysctl_ctx, 938 SYSCTL_CHILDREN(rack_probertt), 939 OID_AUTO, "gp_per_mul", CTLFLAG_RW, 940 &rack_per_of_gp_probertt, 60, 941 "What percentage of goodput do we pace at in probertt"); 942 SYSCTL_ADD_U16(&rack_sysctl_ctx, 943 SYSCTL_CHILDREN(rack_probertt), 944 OID_AUTO, "gp_per_reduce", CTLFLAG_RW, 945 &rack_per_of_gp_probertt_reduce, 10, 946 "What percentage of goodput do we reduce every gp_srtt"); 947 SYSCTL_ADD_U16(&rack_sysctl_ctx, 948 SYSCTL_CHILDREN(rack_probertt), 949 OID_AUTO, "gp_per_low", CTLFLAG_RW, 950 &rack_per_of_gp_lowthresh, 40, 951 "What percentage of goodput do we allow the multiplier to fall to"); 952 SYSCTL_ADD_U32(&rack_sysctl_ctx, 953 SYSCTL_CHILDREN(rack_probertt), 954 OID_AUTO, "time_between", CTLFLAG_RW, 955 & rack_time_between_probertt, 96000000, 956 "How many useconds between the lowest rtt falling must past before we enter probertt"); 957 SYSCTL_ADD_U32(&rack_sysctl_ctx, 958 SYSCTL_CHILDREN(rack_probertt), 959 OID_AUTO, "safety", CTLFLAG_RW, 960 &rack_probe_rtt_safety_val, 2000000, 961 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)"); 962 SYSCTL_ADD_U32(&rack_sysctl_ctx, 963 SYSCTL_CHILDREN(rack_probertt), 964 OID_AUTO, "sets_cwnd", CTLFLAG_RW, 965 &rack_probe_rtt_sets_cwnd, 0, 966 "Do we set the cwnd too (if always_lower is on)"); 967 SYSCTL_ADD_U32(&rack_sysctl_ctx, 968 SYSCTL_CHILDREN(rack_probertt), 969 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW, 970 &rack_max_drain_wait, 2, 971 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal"); 972 SYSCTL_ADD_U32(&rack_sysctl_ctx, 973 SYSCTL_CHILDREN(rack_probertt), 974 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW, 975 &rack_must_drain, 1, 976 "We must drain this many gp_srtt's waiting for flight to reach goal"); 977 SYSCTL_ADD_U32(&rack_sysctl_ctx, 978 SYSCTL_CHILDREN(rack_probertt), 979 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW, 980 &rack_probertt_use_min_rtt_entry, 1, 981 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); 982 SYSCTL_ADD_U32(&rack_sysctl_ctx, 983 SYSCTL_CHILDREN(rack_probertt), 984 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW, 985 &rack_probertt_use_min_rtt_exit, 0, 986 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); 987 SYSCTL_ADD_U32(&rack_sysctl_ctx, 988 SYSCTL_CHILDREN(rack_probertt), 989 OID_AUTO, "length_div", CTLFLAG_RW, 990 &rack_probertt_gpsrtt_cnt_div, 0, 991 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)"); 992 SYSCTL_ADD_U32(&rack_sysctl_ctx, 993 SYSCTL_CHILDREN(rack_probertt), 994 OID_AUTO, "length_mul", CTLFLAG_RW, 995 &rack_probertt_gpsrtt_cnt_mul, 0, 996 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)"); 997 SYSCTL_ADD_U32(&rack_sysctl_ctx, 998 SYSCTL_CHILDREN(rack_probertt), 999 OID_AUTO, "holdtim_at_target", CTLFLAG_RW, 1000 &rack_min_probertt_hold, 200000, 1001 "What is the minimum time we hold probertt at target"); 1002 
SYSCTL_ADD_U32(&rack_sysctl_ctx, 1003 SYSCTL_CHILDREN(rack_probertt), 1004 OID_AUTO, "filter_life", CTLFLAG_RW, 1005 &rack_probertt_filter_life, 10000000, 1006 "What is the time for the filters life in useconds"); 1007 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1008 SYSCTL_CHILDREN(rack_probertt), 1009 OID_AUTO, "lower_within", CTLFLAG_RW, 1010 &rack_probertt_lower_within, 10, 1011 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); 1012 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1013 SYSCTL_CHILDREN(rack_probertt), 1014 OID_AUTO, "must_move", CTLFLAG_RW, 1015 &rack_min_rtt_movement, 250, 1016 "How much is the minimum movement in rtt to count as a drop for probertt purposes"); 1017 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1018 SYSCTL_CHILDREN(rack_probertt), 1019 OID_AUTO, "clear_is_cnts", CTLFLAG_RW, 1020 &rack_probertt_clear_is, 1, 1021 "Do we clear I/S counts on exiting probe-rtt"); 1022 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1023 SYSCTL_CHILDREN(rack_probertt), 1024 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW, 1025 &rack_max_drain_hbp, 1, 1026 "How many extra drain gpsrtt's do we get in highly buffered paths"); 1027 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1028 SYSCTL_CHILDREN(rack_probertt), 1029 OID_AUTO, "hbp_threshold", CTLFLAG_RW, 1030 &rack_hbp_thresh, 3, 1031 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); 1032 /* Pacing related sysctls */ 1033 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1034 SYSCTL_CHILDREN(rack_sysctl_root), 1035 OID_AUTO, 1036 "pacing", 1037 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1038 "Pacing related Controls"); 1039 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1040 SYSCTL_CHILDREN(rack_pacing), 1041 OID_AUTO, "pcm_enabled", CTLFLAG_RW, 1042 &rack_pcm_is_enabled, 1, 1043 "Do we by default do PCM measurements?"); 1044 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1045 SYSCTL_CHILDREN(rack_pacing), 1046 OID_AUTO, "pcm_rnds", CTLFLAG_RW, 1047 &rack_pcm_every_n_rounds, 100, 1048 "How many rounds before we need to do a PCM measurement"); 1049 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1050 SYSCTL_CHILDREN(rack_pacing), 1051 OID_AUTO, "pcm_blast", CTLFLAG_RW, 1052 &rack_pcm_blast, 0, 1053 "Blast out the full cwnd/rwnd when doing a PCM measurement"); 1054 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1055 SYSCTL_CHILDREN(rack_pacing), 1056 OID_AUTO, "rnd_gp_gain", CTLFLAG_RW, 1057 &rack_gp_gain_req, 1200, 1058 "How much do we have to increase the GP to record the round 1200 = 120.0"); 1059 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1060 SYSCTL_CHILDREN(rack_pacing), 1061 OID_AUTO, "dgp_out_of_ss_at", CTLFLAG_RW, 1062 &rack_rnd_cnt_req, 0x10005, 1063 "How many rounds less than rnd_gp_gain will drop us out of SS"); 1064 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1065 SYSCTL_CHILDREN(rack_pacing), 1066 OID_AUTO, "no_timely", CTLFLAG_RW, 1067 &rack_timely_off, 0, 1068 "Do we not use timely in DGP?"); 1069 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1070 SYSCTL_CHILDREN(rack_pacing), 1071 OID_AUTO, "fullbufdisc", CTLFLAG_RW, 1072 &rack_full_buffer_discount, 10, 1073 "What percentage b/w reduction over the GP estimate for a full buffer (default=0 off)?"); 1074 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1075 SYSCTL_CHILDREN(rack_pacing), 1076 OID_AUTO, "fillcw", CTLFLAG_RW, 1077 &rack_fill_cw_state, 0, 1078 "Enable fillcw on new connections (default=0 off)?"); 1079 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1080 SYSCTL_CHILDREN(rack_pacing), 1081 OID_AUTO, "min_burst", CTLFLAG_RW, 1082 &rack_pacing_min_seg, 0, 1083 "What is the min burst size for pacing (0 disables)?"); 1084 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1085 SYSCTL_CHILDREN(rack_pacing), 1086 OID_AUTO, 
"divisor", CTLFLAG_RW, 1087 &rack_default_pacing_divisor, 250, 1088 "What is the default divisor given to the rl code?"); 1089 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1090 SYSCTL_CHILDREN(rack_pacing), 1091 OID_AUTO, "fillcw_max_mult", CTLFLAG_RW, 1092 &rack_bw_multipler, 0, 1093 "What is the limit multiplier of the current gp_est that fillcw can increase the b/w too, 200 == 200% (0 = off)?"); 1094 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1095 SYSCTL_CHILDREN(rack_pacing), 1096 OID_AUTO, "max_pace_over", CTLFLAG_RW, 1097 &rack_max_per_above, 30, 1098 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)"); 1099 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1100 SYSCTL_CHILDREN(rack_pacing), 1101 OID_AUTO, "allow1mss", CTLFLAG_RW, 1102 &rack_pace_one_seg, 0, 1103 "Do we allow low b/w pacing of 1MSS instead of two (1.2Meg and less)?"); 1104 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1105 SYSCTL_CHILDREN(rack_pacing), 1106 OID_AUTO, "limit_wsrtt", CTLFLAG_RW, 1107 &rack_limit_time_with_srtt, 0, 1108 "Do we limit pacing time based on srtt"); 1109 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1110 SYSCTL_CHILDREN(rack_pacing), 1111 OID_AUTO, "gp_per_ss", CTLFLAG_RW, 1112 &rack_per_of_gp_ss, 250, 1113 "If non zero, what percentage of goodput to pace at in slow start"); 1114 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1115 SYSCTL_CHILDREN(rack_pacing), 1116 OID_AUTO, "gp_per_ca", CTLFLAG_RW, 1117 &rack_per_of_gp_ca, 150, 1118 "If non zero, what percentage of goodput to pace at in congestion avoidance"); 1119 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1120 SYSCTL_CHILDREN(rack_pacing), 1121 OID_AUTO, "gp_per_rec", CTLFLAG_RW, 1122 &rack_per_of_gp_rec, 200, 1123 "If non zero, what percentage of goodput to pace at in recovery"); 1124 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1125 SYSCTL_CHILDREN(rack_pacing), 1126 OID_AUTO, "pace_max_seg", CTLFLAG_RW, 1127 &rack_hptsi_segments, 40, 1128 "What size is the max for TSO segments in pacing and burst mitigation"); 1129 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1130 SYSCTL_CHILDREN(rack_pacing), 1131 OID_AUTO, "burst_reduces", CTLFLAG_RW, 1132 &rack_slot_reduction, 4, 1133 "When doing only burst mitigation what is the reduce divisor"); 1134 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1135 SYSCTL_CHILDREN(rack_sysctl_root), 1136 OID_AUTO, "use_pacing", CTLFLAG_RW, 1137 &rack_pace_every_seg, 0, 1138 "If set we use pacing, if clear we use only the original burst mitigation"); 1139 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1140 SYSCTL_CHILDREN(rack_pacing), 1141 OID_AUTO, "rate_cap", CTLFLAG_RW, 1142 &rack_bw_rate_cap, 0, 1143 "If set we apply this value to the absolute rate cap used by pacing"); 1144 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1145 SYSCTL_CHILDREN(rack_pacing), 1146 OID_AUTO, "fillcw_cap", CTLFLAG_RW, 1147 &rack_fillcw_bw_cap, 3750000, 1148 "Do we have an absolute cap on the amount of b/w fillcw can specify (0 = no)?"); 1149 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1150 SYSCTL_CHILDREN(rack_sysctl_root), 1151 OID_AUTO, "req_measure_cnt", CTLFLAG_RW, 1152 &rack_req_measurements, 1, 1153 "If doing dynamic pacing, how many measurements must be in before we start pacing?"); 1154 /* Hardware pacing */ 1155 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1156 SYSCTL_CHILDREN(rack_sysctl_root), 1157 OID_AUTO, 1158 "hdwr_pacing", 1159 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1160 "Pacing related Controls"); 1161 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1162 SYSCTL_CHILDREN(rack_hw_pacing), 1163 OID_AUTO, "rwnd_factor", CTLFLAG_RW, 1164 &rack_hw_rwnd_factor, 2, 1165 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and 
get more acks?"); 1166 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1167 SYSCTL_CHILDREN(rack_hw_pacing), 1168 OID_AUTO, "precheck", CTLFLAG_RW, 1169 &rack_hw_check_queue, 0, 1170 "Do we always precheck the hdwr pacing queue to avoid ENOBUF's?"); 1171 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1172 SYSCTL_CHILDREN(rack_hw_pacing), 1173 OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW, 1174 &rack_enobuf_hw_boost_mult, 0, 1175 "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?"); 1176 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1177 SYSCTL_CHILDREN(rack_hw_pacing), 1178 OID_AUTO, "pace_enobuf_max", CTLFLAG_RW, 1179 &rack_enobuf_hw_max, 2, 1180 "What is the max boost the pacing time if we see a ENOBUFS?"); 1181 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1182 SYSCTL_CHILDREN(rack_hw_pacing), 1183 OID_AUTO, "pace_enobuf_min", CTLFLAG_RW, 1184 &rack_enobuf_hw_min, 2, 1185 "What is the min boost the pacing time if we see a ENOBUFS?"); 1186 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1187 SYSCTL_CHILDREN(rack_hw_pacing), 1188 OID_AUTO, "enable", CTLFLAG_RW, 1189 &rack_enable_hw_pacing, 0, 1190 "Should RACK attempt to use hw pacing?"); 1191 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1192 SYSCTL_CHILDREN(rack_hw_pacing), 1193 OID_AUTO, "rate_cap", CTLFLAG_RW, 1194 &rack_hw_rate_caps, 0, 1195 "Does the highest hardware pacing rate cap the rate we will send at??"); 1196 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1197 SYSCTL_CHILDREN(rack_hw_pacing), 1198 OID_AUTO, "uncap_per", CTLFLAG_RW, 1199 &rack_hw_rate_cap_per, 0, 1200 "If you go over b/w by this amount you will be uncapped (0 = never)"); 1201 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1202 SYSCTL_CHILDREN(rack_hw_pacing), 1203 OID_AUTO, "rate_min", CTLFLAG_RW, 1204 &rack_hw_rate_min, 0, 1205 "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?"); 1206 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1207 SYSCTL_CHILDREN(rack_hw_pacing), 1208 OID_AUTO, "rate_to_low", CTLFLAG_RW, 1209 &rack_hw_rate_to_low, 0, 1210 "If we fall below this rate, dis-engage hw pacing?"); 1211 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1212 SYSCTL_CHILDREN(rack_hw_pacing), 1213 OID_AUTO, "up_only", CTLFLAG_RW, 1214 &rack_hw_up_only, 0, 1215 "Do we allow hw pacing to lower the rate selected?"); 1216 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1217 SYSCTL_CHILDREN(rack_hw_pacing), 1218 OID_AUTO, "extra_mss_precise", CTLFLAG_RW, 1219 &rack_hw_pace_extra_slots, 0, 1220 "If the rates between software and hardware match precisely how many extra time_betweens do we get?"); 1221 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1222 SYSCTL_CHILDREN(rack_sysctl_root), 1223 OID_AUTO, 1224 "timely", 1225 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1226 "Rack Timely RTT Controls"); 1227 /* Timely based GP dynmics */ 1228 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1229 SYSCTL_CHILDREN(rack_timely), 1230 OID_AUTO, "upper", CTLFLAG_RW, 1231 &rack_gp_per_bw_mul_up, 2, 1232 "Rack timely upper range for equal b/w (in percentage)"); 1233 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1234 SYSCTL_CHILDREN(rack_timely), 1235 OID_AUTO, "lower", CTLFLAG_RW, 1236 &rack_gp_per_bw_mul_down, 4, 1237 "Rack timely lower range for equal b/w (in percentage)"); 1238 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1239 SYSCTL_CHILDREN(rack_timely), 1240 OID_AUTO, "rtt_max_mul", CTLFLAG_RW, 1241 &rack_gp_rtt_maxmul, 3, 1242 "Rack timely multiplier of lowest rtt for rtt_max"); 1243 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1244 SYSCTL_CHILDREN(rack_timely), 1245 OID_AUTO, "rtt_min_div", CTLFLAG_RW, 1246 &rack_gp_rtt_mindiv, 4, 1247 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1248 
SYSCTL_ADD_S32(&rack_sysctl_ctx, 1249 SYSCTL_CHILDREN(rack_timely), 1250 OID_AUTO, "rtt_min_mul", CTLFLAG_RW, 1251 &rack_gp_rtt_minmul, 1, 1252 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1253 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1254 SYSCTL_CHILDREN(rack_timely), 1255 OID_AUTO, "decrease", CTLFLAG_RW, 1256 &rack_gp_decrease_per, 80, 1257 "Rack timely Beta value 80 = .8 (scaled by 100)"); 1258 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1259 SYSCTL_CHILDREN(rack_timely), 1260 OID_AUTO, "increase", CTLFLAG_RW, 1261 &rack_gp_increase_per, 2, 1262 "Rack timely increase percentage of our GP multiplication factor"); 1263 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1264 SYSCTL_CHILDREN(rack_timely), 1265 OID_AUTO, "lowerbound", CTLFLAG_RW, 1266 &rack_per_lower_bound, 50, 1267 "Rack timely lowest percentage we allow GP multiplier to fall to"); 1268 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1269 SYSCTL_CHILDREN(rack_timely), 1270 OID_AUTO, "p5_upper", CTLFLAG_RW, 1271 &rack_gain_p5_ub, 250, 1272 "Profile 5 upper bound to timely gain"); 1273 1274 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1275 SYSCTL_CHILDREN(rack_timely), 1276 OID_AUTO, "upperboundss", CTLFLAG_RW, 1277 &rack_per_upper_bound_ss, 0, 1278 "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)"); 1279 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1280 SYSCTL_CHILDREN(rack_timely), 1281 OID_AUTO, "upperboundca", CTLFLAG_RW, 1282 &rack_per_upper_bound_ca, 0, 1283 "Rack timely highest percentage we allow GP multiplier in CA to raise to (0 is no upperbound)"); 1284 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1285 SYSCTL_CHILDREN(rack_timely), 1286 OID_AUTO, "dynamicgp", CTLFLAG_RW, 1287 &rack_do_dyn_mul, 0, 1288 "Rack timely do we enable dynamic timely goodput by default"); 1289 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1290 SYSCTL_CHILDREN(rack_timely), 1291 OID_AUTO, "no_rec_red", CTLFLAG_RW, 1292 &rack_gp_no_rec_chg, 1, 1293 "Rack timely do we prohibit the recovery multiplier from being lowered"); 1294 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1295 SYSCTL_CHILDREN(rack_timely), 1296 OID_AUTO, "red_clear_cnt", CTLFLAG_RW, 1297 &rack_timely_dec_clear, 6, 1298 "Rack timely what threshold do we count to before another boost during b/w descent"); 1299 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1300 SYSCTL_CHILDREN(rack_timely), 1301 OID_AUTO, "max_push_rise", CTLFLAG_RW, 1302 &rack_timely_max_push_rise, 3, 1303 "Rack timely how many times do we push up with b/w increase"); 1304 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1305 SYSCTL_CHILDREN(rack_timely), 1306 OID_AUTO, "max_push_drop", CTLFLAG_RW, 1307 &rack_timely_max_push_drop, 3, 1308 "Rack timely how many times do we push back on b/w descent"); 1309 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1310 SYSCTL_CHILDREN(rack_timely), 1311 OID_AUTO, "min_segs", CTLFLAG_RW, 1312 &rack_timely_min_segs, 4, 1313 "Rack timely when setting the cwnd what is the min num segments"); 1314 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1315 SYSCTL_CHILDREN(rack_timely), 1316 OID_AUTO, "noback_max", CTLFLAG_RW, 1317 &rack_use_max_for_nobackoff, 0, 1318 "Rack timely when deciding whether to back off on a loss, do we use under max rtt else min"); 1319 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1320 SYSCTL_CHILDREN(rack_timely), 1321 OID_AUTO, "interim_timely_only", CTLFLAG_RW, 1322 &rack_timely_int_timely_only, 0, 1323 "Rack timely when doing interim timely's do we only do timely (no b/w consideration)"); 1324 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1325 SYSCTL_CHILDREN(rack_timely), 1326 OID_AUTO, "nonstop", CTLFLAG_RW, 1327 &rack_timely_no_stopping, 0, 1328 "Rack timely don't stop 
increase"); 1329 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1330 SYSCTL_CHILDREN(rack_timely), 1331 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW, 1332 &rack_down_raise_thresh, 100, 1333 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)"); 1334 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1335 SYSCTL_CHILDREN(rack_timely), 1336 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW, 1337 &rack_req_segs, 1, 1338 "Bottom dragging if not these many segments outstanding and room"); 1339 1340 /* TLP and Rack related parameters */ 1341 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1342 SYSCTL_CHILDREN(rack_sysctl_root), 1343 OID_AUTO, 1344 "tlp", 1345 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1346 "TLP and Rack related Controls"); 1347 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1348 SYSCTL_CHILDREN(rack_tlp), 1349 OID_AUTO, "use_rrr", CTLFLAG_RW, 1350 &use_rack_rr, 1, 1351 "Do we use Rack Rapid Recovery"); 1352 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1353 SYSCTL_CHILDREN(rack_tlp), 1354 OID_AUTO, "post_rec_labc", CTLFLAG_RW, 1355 &rack_max_abc_post_recovery, 2, 1356 "Since we do early recovery, do we override the l_abc to a value, if so what?"); 1357 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1358 SYSCTL_CHILDREN(rack_tlp), 1359 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW, 1360 &rack_non_rxt_use_cr, 0, 1361 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk"); 1362 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1363 SYSCTL_CHILDREN(rack_tlp), 1364 OID_AUTO, "tlpmethod", CTLFLAG_RW, 1365 &rack_tlp_threshold_use, TLP_USE_TWO_ONE, 1366 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); 1367 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1368 SYSCTL_CHILDREN(rack_tlp), 1369 OID_AUTO, "limit", CTLFLAG_RW, 1370 &rack_tlp_limit, 2, 1371 "How many TLP's can be sent without sending new data"); 1372 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1373 SYSCTL_CHILDREN(rack_tlp), 1374 OID_AUTO, "use_greater", CTLFLAG_RW, 1375 &rack_tlp_use_greater, 1, 1376 "Should we use the rack_rtt time if its greater than srtt"); 1377 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1378 SYSCTL_CHILDREN(rack_tlp), 1379 OID_AUTO, "tlpminto", CTLFLAG_RW, 1380 &rack_tlp_min, 10000, 1381 "TLP minimum timeout per the specification (in microseconds)"); 1382 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1383 SYSCTL_CHILDREN(rack_tlp), 1384 OID_AUTO, "send_oldest", CTLFLAG_RW, 1385 &rack_always_send_oldest, 0, 1386 "Should we always send the oldest TLP and RACK-TLP"); 1387 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1388 SYSCTL_CHILDREN(rack_tlp), 1389 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW, 1390 &rack_lower_cwnd_at_tlp, 0, 1391 "When a TLP completes a retran should we enter recovery"); 1392 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1393 SYSCTL_CHILDREN(rack_tlp), 1394 OID_AUTO, "reorder_thresh", CTLFLAG_RW, 1395 &rack_reorder_thresh, 2, 1396 "What factor for rack will be added when seeing reordering (shift right)"); 1397 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1398 SYSCTL_CHILDREN(rack_tlp), 1399 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW, 1400 &rack_tlp_thresh, 1, 1401 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); 1402 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1403 SYSCTL_CHILDREN(rack_tlp), 1404 OID_AUTO, "reorder_fade", CTLFLAG_RW, 1405 &rack_reorder_fade, 60000000, 1406 "Does reorder detection fade, if so how many microseconds (0 means never)"); 1407 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1408 SYSCTL_CHILDREN(rack_tlp), 1409 OID_AUTO, "pktdelay", CTLFLAG_RW, 1410 &rack_pkt_delay, 1000, 1411 "Extra RACK time (in microseconds) besides reordering thresh"); 1412 1413 /* Timer related controls */ 1414 rack_timers = 
SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1415 SYSCTL_CHILDREN(rack_sysctl_root), 1416 OID_AUTO, 1417 "timers", 1418 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1419 "Timer related controls"); 1420 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1421 SYSCTL_CHILDREN(rack_timers), 1422 OID_AUTO, "reset_ssth_rec_rto", CTLFLAG_RW, 1423 &rack_ssthresh_rest_rto_rec, 0, 1424 "When doing recovery -> rto -> recovery do we reset SSthresh?"); 1425 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1426 SYSCTL_CHILDREN(rack_timers), 1427 OID_AUTO, "scoreboard_thresh", CTLFLAG_RW, 1428 &rack_rxt_scoreboard_clear_thresh, 2, 1429 "How many RTO's are allowed before we clear the scoreboard"); 1430 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1431 SYSCTL_CHILDREN(rack_timers), 1432 OID_AUTO, "honor_hpts_min", CTLFLAG_RW, 1433 &rack_honors_hpts_min_to, 1, 1434 "Do rack pacing timers honor hpts min timeout"); 1435 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1436 SYSCTL_CHILDREN(rack_timers), 1437 OID_AUTO, "hpts_max_reduce", CTLFLAG_RW, 1438 &rack_max_reduce, 10, 1439 "Max percentage we will reduce slot by for pacing when we are behind"); 1440 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1441 SYSCTL_CHILDREN(rack_timers), 1442 OID_AUTO, "persmin", CTLFLAG_RW, 1443 &rack_persist_min, 250000, 1444 "What is the minimum time in microseconds between persists"); 1445 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1446 SYSCTL_CHILDREN(rack_timers), 1447 OID_AUTO, "persmax", CTLFLAG_RW, 1448 &rack_persist_max, 2000000, 1449 "What is the largest delay in microseconds between persists"); 1450 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1451 SYSCTL_CHILDREN(rack_timers), 1452 OID_AUTO, "delayed_ack", CTLFLAG_RW, 1453 &rack_delayed_ack_time, 40000, 1454 "Delayed ack time (40ms in microseconds)"); 1455 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1456 SYSCTL_CHILDREN(rack_timers), 1457 OID_AUTO, "minrto", CTLFLAG_RW, 1458 &rack_rto_min, 30000, 1459 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP"); 1460 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1461 SYSCTL_CHILDREN(rack_timers), 1462 OID_AUTO, "maxrto", CTLFLAG_RW, 1463 &rack_rto_max, 4000000, 1464 "Maximum RTO in microseconds -- should be at least as large as min_rto"); 1465 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1466 SYSCTL_CHILDREN(rack_timers), 1467 OID_AUTO, "minto", CTLFLAG_RW, 1468 &rack_min_to, 1000, 1469 "Minimum rack timeout in microseconds"); 1470 /* Measure controls */ 1471 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1472 SYSCTL_CHILDREN(rack_sysctl_root), 1473 OID_AUTO, 1474 "measure", 1475 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1476 "Measure related controls"); 1477 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1478 SYSCTL_CHILDREN(rack_measure), 1479 OID_AUTO, "wma_divisor", CTLFLAG_RW, 1480 &rack_wma_divisor, 8, 1481 "When doing b/w calculation what is the divisor for the WMA"); 1482 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1483 SYSCTL_CHILDREN(rack_measure), 1484 OID_AUTO, "end_cwnd", CTLFLAG_RW, 1485 &rack_cwnd_block_ends_measure, 0, 1486 "Does a cwnd just-return end the measurement window (app limited)"); 1487 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1488 SYSCTL_CHILDREN(rack_measure), 1489 OID_AUTO, "end_rwnd", CTLFLAG_RW, 1490 &rack_rwnd_block_ends_measure, 0, 1491 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); 1492 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1493 SYSCTL_CHILDREN(rack_measure), 1494 OID_AUTO, "min_target", CTLFLAG_RW, 1495 &rack_def_data_window, 20, 1496 "What is the minimum target window (in mss) for a GP measurements"); 1497 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1498 SYSCTL_CHILDREN(rack_measure), 1499 OID_AUTO, "goal_bdp", CTLFLAG_RW, 1500 
	    &rack_goal_bdp, 2,
	    "What is the goal BDP to measure");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "min_srtts", CTLFLAG_RW,
	    &rack_min_srtts, 1,
	    "What is the minimum number of SRTTs a GP measurement must span");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "min_measure_tim", CTLFLAG_RW,
	    &rack_min_measure_usec, 0,
	    "What is the minimum time (in microseconds) for a measurement (0 means this check is off)");
	/* Features */
	rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "features",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Feature controls");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_features),
	    OID_AUTO, "hybrid_set_maxseg", CTLFLAG_RW,
	    &rack_hybrid_allow_set_maxseg, 0,
	    "Should hybrid pacing allow the setmss command");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_features),
	    OID_AUTO, "cmpack", CTLFLAG_RW,
	    &rack_use_cmp_acks, 1,
	    "Should RACK have LRO send compressed acks");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_features),
	    OID_AUTO, "fsb", CTLFLAG_RW,
	    &rack_use_fsb, 1,
	    "Should RACK use the fast send block?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_features),
	    OID_AUTO, "rfo", CTLFLAG_RW,
	    &rack_use_rfo, 1,
	    "Should RACK use rack_fast_output()?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_features),
	    OID_AUTO, "rsmrfo", CTLFLAG_RW,
	    &rack_use_rsm_rfo, 1,
	    "Should RACK use rack_fast_rsm_output()?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_features),
	    OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW,
	    &rack_enable_mqueue_for_nonpaced, 0,
	    "Should RACK use mbuf queuing for non-paced connections");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_features),
	    OID_AUTO, "hystartplusplus", CTLFLAG_RW,
	    &rack_do_hystart, 0,
	    "Should RACK enable HyStart++ on connections?");
	/* Policer detection */
	rack_policing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "policing",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Policer detection");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_policing),
	    OID_AUTO, "rxt_thresh", CTLFLAG_RW,
	    &rack_policer_rxt_thresh, 0,
	    "Percentage of retransmits we need to be a possible policer (499 = 49.9 percent)");
	SYSCTL_ADD_U8(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_policing),
	    OID_AUTO, "avg_thresh", CTLFLAG_RW,
	    &rack_policer_avg_thresh, 0,
	    "What threshold of average retransmits needed to recover a lost packet (1 - 169 aka 21 = 2.1)?");
	SYSCTL_ADD_U8(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_policing),
	    OID_AUTO, "med_thresh", CTLFLAG_RW,
	    &rack_policer_med_thresh, 0,
	    "What threshold of median retransmits needed to recover a lost packet (1 - 16)?");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_policing),
	    OID_AUTO, "data_thresh", CTLFLAG_RW,
	    &rack_policer_data_thresh, 64000,
	    "How many bytes must have gotten through before we can start doing policer detection?");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_policing),
	    OID_AUTO, "bwcomp", CTLFLAG_RW,
	    &rack_policing_do_bw_comp, 1,
	    "Do we raise up low b/w so that at least pace_max_seg can be sent in the srtt?");
	SYSCTL_ADD_U8(&rack_sysctl_ctx,
SYSCTL_CHILDREN(rack_policing), 1588 OID_AUTO, "recmss", CTLFLAG_RW, 1589 &rack_req_del_mss, 18, 1590 "How many MSS must be delivered during recovery to engage policer detection?"); 1591 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1592 SYSCTL_CHILDREN(rack_policing), 1593 OID_AUTO, "res_div", CTLFLAG_RW, 1594 &rack_policer_bucket_reserve, 20, 1595 "What percentage is reserved in the policer bucket?"); 1596 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1597 SYSCTL_CHILDREN(rack_policing), 1598 OID_AUTO, "min_comp_bw", CTLFLAG_RW, 1599 &rack_pol_min_bw, 125000, 1600 "Do we have a min b/w for b/w compensation (0 = no)?"); 1601 /* Misc rack controls */ 1602 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1603 SYSCTL_CHILDREN(rack_sysctl_root), 1604 OID_AUTO, 1605 "misc", 1606 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1607 "Misc related controls"); 1608 #ifdef TCP_ACCOUNTING 1609 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1610 SYSCTL_CHILDREN(rack_misc), 1611 OID_AUTO, "tcp_acct", CTLFLAG_RW, 1612 &rack_tcp_accounting, 0, 1613 "Should we turn on TCP accounting for all rack sessions?"); 1614 #endif 1615 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1616 SYSCTL_CHILDREN(rack_misc), 1617 OID_AUTO, "dnd", CTLFLAG_RW, 1618 &rack_dnd_default, 0, 1619 "Do not disturb default for rack_rrr = 3"); 1620 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1621 SYSCTL_CHILDREN(rack_misc), 1622 OID_AUTO, "sad_seg_per", CTLFLAG_RW, 1623 &sad_seg_size_per, 800, 1624 "Percentage of segment size needed in a sack 800 = 80.0?"); 1625 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1626 SYSCTL_CHILDREN(rack_misc), 1627 OID_AUTO, "rxt_controls", CTLFLAG_RW, 1628 &rack_rxt_controls, 0, 1629 "Retransmit sending size controls (valid values 0, 1, 2 default=1)?"); 1630 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1631 SYSCTL_CHILDREN(rack_misc), 1632 OID_AUTO, "rack_hibeta", CTLFLAG_RW, 1633 &rack_hibeta_setting, 0, 1634 "Do we ue a high beta (80 instead of 50)?"); 1635 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1636 SYSCTL_CHILDREN(rack_misc), 1637 OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW, 1638 &rack_apply_rtt_with_reduced_conf, 0, 1639 "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?"); 1640 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1641 SYSCTL_CHILDREN(rack_misc), 1642 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW, 1643 &rack_dsack_std_based, 3, 1644 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?"); 1645 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1646 SYSCTL_CHILDREN(rack_misc), 1647 OID_AUTO, "prr_addback_max", CTLFLAG_RW, 1648 &rack_prr_addbackmax, 2, 1649 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?"); 1650 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1651 SYSCTL_CHILDREN(rack_misc), 1652 OID_AUTO, "stats_gets_ms", CTLFLAG_RW, 1653 &rack_stats_gets_ms_rtt, 1, 1654 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); 1655 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1656 SYSCTL_CHILDREN(rack_misc), 1657 OID_AUTO, "clientlowbuf", CTLFLAG_RW, 1658 &rack_client_low_buf, 0, 1659 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?"); 1660 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1661 SYSCTL_CHILDREN(rack_misc), 1662 OID_AUTO, "defprofile", CTLFLAG_RW, 1663 &rack_def_profile, 0, 1664 "Should RACK use a default profile (0=no, num == profile num)?"); 1665 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1666 SYSCTL_CHILDREN(rack_misc), 1667 OID_AUTO, "shared_cwnd", CTLFLAG_RW, 1668 &rack_enable_shared_cwnd, 1, 1669 "Should RACK try to use the shared cwnd on connections 
where allowed"); 1670 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1671 SYSCTL_CHILDREN(rack_misc), 1672 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW, 1673 &rack_limits_scwnd, 1, 1674 "Should RACK place low end time limits on the shared cwnd feature"); 1675 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1676 SYSCTL_CHILDREN(rack_misc), 1677 OID_AUTO, "no_prr", CTLFLAG_RW, 1678 &rack_disable_prr, 0, 1679 "Should RACK not use prr and only pace (must have pacing on)"); 1680 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1681 SYSCTL_CHILDREN(rack_misc), 1682 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1683 &rack_verbose_logging, 0, 1684 "Should RACK black box logging be verbose"); 1685 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1686 SYSCTL_CHILDREN(rack_misc), 1687 OID_AUTO, "data_after_close", CTLFLAG_RW, 1688 &rack_ignore_data_after_close, 1, 1689 "Do we hold off sending a RST until all pending data is ack'd"); 1690 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1691 SYSCTL_CHILDREN(rack_misc), 1692 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1693 &rack_sack_not_required, 1, 1694 "Do we allow rack to run on connections not supporting SACK"); 1695 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1696 SYSCTL_CHILDREN(rack_misc), 1697 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 1698 &rack_send_a_lot_in_prr, 1, 1699 "Send a lot in prr"); 1700 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1701 SYSCTL_CHILDREN(rack_misc), 1702 OID_AUTO, "autoscale", CTLFLAG_RW, 1703 &rack_autosndbuf_inc, 20, 1704 "What percentage should rack scale up its snd buffer by?"); 1705 1706 1707 /* Sack Attacker detection stuff */ 1708 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1709 SYSCTL_CHILDREN(rack_attack), 1710 OID_AUTO, "merge_out", CTLFLAG_RW, 1711 &rack_merge_out_sacks_on_attack, 0, 1712 "Do we merge the sendmap when we decide we are being attacked?"); 1713 1714 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1715 SYSCTL_CHILDREN(rack_attack), 1716 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 1717 &rack_highest_sack_thresh_seen, 0, 1718 "Highest sack to ack ratio seen"); 1719 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1720 SYSCTL_CHILDREN(rack_attack), 1721 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW, 1722 &rack_highest_move_thresh_seen, 0, 1723 "Highest move to non-move ratio seen"); 1724 rack_ack_total = counter_u64_alloc(M_WAITOK); 1725 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1726 SYSCTL_CHILDREN(rack_attack), 1727 OID_AUTO, "acktotal", CTLFLAG_RD, 1728 &rack_ack_total, 1729 "Total number of Ack's"); 1730 rack_express_sack = counter_u64_alloc(M_WAITOK); 1731 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1732 SYSCTL_CHILDREN(rack_attack), 1733 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1734 &rack_express_sack, 1735 "Total expresss number of Sack's"); 1736 rack_sack_total = counter_u64_alloc(M_WAITOK); 1737 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1738 SYSCTL_CHILDREN(rack_attack), 1739 OID_AUTO, "sacktotal", CTLFLAG_RD, 1740 &rack_sack_total, 1741 "Total number of SACKs"); 1742 rack_move_none = counter_u64_alloc(M_WAITOK); 1743 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1744 SYSCTL_CHILDREN(rack_attack), 1745 OID_AUTO, "move_none", CTLFLAG_RD, 1746 &rack_move_none, 1747 "Total number of SACK index reuse of positions under threshold"); 1748 rack_move_some = counter_u64_alloc(M_WAITOK); 1749 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1750 SYSCTL_CHILDREN(rack_attack), 1751 OID_AUTO, "move_some", CTLFLAG_RD, 1752 &rack_move_some, 1753 "Total number of SACK index reuse of positions over threshold"); 1754 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 1755 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1756 SYSCTL_CHILDREN(rack_attack), 1757 OID_AUTO, "attacks", 
	    CTLFLAG_RD,
	    &rack_sack_attacks_detected,
	    "Total number of SACK attackers that had sack disabled");
	rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "reversed", CTLFLAG_RD,
	    &rack_sack_attacks_reversed,
	    "Total number of SACK attackers that were later determined false positive");
	rack_sack_attacks_suspect = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "suspect", CTLFLAG_RD,
	    &rack_sack_attacks_suspect,
	    "Total number of SACKs that triggered early detection");

	rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "nextmerge", CTLFLAG_RD,
	    &rack_sack_used_next_merge,
	    "Total number of times we used the next merge");
	rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "prevmerge", CTLFLAG_RD,
	    &rack_sack_used_prev_merge,
	    "Total number of times we used the prev merge");
	/* Counters */
	rack_total_bytes = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "totalbytes", CTLFLAG_RD,
	    &rack_total_bytes,
	    "Total number of bytes sent");
	rack_fto_send = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "fto_send", CTLFLAG_RD,
	    &rack_fto_send, "Total number of rack_fast_output sends");
	rack_fto_rsm_send = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "fto_rsm_send", CTLFLAG_RD,
	    &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends");
	rack_nfto_resend = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "nfto_resend", CTLFLAG_RD,
	    &rack_nfto_resend, "Total number of rack_output retransmissions");
	rack_non_fto_send = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "nfto_send", CTLFLAG_RD,
	    &rack_non_fto_send, "Total number of rack_output first sends");
	rack_extended_rfo = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "rfo_extended", CTLFLAG_RD,
	    &rack_extended_rfo, "Total number of times we extended rfo");

	rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "hwpace_init_fail", CTLFLAG_RD,
	    &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing");
	rack_hw_pace_lost = counter_u64_alloc(M_WAITOK);

	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "hwpace_lost", CTLFLAG_RD,
	    &rack_hw_pace_lost, "Total number of times we lost the use of hw pacing after it had been set up");
	rack_tlp_tot = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "tlp_to_total", CTLFLAG_RD,
	    &rack_tlp_tot,
	    "Total number of tail loss probe expirations");
	rack_tlp_newdata = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "tlp_new", CTLFLAG_RD,
	    &rack_tlp_newdata,
	    "Total number of tail loss probes sending new data");
	rack_tlp_retran = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "tlp_retran", CTLFLAG_RD,
	    &rack_tlp_retran,
	    "Total number of tail loss probes sending retransmitted data");
	rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD,
	    &rack_tlp_retran_bytes,
	    "Total bytes of tail loss probes sending retransmitted data");
	rack_to_tot = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "rack_to_tot", CTLFLAG_RD,
	    &rack_to_tot,
	    "Total number of times the rack timeout expired");
	rack_saw_enobuf = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "saw_enobufs", CTLFLAG_RD,
	    &rack_saw_enobuf,
	    "Total number of times a send returned ENOBUFS for non-hdwr paced connections");
	rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD,
	    &rack_saw_enobuf_hw,
	    "Total number of times a send returned ENOBUFS for hdwr paced connections");
	rack_saw_enetunreach = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "saw_enetunreach", CTLFLAG_RD,
	    &rack_saw_enetunreach,
	    "Total number of times a send received ENETUNREACH");
	rack_hot_alloc = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "alloc_hot", CTLFLAG_RD,
	    &rack_hot_alloc,
	    "Total allocations from the top of our list");
	tcp_policer_detected = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "policer_detected", CTLFLAG_RD,
	    &tcp_policer_detected,
	    "Total number of policer detections");

	rack_to_alloc = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "allocs", CTLFLAG_RD,
	    &rack_to_alloc,
	    "Total allocations of tracking structures");
	rack_to_alloc_hard = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "allochard", CTLFLAG_RD,
	    &rack_to_alloc_hard,
	    "Total allocations done with sleeping the hard way");
	rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "allocemerg", CTLFLAG_RD,
	    &rack_to_alloc_emerg,
	    "Total allocations done from emergency cache");
	rack_to_alloc_limited = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "alloc_limited", CTLFLAG_RD,
	    &rack_to_alloc_limited,
	    "Total allocations dropped due to limit");
	rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "alloc_limited_conns", CTLFLAG_RD,
	    &rack_alloc_limited_conns,
	    "Connections with allocations dropped due to limit");
	rack_split_limited = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "split_limited", CTLFLAG_RD,
	    &rack_split_limited,
	    "Split allocations dropped due to limit");
	rack_rxt_clamps_cwnd = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "rxt_clamps_cwnd", CTLFLAG_RD,
	    &rack_rxt_clamps_cwnd,
	    "Number of times that excessive rxt clamped the cwnd down");
	rack_rxt_clamps_cwnd_uniq = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "rxt_clamps_cwnd_uniq", CTLFLAG_RD,
	    &rack_rxt_clamps_cwnd_uniq,
	    "Number of connections that had their cwnd clamped down due to excessive rxt");
	rack_persists_sends = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "persist_sends", CTLFLAG_RD,
	    &rack_persists_sends,
	    "Number of times we sent a persist probe");
	rack_persists_acks = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "persist_acks", CTLFLAG_RD,
	    &rack_persists_acks,
	    "Number of times a persist probe was acked");
	rack_persists_loss = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "persist_loss", CTLFLAG_RD,
	    &rack_persists_loss,
	    "Number of times we detected a lost persist probe (no ack)");
	rack_persists_lost_ends = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "persist_loss_ends", CTLFLAG_RD,
	    &rack_persists_lost_ends,
	    "Number of lost persist probes (no ack) where the persist run ended with a PERSIST abort");
#ifdef INVARIANTS
	rack_adjust_map_bw = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "map_adjust_req", CTLFLAG_RD,
	    &rack_adjust_map_bw,
	    "Number of times we hit the case where the sb went up and down on a sendmap entry");
#endif
	rack_multi_single_eq = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD,
	    &rack_multi_single_eq,
	    "Number of compressed acks total represented");
	rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "cmp_ack_not", CTLFLAG_RD,
	    &rack_proc_non_comp_ack,
	    "Number of non-compressed acks that we processed");

	rack_sack_proc_all = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "sack_long", CTLFLAG_RD,
	    &rack_sack_proc_all,
	    "Total times we had to walk the whole list for sack processing");
	rack_sack_proc_restart = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "sack_restart", CTLFLAG_RD,
	    &rack_sack_proc_restart,
	    "Total times we had to walk the whole list due to a restart");
	rack_sack_proc_short =
counter_u64_alloc(M_WAITOK); 1997 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1998 SYSCTL_CHILDREN(rack_counters), 1999 OID_AUTO, "sack_short", CTLFLAG_RD, 2000 &rack_sack_proc_short, 2001 "Total times we took shortcut for sack processing"); 2002 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK); 2003 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2004 SYSCTL_CHILDREN(rack_attack), 2005 OID_AUTO, "skipacked", CTLFLAG_RD, 2006 &rack_sack_skipped_acked, 2007 "Total number of times we skipped previously sacked"); 2008 rack_sack_splits = counter_u64_alloc(M_WAITOK); 2009 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2010 SYSCTL_CHILDREN(rack_attack), 2011 OID_AUTO, "ofsplit", CTLFLAG_RD, 2012 &rack_sack_splits, 2013 "Total number of times we did the old fashion tree split"); 2014 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 2015 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2016 SYSCTL_CHILDREN(rack_counters), 2017 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 2018 &rack_input_idle_reduces, 2019 "Total number of idle reductions on input"); 2020 rack_collapsed_win_seen = counter_u64_alloc(M_WAITOK); 2021 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2022 SYSCTL_CHILDREN(rack_counters), 2023 OID_AUTO, "collapsed_win_seen", CTLFLAG_RD, 2024 &rack_collapsed_win_seen, 2025 "Total number of collapsed window events seen (where our window shrinks)"); 2026 2027 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 2028 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2029 SYSCTL_CHILDREN(rack_counters), 2030 OID_AUTO, "collapsed_win", CTLFLAG_RD, 2031 &rack_collapsed_win, 2032 "Total number of collapsed window events where we mark packets"); 2033 rack_collapsed_win_rxt = counter_u64_alloc(M_WAITOK); 2034 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2035 SYSCTL_CHILDREN(rack_counters), 2036 OID_AUTO, "collapsed_win_rxt", CTLFLAG_RD, 2037 &rack_collapsed_win_rxt, 2038 "Total number of packets that were retransmitted"); 2039 rack_collapsed_win_rxt_bytes = counter_u64_alloc(M_WAITOK); 2040 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2041 SYSCTL_CHILDREN(rack_counters), 2042 OID_AUTO, "collapsed_win_bytes", CTLFLAG_RD, 2043 &rack_collapsed_win_rxt_bytes, 2044 "Total number of bytes that were retransmitted"); 2045 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 2046 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2047 SYSCTL_CHILDREN(rack_counters), 2048 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 2049 &rack_try_scwnd, 2050 "Total number of scwnd attempts"); 2051 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 2052 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 2053 OID_AUTO, "outsize", CTLFLAG_RD, 2054 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 2055 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 2056 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 2057 OID_AUTO, "opts", CTLFLAG_RD, 2058 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 2059 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 2060 SYSCTL_CHILDREN(rack_sysctl_root), 2061 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 2062 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 2063 } 2064 2065 static uint32_t 2066 rc_init_window(struct tcp_rack *rack) 2067 { 2068 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 2069 2070 } 2071 2072 static uint64_t 2073 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 2074 { 2075 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 2076 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 2077 else if (rack->r_ctl.cwnd_to_use < 
rack->rc_tp->snd_ssthresh) 2078 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 2079 else 2080 return (rack->r_ctl.rc_fixed_pacing_rate_ca); 2081 } 2082 2083 static void 2084 rack_log_hybrid_bw(struct tcp_rack *rack, uint32_t seq, uint64_t cbw, uint64_t tim, 2085 uint64_t data, uint8_t mod, uint16_t aux, 2086 struct tcp_sendfile_track *cur, int line) 2087 { 2088 #ifdef TCP_REQUEST_TRK 2089 int do_log = 0; 2090 2091 /* 2092 * The rate cap one is noisy and only should come out when normal BB logging 2093 * is enabled, the other logs (not RATE_CAP and NOT CAP_CALC) only come out 2094 * once per chunk and make up the BBpoint that can be turned on by the client. 2095 */ 2096 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) { 2097 /* 2098 * The very noisy two need to only come out when 2099 * we have verbose logging on. 2100 */ 2101 if (rack_verbose_logging != 0) 2102 do_log = tcp_bblogging_on(rack->rc_tp); 2103 else 2104 do_log = 0; 2105 } else if (mod != HYBRID_LOG_BW_MEASURE) { 2106 /* 2107 * All other less noisy logs here except the measure which 2108 * also needs to come out on the point and the log. 2109 */ 2110 do_log = tcp_bblogging_on(rack->rc_tp); 2111 } else { 2112 do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING); 2113 } 2114 2115 if (do_log) { 2116 union tcp_log_stackspecific log; 2117 struct timeval tv; 2118 uint64_t lt_bw; 2119 2120 /* Convert our ms to a microsecond */ 2121 memset(&log, 0, sizeof(log)); 2122 2123 log.u_bbr.cwnd_gain = line; 2124 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2125 log.u_bbr.rttProp = tim; 2126 log.u_bbr.bw_inuse = cbw; 2127 log.u_bbr.delRate = rack_get_gp_est(rack); 2128 lt_bw = rack_get_lt_bw(rack); 2129 log.u_bbr.flex1 = seq; 2130 log.u_bbr.pacing_gain = aux; 2131 /* lt_bw = < flex3 | flex2 > */ 2132 log.u_bbr.flex2 = (uint32_t)(lt_bw & 0x00000000ffffffff); 2133 log.u_bbr.flex3 = (uint32_t)((lt_bw >> 32) & 0x00000000ffffffff); 2134 /* Record the last obtained us rtt in inflight */ 2135 if (cur == NULL) { 2136 /* Make sure we are looking at the right log if an overide comes in */ 2137 cur = rack->r_ctl.rc_last_sft; 2138 } 2139 if (rack->r_ctl.rack_rs.rs_flags != RACK_RTT_EMPTY) 2140 log.u_bbr.inflight = rack->r_ctl.rack_rs.rs_us_rtt; 2141 else { 2142 /* Use the last known rtt i.e. 
the rack-rtt */ 2143 log.u_bbr.inflight = rack->rc_rack_rtt; 2144 } 2145 if (cur != NULL) { 2146 uint64_t off; 2147 2148 log.u_bbr.cur_del_rate = cur->deadline; 2149 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) { 2150 /* start = < lost | pkt_epoch > */ 2151 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); 2152 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2153 log.u_bbr.flex6 = cur->start_seq; 2154 log.u_bbr.pkts_out = cur->end_seq; 2155 } else { 2156 /* start = < lost | pkt_epoch > */ 2157 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); 2158 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2159 /* end = < pkts_out | flex6 > */ 2160 log.u_bbr.flex6 = (uint32_t)(cur->end & 0x00000000ffffffff); 2161 log.u_bbr.pkts_out = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); 2162 } 2163 /* first_send = <lt_epoch | epoch> */ 2164 log.u_bbr.epoch = (uint32_t)(cur->first_send & 0x00000000ffffffff); 2165 log.u_bbr.lt_epoch = (uint32_t)((cur->first_send >> 32) & 0x00000000ffffffff); 2166 /* localtime = <delivered | applimited>*/ 2167 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); 2168 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 2169 #ifdef TCP_REQUEST_TRK 2170 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 2171 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 2172 #endif 2173 log.u_bbr.inhpts = 1; 2174 log.u_bbr.flex4 = (uint32_t)(rack->rc_tp->t_sndbytes - cur->sent_at_fs); 2175 log.u_bbr.flex5 = (uint32_t)(rack->rc_tp->t_snd_rxt_bytes - cur->rxt_at_fs); 2176 log.u_bbr.flex7 = (uint16_t)cur->hybrid_flags; 2177 } else { 2178 log.u_bbr.flex7 = 0xffff; 2179 log.u_bbr.cur_del_rate = 0xffffffffffffffff; 2180 } 2181 /* 2182 * Compose bbr_state to be a bit wise 0000ADHF 2183 * where A is the always_pace flag 2184 * where D is the dgp_on flag 2185 * where H is the hybrid_mode on flag 2186 * where F is the use_fixed_rate flag. 2187 */ 2188 log.u_bbr.bbr_state = rack->rc_always_pace; 2189 log.u_bbr.bbr_state <<= 1; 2190 log.u_bbr.bbr_state |= rack->dgp_on; 2191 log.u_bbr.bbr_state <<= 1; 2192 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 2193 log.u_bbr.bbr_state <<= 1; 2194 log.u_bbr.bbr_state |= rack->use_fixed_rate; 2195 log.u_bbr.flex8 = mod; 2196 tcp_log_event(rack->rc_tp, NULL, 2197 &rack->rc_inp->inp_socket->so_rcv, 2198 &rack->rc_inp->inp_socket->so_snd, 2199 TCP_HYBRID_PACING_LOG, 0, 2200 0, &log, false, NULL, __func__, __LINE__, &tv); 2201 2202 } 2203 #endif 2204 } 2205 2206 #ifdef TCP_REQUEST_TRK 2207 static void 2208 rack_log_hybrid_sends(struct tcp_rack *rack, struct tcp_sendfile_track *cur, int line) 2209 { 2210 if (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) { 2211 union tcp_log_stackspecific log; 2212 struct timeval tv; 2213 uint64_t off; 2214 2215 /* Convert our ms to a microsecond */ 2216 memset(&log, 0, sizeof(log)); 2217 2218 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2219 log.u_bbr.delRate = cur->sent_at_fs; 2220 2221 if ((cur->flags & TCP_TRK_TRACK_FLG_LSND) == 0) { 2222 /* 2223 * We did not get a new Rules Applied to set so 2224 * no overlapping send occured, this means the 2225 * current byte counts are correct. 2226 */ 2227 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 2228 log.u_bbr.rttProp = rack->rc_tp->t_snd_rxt_bytes; 2229 } else { 2230 /* 2231 * Overlapping send case, we switched to a new 2232 * send and did a rules applied. 
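			 * In that case we use the byte counts snapshotted at the
			 * last send (sent_at_ls / rxt_at_ls) below.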
2233 */ 2234 log.u_bbr.cur_del_rate = cur->sent_at_ls; 2235 log.u_bbr.rttProp = cur->rxt_at_ls; 2236 } 2237 log.u_bbr.bw_inuse = cur->rxt_at_fs; 2238 log.u_bbr.cwnd_gain = line; 2239 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 2240 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 2241 /* start = < flex1 | flex2 > */ 2242 log.u_bbr.flex2 = (uint32_t)(cur->start & 0x00000000ffffffff); 2243 log.u_bbr.flex1 = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2244 /* end = < flex3 | flex4 > */ 2245 log.u_bbr.flex4 = (uint32_t)(cur->end & 0x00000000ffffffff); 2246 log.u_bbr.flex3 = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); 2247 2248 /* localtime = <delivered | applimited>*/ 2249 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); 2250 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 2251 /* client timestamp = <lt_epoch | epoch>*/ 2252 log.u_bbr.epoch = (uint32_t)(cur->timestamp & 0x00000000ffffffff); 2253 log.u_bbr.lt_epoch = (uint32_t)((cur->timestamp >> 32) & 0x00000000ffffffff); 2254 /* now set all the flags in */ 2255 log.u_bbr.pkts_out = cur->hybrid_flags; 2256 log.u_bbr.lost = cur->playout_ms; 2257 log.u_bbr.flex6 = cur->flags; 2258 /* 2259 * Last send time = <flex5 | pkt_epoch> note we do not distinguish cases 2260 * where a false retransmit occurred so first_send <-> lastsend may 2261 * include longer time then it actually took if we have a false rxt. 2262 */ 2263 log.u_bbr.pkt_epoch = (uint32_t)(rack->r_ctl.last_tmit_time_acked & 0x00000000ffffffff); 2264 log.u_bbr.flex5 = (uint32_t)((rack->r_ctl.last_tmit_time_acked >> 32) & 0x00000000ffffffff); 2265 /* 2266 * Compose bbr_state to be a bit wise 0000ADHF 2267 * where A is the always_pace flag 2268 * where D is the dgp_on flag 2269 * where H is the hybrid_mode on flag 2270 * where F is the use_fixed_rate flag. 
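		 * For example, bbr_state == 0xA (binary 1010) means
		 * always_pace=1, dgp_on=0, hybrid_mode=1, use_fixed_rate=0.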
2271 */ 2272 log.u_bbr.bbr_state = rack->rc_always_pace; 2273 log.u_bbr.bbr_state <<= 1; 2274 log.u_bbr.bbr_state |= rack->dgp_on; 2275 log.u_bbr.bbr_state <<= 1; 2276 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 2277 log.u_bbr.bbr_state <<= 1; 2278 log.u_bbr.bbr_state |= rack->use_fixed_rate; 2279 2280 log.u_bbr.flex8 = HYBRID_LOG_SENT_LOST; 2281 tcp_log_event(rack->rc_tp, NULL, 2282 &rack->rc_inp->inp_socket->so_rcv, 2283 &rack->rc_inp->inp_socket->so_snd, 2284 TCP_HYBRID_PACING_LOG, 0, 2285 0, &log, false, NULL, __func__, __LINE__, &tv); 2286 } 2287 } 2288 #endif 2289 2290 static inline uint64_t 2291 rack_compensate_for_linerate(struct tcp_rack *rack, uint64_t bw) 2292 { 2293 uint64_t ret_bw, ether; 2294 uint64_t u_segsiz; 2295 2296 ether = rack->rc_tp->t_maxseg + sizeof(struct tcphdr); 2297 if (rack->r_is_v6){ 2298 #ifdef INET6 2299 ether += sizeof(struct ip6_hdr); 2300 #endif 2301 ether += 14; /* eheader size 6+6+2 */ 2302 } else { 2303 #ifdef INET 2304 ether += sizeof(struct ip); 2305 #endif 2306 ether += 14; /* eheader size 6+6+2 */ 2307 } 2308 u_segsiz = (uint64_t)min(ctf_fixed_maxseg(rack->rc_tp), rack->r_ctl.rc_pace_min_segs); 2309 ret_bw = bw; 2310 ret_bw *= ether; 2311 ret_bw /= u_segsiz; 2312 return (ret_bw); 2313 } 2314 2315 static void 2316 rack_rate_cap_bw(struct tcp_rack *rack, uint64_t *bw, int *capped) 2317 { 2318 #ifdef TCP_REQUEST_TRK 2319 struct timeval tv; 2320 uint64_t timenow, timeleft, lenleft, lengone, calcbw; 2321 #endif 2322 2323 if (rack->r_ctl.bw_rate_cap == 0) 2324 return; 2325 #ifdef TCP_REQUEST_TRK 2326 if (rack->rc_catch_up && rack->rc_hybrid_mode && 2327 (rack->r_ctl.rc_last_sft != NULL)) { 2328 /* 2329 * We have a dynamic cap. The original target 2330 * is in bw_rate_cap, but we need to look at 2331 * how long it is until we hit the deadline. 2332 */ 2333 struct tcp_sendfile_track *ent; 2334 2335 ent = rack->r_ctl.rc_last_sft; 2336 microuptime(&tv); 2337 timenow = tcp_tv_to_lusectick(&tv); 2338 if (timenow >= ent->deadline) { 2339 /* No time left we do DGP only */ 2340 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2341 0, 0, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__); 2342 rack->r_ctl.bw_rate_cap = 0; 2343 return; 2344 } 2345 /* We have the time */ 2346 timeleft = rack->r_ctl.rc_last_sft->deadline - timenow; 2347 if (timeleft < HPTS_MSEC_IN_SEC) { 2348 /* If there is less than a ms left just use DGPs rate */ 2349 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2350 0, timeleft, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__); 2351 rack->r_ctl.bw_rate_cap = 0; 2352 return; 2353 } 2354 /* 2355 * Now lets find the amount of data left to send. 2356 * 2357 * Now ideally we want to use the end_seq to figure out how much more 2358 * but it might not be possible (only if we have the TRACK_FG_COMP on the entry.. 2359 */ 2360 if (ent->flags & TCP_TRK_TRACK_FLG_COMP) { 2361 if (SEQ_GT(ent->end_seq, rack->rc_tp->snd_una)) 2362 lenleft = ent->end_seq - rack->rc_tp->snd_una; 2363 else { 2364 /* TSNH, we should catch it at the send */ 2365 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2366 0, timeleft, 0, HYBRID_LOG_CAPERROR, 0, ent, __LINE__); 2367 rack->r_ctl.bw_rate_cap = 0; 2368 return; 2369 } 2370 } else { 2371 /* 2372 * The hard way, figure out how much is gone and then 2373 * take that away from the total the client asked for 2374 * (thats off by tls overhead if this is tls). 
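			 * Roughly: lenleft = (ent->end - ent->start) - (snd_una - ent->start_seq).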
2375 */ 2376 if (SEQ_GT(rack->rc_tp->snd_una, ent->start_seq)) 2377 lengone = rack->rc_tp->snd_una - ent->start_seq; 2378 else 2379 lengone = 0; 2380 if (lengone < (ent->end - ent->start)) 2381 lenleft = (ent->end - ent->start) - lengone; 2382 else { 2383 /* TSNH, we should catch it at the send */ 2384 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2385 0, timeleft, lengone, HYBRID_LOG_CAPERROR, 0, ent, __LINE__); 2386 rack->r_ctl.bw_rate_cap = 0; 2387 return; 2388 } 2389 } 2390 if (lenleft == 0) { 2391 /* We have it all sent */ 2392 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2393 0, timeleft, lenleft, HYBRID_LOG_ALLSENT, 0, ent, __LINE__); 2394 if (rack->r_ctl.bw_rate_cap) 2395 goto normal_ratecap; 2396 else 2397 return; 2398 } 2399 calcbw = lenleft * HPTS_USEC_IN_SEC; 2400 calcbw /= timeleft; 2401 /* Now we must compensate for IP/TCP overhead */ 2402 calcbw = rack_compensate_for_linerate(rack, calcbw); 2403 /* Update the bit rate cap */ 2404 rack->r_ctl.bw_rate_cap = calcbw; 2405 if ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && 2406 (rack_hybrid_allow_set_maxseg == 1) && 2407 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { 2408 /* Lets set in a smaller mss possibly here to match our rate-cap */ 2409 uint32_t orig_max; 2410 2411 orig_max = rack->r_ctl.rc_pace_max_segs; 2412 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; 2413 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, calcbw, ctf_fixed_maxseg(rack->rc_tp)); 2414 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5); 2415 } 2416 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2417 calcbw, timeleft, lenleft, HYBRID_LOG_CAP_CALC, 0, ent, __LINE__); 2418 if ((calcbw > 0) && (*bw > calcbw)) { 2419 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2420 *bw, ent->deadline, lenleft, HYBRID_LOG_RATE_CAP, 0, ent, __LINE__); 2421 *capped = 1; 2422 *bw = calcbw; 2423 } 2424 return; 2425 } 2426 normal_ratecap: 2427 #endif 2428 if ((rack->r_ctl.bw_rate_cap > 0) && (*bw > rack->r_ctl.bw_rate_cap)) { 2429 #ifdef TCP_REQUEST_TRK 2430 if (rack->rc_hybrid_mode && 2431 rack->rc_catch_up && 2432 (rack->r_ctl.rc_last_sft != NULL) && 2433 (rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && 2434 (rack_hybrid_allow_set_maxseg == 1) && 2435 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { 2436 /* Lets set in a smaller mss possibly here to match our rate-cap */ 2437 uint32_t orig_max; 2438 2439 orig_max = rack->r_ctl.rc_pace_max_segs; 2440 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; 2441 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, rack->r_ctl.bw_rate_cap, ctf_fixed_maxseg(rack->rc_tp)); 2442 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5); 2443 } 2444 #endif 2445 *capped = 1; 2446 *bw = rack->r_ctl.bw_rate_cap; 2447 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2448 *bw, 0, 0, 2449 HYBRID_LOG_RATE_CAP, 1, NULL, __LINE__); 2450 } 2451 } 2452 2453 static uint64_t 2454 rack_get_gp_est(struct tcp_rack *rack) 2455 { 2456 uint64_t bw, lt_bw, ret_bw; 2457 2458 if (rack->rc_gp_filled == 0) { 2459 /* 2460 * We have yet no b/w measurement, 2461 * if we have a user set initial bw 2462 * return it. If we don't have that and 2463 * we have an srtt, use the tcp IW (10) to 2464 * calculate a fictional b/w over the SRTT 2465 * which is more or less a guess. 
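		 * (Roughly: bw = initwnd_bytes * USECS_IN_SECOND / srtt, with srtt in usecs.)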
Note 2466 * we don't use our IW from rack on purpose 2467 * so if we have like IW=30, we are not 2468 * calculating a "huge" b/w. 2469 */ 2470 uint64_t srtt; 2471 2472 if (rack->dis_lt_bw == 1) 2473 lt_bw = 0; 2474 else 2475 lt_bw = rack_get_lt_bw(rack); 2476 if (lt_bw) { 2477 /* 2478 * No goodput bw but a long-term b/w does exist 2479 * lets use that. 2480 */ 2481 ret_bw = lt_bw; 2482 goto compensate; 2483 } 2484 if (rack->r_ctl.init_rate) 2485 return (rack->r_ctl.init_rate); 2486 2487 /* Ok lets come up with the IW guess, if we have a srtt */ 2488 if (rack->rc_tp->t_srtt == 0) { 2489 /* 2490 * Go with old pacing method 2491 * i.e. burst mitigation only. 2492 */ 2493 return (0); 2494 } 2495 /* Ok lets get the initial TCP win (not racks) */ 2496 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 2497 srtt = (uint64_t)rack->rc_tp->t_srtt; 2498 bw *= (uint64_t)USECS_IN_SECOND; 2499 bw /= srtt; 2500 ret_bw = bw; 2501 goto compensate; 2502 2503 } 2504 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 2505 /* Averaging is done, we can return the value */ 2506 bw = rack->r_ctl.gp_bw; 2507 } else { 2508 /* Still doing initial average must calculate */ 2509 bw = rack->r_ctl.gp_bw / max(rack->r_ctl.num_measurements, 1); 2510 } 2511 if (rack->dis_lt_bw) { 2512 /* We are not using lt-bw */ 2513 ret_bw = bw; 2514 goto compensate; 2515 } 2516 lt_bw = rack_get_lt_bw(rack); 2517 if (lt_bw == 0) { 2518 /* If we don't have one then equate it to the gp_bw */ 2519 lt_bw = rack->r_ctl.gp_bw; 2520 } 2521 if (rack->use_lesser_lt_bw) { 2522 if (lt_bw < bw) 2523 ret_bw = lt_bw; 2524 else 2525 ret_bw = bw; 2526 } else { 2527 if (lt_bw > bw) 2528 ret_bw = lt_bw; 2529 else 2530 ret_bw = bw; 2531 } 2532 /* 2533 * Now lets compensate based on the TCP/IP overhead. Our 2534 * Goodput estimate does not include this so we must pace out 2535 * a bit faster since our pacing calculations do. The pacing 2536 * calculations use the base ETHERNET_SEGMENT_SIZE and the segsiz 2537 * we are using to do this, so we do that here in the opposite 2538 * direction as well. This means that if we are tunneled and the 2539 * segsiz is say 1200 bytes we will get quite a boost, but its 2540 * compensated for in the pacing time the opposite way. 2541 */ 2542 compensate: 2543 ret_bw = rack_compensate_for_linerate(rack, ret_bw); 2544 return(ret_bw); 2545 } 2546 2547 2548 static uint64_t 2549 rack_get_bw(struct tcp_rack *rack) 2550 { 2551 uint64_t bw; 2552 2553 if (rack->use_fixed_rate) { 2554 /* Return the fixed pacing rate */ 2555 return (rack_get_fixed_pacing_bw(rack)); 2556 } 2557 bw = rack_get_gp_est(rack); 2558 return (bw); 2559 } 2560 2561 static uint16_t 2562 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 2563 { 2564 if (rack->use_fixed_rate) { 2565 return (100); 2566 } else if (rack->in_probe_rtt && (rsm == NULL)) 2567 return (rack->r_ctl.rack_per_of_gp_probertt); 2568 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 2569 rack->r_ctl.rack_per_of_gp_rec)) { 2570 if (rsm) { 2571 /* a retransmission always use the recovery rate */ 2572 return (rack->r_ctl.rack_per_of_gp_rec); 2573 } else if (rack->rack_rec_nonrxt_use_cr) { 2574 /* Directed to use the configured rate */ 2575 goto configured_rate; 2576 } else if (rack->rack_no_prr && 2577 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 2578 /* No PRR, lets just use the b/w estimate only */ 2579 return (100); 2580 } else { 2581 /* 2582 * Here we may have a non-retransmit but we 2583 * have no overrides, so just use the recovery 2584 * rate (prr is in effect). 
2585 */ 2586 return (rack->r_ctl.rack_per_of_gp_rec); 2587 } 2588 } 2589 configured_rate: 2590 /* For the configured rate we look at our cwnd vs the ssthresh */ 2591 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 2592 return (rack->r_ctl.rack_per_of_gp_ss); 2593 else 2594 return (rack->r_ctl.rack_per_of_gp_ca); 2595 } 2596 2597 static void 2598 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6) 2599 { 2600 /* 2601 * Types of logs (mod value) 2602 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. 2603 * 2 = a dsack round begins, persist is reset to 16. 2604 * 3 = a dsack round ends 2605 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh 2606 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack 2607 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. 2608 */ 2609 if (tcp_bblogging_on(rack->rc_tp)) { 2610 union tcp_log_stackspecific log; 2611 struct timeval tv; 2612 2613 memset(&log, 0, sizeof(log)); 2614 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; 2615 log.u_bbr.flex1 <<= 1; 2616 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; 2617 log.u_bbr.flex1 <<= 1; 2618 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; 2619 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; 2620 log.u_bbr.flex3 = rack->r_ctl.num_dsack; 2621 log.u_bbr.flex4 = flex4; 2622 log.u_bbr.flex5 = flex5; 2623 log.u_bbr.flex6 = flex6; 2624 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; 2625 log.u_bbr.flex8 = mod; 2626 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2627 log.u_bbr.epoch = rack->r_ctl.current_round; 2628 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2629 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2630 &rack->rc_inp->inp_socket->so_rcv, 2631 &rack->rc_inp->inp_socket->so_snd, 2632 RACK_DSACK_HANDLING, 0, 2633 0, &log, false, &tv); 2634 } 2635 } 2636 2637 static void 2638 rack_log_hdwr_pacing(struct tcp_rack *rack, 2639 uint64_t rate, uint64_t hw_rate, int line, 2640 int error, uint16_t mod) 2641 { 2642 if (tcp_bblogging_on(rack->rc_tp)) { 2643 union tcp_log_stackspecific log; 2644 struct timeval tv; 2645 const struct ifnet *ifp; 2646 uint64_t ifp64; 2647 2648 memset(&log, 0, sizeof(log)); 2649 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 2650 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 2651 if (rack->r_ctl.crte) { 2652 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 2653 } else if (rack->rc_inp->inp_route.ro_nh && 2654 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 2655 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 2656 } else 2657 ifp = NULL; 2658 if (ifp) { 2659 ifp64 = (uintptr_t)ifp; 2660 log.u_bbr.flex3 = ((ifp64 >> 32) & 0x00000000ffffffff); 2661 log.u_bbr.flex4 = (ifp64 & 0x00000000ffffffff); 2662 } 2663 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2664 log.u_bbr.bw_inuse = rate; 2665 log.u_bbr.flex5 = line; 2666 log.u_bbr.flex6 = error; 2667 log.u_bbr.flex7 = mod; 2668 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 2669 log.u_bbr.flex8 = rack->use_fixed_rate; 2670 log.u_bbr.flex8 <<= 1; 2671 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 2672 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 2673 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 2674 if (rack->r_ctl.crte) 2675 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 2676 else 2677 log.u_bbr.cur_del_rate = 0; 2678 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 2679 log.u_bbr.epoch = rack->r_ctl.current_round; 2680 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2681 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2682 
&rack->rc_inp->inp_socket->so_rcv, 2683 &rack->rc_inp->inp_socket->so_snd, 2684 BBR_LOG_HDWR_PACE, 0, 2685 0, &log, false, &tv); 2686 } 2687 } 2688 2689 static uint64_t 2690 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) 2691 { 2692 /* 2693 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 2694 */ 2695 uint64_t bw_est, high_rate; 2696 uint64_t gain; 2697 2698 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2699 bw_est = bw * gain; 2700 bw_est /= (uint64_t)100; 2701 /* Never fall below the minimum (def 64kbps) */ 2702 if (bw_est < RACK_MIN_BW) 2703 bw_est = RACK_MIN_BW; 2704 if (rack->r_rack_hw_rate_caps) { 2705 /* Rate caps are in place */ 2706 if (rack->r_ctl.crte != NULL) { 2707 /* We have a hdwr rate already */ 2708 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2709 if (bw_est >= high_rate) { 2710 /* We are capping bw at the highest rate table entry */ 2711 if (rack_hw_rate_cap_per && 2712 (((high_rate * (100 + rack_hw_rate_cap_per)) / 100) < bw_est)) { 2713 rack->r_rack_hw_rate_caps = 0; 2714 goto done; 2715 } 2716 rack_log_hdwr_pacing(rack, 2717 bw_est, high_rate, __LINE__, 2718 0, 3); 2719 bw_est = high_rate; 2720 if (capped) 2721 *capped = 1; 2722 } 2723 } else if ((rack->rack_hdrw_pacing == 0) && 2724 (rack->rack_hdw_pace_ena) && 2725 (rack->rack_attempt_hdwr_pace == 0) && 2726 (rack->rc_inp->inp_route.ro_nh != NULL) && 2727 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2728 /* 2729 * Special case, we have not yet attempted hardware 2730 * pacing, and yet we may, when we do, find out if we are 2731 * above the highest rate. We need to know the maxbw for the interface 2732 * in question (if it supports ratelimiting). We get back 2733 * a 0, if the interface is not found in the RL lists. 2734 */ 2735 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2736 if (high_rate) { 2737 /* Yep, we have a rate is it above this rate? */ 2738 if (bw_est > high_rate) { 2739 bw_est = high_rate; 2740 if (capped) 2741 *capped = 1; 2742 } 2743 } 2744 } 2745 } 2746 done: 2747 return (bw_est); 2748 } 2749 2750 static void 2751 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2752 { 2753 if (tcp_bblogging_on(rack->rc_tp)) { 2754 union tcp_log_stackspecific log; 2755 struct timeval tv; 2756 2757 if ((mod != 1) && (rack_verbose_logging == 0)) { 2758 /* 2759 * We get 3 values currently for mod 2760 * 1 - We are retransmitting and this tells the reason. 2761 * 2 - We are clearing a dup-ack count. 2762 * 3 - We are incrementing a dup-ack count. 2763 * 2764 * The clear/increment are only logged 2765 * if you have BBverbose on. 
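		 * When we do log, flex3/flex4 below carry the rsm's flags and dup-ack count.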
2766 */ 2767 return; 2768 } 2769 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2770 log.u_bbr.flex1 = tsused; 2771 log.u_bbr.flex2 = thresh; 2772 log.u_bbr.flex3 = rsm->r_flags; 2773 log.u_bbr.flex4 = rsm->r_dupack; 2774 log.u_bbr.flex5 = rsm->r_start; 2775 log.u_bbr.flex6 = rsm->r_end; 2776 log.u_bbr.flex8 = mod; 2777 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2778 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2779 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2780 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2781 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2782 log.u_bbr.pacing_gain = rack->r_must_retran; 2783 log.u_bbr.epoch = rack->r_ctl.current_round; 2784 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2785 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2786 &rack->rc_inp->inp_socket->so_rcv, 2787 &rack->rc_inp->inp_socket->so_snd, 2788 BBR_LOG_SETTINGS_CHG, 0, 2789 0, &log, false, &tv); 2790 } 2791 } 2792 2793 static void 2794 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2795 { 2796 if (tcp_bblogging_on(rack->rc_tp)) { 2797 union tcp_log_stackspecific log; 2798 struct timeval tv; 2799 2800 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2801 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2802 log.u_bbr.flex2 = to; 2803 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2804 log.u_bbr.flex4 = slot; 2805 log.u_bbr.flex5 = rack->rc_tp->t_hpts_slot; 2806 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2807 log.u_bbr.flex7 = rack->rc_in_persist; 2808 log.u_bbr.flex8 = which; 2809 if (rack->rack_no_prr) 2810 log.u_bbr.pkts_out = 0; 2811 else 2812 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2813 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2814 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2815 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2816 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2817 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2818 log.u_bbr.pacing_gain = rack->r_must_retran; 2819 log.u_bbr.cwnd_gain = rack->rack_deferred_inited; 2820 log.u_bbr.pkt_epoch = rack->rc_has_collapsed; 2821 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2822 log.u_bbr.lost = rack_rto_min; 2823 log.u_bbr.epoch = rack->r_ctl.roundends; 2824 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2825 log.u_bbr.bw_inuse <<= 32; 2826 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2827 log.u_bbr.applimited = rack->rc_tp->t_flags2; 2828 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2829 &rack->rc_inp->inp_socket->so_rcv, 2830 &rack->rc_inp->inp_socket->so_snd, 2831 BBR_LOG_TIMERSTAR, 0, 2832 0, &log, false, &tv); 2833 } 2834 } 2835 2836 static void 2837 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2838 { 2839 if (tcp_bblogging_on(rack->rc_tp)) { 2840 union tcp_log_stackspecific log; 2841 struct timeval tv; 2842 2843 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2844 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2845 log.u_bbr.flex8 = to_num; 2846 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2847 log.u_bbr.flex2 = rack->rc_rack_rtt; 2848 if (rsm == NULL) 2849 log.u_bbr.flex3 = 0; 2850 else 2851 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2852 if (rack->rack_no_prr) 2853 log.u_bbr.flex5 = 0; 2854 else 2855 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2856 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2857 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2858 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2859 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2860 
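		/* bw_inuse below packs current_round (upper 32 bits) with rc_considered_lost (lower 32 bits). */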
log.u_bbr.pacing_gain = rack->r_must_retran; 2861 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2862 log.u_bbr.bw_inuse <<= 32; 2863 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2864 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2865 &rack->rc_inp->inp_socket->so_rcv, 2866 &rack->rc_inp->inp_socket->so_snd, 2867 BBR_LOG_RTO, 0, 2868 0, &log, false, &tv); 2869 } 2870 } 2871 2872 static void 2873 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, 2874 struct rack_sendmap *prev, 2875 struct rack_sendmap *rsm, 2876 struct rack_sendmap *next, 2877 int flag, uint32_t th_ack, int line) 2878 { 2879 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2880 union tcp_log_stackspecific log; 2881 struct timeval tv; 2882 2883 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2884 log.u_bbr.flex8 = flag; 2885 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2886 log.u_bbr.cur_del_rate = (uintptr_t)prev; 2887 log.u_bbr.delRate = (uintptr_t)rsm; 2888 log.u_bbr.rttProp = (uintptr_t)next; 2889 log.u_bbr.flex7 = 0; 2890 if (prev) { 2891 log.u_bbr.flex1 = prev->r_start; 2892 log.u_bbr.flex2 = prev->r_end; 2893 log.u_bbr.flex7 |= 0x4; 2894 } 2895 if (rsm) { 2896 log.u_bbr.flex3 = rsm->r_start; 2897 log.u_bbr.flex4 = rsm->r_end; 2898 log.u_bbr.flex7 |= 0x2; 2899 } 2900 if (next) { 2901 log.u_bbr.flex5 = next->r_start; 2902 log.u_bbr.flex6 = next->r_end; 2903 log.u_bbr.flex7 |= 0x1; 2904 } 2905 log.u_bbr.applimited = line; 2906 log.u_bbr.pkts_out = th_ack; 2907 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2908 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2909 if (rack->rack_no_prr) 2910 log.u_bbr.lost = 0; 2911 else 2912 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2913 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2914 log.u_bbr.bw_inuse <<= 32; 2915 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2916 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2917 &rack->rc_inp->inp_socket->so_rcv, 2918 &rack->rc_inp->inp_socket->so_snd, 2919 TCP_LOG_MAPCHG, 0, 2920 0, &log, false, &tv); 2921 } 2922 } 2923 2924 static void 2925 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2926 struct rack_sendmap *rsm, int conf) 2927 { 2928 if (tcp_bblogging_on(tp)) { 2929 union tcp_log_stackspecific log; 2930 struct timeval tv; 2931 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2932 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2933 log.u_bbr.flex1 = t; 2934 log.u_bbr.flex2 = len; 2935 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2936 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2937 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2938 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2939 log.u_bbr.flex7 = conf; 2940 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2941 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2942 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2943 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2944 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2945 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2946 if (rsm) { 2947 log.u_bbr.pkt_epoch = rsm->r_start; 2948 log.u_bbr.lost = rsm->r_end; 2949 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2950 /* We loose any upper of the 24 bits */ 2951 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; 2952 } else { 2953 /* Its a SYN */ 2954 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2955 log.u_bbr.lost = 0; 2956 log.u_bbr.cwnd_gain = 0; 2957 log.u_bbr.pacing_gain = 0; 2958 } 2959 /* Write out general bits of interest rrs here */ 2960 log.u_bbr.use_lt_bw = 
rack->rc_highly_buffered; 2961 log.u_bbr.use_lt_bw <<= 1; 2962 log.u_bbr.use_lt_bw |= rack->forced_ack; 2963 log.u_bbr.use_lt_bw <<= 1; 2964 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2965 log.u_bbr.use_lt_bw <<= 1; 2966 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2967 log.u_bbr.use_lt_bw <<= 1; 2968 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2969 log.u_bbr.use_lt_bw <<= 1; 2970 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 2971 log.u_bbr.use_lt_bw <<= 1; 2972 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2973 log.u_bbr.use_lt_bw <<= 1; 2974 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; 2975 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; 2976 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; 2977 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; 2978 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; 2979 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; 2980 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 2981 log.u_bbr.bw_inuse <<= 32; 2982 if (rsm) 2983 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 2984 TCP_LOG_EVENTP(tp, NULL, 2985 &rack->rc_inp->inp_socket->so_rcv, 2986 &rack->rc_inp->inp_socket->so_snd, 2987 BBR_LOG_BBRRTT, 0, 2988 0, &log, false, &tv); 2989 2990 2991 } 2992 } 2993 2994 static void 2995 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 2996 { 2997 /* 2998 * Log the rtt sample we are 2999 * applying to the srtt algorithm in 3000 * useconds. 3001 */ 3002 if (tcp_bblogging_on(rack->rc_tp)) { 3003 union tcp_log_stackspecific log; 3004 struct timeval tv; 3005 3006 /* Convert our ms to a microsecond */ 3007 memset(&log, 0, sizeof(log)); 3008 log.u_bbr.flex1 = rtt; 3009 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 3010 log.u_bbr.flex7 = 1; 3011 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3012 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3013 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3014 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3015 log.u_bbr.pacing_gain = rack->r_must_retran; 3016 /* 3017 * We capture in delRate the upper 32 bits as 3018 * the confidence level we had declared, and the 3019 * lower 32 bits as the actual RTT using the arrival 3020 * timestamp. 
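 * (A log reader recovers them as confidence = delRate >> 32 and
 * rs_us_rtt = delRate & 0xffffffff.)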
3021 */ 3022 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 3023 log.u_bbr.delRate <<= 32; 3024 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 3025 /* Lets capture all the things that make up t_rtxcur */ 3026 log.u_bbr.applimited = rack_rto_min; 3027 log.u_bbr.epoch = rack_rto_max; 3028 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 3029 log.u_bbr.lost = rack_rto_min; 3030 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 3031 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 3032 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 3033 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 3034 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 3035 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3036 &rack->rc_inp->inp_socket->so_rcv, 3037 &rack->rc_inp->inp_socket->so_snd, 3038 TCP_LOG_RTT, 0, 3039 0, &log, false, &tv); 3040 } 3041 } 3042 3043 static void 3044 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 3045 { 3046 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3047 union tcp_log_stackspecific log; 3048 struct timeval tv; 3049 3050 /* Convert our ms to a microsecond */ 3051 memset(&log, 0, sizeof(log)); 3052 log.u_bbr.flex1 = rtt; 3053 log.u_bbr.flex2 = send_time; 3054 log.u_bbr.flex3 = ack_time; 3055 log.u_bbr.flex4 = where; 3056 log.u_bbr.flex7 = 2; 3057 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3058 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3059 log.u_bbr.bw_inuse <<= 32; 3060 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3061 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3062 &rack->rc_inp->inp_socket->so_rcv, 3063 &rack->rc_inp->inp_socket->so_snd, 3064 TCP_LOG_RTT, 0, 3065 0, &log, false, &tv); 3066 } 3067 } 3068 3069 3070 static void 3071 rack_log_rtt_sendmap(struct tcp_rack *rack, uint32_t idx, uint64_t tsv, uint32_t tsecho) 3072 { 3073 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3074 union tcp_log_stackspecific log; 3075 struct timeval tv; 3076 3077 /* Convert our ms to a microsecond */ 3078 memset(&log, 0, sizeof(log)); 3079 log.u_bbr.flex1 = idx; 3080 log.u_bbr.flex2 = rack_ts_to_msec(tsv); 3081 log.u_bbr.flex3 = tsecho; 3082 log.u_bbr.flex7 = 3; 3083 log.u_bbr.rttProp = tsv; 3084 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3085 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3086 log.u_bbr.bw_inuse <<= 32; 3087 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3088 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3089 &rack->rc_inp->inp_socket->so_rcv, 3090 &rack->rc_inp->inp_socket->so_snd, 3091 TCP_LOG_RTT, 0, 3092 0, &log, false, &tv); 3093 } 3094 } 3095 3096 3097 static inline void 3098 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 3099 { 3100 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3101 union tcp_log_stackspecific log; 3102 struct timeval tv; 3103 3104 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3105 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3106 log.u_bbr.flex1 = line; 3107 log.u_bbr.flex2 = tick; 3108 log.u_bbr.flex3 = tp->t_maxunacktime; 3109 log.u_bbr.flex4 = tp->t_acktime; 3110 log.u_bbr.flex8 = event; 3111 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3112 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3113 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3114 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3115 log.u_bbr.pacing_gain = rack->r_must_retran; 3116 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3117 log.u_bbr.bw_inuse <<= 32; 3118 log.u_bbr.bw_inuse |= 
rack->r_ctl.rc_considered_lost; 3119 TCP_LOG_EVENTP(tp, NULL, 3120 &rack->rc_inp->inp_socket->so_rcv, 3121 &rack->rc_inp->inp_socket->so_snd, 3122 BBR_LOG_PROGRESS, 0, 3123 0, &log, false, &tv); 3124 } 3125 } 3126 3127 static void 3128 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv, int line) 3129 { 3130 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3131 union tcp_log_stackspecific log; 3132 3133 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3134 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3135 log.u_bbr.flex1 = slot; 3136 if (rack->rack_no_prr) 3137 log.u_bbr.flex2 = 0; 3138 else 3139 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 3140 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3141 log.u_bbr.flex6 = line; 3142 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 3143 log.u_bbr.flex8 = rack->rc_in_persist; 3144 log.u_bbr.timeStamp = cts; 3145 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3146 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3147 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3148 log.u_bbr.pacing_gain = rack->r_must_retran; 3149 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3150 &rack->rc_inp->inp_socket->so_rcv, 3151 &rack->rc_inp->inp_socket->so_snd, 3152 BBR_LOG_BBRSND, 0, 3153 0, &log, false, tv); 3154 } 3155 } 3156 3157 static void 3158 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 3159 { 3160 if (tcp_bblogging_on(rack->rc_tp)) { 3161 union tcp_log_stackspecific log; 3162 struct timeval tv; 3163 3164 memset(&log, 0, sizeof(log)); 3165 log.u_bbr.flex1 = did_out; 3166 log.u_bbr.flex2 = nxt_pkt; 3167 log.u_bbr.flex3 = way_out; 3168 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3169 if (rack->rack_no_prr) 3170 log.u_bbr.flex5 = 0; 3171 else 3172 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3173 log.u_bbr.flex6 = nsegs; 3174 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 3175 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 3176 log.u_bbr.flex7 <<= 1; 3177 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 3178 log.u_bbr.flex7 <<= 1; 3179 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 3180 log.u_bbr.flex8 = rack->rc_in_persist; 3181 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3182 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3183 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3184 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 3185 log.u_bbr.use_lt_bw <<= 1; 3186 log.u_bbr.use_lt_bw |= rack->r_might_revert; 3187 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3188 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3189 log.u_bbr.pacing_gain = rack->r_must_retran; 3190 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3191 log.u_bbr.bw_inuse <<= 32; 3192 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3193 log.u_bbr.epoch = rack->rc_inp->inp_socket->so_snd.sb_hiwat; 3194 log.u_bbr.lt_epoch = rack->rc_inp->inp_socket->so_rcv.sb_hiwat; 3195 log.u_bbr.lost = rack->rc_tp->t_srtt; 3196 log.u_bbr.pkt_epoch = rack->rc_tp->rfbuf_cnt; 3197 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3198 &rack->rc_inp->inp_socket->so_rcv, 3199 &rack->rc_inp->inp_socket->so_snd, 3200 BBR_LOG_DOSEG_DONE, 0, 3201 0, &log, false, &tv); 3202 } 3203 } 3204 3205 static void 3206 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 3207 { 3208 if (tcp_bblogging_on(rack->rc_tp)) { 
3209 union tcp_log_stackspecific log; 3210 struct timeval tv; 3211 3212 memset(&log, 0, sizeof(log)); 3213 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 3214 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 3215 log.u_bbr.flex4 = arg1; 3216 log.u_bbr.flex5 = arg2; 3217 log.u_bbr.flex7 = rack->r_ctl.rc_user_set_min_segs; 3218 log.u_bbr.flex6 = arg3; 3219 log.u_bbr.flex8 = frm; 3220 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3221 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3222 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3223 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 3224 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3225 log.u_bbr.pacing_gain = rack->r_must_retran; 3226 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv, 3227 &tptosocket(tp)->so_snd, 3228 TCP_HDWR_PACE_SIZE, 0, 0, &log, false, &tv); 3229 } 3230 } 3231 3232 static void 3233 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, 3234 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 3235 { 3236 if (tcp_bblogging_on(rack->rc_tp)) { 3237 union tcp_log_stackspecific log; 3238 struct timeval tv; 3239 3240 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3241 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3242 log.u_bbr.flex1 = slot; 3243 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 3244 log.u_bbr.flex4 = reason; 3245 if (rack->rack_no_prr) 3246 log.u_bbr.flex5 = 0; 3247 else 3248 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3249 log.u_bbr.flex7 = hpts_calling; 3250 log.u_bbr.flex8 = rack->rc_in_persist; 3251 log.u_bbr.lt_epoch = cwnd_to_use; 3252 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3253 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3254 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3255 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3256 log.u_bbr.pacing_gain = rack->r_must_retran; 3257 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 3258 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3259 log.u_bbr.bw_inuse <<= 32; 3260 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3261 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3262 &rack->rc_inp->inp_socket->so_rcv, 3263 &rack->rc_inp->inp_socket->so_snd, 3264 BBR_LOG_JUSTRET, 0, 3265 tlen, &log, false, &tv); 3266 } 3267 } 3268 3269 static void 3270 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 3271 struct timeval *tv, uint32_t flags_on_entry) 3272 { 3273 if (tcp_bblogging_on(rack->rc_tp)) { 3274 union tcp_log_stackspecific log; 3275 3276 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3277 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3278 log.u_bbr.flex1 = line; 3279 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 3280 log.u_bbr.flex3 = flags_on_entry; 3281 log.u_bbr.flex4 = us_cts; 3282 if (rack->rack_no_prr) 3283 log.u_bbr.flex5 = 0; 3284 else 3285 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3286 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 3287 log.u_bbr.flex7 = hpts_removed; 3288 log.u_bbr.flex8 = 1; 3289 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 3290 log.u_bbr.timeStamp = us_cts; 3291 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3292 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3293 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3294 log.u_bbr.pacing_gain = rack->r_must_retran; 3295 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3296 log.u_bbr.bw_inuse <<= 32; 3297 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3298 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3299 
&rack->rc_inp->inp_socket->so_rcv, 3300 &rack->rc_inp->inp_socket->so_snd, 3301 BBR_LOG_TIMERCANC, 0, 3302 0, &log, false, tv); 3303 } 3304 } 3305 3306 static void 3307 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 3308 uint32_t flex1, uint32_t flex2, 3309 uint32_t flex3, uint32_t flex4, 3310 uint32_t flex5, uint32_t flex6, 3311 uint16_t flex7, uint8_t mod) 3312 { 3313 if (tcp_bblogging_on(rack->rc_tp)) { 3314 union tcp_log_stackspecific log; 3315 struct timeval tv; 3316 3317 if (mod == 1) { 3318 /* No you can't use 1, its for the real to cancel */ 3319 return; 3320 } 3321 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3322 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3323 log.u_bbr.flex1 = flex1; 3324 log.u_bbr.flex2 = flex2; 3325 log.u_bbr.flex3 = flex3; 3326 log.u_bbr.flex4 = flex4; 3327 log.u_bbr.flex5 = flex5; 3328 log.u_bbr.flex6 = flex6; 3329 log.u_bbr.flex7 = flex7; 3330 log.u_bbr.flex8 = mod; 3331 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3332 &rack->rc_inp->inp_socket->so_rcv, 3333 &rack->rc_inp->inp_socket->so_snd, 3334 BBR_LOG_TIMERCANC, 0, 3335 0, &log, false, &tv); 3336 } 3337 } 3338 3339 static void 3340 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 3341 { 3342 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3343 union tcp_log_stackspecific log; 3344 struct timeval tv; 3345 3346 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3347 log.u_bbr.flex1 = timers; 3348 log.u_bbr.flex2 = ret; 3349 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 3350 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3351 log.u_bbr.flex5 = cts; 3352 if (rack->rack_no_prr) 3353 log.u_bbr.flex6 = 0; 3354 else 3355 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; 3356 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3357 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3358 log.u_bbr.pacing_gain = rack->r_must_retran; 3359 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3360 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3361 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3362 &rack->rc_inp->inp_socket->so_rcv, 3363 &rack->rc_inp->inp_socket->so_snd, 3364 BBR_LOG_TO_PROCESS, 0, 3365 0, &log, false, &tv); 3366 } 3367 } 3368 3369 static void 3370 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line) 3371 { 3372 if (tcp_bblogging_on(rack->rc_tp)) { 3373 union tcp_log_stackspecific log; 3374 struct timeval tv; 3375 3376 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3377 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 3378 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 3379 if (rack->rack_no_prr) 3380 log.u_bbr.flex3 = 0; 3381 else 3382 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 3383 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 3384 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 3385 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 3386 log.u_bbr.flex7 = line; 3387 log.u_bbr.flex8 = frm; 3388 log.u_bbr.pkts_out = orig_cwnd; 3389 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3390 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3391 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 3392 log.u_bbr.use_lt_bw <<= 1; 3393 log.u_bbr.use_lt_bw |= rack->r_might_revert; 3394 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3395 &rack->rc_inp->inp_socket->so_rcv, 3396 &rack->rc_inp->inp_socket->so_snd, 3397 BBR_LOG_BBRUPD, 0, 3398 0, &log, false, &tv); 3399 } 3400 } 3401 3402 static void 3403 rack_counter_destroy(void) 3404 { 3405 counter_u64_free(rack_total_bytes); 3406 counter_u64_free(rack_fto_send); 3407 counter_u64_free(rack_fto_rsm_send); 3408 
counter_u64_free(rack_nfto_resend); 3409 counter_u64_free(rack_hw_pace_init_fail); 3410 counter_u64_free(rack_hw_pace_lost); 3411 counter_u64_free(rack_non_fto_send); 3412 counter_u64_free(rack_extended_rfo); 3413 counter_u64_free(rack_ack_total); 3414 counter_u64_free(rack_express_sack); 3415 counter_u64_free(rack_sack_total); 3416 counter_u64_free(rack_move_none); 3417 counter_u64_free(rack_move_some); 3418 counter_u64_free(rack_sack_attacks_detected); 3419 counter_u64_free(rack_sack_attacks_reversed); 3420 counter_u64_free(rack_sack_attacks_suspect); 3421 counter_u64_free(rack_sack_used_next_merge); 3422 counter_u64_free(rack_sack_used_prev_merge); 3423 counter_u64_free(rack_tlp_tot); 3424 counter_u64_free(rack_tlp_newdata); 3425 counter_u64_free(rack_tlp_retran); 3426 counter_u64_free(rack_tlp_retran_bytes); 3427 counter_u64_free(rack_to_tot); 3428 counter_u64_free(rack_saw_enobuf); 3429 counter_u64_free(rack_saw_enobuf_hw); 3430 counter_u64_free(rack_saw_enetunreach); 3431 counter_u64_free(rack_hot_alloc); 3432 counter_u64_free(tcp_policer_detected); 3433 counter_u64_free(rack_to_alloc); 3434 counter_u64_free(rack_to_alloc_hard); 3435 counter_u64_free(rack_to_alloc_emerg); 3436 counter_u64_free(rack_to_alloc_limited); 3437 counter_u64_free(rack_alloc_limited_conns); 3438 counter_u64_free(rack_split_limited); 3439 counter_u64_free(rack_multi_single_eq); 3440 counter_u64_free(rack_rxt_clamps_cwnd); 3441 counter_u64_free(rack_rxt_clamps_cwnd_uniq); 3442 counter_u64_free(rack_proc_non_comp_ack); 3443 counter_u64_free(rack_sack_proc_all); 3444 counter_u64_free(rack_sack_proc_restart); 3445 counter_u64_free(rack_sack_proc_short); 3446 counter_u64_free(rack_sack_skipped_acked); 3447 counter_u64_free(rack_sack_splits); 3448 counter_u64_free(rack_input_idle_reduces); 3449 counter_u64_free(rack_collapsed_win); 3450 counter_u64_free(rack_collapsed_win_rxt); 3451 counter_u64_free(rack_collapsed_win_rxt_bytes); 3452 counter_u64_free(rack_collapsed_win_seen); 3453 counter_u64_free(rack_try_scwnd); 3454 counter_u64_free(rack_persists_sends); 3455 counter_u64_free(rack_persists_acks); 3456 counter_u64_free(rack_persists_loss); 3457 counter_u64_free(rack_persists_lost_ends); 3458 #ifdef INVARIANTS 3459 counter_u64_free(rack_adjust_map_bw); 3460 #endif 3461 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 3462 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 3463 } 3464 3465 static struct rack_sendmap * 3466 rack_alloc(struct tcp_rack *rack) 3467 { 3468 struct rack_sendmap *rsm; 3469 3470 /* 3471 * First get the top of the list it in 3472 * theory is the "hottest" rsm we have, 3473 * possibly just freed by ack processing. 3474 */ 3475 if (rack->rc_free_cnt > rack_free_cache) { 3476 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 3477 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3478 counter_u64_add(rack_hot_alloc, 1); 3479 rack->rc_free_cnt--; 3480 return (rsm); 3481 } 3482 /* 3483 * Once we get under our free cache we probably 3484 * no longer have a "hot" one available. Lets 3485 * get one from UMA. 3486 */ 3487 rsm = uma_zalloc(rack_zone, M_NOWAIT); 3488 if (rsm) { 3489 rack->r_ctl.rc_num_maps_alloced++; 3490 counter_u64_add(rack_to_alloc, 1); 3491 return (rsm); 3492 } 3493 /* 3494 * Dig in to our aux rsm's (the last two) since 3495 * UMA failed to get us one. 
3496 */ 3497 if (rack->rc_free_cnt) { 3498 counter_u64_add(rack_to_alloc_emerg, 1); 3499 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 3500 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3501 rack->rc_free_cnt--; 3502 return (rsm); 3503 } 3504 return (NULL); 3505 } 3506 3507 static struct rack_sendmap * 3508 rack_alloc_full_limit(struct tcp_rack *rack) 3509 { 3510 if ((V_tcp_map_entries_limit > 0) && 3511 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 3512 counter_u64_add(rack_to_alloc_limited, 1); 3513 if (!rack->alloc_limit_reported) { 3514 rack->alloc_limit_reported = 1; 3515 counter_u64_add(rack_alloc_limited_conns, 1); 3516 } 3517 return (NULL); 3518 } 3519 return (rack_alloc(rack)); 3520 } 3521 3522 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 3523 static struct rack_sendmap * 3524 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 3525 { 3526 struct rack_sendmap *rsm; 3527 3528 if (limit_type) { 3529 /* currently there is only one limit type */ 3530 if (rack->r_ctl.rc_split_limit > 0 && 3531 rack->r_ctl.rc_num_split_allocs >= rack->r_ctl.rc_split_limit) { 3532 counter_u64_add(rack_split_limited, 1); 3533 if (!rack->alloc_limit_reported) { 3534 rack->alloc_limit_reported = 1; 3535 counter_u64_add(rack_alloc_limited_conns, 1); 3536 } 3537 return (NULL); 3538 } 3539 } 3540 3541 /* allocate and mark in the limit type, if set */ 3542 rsm = rack_alloc(rack); 3543 if (rsm != NULL && limit_type) { 3544 rsm->r_limit_type = limit_type; 3545 rack->r_ctl.rc_num_split_allocs++; 3546 } 3547 return (rsm); 3548 } 3549 3550 static void 3551 rack_free_trim(struct tcp_rack *rack) 3552 { 3553 struct rack_sendmap *rsm; 3554 3555 /* 3556 * Free up all the tail entries until 3557 * we get our list down to the limit. 
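 * (The limit is rack_free_cache; each entry trimmed beyond it goes
 * back to UMA and rc_num_maps_alloced drops with it.)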
3558 */ 3559 while (rack->rc_free_cnt > rack_free_cache) { 3560 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); 3561 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3562 rack->rc_free_cnt--; 3563 rack->r_ctl.rc_num_maps_alloced--; 3564 uma_zfree(rack_zone, rsm); 3565 } 3566 } 3567 3568 static void 3569 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 3570 { 3571 if (rsm->r_flags & RACK_APP_LIMITED) { 3572 if (rack->r_ctl.rc_app_limited_cnt > 0) { 3573 rack->r_ctl.rc_app_limited_cnt--; 3574 } 3575 } 3576 if (rsm->r_limit_type) { 3577 /* currently there is only one limit type */ 3578 rack->r_ctl.rc_num_split_allocs--; 3579 } 3580 if (rsm == rack->r_ctl.rc_first_appl) { 3581 rack->r_ctl.cleared_app_ack_seq = rsm->r_start + (rsm->r_end - rsm->r_start); 3582 rack->r_ctl.cleared_app_ack = 1; 3583 if (rack->r_ctl.rc_app_limited_cnt == 0) 3584 rack->r_ctl.rc_first_appl = NULL; 3585 else 3586 rack->r_ctl.rc_first_appl = tqhash_find(rack->r_ctl.tqh, rsm->r_nseq_appl); 3587 } 3588 if (rsm == rack->r_ctl.rc_resend) 3589 rack->r_ctl.rc_resend = NULL; 3590 if (rsm == rack->r_ctl.rc_end_appl) 3591 rack->r_ctl.rc_end_appl = NULL; 3592 if (rack->r_ctl.rc_tlpsend == rsm) 3593 rack->r_ctl.rc_tlpsend = NULL; 3594 if (rack->r_ctl.rc_sacklast == rsm) 3595 rack->r_ctl.rc_sacklast = NULL; 3596 memset(rsm, 0, sizeof(struct rack_sendmap)); 3597 /* Make sure we are not going to overrun our count limit of 0xff */ 3598 if ((rack->rc_free_cnt + 1) > RACK_FREE_CNT_MAX) { 3599 rack_free_trim(rack); 3600 } 3601 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); 3602 rack->rc_free_cnt++; 3603 } 3604 3605 static uint32_t 3606 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack) 3607 { 3608 uint64_t srtt, bw, len, tim; 3609 uint32_t segsiz, def_len, minl; 3610 3611 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3612 def_len = rack_def_data_window * segsiz; 3613 if (rack->rc_gp_filled == 0) { 3614 /* 3615 * We have no measurement (IW is in flight?) so 3616 * we can only guess using our data_window sysctl 3617 * value (usually 20MSS). 3618 */ 3619 return (def_len); 3620 } 3621 /* 3622 * Now we have a number of factors to consider. 3623 * 3624 * 1) We have a desired BDP which is usually 3625 * at least 2. 3626 * 2) We have a minimum number of rtt's usually 1 SRTT 3627 * but we allow it too to be more. 3628 * 3) We want to make sure a measurement last N useconds (if 3629 * we have set rack_min_measure_usec. 3630 * 3631 * We handle the first concern here by trying to create a data 3632 * window of max(rack_def_data_window, DesiredBDP). The 3633 * second concern we handle in not letting the measurement 3634 * window end normally until at least the required SRTT's 3635 * have gone by which is done further below in 3636 * rack_enough_for_measurement(). Finally the third concern 3637 * we also handle here by calculating how long that time 3638 * would take at the current BW and then return the 3639 * max of our first calculation and that length. Note 3640 * that if rack_min_measure_usec is 0, we don't deal 3641 * with concern 3. Also for both Concern 1 and 3 an 3642 * application limited period could end the measurement 3643 * earlier. 3644 * 3645 * So lets calculate the BDP with the "known" b/w using 3646 * the SRTT has our rtt and then multiply it by the 3647 * goal. 
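 * As a rough, purely illustrative example: with a measured b/w of
 * 12500000 bytes/sec, an SRTT of 40000 usecs and a goal of 2,
 * len = 12500000 * 40000 / 1000000 * 2 = 1000000 bytes, which is then
 * rounded up to a segment boundary and compared against def_len below.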
3648 */ 3649 bw = rack_get_bw(rack); 3650 srtt = (uint64_t)tp->t_srtt; 3651 len = bw * srtt; 3652 len /= (uint64_t)HPTS_USEC_IN_SEC; 3653 len *= max(1, rack_goal_bdp); 3654 /* Now we need to round up to the nearest MSS */ 3655 len = roundup(len, segsiz); 3656 if (rack_min_measure_usec) { 3657 /* Now calculate our min length for this b/w */ 3658 tim = rack_min_measure_usec; 3659 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC; 3660 if (minl == 0) 3661 minl = 1; 3662 minl = roundup(minl, segsiz); 3663 if (len < minl) 3664 len = minl; 3665 } 3666 /* 3667 * Now if we have a very small window we want 3668 * to attempt to get the window that is 3669 * as small as possible. This happens on 3670 * low b/w connections and we don't want to 3671 * span huge numbers of rtt's between measurements. 3672 * 3673 * We basically include 2 over our "MIN window" so 3674 * that the measurement can be shortened (possibly) by 3675 * an ack'ed packet. 3676 */ 3677 if (len < def_len) 3678 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz))); 3679 else 3680 return (max((uint32_t)len, def_len)); 3681 3682 } 3683 3684 static int 3685 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality) 3686 { 3687 uint32_t tim, srtts, segsiz; 3688 3689 /* 3690 * Has enough time passed for the GP measurement to be valid? 3691 */ 3692 if (SEQ_LT(th_ack, tp->gput_seq)) { 3693 /* Not enough bytes yet */ 3694 return (0); 3695 } 3696 if ((tp->snd_max == tp->snd_una) || 3697 (th_ack == tp->snd_max)){ 3698 /* 3699 * All is acked quality of all acked is 3700 * usually low or medium, but we in theory could split 3701 * all acked into two cases, where you got 3702 * a signifigant amount of your window and 3703 * where you did not. For now we leave it 3704 * but it is something to contemplate in the 3705 * future. The danger here is that delayed ack 3706 * is effecting the last byte (which is a 50:50 chance). 3707 */ 3708 *quality = RACK_QUALITY_ALLACKED; 3709 return (1); 3710 } 3711 if (SEQ_GEQ(th_ack, tp->gput_ack)) { 3712 /* 3713 * We obtained our entire window of data we wanted 3714 * no matter if we are in recovery or not then 3715 * its ok since expanding the window does not 3716 * make things fuzzy (or at least not as much). 3717 */ 3718 *quality = RACK_QUALITY_HIGH; 3719 return (1); 3720 } 3721 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3722 if (SEQ_LT(th_ack, tp->gput_ack) && 3723 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 3724 /* Not enough bytes yet */ 3725 return (0); 3726 } 3727 if (rack->r_ctl.rc_first_appl && 3728 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { 3729 /* 3730 * We are up to the app limited send point 3731 * we have to measure irrespective of the time.. 3732 */ 3733 *quality = RACK_QUALITY_APPLIMITED; 3734 return (1); 3735 } 3736 /* Now what about time? */ 3737 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); 3738 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; 3739 if ((tim >= srtts) && (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) { 3740 /* 3741 * We do not allow a measurement if we are in recovery 3742 * that would shrink the goodput window we wanted. 3743 * This is to prevent cloudyness of when the last send 3744 * was actually made. 
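 * (For example, with the usual one-SRTT minimum and a gp srtt of
 * 40000 usecs, at least 40000 usecs must have passed since gput_ts,
 * and we must not currently be in recovery, before the sample is
 * accepted here.)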
3745 */ 3746 *quality = RACK_QUALITY_HIGH; 3747 return (1); 3748 } 3749 /* Nope not even a full SRTT has passed */ 3750 return (0); 3751 } 3752 3753 static void 3754 rack_log_timely(struct tcp_rack *rack, 3755 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd, 3756 uint64_t up_bnd, int line, uint8_t method) 3757 { 3758 if (tcp_bblogging_on(rack->rc_tp)) { 3759 union tcp_log_stackspecific log; 3760 struct timeval tv; 3761 3762 memset(&log, 0, sizeof(log)); 3763 log.u_bbr.flex1 = logged; 3764 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; 3765 log.u_bbr.flex2 <<= 4; 3766 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; 3767 log.u_bbr.flex2 <<= 4; 3768 log.u_bbr.flex2 |= rack->rc_gp_incr; 3769 log.u_bbr.flex2 <<= 4; 3770 log.u_bbr.flex2 |= rack->rc_gp_bwred; 3771 log.u_bbr.flex3 = rack->rc_gp_incr; 3772 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3773 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; 3774 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; 3775 log.u_bbr.flex7 = rack->rc_gp_bwred; 3776 log.u_bbr.flex8 = method; 3777 log.u_bbr.cur_del_rate = cur_bw; 3778 log.u_bbr.delRate = low_bnd; 3779 log.u_bbr.bw_inuse = up_bnd; 3780 log.u_bbr.rttProp = rack_get_bw(rack); 3781 log.u_bbr.pkt_epoch = line; 3782 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3783 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3784 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3785 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3786 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3787 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; 3788 log.u_bbr.cwnd_gain <<= 1; 3789 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; 3790 log.u_bbr.cwnd_gain <<= 1; 3791 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 3792 log.u_bbr.cwnd_gain <<= 1; 3793 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 3794 log.u_bbr.lost = rack->r_ctl.rc_loss_count; 3795 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3796 &rack->rc_inp->inp_socket->so_rcv, 3797 &rack->rc_inp->inp_socket->so_snd, 3798 TCP_TIMELY_WORK, 0, 3799 0, &log, false, &tv); 3800 } 3801 } 3802 3803 static int 3804 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) 3805 { 3806 /* 3807 * Before we increase we need to know if 3808 * the estimate just made was less than 3809 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est) 3810 * 3811 * If we already are pacing at a fast enough 3812 * rate to push us faster there is no sense of 3813 * increasing. 3814 * 3815 * We first caculate our actual pacing rate (ss or ca multiplier 3816 * times our cur_bw). 3817 * 3818 * Then we take the last measured rate and multipy by our 3819 * maximum pacing overage to give us a max allowable rate. 3820 * 3821 * If our act_rate is smaller than our max_allowable rate 3822 * then we should increase. Else we should hold steady. 3823 * 3824 */ 3825 uint64_t act_rate, max_allow_rate; 3826 3827 if (rack_timely_no_stopping) 3828 return (1); 3829 3830 if ((cur_bw == 0) || (last_bw_est == 0)) { 3831 /* 3832 * Initial startup case or 3833 * everything is acked case. 3834 */ 3835 rack_log_timely(rack, mult, cur_bw, 0, 0, 3836 __LINE__, 9); 3837 return (1); 3838 } 3839 if (mult <= 100) { 3840 /* 3841 * We can always pace at or slightly above our rate. 
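 * Past that early-out the comparison below governs. As a purely
 * illustrative case, with mult = 150 and rack_max_per_above at its
 * 10% setting, we keep raising only while cur_bw * 1.50 is still
 * below last_bw_est * 1.10.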
3842 */ 3843 rack_log_timely(rack, mult, cur_bw, 0, 0, 3844 __LINE__, 9); 3845 return (1); 3846 } 3847 act_rate = cur_bw * (uint64_t)mult; 3848 act_rate /= 100; 3849 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100); 3850 max_allow_rate /= 100; 3851 if (act_rate < max_allow_rate) { 3852 /* 3853 * Here the rate we are actually pacing at 3854 * is smaller than 10% above our last measurement. 3855 * This means we are pacing below what we would 3856 * like to try to achieve (plus some wiggle room). 3857 */ 3858 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3859 __LINE__, 9); 3860 return (1); 3861 } else { 3862 /* 3863 * Here we are already pacing at least rack_max_per_above(10%) 3864 * what we are getting back. This indicates most likely 3865 * that we are being limited (cwnd/rwnd/app) and can't 3866 * get any more b/w. There is no sense of trying to 3867 * raise up the pacing rate its not speeding us up 3868 * and we already are pacing faster than we are getting. 3869 */ 3870 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3871 __LINE__, 8); 3872 return (0); 3873 } 3874 } 3875 3876 static void 3877 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack) 3878 { 3879 /* 3880 * When we drag bottom, we want to assure 3881 * that no multiplier is below 1.0, if so 3882 * we want to restore it to at least that. 3883 */ 3884 if (rack->r_ctl.rack_per_of_gp_rec < 100) { 3885 /* This is unlikely we usually do not touch recovery */ 3886 rack->r_ctl.rack_per_of_gp_rec = 100; 3887 } 3888 if (rack->r_ctl.rack_per_of_gp_ca < 100) { 3889 rack->r_ctl.rack_per_of_gp_ca = 100; 3890 } 3891 if (rack->r_ctl.rack_per_of_gp_ss < 100) { 3892 rack->r_ctl.rack_per_of_gp_ss = 100; 3893 } 3894 } 3895 3896 static void 3897 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack) 3898 { 3899 if (rack->r_ctl.rack_per_of_gp_ca > 100) { 3900 rack->r_ctl.rack_per_of_gp_ca = 100; 3901 } 3902 if (rack->r_ctl.rack_per_of_gp_ss > 100) { 3903 rack->r_ctl.rack_per_of_gp_ss = 100; 3904 } 3905 } 3906 3907 static void 3908 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override) 3909 { 3910 int32_t calc, logged, plus; 3911 3912 logged = 0; 3913 3914 if (rack->rc_skip_timely) 3915 return; 3916 if (override) { 3917 /* 3918 * override is passed when we are 3919 * loosing b/w and making one last 3920 * gasp at trying to not loose out 3921 * to a new-reno flow. 3922 */ 3923 goto extra_boost; 3924 } 3925 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */ 3926 if (rack->rc_gp_incr && 3927 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { 3928 /* 3929 * Reset and get 5 strokes more before the boost. Note 3930 * that the count is 0 based so we have to add one. 
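 * (The boosted step taken below is
 * rack_gp_increase_per * RACK_TIMELY_CNT_BOOST, i.e. one jump worth
 * RACK_TIMELY_CNT_BOOST ordinary increases.)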
3931 */ 3932 extra_boost: 3933 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3934 rack->rc_gp_timely_inc_cnt = 0; 3935 } else 3936 plus = (uint32_t)rack_gp_increase_per; 3937 /* Must be at least 1% increase for true timely increases */ 3938 if ((plus < 1) && 3939 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3940 plus = 1; 3941 if (rack->rc_gp_saw_rec && 3942 (rack->rc_gp_no_rec_chg == 0) && 3943 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3944 rack->r_ctl.rack_per_of_gp_rec)) { 3945 /* We have been in recovery ding it too */ 3946 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 3947 if (calc > 0xffff) 3948 calc = 0xffff; 3949 logged |= 1; 3950 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 3951 if (rack->r_ctl.rack_per_upper_bound_ca && 3952 (rack->rc_dragged_bottom == 0) && 3953 (rack->r_ctl.rack_per_of_gp_rec > rack->r_ctl.rack_per_upper_bound_ca)) 3954 rack->r_ctl.rack_per_of_gp_rec = rack->r_ctl.rack_per_upper_bound_ca; 3955 } 3956 if (rack->rc_gp_saw_ca && 3957 (rack->rc_gp_saw_ss == 0) && 3958 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3959 rack->r_ctl.rack_per_of_gp_ca)) { 3960 /* In CA */ 3961 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 3962 if (calc > 0xffff) 3963 calc = 0xffff; 3964 logged |= 2; 3965 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 3966 if (rack->r_ctl.rack_per_upper_bound_ca && 3967 (rack->rc_dragged_bottom == 0) && 3968 (rack->r_ctl.rack_per_of_gp_ca > rack->r_ctl.rack_per_upper_bound_ca)) 3969 rack->r_ctl.rack_per_of_gp_ca = rack->r_ctl.rack_per_upper_bound_ca; 3970 } 3971 if (rack->rc_gp_saw_ss && 3972 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3973 rack->r_ctl.rack_per_of_gp_ss)) { 3974 /* In SS */ 3975 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 3976 if (calc > 0xffff) 3977 calc = 0xffff; 3978 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 3979 if (rack->r_ctl.rack_per_upper_bound_ss && 3980 (rack->rc_dragged_bottom == 0) && 3981 (rack->r_ctl.rack_per_of_gp_ss > rack->r_ctl.rack_per_upper_bound_ss)) 3982 rack->r_ctl.rack_per_of_gp_ss = rack->r_ctl.rack_per_upper_bound_ss; 3983 logged |= 4; 3984 } 3985 if (logged && 3986 (rack->rc_gp_incr == 0)){ 3987 /* Go into increment mode */ 3988 rack->rc_gp_incr = 1; 3989 rack->rc_gp_timely_inc_cnt = 0; 3990 } 3991 if (rack->rc_gp_incr && 3992 logged && 3993 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 3994 rack->rc_gp_timely_inc_cnt++; 3995 } 3996 rack_log_timely(rack, logged, plus, 0, 0, 3997 __LINE__, 1); 3998 } 3999 4000 static uint32_t 4001 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 4002 { 4003 /*- 4004 * norm_grad = rtt_diff / minrtt; 4005 * new_per = curper * (1 - B * norm_grad) 4006 * 4007 * B = rack_gp_decrease_per (default 80%) 4008 * rtt_dif = input var current rtt-diff 4009 * curper = input var current percentage 4010 * minrtt = from rack filter 4011 * 4012 * In order to do the floating point calculations above we 4013 * do an integer conversion. 
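 * For instance (illustrative numbers), with rtt_diff = 2000 usecs, a
 * min rtt of 10000 usecs and the default B of 80, norm_grad = 0.2 and
 * new_per = curper * (1 - 0.8 * 0.2) = 0.84 * curper.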
The code looks confusing so let me 4014 * translate it into something that use more variables and 4015 * is clearer for us humans :) 4016 * 4017 * uint64_t norm_grad, inverse, reduce_by, final_result; 4018 * uint32_t perf; 4019 * 4020 * norm_grad = (((uint64_t)rtt_diff * 1000000) / 4021 * (uint64_t)get_filter_small(&rack->r_ctl.rc_gp_min_rtt)); 4022 * inverse = ((uint64_t)rack_gp_decrease * (uint64_t)1000000) * norm_grad; 4023 * inverse /= 1000000; 4024 * reduce_by = (1000000 - inverse); 4025 * final_result = (cur_per * reduce_by) / 1000000; 4026 * perf = (uint32_t)final_result; 4027 */ 4028 uint64_t perf; 4029 4030 perf = (((uint64_t)curper * ((uint64_t)1000000 - 4031 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 4032 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 4033 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 4034 (uint64_t)1000000)) / 4035 (uint64_t)1000000); 4036 if (perf > curper) { 4037 /* TSNH */ 4038 perf = curper - 1; 4039 } 4040 return ((uint32_t)perf); 4041 } 4042 4043 static uint32_t 4044 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 4045 { 4046 /* 4047 * highrttthresh 4048 * result = curper * (1 - (B * ( 1 - ------ )) 4049 * gp_srtt 4050 * 4051 * B = rack_gp_decrease_per (default .8 i.e. 80) 4052 * highrttthresh = filter_min * rack_gp_rtt_maxmul 4053 */ 4054 uint64_t perf; 4055 uint32_t highrttthresh; 4056 4057 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 4058 4059 perf = (((uint64_t)curper * ((uint64_t)1000000 - 4060 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 4061 ((uint64_t)highrttthresh * (uint64_t)1000000) / 4062 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 4063 if (tcp_bblogging_on(rack->rc_tp)) { 4064 uint64_t log1; 4065 4066 log1 = rtt; 4067 log1 <<= 32; 4068 log1 |= highrttthresh; 4069 rack_log_timely(rack, 4070 rack_gp_decrease_per, 4071 (uint64_t)curper, 4072 log1, 4073 perf, 4074 __LINE__, 4075 15); 4076 } 4077 return (perf); 4078 } 4079 4080 static void 4081 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 4082 { 4083 uint64_t logvar, logvar2, logvar3; 4084 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 4085 4086 if (rack->rc_skip_timely) 4087 return; 4088 if (rack->rc_gp_incr) { 4089 /* Turn off increment counting */ 4090 rack->rc_gp_incr = 0; 4091 rack->rc_gp_timely_inc_cnt = 0; 4092 } 4093 ss_red = ca_red = rec_red = 0; 4094 logged = 0; 4095 /* Calculate the reduction value */ 4096 if (rtt_diff < 0) { 4097 rtt_diff *= -1; 4098 } 4099 /* Must be at least 1% reduction */ 4100 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 4101 /* We have been in recovery ding it too */ 4102 if (timely_says == 2) { 4103 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 4104 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 4105 if (alt < new_per) 4106 val = alt; 4107 else 4108 val = new_per; 4109 } else 4110 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 4111 if (rack->r_ctl.rack_per_of_gp_rec > val) { 4112 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 4113 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 4114 } else { 4115 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 4116 rec_red = 0; 4117 } 4118 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 4119 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 4120 logged |= 1; 4121 } 4122 if (rack->rc_gp_saw_ss) { 4123 /* Sent in SS */ 4124 if 
(timely_says == 2) { 4125 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 4126 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 4127 if (alt < new_per) 4128 val = alt; 4129 else 4130 val = new_per; 4131 } else 4132 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 4133 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 4134 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 4135 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 4136 } else { 4137 ss_red = new_per; 4138 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 4139 logvar = new_per; 4140 logvar <<= 32; 4141 logvar |= alt; 4142 logvar2 = (uint32_t)rtt; 4143 logvar2 <<= 32; 4144 logvar2 |= (uint32_t)rtt_diff; 4145 logvar3 = rack_gp_rtt_maxmul; 4146 logvar3 <<= 32; 4147 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4148 rack_log_timely(rack, timely_says, 4149 logvar2, logvar3, 4150 logvar, __LINE__, 10); 4151 } 4152 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 4153 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 4154 logged |= 4; 4155 } else if (rack->rc_gp_saw_ca) { 4156 /* Sent in CA */ 4157 if (timely_says == 2) { 4158 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 4159 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 4160 if (alt < new_per) 4161 val = alt; 4162 else 4163 val = new_per; 4164 } else 4165 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 4166 if (rack->r_ctl.rack_per_of_gp_ca > val) { 4167 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 4168 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val; 4169 } else { 4170 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 4171 ca_red = 0; 4172 logvar = new_per; 4173 logvar <<= 32; 4174 logvar |= alt; 4175 logvar2 = (uint32_t)rtt; 4176 logvar2 <<= 32; 4177 logvar2 |= (uint32_t)rtt_diff; 4178 logvar3 = rack_gp_rtt_maxmul; 4179 logvar3 <<= 32; 4180 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4181 rack_log_timely(rack, timely_says, 4182 logvar2, logvar3, 4183 logvar, __LINE__, 10); 4184 } 4185 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 4186 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 4187 logged |= 2; 4188 } 4189 if (rack->rc_gp_timely_dec_cnt < 0x7) { 4190 rack->rc_gp_timely_dec_cnt++; 4191 if (rack_timely_dec_clear && 4192 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 4193 rack->rc_gp_timely_dec_cnt = 0; 4194 } 4195 logvar = ss_red; 4196 logvar <<= 32; 4197 logvar |= ca_red; 4198 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 4199 __LINE__, 2); 4200 } 4201 4202 static void 4203 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 4204 uint32_t rtt, uint32_t line, uint8_t reas) 4205 { 4206 if (tcp_bblogging_on(rack->rc_tp)) { 4207 union tcp_log_stackspecific log; 4208 struct timeval tv; 4209 4210 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4211 log.u_bbr.flex1 = line; 4212 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 4213 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 4214 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 4215 log.u_bbr.flex5 = rtt; 4216 log.u_bbr.flex6 = rack->rc_highly_buffered; 4217 log.u_bbr.flex6 <<= 1; 4218 log.u_bbr.flex6 |= rack->forced_ack; 4219 log.u_bbr.flex6 <<= 1; 4220 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 4221 log.u_bbr.flex6 <<= 1; 4222 log.u_bbr.flex6 |= rack->in_probe_rtt; 4223 log.u_bbr.flex6 <<= 1; 4224 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 
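		/*
		 * flex6 now carries five state bits, most significant first:
		 * rc_highly_buffered, forced_ack, rc_gp_dyn_mul, in_probe_rtt
		 * and measure_saw_probe_rtt.
		 */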
4225 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; 4226 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 4227 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 4228 log.u_bbr.flex8 = reas; 4229 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4230 log.u_bbr.delRate = rack_get_bw(rack); 4231 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 4232 log.u_bbr.cur_del_rate <<= 32; 4233 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 4234 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 4235 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 4236 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 4237 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 4238 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 4239 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 4240 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 4241 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4242 log.u_bbr.rttProp = us_cts; 4243 log.u_bbr.rttProp <<= 32; 4244 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 4245 TCP_LOG_EVENTP(rack->rc_tp, NULL, 4246 &rack->rc_inp->inp_socket->so_rcv, 4247 &rack->rc_inp->inp_socket->so_snd, 4248 BBR_LOG_RTT_SHRINKS, 0, 4249 0, &log, false, &rack->r_ctl.act_rcv_time); 4250 } 4251 } 4252 4253 static void 4254 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 4255 { 4256 uint64_t bwdp; 4257 4258 bwdp = rack_get_bw(rack); 4259 bwdp *= (uint64_t)rtt; 4260 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 4261 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 4262 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) { 4263 /* 4264 * A window protocol must be able to have 4 packets 4265 * outstanding as the floor in order to function 4266 * (especially considering delayed ack :D). 4267 */ 4268 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 4269 } 4270 } 4271 4272 static void 4273 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 4274 { 4275 /** 4276 * ProbeRTT is a bit different in rack_pacing than in 4277 * BBR. It is like BBR in that it uses the lowering of 4278 * the RTT as a signal that we saw something new and 4279 * counts from there for how long between. But it is 4280 * different in that its quite simple. It does not 4281 * play with the cwnd and wait until we get down 4282 * to N segments outstanding and hold that for 4283 * 200ms. Instead it just sets the pacing reduction 4284 * rate to a set percentage (70 by default) and hold 4285 * that for a number of recent GP Srtt's. 4286 */ 4287 uint32_t segsiz; 4288 4289 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 4290 if (rack->rc_gp_dyn_mul == 0) 4291 return; 4292 4293 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 4294 /* We are idle */ 4295 return; 4296 } 4297 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 4298 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 4299 /* 4300 * Stop the goodput now, the idea here is 4301 * that future measurements with in_probe_rtt 4302 * won't register if they are not greater so 4303 * we want to get what info (if any) is available 4304 * now. 
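 * (Once in_probe_rtt is set we pace at the reduced probe-rtt
 * percentage, so a measurement straddling the transition would most
 * likely read low.)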
4305 */ 4306 rack_do_goodput_measurement(rack->rc_tp, rack, 4307 rack->rc_tp->snd_una, __LINE__, 4308 RACK_QUALITY_PROBERTT); 4309 } 4310 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 4311 rack->r_ctl.rc_time_probertt_entered = us_cts; 4312 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 4313 rack->r_ctl.rc_pace_min_segs); 4314 rack->in_probe_rtt = 1; 4315 rack->measure_saw_probe_rtt = 1; 4316 rack->r_ctl.rc_time_probertt_starts = 0; 4317 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 4318 if (rack_probertt_use_min_rtt_entry) 4319 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 4320 else 4321 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 4322 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4323 __LINE__, RACK_RTTS_ENTERPROBE); 4324 } 4325 4326 static void 4327 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 4328 { 4329 struct rack_sendmap *rsm; 4330 uint32_t segsiz; 4331 4332 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 4333 rack->r_ctl.rc_pace_min_segs); 4334 rack->in_probe_rtt = 0; 4335 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 4336 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 4337 /* 4338 * Stop the goodput now, the idea here is 4339 * that future measurements with in_probe_rtt 4340 * won't register if they are not greater so 4341 * we want to get what info (if any) is available 4342 * now. 4343 */ 4344 rack_do_goodput_measurement(rack->rc_tp, rack, 4345 rack->rc_tp->snd_una, __LINE__, 4346 RACK_QUALITY_PROBERTT); 4347 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 4348 /* 4349 * We don't have enough data to make a measurement. 4350 * So lets just stop and start here after exiting 4351 * probe-rtt. We probably are not interested in 4352 * the results anyway. 4353 */ 4354 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 4355 } 4356 /* 4357 * Measurements through the current snd_max are going 4358 * to be limited by the slower pacing rate. 4359 * 4360 * We need to mark these as app-limited so we 4361 * don't collapse the b/w. 4362 */ 4363 rsm = tqhash_max(rack->r_ctl.tqh); 4364 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 4365 if (rack->r_ctl.rc_app_limited_cnt == 0) 4366 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 4367 else { 4368 /* 4369 * Go out to the end app limited and mark 4370 * this new one as next and move the end_appl up 4371 * to this guy. 4372 */ 4373 if (rack->r_ctl.rc_end_appl) 4374 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 4375 rack->r_ctl.rc_end_appl = rsm; 4376 } 4377 rsm->r_flags |= RACK_APP_LIMITED; 4378 rack->r_ctl.rc_app_limited_cnt++; 4379 } 4380 /* 4381 * Now, we need to examine our pacing rate multipliers. 4382 * If its under 100%, we need to kick it back up to 4383 * 100%. We also don't let it be over our "max" above 4384 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 4385 * Note setting clamp_atexit_prtt to 0 has the effect 4386 * of setting CA/SS to 100% always at exit (which is 4387 * the default behavior). 4388 */ 4389 if (rack_probertt_clear_is) { 4390 rack->rc_gp_incr = 0; 4391 rack->rc_gp_bwred = 0; 4392 rack->rc_gp_timely_inc_cnt = 0; 4393 rack->rc_gp_timely_dec_cnt = 0; 4394 } 4395 /* Do we do any clamping at exit? 
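 * (rack_atexit_prtt_hbp applies on highly buffered paths,
 * rack_atexit_prtt on all others; either one simply overwrites the
 * CA/SS multipliers below.)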
*/ 4396 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 4397 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 4398 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 4399 } 4400 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 4401 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 4402 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 4403 } 4404 /* 4405 * Lets set rtt_diff to 0, so that we will get a "boost" 4406 * after exiting. 4407 */ 4408 rack->r_ctl.rc_rtt_diff = 0; 4409 4410 /* Clear all flags so we start fresh */ 4411 rack->rc_tp->t_bytes_acked = 0; 4412 rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 4413 /* 4414 * If configured to, set the cwnd and ssthresh to 4415 * our targets. 4416 */ 4417 if (rack_probe_rtt_sets_cwnd) { 4418 uint64_t ebdp; 4419 uint32_t setto; 4420 4421 /* Set ssthresh so we get into CA once we hit our target */ 4422 if (rack_probertt_use_min_rtt_exit == 1) { 4423 /* Set to min rtt */ 4424 rack_set_prtt_target(rack, segsiz, 4425 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 4426 } else if (rack_probertt_use_min_rtt_exit == 2) { 4427 /* Set to current gp rtt */ 4428 rack_set_prtt_target(rack, segsiz, 4429 rack->r_ctl.rc_gp_srtt); 4430 } else if (rack_probertt_use_min_rtt_exit == 3) { 4431 /* Set to entry gp rtt */ 4432 rack_set_prtt_target(rack, segsiz, 4433 rack->r_ctl.rc_entry_gp_rtt); 4434 } else { 4435 uint64_t sum; 4436 uint32_t setval; 4437 4438 sum = rack->r_ctl.rc_entry_gp_rtt; 4439 sum *= 10; 4440 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 4441 if (sum >= 20) { 4442 /* 4443 * A highly buffered path needs 4444 * cwnd space for timely to work. 4445 * Lets set things up as if 4446 * we are heading back here again. 4447 */ 4448 setval = rack->r_ctl.rc_entry_gp_rtt; 4449 } else if (sum >= 15) { 4450 /* 4451 * Lets take the smaller of the 4452 * two since we are just somewhat 4453 * buffered. 4454 */ 4455 setval = rack->r_ctl.rc_gp_srtt; 4456 if (setval > rack->r_ctl.rc_entry_gp_rtt) 4457 setval = rack->r_ctl.rc_entry_gp_rtt; 4458 } else { 4459 /* 4460 * Here we are not highly buffered 4461 * and should pick the min we can to 4462 * keep from causing loss. 
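 * (sum expresses the entry gp rtt in tenths of the current gp srtt:
 * sum >= 20 means the rtt at entry was at least twice the current
 * srtt, sum >= 15 at least one and a half times it.)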
4463 */ 4464 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4465 } 4466 rack_set_prtt_target(rack, segsiz, 4467 setval); 4468 } 4469 if (rack_probe_rtt_sets_cwnd > 1) { 4470 /* There is a percentage here to boost */ 4471 ebdp = rack->r_ctl.rc_target_probertt_flight; 4472 ebdp *= rack_probe_rtt_sets_cwnd; 4473 ebdp /= 100; 4474 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 4475 } else 4476 setto = rack->r_ctl.rc_target_probertt_flight; 4477 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 4478 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 4479 /* Enforce a min */ 4480 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 4481 } 4482 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 4483 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 4484 } 4485 rack_log_rtt_shrinks(rack, us_cts, 4486 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4487 __LINE__, RACK_RTTS_EXITPROBE); 4488 /* Clear times last so log has all the info */ 4489 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 4490 rack->r_ctl.rc_time_probertt_entered = us_cts; 4491 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 4492 rack->r_ctl.rc_time_of_last_probertt = us_cts; 4493 } 4494 4495 static void 4496 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 4497 { 4498 /* Check in on probe-rtt */ 4499 4500 if (rack->rc_gp_filled == 0) { 4501 /* We do not do p-rtt unless we have gp measurements */ 4502 return; 4503 } 4504 if (rack->in_probe_rtt) { 4505 uint64_t no_overflow; 4506 uint32_t endtime, must_stay; 4507 4508 if (rack->r_ctl.rc_went_idle_time && 4509 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 4510 /* 4511 * We went idle during prtt, just exit now. 4512 */ 4513 rack_exit_probertt(rack, us_cts); 4514 } else if (rack_probe_rtt_safety_val && 4515 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 4516 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 4517 /* 4518 * Probe RTT safety value triggered! 4519 */ 4520 rack_log_rtt_shrinks(rack, us_cts, 4521 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4522 __LINE__, RACK_RTTS_SAFETY); 4523 rack_exit_probertt(rack, us_cts); 4524 } 4525 /* Calculate the max we will wait */ 4526 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 4527 if (rack->rc_highly_buffered) 4528 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 4529 /* Calculate the min we must wait */ 4530 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 4531 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 4532 TSTMP_LT(us_cts, endtime)) { 4533 uint32_t calc; 4534 /* Do we lower more? 
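 * Each full gp_srtt spent in probe-rtt takes another
 * rack_per_of_gp_probertt_reduce percent off the pacing percentage,
 * floored at rack_per_of_gp_lowthresh.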
*/ 4535 no_exit: 4536 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 4537 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 4538 else 4539 calc = 0; 4540 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 4541 if (calc) { 4542 /* Maybe */ 4543 calc *= rack_per_of_gp_probertt_reduce; 4544 if (calc > rack_per_of_gp_probertt) 4545 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 4546 else 4547 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 4548 /* Limit it too */ 4549 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 4550 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 4551 } 4552 /* We must reach target or the time set */ 4553 return; 4554 } 4555 if (rack->r_ctl.rc_time_probertt_starts == 0) { 4556 if ((TSTMP_LT(us_cts, must_stay) && 4557 rack->rc_highly_buffered) || 4558 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 4559 rack->r_ctl.rc_target_probertt_flight)) { 4560 /* We are not past the must_stay time */ 4561 goto no_exit; 4562 } 4563 rack_log_rtt_shrinks(rack, us_cts, 4564 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4565 __LINE__, RACK_RTTS_REACHTARGET); 4566 rack->r_ctl.rc_time_probertt_starts = us_cts; 4567 if (rack->r_ctl.rc_time_probertt_starts == 0) 4568 rack->r_ctl.rc_time_probertt_starts = 1; 4569 /* Restore back to our rate we want to pace at in prtt */ 4570 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 4571 } 4572 /* 4573 * Setup our end time, some number of gp_srtts plus 200ms. 4574 */ 4575 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 4576 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 4577 if (rack_probertt_gpsrtt_cnt_div) 4578 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 4579 else 4580 endtime = 0; 4581 endtime += rack_min_probertt_hold; 4582 endtime += rack->r_ctl.rc_time_probertt_starts; 4583 if (TSTMP_GEQ(us_cts, endtime)) { 4584 /* yes, exit probertt */ 4585 rack_exit_probertt(rack, us_cts); 4586 } 4587 4588 } else if ((rack->rc_skip_timely == 0) && 4589 (TSTMP_GT(us_cts, rack->r_ctl.rc_lower_rtt_us_cts)) && 4590 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt)) { 4591 /* Go into probertt, its been too long since we went lower */ 4592 rack_enter_probertt(rack, us_cts); 4593 } 4594 } 4595 4596 static void 4597 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 4598 uint32_t rtt, int32_t rtt_diff) 4599 { 4600 uint64_t cur_bw, up_bnd, low_bnd, subfr; 4601 uint32_t losses; 4602 4603 if ((rack->rc_gp_dyn_mul == 0) || 4604 (rack->use_fixed_rate) || 4605 (rack->in_probe_rtt) || 4606 (rack->rc_always_pace == 0)) { 4607 /* No dynamic GP multiplier in play */ 4608 return; 4609 } 4610 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 4611 cur_bw = rack_get_bw(rack); 4612 /* Calculate our up and down range */ 4613 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 4614 up_bnd /= 100; 4615 up_bnd += rack->r_ctl.last_gp_comp_bw; 4616 4617 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 4618 subfr /= 100; 4619 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 4620 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 4621 /* 4622 * This is the case where our RTT is above 4623 * the max target and we have been configured 4624 * to just do timely no bonus up stuff in that case. 4625 * 4626 * There are two configurations, set to 1, and we 4627 * just do timely if we are over our max. 
If its 4628 * set above 1 then we slam the multipliers down 4629 * to 100 and then decrement per timely. 4630 */ 4631 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4632 __LINE__, 3); 4633 if (rack->r_ctl.rc_no_push_at_mrtt > 1) 4634 rack_validate_multipliers_at_or_below_100(rack); 4635 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4636 } else if ((timely_says != 0) && (last_bw_est < low_bnd) && !losses) { 4637 /* 4638 * We are decreasing this is a bit complicated this 4639 * means we are loosing ground. This could be 4640 * because another flow entered and we are competing 4641 * for b/w with it. This will push the RTT up which 4642 * makes timely unusable unless we want to get shoved 4643 * into a corner and just be backed off (the age 4644 * old problem with delay based CC). 4645 * 4646 * On the other hand if it was a route change we 4647 * would like to stay somewhat contained and not 4648 * blow out the buffers. 4649 */ 4650 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4651 __LINE__, 3); 4652 rack->r_ctl.last_gp_comp_bw = cur_bw; 4653 if (rack->rc_gp_bwred == 0) { 4654 /* Go into reduction counting */ 4655 rack->rc_gp_bwred = 1; 4656 rack->rc_gp_timely_dec_cnt = 0; 4657 } 4658 if (rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) { 4659 /* 4660 * Push another time with a faster pacing 4661 * to try to gain back (we include override to 4662 * get a full raise factor). 4663 */ 4664 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || 4665 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || 4666 (timely_says == 0) || 4667 (rack_down_raise_thresh == 0)) { 4668 /* 4669 * Do an override up in b/w if we were 4670 * below the threshold or if the threshold 4671 * is zero we always do the raise. 4672 */ 4673 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1); 4674 } else { 4675 /* Log it stays the same */ 4676 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0, 4677 __LINE__, 11); 4678 } 4679 rack->rc_gp_timely_dec_cnt++; 4680 /* We are not incrementing really no-count */ 4681 rack->rc_gp_incr = 0; 4682 rack->rc_gp_timely_inc_cnt = 0; 4683 } else { 4684 /* 4685 * Lets just use the RTT 4686 * information and give up 4687 * pushing. 4688 */ 4689 goto use_timely; 4690 } 4691 } else if ((timely_says != 2) && 4692 !losses && 4693 (last_bw_est > up_bnd)) { 4694 /* 4695 * We are increasing b/w lets keep going, updating 4696 * our b/w and ignoring any timely input, unless 4697 * of course we are at our max raise (if there is one). 4698 */ 4699 4700 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4701 __LINE__, 3); 4702 rack->r_ctl.last_gp_comp_bw = cur_bw; 4703 if (rack->rc_gp_saw_ss && 4704 rack->r_ctl.rack_per_upper_bound_ss && 4705 (rack->r_ctl.rack_per_of_gp_ss == rack->r_ctl.rack_per_upper_bound_ss)) { 4706 /* 4707 * In cases where we can't go higher 4708 * we should just use timely. 4709 */ 4710 goto use_timely; 4711 } 4712 if (rack->rc_gp_saw_ca && 4713 rack->r_ctl.rack_per_upper_bound_ca && 4714 (rack->r_ctl.rack_per_of_gp_ca == rack->r_ctl.rack_per_upper_bound_ca)) { 4715 /* 4716 * In cases where we can't go higher 4717 * we should just use timely. 
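 *
 * (Here the CA gain has reached its configured upper bound, so a
 * further b/w-driven raise is pointless and we fall back to the
 * RTT-based timely decision instead.)
 *
 * As a rough illustration of the bands computed at the top of this
 * function (hypothetical percentages, not necessarily the tunable
 * defaults): with last_gp_comp_bw = 100 Mbps, a 2% up multiplier and
 * a 4% down multiplier give up_bnd = 102 Mbps and low_bnd = 96 Mbps.
 * A new goodput estimate above up_bnd keeps this increase branch
 * going, one below low_bnd takes the decrease branch above, and
 * anything inside the band ends up in the use_timely logic below.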
4718 */ 4719 goto use_timely; 4720 } 4721 rack->rc_gp_bwred = 0; 4722 rack->rc_gp_timely_dec_cnt = 0; 4723 /* You get a set number of pushes if timely is trying to reduce */ 4724 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { 4725 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4726 } else { 4727 /* Log it stays the same */ 4728 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0, 4729 __LINE__, 12); 4730 } 4731 return; 4732 } else { 4733 /* 4734 * We are staying between the lower and upper range bounds 4735 * so use timely to decide. 4736 */ 4737 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4738 __LINE__, 3); 4739 use_timely: 4740 if (timely_says) { 4741 rack->rc_gp_incr = 0; 4742 rack->rc_gp_timely_inc_cnt = 0; 4743 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && 4744 !losses && 4745 (last_bw_est < low_bnd)) { 4746 /* We are loosing ground */ 4747 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4748 rack->rc_gp_timely_dec_cnt++; 4749 /* We are not incrementing really no-count */ 4750 rack->rc_gp_incr = 0; 4751 rack->rc_gp_timely_inc_cnt = 0; 4752 } else 4753 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4754 } else { 4755 rack->rc_gp_bwred = 0; 4756 rack->rc_gp_timely_dec_cnt = 0; 4757 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4758 } 4759 } 4760 } 4761 4762 static int32_t 4763 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt) 4764 { 4765 int32_t timely_says; 4766 uint64_t log_mult, log_rtt_a_diff; 4767 4768 log_rtt_a_diff = rtt; 4769 log_rtt_a_diff <<= 32; 4770 log_rtt_a_diff |= (uint32_t)rtt_diff; 4771 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * 4772 rack_gp_rtt_maxmul)) { 4773 /* Reduce the b/w multiplier */ 4774 timely_says = 2; 4775 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 4776 log_mult <<= 32; 4777 log_mult |= prev_rtt; 4778 rack_log_timely(rack, timely_says, log_mult, 4779 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4780 log_rtt_a_diff, __LINE__, 4); 4781 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4782 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4783 max(rack_gp_rtt_mindiv , 1)))) { 4784 /* Increase the b/w multiplier */ 4785 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4786 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4787 max(rack_gp_rtt_mindiv , 1)); 4788 log_mult <<= 32; 4789 log_mult |= prev_rtt; 4790 timely_says = 0; 4791 rack_log_timely(rack, timely_says, log_mult , 4792 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4793 log_rtt_a_diff, __LINE__, 5); 4794 } else { 4795 /* 4796 * Use a gradient to find it the timely gradient 4797 * is: 4798 * grad = rc_rtt_diff / min_rtt; 4799 * 4800 * anything below or equal to 0 will be 4801 * a increase indication. Anything above 4802 * zero is a decrease. Note we take care 4803 * of the actual gradient calculation 4804 * in the reduction (its not needed for 4805 * increase). 
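 *
 * A small worked example of the three outcomes (the multiplier and
 * divisor values are illustrative, not necessarily the sysctl
 * defaults): with a filtered min_rtt of 10ms and a maxmul of 3, any
 * gp_srtt of 30ms or more returns 2 (reduce the b/w multiplier);
 * with minmul/mindiv of 1/4, any gp_srtt at or below 12.5ms returns
 * 0 (raise it); in between only the sign of rc_rtt_diff matters,
 * returning 0 when it is zero or negative and 1 when it is positive.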
4806 */ 4807 log_mult = prev_rtt; 4808 if (rtt_diff <= 0) { 4809 /* 4810 * Rttdiff is less than zero, increase the 4811 * b/w multiplier (its 0 or negative) 4812 */ 4813 timely_says = 0; 4814 rack_log_timely(rack, timely_says, log_mult, 4815 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4816 } else { 4817 /* Reduce the b/w multiplier */ 4818 timely_says = 1; 4819 rack_log_timely(rack, timely_says, log_mult, 4820 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4821 } 4822 } 4823 return (timely_says); 4824 } 4825 4826 static __inline int 4827 rack_in_gp_window(struct tcpcb *tp, struct rack_sendmap *rsm) 4828 { 4829 if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && 4830 SEQ_LEQ(rsm->r_end, tp->gput_ack)) { 4831 /** 4832 * This covers the case that the 4833 * resent is completely inside 4834 * the gp range or up to it. 4835 * |----------------| 4836 * |-----| <or> 4837 * |----| 4838 * <or> |---| 4839 */ 4840 return (1); 4841 } else if (SEQ_LT(rsm->r_start, tp->gput_seq) && 4842 SEQ_GT(rsm->r_end, tp->gput_seq)){ 4843 /** 4844 * This covers the case of 4845 * |--------------| 4846 * |-------->| 4847 */ 4848 return (1); 4849 } else if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && 4850 SEQ_LT(rsm->r_start, tp->gput_ack) && 4851 SEQ_GEQ(rsm->r_end, tp->gput_ack)) { 4852 4853 /** 4854 * This covers the case of 4855 * |--------------| 4856 * |-------->| 4857 */ 4858 return (1); 4859 } 4860 return (0); 4861 } 4862 4863 static __inline void 4864 rack_mark_in_gp_win(struct tcpcb *tp, struct rack_sendmap *rsm) 4865 { 4866 4867 if ((tp->t_flags & TF_GPUTINPROG) == 0) 4868 return; 4869 /* 4870 * We have a Goodput measurement in progress. Mark 4871 * the send if its within the window. If its not 4872 * in the window make sure it does not have the mark. 4873 */ 4874 if (rack_in_gp_window(tp, rsm)) 4875 rsm->r_flags |= RACK_IN_GP_WIN; 4876 else 4877 rsm->r_flags &= ~RACK_IN_GP_WIN; 4878 } 4879 4880 static __inline void 4881 rack_clear_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) 4882 { 4883 /* A GP measurement is ending, clear all marks on the send map*/ 4884 struct rack_sendmap *rsm = NULL; 4885 4886 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 4887 if (rsm == NULL) { 4888 rsm = tqhash_min(rack->r_ctl.tqh); 4889 } 4890 /* Nothing left? */ 4891 while ((rsm != NULL) && (SEQ_GEQ(tp->gput_ack, rsm->r_start))){ 4892 rsm->r_flags &= ~RACK_IN_GP_WIN; 4893 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4894 } 4895 } 4896 4897 4898 static __inline void 4899 rack_tend_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) 4900 { 4901 struct rack_sendmap *rsm = NULL; 4902 4903 if (tp->snd_una == tp->snd_max) { 4904 /* Nothing outstanding yet, nothing to do here */ 4905 return; 4906 } 4907 if (SEQ_GT(tp->gput_seq, tp->snd_una)) { 4908 /* 4909 * We are measuring ahead of some outstanding 4910 * data. We need to walk through up until we get 4911 * to gp_seq marking so that no rsm is set incorrectly 4912 * with RACK_IN_GP_WIN. 4913 */ 4914 rsm = tqhash_min(rack->r_ctl.tqh); 4915 while (rsm != NULL) { 4916 rack_mark_in_gp_win(tp, rsm); 4917 if (SEQ_GEQ(rsm->r_end, tp->gput_seq)) 4918 break; 4919 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4920 } 4921 } 4922 if (rsm == NULL) { 4923 /* 4924 * Need to find the GP seq, if rsm is 4925 * set we stopped as we hit it. 
4926 */ 4927 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 4928 if (rsm == NULL) 4929 return; 4930 rack_mark_in_gp_win(tp, rsm); 4931 } 4932 /* 4933 * Now we may need to mark already sent rsm, ahead of 4934 * gput_seq in the window since they may have been sent 4935 * *before* we started our measurment. The rsm, if non-null 4936 * has been marked (note if rsm would have been NULL we would have 4937 * returned in the previous block). So we go to the next, and continue 4938 * until we run out of entries or we exceed the gp_ack value. 4939 */ 4940 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4941 while (rsm) { 4942 rack_mark_in_gp_win(tp, rsm); 4943 if (SEQ_GT(rsm->r_end, tp->gput_ack)) 4944 break; 4945 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4946 } 4947 } 4948 4949 static void 4950 rack_log_gp_calc(struct tcp_rack *rack, uint32_t add_part, uint32_t sub_part, uint32_t srtt, uint64_t meas_bw, uint64_t utim, uint8_t meth, uint32_t line) 4951 { 4952 if (tcp_bblogging_on(rack->rc_tp)) { 4953 union tcp_log_stackspecific log; 4954 struct timeval tv; 4955 4956 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4957 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4958 log.u_bbr.flex1 = add_part; 4959 log.u_bbr.flex2 = sub_part; 4960 log.u_bbr.flex3 = rack_wma_divisor; 4961 log.u_bbr.flex4 = srtt; 4962 log.u_bbr.flex7 = (uint16_t)line; 4963 log.u_bbr.flex8 = meth; 4964 log.u_bbr.delRate = rack->r_ctl.gp_bw; 4965 log.u_bbr.cur_del_rate = meas_bw; 4966 log.u_bbr.rttProp = utim; 4967 TCP_LOG_EVENTP(rack->rc_tp, NULL, 4968 &rack->rc_inp->inp_socket->so_rcv, 4969 &rack->rc_inp->inp_socket->so_snd, 4970 BBR_LOG_THRESH_CALC, 0, 4971 0, &log, false, &rack->r_ctl.act_rcv_time); 4972 } 4973 } 4974 4975 static void 4976 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 4977 tcp_seq th_ack, int line, uint8_t quality) 4978 { 4979 uint64_t tim, bytes_ps, stim, utim; 4980 uint32_t segsiz, bytes, reqbytes, us_cts; 4981 int32_t gput, new_rtt_diff, timely_says; 4982 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 4983 int did_add = 0; 4984 4985 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4986 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4987 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 4988 tim = us_cts - tp->gput_ts; 4989 else 4990 tim = 0; 4991 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 4992 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 4993 else 4994 stim = 0; 4995 /* 4996 * Use the larger of the send time or ack time. This prevents us 4997 * from being influenced by ack artifacts to come up with too 4998 * high of measurement. Note that since we are spanning over many more 4999 * bytes in most of our measurements hopefully that is less likely to 5000 * occur. 5001 */ 5002 if (tim > stim) 5003 utim = max(tim, 1); 5004 else 5005 utim = max(stim, 1); 5006 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 5007 rack_log_gpset(rack, th_ack, us_cts, rack->r_ctl.rc_gp_cumack_ts, __LINE__, 3, NULL); 5008 if ((tim == 0) && (stim == 0)) { 5009 /* 5010 * Invalid measurement time, maybe 5011 * all on one ack/one send? 5012 */ 5013 bytes = 0; 5014 bytes_ps = 0; 5015 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5016 0, 0, 0, 10, __LINE__, NULL, quality); 5017 goto skip_measurement; 5018 } 5019 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 5020 /* We never made a us_rtt measurement? 
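 * (rc_gp_lowrtt still holds its 0xffffffff initializer here, so
 * there is no lowest RTT from which to form the BDP sanity cap
 * computed below, and the measurement is skipped.)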
*/ 5021 bytes = 0; 5022 bytes_ps = 0; 5023 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5024 0, 0, 0, 10, __LINE__, NULL, quality); 5025 goto skip_measurement; 5026 } 5027 /* 5028 * Calculate the maximum possible b/w this connection 5029 * could have. We base our calculation on the lowest 5030 * rtt we have seen during the measurement and the 5031 * largest rwnd the client has given us in that time. This 5032 * forms a BDP that is the maximum that we could ever 5033 * get to the client. Anything larger is not valid. 5034 * 5035 * I originally had code here that rejected measurements 5036 * where the time was less than 1/2 the latest us_rtt. 5037 * But after thinking on that I realized its wrong since 5038 * say you had a 150Mbps or even 1Gbps link, and you 5039 * were a long way away.. example I am in Europe (100ms rtt) 5040 * talking to my 1Gbps link in S.C. Now measuring say 150,000 5041 * bytes my time would be 1.2ms, and yet my rtt would say 5042 * the measurement was invalid the time was < 50ms. The 5043 * same thing is true for 150Mb (8ms of time). 5044 * 5045 * A better way I realized is to look at what the maximum 5046 * the connection could possibly do. This is gated on 5047 * the lowest RTT we have seen and the highest rwnd. 5048 * We should in theory never exceed that, if we are 5049 * then something on the path is storing up packets 5050 * and then feeding them all at once to our endpoint 5051 * messing up our measurement. 5052 */ 5053 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 5054 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 5055 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 5056 if (SEQ_LT(th_ack, tp->gput_seq)) { 5057 /* No measurement can be made */ 5058 bytes = 0; 5059 bytes_ps = 0; 5060 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5061 0, 0, 0, 10, __LINE__, NULL, quality); 5062 goto skip_measurement; 5063 } else 5064 bytes = (th_ack - tp->gput_seq); 5065 bytes_ps = (uint64_t)bytes; 5066 /* 5067 * Don't measure a b/w for pacing unless we have gotten at least 5068 * an initial windows worth of data in this measurement interval. 5069 * 5070 * Small numbers of bytes get badly influenced by delayed ack and 5071 * other artifacts. Note we take the initial window or our 5072 * defined minimum GP (defaulting to 10 which hopefully is the 5073 * IW). 5074 */ 5075 if (rack->rc_gp_filled == 0) { 5076 /* 5077 * The initial estimate is special. We 5078 * have blasted out an IW worth of packets 5079 * without a real valid ack ts results. We 5080 * then setup the app_limited_needs_set flag, 5081 * this should get the first ack in (probably 2 5082 * MSS worth) to be recorded as the timestamp. 5083 * We thus allow a smaller number of bytes i.e. 5084 * IW - 2MSS. 5085 */ 5086 reqbytes -= (2 * segsiz); 5087 /* Also lets fill previous for our first measurement to be neutral */ 5088 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 5089 } 5090 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 5091 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5092 rack->r_ctl.rc_app_limited_cnt, 5093 0, 0, 10, __LINE__, NULL, quality); 5094 goto skip_measurement; 5095 } 5096 /* 5097 * We now need to calculate the Timely like status so 5098 * we can update (possibly) the b/w multipliers. 
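 *
 * rc_rtt_diff is maintained just below as a 1/8-weighted moving
 * average of the change in gp_srtt between measurements (7/8 old +
 * 1/8 new), so a single noisy sample only nudges the timely
 * gradient. For example, an existing diff of +800us combined with a
 * new sample of -800us leaves the average at +600us.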
5099 */ 5100 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 5101 if (rack->rc_gp_filled == 0) { 5102 /* No previous reading */ 5103 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 5104 } else { 5105 if (rack->measure_saw_probe_rtt == 0) { 5106 /* 5107 * We don't want a probertt to be counted 5108 * since it will be negative incorrectly. We 5109 * expect to be reducing the RTT when we 5110 * pace at a slower rate. 5111 */ 5112 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 5113 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 5114 } 5115 } 5116 timely_says = rack_make_timely_judgement(rack, 5117 rack->r_ctl.rc_gp_srtt, 5118 rack->r_ctl.rc_rtt_diff, 5119 rack->r_ctl.rc_prev_gp_srtt 5120 ); 5121 bytes_ps *= HPTS_USEC_IN_SEC; 5122 bytes_ps /= utim; 5123 if (bytes_ps > rack->r_ctl.last_max_bw) { 5124 /* 5125 * Something is on path playing 5126 * since this b/w is not possible based 5127 * on our BDP (highest rwnd and lowest rtt 5128 * we saw in the measurement window). 5129 * 5130 * Another option here would be to 5131 * instead skip the measurement. 5132 */ 5133 rack_log_pacing_delay_calc(rack, bytes, reqbytes, 5134 bytes_ps, rack->r_ctl.last_max_bw, 0, 5135 11, __LINE__, NULL, quality); 5136 bytes_ps = rack->r_ctl.last_max_bw; 5137 } 5138 /* We store gp for b/w in bytes per second */ 5139 if (rack->rc_gp_filled == 0) { 5140 /* Initial measurement */ 5141 if (bytes_ps) { 5142 rack->r_ctl.gp_bw = bytes_ps; 5143 rack->rc_gp_filled = 1; 5144 rack->r_ctl.num_measurements = 1; 5145 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 5146 } else { 5147 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5148 rack->r_ctl.rc_app_limited_cnt, 5149 0, 0, 10, __LINE__, NULL, quality); 5150 } 5151 if (tcp_in_hpts(rack->rc_tp) && 5152 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 5153 /* 5154 * Ok we can't trust the pacer in this case 5155 * where we transition from un-paced to paced. 5156 * Or for that matter when the burst mitigation 5157 * was making a wild guess and got it wrong. 5158 * Stop the pacer and clear up all the aggregate 5159 * delays etc. 5160 */ 5161 tcp_hpts_remove(rack->rc_tp); 5162 rack->r_ctl.rc_hpts_flags = 0; 5163 rack->r_ctl.rc_last_output_to = 0; 5164 } 5165 did_add = 2; 5166 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { 5167 /* Still a small number run an average */ 5168 rack->r_ctl.gp_bw += bytes_ps; 5169 addpart = rack->r_ctl.num_measurements; 5170 rack->r_ctl.num_measurements++; 5171 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 5172 /* We have collected enough to move forward */ 5173 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; 5174 } 5175 rack_set_pace_segments(tp, rack, __LINE__, NULL); 5176 did_add = 3; 5177 } else { 5178 /* 5179 * We want to take 1/wma of the goodput and add in to 7/8th 5180 * of the old value weighted by the srtt. So if your measurement 5181 * period is say 2 SRTT's long you would get 1/4 as the 5182 * value, if it was like 1/2 SRTT then you would get 1/16th. 5183 * 5184 * But we must be careful not to take too much i.e. if the 5185 * srtt is say 20ms and the measurement is taken over 5186 * 400ms our weight would be 400/20 i.e. 20. On the 5187 * other hand if we get a measurement over 1ms with a 5188 * 10ms rtt we only want to take a much smaller portion. 
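 *
 * Concretely (a sketch of the computation below, not additional
 * policy): subpart = gp_bw * utim / (srtt * N) is removed from the
 * running estimate and addpart = bytes_ps * utim / (srtt * N) is
 * added back, where N is 8 in the non-dynamic case and
 * rack_wma_divisor in the dynamic case. With srtt = 20ms and
 * utim = 40ms, N = 8 gives the new sample a 40/160 = 1/4 weight,
 * matching the "2 SRTT's long -> 1/4" example above. Long
 * measurements are instead handled by the caps below: at most 1/2 of
 * the old value in the non-dynamic case, and a flat 1/N portion in
 * the dynamic case.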
5189 */ 5190 uint8_t meth; 5191 5192 if (rack->r_ctl.num_measurements < 0xff) { 5193 rack->r_ctl.num_measurements++; 5194 } 5195 srtt = (uint64_t)tp->t_srtt; 5196 if (srtt == 0) { 5197 /* 5198 * Strange why did t_srtt go back to zero? 5199 */ 5200 if (rack->r_ctl.rc_rack_min_rtt) 5201 srtt = rack->r_ctl.rc_rack_min_rtt; 5202 else 5203 srtt = HPTS_USEC_IN_MSEC; 5204 } 5205 /* 5206 * XXXrrs: Note for reviewers, in playing with 5207 * dynamic pacing I discovered this GP calculation 5208 * as done originally leads to some undesired results. 5209 * Basically you can get longer measurements contributing 5210 * too much to the WMA. Thus I changed it if you are doing 5211 * dynamic adjustments to only do the aportioned adjustment 5212 * if we have a very small (time wise) measurement. Longer 5213 * measurements just get there weight (defaulting to 1/8) 5214 * add to the WMA. We may want to think about changing 5215 * this to always do that for both sides i.e. dynamic 5216 * and non-dynamic... but considering lots of folks 5217 * were playing with this I did not want to change the 5218 * calculation per.se. without your thoughts.. Lawerence? 5219 * Peter?? 5220 */ 5221 if (rack->rc_gp_dyn_mul == 0) { 5222 subpart = rack->r_ctl.gp_bw * utim; 5223 subpart /= (srtt * 8); 5224 if (subpart < (rack->r_ctl.gp_bw / 2)) { 5225 /* 5226 * The b/w update takes no more 5227 * away then 1/2 our running total 5228 * so factor it in. 5229 */ 5230 addpart = bytes_ps * utim; 5231 addpart /= (srtt * 8); 5232 meth = 1; 5233 } else { 5234 /* 5235 * Don't allow a single measurement 5236 * to account for more than 1/2 of the 5237 * WMA. This could happen on a retransmission 5238 * where utim becomes huge compared to 5239 * srtt (multiple retransmissions when using 5240 * the sending rate which factors in all the 5241 * transmissions from the first one). 5242 */ 5243 subpart = rack->r_ctl.gp_bw / 2; 5244 addpart = bytes_ps / 2; 5245 meth = 2; 5246 } 5247 rack_log_gp_calc(rack, addpart, subpart, srtt, bytes_ps, utim, meth, __LINE__); 5248 resid_bw = rack->r_ctl.gp_bw - subpart; 5249 rack->r_ctl.gp_bw = resid_bw + addpart; 5250 did_add = 1; 5251 } else { 5252 if ((utim / srtt) <= 1) { 5253 /* 5254 * The b/w update was over a small period 5255 * of time. The idea here is to prevent a small 5256 * measurement time period from counting 5257 * too much. So we scale it based on the 5258 * time so it attributes less than 1/rack_wma_divisor 5259 * of its measurement. 5260 */ 5261 subpart = rack->r_ctl.gp_bw * utim; 5262 subpart /= (srtt * rack_wma_divisor); 5263 addpart = bytes_ps * utim; 5264 addpart /= (srtt * rack_wma_divisor); 5265 meth = 3; 5266 } else { 5267 /* 5268 * The scaled measurement was long 5269 * enough so lets just add in the 5270 * portion of the measurement i.e. 1/rack_wma_divisor 5271 */ 5272 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; 5273 addpart = bytes_ps / rack_wma_divisor; 5274 meth = 4; 5275 } 5276 if ((rack->measure_saw_probe_rtt == 0) || 5277 (bytes_ps > rack->r_ctl.gp_bw)) { 5278 /* 5279 * For probe-rtt we only add it in 5280 * if its larger, all others we just 5281 * add in. 5282 */ 5283 did_add = 1; 5284 rack_log_gp_calc(rack, addpart, subpart, srtt, bytes_ps, utim, meth, __LINE__); 5285 resid_bw = rack->r_ctl.gp_bw - subpart; 5286 rack->r_ctl.gp_bw = resid_bw + addpart; 5287 } 5288 } 5289 rack_set_pace_segments(tp, rack, __LINE__, NULL); 5290 } 5291 /* 5292 * We only watch the growth of the GP during the initial startup 5293 * or first-slowstart that ensues. 
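 * A rise is only recorded when the new average exceeds the last
 * recorded estimate by the gp_gain_req ratio checked below; the
 * ratio is compared per-mille, so a setting of 1200, for example,
 * requires at least a 20% gain.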
If we ever needed to watch 5294 * growth of gp outside of that period all we need to do is 5295 * remove the first clause of this if (rc_initial_ss_comp). 5296 */ 5297 if ((rack->rc_initial_ss_comp == 0) && 5298 (rack->r_ctl.num_measurements >= RACK_REQ_AVG)) { 5299 uint64_t gp_est; 5300 5301 gp_est = bytes_ps; 5302 if (tcp_bblogging_on(rack->rc_tp)) { 5303 union tcp_log_stackspecific log; 5304 struct timeval tv; 5305 5306 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5307 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5308 log.u_bbr.flex1 = rack->r_ctl.current_round; 5309 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; 5310 log.u_bbr.delRate = gp_est; 5311 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; 5312 log.u_bbr.flex8 = 41; 5313 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5314 0, &log, false, NULL, __func__, __LINE__,&tv); 5315 } 5316 if ((rack->r_ctl.num_measurements == RACK_REQ_AVG) || 5317 (rack->r_ctl.last_gpest == 0)) { 5318 /* 5319 * The round we get our measurement averaging going 5320 * is the base round so it always is the source point 5321 * for when we had our first increment. From there on 5322 * we only record the round that had a rise. 5323 */ 5324 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; 5325 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; 5326 } else if (gp_est >= rack->r_ctl.last_gpest) { 5327 /* 5328 * Test to see if its gone up enough 5329 * to set the round count up to now. Note 5330 * that on the seeding of the 4th measurement we 5331 */ 5332 gp_est *= 1000; 5333 gp_est /= rack->r_ctl.last_gpest; 5334 if ((uint32_t)gp_est > rack->r_ctl.gp_gain_req) { 5335 /* 5336 * We went up enough to record the round. 5337 */ 5338 if (tcp_bblogging_on(rack->rc_tp)) { 5339 union tcp_log_stackspecific log; 5340 struct timeval tv; 5341 5342 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5343 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5344 log.u_bbr.flex1 = rack->r_ctl.current_round; 5345 log.u_bbr.flex2 = (uint32_t)gp_est; 5346 log.u_bbr.flex3 = rack->r_ctl.gp_gain_req; 5347 log.u_bbr.delRate = gp_est; 5348 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; 5349 log.u_bbr.flex8 = 42; 5350 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5351 0, &log, false, NULL, __func__, __LINE__,&tv); 5352 } 5353 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; 5354 if (rack->r_ctl.use_gp_not_last == 1) 5355 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; 5356 else 5357 rack->r_ctl.last_gpest = bytes_ps; 5358 } 5359 } 5360 } 5361 if ((rack->gp_ready == 0) && 5362 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 5363 /* We have enough measurements now */ 5364 rack->gp_ready = 1; 5365 if (rack->dgp_on || 5366 rack->rack_hibeta) 5367 rack_set_cc_pacing(rack); 5368 if (rack->defer_options) 5369 rack_apply_deferred_options(rack); 5370 } 5371 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 5372 rack_get_bw(rack), 22, did_add, NULL, quality); 5373 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 5374 5375 if ((rack->measure_saw_probe_rtt == 0) && 5376 rack->rc_gp_rtt_set) { 5377 if (rack->rc_skip_timely == 0) { 5378 rack_update_multiplier(rack, timely_says, bytes_ps, 5379 rack->r_ctl.rc_gp_srtt, 5380 rack->r_ctl.rc_rtt_diff); 5381 } 5382 } 5383 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 5384 rack_get_bw(rack), 3, line, NULL, quality); 5385 rack_log_pacing_delay_calc(rack, 5386 bytes, /* flex2 */ 5387 tim, /* flex1 */ 5388 bytes_ps, /* bw_inuse */ 5389 rack->r_ctl.gp_bw, /* delRate */ 
5390 rack_get_lt_bw(rack), /* rttProp */ 5391 20, line, NULL, 0); 5392 /* reset the gp srtt and setup the new prev */ 5393 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 5394 /* Record the lost count for the next measurement */ 5395 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 5396 skip_measurement: 5397 /* 5398 * We restart our diffs based on the gpsrtt in the 5399 * measurement window. 5400 */ 5401 rack->rc_gp_rtt_set = 0; 5402 rack->rc_gp_saw_rec = 0; 5403 rack->rc_gp_saw_ca = 0; 5404 rack->rc_gp_saw_ss = 0; 5405 rack->rc_dragged_bottom = 0; 5406 if (quality == RACK_QUALITY_HIGH) { 5407 /* 5408 * Gput in the stats world is in kbps where bytes_ps is 5409 * bytes per second so we do ((x * 8)/ 1000). 5410 */ 5411 gput = (int32_t)((bytes_ps << 3) / (uint64_t)1000); 5412 #ifdef STATS 5413 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 5414 gput); 5415 /* 5416 * XXXLAS: This is a temporary hack, and should be 5417 * chained off VOI_TCP_GPUT when stats(9) grows an 5418 * API to deal with chained VOIs. 5419 */ 5420 if (tp->t_stats_gput_prev > 0) 5421 stats_voi_update_abs_s32(tp->t_stats, 5422 VOI_TCP_GPUT_ND, 5423 ((gput - tp->t_stats_gput_prev) * 100) / 5424 tp->t_stats_gput_prev); 5425 #endif 5426 tp->t_stats_gput_prev = gput; 5427 } 5428 tp->t_flags &= ~TF_GPUTINPROG; 5429 /* 5430 * Now are we app limited now and there is space from where we 5431 * were to where we want to go? 5432 * 5433 * We don't do the other case i.e. non-applimited here since 5434 * the next send will trigger us picking up the missing data. 5435 */ 5436 if (rack->r_ctl.rc_first_appl && 5437 TCPS_HAVEESTABLISHED(tp->t_state) && 5438 rack->r_ctl.rc_app_limited_cnt && 5439 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 5440 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > 5441 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 5442 /* 5443 * Yep there is enough outstanding to make a measurement here. 5444 */ 5445 struct rack_sendmap *rsm; 5446 5447 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 5448 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 5449 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 5450 rack->app_limited_needs_set = 0; 5451 tp->gput_seq = th_ack; 5452 if (rack->in_probe_rtt) 5453 rack->measure_saw_probe_rtt = 1; 5454 else if ((rack->measure_saw_probe_rtt) && 5455 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 5456 rack->measure_saw_probe_rtt = 0; 5457 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { 5458 /* There is a full window to gain info from */ 5459 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 5460 } else { 5461 /* We can only measure up to the applimited point */ 5462 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); 5463 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 5464 /* 5465 * We don't have enough to make a measurement. 5466 */ 5467 tp->t_flags &= ~TF_GPUTINPROG; 5468 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 5469 0, 0, 0, 6, __LINE__, NULL, quality); 5470 return; 5471 } 5472 } 5473 if (tp->t_state >= TCPS_FIN_WAIT_1) { 5474 /* 5475 * We will get no more data into the SB 5476 * this means we need to have the data available 5477 * before we start a measurement. 5478 */ 5479 if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) { 5480 /* Nope not enough data. 
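 * (fewer than gput_ack - gput_seq bytes remain queued in the send
 * buffer and, with the connection at or past FIN_WAIT_1, no more
 * will arrive, so the measurement window could never be filled)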
*/ 5481 return; 5482 } 5483 } 5484 tp->t_flags |= TF_GPUTINPROG; 5485 /* 5486 * Now we need to find the timestamp of the send at tp->gput_seq 5487 * for the send based measurement. 5488 */ 5489 rack->r_ctl.rc_gp_cumack_ts = 0; 5490 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 5491 if (rsm) { 5492 /* Ok send-based limit is set */ 5493 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 5494 /* 5495 * Move back to include the earlier part 5496 * so our ack time lines up right (this may 5497 * make an overlapping measurement but thats 5498 * ok). 5499 */ 5500 tp->gput_seq = rsm->r_start; 5501 } 5502 if (rsm->r_flags & RACK_ACKED) { 5503 struct rack_sendmap *nrsm; 5504 5505 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 5506 tp->gput_seq = rsm->r_end; 5507 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); 5508 if (nrsm) 5509 rsm = nrsm; 5510 else { 5511 rack->app_limited_needs_set = 1; 5512 } 5513 } else 5514 rack->app_limited_needs_set = 1; 5515 /* We always go from the first send */ 5516 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; 5517 } else { 5518 /* 5519 * If we don't find the rsm due to some 5520 * send-limit set the current time, which 5521 * basically disables the send-limit. 5522 */ 5523 struct timeval tv; 5524 5525 microuptime(&tv); 5526 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 5527 } 5528 rack_tend_gp_marks(tp, rack); 5529 rack_log_pacing_delay_calc(rack, 5530 tp->gput_seq, 5531 tp->gput_ack, 5532 (uintptr_t)rsm, 5533 tp->gput_ts, 5534 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 5535 9, 5536 __LINE__, rsm, quality); 5537 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 5538 } else { 5539 /* 5540 * To make sure proper timestamp merging occurs, we need to clear 5541 * all GP marks if we don't start a measurement. 5542 */ 5543 rack_clear_gp_marks(tp, rack); 5544 } 5545 } 5546 5547 /* 5548 * CC wrapper hook functions 5549 */ 5550 static void 5551 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 5552 uint16_t type, int32_t post_recovery) 5553 { 5554 uint32_t prior_cwnd, acked; 5555 struct tcp_log_buffer *lgb = NULL; 5556 uint8_t labc_to_use, quality; 5557 5558 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5559 tp->t_ccv.nsegs = nsegs; 5560 acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una); 5561 if ((post_recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 5562 uint32_t max; 5563 5564 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 5565 if (tp->t_ccv.bytes_this_ack > max) { 5566 tp->t_ccv.bytes_this_ack = max; 5567 } 5568 } 5569 #ifdef STATS 5570 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 5571 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 5572 #endif 5573 if ((th_ack == tp->snd_max) && rack->lt_bw_up) { 5574 /* 5575 * We will ack all the data, time to end any 5576 * lt_bw_up we have running until something 5577 * new is sent. Note we need to use the actual 5578 * ack_rcv_time which with pacing may be different. 
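 *
 * (The counters closed out below back the long-term b/w estimate,
 * which is in essence lt_bw_bytes divided by lt_bw_time.)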
5579 */ 5580 uint64_t tmark; 5581 5582 rack->r_ctl.lt_bw_bytes += (tp->snd_max - rack->r_ctl.lt_seq); 5583 rack->r_ctl.lt_seq = tp->snd_max; 5584 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 5585 if (tmark >= rack->r_ctl.lt_timemark) { 5586 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 5587 } 5588 rack->r_ctl.lt_timemark = tmark; 5589 rack->lt_bw_up = 0; 5590 } 5591 quality = RACK_QUALITY_NONE; 5592 if ((tp->t_flags & TF_GPUTINPROG) && 5593 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { 5594 /* Measure the Goodput */ 5595 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); 5596 } 5597 /* Which way our we limited, if not cwnd limited no advance in CA */ 5598 if (tp->snd_cwnd <= tp->snd_wnd) 5599 tp->t_ccv.flags |= CCF_CWND_LIMITED; 5600 else 5601 tp->t_ccv.flags &= ~CCF_CWND_LIMITED; 5602 if (tp->snd_cwnd > tp->snd_ssthresh) { 5603 tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack, 5604 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 5605 /* For the setting of a window past use the actual scwnd we are using */ 5606 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 5607 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 5608 tp->t_ccv.flags |= CCF_ABC_SENTAWND; 5609 } 5610 } else { 5611 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 5612 tp->t_bytes_acked = 0; 5613 } 5614 prior_cwnd = tp->snd_cwnd; 5615 if ((post_recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 5616 (rack_client_low_buf && rack->client_bufferlvl && 5617 (rack->client_bufferlvl < rack_client_low_buf))) 5618 labc_to_use = rack->rc_labc; 5619 else 5620 labc_to_use = rack_max_abc_post_recovery; 5621 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5622 union tcp_log_stackspecific log; 5623 struct timeval tv; 5624 5625 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5626 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5627 log.u_bbr.flex1 = th_ack; 5628 log.u_bbr.flex2 = tp->t_ccv.flags; 5629 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 5630 log.u_bbr.flex4 = tp->t_ccv.nsegs; 5631 log.u_bbr.flex5 = labc_to_use; 5632 log.u_bbr.flex6 = prior_cwnd; 5633 log.u_bbr.flex7 = V_tcp_do_newsack; 5634 log.u_bbr.flex8 = 1; 5635 lgb = tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5636 0, &log, false, NULL, __func__, __LINE__,&tv); 5637 } 5638 if (CC_ALGO(tp)->ack_received != NULL) { 5639 /* XXXLAS: Find a way to live without this */ 5640 tp->t_ccv.curack = th_ack; 5641 tp->t_ccv.labc = labc_to_use; 5642 tp->t_ccv.flags |= CCF_USE_LOCAL_ABC; 5643 CC_ALGO(tp)->ack_received(&tp->t_ccv, type); 5644 } 5645 if (lgb) { 5646 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; 5647 } 5648 if (rack->r_must_retran) { 5649 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { 5650 /* 5651 * We now are beyond the rxt point so lets disable 5652 * the flag. 5653 */ 5654 rack->r_ctl.rc_out_at_rto = 0; 5655 rack->r_must_retran = 0; 5656 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { 5657 /* 5658 * Only decrement the rc_out_at_rto if the cwnd advances 5659 * at least a whole segment. Otherwise next time the peer 5660 * acks, we won't be able to send this generaly happens 5661 * when we are in Congestion Avoidance. 
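 * (rc_out_at_rto holds the amount that was still outstanding at the
 * RTO; it is drained by the bytes newly acked, but only while cwnd
 * is actually advancing by at least a segment.)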
5662 */ 5663 if (acked <= rack->r_ctl.rc_out_at_rto){ 5664 rack->r_ctl.rc_out_at_rto -= acked; 5665 } else { 5666 rack->r_ctl.rc_out_at_rto = 0; 5667 } 5668 } 5669 } 5670 #ifdef STATS 5671 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); 5672 #endif 5673 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { 5674 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; 5675 } 5676 if ((rack->rc_initial_ss_comp == 0) && 5677 (tp->snd_cwnd >= tp->snd_ssthresh)) { 5678 /* 5679 * The cwnd has grown beyond ssthresh we have 5680 * entered ca and completed our first Slowstart. 5681 */ 5682 rack->rc_initial_ss_comp = 1; 5683 } 5684 } 5685 5686 static void 5687 tcp_rack_partialack(struct tcpcb *tp) 5688 { 5689 struct tcp_rack *rack; 5690 5691 rack = (struct tcp_rack *)tp->t_fb_ptr; 5692 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5693 /* 5694 * If we are doing PRR and have enough 5695 * room to send <or> we are pacing and prr 5696 * is disabled we will want to see if we 5697 * can send data (by setting r_wanted_output to 5698 * true). 5699 */ 5700 if ((rack->r_ctl.rc_prr_sndcnt > 0) || 5701 rack->rack_no_prr) 5702 rack->r_wanted_output = 1; 5703 } 5704 5705 static inline uint64_t 5706 rack_get_rxt_per(uint64_t snds, uint64_t rxts) 5707 { 5708 uint64_t rxt_per; 5709 5710 if (snds > 0) { 5711 rxt_per = rxts * 1000; 5712 rxt_per /= snds; 5713 } else { 5714 /* This is an unlikely path */ 5715 if (rxts) { 5716 /* Its the max it was all re-transmits */ 5717 rxt_per = 0xffffffffffffffff; 5718 } else { 5719 rxt_per = 0; 5720 } 5721 } 5722 return (rxt_per); 5723 } 5724 5725 static void 5726 policer_detection_log(struct tcp_rack *rack, uint32_t flex1, uint32_t flex2, uint32_t flex3, uint32_t flex4, uint8_t flex8) 5727 { 5728 if (tcp_bblogging_on(rack->rc_tp)) { 5729 union tcp_log_stackspecific log; 5730 struct timeval tv; 5731 5732 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5733 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5734 log.u_bbr.flex1 = flex1; 5735 log.u_bbr.flex2 = flex2; 5736 log.u_bbr.flex3 = flex3; 5737 log.u_bbr.flex4 = flex4; 5738 log.u_bbr.flex5 = rack->r_ctl.current_policer_bucket; 5739 log.u_bbr.flex6 = rack->r_ctl.policer_bucket_size; 5740 log.u_bbr.flex7 = 0; 5741 log.u_bbr.flex8 = flex8; 5742 log.u_bbr.bw_inuse = rack->r_ctl.policer_bw; 5743 log.u_bbr.applimited = rack->r_ctl.current_round; 5744 log.u_bbr.epoch = rack->r_ctl.policer_max_seg; 5745 log.u_bbr.delivered = (uint32_t)rack->r_ctl.bytes_acked_in_recovery; 5746 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 5747 log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes; 5748 log.u_bbr.rttProp = rack->r_ctl.gp_bw; 5749 log.u_bbr.bbr_state = rack->rc_policer_detected; 5750 log.u_bbr.bbr_substate = 0; 5751 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 5752 log.u_bbr.use_lt_bw = rack->policer_detect_on; 5753 log.u_bbr.lt_epoch = 0; 5754 log.u_bbr.pkts_out = 0; 5755 tcp_log_event(rack->rc_tp, NULL, NULL, NULL, TCP_POLICER_DET, 0, 5756 0, &log, false, NULL, NULL, 0, &tv); 5757 } 5758 5759 } 5760 5761 static void 5762 policer_detection(struct tcpcb *tp, struct tcp_rack *rack, int post_recovery) 5763 { 5764 /* 5765 * Rack excess rxt accounting is turned on. If we 5766 * are above a threshold of rxt's in at least N 5767 * rounds, then back off the cwnd and ssthresh 5768 * to fit into the long-term b/w. 
5769 */ 5770 5771 uint32_t pkts, mid, med, alt_med, avg, segsiz, tot_retran_pkt_count = 0; 5772 uint32_t cnt_of_mape_rxt = 0; 5773 uint64_t snds, rxts, rxt_per, tim, del, del_bw; 5774 int i; 5775 struct timeval tv; 5776 5777 5778 /* 5779 * First is there enough packets delivered during recovery to make 5780 * a determiniation of b/w? 5781 */ 5782 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 5783 if ((rack->rc_policer_detected == 0) && 5784 (rack->r_ctl.policer_del_mss > 0) && 5785 ((uint32_t)rack->r_ctl.policer_del_mss > ((rack->r_ctl.bytes_acked_in_recovery + segsiz - 1)/segsiz))) { 5786 /* 5787 * Not enough data sent in recovery for initial detection. Once 5788 * we have deteced a policer we allow less than the threshold (polcer_del_mss) 5789 * amount of data in a recovery to let us fall through and double check 5790 * our policer settings and possibly expand or collapse the bucket size and 5791 * the polcier b/w. 5792 * 5793 * Once you are declared to be policed. this block of code cannot be 5794 * reached, instead blocks further down will re-check the policer detection 5795 * triggers and possibly reset the measurements if somehow we have let the 5796 * policer bucket size grow too large. 5797 */ 5798 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5799 policer_detection_log(rack, rack->r_ctl.policer_del_mss, 5800 ((rack->r_ctl.bytes_acked_in_recovery + segsiz - 1)/segsiz), 5801 rack->r_ctl.bytes_acked_in_recovery, segsiz, 18); 5802 } 5803 return; 5804 } 5805 tcp_get_usecs(&tv); 5806 tim = tcp_tv_to_lusectick(&tv) - rack->r_ctl.time_entered_recovery; 5807 del = rack->r_ctl.bytes_acked_in_recovery; 5808 if (tim > 0) 5809 del_bw = (del * (uint64_t)1000000) / tim; 5810 else 5811 del_bw = 0; 5812 /* B/W compensation? */ 5813 5814 if (rack->r_ctl.pol_bw_comp && ((rack->r_ctl.policer_bw > 0) || 5815 (del_bw > 0))) { 5816 /* 5817 * Sanity check now that the data is in. How long does it 5818 * take for us to pace out two of our policer_max_seg's? 5819 * 5820 * If it is longer than the RTT then we are set 5821 * too slow, maybe because of not enough data 5822 * sent during recovery. 5823 */ 5824 uint64_t lentime, res, srtt, max_delbw, alt_bw; 5825 5826 srtt = (uint64_t)rack_grab_rtt(tp, rack); 5827 if ((tp->t_srtt > 0) && (srtt > tp->t_srtt)) 5828 srtt = tp->t_srtt; 5829 lentime = rack->r_ctl.policer_max_seg * (uint64_t)HPTS_USEC_IN_SEC * 2; 5830 if (del_bw > rack->r_ctl.policer_bw) { 5831 max_delbw = del_bw; 5832 } else { 5833 max_delbw = rack->r_ctl.policer_bw; 5834 } 5835 res = lentime / max_delbw; 5836 if ((srtt > 0) && (res > srtt)) { 5837 /* 5838 * At this rate we can not get two policer_maxsegs 5839 * out before the ack arrives back. 5840 * 5841 * Lets at least get it raised up so that 5842 * we can be a bit faster than that if possible. 5843 */ 5844 lentime = (rack->r_ctl.policer_max_seg * 2); 5845 tim = srtt; 5846 alt_bw = (lentime * (uint64_t)HPTS_USEC_IN_SEC) / tim; 5847 if (alt_bw > max_delbw) { 5848 uint64_t cap_alt_bw; 5849 5850 cap_alt_bw = (max_delbw + (max_delbw * rack->r_ctl.pol_bw_comp)); 5851 if ((rack_pol_min_bw > 0) && (cap_alt_bw < rack_pol_min_bw)) { 5852 /* We place a min on the cap which defaults to 1Mbps */ 5853 cap_alt_bw = rack_pol_min_bw; 5854 } 5855 if (alt_bw <= cap_alt_bw) { 5856 /* It should be */ 5857 del_bw = alt_bw; 5858 policer_detection_log(rack, 5859 (uint32_t)tim, 5860 rack->r_ctl.policer_max_seg, 5861 0, 5862 0, 5863 16); 5864 } else { 5865 /* 5866 * This is an odd case where likely the RTT is very very 5867 * low. 
And yet it is still being policed. We don't want 5868 * to get more than (rack_policing_do_bw_comp+1) x del-rate 5869 * where del-rate is what we got in recovery for either the 5870 * first Policer Detection(PD) or this PD we are on now. 5871 */ 5872 del_bw = cap_alt_bw; 5873 policer_detection_log(rack, 5874 (uint32_t)tim, 5875 rack->r_ctl.policer_max_seg, 5876 (uint32_t)max_delbw, 5877 (rack->r_ctl.pol_bw_comp + 1), 5878 16); 5879 } 5880 } 5881 } 5882 } 5883 snds = tp->t_sndbytes - rack->r_ctl.last_policer_sndbytes; 5884 rxts = tp->t_snd_rxt_bytes - rack->r_ctl.last_policer_snd_rxt_bytes; 5885 rxt_per = rack_get_rxt_per(snds, rxts); 5886 /* Figure up the average and median */ 5887 for(i = 0; i < RETRAN_CNT_SIZE; i++) { 5888 if (rack->r_ctl.rc_cnt_of_retran[i] > 0) { 5889 tot_retran_pkt_count += (i + 1) * rack->r_ctl.rc_cnt_of_retran[i]; 5890 cnt_of_mape_rxt += rack->r_ctl.rc_cnt_of_retran[i]; 5891 } 5892 } 5893 if (cnt_of_mape_rxt) 5894 avg = (tot_retran_pkt_count * 10)/cnt_of_mape_rxt; 5895 else 5896 avg = 0; 5897 alt_med = med = 0; 5898 mid = tot_retran_pkt_count/2; 5899 for(i = 0; i < RETRAN_CNT_SIZE; i++) { 5900 pkts = (i + 1) * rack->r_ctl.rc_cnt_of_retran[i]; 5901 if (mid > pkts) { 5902 mid -= pkts; 5903 continue; 5904 } 5905 med = (i + 1); 5906 break; 5907 } 5908 mid = cnt_of_mape_rxt / 2; 5909 for(i = 0; i < RETRAN_CNT_SIZE; i++) { 5910 if (mid > rack->r_ctl.rc_cnt_of_retran[i]) { 5911 mid -= rack->r_ctl.rc_cnt_of_retran[i]; 5912 continue; 5913 } 5914 alt_med = (i + 1); 5915 break; 5916 } 5917 if (rack->r_ctl.policer_alt_median) { 5918 /* Swap the medians */ 5919 uint32_t swap; 5920 5921 swap = med; 5922 med = alt_med; 5923 alt_med = swap; 5924 } 5925 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5926 union tcp_log_stackspecific log; 5927 struct timeval tv; 5928 5929 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5930 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5931 log.u_bbr.flex1 = avg; 5932 log.u_bbr.flex2 = med; 5933 log.u_bbr.flex3 = (uint32_t)rxt_per; 5934 log.u_bbr.flex4 = rack->r_ctl.policer_avg_threshold; 5935 log.u_bbr.flex5 = rack->r_ctl.policer_med_threshold; 5936 log.u_bbr.flex6 = rack->r_ctl.policer_rxt_threshold; 5937 log.u_bbr.flex7 = rack->r_ctl.policer_alt_median; 5938 log.u_bbr.flex8 = 1; 5939 log.u_bbr.delivered = rack->r_ctl.policer_bucket_size; 5940 log.u_bbr.applimited = rack->r_ctl.current_round; 5941 log.u_bbr.epoch = rack->r_ctl.policer_max_seg; 5942 log.u_bbr.bw_inuse = del_bw; 5943 log.u_bbr.cur_del_rate = rxts; 5944 log.u_bbr.delRate = snds; 5945 log.u_bbr.rttProp = rack->r_ctl.gp_bw; 5946 log.u_bbr.bbr_state = rack->rc_policer_detected; 5947 log.u_bbr.bbr_substate = 0; 5948 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 5949 log.u_bbr.use_lt_bw = rack->policer_detect_on; 5950 log.u_bbr.lt_epoch = (uint32_t)tim; 5951 log.u_bbr.pkts_out = rack->r_ctl.bytes_acked_in_recovery; 5952 tcp_log_event(tp, NULL, NULL, NULL, TCP_POLICER_DET, 0, 5953 0, &log, false, NULL, NULL, 0, &tv); 5954 } 5955 if (med == RETRAN_CNT_SIZE) { 5956 /* 5957 * If the median is the maximum, then what we 5958 * likely have here is a network breakage. Either that 5959 * or we are so unlucky that all of our traffic is being 5960 * dropped and having to be retransmitted the maximum times 5961 * and this just is not how a policer works. 5962 * 5963 * If it is truely a policer eventually we will come 5964 * through and it won't be the maximum. 5965 */ 5966 return; 5967 } 5968 /* Has enough rounds progressed for us to re-measure? 
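 *
 * A small worked example of the statistics just computed, with
 * hypothetical counts: if rc_cnt_of_retran[] records 8 packets
 * retransmitted once and 2 packets retransmitted twice, then
 * tot_retran_pkt_count = 12, cnt_of_mape_rxt = 10, avg = 12 (1.2
 * retransmissions per retransmitted packet, scaled by 10), and both
 * med and alt_med land in the first bucket, i.e. 1. Those values,
 * together with rxt_per, are what the thresholds below are applied
 * to.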
*/ 5969 if ((rxt_per >= (uint64_t)rack->r_ctl.policer_rxt_threshold) &&
5970 (avg >= rack->r_ctl.policer_avg_threshold) &&
5971 (med >= rack->r_ctl.policer_med_threshold)) {
5972 /*
5973 * We hit all the thresholds that indicate we are
5974 * being policed. Now we may be doing this from a rack timeout,
5975 * which then means the rest of recovery will hopefully go
5976 * smoother as we pace. At the end of recovery we will
5977 * fall back in here and reset the values using the
5978 * results of the entire recovery episode (we could also
5979 * hit this as we exit recovery as well, which means only
5980 * one time in here).
5981 *
5982 * This is done deliberately so that, if we hit the thresholds
5983 * again in a second recovery, we overwrite the values. We do
5984 * that because, over time as we pace, the policer_bucket_size may
5985 * continue to grow. This then provides more and more times when
5986 * we are not pacing to the policer rate. This lets us compensate
5987 * for when we hit a false positive and those flows continue to
5988 * increase. However, if it is a real policer we will then get over its
5989 * limit, over time, again and thus end up back here hitting the
5990 * thresholds again.
5991 *
5992 * The alternative to this is that, whenever we pace due to
5993 * policing in rack_policed_sending, we could add the amount (len) paced to the
5994 * idle_snd_una value (which decreases the amount in last_amount_before_rec,
5995 * since that is always [th_ack - idle_snd_una]). This would then prevent
5996 * the policer_bucket_size from growing in additional recovery episodes,
5997 * which would then mean false positives would be pretty much stuck
5998 * after things got back to normal (assuming that what caused the
5999 * false positive was a small network outage).
6000 *
6001 */ 6002 tcp_trace_point(rack->rc_tp, TCP_TP_POLICER_DET);
6003 if (rack->rc_policer_detected == 0) {
6004 /*
6005 * Increment the stat that tells us we identified
6006 * a policer only once. Note that if we ever allow
6007 * the flag to be cleared (reverted) then we need
6008 * to adjust this to not do multi-counting.
6009 */ 6010 counter_u64_add(tcp_policer_detected, 1); 6011 } 6012 rack->r_ctl.last_policer_sndbytes = tp->t_sndbytes; 6013 rack->r_ctl.last_policer_snd_rxt_bytes = tp->t_snd_rxt_bytes; 6014 rack->r_ctl.policer_bw = del_bw; 6015 rack->r_ctl.policer_max_seg = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, 6016 rack->r_ctl.policer_bw, 6017 min(ctf_fixed_maxseg(rack->rc_tp), 6018 rack->r_ctl.rc_pace_min_segs), 6019 0, NULL, 6020 NULL, rack->r_ctl.pace_len_divisor); 6021 /* Now what about the policer bucket size */ 6022 rack->r_ctl.policer_bucket_size = rack->r_ctl.last_amount_before_rec; 6023 if (rack->r_ctl.policer_bucket_size < rack->r_ctl.policer_max_seg) { 6024 /* We must be able to send our max-seg or else chaos ensues */ 6025 rack->r_ctl.policer_bucket_size = rack->r_ctl.policer_max_seg * 2; 6026 } 6027 if (rack->rc_policer_detected == 0) 6028 rack->r_ctl.current_policer_bucket = 0; 6029 if (tcp_bblogging_on(rack->rc_tp)) { 6030 union tcp_log_stackspecific log; 6031 struct timeval tv; 6032 6033 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 6034 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 6035 log.u_bbr.flex1 = avg; 6036 log.u_bbr.flex2 = med; 6037 log.u_bbr.flex3 = rxt_per; 6038 log.u_bbr.flex4 = rack->r_ctl.policer_avg_threshold; 6039 log.u_bbr.flex5 = rack->r_ctl.policer_med_threshold; 6040 log.u_bbr.flex6 = rack->r_ctl.policer_rxt_threshold; 6041 log.u_bbr.flex7 = rack->r_ctl.policer_alt_median; 6042 log.u_bbr.flex8 = 2; 6043 log.u_bbr.applimited = rack->r_ctl.current_round; 6044 log.u_bbr.bw_inuse = del_bw; 6045 log.u_bbr.delivered = rack->r_ctl.policer_bucket_size; 6046 log.u_bbr.cur_del_rate = rxts; 6047 log.u_bbr.delRate = snds; 6048 log.u_bbr.rttProp = rack->r_ctl.gp_bw; 6049 log.u_bbr.bbr_state = rack->rc_policer_detected; 6050 log.u_bbr.bbr_substate = 0; 6051 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 6052 log.u_bbr.use_lt_bw = rack->policer_detect_on; 6053 log.u_bbr.epoch = rack->r_ctl.policer_max_seg; 6054 log.u_bbr.lt_epoch = (uint32_t)tim; 6055 log.u_bbr.pkts_out = rack->r_ctl.bytes_acked_in_recovery; 6056 tcp_log_event(tp, NULL, NULL, NULL, TCP_POLICER_DET, 0, 6057 0, &log, false, NULL, NULL, 0, &tv); 6058 /* 6059 * Put out an added log, 19, for the sole purpose 6060 * of getting the txt/rxt so that we can benchmark 6061 * in read-bbrlog the ongoing rxt rate after our 6062 * policer invocation in the HYSTART announcments. 6063 */ 6064 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 6065 log.u_bbr.timeStamp = tcp_tv_to_usectick(&tv); 6066 log.u_bbr.flex1 = alt_med; 6067 log.u_bbr.flex8 = 19; 6068 log.u_bbr.cur_del_rate = tp->t_sndbytes; 6069 log.u_bbr.delRate = tp->t_snd_rxt_bytes; 6070 tcp_log_event(tp, NULL, NULL, NULL, TCP_POLICER_DET, 0, 6071 0, &log, false, NULL, NULL, 0, &tv); 6072 } 6073 /* Turn off any fast output, thats ended */ 6074 rack->r_fast_output = 0; 6075 /* Mark the time for credits */ 6076 rack->r_ctl.last_sendtime = tcp_get_u64_usecs(NULL); 6077 if (rack->r_rr_config < 2) { 6078 /* 6079 * We need to be stricter on the RR config so 6080 * the pacing has priority. 6081 */ 6082 rack->r_rr_config = 2; 6083 } 6084 policer_detection_log(rack, 6085 rack->r_ctl.idle_snd_una, 6086 rack->r_ctl.ack_for_idle, 6087 0, 6088 (uint32_t)tim, 6089 14); 6090 rack->rc_policer_detected = 1; 6091 } else if ((rack->rc_policer_detected == 1) && 6092 (post_recovery == 1)) { 6093 /* 6094 * If we are exiting recovery and have already detected 6095 * we need to possibly update the values. 6096 * 6097 * First: Update the idle -> recovery sent value. 
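 * (last_amount_before_rec is the number of bytes the peer accepted
 * between the last idle point and the start of this recovery, which
 * is our best available estimate of the policer's token-bucket
 * depth, so the bucket size is only ever grown toward it below.)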
6098 */ 6099 uint32_t srtt; 6100 6101 if (rack->r_ctl.last_amount_before_rec > rack->r_ctl.policer_bucket_size) { 6102 rack->r_ctl.policer_bucket_size = rack->r_ctl.last_amount_before_rec; 6103 } 6104 srtt = (uint64_t)rack_grab_rtt(tp, rack); 6105 if ((tp->t_srtt > 0) && (srtt > tp->t_srtt)) 6106 srtt = tp->t_srtt; 6107 if ((srtt != 0) && 6108 (tim < (uint64_t)srtt)) { 6109 /* 6110 * Not long enough. 6111 */ 6112 if (rack_verbose_logging) 6113 policer_detection_log(rack, 6114 (uint32_t)tim, 6115 0, 6116 0, 6117 0, 6118 15); 6119 return; 6120 } 6121 /* 6122 * Finally update the b/w if its grown. 6123 */ 6124 if (del_bw > rack->r_ctl.policer_bw) { 6125 rack->r_ctl.policer_bw = del_bw; 6126 rack->r_ctl.policer_max_seg = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, 6127 rack->r_ctl.policer_bw, 6128 min(ctf_fixed_maxseg(rack->rc_tp), 6129 rack->r_ctl.rc_pace_min_segs), 6130 0, NULL, 6131 NULL, rack->r_ctl.pace_len_divisor); 6132 if (rack->r_ctl.policer_bucket_size < rack->r_ctl.policer_max_seg) { 6133 /* We must be able to send our max-seg or else chaos ensues */ 6134 rack->r_ctl.policer_bucket_size = rack->r_ctl.policer_max_seg * 2; 6135 } 6136 } 6137 policer_detection_log(rack, 6138 rack->r_ctl.idle_snd_una, 6139 rack->r_ctl.ack_for_idle, 6140 0, 6141 (uint32_t)tim, 6142 3); 6143 } 6144 } 6145 6146 static void 6147 rack_exit_recovery(struct tcpcb *tp, struct tcp_rack *rack, int how) 6148 { 6149 /* now check with the policer if on */ 6150 if (rack->policer_detect_on == 1) { 6151 policer_detection(tp, rack, 1); 6152 } 6153 /* 6154 * Now exit recovery, note we must do the idle set after the policer_detection 6155 * to get the amount acked prior to recovery correct. 6156 */ 6157 rack->r_ctl.idle_snd_una = tp->snd_una; 6158 EXIT_RECOVERY(tp->t_flags); 6159 } 6160 6161 static void 6162 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack) 6163 { 6164 struct tcp_rack *rack; 6165 uint32_t orig_cwnd; 6166 6167 orig_cwnd = tp->snd_cwnd; 6168 INP_WLOCK_ASSERT(tptoinpcb(tp)); 6169 rack = (struct tcp_rack *)tp->t_fb_ptr; 6170 /* only alert CC if we alerted when we entered */ 6171 if (CC_ALGO(tp)->post_recovery != NULL) { 6172 tp->t_ccv.curack = th_ack; 6173 CC_ALGO(tp)->post_recovery(&tp->t_ccv); 6174 if (tp->snd_cwnd < tp->snd_ssthresh) { 6175 /* 6176 * Rack has burst control and pacing 6177 * so lets not set this any lower than 6178 * snd_ssthresh per RFC-6582 (option 2). 6179 */ 6180 tp->snd_cwnd = tp->snd_ssthresh; 6181 } 6182 } 6183 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 6184 union tcp_log_stackspecific log; 6185 struct timeval tv; 6186 6187 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 6188 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 6189 log.u_bbr.flex1 = th_ack; 6190 log.u_bbr.flex2 = tp->t_ccv.flags; 6191 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 6192 log.u_bbr.flex4 = tp->t_ccv.nsegs; 6193 log.u_bbr.flex5 = V_tcp_abc_l_var; 6194 log.u_bbr.flex6 = orig_cwnd; 6195 log.u_bbr.flex7 = V_tcp_do_newsack; 6196 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 6197 log.u_bbr.flex8 = 2; 6198 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 6199 0, &log, false, NULL, __func__, __LINE__, &tv); 6200 } 6201 if ((rack->rack_no_prr == 0) && 6202 (rack->no_prr_addback == 0) && 6203 (rack->r_ctl.rc_prr_sndcnt > 0)) { 6204 /* 6205 * Suck the next prr cnt back into cwnd, but 6206 * only do that if we are not application limited. 
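 * The addback below is further capped at rack_prr_addbackmax
 * segments; a cap of 2, for instance, returns at most two MSS worth
 * of the unsent PRR allotment to cwnd.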
6207 */ 6208 if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) { 6209 /* 6210 * We are allowed to add back to the cwnd the amount we did 6211 * not get out if: 6212 * a) no_prr_addback is off. 6213 * b) we are not app limited 6214 * c) we are doing prr 6215 * <and> 6216 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 6217 */ 6218 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 6219 rack->r_ctl.rc_prr_sndcnt); 6220 } 6221 rack->r_ctl.rc_prr_sndcnt = 0; 6222 rack_log_to_prr(rack, 1, 0, __LINE__); 6223 } 6224 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 6225 tp->snd_recover = tp->snd_una; 6226 if (rack->r_ctl.dsack_persist) { 6227 rack->r_ctl.dsack_persist--; 6228 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6229 rack->r_ctl.num_dsack = 0; 6230 } 6231 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6232 } 6233 if (rack->rto_from_rec == 1) { 6234 rack->rto_from_rec = 0; 6235 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) 6236 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; 6237 } 6238 rack_exit_recovery(tp, rack, 1); 6239 } 6240 6241 static void 6242 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line) 6243 { 6244 struct tcp_rack *rack; 6245 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 6246 6247 INP_WLOCK_ASSERT(tptoinpcb(tp)); 6248 #ifdef STATS 6249 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 6250 #endif 6251 if (IN_RECOVERY(tp->t_flags) == 0) { 6252 in_rec_at_entry = 0; 6253 ssthresh_enter = tp->snd_ssthresh; 6254 cwnd_enter = tp->snd_cwnd; 6255 } else 6256 in_rec_at_entry = 1; 6257 rack = (struct tcp_rack *)tp->t_fb_ptr; 6258 switch (type) { 6259 case CC_NDUPACK: 6260 tp->t_flags &= ~TF_WASFRECOVERY; 6261 tp->t_flags &= ~TF_WASCRECOVERY; 6262 if (!IN_FASTRECOVERY(tp->t_flags)) { 6263 struct rack_sendmap *rsm; 6264 struct timeval tv; 6265 uint32_t segsiz; 6266 6267 /* Check if this is the end of the initial Start-up i.e. initial slow-start */ 6268 if (rack->rc_initial_ss_comp == 0) { 6269 /* Yep it is the end of the initial slowstart */ 6270 rack->rc_initial_ss_comp = 1; 6271 } 6272 microuptime(&tv); 6273 rack->r_ctl.time_entered_recovery = tcp_tv_to_lusectick(&tv); 6274 if (SEQ_GEQ(ack, tp->snd_una)) { 6275 /* 6276 * The ack is above snd_una. Lets see 6277 * if we can establish a postive distance from 6278 * our idle mark. 6279 */ 6280 rack->r_ctl.ack_for_idle = ack; 6281 if (SEQ_GT(ack, rack->r_ctl.idle_snd_una)) { 6282 rack->r_ctl.last_amount_before_rec = ack - rack->r_ctl.idle_snd_una; 6283 } else { 6284 /* No data thru yet */ 6285 rack->r_ctl.last_amount_before_rec = 0; 6286 } 6287 } else if (SEQ_GT(tp->snd_una, rack->r_ctl.idle_snd_una)) { 6288 /* 6289 * The ack is out of order and behind the snd_una. It may 6290 * have contained SACK information which we processed else 6291 * we would have rejected it. 6292 */ 6293 rack->r_ctl.ack_for_idle = tp->snd_una; 6294 rack->r_ctl.last_amount_before_rec = tp->snd_una - rack->r_ctl.idle_snd_una; 6295 } else { 6296 rack->r_ctl.ack_for_idle = ack; 6297 rack->r_ctl.last_amount_before_rec = 0; 6298 } 6299 if (rack->rc_policer_detected) { 6300 /* 6301 * If we are being policed and we have a loss, it 6302 * means our bucket is now empty. This can happen 6303 * where some other flow on the same host sends 6304 * that this connection is not aware of. 
6305 */ 6306 rack->r_ctl.current_policer_bucket = 0; 6307 if (rack_verbose_logging) 6308 policer_detection_log(rack, rack->r_ctl.last_amount_before_rec, 0, 0, 0, 4); 6309 if (rack->r_ctl.last_amount_before_rec > rack->r_ctl.policer_bucket_size) { 6310 rack->r_ctl.policer_bucket_size = rack->r_ctl.last_amount_before_rec; 6311 } 6312 } 6313 memset(rack->r_ctl.rc_cnt_of_retran, 0, sizeof(rack->r_ctl.rc_cnt_of_retran)); 6314 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 6315 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 6316 /* 6317 * Go through the outstanding and re-peg 6318 * any that should have been left in the 6319 * retransmit list (on a double recovery). 6320 */ 6321 if (rsm->r_act_rxt_cnt > 0) { 6322 rack_peg_rxt(rack, rsm, segsiz); 6323 } 6324 } 6325 rack->r_ctl.bytes_acked_in_recovery = 0; 6326 rack->r_ctl.rc_prr_delivered = 0; 6327 rack->r_ctl.rc_prr_out = 0; 6328 rack->r_fast_output = 0; 6329 if (rack->rack_no_prr == 0) { 6330 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 6331 rack_log_to_prr(rack, 2, in_rec_at_entry, line); 6332 } 6333 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 6334 tp->snd_recover = tp->snd_max; 6335 if (tp->t_flags2 & TF2_ECN_PERMIT) 6336 tp->t_flags2 |= TF2_ECN_SND_CWR; 6337 } 6338 break; 6339 case CC_ECN: 6340 if (!IN_CONGRECOVERY(tp->t_flags) || 6341 /* 6342 * Allow ECN reaction on ACK to CWR, if 6343 * that data segment was also CE marked. 6344 */ 6345 SEQ_GEQ(ack, tp->snd_recover)) { 6346 EXIT_CONGRECOVERY(tp->t_flags); 6347 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 6348 rack->r_fast_output = 0; 6349 tp->snd_recover = tp->snd_max + 1; 6350 if (tp->t_flags2 & TF2_ECN_PERMIT) 6351 tp->t_flags2 |= TF2_ECN_SND_CWR; 6352 } 6353 break; 6354 case CC_RTO: 6355 tp->t_dupacks = 0; 6356 tp->t_bytes_acked = 0; 6357 rack->r_fast_output = 0; 6358 if (IN_RECOVERY(tp->t_flags)) 6359 rack_exit_recovery(tp, rack, 2); 6360 rack->r_ctl.bytes_acked_in_recovery = 0; 6361 rack->r_ctl.time_entered_recovery = 0; 6362 orig_cwnd = tp->snd_cwnd; 6363 rack_log_to_prr(rack, 16, orig_cwnd, line); 6364 if (CC_ALGO(tp)->cong_signal == NULL) { 6365 /* TSNH */ 6366 tp->snd_ssthresh = max(2, 6367 min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 6368 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 6369 tp->snd_cwnd = ctf_fixed_maxseg(tp); 6370 } 6371 if (tp->t_flags2 & TF2_ECN_PERMIT) 6372 tp->t_flags2 |= TF2_ECN_SND_CWR; 6373 break; 6374 case CC_RTO_ERR: 6375 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 6376 /* RTO was unnecessary, so reset everything. 
*/ 6377 tp->snd_cwnd = tp->snd_cwnd_prev; 6378 tp->snd_ssthresh = tp->snd_ssthresh_prev; 6379 tp->snd_recover = tp->snd_recover_prev; 6380 if (tp->t_flags & TF_WASFRECOVERY) { 6381 ENTER_FASTRECOVERY(tp->t_flags); 6382 tp->t_flags &= ~TF_WASFRECOVERY; 6383 } 6384 if (tp->t_flags & TF_WASCRECOVERY) { 6385 ENTER_CONGRECOVERY(tp->t_flags); 6386 tp->t_flags &= ~TF_WASCRECOVERY; 6387 } 6388 tp->snd_nxt = tp->snd_max; 6389 tp->t_badrxtwin = 0; 6390 break; 6391 } 6392 if ((CC_ALGO(tp)->cong_signal != NULL) && 6393 (type != CC_RTO)){ 6394 tp->t_ccv.curack = ack; 6395 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type); 6396 } 6397 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 6398 rack_log_to_prr(rack, 15, cwnd_enter, line); 6399 rack->r_ctl.dsack_byte_cnt = 0; 6400 rack->r_ctl.retran_during_recovery = 0; 6401 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 6402 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 6403 rack->r_ent_rec_ns = 1; 6404 } 6405 } 6406 6407 static inline void 6408 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 6409 { 6410 uint32_t i_cwnd; 6411 6412 INP_WLOCK_ASSERT(tptoinpcb(tp)); 6413 6414 if (CC_ALGO(tp)->after_idle != NULL) 6415 CC_ALGO(tp)->after_idle(&tp->t_ccv); 6416 6417 if (tp->snd_cwnd == 1) 6418 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 6419 else 6420 i_cwnd = rc_init_window(rack); 6421 6422 /* 6423 * Being idle is no different than the initial window. If the cc 6424 * clamps it down below the initial window raise it to the initial 6425 * window. 6426 */ 6427 if (tp->snd_cwnd < i_cwnd) { 6428 tp->snd_cwnd = i_cwnd; 6429 } 6430 } 6431 6432 /* 6433 * Indicate whether this ack should be delayed. We can delay the ack if 6434 * following conditions are met: 6435 * - There is no delayed ack timer in progress. 6436 * - Our last ack wasn't a 0-sized window. We never want to delay 6437 * the ack that opens up a 0-sized window. 6438 * - LRO wasn't used for this segment. We make sure by checking that the 6439 * segment size is not larger than the MSS. 6440 * - Delayed acks are enabled or this is a half-synchronized T/TCP 6441 * connection. 6442 */ 6443 #define DELAY_ACK(tp, tlen) \ 6444 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 6445 ((tp->t_flags & TF_DELACK) == 0) && \ 6446 (tlen <= tp->t_maxseg) && \ 6447 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 6448 6449 static struct rack_sendmap * 6450 rack_find_lowest_rsm(struct tcp_rack *rack) 6451 { 6452 struct rack_sendmap *rsm; 6453 6454 /* 6455 * Walk the time-order transmitted list looking for an rsm that is 6456 * not acked. This will be the one that was sent the longest time 6457 * ago that is still outstanding. 6458 */ 6459 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 6460 if (rsm->r_flags & RACK_ACKED) { 6461 continue; 6462 } 6463 goto finish; 6464 } 6465 finish: 6466 return (rsm); 6467 } 6468 6469 static struct rack_sendmap * 6470 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm) 6471 { 6472 struct rack_sendmap *prsm; 6473 6474 /* 6475 * Walk the sequence order list backward until we hit and arrive at 6476 * the highest seq not acked. In theory when this is called it 6477 * should be the last segment (which it was not). 
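*
* (Added note: concretely, TQHASH_FOREACH_REVERSE_FROM() below walks
* from the passed-in rsm toward lower sequence numbers and returns
* the first entry that is neither RACK_ACKED nor RACK_HAS_FIN; a
* NULL return means everything at or below the starting point is
* already acked or carries only a FIN.)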
6478 */ 6479 prsm = rsm; 6480 6481 TQHASH_FOREACH_REVERSE_FROM(prsm, rack->r_ctl.tqh) { 6482 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) { 6483 continue; 6484 } 6485 return (prsm); 6486 } 6487 return (NULL); 6488 } 6489 6490 static uint32_t 6491 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts, int line, int log_allowed) 6492 { 6493 int32_t lro; 6494 uint32_t thresh; 6495 6496 /* 6497 * lro is the flag we use to determine if we have seen reordering. 6498 * If it gets set we have seen reordering. The reorder logic either 6499 * works in one of two ways: 6500 * 6501 * If reorder-fade is configured, then we track the last time we saw 6502 * re-ordering occur. If we reach the point where enough time as 6503 * passed we no longer consider reordering has occuring. 6504 * 6505 * Or if reorder-face is 0, then once we see reordering we consider 6506 * the connection to alway be subject to reordering and just set lro 6507 * to 1. 6508 * 6509 * In the end if lro is non-zero we add the extra time for 6510 * reordering in. 6511 */ 6512 if (srtt == 0) 6513 srtt = 1; 6514 if (rack->r_ctl.rc_reorder_ts) { 6515 if (rack->r_ctl.rc_reorder_fade) { 6516 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) { 6517 lro = cts - rack->r_ctl.rc_reorder_ts; 6518 if (lro == 0) { 6519 /* 6520 * No time as passed since the last 6521 * reorder, mark it as reordering. 6522 */ 6523 lro = 1; 6524 } 6525 } else { 6526 /* Negative time? */ 6527 lro = 0; 6528 } 6529 if (lro > rack->r_ctl.rc_reorder_fade) { 6530 /* Turn off reordering seen too */ 6531 rack->r_ctl.rc_reorder_ts = 0; 6532 lro = 0; 6533 } 6534 } else { 6535 /* Reodering does not fade */ 6536 lro = 1; 6537 } 6538 } else { 6539 lro = 0; 6540 } 6541 if (rack->rc_rack_tmr_std_based == 0) { 6542 thresh = srtt + rack->r_ctl.rc_pkt_delay; 6543 } else { 6544 /* Standards based pkt-delay is 1/4 srtt */ 6545 thresh = srtt + (srtt >> 2); 6546 } 6547 if (lro && (rack->rc_rack_tmr_std_based == 0)) { 6548 /* It must be set, if not you get 1/4 rtt */ 6549 if (rack->r_ctl.rc_reorder_shift) 6550 thresh += (srtt >> rack->r_ctl.rc_reorder_shift); 6551 else 6552 thresh += (srtt >> 2); 6553 } 6554 if (rack->rc_rack_use_dsack && 6555 lro && 6556 (rack->r_ctl.num_dsack > 0)) { 6557 /* 6558 * We only increase the reordering window if we 6559 * have seen reordering <and> we have a DSACK count. 
6560 */ 6561 thresh += rack->r_ctl.num_dsack * (srtt >> 2); 6562 if (log_allowed) 6563 rack_log_dsack_event(rack, 4, line, srtt, thresh); 6564 } 6565 /* SRTT * 2 is the ceiling */ 6566 if (thresh > (srtt * 2)) { 6567 thresh = srtt * 2; 6568 } 6569 /* And we don't want it above the RTO max either */ 6570 if (thresh > rack_rto_max) { 6571 thresh = rack_rto_max; 6572 } 6573 if (log_allowed) 6574 rack_log_dsack_event(rack, 6, line, srtt, thresh); 6575 return (thresh); 6576 } 6577 6578 static uint32_t 6579 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 6580 struct rack_sendmap *rsm, uint32_t srtt) 6581 { 6582 struct rack_sendmap *prsm; 6583 uint32_t thresh, len; 6584 int segsiz; 6585 6586 if (srtt == 0) 6587 srtt = 1; 6588 if (rack->r_ctl.rc_tlp_threshold) 6589 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 6590 else 6591 thresh = (srtt * 2); 6592 6593 /* Get the previous sent packet, if any */ 6594 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 6595 len = rsm->r_end - rsm->r_start; 6596 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 6597 /* Exactly like the ID */ 6598 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { 6599 uint32_t alt_thresh; 6600 /* 6601 * Compensate for delayed-ack with the d-ack time. 6602 */ 6603 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6604 if (alt_thresh > thresh) 6605 thresh = alt_thresh; 6606 } 6607 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 6608 /* 2.1 behavior */ 6609 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 6610 if (prsm && (len <= segsiz)) { 6611 /* 6612 * Two packets outstanding, thresh should be (2*srtt) + 6613 * possible inter-packet delay (if any). 6614 */ 6615 uint32_t inter_gap = 0; 6616 int idx, nidx; 6617 6618 idx = rsm->r_rtr_cnt - 1; 6619 nidx = prsm->r_rtr_cnt - 1; 6620 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 6621 /* Yes it was sent later (or at the same time) */ 6622 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 6623 } 6624 thresh += inter_gap; 6625 } else if (len <= segsiz) { 6626 /* 6627 * Possibly compensate for delayed-ack. 6628 */ 6629 uint32_t alt_thresh; 6630 6631 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6632 if (alt_thresh > thresh) 6633 thresh = alt_thresh; 6634 } 6635 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 6636 /* 2.2 behavior */ 6637 if (len <= segsiz) { 6638 uint32_t alt_thresh; 6639 /* 6640 * Compensate for delayed-ack with the d-ack time. 6641 */ 6642 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6643 if (alt_thresh > thresh) 6644 thresh = alt_thresh; 6645 } 6646 } 6647 /* Not above an RTO */ 6648 if (thresh > tp->t_rxtcur) { 6649 thresh = tp->t_rxtcur; 6650 } 6651 /* Not above a RTO max */ 6652 if (thresh > rack_rto_max) { 6653 thresh = rack_rto_max; 6654 } 6655 /* Apply user supplied min TLP */ 6656 if (thresh < rack_tlp_min) { 6657 thresh = rack_tlp_min; 6658 } 6659 return (thresh); 6660 } 6661 6662 static uint32_t 6663 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 6664 { 6665 /* 6666 * We want the rack_rtt which is the 6667 * last rtt we measured. However if that 6668 * does not exist we fallback to the srtt (which 6669 * we probably will never do) and then as a last 6670 * resort we use RACK_INITIAL_RTO if no srtt is 6671 * yet set. 
6672 */ 6673 if (rack->rc_rack_rtt) 6674 return (rack->rc_rack_rtt); 6675 else if (tp->t_srtt == 0) 6676 return (RACK_INITIAL_RTO); 6677 return (tp->t_srtt); 6678 } 6679 6680 static struct rack_sendmap * 6681 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 6682 { 6683 /* 6684 * Check to see that we don't need to fall into recovery. We will 6685 * need to do so if our oldest transmit is past the time we should 6686 * have had an ack. 6687 */ 6688 struct tcp_rack *rack; 6689 struct rack_sendmap *rsm; 6690 int32_t idx; 6691 uint32_t srtt, thresh; 6692 6693 rack = (struct tcp_rack *)tp->t_fb_ptr; 6694 if (tqhash_empty(rack->r_ctl.tqh)) { 6695 return (NULL); 6696 } 6697 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6698 if (rsm == NULL) 6699 return (NULL); 6700 6701 6702 if (rsm->r_flags & RACK_ACKED) { 6703 rsm = rack_find_lowest_rsm(rack); 6704 if (rsm == NULL) 6705 return (NULL); 6706 } 6707 idx = rsm->r_rtr_cnt - 1; 6708 srtt = rack_grab_rtt(tp, rack); 6709 thresh = rack_calc_thresh_rack(rack, srtt, tsused, __LINE__, 1); 6710 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 6711 return (NULL); 6712 } 6713 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 6714 return (NULL); 6715 } 6716 /* Ok if we reach here we are over-due and this guy can be sent */ 6717 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 6718 return (rsm); 6719 } 6720 6721 static uint32_t 6722 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 6723 { 6724 int32_t t; 6725 int32_t tt; 6726 uint32_t ret_val; 6727 6728 t = (tp->t_srtt + (tp->t_rttvar << 2)); 6729 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 6730 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 6731 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 6732 ret_val = (uint32_t)tt; 6733 return (ret_val); 6734 } 6735 6736 static uint32_t 6737 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 6738 { 6739 /* 6740 * Start the FR timer, we do this based on getting the first one in 6741 * the rc_tmap. Note that if its NULL we must stop the timer. in all 6742 * events we need to stop the running timer (if its running) before 6743 * starting the new one. 6744 */ 6745 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 6746 uint32_t srtt_cur; 6747 int32_t idx; 6748 int32_t is_tlp_timer = 0; 6749 struct rack_sendmap *rsm; 6750 6751 if (rack->t_timers_stopped) { 6752 /* All timers have been stopped none are to run */ 6753 return (0); 6754 } 6755 if (rack->rc_in_persist) { 6756 /* We can't start any timer in persists */ 6757 return (rack_get_persists_timer_val(tp, rack)); 6758 } 6759 rack->rc_on_min_to = 0; 6760 if ((tp->t_state < TCPS_ESTABLISHED) || 6761 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 6762 goto activate_rxt; 6763 } 6764 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6765 if ((rsm == NULL) || sup_rack) { 6766 /* Nothing on the send map or no rack */ 6767 activate_rxt: 6768 time_since_sent = 0; 6769 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6770 if (rsm) { 6771 /* 6772 * Should we discount the RTX timer any? 6773 * 6774 * We want to discount it the smallest amount. 6775 * If a timer (Rack/TLP or RXT) has gone off more 6776 * recently thats the discount we want to use (now - timer time). 6777 * If the retransmit of the oldest packet was more recent then 6778 * we want to use that (now - oldest-packet-last_transmit_time). 
6779 * 6780 */ 6781 idx = rsm->r_rtr_cnt - 1; 6782 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 6783 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 6784 else 6785 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 6786 if (TSTMP_GT(cts, tstmp_touse)) 6787 time_since_sent = cts - tstmp_touse; 6788 } 6789 if (SEQ_LT(tp->snd_una, tp->snd_max) || 6790 sbavail(&tptosocket(tp)->so_snd)) { 6791 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 6792 to = tp->t_rxtcur; 6793 if (to > time_since_sent) 6794 to -= time_since_sent; 6795 else 6796 to = rack->r_ctl.rc_min_to; 6797 if (to == 0) 6798 to = 1; 6799 /* Special case for KEEPINIT */ 6800 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6801 (TP_KEEPINIT(tp) != 0) && 6802 rsm) { 6803 /* 6804 * We have to put a ceiling on the rxt timer 6805 * of the keep-init timeout. 6806 */ 6807 uint32_t max_time, red; 6808 6809 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 6810 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 6811 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 6812 if (red < max_time) 6813 max_time -= red; 6814 else 6815 max_time = 1; 6816 } 6817 /* Reduce timeout to the keep value if needed */ 6818 if (max_time < to) 6819 to = max_time; 6820 } 6821 return (to); 6822 } 6823 return (0); 6824 } 6825 if (rsm->r_flags & RACK_ACKED) { 6826 rsm = rack_find_lowest_rsm(rack); 6827 if (rsm == NULL) { 6828 /* No lowest? */ 6829 goto activate_rxt; 6830 } 6831 } 6832 /* Convert from ms to usecs */ 6833 if ((rsm->r_flags & RACK_SACK_PASSED) || 6834 (rsm->r_flags & RACK_RWND_COLLAPSED) || 6835 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 6836 if ((tp->t_flags & TF_SENTFIN) && 6837 ((tp->snd_max - tp->snd_una) == 1) && 6838 (rsm->r_flags & RACK_HAS_FIN)) { 6839 /* 6840 * We don't start a rack timer if all we have is a 6841 * FIN outstanding. 6842 */ 6843 goto activate_rxt; 6844 } 6845 if ((rack->use_rack_rr == 0) && 6846 (IN_FASTRECOVERY(tp->t_flags)) && 6847 (rack->rack_no_prr == 0) && 6848 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 6849 /* 6850 * We are not cheating, in recovery and 6851 * not enough ack's to yet get our next 6852 * retransmission out. 6853 * 6854 * Note that classified attackers do not 6855 * get to use the rack-cheat. 6856 */ 6857 goto activate_tlp; 6858 } 6859 srtt = rack_grab_rtt(tp, rack); 6860 thresh = rack_calc_thresh_rack(rack, srtt, cts, __LINE__, 1); 6861 idx = rsm->r_rtr_cnt - 1; 6862 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 6863 if (SEQ_GEQ(exp, cts)) { 6864 to = exp - cts; 6865 if (to < rack->r_ctl.rc_min_to) { 6866 to = rack->r_ctl.rc_min_to; 6867 if (rack->r_rr_config == 3) 6868 rack->rc_on_min_to = 1; 6869 } 6870 } else { 6871 to = rack->r_ctl.rc_min_to; 6872 if (rack->r_rr_config == 3) 6873 rack->rc_on_min_to = 1; 6874 } 6875 } else { 6876 /* Ok we need to do a TLP not RACK */ 6877 activate_tlp: 6878 if ((rack->rc_tlp_in_progress != 0) && 6879 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 6880 /* 6881 * The previous send was a TLP and we have sent 6882 * N TLP's without sending new data. 6883 */ 6884 goto activate_rxt; 6885 } 6886 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 6887 if (rsm == NULL) { 6888 /* We found no rsm to TLP with. 
*/ 6889 goto activate_rxt; 6890 } 6891 if (rsm->r_flags & RACK_HAS_FIN) { 6892 /* If its a FIN we dont do TLP */ 6893 rsm = NULL; 6894 goto activate_rxt; 6895 } 6896 idx = rsm->r_rtr_cnt - 1; 6897 time_since_sent = 0; 6898 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 6899 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 6900 else 6901 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 6902 if (TSTMP_GT(cts, tstmp_touse)) 6903 time_since_sent = cts - tstmp_touse; 6904 is_tlp_timer = 1; 6905 if (tp->t_srtt) { 6906 if ((rack->rc_srtt_measure_made == 0) && 6907 (tp->t_srtt == 1)) { 6908 /* 6909 * If another stack as run and set srtt to 1, 6910 * then the srtt was 0, so lets use the initial. 6911 */ 6912 srtt = RACK_INITIAL_RTO; 6913 } else { 6914 srtt_cur = tp->t_srtt; 6915 srtt = srtt_cur; 6916 } 6917 } else 6918 srtt = RACK_INITIAL_RTO; 6919 /* 6920 * If the SRTT is not keeping up and the 6921 * rack RTT has spiked we want to use 6922 * the last RTT not the smoothed one. 6923 */ 6924 if (rack_tlp_use_greater && 6925 tp->t_srtt && 6926 (srtt < rack_grab_rtt(tp, rack))) { 6927 srtt = rack_grab_rtt(tp, rack); 6928 } 6929 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 6930 if (thresh > time_since_sent) { 6931 to = thresh - time_since_sent; 6932 } else { 6933 to = rack->r_ctl.rc_min_to; 6934 rack_log_alt_to_to_cancel(rack, 6935 thresh, /* flex1 */ 6936 time_since_sent, /* flex2 */ 6937 tstmp_touse, /* flex3 */ 6938 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 6939 (uint32_t)rsm->r_tim_lastsent[idx], 6940 srtt, 6941 idx, 99); 6942 } 6943 if (to < rack_tlp_min) { 6944 to = rack_tlp_min; 6945 } 6946 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) { 6947 /* 6948 * If the TLP time works out to larger than the max 6949 * RTO lets not do TLP.. just RTO. 6950 */ 6951 goto activate_rxt; 6952 } 6953 } 6954 if (is_tlp_timer == 0) { 6955 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 6956 } else { 6957 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 6958 } 6959 if (to == 0) 6960 to = 1; 6961 return (to); 6962 } 6963 6964 static void 6965 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, tcp_seq snd_una) 6966 { 6967 if (rack->rc_in_persist == 0) { 6968 if (tp->t_flags & TF_GPUTINPROG) { 6969 /* 6970 * Stop the goodput now, the calling of the 6971 * measurement function clears the flag. 
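*
* (Added note: a zero receive window means no new data can move, so
* a goodput sample spanning the persist period would read
* artificially low; ending the measurement here with
* RACK_QUALITY_PERSIST records why it was cut short.)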
6972 */ 6973 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, 6974 RACK_QUALITY_PERSIST); 6975 } 6976 #ifdef NETFLIX_SHARED_CWND 6977 if (rack->r_ctl.rc_scw) { 6978 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 6979 rack->rack_scwnd_is_idle = 1; 6980 } 6981 #endif 6982 rack->r_ctl.rc_went_idle_time = cts; 6983 if (rack->r_ctl.rc_went_idle_time == 0) 6984 rack->r_ctl.rc_went_idle_time = 1; 6985 if (rack->lt_bw_up) { 6986 /* Suspend our LT BW measurement */ 6987 uint64_t tmark; 6988 6989 rack->r_ctl.lt_bw_bytes += (snd_una - rack->r_ctl.lt_seq); 6990 rack->r_ctl.lt_seq = snd_una; 6991 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 6992 if (tmark >= rack->r_ctl.lt_timemark) { 6993 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 6994 } 6995 rack->r_ctl.lt_timemark = tmark; 6996 rack->lt_bw_up = 0; 6997 rack->r_persist_lt_bw_off = 1; 6998 } 6999 rack_timer_cancel(tp, rack, cts, __LINE__); 7000 rack->r_ctl.persist_lost_ends = 0; 7001 rack->probe_not_answered = 0; 7002 rack->forced_ack = 0; 7003 tp->t_rxtshift = 0; 7004 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7005 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 7006 rack->rc_in_persist = 1; 7007 } 7008 } 7009 7010 static void 7011 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7012 { 7013 if (tcp_in_hpts(rack->rc_tp)) { 7014 tcp_hpts_remove(rack->rc_tp); 7015 rack->r_ctl.rc_hpts_flags = 0; 7016 } 7017 #ifdef NETFLIX_SHARED_CWND 7018 if (rack->r_ctl.rc_scw) { 7019 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 7020 rack->rack_scwnd_is_idle = 0; 7021 } 7022 #endif 7023 if (rack->rc_gp_dyn_mul && 7024 (rack->use_fixed_rate == 0) && 7025 (rack->rc_always_pace)) { 7026 /* 7027 * Do we count this as if a probe-rtt just 7028 * finished? 7029 */ 7030 uint32_t time_idle, idle_min; 7031 7032 time_idle = cts - rack->r_ctl.rc_went_idle_time; 7033 idle_min = rack_min_probertt_hold; 7034 if (rack_probertt_gpsrtt_cnt_div) { 7035 uint64_t extra; 7036 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 7037 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 7038 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 7039 idle_min += (uint32_t)extra; 7040 } 7041 if (time_idle >= idle_min) { 7042 /* Yes, we count it as a probe-rtt. 
*/ 7043 uint32_t us_cts; 7044 7045 us_cts = tcp_get_usecs(NULL); 7046 if (rack->in_probe_rtt == 0) { 7047 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 7048 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 7049 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 7050 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 7051 } else { 7052 rack_exit_probertt(rack, us_cts); 7053 } 7054 } 7055 } 7056 if (rack->r_persist_lt_bw_off) { 7057 /* Continue where we left off */ 7058 rack->r_ctl.lt_timemark = tcp_get_u64_usecs(NULL); 7059 rack->lt_bw_up = 1; 7060 rack->r_persist_lt_bw_off = 0; 7061 } 7062 rack->r_ctl.idle_snd_una = tp->snd_una; 7063 rack->rc_in_persist = 0; 7064 rack->r_ctl.rc_went_idle_time = 0; 7065 tp->t_rxtshift = 0; 7066 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7067 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 7068 rack->r_ctl.rc_agg_delayed = 0; 7069 rack->r_early = 0; 7070 rack->r_late = 0; 7071 rack->r_ctl.rc_agg_early = 0; 7072 } 7073 7074 static void 7075 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 7076 struct hpts_diag *diag, struct timeval *tv) 7077 { 7078 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 7079 union tcp_log_stackspecific log; 7080 7081 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 7082 log.u_bbr.flex1 = diag->p_nxt_slot; 7083 log.u_bbr.flex2 = diag->p_cur_slot; 7084 log.u_bbr.flex3 = diag->slot_req; 7085 log.u_bbr.flex4 = diag->inp_hptsslot; 7086 log.u_bbr.flex5 = diag->slot_remaining; 7087 log.u_bbr.flex6 = diag->need_new_to; 7088 log.u_bbr.flex7 = diag->p_hpts_active; 7089 log.u_bbr.flex8 = diag->p_on_min_sleep; 7090 /* Hijack other fields as needed */ 7091 log.u_bbr.epoch = diag->have_slept; 7092 log.u_bbr.lt_epoch = diag->yet_to_sleep; 7093 log.u_bbr.pkts_out = diag->co_ret; 7094 log.u_bbr.applimited = diag->hpts_sleep_time; 7095 log.u_bbr.delivered = diag->p_prev_slot; 7096 log.u_bbr.inflight = diag->p_runningslot; 7097 log.u_bbr.bw_inuse = diag->wheel_slot; 7098 log.u_bbr.rttProp = diag->wheel_cts; 7099 log.u_bbr.timeStamp = cts; 7100 log.u_bbr.delRate = diag->maxslots; 7101 log.u_bbr.cur_del_rate = diag->p_curtick; 7102 log.u_bbr.cur_del_rate <<= 32; 7103 log.u_bbr.cur_del_rate |= diag->p_lasttick; 7104 TCP_LOG_EVENTP(rack->rc_tp, NULL, 7105 &rack->rc_inp->inp_socket->so_rcv, 7106 &rack->rc_inp->inp_socket->so_snd, 7107 BBR_LOG_HPTSDIAG, 0, 7108 0, &log, false, tv); 7109 } 7110 7111 } 7112 7113 static void 7114 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 7115 { 7116 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 7117 union tcp_log_stackspecific log; 7118 struct timeval tv; 7119 7120 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 7121 log.u_bbr.flex1 = sb->sb_flags; 7122 log.u_bbr.flex2 = len; 7123 log.u_bbr.flex3 = sb->sb_state; 7124 log.u_bbr.flex8 = type; 7125 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 7126 TCP_LOG_EVENTP(rack->rc_tp, NULL, 7127 &rack->rc_inp->inp_socket->so_rcv, 7128 &rack->rc_inp->inp_socket->so_snd, 7129 TCP_LOG_SB_WAKE, 0, 7130 len, &log, false, &tv); 7131 } 7132 } 7133 7134 static void 7135 rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 7136 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 7137 { 7138 struct hpts_diag diag; 7139 struct inpcb *inp = tptoinpcb(tp); 7140 struct timeval tv; 7141 uint32_t delayed_ack = 0; 7142 uint32_t hpts_timeout; 7143 uint32_t entry_slot = slot; 7144 uint8_t stopped; 7145 uint32_t left = 0; 7146 
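/*
* Added commentary (not in the original source): the code below first
* folds any accumulated "early" credit (rc_agg_early) into the
* requested pacing slot, and when the connection is running late it
* instead works rc_agg_delayed off of the slot, keeping a minimum
* slot (HPTS_TICKS_PER_SLOT in the common case) so that scheduling
* error is paid back gradually rather than as a burst.
*/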
uint32_t us_cts; 7147 7148 if ((tp->t_state == TCPS_CLOSED) || 7149 (tp->t_state == TCPS_LISTEN)) { 7150 return; 7151 } 7152 if (tcp_in_hpts(tp)) { 7153 /* Already on the pacer */ 7154 return; 7155 } 7156 stopped = rack->rc_tmr_stopped; 7157 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 7158 left = rack->r_ctl.rc_timer_exp - cts; 7159 } 7160 rack->r_ctl.rc_timer_exp = 0; 7161 rack->r_ctl.rc_hpts_flags = 0; 7162 us_cts = tcp_get_usecs(&tv); 7163 /* Now early/late accounting */ 7164 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0); 7165 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { 7166 /* 7167 * We have a early carry over set, 7168 * we can always add more time so we 7169 * can always make this compensation. 7170 * 7171 * Note if ack's are allowed to wake us do not 7172 * penalize the next timer for being awoke 7173 * by an ack aka the rc_agg_early (non-paced mode). 7174 */ 7175 slot += rack->r_ctl.rc_agg_early; 7176 rack->r_early = 0; 7177 rack->r_ctl.rc_agg_early = 0; 7178 } 7179 if ((rack->r_late) && 7180 ((rack->r_use_hpts_min == 0) || (rack->dgp_on == 0))) { 7181 /* 7182 * This is harder, we can 7183 * compensate some but it 7184 * really depends on what 7185 * the current pacing time is. 7186 */ 7187 if (rack->r_ctl.rc_agg_delayed >= slot) { 7188 /* 7189 * We can't compensate for it all. 7190 * And we have to have some time 7191 * on the clock. We always have a min 7192 * 10 slots (10 x 10 i.e. 100 usecs). 7193 */ 7194 if (slot <= HPTS_TICKS_PER_SLOT) { 7195 /* We gain delay */ 7196 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); 7197 slot = HPTS_TICKS_PER_SLOT; 7198 } else { 7199 /* We take off some */ 7200 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); 7201 slot = HPTS_TICKS_PER_SLOT; 7202 } 7203 } else { 7204 slot -= rack->r_ctl.rc_agg_delayed; 7205 rack->r_ctl.rc_agg_delayed = 0; 7206 /* Make sure we have 100 useconds at minimum */ 7207 if (slot < HPTS_TICKS_PER_SLOT) { 7208 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; 7209 slot = HPTS_TICKS_PER_SLOT; 7210 } 7211 if (rack->r_ctl.rc_agg_delayed == 0) 7212 rack->r_late = 0; 7213 } 7214 } else if (rack->r_late) { 7215 /* r_use_hpts_min is on and so is DGP */ 7216 uint32_t max_red; 7217 7218 max_red = (slot * rack->r_ctl.max_reduction) / 100; 7219 if (max_red >= rack->r_ctl.rc_agg_delayed) { 7220 slot -= rack->r_ctl.rc_agg_delayed; 7221 rack->r_ctl.rc_agg_delayed = 0; 7222 } else { 7223 slot -= max_red; 7224 rack->r_ctl.rc_agg_delayed -= max_red; 7225 } 7226 } 7227 if ((rack->r_use_hpts_min == 1) && 7228 (slot > 0) && 7229 (rack->dgp_on == 1)) { 7230 /* 7231 * We are enforcing a min pacing timer 7232 * based on our hpts min timeout. 7233 */ 7234 uint32_t min; 7235 7236 min = get_hpts_min_sleep_time(); 7237 if (min > slot) { 7238 slot = min; 7239 } 7240 } 7241 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 7242 if (tp->t_flags & TF_DELACK) { 7243 delayed_ack = TICKS_2_USEC(tcp_delacktime); 7244 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 7245 } 7246 if (delayed_ack && ((hpts_timeout == 0) || 7247 (delayed_ack < hpts_timeout))) 7248 hpts_timeout = delayed_ack; 7249 else 7250 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 7251 /* 7252 * If no timers are going to run and we will fall off the hptsi 7253 * wheel, we resort to a keep-alive timer if its configured. 
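*
* For example (added note): on an established connection with
* keep-alives enabled the timeout below becomes
* TICKS_2_USEC(TP_KEEPIDLE(tp)); during probe-rtt it is shortened to
* rack_min_probertt_hold so the keep-alive probe fires around the
* time we would leave probe-rtt and its ack refreshes our min RTT.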
7254 */ 7255 if ((hpts_timeout == 0) && 7256 (slot == 0)) { 7257 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 7258 (tp->t_state <= TCPS_CLOSING)) { 7259 /* 7260 * Ok we have no timer (persists, rack, tlp, rxt or 7261 * del-ack), we don't have segments being paced. So 7262 * all that is left is the keepalive timer. 7263 */ 7264 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 7265 /* Get the established keep-alive time */ 7266 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 7267 } else { 7268 /* 7269 * Get the initial setup keep-alive time, 7270 * note that this is probably not going to 7271 * happen, since rack will be running a rxt timer 7272 * if a SYN of some sort is outstanding. It is 7273 * actually handled in rack_timeout_rxt(). 7274 */ 7275 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 7276 } 7277 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 7278 if (rack->in_probe_rtt) { 7279 /* 7280 * We want to instead not wake up a long time from 7281 * now but to wake up about the time we would 7282 * exit probe-rtt and initiate a keep-alive ack. 7283 * This will get us out of probe-rtt and update 7284 * our min-rtt. 7285 */ 7286 hpts_timeout = rack_min_probertt_hold; 7287 } 7288 } 7289 } 7290 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 7291 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 7292 /* 7293 * RACK, TLP, persists and RXT timers all are restartable 7294 * based on actions input .. i.e we received a packet (ack 7295 * or sack) and that changes things (rw, or snd_una etc). 7296 * Thus we can restart them with a new value. For 7297 * keep-alive, delayed_ack we keep track of what was left 7298 * and restart the timer with a smaller value. 7299 */ 7300 if (left < hpts_timeout) 7301 hpts_timeout = left; 7302 } 7303 if (hpts_timeout) { 7304 /* 7305 * Hack alert for now we can't time-out over 2,147,483 7306 * seconds (a bit more than 596 hours), which is probably ok 7307 * :). 7308 */ 7309 if (hpts_timeout > 0x7ffffffe) 7310 hpts_timeout = 0x7ffffffe; 7311 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 7312 } 7313 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); 7314 if ((rack->gp_ready == 0) && 7315 (rack->use_fixed_rate == 0) && 7316 (hpts_timeout < slot) && 7317 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 7318 /* 7319 * We have no good estimate yet for the 7320 * old clunky burst mitigation or the 7321 * real pacing. And the tlp or rxt is smaller 7322 * than the pacing calculation. Lets not 7323 * pace that long since we know the calculation 7324 * so far is not accurate. 7325 */ 7326 slot = hpts_timeout; 7327 } 7328 /** 7329 * Turn off all the flags for queuing by default. The 7330 * flags have important meanings to what happens when 7331 * LRO interacts with the transport. Most likely (by default now) 7332 * mbuf_queueing and ack compression are on. So the transport 7333 * has a couple of flags that control what happens (if those 7334 * are not on then these flags won't have any effect since it 7335 * won't go through the queuing LRO path). 7336 * 7337 * TF2_MBUF_QUEUE_READY - This flags says that I am busy 7338 * pacing output, so don't disturb. But 7339 * it also means LRO can wake me if there 7340 * is a SACK arrival. 7341 * 7342 * TF2_DONT_SACK_QUEUE - This flag is used in conjunction 7343 * with the above flag (QUEUE_READY) and 7344 * when present it says don't even wake me 7345 * if a SACK arrives. 
7346 * 7347 * The idea behind these flags is that if we are pacing we 7348 * set the MBUF_QUEUE_READY and only get woken up if 7349 * a SACK arrives (which could change things) or if 7350 * our pacing timer expires. If, however, we have a rack 7351 * timer running, then we don't even want a sack to wake 7352 * us since the rack timer has to expire before we can send. 7353 * 7354 * Other cases should usually have none of the flags set 7355 * so LRO can call into us. 7356 */ 7357 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE|TF2_MBUF_QUEUE_READY); 7358 if (slot) { 7359 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 7360 rack->r_ctl.rc_last_output_to = us_cts + slot; 7361 /* 7362 * A pacing timer (slot) is being set, in 7363 * such a case we cannot send (we are blocked by 7364 * the timer). So lets tell LRO that it should not 7365 * wake us unless there is a SACK. Note this only 7366 * will be effective if mbuf queueing is on or 7367 * compressed acks are being processed. 7368 */ 7369 tp->t_flags2 |= TF2_MBUF_QUEUE_READY; 7370 /* 7371 * But wait if we have a Rack timer running 7372 * even a SACK should not disturb us (with 7373 * the exception of r_rr_config 3). 7374 */ 7375 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) || 7376 (IN_RECOVERY(tp->t_flags))) { 7377 if (rack->r_rr_config != 3) 7378 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; 7379 else if (rack->rc_pace_dnd) { 7380 /* 7381 * When DND is on, we only let a sack 7382 * interrupt us if we are not in recovery. 7383 * 7384 * If DND is off, then we never hit here 7385 * and let all sacks wake us up. 7386 * 7387 */ 7388 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; 7389 } 7390 } 7391 if (rack->rc_ack_can_sendout_data) { 7392 /* 7393 * Ahh but wait, this is that special case 7394 * where the pacing timer can be disturbed 7395 * backout the changes (used for non-paced 7396 * burst limiting). 7397 */ 7398 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE | 7399 TF2_MBUF_QUEUE_READY); 7400 } 7401 if ((rack->use_rack_rr) && 7402 (rack->r_rr_config < 2) && 7403 ((hpts_timeout) && (hpts_timeout < slot))) { 7404 /* 7405 * Arrange for the hpts to kick back in after the 7406 * t-o if the t-o does not cause a send. 7407 */ 7408 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout), 7409 __LINE__, &diag); 7410 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 7411 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 7412 } else { 7413 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(slot), 7414 __LINE__, &diag); 7415 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 7416 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 7417 } 7418 } else if (hpts_timeout) { 7419 /* 7420 * With respect to t_flags2(?) here, lets let any new acks wake 7421 * us up here. Since we are not pacing (no pacing timer), output 7422 * can happen so we should let it. If its a Rack timer, then any inbound 7423 * packet probably won't change the sending (we will be blocked) 7424 * but it may change the prr stats so letting it in (the set defaults 7425 * at the start of this block) are good enough. 
7426 */ 7427 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 7428 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout), 7429 __LINE__, &diag); 7430 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 7431 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 7432 } else { 7433 /* No timer starting */ 7434 #ifdef INVARIANTS 7435 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 7436 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 7437 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 7438 } 7439 #endif 7440 } 7441 rack->rc_tmr_stopped = 0; 7442 if (slot) 7443 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv, __LINE__); 7444 } 7445 7446 static void 7447 rack_mark_lost(struct tcpcb *tp, 7448 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts) 7449 { 7450 struct rack_sendmap *nrsm; 7451 uint32_t thresh, exp; 7452 7453 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(tp, rack), cts, __LINE__, 0); 7454 nrsm = rsm; 7455 TAILQ_FOREACH_FROM(nrsm, &rack->r_ctl.rc_tmap, r_tnext) { 7456 if ((nrsm->r_flags & RACK_SACK_PASSED) == 0) { 7457 /* Got up to all that were marked sack-passed */ 7458 break; 7459 } 7460 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { 7461 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; 7462 if (TSTMP_LT(exp, cts) || (exp == cts)) { 7463 /* We now consider it lost */ 7464 nrsm->r_flags |= RACK_WAS_LOST; 7465 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; 7466 } else { 7467 /* Past here it won't be lost so stop */ 7468 break; 7469 } 7470 } 7471 } 7472 } 7473 7474 /* 7475 * RACK Timer, here we simply do logging and house keeping. 7476 * the normal rack_output() function will call the 7477 * appropriate thing to check if we need to do a RACK retransmit. 7478 * We return 1, saying don't proceed with rack_output only 7479 * when all timers have been stopped (destroyed PCB?). 7480 */ 7481 static int 7482 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7483 { 7484 /* 7485 * This timer simply provides an internal trigger to send out data. 7486 * The check_recovery_mode call will see if there are needed 7487 * retransmissions, if so we will enter fast-recovery. The output 7488 * call may or may not do the same thing depending on sysctl 7489 * settings. 7490 */ 7491 struct rack_sendmap *rsm; 7492 7493 counter_u64_add(rack_to_tot, 1); 7494 if (rack->r_state && (rack->r_state != tp->t_state)) 7495 rack_set_state(tp, rack); 7496 rack->rc_on_min_to = 0; 7497 rsm = rack_check_recovery_mode(tp, cts); 7498 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 7499 if (rsm) { 7500 /* We need to stroke any lost that are now declared as lost */ 7501 rack_mark_lost(tp, rack, rsm, cts); 7502 rack->r_ctl.rc_resend = rsm; 7503 rack->r_timer_override = 1; 7504 if (rack->use_rack_rr) { 7505 /* 7506 * Don't accumulate extra pacing delay 7507 * we are allowing the rack timer to 7508 * over-ride pacing i.e. rrr takes precedence 7509 * if the pacing interval is longer than the rrr 7510 * time (in other words we get the min pacing 7511 * time versus rrr pacing time). 
7512 */ 7513 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 7514 } 7515 } 7516 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 7517 if (rsm == NULL) { 7518 /* restart a timer and return 1 */ 7519 rack_start_hpts_timer(rack, tp, cts, 7520 0, 0, 0); 7521 return (1); 7522 } 7523 if ((rack->policer_detect_on == 1) && 7524 (rack->rc_policer_detected == 0)) { 7525 /* 7526 * We do this early if we have not 7527 * deteceted to attempt to detect 7528 * quicker. Normally we want to do this 7529 * as recovery exits (and we will again). 7530 */ 7531 policer_detection(tp, rack, 0); 7532 } 7533 return (0); 7534 } 7535 7536 7537 7538 static void 7539 rack_adjust_orig_mlen(struct rack_sendmap *rsm) 7540 { 7541 7542 if ((M_TRAILINGROOM(rsm->m) != rsm->orig_t_space)) { 7543 /* 7544 * The trailing space changed, mbufs can grow 7545 * at the tail but they can't shrink from 7546 * it, KASSERT that. Adjust the orig_m_len to 7547 * compensate for this change. 7548 */ 7549 KASSERT((rsm->orig_t_space > M_TRAILINGROOM(rsm->m)), 7550 ("mbuf:%p rsm:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n", 7551 rsm->m, 7552 rsm, 7553 (intmax_t)M_TRAILINGROOM(rsm->m), 7554 rsm->orig_t_space, 7555 rsm->orig_m_len, 7556 rsm->m->m_len)); 7557 rsm->orig_m_len += (rsm->orig_t_space - M_TRAILINGROOM(rsm->m)); 7558 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7559 } 7560 if (rsm->m->m_len < rsm->orig_m_len) { 7561 /* 7562 * Mbuf shrank, trimmed off the top by an ack, our 7563 * offset changes. 7564 */ 7565 KASSERT((rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)), 7566 ("mbuf:%p len:%u rsm:%p oml:%u soff:%u\n", 7567 rsm->m, rsm->m->m_len, 7568 rsm, rsm->orig_m_len, 7569 rsm->soff)); 7570 if (rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)) 7571 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); 7572 else 7573 rsm->soff = 0; 7574 rsm->orig_m_len = rsm->m->m_len; 7575 #ifdef INVARIANTS 7576 } else if (rsm->m->m_len > rsm->orig_m_len) { 7577 panic("rsm:%p m:%p m_len grew outside of t_space compensation", 7578 rsm, rsm->m); 7579 #endif 7580 } 7581 } 7582 7583 static void 7584 rack_setup_offset_for_rsm(struct tcp_rack *rack, struct rack_sendmap *src_rsm, struct rack_sendmap *rsm) 7585 { 7586 struct mbuf *m; 7587 uint32_t soff; 7588 7589 if (src_rsm->m && 7590 ((src_rsm->orig_m_len != src_rsm->m->m_len) || 7591 (M_TRAILINGROOM(src_rsm->m) != src_rsm->orig_t_space))) { 7592 /* Fix up the orig_m_len and possibly the mbuf offset */ 7593 rack_adjust_orig_mlen(src_rsm); 7594 } 7595 m = src_rsm->m; 7596 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); 7597 while (soff >= m->m_len) { 7598 /* Move out past this mbuf */ 7599 soff -= m->m_len; 7600 m = m->m_next; 7601 KASSERT((m != NULL), 7602 ("rsm:%p nrsm:%p hit at soff:%u null m", 7603 src_rsm, rsm, soff)); 7604 if (m == NULL) { 7605 /* This should *not* happen which is why there is a kassert */ 7606 src_rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 7607 (src_rsm->r_start - rack->rc_tp->snd_una), 7608 &src_rsm->soff); 7609 src_rsm->orig_m_len = src_rsm->m->m_len; 7610 src_rsm->orig_t_space = M_TRAILINGROOM(src_rsm->m); 7611 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 7612 (rsm->r_start - rack->rc_tp->snd_una), 7613 &rsm->soff); 7614 rsm->orig_m_len = rsm->m->m_len; 7615 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7616 return; 7617 } 7618 } 7619 rsm->m = m; 7620 rsm->soff = soff; 7621 rsm->orig_m_len = m->m_len; 7622 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7623 } 7624 7625 static __inline void 7626 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 7627 struct 
rack_sendmap *rsm, uint32_t start) 7628 { 7629 int idx; 7630 7631 nrsm->r_start = start; 7632 nrsm->r_end = rsm->r_end; 7633 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 7634 nrsm->r_act_rxt_cnt = rsm->r_act_rxt_cnt; 7635 nrsm->r_flags = rsm->r_flags; 7636 nrsm->r_dupack = rsm->r_dupack; 7637 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; 7638 nrsm->r_rtr_bytes = 0; 7639 nrsm->r_fas = rsm->r_fas; 7640 nrsm->r_bas = rsm->r_bas; 7641 tqhash_update_end(rack->r_ctl.tqh, rsm, nrsm->r_start); 7642 nrsm->r_just_ret = rsm->r_just_ret; 7643 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 7644 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 7645 } 7646 /* Now if we have SYN flag we keep it on the left edge */ 7647 if (nrsm->r_flags & RACK_HAS_SYN) 7648 nrsm->r_flags &= ~RACK_HAS_SYN; 7649 /* Now if we have a FIN flag we keep it on the right edge */ 7650 if (rsm->r_flags & RACK_HAS_FIN) 7651 rsm->r_flags &= ~RACK_HAS_FIN; 7652 /* Push bit must go to the right edge as well */ 7653 if (rsm->r_flags & RACK_HAD_PUSH) 7654 rsm->r_flags &= ~RACK_HAD_PUSH; 7655 /* Clone over the state of the hw_tls flag */ 7656 nrsm->r_hw_tls = rsm->r_hw_tls; 7657 /* 7658 * Now we need to find nrsm's new location in the mbuf chain 7659 * we basically calculate a new offset, which is soff + 7660 * how much is left in original rsm. Then we walk out the mbuf 7661 * chain to find the righ position, it may be the same mbuf 7662 * or maybe not. 7663 */ 7664 KASSERT(((rsm->m != NULL) || 7665 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 7666 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 7667 if (rsm->m) 7668 rack_setup_offset_for_rsm(rack, rsm, nrsm); 7669 } 7670 7671 static struct rack_sendmap * 7672 rack_merge_rsm(struct tcp_rack *rack, 7673 struct rack_sendmap *l_rsm, 7674 struct rack_sendmap *r_rsm) 7675 { 7676 /* 7677 * We are merging two ack'd RSM's, 7678 * the l_rsm is on the left (lower seq 7679 * values) and the r_rsm is on the right 7680 * (higher seq value). The simplest way 7681 * to merge these is to move the right 7682 * one into the left. I don't think there 7683 * is any reason we need to try to find 7684 * the oldest (or last oldest retransmitted). 7685 */ 7686 rack_log_map_chg(rack->rc_tp, rack, NULL, 7687 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 7688 tqhash_update_end(rack->r_ctl.tqh, l_rsm, r_rsm->r_end); 7689 if (l_rsm->r_dupack < r_rsm->r_dupack) 7690 l_rsm->r_dupack = r_rsm->r_dupack; 7691 if (r_rsm->r_rtr_bytes) 7692 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 7693 if (r_rsm->r_in_tmap) { 7694 /* This really should not happen */ 7695 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 7696 r_rsm->r_in_tmap = 0; 7697 } 7698 7699 /* Now the flags */ 7700 if (r_rsm->r_flags & RACK_HAS_FIN) 7701 l_rsm->r_flags |= RACK_HAS_FIN; 7702 if (r_rsm->r_flags & RACK_TLP) 7703 l_rsm->r_flags |= RACK_TLP; 7704 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 7705 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 7706 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 7707 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 7708 /* 7709 * If both are app-limited then let the 7710 * free lower the count. If right is app 7711 * limited and left is not, transfer. 7712 */ 7713 l_rsm->r_flags |= RACK_APP_LIMITED; 7714 r_rsm->r_flags &= ~RACK_APP_LIMITED; 7715 if (r_rsm == rack->r_ctl.rc_first_appl) 7716 rack->r_ctl.rc_first_appl = l_rsm; 7717 } 7718 tqhash_remove(rack->r_ctl.tqh, r_rsm, REMOVE_TYPE_MERGE); 7719 /* 7720 * We keep the largest value, which is the newest 7721 * send. 
We do this in case a segment that is 7722 * joined together and not part of a GP estimate 7723 * later gets expanded into the GP estimate. 7724 * 7725 * We prohibit the merging of unlike kinds i.e. 7726 * all pieces that are in the GP estimate can be 7727 * merged and all pieces that are not in a GP estimate 7728 * can be merged, but not disimilar pieces. Combine 7729 * this with taking the highest here and we should 7730 * be ok unless of course the client reneges. Then 7731 * all bets are off. 7732 */ 7733 if(l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] < 7734 r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]) { 7735 l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] = r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]; 7736 } 7737 /* 7738 * When merging two RSM's we also need to consider the ack time and keep 7739 * newest. If the ack gets merged into a measurement then that is the 7740 * one we will want to be using. 7741 */ 7742 if(l_rsm->r_ack_arrival < r_rsm->r_ack_arrival) 7743 l_rsm->r_ack_arrival = r_rsm->r_ack_arrival; 7744 7745 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 7746 /* Transfer the split limit to the map we free */ 7747 r_rsm->r_limit_type = l_rsm->r_limit_type; 7748 l_rsm->r_limit_type = 0; 7749 } 7750 rack_free(rack, r_rsm); 7751 l_rsm->r_flags |= RACK_MERGED; 7752 return (l_rsm); 7753 } 7754 7755 /* 7756 * TLP Timer, here we simply setup what segment we want to 7757 * have the TLP expire on, the normal rack_output() will then 7758 * send it out. 7759 * 7760 * We return 1, saying don't proceed with rack_output only 7761 * when all timers have been stopped (destroyed PCB?). 7762 */ 7763 static int 7764 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp) 7765 { 7766 /* 7767 * Tail Loss Probe. 7768 */ 7769 struct rack_sendmap *rsm = NULL; 7770 int insret __diagused; 7771 struct socket *so = tptosocket(tp); 7772 uint32_t amm; 7773 uint32_t out, avail; 7774 int collapsed_win = 0; 7775 7776 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 7777 /* Its not time yet */ 7778 return (0); 7779 } 7780 if (ctf_progress_timeout_check(tp, true)) { 7781 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7782 return (-ETIMEDOUT); /* tcp_drop() */ 7783 } 7784 /* 7785 * A TLP timer has expired. We have been idle for 2 rtts. So we now 7786 * need to figure out how to force a full MSS segment out. 7787 */ 7788 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); 7789 rack->r_ctl.retran_during_recovery = 0; 7790 rack->r_might_revert = 0; 7791 rack->r_ctl.dsack_byte_cnt = 0; 7792 counter_u64_add(rack_tlp_tot, 1); 7793 if (rack->r_state && (rack->r_state != tp->t_state)) 7794 rack_set_state(tp, rack); 7795 avail = sbavail(&so->so_snd); 7796 out = tp->snd_max - tp->snd_una; 7797 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) { 7798 /* special case, we need a retransmission */ 7799 collapsed_win = 1; 7800 goto need_retran; 7801 } 7802 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { 7803 rack->r_ctl.dsack_persist--; 7804 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 7805 rack->r_ctl.num_dsack = 0; 7806 } 7807 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 7808 } 7809 if ((tp->t_flags & TF_GPUTINPROG) && 7810 (rack->r_ctl.rc_tlp_cnt_out == 1)) { 7811 /* 7812 * If this is the second in a row 7813 * TLP and we are doing a measurement 7814 * its time to abandon the measurement. 7815 * Something is likely broken on 7816 * the clients network and measuring a 7817 * broken network does us no good. 
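*
* (Added note: a second consecutive TLP with a goodput measurement
* still in progress means the sample would mostly be measuring a
* stalled path, so TF_GPUTINPROG is cleared below and the partial
* measurement is abandoned rather than recorded.)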
7818 */ 7819 tp->t_flags &= ~TF_GPUTINPROG; 7820 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7821 rack->r_ctl.rc_gp_srtt /*flex1*/, 7822 tp->gput_seq, 7823 0, 0, 18, __LINE__, NULL, 0); 7824 } 7825 /* 7826 * Check our send oldest always settings, and if 7827 * there is an oldest to send jump to the need_retran. 7828 */ 7829 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 7830 goto need_retran; 7831 7832 if (avail > out) { 7833 /* New data is available */ 7834 amm = avail - out; 7835 if (amm > ctf_fixed_maxseg(tp)) { 7836 amm = ctf_fixed_maxseg(tp); 7837 if ((amm + out) > tp->snd_wnd) { 7838 /* We are rwnd limited */ 7839 goto need_retran; 7840 } 7841 } else if (amm < ctf_fixed_maxseg(tp)) { 7842 /* not enough to fill a MTU */ 7843 goto need_retran; 7844 } 7845 if (IN_FASTRECOVERY(tp->t_flags)) { 7846 /* Unlikely */ 7847 if (rack->rack_no_prr == 0) { 7848 if (out + amm <= tp->snd_wnd) { 7849 rack->r_ctl.rc_prr_sndcnt = amm; 7850 rack->r_ctl.rc_tlp_new_data = amm; 7851 rack_log_to_prr(rack, 4, 0, __LINE__); 7852 } 7853 } else 7854 goto need_retran; 7855 } else { 7856 /* Set the send-new override */ 7857 if (out + amm <= tp->snd_wnd) 7858 rack->r_ctl.rc_tlp_new_data = amm; 7859 else 7860 goto need_retran; 7861 } 7862 rack->r_ctl.rc_tlpsend = NULL; 7863 counter_u64_add(rack_tlp_newdata, 1); 7864 goto send; 7865 } 7866 need_retran: 7867 /* 7868 * Ok we need to arrange the last un-acked segment to be re-sent, or 7869 * optionally the first un-acked segment. 7870 */ 7871 if (collapsed_win == 0) { 7872 if (rack_always_send_oldest) 7873 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 7874 else { 7875 rsm = tqhash_max(rack->r_ctl.tqh); 7876 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 7877 rsm = rack_find_high_nonack(rack, rsm); 7878 } 7879 } 7880 if (rsm == NULL) { 7881 #ifdef TCP_BLACKBOX 7882 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 7883 #endif 7884 goto out; 7885 } 7886 } else { 7887 /* 7888 * We had a collapsed window, lets find 7889 * the point before the collapse. 7890 */ 7891 if (SEQ_GT((rack->r_ctl.last_collapse_point - 1), rack->rc_tp->snd_una)) 7892 rsm = tqhash_find(rack->r_ctl.tqh, (rack->r_ctl.last_collapse_point - 1)); 7893 else { 7894 rsm = tqhash_min(rack->r_ctl.tqh); 7895 } 7896 if (rsm == NULL) { 7897 /* Huh */ 7898 goto out; 7899 } 7900 } 7901 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 7902 /* 7903 * We need to split this the last segment in two. 7904 */ 7905 struct rack_sendmap *nrsm; 7906 7907 nrsm = rack_alloc_full_limit(rack); 7908 if (nrsm == NULL) { 7909 /* 7910 * No memory to split, we will just exit and punt 7911 * off to the RXT timer. 
7912 */ 7913 goto out; 7914 } 7915 rack_clone_rsm(rack, nrsm, rsm, 7916 (rsm->r_end - ctf_fixed_maxseg(tp))); 7917 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7918 #ifndef INVARIANTS 7919 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 7920 #else 7921 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 7922 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 7923 nrsm, insret, rack, rsm); 7924 } 7925 #endif 7926 if (rsm->r_in_tmap) { 7927 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7928 nrsm->r_in_tmap = 1; 7929 } 7930 rsm = nrsm; 7931 } 7932 rack->r_ctl.rc_tlpsend = rsm; 7933 send: 7934 /* Make sure output path knows we are doing a TLP */ 7935 *doing_tlp = 1; 7936 rack->r_timer_override = 1; 7937 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 7938 return (0); 7939 out: 7940 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 7941 return (0); 7942 } 7943 7944 /* 7945 * Delayed ack Timer, here we simply need to setup the 7946 * ACK_NOW flag and remove the DELACK flag. From there 7947 * the output routine will send the ack out. 7948 * 7949 * We only return 1, saying don't proceed, if all timers 7950 * are stopped (destroyed PCB?). 7951 */ 7952 static int 7953 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7954 { 7955 7956 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 7957 tp->t_flags &= ~TF_DELACK; 7958 tp->t_flags |= TF_ACKNOW; 7959 KMOD_TCPSTAT_INC(tcps_delack); 7960 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 7961 return (0); 7962 } 7963 7964 static inline int 7965 rack_send_ack_challange(struct tcp_rack *rack) 7966 { 7967 struct tcptemp *t_template; 7968 7969 t_template = tcpip_maketemplate(rack->rc_inp); 7970 if (t_template) { 7971 if (rack->forced_ack == 0) { 7972 rack->forced_ack = 1; 7973 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 7974 } else { 7975 rack->probe_not_answered = 1; 7976 } 7977 tcp_respond(rack->rc_tp, t_template->tt_ipgen, 7978 &t_template->tt_t, (struct mbuf *)NULL, 7979 rack->rc_tp->rcv_nxt, rack->rc_tp->snd_una - 1, 0); 7980 free(t_template, M_TEMP); 7981 /* This does send an ack so kill any D-ack timer */ 7982 if (rack->rc_tp->t_flags & TF_DELACK) 7983 rack->rc_tp->t_flags &= ~TF_DELACK; 7984 return(1); 7985 } else 7986 return (0); 7987 7988 } 7989 7990 /* 7991 * Persists timer, here we simply send the 7992 * same thing as a keepalive will. 7993 * the one byte send. 7994 * 7995 * We only return 1, saying don't proceed, if all timers 7996 * are stopped (destroyed PCB?). 7997 */ 7998 static int 7999 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 8000 { 8001 int32_t retval = 1; 8002 8003 if (rack->rc_in_persist == 0) 8004 return (0); 8005 if (ctf_progress_timeout_check(tp, false)) { 8006 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 8007 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 8008 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 8009 return (-ETIMEDOUT); /* tcp_drop() */ 8010 } 8011 /* 8012 * Persistence timer into zero window. Force a byte to be output, if 8013 * possible. 8014 */ 8015 KMOD_TCPSTAT_INC(tcps_persisttimeo); 8016 /* 8017 * Hack: if the peer is dead/unreachable, we do not time out if the 8018 * window is closed. After a full backoff, drop the connection if 8019 * the idle time (no responses to probes) reaches the maximum 8020 * backoff that we would use if retransmitting. 
8021 */ 8022 if (tp->t_rxtshift >= V_tcp_retries && 8023 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 8024 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 8025 KMOD_TCPSTAT_INC(tcps_persistdrop); 8026 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 8027 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 8028 retval = -ETIMEDOUT; /* tcp_drop() */ 8029 goto out; 8030 } 8031 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 8032 tp->snd_una == tp->snd_max) 8033 rack_exit_persist(tp, rack, cts); 8034 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 8035 /* 8036 * If the user has closed the socket then drop a persisting 8037 * connection after a much reduced timeout. 8038 */ 8039 if (tp->t_state > TCPS_CLOSE_WAIT && 8040 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 8041 KMOD_TCPSTAT_INC(tcps_persistdrop); 8042 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 8043 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 8044 retval = -ETIMEDOUT; /* tcp_drop() */ 8045 goto out; 8046 } 8047 if (rack_send_ack_challange(rack)) { 8048 /* only set it if we were answered */ 8049 if (rack->probe_not_answered) { 8050 counter_u64_add(rack_persists_loss, 1); 8051 rack->r_ctl.persist_lost_ends++; 8052 } 8053 counter_u64_add(rack_persists_sends, 1); 8054 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 8055 } 8056 if (tp->t_rxtshift < V_tcp_retries) 8057 tp->t_rxtshift++; 8058 out: 8059 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 8060 rack_start_hpts_timer(rack, tp, cts, 8061 0, 0, 0); 8062 return (retval); 8063 } 8064 8065 /* 8066 * If a keepalive goes off, we had no other timers 8067 * happening. We always return 1 here since this 8068 * routine either drops the connection or sends 8069 * out a segment with respond. 8070 */ 8071 static int 8072 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 8073 { 8074 struct inpcb *inp = tptoinpcb(tp); 8075 8076 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 8077 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 8078 /* 8079 * Keep-alive timer went off; send something or drop connection if 8080 * idle for too long. 8081 */ 8082 KMOD_TCPSTAT_INC(tcps_keeptimeo); 8083 if (tp->t_state < TCPS_ESTABLISHED) 8084 goto dropit; 8085 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 8086 tp->t_state <= TCPS_CLOSING) { 8087 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 8088 goto dropit; 8089 /* 8090 * Send a packet designed to force a response if the peer is 8091 * up and reachable: either an ACK if the connection is 8092 * still alive, or an RST if the peer has closed the 8093 * connection due to timeout or reboot. Using sequence 8094 * number tp->snd_una-1 causes the transmitted zero-length 8095 * segment to lie outside the receive window; by the 8096 * protocol spec, this requires the correspondent TCP to 8097 * respond. 8098 */ 8099 KMOD_TCPSTAT_INC(tcps_keepprobe); 8100 rack_send_ack_challange(rack); 8101 } 8102 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 8103 return (1); 8104 dropit: 8105 KMOD_TCPSTAT_INC(tcps_keepdrops); 8106 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 8107 return (-ETIMEDOUT); /* tcp_drop() */ 8108 } 8109 8110 /* 8111 * Retransmit helper function, clear up all the ack 8112 * flags and take care of important book keeping. 8113 */ 8114 static void 8115 rack_remxt_tmr(struct tcpcb *tp) 8116 { 8117 /* 8118 * The retransmit timer went off, all sack'd blocks must be 8119 * un-acked. 
8120 */ 8121 struct rack_sendmap *rsm, *trsm = NULL; 8122 struct tcp_rack *rack; 8123 8124 rack = (struct tcp_rack *)tp->t_fb_ptr; 8125 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 8126 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 8127 rack->r_timer_override = 1; 8128 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 8129 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 8130 rack->r_late = 0; 8131 rack->r_early = 0; 8132 rack->r_ctl.rc_agg_delayed = 0; 8133 rack->r_ctl.rc_agg_early = 0; 8134 if (rack->r_state && (rack->r_state != tp->t_state)) 8135 rack_set_state(tp, rack); 8136 if (tp->t_rxtshift <= rack_rxt_scoreboard_clear_thresh) { 8137 /* 8138 * We do not clear the scoreboard until we have had 8139 * more than rack_rxt_scoreboard_clear_thresh time-outs. 8140 */ 8141 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 8142 if (rack->r_ctl.rc_resend != NULL) 8143 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 8144 8145 return; 8146 } 8147 /* 8148 * Ideally we would like to be able to 8149 * mark SACK-PASS on anything not acked here. 8150 * 8151 * However, if we do that we would burst out 8152 * all that data 1ms apart. This would be unwise, 8153 * so for now we will just let the normal rxt timer 8154 * and tlp timer take care of it. 8155 * 8156 * Also we really need to stick them back in sequence 8157 * order. This way we send in the proper order and any 8158 * sacks that come floating in will "re-ack" the data. 8159 * To do this we zap the tmap with an INIT and then 8160 * walk through and place every rsm in the tail queue 8161 * hash table back in its seq ordered place. 8162 */ 8163 TAILQ_INIT(&rack->r_ctl.rc_tmap); 8164 8165 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 8166 rsm->r_dupack = 0; 8167 if (rack_verbose_logging) 8168 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8169 /* We must re-add it back to the tlist */ 8170 if (trsm == NULL) { 8171 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8172 } else { 8173 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 8174 } 8175 rsm->r_in_tmap = 1; 8176 trsm = rsm; 8177 if (rsm->r_flags & RACK_ACKED) 8178 rsm->r_flags |= RACK_WAS_ACKED; 8179 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED | RACK_WAS_LOST); 8180 rsm->r_flags |= RACK_MUST_RXT; 8181 } 8182 /* zero the lost since it's all gone */ 8183 rack->r_ctl.rc_considered_lost = 0; 8184 /* Clear the count (we just un-acked them) */ 8185 rack->r_ctl.rc_sacked = 0; 8186 rack->r_ctl.rc_sacklast = NULL; 8187 /* Clear the tlp rtx mark */ 8188 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); 8189 if (rack->r_ctl.rc_resend != NULL) 8190 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 8191 rack->r_ctl.rc_prr_sndcnt = 0; 8192 rack_log_to_prr(rack, 6, 0, __LINE__); 8193 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); 8194 if (rack->r_ctl.rc_resend != NULL) 8195 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 8196 if (((tp->t_flags & TF_SACK_PERMIT) == 0) && 8197 ((tp->t_flags & TF_SENTFIN) == 0)) { 8198 /* 8199 * For non-sack customers new data 8200 * needs to go out as retransmits until 8201 * we retransmit up to snd_max. 
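 *
 * (rc_out_at_rto below records the current flight size so that
 * rack_update_rsm() can tell when the must-retransmit range has been
 * fully re-sent and clear r_must_retran again.)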
8202 */ 8203 rack->r_must_retran = 1; 8204 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 8205 rack->r_ctl.rc_sacked); 8206 } 8207 } 8208 8209 static void 8210 rack_convert_rtts(struct tcpcb *tp) 8211 { 8212 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC); 8213 tp->t_rxtcur = RACK_REXMTVAL(tp); 8214 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 8215 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 8216 } 8217 if (tp->t_rxtcur > rack_rto_max) { 8218 tp->t_rxtcur = rack_rto_max; 8219 } 8220 } 8221 8222 static void 8223 rack_cc_conn_init(struct tcpcb *tp) 8224 { 8225 struct tcp_rack *rack; 8226 uint32_t srtt; 8227 8228 rack = (struct tcp_rack *)tp->t_fb_ptr; 8229 srtt = tp->t_srtt; 8230 cc_conn_init(tp); 8231 /* 8232 * Now convert to rack's internal format, 8233 * if required. 8234 */ 8235 if ((srtt == 0) && (tp->t_srtt != 0)) 8236 rack_convert_rtts(tp); 8237 /* 8238 * We want a chance to stay in slowstart as 8239 * we create a connection. TCP spec says that 8240 * initially ssthresh is infinite. For our 8241 * purposes that is the snd_wnd. 8242 */ 8243 if (tp->snd_ssthresh < tp->snd_wnd) { 8244 tp->snd_ssthresh = tp->snd_wnd; 8245 } 8246 /* 8247 * We also want to assure a IW worth of 8248 * data can get inflight. 8249 */ 8250 if (rc_init_window(rack) < tp->snd_cwnd) 8251 tp->snd_cwnd = rc_init_window(rack); 8252 } 8253 8254 /* 8255 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 8256 * we will setup to retransmit the lowest seq number outstanding. 8257 */ 8258 static int 8259 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 8260 { 8261 struct inpcb *inp = tptoinpcb(tp); 8262 int32_t rexmt; 8263 int32_t retval = 0; 8264 bool isipv6; 8265 8266 if ((tp->t_flags & TF_GPUTINPROG) && 8267 (tp->t_rxtshift)) { 8268 /* 8269 * We have had a second timeout 8270 * measurements on successive rxt's are not profitable. 8271 * It is unlikely to be of any use (the network is 8272 * broken or the client went away). 8273 */ 8274 tp->t_flags &= ~TF_GPUTINPROG; 8275 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 8276 rack->r_ctl.rc_gp_srtt /*flex1*/, 8277 tp->gput_seq, 8278 0, 0, 18, __LINE__, NULL, 0); 8279 } 8280 if (ctf_progress_timeout_check(tp, false)) { 8281 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 8282 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 8283 return (-ETIMEDOUT); /* tcp_drop() */ 8284 } 8285 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 8286 rack->r_ctl.retran_during_recovery = 0; 8287 rack->rc_ack_required = 1; 8288 rack->r_ctl.dsack_byte_cnt = 0; 8289 if (IN_RECOVERY(tp->t_flags) && 8290 (rack->rto_from_rec == 0)) { 8291 /* 8292 * Mark that we had a rto while in recovery 8293 * and save the ssthresh so if we go back 8294 * into recovery we will have a chance 8295 * to slowstart back to the level. 8296 */ 8297 rack->rto_from_rec = 1; 8298 rack->r_ctl.rto_ssthresh = tp->snd_ssthresh; 8299 } 8300 if (IN_FASTRECOVERY(tp->t_flags)) 8301 tp->t_flags |= TF_WASFRECOVERY; 8302 else 8303 tp->t_flags &= ~TF_WASFRECOVERY; 8304 if (IN_CONGRECOVERY(tp->t_flags)) 8305 tp->t_flags |= TF_WASCRECOVERY; 8306 else 8307 tp->t_flags &= ~TF_WASCRECOVERY; 8308 if (TCPS_HAVEESTABLISHED(tp->t_state) && 8309 (tp->snd_una == tp->snd_max)) { 8310 /* Nothing outstanding .. 
nothing to do */ 8311 return (0); 8312 } 8313 if (rack->r_ctl.dsack_persist) { 8314 rack->r_ctl.dsack_persist--; 8315 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 8316 rack->r_ctl.num_dsack = 0; 8317 } 8318 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 8319 } 8320 /* 8321 * Rack can only run one timer at a time, so we cannot 8322 * run a KEEPINIT (gating SYN sending) and a retransmit 8323 * timer for the SYN. So if we are in a front state and 8324 * have a KEEPINIT timer we need to check the first transmit 8325 * against now to see if we have exceeded the KEEPINIT time 8326 * (if one is set). 8327 */ 8328 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 8329 (TP_KEEPINIT(tp) != 0)) { 8330 struct rack_sendmap *rsm; 8331 8332 rsm = tqhash_min(rack->r_ctl.tqh); 8333 if (rsm) { 8334 /* Ok we have something outstanding to test keepinit with */ 8335 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 8336 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 8337 /* We have exceeded the KEEPINIT time */ 8338 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 8339 goto drop_it; 8340 } 8341 } 8342 } 8343 /* 8344 * Retransmission timer went off. Message has not been acked within 8345 * retransmit interval. Back off to a longer retransmit interval 8346 * and retransmit one segment. 8347 */ 8348 if ((rack->r_ctl.rc_resend == NULL) || 8349 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 8350 /* 8351 * If the rwnd collapsed on 8352 * the one we are retransmitting 8353 * it does not count against the 8354 * rxt count. 8355 */ 8356 tp->t_rxtshift++; 8357 } 8358 rack_remxt_tmr(tp); 8359 if (tp->t_rxtshift > V_tcp_retries) { 8360 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 8361 drop_it: 8362 tp->t_rxtshift = V_tcp_retries; 8363 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 8364 /* XXXGL: previously t_softerror was casted to uint16_t */ 8365 MPASS(tp->t_softerror >= 0); 8366 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; 8367 goto out; /* tcp_drop() */ 8368 } 8369 if (tp->t_state == TCPS_SYN_SENT) { 8370 /* 8371 * If the SYN was retransmitted, indicate CWND to be limited 8372 * to 1 segment in cc_conn_init(). 8373 */ 8374 tp->snd_cwnd = 1; 8375 } else if (tp->t_rxtshift == 1) { 8376 /* 8377 * first retransmit; record ssthresh and cwnd so they can be 8378 * recovered if this turns out to be a "bad" retransmit. A 8379 * retransmit is considered "bad" if an ACK for this segment 8380 * is received within RTT/2 interval; the assumption here is 8381 * that the ACK was already in flight. See "On Estimating 8382 * End-to-End Network Path Properties" by Allman and Paxson 8383 * for more details. 
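 *
 * Example with illustrative numbers: with a smoothed RTT of 40ms the
 * t_badrxtwin set below is ~20ms from now; an ACK covering this data
 * inside that window suggests the original was not really lost, and
 * the cwnd/ssthresh/snd_recover values saved here become candidates
 * for restoration.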
8384 */ 8385 tp->snd_cwnd_prev = tp->snd_cwnd; 8386 tp->snd_ssthresh_prev = tp->snd_ssthresh; 8387 tp->snd_recover_prev = tp->snd_recover; 8388 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 8389 tp->t_flags |= TF_PREVVALID; 8390 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 8391 tp->t_flags &= ~TF_PREVVALID; 8392 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 8393 if ((tp->t_state == TCPS_SYN_SENT) || 8394 (tp->t_state == TCPS_SYN_RECEIVED)) 8395 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 8396 else 8397 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 8398 8399 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 8400 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 8401 /* 8402 * We enter the path for PLMTUD if connection is established or, if 8403 * connection is FIN_WAIT_1 status, reason for the last is that if 8404 * amount of data we send is very small, we could send it in couple 8405 * of packets and process straight to FIN. In that case we won't 8406 * catch ESTABLISHED state. 8407 */ 8408 #ifdef INET6 8409 isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false; 8410 #else 8411 isipv6 = false; 8412 #endif 8413 if (((V_tcp_pmtud_blackhole_detect == 1) || 8414 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 8415 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 8416 ((tp->t_state == TCPS_ESTABLISHED) || 8417 (tp->t_state == TCPS_FIN_WAIT_1))) { 8418 /* 8419 * Idea here is that at each stage of mtu probe (usually, 8420 * 1448 -> 1188 -> 524) should be given 2 chances to recover 8421 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 8422 * should take care of that. 8423 */ 8424 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 8425 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 8426 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 8427 tp->t_rxtshift % 2 == 0)) { 8428 /* 8429 * Enter Path MTU Black-hole Detection mechanism: - 8430 * Disable Path MTU Discovery (IP "DF" bit). - 8431 * Reduce MTU to lower value than what we negotiated 8432 * with peer. 8433 */ 8434 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 8435 /* Record that we may have found a black hole. */ 8436 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 8437 /* Keep track of previous MSS. */ 8438 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 8439 } 8440 8441 /* 8442 * Reduce the MSS to blackhole value or to the 8443 * default in an attempt to retransmit. 8444 */ 8445 #ifdef INET6 8446 if (isipv6 && 8447 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 8448 /* Use the sysctl tuneable blackhole MSS. */ 8449 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 8450 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 8451 } else if (isipv6) { 8452 /* Use the default MSS. */ 8453 tp->t_maxseg = V_tcp_v6mssdflt; 8454 /* 8455 * Disable Path MTU Discovery when we switch 8456 * to minmss. 8457 */ 8458 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 8459 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 8460 } 8461 #endif 8462 #if defined(INET6) && defined(INET) 8463 else 8464 #endif 8465 #ifdef INET 8466 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 8467 /* Use the sysctl tuneable blackhole MSS. */ 8468 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 8469 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 8470 } else { 8471 /* Use the default MSS. */ 8472 tp->t_maxseg = V_tcp_mssdflt; 8473 /* 8474 * Disable Path MTU Discovery when we switch 8475 * to minmss. 
8476 */ 8477 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 8478 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 8479 } 8480 #endif 8481 } else { 8482 /* 8483 * If further retransmissions are still unsuccessful 8484 * with a lowered MTU, maybe this isn't a blackhole 8485 * and we restore the previous MSS and blackhole 8486 * detection flags. The limit '6' is determined by 8487 * giving each probe stage (1448, 1188, 524) 2 8488 * chances to recover. 8489 */ 8490 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 8491 (tp->t_rxtshift >= 6)) { 8492 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 8493 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 8494 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 8495 if (tp->t_maxseg < V_tcp_mssdflt) { 8496 /* 8497 * The MSS is so small we should not 8498 * process incoming SACK's since we are 8499 * subject to attack in such a case. 8500 */ 8501 tp->t_flags2 |= TF2_PROC_SACK_PROHIBIT; 8502 } else { 8503 tp->t_flags2 &= ~TF2_PROC_SACK_PROHIBIT; 8504 } 8505 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 8506 } 8507 } 8508 } 8509 /* 8510 * Disable RFC1323 and SACK if we haven't got any response to 8511 * our third SYN to work-around some broken terminal servers 8512 * (most of which have hopefully been retired) that have bad VJ 8513 * header compression code which trashes TCP segments containing 8514 * unknown-to-them TCP options. 8515 */ 8516 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 8517 (tp->t_rxtshift == 3)) 8518 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 8519 /* 8520 * If we backed off this far, our srtt estimate is probably bogus. 8521 * Clobber it so we'll take the next rtt measurement as our srtt; 8522 * move the current srtt into rttvar to keep the current retransmit 8523 * times until then. 8524 */ 8525 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 8526 #ifdef INET6 8527 if ((inp->inp_vflag & INP_IPV6) != 0) 8528 in6_losing(inp); 8529 else 8530 #endif 8531 in_losing(inp); 8532 tp->t_rttvar += tp->t_srtt; 8533 tp->t_srtt = 0; 8534 } 8535 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 8536 tp->snd_recover = tp->snd_max; 8537 tp->t_flags |= TF_ACKNOW; 8538 tp->t_rtttime = 0; 8539 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__); 8540 out: 8541 return (retval); 8542 } 8543 8544 static int 8545 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp) 8546 { 8547 int32_t ret = 0; 8548 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 8549 8550 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 8551 (tp->t_flags & TF_GPUTINPROG)) { 8552 /* 8553 * We have a goodput in progress 8554 * and we have entered a late state. 8555 * Do we have enough data in the sb 8556 * to handle the GPUT request? 8557 */ 8558 uint32_t bytes; 8559 8560 bytes = tp->gput_ack - tp->gput_seq; 8561 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 8562 bytes += tp->gput_seq - tp->snd_una; 8563 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 8564 /* 8565 * There are not enough bytes in the socket 8566 * buffer that have been sent to cover this 8567 * measurement. Cancel it. 
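 *
 * Worked example (illustrative numbers): gput_seq = 1000,
 * gput_ack = 51000 and snd_una = 500 gives bytes = 50000 + 500;
 * if sbavail() on the send buffer is smaller than that, the
 * measurement can never complete (we are in a FIN-ish state, so
 * little or no new data will be queued) and it is cancelled here.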
8568 */ 8569 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 8570 rack->r_ctl.rc_gp_srtt /*flex1*/, 8571 tp->gput_seq, 8572 0, 0, 18, __LINE__, NULL, 0); 8573 tp->t_flags &= ~TF_GPUTINPROG; 8574 } 8575 } 8576 if (timers == 0) { 8577 return (0); 8578 } 8579 if (tp->t_state == TCPS_LISTEN) { 8580 /* no timers on listen sockets */ 8581 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 8582 return (0); 8583 return (1); 8584 } 8585 if ((timers & PACE_TMR_RACK) && 8586 rack->rc_on_min_to) { 8587 /* 8588 * For the rack timer when we 8589 * are on a min-timeout (which means rrr_conf = 3) 8590 * we don't want to check the timer. It may 8591 * be going off for a pace and thats ok we 8592 * want to send the retransmit (if its ready). 8593 * 8594 * If its on a normal rack timer (non-min) then 8595 * we will check if its expired. 8596 */ 8597 goto skip_time_check; 8598 } 8599 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 8600 uint32_t left; 8601 8602 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 8603 ret = -1; 8604 rack_log_to_processing(rack, cts, ret, 0); 8605 return (0); 8606 } 8607 if (hpts_calling == 0) { 8608 /* 8609 * A user send or queued mbuf (sack) has called us? We 8610 * return 0 and let the pacing guards 8611 * deal with it if they should or 8612 * should not cause a send. 8613 */ 8614 ret = -2; 8615 rack_log_to_processing(rack, cts, ret, 0); 8616 return (0); 8617 } 8618 /* 8619 * Ok our timer went off early and we are not paced false 8620 * alarm, go back to sleep. We make sure we don't have 8621 * no-sack wakeup on since we no longer have a PKT_OUTPUT 8622 * flag in place. 8623 */ 8624 rack->rc_tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE; 8625 ret = -3; 8626 left = rack->r_ctl.rc_timer_exp - cts; 8627 tcp_hpts_insert(tp, HPTS_MS_TO_SLOTS(left)); 8628 rack_log_to_processing(rack, cts, ret, left); 8629 return (1); 8630 } 8631 skip_time_check: 8632 rack->rc_tmr_stopped = 0; 8633 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 8634 if (timers & PACE_TMR_DELACK) { 8635 ret = rack_timeout_delack(tp, rack, cts); 8636 } else if (timers & PACE_TMR_RACK) { 8637 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8638 rack->r_fast_output = 0; 8639 ret = rack_timeout_rack(tp, rack, cts); 8640 } else if (timers & PACE_TMR_TLP) { 8641 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8642 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); 8643 } else if (timers & PACE_TMR_RXT) { 8644 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8645 rack->r_fast_output = 0; 8646 ret = rack_timeout_rxt(tp, rack, cts); 8647 } else if (timers & PACE_TMR_PERSIT) { 8648 ret = rack_timeout_persist(tp, rack, cts); 8649 } else if (timers & PACE_TMR_KEEP) { 8650 ret = rack_timeout_keepalive(tp, rack, cts); 8651 } 8652 rack_log_to_processing(rack, cts, ret, timers); 8653 return (ret); 8654 } 8655 8656 static void 8657 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 8658 { 8659 struct timeval tv; 8660 uint32_t us_cts, flags_on_entry; 8661 uint8_t hpts_removed = 0; 8662 8663 flags_on_entry = rack->r_ctl.rc_hpts_flags; 8664 us_cts = tcp_get_usecs(&tv); 8665 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 8666 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 8667 ((tp->snd_max - tp->snd_una) == 0))) { 8668 tcp_hpts_remove(rack->rc_tp); 8669 hpts_removed = 1; 8670 /* If we were not delayed cancel out the flag. 
*/ 8671 if ((tp->snd_max - tp->snd_una) == 0) 8672 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 8673 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 8674 } 8675 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 8676 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 8677 if (tcp_in_hpts(rack->rc_tp) && 8678 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 8679 /* 8680 * Canceling timer's when we have no output being 8681 * paced. We also must remove ourselves from the 8682 * hpts. 8683 */ 8684 tcp_hpts_remove(rack->rc_tp); 8685 hpts_removed = 1; 8686 } 8687 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 8688 } 8689 if (hpts_removed == 0) 8690 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 8691 } 8692 8693 static int 8694 rack_stopall(struct tcpcb *tp) 8695 { 8696 struct tcp_rack *rack; 8697 8698 rack = (struct tcp_rack *)tp->t_fb_ptr; 8699 rack->t_timers_stopped = 1; 8700 8701 tcp_hpts_remove(tp); 8702 8703 return (0); 8704 } 8705 8706 static void 8707 rack_stop_all_timers(struct tcpcb *tp, struct tcp_rack *rack) 8708 { 8709 /* 8710 * Assure no timers are running. 8711 */ 8712 if (tcp_timer_active(tp, TT_PERSIST)) { 8713 /* We enter in persists, set the flag appropriately */ 8714 rack->rc_in_persist = 1; 8715 } 8716 if (tcp_in_hpts(rack->rc_tp)) { 8717 tcp_hpts_remove(rack->rc_tp); 8718 } 8719 } 8720 8721 /* 8722 * We maintain an array fo 16 (RETRAN_CNT_SIZE) entries. This 8723 * array is zeroed at the start of recovery. Each time a segment 8724 * is retransmitted, we translate that into a number of packets 8725 * (based on segsiz) and based on how many times its been retransmitted 8726 * increment by the number of packets the counter that represents 8727 * retansmitted N times. Index 0 is retransmitted 1 time, index 1 8728 * is retransmitted 2 times etc. 8729 * 8730 * So for example when we send a 4344 byte transmission with a 1448 8731 * byte segsize, and its the third time we have retransmitted this 8732 * segment, we would add to the rc_cnt_of_retran[2] the value of 8733 * 3. That represents 3 MSS were retransmitted 3 times (index is 8734 * the number of times retranmitted minus 1). 8735 */ 8736 static void 8737 rack_peg_rxt(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t segsiz) 8738 { 8739 int idx; 8740 uint32_t peg; 8741 8742 peg = ((rsm->r_end - rsm->r_start) + segsiz) - 1; 8743 peg /= segsiz; 8744 idx = rsm->r_act_rxt_cnt - 1; 8745 if (idx >= RETRAN_CNT_SIZE) 8746 idx = RETRAN_CNT_SIZE - 1; 8747 /* Max of a uint16_t retransmits in a bucket */ 8748 if ((rack->r_ctl.rc_cnt_of_retran[idx] + peg) < 0xffff) 8749 rack->r_ctl.rc_cnt_of_retran[idx] += peg; 8750 else 8751 rack->r_ctl.rc_cnt_of_retran[idx] = 0xffff; 8752 } 8753 8754 /* 8755 * We maintain an array fo 16 (RETRAN_CNT_SIZE) entries. This 8756 * array is zeroed at the start of recovery. Each time a segment 8757 * is retransmitted, we translate that into a number of packets 8758 * (based on segsiz) and based on how many times its been retransmitted 8759 * increment by the number of packets the counter that represents 8760 * retansmitted N times. Index 0 is retransmitted 1 time, index 1 8761 * is retransmitted 2 times etc. 8762 * 8763 * The rack_unpeg_rxt is used when we go to retransmit a segment 8764 * again. 
Take the example from the comment above rack_peg_rxt(): 8765 * the 3-MSS segment is about to be retransmitted a third time. 8766 * Before rack_peg_rxt() is called again and r_act_rxt_cnt is 8767 * incremented, rack_unpeg_rxt() is called to subtract back the 8768 * previous add for its last rxt (at that point r_act_rxt_cnt is 8769 * still 2 for the 2 earlier retransmissions), so 8770 * we subtract 3 from rc_cnt_of_retran[1] to remove 8771 * those 3 segments. You will see this in the rack_update_rsm() 8772 * below where we do: 8773 * if (rsm->r_act_rxt_cnt > 0) { 8774 * rack_unpeg_rxt(rack, rsm, segsiz); 8775 * } 8776 * rsm->r_act_rxt_cnt++; 8777 * rack_peg_rxt(rack, rsm, segsiz); 8778 * 8779 * This effectively moves the count from rc_cnt_of_retran[1] to 8780 * rc_cnt_of_retran[2]. 8781 */ 8782 static void 8783 rack_unpeg_rxt(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t segsiz) 8784 { 8785 int idx; 8786 uint32_t peg; 8787 8788 idx = rsm->r_act_rxt_cnt - 1; 8789 if (idx >= RETRAN_CNT_SIZE) 8790 idx = RETRAN_CNT_SIZE - 1; 8791 peg = ((rsm->r_end - rsm->r_start) + segsiz) - 1; 8792 peg /= segsiz; 8793 if (peg < rack->r_ctl.rc_cnt_of_retran[idx]) 8794 rack->r_ctl.rc_cnt_of_retran[idx] -= peg; 8795 else { 8796 /* TSNH */ 8797 rack->r_ctl.rc_cnt_of_retran[idx] = 0; 8798 } 8799 } 8800 8801 static void 8802 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 8803 struct rack_sendmap *rsm, uint64_t ts, uint32_t add_flag, int segsiz) 8804 { 8805 int32_t idx; 8806 8807 rsm->r_rtr_cnt++; 8808 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 8809 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 8810 rsm->r_flags |= RACK_OVERMAX; 8811 } 8812 if (rsm->r_act_rxt_cnt > 0) { 8813 /* Drop the count back for this, it's being retransmitted again */ 8814 rack_unpeg_rxt(rack, rsm, segsiz); 8815 } 8816 rsm->r_act_rxt_cnt++; 8817 /* Peg the count/index */ 8818 rack_peg_rxt(rack, rsm, segsiz); 8819 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8820 rsm->r_dupack = 0; 8821 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { 8822 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 8823 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 8824 } 8825 if (rsm->r_flags & RACK_WAS_LOST) { 8826 /* 8827 * We retransmitted it, putting it back in flight; 8828 * remove the lost designation and reduce the 8829 * bytes considered lost. 8830 */ 8831 rsm->r_flags &= ~RACK_WAS_LOST; 8832 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), 8833 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 8834 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) 8835 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; 8836 else 8837 rack->r_ctl.rc_considered_lost = 0; 8838 } 8839 idx = rsm->r_rtr_cnt - 1; 8840 rsm->r_tim_lastsent[idx] = ts; 8841 /* 8842 * Here we don't add in the len of the send, since it's already 8843 * in snd_una <-> snd_max.
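 *
 * Sketch (assuming ctf_flight_size() is roughly outstanding data,
 * snd_max - snd_una, minus SACKed bytes): with snd_una = 1000,
 * snd_max = 11000 and 2000 bytes SACKed, r_fas records ~8000 as the
 * flight size at the time of this retransmission.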
8844 */ 8845 rsm->r_fas = ctf_flight_size(rack->rc_tp, 8846 rack->r_ctl.rc_sacked); 8847 if (rsm->r_flags & RACK_ACKED) { 8848 /* Problably MTU discovery messing with us */ 8849 rsm->r_flags &= ~RACK_ACKED; 8850 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8851 } 8852 if (rsm->r_in_tmap) { 8853 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8854 rsm->r_in_tmap = 0; 8855 } 8856 /* Lets make sure it really is in or not the GP window */ 8857 rack_mark_in_gp_win(tp, rsm); 8858 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8859 rsm->r_in_tmap = 1; 8860 rsm->r_bas = (uint8_t)(((rsm->r_end - rsm->r_start) + segsiz - 1) / segsiz); 8861 /* Take off the must retransmit flag, if its on */ 8862 if (rsm->r_flags & RACK_MUST_RXT) { 8863 if (rack->r_must_retran) 8864 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 8865 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 8866 /* 8867 * We have retransmitted all we need. Clear 8868 * any must retransmit flags. 8869 */ 8870 rack->r_must_retran = 0; 8871 rack->r_ctl.rc_out_at_rto = 0; 8872 } 8873 rsm->r_flags &= ~RACK_MUST_RXT; 8874 } 8875 /* Remove any collapsed flag */ 8876 rsm->r_flags &= ~RACK_RWND_COLLAPSED; 8877 if (rsm->r_flags & RACK_SACK_PASSED) { 8878 /* We have retransmitted due to the SACK pass */ 8879 rsm->r_flags &= ~RACK_SACK_PASSED; 8880 rsm->r_flags |= RACK_WAS_SACKPASS; 8881 } 8882 } 8883 8884 static uint32_t 8885 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 8886 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint32_t add_flag, int segsiz) 8887 { 8888 /* 8889 * We (re-)transmitted starting at rsm->r_start for some length 8890 * (possibly less than r_end. 8891 */ 8892 struct rack_sendmap *nrsm; 8893 int insret __diagused; 8894 uint32_t c_end; 8895 int32_t len; 8896 8897 len = *lenp; 8898 c_end = rsm->r_start + len; 8899 if (SEQ_GEQ(c_end, rsm->r_end)) { 8900 /* 8901 * We retransmitted the whole piece or more than the whole 8902 * slopping into the next rsm. 8903 */ 8904 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); 8905 if (c_end == rsm->r_end) { 8906 *lenp = 0; 8907 return (0); 8908 } else { 8909 int32_t act_len; 8910 8911 /* Hangs over the end return whats left */ 8912 act_len = rsm->r_end - rsm->r_start; 8913 *lenp = (len - act_len); 8914 return (rsm->r_end); 8915 } 8916 /* We don't get out of this block. */ 8917 } 8918 /* 8919 * Here we retransmitted less than the whole thing which means we 8920 * have to split this into what was transmitted and what was not. 8921 */ 8922 nrsm = rack_alloc_full_limit(rack); 8923 if (nrsm == NULL) { 8924 /* 8925 * We can't get memory, so lets not proceed. 8926 */ 8927 *lenp = 0; 8928 return (0); 8929 } 8930 /* 8931 * So here we are going to take the original rsm and make it what we 8932 * retransmitted. nrsm will be the tail portion we did not 8933 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 8934 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 8935 * 1, 6 and the new piece will be 6, 11. 
8936 */ 8937 rack_clone_rsm(rack, nrsm, rsm, c_end); 8938 nrsm->r_dupack = 0; 8939 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 8940 #ifndef INVARIANTS 8941 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 8942 #else 8943 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 8944 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 8945 nrsm, insret, rack, rsm); 8946 } 8947 #endif 8948 if (rsm->r_in_tmap) { 8949 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8950 nrsm->r_in_tmap = 1; 8951 } 8952 rsm->r_flags &= (~RACK_HAS_FIN); 8953 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); 8954 /* Log a split of rsm into rsm and nrsm */ 8955 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 8956 *lenp = 0; 8957 return (0); 8958 } 8959 8960 static void 8961 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 8962 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts, 8963 struct rack_sendmap *hintrsm, uint32_t add_flag, struct mbuf *s_mb, 8964 uint32_t s_moff, int hw_tls, int segsiz) 8965 { 8966 struct tcp_rack *rack; 8967 struct rack_sendmap *rsm, *nrsm; 8968 int insret __diagused; 8969 8970 register uint32_t snd_max, snd_una; 8971 8972 /* 8973 * Add to the RACK log of packets in flight or retransmitted. If 8974 * there is a TS option we will use the TS echoed, if not we will 8975 * grab a TS. 8976 * 8977 * Retransmissions will increment the count and move the ts to its 8978 * proper place. Note that if options do not include TS's then we 8979 * won't be able to effectively use the ACK for an RTT on a retran. 8980 * 8981 * Notes about r_start and r_end. Lets consider a send starting at 8982 * sequence 1 for 10 bytes. In such an example the r_start would be 8983 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 8984 * This means that r_end is actually the first sequence for the next 8985 * slot (11). 8986 * 8987 */ 8988 /* 8989 * If err is set what do we do XXXrrs? should we not add the thing? 8990 * -- i.e. return if err != 0 or should we pretend we sent it? -- 8991 * i.e. proceed with add ** do this for now. 8992 */ 8993 INP_WLOCK_ASSERT(tptoinpcb(tp)); 8994 if (err) 8995 /* 8996 * We don't log errors -- we could but snd_max does not 8997 * advance in this case either. 8998 */ 8999 return; 9000 9001 if (th_flags & TH_RST) { 9002 /* 9003 * We don't log resets and we return immediately from 9004 * sending 9005 */ 9006 return; 9007 } 9008 rack = (struct tcp_rack *)tp->t_fb_ptr; 9009 snd_una = tp->snd_una; 9010 snd_max = tp->snd_max; 9011 if (th_flags & (TH_SYN | TH_FIN)) { 9012 /* 9013 * The call to rack_log_output is made before bumping 9014 * snd_max. This means we can record one extra byte on a SYN 9015 * or FIN if seq_out is adding more on and a FIN is present 9016 * (and we are not resending). 9017 */ 9018 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 9019 len++; 9020 if (th_flags & TH_FIN) 9021 len++; 9022 } 9023 if (SEQ_LEQ((seq_out + len), snd_una)) { 9024 /* Are sending an old segment to induce an ack (keep-alive)? */ 9025 return; 9026 } 9027 if (SEQ_LT(seq_out, snd_una)) { 9028 /* huh? should we panic? */ 9029 uint32_t end; 9030 9031 end = seq_out + len; 9032 seq_out = snd_una; 9033 if (SEQ_GEQ(end, seq_out)) 9034 len = end - seq_out; 9035 else 9036 len = 0; 9037 } 9038 if (len == 0) { 9039 /* We don't log zero window probes */ 9040 return; 9041 } 9042 if (IN_FASTRECOVERY(tp->t_flags)) { 9043 rack->r_ctl.rc_prr_out += len; 9044 } 9045 /* First question is it a retransmission or new? 
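 * (seq_out == snd_max means this is new data and gets a fresh rsm;
 * anything else must already be covered by an entry in the map.)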
*/ 9046 if (seq_out == snd_max) { 9047 /* Its new */ 9048 rack_chk_req_and_hybrid_on_out(rack, seq_out, len, cts); 9049 again: 9050 rsm = rack_alloc(rack); 9051 if (rsm == NULL) { 9052 /* 9053 * Hmm out of memory and the tcb got destroyed while 9054 * we tried to wait. 9055 */ 9056 return; 9057 } 9058 if (th_flags & TH_FIN) { 9059 rsm->r_flags = RACK_HAS_FIN|add_flag; 9060 } else { 9061 rsm->r_flags = add_flag; 9062 } 9063 if (hw_tls) 9064 rsm->r_hw_tls = 1; 9065 rsm->r_tim_lastsent[0] = cts; 9066 rsm->r_rtr_cnt = 1; 9067 rsm->r_act_rxt_cnt = 0; 9068 rsm->r_rtr_bytes = 0; 9069 if (th_flags & TH_SYN) { 9070 /* The data space is one beyond snd_una */ 9071 rsm->r_flags |= RACK_HAS_SYN; 9072 } 9073 rsm->r_start = seq_out; 9074 rsm->r_end = rsm->r_start + len; 9075 rack_mark_in_gp_win(tp, rsm); 9076 rsm->r_dupack = 0; 9077 /* 9078 * save off the mbuf location that 9079 * sndmbuf_noadv returned (which is 9080 * where we started copying from).. 9081 */ 9082 rsm->m = s_mb; 9083 rsm->soff = s_moff; 9084 /* 9085 * Here we do add in the len of send, since its not yet 9086 * reflected in in snduna <->snd_max 9087 */ 9088 rsm->r_fas = (ctf_flight_size(rack->rc_tp, 9089 rack->r_ctl.rc_sacked) + 9090 (rsm->r_end - rsm->r_start)); 9091 if ((rack->rc_initial_ss_comp == 0) && 9092 (rack->r_ctl.ss_hi_fs < rsm->r_fas)) { 9093 rack->r_ctl.ss_hi_fs = rsm->r_fas; 9094 } 9095 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ 9096 if (rsm->m) { 9097 if (rsm->m->m_len <= rsm->soff) { 9098 /* 9099 * XXXrrs Question, will this happen? 9100 * 9101 * If sbsndptr is set at the correct place 9102 * then s_moff should always be somewhere 9103 * within rsm->m. But if the sbsndptr was 9104 * off then that won't be true. If it occurs 9105 * we need to walkout to the correct location. 9106 */ 9107 struct mbuf *lm; 9108 9109 lm = rsm->m; 9110 while (lm->m_len <= rsm->soff) { 9111 rsm->soff -= lm->m_len; 9112 lm = lm->m_next; 9113 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", 9114 __func__, rack, s_moff, s_mb, rsm->soff)); 9115 } 9116 rsm->m = lm; 9117 } 9118 rsm->orig_m_len = rsm->m->m_len; 9119 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 9120 } else { 9121 rsm->orig_m_len = 0; 9122 rsm->orig_t_space = 0; 9123 } 9124 rsm->r_bas = (uint8_t)((len + segsiz - 1) / segsiz); 9125 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9126 /* Log a new rsm */ 9127 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); 9128 #ifndef INVARIANTS 9129 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 9130 #else 9131 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 9132 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 9133 nrsm, insret, rack, rsm); 9134 } 9135 #endif 9136 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 9137 rsm->r_in_tmap = 1; 9138 if (rsm->r_flags & RACK_IS_PCM) { 9139 rack->r_ctl.pcm_i.send_time = cts; 9140 rack->r_ctl.pcm_i.eseq = rsm->r_end; 9141 /* First time through we set the start too */ 9142 if (rack->pcm_in_progress == 0) 9143 rack->r_ctl.pcm_i.sseq = rsm->r_start; 9144 } 9145 /* 9146 * Special case detection, is there just a single 9147 * packet outstanding when we are not in recovery? 9148 * 9149 * If this is true mark it so. 
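 *
 * (The flag is set on the previous rsm; tcp_rack_xmit_timer() later
 * consults r_one_out_nr to lower the confidence of RTT samples taken
 * when only a single small send was in flight.)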
9150 */ 9151 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 9152 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 9153 struct rack_sendmap *prsm; 9154 9155 prsm = tqhash_prev(rack->r_ctl.tqh, rsm); 9156 if (prsm) 9157 prsm->r_one_out_nr = 1; 9158 } 9159 return; 9160 } 9161 /* 9162 * If we reach here its a retransmission and we need to find it. 9163 */ 9164 more: 9165 if (hintrsm && (hintrsm->r_start == seq_out)) { 9166 rsm = hintrsm; 9167 hintrsm = NULL; 9168 } else { 9169 /* No hints sorry */ 9170 rsm = NULL; 9171 } 9172 if ((rsm) && (rsm->r_start == seq_out)) { 9173 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); 9174 if (len == 0) { 9175 return; 9176 } else { 9177 goto more; 9178 } 9179 } 9180 /* Ok it was not the last pointer go through it the hard way. */ 9181 refind: 9182 rsm = tqhash_find(rack->r_ctl.tqh, seq_out); 9183 if (rsm) { 9184 if (rsm->r_start == seq_out) { 9185 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); 9186 if (len == 0) { 9187 return; 9188 } else { 9189 goto refind; 9190 } 9191 } 9192 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 9193 /* Transmitted within this piece */ 9194 /* 9195 * Ok we must split off the front and then let the 9196 * update do the rest 9197 */ 9198 nrsm = rack_alloc_full_limit(rack); 9199 if (nrsm == NULL) { 9200 rack_update_rsm(tp, rack, rsm, cts, add_flag, segsiz); 9201 return; 9202 } 9203 /* 9204 * copy rsm to nrsm and then trim the front of rsm 9205 * to not include this part. 9206 */ 9207 rack_clone_rsm(rack, nrsm, rsm, seq_out); 9208 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 9209 #ifndef INVARIANTS 9210 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 9211 #else 9212 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 9213 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 9214 nrsm, insret, rack, rsm); 9215 } 9216 #endif 9217 if (rsm->r_in_tmap) { 9218 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 9219 nrsm->r_in_tmap = 1; 9220 } 9221 rsm->r_flags &= (~RACK_HAS_FIN); 9222 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag, segsiz); 9223 if (len == 0) { 9224 return; 9225 } else if (len > 0) 9226 goto refind; 9227 } 9228 } 9229 /* 9230 * Hmm not found in map did they retransmit both old and on into the 9231 * new? 9232 */ 9233 if (seq_out == tp->snd_max) { 9234 goto again; 9235 } else if (SEQ_LT(seq_out, tp->snd_max)) { 9236 #ifdef INVARIANTS 9237 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 9238 seq_out, len, tp->snd_una, tp->snd_max); 9239 printf("Starting Dump of all rack entries\n"); 9240 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 9241 printf("rsm:%p start:%u end:%u\n", 9242 rsm, rsm->r_start, rsm->r_end); 9243 } 9244 printf("Dump complete\n"); 9245 panic("seq_out not found rack:%p tp:%p", 9246 rack, tp); 9247 #endif 9248 } else { 9249 #ifdef INVARIANTS 9250 /* 9251 * Hmm beyond sndmax? (only if we are using the new rtt-pack 9252 * flag) 9253 */ 9254 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 9255 seq_out, len, tp->snd_max, tp); 9256 #endif 9257 } 9258 } 9259 9260 /* 9261 * Record one of the RTT updates from an ack into 9262 * our sample structure. 
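 *
 * A sketch of the confidence convention used here: 2 marks a sample
 * from a SACKed block, 1 a cum-ack sample we trust, and 0 one we do
 * not (e.g. an app-limited single-MSS tail or a retransmission
 * matched only by timestamp). Below, a 1 can be demoted to 0 when
 * the rsm was a just-return send or the lone small packet
 * outstanding, since such samples say little about path buffering.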
9263 */ 9264 9265 static void 9266 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt, 9267 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt) 9268 { 9269 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 9270 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { 9271 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; 9272 } 9273 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 9274 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { 9275 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; 9276 } 9277 if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 9278 if (us_rtt < rack->r_ctl.rc_gp_lowrtt) 9279 rack->r_ctl.rc_gp_lowrtt = us_rtt; 9280 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd) 9281 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 9282 } 9283 if ((confidence == 1) && 9284 ((rsm == NULL) || 9285 (rsm->r_just_ret) || 9286 (rsm->r_one_out_nr && 9287 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) { 9288 /* 9289 * If the rsm was a just-return send, we 9290 * can't trust the rtt measurement for 9291 * buffer determination. Note that a 9292 * confidence of 2 indicates SACK'd, which 9293 * overrides the r_just_ret or the 9294 * r_one_out_nr checks. If it was a CUM-ACK 9295 * and we had only two segments outstanding 9296 * but got an ack for only one, that also 9297 * lowers our confidence. 9298 */ 9299 confidence = 0; 9300 } 9301 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 9302 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) { 9303 if (rack->r_ctl.rack_rs.confidence == 0) { 9304 /* 9305 * We take anything with no current confidence 9306 * saved. 9307 */ 9308 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 9309 rack->r_ctl.rack_rs.confidence = confidence; 9310 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 9311 } else if (confidence != 0) { 9312 /* 9313 * Once we have a confident number, 9314 * we can update it with a smaller 9315 * value since this confident number 9316 * may include the DSACK time until 9317 * the next segment (the second one) arrived. 9318 */ 9319 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 9320 rack->r_ctl.rack_rs.confidence = confidence; 9321 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 9322 } 9323 } 9324 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence); 9325 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; 9326 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; 9327 rack->r_ctl.rack_rs.rs_rtt_cnt++; 9328 } 9329 9330 /* 9331 * Collect new round-trip time estimate 9332 * and update averages and current timeout.
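 *
 * One representative RTT per ACK is chosen according to
 * rc_rate_sample_method (the lowest, highest or average of the
 * samples gathered for that ack) and then folded into the
 * srtt/rttvar smoothing below.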
9333 */ 9334 static void 9335 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 9336 { 9337 int32_t delta; 9338 int32_t rtt; 9339 9340 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 9341 /* No valid sample */ 9342 return; 9343 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 9344 /* We are to use the lowest RTT seen in a single ack */ 9345 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 9346 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 9347 /* We are to use the highest RTT seen in a single ack */ 9348 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 9349 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 9350 /* We are to use the average RTT seen in a single ack */ 9351 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 9352 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 9353 } else { 9354 #ifdef INVARIANTS 9355 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 9356 #endif 9357 return; 9358 } 9359 if (rtt == 0) 9360 rtt = 1; 9361 if (rack->rc_gp_rtt_set == 0) { 9362 /* 9363 * With no RTT we have to accept 9364 * even one we are not confident of. 9365 */ 9366 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 9367 rack->rc_gp_rtt_set = 1; 9368 } else if (rack->r_ctl.rack_rs.confidence) { 9369 /* update the running gp srtt */ 9370 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 9371 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 9372 } 9373 if (rack->r_ctl.rack_rs.confidence) { 9374 /* 9375 * record the low and high for highly buffered path computation, 9376 * we only do this if we are confident (not a retransmission). 9377 */ 9378 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 9379 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 9380 } 9381 if (rack->rc_highly_buffered == 0) { 9382 /* 9383 * Currently once we declare a path has 9384 * highly buffered there is no going 9385 * back, which may be a problem... 9386 */ 9387 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 9388 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 9389 rack->r_ctl.rc_highest_us_rtt, 9390 rack->r_ctl.rc_lowest_us_rtt, 9391 RACK_RTTS_SEEHBP); 9392 rack->rc_highly_buffered = 1; 9393 } 9394 } 9395 } 9396 if ((rack->r_ctl.rack_rs.confidence) || 9397 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 9398 /* 9399 * If we are highly confident of it <or> it was 9400 * never retransmitted we accept it as the last us_rtt. 9401 */ 9402 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 9403 /* The lowest rtt can be set if its was not retransmited */ 9404 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 9405 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 9406 if (rack->r_ctl.rc_lowest_us_rtt == 0) 9407 rack->r_ctl.rc_lowest_us_rtt = 1; 9408 } 9409 } 9410 rack = (struct tcp_rack *)tp->t_fb_ptr; 9411 if (tp->t_srtt != 0) { 9412 /* 9413 * We keep a simple srtt in microseconds, like our rtt 9414 * measurement. We don't need to do any tricks with shifting 9415 * etc. Instead we just add in 1/8th of the new measurement 9416 * and subtract out 1/8 of the old srtt. We do the same with 9417 * the variance after finding the absolute value of the 9418 * difference between this sample and the current srtt. 
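 *
 * In formula form (a restatement of the code below, with srtt taken
 * before the update):
 *   srtt'   = srtt   - srtt/8   + rtt/8
 *   rttvar' = rttvar - rttvar/8 + |srtt - rtt|/8
 * both kept directly in microseconds.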
9419 */ 9420 delta = tp->t_srtt - rtt; 9421 /* Take off 1/8th of the current sRTT */ 9422 tp->t_srtt -= (tp->t_srtt >> 3); 9423 /* Add in 1/8th of the new RTT just measured */ 9424 tp->t_srtt += (rtt >> 3); 9425 if (tp->t_srtt <= 0) 9426 tp->t_srtt = 1; 9427 /* Now lets make the absolute value of the variance */ 9428 if (delta < 0) 9429 delta = -delta; 9430 /* Subtract out 1/8th */ 9431 tp->t_rttvar -= (tp->t_rttvar >> 3); 9432 /* Add in 1/8th of the new variance we just saw */ 9433 tp->t_rttvar += (delta >> 3); 9434 if (tp->t_rttvar <= 0) 9435 tp->t_rttvar = 1; 9436 } else { 9437 /* 9438 * No rtt measurement yet - use the unsmoothed rtt. Set the 9439 * variance to half the rtt (so our first retransmit happens 9440 * at 3*rtt). 9441 */ 9442 tp->t_srtt = rtt; 9443 tp->t_rttvar = rtt >> 1; 9444 } 9445 rack->rc_srtt_measure_made = 1; 9446 KMOD_TCPSTAT_INC(tcps_rttupdated); 9447 if (tp->t_rttupdated < UCHAR_MAX) 9448 tp->t_rttupdated++; 9449 #ifdef STATS 9450 if (rack_stats_gets_ms_rtt == 0) { 9451 /* Send in the microsecond rtt used for rxt timeout purposes */ 9452 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 9453 } else if (rack_stats_gets_ms_rtt == 1) { 9454 /* Send in the millisecond rtt used for rxt timeout purposes */ 9455 int32_t ms_rtt; 9456 9457 /* Round up */ 9458 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 9459 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 9460 } else if (rack_stats_gets_ms_rtt == 2) { 9461 /* Send in the millisecond rtt has close to the path RTT as we can get */ 9462 int32_t ms_rtt; 9463 9464 /* Round up */ 9465 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 9466 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 9467 } else { 9468 /* Send in the microsecond rtt has close to the path RTT as we can get */ 9469 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 9470 } 9471 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_PATHRTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 9472 #endif 9473 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 9474 /* 9475 * the retransmit should happen at rtt + 4 * rttvar. Because of the 9476 * way we do the smoothing, srtt and rttvar will each average +1/2 9477 * tick of bias. When we compute the retransmit timer, we want 1/2 9478 * tick of rounding and 1 extra tick because of +-1/2 tick 9479 * uncertainty in the firing of the timer. The bias will give us 9480 * exactly the 1.5 tick we need. But, because the bias is 9481 * statistical, we have to test that we don't drop below the minimum 9482 * feasible timer (which is 2 ticks). 9483 */ 9484 tp->t_rxtshift = 0; 9485 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 9486 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 9487 rack_log_rtt_sample(rack, rtt); 9488 tp->t_softerror = 0; 9489 } 9490 9491 9492 static void 9493 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 9494 { 9495 /* 9496 * Apply to filter the inbound us-rtt at us_cts. 
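 *
 * rc_gp_min_rtt is kept as a (time-windowed) minimum filter. Sketch
 * of the logic below: when a sample undercuts the old filtered value
 * by more than rack_min_rtt_movement and we are already close to the
 * next probe-rtt time, we assume a peer flow has entered probe-rtt
 * and enter it early ourselves.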
9497 */ 9498 uint32_t old_rtt; 9499 9500 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 9501 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 9502 us_rtt, us_cts); 9503 if (old_rtt > us_rtt) { 9504 /* We just hit a new lower rtt time */ 9505 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 9506 __LINE__, RACK_RTTS_NEWRTT); 9507 /* 9508 * Only count it if its lower than what we saw within our 9509 * calculated range. 9510 */ 9511 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 9512 if (rack_probertt_lower_within && 9513 rack->rc_gp_dyn_mul && 9514 (rack->use_fixed_rate == 0) && 9515 (rack->rc_always_pace)) { 9516 /* 9517 * We are seeing a new lower rtt very close 9518 * to the time that we would have entered probe-rtt. 9519 * This is probably due to the fact that a peer flow 9520 * has entered probe-rtt. Lets go in now too. 9521 */ 9522 uint32_t val; 9523 9524 val = rack_probertt_lower_within * rack_time_between_probertt; 9525 val /= 100; 9526 if ((rack->in_probe_rtt == 0) && 9527 (rack->rc_skip_timely == 0) && 9528 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 9529 rack_enter_probertt(rack, us_cts); 9530 } 9531 } 9532 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 9533 } 9534 } 9535 } 9536 9537 static int 9538 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 9539 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 9540 { 9541 uint32_t us_rtt; 9542 int32_t i, all; 9543 uint32_t t, len_acked; 9544 9545 if ((rsm->r_flags & RACK_ACKED) || 9546 (rsm->r_flags & RACK_WAS_ACKED)) 9547 /* Already done */ 9548 return (0); 9549 if (rsm->r_no_rtt_allowed) { 9550 /* Not allowed */ 9551 return (0); 9552 } 9553 if (ack_type == CUM_ACKED) { 9554 if (SEQ_GT(th_ack, rsm->r_end)) { 9555 len_acked = rsm->r_end - rsm->r_start; 9556 all = 1; 9557 } else { 9558 len_acked = th_ack - rsm->r_start; 9559 all = 0; 9560 } 9561 } else { 9562 len_acked = rsm->r_end - rsm->r_start; 9563 all = 0; 9564 } 9565 if (rsm->r_rtr_cnt == 1) { 9566 9567 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 9568 if ((int)t <= 0) 9569 t = 1; 9570 if (!tp->t_rttlow || tp->t_rttlow > t) 9571 tp->t_rttlow = t; 9572 if (!rack->r_ctl.rc_rack_min_rtt || 9573 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9574 rack->r_ctl.rc_rack_min_rtt = t; 9575 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9576 rack->r_ctl.rc_rack_min_rtt = 1; 9577 } 9578 } 9579 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 9580 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9581 else 9582 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9583 if (us_rtt == 0) 9584 us_rtt = 1; 9585 if (CC_ALGO(tp)->rttsample != NULL) { 9586 /* Kick the RTT to the CC */ 9587 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 9588 } 9589 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 9590 if (ack_type == SACKED) { 9591 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 9592 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 9593 } else { 9594 /* 9595 * We need to setup what our confidence 9596 * is in this ack. 9597 * 9598 * If the rsm was app limited and it is 9599 * less than a mss in length (the end 9600 * of the send) then we have a gap. 
If we 9601 * were app limited but say we were sending 9602 * multiple MSS's then we are more confident 9603 * int it. 9604 * 9605 * When we are not app-limited then we see if 9606 * the rsm is being included in the current 9607 * measurement, we tell this by the app_limited_needs_set 9608 * flag. 9609 * 9610 * Note that being cwnd blocked is not applimited 9611 * as well as the pacing delay between packets which 9612 * are sending only 1 or 2 MSS's also will show up 9613 * in the RTT. We probably need to examine this algorithm 9614 * a bit more and enhance it to account for the delay 9615 * between rsm's. We could do that by saving off the 9616 * pacing delay of each rsm (in an rsm) and then 9617 * factoring that in somehow though for now I am 9618 * not sure how :) 9619 */ 9620 int calc_conf = 0; 9621 9622 if (rsm->r_flags & RACK_APP_LIMITED) { 9623 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 9624 calc_conf = 0; 9625 else 9626 calc_conf = 1; 9627 } else if (rack->app_limited_needs_set == 0) { 9628 calc_conf = 1; 9629 } else { 9630 calc_conf = 0; 9631 } 9632 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 9633 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 9634 calc_conf, rsm, rsm->r_rtr_cnt); 9635 } 9636 if ((rsm->r_flags & RACK_TLP) && 9637 (!IN_FASTRECOVERY(tp->t_flags))) { 9638 /* Segment was a TLP and our retrans matched */ 9639 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 9640 rack_cong_signal(tp, CC_NDUPACK, th_ack, __LINE__); 9641 } 9642 } 9643 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9644 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9645 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { 9646 /* New more recent rack_tmit_time */ 9647 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 9648 if (rack->r_ctl.rc_rack_tmit_time == 0) 9649 rack->r_ctl.rc_rack_tmit_time = 1; 9650 rack->rc_rack_rtt = t; 9651 } 9652 return (1); 9653 } 9654 /* 9655 * We clear the soft/rxtshift since we got an ack. 9656 * There is no assurance we will call the commit() function 9657 * so we need to clear these to avoid incorrect handling. 9658 */ 9659 tp->t_rxtshift = 0; 9660 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 9661 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 9662 tp->t_softerror = 0; 9663 if (to && (to->to_flags & TOF_TS) && 9664 (ack_type == CUM_ACKED) && 9665 (to->to_tsecr) && 9666 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 9667 /* 9668 * Now which timestamp does it match? In this block the ACK 9669 * must be coming from a previous transmission. 9670 */ 9671 for (i = 0; i < rsm->r_rtr_cnt; i++) { 9672 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 9673 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 9674 if ((int)t <= 0) 9675 t = 1; 9676 if (CC_ALGO(tp)->rttsample != NULL) { 9677 /* 9678 * Kick the RTT to the CC, here 9679 * we lie a bit in that we know the 9680 * retransmission is correct even though 9681 * we retransmitted. This is because 9682 * we match the timestamps. 9683 */ 9684 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) 9685 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; 9686 else 9687 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; 9688 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 9689 } 9690 if ((i + 1) < rsm->r_rtr_cnt) { 9691 /* 9692 * The peer ack'd from our previous 9693 * transmission. 
We have a spurious 9694 * retransmission and thus we dont 9695 * want to update our rack_rtt. 9696 * 9697 * Hmm should there be a CC revert here? 9698 * 9699 */ 9700 return (0); 9701 } 9702 if (!tp->t_rttlow || tp->t_rttlow > t) 9703 tp->t_rttlow = t; 9704 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9705 rack->r_ctl.rc_rack_min_rtt = t; 9706 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9707 rack->r_ctl.rc_rack_min_rtt = 1; 9708 } 9709 } 9710 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9711 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9712 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { 9713 /* New more recent rack_tmit_time */ 9714 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 9715 if (rack->r_ctl.rc_rack_tmit_time == 0) 9716 rack->r_ctl.rc_rack_tmit_time = 1; 9717 rack->rc_rack_rtt = t; 9718 } 9719 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); 9720 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, 9721 rsm->r_rtr_cnt); 9722 return (1); 9723 } 9724 } 9725 /* If we are logging log out the sendmap */ 9726 if (tcp_bblogging_on(rack->rc_tp)) { 9727 for (i = 0; i < rsm->r_rtr_cnt; i++) { 9728 rack_log_rtt_sendmap(rack, i, rsm->r_tim_lastsent[i], to->to_tsecr); 9729 } 9730 } 9731 goto ts_not_found; 9732 } else { 9733 /* 9734 * Ok its a SACK block that we retransmitted. or a windows 9735 * machine without timestamps. We can tell nothing from the 9736 * time-stamp since its not there or the time the peer last 9737 * received a segment that moved forward its cum-ack point. 9738 */ 9739 ts_not_found: 9740 i = rsm->r_rtr_cnt - 1; 9741 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 9742 if ((int)t <= 0) 9743 t = 1; 9744 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9745 /* 9746 * We retransmitted and the ack came back in less 9747 * than the smallest rtt we have observed. We most 9748 * likely did an improper retransmit as outlined in 9749 * 6.2 Step 2 point 2 in the rack-draft so we 9750 * don't want to update our rack_rtt. We in 9751 * theory (in future) might want to think about reverting our 9752 * cwnd state but we won't for now. 9753 */ 9754 return (0); 9755 } else if (rack->r_ctl.rc_rack_min_rtt) { 9756 /* 9757 * We retransmitted it and the retransmit did the 9758 * job. 9759 */ 9760 if (!rack->r_ctl.rc_rack_min_rtt || 9761 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9762 rack->r_ctl.rc_rack_min_rtt = t; 9763 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9764 rack->r_ctl.rc_rack_min_rtt = 1; 9765 } 9766 } 9767 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9768 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9769 (uint32_t)rsm->r_tim_lastsent[i]))) { 9770 /* New more recent rack_tmit_time */ 9771 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; 9772 if (rack->r_ctl.rc_rack_tmit_time == 0) 9773 rack->r_ctl.rc_rack_tmit_time = 1; 9774 rack->rc_rack_rtt = t; 9775 } 9776 return (1); 9777 } 9778 } 9779 return (0); 9780 } 9781 9782 /* 9783 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 
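 *
 * Sketch of the walk below: starting from rsm we traverse the tmap
 * backwards toward earlier sends, skip entries already ACKed or
 * rwnd-collapsed, mark the passed-over entries RACK_WAS_LOST once
 * more than the calculated threshold has elapsed since rsm's own
 * transmission, and stop at the first entry already flagged
 * RACK_SACK_PASSED since everything before it was handled earlier.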
9784 */ 9785 static void 9786 rack_log_sack_passed(struct tcpcb *tp, 9787 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts) 9788 { 9789 struct rack_sendmap *nrsm; 9790 uint32_t thresh; 9791 9792 /* Get our rxt threshold for lost consideration */ 9793 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(tp, rack), cts, __LINE__, 0); 9794 /* Now start looking at rsm's */ 9795 nrsm = rsm; 9796 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 9797 rack_head, r_tnext) { 9798 if (nrsm == rsm) { 9799 /* Skip original segment he is acked */ 9800 continue; 9801 } 9802 if (nrsm->r_flags & RACK_ACKED) { 9803 /* 9804 * Skip ack'd segments, though we 9805 * should not see these, since tmap 9806 * should not have ack'd segments. 9807 */ 9808 continue; 9809 } 9810 if (nrsm->r_flags & RACK_RWND_COLLAPSED) { 9811 /* 9812 * If the peer dropped the rwnd on 9813 * these then we don't worry about them. 9814 */ 9815 continue; 9816 } 9817 /* Check lost state */ 9818 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { 9819 uint32_t exp; 9820 9821 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; 9822 if (TSTMP_LT(exp, cts) || (exp == cts)) { 9823 /* We consider it lost */ 9824 nrsm->r_flags |= RACK_WAS_LOST; 9825 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; 9826 } 9827 } 9828 if (nrsm->r_flags & RACK_SACK_PASSED) { 9829 /* 9830 * We found one that is already marked 9831 * passed, we have been here before and 9832 * so all others below this are marked. 9833 */ 9834 break; 9835 } 9836 nrsm->r_flags |= RACK_SACK_PASSED; 9837 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 9838 } 9839 } 9840 9841 static void 9842 rack_need_set_test(struct tcpcb *tp, 9843 struct tcp_rack *rack, 9844 struct rack_sendmap *rsm, 9845 tcp_seq th_ack, 9846 int line, 9847 int use_which) 9848 { 9849 struct rack_sendmap *s_rsm; 9850 9851 if ((tp->t_flags & TF_GPUTINPROG) && 9852 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 9853 /* 9854 * We were app limited, and this ack 9855 * butts up or goes beyond the point where we want 9856 * to start our next measurement. We need 9857 * to record the new gput_ts as here and 9858 * possibly update the start sequence. 9859 */ 9860 uint32_t seq, ts; 9861 9862 if (rsm->r_rtr_cnt > 1) { 9863 /* 9864 * This is a retransmit, can we 9865 * really make any assessment at this 9866 * point? We are not really sure of 9867 * the timestamp, is it this or the 9868 * previous transmission? 9869 * 9870 * Lets wait for something better that 9871 * is not retransmitted. 9872 */ 9873 return; 9874 } 9875 seq = tp->gput_seq; 9876 ts = tp->gput_ts; 9877 rack->app_limited_needs_set = 0; 9878 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 9879 /* Do we start at a new end? */ 9880 if ((use_which == RACK_USE_BEG) && 9881 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { 9882 /* 9883 * When we get an ACK that just eats 9884 * up some of the rsm, we set RACK_USE_BEG 9885 * since whats at r_start (i.e. th_ack) 9886 * is left unacked and thats where the 9887 * measurement now starts. 9888 */ 9889 tp->gput_seq = rsm->r_start; 9890 } 9891 if ((use_which == RACK_USE_END) && 9892 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 9893 /* 9894 * We use the end when the cumack 9895 * is moving forward and completely 9896 * deleting the rsm passed so basically 9897 * r_end holds th_ack. 9898 * 9899 * For SACK's we also want to use the end 9900 * since this piece just got sacked and 9901 * we want to target anything after that 9902 * in our measurement. 
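 * A compact summary of the three cases this function handles
 * (illustrative; each assignment only ever moves gput_seq forward):
 *	RACK_USE_BEG          -> gput_seq = rsm->r_start
 *	RACK_USE_END          -> gput_seq = rsm->r_end
 *	RACK_USE_END_OR_THACK -> gput_seq = max(th_ack, rsm->r_end)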
9903 */ 9904 tp->gput_seq = rsm->r_end; 9905 } 9906 if (use_which == RACK_USE_END_OR_THACK) { 9907 /* 9908 * special case for ack moving forward, 9909 * not a sack, we need to move all the 9910 * way up to where this ack cum-ack moves 9911 * to. 9912 */ 9913 if (SEQ_GT(th_ack, rsm->r_end)) 9914 tp->gput_seq = th_ack; 9915 else 9916 tp->gput_seq = rsm->r_end; 9917 } 9918 if (SEQ_LT(tp->gput_seq, tp->snd_max)) 9919 s_rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 9920 else 9921 s_rsm = NULL; 9922 /* 9923 * Pick up the correct send time if we can the rsm passed in 9924 * may be equal to s_rsm if the RACK_USE_BEG was set. For the other 9925 * two cases (RACK_USE_THACK or RACK_USE_END) most likely we will 9926 * find a different seq i.e. the next send up. 9927 * 9928 * If that has not been sent, s_rsm will be NULL and we must 9929 * arrange it so this function will get called again by setting 9930 * app_limited_needs_set. 9931 */ 9932 if (s_rsm) 9933 rack->r_ctl.rc_gp_output_ts = s_rsm->r_tim_lastsent[0]; 9934 else { 9935 /* If we hit here we have to have *not* sent tp->gput_seq */ 9936 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; 9937 /* Set it up so we will go through here again */ 9938 rack->app_limited_needs_set = 1; 9939 } 9940 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { 9941 /* 9942 * We moved beyond this guy's range, re-calculate 9943 * the new end point. 9944 */ 9945 if (rack->rc_gp_filled == 0) { 9946 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 9947 } else { 9948 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 9949 } 9950 } 9951 /* 9952 * We are moving the goal post, we may be able to clear the 9953 * measure_saw_probe_rtt flag. 9954 */ 9955 if ((rack->in_probe_rtt == 0) && 9956 (rack->measure_saw_probe_rtt) && 9957 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 9958 rack->measure_saw_probe_rtt = 0; 9959 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 9960 seq, tp->gput_seq, 9961 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | 9962 (uint64_t)rack->r_ctl.rc_gp_output_ts), 9963 5, line, NULL, 0); 9964 if (rack->rc_gp_filled && 9965 ((tp->gput_ack - tp->gput_seq) < 9966 max(rc_init_window(rack), (MIN_GP_WIN * 9967 ctf_fixed_maxseg(tp))))) { 9968 uint32_t ideal_amount; 9969 9970 ideal_amount = rack_get_measure_window(tp, rack); 9971 if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) { 9972 /* 9973 * There is no sense of continuing this measurement 9974 * because its too small to gain us anything we 9975 * trust. Skip it and that way we can start a new 9976 * measurement quicker. 9977 */ 9978 tp->t_flags &= ~TF_GPUTINPROG; 9979 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 9980 0, 0, 9981 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | 9982 (uint64_t)rack->r_ctl.rc_gp_output_ts), 9983 6, __LINE__, NULL, 0); 9984 } else { 9985 /* 9986 * Reset the window further out. 
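 * i.e. push gput_ack out to gput_seq + ideal_amount, so the
 * measurement still spans the window rack_get_measure_window()
 * asked for.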
9987 */ 9988 tp->gput_ack = tp->gput_seq + ideal_amount; 9989 } 9990 } 9991 rack_tend_gp_marks(tp, rack); 9992 rack_log_gpset(rack, tp->gput_ack, 0, 0, line, 2, rsm); 9993 } 9994 } 9995 9996 static inline int 9997 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) 9998 { 9999 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { 10000 /* Behind our TLP definition or right at */ 10001 return (0); 10002 } 10003 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { 10004 /* The start is beyond or right at our end of TLP definition */ 10005 return (0); 10006 } 10007 /* It has to be a sub-part of the original TLP recorded */ 10008 return (1); 10009 } 10010 10011 static uint32_t 10012 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 10013 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, 10014 uint32_t segsiz) 10015 { 10016 uint32_t start, end, changed = 0; 10017 struct rack_sendmap stack_map; 10018 struct rack_sendmap *rsm, *nrsm, *prev, *next; 10019 int insret __diagused; 10020 int32_t used_ref = 1; 10021 int can_use_hookery = 0; 10022 10023 start = sack->start; 10024 end = sack->end; 10025 rsm = *prsm; 10026 10027 do_rest_ofb: 10028 if ((rsm == NULL) || 10029 (SEQ_LT(end, rsm->r_start)) || 10030 (SEQ_GEQ(start, rsm->r_end)) || 10031 (SEQ_LT(start, rsm->r_start))) { 10032 /* 10033 * We are not in the right spot, 10034 * find the correct spot in the tree. 10035 */ 10036 used_ref = 0; 10037 rsm = tqhash_find(rack->r_ctl.tqh, start); 10038 } 10039 if (rsm == NULL) { 10040 /* TSNH */ 10041 goto out; 10042 } 10043 /* Ok we have an ACK for some piece of this rsm */ 10044 if (rsm->r_start != start) { 10045 if ((rsm->r_flags & RACK_ACKED) == 0) { 10046 /* 10047 * Before any splitting or hookery is 10048 * done is it a TLP of interest i.e. rxt? 10049 */ 10050 if ((rsm->r_flags & RACK_TLP) && 10051 (rsm->r_rtr_cnt > 1)) { 10052 /* 10053 * We are splitting a rxt TLP, check 10054 * if we need to save off the start/end 10055 */ 10056 if (rack->rc_last_tlp_acked_set && 10057 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 10058 /* 10059 * We already turned this on since we are inside 10060 * the previous one was a partially sack now we 10061 * are getting another one (maybe all of it). 10062 * 10063 */ 10064 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 10065 /* 10066 * Lets make sure we have all of it though. 10067 */ 10068 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 10069 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10070 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10071 rack->r_ctl.last_tlp_acked_end); 10072 } 10073 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 10074 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10075 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10076 rack->r_ctl.last_tlp_acked_end); 10077 } 10078 } else { 10079 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10080 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10081 rack->rc_last_tlp_past_cumack = 0; 10082 rack->rc_last_tlp_acked_set = 1; 10083 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 10084 } 10085 } 10086 /** 10087 * Need to split this in two pieces the before and after, 10088 * the before remains in the map, the after must be 10089 * added. 
In other words we have: 10090 * rsm |--------------| 10091 * sackblk |-------> 10092 * rsm will become 10093 * rsm |---| 10094 * and nrsm will be the sacked piece 10095 * nrsm |----------| 10096 * 10097 * But before we start down that path lets 10098 * see if the sack spans over on top of 10099 * the next guy and it is already sacked. 10100 * 10101 */ 10102 /* 10103 * Hookery can only be used if the two entries 10104 * are in the same bucket and neither one of 10105 * them staddle the bucket line. 10106 */ 10107 next = tqhash_next(rack->r_ctl.tqh, rsm); 10108 if (next && 10109 (rsm->bindex == next->bindex) && 10110 ((rsm->r_flags & RACK_STRADDLE) == 0) && 10111 ((next->r_flags & RACK_STRADDLE) == 0) && 10112 ((rsm->r_flags & RACK_IS_PCM) == 0) && 10113 ((next->r_flags & RACK_IS_PCM) == 0) && 10114 (rsm->r_flags & RACK_IN_GP_WIN) && 10115 (next->r_flags & RACK_IN_GP_WIN)) 10116 can_use_hookery = 1; 10117 else 10118 can_use_hookery = 0; 10119 if (next && can_use_hookery && 10120 (next->r_flags & RACK_ACKED) && 10121 SEQ_GEQ(end, next->r_start)) { 10122 /** 10123 * So the next one is already acked, and 10124 * we can thus by hookery use our stack_map 10125 * to reflect the piece being sacked and 10126 * then adjust the two tree entries moving 10127 * the start and ends around. So we start like: 10128 * rsm |------------| (not-acked) 10129 * next |-----------| (acked) 10130 * sackblk |--------> 10131 * We want to end like so: 10132 * rsm |------| (not-acked) 10133 * next |-----------------| (acked) 10134 * nrsm |-----| 10135 * Where nrsm is a temporary stack piece we 10136 * use to update all the gizmos. 10137 */ 10138 /* Copy up our fudge block */ 10139 nrsm = &stack_map; 10140 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 10141 /* Now adjust our tree blocks */ 10142 tqhash_update_end(rack->r_ctl.tqh, rsm, start); 10143 next->r_start = start; 10144 rsm->r_flags |= RACK_SHUFFLED; 10145 next->r_flags |= RACK_SHUFFLED; 10146 /* Now we must adjust back where next->m is */ 10147 rack_setup_offset_for_rsm(rack, rsm, next); 10148 /* 10149 * Which timestamp do we keep? It is rather 10150 * important in GP measurements to have the 10151 * accurate end of the send window. 10152 * 10153 * We keep the largest value, which is the newest 10154 * send. We do this in case a segment that is 10155 * joined together and not part of a GP estimate 10156 * later gets expanded into the GP estimate. 10157 * 10158 * We prohibit the merging of unlike kinds i.e. 10159 * all pieces that are in the GP estimate can be 10160 * merged and all pieces that are not in a GP estimate 10161 * can be merged, but not disimilar pieces. Combine 10162 * this with taking the highest here and we should 10163 * be ok unless of course the client reneges. Then 10164 * all bets are off. 10165 */ 10166 if (next->r_tim_lastsent[(next->r_rtr_cnt-1)] < 10167 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) 10168 next->r_tim_lastsent[(next->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]; 10169 /* 10170 * And we must keep the newest ack arrival time. 
10171 */ 10172 if (next->r_ack_arrival < 10173 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) 10174 next->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10175 10176 10177 /* We don't need to adjust rsm, it did not change */ 10178 /* Clear out the dup ack count of the remainder */ 10179 rsm->r_dupack = 0; 10180 rsm->r_just_ret = 0; 10181 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 10182 /* Now lets make sure our fudge block is right */ 10183 nrsm->r_start = start; 10184 /* Now lets update all the stats and such */ 10185 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 10186 if (rack->app_limited_needs_set) 10187 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 10188 changed += (nrsm->r_end - nrsm->r_start); 10189 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 10190 if (rsm->r_flags & RACK_WAS_LOST) { 10191 int my_chg; 10192 10193 my_chg = (nrsm->r_end - nrsm->r_start); 10194 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 10195 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 10196 if (my_chg <= rack->r_ctl.rc_considered_lost) 10197 rack->r_ctl.rc_considered_lost -= my_chg; 10198 else 10199 rack->r_ctl.rc_considered_lost = 0; 10200 } 10201 if (nrsm->r_flags & RACK_SACK_PASSED) { 10202 rack->r_ctl.rc_reorder_ts = cts; 10203 if (rack->r_ctl.rc_reorder_ts == 0) 10204 rack->r_ctl.rc_reorder_ts = 1; 10205 } 10206 /* 10207 * Now we want to go up from rsm (the 10208 * one left un-acked) to the next one 10209 * in the tmap. We do this so when 10210 * we walk backwards we include marking 10211 * sack-passed on rsm (The one passed in 10212 * is skipped since it is generally called 10213 * on something sacked before removing it 10214 * from the tmap). 10215 */ 10216 if (rsm->r_in_tmap) { 10217 nrsm = TAILQ_NEXT(rsm, r_tnext); 10218 /* 10219 * Now that we have the next 10220 * one walk backwards from there. 10221 */ 10222 if (nrsm && nrsm->r_in_tmap) 10223 rack_log_sack_passed(tp, rack, nrsm, cts); 10224 } 10225 /* Now are we done? */ 10226 if (SEQ_LT(end, next->r_end) || 10227 (end == next->r_end)) { 10228 /* Done with block */ 10229 goto out; 10230 } 10231 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); 10232 counter_u64_add(rack_sack_used_next_merge, 1); 10233 /* Postion for the next block */ 10234 start = next->r_end; 10235 rsm = tqhash_next(rack->r_ctl.tqh, next); 10236 if (rsm == NULL) 10237 goto out; 10238 } else { 10239 /** 10240 * We can't use any hookery here, so we 10241 * need to split the map. We enter like 10242 * so: 10243 * rsm |--------| 10244 * sackblk |-----> 10245 * We will add the new block nrsm and 10246 * that will be the new portion, and then 10247 * fall through after reseting rsm. So we 10248 * split and look like this: 10249 * rsm |----| 10250 * sackblk |-----> 10251 * nrsm |---| 10252 * We then fall through reseting 10253 * rsm to nrsm, so the next block 10254 * picks it up. 10255 */ 10256 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 10257 if (nrsm == NULL) { 10258 /* 10259 * failed XXXrrs what can we do but loose the sack 10260 * info? 
10261 */ 10262 goto out; 10263 } 10264 counter_u64_add(rack_sack_splits, 1); 10265 rack_clone_rsm(rack, nrsm, rsm, start); 10266 rsm->r_just_ret = 0; 10267 #ifndef INVARIANTS 10268 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 10269 #else 10270 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 10271 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 10272 nrsm, insret, rack, rsm); 10273 } 10274 #endif 10275 if (rsm->r_in_tmap) { 10276 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 10277 nrsm->r_in_tmap = 1; 10278 } 10279 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 10280 rsm->r_flags &= (~RACK_HAS_FIN); 10281 /* Position us to point to the new nrsm that starts the sack blk */ 10282 rsm = nrsm; 10283 } 10284 } else { 10285 /* Already sacked this piece */ 10286 counter_u64_add(rack_sack_skipped_acked, 1); 10287 if (end == rsm->r_end) { 10288 /* Done with block */ 10289 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 10290 goto out; 10291 } else if (SEQ_LT(end, rsm->r_end)) { 10292 /* A partial sack to a already sacked block */ 10293 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 10294 goto out; 10295 } else { 10296 /* 10297 * The end goes beyond this guy 10298 * reposition the start to the 10299 * next block. 10300 */ 10301 start = rsm->r_end; 10302 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 10303 if (rsm == NULL) 10304 goto out; 10305 } 10306 } 10307 } 10308 if (SEQ_GEQ(end, rsm->r_end)) { 10309 /** 10310 * The end of this block is either beyond this guy or right 10311 * at this guy. I.e.: 10312 * rsm --- |-----| 10313 * end |-----| 10314 * <or> 10315 * end |---------| 10316 */ 10317 if ((rsm->r_flags & RACK_ACKED) == 0) { 10318 /* 10319 * Is it a TLP of interest? 10320 */ 10321 if ((rsm->r_flags & RACK_TLP) && 10322 (rsm->r_rtr_cnt > 1)) { 10323 /* 10324 * We are splitting a rxt TLP, check 10325 * if we need to save off the start/end 10326 */ 10327 if (rack->rc_last_tlp_acked_set && 10328 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 10329 /* 10330 * We already turned this on since we are inside 10331 * the previous one was a partially sack now we 10332 * are getting another one (maybe all of it). 10333 */ 10334 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 10335 /* 10336 * Lets make sure we have all of it though. 
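 * That is, grow the recorded TLP block to the union of what we had
 * and this rsm: pull last_tlp_acked_start down to r_start and push
 * last_tlp_acked_end up to r_end as needed below.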
10337 */ 10338 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 10339 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10340 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10341 rack->r_ctl.last_tlp_acked_end); 10342 } 10343 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 10344 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10345 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10346 rack->r_ctl.last_tlp_acked_end); 10347 } 10348 } else { 10349 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10350 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10351 rack->rc_last_tlp_past_cumack = 0; 10352 rack->rc_last_tlp_acked_set = 1; 10353 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 10354 } 10355 } 10356 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 10357 changed += (rsm->r_end - rsm->r_start); 10358 /* You get a count for acking a whole segment or more */ 10359 if (rsm->r_flags & RACK_WAS_LOST) { 10360 int my_chg; 10361 10362 my_chg = (rsm->r_end - rsm->r_start); 10363 rsm->r_flags &= ~RACK_WAS_LOST; 10364 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 10365 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 10366 if (my_chg <= rack->r_ctl.rc_considered_lost) 10367 rack->r_ctl.rc_considered_lost -= my_chg; 10368 else 10369 rack->r_ctl.rc_considered_lost = 0; 10370 } 10371 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 10372 if (rsm->r_in_tmap) /* should be true */ 10373 rack_log_sack_passed(tp, rack, rsm, cts); 10374 /* Is Reordering occuring? */ 10375 if (rsm->r_flags & RACK_SACK_PASSED) { 10376 rsm->r_flags &= ~RACK_SACK_PASSED; 10377 rack->r_ctl.rc_reorder_ts = cts; 10378 if (rack->r_ctl.rc_reorder_ts == 0) 10379 rack->r_ctl.rc_reorder_ts = 1; 10380 } 10381 if (rack->app_limited_needs_set) 10382 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 10383 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10384 rsm->r_flags |= RACK_ACKED; 10385 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); 10386 if (rsm->r_in_tmap) { 10387 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10388 rsm->r_in_tmap = 0; 10389 } 10390 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); 10391 } else { 10392 counter_u64_add(rack_sack_skipped_acked, 1); 10393 } 10394 if (end == rsm->r_end) { 10395 /* This block only - done, setup for next */ 10396 goto out; 10397 } 10398 /* 10399 * There is more not coverend by this rsm move on 10400 * to the next block in the tail queue hash table. 10401 */ 10402 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); 10403 start = rsm->r_end; 10404 rsm = nrsm; 10405 if (rsm == NULL) 10406 goto out; 10407 goto do_rest_ofb; 10408 } 10409 /** 10410 * The end of this sack block is smaller than 10411 * our rsm i.e.: 10412 * rsm --- |-----| 10413 * end |--| 10414 */ 10415 if ((rsm->r_flags & RACK_ACKED) == 0) { 10416 /* 10417 * Is it a TLP of interest? 10418 */ 10419 if ((rsm->r_flags & RACK_TLP) && 10420 (rsm->r_rtr_cnt > 1)) { 10421 /* 10422 * We are splitting a rxt TLP, check 10423 * if we need to save off the start/end 10424 */ 10425 if (rack->rc_last_tlp_acked_set && 10426 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 10427 /* 10428 * We already turned this on since we are inside 10429 * the previous one was a partially sack now we 10430 * are getting another one (maybe all of it). 
10431 */ 10432 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 10433 /* 10434 * Lets make sure we have all of it though. 10435 */ 10436 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 10437 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10438 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10439 rack->r_ctl.last_tlp_acked_end); 10440 } 10441 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 10442 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10443 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10444 rack->r_ctl.last_tlp_acked_end); 10445 } 10446 } else { 10447 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10448 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10449 rack->rc_last_tlp_past_cumack = 0; 10450 rack->rc_last_tlp_acked_set = 1; 10451 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 10452 } 10453 } 10454 /* 10455 * Hookery can only be used if the two entries 10456 * are in the same bucket and neither one of 10457 * them staddle the bucket line. 10458 */ 10459 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10460 if (prev && 10461 (rsm->bindex == prev->bindex) && 10462 ((rsm->r_flags & RACK_STRADDLE) == 0) && 10463 ((prev->r_flags & RACK_STRADDLE) == 0) && 10464 ((rsm->r_flags & RACK_IS_PCM) == 0) && 10465 ((prev->r_flags & RACK_IS_PCM) == 0) && 10466 (rsm->r_flags & RACK_IN_GP_WIN) && 10467 (prev->r_flags & RACK_IN_GP_WIN)) 10468 can_use_hookery = 1; 10469 else 10470 can_use_hookery = 0; 10471 if (prev && can_use_hookery && 10472 (prev->r_flags & RACK_ACKED)) { 10473 /** 10474 * Goal, we want the right remainder of rsm to shrink 10475 * in place and span from (rsm->r_start = end) to rsm->r_end. 10476 * We want to expand prev to go all the way 10477 * to prev->r_end <- end. 10478 * so in the tree we have before: 10479 * prev |--------| (acked) 10480 * rsm |-------| (non-acked) 10481 * sackblk |-| 10482 * We churn it so we end up with 10483 * prev |----------| (acked) 10484 * rsm |-----| (non-acked) 10485 * nrsm |-| (temporary) 10486 * 10487 * Note if either prev/rsm is a TLP we don't 10488 * do this. 10489 */ 10490 nrsm = &stack_map; 10491 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 10492 tqhash_update_end(rack->r_ctl.tqh, prev, end); 10493 rsm->r_start = end; 10494 rsm->r_flags |= RACK_SHUFFLED; 10495 prev->r_flags |= RACK_SHUFFLED; 10496 /* Now adjust nrsm (stack copy) to be 10497 * the one that is the small 10498 * piece that was "sacked". 10499 */ 10500 nrsm->r_end = end; 10501 rsm->r_dupack = 0; 10502 /* 10503 * Which timestamp do we keep? It is rather 10504 * important in GP measurements to have the 10505 * accurate end of the send window. 10506 * 10507 * We keep the largest value, which is the newest 10508 * send. We do this in case a segment that is 10509 * joined together and not part of a GP estimate 10510 * later gets expanded into the GP estimate. 10511 * 10512 * We prohibit the merging of unlike kinds i.e. 10513 * all pieces that are in the GP estimate can be 10514 * merged and all pieces that are not in a GP estimate 10515 * can be merged, but not disimilar pieces. Combine 10516 * this with taking the highest here and we should 10517 * be ok unless of course the client reneges. Then 10518 * all bets are off. 
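 * Concretely (matching the code below, with last = r_rtr_cnt - 1):
 * when prev absorbs the sacked piece we keep
 *	prev->r_tim_lastsent[last] =
 *	    max(prev->r_tim_lastsent[last], nrsm->r_tim_lastsent[last])
 * and likewise the newest r_ack_arrival, so the expanded entry
 * carries the newest send time and newest ack arrival time.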
10519 */ 10520 if(prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] < 10521 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) { 10522 prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 10523 } 10524 /* 10525 * And we must keep the newest ack arrival time. 10526 */ 10527 10528 if(prev->r_ack_arrival < 10529 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) 10530 prev->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10531 10532 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 10533 /* 10534 * Now that the rsm has had its start moved forward 10535 * lets go ahead and get its new place in the world. 10536 */ 10537 rack_setup_offset_for_rsm(rack, prev, rsm); 10538 /* 10539 * Now nrsm is our new little piece 10540 * that is acked (which was merged 10541 * to prev). Update the rtt and changed 10542 * based on that. Also check for reordering. 10543 */ 10544 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 10545 if (rack->app_limited_needs_set) 10546 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 10547 changed += (nrsm->r_end - nrsm->r_start); 10548 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 10549 if (rsm->r_flags & RACK_WAS_LOST) { 10550 int my_chg; 10551 10552 my_chg = (nrsm->r_end - nrsm->r_start); 10553 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 10554 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 10555 if (my_chg <= rack->r_ctl.rc_considered_lost) 10556 rack->r_ctl.rc_considered_lost -= my_chg; 10557 else 10558 rack->r_ctl.rc_considered_lost = 0; 10559 } 10560 if (nrsm->r_flags & RACK_SACK_PASSED) { 10561 rack->r_ctl.rc_reorder_ts = cts; 10562 if (rack->r_ctl.rc_reorder_ts == 0) 10563 rack->r_ctl.rc_reorder_ts = 1; 10564 } 10565 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); 10566 rsm = prev; 10567 counter_u64_add(rack_sack_used_prev_merge, 1); 10568 } else { 10569 /** 10570 * This is the case where our previous 10571 * block is not acked either, so we must 10572 * split the block in two. 10573 */ 10574 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 10575 if (nrsm == NULL) { 10576 /* failed rrs what can we do but loose the sack info? */ 10577 goto out; 10578 } 10579 if ((rsm->r_flags & RACK_TLP) && 10580 (rsm->r_rtr_cnt > 1)) { 10581 /* 10582 * We are splitting a rxt TLP, check 10583 * if we need to save off the start/end 10584 */ 10585 if (rack->rc_last_tlp_acked_set && 10586 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 10587 /* 10588 * We already turned this on since this block is inside 10589 * the previous one was a partially sack now we 10590 * are getting another one (maybe all of it). 10591 */ 10592 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 10593 /* 10594 * Lets make sure we have all of it though. 
10595 */ 10596 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 10597 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10598 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10599 rack->r_ctl.last_tlp_acked_end); 10600 } 10601 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 10602 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10603 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10604 rack->r_ctl.last_tlp_acked_end); 10605 } 10606 } else { 10607 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10608 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10609 rack->rc_last_tlp_acked_set = 1; 10610 rack->rc_last_tlp_past_cumack = 0; 10611 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 10612 } 10613 } 10614 /** 10615 * In this case nrsm becomes 10616 * nrsm->r_start = end; 10617 * nrsm->r_end = rsm->r_end; 10618 * which is un-acked. 10619 * <and> 10620 * rsm->r_end = nrsm->r_start; 10621 * i.e. the remaining un-acked 10622 * piece is left on the left 10623 * hand side. 10624 * 10625 * So we start like this 10626 * rsm |----------| (not acked) 10627 * sackblk |---| 10628 * build it so we have 10629 * rsm |---| (acked) 10630 * nrsm |------| (not acked) 10631 */ 10632 counter_u64_add(rack_sack_splits, 1); 10633 rack_clone_rsm(rack, nrsm, rsm, end); 10634 rsm->r_flags &= (~RACK_HAS_FIN); 10635 rsm->r_just_ret = 0; 10636 #ifndef INVARIANTS 10637 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 10638 #else 10639 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 10640 panic("Insert in tailq_hash of %p fails ret:% rack:%p rsm:%p", 10641 nrsm, insret, rack, rsm); 10642 } 10643 #endif 10644 if (rsm->r_in_tmap) { 10645 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 10646 nrsm->r_in_tmap = 1; 10647 } 10648 nrsm->r_dupack = 0; 10649 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 10650 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 10651 changed += (rsm->r_end - rsm->r_start); 10652 if (rsm->r_flags & RACK_WAS_LOST) { 10653 int my_chg; 10654 10655 my_chg = (rsm->r_end - rsm->r_start); 10656 rsm->r_flags &= ~RACK_WAS_LOST; 10657 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 10658 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 10659 if (my_chg <= rack->r_ctl.rc_considered_lost) 10660 rack->r_ctl.rc_considered_lost -= my_chg; 10661 else 10662 rack->r_ctl.rc_considered_lost = 0; 10663 } 10664 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 10665 10666 if (rsm->r_in_tmap) /* should be true */ 10667 rack_log_sack_passed(tp, rack, rsm, cts); 10668 /* Is Reordering occuring? */ 10669 if (rsm->r_flags & RACK_SACK_PASSED) { 10670 rsm->r_flags &= ~RACK_SACK_PASSED; 10671 rack->r_ctl.rc_reorder_ts = cts; 10672 if (rack->r_ctl.rc_reorder_ts == 0) 10673 rack->r_ctl.rc_reorder_ts = 1; 10674 } 10675 if (rack->app_limited_needs_set) 10676 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 10677 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10678 rsm->r_flags |= RACK_ACKED; 10679 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); 10680 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__); 10681 if (rsm->r_in_tmap) { 10682 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10683 rsm->r_in_tmap = 0; 10684 } 10685 } 10686 } else if (start != end){ 10687 /* 10688 * The block was already acked. 
10689 */ 10690 counter_u64_add(rack_sack_skipped_acked, 1); 10691 } 10692 out: 10693 if (rsm && 10694 ((rsm->r_flags & RACK_TLP) == 0) && 10695 (rsm->r_flags & RACK_ACKED)) { 10696 /* 10697 * Now can we merge where we worked 10698 * with either the previous or 10699 * next block? 10700 */ 10701 next = tqhash_next(rack->r_ctl.tqh, rsm); 10702 while (next) { 10703 if (next->r_flags & RACK_TLP) 10704 break; 10705 /* Only allow merges between ones in or out of GP window */ 10706 if ((next->r_flags & RACK_IN_GP_WIN) && 10707 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { 10708 break; 10709 } 10710 if ((rsm->r_flags & RACK_IN_GP_WIN) && 10711 ((next->r_flags & RACK_IN_GP_WIN) == 0)) { 10712 break; 10713 } 10714 if (rsm->bindex != next->bindex) 10715 break; 10716 if (rsm->r_flags & RACK_STRADDLE) 10717 break; 10718 if (rsm->r_flags & RACK_IS_PCM) 10719 break; 10720 if (next->r_flags & RACK_STRADDLE) 10721 break; 10722 if (next->r_flags & RACK_IS_PCM) 10723 break; 10724 if (next->r_flags & RACK_ACKED) { 10725 /* yep this and next can be merged */ 10726 rsm = rack_merge_rsm(rack, rsm, next); 10727 next = tqhash_next(rack->r_ctl.tqh, rsm); 10728 } else 10729 break; 10730 } 10731 /* Now what about the previous? */ 10732 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10733 while (prev) { 10734 if (prev->r_flags & RACK_TLP) 10735 break; 10736 /* Only allow merges between ones in or out of GP window */ 10737 if ((prev->r_flags & RACK_IN_GP_WIN) && 10738 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { 10739 break; 10740 } 10741 if ((rsm->r_flags & RACK_IN_GP_WIN) && 10742 ((prev->r_flags & RACK_IN_GP_WIN) == 0)) { 10743 break; 10744 } 10745 if (rsm->bindex != prev->bindex) 10746 break; 10747 if (rsm->r_flags & RACK_STRADDLE) 10748 break; 10749 if (rsm->r_flags & RACK_IS_PCM) 10750 break; 10751 if (prev->r_flags & RACK_STRADDLE) 10752 break; 10753 if (prev->r_flags & RACK_IS_PCM) 10754 break; 10755 if (prev->r_flags & RACK_ACKED) { 10756 /* yep the previous and this can be merged */ 10757 rsm = rack_merge_rsm(rack, prev, rsm); 10758 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10759 } else 10760 break; 10761 } 10762 } 10763 if (used_ref == 0) { 10764 counter_u64_add(rack_sack_proc_all, 1); 10765 } else { 10766 counter_u64_add(rack_sack_proc_short, 1); 10767 } 10768 /* Save off the next one for quick reference. 
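 * (rc_sacklast remembers where this SACK block ended so the next
 * block processed can usually resume from there instead of doing a
 * fresh lookup; the used_ref accounting above counts how often that
 * shortcut worked.)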
*/ 10769 nrsm = tqhash_find(rack->r_ctl.tqh, end); 10770 *prsm = rack->r_ctl.rc_sacklast = nrsm; 10771 if (IN_RECOVERY(tp->t_flags)) { 10772 rack->r_ctl.bytes_acked_in_recovery += changed; 10773 } 10774 return (changed); 10775 } 10776 10777 static void inline 10778 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 10779 { 10780 struct rack_sendmap *tmap; 10781 10782 tmap = NULL; 10783 while (rsm && (rsm->r_flags & RACK_ACKED)) { 10784 /* Its no longer sacked, mark it so */ 10785 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 10786 #ifdef INVARIANTS 10787 if (rsm->r_in_tmap) { 10788 panic("rack:%p rsm:%p flags:0x%x in tmap?", 10789 rack, rsm, rsm->r_flags); 10790 } 10791 #endif 10792 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 10793 /* Rebuild it into our tmap */ 10794 if (tmap == NULL) { 10795 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10796 tmap = rsm; 10797 } else { 10798 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 10799 tmap = rsm; 10800 } 10801 tmap->r_in_tmap = 1; 10802 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 10803 } 10804 /* 10805 * Now lets possibly clear the sack filter so we start 10806 * recognizing sacks that cover this area. 10807 */ 10808 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 10809 10810 } 10811 10812 10813 static void inline 10814 rack_rsm_sender_update(struct tcp_rack *rack, struct tcpcb *tp, struct rack_sendmap *rsm, uint8_t from) 10815 { 10816 /* 10817 * We look at advancing the end send time for our GP 10818 * measurement tracking only as the cumulative acknowledgment 10819 * moves forward. You might wonder about this, why not 10820 * at every transmission or retransmission within the 10821 * GP window update the rc_gp_cumack_ts? Well its rather 10822 * nuanced but basically the GP window *may* expand (as 10823 * it does below) or worse and harder to track it may shrink. 10824 * 10825 * This last makes it impossible to track at the time of 10826 * the send, since you may set forward your rc_gp_cumack_ts 10827 * when you send, because that send *is* in your currently 10828 * "guessed" window, but then it shrinks. Now which was 10829 * the send time of the last bytes in the window, by the 10830 * time you ask that question that part of the sendmap 10831 * is freed. So you don't know and you will have too 10832 * long of send window. Instead by updating the time 10833 * marker only when the cumack advances this assures us 10834 * that we will have only the sends in the window of our 10835 * GP measurement. 10836 * 10837 * Another complication from this is the 10838 * merging of sendmap entries. During SACK processing this 10839 * can happen to conserve the sendmap size. That breaks 10840 * everything down in tracking the send window of the GP 10841 * estimate. So to prevent that and keep it working with 10842 * a tiny bit more limited merging, we only allow like 10843 * types to be merged. I.e. if two sends are in the GP window 10844 * then its ok to merge them together. If two sends are not 10845 * in the GP window its ok to merge them together too. Though 10846 * one send in and one send out cannot be merged. We combine 10847 * this with never allowing the shrinking of the GP window when 10848 * we are in recovery so that we can properly calculate the 10849 * sending times. 10850 * 10851 * This all of course seems complicated, because it is.. :) 10852 * 10853 * The cum-ack is being advanced upon the sendmap. 
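 * Short version of the rule above (illustrative): rc_gp_cumack_ts
 * is only ever advanced here, on a cum-ack, and only when the rsm
 * lies inside the current GP window (rack_in_gp_window()) and its
 * newest send time is later than the value we already hold.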
10854 * If we are not doing a GP estimate don't 10855 * proceed. 10856 */ 10857 uint64_t ts; 10858 10859 if ((tp->t_flags & TF_GPUTINPROG) == 0) 10860 return; 10861 /* 10862 * If this sendmap entry is going 10863 * beyond the measurement window we had picked, 10864 * expand the measurement window by that much. 10865 */ 10866 if (SEQ_GT(rsm->r_end, tp->gput_ack)) { 10867 tp->gput_ack = rsm->r_end; 10868 } 10869 /* 10870 * If we have not setup a ack, then we 10871 * have no idea if the newly acked pieces 10872 * will be "in our seq measurement range". If 10873 * it is when we clear the app_limited_needs_set 10874 * flag the timestamp will be updated. 10875 */ 10876 if (rack->app_limited_needs_set) 10877 return; 10878 /* 10879 * Finally, we grab out the latest timestamp 10880 * that this packet was sent and then see 10881 * if: 10882 * a) The packet touches are newly defined GP range. 10883 * b) The time is greater than (newer) than the 10884 * one we currently have. If so we update 10885 * our sending end time window. 10886 * 10887 * Note we *do not* do this at send time. The reason 10888 * is that if you do you *may* pick up a newer timestamp 10889 * for a range you are not going to measure. We project 10890 * out how far and then sometimes modify that to be 10891 * smaller. If that occurs then you will have a send 10892 * that does not belong to the range included. 10893 */ 10894 if ((ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) <= 10895 rack->r_ctl.rc_gp_cumack_ts) 10896 return; 10897 if (rack_in_gp_window(tp, rsm)) { 10898 rack->r_ctl.rc_gp_cumack_ts = ts; 10899 rack_log_gpset(rack, tp->gput_ack, (uint32_t)ts, rsm->r_end, 10900 __LINE__, from, rsm); 10901 } 10902 } 10903 10904 static void 10905 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to, uint64_t acktime) 10906 { 10907 struct rack_sendmap *rsm; 10908 /* 10909 * The ACK point is advancing to th_ack, we must drop off 10910 * the packets in the rack log and calculate any eligble 10911 * RTT's. 10912 */ 10913 10914 if (sack_filter_blks_used(&rack->r_ctl.rack_sf)) { 10915 /* 10916 * If we have some sack blocks in the filter 10917 * lets prune them out by calling sfb with no blocks. 10918 */ 10919 sack_filter_blks(tp, &rack->r_ctl.rack_sf, NULL, 0, th_ack); 10920 } 10921 if (SEQ_GT(th_ack, tp->snd_una)) { 10922 /* Clear any app ack remembered settings */ 10923 rack->r_ctl.cleared_app_ack = 0; 10924 } 10925 rack->r_wanted_output = 1; 10926 if (SEQ_GT(th_ack, tp->snd_una)) 10927 rack->r_ctl.last_cumack_advance = acktime; 10928 10929 /* Tend any TLP that has been marked for 1/2 the seq space (its old) */ 10930 if ((rack->rc_last_tlp_acked_set == 1)&& 10931 (rack->rc_last_tlp_past_cumack == 1) && 10932 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { 10933 /* 10934 * We have reached the point where our last rack 10935 * tlp retransmit sequence is ahead of the cum-ack. 10936 * This can only happen when the cum-ack moves all 10937 * the way around (its been a full 2^^31+1 bytes 10938 * or more since we sent a retransmitted TLP). Lets 10939 * turn off the valid flag since its not really valid. 10940 * 10941 * Note since sack's also turn on this event we have 10942 * a complication, we have to wait to age it out until 10943 * the cum-ack is by the TLP before checking which is 10944 * what the next else clause does. 
10945 */ 10946 rack_log_dsack_event(rack, 9, __LINE__, 10947 rack->r_ctl.last_tlp_acked_start, 10948 rack->r_ctl.last_tlp_acked_end); 10949 rack->rc_last_tlp_acked_set = 0; 10950 rack->rc_last_tlp_past_cumack = 0; 10951 } else if ((rack->rc_last_tlp_acked_set == 1) && 10952 (rack->rc_last_tlp_past_cumack == 0) && 10953 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { 10954 /* 10955 * It is safe to start aging TLP's out. 10956 */ 10957 rack->rc_last_tlp_past_cumack = 1; 10958 } 10959 /* We do the same for the tlp send seq as well */ 10960 if ((rack->rc_last_sent_tlp_seq_valid == 1) && 10961 (rack->rc_last_sent_tlp_past_cumack == 1) && 10962 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { 10963 rack_log_dsack_event(rack, 9, __LINE__, 10964 rack->r_ctl.last_sent_tlp_seq, 10965 (rack->r_ctl.last_sent_tlp_seq + 10966 rack->r_ctl.last_sent_tlp_len)); 10967 rack->rc_last_sent_tlp_seq_valid = 0; 10968 rack->rc_last_sent_tlp_past_cumack = 0; 10969 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && 10970 (rack->rc_last_sent_tlp_past_cumack == 0) && 10971 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { 10972 /* 10973 * It is safe to start aging TLP's send. 10974 */ 10975 rack->rc_last_sent_tlp_past_cumack = 1; 10976 } 10977 more: 10978 rsm = tqhash_min(rack->r_ctl.tqh); 10979 if (rsm == NULL) { 10980 if ((th_ack - 1) == tp->iss) { 10981 /* 10982 * For the SYN incoming case we will not 10983 * have called tcp_output for the sending of 10984 * the SYN, so there will be no map. All 10985 * other cases should probably be a panic. 10986 */ 10987 return; 10988 } 10989 if (tp->t_flags & TF_SENTFIN) { 10990 /* if we sent a FIN we often will not have map */ 10991 return; 10992 } 10993 #ifdef INVARIANTS 10994 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u\n", 10995 tp, 10996 tp->t_state, th_ack, rack, 10997 tp->snd_una, tp->snd_max); 10998 #endif 10999 return; 11000 } 11001 if (SEQ_LT(th_ack, rsm->r_start)) { 11002 /* Huh map is missing this */ 11003 #ifdef INVARIANTS 11004 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 11005 rsm->r_start, 11006 th_ack, tp->t_state, rack->r_state); 11007 #endif 11008 return; 11009 } 11010 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); 11011 11012 /* Now was it a retransmitted TLP? */ 11013 if ((rsm->r_flags & RACK_TLP) && 11014 (rsm->r_rtr_cnt > 1)) { 11015 /* 11016 * Yes, this rsm was a TLP and retransmitted, remember that 11017 * since if a DSACK comes back on this we don't want 11018 * to think of it as a reordered segment. This may 11019 * get updated again with possibly even other TLPs 11020 * in flight, but thats ok. Only when we don't send 11021 * a retransmitted TLP for 1/2 the sequences space 11022 * will it get turned off (above). 11023 */ 11024 if (rack->rc_last_tlp_acked_set && 11025 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 11026 /* 11027 * We already turned this on since the end matches, 11028 * the previous one was a partially ack now we 11029 * are getting another one (maybe all of it). 11030 */ 11031 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 11032 /* 11033 * Lets make sure we have all of it though. 
11034 */ 11035 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 11036 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 11037 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 11038 rack->r_ctl.last_tlp_acked_end); 11039 } 11040 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 11041 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 11042 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 11043 rack->r_ctl.last_tlp_acked_end); 11044 } 11045 } else { 11046 rack->rc_last_tlp_past_cumack = 1; 11047 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 11048 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 11049 rack->rc_last_tlp_acked_set = 1; 11050 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 11051 } 11052 } 11053 /* Now do we consume the whole thing? */ 11054 rack->r_ctl.last_tmit_time_acked = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 11055 if (SEQ_GEQ(th_ack, rsm->r_end)) { 11056 /* Its all consumed. */ 11057 uint32_t left; 11058 uint8_t newly_acked; 11059 11060 if (rsm->r_flags & RACK_WAS_LOST) { 11061 /* 11062 * This can happen when we marked it as lost 11063 * and yet before retransmitting we get an ack 11064 * which can happen due to reordering. 11065 */ 11066 rsm->r_flags &= ~RACK_WAS_LOST; 11067 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), 11068 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 11069 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) 11070 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; 11071 else 11072 rack->r_ctl.rc_considered_lost = 0; 11073 } 11074 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 11075 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 11076 rsm->r_rtr_bytes = 0; 11077 /* 11078 * Record the time of highest cumack sent if its in our measurement 11079 * window and possibly bump out the end. 11080 */ 11081 rack_rsm_sender_update(rack, tp, rsm, 4); 11082 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); 11083 if (rsm->r_in_tmap) { 11084 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 11085 rsm->r_in_tmap = 0; 11086 } 11087 newly_acked = 1; 11088 if (((rsm->r_flags & RACK_ACKED) == 0) && 11089 (IN_RECOVERY(tp->t_flags))) { 11090 rack->r_ctl.bytes_acked_in_recovery += (rsm->r_end - rsm->r_start); 11091 } 11092 if (rsm->r_flags & RACK_ACKED) { 11093 /* 11094 * It was acked on the scoreboard -- remove 11095 * it from total 11096 */ 11097 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 11098 newly_acked = 0; 11099 } else if (rsm->r_flags & RACK_SACK_PASSED) { 11100 /* 11101 * There are segments ACKED on the 11102 * scoreboard further up. We are seeing 11103 * reordering. 11104 */ 11105 rsm->r_flags &= ~RACK_SACK_PASSED; 11106 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 11107 rsm->r_flags |= RACK_ACKED; 11108 rack->r_ctl.rc_reorder_ts = cts; 11109 if (rack->r_ctl.rc_reorder_ts == 0) 11110 rack->r_ctl.rc_reorder_ts = 1; 11111 if (rack->r_ent_rec_ns) { 11112 /* 11113 * We have sent no more, and we saw an sack 11114 * then ack arrive. 
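 * So flag r_might_revert; rack_handle_might_revert() will undo the
 * congestion-state change later if nothing still in the tmap is
 * marked RACK_SACK_PASSED.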
11115 */ 11116 rack->r_might_revert = 1; 11117 } 11118 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); 11119 } else { 11120 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); 11121 } 11122 if ((rsm->r_flags & RACK_TO_REXT) && 11123 (tp->t_flags & TF_RCVD_TSTMP) && 11124 (to->to_flags & TOF_TS) && 11125 (to->to_tsecr != 0) && 11126 (tp->t_flags & TF_PREVVALID)) { 11127 /* 11128 * We can use the timestamp to see 11129 * if this retransmission was from the 11130 * first transmit. If so we made a mistake. 11131 */ 11132 tp->t_flags &= ~TF_PREVVALID; 11133 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 11134 /* The first transmit is what this ack is for */ 11135 rack_cong_signal(tp, CC_RTO_ERR, th_ack, __LINE__); 11136 } 11137 } 11138 left = th_ack - rsm->r_end; 11139 if (rack->app_limited_needs_set && newly_acked) 11140 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 11141 /* Free back to zone */ 11142 rack_free(rack, rsm); 11143 if (left) { 11144 goto more; 11145 } 11146 /* Check for reneging */ 11147 rsm = tqhash_min(rack->r_ctl.tqh); 11148 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 11149 /* 11150 * The peer has moved snd_una up to 11151 * the edge of this send, i.e. one 11152 * that it had previously acked. The only 11153 * way that can be true if the peer threw 11154 * away data (space issues) that it had 11155 * previously sacked (else it would have 11156 * given us snd_una up to (rsm->r_end). 11157 * We need to undo the acked markings here. 11158 * 11159 * Note we have to look to make sure th_ack is 11160 * our rsm->r_start in case we get an old ack 11161 * where th_ack is behind snd_una. 11162 */ 11163 rack_peer_reneges(rack, rsm, th_ack); 11164 } 11165 return; 11166 } 11167 if (rsm->r_flags & RACK_ACKED) { 11168 /* 11169 * It was acked on the scoreboard -- remove it from 11170 * total for the part being cum-acked. 11171 */ 11172 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 11173 } else { 11174 if (((rsm->r_flags & RACK_ACKED) == 0) && 11175 (IN_RECOVERY(tp->t_flags))) { 11176 rack->r_ctl.bytes_acked_in_recovery += (th_ack - rsm->r_start); 11177 } 11178 rack_update_pcm_ack(rack, 1, rsm->r_start, th_ack); 11179 } 11180 /* And what about the lost flag? */ 11181 if (rsm->r_flags & RACK_WAS_LOST) { 11182 /* 11183 * This can happen when we marked it as lost 11184 * and yet before retransmitting we get an ack 11185 * which can happen due to reordering. In this 11186 * case its only a partial ack of the send. 11187 */ 11188 KASSERT((rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)), 11189 ("rsm:%p rack:%p rc_considered_lost goes negative th_ack:%u", rsm, rack, th_ack)); 11190 if (rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)) 11191 rack->r_ctl.rc_considered_lost -= th_ack - rsm->r_start; 11192 else 11193 rack->r_ctl.rc_considered_lost = 0; 11194 } 11195 /* 11196 * Clear the dup ack count for 11197 * the piece that remains. 11198 */ 11199 rsm->r_dupack = 0; 11200 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 11201 if (rsm->r_rtr_bytes) { 11202 /* 11203 * It was retransmitted adjust the 11204 * sack holes for what was acked. 11205 */ 11206 int ack_am; 11207 11208 ack_am = (th_ack - rsm->r_start); 11209 if (ack_am >= rsm->r_rtr_bytes) { 11210 rack->r_ctl.rc_holes_rxt -= ack_am; 11211 rsm->r_rtr_bytes -= ack_am; 11212 } 11213 } 11214 /* 11215 * Update where the piece starts and record 11216 * the time of send of highest cumack sent if 11217 * its in our GP range. 
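 * This is the partial-ack path: the mbuf offset (soff) below and,
 * via tqhash_trim(), r_start are both advanced by
 * (th_ack - r_start) so the remaining rsm begins exactly at the
 * new cum-ack point.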
11218 */ 11219 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 11220 /* Now we need to move our offset forward too */ 11221 if (rsm->m && 11222 ((rsm->orig_m_len != rsm->m->m_len) || 11223 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { 11224 /* Fix up the orig_m_len and possibly the mbuf offset */ 11225 rack_adjust_orig_mlen(rsm); 11226 } 11227 rsm->soff += (th_ack - rsm->r_start); 11228 rack_rsm_sender_update(rack, tp, rsm, 5); 11229 /* The trim will move th_ack into r_start for us */ 11230 tqhash_trim(rack->r_ctl.tqh, th_ack); 11231 /* Now do we need to move the mbuf fwd too? */ 11232 { 11233 struct mbuf *m; 11234 uint32_t soff; 11235 11236 m = rsm->m; 11237 soff = rsm->soff; 11238 if (m) { 11239 while (soff >= m->m_len) { 11240 soff -= m->m_len; 11241 KASSERT((m->m_next != NULL), 11242 (" rsm:%p off:%u soff:%u m:%p", 11243 rsm, rsm->soff, soff, m)); 11244 m = m->m_next; 11245 if (m == NULL) { 11246 /* 11247 * This is a fall-back that prevents a panic. In reality 11248 * we should be able to walk the mbuf's and find our place. 11249 * At this point snd_una has not been updated with the sbcut() yet 11250 * but tqhash_trim did update rsm->r_start so the offset calcuation 11251 * should work fine. This is undesirable since we will take cache 11252 * hits to access the socket buffer. And even more puzzling is that 11253 * it happens occasionally. It should not :( 11254 */ 11255 m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 11256 (rsm->r_start - tp->snd_una), 11257 &soff); 11258 break; 11259 } 11260 } 11261 /* 11262 * Now save in our updated values. 11263 */ 11264 rsm->m = m; 11265 rsm->soff = soff; 11266 rsm->orig_m_len = rsm->m->m_len; 11267 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 11268 } 11269 } 11270 if (rack->app_limited_needs_set && 11271 SEQ_GEQ(th_ack, tp->gput_seq)) 11272 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 11273 } 11274 11275 static void 11276 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 11277 { 11278 struct rack_sendmap *rsm; 11279 int sack_pass_fnd = 0; 11280 11281 if (rack->r_might_revert) { 11282 /* 11283 * Ok we have reordering, have not sent anything, we 11284 * might want to revert the congestion state if nothing 11285 * further has SACK_PASSED on it. Lets check. 11286 * 11287 * We also get here when we have DSACKs come in for 11288 * all the data that we FR'd. Note that a rxt or tlp 11289 * timer clears this from happening. 11290 */ 11291 11292 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 11293 if (rsm->r_flags & RACK_SACK_PASSED) { 11294 sack_pass_fnd = 1; 11295 break; 11296 } 11297 } 11298 if (sack_pass_fnd == 0) { 11299 /* 11300 * We went into recovery 11301 * incorrectly due to reordering! 11302 */ 11303 int orig_cwnd; 11304 11305 rack->r_ent_rec_ns = 0; 11306 orig_cwnd = tp->snd_cwnd; 11307 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 11308 tp->snd_recover = tp->snd_una; 11309 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 11310 if (IN_RECOVERY(tp->t_flags)) { 11311 rack_exit_recovery(tp, rack, 3); 11312 if ((rack->rto_from_rec == 1) && (rack_ssthresh_rest_rto_rec != 0) ){ 11313 /* 11314 * We were in recovery, had an RTO 11315 * and then re-entered recovery (more sack's arrived) 11316 * and we have properly recorded the old ssthresh from 11317 * the first recovery. We want to be able to slow-start 11318 * back to this level. The ssthresh from the timeout 11319 * and then back into recovery will end up most likely 11320 * to be min(cwnd=1mss, 2mss). 
Which makes it basically 11321 * so we get no slow-start after our RTO. 11322 */ 11323 rack->rto_from_rec = 0; 11324 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) 11325 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; 11326 } 11327 } 11328 rack->r_ctl.bytes_acked_in_recovery = 0; 11329 rack->r_ctl.time_entered_recovery = 0; 11330 } 11331 rack->r_might_revert = 0; 11332 } 11333 } 11334 11335 11336 static int 11337 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 11338 { 11339 11340 uint32_t am, l_end; 11341 int was_tlp = 0; 11342 11343 if (SEQ_GT(end, start)) 11344 am = end - start; 11345 else 11346 am = 0; 11347 if ((rack->rc_last_tlp_acked_set ) && 11348 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && 11349 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { 11350 /* 11351 * The DSACK is because of a TLP which we don't 11352 * do anything with the reordering window over since 11353 * it was not reordering that caused the DSACK but 11354 * our previous retransmit TLP. 11355 */ 11356 rack_log_dsack_event(rack, 7, __LINE__, start, end); 11357 was_tlp = 1; 11358 goto skip_dsack_round; 11359 } 11360 if (rack->rc_last_sent_tlp_seq_valid) { 11361 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; 11362 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && 11363 (SEQ_LEQ(end, l_end))) { 11364 /* 11365 * This dsack is from the last sent TLP, ignore it 11366 * for reordering purposes. 11367 */ 11368 rack_log_dsack_event(rack, 7, __LINE__, start, end); 11369 was_tlp = 1; 11370 goto skip_dsack_round; 11371 } 11372 } 11373 if (rack->rc_dsack_round_seen == 0) { 11374 rack->rc_dsack_round_seen = 1; 11375 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; 11376 rack->r_ctl.num_dsack++; 11377 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ 11378 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); 11379 } 11380 skip_dsack_round: 11381 /* 11382 * We keep track of how many DSACK blocks we get 11383 * after a recovery incident. 11384 */ 11385 rack->r_ctl.dsack_byte_cnt += am; 11386 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 11387 rack->r_ctl.retran_during_recovery && 11388 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 11389 /* 11390 * False recovery most likely culprit is reordering. If 11391 * nothing else is missing we need to revert. 11392 */ 11393 rack->r_might_revert = 1; 11394 rack_handle_might_revert(rack->rc_tp, rack); 11395 rack->r_might_revert = 0; 11396 rack->r_ctl.retran_during_recovery = 0; 11397 rack->r_ctl.dsack_byte_cnt = 0; 11398 } 11399 return (was_tlp); 11400 } 11401 11402 static uint32_t 11403 do_rack_compute_pipe(struct tcpcb *tp, struct tcp_rack *rack, uint32_t snd_una) 11404 { 11405 return (((tp->snd_max - snd_una) - 11406 (rack->r_ctl.rc_sacked + rack->r_ctl.rc_considered_lost)) + rack->r_ctl.rc_holes_rxt); 11407 } 11408 11409 static int32_t 11410 rack_compute_pipe(struct tcpcb *tp) 11411 { 11412 return ((int32_t)do_rack_compute_pipe(tp, 11413 (struct tcp_rack *)tp->t_fb_ptr, 11414 tp->snd_una)); 11415 } 11416 11417 static void 11418 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 11419 { 11420 /* Deal with changed and PRR here (in recovery only) */ 11421 uint32_t pipe, snd_una; 11422 11423 rack->r_ctl.rc_prr_delivered += changed; 11424 11425 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 11426 /* 11427 * It is all outstanding, we are application limited 11428 * and thus we don't need more room to send anything. 
11429 * Note we use tp->snd_una here and not th_ack because 11430 * the data as yet not been cut from the sb. 11431 */ 11432 rack->r_ctl.rc_prr_sndcnt = 0; 11433 return; 11434 } 11435 /* Compute prr_sndcnt */ 11436 if (SEQ_GT(tp->snd_una, th_ack)) { 11437 snd_una = tp->snd_una; 11438 } else { 11439 snd_una = th_ack; 11440 } 11441 pipe = do_rack_compute_pipe(tp, rack, snd_una); 11442 if (pipe > tp->snd_ssthresh) { 11443 long sndcnt; 11444 11445 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 11446 if (rack->r_ctl.rc_prr_recovery_fs > 0) 11447 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 11448 else { 11449 rack->r_ctl.rc_prr_sndcnt = 0; 11450 rack_log_to_prr(rack, 9, 0, __LINE__); 11451 sndcnt = 0; 11452 } 11453 sndcnt++; 11454 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 11455 sndcnt -= rack->r_ctl.rc_prr_out; 11456 else 11457 sndcnt = 0; 11458 rack->r_ctl.rc_prr_sndcnt = sndcnt; 11459 rack_log_to_prr(rack, 10, 0, __LINE__); 11460 } else { 11461 uint32_t limit; 11462 11463 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 11464 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 11465 else 11466 limit = 0; 11467 if (changed > limit) 11468 limit = changed; 11469 limit += ctf_fixed_maxseg(tp); 11470 if (tp->snd_ssthresh > pipe) { 11471 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 11472 rack_log_to_prr(rack, 11, 0, __LINE__); 11473 } else { 11474 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 11475 rack_log_to_prr(rack, 12, 0, __LINE__); 11476 } 11477 } 11478 } 11479 11480 static void 11481 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck, 11482 int *dsack_seen, int *sacks_seen) 11483 { 11484 uint32_t changed; 11485 struct tcp_rack *rack; 11486 struct rack_sendmap *rsm; 11487 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 11488 register uint32_t th_ack; 11489 int32_t i, j, k, num_sack_blks = 0; 11490 uint32_t cts, acked, ack_point; 11491 int loop_start = 0; 11492 uint32_t tsused; 11493 uint32_t segsiz; 11494 11495 11496 INP_WLOCK_ASSERT(tptoinpcb(tp)); 11497 if (tcp_get_flags(th) & TH_RST) { 11498 /* We don't log resets */ 11499 return; 11500 } 11501 rack = (struct tcp_rack *)tp->t_fb_ptr; 11502 cts = tcp_get_usecs(NULL); 11503 rsm = tqhash_min(rack->r_ctl.tqh); 11504 changed = 0; 11505 th_ack = th->th_ack; 11506 segsiz = ctf_fixed_maxseg(rack->rc_tp); 11507 if (BYTES_THIS_ACK(tp, th) >= segsiz) { 11508 /* 11509 * You only get credit for 11510 * MSS and greater (and you get extra 11511 * credit for larger cum-ack moves). 11512 */ 11513 int ac; 11514 11515 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 11516 counter_u64_add(rack_ack_total, ac); 11517 } 11518 if (SEQ_GT(th_ack, tp->snd_una)) { 11519 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 11520 tp->t_acktime = ticks; 11521 } 11522 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 11523 changed = th_ack - rsm->r_start; 11524 if (changed) { 11525 rack_process_to_cumack(tp, rack, th_ack, cts, to, 11526 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); 11527 } 11528 if ((to->to_flags & TOF_SACK) == 0) { 11529 /* We are done nothing left and no sack. */ 11530 rack_handle_might_revert(tp, rack); 11531 /* 11532 * For cases where we struck a dup-ack 11533 * with no SACK, add to the changes so 11534 * PRR will work right. 
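 * Crediting one MSS stands in for the delivery a SACK block
 * would have reported, so the PRR arithmetic still sees
 * forward progress on a pure duplicate ACK.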
11535 */ 11536 if (dup_ack_struck && (changed == 0)) { 11537 changed += ctf_fixed_maxseg(rack->rc_tp); 11538 } 11539 goto out; 11540 } 11541 /* Sack block processing */ 11542 if (SEQ_GT(th_ack, tp->snd_una)) 11543 ack_point = th_ack; 11544 else 11545 ack_point = tp->snd_una; 11546 for (i = 0; i < to->to_nsacks; i++) { 11547 bcopy((to->to_sacks + i * TCPOLEN_SACK), 11548 &sack, sizeof(sack)); 11549 sack.start = ntohl(sack.start); 11550 sack.end = ntohl(sack.end); 11551 if (SEQ_GT(sack.end, sack.start) && 11552 SEQ_GT(sack.start, ack_point) && 11553 SEQ_LT(sack.start, tp->snd_max) && 11554 SEQ_GT(sack.end, ack_point) && 11555 SEQ_LEQ(sack.end, tp->snd_max)) { 11556 sack_blocks[num_sack_blks] = sack; 11557 num_sack_blks++; 11558 } else if (SEQ_LEQ(sack.start, th_ack) && 11559 SEQ_LEQ(sack.end, th_ack)) { 11560 int was_tlp; 11561 11562 if (dsack_seen != NULL) 11563 *dsack_seen = 1; 11564 was_tlp = rack_note_dsack(rack, sack.start, sack.end); 11565 /* 11566 * Its a D-SACK block. 11567 */ 11568 tcp_record_dsack(tp, sack.start, sack.end, was_tlp); 11569 } 11570 } 11571 if (rack->rc_dsack_round_seen) { 11572 /* Is the dsack roound over? */ 11573 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { 11574 /* Yes it is */ 11575 rack->rc_dsack_round_seen = 0; 11576 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 11577 } 11578 } 11579 /* 11580 * Sort the SACK blocks so we can update the rack scoreboard with 11581 * just one pass. 11582 */ 11583 num_sack_blks = sack_filter_blks(tp, &rack->r_ctl.rack_sf, sack_blocks, 11584 num_sack_blks, th->th_ack); 11585 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 11586 if (sacks_seen != NULL) 11587 *sacks_seen = num_sack_blks; 11588 if (num_sack_blks == 0) { 11589 /* Nothing to sack, but we need to update counts */ 11590 goto out_with_totals; 11591 } 11592 /* Its a sack of some sort */ 11593 if (num_sack_blks < 2) { 11594 /* Only one, we don't need to sort */ 11595 goto do_sack_work; 11596 } 11597 /* Sort the sacks */ 11598 for (i = 0; i < num_sack_blks; i++) { 11599 for (j = i + 1; j < num_sack_blks; j++) { 11600 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 11601 sack = sack_blocks[i]; 11602 sack_blocks[i] = sack_blocks[j]; 11603 sack_blocks[j] = sack; 11604 } 11605 } 11606 } 11607 /* 11608 * Now are any of the sack block ends the same (yes some 11609 * implementations send these)? 11610 */ 11611 again: 11612 if (num_sack_blks == 0) 11613 goto out_with_totals; 11614 if (num_sack_blks > 1) { 11615 for (i = 0; i < num_sack_blks; i++) { 11616 for (j = i + 1; j < num_sack_blks; j++) { 11617 if (sack_blocks[i].end == sack_blocks[j].end) { 11618 /* 11619 * Ok these two have the same end we 11620 * want the smallest end and then 11621 * throw away the larger and start 11622 * again. 11623 */ 11624 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 11625 /* 11626 * The second block covers 11627 * more area use that 11628 */ 11629 sack_blocks[i].start = sack_blocks[j].start; 11630 } 11631 /* 11632 * Now collapse out the dup-sack and 11633 * lower the count 11634 */ 11635 for (k = (j + 1); k < num_sack_blks; k++) { 11636 sack_blocks[j].start = sack_blocks[k].start; 11637 sack_blocks[j].end = sack_blocks[k].end; 11638 j++; 11639 } 11640 num_sack_blks--; 11641 goto again; 11642 } 11643 } 11644 } 11645 } 11646 do_sack_work: 11647 /* 11648 * First lets look to see if 11649 * we have retransmitted and 11650 * can use the transmit next? 
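 * That is, does the lowest SACK block overlap the entry at the
 * head of the time-ordered transmit queue? If so it most likely
 * covers the retransmission we just made, so process that block
 * first.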
11651 */ 11652 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 11653 if (rsm && 11654 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 11655 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 11656 /* 11657 * We probably did the FR and the next 11658 * SACK in continues as we would expect. 11659 */ 11660 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, segsiz); 11661 if (acked) { 11662 rack->r_wanted_output = 1; 11663 changed += acked; 11664 } 11665 if (num_sack_blks == 1) { 11666 /* 11667 * This is what we would expect from 11668 * a normal implementation to happen 11669 * after we have retransmitted the FR, 11670 * i.e the sack-filter pushes down 11671 * to 1 block and the next to be retransmitted 11672 * is the sequence in the sack block (has more 11673 * are acked). Count this as ACK'd data to boost 11674 * up the chances of recovering any false positives. 11675 */ 11676 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 11677 counter_u64_add(rack_express_sack, 1); 11678 goto out_with_totals; 11679 } else { 11680 /* 11681 * Start the loop through the 11682 * rest of blocks, past the first block. 11683 */ 11684 loop_start = 1; 11685 } 11686 } 11687 counter_u64_add(rack_sack_total, 1); 11688 rsm = rack->r_ctl.rc_sacklast; 11689 for (i = loop_start; i < num_sack_blks; i++) { 11690 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, segsiz); 11691 if (acked) { 11692 rack->r_wanted_output = 1; 11693 changed += acked; 11694 } 11695 } 11696 out_with_totals: 11697 if (num_sack_blks > 1) { 11698 /* 11699 * You get an extra stroke if 11700 * you have more than one sack-blk, this 11701 * could be where we are skipping forward 11702 * and the sack-filter is still working, or 11703 * it could be an attacker constantly 11704 * moving us. 11705 */ 11706 counter_u64_add(rack_move_some, 1); 11707 } 11708 out: 11709 if (changed) { 11710 /* Something changed cancel the rack timer */ 11711 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11712 } 11713 tsused = tcp_get_usecs(NULL); 11714 rsm = tcp_rack_output(tp, rack, tsused); 11715 if ((!IN_FASTRECOVERY(tp->t_flags)) && 11716 rsm && 11717 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { 11718 /* Enter recovery */ 11719 entered_recovery = 1; 11720 rack_cong_signal(tp, CC_NDUPACK, th_ack, __LINE__); 11721 /* 11722 * When we enter recovery we need to assure we send 11723 * one packet. 11724 */ 11725 if (rack->rack_no_prr == 0) { 11726 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 11727 rack_log_to_prr(rack, 8, 0, __LINE__); 11728 } 11729 rack->r_timer_override = 1; 11730 rack->r_early = 0; 11731 rack->r_ctl.rc_agg_early = 0; 11732 } else if (IN_FASTRECOVERY(tp->t_flags) && 11733 rsm && 11734 (rack->r_rr_config == 3)) { 11735 /* 11736 * Assure we can output and we get no 11737 * remembered pace time except the retransmit. 11738 */ 11739 rack->r_timer_override = 1; 11740 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 11741 rack->r_ctl.rc_resend = rsm; 11742 } 11743 if (IN_FASTRECOVERY(tp->t_flags) && 11744 (rack->rack_no_prr == 0) && 11745 (entered_recovery == 0)) { 11746 rack_update_prr(tp, rack, changed, th_ack); 11747 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 11748 ((tcp_in_hpts(rack->rc_tp) == 0) && 11749 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 11750 /* 11751 * If you are pacing output you don't want 11752 * to override. 
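 * The immediate send below is only forced when we are not
 * already sitting on the hpts pacer with an output slot pending.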
11753 */ 11754 rack->r_early = 0; 11755 rack->r_ctl.rc_agg_early = 0; 11756 rack->r_timer_override = 1; 11757 } 11758 } 11759 } 11760 11761 static void 11762 rack_strike_dupack(struct tcp_rack *rack, tcp_seq th_ack) 11763 { 11764 struct rack_sendmap *rsm; 11765 11766 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 11767 while (rsm) { 11768 /* 11769 * We need to skip anything already set 11770 * to be retransmitted. 11771 */ 11772 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 11773 (rsm->r_flags & RACK_MUST_RXT)) { 11774 rsm = TAILQ_NEXT(rsm, r_tnext); 11775 continue; 11776 } 11777 break; 11778 } 11779 if (rsm && (rsm->r_dupack < 0xff)) { 11780 rsm->r_dupack++; 11781 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 11782 struct timeval tv; 11783 uint32_t cts; 11784 /* 11785 * Here we see if we need to retransmit. For 11786 * a SACK type connection if enough time has passed 11787 * we will get a return of the rsm. For a non-sack 11788 * connection we will get the rsm returned if the 11789 * dupack value is 3 or more. 11790 */ 11791 cts = tcp_get_usecs(&tv); 11792 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); 11793 if (rack->r_ctl.rc_resend != NULL) { 11794 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { 11795 rack_cong_signal(rack->rc_tp, CC_NDUPACK, 11796 th_ack, __LINE__); 11797 } 11798 rack->r_wanted_output = 1; 11799 rack->r_timer_override = 1; 11800 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 11801 } 11802 } else { 11803 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 11804 } 11805 } 11806 } 11807 11808 static void 11809 rack_check_bottom_drag(struct tcpcb *tp, 11810 struct tcp_rack *rack, 11811 struct socket *so) 11812 { 11813 /* 11814 * So what is dragging bottom? 11815 * 11816 * Dragging bottom means you were under pacing and had a 11817 * delay in processing inbound acks waiting on our pacing 11818 * timer to expire. While you were waiting all of the acknowledgments 11819 * for the packets you sent have arrived. This means we are pacing 11820 * way underneath the bottleneck to the point where our Goodput 11821 * measurements stop working, since they require more than one 11822 * ack (usually at least 8 packets worth with multiple acks so we can 11823 * gauge the inter-ack times). If that occurs we have a real problem 11824 * since we are stuck in a hole that we can't get out of without 11825 * something speeding us up. 11826 * 11827 * We also check to see if we are widdling down to just one segment 11828 * outstanding. If this occurs and we have room to send in our cwnd/rwnd 11829 * then we are adding the delayed ack interval into our measurments and 11830 * we need to speed up slightly. 11831 */ 11832 uint32_t segsiz, minseg; 11833 11834 segsiz = ctf_fixed_maxseg(tp); 11835 minseg = segsiz; 11836 if (tp->snd_max == tp->snd_una) { 11837 /* 11838 * We are doing dynamic pacing and we are way 11839 * under. Basically everything got acked while 11840 * we were still waiting on the pacer to expire. 11841 * 11842 * This means we need to boost the b/w in 11843 * addition to any earlier boosting of 11844 * the multiplier. 11845 */ 11846 uint64_t lt_bw; 11847 11848 tcp_trace_point(rack->rc_tp, TCP_TP_PACED_BOTTOM); 11849 lt_bw = rack_get_lt_bw(rack); 11850 rack->rc_dragged_bottom = 1; 11851 rack_validate_multipliers_at_or_above100(rack); 11852 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && 11853 (rack->dis_lt_bw == 0) && 11854 (rack->use_lesser_lt_bw == 0) && 11855 (lt_bw > 0)) { 11856 /* 11857 * Lets use the long-term b/w we have 11858 * been getting as a base. 
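 * If no goodput estimate has been filled in yet, the long-term
 * b/w is capped at a conservative value below until real
 * measurements can sanity check it.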
11859 */ 11860 if (rack->rc_gp_filled == 0) { 11861 if (lt_bw > ONE_POINT_TWO_MEG) { 11862 /* 11863 * If we have no measurement 11864 * don't let us set in more than 11865 * 1.2Mbps. If we are still too 11866 * low after pacing with this we 11867 * will hopefully have a max b/w 11868 * available to sanity check things. 11869 */ 11870 lt_bw = ONE_POINT_TWO_MEG; 11871 } 11872 rack->r_ctl.rc_rtt_diff = 0; 11873 rack->r_ctl.gp_bw = lt_bw; 11874 rack->rc_gp_filled = 1; 11875 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 11876 rack->r_ctl.num_measurements = RACK_REQ_AVG; 11877 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 11878 } else if (lt_bw > rack->r_ctl.gp_bw) { 11879 rack->r_ctl.rc_rtt_diff = 0; 11880 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 11881 rack->r_ctl.num_measurements = RACK_REQ_AVG; 11882 rack->r_ctl.gp_bw = lt_bw; 11883 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 11884 } else 11885 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11886 if ((rack->gp_ready == 0) && 11887 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 11888 /* We have enough measurements now */ 11889 rack->gp_ready = 1; 11890 if (rack->dgp_on || 11891 rack->rack_hibeta) 11892 rack_set_cc_pacing(rack); 11893 if (rack->defer_options) 11894 rack_apply_deferred_options(rack); 11895 } 11896 } else { 11897 /* 11898 * zero rtt possibly?, settle for just an old increase. 11899 */ 11900 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11901 } 11902 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 11903 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), 11904 minseg)) && 11905 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && 11906 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && 11907 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= 11908 (segsiz * rack_req_segs))) { 11909 /* 11910 * We are doing dynamic GP pacing and 11911 * we have everything except 1MSS or less 11912 * bytes left out. We are still pacing away. 11913 * And there is data that could be sent, This 11914 * means we are inserting delayed ack time in 11915 * our measurements because we are pacing too slow. 
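 * Nudging the rate up slightly gets us past the point where the
 * peer's delayed-ack timer is effectively clocking our sends.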
11916 */ 11917 rack_validate_multipliers_at_or_above100(rack); 11918 rack->rc_dragged_bottom = 1; 11919 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11920 } 11921 } 11922 11923 #ifdef TCP_REQUEST_TRK 11924 static void 11925 rack_log_hybrid(struct tcp_rack *rack, uint32_t seq, 11926 struct tcp_sendfile_track *cur, uint8_t mod, int line, int err) 11927 { 11928 int do_log; 11929 11930 do_log = tcp_bblogging_on(rack->rc_tp); 11931 if (do_log == 0) { 11932 if ((do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) )== 0) 11933 return; 11934 /* We only allow the three below with point logging on */ 11935 if ((mod != HYBRID_LOG_RULES_APP) && 11936 (mod != HYBRID_LOG_RULES_SET) && 11937 (mod != HYBRID_LOG_REQ_COMP)) 11938 return; 11939 11940 } 11941 if (do_log) { 11942 union tcp_log_stackspecific log; 11943 struct timeval tv; 11944 11945 /* Convert our ms to a microsecond */ 11946 memset(&log, 0, sizeof(log)); 11947 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 11948 log.u_bbr.flex1 = seq; 11949 log.u_bbr.cwnd_gain = line; 11950 if (cur != NULL) { 11951 uint64_t off; 11952 11953 log.u_bbr.flex2 = cur->start_seq; 11954 log.u_bbr.flex3 = cur->end_seq; 11955 log.u_bbr.flex4 = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 11956 log.u_bbr.flex5 = (uint32_t)(cur->localtime & 0x00000000ffffffff); 11957 log.u_bbr.flex6 = cur->flags; 11958 log.u_bbr.pkts_out = cur->hybrid_flags; 11959 log.u_bbr.rttProp = cur->timestamp; 11960 log.u_bbr.cur_del_rate = cur->cspr; 11961 log.u_bbr.bw_inuse = cur->start; 11962 log.u_bbr.applimited = (uint32_t)(cur->end & 0x00000000ffffffff); 11963 log.u_bbr.delivered = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff) ; 11964 log.u_bbr.epoch = (uint32_t)(cur->deadline & 0x00000000ffffffff); 11965 log.u_bbr.lt_epoch = (uint32_t)((cur->deadline >> 32) & 0x00000000ffffffff) ; 11966 log.u_bbr.inhpts = 1; 11967 #ifdef TCP_REQUEST_TRK 11968 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 11969 log.u_bbr.use_lt_bw = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 11970 #endif 11971 } else { 11972 log.u_bbr.flex2 = err; 11973 } 11974 /* 11975 * Fill in flex7 to be CHD (catchup|hybrid|DGP) 11976 */ 11977 log.u_bbr.flex7 = rack->rc_catch_up; 11978 log.u_bbr.flex7 <<= 1; 11979 log.u_bbr.flex7 |= rack->rc_hybrid_mode; 11980 log.u_bbr.flex7 <<= 1; 11981 log.u_bbr.flex7 |= rack->dgp_on; 11982 /* 11983 * Compose bbr_state to be a bit wise 0000ADHF 11984 * where A is the always_pace flag 11985 * where D is the dgp_on flag 11986 * where H is the hybrid_mode on flag 11987 * where F is the use_fixed_rate flag. 
11988 */ 11989 log.u_bbr.bbr_state = rack->rc_always_pace; 11990 log.u_bbr.bbr_state <<= 1; 11991 log.u_bbr.bbr_state |= rack->dgp_on; 11992 log.u_bbr.bbr_state <<= 1; 11993 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 11994 log.u_bbr.bbr_state <<= 1; 11995 log.u_bbr.bbr_state |= rack->use_fixed_rate; 11996 log.u_bbr.flex8 = mod; 11997 log.u_bbr.delRate = rack->r_ctl.bw_rate_cap; 11998 log.u_bbr.bbr_substate = rack->r_ctl.client_suggested_maxseg; 11999 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 12000 log.u_bbr.pkt_epoch = rack->rc_tp->tcp_hybrid_start; 12001 log.u_bbr.lost = rack->rc_tp->tcp_hybrid_error; 12002 log.u_bbr.pacing_gain = (uint16_t)rack->rc_tp->tcp_hybrid_stop; 12003 tcp_log_event(rack->rc_tp, NULL, 12004 &rack->rc_inp->inp_socket->so_rcv, 12005 &rack->rc_inp->inp_socket->so_snd, 12006 TCP_HYBRID_PACING_LOG, 0, 12007 0, &log, false, NULL, __func__, __LINE__, &tv); 12008 } 12009 } 12010 #endif 12011 12012 #ifdef TCP_REQUEST_TRK 12013 static void 12014 rack_set_dgp_hybrid_mode(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts) 12015 { 12016 struct tcp_sendfile_track *rc_cur, *orig_ent; 12017 struct tcpcb *tp; 12018 int err = 0; 12019 12020 orig_ent = rack->r_ctl.rc_last_sft; 12021 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, seq); 12022 if (rc_cur == NULL) { 12023 /* If not in the beginning what about the end piece */ 12024 if (rack->rc_hybrid_mode) 12025 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); 12026 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, (seq + len - 1)); 12027 } else { 12028 err = 12345; 12029 } 12030 /* If we find no parameters we are in straight DGP mode */ 12031 if(rc_cur == NULL) { 12032 /* None found for this seq, just DGP for now */ 12033 if (rack->rc_hybrid_mode) { 12034 rack->r_ctl.client_suggested_maxseg = 0; 12035 rack->rc_catch_up = 0; 12036 if (rack->cspr_is_fcc == 0) 12037 rack->r_ctl.bw_rate_cap = 0; 12038 else 12039 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 12040 } 12041 if (rack->rc_hybrid_mode) { 12042 rack_log_hybrid(rack, (seq + len - 1), NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); 12043 } 12044 if (rack->r_ctl.rc_last_sft) { 12045 rack->r_ctl.rc_last_sft = NULL; 12046 } 12047 return; 12048 } 12049 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_WASSET) == 0) { 12050 /* This entry was never setup for hybrid pacing on/off etc */ 12051 if (rack->rc_hybrid_mode) { 12052 rack->r_ctl.client_suggested_maxseg = 0; 12053 rack->rc_catch_up = 0; 12054 rack->r_ctl.bw_rate_cap = 0; 12055 } 12056 if (rack->r_ctl.rc_last_sft) { 12057 rack->r_ctl.rc_last_sft = NULL; 12058 } 12059 if ((rc_cur->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { 12060 rc_cur->flags |= TCP_TRK_TRACK_FLG_FSND; 12061 rc_cur->first_send = cts; 12062 rc_cur->sent_at_fs = rack->rc_tp->t_sndbytes; 12063 rc_cur->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; 12064 } 12065 return; 12066 } 12067 /* 12068 * Ok if we have a new entry *or* have never 12069 * set up an entry we need to proceed. If 12070 * we have already set it up this entry we 12071 * just continue along with what we already 12072 * setup. 
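 * The already-in-place case is detected just below; it is only
 * logged (ISSAME) and we return without changing anything.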
12073 */ 12074 tp = rack->rc_tp; 12075 if ((rack->r_ctl.rc_last_sft != NULL) && 12076 (rack->r_ctl.rc_last_sft == rc_cur)) { 12077 /* Its already in place */ 12078 if (rack->rc_hybrid_mode) 12079 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_ISSAME, __LINE__, 0); 12080 return; 12081 } 12082 if (rack->rc_hybrid_mode == 0) { 12083 rack->r_ctl.rc_last_sft = rc_cur; 12084 if (orig_ent) { 12085 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; 12086 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; 12087 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; 12088 } 12089 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0); 12090 return; 12091 } 12092 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CSPR) && rc_cur->cspr){ 12093 /* Compensate for all the header overhead's */ 12094 if (rack->cspr_is_fcc == 0) 12095 rack->r_ctl.bw_rate_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); 12096 else 12097 rack->r_ctl.fillcw_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); 12098 } else { 12099 if (rack->rc_hybrid_mode) { 12100 if (rack->cspr_is_fcc == 0) 12101 rack->r_ctl.bw_rate_cap = 0; 12102 else 12103 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 12104 } 12105 } 12106 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_H_MS) 12107 rack->r_ctl.client_suggested_maxseg = rc_cur->hint_maxseg; 12108 else 12109 rack->r_ctl.client_suggested_maxseg = 0; 12110 if (rc_cur->timestamp == rack->r_ctl.last_tm_mark) { 12111 /* 12112 * It is the same timestamp as the previous one 12113 * add the hybrid flag that will indicate we use 12114 * sendtime not arrival time for catch-up mode. 12115 */ 12116 rc_cur->hybrid_flags |= TCP_HYBRID_PACING_SENDTIME; 12117 } 12118 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CU) && 12119 (rc_cur->cspr > 0)) { 12120 uint64_t len; 12121 12122 rack->rc_catch_up = 1; 12123 /* 12124 * Calculate the deadline time, first set the 12125 * time to when the request arrived. 12126 */ 12127 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_SENDTIME) { 12128 /* 12129 * For cases where its a duplicate tm (we received more 12130 * than one request for a tm) we want to use now, the point 12131 * where we are just sending the first bit of the request. 12132 */ 12133 rc_cur->deadline = cts; 12134 } else { 12135 /* 12136 * Here we have a different tm from the last request 12137 * so we want to use arrival time as our base. 12138 */ 12139 rc_cur->deadline = rc_cur->localtime; 12140 } 12141 /* 12142 * Next calculate the length and compensate for 12143 * TLS if need be. 12144 */ 12145 len = rc_cur->end - rc_cur->start; 12146 if (tp->t_inpcb.inp_socket->so_snd.sb_tls_info) { 12147 /* 12148 * This session is doing TLS. Take a swag guess 12149 * at the overhead. 12150 */ 12151 len += tcp_estimate_tls_overhead(tp->t_inpcb.inp_socket, len); 12152 } 12153 /* 12154 * Now considering the size, and the cspr, what is the time that 12155 * would be required at the cspr rate. Here we use the raw 12156 * cspr value since the client only looks at the raw data. We 12157 * do use len which includes TLS overhead, but not the TCP/IP etc. 12158 * That will get made up for in the CU pacing rate set. 12159 */ 12160 len *= HPTS_USEC_IN_SEC; 12161 len /= rc_cur->cspr; 12162 rc_cur->deadline += len; 12163 } else { 12164 rack->rc_catch_up = 0; 12165 rc_cur->deadline = 0; 12166 } 12167 if (rack->r_ctl.client_suggested_maxseg != 0) { 12168 /* 12169 * We need to reset the max pace segs if we have a 12170 * client_suggested_maxseg. 
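 * Re-deriving the pacing segment size here is what allows the
 * client's suggested maxseg to take effect on subsequent sends.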
12171 */ 12172 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12173 } 12174 if (orig_ent) { 12175 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; 12176 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; 12177 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; 12178 } 12179 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0); 12180 /* Remember it for next time and for CU mode */ 12181 rack->r_ctl.rc_last_sft = rc_cur; 12182 rack->r_ctl.last_tm_mark = rc_cur->timestamp; 12183 } 12184 #endif 12185 12186 static void 12187 rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts) 12188 { 12189 #ifdef TCP_REQUEST_TRK 12190 struct tcp_sendfile_track *ent; 12191 12192 ent = rack->r_ctl.rc_last_sft; 12193 if ((ent == NULL) || 12194 (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) || 12195 (SEQ_GEQ(seq, ent->end_seq))) { 12196 /* Time to update the track. */ 12197 rack_set_dgp_hybrid_mode(rack, seq, len, cts); 12198 ent = rack->r_ctl.rc_last_sft; 12199 } 12200 /* Out of all */ 12201 if (ent == NULL) { 12202 return; 12203 } 12204 if (SEQ_LT(ent->end_seq, (seq + len))) { 12205 /* 12206 * This is the case where our end_seq guess 12207 * was wrong. This is usually due to TLS having 12208 * more bytes then our guess. It could also be the 12209 * case that the client sent in two requests closely 12210 * and the SB is full of both so we are sending part 12211 * of each (end|beg). In such a case lets move this 12212 * guys end to match the end of this send. That 12213 * way it will complete when all of it is acked. 12214 */ 12215 ent->end_seq = (seq + len); 12216 if (rack->rc_hybrid_mode) 12217 rack_log_hybrid_bw(rack, seq, len, 0, 0, HYBRID_LOG_EXTEND, 0, ent, __LINE__); 12218 } 12219 /* Now validate we have set the send time of this one */ 12220 if ((ent->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { 12221 ent->flags |= TCP_TRK_TRACK_FLG_FSND; 12222 ent->first_send = cts; 12223 ent->sent_at_fs = rack->rc_tp->t_sndbytes; 12224 ent->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; 12225 } 12226 #endif 12227 } 12228 12229 static void 12230 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount) 12231 { 12232 /* 12233 * The fast output path is enabled and we 12234 * have moved the cumack forward. Lets see if 12235 * we can expand forward the fast path length by 12236 * that amount. What we would ideally like to 12237 * do is increase the number of bytes in the 12238 * fast path block (left_to_send) by the 12239 * acked amount. However we have to gate that 12240 * by two factors: 12241 * 1) The amount outstanding and the rwnd of the peer 12242 * (i.e. we don't want to exceed the rwnd of the peer). 12243 * <and> 12244 * 2) The amount of data left in the socket buffer (i.e. 12245 * we can't send beyond what is in the buffer). 12246 * 12247 * Note that this does not take into account any increase 12248 * in the cwnd. We will only extend the fast path by 12249 * what was acked. 
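 * In other words left_to_send may grow by at most the acked
 * amount, and only while the new total stays within both the
 * peer's advertised window and the data remaining in the send
 * buffer.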
 */
	uint32_t new_total, gating_val;

	new_total = acked_amount + rack->r_ctl.fsb.left_to_send;
	gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)),
			 (tp->snd_wnd - (tp->snd_max - tp->snd_una)));
	if (new_total <= gating_val) {
		/* We can increase left_to_send by the acked amount */
		counter_u64_add(rack_extended_rfo, 1);
		rack->r_ctl.fsb.left_to_send = new_total;
		KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))),
			("rack:%p left_to_send:%u sbavail:%u out:%u",
			 rack, rack->r_ctl.fsb.left_to_send,
			 sbavail(&rack->rc_inp->inp_socket->so_snd),
			 (tp->snd_max - tp->snd_una)));

	}
}

static void
rack_adjust_sendmap_head(struct tcp_rack *rack, struct sockbuf *sb)
{
	/*
	 * Here any sendmap entry that points to the
	 * beginning mbuf must be adjusted to the correct
	 * offset. This must be called with:
	 * 1) The socket buffer locked
	 * 2) snd_una adjusted to its new position.
	 *
	 * Note that (2) implies rack_ack_received has also
	 * been called and all the sbcut's have been done.
	 *
	 * We grab the first mbuf in the socket buffer and
	 * then go through the front of the sendmap, recalculating
	 * the stored offset for any sendmap entry that has
	 * that mbuf. We must use the sb functions to do this
	 * since it's possible an add was done as well as
	 * the subtraction we may have just completed. This should
	 * not be a penalty though, since we just referenced the sb
	 * to go in and trim off the mbufs that we freed (of course
	 * there will be a penalty for the sendmap references though).
	 *
	 * Note also with INVARIANTS on, we validate with a KASSERT
	 * that the first sendmap entry has a soff of 0.
	 *
	 */
	struct mbuf *m;
	struct rack_sendmap *rsm;
	tcp_seq snd_una;
#ifdef INVARIANTS
	int first_processed = 0;
#endif

	snd_una = rack->rc_tp->snd_una;
	SOCKBUF_LOCK_ASSERT(sb);
	m = sb->sb_mb;
	rsm = tqhash_min(rack->r_ctl.tqh);
	if ((rsm == NULL) || (m == NULL)) {
		/* Nothing outstanding */
		return;
	}
	/* The very first RSM's mbuf must point to the head mbuf in the sb */
	KASSERT((rsm->m == m),
		("Rack:%p sb:%p rsm:%p -- first rsm mbuf not aligned to sb",
		 rack, sb, rsm));
	while (rsm->m && (rsm->m == m)) {
		/* one to adjust */
#ifdef INVARIANTS
		struct mbuf *tm;
		uint32_t soff;

		tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff);
		if ((rsm->orig_m_len != m->m_len) ||
		    (rsm->orig_t_space != M_TRAILINGROOM(m))) {
			rack_adjust_orig_mlen(rsm);
		}
		if (first_processed == 0) {
			KASSERT((rsm->soff == 0),
				("Rack:%p rsm:%p -- rsm at head but soff not zero",
				 rack, rsm));
			first_processed = 1;
		}
		if ((rsm->soff != soff) || (rsm->m != tm)) {
			/*
			 * This is not a fatal error, we anticipate it
			 * might happen (the else code), so we count it here
			 * so that under INVARIANTS we can see that it really
			 * does happen.
12338 */ 12339 counter_u64_add(rack_adjust_map_bw, 1); 12340 } 12341 rsm->m = tm; 12342 rsm->soff = soff; 12343 if (tm) { 12344 rsm->orig_m_len = rsm->m->m_len; 12345 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 12346 } else { 12347 rsm->orig_m_len = 0; 12348 rsm->orig_t_space = 0; 12349 } 12350 #else 12351 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); 12352 if (rsm->m) { 12353 rsm->orig_m_len = rsm->m->m_len; 12354 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 12355 } else { 12356 rsm->orig_m_len = 0; 12357 rsm->orig_t_space = 0; 12358 } 12359 #endif 12360 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 12361 if (rsm == NULL) 12362 break; 12363 } 12364 } 12365 12366 #ifdef TCP_REQUEST_TRK 12367 static inline void 12368 rack_req_check_for_comp(struct tcp_rack *rack, tcp_seq th_ack) 12369 { 12370 struct tcp_sendfile_track *ent; 12371 int i; 12372 12373 if ((rack->rc_hybrid_mode == 0) && 12374 (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) == 0)) { 12375 /* 12376 * Just do normal completions hybrid pacing is not on 12377 * and CLDL is off as well. 12378 */ 12379 tcp_req_check_for_comp(rack->rc_tp, th_ack); 12380 return; 12381 } 12382 /* 12383 * Originally I was just going to find the th_ack associated 12384 * with an entry. But then I realized a large strech ack could 12385 * in theory ack two or more requests at once. So instead we 12386 * need to find all entries that are completed by th_ack not 12387 * just a single entry and do our logging. 12388 */ 12389 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); 12390 while (ent != NULL) { 12391 /* 12392 * We may be doing hybrid pacing or CLDL and need more details possibly 12393 * so we do it manually instead of calling 12394 * tcp_req_check_for_comp() 12395 */ 12396 uint64_t laa, tim, data, cbw, ftim; 12397 12398 /* Ok this ack frees it */ 12399 rack_log_hybrid(rack, th_ack, 12400 ent, HYBRID_LOG_REQ_COMP, __LINE__, 0); 12401 rack_log_hybrid_sends(rack, ent, __LINE__); 12402 /* calculate the time based on the ack arrival */ 12403 data = ent->end - ent->start; 12404 laa = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 12405 if (ent->flags & TCP_TRK_TRACK_FLG_FSND) { 12406 if (ent->first_send > ent->localtime) 12407 ftim = ent->first_send; 12408 else 12409 ftim = ent->localtime; 12410 } else { 12411 /* TSNH */ 12412 ftim = ent->localtime; 12413 } 12414 if (laa > ent->localtime) 12415 tim = laa - ftim; 12416 else 12417 tim = 0; 12418 cbw = data * HPTS_USEC_IN_SEC; 12419 if (tim > 0) 12420 cbw /= tim; 12421 else 12422 cbw = 0; 12423 rack_log_hybrid_bw(rack, th_ack, cbw, tim, data, HYBRID_LOG_BW_MEASURE, 0, ent, __LINE__); 12424 /* 12425 * Check to see if we are freeing what we are pointing to send wise 12426 * if so be sure to NULL the pointer so we know we are no longer 12427 * set to anything. 
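 * When that entry was driving hybrid pacing we also drop the
 * catch-up state, rate caps and client suggested maxseg that
 * were tied to it.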
12428 */ 12429 if (ent == rack->r_ctl.rc_last_sft) { 12430 rack->r_ctl.rc_last_sft = NULL; 12431 if (rack->rc_hybrid_mode) { 12432 rack->rc_catch_up = 0; 12433 if (rack->cspr_is_fcc == 0) 12434 rack->r_ctl.bw_rate_cap = 0; 12435 else 12436 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 12437 rack->r_ctl.client_suggested_maxseg = 0; 12438 } 12439 } 12440 /* Generate the log that the tcp_netflix call would have */ 12441 tcp_req_log_req_info(rack->rc_tp, ent, 12442 i, TCP_TRK_REQ_LOG_FREED, 0, 0); 12443 /* Free it and see if there is another one */ 12444 tcp_req_free_a_slot(rack->rc_tp, ent); 12445 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); 12446 } 12447 } 12448 #endif 12449 12450 12451 /* 12452 * Return value of 1, we do not need to call rack_process_data(). 12453 * return value of 0, rack_process_data can be called. 12454 * For ret_val if its 0 the TCP is locked, if its non-zero 12455 * its unlocked and probably unsafe to touch the TCB. 12456 */ 12457 static int 12458 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 12459 struct tcpcb *tp, struct tcpopt *to, 12460 uint32_t tiwin, int32_t tlen, 12461 int32_t * ofia, int32_t thflags, int32_t *ret_val, int32_t orig_tlen) 12462 { 12463 int32_t ourfinisacked = 0; 12464 int32_t nsegs, acked_amount; 12465 int32_t acked; 12466 struct mbuf *mfree; 12467 struct tcp_rack *rack; 12468 int32_t under_pacing = 0; 12469 int32_t post_recovery = 0; 12470 uint32_t p_cwnd; 12471 12472 INP_WLOCK_ASSERT(tptoinpcb(tp)); 12473 12474 rack = (struct tcp_rack *)tp->t_fb_ptr; 12475 if (SEQ_GEQ(tp->snd_una, tp->iss + (65535 << tp->snd_scale))) { 12476 /* Checking SEG.ACK against ISS is definitely redundant. */ 12477 tp->t_flags2 |= TF2_NO_ISS_CHECK; 12478 } 12479 if (!V_tcp_insecure_ack) { 12480 tcp_seq seq_min; 12481 bool ghost_ack_check; 12482 12483 if (tp->t_flags2 & TF2_NO_ISS_CHECK) { 12484 /* Check for too old ACKs (RFC 5961, Section 5.2). */ 12485 seq_min = tp->snd_una - tp->max_sndwnd; 12486 ghost_ack_check = false; 12487 } else { 12488 if (SEQ_GT(tp->iss + 1, tp->snd_una - tp->max_sndwnd)) { 12489 /* Checking for ghost ACKs is stricter. */ 12490 seq_min = tp->iss + 1; 12491 ghost_ack_check = true; 12492 } else { 12493 /* 12494 * Checking for too old ACKs (RFC 5961, 12495 * Section 5.2) is stricter. 12496 */ 12497 seq_min = tp->snd_una - tp->max_sndwnd; 12498 ghost_ack_check = false; 12499 } 12500 } 12501 if (SEQ_LT(th->th_ack, seq_min)) { 12502 if (ghost_ack_check) 12503 TCPSTAT_INC(tcps_rcvghostack); 12504 else 12505 TCPSTAT_INC(tcps_rcvacktooold); 12506 /* Send challenge ACK. 
*/ 12507 __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val, 12508 &rack->r_ctl.challenge_ack_ts, 12509 &rack->r_ctl.challenge_ack_cnt); 12510 rack->r_wanted_output = 1; 12511 return (1); 12512 } 12513 } 12514 if (SEQ_GT(th->th_ack, tp->snd_max)) { 12515 __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val, 12516 &rack->r_ctl.challenge_ack_ts, 12517 &rack->r_ctl.challenge_ack_cnt); 12518 rack->r_wanted_output = 1; 12519 return (1); 12520 } 12521 if (rack->gp_ready && 12522 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 12523 under_pacing = 1; 12524 } 12525 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 12526 int in_rec, dup_ack_struck = 0; 12527 int dsack_seen = 0, sacks_seen = 0; 12528 12529 in_rec = IN_FASTRECOVERY(tp->t_flags); 12530 if (rack->rc_in_persist) { 12531 tp->t_rxtshift = 0; 12532 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 12533 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 12534 } 12535 12536 if ((th->th_ack == tp->snd_una) && 12537 (tiwin == tp->snd_wnd) && 12538 (orig_tlen == 0) && 12539 ((to->to_flags & TOF_SACK) == 0)) { 12540 rack_strike_dupack(rack, th->th_ack); 12541 dup_ack_struck = 1; 12542 } 12543 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), 12544 dup_ack_struck, &dsack_seen, &sacks_seen); 12545 12546 } 12547 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 12548 /* 12549 * Old ack, behind (or duplicate to) the last one rcv'd 12550 * Note: We mark reordering is occuring if its 12551 * less than and we have not closed our window. 12552 */ 12553 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { 12554 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 12555 if (rack->r_ctl.rc_reorder_ts == 0) 12556 rack->r_ctl.rc_reorder_ts = 1; 12557 } 12558 return (0); 12559 } 12560 /* 12561 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 12562 * something we sent. 12563 */ 12564 if (tp->t_flags & TF_NEEDSYN) { 12565 /* 12566 * T/TCP: Connection was half-synchronized, and our SYN has 12567 * been ACK'd (so connection is now fully synchronized). Go 12568 * to non-starred state, increment snd_una for ACK of SYN, 12569 * and check if we can do window scaling. 12570 */ 12571 tp->t_flags &= ~TF_NEEDSYN; 12572 tp->snd_una++; 12573 /* Do window scaling? */ 12574 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 12575 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 12576 tp->rcv_scale = tp->request_r_scale; 12577 /* Send window already scaled. */ 12578 } 12579 } 12580 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12581 12582 acked = BYTES_THIS_ACK(tp, th); 12583 if (acked) { 12584 /* 12585 * Any time we move the cum-ack forward clear 12586 * keep-alive tied probe-not-answered. The 12587 * persists clears its own on entry. 12588 */ 12589 rack->probe_not_answered = 0; 12590 } 12591 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 12592 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 12593 /* 12594 * If we just performed our first retransmit, and the ACK arrives 12595 * within our recovery window, then it was a mistake to do the 12596 * retransmit in the first place. Recover our original cwnd and 12597 * ssthresh, and proceed to transmit where we left off. 
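 * Without timestamps we rely on the t_badrxtwin window below to
 * decide whether that first retransmit was in fact spurious.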
12598 */ 12599 if ((tp->t_flags & TF_PREVVALID) && 12600 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 12601 tp->t_flags &= ~TF_PREVVALID; 12602 if (tp->t_rxtshift == 1 && 12603 (int)(ticks - tp->t_badrxtwin) < 0) 12604 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 12605 } 12606 if (acked) { 12607 /* assure we are not backed off */ 12608 tp->t_rxtshift = 0; 12609 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 12610 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 12611 rack->rc_tlp_in_progress = 0; 12612 rack->r_ctl.rc_tlp_cnt_out = 0; 12613 /* 12614 * If it is the RXT timer we want to 12615 * stop it, so we can restart a TLP. 12616 */ 12617 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 12618 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12619 #ifdef TCP_REQUEST_TRK 12620 rack_req_check_for_comp(rack, th->th_ack); 12621 #endif 12622 } 12623 /* 12624 * If we have a timestamp reply, update smoothed round trip time. If 12625 * no timestamp is present but transmit timer is running and timed 12626 * sequence number was acked, update smoothed round trip time. Since 12627 * we now have an rtt measurement, cancel the timer backoff (cf., 12628 * Phil Karn's retransmit alg.). Recompute the initial retransmit 12629 * timer. 12630 * 12631 * Some boxes send broken timestamp replies during the SYN+ACK 12632 * phase, ignore timestamps of 0 or we could calculate a huge RTT 12633 * and blow up the retransmit timer. 12634 */ 12635 /* 12636 * If all outstanding data is acked, stop retransmit timer and 12637 * remember to restart (more output or persist). If there is more 12638 * data to be acked, restart retransmit timer, using current 12639 * (possibly backed-off) value. 12640 */ 12641 if (acked == 0) { 12642 if (ofia) 12643 *ofia = ourfinisacked; 12644 return (0); 12645 } 12646 if (IN_RECOVERY(tp->t_flags)) { 12647 if (SEQ_LT(th->th_ack, tp->snd_recover) && 12648 (SEQ_LT(th->th_ack, tp->snd_max))) { 12649 tcp_rack_partialack(tp); 12650 } else { 12651 rack_post_recovery(tp, th->th_ack); 12652 post_recovery = 1; 12653 /* 12654 * Grab the segsiz, multiply by 2 and add the snd_cwnd 12655 * that is the max the CC should add if we are exiting 12656 * recovery and doing a late add. 12657 */ 12658 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 12659 p_cwnd <<= 1; 12660 p_cwnd += tp->snd_cwnd; 12661 } 12662 } else if ((rack->rto_from_rec == 1) && 12663 SEQ_GEQ(th->th_ack, tp->snd_recover)) { 12664 /* 12665 * We were in recovery, hit a rxt timeout 12666 * and never re-entered recovery. The timeout(s) 12667 * made up all the lost data. In such a case 12668 * we need to clear the rto_from_rec flag. 12669 */ 12670 rack->rto_from_rec = 0; 12671 } 12672 /* 12673 * Let the congestion control algorithm update congestion control 12674 * related information. This typically means increasing the 12675 * congestion window. 
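 * If we are just exiting recovery the growth is clamped to the
 * p_cwnd bound computed above, so a non-newreno CC (e.g. cubic)
 * cannot leap too far ahead on the exit ACK.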
12676 */ 12677 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, post_recovery); 12678 if (post_recovery && 12679 (tp->snd_cwnd > p_cwnd)) { 12680 /* Must be non-newreno (cubic) getting too ahead of itself */ 12681 tp->snd_cwnd = p_cwnd; 12682 } 12683 SOCKBUF_LOCK(&so->so_snd); 12684 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 12685 tp->snd_wnd -= acked_amount; 12686 mfree = sbcut_locked(&so->so_snd, acked_amount); 12687 if ((sbused(&so->so_snd) == 0) && 12688 (acked > acked_amount) && 12689 (tp->t_state >= TCPS_FIN_WAIT_1) && 12690 (tp->t_flags & TF_SENTFIN)) { 12691 /* 12692 * We must be sure our fin 12693 * was sent and acked (we can be 12694 * in FIN_WAIT_1 without having 12695 * sent the fin). 12696 */ 12697 ourfinisacked = 1; 12698 } 12699 tp->snd_una = th->th_ack; 12700 /* wakeups? */ 12701 if (acked_amount && sbavail(&so->so_snd)) 12702 rack_adjust_sendmap_head(rack, &so->so_snd); 12703 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 12704 /* NB: sowwakeup_locked() does an implicit unlock. */ 12705 sowwakeup_locked(so); 12706 m_freem(mfree); 12707 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 12708 tp->snd_recover = tp->snd_una; 12709 12710 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { 12711 tp->snd_nxt = tp->snd_max; 12712 } 12713 if (under_pacing && 12714 (rack->use_fixed_rate == 0) && 12715 (rack->in_probe_rtt == 0) && 12716 rack->rc_gp_dyn_mul && 12717 rack->rc_always_pace) { 12718 /* Check if we are dragging bottom */ 12719 rack_check_bottom_drag(tp, rack, so); 12720 } 12721 if (tp->snd_una == tp->snd_max) { 12722 /* Nothing left outstanding */ 12723 tp->t_flags &= ~TF_PREVVALID; 12724 rack->r_ctl.idle_snd_una = tp->snd_una; 12725 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 12726 if (rack->r_ctl.rc_went_idle_time == 0) 12727 rack->r_ctl.rc_went_idle_time = 1; 12728 rack->r_ctl.retran_during_recovery = 0; 12729 rack->r_ctl.dsack_byte_cnt = 0; 12730 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 12731 if (sbavail(&tptosocket(tp)->so_snd) == 0) 12732 tp->t_acktime = 0; 12733 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12734 rack->rc_suspicious = 0; 12735 /* Set need output so persist might get set */ 12736 rack->r_wanted_output = 1; 12737 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 12738 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 12739 (sbavail(&so->so_snd) == 0) && 12740 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 12741 /* 12742 * The socket was gone and the 12743 * peer sent data (now or in the past), time to 12744 * reset him. 
12745 */ 12746 *ret_val = 1; 12747 /* tcp_close will kill the inp pre-log the Reset */ 12748 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 12749 tp = tcp_close(tp); 12750 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 12751 return (1); 12752 } 12753 } 12754 if (ofia) 12755 *ofia = ourfinisacked; 12756 return (0); 12757 } 12758 12759 12760 static void 12761 rack_log_collapse(struct tcp_rack *rack, uint32_t cnt, uint32_t split, uint32_t out, int line, 12762 int dir, uint32_t flags, struct rack_sendmap *rsm) 12763 { 12764 if (tcp_bblogging_on(rack->rc_tp)) { 12765 union tcp_log_stackspecific log; 12766 struct timeval tv; 12767 12768 memset(&log, 0, sizeof(log)); 12769 log.u_bbr.flex1 = cnt; 12770 log.u_bbr.flex2 = split; 12771 log.u_bbr.flex3 = out; 12772 log.u_bbr.flex4 = line; 12773 log.u_bbr.flex5 = rack->r_must_retran; 12774 log.u_bbr.flex6 = flags; 12775 log.u_bbr.flex7 = rack->rc_has_collapsed; 12776 log.u_bbr.flex8 = dir; /* 12777 * 1 is collapsed, 0 is uncollapsed, 12778 * 2 is log of a rsm being marked, 3 is a split. 12779 */ 12780 if (rsm == NULL) 12781 log.u_bbr.rttProp = 0; 12782 else 12783 log.u_bbr.rttProp = (uintptr_t)rsm; 12784 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 12785 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 12786 TCP_LOG_EVENTP(rack->rc_tp, NULL, 12787 &rack->rc_inp->inp_socket->so_rcv, 12788 &rack->rc_inp->inp_socket->so_snd, 12789 TCP_RACK_LOG_COLLAPSE, 0, 12790 0, &log, false, &tv); 12791 } 12792 } 12793 12794 static void 12795 rack_collapsed_window(struct tcp_rack *rack, uint32_t out, tcp_seq th_ack, int line) 12796 { 12797 /* 12798 * Here all we do is mark the collapsed point and set the flag. 12799 * This may happen again and again, but there is no 12800 * sense splitting our map until we know where the 12801 * peer finally lands in the collapse. 12802 */ 12803 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); 12804 if ((rack->rc_has_collapsed == 0) || 12805 (rack->r_ctl.last_collapse_point != (th_ack + rack->rc_tp->snd_wnd))) 12806 counter_u64_add(rack_collapsed_win_seen, 1); 12807 rack->r_ctl.last_collapse_point = th_ack + rack->rc_tp->snd_wnd; 12808 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max; 12809 rack->rc_has_collapsed = 1; 12810 rack->r_collapse_point_valid = 1; 12811 rack_log_collapse(rack, 0, th_ack, rack->r_ctl.last_collapse_point, line, 1, 0, NULL); 12812 } 12813 12814 static void 12815 rack_un_collapse_window(struct tcp_rack *rack, int line) 12816 { 12817 struct rack_sendmap *nrsm, *rsm; 12818 int cnt = 0, split = 0; 12819 int insret __diagused; 12820 12821 12822 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); 12823 rack->rc_has_collapsed = 0; 12824 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); 12825 if (rsm == NULL) { 12826 /* Nothing to do maybe the peer ack'ed it all */ 12827 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 12828 return; 12829 } 12830 /* Now do we need to split this one? */ 12831 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) { 12832 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 12833 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm); 12834 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 12835 if (nrsm == NULL) { 12836 /* We can't get a rsm, mark all? 
*/ 12837 nrsm = rsm; 12838 goto no_split; 12839 } 12840 /* Clone it */ 12841 split = 1; 12842 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point); 12843 #ifndef INVARIANTS 12844 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 12845 #else 12846 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 12847 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 12848 nrsm, insret, rack, rsm); 12849 } 12850 #endif 12851 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 12852 rack->r_ctl.last_collapse_point, __LINE__); 12853 if (rsm->r_in_tmap) { 12854 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 12855 nrsm->r_in_tmap = 1; 12856 } 12857 /* 12858 * Set in the new RSM as the 12859 * collapsed starting point 12860 */ 12861 rsm = nrsm; 12862 } 12863 12864 no_split: 12865 TQHASH_FOREACH_FROM(nrsm, rack->r_ctl.tqh, rsm) { 12866 cnt++; 12867 nrsm->r_flags |= RACK_RWND_COLLAPSED; 12868 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm); 12869 cnt++; 12870 } 12871 if (cnt) { 12872 counter_u64_add(rack_collapsed_win, 1); 12873 } 12874 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 12875 } 12876 12877 static void 12878 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 12879 int32_t tlen, int32_t tfo_syn) 12880 { 12881 if (DELAY_ACK(tp, tlen) || tfo_syn) { 12882 rack_timer_cancel(tp, rack, 12883 rack->r_ctl.rc_rcvtime, __LINE__); 12884 tp->t_flags |= TF_DELACK; 12885 } else { 12886 rack->r_wanted_output = 1; 12887 tp->t_flags |= TF_ACKNOW; 12888 } 12889 } 12890 12891 static void 12892 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack) 12893 { 12894 /* 12895 * If fast output is in progress, lets validate that 12896 * the new window did not shrink on us and make it 12897 * so fast output should end. 12898 */ 12899 if (rack->r_fast_output) { 12900 uint32_t out; 12901 12902 /* 12903 * Calculate what we will send if left as is 12904 * and compare that to our send window. 12905 */ 12906 out = ctf_outstanding(tp); 12907 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 12908 /* ok we have an issue */ 12909 if (out >= tp->snd_wnd) { 12910 /* Turn off fast output the window is met or collapsed */ 12911 rack->r_fast_output = 0; 12912 } else { 12913 /* we have some room left */ 12914 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 12915 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 12916 /* If not at least 1 full segment never mind */ 12917 rack->r_fast_output = 0; 12918 } 12919 } 12920 } 12921 } 12922 } 12923 12924 /* 12925 * Return value of 1, the TCB is unlocked and most 12926 * likely gone, return value of 0, the TCP is still 12927 * locked. 12928 */ 12929 static int 12930 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 12931 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 12932 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 12933 { 12934 /* 12935 * Update window information. Don't look at window if no ACK: TAC's 12936 * send garbage on first SYN. 
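 * The snd_wl1/snd_wl2 checks below implement the standard rule:
 * only take the advertised window from a segment that is not
 * older than the one that last updated it.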
12937 */ 12938 int32_t nsegs; 12939 int32_t tfo_syn; 12940 struct tcp_rack *rack; 12941 12942 INP_WLOCK_ASSERT(tptoinpcb(tp)); 12943 12944 rack = (struct tcp_rack *)tp->t_fb_ptr; 12945 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12946 if ((thflags & TH_ACK) && 12947 (SEQ_LT(tp->snd_wl1, th->th_seq) || 12948 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 12949 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 12950 /* keep track of pure window updates */ 12951 if (tlen == 0 && 12952 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 12953 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 12954 tp->snd_wnd = tiwin; 12955 rack_validate_fo_sendwin_up(tp, rack); 12956 tp->snd_wl1 = th->th_seq; 12957 tp->snd_wl2 = th->th_ack; 12958 if (tp->snd_wnd > tp->max_sndwnd) 12959 tp->max_sndwnd = tp->snd_wnd; 12960 rack->r_wanted_output = 1; 12961 } else if (thflags & TH_ACK) { 12962 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 12963 tp->snd_wnd = tiwin; 12964 rack_validate_fo_sendwin_up(tp, rack); 12965 tp->snd_wl1 = th->th_seq; 12966 tp->snd_wl2 = th->th_ack; 12967 } 12968 } 12969 if (tp->snd_wnd < ctf_outstanding(tp)) 12970 /* The peer collapsed the window */ 12971 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); 12972 else if (rack->rc_has_collapsed) 12973 rack_un_collapse_window(rack, __LINE__); 12974 if ((rack->r_collapse_point_valid) && 12975 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point))) 12976 rack->r_collapse_point_valid = 0; 12977 /* Was persist timer active and now we have window space? */ 12978 if ((rack->rc_in_persist != 0) && 12979 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 12980 rack->r_ctl.rc_pace_min_segs))) { 12981 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 12982 tp->snd_nxt = tp->snd_max; 12983 /* Make sure we output to start the timer */ 12984 rack->r_wanted_output = 1; 12985 } 12986 /* Do we enter persists? */ 12987 if ((rack->rc_in_persist == 0) && 12988 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 12989 TCPS_HAVEESTABLISHED(tp->t_state) && 12990 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 12991 sbavail(&tptosocket(tp)->so_snd) && 12992 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 12993 /* 12994 * Here the rwnd is less than 12995 * the pacing size, we are established, 12996 * nothing is outstanding, and there is 12997 * data to send. Enter persists. 12998 */ 12999 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); 13000 } 13001 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 13002 m_freem(m); 13003 return (0); 13004 } 13005 /* 13006 * don't process the URG bit, ignore them drag 13007 * along the up. 13008 */ 13009 tp->rcv_up = tp->rcv_nxt; 13010 13011 /* 13012 * Process the segment text, merging it into the TCP sequencing 13013 * queue, and arranging for acknowledgment of receipt if necessary. 13014 * This process logically involves adjusting tp->rcv_wnd as data is 13015 * presented to the user (this happens in tcp_usrreq.c, case 13016 * PRU_RCVD). If a FIN has already been received on this connection 13017 * then we just ignore the text. 
13018 */ 13019 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 13020 (tp->t_flags & TF_FASTOPEN)); 13021 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 13022 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 13023 tcp_seq save_start = th->th_seq; 13024 tcp_seq save_rnxt = tp->rcv_nxt; 13025 int save_tlen = tlen; 13026 13027 m_adj(m, drop_hdrlen); /* delayed header drop */ 13028 /* 13029 * Insert segment which includes th into TCP reassembly 13030 * queue with control block tp. Set thflags to whether 13031 * reassembly now includes a segment with FIN. This handles 13032 * the common case inline (segment is the next to be 13033 * received on an established connection, and the queue is 13034 * empty), avoiding linkage into and removal from the queue 13035 * and repetition of various conversions. Set DELACK for 13036 * segments received in order, but ack immediately when 13037 * segments are out of order (so fast retransmit can work). 13038 */ 13039 if (th->th_seq == tp->rcv_nxt && 13040 SEGQ_EMPTY(tp) && 13041 (TCPS_HAVEESTABLISHED(tp->t_state) || 13042 tfo_syn)) { 13043 #ifdef NETFLIX_SB_LIMITS 13044 u_int mcnt, appended; 13045 13046 if (so->so_rcv.sb_shlim) { 13047 mcnt = m_memcnt(m); 13048 appended = 0; 13049 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 13050 CFO_NOSLEEP, NULL) == false) { 13051 counter_u64_add(tcp_sb_shlim_fails, 1); 13052 m_freem(m); 13053 return (0); 13054 } 13055 } 13056 #endif 13057 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); 13058 tp->rcv_nxt += tlen; 13059 if (tlen && 13060 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 13061 (tp->t_fbyte_in == 0)) { 13062 tp->t_fbyte_in = ticks; 13063 if (tp->t_fbyte_in == 0) 13064 tp->t_fbyte_in = 1; 13065 if (tp->t_fbyte_out && tp->t_fbyte_in) 13066 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 13067 } 13068 thflags = tcp_get_flags(th) & TH_FIN; 13069 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 13070 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 13071 SOCKBUF_LOCK(&so->so_rcv); 13072 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13073 m_freem(m); 13074 } else { 13075 int32_t newsize; 13076 13077 if (tlen > 0) { 13078 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 13079 if (newsize) 13080 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 13081 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 13082 } 13083 #ifdef NETFLIX_SB_LIMITS 13084 appended = 13085 #endif 13086 sbappendstream_locked(&so->so_rcv, m, 0); 13087 } 13088 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 13089 /* NB: sorwakeup_locked() does an implicit unlock. */ 13090 sorwakeup_locked(so); 13091 #ifdef NETFLIX_SB_LIMITS 13092 if (so->so_rcv.sb_shlim && appended != mcnt) 13093 counter_fo_release(so->so_rcv.sb_shlim, 13094 mcnt - appended); 13095 #endif 13096 } else { 13097 /* 13098 * XXX: Due to the header drop above "th" is 13099 * theoretically invalid by now. Fortunately 13100 * m_adj() doesn't actually frees any mbufs when 13101 * trimming from the head. 13102 */ 13103 tcp_seq temp = save_start; 13104 13105 thflags = tcp_reass(tp, th, &temp, &tlen, m); 13106 tp->t_flags |= TF_ACKNOW; 13107 if (tp->t_flags & TF_WAKESOR) { 13108 tp->t_flags &= ~TF_WAKESOR; 13109 /* NB: sorwakeup_locked() does an implicit unlock. */ 13110 sorwakeup_locked(so); 13111 } 13112 } 13113 if ((tp->t_flags & TF_SACK_PERMIT) && 13114 (save_tlen > 0) && 13115 TCPS_HAVEESTABLISHED(tp->t_state)) { 13116 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 13117 /* 13118 * DSACK actually handled in the fastpath 13119 * above. 
13120 */ 13121 tcp_update_sack_list(tp, save_start, 13122 save_start + save_tlen); 13123 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 13124 if ((tp->rcv_numsacks >= 1) && 13125 (tp->sackblks[0].end == save_start)) { 13126 /* 13127 * Partial overlap, recorded at todrop 13128 * above. 13129 */ 13130 tcp_update_sack_list(tp, 13131 tp->sackblks[0].start, 13132 tp->sackblks[0].end); 13133 } else { 13134 tcp_update_dsack_list(tp, save_start, 13135 save_start + save_tlen); 13136 } 13137 } else if (tlen >= save_tlen) { 13138 /* Update of sackblks. */ 13139 tcp_update_dsack_list(tp, save_start, 13140 save_start + save_tlen); 13141 } else if (tlen > 0) { 13142 tcp_update_dsack_list(tp, save_start, 13143 save_start + tlen); 13144 } 13145 } 13146 } else { 13147 m_freem(m); 13148 thflags &= ~TH_FIN; 13149 } 13150 13151 /* 13152 * If FIN is received ACK the FIN and let the user know that the 13153 * connection is closing. 13154 */ 13155 if (thflags & TH_FIN) { 13156 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 13157 /* The socket upcall is handled by socantrcvmore. */ 13158 socantrcvmore(so); 13159 /* 13160 * If connection is half-synchronized (ie NEEDSYN 13161 * flag on) then delay ACK, so it may be piggybacked 13162 * when SYN is sent. Otherwise, since we received a 13163 * FIN then no more input can be expected, send ACK 13164 * now. 13165 */ 13166 if (tp->t_flags & TF_NEEDSYN) { 13167 rack_timer_cancel(tp, rack, 13168 rack->r_ctl.rc_rcvtime, __LINE__); 13169 tp->t_flags |= TF_DELACK; 13170 } else { 13171 tp->t_flags |= TF_ACKNOW; 13172 } 13173 tp->rcv_nxt++; 13174 } 13175 switch (tp->t_state) { 13176 /* 13177 * In SYN_RECEIVED and ESTABLISHED STATES enter the 13178 * CLOSE_WAIT state. 13179 */ 13180 case TCPS_SYN_RECEIVED: 13181 tp->t_starttime = ticks; 13182 /* FALLTHROUGH */ 13183 case TCPS_ESTABLISHED: 13184 rack_timer_cancel(tp, rack, 13185 rack->r_ctl.rc_rcvtime, __LINE__); 13186 tcp_state_change(tp, TCPS_CLOSE_WAIT); 13187 break; 13188 13189 /* 13190 * If still in FIN_WAIT_1 STATE FIN has not been 13191 * acked so enter the CLOSING state. 13192 */ 13193 case TCPS_FIN_WAIT_1: 13194 rack_timer_cancel(tp, rack, 13195 rack->r_ctl.rc_rcvtime, __LINE__); 13196 tcp_state_change(tp, TCPS_CLOSING); 13197 break; 13198 13199 /* 13200 * In FIN_WAIT_2 state enter the TIME_WAIT state, 13201 * starting the time-wait timer, turning off the 13202 * other standard timers. 13203 */ 13204 case TCPS_FIN_WAIT_2: 13205 rack_timer_cancel(tp, rack, 13206 rack->r_ctl.rc_rcvtime, __LINE__); 13207 tcp_twstart(tp); 13208 return (1); 13209 } 13210 } 13211 /* 13212 * Return any desired output. 13213 */ 13214 if ((tp->t_flags & TF_ACKNOW) || 13215 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 13216 rack->r_wanted_output = 1; 13217 } 13218 return (0); 13219 } 13220 13221 /* 13222 * Here nothing is really faster, its just that we 13223 * have broken out the fast-data path also just like 13224 * the fast-ack. 13225 */ 13226 static int 13227 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 13228 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13229 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 13230 { 13231 int32_t nsegs; 13232 int32_t newsize = 0; /* automatic sockbuf scaling */ 13233 struct tcp_rack *rack; 13234 #ifdef NETFLIX_SB_LIMITS 13235 u_int mcnt, appended; 13236 #endif 13237 13238 /* 13239 * If last ACK falls within this segment's sequence numbers, record 13240 * the timestamp. 
NOTE that the test is modified according to the 13241 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 13242 */ 13243 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 13244 return (0); 13245 } 13246 if (tiwin && tiwin != tp->snd_wnd) { 13247 return (0); 13248 } 13249 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 13250 return (0); 13251 } 13252 if (__predict_false((to->to_flags & TOF_TS) && 13253 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 13254 return (0); 13255 } 13256 if (__predict_false((th->th_ack != tp->snd_una))) { 13257 return (0); 13258 } 13259 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 13260 return (0); 13261 } 13262 if ((to->to_flags & TOF_TS) != 0 && 13263 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 13264 tp->ts_recent_age = tcp_ts_getticks(); 13265 tp->ts_recent = to->to_tsval; 13266 } 13267 rack = (struct tcp_rack *)tp->t_fb_ptr; 13268 /* 13269 * This is a pure, in-sequence data packet with nothing on the 13270 * reassembly queue and we have enough buffer space to take it. 13271 */ 13272 nsegs = max(1, m->m_pkthdr.lro_nsegs); 13273 13274 #ifdef NETFLIX_SB_LIMITS 13275 if (so->so_rcv.sb_shlim) { 13276 mcnt = m_memcnt(m); 13277 appended = 0; 13278 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 13279 CFO_NOSLEEP, NULL) == false) { 13280 counter_u64_add(tcp_sb_shlim_fails, 1); 13281 m_freem(m); 13282 return (1); 13283 } 13284 } 13285 #endif 13286 /* Clean receiver SACK report if present */ 13287 if (tp->rcv_numsacks) 13288 tcp_clean_sackreport(tp); 13289 KMOD_TCPSTAT_INC(tcps_preddat); 13290 tp->rcv_nxt += tlen; 13291 if (tlen && 13292 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 13293 (tp->t_fbyte_in == 0)) { 13294 tp->t_fbyte_in = ticks; 13295 if (tp->t_fbyte_in == 0) 13296 tp->t_fbyte_in = 1; 13297 if (tp->t_fbyte_out && tp->t_fbyte_in) 13298 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 13299 } 13300 /* 13301 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 13302 */ 13303 tp->snd_wl1 = th->th_seq; 13304 /* 13305 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 13306 */ 13307 tp->rcv_up = tp->rcv_nxt; 13308 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 13309 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 13310 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 13311 13312 /* Add data to socket buffer. */ 13313 SOCKBUF_LOCK(&so->so_rcv); 13314 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13315 m_freem(m); 13316 } else { 13317 /* 13318 * Set new socket buffer size. Give up when limit is 13319 * reached. 13320 */ 13321 if (newsize) 13322 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 13323 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 13324 m_adj(m, drop_hdrlen); /* delayed header drop */ 13325 #ifdef NETFLIX_SB_LIMITS 13326 appended = 13327 #endif 13328 sbappendstream_locked(&so->so_rcv, m, 0); 13329 ctf_calc_rwin(so, tp); 13330 } 13331 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 13332 /* NB: sorwakeup_locked() does an implicit unlock. */ 13333 sorwakeup_locked(so); 13334 #ifdef NETFLIX_SB_LIMITS 13335 if (so->so_rcv.sb_shlim && mcnt != appended) 13336 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 13337 #endif 13338 rack_handle_delayed_ack(tp, rack, tlen, 0); 13339 if (tp->snd_una == tp->snd_max) 13340 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 13341 return (1); 13342 } 13343 13344 /* 13345 * This subfunction is used to try to highly optimize the 13346 * fast path. We again allow window updates that are 13347 * in sequence to remain in the fast-path. We also add 13348 * in the __predict's to attempt to help the compiler. 
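 * The checks below punt to the slow path for anything that is not a
 * plain in-window ACK: an ack that is old or beyond snd_max, a zero
 * window, pending NEEDSYN/NEEDFIN work, a stale timestamp, an ack
 * arriving while we are in recovery, or a scoreboard that still
 * holds SACKed data (outstanding holes).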
13349 * Note that if we return a 0, then we can *not* process 13350 * it and the caller should push the packet into the 13351 * slow-path. 13352 */ 13353 static int 13354 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 13355 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13356 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 13357 { 13358 int32_t acked; 13359 int32_t nsegs; 13360 int32_t under_pacing = 0; 13361 struct tcp_rack *rack; 13362 13363 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 13364 /* Old ack, behind (or duplicate to) the last one rcv'd */ 13365 return (0); 13366 } 13367 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 13368 /* Above what we have sent? */ 13369 return (0); 13370 } 13371 if (__predict_false(tiwin == 0)) { 13372 /* zero window */ 13373 return (0); 13374 } 13375 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 13376 /* We need a SYN or a FIN, unlikely.. */ 13377 return (0); 13378 } 13379 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 13380 /* Timestamp is behind .. old ack with seq wrap? */ 13381 return (0); 13382 } 13383 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 13384 /* Still recovering */ 13385 return (0); 13386 } 13387 rack = (struct tcp_rack *)tp->t_fb_ptr; 13388 if (rack->r_ctl.rc_sacked) { 13389 /* We have sack holes on our scoreboard */ 13390 return (0); 13391 } 13392 /* Ok if we reach here, we can process a fast-ack */ 13393 if (rack->gp_ready && 13394 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 13395 under_pacing = 1; 13396 } 13397 nsegs = max(1, m->m_pkthdr.lro_nsegs); 13398 rack_log_ack(tp, to, th, 0, 0, NULL, NULL); 13399 /* Did the window get updated? */ 13400 if (tiwin != tp->snd_wnd) { 13401 tp->snd_wnd = tiwin; 13402 rack_validate_fo_sendwin_up(tp, rack); 13403 tp->snd_wl1 = th->th_seq; 13404 if (tp->snd_wnd > tp->max_sndwnd) 13405 tp->max_sndwnd = tp->snd_wnd; 13406 } 13407 /* Do we exit persists? */ 13408 if ((rack->rc_in_persist != 0) && 13409 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 13410 rack->r_ctl.rc_pace_min_segs))) { 13411 rack_exit_persist(tp, rack, cts); 13412 } 13413 /* Do we enter persists? */ 13414 if ((rack->rc_in_persist == 0) && 13415 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 13416 TCPS_HAVEESTABLISHED(tp->t_state) && 13417 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 13418 sbavail(&tptosocket(tp)->so_snd) && 13419 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 13420 /* 13421 * Here the rwnd is less than 13422 * the pacing size, we are established, 13423 * nothing is outstanding, and there is 13424 * data to send. Enter persists. 13425 */ 13426 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, th->th_ack); 13427 } 13428 /* 13429 * If last ACK falls within this segment's sequence numbers, record 13430 * the timestamp. NOTE that the test is modified according to the 13431 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 13432 */ 13433 if ((to->to_flags & TOF_TS) != 0 && 13434 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 13435 tp->ts_recent_age = tcp_ts_getticks(); 13436 tp->ts_recent = to->to_tsval; 13437 } 13438 /* 13439 * This is a pure ack for outstanding data. 13440 */ 13441 KMOD_TCPSTAT_INC(tcps_predack); 13442 13443 /* 13444 * "bad retransmit" recovery. 
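	 *
	 * If we retransmitted exactly once and this ACK arrived while
	 * ticks is still inside t_badrxtwin, the earlier RTO is judged
	 * spurious and rack_cong_signal(CC_RTO_ERR) undoes the cwnd
	 * reduction.  Note this path only runs when the peer did not
	 * supply timestamps; TF_PREVVALID is cleared either way so the
	 * check fires at most once per RTO episode.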
13445 */ 13446 if ((tp->t_flags & TF_PREVVALID) && 13447 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 13448 tp->t_flags &= ~TF_PREVVALID; 13449 if (tp->t_rxtshift == 1 && 13450 (int)(ticks - tp->t_badrxtwin) < 0) 13451 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 13452 } 13453 /* 13454 * Recalculate the transmit timer / rtt. 13455 * 13456 * Some boxes send broken timestamp replies during the SYN+ACK 13457 * phase, ignore timestamps of 0 or we could calculate a huge RTT 13458 * and blow up the retransmit timer. 13459 */ 13460 acked = BYTES_THIS_ACK(tp, th); 13461 13462 #ifdef TCP_HHOOK 13463 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 13464 hhook_run_tcp_est_in(tp, th, to); 13465 #endif 13466 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 13467 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 13468 if (acked) { 13469 struct mbuf *mfree; 13470 13471 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 13472 SOCKBUF_LOCK(&so->so_snd); 13473 mfree = sbcut_locked(&so->so_snd, acked); 13474 tp->snd_una = th->th_ack; 13475 /* Note we want to hold the sb lock through the sendmap adjust */ 13476 rack_adjust_sendmap_head(rack, &so->so_snd); 13477 /* Wake up the socket if we have room to write more */ 13478 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 13479 sowwakeup_locked(so); 13480 m_freem(mfree); 13481 tp->t_rxtshift = 0; 13482 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 13483 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 13484 rack->rc_tlp_in_progress = 0; 13485 rack->r_ctl.rc_tlp_cnt_out = 0; 13486 /* 13487 * If it is the RXT timer we want to 13488 * stop it, so we can restart a TLP. 13489 */ 13490 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 13491 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13492 13493 #ifdef TCP_REQUEST_TRK 13494 rack_req_check_for_comp(rack, th->th_ack); 13495 #endif 13496 } 13497 /* 13498 * Let the congestion control algorithm update congestion control 13499 * related information. This typically means increasing the 13500 * congestion window. 13501 */ 13502 if (tp->snd_wnd < ctf_outstanding(tp)) { 13503 /* The peer collapsed the window */ 13504 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); 13505 } else if (rack->rc_has_collapsed) 13506 rack_un_collapse_window(rack, __LINE__); 13507 if ((rack->r_collapse_point_valid) && 13508 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point))) 13509 rack->r_collapse_point_valid = 0; 13510 /* 13511 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 13512 */ 13513 tp->snd_wl2 = th->th_ack; 13514 tp->t_dupacks = 0; 13515 m_freem(m); 13516 /* ND6_HINT(tp); *//* Some progress has been made. */ 13517 13518 /* 13519 * If all outstanding data are acked, stop retransmit timer, 13520 * otherwise restart timer using current (possibly backed-off) 13521 * value. If process is waiting for space, wakeup/selwakeup/signal. 13522 * If data are ready to send, let tcp_output decide between more 13523 * output or persist. 
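	 *
	 * In this stack that work is split up below: a bottom-drag
	 * check when we were pacing under the dynamic goodput gain,
	 * clearing the recovery/idle bookkeeping once snd_una has
	 * caught up with snd_max, possibly crediting the fast-output
	 * block with the newly acked bytes, and finally setting
	 * r_wanted_output if the send buffer still holds data.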
13524 */ 13525 if (under_pacing && 13526 (rack->use_fixed_rate == 0) && 13527 (rack->in_probe_rtt == 0) && 13528 rack->rc_gp_dyn_mul && 13529 rack->rc_always_pace) { 13530 /* Check if we are dragging bottom */ 13531 rack_check_bottom_drag(tp, rack, so); 13532 } 13533 if (tp->snd_una == tp->snd_max) { 13534 tp->t_flags &= ~TF_PREVVALID; 13535 rack->r_ctl.retran_during_recovery = 0; 13536 rack->rc_suspicious = 0; 13537 rack->r_ctl.dsack_byte_cnt = 0; 13538 rack->r_ctl.idle_snd_una = tp->snd_una; 13539 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 13540 if (rack->r_ctl.rc_went_idle_time == 0) 13541 rack->r_ctl.rc_went_idle_time = 1; 13542 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 13543 if (sbavail(&tptosocket(tp)->so_snd) == 0) 13544 tp->t_acktime = 0; 13545 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13546 } 13547 if (acked && rack->r_fast_output) 13548 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 13549 if (sbavail(&so->so_snd)) { 13550 rack->r_wanted_output = 1; 13551 } 13552 return (1); 13553 } 13554 13555 /* 13556 * Return value of 1, the TCB is unlocked and most 13557 * likely gone, return value of 0, the TCP is still 13558 * locked. 13559 */ 13560 static int 13561 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 13562 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13563 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13564 { 13565 int32_t ret_val = 0; 13566 int32_t orig_tlen = tlen; 13567 int32_t todrop; 13568 int32_t ourfinisacked = 0; 13569 struct tcp_rack *rack; 13570 13571 INP_WLOCK_ASSERT(tptoinpcb(tp)); 13572 13573 ctf_calc_rwin(so, tp); 13574 /* 13575 * If the state is SYN_SENT: if seg contains an ACK, but not for our 13576 * SYN, drop the input. if seg contains a RST, then drop the 13577 * connection. if seg does not contain SYN, then drop it. Otherwise 13578 * this is an acceptable SYN segment initialize tp->rcv_nxt and 13579 * tp->irs if seg contains ack then advance tp->snd_una if seg 13580 * contains an ECE and ECN support is enabled, the stream is ECN 13581 * capable. if SYN has been acked change to ESTABLISHED else 13582 * SYN_RCVD state arrange for segment to be acked (eventually) 13583 * continue processing rest of data/controls. 13584 */ 13585 if ((thflags & TH_ACK) && 13586 (SEQ_LEQ(th->th_ack, tp->iss) || 13587 SEQ_GT(th->th_ack, tp->snd_max))) { 13588 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13589 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13590 return (1); 13591 } 13592 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 13593 TCP_PROBE5(connect__refused, NULL, tp, 13594 mtod(m, const char *), tp, th); 13595 tp = tcp_drop(tp, ECONNREFUSED); 13596 ctf_do_drop(m, tp); 13597 return (1); 13598 } 13599 if (thflags & TH_RST) { 13600 ctf_do_drop(m, tp); 13601 return (1); 13602 } 13603 if (!(thflags & TH_SYN)) { 13604 ctf_do_drop(m, tp); 13605 return (1); 13606 } 13607 tp->irs = th->th_seq; 13608 tcp_rcvseqinit(tp); 13609 rack = (struct tcp_rack *)tp->t_fb_ptr; 13610 if (thflags & TH_ACK) { 13611 int tfo_partial = 0; 13612 13613 KMOD_TCPSTAT_INC(tcps_connects); 13614 soisconnected(so); 13615 #ifdef MAC 13616 mac_socketpeer_set_from_mbuf(m, so); 13617 #endif 13618 /* Do window scaling on this connection? 
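		 * Scaling is applied only when both TF_REQ_SCALE (we
		 * offered the option in our SYN) and TF_RCVD_SCALE (the
		 * peer's SYN|ACK carried it as well) are set; otherwise
		 * both sides stay with an unscaled 16-bit window.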
*/ 13619 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 13620 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 13621 tp->rcv_scale = tp->request_r_scale; 13622 } 13623 tp->rcv_adv += min(tp->rcv_wnd, 13624 TCP_MAXWIN << tp->rcv_scale); 13625 /* 13626 * If not all the data that was sent in the TFO SYN 13627 * has been acked, resend the remainder right away. 13628 */ 13629 if ((tp->t_flags & TF_FASTOPEN) && 13630 (tp->snd_una != tp->snd_max)) { 13631 /* Was it a partial ack? */ 13632 if (SEQ_LT(th->th_ack, tp->snd_max)) 13633 tfo_partial = 1; 13634 } 13635 /* 13636 * If there's data, delay ACK; if there's also a FIN ACKNOW 13637 * will be turned on later. 13638 */ 13639 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 13640 rack_timer_cancel(tp, rack, 13641 rack->r_ctl.rc_rcvtime, __LINE__); 13642 tp->t_flags |= TF_DELACK; 13643 } else { 13644 rack->r_wanted_output = 1; 13645 tp->t_flags |= TF_ACKNOW; 13646 } 13647 13648 tcp_ecn_input_syn_sent(tp, thflags, iptos); 13649 13650 if (SEQ_GT(th->th_ack, tp->snd_una)) { 13651 /* 13652 * We advance snd_una for the 13653 * fast open case. If th_ack is 13654 * acknowledging data beyond 13655 * snd_una we can't just call 13656 * ack-processing since the 13657 * data stream in our send-map 13658 * will start at snd_una + 1 (one 13659 * beyond the SYN). If its just 13660 * equal we don't need to do that 13661 * and there is no send_map. 13662 */ 13663 tp->snd_una++; 13664 if (tfo_partial && (SEQ_GT(tp->snd_max, tp->snd_una))) { 13665 /* 13666 * We sent a SYN with data, and thus have a 13667 * sendmap entry with a SYN set. Lets find it 13668 * and take off the send bit and the byte and 13669 * set it up to be what we send (send it next). 13670 */ 13671 struct rack_sendmap *rsm; 13672 13673 rsm = tqhash_min(rack->r_ctl.tqh); 13674 if (rsm) { 13675 if (rsm->r_flags & RACK_HAS_SYN) { 13676 rsm->r_flags &= ~RACK_HAS_SYN; 13677 rsm->r_start++; 13678 } 13679 rack->r_ctl.rc_resend = rsm; 13680 } 13681 } 13682 } 13683 /* 13684 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 13685 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 13686 */ 13687 tp->t_starttime = ticks; 13688 if (tp->t_flags & TF_NEEDFIN) { 13689 tcp_state_change(tp, TCPS_FIN_WAIT_1); 13690 tp->t_flags &= ~TF_NEEDFIN; 13691 thflags &= ~TH_SYN; 13692 } else { 13693 tcp_state_change(tp, TCPS_ESTABLISHED); 13694 TCP_PROBE5(connect__established, NULL, tp, 13695 mtod(m, const char *), tp, th); 13696 rack_cc_conn_init(tp); 13697 } 13698 } else { 13699 /* 13700 * Received initial SYN in SYN-SENT[*] state => simultaneous 13701 * open. If segment contains CC option and there is a 13702 * cached CC, apply TAO test. If it succeeds, connection is * 13703 * half-synchronized. Otherwise, do 3-way handshake: 13704 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 13705 * there was no CC option, clear cached CC value. 13706 */ 13707 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); 13708 tcp_state_change(tp, TCPS_SYN_RECEIVED); 13709 } 13710 /* 13711 * Advance th->th_seq to correspond to first data byte. If data, 13712 * trim to stay within window, dropping FIN if necessary. 13713 */ 13714 th->th_seq++; 13715 if (tlen > tp->rcv_wnd) { 13716 todrop = tlen - tp->rcv_wnd; 13717 m_adj(m, -todrop); 13718 tlen = tp->rcv_wnd; 13719 thflags &= ~TH_FIN; 13720 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 13721 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 13722 } 13723 tp->snd_wl1 = th->th_seq - 1; 13724 tp->rcv_up = th->th_seq; 13725 /* 13726 * Client side of transaction: already sent SYN and data. 
If the 13727 * remote host used T/TCP to validate the SYN, our data will be 13728 * ACK'd; if so, enter normal data segment processing in the middle 13729 * of step 5, ack processing. Otherwise, goto step 6. 13730 */ 13731 if (thflags & TH_ACK) { 13732 /* For syn-sent we need to possibly update the rtt */ 13733 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 13734 uint32_t t, mcts; 13735 13736 mcts = tcp_ts_getticks(); 13737 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 13738 if (!tp->t_rttlow || tp->t_rttlow > t) 13739 tp->t_rttlow = t; 13740 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 13741 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 13742 tcp_rack_xmit_timer_commit(rack, tp); 13743 } 13744 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) 13745 return (ret_val); 13746 /* We may have changed to FIN_WAIT_1 above */ 13747 if (tp->t_state == TCPS_FIN_WAIT_1) { 13748 /* 13749 * In FIN_WAIT_1 STATE in addition to the processing 13750 * for the ESTABLISHED state if our FIN is now 13751 * acknowledged then enter FIN_WAIT_2. 13752 */ 13753 if (ourfinisacked) { 13754 /* 13755 * If we can't receive any more data, then 13756 * closing user can proceed. Starting the 13757 * timer is contrary to the specification, 13758 * but if we don't get a FIN we'll hang 13759 * forever. 13760 * 13761 * XXXjl: we should release the tp also, and 13762 * use a compressed state. 13763 */ 13764 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13765 soisdisconnected(so); 13766 tcp_timer_activate(tp, TT_2MSL, 13767 (tcp_fast_finwait2_recycle ? 13768 tcp_finwait2_timeout : 13769 TP_MAXIDLE(tp))); 13770 } 13771 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13772 } 13773 } 13774 } 13775 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13776 tiwin, thflags, nxt_pkt)); 13777 } 13778 13779 /* 13780 * Return value of 1, the TCB is unlocked and most 13781 * likely gone, return value of 0, the TCP is still 13782 * locked. 13783 */ 13784 static int 13785 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 13786 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13787 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13788 { 13789 struct tcp_rack *rack; 13790 int32_t orig_tlen = tlen; 13791 int32_t ret_val = 0; 13792 int32_t ourfinisacked = 0; 13793 13794 rack = (struct tcp_rack *)tp->t_fb_ptr; 13795 ctf_calc_rwin(so, tp); 13796 if ((thflags & TH_RST) || 13797 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13798 return (__ctf_process_rst(m, th, so, tp, 13799 &rack->r_ctl.challenge_ack_ts, 13800 &rack->r_ctl.challenge_ack_cnt)); 13801 if ((thflags & TH_ACK) && 13802 (SEQ_LEQ(th->th_ack, tp->snd_una) || 13803 SEQ_GT(th->th_ack, tp->snd_max))) { 13804 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13805 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13806 return (1); 13807 } 13808 if (tp->t_flags & TF_FASTOPEN) { 13809 /* 13810 * When a TFO connection is in SYN_RECEIVED, the 13811 * only valid packets are the initial SYN, a 13812 * retransmit/copy of the initial SYN (possibly with 13813 * a subset of the original data), a valid ACK, a 13814 * FIN, or a RST. 
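		 *
		 * The checks below enforce that: a segment carrying both
		 * SYN and ACK is answered with a reset, a bare SYN (a
		 * retransmitted copy of the peer's original SYN) is
		 * dropped while a retransmit, TLP or RACK timer is
		 * pending, and anything without ACK, FIN or RST is
		 * dropped as well.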
13815 */ 13816 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 13817 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13818 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13819 return (1); 13820 } else if (thflags & TH_SYN) { 13821 /* non-initial SYN is ignored */ 13822 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 13823 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 13824 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 13825 ctf_do_drop(m, NULL); 13826 return (0); 13827 } 13828 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 13829 ctf_do_drop(m, NULL); 13830 return (0); 13831 } 13832 } 13833 13834 /* 13835 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13836 * it's less than ts_recent, drop it. 13837 */ 13838 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13839 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13840 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13841 return (ret_val); 13842 } 13843 /* 13844 * In the SYN-RECEIVED state, validate that the packet belongs to 13845 * this connection before trimming the data to fit the receive 13846 * window. Check the sequence number versus IRS since we know the 13847 * sequence numbers haven't wrapped. This is a partial fix for the 13848 * "LAND" DoS attack. 13849 */ 13850 if (SEQ_LT(th->th_seq, tp->irs)) { 13851 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13852 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13853 return (1); 13854 } 13855 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 13856 &rack->r_ctl.challenge_ack_ts, 13857 &rack->r_ctl.challenge_ack_cnt)) { 13858 return (ret_val); 13859 } 13860 /* 13861 * If last ACK falls within this segment's sequence numbers, record 13862 * its timestamp. NOTE: 1) That the test incorporates suggestions 13863 * from the latest proposal of the tcplw@cray.com list (Braden 13864 * 1993/04/26). 2) That updating only on newer timestamps interferes 13865 * with our earlier PAWS tests, so this check should be solely 13866 * predicated on the sequence space of this segment. 3) That we 13867 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13868 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13869 * SEG.Len, This modified check allows us to overcome RFC1323's 13870 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13871 * p.869. In such cases, we can still calculate the RTT correctly 13872 * when RCV.NXT == Last.ACK.Sent. 13873 */ 13874 if ((to->to_flags & TOF_TS) != 0 && 13875 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13876 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13877 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13878 tp->ts_recent_age = tcp_ts_getticks(); 13879 tp->ts_recent = to->to_tsval; 13880 } 13881 tp->snd_wnd = tiwin; 13882 rack_validate_fo_sendwin_up(tp, rack); 13883 /* 13884 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13885 * is on (half-synchronized state), then queue data for later 13886 * processing; else drop segment and return. 13887 */ 13888 if ((thflags & TH_ACK) == 0) { 13889 if (tp->t_flags & TF_FASTOPEN) { 13890 rack_cc_conn_init(tp); 13891 } 13892 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13893 tiwin, thflags, nxt_pkt)); 13894 } 13895 KMOD_TCPSTAT_INC(tcps_connects); 13896 if (tp->t_flags & TF_SONOTCONN) { 13897 tp->t_flags &= ~TF_SONOTCONN; 13898 soisconnected(so); 13899 } 13900 /* Do window scaling? 
*/ 13901 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 13902 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 13903 tp->rcv_scale = tp->request_r_scale; 13904 } 13905 /* 13906 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 13907 * FIN-WAIT-1 13908 */ 13909 tp->t_starttime = ticks; 13910 if ((tp->t_flags & TF_FASTOPEN) && tp->t_tfo_pending) { 13911 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 13912 tp->t_tfo_pending = NULL; 13913 } 13914 if (tp->t_flags & TF_NEEDFIN) { 13915 tcp_state_change(tp, TCPS_FIN_WAIT_1); 13916 tp->t_flags &= ~TF_NEEDFIN; 13917 } else { 13918 tcp_state_change(tp, TCPS_ESTABLISHED); 13919 TCP_PROBE5(accept__established, NULL, tp, 13920 mtod(m, const char *), tp, th); 13921 /* 13922 * TFO connections call cc_conn_init() during SYN 13923 * processing. Calling it again here for such connections 13924 * is not harmless as it would undo the snd_cwnd reduction 13925 * that occurs when a TFO SYN|ACK is retransmitted. 13926 */ 13927 if (!(tp->t_flags & TF_FASTOPEN)) 13928 rack_cc_conn_init(tp); 13929 } 13930 /* 13931 * Account for the ACK of our SYN prior to 13932 * regular ACK processing below, except for 13933 * simultaneous SYN, which is handled later. 13934 */ 13935 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 13936 tp->snd_una++; 13937 /* 13938 * If segment contains data or ACK, will call tcp_reass() later; if 13939 * not, do so now to pass queued data to user. 13940 */ 13941 if (tlen == 0 && (thflags & TH_FIN) == 0) { 13942 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 13943 (struct mbuf *)0); 13944 if (tp->t_flags & TF_WAKESOR) { 13945 tp->t_flags &= ~TF_WAKESOR; 13946 /* NB: sorwakeup_locked() does an implicit unlock. */ 13947 sorwakeup_locked(so); 13948 } 13949 } 13950 tp->snd_wl1 = th->th_seq - 1; 13951 /* For syn-recv we need to possibly update the rtt */ 13952 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 13953 uint32_t t, mcts; 13954 13955 mcts = tcp_ts_getticks(); 13956 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 13957 if (!tp->t_rttlow || tp->t_rttlow > t) 13958 tp->t_rttlow = t; 13959 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); 13960 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 13961 tcp_rack_xmit_timer_commit(rack, tp); 13962 } 13963 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13964 return (ret_val); 13965 } 13966 if (tp->t_state == TCPS_FIN_WAIT_1) { 13967 /* We could have went to FIN_WAIT_1 (or EST) above */ 13968 /* 13969 * In FIN_WAIT_1 STATE in addition to the processing for the 13970 * ESTABLISHED state if our FIN is now acknowledged then 13971 * enter FIN_WAIT_2. 13972 */ 13973 if (ourfinisacked) { 13974 /* 13975 * If we can't receive any more data, then closing 13976 * user can proceed. Starting the timer is contrary 13977 * to the specification, but if we don't get a FIN 13978 * we'll hang forever. 13979 * 13980 * XXXjl: we should release the tp also, and use a 13981 * compressed state. 13982 */ 13983 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13984 soisdisconnected(so); 13985 tcp_timer_activate(tp, TT_2MSL, 13986 (tcp_fast_finwait2_recycle ? 
13987 tcp_finwait2_timeout : 13988 TP_MAXIDLE(tp))); 13989 } 13990 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13991 } 13992 } 13993 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13994 tiwin, thflags, nxt_pkt)); 13995 } 13996 13997 /* 13998 * Return value of 1, the TCB is unlocked and most 13999 * likely gone, return value of 0, the TCP is still 14000 * locked. 14001 */ 14002 static int 14003 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 14004 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14005 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14006 { 14007 int32_t ret_val = 0; 14008 int32_t orig_tlen = tlen; 14009 struct tcp_rack *rack; 14010 14011 /* 14012 * Header prediction: check for the two common cases of a 14013 * uni-directional data xfer. If the packet has no control flags, 14014 * is in-sequence, the window didn't change and we're not 14015 * retransmitting, it's a candidate. If the length is zero and the 14016 * ack moved forward, we're the sender side of the xfer. Just free 14017 * the data acked & wake any higher level process that was blocked 14018 * waiting for space. If the length is non-zero and the ack didn't 14019 * move, we're the receiver side. If we're getting packets in-order 14020 * (the reassembly queue is empty), add the data toc The socket 14021 * buffer and note that we need a delayed ack. Make sure that the 14022 * hidden state-flags are also off. Since we check for 14023 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 14024 */ 14025 rack = (struct tcp_rack *)tp->t_fb_ptr; 14026 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 14027 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) && 14028 __predict_true(SEGQ_EMPTY(tp)) && 14029 __predict_true(th->th_seq == tp->rcv_nxt)) { 14030 if (tlen == 0) { 14031 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 14032 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 14033 return (0); 14034 } 14035 } else { 14036 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 14037 tiwin, nxt_pkt, iptos)) { 14038 return (0); 14039 } 14040 } 14041 } 14042 ctf_calc_rwin(so, tp); 14043 14044 if ((thflags & TH_RST) || 14045 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14046 return (__ctf_process_rst(m, th, so, tp, 14047 &rack->r_ctl.challenge_ack_ts, 14048 &rack->r_ctl.challenge_ack_cnt)); 14049 14050 /* 14051 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14052 * synchronized state. 14053 */ 14054 if (thflags & TH_SYN) { 14055 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14056 return (ret_val); 14057 } 14058 /* 14059 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14060 * it's less than ts_recent, drop it. 14061 */ 14062 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14063 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14064 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14065 return (ret_val); 14066 } 14067 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14068 &rack->r_ctl.challenge_ack_ts, 14069 &rack->r_ctl.challenge_ack_cnt)) { 14070 return (ret_val); 14071 } 14072 /* 14073 * If last ACK falls within this segment's sequence numbers, record 14074 * its timestamp. NOTE: 1) That the test incorporates suggestions 14075 * from the latest proposal of the tcplw@cray.com list (Braden 14076 * 1993/04/26). 
2) That updating only on newer timestamps interferes 14077 * with our earlier PAWS tests, so this check should be solely 14078 * predicated on the sequence space of this segment. 3) That we 14079 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14080 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14081 * SEG.Len, This modified check allows us to overcome RFC1323's 14082 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14083 * p.869. In such cases, we can still calculate the RTT correctly 14084 * when RCV.NXT == Last.ACK.Sent. 14085 */ 14086 if ((to->to_flags & TOF_TS) != 0 && 14087 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14088 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14089 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14090 tp->ts_recent_age = tcp_ts_getticks(); 14091 tp->ts_recent = to->to_tsval; 14092 } 14093 /* 14094 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14095 * is on (half-synchronized state), then queue data for later 14096 * processing; else drop segment and return. 14097 */ 14098 if ((thflags & TH_ACK) == 0) { 14099 if (tp->t_flags & TF_NEEDSYN) { 14100 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14101 tiwin, thflags, nxt_pkt)); 14102 14103 } else if (tp->t_flags & TF_ACKNOW) { 14104 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 14105 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 14106 return (ret_val); 14107 } else { 14108 ctf_do_drop(m, NULL); 14109 return (0); 14110 } 14111 } 14112 /* 14113 * Ack processing. 14114 */ 14115 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val, orig_tlen)) { 14116 return (ret_val); 14117 } 14118 if (sbavail(&so->so_snd)) { 14119 if (ctf_progress_timeout_check(tp, true)) { 14120 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 14121 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14122 return (1); 14123 } 14124 } 14125 /* State changes only happen in rack_process_data() */ 14126 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14127 tiwin, thflags, nxt_pkt)); 14128 } 14129 14130 /* 14131 * Return value of 1, the TCB is unlocked and most 14132 * likely gone, return value of 0, the TCP is still 14133 * locked. 14134 */ 14135 static int 14136 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 14137 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14138 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14139 { 14140 int32_t ret_val = 0; 14141 int32_t orig_tlen = tlen; 14142 struct tcp_rack *rack; 14143 14144 rack = (struct tcp_rack *)tp->t_fb_ptr; 14145 ctf_calc_rwin(so, tp); 14146 if ((thflags & TH_RST) || 14147 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14148 return (__ctf_process_rst(m, th, so, tp, 14149 &rack->r_ctl.challenge_ack_ts, 14150 &rack->r_ctl.challenge_ack_cnt)); 14151 /* 14152 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14153 * synchronized state. 14154 */ 14155 if (thflags & TH_SYN) { 14156 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14157 return (ret_val); 14158 } 14159 /* 14160 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14161 * it's less than ts_recent, drop it. 
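	 *
	 * ctf_ts_check() also applies the usual PAWS escape hatch, as
	 * in the base stack: if ts_recent is older than TCP_PAWS_IDLE
	 * (roughly 24 days) it is invalidated instead of the segment
	 * being dropped, so a peer that lost its timestamp state is
	 * not locked out.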
14162 */ 14163 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14164 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14165 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14166 return (ret_val); 14167 } 14168 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14169 &rack->r_ctl.challenge_ack_ts, 14170 &rack->r_ctl.challenge_ack_cnt)) { 14171 return (ret_val); 14172 } 14173 /* 14174 * If last ACK falls within this segment's sequence numbers, record 14175 * its timestamp. NOTE: 1) That the test incorporates suggestions 14176 * from the latest proposal of the tcplw@cray.com list (Braden 14177 * 1993/04/26). 2) That updating only on newer timestamps interferes 14178 * with our earlier PAWS tests, so this check should be solely 14179 * predicated on the sequence space of this segment. 3) That we 14180 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14181 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14182 * SEG.Len, This modified check allows us to overcome RFC1323's 14183 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14184 * p.869. In such cases, we can still calculate the RTT correctly 14185 * when RCV.NXT == Last.ACK.Sent. 14186 */ 14187 if ((to->to_flags & TOF_TS) != 0 && 14188 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14189 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14190 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14191 tp->ts_recent_age = tcp_ts_getticks(); 14192 tp->ts_recent = to->to_tsval; 14193 } 14194 /* 14195 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14196 * is on (half-synchronized state), then queue data for later 14197 * processing; else drop segment and return. 14198 */ 14199 if ((thflags & TH_ACK) == 0) { 14200 if (tp->t_flags & TF_NEEDSYN) { 14201 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14202 tiwin, thflags, nxt_pkt)); 14203 14204 } else if (tp->t_flags & TF_ACKNOW) { 14205 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 14206 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 14207 return (ret_val); 14208 } else { 14209 ctf_do_drop(m, NULL); 14210 return (0); 14211 } 14212 } 14213 /* 14214 * Ack processing. 
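	 *
	 * ourfinisacked is not needed in CLOSE_WAIT (we have not sent
	 * a FIN yet), so NULL is passed; a non-zero return means
	 * rack_process_ack() already disposed of the segment and we
	 * simply propagate ret_val.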
14215 */ 14216 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val, orig_tlen)) { 14217 return (ret_val); 14218 } 14219 if (sbavail(&so->so_snd)) { 14220 if (ctf_progress_timeout_check(tp, true)) { 14221 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 14222 tp, tick, PROGRESS_DROP, __LINE__); 14223 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14224 return (1); 14225 } 14226 } 14227 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14228 tiwin, thflags, nxt_pkt)); 14229 } 14230 14231 static int 14232 rack_check_data_after_close(struct mbuf *m, 14233 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 14234 { 14235 struct tcp_rack *rack; 14236 14237 rack = (struct tcp_rack *)tp->t_fb_ptr; 14238 if (rack->rc_allow_data_af_clo == 0) { 14239 close_now: 14240 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 14241 /* tcp_close will kill the inp pre-log the Reset */ 14242 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 14243 tp = tcp_close(tp); 14244 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 14245 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 14246 return (1); 14247 } 14248 if (sbavail(&so->so_snd) == 0) 14249 goto close_now; 14250 /* Ok we allow data that is ignored and a followup reset */ 14251 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 14252 tp->rcv_nxt = th->th_seq + *tlen; 14253 tp->t_flags2 |= TF2_DROP_AF_DATA; 14254 rack->r_wanted_output = 1; 14255 *tlen = 0; 14256 return (0); 14257 } 14258 14259 /* 14260 * Return value of 1, the TCB is unlocked and most 14261 * likely gone, return value of 0, the TCP is still 14262 * locked. 14263 */ 14264 static int 14265 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 14266 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14267 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14268 { 14269 int32_t ret_val = 0; 14270 int32_t orig_tlen = tlen; 14271 int32_t ourfinisacked = 0; 14272 struct tcp_rack *rack; 14273 14274 rack = (struct tcp_rack *)tp->t_fb_ptr; 14275 ctf_calc_rwin(so, tp); 14276 14277 if ((thflags & TH_RST) || 14278 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14279 return (__ctf_process_rst(m, th, so, tp, 14280 &rack->r_ctl.challenge_ack_ts, 14281 &rack->r_ctl.challenge_ack_cnt)); 14282 /* 14283 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14284 * synchronized state. 14285 */ 14286 if (thflags & TH_SYN) { 14287 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14288 return (ret_val); 14289 } 14290 /* 14291 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14292 * it's less than ts_recent, drop it. 14293 */ 14294 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14295 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14296 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14297 return (ret_val); 14298 } 14299 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14300 &rack->r_ctl.challenge_ack_ts, 14301 &rack->r_ctl.challenge_ack_cnt)) { 14302 return (ret_val); 14303 } 14304 /* 14305 * If new data are received on a connection after the user processes 14306 * are gone, then RST the other end. 14307 */ 14308 if ((tp->t_flags & TF_CLOSED) && tlen && 14309 rack_check_data_after_close(m, tp, &tlen, th, so)) 14310 return (1); 14311 /* 14312 * If last ACK falls within this segment's sequence numbers, record 14313 * its timestamp. 
NOTE: 1) That the test incorporates suggestions 14314 * from the latest proposal of the tcplw@cray.com list (Braden 14315 * 1993/04/26). 2) That updating only on newer timestamps interferes 14316 * with our earlier PAWS tests, so this check should be solely 14317 * predicated on the sequence space of this segment. 3) That we 14318 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14319 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14320 * SEG.Len, This modified check allows us to overcome RFC1323's 14321 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14322 * p.869. In such cases, we can still calculate the RTT correctly 14323 * when RCV.NXT == Last.ACK.Sent. 14324 */ 14325 if ((to->to_flags & TOF_TS) != 0 && 14326 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14327 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14328 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14329 tp->ts_recent_age = tcp_ts_getticks(); 14330 tp->ts_recent = to->to_tsval; 14331 } 14332 /* 14333 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14334 * is on (half-synchronized state), then queue data for later 14335 * processing; else drop segment and return. 14336 */ 14337 if ((thflags & TH_ACK) == 0) { 14338 if (tp->t_flags & TF_NEEDSYN) { 14339 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14340 tiwin, thflags, nxt_pkt)); 14341 } else if (tp->t_flags & TF_ACKNOW) { 14342 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 14343 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 14344 return (ret_val); 14345 } else { 14346 ctf_do_drop(m, NULL); 14347 return (0); 14348 } 14349 } 14350 /* 14351 * Ack processing. 14352 */ 14353 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 14354 return (ret_val); 14355 } 14356 if (ourfinisacked) { 14357 /* 14358 * If we can't receive any more data, then closing user can 14359 * proceed. Starting the timer is contrary to the 14360 * specification, but if we don't get a FIN we'll hang 14361 * forever. 14362 * 14363 * XXXjl: we should release the tp also, and use a 14364 * compressed state. 14365 */ 14366 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 14367 soisdisconnected(so); 14368 tcp_timer_activate(tp, TT_2MSL, 14369 (tcp_fast_finwait2_recycle ? 14370 tcp_finwait2_timeout : 14371 TP_MAXIDLE(tp))); 14372 } 14373 tcp_state_change(tp, TCPS_FIN_WAIT_2); 14374 } 14375 if (sbavail(&so->so_snd)) { 14376 if (ctf_progress_timeout_check(tp, true)) { 14377 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 14378 tp, tick, PROGRESS_DROP, __LINE__); 14379 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14380 return (1); 14381 } 14382 } 14383 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14384 tiwin, thflags, nxt_pkt)); 14385 } 14386 14387 /* 14388 * Return value of 1, the TCB is unlocked and most 14389 * likely gone, return value of 0, the TCP is still 14390 * locked. 
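 *
 * This is the CLOSING state handler: both sides have sent a FIN and
 * we are waiting for ours to be acknowledged.  Once it is, the code
 * below calls tcp_twstart(), which moves the connection to TIME_WAIT
 * and discards the TCB, which is why that path returns 1.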
14391 */ 14392 static int 14393 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 14394 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14395 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14396 { 14397 int32_t ret_val = 0; 14398 int32_t orig_tlen = tlen; 14399 int32_t ourfinisacked = 0; 14400 struct tcp_rack *rack; 14401 14402 rack = (struct tcp_rack *)tp->t_fb_ptr; 14403 ctf_calc_rwin(so, tp); 14404 14405 if ((thflags & TH_RST) || 14406 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14407 return (__ctf_process_rst(m, th, so, tp, 14408 &rack->r_ctl.challenge_ack_ts, 14409 &rack->r_ctl.challenge_ack_cnt)); 14410 /* 14411 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14412 * synchronized state. 14413 */ 14414 if (thflags & TH_SYN) { 14415 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14416 return (ret_val); 14417 } 14418 /* 14419 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14420 * it's less than ts_recent, drop it. 14421 */ 14422 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14423 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14424 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14425 return (ret_val); 14426 } 14427 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14428 &rack->r_ctl.challenge_ack_ts, 14429 &rack->r_ctl.challenge_ack_cnt)) { 14430 return (ret_val); 14431 } 14432 /* 14433 * If last ACK falls within this segment's sequence numbers, record 14434 * its timestamp. NOTE: 1) That the test incorporates suggestions 14435 * from the latest proposal of the tcplw@cray.com list (Braden 14436 * 1993/04/26). 2) That updating only on newer timestamps interferes 14437 * with our earlier PAWS tests, so this check should be solely 14438 * predicated on the sequence space of this segment. 3) That we 14439 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14440 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14441 * SEG.Len, This modified check allows us to overcome RFC1323's 14442 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14443 * p.869. In such cases, we can still calculate the RTT correctly 14444 * when RCV.NXT == Last.ACK.Sent. 14445 */ 14446 if ((to->to_flags & TOF_TS) != 0 && 14447 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14448 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14449 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14450 tp->ts_recent_age = tcp_ts_getticks(); 14451 tp->ts_recent = to->to_tsval; 14452 } 14453 /* 14454 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14455 * is on (half-synchronized state), then queue data for later 14456 * processing; else drop segment and return. 14457 */ 14458 if ((thflags & TH_ACK) == 0) { 14459 if (tp->t_flags & TF_NEEDSYN) { 14460 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14461 tiwin, thflags, nxt_pkt)); 14462 } else if (tp->t_flags & TF_ACKNOW) { 14463 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 14464 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 14465 return (ret_val); 14466 } else { 14467 ctf_do_drop(m, NULL); 14468 return (0); 14469 } 14470 } 14471 /* 14472 * Ack processing. 
14473 */ 14474 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 14475 return (ret_val); 14476 } 14477 if (ourfinisacked) { 14478 tcp_twstart(tp); 14479 m_freem(m); 14480 return (1); 14481 } 14482 if (sbavail(&so->so_snd)) { 14483 if (ctf_progress_timeout_check(tp, true)) { 14484 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 14485 tp, tick, PROGRESS_DROP, __LINE__); 14486 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14487 return (1); 14488 } 14489 } 14490 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14491 tiwin, thflags, nxt_pkt)); 14492 } 14493 14494 /* 14495 * Return value of 1, the TCB is unlocked and most 14496 * likely gone, return value of 0, the TCP is still 14497 * locked. 14498 */ 14499 static int 14500 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 14501 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14502 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14503 { 14504 int32_t ret_val = 0; 14505 int32_t orig_tlen; 14506 int32_t ourfinisacked = 0; 14507 struct tcp_rack *rack; 14508 14509 rack = (struct tcp_rack *)tp->t_fb_ptr; 14510 ctf_calc_rwin(so, tp); 14511 14512 if ((thflags & TH_RST) || 14513 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14514 return (__ctf_process_rst(m, th, so, tp, 14515 &rack->r_ctl.challenge_ack_ts, 14516 &rack->r_ctl.challenge_ack_cnt)); 14517 /* 14518 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14519 * synchronized state. 14520 */ 14521 if (thflags & TH_SYN) { 14522 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14523 return (ret_val); 14524 } 14525 /* 14526 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14527 * it's less than ts_recent, drop it. 14528 */ 14529 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14530 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14531 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14532 return (ret_val); 14533 } 14534 orig_tlen = tlen; 14535 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14536 &rack->r_ctl.challenge_ack_ts, 14537 &rack->r_ctl.challenge_ack_cnt)) { 14538 return (ret_val); 14539 } 14540 /* 14541 * If last ACK falls within this segment's sequence numbers, record 14542 * its timestamp. NOTE: 1) That the test incorporates suggestions 14543 * from the latest proposal of the tcplw@cray.com list (Braden 14544 * 1993/04/26). 2) That updating only on newer timestamps interferes 14545 * with our earlier PAWS tests, so this check should be solely 14546 * predicated on the sequence space of this segment. 3) That we 14547 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14548 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14549 * SEG.Len, This modified check allows us to overcome RFC1323's 14550 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14551 * p.869. In such cases, we can still calculate the RTT correctly 14552 * when RCV.NXT == Last.ACK.Sent. 14553 */ 14554 if ((to->to_flags & TOF_TS) != 0 && 14555 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14556 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14557 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14558 tp->ts_recent_age = tcp_ts_getticks(); 14559 tp->ts_recent = to->to_tsval; 14560 } 14561 /* 14562 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14563 * is on (half-synchronized state), then queue data for later 14564 * processing; else drop segment and return. 
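	 *
	 * (For orientation: the interesting LAST_ACK outcome follows
	 * further below; once the ACK covers our FIN, tcp_close()
	 * tears the connection down and we return 1 to signal that
	 * the TCB is gone.)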
14565 */ 14566 if ((thflags & TH_ACK) == 0) { 14567 if (tp->t_flags & TF_NEEDSYN) { 14568 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14569 tiwin, thflags, nxt_pkt)); 14570 } else if (tp->t_flags & TF_ACKNOW) { 14571 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 14572 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 14573 return (ret_val); 14574 } else { 14575 ctf_do_drop(m, NULL); 14576 return (0); 14577 } 14578 } 14579 /* 14580 * case TCPS_LAST_ACK: Ack processing. 14581 */ 14582 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 14583 return (ret_val); 14584 } 14585 if (ourfinisacked) { 14586 tp = tcp_close(tp); 14587 ctf_do_drop(m, tp); 14588 return (1); 14589 } 14590 if (sbavail(&so->so_snd)) { 14591 if (ctf_progress_timeout_check(tp, true)) { 14592 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 14593 tp, tick, PROGRESS_DROP, __LINE__); 14594 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14595 return (1); 14596 } 14597 } 14598 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14599 tiwin, thflags, nxt_pkt)); 14600 } 14601 14602 /* 14603 * Return value of 1, the TCB is unlocked and most 14604 * likely gone, return value of 0, the TCP is still 14605 * locked. 14606 */ 14607 static int 14608 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 14609 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14610 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14611 { 14612 int32_t ret_val = 0; 14613 int32_t orig_tlen = tlen; 14614 int32_t ourfinisacked = 0; 14615 struct tcp_rack *rack; 14616 14617 rack = (struct tcp_rack *)tp->t_fb_ptr; 14618 ctf_calc_rwin(so, tp); 14619 14620 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 14621 if ((thflags & TH_RST) || 14622 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14623 return (__ctf_process_rst(m, th, so, tp, 14624 &rack->r_ctl.challenge_ack_ts, 14625 &rack->r_ctl.challenge_ack_cnt)); 14626 /* 14627 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14628 * synchronized state. 14629 */ 14630 if (thflags & TH_SYN) { 14631 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14632 return (ret_val); 14633 } 14634 /* 14635 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14636 * it's less than ts_recent, drop it. 14637 */ 14638 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14639 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14640 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14641 return (ret_val); 14642 } 14643 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14644 &rack->r_ctl.challenge_ack_ts, 14645 &rack->r_ctl.challenge_ack_cnt)) { 14646 return (ret_val); 14647 } 14648 /* 14649 * If new data are received on a connection after the user processes 14650 * are gone, then RST the other end. 14651 */ 14652 if ((tp->t_flags & TF_CLOSED) && tlen && 14653 rack_check_data_after_close(m, tp, &tlen, th, so)) 14654 return (1); 14655 /* 14656 * If last ACK falls within this segment's sequence numbers, record 14657 * its timestamp. NOTE: 1) That the test incorporates suggestions 14658 * from the latest proposal of the tcplw@cray.com list (Braden 14659 * 1993/04/26). 2) That updating only on newer timestamps interferes 14660 * with our earlier PAWS tests, so this check should be solely 14661 * predicated on the sequence space of this segment. 
3) That we 14662 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14663 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14664 * SEG.Len, This modified check allows us to overcome RFC1323's 14665 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14666 * p.869. In such cases, we can still calculate the RTT correctly 14667 * when RCV.NXT == Last.ACK.Sent. 14668 */ 14669 if ((to->to_flags & TOF_TS) != 0 && 14670 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14671 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14672 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14673 tp->ts_recent_age = tcp_ts_getticks(); 14674 tp->ts_recent = to->to_tsval; 14675 } 14676 /* 14677 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14678 * is on (half-synchronized state), then queue data for later 14679 * processing; else drop segment and return. 14680 */ 14681 if ((thflags & TH_ACK) == 0) { 14682 if (tp->t_flags & TF_NEEDSYN) { 14683 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14684 tiwin, thflags, nxt_pkt)); 14685 } else if (tp->t_flags & TF_ACKNOW) { 14686 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 14687 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 14688 return (ret_val); 14689 } else { 14690 ctf_do_drop(m, NULL); 14691 return (0); 14692 } 14693 } 14694 /* 14695 * Ack processing. 14696 */ 14697 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 14698 return (ret_val); 14699 } 14700 if (sbavail(&so->so_snd)) { 14701 if (ctf_progress_timeout_check(tp, true)) { 14702 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 14703 tp, tick, PROGRESS_DROP, __LINE__); 14704 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14705 return (1); 14706 } 14707 } 14708 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14709 tiwin, thflags, nxt_pkt)); 14710 } 14711 14712 static void inline 14713 rack_clear_rate_sample(struct tcp_rack *rack) 14714 { 14715 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 14716 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 14717 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 14718 } 14719 14720 static void 14721 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 14722 { 14723 uint64_t bw_est, rate_wanted; 14724 int chged = 0; 14725 uint32_t user_max, orig_min, orig_max; 14726 14727 #ifdef TCP_REQUEST_TRK 14728 if (rack->rc_hybrid_mode && 14729 (rack->r_ctl.rc_pace_max_segs != 0) && 14730 (rack_hybrid_allow_set_maxseg == 1) && 14731 (rack->r_ctl.rc_last_sft != NULL)) { 14732 rack->r_ctl.rc_last_sft->hybrid_flags &= ~TCP_HYBRID_PACING_SETMSS; 14733 return; 14734 } 14735 #endif 14736 orig_min = rack->r_ctl.rc_pace_min_segs; 14737 orig_max = rack->r_ctl.rc_pace_max_segs; 14738 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 14739 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 14740 chged = 1; 14741 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 14742 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 14743 if (user_max != rack->r_ctl.rc_pace_max_segs) 14744 chged = 1; 14745 } 14746 if (rack->rc_force_max_seg) { 14747 rack->r_ctl.rc_pace_max_segs = user_max; 14748 } else if (rack->use_fixed_rate) { 14749 bw_est = rack_get_bw(rack); 14750 if ((rack->r_ctl.crte == NULL) || 14751 (bw_est != rack->r_ctl.crte->rate)) { 14752 rack->r_ctl.rc_pace_max_segs = user_max; 14753 } else { 14754 /* We are pacing right at the hardware rate */ 14755 uint32_t segsiz, pace_one; 14756 14757 
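			/*
			 * Hardware-rate case: size the burst from the
			 * measured/estimated bandwidth rather than the
			 * user cap.  pace_one is set when either the
			 * rack_pace_one_seg knob or the user's
			 * min-segment setting (rc_user_set_min_segs == 1)
			 * asks for single-segment pacing; it is handed,
			 * together with the hardware rate entry (crte)
			 * and the configured length divisor, to
			 * tcp_get_pacing_burst_size_w_divisor() which
			 * picks rc_pace_max_segs.
			 */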
if (rack_pace_one_seg || 14758 (rack->r_ctl.rc_user_set_min_segs == 1)) 14759 pace_one = 1; 14760 else 14761 pace_one = 0; 14762 segsiz = min(ctf_fixed_maxseg(tp), 14763 rack->r_ctl.rc_pace_min_segs); 14764 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor( 14765 tp, bw_est, segsiz, pace_one, 14766 rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); 14767 } 14768 } else if (rack->rc_always_pace) { 14769 if (rack->r_ctl.gp_bw || 14770 rack->r_ctl.init_rate) { 14771 /* We have a rate of some sort set */ 14772 uint32_t orig; 14773 14774 bw_est = rack_get_bw(rack); 14775 orig = rack->r_ctl.rc_pace_max_segs; 14776 if (fill_override) 14777 rate_wanted = *fill_override; 14778 else 14779 rate_wanted = rack_get_gp_est(rack); 14780 if (rate_wanted) { 14781 /* We have something */ 14782 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 14783 rate_wanted, 14784 ctf_fixed_maxseg(rack->rc_tp)); 14785 } else 14786 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 14787 if (orig != rack->r_ctl.rc_pace_max_segs) 14788 chged = 1; 14789 } else if ((rack->r_ctl.gp_bw == 0) && 14790 (rack->r_ctl.rc_pace_max_segs == 0)) { 14791 /* 14792 * If we have nothing limit us to bursting 14793 * out IW sized pieces. 14794 */ 14795 chged = 1; 14796 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 14797 } 14798 } 14799 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 14800 chged = 1; 14801 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 14802 } 14803 if (chged) 14804 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 14805 } 14806 14807 14808 static void 14809 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack, int32_t flags) 14810 { 14811 #ifdef INET6 14812 struct ip6_hdr *ip6 = NULL; 14813 #endif 14814 #ifdef INET 14815 struct ip *ip = NULL; 14816 #endif 14817 struct udphdr *udp = NULL; 14818 14819 /* Ok lets fill in the fast block, it can only be used with no IP options! 
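	 * The fsb caches a pre-built IP (or IPv6) + TCP header template,
	 * plus a UDP header when tunneling is in use, so the fast send
	 * path can reuse the template rather than rebuilding headers on
	 * every transmit.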
*/ 14820 #ifdef INET6 14821 if (rack->r_is_v6) { 14822 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 14823 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 14824 if (tp->t_port) { 14825 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 14826 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 14827 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 14828 udp->uh_dport = tp->t_port; 14829 rack->r_ctl.fsb.udp = udp; 14830 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 14831 } else 14832 { 14833 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 14834 rack->r_ctl.fsb.udp = NULL; 14835 } 14836 tcpip_fillheaders(rack->rc_inp, 14837 tp->t_port, 14838 ip6, rack->r_ctl.fsb.th); 14839 rack->r_ctl.fsb.hoplimit = in6_selecthlim(rack->rc_inp, NULL); 14840 } else 14841 #endif /* INET6 */ 14842 #ifdef INET 14843 { 14844 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 14845 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 14846 if (tp->t_port) { 14847 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 14848 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 14849 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 14850 udp->uh_dport = tp->t_port; 14851 rack->r_ctl.fsb.udp = udp; 14852 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 14853 } else 14854 { 14855 rack->r_ctl.fsb.udp = NULL; 14856 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 14857 } 14858 tcpip_fillheaders(rack->rc_inp, 14859 tp->t_port, 14860 ip, rack->r_ctl.fsb.th); 14861 rack->r_ctl.fsb.hoplimit = tptoinpcb(tp)->inp_ip_ttl; 14862 } 14863 #endif 14864 rack->r_ctl.fsb.recwin = lmin(lmax(sbspace(&tptosocket(tp)->so_rcv), 0), 14865 (long)TCP_MAXWIN << tp->rcv_scale); 14866 rack->r_fsb_inited = 1; 14867 } 14868 14869 static int 14870 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 14871 { 14872 /* 14873 * Allocate the larger of spaces V6 if available else just 14874 * V4 and include udphdr (overbook) 14875 */ 14876 #ifdef INET6 14877 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 14878 #else 14879 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 14880 #endif 14881 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 14882 M_TCPFSB, M_NOWAIT|M_ZERO); 14883 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 14884 return (ENOMEM); 14885 } 14886 rack->r_fsb_inited = 0; 14887 return (0); 14888 } 14889 14890 static void 14891 rack_log_hystart_event(struct tcp_rack *rack, uint32_t high_seq, uint8_t mod) 14892 { 14893 /* 14894 * Types of logs (mod value) 14895 * 20 - Initial round setup 14896 * 21 - Rack declares a new round. 
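	 * In both cases flex1/flex2 carry the current round and its
	 * ending sequence (roundends), flex3 the high_seq passed in,
	 * flex4 snd_max, and flex8 the mod value itself.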
14897 */ 14898 struct tcpcb *tp; 14899 14900 tp = rack->rc_tp; 14901 if (tcp_bblogging_on(tp)) { 14902 union tcp_log_stackspecific log; 14903 struct timeval tv; 14904 14905 memset(&log, 0, sizeof(log)); 14906 log.u_bbr.flex1 = rack->r_ctl.current_round; 14907 log.u_bbr.flex2 = rack->r_ctl.roundends; 14908 log.u_bbr.flex3 = high_seq; 14909 log.u_bbr.flex4 = tp->snd_max; 14910 log.u_bbr.flex8 = mod; 14911 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14912 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 14913 log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes; 14914 TCP_LOG_EVENTP(tp, NULL, 14915 &tptosocket(tp)->so_rcv, 14916 &tptosocket(tp)->so_snd, 14917 TCP_HYSTART, 0, 14918 0, &log, false, &tv); 14919 } 14920 } 14921 14922 static void 14923 rack_deferred_init(struct tcpcb *tp, struct tcp_rack *rack) 14924 { 14925 rack->rack_deferred_inited = 1; 14926 rack->r_ctl.roundends = tp->snd_max; 14927 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 14928 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 14929 } 14930 14931 static void 14932 rack_init_retransmit_value(struct tcp_rack *rack, int ctl) 14933 { 14934 /* Retransmit bit controls. 14935 * 14936 * The setting of these values control one of 14937 * three settings you can have and dictate 14938 * how rack does retransmissions. Note this 14939 * is in *any* mode i.e. pacing on or off DGP 14940 * fixed rate pacing, or just bursting rack. 14941 * 14942 * 1 - Use full sized retransmits i.e. limit 14943 * the size to whatever the pace_max_segments 14944 * size is. 14945 * 14946 * 2 - Use pacer min granularity as a guide to 14947 * the size combined with the current calculated 14948 * goodput b/w measurement. So for example if 14949 * the goodput is measured at 20Mbps we would 14950 * calculate 8125 (pacer minimum 250usec in 14951 * that b/w) and then round it up to the next 14952 * MSS i.e. for 1448 mss 6 MSS or 8688 bytes. 14953 * 14954 * 0 - The rack default 1 MSS (anything not 0/1/2 14955 * fall here too if we are setting via rack_init()). 
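	 * In flag terms: ctl == 1 sets full_size_rxt, ctl == 2 sets
	 * shape_rxt_to_pacing_min, and any other value clears both,
	 * giving the 1 MSS default.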
14956 * 14957 */ 14958 if (ctl == 1) { 14959 rack->full_size_rxt = 1; 14960 rack->shape_rxt_to_pacing_min = 0; 14961 } else if (ctl == 2) { 14962 rack->full_size_rxt = 0; 14963 rack->shape_rxt_to_pacing_min = 1; 14964 } else { 14965 rack->full_size_rxt = 0; 14966 rack->shape_rxt_to_pacing_min = 0; 14967 } 14968 } 14969 14970 static void 14971 rack_log_chg_info(struct tcpcb *tp, struct tcp_rack *rack, uint8_t mod, 14972 uint32_t flex1, 14973 uint32_t flex2, 14974 uint32_t flex3) 14975 { 14976 if (tcp_bblogging_on(rack->rc_tp)) { 14977 union tcp_log_stackspecific log; 14978 struct timeval tv; 14979 14980 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14981 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14982 log.u_bbr.flex8 = mod; 14983 log.u_bbr.flex1 = flex1; 14984 log.u_bbr.flex2 = flex2; 14985 log.u_bbr.flex3 = flex3; 14986 tcp_log_event(tp, NULL, NULL, NULL, TCP_CHG_QUERY, 0, 14987 0, &log, false, NULL, __func__, __LINE__, &tv); 14988 } 14989 } 14990 14991 static int 14992 rack_chg_query(struct tcpcb *tp, struct tcp_query_resp *reqr) 14993 { 14994 struct tcp_rack *rack; 14995 struct rack_sendmap *rsm; 14996 int i; 14997 14998 14999 rack = (struct tcp_rack *)tp->t_fb_ptr; 15000 switch (reqr->req) { 15001 case TCP_QUERY_SENDMAP: 15002 if ((reqr->req_param == tp->snd_max) || 15003 (tp->snd_max == tp->snd_una)){ 15004 /* Unlikely */ 15005 return (0); 15006 } 15007 rsm = tqhash_find(rack->r_ctl.tqh, reqr->req_param); 15008 if (rsm == NULL) { 15009 /* Can't find that seq -- unlikely */ 15010 return (0); 15011 } 15012 reqr->sendmap_start = rsm->r_start; 15013 reqr->sendmap_end = rsm->r_end; 15014 reqr->sendmap_send_cnt = rsm->r_rtr_cnt; 15015 reqr->sendmap_fas = rsm->r_fas; 15016 if (reqr->sendmap_send_cnt > SNDMAP_NRTX) 15017 reqr->sendmap_send_cnt = SNDMAP_NRTX; 15018 for(i=0; i<reqr->sendmap_send_cnt; i++) 15019 reqr->sendmap_time[i] = rsm->r_tim_lastsent[i]; 15020 reqr->sendmap_ack_arrival = rsm->r_ack_arrival; 15021 reqr->sendmap_flags = rsm->r_flags & SNDMAP_MASK; 15022 reqr->sendmap_r_rtr_bytes = rsm->r_rtr_bytes; 15023 reqr->sendmap_dupacks = rsm->r_dupack; 15024 rack_log_chg_info(tp, rack, 1, 15025 rsm->r_start, 15026 rsm->r_end, 15027 rsm->r_flags); 15028 return(1); 15029 break; 15030 case TCP_QUERY_TIMERS_UP: 15031 if (rack->r_ctl.rc_hpts_flags == 0) { 15032 /* no timers up */ 15033 return (0); 15034 } 15035 reqr->timer_hpts_flags = rack->r_ctl.rc_hpts_flags; 15036 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 15037 reqr->timer_pacing_to = rack->r_ctl.rc_last_output_to; 15038 } 15039 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 15040 reqr->timer_timer_exp = rack->r_ctl.rc_timer_exp; 15041 } 15042 rack_log_chg_info(tp, rack, 2, 15043 rack->r_ctl.rc_hpts_flags, 15044 rack->r_ctl.rc_last_output_to, 15045 rack->r_ctl.rc_timer_exp); 15046 return (1); 15047 break; 15048 case TCP_QUERY_RACK_TIMES: 15049 /* Reordering items */ 15050 reqr->rack_num_dsacks = rack->r_ctl.num_dsack; 15051 reqr->rack_reorder_ts = rack->r_ctl.rc_reorder_ts; 15052 /* Timerstamps and timers */ 15053 reqr->rack_rxt_last_time = rack->r_ctl.rc_tlp_rxt_last_time; 15054 reqr->rack_min_rtt = rack->r_ctl.rc_rack_min_rtt; 15055 reqr->rack_rtt = rack->rc_rack_rtt; 15056 reqr->rack_tmit_time = rack->r_ctl.rc_rack_tmit_time; 15057 reqr->rack_srtt_measured = rack->rc_srtt_measure_made; 15058 /* PRR data */ 15059 reqr->rack_sacked = rack->r_ctl.rc_sacked; 15060 reqr->rack_holes_rxt = rack->r_ctl.rc_holes_rxt; 15061 reqr->rack_prr_delivered = rack->r_ctl.rc_prr_delivered; 15062 reqr->rack_prr_recovery_fs = 
rack->r_ctl.rc_prr_recovery_fs; 15063 reqr->rack_prr_sndcnt = rack->r_ctl.rc_prr_sndcnt; 15064 reqr->rack_prr_out = rack->r_ctl.rc_prr_out; 15065 /* TLP and persists info */ 15066 reqr->rack_tlp_out = rack->rc_tlp_in_progress; 15067 reqr->rack_tlp_cnt_out = rack->r_ctl.rc_tlp_cnt_out; 15068 if (rack->rc_in_persist) { 15069 reqr->rack_time_went_idle = rack->r_ctl.rc_went_idle_time; 15070 reqr->rack_in_persist = 1; 15071 } else { 15072 reqr->rack_time_went_idle = 0; 15073 reqr->rack_in_persist = 0; 15074 } 15075 if (rack->r_wanted_output) 15076 reqr->rack_wanted_output = 1; 15077 else 15078 reqr->rack_wanted_output = 0; 15079 return (1); 15080 break; 15081 default: 15082 return (-EINVAL); 15083 } 15084 } 15085 15086 static void 15087 rack_switch_failed(struct tcpcb *tp) 15088 { 15089 /* 15090 * This method gets called if a stack switch was 15091 * attempted and it failed. We are left 15092 * but our hpts timers were stopped and we 15093 * need to validate time units and t_flags2. 15094 */ 15095 struct tcp_rack *rack; 15096 struct timeval tv; 15097 uint32_t cts; 15098 uint32_t toval; 15099 struct hpts_diag diag; 15100 15101 rack = (struct tcp_rack *)tp->t_fb_ptr; 15102 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC); 15103 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 15104 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 15105 else 15106 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 15107 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 15108 tp->t_flags2 |= TF2_MBUF_ACKCMP; 15109 if (tp->t_in_hpts > IHPTS_NONE) { 15110 /* Strange */ 15111 return; 15112 } 15113 cts = tcp_get_usecs(&tv); 15114 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 15115 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 15116 toval = rack->r_ctl.rc_last_output_to - cts; 15117 } else { 15118 /* one slot please */ 15119 toval = HPTS_TICKS_PER_SLOT; 15120 } 15121 } else if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 15122 if (TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 15123 toval = rack->r_ctl.rc_timer_exp - cts; 15124 } else { 15125 /* one slot please */ 15126 toval = HPTS_TICKS_PER_SLOT; 15127 } 15128 } else 15129 toval = HPTS_TICKS_PER_SLOT; 15130 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(toval), 15131 __LINE__, &diag); 15132 rack_log_hpts_diag(rack, cts, &diag, &tv); 15133 } 15134 15135 static int 15136 rack_init_outstanding(struct tcpcb *tp, struct tcp_rack *rack, uint32_t us_cts, void *ptr) 15137 { 15138 struct rack_sendmap *rsm, *ersm; 15139 int insret __diagused; 15140 /* 15141 * When initing outstanding, we must be quite careful 15142 * to not refer to tp->t_fb_ptr. This has the old rack 15143 * pointer in it, not the "new" one (when we are doing 15144 * a stack switch). 
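	 * Two paths follow: if the previous stack cannot be queried we
	 * synthesize a single sendmap entry covering snd_una..snd_max;
	 * otherwise we walk its sendmap via TCP_QUERY_SENDMAP and rebuild
	 * each entry (flags, retransmit counts, send times) into our own
	 * tailq_hash and time-ordered tmap.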
15145 */ 15146 15147 15148 if (tp->t_fb->tfb_chg_query == NULL) { 15149 /* Create a send map for the current outstanding data */ 15150 15151 rsm = rack_alloc(rack); 15152 if (rsm == NULL) { 15153 uma_zfree(rack_pcb_zone, ptr); 15154 return (ENOMEM); 15155 } 15156 rsm->r_no_rtt_allowed = 1; 15157 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 15158 rsm->r_rtr_cnt = 1; 15159 rsm->r_rtr_bytes = 0; 15160 if (tp->t_flags & TF_SENTFIN) 15161 rsm->r_flags |= RACK_HAS_FIN; 15162 rsm->r_end = tp->snd_max; 15163 if (tp->snd_una == tp->iss) { 15164 /* The data space is one beyond snd_una */ 15165 rsm->r_flags |= RACK_HAS_SYN; 15166 rsm->r_start = tp->iss; 15167 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); 15168 } else 15169 rsm->r_start = tp->snd_una; 15170 rsm->r_dupack = 0; 15171 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 15172 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 15173 if (rsm->m) { 15174 rsm->orig_m_len = rsm->m->m_len; 15175 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 15176 } else { 15177 rsm->orig_m_len = 0; 15178 rsm->orig_t_space = 0; 15179 } 15180 } else { 15181 /* 15182 * This can happen if we have a stand-alone FIN or 15183 * SYN. 15184 */ 15185 rsm->m = NULL; 15186 rsm->orig_m_len = 0; 15187 rsm->orig_t_space = 0; 15188 rsm->soff = 0; 15189 } 15190 #ifdef INVARIANTS 15191 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 15192 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p", 15193 insret, rack, rsm); 15194 } 15195 #else 15196 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 15197 #endif 15198 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 15199 rsm->r_in_tmap = 1; 15200 } else { 15201 /* We have a query mechanism, lets use it */ 15202 struct tcp_query_resp qr; 15203 int i; 15204 tcp_seq at; 15205 15206 at = tp->snd_una; 15207 while (at != tp->snd_max) { 15208 memset(&qr, 0, sizeof(qr)); 15209 qr.req = TCP_QUERY_SENDMAP; 15210 qr.req_param = at; 15211 if ((*tp->t_fb->tfb_chg_query)(tp, &qr) == 0) 15212 break; 15213 /* Move forward */ 15214 at = qr.sendmap_end; 15215 /* Now lets build the entry for this one */ 15216 rsm = rack_alloc(rack); 15217 if (rsm == NULL) { 15218 uma_zfree(rack_pcb_zone, ptr); 15219 return (ENOMEM); 15220 } 15221 memset(rsm, 0, sizeof(struct rack_sendmap)); 15222 /* Now configure the rsm and insert it */ 15223 rsm->r_dupack = qr.sendmap_dupacks; 15224 rsm->r_start = qr.sendmap_start; 15225 rsm->r_end = qr.sendmap_end; 15226 if (qr.sendmap_fas) 15227 rsm->r_fas = qr.sendmap_end; 15228 else 15229 rsm->r_fas = rsm->r_start - tp->snd_una; 15230 /* 15231 * We have carefully aligned the bits 15232 * so that all we have to do is copy over 15233 * the bits with the mask. 
15234 */ 15235 rsm->r_flags = qr.sendmap_flags & SNDMAP_MASK; 15236 rsm->r_rtr_bytes = qr.sendmap_r_rtr_bytes; 15237 rsm->r_rtr_cnt = qr.sendmap_send_cnt; 15238 rsm->r_ack_arrival = qr.sendmap_ack_arrival; 15239 for (i=0 ; i<rsm->r_rtr_cnt; i++) 15240 rsm->r_tim_lastsent[i] = qr.sendmap_time[i]; 15241 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 15242 (rsm->r_start - tp->snd_una), &rsm->soff); 15243 if (rsm->m) { 15244 rsm->orig_m_len = rsm->m->m_len; 15245 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 15246 } else { 15247 rsm->orig_m_len = 0; 15248 rsm->orig_t_space = 0; 15249 } 15250 #ifdef INVARIANTS 15251 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 15252 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p", 15253 insret, rack, rsm); 15254 } 15255 #else 15256 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 15257 #endif 15258 if ((rsm->r_flags & RACK_ACKED) == 0) { 15259 TAILQ_FOREACH(ersm, &rack->r_ctl.rc_tmap, r_tnext) { 15260 if (ersm->r_tim_lastsent[(ersm->r_rtr_cnt-1)] > 15261 rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) { 15262 /* 15263 * If the existing ersm was sent at 15264 * a later time than the new one, then 15265 * the new one should appear ahead of this 15266 * ersm. 15267 */ 15268 rsm->r_in_tmap = 1; 15269 TAILQ_INSERT_BEFORE(ersm, rsm, r_tnext); 15270 break; 15271 } 15272 } 15273 if (rsm->r_in_tmap == 0) { 15274 /* 15275 * Not found so shove it on the tail. 15276 */ 15277 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 15278 rsm->r_in_tmap = 1; 15279 } 15280 } else { 15281 if ((rack->r_ctl.rc_sacklast == NULL) || 15282 (SEQ_GT(rsm->r_end, rack->r_ctl.rc_sacklast->r_end))) { 15283 rack->r_ctl.rc_sacklast = rsm; 15284 } 15285 } 15286 rack_log_chg_info(tp, rack, 3, 15287 rsm->r_start, 15288 rsm->r_end, 15289 rsm->r_flags); 15290 } 15291 } 15292 return (0); 15293 } 15294 15295 static void 15296 rack_translate_policer_detect(struct tcp_rack *rack, uint32_t optval) 15297 { 15298 /* 15299 * P = Percent of retransmits 499 = 49.9% 15300 * A = Average number 1 (.1%) -> 169 (16.9%) 15301 * M = Median number of retrans 1 - 16 15302 * MMMM MMMM AAAA AAAA PPPP PPPP PPPP PPPP 15303 * 15304 */ 15305 uint16_t per, upp; 15306 15307 per = optval & 0x0000ffff; 15308 rack->r_ctl.policer_rxt_threshold = (uint32_t)(per & 0xffff); 15309 upp = ((optval & 0xffff0000) >> 16); 15310 rack->r_ctl.policer_avg_threshold = (0x00ff & upp); 15311 rack->r_ctl.policer_med_threshold = ((upp >> 8) & 0x00ff); 15312 if ((rack->r_ctl.policer_rxt_threshold > 0) && 15313 (rack->r_ctl.policer_avg_threshold > 0) && 15314 (rack->r_ctl.policer_med_threshold > 0)) { 15315 rack->policer_detect_on = 1; 15316 } else { 15317 rack->policer_detect_on = 0; 15318 } 15319 rack->r_ctl.saved_policer_val = optval; 15320 policer_detection_log(rack, optval, 15321 rack->r_ctl.policer_avg_threshold, 15322 rack->r_ctl.policer_med_threshold, 15323 rack->r_ctl.policer_rxt_threshold, 11); 15324 } 15325 15326 static int32_t 15327 rack_init(struct tcpcb *tp, void **ptr) 15328 { 15329 struct inpcb *inp = tptoinpcb(tp); 15330 struct tcp_rack *rack = NULL; 15331 uint32_t iwin, snt, us_cts; 15332 size_t sz; 15333 int err, no_query; 15334 15335 tcp_hpts_init(tp); 15336 15337 /* 15338 * First are we the initial or are we a switched stack? 15339 * If we are initing via tcp_newtcppcb the ptr passed 15340 * will be tp->t_fb_ptr. If its a stack switch that 15341 * has a previous stack we can query it will be a local 15342 * var that will in the end be set into t_fb_ptr. 
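	 * Put differently, no_query == 1 means a fresh connection with no
	 * prior stack to import state from, while no_query == 0 means we
	 * may be able to pull timers and the sendmap over from the stack
	 * being replaced.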
15343 */ 15344 if (ptr == &tp->t_fb_ptr) 15345 no_query = 1; 15346 else 15347 no_query = 0; 15348 *ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 15349 if (*ptr == NULL) { 15350 /* 15351 * We need to allocate memory but cant. The INP and INP_INFO 15352 * locks and they are recursive (happens during setup. So a 15353 * scheme to drop the locks fails :( 15354 * 15355 */ 15356 return(ENOMEM); 15357 } 15358 memset(*ptr, 0, sizeof(struct tcp_rack)); 15359 rack = (struct tcp_rack *)*ptr; 15360 rack->r_ctl.tqh = malloc(sizeof(struct tailq_hash), M_TCPFSB, M_NOWAIT); 15361 if (rack->r_ctl.tqh == NULL) { 15362 uma_zfree(rack_pcb_zone, rack); 15363 return(ENOMEM); 15364 } 15365 tqhash_init(rack->r_ctl.tqh); 15366 TAILQ_INIT(&rack->r_ctl.rc_free); 15367 TAILQ_INIT(&rack->r_ctl.rc_tmap); 15368 rack->rc_tp = tp; 15369 rack->rc_inp = inp; 15370 /* Set the flag */ 15371 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0; 15372 /* Probably not needed but lets be sure */ 15373 rack_clear_rate_sample(rack); 15374 /* 15375 * Save off the default values, socket options will poke 15376 * at these if pacing is not on or we have not yet 15377 * reached where pacing is on (gp_ready/fixed enabled). 15378 * When they get set into the CC module (when gp_ready 15379 * is enabled or we enable fixed) then we will set these 15380 * values into the CC and place in here the old values 15381 * so we have a restoral. Then we will set the flag 15382 * rc_pacing_cc_set. That way whenever we turn off pacing 15383 * or switch off this stack, we will know to go restore 15384 * the saved values. 15385 * 15386 * We specifically put into the beta the ecn value for pacing. 15387 */ 15388 rack->rc_new_rnd_needed = 1; 15389 rack->r_ctl.rc_split_limit = V_tcp_map_split_limit; 15390 /* We want abe like behavior as well */ 15391 15392 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; 15393 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 15394 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 15395 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 15396 rack->r_ctl.policer_del_mss = rack_req_del_mss; 15397 if ((rack_policer_rxt_thresh > 0) && 15398 (rack_policer_avg_thresh > 0) && 15399 (rack_policer_med_thresh > 0)) { 15400 rack->r_ctl.policer_rxt_threshold = rack_policer_rxt_thresh; 15401 rack->r_ctl.policer_avg_threshold = rack_policer_avg_thresh; 15402 rack->r_ctl.policer_med_threshold = rack_policer_med_thresh; 15403 rack->policer_detect_on = 1; 15404 } else { 15405 rack->policer_detect_on = 0; 15406 } 15407 if (rack_fill_cw_state) 15408 rack->rc_pace_to_cwnd = 1; 15409 if (rack_pacing_min_seg) 15410 rack->r_ctl.rc_user_set_min_segs = rack_pacing_min_seg; 15411 if (use_rack_rr) 15412 rack->use_rack_rr = 1; 15413 if (rack_dnd_default) { 15414 rack->rc_pace_dnd = 1; 15415 } 15416 if (V_tcp_delack_enabled) 15417 tp->t_delayed_ack = 1; 15418 else 15419 tp->t_delayed_ack = 0; 15420 #ifdef TCP_ACCOUNTING 15421 if (rack_tcp_accounting) { 15422 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 15423 } 15424 #endif 15425 rack->r_ctl.pcm_i.cnt_alloc = RACK_DEFAULT_PCM_ARRAY; 15426 sz = (sizeof(struct rack_pcm_stats) * rack->r_ctl.pcm_i.cnt_alloc); 15427 rack->r_ctl.pcm_s = malloc(sz,M_TCPPCM, M_NOWAIT); 15428 if (rack->r_ctl.pcm_s == NULL) { 15429 rack->r_ctl.pcm_i.cnt_alloc = 0; 15430 } 15431 #ifdef NETFLIX_STATS 15432 rack->r_ctl.side_chan_dis_mask = tcp_sidechannel_disable_mask; 15433 #endif 15434 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss; 15435 rack->r_ctl.rack_per_upper_bound_ca = 
(uint8_t)rack_per_upper_bound_ca; 15436 if (rack_enable_shared_cwnd) 15437 rack->rack_enable_scwnd = 1; 15438 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; 15439 rack->rc_user_set_max_segs = rack_hptsi_segments; 15440 rack->r_ctl.max_reduction = rack_max_reduce; 15441 rack->rc_force_max_seg = 0; 15442 TAILQ_INIT(&rack->r_ctl.opt_list); 15443 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn; 15444 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn; 15445 if (rack_hibeta_setting) { 15446 rack->rack_hibeta = 1; 15447 if ((rack_hibeta_setting >= 50) && 15448 (rack_hibeta_setting <= 100)) { 15449 rack->r_ctl.rc_saved_beta.beta = rack_hibeta_setting; 15450 rack->r_ctl.saved_hibeta = rack_hibeta_setting; 15451 } 15452 } else { 15453 rack->r_ctl.saved_hibeta = 50; 15454 } 15455 /* 15456 * We initialize to all ones so we never match 0 15457 * just in case the client sends in 0, it hopefully 15458 * will never have all 1's in ms :-) 15459 */ 15460 rack->r_ctl.last_tm_mark = 0xffffffffffffffff; 15461 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 15462 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 15463 rack->r_ctl.pol_bw_comp = rack_policing_do_bw_comp; 15464 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 15465 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 15466 rack->r_ctl.rc_highest_us_rtt = 0; 15467 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 15468 rack->pcm_enabled = rack_pcm_is_enabled; 15469 if (rack_fillcw_bw_cap) 15470 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 15471 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 15472 if (rack_use_cmp_acks) 15473 rack->r_use_cmp_ack = 1; 15474 if (rack_disable_prr) 15475 rack->rack_no_prr = 1; 15476 if (rack_gp_no_rec_chg) 15477 rack->rc_gp_no_rec_chg = 1; 15478 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 15479 rack->r_ctl.pacing_method |= RACK_REG_PACING; 15480 rack->rc_always_pace = 1; 15481 if (rack->rack_hibeta) 15482 rack_set_cc_pacing(rack); 15483 } else 15484 rack->rc_always_pace = 0; 15485 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 15486 rack->r_mbuf_queue = 1; 15487 else 15488 rack->r_mbuf_queue = 0; 15489 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15490 if (rack_limits_scwnd) 15491 rack->r_limit_scw = 1; 15492 else 15493 rack->r_limit_scw = 0; 15494 rack_init_retransmit_value(rack, rack_rxt_controls); 15495 rack->rc_labc = V_tcp_abc_l_var; 15496 if (rack_honors_hpts_min_to) 15497 rack->r_use_hpts_min = 1; 15498 if (tp->snd_una != 0) { 15499 rack->r_ctl.idle_snd_una = tp->snd_una; 15500 rack->rc_sendvars_notset = 0; 15501 /* 15502 * Make sure any TCP timers are not running. 15503 */ 15504 tcp_timer_stop(tp); 15505 } else { 15506 /* 15507 * Server side, we are called from the 15508 * syn-cache. This means none of the 15509 * snd_una/max are set yet so we have 15510 * to defer this until the first send. 
15511 */ 15512 rack->rc_sendvars_notset = 1; 15513 } 15514 15515 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; 15516 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 15517 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 15518 rack->r_ctl.rc_min_to = rack_min_to; 15519 microuptime(&rack->r_ctl.act_rcv_time); 15520 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 15521 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; 15522 if (rack_hw_up_only) 15523 rack->r_up_only = 1; 15524 if (rack_do_dyn_mul) { 15525 /* When dynamic adjustment is on CA needs to start at 100% */ 15526 rack->rc_gp_dyn_mul = 1; 15527 if (rack_do_dyn_mul >= 100) 15528 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 15529 } else 15530 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 15531 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 15532 if (rack_timely_off) { 15533 rack->rc_skip_timely = 1; 15534 } 15535 if (rack->rc_skip_timely) { 15536 rack->r_ctl.rack_per_of_gp_rec = 90; 15537 rack->r_ctl.rack_per_of_gp_ca = 100; 15538 rack->r_ctl.rack_per_of_gp_ss = 250; 15539 } 15540 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 15541 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 15542 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 15543 15544 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 15545 rack_probertt_filter_life); 15546 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 15547 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 15548 rack->r_ctl.rc_time_of_last_probertt = us_cts; 15549 rack->r_ctl.rc_went_idle_time = us_cts; 15550 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks() - (tcp_ack_war_time_window + 1); 15551 rack->r_ctl.rc_time_probertt_starts = 0; 15552 15553 rack->r_ctl.gp_rnd_thresh = rack_rnd_cnt_req & 0xff; 15554 if (rack_rnd_cnt_req & 0x10000) 15555 rack->r_ctl.gate_to_fs = 1; 15556 rack->r_ctl.gp_gain_req = rack_gp_gain_req; 15557 if ((rack_rnd_cnt_req & 0x100) > 0) { 15558 15559 } 15560 if (rack_dsack_std_based & 0x1) { 15561 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 15562 rack->rc_rack_tmr_std_based = 1; 15563 } 15564 if (rack_dsack_std_based & 0x2) { 15565 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 15566 rack->rc_rack_use_dsack = 1; 15567 } 15568 /* We require at least one measurement, even if the sysctl is 0 */ 15569 if (rack_req_measurements) 15570 rack->r_ctl.req_measurements = rack_req_measurements; 15571 else 15572 rack->r_ctl.req_measurements = 1; 15573 if (rack_enable_hw_pacing) 15574 rack->rack_hdw_pace_ena = 1; 15575 if (rack_hw_rate_caps) 15576 rack->r_rack_hw_rate_caps = 1; 15577 if (rack_non_rxt_use_cr) 15578 rack->rack_rec_nonrxt_use_cr = 1; 15579 /* Lets setup the fsb block */ 15580 err = rack_init_fsb(tp, rack); 15581 if (err) { 15582 uma_zfree(rack_pcb_zone, *ptr); 15583 *ptr = NULL; 15584 return (err); 15585 } 15586 if (rack_do_hystart) { 15587 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 15588 if (rack_do_hystart > 1) 15589 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 15590 if (rack_do_hystart > 2) 15591 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 15592 } 15593 /* Log what we will do with queries */ 15594 rack_log_chg_info(tp, rack, 7, 15595 no_query, 0, 0); 15596 if (rack_def_profile) 15597 rack_set_profile(rack, rack_def_profile); 15598 /* Cancel the GP measurement in progress */ 15599 tp->t_flags &= ~TF_GPUTINPROG; 15600 if ((tp->t_state != TCPS_CLOSED) && 15601 
(tp->t_state != TCPS_TIME_WAIT)) { 15602 /* 15603 * We are already open, we may 15604 * need to adjust a few things. 15605 */ 15606 if (SEQ_GT(tp->snd_max, tp->iss)) 15607 snt = tp->snd_max - tp->iss; 15608 else 15609 snt = 0; 15610 iwin = rc_init_window(rack); 15611 if ((snt < iwin) && 15612 (no_query == 1)) { 15613 /* We are not past the initial window 15614 * on the first init (i.e. a stack switch 15615 * has not yet occured) so we need to make 15616 * sure cwnd and ssthresh is correct. 15617 */ 15618 if (tp->snd_cwnd < iwin) 15619 tp->snd_cwnd = iwin; 15620 /* 15621 * If we are within the initial window 15622 * we want ssthresh to be unlimited. Setting 15623 * it to the rwnd (which the default stack does 15624 * and older racks) is not really a good idea 15625 * since we want to be in SS and grow both the 15626 * cwnd and the rwnd (via dynamic rwnd growth). If 15627 * we set it to the rwnd then as the peer grows its 15628 * rwnd we will be stuck in CA and never hit SS. 15629 * 15630 * Its far better to raise it up high (this takes the 15631 * risk that there as been a loss already, probably 15632 * we should have an indicator in all stacks of loss 15633 * but we don't), but considering the normal use this 15634 * is a risk worth taking. The consequences of not 15635 * hitting SS are far worse than going one more time 15636 * into it early on (before we have sent even a IW). 15637 * It is highly unlikely that we will have had a loss 15638 * before getting the IW out. 15639 */ 15640 tp->snd_ssthresh = 0xffffffff; 15641 } 15642 /* 15643 * Any init based on sequence numbers 15644 * should be done in the deferred init path 15645 * since we can be CLOSED and not have them 15646 * inited when rack_init() is called. We 15647 * are not closed so lets call it. 15648 */ 15649 rack_deferred_init(tp, rack); 15650 } 15651 if ((tp->t_state != TCPS_CLOSED) && 15652 (tp->t_state != TCPS_TIME_WAIT) && 15653 (no_query == 0) && 15654 (tp->snd_una != tp->snd_max)) { 15655 err = rack_init_outstanding(tp, rack, us_cts, *ptr); 15656 if (err) { 15657 *ptr = NULL; 15658 return(err); 15659 } 15660 } 15661 rack_stop_all_timers(tp, rack); 15662 /* Setup all the t_flags2 */ 15663 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 15664 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 15665 else 15666 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 15667 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 15668 tp->t_flags2 |= TF2_MBUF_ACKCMP; 15669 /* 15670 * Timers in Rack are kept in microseconds so lets 15671 * convert any initial incoming variables 15672 * from ticks into usecs. Note that we 15673 * also change the values of t_srtt and t_rttvar, if 15674 * they are non-zero. They are kept with a 5 15675 * bit decimal so we have to carefully convert 15676 * these to get the full precision. 
15677 */ 15678 rack_convert_rtts(tp); 15679 rack_log_hystart_event(rack, rack->r_ctl.roundends, 20); 15680 if ((tptoinpcb(tp)->inp_flags & INP_DROPPED) == 0) { 15681 /* We do not start any timers on DROPPED connections */ 15682 if (tp->t_fb->tfb_chg_query == NULL) { 15683 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 15684 } else { 15685 struct tcp_query_resp qr; 15686 int ret; 15687 15688 memset(&qr, 0, sizeof(qr)); 15689 15690 /* Get the misc time stamps and such for rack */ 15691 qr.req = TCP_QUERY_RACK_TIMES; 15692 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); 15693 if (ret == 1) { 15694 rack->r_ctl.rc_reorder_ts = qr.rack_reorder_ts; 15695 rack->r_ctl.num_dsack = qr.rack_num_dsacks; 15696 rack->r_ctl.rc_tlp_rxt_last_time = qr.rack_rxt_last_time; 15697 rack->r_ctl.rc_rack_min_rtt = qr.rack_min_rtt; 15698 rack->rc_rack_rtt = qr.rack_rtt; 15699 rack->r_ctl.rc_rack_tmit_time = qr.rack_tmit_time; 15700 rack->r_ctl.rc_sacked = qr.rack_sacked; 15701 rack->r_ctl.rc_holes_rxt = qr.rack_holes_rxt; 15702 rack->r_ctl.rc_prr_delivered = qr.rack_prr_delivered; 15703 rack->r_ctl.rc_prr_recovery_fs = qr.rack_prr_recovery_fs; 15704 rack->r_ctl.rc_prr_sndcnt = qr.rack_prr_sndcnt; 15705 rack->r_ctl.rc_prr_out = qr.rack_prr_out; 15706 if (qr.rack_tlp_out) { 15707 rack->rc_tlp_in_progress = 1; 15708 rack->r_ctl.rc_tlp_cnt_out = qr.rack_tlp_cnt_out; 15709 } else { 15710 rack->rc_tlp_in_progress = 0; 15711 rack->r_ctl.rc_tlp_cnt_out = 0; 15712 } 15713 if (qr.rack_srtt_measured) 15714 rack->rc_srtt_measure_made = 1; 15715 if (qr.rack_in_persist == 1) { 15716 rack->r_ctl.rc_went_idle_time = qr.rack_time_went_idle; 15717 #ifdef NETFLIX_SHARED_CWND 15718 if (rack->r_ctl.rc_scw) { 15719 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 15720 rack->rack_scwnd_is_idle = 1; 15721 } 15722 #endif 15723 rack->r_ctl.persist_lost_ends = 0; 15724 rack->probe_not_answered = 0; 15725 rack->forced_ack = 0; 15726 tp->t_rxtshift = 0; 15727 rack->rc_in_persist = 1; 15728 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 15729 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 15730 } 15731 if (qr.rack_wanted_output) 15732 rack->r_wanted_output = 1; 15733 rack_log_chg_info(tp, rack, 6, 15734 qr.rack_min_rtt, 15735 qr.rack_rtt, 15736 qr.rack_reorder_ts); 15737 } 15738 /* Get the old stack timers */ 15739 qr.req_param = 0; 15740 qr.req = TCP_QUERY_TIMERS_UP; 15741 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); 15742 if (ret) { 15743 /* 15744 * non-zero return means we have a timer('s) 15745 * to start. Zero means no timer (no keepalive 15746 * I suppose). 
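				 * When a timer is reported we mirror the old
				 * stack's pacing deadline and/or timer expiry,
				 * clamp anything already in the past to a
				 * single HPTS slot, and re-insert ourselves
				 * into the pacing wheel.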
15747 */ 15748 uint32_t tov = 0; 15749 15750 rack->r_ctl.rc_hpts_flags = qr.timer_hpts_flags; 15751 if (qr.timer_hpts_flags & PACE_PKT_OUTPUT) { 15752 rack->r_ctl.rc_last_output_to = qr.timer_pacing_to; 15753 if (TSTMP_GT(qr.timer_pacing_to, us_cts)) 15754 tov = qr.timer_pacing_to - us_cts; 15755 else 15756 tov = HPTS_TICKS_PER_SLOT; 15757 } 15758 if (qr.timer_hpts_flags & PACE_TMR_MASK) { 15759 rack->r_ctl.rc_timer_exp = qr.timer_timer_exp; 15760 if (tov == 0) { 15761 if (TSTMP_GT(qr.timer_timer_exp, us_cts)) 15762 tov = qr.timer_timer_exp - us_cts; 15763 else 15764 tov = HPTS_TICKS_PER_SLOT; 15765 } 15766 } 15767 rack_log_chg_info(tp, rack, 4, 15768 rack->r_ctl.rc_hpts_flags, 15769 rack->r_ctl.rc_last_output_to, 15770 rack->r_ctl.rc_timer_exp); 15771 if (tov) { 15772 struct hpts_diag diag; 15773 15774 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(tov), 15775 __LINE__, &diag); 15776 rack_log_hpts_diag(rack, us_cts, &diag, &rack->r_ctl.act_rcv_time); 15777 } 15778 } 15779 } 15780 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, 15781 __LINE__, RACK_RTTS_INIT); 15782 } 15783 return (0); 15784 } 15785 15786 static int 15787 rack_handoff_ok(struct tcpcb *tp) 15788 { 15789 if ((tp->t_state == TCPS_CLOSED) || 15790 (tp->t_state == TCPS_LISTEN)) { 15791 /* Sure no problem though it may not stick */ 15792 return (0); 15793 } 15794 if ((tp->t_state == TCPS_SYN_SENT) || 15795 (tp->t_state == TCPS_SYN_RECEIVED)) { 15796 /* 15797 * We really don't know if you support sack, 15798 * you have to get to ESTAB or beyond to tell. 15799 */ 15800 return (EAGAIN); 15801 } 15802 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { 15803 /* 15804 * Rack will only send a FIN after all data is acknowledged. 15805 * So in this case we have more data outstanding. We can't 15806 * switch stacks until either all data and only the FIN 15807 * is left (in which case rack_init() now knows how 15808 * to deal with that) <or> all is acknowledged and we 15809 * are only left with incoming data, though why you 15810 * would want to switch to rack after all data is acknowledged 15811 * I have no idea (rrs)! 15812 */ 15813 return (EAGAIN); 15814 } 15815 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 15816 return (0); 15817 } 15818 /* 15819 * If we reach here we don't do SACK on this connection so we can 15820 * never do rack. 
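	 * Returning EINVAL refuses the handoff, leaving the connection on
	 * the stack it is currently using.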
15821 */ 15822 return (EINVAL); 15823 } 15824 15825 static void 15826 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 15827 { 15828 15829 if (tp->t_fb_ptr) { 15830 uint32_t cnt_free = 0; 15831 struct tcp_rack *rack; 15832 struct rack_sendmap *rsm; 15833 15834 tcp_handle_orphaned_packets(tp); 15835 tp->t_flags &= ~TF_FORCEDATA; 15836 rack = (struct tcp_rack *)tp->t_fb_ptr; 15837 rack_log_pacing_delay_calc(rack, 15838 0, 15839 0, 15840 0, 15841 rack_get_gp_est(rack), /* delRate */ 15842 rack_get_lt_bw(rack), /* rttProp */ 15843 20, __LINE__, NULL, 0); 15844 #ifdef NETFLIX_SHARED_CWND 15845 if (rack->r_ctl.rc_scw) { 15846 uint32_t limit; 15847 15848 if (rack->r_limit_scw) 15849 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 15850 else 15851 limit = 0; 15852 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 15853 rack->r_ctl.rc_scw_index, 15854 limit); 15855 rack->r_ctl.rc_scw = NULL; 15856 } 15857 #endif 15858 if (rack->r_ctl.fsb.tcp_ip_hdr) { 15859 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); 15860 rack->r_ctl.fsb.tcp_ip_hdr = NULL; 15861 rack->r_ctl.fsb.th = NULL; 15862 } 15863 if (rack->rc_always_pace == 1) { 15864 rack_remove_pacing(rack); 15865 } 15866 /* Clean up any options if they were not applied */ 15867 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 15868 struct deferred_opt_list *dol; 15869 15870 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 15871 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 15872 free(dol, M_TCPDO); 15873 } 15874 /* rack does not use force data but other stacks may clear it */ 15875 if (rack->r_ctl.crte != NULL) { 15876 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 15877 rack->rack_hdrw_pacing = 0; 15878 rack->r_ctl.crte = NULL; 15879 } 15880 #ifdef TCP_BLACKBOX 15881 tcp_log_flowend(tp); 15882 #endif 15883 /* 15884 * Lets take a different approach to purging just 15885 * get each one and free it like a cum-ack would and 15886 * not use a foreach loop. 
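		 * Repeatedly pulling tqhash_min() and freeing it also avoids
		 * iterating a structure while we are tearing it down.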
15887 */ 15888 rsm = tqhash_min(rack->r_ctl.tqh); 15889 while (rsm) { 15890 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); 15891 rack->r_ctl.rc_num_maps_alloced--; 15892 uma_zfree(rack_zone, rsm); 15893 rsm = tqhash_min(rack->r_ctl.tqh); 15894 } 15895 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15896 while (rsm) { 15897 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 15898 rack->r_ctl.rc_num_maps_alloced--; 15899 rack->rc_free_cnt--; 15900 cnt_free++; 15901 uma_zfree(rack_zone, rsm); 15902 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15903 } 15904 if (rack->r_ctl.pcm_s != NULL) { 15905 free(rack->r_ctl.pcm_s, M_TCPPCM); 15906 rack->r_ctl.pcm_s = NULL; 15907 rack->r_ctl.pcm_i.cnt_alloc = 0; 15908 rack->r_ctl.pcm_i.cnt = 0; 15909 } 15910 if ((rack->r_ctl.rc_num_maps_alloced > 0) && 15911 (tcp_bblogging_on(tp))) { 15912 union tcp_log_stackspecific log; 15913 struct timeval tv; 15914 15915 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15916 log.u_bbr.flex8 = 10; 15917 log.u_bbr.flex1 = rack->r_ctl.rc_num_maps_alloced; 15918 log.u_bbr.flex2 = rack->rc_free_cnt; 15919 log.u_bbr.flex3 = cnt_free; 15920 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15921 rsm = tqhash_min(rack->r_ctl.tqh); 15922 log.u_bbr.delRate = (uintptr_t)rsm; 15923 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15924 log.u_bbr.cur_del_rate = (uintptr_t)rsm; 15925 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15926 log.u_bbr.pkt_epoch = __LINE__; 15927 (void)tcp_log_event(tp, NULL, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 15928 0, &log, false, NULL, NULL, 0, &tv); 15929 } 15930 KASSERT((rack->r_ctl.rc_num_maps_alloced == 0), 15931 ("rack:%p num_aloc:%u after freeing all?", 15932 rack, 15933 rack->r_ctl.rc_num_maps_alloced)); 15934 rack->rc_free_cnt = 0; 15935 free(rack->r_ctl.tqh, M_TCPFSB); 15936 rack->r_ctl.tqh = NULL; 15937 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 15938 tp->t_fb_ptr = NULL; 15939 } 15940 /* Make sure snd_nxt is correctly set */ 15941 tp->snd_nxt = tp->snd_max; 15942 } 15943 15944 static void 15945 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 15946 { 15947 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 15948 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0; 15949 } 15950 switch (tp->t_state) { 15951 case TCPS_SYN_SENT: 15952 rack->r_state = TCPS_SYN_SENT; 15953 rack->r_substate = rack_do_syn_sent; 15954 break; 15955 case TCPS_SYN_RECEIVED: 15956 rack->r_state = TCPS_SYN_RECEIVED; 15957 rack->r_substate = rack_do_syn_recv; 15958 break; 15959 case TCPS_ESTABLISHED: 15960 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15961 rack->r_state = TCPS_ESTABLISHED; 15962 rack->r_substate = rack_do_established; 15963 break; 15964 case TCPS_CLOSE_WAIT: 15965 rack->r_state = TCPS_CLOSE_WAIT; 15966 rack->r_substate = rack_do_close_wait; 15967 break; 15968 case TCPS_FIN_WAIT_1: 15969 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15970 rack->r_state = TCPS_FIN_WAIT_1; 15971 rack->r_substate = rack_do_fin_wait_1; 15972 break; 15973 case TCPS_CLOSING: 15974 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15975 rack->r_state = TCPS_CLOSING; 15976 rack->r_substate = rack_do_closing; 15977 break; 15978 case TCPS_LAST_ACK: 15979 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15980 rack->r_state = TCPS_LAST_ACK; 15981 rack->r_substate = rack_do_lastack; 15982 break; 15983 case TCPS_FIN_WAIT_2: 15984 rack->r_state = TCPS_FIN_WAIT_2; 15985 rack->r_substate = rack_do_fin_wait_2; 15986 break; 15987 case TCPS_LISTEN: 15988 case TCPS_CLOSED: 15989 case TCPS_TIME_WAIT: 
15990 default: 15991 break; 15992 }; 15993 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 15994 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 15995 15996 } 15997 15998 static void 15999 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 16000 { 16001 /* 16002 * We received an ack, and then did not 16003 * call send or were bounced out due to the 16004 * hpts was running. Now a timer is up as well, is 16005 * it the right timer? 16006 */ 16007 struct rack_sendmap *rsm; 16008 int tmr_up; 16009 16010 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 16011 if (tcp_in_hpts(rack->rc_tp) == 0) { 16012 /* 16013 * Ok we probably need some timer up, but no 16014 * matter what the mask we are not in hpts. We 16015 * may have received an old ack and thus did nothing. 16016 */ 16017 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16018 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 16019 return; 16020 } 16021 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 16022 return; 16023 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 16024 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 16025 (tmr_up == PACE_TMR_RXT)) { 16026 /* Should be an RXT */ 16027 return; 16028 } 16029 if (rsm == NULL) { 16030 /* Nothing outstanding? */ 16031 if (tp->t_flags & TF_DELACK) { 16032 if (tmr_up == PACE_TMR_DELACK) 16033 /* We are supposed to have delayed ack up and we do */ 16034 return; 16035 } else if (sbavail(&tptosocket(tp)->so_snd) && (tmr_up == PACE_TMR_RXT)) { 16036 /* 16037 * if we hit enobufs then we would expect the possibility 16038 * of nothing outstanding and the RXT up (and the hptsi timer). 16039 */ 16040 return; 16041 } else if (((V_tcp_always_keepalive || 16042 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 16043 (tp->t_state <= TCPS_CLOSING)) && 16044 (tmr_up == PACE_TMR_KEEP) && 16045 (tp->snd_max == tp->snd_una)) { 16046 /* We should have keep alive up and we do */ 16047 return; 16048 } 16049 } 16050 if (SEQ_GT(tp->snd_max, tp->snd_una) && 16051 ((tmr_up == PACE_TMR_TLP) || 16052 (tmr_up == PACE_TMR_RACK) || 16053 (tmr_up == PACE_TMR_RXT))) { 16054 /* 16055 * Either a Rack, TLP or RXT is fine if we 16056 * have outstanding data. 16057 */ 16058 return; 16059 } else if (tmr_up == PACE_TMR_DELACK) { 16060 /* 16061 * If the delayed ack was going to go off 16062 * before the rtx/tlp/rack timer were going to 16063 * expire, then that would be the timer in control. 16064 * Note we don't check the time here trusting the 16065 * code is correct. 16066 */ 16067 return; 16068 } 16069 /* 16070 * Ok the timer originally started is not what we want now. 16071 * We will force the hpts to be stopped if any, and restart 16072 * with the slot set to what was in the saved slot. 
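	 * If a pacing deadline is still in the future we also credit the
	 * remaining time to rc_agg_early so the pacer can account for
	 * being pulled off the wheel ahead of schedule.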
16073 */ 16074 if (tcp_in_hpts(rack->rc_tp)) { 16075 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 16076 uint32_t us_cts; 16077 16078 us_cts = tcp_get_usecs(NULL); 16079 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 16080 rack->r_early = 1; 16081 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 16082 } 16083 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 16084 } 16085 tcp_hpts_remove(rack->rc_tp); 16086 } 16087 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16088 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 16089 } 16090 16091 16092 static void 16093 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts) 16094 { 16095 if ((SEQ_LT(tp->snd_wl1, seq) || 16096 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || 16097 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { 16098 /* keep track of pure window updates */ 16099 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) 16100 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 16101 tp->snd_wnd = tiwin; 16102 rack_validate_fo_sendwin_up(tp, rack); 16103 tp->snd_wl1 = seq; 16104 tp->snd_wl2 = ack; 16105 if (tp->snd_wnd > tp->max_sndwnd) 16106 tp->max_sndwnd = tp->snd_wnd; 16107 rack->r_wanted_output = 1; 16108 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { 16109 tp->snd_wnd = tiwin; 16110 rack_validate_fo_sendwin_up(tp, rack); 16111 tp->snd_wl1 = seq; 16112 tp->snd_wl2 = ack; 16113 } else { 16114 /* Not a valid win update */ 16115 return; 16116 } 16117 if (tp->snd_wnd > tp->max_sndwnd) 16118 tp->max_sndwnd = tp->snd_wnd; 16119 /* Do we exit persists? */ 16120 if ((rack->rc_in_persist != 0) && 16121 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 16122 rack->r_ctl.rc_pace_min_segs))) { 16123 rack_exit_persist(tp, rack, cts); 16124 } 16125 /* Do we enter persists? */ 16126 if ((rack->rc_in_persist == 0) && 16127 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 16128 TCPS_HAVEESTABLISHED(tp->t_state) && 16129 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 16130 sbavail(&tptosocket(tp)->so_snd) && 16131 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 16132 /* 16133 * Here the rwnd is less than 16134 * the pacing size, we are established, 16135 * nothing is outstanding, and there is 16136 * data to send. Enter persists. 
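		 * The sbavail() > snd_wnd test ensures we really are blocked
		 * by the peer's window rather than simply out of data to send.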
16137 */ 16138 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, ack); 16139 } 16140 } 16141 16142 static void 16143 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq) 16144 { 16145 16146 if (tcp_bblogging_on(rack->rc_tp)) { 16147 struct inpcb *inp = tptoinpcb(tp); 16148 union tcp_log_stackspecific log; 16149 struct timeval ltv; 16150 char tcp_hdr_buf[60]; 16151 struct tcphdr *th; 16152 struct timespec ts; 16153 uint32_t orig_snd_una; 16154 uint8_t xx = 0; 16155 16156 #ifdef TCP_REQUEST_TRK 16157 struct tcp_sendfile_track *tcp_req; 16158 16159 if (SEQ_GT(ae->ack, tp->snd_una)) { 16160 tcp_req = tcp_req_find_req_for_seq(tp, (ae->ack-1)); 16161 } else { 16162 tcp_req = tcp_req_find_req_for_seq(tp, ae->ack); 16163 } 16164 #endif 16165 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16166 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 16167 if (rack->rack_no_prr == 0) 16168 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16169 else 16170 log.u_bbr.flex1 = 0; 16171 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 16172 log.u_bbr.use_lt_bw <<= 1; 16173 log.u_bbr.use_lt_bw |= rack->r_might_revert; 16174 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 16175 log.u_bbr.bbr_state = rack->rc_free_cnt; 16176 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 16177 log.u_bbr.pkts_out = tp->t_maxseg; 16178 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 16179 log.u_bbr.flex7 = 1; 16180 log.u_bbr.lost = ae->flags; 16181 log.u_bbr.cwnd_gain = ackval; 16182 log.u_bbr.pacing_gain = 0x2; 16183 if (ae->flags & TSTMP_HDWR) { 16184 /* Record the hardware timestamp if present */ 16185 log.u_bbr.flex3 = M_TSTMP; 16186 ts.tv_sec = ae->timestamp / 1000000000; 16187 ts.tv_nsec = ae->timestamp % 1000000000; 16188 ltv.tv_sec = ts.tv_sec; 16189 ltv.tv_usec = ts.tv_nsec / 1000; 16190 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 16191 } else if (ae->flags & TSTMP_LRO) { 16192 /* Record the LRO the arrival timestamp */ 16193 log.u_bbr.flex3 = M_TSTMP_LRO; 16194 ts.tv_sec = ae->timestamp / 1000000000; 16195 ts.tv_nsec = ae->timestamp % 1000000000; 16196 ltv.tv_sec = ts.tv_sec; 16197 ltv.tv_usec = ts.tv_nsec / 1000; 16198 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 16199 } 16200 log.u_bbr.timeStamp = tcp_get_usecs(<v); 16201 /* Log the rcv time */ 16202 log.u_bbr.delRate = ae->timestamp; 16203 #ifdef TCP_REQUEST_TRK 16204 log.u_bbr.applimited = tp->t_tcpreq_closed; 16205 log.u_bbr.applimited <<= 8; 16206 log.u_bbr.applimited |= tp->t_tcpreq_open; 16207 log.u_bbr.applimited <<= 8; 16208 log.u_bbr.applimited |= tp->t_tcpreq_req; 16209 if (tcp_req) { 16210 /* Copy out any client req info */ 16211 /* seconds */ 16212 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); 16213 /* useconds */ 16214 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); 16215 log.u_bbr.rttProp = tcp_req->timestamp; 16216 log.u_bbr.cur_del_rate = tcp_req->start; 16217 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { 16218 log.u_bbr.flex8 |= 1; 16219 } else { 16220 log.u_bbr.flex8 |= 2; 16221 log.u_bbr.bw_inuse = tcp_req->end; 16222 } 16223 log.u_bbr.flex6 = tcp_req->start_seq; 16224 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { 16225 log.u_bbr.flex8 |= 4; 16226 log.u_bbr.epoch = tcp_req->end_seq; 16227 } 16228 } 16229 #endif 16230 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf)); 16231 th = (struct tcphdr *)tcp_hdr_buf; 16232 th->th_seq = ae->seq; 16233 th->th_ack = ae->ack; 16234 th->th_win = ae->win; 16235 /* Now fill in the ports */ 16236 th->th_sport = inp->inp_fport; 
16237 th->th_dport = inp->inp_lport; 16238 tcp_set_flags(th, ae->flags); 16239 /* Now do we have a timestamp option? */ 16240 if (ae->flags & HAS_TSTMP) { 16241 u_char *cp; 16242 uint32_t val; 16243 16244 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); 16245 cp = (u_char *)(th + 1); 16246 *cp = TCPOPT_NOP; 16247 cp++; 16248 *cp = TCPOPT_NOP; 16249 cp++; 16250 *cp = TCPOPT_TIMESTAMP; 16251 cp++; 16252 *cp = TCPOLEN_TIMESTAMP; 16253 cp++; 16254 val = htonl(ae->ts_value); 16255 bcopy((char *)&val, 16256 (char *)cp, sizeof(uint32_t)); 16257 val = htonl(ae->ts_echo); 16258 bcopy((char *)&val, 16259 (char *)(cp + 4), sizeof(uint32_t)); 16260 } else 16261 th->th_off = (sizeof(struct tcphdr) >> 2); 16262 16263 /* 16264 * For sane logging we need to play a little trick. 16265 * If the ack were fully processed we would have moved 16266 * snd_una to high_seq, but since compressed acks are 16267 * processed in two phases, at this point (logging) snd_una 16268 * won't be advanced. So we would see multiple acks showing 16269 * the advancement. We can prevent that by "pretending" that 16270 * snd_una was advanced and then un-advancing it so that the 16271 * logging code has the right value for tlb_snd_una. 16272 */ 16273 if (tp->snd_una != high_seq) { 16274 orig_snd_una = tp->snd_una; 16275 tp->snd_una = high_seq; 16276 xx = 1; 16277 } else 16278 xx = 0; 16279 TCP_LOG_EVENTP(tp, th, 16280 &tptosocket(tp)->so_rcv, 16281 &tptosocket(tp)->so_snd, TCP_LOG_IN, 0, 16282 0, &log, true, <v); 16283 if (xx) { 16284 tp->snd_una = orig_snd_una; 16285 } 16286 } 16287 16288 } 16289 16290 static void 16291 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts) 16292 { 16293 uint32_t us_rtt; 16294 /* 16295 * A persist or keep-alive was forced out, update our 16296 * min rtt time. Note now worry about lost responses. 16297 * When a subsequent keep-alive or persist times out 16298 * and forced_ack is still on, then the last probe 16299 * was not responded to. In such cases we have a 16300 * sysctl that controls the behavior. Either we apply 16301 * the rtt but with reduced confidence (0). Or we just 16302 * plain don't apply the rtt estimate. Having data flow 16303 * will clear the probe_not_answered flag i.e. cum-ack 16304 * move forward <or> exiting and reentering persists. 16305 */ 16306 16307 rack->forced_ack = 0; 16308 rack->rc_tp->t_rxtshift = 0; 16309 if ((rack->rc_in_persist && 16310 (tiwin == rack->rc_tp->snd_wnd)) || 16311 (rack->rc_in_persist == 0)) { 16312 /* 16313 * In persists only apply the RTT update if this is 16314 * a response to our window probe. And that 16315 * means the rwnd sent must match the current 16316 * snd_wnd. If it does not, then we got a 16317 * window update ack instead. For keepalive 16318 * we allow the answer no matter what the window. 16319 * 16320 * Note that if the probe_not_answered is set then 16321 * the forced_ack_ts is the oldest one i.e. the first 16322 * probe sent that might have been lost. This assures 16323 * us that if we do calculate an RTT it is longer not 16324 * some short thing. 
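		 * Below, a clean probe response feeds the RTT in with
		 * confidence 3, while a possibly-retransmitted probe is
		 * either applied with confidence 0 or skipped, depending on
		 * rack_apply_rtt_with_reduced_conf.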
16325 */ 16326 if (rack->rc_in_persist) 16327 counter_u64_add(rack_persists_acks, 1); 16328 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 16329 if (us_rtt == 0) 16330 us_rtt = 1; 16331 if (rack->probe_not_answered == 0) { 16332 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 16333 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); 16334 } else { 16335 /* We have a retransmitted probe here too */ 16336 if (rack_apply_rtt_with_reduced_conf) { 16337 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 16338 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1); 16339 } 16340 } 16341 } 16342 } 16343 16344 static void 16345 rack_new_round_starts(struct tcpcb *tp, struct tcp_rack *rack, uint32_t high_seq) 16346 { 16347 /* 16348 * The next send has occurred mark the end of the round 16349 * as when that data gets acknowledged. We can 16350 * also do common things we might need to do when 16351 * a round begins. 16352 */ 16353 rack->r_ctl.roundends = tp->snd_max; 16354 rack->rc_new_rnd_needed = 0; 16355 rack_log_hystart_event(rack, tp->snd_max, 4); 16356 } 16357 16358 16359 static void 16360 rack_log_pcm(struct tcp_rack *rack, uint8_t mod, uint32_t flex1, uint32_t flex2, 16361 uint32_t flex3) 16362 { 16363 if (tcp_bblogging_on(rack->rc_tp)) { 16364 union tcp_log_stackspecific log; 16365 struct timeval tv; 16366 16367 (void)tcp_get_usecs(&tv); 16368 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16369 log.u_bbr.timeStamp = tcp_tv_to_usectick(&tv); 16370 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16371 log.u_bbr.flex8 = mod; 16372 log.u_bbr.flex1 = flex1; 16373 log.u_bbr.flex2 = flex2; 16374 log.u_bbr.flex3 = flex3; 16375 log.u_bbr.flex4 = rack_pcm_every_n_rounds; 16376 log.u_bbr.flex5 = rack->r_ctl.pcm_idle_rounds; 16377 log.u_bbr.bbr_substate = rack->pcm_needed; 16378 log.u_bbr.bbr_substate <<= 1; 16379 log.u_bbr.bbr_substate |= rack->pcm_in_progress; 16380 log.u_bbr.bbr_substate <<= 1; 16381 log.u_bbr.bbr_substate |= rack->pcm_enabled; /* bits are NIE for Needed, Inprogress, Enabled */ 16382 (void)tcp_log_event(rack->rc_tp, NULL, NULL, NULL, TCP_PCM_MEASURE, ERRNO_UNK, 16383 0, &log, false, NULL, NULL, 0, &tv); 16384 } 16385 } 16386 16387 static void 16388 rack_new_round_setup(struct tcpcb *tp, struct tcp_rack *rack, uint32_t high_seq) 16389 { 16390 /* 16391 * The round (current_round) has ended. We now 16392 * setup for the next round by incrementing the 16393 * round numnber and doing any round specific 16394 * things. 16395 */ 16396 rack_log_hystart_event(rack, high_seq, 21); 16397 rack->r_ctl.current_round++; 16398 /* New round (current_round) begins at next send */ 16399 rack->rc_new_rnd_needed = 1; 16400 if ((rack->pcm_enabled == 1) && 16401 (rack->pcm_needed == 0) && 16402 (rack->pcm_in_progress == 0)) { 16403 /* 16404 * If we have enabled PCM, then we need to 16405 * check if the round has adanced to the state 16406 * where one is required. 
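		 * A measurement becomes due once the rounds elapsed since the
		 * last PCM round, plus pcm_idle_rounds, reach
		 * rack_pcm_every_n_rounds.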
16407 */ 16408 int rnds; 16409 16410 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; 16411 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { 16412 rack->pcm_needed = 1; 16413 rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_round ); 16414 } else if (rack_verbose_logging) { 16415 rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_round ); 16416 } 16417 } 16418 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { 16419 /* We have hystart enabled send the round info in */ 16420 if (CC_ALGO(tp)->newround != NULL) { 16421 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); 16422 } 16423 } 16424 /* 16425 * For DGP an initial startup check. We want to validate 16426 * that we are not just pushing on slow-start and just 16427 * not gaining.. i.e. filling buffers without getting any 16428 * boost in b/w during the inital slow-start. 16429 */ 16430 if (rack->dgp_on && 16431 (rack->rc_initial_ss_comp == 0) && 16432 (tp->snd_cwnd < tp->snd_ssthresh) && 16433 (rack->r_ctl.num_measurements >= RACK_REQ_AVG) && 16434 (rack->r_ctl.gp_rnd_thresh > 0) && 16435 ((rack->r_ctl.current_round - rack->r_ctl.last_rnd_of_gp_rise) >= rack->r_ctl.gp_rnd_thresh)) { 16436 16437 /* 16438 * We are in the initial SS and we have hd rack_rnd_cnt_req rounds(def:5) where 16439 * we have not gained the required amount in the gp_est (120.0% aka 1200). Lets 16440 * exit SS. 16441 * 16442 * Pick up the flight size now as we enter slowstart (not the 16443 * cwnd which may be inflated). 16444 */ 16445 rack->rc_initial_ss_comp = 1; 16446 16447 if (tcp_bblogging_on(rack->rc_tp)) { 16448 union tcp_log_stackspecific log; 16449 struct timeval tv; 16450 16451 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16452 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 16453 log.u_bbr.flex1 = rack->r_ctl.current_round; 16454 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; 16455 log.u_bbr.flex3 = rack->r_ctl.gp_rnd_thresh; 16456 log.u_bbr.flex4 = rack->r_ctl.gate_to_fs; 16457 log.u_bbr.flex5 = rack->r_ctl.ss_hi_fs; 16458 log.u_bbr.flex8 = 40; 16459 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 16460 0, &log, false, NULL, __func__, __LINE__,&tv); 16461 } 16462 if ((rack->r_ctl.gate_to_fs == 1) && 16463 (tp->snd_cwnd > rack->r_ctl.ss_hi_fs)) { 16464 tp->snd_cwnd = rack->r_ctl.ss_hi_fs; 16465 } 16466 tp->snd_ssthresh = tp->snd_cwnd - 1; 16467 /* Turn off any fast output running */ 16468 rack->r_fast_output = 0; 16469 } 16470 } 16471 16472 static int 16473 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) 16474 { 16475 /* 16476 * Handle a "special" compressed ack mbuf. Each incoming 16477 * ack has only four possible dispositions: 16478 * 16479 * A) It moves the cum-ack forward 16480 * B) It is behind the cum-ack. 16481 * C) It is a window-update ack. 16482 * D) It is a dup-ack. 16483 * 16484 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES 16485 * in the incoming mbuf. We also need to still pay attention 16486 * to nxt_pkt since there may be another packet after this 16487 * one. 
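*
* As a hedged illustration, the four dispositions map onto the
* ack_val_set classification done at the top of the loop below
* (mirroring that code, not replacing it):
*
*	if (SEQ_LT(ae->ack, high_seq))
*		ae->ack_val_set = ACK_BEHIND;	(case B)
*	else if (SEQ_GT(ae->ack, high_seq))
*		ae->ack_val_set = ACK_CUMACK;	(case A)
*	else if ((tiwin == the_win) && (rack->rc_in_persist == 0))
*		ae->ack_val_set = ACK_DUPACK;	(case D)
*	else
*		ae->ack_val_set = ACK_RWND;	(case C)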
16488 */ 16489 #ifdef TCP_ACCOUNTING 16490 uint64_t ts_val; 16491 uint64_t rdstc; 16492 #endif 16493 int segsiz; 16494 struct timespec ts; 16495 struct tcp_rack *rack; 16496 struct tcp_ackent *ae; 16497 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack; 16498 int cnt, i, did_out, ourfinisacked = 0; 16499 struct tcpopt to_holder, *to = NULL; 16500 #ifdef TCP_ACCOUNTING 16501 int win_up_req = 0; 16502 #endif 16503 int nsegs = 0; 16504 int under_pacing = 0; 16505 int post_recovery = 0; 16506 #ifdef TCP_ACCOUNTING 16507 sched_pin(); 16508 #endif 16509 rack = (struct tcp_rack *)tp->t_fb_ptr; 16510 if (rack->gp_ready && 16511 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) 16512 under_pacing = 1; 16513 16514 if (rack->r_state != tp->t_state) 16515 rack_set_state(tp, rack); 16516 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 16517 (tp->t_flags & TF_GPUTINPROG)) { 16518 /* 16519 * We have a goodput in progress 16520 * and we have entered a late state. 16521 * Do we have enough data in the sb 16522 * to handle the GPUT request? 16523 */ 16524 uint32_t bytes; 16525 16526 bytes = tp->gput_ack - tp->gput_seq; 16527 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 16528 bytes += tp->gput_seq - tp->snd_una; 16529 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 16530 /* 16531 * There are not enough bytes in the socket 16532 * buffer that have been sent to cover this 16533 * measurement. Cancel it. 16534 */ 16535 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 16536 rack->r_ctl.rc_gp_srtt /*flex1*/, 16537 tp->gput_seq, 16538 0, 0, 18, __LINE__, NULL, 0); 16539 tp->t_flags &= ~TF_GPUTINPROG; 16540 } 16541 } 16542 to = &to_holder; 16543 to->to_flags = 0; 16544 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 16545 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 16546 cnt = m->m_len / sizeof(struct tcp_ackent); 16547 counter_u64_add(rack_multi_single_eq, cnt); 16548 high_seq = tp->snd_una; 16549 the_win = tp->snd_wnd; 16550 win_seq = tp->snd_wl1; 16551 win_upd_ack = tp->snd_wl2; 16552 cts = tcp_tv_to_usectick(tv); 16553 ms_cts = tcp_tv_to_mssectick(tv); 16554 rack->r_ctl.rc_rcvtime = cts; 16555 segsiz = ctf_fixed_maxseg(tp); 16556 if ((rack->rc_gp_dyn_mul) && 16557 (rack->use_fixed_rate == 0) && 16558 (rack->rc_always_pace)) { 16559 /* Check in on probertt */ 16560 rack_check_probe_rtt(rack, cts); 16561 } 16562 for (i = 0; i < cnt; i++) { 16563 #ifdef TCP_ACCOUNTING 16564 ts_val = get_cyclecount(); 16565 #endif 16566 rack_clear_rate_sample(rack); 16567 ae = ((mtod(m, struct tcp_ackent *)) + i); 16568 if (ae->flags & TH_FIN) 16569 rack_log_pacing_delay_calc(rack, 16570 0, 16571 0, 16572 0, 16573 rack_get_gp_est(rack), /* delRate */ 16574 rack_get_lt_bw(rack), /* rttProp */ 16575 20, __LINE__, NULL, 0); 16576 /* Setup the window */ 16577 tiwin = ae->win << tp->snd_scale; 16578 if (tiwin > rack->r_ctl.rc_high_rwnd) 16579 rack->r_ctl.rc_high_rwnd = tiwin; 16580 /* figure out the type of ack */ 16581 if (SEQ_LT(ae->ack, high_seq)) { 16582 /* Case B*/ 16583 ae->ack_val_set = ACK_BEHIND; 16584 } else if (SEQ_GT(ae->ack, high_seq)) { 16585 /* Case A */ 16586 ae->ack_val_set = ACK_CUMACK; 16587 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ 16588 /* Case D */ 16589 ae->ack_val_set = ACK_DUPACK; 16590 } else { 16591 /* Case C */ 16592 ae->ack_val_set = ACK_RWND; 16593 } 16594 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__); 16595 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 16596 /* Validate timestamp */ 16597 if 
(ae->flags & HAS_TSTMP) { 16598 /* Setup for a timestamp */ 16599 to->to_flags = TOF_TS; 16600 ae->ts_echo -= tp->ts_offset; 16601 to->to_tsecr = ae->ts_echo; 16602 to->to_tsval = ae->ts_value; 16603 /* 16604 * If echoed timestamp is later than the current time, fall back to 16605 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 16606 * were used when this connection was established. 16607 */ 16608 if (TSTMP_GT(ae->ts_echo, ms_cts)) 16609 to->to_tsecr = 0; 16610 if (tp->ts_recent && 16611 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 16612 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 16613 #ifdef TCP_ACCOUNTING 16614 rdstc = get_cyclecount(); 16615 if (rdstc > ts_val) { 16616 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16617 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 16618 } 16619 } 16620 #endif 16621 continue; 16622 } 16623 } 16624 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 16625 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 16626 tp->ts_recent_age = tcp_ts_getticks(); 16627 tp->ts_recent = ae->ts_value; 16628 } 16629 } else { 16630 /* Setup for a no options */ 16631 to->to_flags = 0; 16632 } 16633 /* Update the rcv time and perform idle reduction possibly */ 16634 if (tp->t_idle_reduce && 16635 (tp->snd_max == tp->snd_una) && 16636 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 16637 counter_u64_add(rack_input_idle_reduces, 1); 16638 rack_cc_after_idle(rack, tp); 16639 } 16640 tp->t_rcvtime = ticks; 16641 /* Now what about ECN of a chain of pure ACKs? */ 16642 if (tcp_ecn_input_segment(tp, ae->flags, 0, 16643 tcp_packets_this_ack(tp, ae->ack), 16644 ae->codepoint)) 16645 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__); 16646 #ifdef TCP_ACCOUNTING 16647 /* Count for the specific type of ack in */ 16648 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16649 tp->tcp_cnt_counters[ae->ack_val_set]++; 16650 } 16651 #endif 16652 /* 16653 * Note how we could move up these in the determination 16654 * above, but we don't so that way the timestamp checks (and ECN) 16655 * is done first before we do any processing on the ACK. 16656 * The non-compressed path through the code has this 16657 * weakness (noted by @jtl) that it actually does some 16658 * processing before verifying the timestamp information. 16659 * We don't take that path here which is why we set 16660 * the ack_val_set first, do the timestamp and ecn 16661 * processing, and then look at what we have setup. 
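*
* One small editorial note on the checks above: for a pure ack the
* pair of tests used before refreshing ts_recent,
*
*	SEQ_LEQ(ae->seq, tp->last_ack_sent) &&
*	SEQ_LEQ(tp->last_ack_sent, ae->seq)
*
* can only both hold when ae->seq == tp->last_ack_sent, i.e. the
* segment sits exactly at the left edge of our advertised window,
* which is the RFC 7323 condition (with SEG.LEN == 0) for updating
* ts_recent.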
16662 */
16663 if (ae->ack_val_set == ACK_BEHIND) {
16664 /*
16665 * Case B: flag possible reordering if our window is not
16666 * closed; otherwise it could be a keep-alive or persist probe.
16667 */
16668 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) {
16669 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
16670 if (rack->r_ctl.rc_reorder_ts == 0)
16671 rack->r_ctl.rc_reorder_ts = 1;
16672 }
16673 } else if (ae->ack_val_set == ACK_DUPACK) {
16674 /* Case D */
16675 rack_strike_dupack(rack, ae->ack);
16676 } else if (ae->ack_val_set == ACK_RWND) {
16677 /* Case C */
16678 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
16679 ts.tv_sec = ae->timestamp / 1000000000;
16680 ts.tv_nsec = ae->timestamp % 1000000000;
16681 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
16682 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
16683 } else {
16684 rack->r_ctl.act_rcv_time = *tv;
16685 }
16686 if (rack->forced_ack) {
16687 rack_handle_probe_response(rack, tiwin,
16688 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time));
16689 }
16690 #ifdef TCP_ACCOUNTING
16691 win_up_req = 1;
16692 #endif
16693 win_upd_ack = ae->ack;
16694 win_seq = ae->seq;
16695 the_win = tiwin;
16696 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts);
16697 } else {
16698 /* Case A */
16699 if (SEQ_GT(ae->ack, tp->snd_max)) {
16700 /*
16701 * We just send an ack since the incoming
16702 * ack is beyond the largest seq we sent.
16703 */
16704 if ((tp->t_flags & TF_ACKNOW) == 0) {
16705 ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt);
16706 if (tp->t_flags & TF_ACKNOW)
16707 rack->r_wanted_output = 1;
16708 }
16709 } else {
16710 nsegs++;
16711 /* If the window changed, set up to update */
16712 if (tiwin != tp->snd_wnd) {
16713 win_upd_ack = ae->ack;
16714 win_seq = ae->seq;
16715 the_win = tiwin;
16716 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts);
16717 }
16718 #ifdef TCP_ACCOUNTING
16719 /* Account for the acks */
16720 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
16721 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz);
16722 }
16723 #endif
16724 high_seq = ae->ack;
16725 /* Set up our act_rcv_time */
16726 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
16727 ts.tv_sec = ae->timestamp / 1000000000;
16728 ts.tv_nsec = ae->timestamp % 1000000000;
16729 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
16730 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
16731 } else {
16732 rack->r_ctl.act_rcv_time = *tv;
16733 }
16734 rack_process_to_cumack(tp, rack, ae->ack, cts, to,
16735 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time));
16736 #ifdef TCP_REQUEST_TRK
16737 rack_req_check_for_comp(rack, high_seq);
16738 #endif
16739 if (rack->rc_dsack_round_seen) {
16740 /* Is the dsack round over?
*/ 16741 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { 16742 /* Yes it is */ 16743 rack->rc_dsack_round_seen = 0; 16744 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 16745 } 16746 } 16747 } 16748 } 16749 /* And lets be sure to commit the rtt measurements for this ack */ 16750 tcp_rack_xmit_timer_commit(rack, tp); 16751 #ifdef TCP_ACCOUNTING 16752 rdstc = get_cyclecount(); 16753 if (rdstc > ts_val) { 16754 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16755 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 16756 if (ae->ack_val_set == ACK_CUMACK) 16757 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); 16758 } 16759 } 16760 #endif 16761 } 16762 #ifdef TCP_ACCOUNTING 16763 ts_val = get_cyclecount(); 16764 #endif 16765 /* Tend to any collapsed window */ 16766 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) { 16767 /* The peer collapsed the window */ 16768 rack_collapsed_window(rack, (tp->snd_max - high_seq), high_seq, __LINE__); 16769 } else if (rack->rc_has_collapsed) 16770 rack_un_collapse_window(rack, __LINE__); 16771 if ((rack->r_collapse_point_valid) && 16772 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point))) 16773 rack->r_collapse_point_valid = 0; 16774 acked_amount = acked = (high_seq - tp->snd_una); 16775 if (acked) { 16776 /* 16777 * The draft (v3) calls for us to use SEQ_GEQ, but that 16778 * causes issues when we are just going app limited. Lets 16779 * instead use SEQ_GT <or> where its equal but more data 16780 * is outstanding. 16781 * 16782 * Also make sure we are on the last ack of a series. We 16783 * have to have all the ack's processed in queue to know 16784 * if there is something left outstanding. 16785 * 16786 */ 16787 if (SEQ_GEQ(high_seq, rack->r_ctl.roundends) && 16788 (rack->rc_new_rnd_needed == 0) && 16789 (nxt_pkt == 0)) { 16790 /* 16791 * We have crossed into a new round with 16792 * this th_ack value. 16793 */ 16794 rack_new_round_setup(tp, rack, high_seq); 16795 } 16796 /* 16797 * Clear the probe not answered flag 16798 * since cum-ack moved forward. 16799 */ 16800 rack->probe_not_answered = 0; 16801 if (tp->t_flags & TF_NEEDSYN) { 16802 /* 16803 * T/TCP: Connection was half-synchronized, and our SYN has 16804 * been ACK'd (so connection is now fully synchronized). Go 16805 * to non-starred state, increment snd_una for ACK of SYN, 16806 * and check if we can do window scaling. 16807 */ 16808 tp->t_flags &= ~TF_NEEDSYN; 16809 tp->snd_una++; 16810 acked_amount = acked = (high_seq - tp->snd_una); 16811 } 16812 if (acked > sbavail(&so->so_snd)) 16813 acked_amount = sbavail(&so->so_snd); 16814 if (IN_FASTRECOVERY(tp->t_flags) && 16815 (rack->rack_no_prr == 0)) 16816 rack_update_prr(tp, rack, acked_amount, high_seq); 16817 if (IN_RECOVERY(tp->t_flags)) { 16818 if (SEQ_LT(high_seq, tp->snd_recover) && 16819 (SEQ_LT(high_seq, tp->snd_max))) { 16820 tcp_rack_partialack(tp); 16821 } else { 16822 rack_post_recovery(tp, high_seq); 16823 post_recovery = 1; 16824 } 16825 } else if ((rack->rto_from_rec == 1) && 16826 SEQ_GEQ(high_seq, tp->snd_recover)) { 16827 /* 16828 * We were in recovery, hit a rxt timeout 16829 * and never re-entered recovery. The timeout(s) 16830 * made up all the lost data. In such a case 16831 * we need to clear the rto_from_rec flag. 
16832 */ 16833 rack->rto_from_rec = 0; 16834 } 16835 /* Handle the rack-log-ack part (sendmap) */ 16836 if ((sbused(&so->so_snd) == 0) && 16837 (acked > acked_amount) && 16838 (tp->t_state >= TCPS_FIN_WAIT_1) && 16839 (tp->t_flags & TF_SENTFIN)) { 16840 /* 16841 * We must be sure our fin 16842 * was sent and acked (we can be 16843 * in FIN_WAIT_1 without having 16844 * sent the fin). 16845 */ 16846 ourfinisacked = 1; 16847 /* 16848 * Lets make sure snd_una is updated 16849 * since most likely acked_amount = 0 (it 16850 * should be). 16851 */ 16852 tp->snd_una = high_seq; 16853 } 16854 /* Did we make a RTO error? */ 16855 if ((tp->t_flags & TF_PREVVALID) && 16856 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 16857 tp->t_flags &= ~TF_PREVVALID; 16858 if (tp->t_rxtshift == 1 && 16859 (int)(ticks - tp->t_badrxtwin) < 0) 16860 rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__); 16861 } 16862 /* Handle the data in the socket buffer */ 16863 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 16864 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 16865 if (acked_amount > 0) { 16866 uint32_t p_cwnd; 16867 struct mbuf *mfree; 16868 16869 if (post_recovery) { 16870 /* 16871 * Grab the segsiz, multiply by 2 and add the snd_cwnd 16872 * that is the max the CC should add if we are exiting 16873 * recovery and doing a late add. 16874 */ 16875 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16876 p_cwnd <<= 1; 16877 p_cwnd += tp->snd_cwnd; 16878 } 16879 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, post_recovery); 16880 if (post_recovery && (tp->snd_cwnd > p_cwnd)) { 16881 /* Must be non-newreno (cubic) getting too ahead of itself */ 16882 tp->snd_cwnd = p_cwnd; 16883 } 16884 SOCKBUF_LOCK(&so->so_snd); 16885 mfree = sbcut_locked(&so->so_snd, acked_amount); 16886 tp->snd_una = high_seq; 16887 /* Note we want to hold the sb lock through the sendmap adjust */ 16888 rack_adjust_sendmap_head(rack, &so->so_snd); 16889 /* Wake up the socket if we have room to write more */ 16890 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 16891 sowwakeup_locked(so); 16892 m_freem(mfree); 16893 } 16894 /* update progress */ 16895 tp->t_acktime = ticks; 16896 rack_log_progress_event(rack, tp, tp->t_acktime, 16897 PROGRESS_UPDATE, __LINE__); 16898 /* Clear out shifts and such */ 16899 tp->t_rxtshift = 0; 16900 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 16901 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 16902 rack->rc_tlp_in_progress = 0; 16903 rack->r_ctl.rc_tlp_cnt_out = 0; 16904 /* Send recover and snd_nxt must be dragged along */ 16905 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 16906 tp->snd_recover = tp->snd_una; 16907 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 16908 tp->snd_nxt = tp->snd_max; 16909 /* 16910 * If the RXT timer is running we want to 16911 * stop it, so we can restart a TLP (or new RXT). 
16912 */ 16913 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 16914 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16915 tp->snd_wl2 = high_seq; 16916 tp->t_dupacks = 0; 16917 if (under_pacing && 16918 (rack->use_fixed_rate == 0) && 16919 (rack->in_probe_rtt == 0) && 16920 rack->rc_gp_dyn_mul && 16921 rack->rc_always_pace) { 16922 /* Check if we are dragging bottom */ 16923 rack_check_bottom_drag(tp, rack, so); 16924 } 16925 if (tp->snd_una == tp->snd_max) { 16926 tp->t_flags &= ~TF_PREVVALID; 16927 rack->r_ctl.retran_during_recovery = 0; 16928 rack->rc_suspicious = 0; 16929 rack->r_ctl.dsack_byte_cnt = 0; 16930 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 16931 if (rack->r_ctl.rc_went_idle_time == 0) 16932 rack->r_ctl.rc_went_idle_time = 1; 16933 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 16934 if (sbavail(&tptosocket(tp)->so_snd) == 0) 16935 tp->t_acktime = 0; 16936 /* Set so we might enter persists... */ 16937 rack->r_wanted_output = 1; 16938 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16939 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 16940 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 16941 (sbavail(&so->so_snd) == 0) && 16942 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 16943 /* 16944 * The socket was gone and the 16945 * peer sent data (not now in the past), time to 16946 * reset him. 16947 */ 16948 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16949 /* tcp_close will kill the inp pre-log the Reset */ 16950 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 16951 #ifdef TCP_ACCOUNTING 16952 rdstc = get_cyclecount(); 16953 if (rdstc > ts_val) { 16954 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16955 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16956 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16957 } 16958 } 16959 #endif 16960 m_freem(m); 16961 tp = tcp_close(tp); 16962 if (tp == NULL) { 16963 #ifdef TCP_ACCOUNTING 16964 sched_unpin(); 16965 #endif 16966 return (1); 16967 } 16968 /* 16969 * We would normally do drop-with-reset which would 16970 * send back a reset. We can't since we don't have 16971 * all the needed bits. Instead lets arrange for 16972 * a call to tcp_output(). That way since we 16973 * are in the closed state we will generate a reset. 16974 * 16975 * Note if tcp_accounting is on we don't unpin since 16976 * we do that after the goto label. 16977 */ 16978 goto send_out_a_rst; 16979 } 16980 if ((sbused(&so->so_snd) == 0) && 16981 (tp->t_state >= TCPS_FIN_WAIT_1) && 16982 (tp->t_flags & TF_SENTFIN)) { 16983 /* 16984 * If we can't receive any more data, then closing user can 16985 * proceed. Starting the timer is contrary to the 16986 * specification, but if we don't get a FIN we'll hang 16987 * forever. 16988 * 16989 */ 16990 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 16991 soisdisconnected(so); 16992 tcp_timer_activate(tp, TT_2MSL, 16993 (tcp_fast_finwait2_recycle ? 16994 tcp_finwait2_timeout : 16995 TP_MAXIDLE(tp))); 16996 } 16997 if (ourfinisacked == 0) { 16998 /* 16999 * We don't change to fin-wait-2 if we have our fin acked 17000 * which means we are probably in TCPS_CLOSING. 
17001 */ 17002 tcp_state_change(tp, TCPS_FIN_WAIT_2); 17003 } 17004 } 17005 } 17006 /* Wake up the socket if we have room to write more */ 17007 if (sbavail(&so->so_snd)) { 17008 rack->r_wanted_output = 1; 17009 if (ctf_progress_timeout_check(tp, true)) { 17010 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 17011 tp, tick, PROGRESS_DROP, __LINE__); 17012 /* 17013 * We cheat here and don't send a RST, we should send one 17014 * when the pacer drops the connection. 17015 */ 17016 #ifdef TCP_ACCOUNTING 17017 rdstc = get_cyclecount(); 17018 if (rdstc > ts_val) { 17019 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17020 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 17021 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 17022 } 17023 } 17024 sched_unpin(); 17025 #endif 17026 (void)tcp_drop(tp, ETIMEDOUT); 17027 m_freem(m); 17028 return (1); 17029 } 17030 } 17031 if (ourfinisacked) { 17032 switch(tp->t_state) { 17033 case TCPS_CLOSING: 17034 #ifdef TCP_ACCOUNTING 17035 rdstc = get_cyclecount(); 17036 if (rdstc > ts_val) { 17037 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17038 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 17039 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 17040 } 17041 } 17042 sched_unpin(); 17043 #endif 17044 tcp_twstart(tp); 17045 m_freem(m); 17046 return (1); 17047 break; 17048 case TCPS_LAST_ACK: 17049 #ifdef TCP_ACCOUNTING 17050 rdstc = get_cyclecount(); 17051 if (rdstc > ts_val) { 17052 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17053 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 17054 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 17055 } 17056 } 17057 sched_unpin(); 17058 #endif 17059 tp = tcp_close(tp); 17060 ctf_do_drop(m, tp); 17061 return (1); 17062 break; 17063 case TCPS_FIN_WAIT_1: 17064 #ifdef TCP_ACCOUNTING 17065 rdstc = get_cyclecount(); 17066 if (rdstc > ts_val) { 17067 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17068 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 17069 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 17070 } 17071 } 17072 #endif 17073 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 17074 soisdisconnected(so); 17075 tcp_timer_activate(tp, TT_2MSL, 17076 (tcp_fast_finwait2_recycle ? 17077 tcp_finwait2_timeout : 17078 TP_MAXIDLE(tp))); 17079 } 17080 tcp_state_change(tp, TCPS_FIN_WAIT_2); 17081 break; 17082 default: 17083 break; 17084 } 17085 } 17086 if (rack->r_fast_output) { 17087 /* 17088 * We re doing fast output.. can we expand that? 
17089 */ 17090 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 17091 } 17092 #ifdef TCP_ACCOUNTING 17093 rdstc = get_cyclecount(); 17094 if (rdstc > ts_val) { 17095 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17096 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 17097 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 17098 } 17099 } 17100 17101 } else if (win_up_req) { 17102 rdstc = get_cyclecount(); 17103 if (rdstc > ts_val) { 17104 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17105 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 17106 } 17107 } 17108 #endif 17109 } 17110 /* Now is there a next packet, if so we are done */ 17111 m_freem(m); 17112 did_out = 0; 17113 if (nxt_pkt) { 17114 #ifdef TCP_ACCOUNTING 17115 sched_unpin(); 17116 #endif 17117 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 17118 return (0); 17119 } 17120 rack_handle_might_revert(tp, rack); 17121 ctf_calc_rwin(so, tp); 17122 if ((rack->r_wanted_output != 0) || 17123 (rack->r_fast_output != 0) || 17124 (tp->t_flags & TF_ACKNOW )) { 17125 send_out_a_rst: 17126 if (tcp_output(tp) < 0) { 17127 #ifdef TCP_ACCOUNTING 17128 sched_unpin(); 17129 #endif 17130 return (1); 17131 } 17132 did_out = 1; 17133 } 17134 if (tp->t_flags2 & TF2_HPTS_CALLS) 17135 tp->t_flags2 &= ~TF2_HPTS_CALLS; 17136 rack_free_trim(rack); 17137 #ifdef TCP_ACCOUNTING 17138 sched_unpin(); 17139 #endif 17140 rack_timer_audit(tp, rack, &so->so_snd); 17141 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 17142 return (0); 17143 } 17144 17145 #define TCP_LRO_TS_OPTION \ 17146 ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \ 17147 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP) 17148 17149 static int 17150 rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 17151 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, int32_t nxt_pkt, 17152 struct timeval *tv) 17153 { 17154 struct inpcb *inp = tptoinpcb(tp); 17155 struct socket *so = tptosocket(tp); 17156 #ifdef TCP_ACCOUNTING 17157 uint64_t ts_val; 17158 #endif 17159 int32_t thflags, retval, did_out = 0; 17160 int32_t way_out = 0; 17161 /* 17162 * cts - is the current time from tv (caller gets ts) in microseconds. 17163 * ms_cts - is the current time from tv in milliseconds. 17164 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. 17165 */ 17166 uint32_t cts, us_cts, ms_cts; 17167 uint32_t tiwin; 17168 struct timespec ts; 17169 struct tcpopt to; 17170 struct tcp_rack *rack; 17171 struct rack_sendmap *rsm; 17172 int32_t prev_state = 0; 17173 int no_output = 0; 17174 int slot_remaining = 0; 17175 #ifdef TCP_ACCOUNTING 17176 int ack_val_set = 0xf; 17177 #endif 17178 int nsegs; 17179 17180 NET_EPOCH_ASSERT(); 17181 INP_WLOCK_ASSERT(inp); 17182 17183 /* 17184 * tv passed from common code is from either M_TSTMP_LRO or 17185 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 17186 */ 17187 rack = (struct tcp_rack *)tp->t_fb_ptr; 17188 if (rack->rack_deferred_inited == 0) { 17189 /* 17190 * If we are the connecting socket we will 17191 * hit rack_init() when no sequence numbers 17192 * are setup. This makes it so we must defer 17193 * some initialization. Call that now. 17194 */ 17195 rack_deferred_init(tp, rack); 17196 } 17197 /* 17198 * Check to see if we need to skip any output plans. This 17199 * can happen in the non-LRO path where we are pacing and 17200 * must process the ack coming in but need to defer sending 17201 * anything becase a pacing timer is running. 
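*
* For reference, the "plain ack" test applied further below accepts
* either no TCP options at all or exactly the 12-byte RFC 7323
* appendix-A timestamp block, whose leading 32-bit word is compared
* against TCP_LRO_TS_OPTION (illustrative byte view, values taken
* from the standard option kinds):
*
*	0x01 (NOP) | 0x01 (NOP) | 0x08 (TIMESTAMP) | 0x0a (length 10)
*
* i.e. the same shape LRO itself is willing to coalesce, so such an
* ack can safely sit queued until the pacing timer fires.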
17202 */ 17203 us_cts = tcp_tv_to_usectick(tv); 17204 if (m->m_flags & M_ACKCMP) { 17205 /* 17206 * All compressed ack's are ack's by definition so 17207 * remove any ack required flag and then do the processing. 17208 */ 17209 rack->rc_ack_required = 0; 17210 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 17211 } 17212 thflags = tcp_get_flags(th); 17213 if ((rack->rc_always_pace == 1) && 17214 (rack->rc_ack_can_sendout_data == 0) && 17215 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 17216 (TSTMP_LT(us_cts, rack->r_ctl.rc_last_output_to))) { 17217 /* 17218 * Ok conditions are right for queuing the packets 17219 * but we do have to check the flags in the inp, it 17220 * could be, if a sack is present, we want to be awoken and 17221 * so should process the packets. 17222 */ 17223 slot_remaining = rack->r_ctl.rc_last_output_to - us_cts; 17224 if (rack->rc_tp->t_flags2 & TF2_DONT_SACK_QUEUE) { 17225 no_output = 1; 17226 } else { 17227 /* 17228 * If there is no options, or just a 17229 * timestamp option, we will want to queue 17230 * the packets. This is the same that LRO does 17231 * and will need to change with accurate ECN. 17232 */ 17233 uint32_t *ts_ptr; 17234 int optlen; 17235 17236 optlen = (th->th_off << 2) - sizeof(struct tcphdr); 17237 ts_ptr = (uint32_t *)(th + 1); 17238 if ((optlen == 0) || 17239 ((optlen == TCPOLEN_TSTAMP_APPA) && 17240 (*ts_ptr == TCP_LRO_TS_OPTION))) 17241 no_output = 1; 17242 } 17243 if ((no_output == 1) && (slot_remaining < tcp_min_hptsi_time)) { 17244 /* 17245 * It is unrealistic to think we can pace in less than 17246 * the minimum granularity of the pacer (def:250usec). So 17247 * if we have less than that time remaining we should go 17248 * ahead and allow output to be "early". We will attempt to 17249 * make up for it in any pacing time we try to apply on 17250 * the outbound packet. 17251 */ 17252 no_output = 0; 17253 } 17254 } 17255 /* 17256 * If there is a RST or FIN lets dump out the bw 17257 * with a FIN the connection may go on but we 17258 * may not. 17259 */ 17260 if ((thflags & TH_FIN) || (thflags & TH_RST)) 17261 rack_log_pacing_delay_calc(rack, 17262 rack->r_ctl.gp_bw, 17263 0, 17264 0, 17265 rack_get_gp_est(rack), /* delRate */ 17266 rack_get_lt_bw(rack), /* rttProp */ 17267 20, __LINE__, NULL, 0); 17268 if (m->m_flags & M_ACKCMP) { 17269 panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp); 17270 } 17271 cts = tcp_tv_to_usectick(tv); 17272 ms_cts = tcp_tv_to_mssectick(tv); 17273 nsegs = m->m_pkthdr.lro_nsegs; 17274 counter_u64_add(rack_proc_non_comp_ack, 1); 17275 #ifdef TCP_ACCOUNTING 17276 sched_pin(); 17277 if (thflags & TH_ACK) 17278 ts_val = get_cyclecount(); 17279 #endif 17280 if ((m->m_flags & M_TSTMP) || 17281 (m->m_flags & M_TSTMP_LRO)) { 17282 mbuf_tstmp2timespec(m, &ts); 17283 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 17284 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 17285 } else 17286 rack->r_ctl.act_rcv_time = *tv; 17287 kern_prefetch(rack, &prev_state); 17288 prev_state = 0; 17289 /* 17290 * Unscale the window into a 32-bit value. For the SYN_SENT state 17291 * the scale is zero. 17292 */ 17293 tiwin = th->th_win << tp->snd_scale; 17294 #ifdef TCP_ACCOUNTING 17295 if (thflags & TH_ACK) { 17296 /* 17297 * We have a tradeoff here. We can either do what we are 17298 * doing i.e. pinning to this CPU and then doing the accounting 17299 * <or> we could do a critical enter, setup the rdtsc and cpu 17300 * as in below, and then validate we are on the same CPU on 17301 * exit. 
I have chosen to not do the critical enter since
17302 * that often will gain you a context switch, and instead lock
17303 * us (line above this if) to the same CPU with sched_pin(). This
17304 * means we may be context switched out for a higher priority
17305 * interrupt but we won't be moved to another CPU.
17306 *
17307 * If this occurs (which it won't very often since we most likely
17308 * are running this code in interrupt context and only a higher
17309 * priority will bump us ... clock?) we will falsely add the
17310 * interrupt processing time in on top of the ack processing
17311 * time. This is ok since it is a rare event.
17312 */
17313 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin,
17314 ctf_fixed_maxseg(tp));
17315 }
17316 #endif
17317 /*
17318 * Parse options on any incoming segment.
17319 */
17320 memset(&to, 0, sizeof(to));
17321 tcp_dooptions(&to, (u_char *)(th + 1),
17322 (th->th_off << 2) - sizeof(struct tcphdr),
17323 (thflags & TH_SYN) ? TO_SYN : 0);
17324 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
17325 __func__));
17326 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
17327 __func__));
17328 if (tp->t_flags2 & TF2_PROC_SACK_PROHIBIT) {
17329 /*
17330 * We don't look at SACKs from the
17331 * peer because the MSS is too small, which
17332 * can subject us to an attack.
17333 */
17334 to.to_flags &= ~TOF_SACK;
17335 }
17336 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
17337 (tp->t_flags & TF_GPUTINPROG)) {
17338 /*
17339 * We have a goodput in progress
17340 * and we have entered a late state.
17341 * Do we have enough data in the sb
17342 * to handle the GPUT request?
17343 */
17344 uint32_t bytes;
17345
17346 bytes = tp->gput_ack - tp->gput_seq;
17347 if (SEQ_GT(tp->gput_seq, tp->snd_una))
17348 bytes += tp->gput_seq - tp->snd_una;
17349 if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
17350 /*
17351 * There are not enough bytes in the socket
17352 * buffer that have been sent to cover this
17353 * measurement. Cancel it.
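*
* A hedged restatement of the test just above: the in-progress
* goodput measurement still needs
*
*	bytes = tp->gput_ack - tp->gput_seq
*	        (+ tp->gput_seq - tp->snd_una when snd_una has not
*	         yet reached gput_seq)
*
* bytes of data to have been queued for sending; if sbavail() on the
* send buffer is smaller than that, the sample can never complete,
* so it is cancelled rather than left to skew the estimate.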
17354 */
17355 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
17356 rack->r_ctl.rc_gp_srtt /*flex1*/,
17357 tp->gput_seq,
17358 0, 0, 18, __LINE__, NULL, 0);
17359 tp->t_flags &= ~TF_GPUTINPROG;
17360 }
17361 }
17362 if (tcp_bblogging_on(rack->rc_tp)) {
17363 union tcp_log_stackspecific log;
17364 struct timeval ltv;
17365 #ifdef TCP_REQUEST_TRK
17366 struct tcp_sendfile_track *tcp_req;
17367
17368 if (SEQ_GT(th->th_ack, tp->snd_una)) {
17369 tcp_req = tcp_req_find_req_for_seq(tp, (th->th_ack-1));
17370 } else {
17371 tcp_req = tcp_req_find_req_for_seq(tp, th->th_ack);
17372 }
17373 #endif
17374 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
17375 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
17376 if (rack->rack_no_prr == 0)
17377 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
17378 else
17379 log.u_bbr.flex1 = 0;
17380 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
17381 log.u_bbr.use_lt_bw <<= 1;
17382 log.u_bbr.use_lt_bw |= rack->r_might_revert;
17383 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
17384 log.u_bbr.bbr_state = rack->rc_free_cnt;
17385 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
17386 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
17387 log.u_bbr.flex3 = m->m_flags;
17388 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
17389 log.u_bbr.lost = thflags;
17390 log.u_bbr.pacing_gain = 0x1;
17391 #ifdef TCP_ACCOUNTING
17392 log.u_bbr.cwnd_gain = ack_val_set;
17393 #endif
17394 log.u_bbr.flex7 = 2;
17395 if (m->m_flags & M_TSTMP) {
17396 /* Record the hardware timestamp if present */
17397 mbuf_tstmp2timespec(m, &ts);
17398 ltv.tv_sec = ts.tv_sec;
17399 ltv.tv_usec = ts.tv_nsec / 1000;
17400 log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
17401 } else if (m->m_flags & M_TSTMP_LRO) {
17402 /* Record the LRO arrival timestamp */
17403 mbuf_tstmp2timespec(m, &ts);
17404 ltv.tv_sec = ts.tv_sec;
17405 ltv.tv_usec = ts.tv_nsec / 1000;
17406 log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
17407 }
17408 log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
17409 /* Log the rcv time */
17410 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp;
17411 #ifdef TCP_REQUEST_TRK
17412 log.u_bbr.applimited = tp->t_tcpreq_closed;
17413 log.u_bbr.applimited <<= 8;
17414 log.u_bbr.applimited |= tp->t_tcpreq_open;
17415 log.u_bbr.applimited <<= 8;
17416 log.u_bbr.applimited |= tp->t_tcpreq_req;
17417 if (tcp_req) {
17418 /* Copy out any client req info */
17419 /* seconds */
17420 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC);
17421 /* useconds */
17422 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC);
17423 log.u_bbr.rttProp = tcp_req->timestamp;
17424 log.u_bbr.cur_del_rate = tcp_req->start;
17425 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) {
17426 log.u_bbr.flex8 |= 1;
17427 } else {
17428 log.u_bbr.flex8 |= 2;
17429 log.u_bbr.bw_inuse = tcp_req->end;
17430 }
17431 log.u_bbr.flex6 = tcp_req->start_seq;
17432 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) {
17433 log.u_bbr.flex8 |= 4;
17434 log.u_bbr.epoch = tcp_req->end_seq;
17435 }
17436 }
17437 #endif
17438 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
17439 tlen, &log, true, &ltv);
17440 }
17441 /* Remove the ack required flag if it is set, we have one */
17442 if (thflags & TH_ACK)
17443 rack->rc_ack_required = 0;
17444 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__);
17445 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
17446 way_out = 4;
17447 retval = 0;
17448 m_freem(m);
17449 goto done_with_input;
17450 }
17451 /*
17452 * If a segment with the ACK-bit set arrives in the
SYN-SENT state 17453 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9. 17454 */ 17455 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 17456 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 17457 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 17458 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 17459 #ifdef TCP_ACCOUNTING 17460 sched_unpin(); 17461 #endif 17462 return (1); 17463 } 17464 /* 17465 * If timestamps were negotiated during SYN/ACK and a 17466 * segment without a timestamp is received, silently drop 17467 * the segment, unless it is a RST segment or missing timestamps are 17468 * tolerated. 17469 * See section 3.2 of RFC 7323. 17470 */ 17471 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 17472 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 17473 way_out = 5; 17474 retval = 0; 17475 m_freem(m); 17476 goto done_with_input; 17477 } 17478 /* 17479 * Segment received on connection. Reset idle time and keep-alive 17480 * timer. XXX: This should be done after segment validation to 17481 * ignore broken/spoofed segs. 17482 */ 17483 if (tp->t_idle_reduce && 17484 (tp->snd_max == tp->snd_una) && 17485 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 17486 counter_u64_add(rack_input_idle_reduces, 1); 17487 rack_cc_after_idle(rack, tp); 17488 } 17489 tp->t_rcvtime = ticks; 17490 #ifdef STATS 17491 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 17492 #endif 17493 if (tiwin > rack->r_ctl.rc_high_rwnd) 17494 rack->r_ctl.rc_high_rwnd = tiwin; 17495 /* 17496 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 17497 * this to occur after we've validated the segment. 17498 */ 17499 if (tcp_ecn_input_segment(tp, thflags, tlen, 17500 tcp_packets_this_ack(tp, th->th_ack), 17501 iptos)) 17502 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__); 17503 17504 /* 17505 * If echoed timestamp is later than the current time, fall back to 17506 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 17507 * were used when this connection was established. 17508 */ 17509 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 17510 to.to_tsecr -= tp->ts_offset; 17511 if (TSTMP_GT(to.to_tsecr, ms_cts)) 17512 to.to_tsecr = 0; 17513 } 17514 if ((rack->r_rcvpath_rtt_up == 1) && 17515 (to.to_flags & TOF_TS) && 17516 (TSTMP_GEQ(to.to_tsecr, rack->r_ctl.last_rcv_tstmp_for_rtt))) { 17517 uint32_t rtt = 0; 17518 17519 /* 17520 * We are receiving only and thus not sending 17521 * data to do an RTT. We set a flag when we first 17522 * sent this TS to the peer. We now have it back 17523 * and have an RTT to share. We log it as a conf 17524 * 4, we are not so sure about it.. since we 17525 * may have lost an ack. 17526 */ 17527 if (TSTMP_GT(cts, rack->r_ctl.last_time_of_arm_rcv)) 17528 rtt = (cts - rack->r_ctl.last_time_of_arm_rcv); 17529 rack->r_rcvpath_rtt_up = 0; 17530 /* Submit and commit the timer */ 17531 if (rtt > 0) { 17532 tcp_rack_xmit_timer(rack, rtt, 0, rtt, 4, NULL, 1); 17533 tcp_rack_xmit_timer_commit(rack, tp); 17534 } 17535 } 17536 /* 17537 * If its the first time in we need to take care of options and 17538 * verify we can do SACK for rack! 17539 */ 17540 if (rack->r_state == 0) { 17541 /* Should be init'd by rack_init() */ 17542 KASSERT(rack->rc_inp != NULL, 17543 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 17544 if (rack->rc_inp == NULL) { 17545 rack->rc_inp = inp; 17546 } 17547 17548 /* 17549 * Process options only when we get SYN/ACK back. 
The SYN 17550 * case for incoming connections is handled in tcp_syncache. 17551 * According to RFC1323 the window field in a SYN (i.e., a 17552 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 17553 * this is traditional behavior, may need to be cleaned up. 17554 */ 17555 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 17556 /* Handle parallel SYN for ECN */ 17557 tcp_ecn_input_parallel_syn(tp, thflags, iptos); 17558 if ((to.to_flags & TOF_SCALE) && 17559 (tp->t_flags & TF_REQ_SCALE)) { 17560 tp->t_flags |= TF_RCVD_SCALE; 17561 tp->snd_scale = to.to_wscale; 17562 } else 17563 tp->t_flags &= ~TF_REQ_SCALE; 17564 /* 17565 * Initial send window. It will be updated with the 17566 * next incoming segment to the scaled value. 17567 */ 17568 tp->snd_wnd = th->th_win; 17569 rack_validate_fo_sendwin_up(tp, rack); 17570 if ((to.to_flags & TOF_TS) && 17571 (tp->t_flags & TF_REQ_TSTMP)) { 17572 tp->t_flags |= TF_RCVD_TSTMP; 17573 tp->ts_recent = to.to_tsval; 17574 tp->ts_recent_age = cts; 17575 } else 17576 tp->t_flags &= ~TF_REQ_TSTMP; 17577 if (to.to_flags & TOF_MSS) { 17578 tcp_mss(tp, to.to_mss); 17579 } 17580 if ((tp->t_flags & TF_SACK_PERMIT) && 17581 (to.to_flags & TOF_SACKPERM) == 0) 17582 tp->t_flags &= ~TF_SACK_PERMIT; 17583 if (tp->t_flags & TF_FASTOPEN) { 17584 if (to.to_flags & TOF_FASTOPEN) { 17585 uint16_t mss; 17586 17587 if (to.to_flags & TOF_MSS) 17588 mss = to.to_mss; 17589 else 17590 if ((inp->inp_vflag & INP_IPV6) != 0) 17591 mss = TCP6_MSS; 17592 else 17593 mss = TCP_MSS; 17594 tcp_fastopen_update_cache(tp, mss, 17595 to.to_tfo_len, to.to_tfo_cookie); 17596 } else 17597 tcp_fastopen_disable_path(tp); 17598 } 17599 } 17600 /* 17601 * At this point we are at the initial call. Here we decide 17602 * if we are doing RACK or not. We do this by seeing if 17603 * TF_SACK_PERMIT is set and the sack-not-required is clear. 17604 * The code now does do dup-ack counting so if you don't 17605 * switch back you won't get rack & TLP, but you will still 17606 * get this stack. 17607 */ 17608 17609 if ((rack_sack_not_required == 0) && 17610 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 17611 tcp_switch_back_to_default(tp); 17612 (*tp->t_fb->tfb_tcp_do_segment)(tp, m, th, drop_hdrlen, 17613 tlen, iptos); 17614 #ifdef TCP_ACCOUNTING 17615 sched_unpin(); 17616 #endif 17617 return (1); 17618 } 17619 tcp_set_hpts(tp); 17620 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 17621 } 17622 if (thflags & TH_FIN) 17623 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 17624 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 17625 if ((rack->rc_gp_dyn_mul) && 17626 (rack->use_fixed_rate == 0) && 17627 (rack->rc_always_pace)) { 17628 /* Check in on probertt */ 17629 rack_check_probe_rtt(rack, cts); 17630 } 17631 rack_clear_rate_sample(rack); 17632 if ((rack->forced_ack) && 17633 ((tcp_get_flags(th) & TH_RST) == 0)) { 17634 rack_handle_probe_response(rack, tiwin, us_cts); 17635 } 17636 /* 17637 * This is the one exception case where we set the rack state 17638 * always. All other times (timers etc) we must have a rack-state 17639 * set (so we assure we have done the checks above for SACK). 
17640 */ 17641 rack->r_ctl.rc_rcvtime = cts; 17642 if (rack->r_state != tp->t_state) 17643 rack_set_state(tp, rack); 17644 if (SEQ_GT(th->th_ack, tp->snd_una) && 17645 (rsm = tqhash_min(rack->r_ctl.tqh)) != NULL) 17646 kern_prefetch(rsm, &prev_state); 17647 prev_state = rack->r_state; 17648 if ((thflags & TH_RST) && 17649 ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 17650 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || 17651 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq))) { 17652 /* The connection will be killed by a reset check the tracepoint */ 17653 tcp_trace_point(rack->rc_tp, TCP_TP_RESET_RCV); 17654 } 17655 retval = (*rack->r_substate) (m, th, so, 17656 tp, &to, drop_hdrlen, 17657 tlen, tiwin, thflags, nxt_pkt, iptos); 17658 if (retval == 0) { 17659 /* 17660 * If retval is 1 the tcb is unlocked and most likely the tp 17661 * is gone. 17662 */ 17663 INP_WLOCK_ASSERT(inp); 17664 if ((rack->rc_gp_dyn_mul) && 17665 (rack->rc_always_pace) && 17666 (rack->use_fixed_rate == 0) && 17667 rack->in_probe_rtt && 17668 (rack->r_ctl.rc_time_probertt_starts == 0)) { 17669 /* 17670 * If we are going for target, lets recheck before 17671 * we output. 17672 */ 17673 rack_check_probe_rtt(rack, cts); 17674 } 17675 if (rack->set_pacing_done_a_iw == 0) { 17676 /* How much has been acked? */ 17677 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 17678 /* We have enough to set in the pacing segment size */ 17679 rack->set_pacing_done_a_iw = 1; 17680 rack_set_pace_segments(tp, rack, __LINE__, NULL); 17681 } 17682 } 17683 tcp_rack_xmit_timer_commit(rack, tp); 17684 #ifdef TCP_ACCOUNTING 17685 /* 17686 * If we set the ack_val_se to what ack processing we are doing 17687 * we also want to track how many cycles we burned. Note 17688 * the bits after tcp_output we let be "free". This is because 17689 * we are also tracking the tcp_output times as well. Note the 17690 * use of 0xf here since we only have 11 counter (0 - 0xa) and 17691 * 0xf cannot be returned and is what we initialize it too to 17692 * indicate we are not doing the tabulations. 17693 */ 17694 if (ack_val_set != 0xf) { 17695 uint64_t crtsc; 17696 17697 crtsc = get_cyclecount(); 17698 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17699 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 17700 } 17701 } 17702 #endif 17703 if ((nxt_pkt == 0) && (no_output == 0)) { 17704 if ((rack->r_wanted_output != 0) || 17705 (tp->t_flags & TF_ACKNOW) || 17706 (rack->r_fast_output != 0)) { 17707 17708 do_output_now: 17709 if (tcp_output(tp) < 0) { 17710 #ifdef TCP_ACCOUNTING 17711 sched_unpin(); 17712 #endif 17713 return (1); 17714 } 17715 did_out = 1; 17716 } 17717 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 17718 rack_free_trim(rack); 17719 } else if ((nxt_pkt == 0) && (tp->t_flags & TF_ACKNOW)) { 17720 goto do_output_now; 17721 } else if ((no_output == 1) && 17722 (nxt_pkt == 0) && 17723 (tcp_in_hpts(rack->rc_tp) == 0)) { 17724 /* 17725 * We are not in hpts and we had a pacing timer up. Use 17726 * the remaining time (slot_remaining) to restart the timer. 
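*
* As a reminder (editorial note), slot_remaining was captured near
* the top of this function while the pacing timer was known to be
* armed, roughly:
*
*	slot_remaining = rack->r_ctl.rc_last_output_to - us_cts;
*
* so re-arming hpts with it simply resumes the pacing delay that the
* deferred ack processing interrupted.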
17727 */ 17728 KASSERT ((slot_remaining != 0), ("slot remaining is zero for rack:%p tp:%p", rack, tp)); 17729 rack_start_hpts_timer(rack, tp, cts, slot_remaining, 0, 0); 17730 rack_free_trim(rack); 17731 } 17732 /* Clear the flag, it may have been cleared by output but we may not have */ 17733 if ((nxt_pkt == 0) && (tp->t_flags2 & TF2_HPTS_CALLS)) 17734 tp->t_flags2 &= ~TF2_HPTS_CALLS; 17735 /* 17736 * The draft (v3) calls for us to use SEQ_GEQ, but that 17737 * causes issues when we are just going app limited. Lets 17738 * instead use SEQ_GT <or> where its equal but more data 17739 * is outstanding. 17740 * 17741 * Also make sure we are on the last ack of a series. We 17742 * have to have all the ack's processed in queue to know 17743 * if there is something left outstanding. 17744 */ 17745 if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends) && 17746 (rack->rc_new_rnd_needed == 0) && 17747 (nxt_pkt == 0)) { 17748 /* 17749 * We have crossed into a new round with 17750 * the new snd_unae. 17751 */ 17752 rack_new_round_setup(tp, rack, tp->snd_una); 17753 } 17754 if ((nxt_pkt == 0) && 17755 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 17756 (SEQ_GT(tp->snd_max, tp->snd_una) || 17757 (tp->t_flags & TF_DELACK) || 17758 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 17759 (tp->t_state <= TCPS_CLOSING)))) { 17760 /* We could not send (probably in the hpts but stopped the timer earlier)? */ 17761 if ((tp->snd_max == tp->snd_una) && 17762 ((tp->t_flags & TF_DELACK) == 0) && 17763 (tcp_in_hpts(rack->rc_tp)) && 17764 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 17765 /* keep alive not needed if we are hptsi output yet */ 17766 ; 17767 } else { 17768 int late = 0; 17769 if (tcp_in_hpts(tp)) { 17770 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 17771 us_cts = tcp_get_usecs(NULL); 17772 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 17773 rack->r_early = 1; 17774 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 17775 } else 17776 late = 1; 17777 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 17778 } 17779 tcp_hpts_remove(tp); 17780 } 17781 if (late && (did_out == 0)) { 17782 /* 17783 * We are late in the sending 17784 * and we did not call the output 17785 * (this probably should not happen). 17786 */ 17787 goto do_output_now; 17788 } 17789 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 17790 } 17791 way_out = 1; 17792 } else if (nxt_pkt == 0) { 17793 /* Do we have the correct timer running? */ 17794 rack_timer_audit(tp, rack, &so->so_snd); 17795 way_out = 2; 17796 } 17797 done_with_input: 17798 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); 17799 if (did_out) 17800 rack->r_wanted_output = 0; 17801 } 17802 17803 #ifdef TCP_ACCOUNTING 17804 sched_unpin(); 17805 #endif 17806 return (retval); 17807 } 17808 17809 static void 17810 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 17811 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 17812 { 17813 struct timeval tv; 17814 17815 /* First lets see if we have old packets */ 17816 if (!STAILQ_EMPTY(&tp->t_inqueue)) { 17817 if (ctf_do_queued_segments(tp, 1)) { 17818 m_freem(m); 17819 return; 17820 } 17821 } 17822 if (m->m_flags & M_TSTMP_LRO) { 17823 mbuf_tstmp2timeval(m, &tv); 17824 } else { 17825 /* Should not be should we kassert instead? 
*/ 17826 tcp_get_usecs(&tv); 17827 } 17828 if (rack_do_segment_nounlock(tp, m, th, drop_hdrlen, tlen, iptos, 0, 17829 &tv) == 0) { 17830 INP_WUNLOCK(tptoinpcb(tp)); 17831 } 17832 } 17833 17834 struct rack_sendmap * 17835 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 17836 { 17837 struct rack_sendmap *rsm = NULL; 17838 int32_t idx; 17839 uint32_t srtt = 0, thresh = 0, ts_low = 0; 17840 17841 /* Return the next guy to be re-transmitted */ 17842 if (tqhash_empty(rack->r_ctl.tqh)) { 17843 return (NULL); 17844 } 17845 if (tp->t_flags & TF_SENTFIN) { 17846 /* retran the end FIN? */ 17847 return (NULL); 17848 } 17849 /* ok lets look at this one */ 17850 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 17851 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) { 17852 return (rsm); 17853 } 17854 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 17855 goto check_it; 17856 } 17857 rsm = rack_find_lowest_rsm(rack); 17858 if (rsm == NULL) { 17859 return (NULL); 17860 } 17861 check_it: 17862 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && 17863 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 17864 /* 17865 * No sack so we automatically do the 3 strikes and 17866 * retransmit (no rack timer would be started). 17867 */ 17868 return (rsm); 17869 } 17870 if (rsm->r_flags & RACK_ACKED) { 17871 return (NULL); 17872 } 17873 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 17874 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 17875 /* Its not yet ready */ 17876 return (NULL); 17877 } 17878 srtt = rack_grab_rtt(tp, rack); 17879 idx = rsm->r_rtr_cnt - 1; 17880 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 17881 thresh = rack_calc_thresh_rack(rack, srtt, tsused, __LINE__, 1); 17882 if ((tsused == ts_low) || 17883 (TSTMP_LT(tsused, ts_low))) { 17884 /* No time since sending */ 17885 return (NULL); 17886 } 17887 if ((tsused - ts_low) < thresh) { 17888 /* It has not been long enough yet */ 17889 return (NULL); 17890 } 17891 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 17892 ((rsm->r_flags & RACK_SACK_PASSED))) { 17893 /* 17894 * We have passed the dup-ack threshold <or> 17895 * a SACK has indicated this is missing. 17896 * Note that if you are a declared attacker 17897 * it is only the dup-ack threshold that 17898 * will cause retransmits. 17899 */ 17900 /* log retransmit reason */ 17901 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 17902 rack->r_fast_output = 0; 17903 return (rsm); 17904 } 17905 return (NULL); 17906 } 17907 17908 static void 17909 rack_log_pacing_delay_calc (struct tcp_rack *rack, uint32_t len, uint32_t slot, 17910 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 17911 int line, struct rack_sendmap *rsm, uint8_t quality) 17912 { 17913 if (tcp_bblogging_on(rack->rc_tp)) { 17914 union tcp_log_stackspecific log; 17915 struct timeval tv; 17916 17917 if (rack_verbose_logging == 0) { 17918 /* 17919 * We are not verbose screen out all but 17920 * ones we always want. 
17921 */ 17922 if ((method != 2) && 17923 (method != 3) && 17924 (method != 7) && 17925 (method != 89) && 17926 (method != 14) && 17927 (method != 20)) { 17928 return; 17929 } 17930 } 17931 memset(&log, 0, sizeof(log)); 17932 log.u_bbr.flex1 = slot; 17933 log.u_bbr.flex2 = len; 17934 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 17935 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 17936 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 17937 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 17938 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 17939 log.u_bbr.use_lt_bw <<= 1; 17940 log.u_bbr.use_lt_bw |= rack->r_late; 17941 log.u_bbr.use_lt_bw <<= 1; 17942 log.u_bbr.use_lt_bw |= rack->r_early; 17943 log.u_bbr.use_lt_bw <<= 1; 17944 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 17945 log.u_bbr.use_lt_bw <<= 1; 17946 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 17947 log.u_bbr.use_lt_bw <<= 1; 17948 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 17949 log.u_bbr.use_lt_bw <<= 1; 17950 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 17951 log.u_bbr.use_lt_bw <<= 1; 17952 log.u_bbr.use_lt_bw |= rack->gp_ready; 17953 log.u_bbr.pkt_epoch = line; 17954 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 17955 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 17956 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 17957 log.u_bbr.bw_inuse = bw_est; 17958 log.u_bbr.delRate = bw; 17959 if (rack->r_ctl.gp_bw == 0) 17960 log.u_bbr.cur_del_rate = 0; 17961 else 17962 log.u_bbr.cur_del_rate = rack_get_bw(rack); 17963 log.u_bbr.rttProp = len_time; 17964 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 17965 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 17966 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 17967 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 17968 /* We are in slow start */ 17969 log.u_bbr.flex7 = 1; 17970 } else { 17971 /* we are on congestion avoidance */ 17972 log.u_bbr.flex7 = 0; 17973 } 17974 log.u_bbr.flex8 = method; 17975 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 17976 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 17977 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 17978 log.u_bbr.cwnd_gain <<= 1; 17979 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 17980 log.u_bbr.cwnd_gain <<= 1; 17981 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 17982 log.u_bbr.bbr_substate = quality; 17983 log.u_bbr.bbr_state = rack->dgp_on; 17984 log.u_bbr.bbr_state <<= 1; 17985 log.u_bbr.bbr_state |= rack->rc_pace_to_cwnd; 17986 log.u_bbr.bbr_state <<= 2; 17987 TCP_LOG_EVENTP(rack->rc_tp, NULL, 17988 &rack->rc_inp->inp_socket->so_rcv, 17989 &rack->rc_inp->inp_socket->so_snd, 17990 BBR_LOG_HPTSI_CALC, 0, 17991 0, &log, false, &tv); 17992 } 17993 } 17994 17995 static uint32_t 17996 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 17997 { 17998 uint32_t new_tso, user_max, pace_one; 17999 18000 user_max = rack->rc_user_set_max_segs * mss; 18001 if (rack->rc_force_max_seg) { 18002 return (user_max); 18003 } 18004 if (rack->use_fixed_rate && 18005 ((rack->r_ctl.crte == NULL) || 18006 (bw != rack->r_ctl.crte->rate))) { 18007 /* Use the user mss since we are not exactly matched */ 18008 return (user_max); 18009 } 18010 if (rack_pace_one_seg || 18011 (rack->r_ctl.rc_user_set_min_segs == 1)) 18012 pace_one = 1; 18013 else 18014 pace_one = 0; 18015 18016 new_tso = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, bw, mss, 18017 pace_one, rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); 18018 if (new_tso > user_max) 18019 new_tso = user_max; 
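	/*
	 * Editorial illustration only (no effect on the logic): with a
	 * hypothetical mss of 1448 and rc_user_set_max_segs of 64,
	 * user_max is 92672 bytes, so any larger burst suggested by
	 * tcp_get_pacing_burst_size_w_divisor() is clamped to that
	 * here; the hybrid and minimum-segment checks below may then
	 * raise it again toward client_suggested_maxseg * mss or
	 * rc_user_set_min_segs * mss.
	 */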
18020 if (rack->rc_hybrid_mode && rack->r_ctl.client_suggested_maxseg) { 18021 if (((uint32_t)rack->r_ctl.client_suggested_maxseg * mss) > new_tso) 18022 new_tso = (uint32_t)rack->r_ctl.client_suggested_maxseg * mss; 18023 } 18024 if (rack->r_ctl.rc_user_set_min_segs && 18025 ((rack->r_ctl.rc_user_set_min_segs * mss) > new_tso)) 18026 new_tso = rack->r_ctl.rc_user_set_min_segs * mss; 18027 return (new_tso); 18028 } 18029 18030 static uint64_t 18031 rack_arrive_at_discounted_rate(struct tcp_rack *rack, uint64_t window_input, uint32_t *rate_set, uint32_t *gain_b) 18032 { 18033 uint64_t reduced_win; 18034 uint32_t gain; 18035 18036 if (window_input < rc_init_window(rack)) { 18037 /* 18038 * The cwnd is collapsed to 18039 * nearly zero, maybe because of a time-out? 18040 * Lets drop back to the lt-bw. 18041 */ 18042 reduced_win = rack_get_lt_bw(rack); 18043 /* Set the flag so the caller knows its a rate and not a reduced window */ 18044 *rate_set = 1; 18045 gain = 100; 18046 } else if (IN_RECOVERY(rack->rc_tp->t_flags)) { 18047 /* 18048 * If we are in recover our cwnd needs to be less for 18049 * our pacing consideration. 18050 */ 18051 if (rack->rack_hibeta == 0) { 18052 reduced_win = window_input / 2; 18053 gain = 50; 18054 } else { 18055 reduced_win = window_input * rack->r_ctl.saved_hibeta; 18056 reduced_win /= 100; 18057 gain = rack->r_ctl.saved_hibeta; 18058 } 18059 } else { 18060 /* 18061 * Apply Timely factor to increase/decrease the 18062 * amount we are pacing at. 18063 */ 18064 gain = rack_get_output_gain(rack, NULL); 18065 if (gain > rack_gain_p5_ub) { 18066 gain = rack_gain_p5_ub; 18067 } 18068 reduced_win = window_input * gain; 18069 reduced_win /= 100; 18070 } 18071 if (gain_b != NULL) 18072 *gain_b = gain; 18073 /* 18074 * What is being returned here is a trimmed down 18075 * window values in all cases where rate_set is left 18076 * at 0. In one case we actually return the rate (lt_bw). 18077 * the "reduced_win" is returned as a slimmed down cwnd that 18078 * is then calculated by the caller into a rate when rate_set 18079 * is 0. 18080 */ 18081 return (reduced_win); 18082 } 18083 18084 static int32_t 18085 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced) 18086 { 18087 uint64_t lentim, fill_bw; 18088 18089 rack->r_via_fill_cw = 0; 18090 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) 18091 return (slot); 18092 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) 18093 return (slot); 18094 if (rack->r_ctl.rc_last_us_rtt == 0) 18095 return (slot); 18096 if (rack->rc_pace_fill_if_rttin_range && 18097 (rack->r_ctl.rc_last_us_rtt >= 18098 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { 18099 /* The rtt is huge, N * smallest, lets not fill */ 18100 return (slot); 18101 } 18102 if (rack->r_ctl.fillcw_cap && *rate_wanted >= rack->r_ctl.fillcw_cap) 18103 return (slot); 18104 /* 18105 * first lets calculate the b/w based on the last us-rtt 18106 * and the the smallest send window. 
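*
* As a hedged sketch (plain names stand in for the fields used here,
* nothing below is compiled), the two conversions this function
* performs are:
*
*	bw = win_bytes * HPTS_USEC_IN_SEC / last_rtt_us;
*	pacing_slot = len * HPTS_USEC_IN_SEC / bw;
*
* For example a 1,000,000 byte limiting window over a 10,000 usec RTT
* gives 100,000,000 bytes/sec, and a 64,240 byte send at that rate
* works out to a 642 usec slot, which is only adopted when it is
* shorter than the slot the caller already computed.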
18107 */ 18108 fill_bw = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); 18109 if (rack->rc_fillcw_apply_discount) { 18110 uint32_t rate_set = 0; 18111 18112 fill_bw = rack_arrive_at_discounted_rate(rack, fill_bw, &rate_set, NULL); 18113 if (rate_set) { 18114 goto at_lt_bw; 18115 } 18116 } 18117 /* Take the rwnd if its smaller */ 18118 if (fill_bw > rack->rc_tp->snd_wnd) 18119 fill_bw = rack->rc_tp->snd_wnd; 18120 /* Now lets make it into a b/w */ 18121 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 18122 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 18123 /* Adjust to any cap */ 18124 if (rack->r_ctl.fillcw_cap && fill_bw >= rack->r_ctl.fillcw_cap) 18125 fill_bw = rack->r_ctl.fillcw_cap; 18126 18127 at_lt_bw: 18128 if (rack_bw_multipler > 0) { 18129 /* 18130 * We want to limit fill-cw to the some multiplier 18131 * of the max(lt_bw, gp_est). The normal default 18132 * is 0 for off, so a sysctl has enabled it. 18133 */ 18134 uint64_t lt_bw, gp, rate; 18135 18136 gp = rack_get_gp_est(rack); 18137 lt_bw = rack_get_lt_bw(rack); 18138 if (lt_bw > gp) 18139 rate = lt_bw; 18140 else 18141 rate = gp; 18142 rate *= rack_bw_multipler; 18143 rate /= 100; 18144 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 18145 union tcp_log_stackspecific log; 18146 struct timeval tv; 18147 18148 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18149 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 18150 log.u_bbr.flex1 = rack_bw_multipler; 18151 log.u_bbr.flex2 = len; 18152 log.u_bbr.cur_del_rate = gp; 18153 log.u_bbr.delRate = lt_bw; 18154 log.u_bbr.bw_inuse = rate; 18155 log.u_bbr.rttProp = fill_bw; 18156 log.u_bbr.flex8 = 44; 18157 tcp_log_event(rack->rc_tp, NULL, NULL, NULL, 18158 BBR_LOG_CWND, 0, 18159 0, &log, false, NULL, 18160 __func__, __LINE__, &tv); 18161 } 18162 if (fill_bw > rate) 18163 fill_bw = rate; 18164 } 18165 /* We are below the min b/w */ 18166 if (non_paced) 18167 *rate_wanted = fill_bw; 18168 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 18169 return (slot); 18170 rack->r_via_fill_cw = 1; 18171 if (rack->r_rack_hw_rate_caps && 18172 (rack->r_ctl.crte != NULL)) { 18173 uint64_t high_rate; 18174 18175 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 18176 if (fill_bw > high_rate) { 18177 /* We are capping bw at the highest rate table entry */ 18178 if (*rate_wanted > high_rate) { 18179 /* The original rate was also capped */ 18180 rack->r_via_fill_cw = 0; 18181 } 18182 rack_log_hdwr_pacing(rack, 18183 fill_bw, high_rate, __LINE__, 18184 0, 3); 18185 fill_bw = high_rate; 18186 if (capped) 18187 *capped = 1; 18188 } 18189 } else if ((rack->r_ctl.crte == NULL) && 18190 (rack->rack_hdrw_pacing == 0) && 18191 (rack->rack_hdw_pace_ena) && 18192 rack->r_rack_hw_rate_caps && 18193 (rack->rack_attempt_hdwr_pace == 0) && 18194 (rack->rc_inp->inp_route.ro_nh != NULL) && 18195 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 18196 /* 18197 * Ok we may have a first attempt that is greater than our top rate 18198 * lets check. 
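		 * tcp_hw_highest_rate_ifp() reports the largest entry in the
		 * interface's hardware rate table; if fill_bw exceeds it we
		 * clamp fill_bw to that rate and mark the result as capped.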
18199 */ 18200 uint64_t high_rate; 18201 18202 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 18203 if (high_rate) { 18204 if (fill_bw > high_rate) { 18205 fill_bw = high_rate; 18206 if (capped) 18207 *capped = 1; 18208 } 18209 } 18210 } 18211 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) { 18212 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 18213 fill_bw, 0, 0, HYBRID_LOG_RATE_CAP, 2, NULL, __LINE__); 18214 fill_bw = rack->r_ctl.bw_rate_cap; 18215 } 18216 /* 18217 * Ok fill_bw holds our mythical b/w to fill the cwnd 18218 * in an rtt (unless it was capped), what does that 18219 * time wise equate too? 18220 */ 18221 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 18222 lentim /= fill_bw; 18223 *rate_wanted = fill_bw; 18224 if (non_paced || (lentim < slot)) { 18225 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, 18226 0, lentim, 12, __LINE__, NULL, 0); 18227 return ((int32_t)lentim); 18228 } else 18229 return (slot); 18230 } 18231 18232 static uint32_t 18233 rack_policer_check_send(struct tcp_rack *rack, uint32_t len, uint32_t segsiz, uint32_t *needs) 18234 { 18235 uint64_t calc; 18236 18237 rack->rc_policer_should_pace = 0; 18238 calc = rack_policer_bucket_reserve * rack->r_ctl.policer_bucket_size; 18239 calc /= 100; 18240 /* 18241 * Now lets look at if we want more than is in the bucket <or> 18242 * we want more than is reserved in the bucket. 18243 */ 18244 if (rack_verbose_logging > 0) 18245 policer_detection_log(rack, len, segsiz, calc, rack->r_ctl.current_policer_bucket, 8); 18246 if ((calc > rack->r_ctl.current_policer_bucket) || 18247 (len >= (rack->r_ctl.current_policer_bucket - calc))) { 18248 /* 18249 * We may want to pace depending on if we are going 18250 * into the reserve or not. 18251 */ 18252 uint32_t newlen; 18253 18254 if (calc > rack->r_ctl.current_policer_bucket) { 18255 /* 18256 * This will eat into the reserve if we 18257 * don't have room at all some lines 18258 * below will catch it. 18259 */ 18260 newlen = rack->r_ctl.policer_max_seg; 18261 rack->rc_policer_should_pace = 1; 18262 } else { 18263 /* 18264 * We have all of the reserve plus something in the bucket 18265 * that we can give out. 18266 */ 18267 newlen = rack->r_ctl.current_policer_bucket - calc; 18268 if (newlen < rack->r_ctl.policer_max_seg) { 18269 /* 18270 * Into the reserve to get a full policer_max_seg 18271 * so we set the len to that and eat into 18272 * the reserve. If we go over the code 18273 * below will make us wait. 18274 */ 18275 newlen = rack->r_ctl.policer_max_seg; 18276 rack->rc_policer_should_pace = 1; 18277 } 18278 } 18279 if (newlen > rack->r_ctl.current_policer_bucket) { 18280 /* We have to wait some */ 18281 *needs = newlen - rack->r_ctl.current_policer_bucket; 18282 return (0); 18283 } 18284 if (rack_verbose_logging > 0) 18285 policer_detection_log(rack, len, segsiz, newlen, 0, 9); 18286 len = newlen; 18287 } /* else we have all len available above the reserve */ 18288 if (rack_verbose_logging > 0) 18289 policer_detection_log(rack, len, segsiz, calc, 0, 10); 18290 return (len); 18291 } 18292 18293 static uint32_t 18294 rack_policed_sending(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, uint32_t segsiz, int call_line) 18295 { 18296 /* 18297 * Given a send of len, and a token bucket set at current_policer_bucket_size 18298 * are we close enough to the end of the bucket that we need to pace? If so 18299 * calculate out a time and return it. Otherwise subtract the tokens from 18300 * the bucket. 
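	 * Illustrative example (made-up numbers): with a policer_bw of
	 * 1,250,000 bytes/sec, a 5000 byte send that forces pacing is
	 * delayed by 5000 * HPTS_USEC_IN_SEC / 1,250,000 = 4000 usec.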
18301 */ 18302 uint64_t calc; 18303 18304 if ((rack->r_ctl.policer_bw == 0) || 18305 (rack->r_ctl.policer_bucket_size < segsiz)) { 18306 /* 18307 * We should have an estimate here... 18308 */ 18309 return (0); 18310 } 18311 calc = (uint64_t)rack_policer_bucket_reserve * (uint64_t)rack->r_ctl.policer_bucket_size; 18312 calc /= 100; 18313 if ((rack->r_ctl.current_policer_bucket < len) || 18314 (rack->rc_policer_should_pace == 1) || 18315 ((rack->r_ctl.current_policer_bucket - len) <= (uint32_t)calc)) { 18316 /* we need to pace */ 18317 uint64_t lentim, res; 18318 uint32_t slot; 18319 18320 lentim = (uint64_t)len * (uint64_t)HPTS_USEC_IN_SEC; 18321 res = lentim / rack->r_ctl.policer_bw; 18322 slot = (uint32_t)res; 18323 if (rack->r_ctl.current_policer_bucket > len) 18324 rack->r_ctl.current_policer_bucket -= len; 18325 else 18326 rack->r_ctl.current_policer_bucket = 0; 18327 policer_detection_log(rack, len, slot, (uint32_t)rack_policer_bucket_reserve, call_line, 5); 18328 rack->rc_policer_should_pace = 0; 18329 return(slot); 18330 } 18331 /* Just take tokens out of the bucket and let rack do whatever it would have */ 18332 policer_detection_log(rack, len, 0, (uint32_t)rack_policer_bucket_reserve, call_line, 6); 18333 if (len < rack->r_ctl.current_policer_bucket) { 18334 rack->r_ctl.current_policer_bucket -= len; 18335 } else { 18336 rack->r_ctl.current_policer_bucket = 0; 18337 } 18338 return (0); 18339 } 18340 18341 18342 static int32_t 18343 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz, int line) 18344 { 18345 uint64_t srtt; 18346 int32_t slot = 0; 18347 int32_t minslot = 0; 18348 int can_start_hw_pacing = 1; 18349 int err; 18350 int pace_one; 18351 18352 if (rack_pace_one_seg || 18353 (rack->r_ctl.rc_user_set_min_segs == 1)) 18354 pace_one = 1; 18355 else 18356 pace_one = 0; 18357 if (rack->rc_policer_detected == 1) { 18358 /* 18359 * A policer has been detected and we 18360 * have all of our data (policer-bw and 18361 * policer bucket size) calculated. Call 18362 * into the function to find out if we are 18363 * overriding the time. 18364 */ 18365 slot = rack_policed_sending(rack, tp, len, segsiz, line); 18366 if (slot) { 18367 uint64_t logbw; 18368 18369 logbw = rack->r_ctl.current_policer_bucket; 18370 logbw <<= 32; 18371 logbw |= rack->r_ctl.policer_bucket_size; 18372 rack_log_pacing_delay_calc(rack, len, slot, rack->r_ctl.policer_bw, logbw, 0, 89, __LINE__, NULL, 0); 18373 return(slot); 18374 } 18375 } 18376 if (rack->rc_always_pace == 0) { 18377 /* 18378 * We use the most optimistic possible cwnd/srtt for 18379 * sending calculations. This will make our 18380 * calculation anticipate getting more through 18381 * quicker then possible. But thats ok we don't want 18382 * the peer to have a gap in data sending. 18383 */ 18384 uint64_t cwnd, tr_perms = 0; 18385 int32_t reduce = 0; 18386 18387 old_method: 18388 /* 18389 * We keep no precise pacing with the old method 18390 * instead we use the pacer to mitigate bursts. 
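		 * Illustrative example (made-up numbers): a cwnd of 50000
		 * bytes and an srtt of 25000 usec give tr_perms =
		 * 50000 * 1000 / 25000 = 2000 bytes per msec, so a 10000
		 * byte send maps to a 5 msec slot before the
		 * rack_slot_reduction trim and the conversion to usecs.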
18391 */ 18392 if (rack->r_ctl.rc_rack_min_rtt) 18393 srtt = rack->r_ctl.rc_rack_min_rtt; 18394 else 18395 srtt = max(tp->t_srtt, 1); 18396 if (rack->r_ctl.rc_rack_largest_cwnd) 18397 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 18398 else 18399 cwnd = rack->r_ctl.cwnd_to_use; 18400 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 18401 tr_perms = (cwnd * 1000) / srtt; 18402 if (tr_perms == 0) { 18403 tr_perms = ctf_fixed_maxseg(tp); 18404 } 18405 /* 18406 * Calculate how long this will take to drain, if 18407 * the calculation comes out to zero, thats ok we 18408 * will use send_a_lot to possibly spin around for 18409 * more increasing tot_len_this_send to the point 18410 * that its going to require a pace, or we hit the 18411 * cwnd. Which in that case we are just waiting for 18412 * a ACK. 18413 */ 18414 slot = len / tr_perms; 18415 /* Now do we reduce the time so we don't run dry? */ 18416 if (slot && rack_slot_reduction) { 18417 reduce = (slot / rack_slot_reduction); 18418 if (reduce < slot) { 18419 slot -= reduce; 18420 } else 18421 slot = 0; 18422 } 18423 slot *= HPTS_USEC_IN_MSEC; 18424 if (rack->rc_pace_to_cwnd) { 18425 uint64_t rate_wanted = 0; 18426 18427 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); 18428 rack->rc_ack_can_sendout_data = 1; 18429 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); 18430 } else 18431 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); 18432 /*******************************************************/ 18433 /* RRS: We insert non-paced call to stats here for len */ 18434 /*******************************************************/ 18435 } else { 18436 uint64_t bw_est, res, lentim, rate_wanted; 18437 uint32_t segs, oh; 18438 int capped = 0; 18439 int prev_fill; 18440 18441 if ((rack->r_rr_config == 1) && rsm) { 18442 return (rack->r_ctl.rc_min_to); 18443 } 18444 if (rack->use_fixed_rate) { 18445 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 18446 } else if ((rack->r_ctl.init_rate == 0) && 18447 (rack->r_ctl.gp_bw == 0)) { 18448 /* no way to yet do an estimate */ 18449 bw_est = rate_wanted = 0; 18450 } else if (rack->dgp_on) { 18451 bw_est = rack_get_bw(rack); 18452 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 18453 } else { 18454 uint32_t gain, rate_set = 0; 18455 18456 rate_wanted = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); 18457 rate_wanted = rack_arrive_at_discounted_rate(rack, rate_wanted, &rate_set, &gain); 18458 if (rate_set == 0) { 18459 if (rate_wanted > rack->rc_tp->snd_wnd) 18460 rate_wanted = rack->rc_tp->snd_wnd; 18461 /* Now lets make it into a b/w */ 18462 rate_wanted *= (uint64_t)HPTS_USEC_IN_SEC; 18463 rate_wanted /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 18464 } 18465 bw_est = rate_wanted; 18466 rack_log_pacing_delay_calc(rack, rack->rc_tp->snd_cwnd, 18467 rack->r_ctl.cwnd_to_use, 18468 rate_wanted, bw_est, 18469 rack->r_ctl.rc_last_us_rtt, 18470 88, __LINE__, NULL, gain); 18471 } 18472 if ((bw_est == 0) || (rate_wanted == 0) || 18473 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) { 18474 /* 18475 * No way yet to make a b/w estimate or 18476 * our raise is set incorrectly. 18477 */ 18478 goto old_method; 18479 } 18480 rack_rate_cap_bw(rack, &rate_wanted, &capped); 18481 /* We need to account for all the overheads */ 18482 segs = (len + segsiz - 1) / segsiz; 18483 /* 18484 * We need the diff between 1514 bytes (e-mtu with e-hdr) 18485 * and how much data we put in each packet. 
Yes this 18486 * means we may be off if we are larger than 1500 bytes 18487 * or smaller. But this just makes us more conservative. 18488 */ 18489 18490 oh = (tp->t_maxseg - segsiz) + sizeof(struct tcphdr); 18491 if (rack->r_is_v6) { 18492 #ifdef INET6 18493 oh += sizeof(struct ip6_hdr); 18494 #endif 18495 } else { 18496 #ifdef INET 18497 oh += sizeof(struct ip); 18498 #endif 18499 } 18500 /* We add a fixed 14 for the ethernet header */ 18501 oh += 14; 18502 segs *= oh; 18503 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 18504 res = lentim / rate_wanted; 18505 slot = (uint32_t)res; 18506 if (rack_hw_rate_min && 18507 (rate_wanted < rack_hw_rate_min)) { 18508 can_start_hw_pacing = 0; 18509 if (rack->r_ctl.crte) { 18510 /* 18511 * Ok we need to release it, we 18512 * have fallen too low. 18513 */ 18514 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 18515 rack->r_ctl.crte = NULL; 18516 rack->rack_attempt_hdwr_pace = 0; 18517 rack->rack_hdrw_pacing = 0; 18518 } 18519 } 18520 if (rack->r_ctl.crte && 18521 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { 18522 /* 18523 * We want more than the hardware can give us, 18524 * don't start any hw pacing. 18525 */ 18526 can_start_hw_pacing = 0; 18527 if (rack->r_rack_hw_rate_caps == 0) { 18528 /* 18529 * Ok we need to release it, we 18530 * want more than the card can give us and 18531 * no rate cap is in place. Set it up so 18532 * when we want less we can retry. 18533 */ 18534 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 18535 rack->r_ctl.crte = NULL; 18536 rack->rack_attempt_hdwr_pace = 0; 18537 rack->rack_hdrw_pacing = 0; 18538 } 18539 } 18540 if ((rack->r_ctl.crte != NULL) && (rack->rc_inp->inp_snd_tag == NULL)) { 18541 /* 18542 * We lost our rate somehow, this can happen 18543 * if the interface changed underneath us. 18544 */ 18545 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 18546 rack->r_ctl.crte = NULL; 18547 /* Lets re-allow attempting to setup pacing */ 18548 rack->rack_hdrw_pacing = 0; 18549 rack->rack_attempt_hdwr_pace = 0; 18550 rack_log_hdwr_pacing(rack, 18551 rate_wanted, bw_est, __LINE__, 18552 0, 6); 18553 } 18554 prev_fill = rack->r_via_fill_cw; 18555 if ((rack->rc_pace_to_cwnd) && 18556 (capped == 0) && 18557 (rack->dgp_on == 1) && 18558 (rack->use_fixed_rate == 0) && 18559 (rack->in_probe_rtt == 0) && 18560 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 18561 /* 18562 * We want to pace at our rate *or* faster to 18563 * fill the cwnd to the max if its not full. 18564 */ 18565 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); 18566 /* Re-check to make sure we are not exceeding our max b/w */ 18567 if ((rack->r_ctl.crte != NULL) && 18568 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { 18569 /* 18570 * We want more than the hardware can give us, 18571 * don't start any hw pacing. 18572 */ 18573 can_start_hw_pacing = 0; 18574 if (rack->r_rack_hw_rate_caps == 0) { 18575 /* 18576 * Ok we need to release it, we 18577 * want more than the card can give us and 18578 * no rate cap is in place. Set it up so 18579 * when we want less we can retry. 
18580 */ 18581 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 18582 rack->r_ctl.crte = NULL; 18583 rack->rack_attempt_hdwr_pace = 0; 18584 rack->rack_hdrw_pacing = 0; 18585 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 18586 } 18587 } 18588 } 18589 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 18590 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 18591 if ((rack->rack_hdw_pace_ena) && 18592 (can_start_hw_pacing > 0) && 18593 (rack->rack_hdrw_pacing == 0) && 18594 (rack->rack_attempt_hdwr_pace == 0)) { 18595 /* 18596 * Lets attempt to turn on hardware pacing 18597 * if we can. 18598 */ 18599 rack->rack_attempt_hdwr_pace = 1; 18600 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 18601 rack->rc_inp->inp_route.ro_nh->nh_ifp, 18602 rate_wanted, 18603 RS_PACING_GEQ, 18604 &err, &rack->r_ctl.crte_prev_rate); 18605 if (rack->r_ctl.crte) { 18606 rack->rack_hdrw_pacing = 1; 18607 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, segsiz, 18608 pace_one, rack->r_ctl.crte, 18609 NULL, rack->r_ctl.pace_len_divisor); 18610 rack_log_hdwr_pacing(rack, 18611 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 18612 err, 0); 18613 rack->r_ctl.last_hw_bw_req = rate_wanted; 18614 } else { 18615 counter_u64_add(rack_hw_pace_init_fail, 1); 18616 } 18617 } else if (rack->rack_hdrw_pacing && 18618 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 18619 /* Do we need to adjust our rate? */ 18620 const struct tcp_hwrate_limit_table *nrte; 18621 18622 if (rack->r_up_only && 18623 (rate_wanted < rack->r_ctl.crte->rate)) { 18624 /** 18625 * We have four possible states here 18626 * having to do with the previous time 18627 * and this time. 18628 * previous | this-time 18629 * A) 0 | 0 -- fill_cw not in the picture 18630 * B) 1 | 0 -- we were doing a fill-cw but now are not 18631 * C) 1 | 1 -- all rates from fill_cw 18632 * D) 0 | 1 -- we were doing non-fill and now we are filling 18633 * 18634 * For case A, C and D we don't allow a drop. But for 18635 * case B where we now our on our steady rate we do 18636 * allow a drop. 18637 * 18638 */ 18639 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 18640 goto done_w_hdwr; 18641 } 18642 if ((rate_wanted > rack->r_ctl.crte->rate) || 18643 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 18644 if (rack_hw_rate_to_low && 18645 (bw_est < rack_hw_rate_to_low)) { 18646 /* 18647 * The pacing rate is too low for hardware, but 18648 * do allow hardware pacing to be restarted. 18649 */ 18650 rack_log_hdwr_pacing(rack, 18651 bw_est, rack->r_ctl.crte->rate, __LINE__, 18652 0, 5); 18653 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 18654 rack->r_ctl.crte = NULL; 18655 rack->rack_attempt_hdwr_pace = 0; 18656 rack->rack_hdrw_pacing = 0; 18657 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 18658 goto done_w_hdwr; 18659 } 18660 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 18661 rack->rc_tp, 18662 rack->rc_inp->inp_route.ro_nh->nh_ifp, 18663 rate_wanted, 18664 RS_PACING_GEQ, 18665 &err, &rack->r_ctl.crte_prev_rate); 18666 if (nrte == NULL) { 18667 /* 18668 * Lost the rate, lets drop hardware pacing 18669 * period. 
18670 */ 18671 rack->rack_hdrw_pacing = 0; 18672 rack->r_ctl.crte = NULL; 18673 rack_log_hdwr_pacing(rack, 18674 rate_wanted, 0, __LINE__, 18675 err, 1); 18676 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 18677 counter_u64_add(rack_hw_pace_lost, 1); 18678 } else if (nrte != rack->r_ctl.crte) { 18679 rack->r_ctl.crte = nrte; 18680 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, 18681 segsiz, pace_one, rack->r_ctl.crte, 18682 NULL, rack->r_ctl.pace_len_divisor); 18683 rack_log_hdwr_pacing(rack, 18684 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 18685 err, 2); 18686 rack->r_ctl.last_hw_bw_req = rate_wanted; 18687 } 18688 } else { 18689 /* We just need to adjust the segment size */ 18690 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 18691 rack_log_hdwr_pacing(rack, 18692 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 18693 0, 4); 18694 rack->r_ctl.last_hw_bw_req = rate_wanted; 18695 } 18696 } 18697 } 18698 if (minslot && (minslot > slot)) { 18699 rack_log_pacing_delay_calc(rack, minslot, slot, rack->r_ctl.crte->rate, bw_est, lentim, 18700 98, __LINE__, NULL, 0); 18701 slot = minslot; 18702 } 18703 done_w_hdwr: 18704 if (rack_limit_time_with_srtt && 18705 (rack->use_fixed_rate == 0) && 18706 (rack->rack_hdrw_pacing == 0)) { 18707 /* 18708 * Sanity check, we do not allow the pacing delay 18709 * to be longer than the SRTT of the path. If it is 18710 * a slow path, then adding a packet should increase 18711 * the RTT and compensate for this i.e. the srtt will 18712 * be greater so the allowed pacing time will be greater. 18713 * 18714 * Note this restriction is not for where a peak rate 18715 * is set, we are doing fixed pacing or hardware pacing. 18716 */ 18717 if (rack->rc_tp->t_srtt) 18718 srtt = rack->rc_tp->t_srtt; 18719 else 18720 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 18721 if (srtt < (uint64_t)slot) { 18722 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); 18723 slot = srtt; 18724 } 18725 } 18726 /*******************************************************************/ 18727 /* RRS: We insert paced call to stats here for len and rate_wanted */ 18728 /*******************************************************************/ 18729 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); 18730 } 18731 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 18732 /* 18733 * If this rate is seeing enobufs when it 18734 * goes to send then either the nic is out 18735 * of gas or we are mis-estimating the time 18736 * somehow and not letting the queue empty 18737 * completely. Lets add to the pacing time. 18738 */ 18739 int hw_boost_delay; 18740 18741 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 18742 if (hw_boost_delay > rack_enobuf_hw_max) 18743 hw_boost_delay = rack_enobuf_hw_max; 18744 else if (hw_boost_delay < rack_enobuf_hw_min) 18745 hw_boost_delay = rack_enobuf_hw_min; 18746 slot += hw_boost_delay; 18747 } 18748 return (slot); 18749 } 18750 18751 static void 18752 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 18753 tcp_seq startseq, uint32_t sb_offset) 18754 { 18755 struct rack_sendmap *my_rsm = NULL; 18756 18757 if (tp->t_state < TCPS_ESTABLISHED) { 18758 /* 18759 * We don't start any measurements if we are 18760 * not at least established. 
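		 * (The measurement is bracketed by the gput_seq/gput_ack
		 * markers set up below; TF_GPUTINPROG lets the ack path know
		 * a measurement is in flight so it can be closed out once
		 * gput_ack is reached.)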
18761 */ 18762 return; 18763 } 18764 if (tp->t_state >= TCPS_FIN_WAIT_1) { 18765 /* 18766 * We will get no more data into the SB 18767 * this means we need to have the data available 18768 * before we start a measurement. 18769 */ 18770 18771 if (sbavail(&tptosocket(tp)->so_snd) < 18772 max(rc_init_window(rack), 18773 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { 18774 /* Nope not enough data */ 18775 return; 18776 } 18777 } 18778 tp->t_flags |= TF_GPUTINPROG; 18779 rack->r_ctl.rc_gp_cumack_ts = 0; 18780 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 18781 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 18782 tp->gput_seq = startseq; 18783 rack->app_limited_needs_set = 0; 18784 if (rack->in_probe_rtt) 18785 rack->measure_saw_probe_rtt = 1; 18786 else if ((rack->measure_saw_probe_rtt) && 18787 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 18788 rack->measure_saw_probe_rtt = 0; 18789 if (rack->rc_gp_filled) 18790 tp->gput_ts = rack->r_ctl.last_cumack_advance; 18791 else { 18792 /* Special case initial measurement */ 18793 struct timeval tv; 18794 18795 tp->gput_ts = tcp_get_usecs(&tv); 18796 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 18797 } 18798 /* 18799 * We take a guess out into the future, 18800 * if we have no measurement and no 18801 * initial rate, we measure the first 18802 * initial-windows worth of data to 18803 * speed up getting some GP measurement and 18804 * thus start pacing. 18805 */ 18806 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 18807 rack->app_limited_needs_set = 1; 18808 tp->gput_ack = startseq + max(rc_init_window(rack), 18809 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 18810 rack_log_pacing_delay_calc(rack, 18811 tp->gput_seq, 18812 tp->gput_ack, 18813 0, 18814 tp->gput_ts, 18815 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 18816 9, 18817 __LINE__, NULL, 0); 18818 rack_tend_gp_marks(tp, rack); 18819 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 18820 return; 18821 } 18822 if (sb_offset) { 18823 /* 18824 * We are out somewhere in the sb 18825 * can we use the already outstanding data? 18826 */ 18827 18828 if (rack->r_ctl.rc_app_limited_cnt == 0) { 18829 /* 18830 * Yes first one is good and in this case 18831 * the tp->gput_ts is correctly set based on 18832 * the last ack that arrived (no need to 18833 * set things up when an ack comes in). 18834 */ 18835 my_rsm = tqhash_min(rack->r_ctl.tqh); 18836 if ((my_rsm == NULL) || 18837 (my_rsm->r_rtr_cnt != 1)) { 18838 /* retransmission? */ 18839 goto use_latest; 18840 } 18841 } else { 18842 if (rack->r_ctl.rc_first_appl == NULL) { 18843 /* 18844 * If rc_first_appl is NULL 18845 * then the cnt should be 0. 18846 * This is probably an error, maybe 18847 * a KASSERT would be approprate. 18848 */ 18849 goto use_latest; 18850 } 18851 /* 18852 * If we have a marker pointer to the last one that is 18853 * app limited we can use that, but we need to set 18854 * things up so that when it gets ack'ed we record 18855 * the ack time (if its not already acked). 18856 */ 18857 rack->app_limited_needs_set = 1; 18858 /* 18859 * We want to get to the rsm that is either 18860 * next with space i.e. over 1 MSS or the one 18861 * after that (after the app-limited). 
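			 * In other words: step past rc_first_appl to the next
			 * map entry; if that entry spans more than one MSS we
			 * begin the measurement one MSS into it, otherwise we
			 * move on to the entry after it.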
18862 */ 18863 my_rsm = tqhash_next(rack->r_ctl.tqh, rack->r_ctl.rc_first_appl); 18864 if (my_rsm) { 18865 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 18866 /* Have to use the next one */ 18867 my_rsm = tqhash_next(rack->r_ctl.tqh, my_rsm); 18868 else { 18869 /* Use after the first MSS of it is acked */ 18870 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 18871 goto start_set; 18872 } 18873 } 18874 if ((my_rsm == NULL) || 18875 (my_rsm->r_rtr_cnt != 1)) { 18876 /* 18877 * Either its a retransmit or 18878 * the last is the app-limited one. 18879 */ 18880 goto use_latest; 18881 } 18882 } 18883 tp->gput_seq = my_rsm->r_start; 18884 start_set: 18885 if (my_rsm->r_flags & RACK_ACKED) { 18886 /* 18887 * This one has been acked use the arrival ack time 18888 */ 18889 struct rack_sendmap *nrsm; 18890 18891 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 18892 rack->app_limited_needs_set = 0; 18893 /* 18894 * Ok in this path we need to use the r_end now 18895 * since this guy is the starting ack. 18896 */ 18897 tp->gput_seq = my_rsm->r_end; 18898 /* 18899 * We also need to adjust up the sendtime 18900 * to the send of the next data after my_rsm. 18901 */ 18902 nrsm = tqhash_next(rack->r_ctl.tqh, my_rsm); 18903 if (nrsm != NULL) 18904 my_rsm = nrsm; 18905 else { 18906 /* 18907 * The next as not been sent, thats the 18908 * case for using the latest. 18909 */ 18910 goto use_latest; 18911 } 18912 } 18913 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; 18914 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 18915 rack->r_ctl.rc_gp_cumack_ts = 0; 18916 if ((rack->r_ctl.cleared_app_ack == 1) && 18917 (SEQ_GEQ(rack->r_ctl.cleared_app_ack, tp->gput_seq))) { 18918 /* 18919 * We just cleared an application limited period 18920 * so the next seq out needs to skip the first 18921 * ack. 18922 */ 18923 rack->app_limited_needs_set = 1; 18924 rack->r_ctl.cleared_app_ack = 0; 18925 } 18926 rack_log_pacing_delay_calc(rack, 18927 tp->gput_seq, 18928 tp->gput_ack, 18929 (uintptr_t)my_rsm, 18930 tp->gput_ts, 18931 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 18932 9, 18933 __LINE__, my_rsm, 0); 18934 /* Now lets make sure all are marked as they should be */ 18935 rack_tend_gp_marks(tp, rack); 18936 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 18937 return; 18938 } 18939 18940 use_latest: 18941 /* 18942 * We don't know how long we may have been 18943 * idle or if this is the first-send. Lets 18944 * setup the flag so we will trim off 18945 * the first ack'd data so we get a true 18946 * measurement. 18947 */ 18948 rack->app_limited_needs_set = 1; 18949 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 18950 rack->r_ctl.rc_gp_cumack_ts = 0; 18951 /* Find this guy so we can pull the send time */ 18952 my_rsm = tqhash_find(rack->r_ctl.tqh, startseq); 18953 if (my_rsm) { 18954 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; 18955 if (my_rsm->r_flags & RACK_ACKED) { 18956 /* 18957 * Unlikely since its probably what was 18958 * just transmitted (but I am paranoid). 18959 */ 18960 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 18961 rack->app_limited_needs_set = 0; 18962 } 18963 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 18964 /* This also is unlikely */ 18965 tp->gput_seq = my_rsm->r_start; 18966 } 18967 } else { 18968 /* 18969 * TSNH unless we have some send-map limit, 18970 * and even at that it should not be hitting 18971 * that limit (we should have stopped sending). 
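		 * ("TSNH" = this should not happen; if it does we simply fall
		 * back to the current time for the output timestamp below.)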
18972 */ 18973 struct timeval tv; 18974 18975 microuptime(&tv); 18976 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 18977 } 18978 rack_tend_gp_marks(tp, rack); 18979 rack_log_pacing_delay_calc(rack, 18980 tp->gput_seq, 18981 tp->gput_ack, 18982 (uintptr_t)my_rsm, 18983 tp->gput_ts, 18984 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 18985 9, __LINE__, NULL, 0); 18986 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 18987 } 18988 18989 static inline uint32_t 18990 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 18991 uint32_t avail, int32_t sb_offset) 18992 { 18993 uint32_t len; 18994 uint32_t sendwin; 18995 18996 if (tp->snd_wnd > cwnd_to_use) 18997 sendwin = cwnd_to_use; 18998 else 18999 sendwin = tp->snd_wnd; 19000 if (ctf_outstanding(tp) >= tp->snd_wnd) { 19001 /* We never want to go over our peers rcv-window */ 19002 len = 0; 19003 } else { 19004 uint32_t flight; 19005 19006 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 19007 if (flight >= sendwin) { 19008 /* 19009 * We have in flight what we are allowed by cwnd (if 19010 * it was rwnd blocking it would have hit above out 19011 * >= tp->snd_wnd). 19012 */ 19013 return (0); 19014 } 19015 len = sendwin - flight; 19016 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 19017 /* We would send too much (beyond the rwnd) */ 19018 len = tp->snd_wnd - ctf_outstanding(tp); 19019 } 19020 if ((len + sb_offset) > avail) { 19021 /* 19022 * We don't have that much in the SB, how much is 19023 * there? 19024 */ 19025 len = avail - sb_offset; 19026 } 19027 } 19028 return (len); 19029 } 19030 19031 static void 19032 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 19033 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 19034 int rsm_is_null, int optlen, int line, uint16_t mode) 19035 { 19036 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 19037 union tcp_log_stackspecific log; 19038 struct timeval tv; 19039 19040 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 19041 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 19042 log.u_bbr.flex1 = error; 19043 log.u_bbr.flex2 = flags; 19044 log.u_bbr.flex3 = rsm_is_null; 19045 log.u_bbr.flex4 = ipoptlen; 19046 log.u_bbr.flex5 = tp->rcv_numsacks; 19047 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 19048 log.u_bbr.flex7 = optlen; 19049 log.u_bbr.flex8 = rack->r_fsb_inited; 19050 log.u_bbr.applimited = rack->r_fast_output; 19051 log.u_bbr.bw_inuse = rack_get_bw(rack); 19052 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 19053 log.u_bbr.cwnd_gain = mode; 19054 log.u_bbr.pkts_out = orig_len; 19055 log.u_bbr.lt_epoch = len; 19056 log.u_bbr.delivered = line; 19057 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 19058 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 19059 tcp_log_event(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 19060 len, &log, false, NULL, __func__, __LINE__, &tv); 19061 } 19062 } 19063 19064 19065 static struct mbuf * 19066 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 19067 struct rack_fast_send_blk *fsb, 19068 int32_t seglimit, int32_t segsize, int hw_tls) 19069 { 19070 #ifdef KERN_TLS 19071 struct ktls_session *tls, *ntls; 19072 #ifdef INVARIANTS 19073 struct mbuf *start; 19074 #endif 19075 #endif 19076 struct mbuf *m, *n, **np, *smb; 19077 struct mbuf *top; 19078 int32_t off, soff; 19079 int32_t len = *plen; 19080 int32_t fragsize; 19081 int32_t len_cp = 0; 19082 uint32_t mlen, 
frags; 19083 19084 soff = off = the_off; 19085 smb = m = the_m; 19086 np = ⊤ 19087 top = NULL; 19088 #ifdef KERN_TLS 19089 if (hw_tls && (m->m_flags & M_EXTPG)) 19090 tls = m->m_epg_tls; 19091 else 19092 tls = NULL; 19093 #ifdef INVARIANTS 19094 start = m; 19095 #endif 19096 #endif 19097 while (len > 0) { 19098 if (m == NULL) { 19099 *plen = len_cp; 19100 break; 19101 } 19102 #ifdef KERN_TLS 19103 if (hw_tls) { 19104 if (m->m_flags & M_EXTPG) 19105 ntls = m->m_epg_tls; 19106 else 19107 ntls = NULL; 19108 19109 /* 19110 * Avoid mixing TLS records with handshake 19111 * data or TLS records from different 19112 * sessions. 19113 */ 19114 if (tls != ntls) { 19115 MPASS(m != start); 19116 *plen = len_cp; 19117 break; 19118 } 19119 } 19120 #endif 19121 mlen = min(len, m->m_len - off); 19122 if (seglimit) { 19123 /* 19124 * For M_EXTPG mbufs, add 3 segments 19125 * + 1 in case we are crossing page boundaries 19126 * + 2 in case the TLS hdr/trailer are used 19127 * It is cheaper to just add the segments 19128 * than it is to take the cache miss to look 19129 * at the mbuf ext_pgs state in detail. 19130 */ 19131 if (m->m_flags & M_EXTPG) { 19132 fragsize = min(segsize, PAGE_SIZE); 19133 frags = 3; 19134 } else { 19135 fragsize = segsize; 19136 frags = 0; 19137 } 19138 19139 /* Break if we really can't fit anymore. */ 19140 if ((frags + 1) >= seglimit) { 19141 *plen = len_cp; 19142 break; 19143 } 19144 19145 /* 19146 * Reduce size if you can't copy the whole 19147 * mbuf. If we can't copy the whole mbuf, also 19148 * adjust len so the loop will end after this 19149 * mbuf. 19150 */ 19151 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 19152 mlen = (seglimit - frags - 1) * fragsize; 19153 len = mlen; 19154 *plen = len_cp + len; 19155 } 19156 frags += howmany(mlen, fragsize); 19157 if (frags == 0) 19158 frags++; 19159 seglimit -= frags; 19160 KASSERT(seglimit > 0, 19161 ("%s: seglimit went too low", __func__)); 19162 } 19163 n = m_get(M_NOWAIT, m->m_type); 19164 *np = n; 19165 if (n == NULL) 19166 goto nospace; 19167 n->m_len = mlen; 19168 soff += mlen; 19169 len_cp += n->m_len; 19170 if (m->m_flags & (M_EXT | M_EXTPG)) { 19171 n->m_data = m->m_data + off; 19172 mb_dupcl(n, m); 19173 } else { 19174 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 19175 (u_int)n->m_len); 19176 } 19177 len -= n->m_len; 19178 off = 0; 19179 m = m->m_next; 19180 np = &n->m_next; 19181 if (len || (soff == smb->m_len)) { 19182 /* 19183 * We have more so we move forward or 19184 * we have consumed the entire mbuf and 19185 * len has fell to 0. 19186 */ 19187 soff = 0; 19188 smb = m; 19189 } 19190 19191 } 19192 if (fsb != NULL) { 19193 fsb->m = smb; 19194 fsb->off = soff; 19195 if (smb) { 19196 /* 19197 * Save off the size of the mbuf. We do 19198 * this so that we can recognize when it 19199 * has been trimmed by sbcut() as acks 19200 * come in. 19201 */ 19202 fsb->o_m_len = smb->m_len; 19203 fsb->o_t_len = M_TRAILINGROOM(smb); 19204 } else { 19205 /* 19206 * This is the case where the next mbuf went to NULL. This 19207 * means with this copy we have sent everything in the sb. 19208 * In theory we could clear the fast_output flag, but lets 19209 * not since its possible that we could get more added 19210 * and acks that call the extend function which would let 19211 * us send more. 
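			 * (o_m_len and o_t_len are re-validated in
			 * rack_fo_m_copym(); if the saved mbuf has since been
			 * trimmed by acks or grown at the tail, the stored
			 * offset is adjusted before the next copy.)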
19212 */ 19213 fsb->o_m_len = 0; 19214 fsb->o_t_len = 0; 19215 } 19216 } 19217 return (top); 19218 nospace: 19219 if (top) 19220 m_freem(top); 19221 return (NULL); 19222 19223 } 19224 19225 /* 19226 * This is a copy of m_copym(), taking the TSO segment size/limit 19227 * constraints into account, and advancing the sndptr as it goes. 19228 */ 19229 static struct mbuf * 19230 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen, 19231 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff) 19232 { 19233 struct mbuf *m, *n; 19234 int32_t soff; 19235 19236 m = rack->r_ctl.fsb.m; 19237 if (M_TRAILINGROOM(m) != rack->r_ctl.fsb.o_t_len) { 19238 /* 19239 * The trailing space changed, mbufs can grow 19240 * at the tail but they can't shrink from 19241 * it, KASSERT that. Adjust the orig_m_len to 19242 * compensate for this change. 19243 */ 19244 KASSERT((rack->r_ctl.fsb.o_t_len > M_TRAILINGROOM(m)), 19245 ("mbuf:%p rack:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n", 19246 m, 19247 rack, 19248 (intmax_t)M_TRAILINGROOM(m), 19249 rack->r_ctl.fsb.o_t_len, 19250 rack->r_ctl.fsb.o_m_len, 19251 m->m_len)); 19252 rack->r_ctl.fsb.o_m_len += (rack->r_ctl.fsb.o_t_len - M_TRAILINGROOM(m)); 19253 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(m); 19254 } 19255 if (m->m_len < rack->r_ctl.fsb.o_m_len) { 19256 /* 19257 * Mbuf shrank, trimmed off the top by an ack, our 19258 * offset changes. 19259 */ 19260 KASSERT((rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len)), 19261 ("mbuf:%p len:%u rack:%p oml:%u soff:%u\n", 19262 m, m->m_len, 19263 rack, rack->r_ctl.fsb.o_m_len, 19264 rack->r_ctl.fsb.off)); 19265 19266 if (rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len- m->m_len)) 19267 rack->r_ctl.fsb.off -= (rack->r_ctl.fsb.o_m_len - m->m_len); 19268 else 19269 rack->r_ctl.fsb.off = 0; 19270 rack->r_ctl.fsb.o_m_len = m->m_len; 19271 #ifdef INVARIANTS 19272 } else if (m->m_len > rack->r_ctl.fsb.o_m_len) { 19273 panic("rack:%p m:%p m_len grew outside of t_space compensation", 19274 rack, m); 19275 #endif 19276 } 19277 soff = rack->r_ctl.fsb.off; 19278 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff)); 19279 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen)); 19280 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?", 19281 __FUNCTION__, 19282 rack, *plen, m, m->m_len)); 19283 /* Save off the right location before we copy and advance */ 19284 *s_soff = soff; 19285 *s_mb = rack->r_ctl.fsb.m; 19286 n = rack_fo_base_copym(m, soff, plen, 19287 &rack->r_ctl.fsb, 19288 seglimit, segsize, rack->r_ctl.fsb.hw_tls); 19289 return (n); 19290 } 19291 19292 /* Log the buffer level */ 19293 static void 19294 rack_log_queue_level(struct tcpcb *tp, struct tcp_rack *rack, 19295 int len, struct timeval *tv, 19296 uint32_t cts) 19297 { 19298 uint32_t p_rate = 0, p_queue = 0, err = 0; 19299 union tcp_log_stackspecific log; 19300 19301 #ifdef RATELIMIT 19302 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); 19303 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); 19304 #endif 19305 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 19306 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 19307 log.u_bbr.flex1 = p_rate; 19308 log.u_bbr.flex2 = p_queue; 19309 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; 19310 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; 19311 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; 19312 log.u_bbr.flex7 = 99; 19313 log.u_bbr.flex8 = 0; 19314 log.u_bbr.pkts_out = err; 19315 log.u_bbr.delRate = rack->r_ctl.crte->rate; 19316 log.u_bbr.timeStamp = 
cts;
	log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
	tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0,
	    len, &log, false, NULL, __func__, __LINE__, tv);

}

static uint32_t
rack_check_queue_level(struct tcp_rack *rack, struct tcpcb *tp,
    struct timeval *tv, uint32_t cts, int len, uint32_t segsiz)
{
	uint64_t lentime = 0;
#ifdef RATELIMIT
	uint32_t p_rate = 0, p_queue = 0, err;
	union tcp_log_stackspecific log;
	uint64_t bw;

	err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue);
	/* Failed or queue is zero */
	if (err || (p_queue == 0)) {
		lentime = 0;
		goto out;
	}
	err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate);
	if (err) {
		lentime = 0;
		goto out;
	}
	/*
	 * If we reach here we have some bytes in
	 * the queue. The number returned is a value
	 * between 0 and 0xffff where ffff is full
	 * and 0 is empty. So how best to make this into
	 * something usable?
	 *
	 * The "safer" way is to take the b/w gotten
	 * from the query (which should be our b/w rate)
	 * and pretend that a full send (our rc_pace_max_segs)
	 * is outstanding. We factor it so it's as if a full
	 * number of our MSS segments, in terms of full
	 * ethernet segments, were outstanding.
	 */
	bw = p_rate / 8;
	if (bw) {
		lentime = (rack->r_ctl.rc_pace_max_segs / segsiz);
		lentime *= ETHERNET_SEGMENT_SIZE;
		lentime *= (uint64_t)HPTS_USEC_IN_SEC;
		lentime /= bw;
	} else {
		/* TSNH -- KASSERT? */
		lentime = 0;
	}
out:
	if (tcp_bblogging_on(tp)) {
		memset(&log, 0, sizeof(log));
		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
		log.u_bbr.flex1 = p_rate;
		log.u_bbr.flex2 = p_queue;
		log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using;
		log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs;
		log.u_bbr.flex6 = rack->r_ctl.crte->time_between;
		log.u_bbr.flex7 = 99;
		log.u_bbr.flex8 = 0;
		log.u_bbr.pkts_out = err;
		log.u_bbr.delRate = rack->r_ctl.crte->rate;
		log.u_bbr.cur_del_rate = lentime;
		log.u_bbr.timeStamp = cts;
		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
		tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0,
		    len, &log, false, NULL, __func__, __LINE__, tv);
	}
#endif
	return ((uint32_t)lentime);
}

static int
rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm,
    uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp)
{
	/*
	 * Enter the fast retransmit path. We are given that a sched_pin is
	 * in place (if accounting is compiled in) and the cycle count taken
	 * at the entry is in the ts_val. The concept here is that the rsm
	 * now holds the mbuf offsets and such so we can directly transmit
	 * without a lot of overhead, the len field is already set for
	 * us to prohibit us from sending too much (usually it's 1 MSS).
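	 * Everything here works from the pre-built fast send block: the
	 * ip/tcp header template in r_ctl.fsb is copied and patched, only
	 * the timestamp (and possibly MD5 signature) options are rebuilt,
	 * and the mbuf chain is cloned directly from the rsm's saved
	 * offsets.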
19402 */ 19403 struct ip *ip = NULL; 19404 struct udphdr *udp = NULL; 19405 struct tcphdr *th = NULL; 19406 struct mbuf *m = NULL; 19407 struct inpcb *inp; 19408 uint8_t *cpto; 19409 struct tcp_log_buffer *lgb; 19410 #ifdef TCP_ACCOUNTING 19411 uint64_t crtsc; 19412 int cnt_thru = 1; 19413 #endif 19414 struct tcpopt to; 19415 u_char opt[TCP_MAXOLEN]; 19416 uint32_t hdrlen, optlen; 19417 int32_t slot, segsiz, max_val, tso = 0, error = 0, ulen = 0; 19418 uint16_t flags; 19419 uint32_t if_hw_tsomaxsegcount = 0, startseq; 19420 uint32_t if_hw_tsomaxsegsize; 19421 int32_t ip_sendflag = IP_NO_SND_TAG_RL; 19422 19423 #ifdef INET6 19424 struct ip6_hdr *ip6 = NULL; 19425 19426 if (rack->r_is_v6) { 19427 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 19428 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 19429 } else 19430 #endif /* INET6 */ 19431 { 19432 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 19433 hdrlen = sizeof(struct tcpiphdr); 19434 } 19435 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 19436 goto failed; 19437 } 19438 if (doing_tlp) { 19439 /* Its a TLP add the flag, it may already be there but be sure */ 19440 rsm->r_flags |= RACK_TLP; 19441 } else { 19442 /* If it was a TLP it is not not on this retransmit */ 19443 rsm->r_flags &= ~RACK_TLP; 19444 } 19445 startseq = rsm->r_start; 19446 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 19447 inp = rack->rc_inp; 19448 to.to_flags = 0; 19449 flags = tcp_outflags[tp->t_state]; 19450 if (flags & (TH_SYN|TH_RST)) { 19451 goto failed; 19452 } 19453 if (rsm->r_flags & RACK_HAS_FIN) { 19454 /* We can't send a FIN here */ 19455 goto failed; 19456 } 19457 if (flags & TH_FIN) { 19458 /* We never send a FIN */ 19459 flags &= ~TH_FIN; 19460 } 19461 if (tp->t_flags & TF_RCVD_TSTMP) { 19462 to.to_tsval = ms_cts + tp->ts_offset; 19463 to.to_tsecr = tp->ts_recent; 19464 to.to_flags = TOF_TS; 19465 } 19466 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 19467 /* TCP-MD5 (RFC2385). 
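	 * When TF_SIGNATURE is set we only reserve option space for the
	 * digest via tcp_addoptions() here; the segment is dropped further
	 * down if the digest cannot be computed.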
*/ 19468 if (tp->t_flags & TF_SIGNATURE) 19469 to.to_flags |= TOF_SIGNATURE; 19470 #endif 19471 optlen = tcp_addoptions(&to, opt); 19472 hdrlen += optlen; 19473 udp = rack->r_ctl.fsb.udp; 19474 if (udp) 19475 hdrlen += sizeof(struct udphdr); 19476 if (rack->r_ctl.rc_pace_max_segs) 19477 max_val = rack->r_ctl.rc_pace_max_segs; 19478 else if (rack->rc_user_set_max_segs) 19479 max_val = rack->rc_user_set_max_segs * segsiz; 19480 else 19481 max_val = len; 19482 if ((tp->t_flags & TF_TSO) && 19483 V_tcp_do_tso && 19484 (len > segsiz) && 19485 (tp->t_port == 0)) 19486 tso = 1; 19487 #ifdef INET6 19488 if (MHLEN < hdrlen + max_linkhdr) 19489 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 19490 else 19491 #endif 19492 m = m_gethdr(M_NOWAIT, MT_DATA); 19493 if (m == NULL) 19494 goto failed; 19495 m->m_data += max_linkhdr; 19496 m->m_len = hdrlen; 19497 th = rack->r_ctl.fsb.th; 19498 /* Establish the len to send */ 19499 if (len > max_val) 19500 len = max_val; 19501 if ((tso) && (len + optlen > segsiz)) { 19502 uint32_t if_hw_tsomax; 19503 int32_t max_len; 19504 19505 /* extract TSO information */ 19506 if_hw_tsomax = tp->t_tsomax; 19507 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 19508 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 19509 /* 19510 * Check if we should limit by maximum payload 19511 * length: 19512 */ 19513 if (if_hw_tsomax != 0) { 19514 /* compute maximum TSO length */ 19515 max_len = (if_hw_tsomax - hdrlen - 19516 max_linkhdr); 19517 if (max_len <= 0) { 19518 goto failed; 19519 } else if (len > max_len) { 19520 len = max_len; 19521 } 19522 } 19523 if (len <= segsiz) { 19524 /* 19525 * In case there are too many small fragments don't 19526 * use TSO: 19527 */ 19528 tso = 0; 19529 } 19530 } else { 19531 tso = 0; 19532 } 19533 if ((tso == 0) && (len > segsiz)) 19534 len = segsiz; 19535 (void)tcp_get_usecs(tv); 19536 if ((len == 0) || 19537 (len <= MHLEN - hdrlen - max_linkhdr)) { 19538 goto failed; 19539 } 19540 th->th_seq = htonl(rsm->r_start); 19541 th->th_ack = htonl(tp->rcv_nxt); 19542 /* 19543 * The PUSH bit should only be applied 19544 * if the full retransmission is made. If 19545 * we are sending less than this is the 19546 * left hand edge and should not have 19547 * the PUSH bit. 
19548 */ 19549 if ((rsm->r_flags & RACK_HAD_PUSH) && 19550 (len == (rsm->r_end - rsm->r_start))) 19551 flags |= TH_PUSH; 19552 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 19553 if (th->th_win == 0) { 19554 tp->t_sndzerowin++; 19555 tp->t_flags |= TF_RXWIN0SENT; 19556 } else 19557 tp->t_flags &= ~TF_RXWIN0SENT; 19558 if (rsm->r_flags & RACK_TLP) { 19559 /* 19560 * TLP should not count in retran count, but 19561 * in its own bin 19562 */ 19563 counter_u64_add(rack_tlp_retran, 1); 19564 counter_u64_add(rack_tlp_retran_bytes, len); 19565 } else { 19566 tp->t_sndrexmitpack++; 19567 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 19568 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 19569 } 19570 #ifdef STATS 19571 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 19572 len); 19573 #endif 19574 if (rsm->m == NULL) 19575 goto failed; 19576 if (rsm->m && 19577 ((rsm->orig_m_len != rsm->m->m_len) || 19578 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { 19579 /* Fix up the orig_m_len and possibly the mbuf offset */ 19580 rack_adjust_orig_mlen(rsm); 19581 } 19582 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 19583 if (len <= segsiz) { 19584 /* 19585 * Must have ran out of mbufs for the copy 19586 * shorten it to no longer need tso. Lets 19587 * not put on sendalot since we are low on 19588 * mbufs. 19589 */ 19590 tso = 0; 19591 } 19592 if ((m->m_next == NULL) || (len <= 0)){ 19593 goto failed; 19594 } 19595 if (udp) { 19596 if (rack->r_is_v6) 19597 ulen = hdrlen + len - sizeof(struct ip6_hdr); 19598 else 19599 ulen = hdrlen + len - sizeof(struct ip); 19600 udp->uh_ulen = htons(ulen); 19601 } 19602 m->m_pkthdr.rcvif = (struct ifnet *)0; 19603 if (TCPS_HAVERCVDSYN(tp->t_state) && 19604 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 19605 int ect = tcp_ecn_output_established(tp, &flags, len, true); 19606 if ((tp->t_state == TCPS_SYN_RECEIVED) && 19607 (tp->t_flags2 & TF2_ECN_SND_ECE)) 19608 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 19609 #ifdef INET6 19610 if (rack->r_is_v6) { 19611 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 19612 ip6->ip6_flow |= htonl(ect << 20); 19613 } 19614 else 19615 #endif 19616 { 19617 ip->ip_tos &= ~IPTOS_ECN_MASK; 19618 ip->ip_tos |= ect; 19619 } 19620 } 19621 if (rack->r_ctl.crte != NULL) { 19622 /* See if we can send via the hw queue */ 19623 slot = rack_check_queue_level(rack, tp, tv, cts, len, segsiz); 19624 /* If there is nothing in queue (no pacing time) we can send via the hw queue */ 19625 if (slot == 0) 19626 ip_sendflag = 0; 19627 } 19628 tcp_set_flags(th, flags); 19629 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 19630 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 19631 if (to.to_flags & TOF_SIGNATURE) { 19632 /* 19633 * Calculate MD5 signature and put it into the place 19634 * determined before. 19635 * NOTE: since TCP options buffer doesn't point into 19636 * mbuf's data, calculate offset and use it. 19637 */ 19638 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 19639 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 19640 /* 19641 * Do not send segment if the calculation of MD5 19642 * digest has failed. 
19643 */ 19644 goto failed; 19645 } 19646 } 19647 #endif 19648 #ifdef INET6 19649 if (rack->r_is_v6) { 19650 if (tp->t_port) { 19651 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 19652 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 19653 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 19654 th->th_sum = htons(0); 19655 UDPSTAT_INC(udps_opackets); 19656 } else { 19657 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 19658 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 19659 th->th_sum = in6_cksum_pseudo(ip6, 19660 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 19661 0); 19662 } 19663 } 19664 #endif 19665 #if defined(INET6) && defined(INET) 19666 else 19667 #endif 19668 #ifdef INET 19669 { 19670 if (tp->t_port) { 19671 m->m_pkthdr.csum_flags = CSUM_UDP; 19672 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 19673 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 19674 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 19675 th->th_sum = htons(0); 19676 UDPSTAT_INC(udps_opackets); 19677 } else { 19678 m->m_pkthdr.csum_flags = CSUM_TCP; 19679 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 19680 th->th_sum = in_pseudo(ip->ip_src.s_addr, 19681 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 19682 IPPROTO_TCP + len + optlen)); 19683 } 19684 /* IP version must be set here for ipv4/ipv6 checking later */ 19685 KASSERT(ip->ip_v == IPVERSION, 19686 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 19687 } 19688 #endif 19689 if (tso) { 19690 /* 19691 * Here we use segsiz since we have no added options besides 19692 * any standard timestamp options (no DSACKs or SACKS are sent 19693 * via either fast-path). 19694 */ 19695 KASSERT(len > segsiz, 19696 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 19697 m->m_pkthdr.csum_flags |= CSUM_TSO; 19698 m->m_pkthdr.tso_segsz = segsiz; 19699 } 19700 #ifdef INET6 19701 if (rack->r_is_v6) { 19702 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 19703 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 19704 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 19705 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 19706 else 19707 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 19708 } 19709 #endif 19710 #if defined(INET) && defined(INET6) 19711 else 19712 #endif 19713 #ifdef INET 19714 { 19715 ip->ip_len = htons(m->m_pkthdr.len); 19716 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 19717 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 19718 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 19719 if (tp->t_port == 0 || len < V_tcp_minmss) { 19720 ip->ip_off |= htons(IP_DF); 19721 } 19722 } else { 19723 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 19724 } 19725 } 19726 #endif 19727 if (doing_tlp == 0) { 19728 /* Set we retransmitted */ 19729 rack->rc_gp_saw_rec = 1; 19730 } else { 19731 /* Its a TLP set ca or ss */ 19732 if (tp->snd_cwnd > tp->snd_ssthresh) { 19733 /* Set we sent in CA */ 19734 rack->rc_gp_saw_ca = 1; 19735 } else { 19736 /* Set we sent in SS */ 19737 rack->rc_gp_saw_ss = 1; 19738 } 19739 } 19740 /* Time to copy in our header */ 19741 cpto = mtod(m, uint8_t *); 19742 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 19743 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 19744 if (optlen) { 19745 bcopy(opt, th + 1, optlen); 19746 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 19747 } else { 19748 th->th_off = sizeof(struct tcphdr) >> 2; 19749 } 19750 if (tcp_bblogging_on(rack->rc_tp)) { 19751 union tcp_log_stackspecific log; 19752 19753 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 19754 
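			/*
			 * The peer collapsed its receive window over this
			 * data; log the event and count the forced
			 * retransmission.
			 */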
rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 19755 counter_u64_add(rack_collapsed_win_rxt, 1); 19756 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 19757 } 19758 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 19759 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 19760 if (rack->rack_no_prr) 19761 log.u_bbr.flex1 = 0; 19762 else 19763 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 19764 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 19765 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 19766 log.u_bbr.flex4 = max_val; 19767 /* Save off the early/late values */ 19768 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 19769 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 19770 log.u_bbr.bw_inuse = rack_get_bw(rack); 19771 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 19772 if (doing_tlp == 0) 19773 log.u_bbr.flex8 = 1; 19774 else 19775 log.u_bbr.flex8 = 2; 19776 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 19777 log.u_bbr.flex7 = 55; 19778 log.u_bbr.pkts_out = tp->t_maxseg; 19779 log.u_bbr.timeStamp = cts; 19780 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 19781 if (rsm && (rsm->r_rtr_cnt > 0)) { 19782 /* 19783 * When we have a retransmit we want to log the 19784 * burst at send and flight at send from before. 19785 */ 19786 log.u_bbr.flex5 = rsm->r_fas; 19787 log.u_bbr.bbr_substate = rsm->r_bas; 19788 } else { 19789 /* 19790 * This is currently unlikely until we do the 19791 * packet pair probes but I will add it for completeness. 19792 */ 19793 log.u_bbr.flex5 = log.u_bbr.inflight; 19794 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 19795 } 19796 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 19797 log.u_bbr.delivered = 0; 19798 log.u_bbr.rttProp = (uintptr_t)rsm; 19799 log.u_bbr.delRate = rsm->r_flags; 19800 log.u_bbr.delRate <<= 31; 19801 log.u_bbr.delRate |= rack->r_must_retran; 19802 log.u_bbr.delRate <<= 1; 19803 log.u_bbr.delRate |= 1; 19804 log.u_bbr.pkt_epoch = __LINE__; 19805 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 19806 len, &log, false, NULL, __func__, __LINE__, tv); 19807 } else 19808 lgb = NULL; 19809 if ((rack->r_ctl.crte != NULL) && 19810 tcp_bblogging_on(tp)) { 19811 rack_log_queue_level(tp, rack, len, tv, cts); 19812 } 19813 #ifdef INET6 19814 if (rack->r_is_v6) { 19815 error = ip6_output(m, inp->in6p_outputopts, 19816 &inp->inp_route6, 19817 ip_sendflag, NULL, NULL, inp); 19818 } 19819 else 19820 #endif 19821 #ifdef INET 19822 { 19823 error = ip_output(m, NULL, 19824 &inp->inp_route, 19825 ip_sendflag, 0, inp); 19826 } 19827 #endif 19828 m = NULL; 19829 if (lgb) { 19830 lgb->tlb_errno = error; 19831 lgb = NULL; 19832 } 19833 /* Move snd_nxt to snd_max so we don't have false retransmissions */ 19834 tp->snd_nxt = tp->snd_max; 19835 if (error) { 19836 goto failed; 19837 } else if (rack->rc_hw_nobuf && (ip_sendflag != IP_NO_SND_TAG_RL)) { 19838 rack->rc_hw_nobuf = 0; 19839 rack->r_ctl.rc_agg_delayed = 0; 19840 rack->r_early = 0; 19841 rack->r_late = 0; 19842 rack->r_ctl.rc_agg_early = 0; 19843 } 19844 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 19845 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls, segsiz); 19846 if (doing_tlp) { 19847 rack->rc_tlp_in_progress = 1; 19848 rack->r_ctl.rc_tlp_cnt_out++; 19849 } 19850 if (error == 0) { 19851 counter_u64_add(rack_total_bytes, len); 19852 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 19853 if (doing_tlp) { 19854 
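			/*
			 * Remember the sequence and length of this TLP so a
			 * later ACK can be matched back to the probe.
			 */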
rack->rc_last_sent_tlp_past_cumack = 0; 19855 rack->rc_last_sent_tlp_seq_valid = 1; 19856 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 19857 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 19858 } 19859 if (rack->r_ctl.rc_prr_sndcnt >= len) 19860 rack->r_ctl.rc_prr_sndcnt -= len; 19861 else 19862 rack->r_ctl.rc_prr_sndcnt = 0; 19863 } 19864 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 19865 rack->forced_ack = 0; /* If we send something zap the FA flag */ 19866 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 19867 rack->r_ctl.retran_during_recovery += len; 19868 { 19869 int idx; 19870 19871 idx = (len / segsiz) + 3; 19872 if (idx >= TCP_MSS_ACCT_ATIMER) 19873 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 19874 else 19875 counter_u64_add(rack_out_size[idx], 1); 19876 } 19877 if (tp->t_rtttime == 0) { 19878 tp->t_rtttime = ticks; 19879 tp->t_rtseq = startseq; 19880 KMOD_TCPSTAT_INC(tcps_segstimed); 19881 } 19882 counter_u64_add(rack_fto_rsm_send, 1); 19883 if (error && (error == ENOBUFS)) { 19884 if (rack->r_ctl.crte != NULL) { 19885 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); 19886 if (tcp_bblogging_on(rack->rc_tp)) 19887 rack_log_queue_level(tp, rack, len, tv, cts); 19888 } else 19889 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); 19890 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 19891 if (rack->rc_enobuf < 0x7f) 19892 rack->rc_enobuf++; 19893 if (slot < (10 * HPTS_USEC_IN_MSEC)) 19894 slot = 10 * HPTS_USEC_IN_MSEC; 19895 if (rack->r_ctl.crte != NULL) { 19896 counter_u64_add(rack_saw_enobuf_hw, 1); 19897 tcp_rl_log_enobuf(rack->r_ctl.crte); 19898 } 19899 counter_u64_add(rack_saw_enobuf, 1); 19900 } else { 19901 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz, __LINE__); 19902 } 19903 rack_start_hpts_timer(rack, tp, cts, slot, len, 0); 19904 #ifdef TCP_ACCOUNTING 19905 crtsc = get_cyclecount(); 19906 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19907 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 19908 } 19909 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19910 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 19911 } 19912 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19913 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); 19914 } 19915 sched_unpin(); 19916 #endif 19917 return (0); 19918 failed: 19919 if (m) 19920 m_free(m); 19921 return (-1); 19922 } 19923 19924 static void 19925 rack_sndbuf_autoscale(struct tcp_rack *rack) 19926 { 19927 /* 19928 * Automatic sizing of send socket buffer. Often the send buffer 19929 * size is not optimally adjusted to the actual network conditions 19930 * at hand (delay bandwidth product). Setting the buffer size too 19931 * small limits throughput on links with high bandwidth and high 19932 * delay (eg. trans-continental/oceanic links). Setting the 19933 * buffer size too big consumes too much real kernel memory, 19934 * especially with many connections on busy servers. 19935 * 19936 * The criteria to step up the send buffer one notch are: 19937 * 1. receive window of remote host is larger than send buffer 19938 * (with a fudge factor of 5/4th); 19939 * 2. send buffer is filled to 7/8th with data (so we actually 19940 * have data to make use of it); 19941 * 3. send buffer fill has not hit maximal automatic size; 19942 * 4. our send window (slow start and cogestion controlled) is 19943 * larger than sent but unacknowledged data in send buffer. 
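 *
 * As a rough worked example of the step-up (illustrative numbers
 * only): with a 64 KB sb_hiwat and rack_autosndbuf_inc set to 25,
 * one pass below adds 25% of the current high water mark, i.e.
 * 16 KB, growing the buffer to roughly 80 KB, clamped to
 * V_tcp_autosndbuf_max.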
19944 * 19945 * Note that the rack version moves things much faster since 19946 * we want to avoid hitting cache lines in the rack_fast_output() 19947 * path so this is called much less often and thus moves 19948 * the SB forward by a percentage. 19949 */ 19950 struct socket *so; 19951 struct tcpcb *tp; 19952 uint32_t sendwin, scaleup; 19953 19954 tp = rack->rc_tp; 19955 so = rack->rc_inp->inp_socket; 19956 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); 19957 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { 19958 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && 19959 sbused(&so->so_snd) >= 19960 (so->so_snd.sb_hiwat / 8 * 7) && 19961 sbused(&so->so_snd) < V_tcp_autosndbuf_max && 19962 sendwin >= (sbused(&so->so_snd) - 19963 (tp->snd_max - tp->snd_una))) { 19964 if (rack_autosndbuf_inc) 19965 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; 19966 else 19967 scaleup = V_tcp_autosndbuf_inc; 19968 if (scaleup < V_tcp_autosndbuf_inc) 19969 scaleup = V_tcp_autosndbuf_inc; 19970 scaleup += so->so_snd.sb_hiwat; 19971 if (scaleup > V_tcp_autosndbuf_max) 19972 scaleup = V_tcp_autosndbuf_max; 19973 if (!sbreserve_locked(so, SO_SND, scaleup, curthread)) 19974 so->so_snd.sb_flags &= ~SB_AUTOSIZE; 19975 } 19976 } 19977 } 19978 19979 static int 19980 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val, 19981 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err) 19982 { 19983 /* 19984 * Enter to do fast output. We are given that the sched_pin is 19985 * in place (if accounting is compiled in) and the cycle count taken 19986 * at entry is in place in ts_val. The idea here is that 19987 * we know how many more bytes needs to be sent (presumably either 19988 * during pacing or to fill the cwnd and that was greater than 19989 * the max-burst). We have how much to send and all the info we 19990 * need to just send. 
19991 */ 19992 #ifdef INET 19993 struct ip *ip = NULL; 19994 #endif 19995 struct udphdr *udp = NULL; 19996 struct tcphdr *th = NULL; 19997 struct mbuf *m, *s_mb; 19998 struct inpcb *inp; 19999 uint8_t *cpto; 20000 struct tcp_log_buffer *lgb; 20001 #ifdef TCP_ACCOUNTING 20002 uint64_t crtsc; 20003 #endif 20004 struct tcpopt to; 20005 u_char opt[TCP_MAXOLEN]; 20006 uint32_t hdrlen, optlen; 20007 #ifdef TCP_ACCOUNTING 20008 int cnt_thru = 1; 20009 #endif 20010 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0; 20011 uint16_t flags; 20012 uint32_t s_soff; 20013 uint32_t if_hw_tsomaxsegcount = 0, startseq; 20014 uint32_t if_hw_tsomaxsegsize; 20015 uint32_t add_flag = RACK_SENT_FP; 20016 #ifdef INET6 20017 struct ip6_hdr *ip6 = NULL; 20018 20019 if (rack->r_is_v6) { 20020 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 20021 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 20022 } else 20023 #endif /* INET6 */ 20024 { 20025 #ifdef INET 20026 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 20027 hdrlen = sizeof(struct tcpiphdr); 20028 #endif 20029 } 20030 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 20031 m = NULL; 20032 goto failed; 20033 } 20034 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 20035 startseq = tp->snd_max; 20036 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 20037 inp = rack->rc_inp; 20038 len = rack->r_ctl.fsb.left_to_send; 20039 to.to_flags = 0; 20040 flags = rack->r_ctl.fsb.tcp_flags; 20041 if (tp->t_flags & TF_RCVD_TSTMP) { 20042 to.to_tsval = ms_cts + tp->ts_offset; 20043 to.to_tsecr = tp->ts_recent; 20044 to.to_flags = TOF_TS; 20045 } 20046 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 20047 /* TCP-MD5 (RFC2385). */ 20048 if (tp->t_flags & TF_SIGNATURE) 20049 to.to_flags |= TOF_SIGNATURE; 20050 #endif 20051 optlen = tcp_addoptions(&to, opt); 20052 hdrlen += optlen; 20053 udp = rack->r_ctl.fsb.udp; 20054 if (udp) 20055 hdrlen += sizeof(struct udphdr); 20056 if (rack->r_ctl.rc_pace_max_segs) 20057 max_val = rack->r_ctl.rc_pace_max_segs; 20058 else if (rack->rc_user_set_max_segs) 20059 max_val = rack->rc_user_set_max_segs * segsiz; 20060 else 20061 max_val = len; 20062 if ((tp->t_flags & TF_TSO) && 20063 V_tcp_do_tso && 20064 (len > segsiz) && 20065 (tp->t_port == 0)) 20066 tso = 1; 20067 again: 20068 #ifdef INET6 20069 if (MHLEN < hdrlen + max_linkhdr) 20070 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 20071 else 20072 #endif 20073 m = m_gethdr(M_NOWAIT, MT_DATA); 20074 if (m == NULL) 20075 goto failed; 20076 m->m_data += max_linkhdr; 20077 m->m_len = hdrlen; 20078 th = rack->r_ctl.fsb.th; 20079 /* Establish the len to send */ 20080 if (len > max_val) 20081 len = max_val; 20082 if ((tso) && (len + optlen > segsiz)) { 20083 uint32_t if_hw_tsomax; 20084 int32_t max_len; 20085 20086 /* extract TSO information */ 20087 if_hw_tsomax = tp->t_tsomax; 20088 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 20089 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 20090 /* 20091 * Check if we should limit by maximum payload 20092 * length: 20093 */ 20094 if (if_hw_tsomax != 0) { 20095 /* compute maximum TSO length */ 20096 max_len = (if_hw_tsomax - hdrlen - 20097 max_linkhdr); 20098 if (max_len <= 0) { 20099 goto failed; 20100 } else if (len > max_len) { 20101 len = max_len; 20102 } 20103 } 20104 if (len <= segsiz) { 20105 /* 20106 * In case there are too many small fragments don't 20107 * use TSO: 20108 */ 20109 tso = 0; 20110 } 20111 } else { 20112 tso = 0; 20113 } 20114 if ((tso == 0) && (len > segsiz)) 20115 len = segsiz; 20116 
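/*
 * Rough illustration of the clamping just above (example values
 * only): with an if_hw_tsomax of 65535, a 52 byte TCP/IP header
 * including timestamp options and a 16 byte max_linkhdr, max_len
 * comes out to 65467 bytes, so a larger TSO request is trimmed to
 * that; when TSO is not in use the send is instead capped at a
 * single segsiz.
 */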
(void)tcp_get_usecs(tv); 20117 if ((len == 0) || 20118 (len <= MHLEN - hdrlen - max_linkhdr)) { 20119 goto failed; 20120 } 20121 sb_offset = tp->snd_max - tp->snd_una; 20122 th->th_seq = htonl(tp->snd_max); 20123 th->th_ack = htonl(tp->rcv_nxt); 20124 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 20125 if (th->th_win == 0) { 20126 tp->t_sndzerowin++; 20127 tp->t_flags |= TF_RXWIN0SENT; 20128 } else 20129 tp->t_flags &= ~TF_RXWIN0SENT; 20130 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 20131 KMOD_TCPSTAT_INC(tcps_sndpack); 20132 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 20133 #ifdef STATS 20134 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 20135 len); 20136 #endif 20137 if (rack->r_ctl.fsb.m == NULL) 20138 goto failed; 20139 20140 /* s_mb and s_soff are saved for rack_log_output */ 20141 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 20142 &s_mb, &s_soff); 20143 if (len <= segsiz) { 20144 /* 20145 * Must have ran out of mbufs for the copy 20146 * shorten it to no longer need tso. Lets 20147 * not put on sendalot since we are low on 20148 * mbufs. 20149 */ 20150 tso = 0; 20151 } 20152 if (rack->r_ctl.fsb.rfo_apply_push && 20153 (len == rack->r_ctl.fsb.left_to_send)) { 20154 tcp_set_flags(th, flags | TH_PUSH); 20155 add_flag |= RACK_HAD_PUSH; 20156 } 20157 if ((m->m_next == NULL) || (len <= 0)){ 20158 goto failed; 20159 } 20160 if (udp) { 20161 if (rack->r_is_v6) 20162 ulen = hdrlen + len - sizeof(struct ip6_hdr); 20163 else 20164 ulen = hdrlen + len - sizeof(struct ip); 20165 udp->uh_ulen = htons(ulen); 20166 } 20167 m->m_pkthdr.rcvif = (struct ifnet *)0; 20168 if (TCPS_HAVERCVDSYN(tp->t_state) && 20169 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 20170 int ect = tcp_ecn_output_established(tp, &flags, len, false); 20171 if ((tp->t_state == TCPS_SYN_RECEIVED) && 20172 (tp->t_flags2 & TF2_ECN_SND_ECE)) 20173 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 20174 #ifdef INET6 20175 if (rack->r_is_v6) { 20176 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 20177 ip6->ip6_flow |= htonl(ect << 20); 20178 } 20179 else 20180 #endif 20181 { 20182 #ifdef INET 20183 ip->ip_tos &= ~IPTOS_ECN_MASK; 20184 ip->ip_tos |= ect; 20185 #endif 20186 } 20187 } 20188 tcp_set_flags(th, flags); 20189 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 20190 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 20191 if (to.to_flags & TOF_SIGNATURE) { 20192 /* 20193 * Calculate MD5 signature and put it into the place 20194 * determined before. 20195 * NOTE: since TCP options buffer doesn't point into 20196 * mbuf's data, calculate offset and use it. 20197 */ 20198 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 20199 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 20200 /* 20201 * Do not send segment if the calculation of MD5 20202 * digest has failed. 
20203 */ 20204 goto failed; 20205 } 20206 } 20207 #endif 20208 #ifdef INET6 20209 if (rack->r_is_v6) { 20210 if (tp->t_port) { 20211 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 20212 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 20213 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 20214 th->th_sum = htons(0); 20215 UDPSTAT_INC(udps_opackets); 20216 } else { 20217 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 20218 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 20219 th->th_sum = in6_cksum_pseudo(ip6, 20220 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 20221 0); 20222 } 20223 } 20224 #endif 20225 #if defined(INET6) && defined(INET) 20226 else 20227 #endif 20228 #ifdef INET 20229 { 20230 if (tp->t_port) { 20231 m->m_pkthdr.csum_flags = CSUM_UDP; 20232 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 20233 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 20234 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 20235 th->th_sum = htons(0); 20236 UDPSTAT_INC(udps_opackets); 20237 } else { 20238 m->m_pkthdr.csum_flags = CSUM_TCP; 20239 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 20240 th->th_sum = in_pseudo(ip->ip_src.s_addr, 20241 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 20242 IPPROTO_TCP + len + optlen)); 20243 } 20244 /* IP version must be set here for ipv4/ipv6 checking later */ 20245 KASSERT(ip->ip_v == IPVERSION, 20246 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 20247 } 20248 #endif 20249 if (tso) { 20250 /* 20251 * Here we use segsiz since we have no added options besides 20252 * any standard timestamp options (no DSACKs or SACKS are sent 20253 * via either fast-path). 20254 */ 20255 KASSERT(len > segsiz, 20256 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 20257 m->m_pkthdr.csum_flags |= CSUM_TSO; 20258 m->m_pkthdr.tso_segsz = segsiz; 20259 } 20260 #ifdef INET6 20261 if (rack->r_is_v6) { 20262 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 20263 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 20264 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 20265 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 20266 else 20267 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 20268 } 20269 #endif 20270 #if defined(INET) && defined(INET6) 20271 else 20272 #endif 20273 #ifdef INET 20274 { 20275 ip->ip_len = htons(m->m_pkthdr.len); 20276 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 20277 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 20278 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 20279 if (tp->t_port == 0 || len < V_tcp_minmss) { 20280 ip->ip_off |= htons(IP_DF); 20281 } 20282 } else { 20283 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 20284 } 20285 } 20286 #endif 20287 if (tp->snd_cwnd > tp->snd_ssthresh) { 20288 /* Set we sent in CA */ 20289 rack->rc_gp_saw_ca = 1; 20290 } else { 20291 /* Set we sent in SS */ 20292 rack->rc_gp_saw_ss = 1; 20293 } 20294 /* Time to copy in our header */ 20295 cpto = mtod(m, uint8_t *); 20296 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 20297 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 20298 if (optlen) { 20299 bcopy(opt, th + 1, optlen); 20300 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 20301 } else { 20302 th->th_off = sizeof(struct tcphdr) >> 2; 20303 } 20304 if ((rack->r_ctl.crte != NULL) && 20305 tcp_bblogging_on(tp)) { 20306 rack_log_queue_level(tp, rack, len, tv, cts); 20307 } 20308 if (tcp_bblogging_on(rack->rc_tp)) { 20309 union tcp_log_stackspecific log; 20310 20311 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 20312 log.u_bbr.inhpts = 
tcp_in_hpts(rack->rc_tp); 20313 if (rack->rack_no_prr) 20314 log.u_bbr.flex1 = 0; 20315 else 20316 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 20317 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 20318 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 20319 log.u_bbr.flex4 = max_val; 20320 /* Save off the early/late values */ 20321 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 20322 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 20323 log.u_bbr.bw_inuse = rack_get_bw(rack); 20324 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 20325 log.u_bbr.flex8 = 0; 20326 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 20327 log.u_bbr.flex7 = 44; 20328 log.u_bbr.pkts_out = tp->t_maxseg; 20329 log.u_bbr.timeStamp = cts; 20330 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 20331 log.u_bbr.flex5 = log.u_bbr.inflight; 20332 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 20333 log.u_bbr.delivered = 0; 20334 log.u_bbr.rttProp = 0; 20335 log.u_bbr.delRate = rack->r_must_retran; 20336 log.u_bbr.delRate <<= 1; 20337 log.u_bbr.pkt_epoch = __LINE__; 20338 /* For fast output no retrans so just inflight and how many mss we send */ 20339 log.u_bbr.flex5 = log.u_bbr.inflight; 20340 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 20341 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 20342 len, &log, false, NULL, __func__, __LINE__, tv); 20343 } else 20344 lgb = NULL; 20345 #ifdef INET6 20346 if (rack->r_is_v6) { 20347 error = ip6_output(m, inp->in6p_outputopts, 20348 &inp->inp_route6, 20349 0, NULL, NULL, inp); 20350 } 20351 #endif 20352 #if defined(INET) && defined(INET6) 20353 else 20354 #endif 20355 #ifdef INET 20356 { 20357 error = ip_output(m, NULL, 20358 &inp->inp_route, 20359 0, 0, inp); 20360 } 20361 #endif 20362 if (lgb) { 20363 lgb->tlb_errno = error; 20364 lgb = NULL; 20365 } 20366 if (error) { 20367 *send_err = error; 20368 m = NULL; 20369 goto failed; 20370 } else if (rack->rc_hw_nobuf) { 20371 rack->rc_hw_nobuf = 0; 20372 rack->r_ctl.rc_agg_delayed = 0; 20373 rack->r_early = 0; 20374 rack->r_late = 0; 20375 rack->r_ctl.rc_agg_early = 0; 20376 } 20377 if ((error == 0) && (rack->lt_bw_up == 0)) { 20378 /* Unlikely */ 20379 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(tv); 20380 rack->r_ctl.lt_seq = tp->snd_una; 20381 rack->lt_bw_up = 1; 20382 } else if ((error == 0) && 20383 (((tp->snd_max + len) - rack->r_ctl.lt_seq) > 0x7fffffff)) { 20384 /* 20385 * Need to record what we have since we are 20386 * approaching seq wrap. 
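 * The 0x7fffffff check above is a guard against 32-bit sequence
 * wrap: once roughly 2 GB have been sent past lt_seq, the bytes
 * and elapsed send time are folded into lt_bw_bytes/lt_bw_time
 * below, from which the long-term bandwidth estimate is later
 * derived (roughly bytes divided by the accumulated send time).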
20387 */ 20388 struct timeval tv; 20389 uint64_t tmark; 20390 20391 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); 20392 rack->r_ctl.lt_seq = tp->snd_una; 20393 tmark = tcp_get_u64_usecs(&tv); 20394 if (tmark > rack->r_ctl.lt_timemark) { 20395 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 20396 rack->r_ctl.lt_timemark = tmark; 20397 } 20398 } 20399 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 20400 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls, segsiz); 20401 m = NULL; 20402 if (tp->snd_una == tp->snd_max) { 20403 rack->r_ctl.rc_tlp_rxt_last_time = cts; 20404 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 20405 tp->t_acktime = ticks; 20406 } 20407 counter_u64_add(rack_total_bytes, len); 20408 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 20409 20410 rack->forced_ack = 0; /* If we send something zap the FA flag */ 20411 tot_len += len; 20412 if ((tp->t_flags & TF_GPUTINPROG) == 0) 20413 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 20414 tp->snd_max += len; 20415 tp->snd_nxt = tp->snd_max; 20416 if (rack->rc_new_rnd_needed) { 20417 rack_new_round_starts(tp, rack, tp->snd_max); 20418 } 20419 { 20420 int idx; 20421 20422 idx = (len / segsiz) + 3; 20423 if (idx >= TCP_MSS_ACCT_ATIMER) 20424 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 20425 else 20426 counter_u64_add(rack_out_size[idx], 1); 20427 } 20428 if (len <= rack->r_ctl.fsb.left_to_send) 20429 rack->r_ctl.fsb.left_to_send -= len; 20430 else 20431 rack->r_ctl.fsb.left_to_send = 0; 20432 if (rack->r_ctl.fsb.left_to_send < segsiz) { 20433 rack->r_fast_output = 0; 20434 rack->r_ctl.fsb.left_to_send = 0; 20435 /* At the end of fast_output scale up the sb */ 20436 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd); 20437 rack_sndbuf_autoscale(rack); 20438 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd); 20439 } 20440 if (tp->t_rtttime == 0) { 20441 tp->t_rtttime = ticks; 20442 tp->t_rtseq = startseq; 20443 KMOD_TCPSTAT_INC(tcps_segstimed); 20444 } 20445 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 20446 (max_val > len) && 20447 (tso == 0)) { 20448 max_val -= len; 20449 len = segsiz; 20450 th = rack->r_ctl.fsb.th; 20451 #ifdef TCP_ACCOUNTING 20452 cnt_thru++; 20453 #endif 20454 goto again; 20455 } 20456 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 20457 counter_u64_add(rack_fto_send, 1); 20458 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz, __LINE__); 20459 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0); 20460 #ifdef TCP_ACCOUNTING 20461 crtsc = get_cyclecount(); 20462 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 20463 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 20464 } 20465 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 20466 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 20467 } 20468 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 20469 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz); 20470 } 20471 sched_unpin(); 20472 #endif 20473 return (0); 20474 failed: 20475 if (m) 20476 m_free(m); 20477 rack->r_fast_output = 0; 20478 return (-1); 20479 } 20480 20481 static inline void 20482 rack_setup_fast_output(struct tcpcb *tp, struct tcp_rack *rack, 20483 struct sockbuf *sb, 20484 int len, int orig_len, int segsiz, uint32_t pace_max_seg, 20485 bool hw_tls, 20486 uint16_t flags) 20487 { 20488 rack->r_fast_output = 1; 20489 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 20490 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 20491 
rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); 20492 rack->r_ctl.fsb.tcp_flags = flags; 20493 rack->r_ctl.fsb.left_to_send = orig_len - len; 20494 if (rack->r_ctl.fsb.left_to_send < pace_max_seg) { 20495 /* Less than a full sized pace, lets not */ 20496 rack->r_fast_output = 0; 20497 return; 20498 } else { 20499 /* Round down to the nearest pace_max_seg */ 20500 rack->r_ctl.fsb.left_to_send = rounddown(rack->r_ctl.fsb.left_to_send, pace_max_seg); 20501 } 20502 if (hw_tls) 20503 rack->r_ctl.fsb.hw_tls = 1; 20504 else 20505 rack->r_ctl.fsb.hw_tls = 0; 20506 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 20507 ("rack:%p left_to_send:%u sbavail:%u out:%u", 20508 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 20509 (tp->snd_max - tp->snd_una))); 20510 if (rack->r_ctl.fsb.left_to_send < segsiz) 20511 rack->r_fast_output = 0; 20512 else { 20513 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 20514 rack->r_ctl.fsb.rfo_apply_push = 1; 20515 else 20516 rack->r_ctl.fsb.rfo_apply_push = 0; 20517 } 20518 } 20519 20520 static uint32_t 20521 rack_get_hpts_pacing_min_for_bw(struct tcp_rack *rack, int32_t segsiz) 20522 { 20523 uint64_t min_time; 20524 uint32_t maxlen; 20525 20526 min_time = (uint64_t)get_hpts_min_sleep_time(); 20527 maxlen = (uint32_t)((rack->r_ctl.gp_bw * min_time) / (uint64_t)HPTS_USEC_IN_SEC); 20528 maxlen = roundup(maxlen, segsiz); 20529 return (maxlen); 20530 } 20531 20532 static struct rack_sendmap * 20533 rack_check_collapsed(struct tcp_rack *rack, uint32_t cts) 20534 { 20535 struct rack_sendmap *rsm = NULL; 20536 int thresh; 20537 20538 restart: 20539 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); 20540 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) { 20541 /* Nothing, strange turn off validity */ 20542 rack->r_collapse_point_valid = 0; 20543 return (NULL); 20544 } 20545 /* Can we send it yet? */ 20546 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) { 20547 /* 20548 * Receiver window has not grown enough for 20549 * the segment to be put on the wire. 20550 */ 20551 return (NULL); 20552 } 20553 if (rsm->r_flags & RACK_ACKED) { 20554 /* 20555 * It has been sacked, lets move to the 20556 * next one if possible. 20557 */ 20558 rack->r_ctl.last_collapse_point = rsm->r_end; 20559 /* Are we done? */ 20560 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 20561 rack->r_ctl.high_collapse_point)) { 20562 rack->r_collapse_point_valid = 0; 20563 return (NULL); 20564 } 20565 goto restart; 20566 } 20567 /* Now has it been long enough ? */ 20568 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts, __LINE__, 1); 20569 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) { 20570 rack_log_collapse(rack, rsm->r_start, 20571 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 20572 thresh, __LINE__, 6, rsm->r_flags, rsm); 20573 return (rsm); 20574 } 20575 /* Not enough time */ 20576 rack_log_collapse(rack, rsm->r_start, 20577 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 20578 thresh, __LINE__, 7, rsm->r_flags, rsm); 20579 return (NULL); 20580 } 20581 20582 static void 20583 rack_credit_back_policer_idle_time(struct tcp_rack *rack, uint64_t idle_t, int line) 20584 { 20585 /* 20586 * We were idle some time (idle_t) and so our policer bucket 20587 * needs to grow. It can go no higher than policer_bucket_size. 
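 * As a rough worked example, assuming policer_bw is tracked in
 * bytes per second: being idle for 100000 usecs (100 ms) against
 * a 1250000 byte/sec (10 Mbit/s) policer credits 125000 bytes
 * back into current_policer_bucket, clipped to
 * policer_bucket_size.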
20588 */ 20589 uint64_t len; 20590 20591 len = idle_t * rack->r_ctl.policer_bw; 20592 len /= HPTS_USEC_IN_SEC; 20593 rack->r_ctl.current_policer_bucket += (uint32_t)len; 20594 if (rack->r_ctl.policer_bucket_size < rack->r_ctl.current_policer_bucket) { 20595 rack->r_ctl.current_policer_bucket = rack->r_ctl.policer_bucket_size; 20596 } 20597 if (rack_verbose_logging > 0) 20598 policer_detection_log(rack, (uint32_t)len, line, (uint32_t)idle_t, 0, 7); 20599 } 20600 20601 static inline void 20602 rack_validate_sizes(struct tcp_rack *rack, int32_t *len, int32_t segsiz, uint32_t pace_max_seg) 20603 { 20604 if ((rack->full_size_rxt == 0) && 20605 (rack->shape_rxt_to_pacing_min == 0) && 20606 (*len >= segsiz)) { 20607 *len = segsiz; 20608 } else if (rack->shape_rxt_to_pacing_min && 20609 rack->gp_ready) { 20610 /* We use pacing min as shaping len req */ 20611 uint32_t maxlen; 20612 20613 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 20614 if (*len > maxlen) 20615 *len = maxlen; 20616 } else { 20617 /* 20618 * The else is full_size_rxt is on so send it all 20619 * note we do need to check this for exceeding 20620 * our max segment size due to the fact that 20621 * we do sometimes merge chunks together i.e. 20622 * we cannot just assume that we will never have 20623 * a chunk greater than pace_max_seg 20624 */ 20625 if (*len > pace_max_seg) 20626 *len = pace_max_seg; 20627 } 20628 } 20629 20630 static int 20631 rack_output(struct tcpcb *tp) 20632 { 20633 struct socket *so; 20634 uint32_t recwin; 20635 uint32_t sb_offset, s_moff = 0; 20636 int32_t len, error = 0; 20637 uint16_t flags; 20638 struct mbuf *m, *s_mb = NULL; 20639 struct mbuf *mb; 20640 uint32_t if_hw_tsomaxsegcount = 0; 20641 uint32_t if_hw_tsomaxsegsize; 20642 int32_t segsiz, minseg; 20643 long tot_len_this_send = 0; 20644 #ifdef INET 20645 struct ip *ip = NULL; 20646 #endif 20647 struct udphdr *udp = NULL; 20648 struct tcp_rack *rack; 20649 struct tcphdr *th; 20650 uint8_t pass = 0; 20651 uint8_t mark = 0; 20652 uint8_t check_done = 0; 20653 uint8_t wanted_cookie = 0; 20654 u_char opt[TCP_MAXOLEN]; 20655 unsigned ipoptlen, optlen, hdrlen, ulen=0; 20656 uint32_t rack_seq; 20657 20658 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 20659 unsigned ipsec_optlen = 0; 20660 20661 #endif 20662 int32_t idle, sendalot; 20663 uint32_t tot_idle; 20664 int32_t sub_from_prr = 0; 20665 volatile int32_t sack_rxmit; 20666 struct rack_sendmap *rsm = NULL; 20667 int32_t tso, mtu; 20668 struct tcpopt to; 20669 int32_t slot = 0; 20670 int32_t sup_rack = 0; 20671 uint32_t cts, ms_cts, delayed, early; 20672 uint32_t add_flag = RACK_SENT_SP; 20673 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */ 20674 uint8_t doing_tlp = 0; 20675 uint32_t cwnd_to_use, pace_max_seg; 20676 int32_t do_a_prefetch = 0; 20677 int32_t prefetch_rsm = 0; 20678 int32_t orig_len = 0; 20679 struct timeval tv; 20680 int32_t prefetch_so_done = 0; 20681 struct tcp_log_buffer *lgb; 20682 struct inpcb *inp = tptoinpcb(tp); 20683 struct sockbuf *sb; 20684 uint64_t ts_val = 0; 20685 #ifdef TCP_ACCOUNTING 20686 uint64_t crtsc; 20687 #endif 20688 #ifdef INET6 20689 struct ip6_hdr *ip6 = NULL; 20690 int32_t isipv6; 20691 #endif 20692 bool hpts_calling, hw_tls = false; 20693 20694 NET_EPOCH_ASSERT(); 20695 INP_WLOCK_ASSERT(inp); 20696 20697 /* setup and take the cache hits here */ 20698 rack = (struct tcp_rack *)tp->t_fb_ptr; 20699 #ifdef TCP_ACCOUNTING 20700 sched_pin(); 20701 ts_val = get_cyclecount(); 20702 #endif 20703 hpts_calling = !!(tp->t_flags2 & 
TF2_HPTS_CALLS); 20704 tp->t_flags2 &= ~TF2_HPTS_CALLS; 20705 #ifdef TCP_OFFLOAD 20706 if (tp->t_flags & TF_TOE) { 20707 #ifdef TCP_ACCOUNTING 20708 sched_unpin(); 20709 #endif 20710 return (tcp_offload_output(tp)); 20711 } 20712 #endif 20713 if (rack->rack_deferred_inited == 0) { 20714 /* 20715 * If we are the connecting socket we will 20716 * hit rack_init() when no sequence numbers 20717 * are setup. This makes it so we must defer 20718 * some initialization. Call that now. 20719 */ 20720 rack_deferred_init(tp, rack); 20721 } 20722 /* 20723 * For TFO connections in SYN_RECEIVED, only allow the initial 20724 * SYN|ACK and those sent by the retransmit timer. 20725 */ 20726 if ((tp->t_flags & TF_FASTOPEN) && 20727 (tp->t_state == TCPS_SYN_RECEIVED) && 20728 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 20729 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 20730 #ifdef TCP_ACCOUNTING 20731 sched_unpin(); 20732 #endif 20733 return (0); 20734 } 20735 #ifdef INET6 20736 if (rack->r_state) { 20737 /* Use the cache line loaded if possible */ 20738 isipv6 = rack->r_is_v6; 20739 } else { 20740 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 20741 } 20742 #endif 20743 early = 0; 20744 cts = tcp_get_usecs(&tv); 20745 ms_cts = tcp_tv_to_mssectick(&tv); 20746 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 20747 tcp_in_hpts(rack->rc_tp)) { 20748 /* 20749 * We are on the hpts for some timer but not hptsi output. 20750 * Remove from the hpts unconditionally. 20751 */ 20752 rack_timer_cancel(tp, rack, cts, __LINE__); 20753 } 20754 /* Are we pacing and late? */ 20755 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 20756 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 20757 /* We are delayed */ 20758 delayed = cts - rack->r_ctl.rc_last_output_to; 20759 } else { 20760 delayed = 0; 20761 } 20762 /* Do the timers, which may override the pacer */ 20763 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 20764 int retval; 20765 20766 retval = rack_process_timers(tp, rack, cts, hpts_calling, 20767 &doing_tlp); 20768 if (retval != 0) { 20769 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 20770 #ifdef TCP_ACCOUNTING 20771 sched_unpin(); 20772 #endif 20773 /* 20774 * If timers want tcp_drop(), then pass error out, 20775 * otherwise suppress it. 20776 */ 20777 return (retval < 0 ? retval : 0); 20778 } 20779 } 20780 if (rack->rc_in_persist) { 20781 if (tcp_in_hpts(rack->rc_tp) == 0) { 20782 /* Timer is not running */ 20783 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 20784 } 20785 #ifdef TCP_ACCOUNTING 20786 sched_unpin(); 20787 #endif 20788 return (0); 20789 } 20790 if ((rack->rc_ack_required == 1) && 20791 (rack->r_timer_override == 0)){ 20792 /* A timeout occurred and no ack has arrived */ 20793 if (tcp_in_hpts(rack->rc_tp) == 0) { 20794 /* Timer is not running */ 20795 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 20796 } 20797 #ifdef TCP_ACCOUNTING 20798 sched_unpin(); 20799 #endif 20800 return (0); 20801 } 20802 if ((rack->r_timer_override) || 20803 (rack->rc_ack_can_sendout_data) || 20804 (delayed) || 20805 (tp->t_state < TCPS_ESTABLISHED)) { 20806 rack->rc_ack_can_sendout_data = 0; 20807 if (tcp_in_hpts(rack->rc_tp)) 20808 tcp_hpts_remove(rack->rc_tp); 20809 } else if (tcp_in_hpts(rack->rc_tp)) { 20810 /* 20811 * On the hpts you can't pass even if ACKNOW is on, we will 20812 * when the hpts fires. 
20813 */ 20814 #ifdef TCP_ACCOUNTING 20815 crtsc = get_cyclecount(); 20816 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 20817 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 20818 } 20819 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 20820 tp->tcp_cnt_counters[SND_BLOCKED]++; 20821 } 20822 sched_unpin(); 20823 #endif 20824 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 20825 return (0); 20826 } 20827 /* Finish out both pacing early and late accounting */ 20828 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 20829 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 20830 early = rack->r_ctl.rc_last_output_to - cts; 20831 } else 20832 early = 0; 20833 if (delayed && (rack->rc_always_pace == 1)) { 20834 rack->r_ctl.rc_agg_delayed += delayed; 20835 rack->r_late = 1; 20836 } else if (early && (rack->rc_always_pace == 1)) { 20837 rack->r_ctl.rc_agg_early += early; 20838 rack->r_early = 1; 20839 } else if (rack->rc_always_pace == 0) { 20840 /* Non-paced we are not late */ 20841 rack->r_ctl.rc_agg_delayed = rack->r_ctl.rc_agg_early = 0; 20842 rack->r_early = rack->r_late = 0; 20843 } 20844 /* Now that early/late accounting is done turn off the flag */ 20845 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 20846 rack->r_wanted_output = 0; 20847 rack->r_timer_override = 0; 20848 if ((tp->t_state != rack->r_state) && 20849 TCPS_HAVEESTABLISHED(tp->t_state)) { 20850 rack_set_state(tp, rack); 20851 } 20852 if ((rack->r_fast_output) && 20853 (doing_tlp == 0) && 20854 (tp->rcv_numsacks == 0)) { 20855 int ret; 20856 20857 error = 0; 20858 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 20859 if (ret >= 0) 20860 return(ret); 20861 else if (error) { 20862 inp = rack->rc_inp; 20863 so = inp->inp_socket; 20864 sb = &so->so_snd; 20865 goto nomore; 20866 } 20867 } 20868 inp = rack->rc_inp; 20869 /* 20870 * For TFO connections in SYN_SENT or SYN_RECEIVED, 20871 * only allow the initial SYN or SYN|ACK and those sent 20872 * by the retransmit timer. 20873 */ 20874 if ((tp->t_flags & TF_FASTOPEN) && 20875 ((tp->t_state == TCPS_SYN_RECEIVED) || 20876 (tp->t_state == TCPS_SYN_SENT)) && 20877 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 20878 (tp->t_rxtshift == 0)) { /* not a retransmit */ 20879 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 20880 so = inp->inp_socket; 20881 sb = &so->so_snd; 20882 goto just_return_nolock; 20883 } 20884 /* 20885 * Determine length of data that should be transmitted, and flags 20886 * that will be used. If there is some data or critical controls 20887 * (SYN, RST) to send, then transmit; otherwise, investigate 20888 * further. 
20889 */ 20890 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 20891 if (tp->t_idle_reduce) { 20892 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 20893 rack_cc_after_idle(rack, tp); 20894 } 20895 tp->t_flags &= ~TF_LASTIDLE; 20896 if (idle) { 20897 if (tp->t_flags & TF_MORETOCOME) { 20898 tp->t_flags |= TF_LASTIDLE; 20899 idle = 0; 20900 } 20901 } 20902 if ((tp->snd_una == tp->snd_max) && 20903 rack->r_ctl.rc_went_idle_time && 20904 (cts > rack->r_ctl.rc_went_idle_time)) { 20905 tot_idle = (cts - rack->r_ctl.rc_went_idle_time); 20906 if (tot_idle > rack_min_probertt_hold) { 20907 /* Count as a probe rtt */ 20908 if (rack->in_probe_rtt == 0) { 20909 rack->r_ctl.rc_lower_rtt_us_cts = cts; 20910 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 20911 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 20912 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 20913 } else { 20914 rack_exit_probertt(rack, cts); 20915 } 20916 } 20917 } 20918 if(rack->policer_detect_on) { 20919 /* 20920 * If we are doing policer detetion we at a minium 20921 * record the time but if possible add back to 20922 * the bucket based on the idle time. 20923 */ 20924 uint64_t idle_t, u64_cts; 20925 20926 segsiz = min(ctf_fixed_maxseg(tp), 20927 rack->r_ctl.rc_pace_min_segs); 20928 u64_cts = tcp_tv_to_lusectick(&tv); 20929 if ((rack->rc_policer_detected == 1) && 20930 (rack->r_ctl.policer_bucket_size > segsiz) && 20931 (rack->r_ctl.policer_bw > 0) && 20932 (u64_cts > rack->r_ctl.last_sendtime)) { 20933 /* We are being policed add back the time */ 20934 idle_t = u64_cts - rack->r_ctl.last_sendtime; 20935 rack_credit_back_policer_idle_time(rack, idle_t, __LINE__); 20936 } 20937 rack->r_ctl.last_sendtime = u64_cts; 20938 } 20939 if (rack_use_fsb && 20940 (rack->r_ctl.fsb.tcp_ip_hdr) && 20941 (rack->r_fsb_inited == 0) && 20942 (rack->r_state != TCPS_CLOSED)) 20943 rack_init_fsb_block(tp, rack, tcp_outflags[tp->t_state]); 20944 if (rack->rc_sendvars_notset == 1) { 20945 rack->r_ctl.idle_snd_una = tp->snd_una; 20946 rack->rc_sendvars_notset = 0; 20947 /* 20948 * Make sure any TCP timers (keep-alive) is not running. 20949 */ 20950 tcp_timer_stop(tp); 20951 } 20952 if ((rack->rack_no_prr == 1) && 20953 (rack->rc_always_pace == 0)) { 20954 /* 20955 * Sanity check before sending, if we have 20956 * no-pacing enabled and prr is turned off that 20957 * is a logistics error. Correct this by turnning 20958 * prr back on. A user *must* set some form of 20959 * pacing in order to turn PRR off. We do this 20960 * in the output path so that we can avoid socket 20961 * option ordering issues that would occur if we 20962 * tried to do it while setting rack_no_prr on. 20963 */ 20964 rack->rack_no_prr = 0; 20965 } 20966 if ((rack->pcm_enabled == 1) && 20967 (rack->pcm_needed == 0) && 20968 (tot_idle > 0)) { 20969 /* 20970 * We have been idle some micro seconds. We need 20971 * to factor this in to see if a PCM is needed. 
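 * As a rough example, assuming tot_idle and t_srtt are both kept
 * in microseconds here: 40000 usecs of idle with a 10000 usec
 * srtt counts as 4 idle rounds, which are added to the rounds
 * elapsed since the last PCM when comparing against
 * rack_pcm_every_n_rounds.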
20972 */ 20973 uint32_t rtts_idle, rnds; 20974 20975 if (tp->t_srtt) 20976 rtts_idle = tot_idle / tp->t_srtt; 20977 else 20978 rtts_idle = 0; 20979 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; 20980 rack->r_ctl.pcm_idle_rounds += rtts_idle; 20981 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { 20982 rack->pcm_needed = 1; 20983 rack_log_pcm(rack, 8, rack->r_ctl.last_pcm_round, rtts_idle, rack->r_ctl.current_round ); 20984 } 20985 } 20986 again: 20987 sendalot = 0; 20988 cts = tcp_get_usecs(&tv); 20989 ms_cts = tcp_tv_to_mssectick(&tv); 20990 tso = 0; 20991 mtu = 0; 20992 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 20993 minseg = segsiz; 20994 if (rack->r_ctl.rc_pace_max_segs == 0) 20995 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 20996 else 20997 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 20998 if (TCPS_HAVEESTABLISHED(tp->t_state) && 20999 (rack->r_ctl.pcm_max_seg == 0)) { 21000 /* 21001 * We set in our first send so we know that the ctf_fixed_maxseg 21002 * has been fully set. If we do it in rack_init() we most likely 21003 * see 512 bytes so we end up at 5120, not desirable. 21004 */ 21005 rack->r_ctl.pcm_max_seg = rc_init_window(rack); 21006 if (rack->r_ctl.pcm_max_seg < (ctf_fixed_maxseg(tp) * 10)) { 21007 /* 21008 * Assure our initial PCM probe is at least 10 MSS. 21009 */ 21010 rack->r_ctl.pcm_max_seg = ctf_fixed_maxseg(tp) * 10; 21011 } 21012 } 21013 if ((rack->r_ctl.pcm_max_seg != 0) && (rack->pcm_needed == 1)) { 21014 uint32_t rw_avail, cwa; 21015 21016 if (tp->snd_wnd > ctf_outstanding(tp)) 21017 rw_avail = tp->snd_wnd - ctf_outstanding(tp); 21018 else 21019 rw_avail = 0; 21020 if (tp->snd_cwnd > ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked)) 21021 cwa = tp->snd_cwnd -ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 21022 else 21023 cwa = 0; 21024 if ((cwa >= rack->r_ctl.pcm_max_seg) && 21025 (rw_avail > rack->r_ctl.pcm_max_seg)) { 21026 /* Raise up the max seg for this trip through */ 21027 pace_max_seg = rack->r_ctl.pcm_max_seg; 21028 /* Disable any fast output */ 21029 rack->r_fast_output = 0; 21030 } 21031 if (rack_verbose_logging) { 21032 rack_log_pcm(rack, 4, 21033 cwa, rack->r_ctl.pcm_max_seg, rw_avail); 21034 } 21035 } 21036 sb_offset = tp->snd_max - tp->snd_una; 21037 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 21038 flags = tcp_outflags[tp->t_state]; 21039 while (rack->rc_free_cnt < rack_free_cache) { 21040 rsm = rack_alloc(rack); 21041 if (rsm == NULL) { 21042 if (hpts_calling) 21043 /* Retry in a ms */ 21044 slot = (1 * HPTS_USEC_IN_MSEC); 21045 so = inp->inp_socket; 21046 sb = &so->so_snd; 21047 goto just_return_nolock; 21048 } 21049 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 21050 rack->rc_free_cnt++; 21051 rsm = NULL; 21052 } 21053 sack_rxmit = 0; 21054 len = 0; 21055 rsm = NULL; 21056 if (flags & TH_RST) { 21057 SOCKBUF_LOCK(&inp->inp_socket->so_snd); 21058 so = inp->inp_socket; 21059 sb = &so->so_snd; 21060 goto send; 21061 } 21062 if (rack->r_ctl.rc_resend) { 21063 /* Retransmit timer */ 21064 rsm = rack->r_ctl.rc_resend; 21065 rack->r_ctl.rc_resend = NULL; 21066 len = rsm->r_end - rsm->r_start; 21067 sack_rxmit = 1; 21068 sendalot = 0; 21069 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 21070 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 21071 __func__, __LINE__, 21072 rsm->r_start, tp->snd_una, tp, rack, rsm)); 21073 sb_offset = rsm->r_start - tp->snd_una; 21074 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 21075 } else if 
(rack->r_collapse_point_valid && 21076 ((rsm = rack_check_collapsed(rack, cts)) != NULL)) { 21077 /* 21078 * If an RSM is returned then enough time has passed 21079 * for us to retransmit it. Move up the collapse point, 21080 * since this rsm has its chance to retransmit now. 21081 */ 21082 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_RXT); 21083 rack->r_ctl.last_collapse_point = rsm->r_end; 21084 /* Are we done? */ 21085 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 21086 rack->r_ctl.high_collapse_point)) 21087 rack->r_collapse_point_valid = 0; 21088 sack_rxmit = 1; 21089 /* We are not doing a TLP */ 21090 doing_tlp = 0; 21091 len = rsm->r_end - rsm->r_start; 21092 sb_offset = rsm->r_start - tp->snd_una; 21093 sendalot = 0; 21094 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 21095 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) { 21096 /* We have a retransmit that takes precedence */ 21097 if ((!IN_FASTRECOVERY(tp->t_flags)) && 21098 ((rsm->r_flags & RACK_MUST_RXT) == 0) && 21099 ((tp->t_flags & TF_WASFRECOVERY) == 0)) { 21100 /* Enter recovery if not induced by a time-out */ 21101 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 21102 } 21103 #ifdef INVARIANTS 21104 if (SEQ_LT(rsm->r_start, tp->snd_una)) { 21105 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n", 21106 tp, rack, rsm, rsm->r_start, tp->snd_una); 21107 } 21108 #endif 21109 len = rsm->r_end - rsm->r_start; 21110 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 21111 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 21112 __func__, __LINE__, 21113 rsm->r_start, tp->snd_una, tp, rack, rsm)); 21114 sb_offset = rsm->r_start - tp->snd_una; 21115 sendalot = 0; 21116 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 21117 if (len > 0) { 21118 sack_rxmit = 1; 21119 KMOD_TCPSTAT_INC(tcps_sack_rexmits); 21120 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes, 21121 min(len, segsiz)); 21122 } 21123 } else if (rack->r_ctl.rc_tlpsend) { 21124 /* Tail loss probe */ 21125 long cwin; 21126 long tlen; 21127 21128 /* 21129 * Check if we can do a TLP with a RACK'd packet 21130 * this can happen if we are not doing the rack 21131 * cheat and we skipped to a TLP and it 21132 * went off. 21133 */ 21134 rsm = rack->r_ctl.rc_tlpsend; 21135 /* We are doing a TLP make sure the flag is preent */ 21136 rsm->r_flags |= RACK_TLP; 21137 rack->r_ctl.rc_tlpsend = NULL; 21138 sack_rxmit = 1; 21139 tlen = rsm->r_end - rsm->r_start; 21140 if (tlen > segsiz) 21141 tlen = segsiz; 21142 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 21143 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 21144 __func__, __LINE__, 21145 rsm->r_start, tp->snd_una, tp, rack, rsm)); 21146 sb_offset = rsm->r_start - tp->snd_una; 21147 cwin = min(tp->snd_wnd, tlen); 21148 len = cwin; 21149 } 21150 if (rack->r_must_retran && 21151 (doing_tlp == 0) && 21152 (SEQ_GT(tp->snd_max, tp->snd_una)) && 21153 (rsm == NULL)) { 21154 /* 21155 * There are two different ways that we 21156 * can get into this block: 21157 * a) This is a non-sack connection, we had a time-out 21158 * and thus r_must_retran was set and everything 21159 * left outstanding as been marked for retransmit. 21160 * b) The MTU of the path shrank, so that everything 21161 * was marked to be retransmitted with the smaller 21162 * mtu and r_must_retran was set. 21163 * 21164 * This means that we expect the sendmap (outstanding) 21165 * to all be marked must. We can use the tmap to 21166 * look at them. 
21167 * 21168 */ 21169 int sendwin, flight; 21170 21171 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 21172 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto); 21173 if (flight >= sendwin) { 21174 /* 21175 * We can't send yet. 21176 */ 21177 so = inp->inp_socket; 21178 sb = &so->so_snd; 21179 goto just_return_nolock; 21180 } 21181 /* 21182 * This is the case a/b mentioned above. All 21183 * outstanding/not-acked should be marked. 21184 * We can use the tmap to find them. 21185 */ 21186 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 21187 if (rsm == NULL) { 21188 /* TSNH */ 21189 rack->r_must_retran = 0; 21190 rack->r_ctl.rc_out_at_rto = 0; 21191 so = inp->inp_socket; 21192 sb = &so->so_snd; 21193 goto just_return_nolock; 21194 } 21195 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { 21196 /* 21197 * The first one does not have the flag, did we collapse 21198 * further up in our list? 21199 */ 21200 rack->r_must_retran = 0; 21201 rack->r_ctl.rc_out_at_rto = 0; 21202 rsm = NULL; 21203 sack_rxmit = 0; 21204 } else { 21205 sack_rxmit = 1; 21206 len = rsm->r_end - rsm->r_start; 21207 sb_offset = rsm->r_start - tp->snd_una; 21208 sendalot = 0; 21209 if ((rack->full_size_rxt == 0) && 21210 (rack->shape_rxt_to_pacing_min == 0) && 21211 (len >= segsiz)) 21212 len = segsiz; 21213 else if (rack->shape_rxt_to_pacing_min && 21214 rack->gp_ready) { 21215 /* We use pacing min as shaping len req */ 21216 uint32_t maxlen; 21217 21218 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 21219 if (len > maxlen) 21220 len = maxlen; 21221 } 21222 /* 21223 * Delay removing the flag RACK_MUST_RXT so 21224 * that the fastpath for retransmit will 21225 * work with this rsm. 21226 */ 21227 } 21228 } 21229 /* 21230 * Enforce a connection sendmap count limit if set 21231 * as long as we are not retransmiting. 21232 */ 21233 if ((rsm == NULL) && 21234 (V_tcp_map_entries_limit > 0) && 21235 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 21236 counter_u64_add(rack_to_alloc_limited, 1); 21237 if (!rack->alloc_limit_reported) { 21238 rack->alloc_limit_reported = 1; 21239 counter_u64_add(rack_alloc_limited_conns, 1); 21240 } 21241 so = inp->inp_socket; 21242 sb = &so->so_snd; 21243 goto just_return_nolock; 21244 } 21245 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 21246 /* we are retransmitting the fin */ 21247 len--; 21248 if (len) { 21249 /* 21250 * When retransmitting data do *not* include the 21251 * FIN. This could happen from a TLP probe. 
21252 */ 21253 flags &= ~TH_FIN; 21254 } 21255 } 21256 if (rsm && rack->r_fsb_inited && 21257 rack_use_rsm_rfo && 21258 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 21259 int ret; 21260 21261 if ((rack->rc_policer_detected == 1) && 21262 (rack->r_ctl.policer_bucket_size > segsiz) && 21263 (rack->r_ctl.policer_bw > 0)) { 21264 /* Check to see if there is room */ 21265 if (rack->r_ctl.current_policer_bucket < len) { 21266 goto skip_fast_output; 21267 } 21268 } 21269 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); 21270 if (ret == 0) 21271 return (0); 21272 } 21273 skip_fast_output: 21274 so = inp->inp_socket; 21275 sb = &so->so_snd; 21276 if (do_a_prefetch == 0) { 21277 kern_prefetch(sb, &do_a_prefetch); 21278 do_a_prefetch = 1; 21279 } 21280 #ifdef NETFLIX_SHARED_CWND 21281 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 21282 rack->rack_enable_scwnd) { 21283 /* We are doing cwnd sharing */ 21284 if (rack->gp_ready && 21285 (rack->rack_attempted_scwnd == 0) && 21286 (rack->r_ctl.rc_scw == NULL) && 21287 tp->t_lib) { 21288 /* The pcbid is in, lets make an attempt */ 21289 counter_u64_add(rack_try_scwnd, 1); 21290 rack->rack_attempted_scwnd = 1; 21291 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 21292 &rack->r_ctl.rc_scw_index, 21293 segsiz); 21294 } 21295 if (rack->r_ctl.rc_scw && 21296 (rack->rack_scwnd_is_idle == 1) && 21297 sbavail(&so->so_snd)) { 21298 /* we are no longer out of data */ 21299 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 21300 rack->rack_scwnd_is_idle = 0; 21301 } 21302 if (rack->r_ctl.rc_scw) { 21303 /* First lets update and get the cwnd */ 21304 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 21305 rack->r_ctl.rc_scw_index, 21306 tp->snd_cwnd, tp->snd_wnd, segsiz); 21307 } 21308 } 21309 #endif 21310 /* 21311 * Get standard flags, and add SYN or FIN if requested by 'hidden' 21312 * state flags. 21313 */ 21314 if (tp->t_flags & TF_NEEDFIN) 21315 flags |= TH_FIN; 21316 if (tp->t_flags & TF_NEEDSYN) 21317 flags |= TH_SYN; 21318 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 21319 void *end_rsm; 21320 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 21321 if (end_rsm) 21322 kern_prefetch(end_rsm, &prefetch_rsm); 21323 prefetch_rsm = 1; 21324 } 21325 SOCKBUF_LOCK(sb); 21326 if ((sack_rxmit == 0) && 21327 (TCPS_HAVEESTABLISHED(tp->t_state) || 21328 (tp->t_flags & TF_FASTOPEN))) { 21329 /* 21330 * We are not retransmitting (sack_rxmit is 0) so we 21331 * are sending new data. This is always based on snd_max. 21332 * Now in theory snd_max may be equal to snd_una, if so 21333 * then nothing is outstanding and the offset would be 0. 
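 * As a rough example with illustrative numbers: if snd_una is
 * 1000, snd_max is 3000 and 10000 bytes sit in the socket buffer,
 * sb_offset becomes 2000 and at most 8000 new bytes are eligible,
 * before the cwnd/rwnd and PRR limits below are applied.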
21334 */ 21335 uint32_t avail; 21336 21337 avail = sbavail(sb); 21338 if (SEQ_GT(tp->snd_max, tp->snd_una) && avail) 21339 sb_offset = tp->snd_max - tp->snd_una; 21340 else 21341 sb_offset = 0; 21342 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { 21343 if (rack->r_ctl.rc_tlp_new_data) { 21344 /* TLP is forcing out new data */ 21345 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 21346 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 21347 } 21348 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { 21349 if (tp->snd_wnd > sb_offset) 21350 len = tp->snd_wnd - sb_offset; 21351 else 21352 len = 0; 21353 } else { 21354 len = rack->r_ctl.rc_tlp_new_data; 21355 } 21356 rack->r_ctl.rc_tlp_new_data = 0; 21357 } else { 21358 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); 21359 } 21360 if ((rack->r_ctl.crte == NULL) && 21361 IN_FASTRECOVERY(tp->t_flags) && 21362 (rack->full_size_rxt == 0) && 21363 (rack->shape_rxt_to_pacing_min == 0) && 21364 (len > segsiz)) { 21365 /* 21366 * For prr=off, we need to send only 1 MSS 21367 * at a time. We do this because another sack could 21368 * be arriving that causes us to send retransmits and 21369 * we don't want to be on a long pace due to a larger send 21370 * that keeps us from sending out the retransmit. 21371 */ 21372 len = segsiz; 21373 } else if (rack->shape_rxt_to_pacing_min && 21374 rack->gp_ready) { 21375 /* We use pacing min as shaping len req */ 21376 uint32_t maxlen; 21377 21378 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 21379 if (len > maxlen) 21380 len = maxlen; 21381 }/* The else is full_size_rxt is on so send it all */ 21382 } else { 21383 uint32_t outstanding; 21384 /* 21385 * We are inside of a Fast recovery episode, this 21386 * is caused by a SACK or 3 dup acks. At this point 21387 * we have sent all the retransmissions and we rely 21388 * on PRR to dictate what we will send in the form of 21389 * new data. 21390 */ 21391 21392 outstanding = tp->snd_max - tp->snd_una; 21393 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 21394 if (tp->snd_wnd > outstanding) { 21395 len = tp->snd_wnd - outstanding; 21396 /* Check to see if we have the data */ 21397 if ((sb_offset + len) > avail) { 21398 /* It does not all fit */ 21399 if (avail > sb_offset) 21400 len = avail - sb_offset; 21401 else 21402 len = 0; 21403 } 21404 } else { 21405 len = 0; 21406 } 21407 } else if (avail > sb_offset) { 21408 len = avail - sb_offset; 21409 } else { 21410 len = 0; 21411 } 21412 if (len > 0) { 21413 if (len > rack->r_ctl.rc_prr_sndcnt) { 21414 len = rack->r_ctl.rc_prr_sndcnt; 21415 } 21416 if (len > 0) { 21417 sub_from_prr = 1; 21418 } 21419 } 21420 if (len > segsiz) { 21421 /* 21422 * We should never send more than a MSS when 21423 * retransmitting or sending new data in prr 21424 * mode unless the override flag is on. Most 21425 * likely the PRR algorithm is not going to 21426 * let us send a lot as well :-) 21427 */ 21428 if (rack->r_ctl.rc_prr_sendalot == 0) { 21429 len = segsiz; 21430 } 21431 } else if (len < segsiz) { 21432 /* 21433 * Do we send any? The idea here is if the 21434 * send empty's the socket buffer we want to 21435 * do it. However if not then lets just wait 21436 * for our prr_sndcnt to get bigger. 
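 * As a rough example with illustrative numbers: if 4000 bytes sit
 * in the socket buffer, sb_offset is 3000 and PRR only allows 500
 * bytes, the send would still leave data behind, so len is zeroed
 * below and we wait for prr_sndcnt to grow instead of emitting a
 * sub-MSS segment.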
21437 */ 21438 long leftinsb; 21439 21440 leftinsb = sbavail(sb) - sb_offset; 21441 if (leftinsb > len) { 21442 /* This send does not empty the sb */ 21443 len = 0; 21444 } 21445 } 21446 } 21447 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 21448 /* 21449 * If you have not established 21450 * and are not doing FAST OPEN 21451 * no data please. 21452 */ 21453 if ((sack_rxmit == 0) && 21454 !(tp->t_flags & TF_FASTOPEN)) { 21455 len = 0; 21456 sb_offset = 0; 21457 } 21458 } 21459 if (prefetch_so_done == 0) { 21460 kern_prefetch(so, &prefetch_so_done); 21461 prefetch_so_done = 1; 21462 } 21463 orig_len = len; 21464 if ((rack->rc_policer_detected == 1) && 21465 (rack->r_ctl.policer_bucket_size > segsiz) && 21466 (rack->r_ctl.policer_bw > 0) && 21467 (len > 0)) { 21468 /* 21469 * Ok we believe we have a policer watching 21470 * what we send, can we send len? If not can 21471 * we tune it down to a smaller value? 21472 */ 21473 uint32_t plen, buck_needs; 21474 21475 plen = rack_policer_check_send(rack, len, segsiz, &buck_needs); 21476 if (plen == 0) { 21477 /* 21478 * We are not allowed to send. How long 21479 * do we need to pace for i.e. how long 21480 * before len is available to send? 21481 */ 21482 uint64_t lentime; 21483 21484 lentime = buck_needs; 21485 lentime *= HPTS_USEC_IN_SEC; 21486 lentime /= rack->r_ctl.policer_bw; 21487 slot = (uint32_t)lentime; 21488 tot_len_this_send = 0; 21489 SOCKBUF_UNLOCK(sb); 21490 if (rack_verbose_logging > 0) 21491 policer_detection_log(rack, len, slot, buck_needs, 0, 12); 21492 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 21493 rack_log_type_just_return(rack, cts, 0, slot, hpts_calling, 0, cwnd_to_use); 21494 goto just_return_clean; 21495 } 21496 if (plen < len) { 21497 sendalot = 0; 21498 len = plen; 21499 } 21500 } 21501 /* 21502 * Lop off SYN bit if it has already been sent. However, if this is 21503 * SYN-SENT state and if segment contains data and if we don't know 21504 * that foreign host supports TAO, suppress sending segment. 21505 */ 21506 if ((flags & TH_SYN) && 21507 SEQ_GT(tp->snd_max, tp->snd_una) && 21508 ((sack_rxmit == 0) && 21509 (tp->t_rxtshift == 0))) { 21510 /* 21511 * When sending additional segments following a TFO SYN|ACK, 21512 * do not include the SYN bit. 21513 */ 21514 if ((tp->t_flags & TF_FASTOPEN) && 21515 (tp->t_state == TCPS_SYN_RECEIVED)) 21516 flags &= ~TH_SYN; 21517 } 21518 /* 21519 * Be careful not to send data and/or FIN on SYN segments. This 21520 * measure is needed to prevent interoperability problems with not 21521 * fully conformant TCP implementations. 
21522 */ 21523 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 21524 len = 0; 21525 flags &= ~TH_FIN; 21526 } 21527 /* 21528 * On TFO sockets, ensure no data is sent in the following cases: 21529 * 21530 * - When retransmitting SYN|ACK on a passively-created socket 21531 * 21532 * - When retransmitting SYN on an actively created socket 21533 * 21534 * - When sending a zero-length cookie (cookie request) on an 21535 * actively created socket 21536 * 21537 * - When the socket is in the CLOSED state (RST is being sent) 21538 */ 21539 if ((tp->t_flags & TF_FASTOPEN) && 21540 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 21541 ((tp->t_state == TCPS_SYN_SENT) && 21542 (tp->t_tfo_client_cookie_len == 0)) || 21543 (flags & TH_RST))) { 21544 sack_rxmit = 0; 21545 len = 0; 21546 } 21547 /* Without fast-open there should never be data sent on a SYN */ 21548 if ((flags & TH_SYN) && !(tp->t_flags & TF_FASTOPEN)) { 21549 len = 0; 21550 } 21551 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 21552 /* We only send 1 MSS if we have a DSACK block */ 21553 add_flag |= RACK_SENT_W_DSACK; 21554 len = segsiz; 21555 } 21556 if (len <= 0) { 21557 /* 21558 * We have nothing to send, or the window shrank, or 21559 * is closed, do we need to go into persists? 21560 */ 21561 len = 0; 21562 if ((tp->snd_wnd == 0) && 21563 (TCPS_HAVEESTABLISHED(tp->t_state)) && 21564 (tp->snd_una == tp->snd_max) && 21565 (sb_offset < (int)sbavail(sb))) { 21566 rack_enter_persist(tp, rack, cts, tp->snd_una); 21567 } 21568 } else if ((rsm == NULL) && 21569 (doing_tlp == 0) && 21570 (len < pace_max_seg)) { 21571 /* 21572 * We are not sending a maximum sized segment for 21573 * some reason. Should we not send anything (think 21574 * sws or persists)? 21575 */ 21576 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 21577 (TCPS_HAVEESTABLISHED(tp->t_state)) && 21578 (len < minseg) && 21579 (len < (int)(sbavail(sb) - sb_offset))) { 21580 /* 21581 * Here the rwnd is less than 21582 * the minimum pacing size, this is not a retransmit, 21583 * we are established and 21584 * the send is not the last in the socket buffer 21585 * we send nothing, and we may enter persists 21586 * if nothing is outstanding. 21587 */ 21588 len = 0; 21589 if (tp->snd_max == tp->snd_una) { 21590 /* 21591 * Nothing out we can 21592 * go into persists. 21593 */ 21594 rack_enter_persist(tp, rack, cts, tp->snd_una); 21595 } 21596 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 21597 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 21598 (len < (int)(sbavail(sb) - sb_offset)) && 21599 (len < minseg)) { 21600 /* 21601 * Here we are not retransmitting, and 21602 * the cwnd is not so small that we could 21603 * not send at least a min size (rxt timer 21604 * not having gone off), We have 2 segments or 21605 * more already in flight, its not the tail end 21606 * of the socket buffer and the cwnd is blocking 21607 * us from sending out a minimum pacing segment size. 21608 * Lets not send anything. 21609 */ 21610 len = 0; 21611 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 21612 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 21613 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 21614 (len < (int)(sbavail(sb) - sb_offset)) && 21615 (TCPS_HAVEESTABLISHED(tp->t_state))) { 21616 /* 21617 * Here we have a send window but we have 21618 * filled it up and we can't send another pacing segment. 21619 * We also have in flight more than 2 segments 21620 * and we are not completing the sb i.e. 
we allow
21621 * the last bytes of the sb to go out even if
21622 * it's not a full pacing segment.
21623 */
21624 len = 0;
21625 } else if ((rack->r_ctl.crte != NULL) &&
21626 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) &&
21627 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) &&
21628 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) &&
21629 (len < (int)(sbavail(sb) - sb_offset))) {
21630 /*
21631 * Here we are doing hardware pacing, this is not a TLP,
21632 * we are not sending a pace max segment size, there is rwnd
21633 * room to send at least N pace_max_seg, the cwnd is greater
21634 * than or equal to a full pacing segment plus 4 MSS and we have 2 or
21635 * more segments in flight and it's not the tail of the socket buffer.
21636 *
21637 * We don't want to send; instead we need to get more ACKs in to
21638 * allow us to send a full pacing segment. Normally, if we are pacing
21639 * about the right speed, we should have finished our pacing
21640 * send as most of the acks have come back if we are at the
21641 * right rate. This is a bit fuzzy since return path delay
21642 * can delay the acks, which is why we want to make sure we
21643 * have cwnd space to have a bit more than a max pace segment in flight.
21644 *
21645 * If we have not gotten our acks back we are pacing at too high a
21646 * rate; delaying will not hurt and will bring our GP estimate down by
21647 * injecting the delay. If we don't do this we will send
21648 * 2 MSS out in response to the acks being clocked in, which
21649 * defeats the point of hw-pacing (i.e. to help us get
21650 * larger TSO's out).
21651 */
21652 len = 0;
21653 }
21654 
21655 }
21656 /* len will be >= 0 after this point. */
21657 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
21658 rack_sndbuf_autoscale(rack);
21659 /*
21660 * Decide if we can use TCP Segmentation Offloading (if supported by
21661 * hardware).
21662 *
21663 * TSO may only be used if we are in a pure bulk sending state. The
21664 * presence of TCP-MD5, SACK retransmits, SACK advertisements and IP
21665 * options prevent using TSO. With TSO the TCP header is the same
21666 * (except for the sequence number) for all generated packets. This
21667 * makes it impossible to transmit any options which vary per
21668 * generated segment or packet.
21669 *
21670 * IPv4 handling has a clear separation of ip options and ip header
21671 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
21672 * the right thing below to provide length of just ip options and thus
21673 * checking for ipoptlen is enough to decide if ip options are present.
21674 */
21675 ipoptlen = 0;
21676 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
21677 /*
21678 * Pre-calculate here as we save another lookup into the darknesses
21679 * of IPsec that way and can actually decide if TSO is ok.
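 *
 * A sketch of how this feeds the TSO decision just below: the
 * computed ipsec_optlen is folded into ipoptlen, and TSO is only
 * enabled when ipoptlen stays 0 (and there is no MD5 signature, no
 * UDP tunneling port, no SACK blocks outstanding and more than one
 * segment of data to move), so any IPsec header overhead by itself
 * disqualifies the send from TSO.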
21680 */ 21681 #ifdef INET6 21682 if (isipv6 && IPSEC_ENABLED(ipv6)) 21683 ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp); 21684 #ifdef INET 21685 else 21686 #endif 21687 #endif /* INET6 */ 21688 #ifdef INET 21689 if (IPSEC_ENABLED(ipv4)) 21690 ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp); 21691 #endif /* INET */ 21692 #endif 21693 21694 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 21695 ipoptlen += ipsec_optlen; 21696 #endif 21697 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && 21698 (tp->t_port == 0) && 21699 ((tp->t_flags & TF_SIGNATURE) == 0) && 21700 tp->rcv_numsacks == 0 && sack_rxmit == 0 && 21701 ipoptlen == 0) 21702 tso = 1; 21703 { 21704 uint32_t outstanding __unused; 21705 21706 outstanding = tp->snd_max - tp->snd_una; 21707 if (tp->t_flags & TF_SENTFIN) { 21708 /* 21709 * If we sent a fin, snd_max is 1 higher than 21710 * snd_una 21711 */ 21712 outstanding--; 21713 } 21714 if (sack_rxmit) { 21715 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 21716 flags &= ~TH_FIN; 21717 } 21718 } 21719 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), 21720 (long)TCP_MAXWIN << tp->rcv_scale); 21721 21722 /* 21723 * Sender silly window avoidance. We transmit under the following 21724 * conditions when len is non-zero: 21725 * 21726 * - We have a full segment (or more with TSO) - This is the last 21727 * buffer in a write()/send() and we are either idle or running 21728 * NODELAY - we've timed out (e.g. persist timer) - we have more 21729 * then 1/2 the maximum send window's worth of data (receiver may be 21730 * limited the window size) - we need to retransmit 21731 */ 21732 if (len) { 21733 if (len >= segsiz) { 21734 goto send; 21735 } 21736 /* 21737 * NOTE! on localhost connections an 'ack' from the remote 21738 * end may occur synchronously with the output and cause us 21739 * to flush a buffer queued with moretocome. XXX 21740 * 21741 */ 21742 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 21743 (idle || (tp->t_flags & TF_NODELAY)) && 21744 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 21745 (tp->t_flags & TF_NOPUSH) == 0) { 21746 pass = 2; 21747 goto send; 21748 } 21749 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 21750 pass = 22; 21751 goto send; 21752 } 21753 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 21754 pass = 4; 21755 goto send; 21756 } 21757 if (sack_rxmit) { 21758 pass = 6; 21759 goto send; 21760 } 21761 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && 21762 (ctf_outstanding(tp) < (segsiz * 2))) { 21763 /* 21764 * We have less than two MSS outstanding (delayed ack) 21765 * and our rwnd will not let us send a full sized 21766 * MSS. Lets go ahead and let this small segment 21767 * out because we want to try to have at least two 21768 * packets inflight to not be caught by delayed ack. 21769 */ 21770 pass = 12; 21771 goto send; 21772 } 21773 } 21774 /* 21775 * Sending of standalone window updates. 21776 * 21777 * Window updates are important when we close our window due to a 21778 * full socket buffer and are opening it again after the application 21779 * reads data from it. Once the window has opened again and the 21780 * remote end starts to send again the ACK clock takes over and 21781 * provides the most current window information. 21782 * 21783 * We must avoid the silly window syndrome whereas every read from 21784 * the receive buffer, no matter how small, causes a window update 21785 * to be sent. 
We also should avoid sending a flurry of window 21786 * updates when the socket buffer had queued a lot of data and the 21787 * application is doing small reads. 21788 * 21789 * Prevent a flurry of pointless window updates by only sending an 21790 * update when we can increase the advertized window by more than 21791 * 1/4th of the socket buffer capacity. When the buffer is getting 21792 * full or is very small be more aggressive and send an update 21793 * whenever we can increase by two mss sized segments. In all other 21794 * situations the ACK's to new incoming data will carry further 21795 * window increases. 21796 * 21797 * Don't send an independent window update if a delayed ACK is 21798 * pending (it will get piggy-backed on it) or the remote side 21799 * already has done a half-close and won't send more data. Skip 21800 * this if the connection is in T/TCP half-open state. 21801 */ 21802 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 21803 !(tp->t_flags & TF_DELACK) && 21804 !TCPS_HAVERCVDFIN(tp->t_state)) { 21805 /* 21806 * "adv" is the amount we could increase the window, taking 21807 * into account that we are limited by TCP_MAXWIN << 21808 * tp->rcv_scale. 21809 */ 21810 int32_t adv; 21811 int oldwin; 21812 21813 adv = recwin; 21814 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 21815 oldwin = (tp->rcv_adv - tp->rcv_nxt); 21816 if (adv > oldwin) 21817 adv -= oldwin; 21818 else { 21819 /* We can't increase the window */ 21820 adv = 0; 21821 } 21822 } else 21823 oldwin = 0; 21824 21825 /* 21826 * If the new window size ends up being the same as or less 21827 * than the old size when it is scaled, then don't force 21828 * a window update. 21829 */ 21830 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 21831 goto dontupdate; 21832 21833 if (adv >= (int32_t)(2 * segsiz) && 21834 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 21835 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 21836 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 21837 pass = 7; 21838 goto send; 21839 } 21840 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 21841 pass = 23; 21842 goto send; 21843 } 21844 } 21845 dontupdate: 21846 21847 /* 21848 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 21849 * is also a catch-all for the retransmit timer timeout case. 21850 */ 21851 if (tp->t_flags & TF_ACKNOW) { 21852 pass = 8; 21853 goto send; 21854 } 21855 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 21856 pass = 9; 21857 goto send; 21858 } 21859 /* 21860 * If our state indicates that FIN should be sent and we have not 21861 * yet done so, then we need to send. 21862 */ 21863 if ((flags & TH_FIN) && 21864 (tp->snd_max == tp->snd_una)) { 21865 pass = 11; 21866 goto send; 21867 } 21868 /* 21869 * No reason to send a segment, just return. 21870 */ 21871 just_return: 21872 SOCKBUF_UNLOCK(sb); 21873 just_return_nolock: 21874 { 21875 int app_limited = CTF_JR_SENT_DATA; 21876 21877 if ((tp->t_flags & TF_FASTOPEN) == 0 && 21878 (flags & TH_FIN) && 21879 (len == 0) && 21880 (sbused(sb) == (tp->snd_max - tp->snd_una)) && 21881 ((tp->snd_max - tp->snd_una) <= segsiz)) { 21882 /* 21883 * Ok less than or right at a MSS is 21884 * outstanding. The original FreeBSD stack would 21885 * have sent a FIN, which can speed things up for 21886 * a transactional application doing a MSG_WAITALL. 21887 * To speed things up since we do *not* send a FIN 21888 * if data is outstanding, we send a "challenge ack". 
21889 * The idea behind that is instead of having to have 21890 * the peer wait for the delayed-ack timer to run off 21891 * we send an ack that makes the peer send us an ack. 21892 */ 21893 rack_send_ack_challange(rack); 21894 } 21895 if (tot_len_this_send > 0) { 21896 rack->r_ctl.fsb.recwin = recwin; 21897 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz, __LINE__); 21898 if ((error == 0) && 21899 (rack->rc_policer_detected == 0) && 21900 rack_use_rfo && 21901 ((flags & (TH_SYN|TH_FIN)) == 0) && 21902 (ipoptlen == 0) && 21903 (tp->rcv_numsacks == 0) && 21904 rack->r_fsb_inited && 21905 TCPS_HAVEESTABLISHED(tp->t_state) && 21906 ((IN_RECOVERY(tp->t_flags)) == 0) && 21907 (rack->r_must_retran == 0) && 21908 ((tp->t_flags & TF_NEEDFIN) == 0) && 21909 (len > 0) && (orig_len > 0) && 21910 (orig_len > len) && 21911 ((orig_len - len) >= segsiz) && 21912 ((optlen == 0) || 21913 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 21914 /* We can send at least one more MSS using our fsb */ 21915 rack_setup_fast_output(tp, rack, sb, len, orig_len, 21916 segsiz, pace_max_seg, hw_tls, flags); 21917 } else 21918 rack->r_fast_output = 0; 21919 rack_log_fsb(rack, tp, so, flags, 21920 ipoptlen, orig_len, len, 0, 21921 1, optlen, __LINE__, 1); 21922 /* Assure when we leave that snd_nxt will point to top */ 21923 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 21924 tp->snd_nxt = tp->snd_max; 21925 } else { 21926 int end_window = 0; 21927 uint32_t seq = tp->gput_ack; 21928 21929 rsm = tqhash_max(rack->r_ctl.tqh); 21930 if (rsm) { 21931 /* 21932 * Mark the last sent that we just-returned (hinting 21933 * that delayed ack may play a role in any rtt measurement). 21934 */ 21935 rsm->r_just_ret = 1; 21936 } 21937 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 21938 rack->r_ctl.rc_agg_delayed = 0; 21939 rack->r_early = 0; 21940 rack->r_late = 0; 21941 rack->r_ctl.rc_agg_early = 0; 21942 if ((ctf_outstanding(tp) + 21943 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), 21944 minseg)) >= tp->snd_wnd) { 21945 /* We are limited by the rwnd */ 21946 app_limited = CTF_JR_RWND_LIMITED; 21947 if (IN_FASTRECOVERY(tp->t_flags)) 21948 rack->r_ctl.rc_prr_sndcnt = 0; 21949 } else if (ctf_outstanding(tp) >= sbavail(sb)) { 21950 /* We are limited by whats available -- app limited */ 21951 app_limited = CTF_JR_APP_LIMITED; 21952 if (IN_FASTRECOVERY(tp->t_flags)) 21953 rack->r_ctl.rc_prr_sndcnt = 0; 21954 } else if ((idle == 0) && 21955 ((tp->t_flags & TF_NODELAY) == 0) && 21956 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 21957 (len < segsiz)) { 21958 /* 21959 * No delay is not on and the 21960 * user is sending less than 1MSS. This 21961 * brings out SWS avoidance so we 21962 * don't send. Another app-limited case. 21963 */ 21964 app_limited = CTF_JR_APP_LIMITED; 21965 } else if (tp->t_flags & TF_NOPUSH) { 21966 /* 21967 * The user has requested no push of 21968 * the last segment and we are 21969 * at the last segment. Another app 21970 * limited case. 21971 */ 21972 app_limited = CTF_JR_APP_LIMITED; 21973 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) { 21974 /* Its the cwnd */ 21975 app_limited = CTF_JR_CWND_LIMITED; 21976 } else if (IN_FASTRECOVERY(tp->t_flags) && 21977 (rack->rack_no_prr == 0) && 21978 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { 21979 app_limited = CTF_JR_PRR; 21980 } else { 21981 /* Now why here are we not sending? 
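 * Every limit we know how to name (rwnd, available app data,
 * NOPUSH, cwnd, PRR) was ruled out above, so this falls through
 * to the catch-all CTF_JR_ASSESSING bucket below.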
*/
21982 #ifdef NOW
21983 #ifdef INVARIANTS
21984 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use);
21985 #endif
21986 #endif
21987 app_limited = CTF_JR_ASSESSING;
21988 }
21989 /*
21990 * App limited in some fashion, for our pacing GP
21991 * measurements we don't want any gap (even cwnd).
21992 * Close down the measurement window.
21993 */
21994 if (rack_cwnd_block_ends_measure &&
21995 ((app_limited == CTF_JR_CWND_LIMITED) ||
21996 (app_limited == CTF_JR_PRR))) {
21997 /*
21998 * The reason we are not sending is
21999 * the cwnd (or prr). We have been configured
22000 * to end the measurement window in
22001 * this case.
22002 */
22003 end_window = 1;
22004 } else if (rack_rwnd_block_ends_measure &&
22005 (app_limited == CTF_JR_RWND_LIMITED)) {
22006 /*
22007 * We are rwnd limited and have been
22008 * configured to end the measurement
22009 * window in this case.
22010 */
22011 end_window = 1;
22012 } else if (app_limited == CTF_JR_APP_LIMITED) {
22013 /*
22014 * A true application limited period, we have
22015 * run out of data.
22016 */
22017 end_window = 1;
22018 } else if (app_limited == CTF_JR_ASSESSING) {
22019 /*
22020 * In the assessing case we hit the end of
22021 * the if/else and had no known reason.
22022 * This will panic us under invariants.
22023 *
22024 * If we get this out in logs we need to
22025 * investigate which reason we missed.
22026 */
22027 end_window = 1;
22028 }
22029 if (end_window) {
22030 uint8_t log = 0;
22031 
22032 /* Adjust the Gput measurement */
22033 if ((tp->t_flags & TF_GPUTINPROG) &&
22034 SEQ_GT(tp->gput_ack, tp->snd_max)) {
22035 tp->gput_ack = tp->snd_max;
22036 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) {
22037 /*
22038 * There is not enough to measure.
22039 */
22040 tp->t_flags &= ~TF_GPUTINPROG;
22041 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
22042 rack->r_ctl.rc_gp_srtt /*flex1*/,
22043 tp->gput_seq,
22044 0, 0, 18, __LINE__, NULL, 0);
22045 } else
22046 log = 1;
22047 }
22048 /* Mark the last packet as app limited */
22049 rsm = tqhash_max(rack->r_ctl.tqh);
22050 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
22051 if (rack->r_ctl.rc_app_limited_cnt == 0)
22052 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
22053 else {
22054 /*
22055 * Go out to the end of the app limited chain and mark
22056 * this new one as next, then move the end_appl up
22057 * to this guy.
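 *
 * Roughly, the app-limited marks form a chain through the
 * scoreboard:
 *
 *   rc_first_appl -> ... -> rc_end_appl
 *
 * where each marked rsm's r_nseq_appl records the starting
 * sequence of the next marked send, so below we point the old
 * rc_end_appl at rsm->r_start and then make rsm the new tail.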
22058 */ 22059 if (rack->r_ctl.rc_end_appl) 22060 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 22061 rack->r_ctl.rc_end_appl = rsm; 22062 } 22063 rsm->r_flags |= RACK_APP_LIMITED; 22064 rack->r_ctl.rc_app_limited_cnt++; 22065 } 22066 if (log) 22067 rack_log_pacing_delay_calc(rack, 22068 rack->r_ctl.rc_app_limited_cnt, seq, 22069 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); 22070 } 22071 } 22072 /* Check if we need to go into persists or not */ 22073 if ((tp->snd_max == tp->snd_una) && 22074 TCPS_HAVEESTABLISHED(tp->t_state) && 22075 sbavail(sb) && 22076 (sbavail(sb) > tp->snd_wnd) && 22077 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 22078 /* Yes lets make sure to move to persist before timer-start */ 22079 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); 22080 } 22081 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 22082 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use); 22083 } 22084 just_return_clean: 22085 #ifdef NETFLIX_SHARED_CWND 22086 if ((sbavail(sb) == 0) && 22087 rack->r_ctl.rc_scw) { 22088 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 22089 rack->rack_scwnd_is_idle = 1; 22090 } 22091 #endif 22092 #ifdef TCP_ACCOUNTING 22093 if (tot_len_this_send > 0) { 22094 crtsc = get_cyclecount(); 22095 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22096 tp->tcp_cnt_counters[SND_OUT_DATA]++; 22097 } 22098 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22099 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 22100 } 22101 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22102 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 22103 } 22104 } else { 22105 crtsc = get_cyclecount(); 22106 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22107 tp->tcp_cnt_counters[SND_LIMITED]++; 22108 } 22109 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22110 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 22111 } 22112 } 22113 sched_unpin(); 22114 #endif 22115 return (0); 22116 22117 send: 22118 if ((rack->r_ctl.crte != NULL) && 22119 (rsm == NULL) && 22120 ((rack->rc_hw_nobuf == 1) || 22121 (rack_hw_check_queue && (check_done == 0)))) { 22122 /* 22123 * We only want to do this once with the hw_check_queue, 22124 * for the enobuf case we would only do it once if 22125 * we come around to again, the flag will be clear. 22126 */ 22127 check_done = 1; 22128 slot = rack_check_queue_level(rack, tp, &tv, cts, len, segsiz); 22129 if (slot) { 22130 rack->r_ctl.rc_agg_delayed = 0; 22131 rack->r_ctl.rc_agg_early = 0; 22132 rack->r_early = 0; 22133 rack->r_late = 0; 22134 SOCKBUF_UNLOCK(&so->so_snd); 22135 goto skip_all_send; 22136 } 22137 } 22138 if (rsm || sack_rxmit) 22139 counter_u64_add(rack_nfto_resend, 1); 22140 else 22141 counter_u64_add(rack_non_fto_send, 1); 22142 if ((flags & TH_FIN) && 22143 sbavail(sb)) { 22144 /* 22145 * We do not transmit a FIN 22146 * with data outstanding. We 22147 * need to make it so all data 22148 * is acked first. 22149 */ 22150 flags &= ~TH_FIN; 22151 if (TCPS_HAVEESTABLISHED(tp->t_state) && 22152 (sbused(sb) == (tp->snd_max - tp->snd_una)) && 22153 ((tp->snd_max - tp->snd_una) <= segsiz)) { 22154 /* 22155 * Ok less than or right at a MSS is 22156 * outstanding. The original FreeBSD stack would 22157 * have sent a FIN, which can speed things up for 22158 * a transactional application doing a MSG_WAITALL. 22159 * To speed things up since we do *not* send a FIN 22160 * if data is outstanding, we send a "challenge ack". 
22161 * The idea behind that is instead of having to have 22162 * the peer wait for the delayed-ack timer to run off 22163 * we send an ack that makes the peer send us an ack. 22164 */ 22165 rack_send_ack_challange(rack); 22166 } 22167 } 22168 /* Enforce stack imposed max seg size if we have one */ 22169 if (pace_max_seg && 22170 (len > pace_max_seg)) { 22171 mark = 1; 22172 len = pace_max_seg; 22173 } 22174 if ((rsm == NULL) && 22175 (rack->pcm_in_progress == 0) && 22176 (rack->r_ctl.pcm_max_seg > 0) && 22177 (len >= rack->r_ctl.pcm_max_seg)) { 22178 /* It is large enough for a measurement */ 22179 add_flag |= RACK_IS_PCM; 22180 rack_log_pcm(rack, 5, len, rack->r_ctl.pcm_max_seg, add_flag); 22181 } else if (rack_verbose_logging) { 22182 rack_log_pcm(rack, 6, len, rack->r_ctl.pcm_max_seg, add_flag); 22183 } 22184 22185 SOCKBUF_LOCK_ASSERT(sb); 22186 if (len > 0) { 22187 if (len >= segsiz) 22188 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 22189 else 22190 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 22191 } 22192 /* 22193 * Before ESTABLISHED, force sending of initial options unless TCP 22194 * set not to do any options. NOTE: we assume that the IP/TCP header 22195 * plus TCP options always fit in a single mbuf, leaving room for a 22196 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 22197 * + optlen <= MCLBYTES 22198 */ 22199 optlen = 0; 22200 #ifdef INET6 22201 if (isipv6) 22202 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 22203 else 22204 #endif 22205 hdrlen = sizeof(struct tcpiphdr); 22206 22207 /* 22208 * Ok what seq are we sending from. If we have 22209 * no rsm to use, then we look at various bits, 22210 * if we are putting out a SYN it will be ISS. 22211 * If we are retransmitting a FIN it will 22212 * be snd_max-1 else its snd_max. 22213 */ 22214 if (rsm == NULL) { 22215 if (flags & TH_SYN) 22216 rack_seq = tp->iss; 22217 else if ((flags & TH_FIN) && 22218 (tp->t_flags & TF_SENTFIN)) 22219 rack_seq = tp->snd_max - 1; 22220 else 22221 rack_seq = tp->snd_max; 22222 } else { 22223 rack_seq = rsm->r_start; 22224 } 22225 /* 22226 * Compute options for segment. We only have to care about SYN and 22227 * established connection segments. Options for SYN-ACK segments 22228 * are handled in TCP syncache. 22229 */ 22230 to.to_flags = 0; 22231 if ((tp->t_flags & TF_NOOPT) == 0) { 22232 /* Maximum segment size. */ 22233 if (flags & TH_SYN) { 22234 to.to_mss = tcp_mssopt(&inp->inp_inc); 22235 if (tp->t_port) 22236 to.to_mss -= V_tcp_udp_tunneling_overhead; 22237 to.to_flags |= TOF_MSS; 22238 22239 /* 22240 * On SYN or SYN|ACK transmits on TFO connections, 22241 * only include the TFO option if it is not a 22242 * retransmit, as the presence of the TFO option may 22243 * have caused the original SYN or SYN|ACK to have 22244 * been dropped by a middlebox. 22245 */ 22246 if ((tp->t_flags & TF_FASTOPEN) && 22247 (tp->t_rxtshift == 0)) { 22248 if (tp->t_state == TCPS_SYN_RECEIVED) { 22249 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 22250 to.to_tfo_cookie = 22251 (u_int8_t *)&tp->t_tfo_cookie.server; 22252 to.to_flags |= TOF_FASTOPEN; 22253 wanted_cookie = 1; 22254 } else if (tp->t_state == TCPS_SYN_SENT) { 22255 to.to_tfo_len = 22256 tp->t_tfo_client_cookie_len; 22257 to.to_tfo_cookie = 22258 tp->t_tfo_cookie.client; 22259 to.to_flags |= TOF_FASTOPEN; 22260 wanted_cookie = 1; 22261 /* 22262 * If we wind up having more data to 22263 * send with the SYN than can fit in 22264 * one segment, don't send any more 22265 * until the SYN|ACK comes back from 22266 * the other end. 
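 *
 * For illustration (hypothetical sizes): if the application
 * queued, say, 10k of data behind a TFO SYN, only what fits in
 * the first segment rides out with the SYN; clearing sendalot
 * below keeps us from looping to emit the rest until the peer's
 * SYN|ACK comes back.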
22267 */ 22268 sendalot = 0; 22269 } 22270 } 22271 } 22272 /* Window scaling. */ 22273 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 22274 to.to_wscale = tp->request_r_scale; 22275 to.to_flags |= TOF_SCALE; 22276 } 22277 /* Timestamps. */ 22278 if ((tp->t_flags & TF_RCVD_TSTMP) || 22279 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 22280 uint32_t ts_to_use; 22281 22282 if ((rack->r_rcvpath_rtt_up == 1) && 22283 (ms_cts == rack->r_ctl.last_rcv_tstmp_for_rtt)) { 22284 /* 22285 * When we are doing a rcv_rtt probe all 22286 * other timestamps use the next msec. This 22287 * is safe since our previous ack is in the 22288 * air and we will just have a few more 22289 * on the next ms. This assures that only 22290 * the one ack has the ms_cts that was on 22291 * our ack-probe. 22292 */ 22293 ts_to_use = ms_cts + 1; 22294 } else { 22295 ts_to_use = ms_cts; 22296 } 22297 to.to_tsval = ts_to_use + tp->ts_offset; 22298 to.to_tsecr = tp->ts_recent; 22299 to.to_flags |= TOF_TS; 22300 if ((len == 0) && 22301 (TCPS_HAVEESTABLISHED(tp->t_state)) && 22302 ((ms_cts - rack->r_ctl.last_rcv_tstmp_for_rtt) > RCV_PATH_RTT_MS) && 22303 (tp->snd_una == tp->snd_max) && 22304 (flags & TH_ACK) && 22305 (sbavail(sb) == 0) && 22306 (rack->r_ctl.current_round != 0) && 22307 ((flags & (TH_SYN|TH_FIN)) == 0) && 22308 (rack->r_rcvpath_rtt_up == 0)) { 22309 rack->r_ctl.last_rcv_tstmp_for_rtt = ms_cts; 22310 rack->r_ctl.last_time_of_arm_rcv = cts; 22311 rack->r_rcvpath_rtt_up = 1; 22312 /* Subtract 1 from seq to force a response */ 22313 rack_seq--; 22314 } 22315 } 22316 /* Set receive buffer autosizing timestamp. */ 22317 if (tp->rfbuf_ts == 0 && 22318 (so->so_rcv.sb_flags & SB_AUTOSIZE)) { 22319 tp->rfbuf_ts = ms_cts; 22320 } 22321 /* Selective ACK's. */ 22322 if (tp->t_flags & TF_SACK_PERMIT) { 22323 if (flags & TH_SYN) 22324 to.to_flags |= TOF_SACKPERM; 22325 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 22326 tp->rcv_numsacks > 0) { 22327 to.to_flags |= TOF_SACK; 22328 to.to_nsacks = tp->rcv_numsacks; 22329 to.to_sacks = (u_char *)tp->sackblks; 22330 } 22331 } 22332 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 22333 /* TCP-MD5 (RFC2385). */ 22334 if (tp->t_flags & TF_SIGNATURE) 22335 to.to_flags |= TOF_SIGNATURE; 22336 #endif 22337 22338 /* Processing the options. */ 22339 hdrlen += optlen = tcp_addoptions(&to, opt); 22340 /* 22341 * If we wanted a TFO option to be added, but it was unable 22342 * to fit, ensure no data is sent. 22343 */ 22344 if ((tp->t_flags & TF_FASTOPEN) && wanted_cookie && 22345 !(to.to_flags & TOF_FASTOPEN)) 22346 len = 0; 22347 } 22348 if (tp->t_port) { 22349 if (V_tcp_udp_tunneling_port == 0) { 22350 /* The port was removed?? 
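 *
 * That is, V_tcp_udp_tunneling_port went to zero (presumably the
 * administrator cleared the tunneling-port sysctl) after this
 * connection started using UDP encapsulation, so there is no
 * valid source port to stamp on the outer UDP header and we bail
 * out below with EHOSTUNREACH rather than emit a bogus segment.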
*/ 22351 SOCKBUF_UNLOCK(&so->so_snd); 22352 #ifdef TCP_ACCOUNTING 22353 crtsc = get_cyclecount(); 22354 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22355 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22356 } 22357 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22358 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22359 } 22360 sched_unpin(); 22361 #endif 22362 return (EHOSTUNREACH); 22363 } 22364 hdrlen += sizeof(struct udphdr); 22365 } 22366 #ifdef INET6 22367 if (isipv6) 22368 ipoptlen = ip6_optlen(inp); 22369 else 22370 #endif 22371 if (inp->inp_options) 22372 ipoptlen = inp->inp_options->m_len - 22373 offsetof(struct ipoption, ipopt_list); 22374 else 22375 ipoptlen = 0; 22376 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 22377 ipoptlen += ipsec_optlen; 22378 #endif 22379 22380 /* 22381 * Adjust data length if insertion of options will bump the packet 22382 * length beyond the t_maxseg length. Clear the FIN bit because we 22383 * cut off the tail of the segment. 22384 */ 22385 if (len + optlen + ipoptlen > tp->t_maxseg) { 22386 if (tso) { 22387 uint32_t if_hw_tsomax; 22388 uint32_t moff; 22389 int32_t max_len; 22390 22391 /* extract TSO information */ 22392 if_hw_tsomax = tp->t_tsomax; 22393 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 22394 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 22395 KASSERT(ipoptlen == 0, 22396 ("%s: TSO can't do IP options", __func__)); 22397 22398 /* 22399 * Check if we should limit by maximum payload 22400 * length: 22401 */ 22402 if (if_hw_tsomax != 0) { 22403 /* compute maximum TSO length */ 22404 max_len = (if_hw_tsomax - hdrlen - 22405 max_linkhdr); 22406 if (max_len <= 0) { 22407 len = 0; 22408 } else if (len > max_len) { 22409 sendalot = 1; 22410 len = max_len; 22411 mark = 2; 22412 } 22413 } 22414 /* 22415 * Prevent the last segment from being fractional 22416 * unless the send sockbuf can be emptied: 22417 */ 22418 max_len = (tp->t_maxseg - optlen); 22419 if ((sb_offset + len) < sbavail(sb)) { 22420 moff = len % (u_int)max_len; 22421 if (moff != 0) { 22422 mark = 3; 22423 len -= moff; 22424 } 22425 } 22426 /* 22427 * In case there are too many small fragments don't 22428 * use TSO: 22429 */ 22430 if (len <= max_len) { 22431 mark = 4; 22432 tso = 0; 22433 } 22434 /* 22435 * Send the FIN in a separate segment after the bulk 22436 * sending is done. We don't trust the TSO 22437 * implementations to clear the FIN flag on all but 22438 * the last segment. 22439 */ 22440 if (tp->t_flags & TF_NEEDFIN) { 22441 sendalot = 4; 22442 } 22443 } else { 22444 mark = 5; 22445 if (optlen + ipoptlen >= tp->t_maxseg) { 22446 /* 22447 * Since we don't have enough space to put 22448 * the IP header chain and the TCP header in 22449 * one packet as required by RFC 7112, don't 22450 * send it. Also ensure that at least one 22451 * byte of the payload can be put into the 22452 * TCP segment. 22453 */ 22454 SOCKBUF_UNLOCK(&so->so_snd); 22455 error = EMSGSIZE; 22456 sack_rxmit = 0; 22457 goto out; 22458 } 22459 len = tp->t_maxseg - optlen - ipoptlen; 22460 sendalot = 5; 22461 } 22462 } else { 22463 tso = 0; 22464 mark = 6; 22465 } 22466 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 22467 ("%s: len > IP_MAXPACKET", __func__)); 22468 #ifdef DIAGNOSTIC 22469 #ifdef INET6 22470 if (max_linkhdr + hdrlen > MCLBYTES) 22471 #else 22472 if (max_linkhdr + hdrlen > MHLEN) 22473 #endif 22474 panic("tcphdr too big"); 22475 #endif 22476 22477 /* 22478 * This KASSERT is here to catch edge cases at a well defined place. 22479 * Before, those had triggered (random) panic conditions further 22480 * down. 
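 *
 * For example, the TSO sizing above defensively drives len to 0
 * when if_hw_tsomax leaves no room after hdrlen and max_linkhdr,
 * and the option/ip-option adjustments can shrink it as well, so
 * the assert plus the explicit len == 0 && TH_FIN check below
 * give those cases one well defined landing spot.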
22481 */ 22482 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 22483 if ((len == 0) && 22484 (flags & TH_FIN) && 22485 (sbused(sb))) { 22486 /* 22487 * We have outstanding data, don't send a fin by itself!. 22488 * 22489 * Check to see if we need to send a challenge ack. 22490 */ 22491 if ((sbused(sb) == (tp->snd_max - tp->snd_una)) && 22492 ((tp->snd_max - tp->snd_una) <= segsiz)) { 22493 /* 22494 * Ok less than or right at a MSS is 22495 * outstanding. The original FreeBSD stack would 22496 * have sent a FIN, which can speed things up for 22497 * a transactional application doing a MSG_WAITALL. 22498 * To speed things up since we do *not* send a FIN 22499 * if data is outstanding, we send a "challenge ack". 22500 * The idea behind that is instead of having to have 22501 * the peer wait for the delayed-ack timer to run off 22502 * we send an ack that makes the peer send us an ack. 22503 */ 22504 rack_send_ack_challange(rack); 22505 } 22506 goto just_return; 22507 } 22508 /* 22509 * Grab a header mbuf, attaching a copy of data to be transmitted, 22510 * and initialize the header from the template for sends on this 22511 * connection. 22512 */ 22513 hw_tls = tp->t_nic_ktls_xmit != 0; 22514 if (len) { 22515 uint32_t max_val; 22516 uint32_t moff; 22517 22518 if (pace_max_seg) 22519 max_val = pace_max_seg; 22520 else 22521 max_val = len; 22522 /* 22523 * We allow a limit on sending with hptsi. 22524 */ 22525 if (len > max_val) { 22526 mark = 7; 22527 len = max_val; 22528 } 22529 #ifdef INET6 22530 if (MHLEN < hdrlen + max_linkhdr) 22531 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 22532 else 22533 #endif 22534 m = m_gethdr(M_NOWAIT, MT_DATA); 22535 22536 if (m == NULL) { 22537 SOCKBUF_UNLOCK(sb); 22538 error = ENOBUFS; 22539 sack_rxmit = 0; 22540 goto out; 22541 } 22542 m->m_data += max_linkhdr; 22543 m->m_len = hdrlen; 22544 22545 /* 22546 * Start the m_copy functions from the closest mbuf to the 22547 * sb_offset in the socket buffer chain. 22548 */ 22549 mb = sbsndptr_noadv(sb, sb_offset, &moff); 22550 s_mb = mb; 22551 s_moff = moff; 22552 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 22553 m_copydata(mb, moff, (int)len, 22554 mtod(m, caddr_t)+hdrlen); 22555 /* 22556 * If we are not retransmitting advance the 22557 * sndptr to help remember the next place in 22558 * the sb. 22559 */ 22560 if (rsm == NULL) 22561 sbsndptr_adv(sb, mb, len); 22562 m->m_len += len; 22563 } else { 22564 struct sockbuf *msb; 22565 22566 /* 22567 * If we are not retransmitting pass in msb so 22568 * the socket buffer can be advanced. Otherwise 22569 * set it to NULL if its a retransmission since 22570 * we don't want to change the sb remembered 22571 * location. 22572 */ 22573 if (rsm == NULL) 22574 msb = sb; 22575 else 22576 msb = NULL; 22577 m->m_next = tcp_m_copym( 22578 mb, moff, &len, 22579 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 22580 ((rsm == NULL) ? hw_tls : 0) 22581 #ifdef NETFLIX_COPY_ARGS 22582 , &s_mb, &s_moff 22583 #endif 22584 ); 22585 if (len <= (tp->t_maxseg - optlen)) { 22586 /* 22587 * Must have ran out of mbufs for the copy 22588 * shorten it to no longer need tso. Lets 22589 * not put on sendalot since we are low on 22590 * mbufs. 
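 *
 * tcp_m_copym() is passed &len and may hand back less than was
 * asked for when clusters are scarce; if what we actually copied
 * now fits in a single (t_maxseg - optlen) segment there is
 * nothing for TSO to split, so we just turn it off for this send.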
22591 */ 22592 tso = 0; 22593 } 22594 if (m->m_next == NULL) { 22595 SOCKBUF_UNLOCK(sb); 22596 (void)m_free(m); 22597 error = ENOBUFS; 22598 sack_rxmit = 0; 22599 goto out; 22600 } 22601 } 22602 if (sack_rxmit) { 22603 if (rsm && (rsm->r_flags & RACK_TLP)) { 22604 /* 22605 * TLP should not count in retran count, but 22606 * in its own bin 22607 */ 22608 counter_u64_add(rack_tlp_retran, 1); 22609 counter_u64_add(rack_tlp_retran_bytes, len); 22610 } else { 22611 tp->t_sndrexmitpack++; 22612 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 22613 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 22614 } 22615 #ifdef STATS 22616 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 22617 len); 22618 #endif 22619 } else { 22620 KMOD_TCPSTAT_INC(tcps_sndpack); 22621 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 22622 #ifdef STATS 22623 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 22624 len); 22625 #endif 22626 } 22627 /* 22628 * If we're sending everything we've got, set PUSH. (This 22629 * will keep happy those implementations which only give 22630 * data to the user when a buffer fills or a PUSH comes in.) 22631 */ 22632 if (sb_offset + len == sbused(sb) && 22633 sbused(sb) && 22634 !(flags & TH_SYN)) { 22635 flags |= TH_PUSH; 22636 add_flag |= RACK_HAD_PUSH; 22637 } 22638 22639 SOCKBUF_UNLOCK(sb); 22640 } else { 22641 SOCKBUF_UNLOCK(sb); 22642 if (tp->t_flags & TF_ACKNOW) 22643 KMOD_TCPSTAT_INC(tcps_sndacks); 22644 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 22645 KMOD_TCPSTAT_INC(tcps_sndctrl); 22646 else 22647 KMOD_TCPSTAT_INC(tcps_sndwinup); 22648 22649 m = m_gethdr(M_NOWAIT, MT_DATA); 22650 if (m == NULL) { 22651 error = ENOBUFS; 22652 sack_rxmit = 0; 22653 goto out; 22654 } 22655 #ifdef INET6 22656 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 22657 MHLEN >= hdrlen) { 22658 M_ALIGN(m, hdrlen); 22659 } else 22660 #endif 22661 m->m_data += max_linkhdr; 22662 m->m_len = hdrlen; 22663 } 22664 SOCKBUF_UNLOCK_ASSERT(sb); 22665 m->m_pkthdr.rcvif = (struct ifnet *)0; 22666 #ifdef MAC 22667 mac_inpcb_create_mbuf(inp, m); 22668 #endif 22669 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 22670 #ifdef INET6 22671 if (isipv6) 22672 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 22673 else 22674 #endif /* INET6 */ 22675 #ifdef INET 22676 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 22677 #endif 22678 th = rack->r_ctl.fsb.th; 22679 udp = rack->r_ctl.fsb.udp; 22680 if (udp) { 22681 #ifdef INET6 22682 if (isipv6) 22683 ulen = hdrlen + len - sizeof(struct ip6_hdr); 22684 else 22685 #endif /* INET6 */ 22686 ulen = hdrlen + len - sizeof(struct ip); 22687 udp->uh_ulen = htons(ulen); 22688 } 22689 } else { 22690 #ifdef INET6 22691 if (isipv6) { 22692 ip6 = mtod(m, struct ip6_hdr *); 22693 if (tp->t_port) { 22694 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 22695 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 22696 udp->uh_dport = tp->t_port; 22697 ulen = hdrlen + len - sizeof(struct ip6_hdr); 22698 udp->uh_ulen = htons(ulen); 22699 th = (struct tcphdr *)(udp + 1); 22700 } else 22701 th = (struct tcphdr *)(ip6 + 1); 22702 tcpip_fillheaders(inp, tp->t_port, ip6, th); 22703 } else 22704 #endif /* INET6 */ 22705 { 22706 #ifdef INET 22707 ip = mtod(m, struct ip *); 22708 if (tp->t_port) { 22709 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 22710 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 22711 udp->uh_dport = tp->t_port; 22712 ulen = hdrlen + len - sizeof(struct ip); 22713 udp->uh_ulen = htons(ulen); 22714 th = (struct tcphdr *)(udp + 1); 22715 } else 22716 
th = (struct tcphdr *)(ip + 1); 22717 tcpip_fillheaders(inp, tp->t_port, ip, th); 22718 #endif 22719 } 22720 } 22721 /* 22722 * If we are starting a connection, send ECN setup SYN packet. If we 22723 * are on a retransmit, we may resend those bits a number of times 22724 * as per RFC 3168. 22725 */ 22726 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { 22727 flags |= tcp_ecn_output_syn_sent(tp); 22728 } 22729 /* Also handle parallel SYN for ECN */ 22730 if (TCPS_HAVERCVDSYN(tp->t_state) && 22731 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 22732 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit); 22733 if ((tp->t_state == TCPS_SYN_RECEIVED) && 22734 (tp->t_flags2 & TF2_ECN_SND_ECE)) 22735 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 22736 #ifdef INET6 22737 if (isipv6) { 22738 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 22739 ip6->ip6_flow |= htonl(ect << 20); 22740 } 22741 else 22742 #endif 22743 { 22744 #ifdef INET 22745 ip->ip_tos &= ~IPTOS_ECN_MASK; 22746 ip->ip_tos |= ect; 22747 #endif 22748 } 22749 } 22750 th->th_seq = htonl(rack_seq); 22751 th->th_ack = htonl(tp->rcv_nxt); 22752 tcp_set_flags(th, flags); 22753 /* 22754 * Calculate receive window. Don't shrink window, but avoid silly 22755 * window syndrome. 22756 * If a RST segment is sent, advertise a window of zero. 22757 */ 22758 if (flags & TH_RST) { 22759 recwin = 0; 22760 } else { 22761 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 22762 recwin < (long)segsiz) { 22763 recwin = 0; 22764 } 22765 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 22766 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 22767 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 22768 } 22769 22770 /* 22771 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 22772 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 22773 * handled in syncache. 22774 */ 22775 if (flags & TH_SYN) 22776 th->th_win = htons((u_short) 22777 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 22778 else { 22779 /* Avoid shrinking window with window scaling. */ 22780 recwin = roundup2(recwin, 1 << tp->rcv_scale); 22781 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 22782 } 22783 /* 22784 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 22785 * window. This may cause the remote transmitter to stall. This 22786 * flag tells soreceive() to disable delayed acknowledgements when 22787 * draining the buffer. This can occur if the receiver is 22788 * attempting to read more data than can be buffered prior to 22789 * transmitting on the connection. 22790 */ 22791 if (th->th_win == 0) { 22792 tp->t_sndzerowin++; 22793 tp->t_flags |= TF_RXWIN0SENT; 22794 } else 22795 tp->t_flags &= ~TF_RXWIN0SENT; 22796 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 22797 /* Now are we using fsb?, if so copy the template data to the mbuf */ 22798 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 22799 uint8_t *cpto; 22800 22801 cpto = mtod(m, uint8_t *); 22802 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 22803 /* 22804 * We have just copied in: 22805 * IP/IP6 22806 * <optional udphdr> 22807 * tcphdr (no options) 22808 * 22809 * We need to grab the correct pointers into the mbuf 22810 * for both the tcp header, and possibly the udp header (if tunneling). 22811 * We do this by using the offset in the copy buffer and adding it 22812 * to the mbuf base pointer (cpto). 
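 *
 * For illustration, on a plain IPv4 connection the saved offset
 * ((uint8_t *)fsb.th - fsb.tcp_ip_hdr) is just sizeof(struct ip)
 * (20 bytes), on IPv6 it is 40, and with UDP tunneling another 8
 * bytes of udphdr sit in between; adding that offset to cpto
 * re-derives the header pointers inside this mbuf's copy of the
 * template.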
22813 */ 22814 #ifdef INET6 22815 if (isipv6) 22816 ip6 = mtod(m, struct ip6_hdr *); 22817 else 22818 #endif /* INET6 */ 22819 #ifdef INET 22820 ip = mtod(m, struct ip *); 22821 #endif 22822 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 22823 /* If we have a udp header lets set it into the mbuf as well */ 22824 if (udp) 22825 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); 22826 } 22827 if (optlen) { 22828 bcopy(opt, th + 1, optlen); 22829 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 22830 } 22831 /* 22832 * Put TCP length in extended header, and then checksum extended 22833 * header and data. 22834 */ 22835 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 22836 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 22837 if (to.to_flags & TOF_SIGNATURE) { 22838 /* 22839 * Calculate MD5 signature and put it into the place 22840 * determined before. 22841 * NOTE: since TCP options buffer doesn't point into 22842 * mbuf's data, calculate offset and use it. 22843 */ 22844 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 22845 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 22846 /* 22847 * Do not send segment if the calculation of MD5 22848 * digest has failed. 22849 */ 22850 goto out; 22851 } 22852 } 22853 #endif 22854 #ifdef INET6 22855 if (isipv6) { 22856 /* 22857 * ip6_plen is not need to be filled now, and will be filled 22858 * in ip6_output. 22859 */ 22860 if (tp->t_port) { 22861 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 22862 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 22863 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 22864 th->th_sum = htons(0); 22865 UDPSTAT_INC(udps_opackets); 22866 } else { 22867 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 22868 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 22869 th->th_sum = in6_cksum_pseudo(ip6, 22870 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 22871 0); 22872 } 22873 } 22874 #endif 22875 #if defined(INET6) && defined(INET) 22876 else 22877 #endif 22878 #ifdef INET 22879 { 22880 if (tp->t_port) { 22881 m->m_pkthdr.csum_flags = CSUM_UDP; 22882 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 22883 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 22884 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 22885 th->th_sum = htons(0); 22886 UDPSTAT_INC(udps_opackets); 22887 } else { 22888 m->m_pkthdr.csum_flags = CSUM_TCP; 22889 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 22890 th->th_sum = in_pseudo(ip->ip_src.s_addr, 22891 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 22892 IPPROTO_TCP + len + optlen)); 22893 } 22894 /* IP version must be set here for ipv4/ipv6 checking later */ 22895 KASSERT(ip->ip_v == IPVERSION, 22896 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 22897 } 22898 #endif 22899 /* 22900 * Enable TSO and specify the size of the segments. The TCP pseudo 22901 * header checksum is always provided. XXX: Fixme: This is currently 22902 * not the case for IPv6. 22903 */ 22904 if (tso) { 22905 /* 22906 * Here we must use t_maxseg and the optlen since 22907 * the optlen may include SACK's (or DSACK). 
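 *
 * For example (hypothetical sizes): with a 1460 byte t_maxseg and
 * 12 bytes of timestamp options, each TSO-generated segment is
 * limited to 1448 bytes of payload; and if, as noted above, SACK
 * or D-SACK options end up included, tso_segsz shrinks with them,
 * keeping every emitted segment within what the options leave
 * room for.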
22908 */ 22909 KASSERT(len > tp->t_maxseg - optlen, 22910 ("%s: len <= tso_segsz", __func__)); 22911 m->m_pkthdr.csum_flags |= CSUM_TSO; 22912 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 22913 } 22914 KASSERT(len + hdrlen == m_length(m, NULL), 22915 ("%s: mbuf chain different than expected: %d + %u != %u", 22916 __func__, len, hdrlen, m_length(m, NULL))); 22917 22918 #ifdef TCP_HHOOK 22919 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 22920 hhook_run_tcp_est_out(tp, th, &to, len, tso); 22921 #endif 22922 if ((rack->r_ctl.crte != NULL) && 22923 (rack->rc_hw_nobuf == 0) && 22924 tcp_bblogging_on(tp)) { 22925 rack_log_queue_level(tp, rack, len, &tv, cts); 22926 } 22927 /* We're getting ready to send; log now. */ 22928 if (tcp_bblogging_on(rack->rc_tp)) { 22929 union tcp_log_stackspecific log; 22930 22931 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 22932 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 22933 if (rack->rack_no_prr) 22934 log.u_bbr.flex1 = 0; 22935 else 22936 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 22937 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 22938 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 22939 log.u_bbr.flex4 = orig_len; 22940 /* Save off the early/late values */ 22941 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 22942 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 22943 log.u_bbr.bw_inuse = rack_get_bw(rack); 22944 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 22945 log.u_bbr.flex8 = 0; 22946 if (rsm) { 22947 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 22948 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 22949 counter_u64_add(rack_collapsed_win_rxt, 1); 22950 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 22951 } 22952 if (doing_tlp) 22953 log.u_bbr.flex8 = 2; 22954 else 22955 log.u_bbr.flex8 = 1; 22956 } else { 22957 if (doing_tlp) 22958 log.u_bbr.flex8 = 3; 22959 } 22960 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 22961 log.u_bbr.flex7 = mark; 22962 log.u_bbr.flex7 <<= 8; 22963 log.u_bbr.flex7 |= pass; 22964 log.u_bbr.pkts_out = tp->t_maxseg; 22965 log.u_bbr.timeStamp = cts; 22966 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 22967 if (rsm && (rsm->r_rtr_cnt > 0)) { 22968 /* 22969 * When we have a retransmit we want to log the 22970 * burst at send and flight at send from before. 22971 */ 22972 log.u_bbr.flex5 = rsm->r_fas; 22973 log.u_bbr.bbr_substate = rsm->r_bas; 22974 } else { 22975 /* 22976 * New transmits we log in flex5 the inflight again as 22977 * well as the number of segments in our send in the 22978 * substate field. 22979 */ 22980 log.u_bbr.flex5 = log.u_bbr.inflight; 22981 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 22982 } 22983 log.u_bbr.lt_epoch = cwnd_to_use; 22984 log.u_bbr.delivered = sendalot; 22985 log.u_bbr.rttProp = (uintptr_t)rsm; 22986 log.u_bbr.pkt_epoch = __LINE__; 22987 if (rsm) { 22988 log.u_bbr.delRate = rsm->r_flags; 22989 log.u_bbr.delRate <<= 31; 22990 log.u_bbr.delRate |= rack->r_must_retran; 22991 log.u_bbr.delRate <<= 1; 22992 log.u_bbr.delRate |= (sack_rxmit & 0x00000001); 22993 } else { 22994 log.u_bbr.delRate = rack->r_must_retran; 22995 log.u_bbr.delRate <<= 1; 22996 log.u_bbr.delRate |= (sack_rxmit & 0x00000001); 22997 } 22998 lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 22999 len, &log, false, NULL, __func__, __LINE__, &tv); 23000 } else 23001 lgb = NULL; 23002 23003 /* 23004 * Fill in IP length and desired time to live and send to IP level. 
23005 * There should be a better way to handle ttl and tos; we could keep 23006 * them in the template, but need a way to checksum without them. 23007 */ 23008 /* 23009 * m->m_pkthdr.len should have been set before cksum calcuration, 23010 * because in6_cksum() need it. 23011 */ 23012 #ifdef INET6 23013 if (isipv6) { 23014 /* 23015 * we separately set hoplimit for every segment, since the 23016 * user might want to change the value via setsockopt. Also, 23017 * desired default hop limit might be changed via Neighbor 23018 * Discovery. 23019 */ 23020 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); 23021 23022 /* 23023 * Set the packet size here for the benefit of DTrace 23024 * probes. ip6_output() will set it properly; it's supposed 23025 * to include the option header lengths as well. 23026 */ 23027 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 23028 23029 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 23030 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 23031 else 23032 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 23033 23034 if (tp->t_state == TCPS_SYN_SENT) 23035 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 23036 23037 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 23038 /* TODO: IPv6 IP6TOS_ECT bit on */ 23039 error = ip6_output(m, 23040 inp->in6p_outputopts, 23041 &inp->inp_route6, 23042 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 23043 NULL, NULL, inp); 23044 23045 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) 23046 mtu = inp->inp_route6.ro_nh->nh_mtu; 23047 } 23048 #endif /* INET6 */ 23049 #if defined(INET) && defined(INET6) 23050 else 23051 #endif 23052 #ifdef INET 23053 { 23054 ip->ip_len = htons(m->m_pkthdr.len); 23055 #ifdef INET6 23056 if (inp->inp_vflag & INP_IPV6PROTO) 23057 ip->ip_ttl = in6_selecthlim(inp, NULL); 23058 #endif /* INET6 */ 23059 rack->r_ctl.fsb.hoplimit = ip->ip_ttl; 23060 /* 23061 * If we do path MTU discovery, then we set DF on every 23062 * packet. This might not be the best thing to do according 23063 * to RFC3390 Section 2. However the tcp hostcache migitates 23064 * the problem so it affects only the first tcp connection 23065 * with a host. 23066 * 23067 * NB: Don't set DF on small MTU/MSS to have a safe 23068 * fallback. 23069 */ 23070 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 23071 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 23072 if (tp->t_port == 0 || len < V_tcp_minmss) { 23073 ip->ip_off |= htons(IP_DF); 23074 } 23075 } else { 23076 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 23077 } 23078 23079 if (tp->t_state == TCPS_SYN_SENT) 23080 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 23081 23082 TCP_PROBE5(send, NULL, tp, ip, tp, th); 23083 23084 error = ip_output(m, 23085 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 23086 inp->inp_options, 23087 #else 23088 NULL, 23089 #endif 23090 &inp->inp_route, 23091 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0, 23092 inp); 23093 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) 23094 mtu = inp->inp_route.ro_nh->nh_mtu; 23095 } 23096 #endif /* INET */ 23097 if (lgb) { 23098 lgb->tlb_errno = error; 23099 lgb = NULL; 23100 } 23101 23102 out: 23103 /* 23104 * In transmit state, time the transmission and arrange for the 23105 * retransmit. In persist state, just set snd_max. 
23106 */ 23107 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 23108 rack_to_usec_ts(&tv), 23109 rsm, add_flag, s_mb, s_moff, hw_tls, segsiz); 23110 if (error == 0) { 23111 if (add_flag & RACK_IS_PCM) { 23112 /* We just launched a PCM */ 23113 /* rrs here log */ 23114 rack->pcm_in_progress = 1; 23115 rack->pcm_needed = 0; 23116 rack_log_pcm(rack, 7, len, rack->r_ctl.pcm_max_seg, add_flag); 23117 } 23118 if (rsm == NULL) { 23119 if (rack->lt_bw_up == 0) { 23120 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv); 23121 rack->r_ctl.lt_seq = tp->snd_una; 23122 rack->lt_bw_up = 1; 23123 } else if (((rack_seq + len) - rack->r_ctl.lt_seq) > 0x7fffffff) { 23124 /* 23125 * Need to record what we have since we are 23126 * approaching seq wrap. 23127 */ 23128 uint64_t tmark; 23129 23130 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); 23131 rack->r_ctl.lt_seq = tp->snd_una; 23132 tmark = tcp_get_u64_usecs(&tv); 23133 if (tmark > rack->r_ctl.lt_timemark) { 23134 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 23135 rack->r_ctl.lt_timemark = tmark; 23136 } 23137 } 23138 } 23139 rack->forced_ack = 0; /* If we send something zap the FA flag */ 23140 counter_u64_add(rack_total_bytes, len); 23141 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 23142 if (rsm && doing_tlp) { 23143 rack->rc_last_sent_tlp_past_cumack = 0; 23144 rack->rc_last_sent_tlp_seq_valid = 1; 23145 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 23146 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 23147 } 23148 if (rack->rc_hw_nobuf) { 23149 rack->rc_hw_nobuf = 0; 23150 rack->r_ctl.rc_agg_delayed = 0; 23151 rack->r_early = 0; 23152 rack->r_late = 0; 23153 rack->r_ctl.rc_agg_early = 0; 23154 } 23155 if (rsm && (doing_tlp == 0)) { 23156 /* Set we retransmitted */ 23157 rack->rc_gp_saw_rec = 1; 23158 } else { 23159 if (cwnd_to_use > tp->snd_ssthresh) { 23160 /* Set we sent in CA */ 23161 rack->rc_gp_saw_ca = 1; 23162 } else { 23163 /* Set we sent in SS */ 23164 rack->rc_gp_saw_ss = 1; 23165 } 23166 } 23167 if (TCPS_HAVEESTABLISHED(tp->t_state) && 23168 (tp->t_flags & TF_SACK_PERMIT) && 23169 tp->rcv_numsacks > 0) 23170 tcp_clean_dsack_blocks(tp); 23171 tot_len_this_send += len; 23172 if (len == 0) { 23173 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 23174 } else { 23175 int idx; 23176 23177 idx = (len / segsiz) + 3; 23178 if (idx >= TCP_MSS_ACCT_ATIMER) 23179 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 23180 else 23181 counter_u64_add(rack_out_size[idx], 1); 23182 } 23183 } 23184 if ((rack->rack_no_prr == 0) && 23185 sub_from_prr && 23186 (error == 0)) { 23187 if (rack->r_ctl.rc_prr_sndcnt >= len) 23188 rack->r_ctl.rc_prr_sndcnt -= len; 23189 else 23190 rack->r_ctl.rc_prr_sndcnt = 0; 23191 } 23192 sub_from_prr = 0; 23193 if (doing_tlp) { 23194 /* Make sure the TLP is added */ 23195 add_flag |= RACK_TLP; 23196 } else if (rsm) { 23197 /* If its a resend without TLP then it must not have the flag */ 23198 rsm->r_flags &= ~RACK_TLP; 23199 } 23200 23201 23202 if ((error == 0) && 23203 (len > 0) && 23204 (tp->snd_una == tp->snd_max)) 23205 rack->r_ctl.rc_tlp_rxt_last_time = cts; 23206 23207 { 23208 /* 23209 * This block is not associated with the above error == 0 test. 23210 * It is used to advance snd_max if we have a new transmit. 
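 *
 * As a concrete (hypothetical) walk-through: a fresh 1448 byte
 * transmit with nothing outstanding stamps t_acktime, advances
 * snd_max by 1448 (SYN and FIN each add one more sequence number
 * when first sent), and seeds t_rtttime/t_rtseq from startseq if
 * we were not already timing a segment; a SACK retransmission
 * bails out early (sack_rxmit) so snd_max is left alone.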
23211 */ 23212 tcp_seq startseq = tp->snd_max; 23213 23214 23215 if (rsm && (doing_tlp == 0)) 23216 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 23217 if (error) 23218 /* We don't log or do anything with errors */ 23219 goto nomore; 23220 if (doing_tlp == 0) { 23221 if (rsm == NULL) { 23222 /* 23223 * Not a retransmission of some 23224 * sort, new data is going out so 23225 * clear our TLP count and flag. 23226 */ 23227 rack->rc_tlp_in_progress = 0; 23228 rack->r_ctl.rc_tlp_cnt_out = 0; 23229 } 23230 } else { 23231 /* 23232 * We have just sent a TLP, mark that it is true 23233 * and make sure our in progress is set so we 23234 * continue to check the count. 23235 */ 23236 rack->rc_tlp_in_progress = 1; 23237 rack->r_ctl.rc_tlp_cnt_out++; 23238 } 23239 /* 23240 * If we are retransmitting we are done, snd_max 23241 * does not get updated. 23242 */ 23243 if (sack_rxmit) 23244 goto nomore; 23245 if ((tp->snd_una == tp->snd_max) && (len > 0)) { 23246 /* 23247 * Update the time we just added data since 23248 * nothing was outstanding. 23249 */ 23250 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 23251 tp->t_acktime = ticks; 23252 } 23253 /* 23254 * Now for special SYN/FIN handling. 23255 */ 23256 if (flags & (TH_SYN | TH_FIN)) { 23257 if ((flags & TH_SYN) && 23258 ((tp->t_flags & TF_SENTSYN) == 0)) { 23259 tp->snd_max++; 23260 tp->t_flags |= TF_SENTSYN; 23261 } 23262 if ((flags & TH_FIN) && 23263 ((tp->t_flags & TF_SENTFIN) == 0)) { 23264 tp->snd_max++; 23265 tp->t_flags |= TF_SENTFIN; 23266 } 23267 } 23268 tp->snd_max += len; 23269 if (rack->rc_new_rnd_needed) { 23270 rack_new_round_starts(tp, rack, tp->snd_max); 23271 } 23272 /* 23273 * Time this transmission if not a retransmission and 23274 * not currently timing anything. 23275 * This is only relevant in case of switching back to 23276 * the base stack. 23277 */ 23278 if (tp->t_rtttime == 0) { 23279 tp->t_rtttime = ticks; 23280 tp->t_rtseq = startseq; 23281 KMOD_TCPSTAT_INC(tcps_segstimed); 23282 } 23283 if (len && 23284 ((tp->t_flags & TF_GPUTINPROG) == 0)) 23285 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 23286 /* 23287 * If we are doing FO we need to update the mbuf position and subtract 23288 * this happens when the peer sends us duplicate information and 23289 * we thus want to send a DSACK. 23290 * 23291 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 23292 * turned off? If not then we are going to echo multiple DSACK blocks 23293 * out (with the TSO), which we should not be doing. 23294 */ 23295 if (rack->r_fast_output && len) { 23296 if (rack->r_ctl.fsb.left_to_send > len) 23297 rack->r_ctl.fsb.left_to_send -= len; 23298 else 23299 rack->r_ctl.fsb.left_to_send = 0; 23300 if (rack->r_ctl.fsb.left_to_send < segsiz) 23301 rack->r_fast_output = 0; 23302 if (rack->r_fast_output) { 23303 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 23304 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 23305 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); 23306 } 23307 } 23308 if (rack_pcm_blast == 0) { 23309 if ((orig_len > len) && 23310 (add_flag & RACK_IS_PCM) && 23311 (len < pace_max_seg) && 23312 ((pace_max_seg - len) > segsiz)) { 23313 /* 23314 * We are doing a PCM measurement and we did 23315 * not get enough data in the TSO to meet the 23316 * burst requirement. 
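 *
 * For illustration (hypothetical numbers): if the PCM burst
 * wanted pace_max_seg = 14480 bytes but TSO sizing only let 8688
 * go out, the leftover 5792 is still more than a segment, so we
 * shrink orig_len and pace_max_seg by what was sent, set len to
 * the remainder, recompute sb_offset from snd_max, retake the
 * sockbuf lock and loop back to "send:" to finish the burst.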
23317 */ 23318 uint32_t n_len; 23319 23320 n_len = (orig_len - len); 23321 orig_len -= len; 23322 pace_max_seg -= len; 23323 len = n_len; 23324 sb_offset = tp->snd_max - tp->snd_una; 23325 /* Re-lock for the next spin */ 23326 SOCKBUF_LOCK(sb); 23327 goto send; 23328 } 23329 } else { 23330 if ((orig_len > len) && 23331 (add_flag & RACK_IS_PCM) && 23332 ((orig_len - len) > segsiz)) { 23333 /* 23334 * We are doing a PCM measurement and we did 23335 * not get enough data in the TSO to meet the 23336 * burst requirement. 23337 */ 23338 uint32_t n_len; 23339 23340 n_len = (orig_len - len); 23341 orig_len -= len; 23342 len = n_len; 23343 sb_offset = tp->snd_max - tp->snd_una; 23344 /* Re-lock for the next spin */ 23345 SOCKBUF_LOCK(sb); 23346 goto send; 23347 } 23348 } 23349 } 23350 nomore: 23351 if (error) { 23352 rack->r_ctl.rc_agg_delayed = 0; 23353 rack->r_early = 0; 23354 rack->r_late = 0; 23355 rack->r_ctl.rc_agg_early = 0; 23356 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 23357 /* 23358 * Failures do not advance the seq counter above. For the 23359 * case of ENOBUFS we will fall out and retry in 1ms with 23360 * the hpts. Everything else will just have to retransmit 23361 * with the timer. 23362 * 23363 * In any case, we do not want to loop around for another 23364 * send without a good reason. 23365 */ 23366 sendalot = 0; 23367 switch (error) { 23368 case EPERM: 23369 case EACCES: 23370 tp->t_softerror = error; 23371 #ifdef TCP_ACCOUNTING 23372 crtsc = get_cyclecount(); 23373 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23374 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 23375 } 23376 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23377 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 23378 } 23379 sched_unpin(); 23380 #endif 23381 return (error); 23382 case ENOBUFS: 23383 /* 23384 * Pace us right away to retry in a some 23385 * time 23386 */ 23387 if (rack->r_ctl.crte != NULL) { 23388 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); 23389 if (tcp_bblogging_on(rack->rc_tp)) 23390 rack_log_queue_level(tp, rack, len, &tv, cts); 23391 } else 23392 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); 23393 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 23394 if (rack->rc_enobuf < 0x7f) 23395 rack->rc_enobuf++; 23396 if (slot < (10 * HPTS_USEC_IN_MSEC)) 23397 slot = 10 * HPTS_USEC_IN_MSEC; 23398 if (rack->r_ctl.crte != NULL) { 23399 counter_u64_add(rack_saw_enobuf_hw, 1); 23400 tcp_rl_log_enobuf(rack->r_ctl.crte); 23401 } 23402 counter_u64_add(rack_saw_enobuf, 1); 23403 goto enobufs; 23404 case EMSGSIZE: 23405 /* 23406 * For some reason the interface we used initially 23407 * to send segments changed to another or lowered 23408 * its MTU. If TSO was active we either got an 23409 * interface without TSO capabilits or TSO was 23410 * turned off. If we obtained mtu from ip_output() 23411 * then update it and try again. 
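 *
 * For example (hypothetical path change): if the route now
 * reports a 1280 byte MTU where we had been using 1500,
 * tcp_mss_update() shrinks t_maxseg and we immediately "goto
 * again" to rebuild the segment at the new size; if no usable
 * mtu came back we instead arm a 10ms pacing timer below and
 * return the error.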
23412 */ 23413 if (tso) 23414 tp->t_flags &= ~TF_TSO; 23415 if (mtu != 0) { 23416 int saved_mtu; 23417 23418 saved_mtu = tp->t_maxseg; 23419 tcp_mss_update(tp, -1, mtu, NULL, NULL); 23420 if (saved_mtu > tp->t_maxseg) { 23421 goto again; 23422 } 23423 } 23424 slot = 10 * HPTS_USEC_IN_MSEC; 23425 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 23426 #ifdef TCP_ACCOUNTING 23427 crtsc = get_cyclecount(); 23428 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23429 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 23430 } 23431 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23432 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 23433 } 23434 sched_unpin(); 23435 #endif 23436 return (error); 23437 case ENETUNREACH: 23438 counter_u64_add(rack_saw_enetunreach, 1); 23439 case EHOSTDOWN: 23440 case EHOSTUNREACH: 23441 case ENETDOWN: 23442 if (TCPS_HAVERCVDSYN(tp->t_state)) { 23443 tp->t_softerror = error; 23444 } 23445 /* FALLTHROUGH */ 23446 default: 23447 slot = 10 * HPTS_USEC_IN_MSEC; 23448 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 23449 #ifdef TCP_ACCOUNTING 23450 crtsc = get_cyclecount(); 23451 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23452 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 23453 } 23454 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23455 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 23456 } 23457 sched_unpin(); 23458 #endif 23459 return (error); 23460 } 23461 } else { 23462 rack->rc_enobuf = 0; 23463 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 23464 rack->r_ctl.retran_during_recovery += len; 23465 } 23466 KMOD_TCPSTAT_INC(tcps_sndtotal); 23467 23468 /* 23469 * Data sent (as far as we can tell). If this advertises a larger 23470 * window than any other segment, then remember the size of the 23471 * advertised window. Any pending ACK has now been sent. 23472 */ 23473 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 23474 tp->rcv_adv = tp->rcv_nxt + recwin; 23475 23476 tp->last_ack_sent = tp->rcv_nxt; 23477 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 23478 enobufs: 23479 if (sendalot) { 23480 /* Do we need to turn off sendalot? */ 23481 if (pace_max_seg && 23482 (tot_len_this_send >= pace_max_seg)) { 23483 /* We hit our max. */ 23484 sendalot = 0; 23485 } 23486 } 23487 if ((error == 0) && (flags & TH_FIN)) 23488 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 23489 if (flags & TH_RST) { 23490 /* 23491 * We don't send again after sending a RST. 23492 */ 23493 slot = 0; 23494 sendalot = 0; 23495 if (error == 0) 23496 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 23497 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) { 23498 /* 23499 * Get our pacing rate, if an error 23500 * occurred in sending (ENOBUF) we would 23501 * hit the else if with slot preset. Other 23502 * errors return. 23503 */ 23504 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz, __LINE__); 23505 } 23506 /* We have sent clear the flag */ 23507 rack->r_ent_rec_ns = 0; 23508 if (rack->r_must_retran) { 23509 if (rsm) { 23510 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 23511 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 23512 /* 23513 * We have retransmitted all. 23514 */ 23515 rack->r_must_retran = 0; 23516 rack->r_ctl.rc_out_at_rto = 0; 23517 } 23518 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 23519 /* 23520 * Sending new data will also kill 23521 * the loop. 
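 * (r_must_retran marks the forced-retransmit phase entered after an
 * RTO or an MTU change; once snd_max moves past rc_snd_max_at_rto,
 * the snd_max recorded when that phase began, the flag and the
 * rc_out_at_rto accounting are cleared.)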
23522 */ 23523 rack->r_must_retran = 0; 23524 rack->r_ctl.rc_out_at_rto = 0; 23525 } 23526 } 23527 rack->r_ctl.fsb.recwin = recwin; 23528 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 23529 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 23530 /* 23531 * We hit an RTO and now have past snd_max at the RTO 23532 * clear all the WAS flags. 23533 */ 23534 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 23535 } 23536 if (slot) { 23537 /* set the rack tcb into the slot N */ 23538 if ((error == 0) && 23539 rack_use_rfo && 23540 ((flags & (TH_SYN|TH_FIN)) == 0) && 23541 (rsm == NULL) && 23542 (ipoptlen == 0) && 23543 (tp->rcv_numsacks == 0) && 23544 (rack->rc_policer_detected == 0) && 23545 rack->r_fsb_inited && 23546 TCPS_HAVEESTABLISHED(tp->t_state) && 23547 ((IN_RECOVERY(tp->t_flags)) == 0) && 23548 (rack->r_must_retran == 0) && 23549 ((tp->t_flags & TF_NEEDFIN) == 0) && 23550 (len > 0) && (orig_len > 0) && 23551 (orig_len > len) && 23552 ((orig_len - len) >= segsiz) && 23553 ((optlen == 0) || 23554 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 23555 /* We can send at least one more MSS using our fsb */ 23556 rack_setup_fast_output(tp, rack, sb, len, orig_len, 23557 segsiz, pace_max_seg, hw_tls, flags); 23558 } else 23559 rack->r_fast_output = 0; 23560 rack_log_fsb(rack, tp, so, flags, 23561 ipoptlen, orig_len, len, error, 23562 (rsm == NULL), optlen, __LINE__, 2); 23563 } else if (sendalot) { 23564 int ret; 23565 23566 sack_rxmit = 0; 23567 if ((error == 0) && 23568 rack_use_rfo && 23569 ((flags & (TH_SYN|TH_FIN)) == 0) && 23570 (rsm == NULL) && 23571 (ipoptlen == 0) && 23572 (tp->rcv_numsacks == 0) && 23573 (rack->r_must_retran == 0) && 23574 rack->r_fsb_inited && 23575 TCPS_HAVEESTABLISHED(tp->t_state) && 23576 ((IN_RECOVERY(tp->t_flags)) == 0) && 23577 ((tp->t_flags & TF_NEEDFIN) == 0) && 23578 (len > 0) && (orig_len > 0) && 23579 (orig_len > len) && 23580 ((orig_len - len) >= segsiz) && 23581 ((optlen == 0) || 23582 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 23583 /* we can use fast_output for more */ 23584 rack_setup_fast_output(tp, rack, sb, len, orig_len, 23585 segsiz, pace_max_seg, hw_tls, flags); 23586 if (rack->r_fast_output) { 23587 error = 0; 23588 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 23589 if (ret >= 0) 23590 return (ret); 23591 else if (error) 23592 goto nomore; 23593 23594 } 23595 } 23596 goto again; 23597 } 23598 skip_all_send: 23599 /* Assure when we leave that snd_nxt will point to top */ 23600 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 23601 tp->snd_nxt = tp->snd_max; 23602 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 23603 #ifdef TCP_ACCOUNTING 23604 crtsc = get_cyclecount() - ts_val; 23605 if (tot_len_this_send) { 23606 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23607 tp->tcp_cnt_counters[SND_OUT_DATA]++; 23608 } 23609 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23610 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 23611 } 23612 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23613 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 23614 } 23615 } else { 23616 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23617 tp->tcp_cnt_counters[SND_OUT_ACK]++; 23618 } 23619 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 23620 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 23621 } 23622 } 23623 sched_unpin(); 23624 #endif 23625 if (error == ENOBUFS) 23626 error = 0; 23627 return (error); 23628 } 23629 23630 static void 23631 rack_update_seg(struct tcp_rack *rack) 
23632 { 23633 uint32_t orig_val; 23634 23635 orig_val = rack->r_ctl.rc_pace_max_segs; 23636 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 23637 if (orig_val != rack->r_ctl.rc_pace_max_segs) 23638 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); 23639 } 23640 23641 static void 23642 rack_mtu_change(struct tcpcb *tp) 23643 { 23644 /* 23645 * The MSS may have changed 23646 */ 23647 struct tcp_rack *rack; 23648 struct rack_sendmap *rsm; 23649 23650 rack = (struct tcp_rack *)tp->t_fb_ptr; 23651 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 23652 /* 23653 * The MTU has changed we need to resend everything 23654 * since all we have sent is lost. We first fix 23655 * up the mtu though. 23656 */ 23657 rack_set_pace_segments(tp, rack, __LINE__, NULL); 23658 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 23659 rack_remxt_tmr(tp); 23660 rack->r_fast_output = 0; 23661 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 23662 rack->r_ctl.rc_sacked); 23663 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 23664 rack->r_must_retran = 1; 23665 /* Mark all inflight to needing to be rxt'd */ 23666 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 23667 rsm->r_flags |= (RACK_MUST_RXT|RACK_PMTU_CHG); 23668 } 23669 } 23670 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 23671 /* We don't use snd_nxt to retransmit */ 23672 tp->snd_nxt = tp->snd_max; 23673 } 23674 23675 static int 23676 rack_set_dgp(struct tcp_rack *rack) 23677 { 23678 if (rack->dgp_on == 1) 23679 return(0); 23680 if ((rack->use_fixed_rate == 1) && 23681 (rack->rc_always_pace == 1)) { 23682 /* 23683 * We are already pacing another 23684 * way. 23685 */ 23686 return (EBUSY); 23687 } 23688 if (rack->rc_always_pace == 1) { 23689 rack_remove_pacing(rack); 23690 } 23691 if (tcp_incr_dgp_pacing_cnt() == 0) 23692 return (ENOSPC); 23693 rack->r_ctl.pacing_method |= RACK_DGP_PACING; 23694 rack->rc_fillcw_apply_discount = 0; 23695 rack->dgp_on = 1; 23696 rack->rc_always_pace = 1; 23697 rack->rc_pace_dnd = 1; 23698 rack->use_fixed_rate = 0; 23699 if (rack->gp_ready) 23700 rack_set_cc_pacing(rack); 23701 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23702 rack->rack_attempt_hdwr_pace = 0; 23703 /* rxt settings */ 23704 rack->full_size_rxt = 1; 23705 rack->shape_rxt_to_pacing_min = 0; 23706 /* cmpack=1 */ 23707 rack->r_use_cmp_ack = 1; 23708 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 23709 rack->r_use_cmp_ack) 23710 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 23711 /* scwnd=1 */ 23712 rack->rack_enable_scwnd = 1; 23713 /* dynamic=100 */ 23714 rack->rc_gp_dyn_mul = 1; 23715 /* gp_inc_ca */ 23716 rack->r_ctl.rack_per_of_gp_ca = 100; 23717 /* rrr_conf=3 */ 23718 rack->r_rr_config = 3; 23719 /* npush=2 */ 23720 rack->r_ctl.rc_no_push_at_mrtt = 2; 23721 /* fillcw=1 */ 23722 rack->rc_pace_to_cwnd = 1; 23723 rack->rc_pace_fill_if_rttin_range = 0; 23724 rack->rtt_limit_mul = 0; 23725 /* noprr=1 */ 23726 rack->rack_no_prr = 1; 23727 /* lscwnd=1 */ 23728 rack->r_limit_scw = 1; 23729 /* gp_inc_rec */ 23730 rack->r_ctl.rack_per_of_gp_rec = 90; 23731 return (0); 23732 } 23733 23734 static int 23735 rack_set_profile(struct tcp_rack *rack, int prof) 23736 { 23737 int err = EINVAL; 23738 if (prof == 1) { 23739 /* 23740 * Profile 1 is "standard" DGP. It ignores 23741 * client buffer level. 
23742 */ 23743 err = rack_set_dgp(rack); 23744 if (err) 23745 return (err); 23746 } else if (prof == 6) { 23747 err = rack_set_dgp(rack); 23748 if (err) 23749 return (err); 23750 /* 23751 * Profile 6 tweaks DGP so that it applies to 23752 * fill-cw the same settings that profile 5 uses 23753 * to replace DGP. It then gets max(dgp-rate, discounted fill-cw rate). 23754 */ 23755 rack->rc_fillcw_apply_discount = 1; 23756 } else if (prof == 0) { 23757 /* This changes things back to the default settings */ 23758 if (rack->rc_always_pace == 1) { 23759 rack_remove_pacing(rack); 23760 } else { 23761 /* Make sure any stray flags are off */ 23762 rack->dgp_on = 0; 23763 rack->rc_hybrid_mode = 0; 23764 rack->use_fixed_rate = 0; 23765 } 23766 err = 0; 23767 if (rack_fill_cw_state) 23768 rack->rc_pace_to_cwnd = 1; 23769 else 23770 rack->rc_pace_to_cwnd = 0; 23771 23772 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 23773 rack->r_ctl.pacing_method |= RACK_REG_PACING; 23774 rack->rc_always_pace = 1; 23775 if (rack->rack_hibeta) 23776 rack_set_cc_pacing(rack); 23777 } else 23778 rack->rc_always_pace = 0; 23779 if (rack_dsack_std_based & 0x1) { 23780 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 23781 rack->rc_rack_tmr_std_based = 1; 23782 } 23783 if (rack_dsack_std_based & 0x2) { 23784 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 23785 rack->rc_rack_use_dsack = 1; 23786 } 23787 if (rack_use_cmp_acks) 23788 rack->r_use_cmp_ack = 1; 23789 else 23790 rack->r_use_cmp_ack = 0; 23791 if (rack_disable_prr) 23792 rack->rack_no_prr = 1; 23793 else 23794 rack->rack_no_prr = 0; 23795 if (rack_gp_no_rec_chg) 23796 rack->rc_gp_no_rec_chg = 1; 23797 else 23798 rack->rc_gp_no_rec_chg = 0; 23799 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 23800 rack->r_mbuf_queue = 1; 23801 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 23802 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 23803 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23804 } else { 23805 rack->r_mbuf_queue = 0; 23806 rack->rc_tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 23807 } 23808 if (rack_enable_shared_cwnd) 23809 rack->rack_enable_scwnd = 1; 23810 else 23811 rack->rack_enable_scwnd = 0; 23812 if (rack_do_dyn_mul) { 23813 /* When dynamic adjustment is on, CA needs to start at 100% */ 23814 rack->rc_gp_dyn_mul = 1; 23815 if (rack_do_dyn_mul >= 100) 23816 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 23817 } else { 23818 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 23819 rack->rc_gp_dyn_mul = 0; 23820 } 23821 rack->r_rr_config = 0; 23822 rack->r_ctl.rc_no_push_at_mrtt = 0; 23823 rack->rc_pace_fill_if_rttin_range = 0; 23824 rack->rtt_limit_mul = 0; 23825 23826 if (rack_enable_hw_pacing) 23827 rack->rack_hdw_pace_ena = 1; 23828 else 23829 rack->rack_hdw_pace_ena = 0; 23830 if (rack_disable_prr) 23831 rack->rack_no_prr = 1; 23832 else 23833 rack->rack_no_prr = 0; 23834 if (rack_limits_scwnd) 23835 rack->r_limit_scw = 1; 23836 else 23837 rack->r_limit_scw = 0; 23838 rack_init_retransmit_value(rack, rack_rxt_controls); 23839 err = 0; 23840 } 23841 return (err); 23842 } 23843 23844 static int 23845 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 23846 { 23847 struct deferred_opt_list *dol; 23848 23849 dol = malloc(sizeof(struct deferred_opt_list), 23850 M_TCPDO, M_NOWAIT|M_ZERO); 23851 if (dol == NULL) { 23852 /* 23853 * No space, yikes -- fail out.
23854 */ 23855 return (0); 23856 } 23857 dol->optname = sopt_name; 23858 dol->optval = loptval; 23859 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 23860 return (1); 23861 } 23862 23863 static int 23864 process_hybrid_pacing(struct tcp_rack *rack, struct tcp_hybrid_req *hybrid) 23865 { 23866 #ifdef TCP_REQUEST_TRK 23867 struct tcp_sendfile_track *sft; 23868 struct timeval tv; 23869 tcp_seq seq; 23870 int err; 23871 23872 microuptime(&tv); 23873 23874 /* Make sure no fixed rate is on */ 23875 rack->use_fixed_rate = 0; 23876 rack->r_ctl.rc_fixed_pacing_rate_rec = 0; 23877 rack->r_ctl.rc_fixed_pacing_rate_ca = 0; 23878 rack->r_ctl.rc_fixed_pacing_rate_ss = 0; 23879 /* Now allocate or find our entry that will have these settings */ 23880 sft = tcp_req_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusectick(&tv), 0); 23881 if (sft == NULL) { 23882 rack->rc_tp->tcp_hybrid_error++; 23883 /* no space, where would it have gone? */ 23884 seq = rack->rc_tp->snd_una + rack->rc_tp->t_inpcb.inp_socket->so_snd.sb_ccc; 23885 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_ROOM, __LINE__, 0); 23886 return (ENOSPC); 23887 } 23888 /* mask our internal flags */ 23889 hybrid->hybrid_flags &= TCP_HYBRID_PACING_USER_MASK; 23890 /* The seq will be snd_una + everything in the buffer */ 23891 seq = sft->start_seq; 23892 if ((hybrid->hybrid_flags & TCP_HYBRID_PACING_ENABLE) == 0) { 23893 /* Disabling hybrid pacing */ 23894 if (rack->rc_hybrid_mode) { 23895 rack_set_profile(rack, 0); 23896 rack->rc_tp->tcp_hybrid_stop++; 23897 } 23898 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_TURNED_OFF, __LINE__, 0); 23899 return (0); 23900 } 23901 if (rack->dgp_on == 0) { 23902 /* 23903 * If we have not yet turned DGP on, do so 23904 * now setting pure DGP mode, no buffer level 23905 * response. 23906 */ 23907 if ((err = rack_set_profile(rack, 1)) != 0){ 23908 /* Failed to turn pacing on */ 23909 rack->rc_tp->tcp_hybrid_error++; 23910 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_NO_PACING, __LINE__, 0); 23911 return (err); 23912 } 23913 } 23914 /* 23915 * Now we must switch to hybrid mode as well which also 23916 * means moving to regular pacing. 23917 */ 23918 if (rack->rc_hybrid_mode == 0) { 23919 /* First time */ 23920 if (tcp_can_enable_pacing()) { 23921 rack->r_ctl.pacing_method |= RACK_REG_PACING; 23922 rack->rc_hybrid_mode = 1; 23923 } else { 23924 return (ENOSPC); 23925 } 23926 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) { 23927 /* 23928 * This should be true. 23929 */ 23930 tcp_dec_dgp_pacing_cnt(); 23931 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 23932 } 23933 } 23934 /* Now set in our flags */ 23935 sft->hybrid_flags = hybrid->hybrid_flags | TCP_HYBRID_PACING_WASSET; 23936 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_CSPR) 23937 sft->cspr = hybrid->cspr; 23938 else 23939 sft->cspr = 0; 23940 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_H_MS) 23941 sft->hint_maxseg = hybrid->hint_maxseg; 23942 else 23943 sft->hint_maxseg = 0; 23944 rack->rc_tp->tcp_hybrid_start++; 23945 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_RULES_SET, __LINE__,0); 23946 return (0); 23947 #else 23948 return (ENOTSUP); 23949 #endif 23950 } 23951 23952 static int 23953 rack_stack_information(struct tcpcb *tp, struct stack_specific_info *si) 23954 { 23955 /* 23956 * Gather rack specific information. 
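 * In summary, the fields copied out below are the policer-detection
 * state (whether detection is enabled, whether a policer was seen,
 * its estimated bucket size and bandwidth, the current round and
 * the highly-buffered flag) plus the total bytes transmitted and
 * retransmitted on this connection.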
23957 */ 23958 struct tcp_rack *rack; 23959 23960 rack = (struct tcp_rack *)tp->t_fb_ptr; 23961 /* We pulled a SSI info log out what was there */ 23962 policer_detection_log(rack, rack->rc_highly_buffered, 0, 0, 0, 20); 23963 if (rack->policer_detect_on) { 23964 si->policer_detection_enabled = 1; 23965 if (rack->rc_policer_detected) { 23966 si->policer_detected = 1; 23967 si->policer_bucket_size = rack->r_ctl.policer_bucket_size; 23968 si->policer_last_bw = rack->r_ctl.policer_bw; 23969 } else { 23970 si->policer_detected = 0; 23971 si->policer_bucket_size = 0; 23972 si->policer_last_bw = 0; 23973 } 23974 si->current_round = rack->r_ctl.current_round; 23975 si->highly_buffered = rack->rc_highly_buffered; 23976 } 23977 si->bytes_transmitted = tp->t_sndbytes; 23978 si->bytes_retransmitted = tp->t_snd_rxt_bytes; 23979 return (0); 23980 } 23981 23982 static int 23983 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 23984 uint32_t optval, uint64_t loptval, struct tcp_hybrid_req *hybrid) 23985 23986 { 23987 struct epoch_tracker et; 23988 struct sockopt sopt; 23989 struct cc_newreno_opts opt; 23990 uint64_t val; 23991 int error = 0; 23992 uint16_t ca, ss; 23993 23994 switch (sopt_name) { 23995 case TCP_RACK_SET_RXT_OPTIONS: 23996 if ((optval >= 0) && (optval <= 2)) { 23997 rack_init_retransmit_value(rack, optval); 23998 } else { 23999 /* 24000 * You must send in 0, 1 or 2 all else is 24001 * invalid. 24002 */ 24003 error = EINVAL; 24004 } 24005 break; 24006 case TCP_RACK_DSACK_OPT: 24007 RACK_OPTS_INC(tcp_rack_dsack_opt); 24008 if (optval & 0x1) { 24009 rack->rc_rack_tmr_std_based = 1; 24010 } else { 24011 rack->rc_rack_tmr_std_based = 0; 24012 } 24013 if (optval & 0x2) { 24014 rack->rc_rack_use_dsack = 1; 24015 } else { 24016 rack->rc_rack_use_dsack = 0; 24017 } 24018 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); 24019 break; 24020 case TCP_RACK_PACING_DIVISOR: 24021 RACK_OPTS_INC(tcp_rack_pacing_divisor); 24022 if (optval == 0) { 24023 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; 24024 } else { 24025 if (optval < RL_MIN_DIVISOR) 24026 rack->r_ctl.pace_len_divisor = RL_MIN_DIVISOR; 24027 else 24028 rack->r_ctl.pace_len_divisor = optval; 24029 } 24030 break; 24031 case TCP_RACK_HI_BETA: 24032 RACK_OPTS_INC(tcp_rack_hi_beta); 24033 if (optval > 0) { 24034 rack->rack_hibeta = 1; 24035 if ((optval >= 50) && 24036 (optval <= 100)) { 24037 /* 24038 * User wants to set a custom beta. 24039 */ 24040 rack->r_ctl.saved_hibeta = optval; 24041 if (rack->rc_pacing_cc_set) 24042 rack_undo_cc_pacing(rack); 24043 rack->r_ctl.rc_saved_beta.beta = optval; 24044 } 24045 if (rack->rc_pacing_cc_set == 0) 24046 rack_set_cc_pacing(rack); 24047 } else { 24048 rack->rack_hibeta = 0; 24049 if (rack->rc_pacing_cc_set) 24050 rack_undo_cc_pacing(rack); 24051 } 24052 break; 24053 case TCP_RACK_PACING_BETA: 24054 error = EINVAL; 24055 break; 24056 case TCP_RACK_TIMER_SLOP: 24057 RACK_OPTS_INC(tcp_rack_timer_slop); 24058 rack->r_ctl.timer_slop = optval; 24059 if (rack->rc_tp->t_srtt) { 24060 /* 24061 * If we have an SRTT lets update t_rxtcur 24062 * to have the new slop. 24063 */ 24064 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 24065 rack_rto_min, rack_rto_max, 24066 rack->r_ctl.timer_slop); 24067 } 24068 break; 24069 case TCP_RACK_PACING_BETA_ECN: 24070 RACK_OPTS_INC(tcp_rack_beta_ecn); 24071 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 24072 /* This only works for newreno. 
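 * When newreno is in use, a hypothetical caller could, for example,
 * request a beta_ecn of 80 (a NewReno multiplicative-decrease
 * percentage) with something like:
 *
 *	uint32_t val = 80;
 *	setsockopt(fd, IPPROTO_TCP, TCP_RACK_PACING_BETA_ECN,
 *	    &val, sizeof(val));
 *
 * (illustrative values and socket descriptor only, not taken from
 * this file).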
*/ 24073 error = EINVAL; 24074 break; 24075 } 24076 if (rack->rc_pacing_cc_set) { 24077 /* 24078 * Set them into the real CC module; 24079 * what's in the rack pcb are the old values 24080 * to be used on restoral. 24081 */ 24082 sopt.sopt_dir = SOPT_SET; 24083 opt.name = CC_NEWRENO_BETA_ECN; 24084 opt.val = optval; 24085 if (CC_ALGO(tp)->ctl_output != NULL) 24086 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 24087 else 24088 error = ENOENT; 24089 } else { 24090 /* 24091 * Not pacing yet so set it into our local 24092 * rack pcb storage. 24093 */ 24094 rack->r_ctl.rc_saved_beta.beta_ecn = optval; 24095 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED; 24096 } 24097 break; 24098 case TCP_DEFER_OPTIONS: 24099 RACK_OPTS_INC(tcp_defer_opt); 24100 if (optval) { 24101 if (rack->gp_ready) { 24102 /* Too late */ 24103 error = EINVAL; 24104 break; 24105 } 24106 rack->defer_options = 1; 24107 } else 24108 rack->defer_options = 0; 24109 break; 24110 case TCP_RACK_MEASURE_CNT: 24111 RACK_OPTS_INC(tcp_rack_measure_cnt); 24112 if (optval && (optval <= 0xff)) { 24113 rack->r_ctl.req_measurements = optval; 24114 } else 24115 error = EINVAL; 24116 break; 24117 case TCP_REC_ABC_VAL: 24118 RACK_OPTS_INC(tcp_rec_abc_val); 24119 if (optval > 0) 24120 rack->r_use_labc_for_rec = 1; 24121 else 24122 rack->r_use_labc_for_rec = 0; 24123 break; 24124 case TCP_RACK_ABC_VAL: 24125 RACK_OPTS_INC(tcp_rack_abc_val); 24126 if ((optval > 0) && (optval < 255)) 24127 rack->rc_labc = optval; 24128 else 24129 error = EINVAL; 24130 break; 24131 case TCP_HDWR_UP_ONLY: 24132 RACK_OPTS_INC(tcp_pacing_up_only); 24133 if (optval) 24134 rack->r_up_only = 1; 24135 else 24136 rack->r_up_only = 0; 24137 break; 24138 case TCP_FILLCW_RATE_CAP: /* URL:fillcw_cap */ 24139 RACK_OPTS_INC(tcp_fillcw_rate_cap); 24140 rack->r_ctl.fillcw_cap = loptval; 24141 break; 24142 case TCP_PACING_RATE_CAP: 24143 RACK_OPTS_INC(tcp_pacing_rate_cap); 24144 if ((rack->dgp_on == 1) && 24145 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { 24146 /* 24147 * If we are doing DGP we need to switch 24148 * to using the pacing limit. 24149 */ 24150 if (tcp_can_enable_pacing() == 0) { 24151 error = ENOSPC; 24152 break; 24153 } 24154 /* 24155 * Now change up the flags and counts to be correct. 24156 */ 24157 rack->r_ctl.pacing_method |= RACK_REG_PACING; 24158 tcp_dec_dgp_pacing_cnt(); 24159 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 24160 } 24161 rack->r_ctl.bw_rate_cap = loptval; 24162 break; 24163 case TCP_HYBRID_PACING: 24164 if (hybrid == NULL) { 24165 error = EINVAL; 24166 break; 24167 } 24168 if (rack->r_ctl.side_chan_dis_mask & HYBRID_DIS_MASK) { 24169 error = EPERM; 24170 break; 24171 } 24172 error = process_hybrid_pacing(rack, hybrid); 24173 break; 24174 case TCP_SIDECHAN_DIS: /* URL:scodm */ 24175 if (optval) 24176 rack->r_ctl.side_chan_dis_mask = optval; 24177 else 24178 rack->r_ctl.side_chan_dis_mask = 0; 24179 break; 24180 case TCP_RACK_PROFILE: 24181 RACK_OPTS_INC(tcp_profile); 24182 error = rack_set_profile(rack, optval); 24183 break; 24184 case TCP_USE_CMP_ACKS: 24185 RACK_OPTS_INC(tcp_use_cmp_acks); 24186 if ((optval == 0) && (tp->t_flags2 & TF2_MBUF_ACKCMP)) { 24187 /* You can't turn it off once it's on!
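 * (Enabling compressed acks also turns on the mbuf queue and, once
 * the connection is established, the TF2_MBUF_ACKCMP flag, as the
 * code below shows; the option is therefore effectively one-way.)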
*/ 24188 error = EINVAL; 24189 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { 24190 rack->r_use_cmp_ack = 1; 24191 rack->r_mbuf_queue = 1; 24192 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 24193 } 24194 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 24195 tp->t_flags2 |= TF2_MBUF_ACKCMP; 24196 break; 24197 case TCP_SHARED_CWND_TIME_LIMIT: 24198 RACK_OPTS_INC(tcp_lscwnd); 24199 if (optval) 24200 rack->r_limit_scw = 1; 24201 else 24202 rack->r_limit_scw = 0; 24203 break; 24204 case TCP_RACK_DGP_IN_REC: 24205 error = EINVAL; 24206 break; 24207 case TCP_POLICER_DETECT: /* URL:pol_det */ 24208 RACK_OPTS_INC(tcp_pol_detect); 24209 rack_translate_policer_detect(rack, optval); 24210 break; 24211 case TCP_POLICER_MSS: 24212 RACK_OPTS_INC(tcp_pol_mss); 24213 rack->r_ctl.policer_del_mss = (uint8_t)optval; 24214 if (optval & 0x00000100) { 24215 /* 24216 * Value is set up like so: 24217 * VVVV VVVV VVVV VVVV VVVV VVAI MMMM MMMM 24218 * where MMMM MMMM is the MSS setting and 24219 * I (the 9th bit) is the positive bit that 24220 * says it is being set (if it is 0 then the 24221 * upper bits 11 - 32 have no meaning). 24222 * This allows the bw compensation to be 24223 * turned off with 0x000001MM. 24224 * 24225 * The 10th bit (A) is used to turn on the 24226 * alternate median (not the expanded one). 24227 * 24228 */ 24229 rack->r_ctl.pol_bw_comp = (optval >> 10); 24230 } 24231 if (optval & 0x00000200) { 24232 rack->r_ctl.policer_alt_median = 1; 24233 } else { 24234 rack->r_ctl.policer_alt_median = 0; 24235 } 24236 break; 24237 case TCP_RACK_PACE_TO_FILL: 24238 RACK_OPTS_INC(tcp_fillcw); 24239 if (optval == 0) 24240 rack->rc_pace_to_cwnd = 0; 24241 else { 24242 rack->rc_pace_to_cwnd = 1; 24243 } 24244 if ((optval >= rack_gp_rtt_maxmul) && 24245 rack_gp_rtt_maxmul && 24246 (optval < 0xf)) { 24247 rack->rc_pace_fill_if_rttin_range = 1; 24248 rack->rtt_limit_mul = optval; 24249 } else { 24250 rack->rc_pace_fill_if_rttin_range = 0; 24251 rack->rtt_limit_mul = 0; 24252 } 24253 break; 24254 case TCP_RACK_NO_PUSH_AT_MAX: 24255 RACK_OPTS_INC(tcp_npush); 24256 if (optval == 0) 24257 rack->r_ctl.rc_no_push_at_mrtt = 0; 24258 else if (optval < 0xff) 24259 rack->r_ctl.rc_no_push_at_mrtt = optval; 24260 else 24261 error = EINVAL; 24262 break; 24263 case TCP_SHARED_CWND_ENABLE: 24264 RACK_OPTS_INC(tcp_rack_scwnd); 24265 if (optval == 0) 24266 rack->rack_enable_scwnd = 0; 24267 else 24268 rack->rack_enable_scwnd = 1; 24269 break; 24270 case TCP_RACK_MBUF_QUEUE: 24271 /* Now do we use the LRO mbuf-queue feature */ 24272 RACK_OPTS_INC(tcp_rack_mbufq); 24273 if (optval || rack->r_use_cmp_ack) 24274 rack->r_mbuf_queue = 1; 24275 else 24276 rack->r_mbuf_queue = 0; 24277 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 24278 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 24279 else 24280 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 24281 break; 24282 case TCP_RACK_NONRXT_CFG_RATE: 24283 RACK_OPTS_INC(tcp_rack_cfg_rate); 24284 if (optval == 0) 24285 rack->rack_rec_nonrxt_use_cr = 0; 24286 else 24287 rack->rack_rec_nonrxt_use_cr = 1; 24288 break; 24289 case TCP_NO_PRR: 24290 RACK_OPTS_INC(tcp_rack_noprr); 24291 if (optval == 0) 24292 rack->rack_no_prr = 0; 24293 else if (optval == 1) 24294 rack->rack_no_prr = 1; 24295 else if (optval == 2) 24296 rack->no_prr_addback = 1; 24297 else 24298 error = EINVAL; 24299 break; 24300 case RACK_CSPR_IS_FCC: /* URL:csprisfcc */ 24301 if (optval > 0) 24302 rack->cspr_is_fcc = 1; 24303 else 24304 rack->cspr_is_fcc = 0; 24305 break; 24306 case TCP_TIMELY_DYN_ADJ: 24307 RACK_OPTS_INC(tcp_timely_dyn); 24308 if
(optval == 0) 24309 rack->rc_gp_dyn_mul = 0; 24310 else { 24311 rack->rc_gp_dyn_mul = 1; 24312 if (optval >= 100) { 24313 /* 24314 * If the user sets something 100 or more 24315 * its the gp_ca value. 24316 */ 24317 rack->r_ctl.rack_per_of_gp_ca = optval; 24318 } 24319 } 24320 break; 24321 case TCP_RACK_DO_DETECTION: 24322 error = EINVAL; 24323 break; 24324 case TCP_RACK_TLP_USE: 24325 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 24326 error = EINVAL; 24327 break; 24328 } 24329 RACK_OPTS_INC(tcp_tlp_use); 24330 rack->rack_tlp_threshold_use = optval; 24331 break; 24332 case TCP_RACK_TLP_REDUCE: 24333 /* RACK TLP cwnd reduction (bool) */ 24334 RACK_OPTS_INC(tcp_rack_tlp_reduce); 24335 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 24336 break; 24337 /* Pacing related ones */ 24338 case TCP_RACK_PACE_ALWAYS: 24339 /* 24340 * zero is old rack method, 1 is new 24341 * method using a pacing rate. 24342 */ 24343 RACK_OPTS_INC(tcp_rack_pace_always); 24344 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 24345 error = EPERM; 24346 break; 24347 } 24348 if (optval > 0) { 24349 if (rack->rc_always_pace) { 24350 error = EALREADY; 24351 break; 24352 } else if (tcp_can_enable_pacing()) { 24353 rack->r_ctl.pacing_method |= RACK_REG_PACING; 24354 rack->rc_always_pace = 1; 24355 if (rack->rack_hibeta) 24356 rack_set_cc_pacing(rack); 24357 } 24358 else { 24359 error = ENOSPC; 24360 break; 24361 } 24362 } else { 24363 if (rack->rc_always_pace == 1) { 24364 rack_remove_pacing(rack); 24365 } 24366 } 24367 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 24368 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 24369 else 24370 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 24371 /* A rate may be set irate or other, if so set seg size */ 24372 rack_update_seg(rack); 24373 break; 24374 case TCP_BBR_RACK_INIT_RATE: 24375 RACK_OPTS_INC(tcp_initial_rate); 24376 val = optval; 24377 /* Change from kbits per second to bytes per second */ 24378 val *= 1000; 24379 val /= 8; 24380 rack->r_ctl.init_rate = val; 24381 if (rack->rc_always_pace) 24382 rack_update_seg(rack); 24383 break; 24384 case TCP_BBR_IWINTSO: 24385 error = EINVAL; 24386 break; 24387 case TCP_RACK_FORCE_MSEG: 24388 RACK_OPTS_INC(tcp_rack_force_max_seg); 24389 if (optval) 24390 rack->rc_force_max_seg = 1; 24391 else 24392 rack->rc_force_max_seg = 0; 24393 break; 24394 case TCP_RACK_PACE_MIN_SEG: 24395 RACK_OPTS_INC(tcp_rack_min_seg); 24396 rack->r_ctl.rc_user_set_min_segs = (0x0000ffff & optval); 24397 rack_set_pace_segments(tp, rack, __LINE__, NULL); 24398 break; 24399 case TCP_RACK_PACE_MAX_SEG: 24400 /* Max segments size in a pace in bytes */ 24401 RACK_OPTS_INC(tcp_rack_max_seg); 24402 if ((rack->dgp_on == 1) && 24403 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { 24404 /* 24405 * If we set a max-seg and are doing DGP then 24406 * we now fall under the pacing limits not the 24407 * DGP ones. 24408 */ 24409 if (tcp_can_enable_pacing() == 0) { 24410 error = ENOSPC; 24411 break; 24412 } 24413 /* 24414 * Now change up the flags and counts to be correct. 
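 * (i.e. keep the regular pacing slot we just took via
 * tcp_can_enable_pacing() and hand the DGP slot back with
 * tcp_dec_dgp_pacing_cnt().)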
24415 */ 24416 rack->r_ctl.pacing_method |= RACK_REG_PACING; 24417 tcp_dec_dgp_pacing_cnt(); 24418 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 24419 } 24420 if (optval <= MAX_USER_SET_SEG) 24421 rack->rc_user_set_max_segs = optval; 24422 else 24423 rack->rc_user_set_max_segs = MAX_USER_SET_SEG; 24424 rack_set_pace_segments(tp, rack, __LINE__, NULL); 24425 break; 24426 case TCP_RACK_PACE_RATE_REC: 24427 /* Set the fixed pacing rate in Bytes per second ca */ 24428 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 24429 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 24430 error = EPERM; 24431 break; 24432 } 24433 if (rack->dgp_on) { 24434 /* 24435 * We are already pacing another 24436 * way. 24437 */ 24438 error = EBUSY; 24439 break; 24440 } 24441 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 24442 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 24443 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 24444 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 24445 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 24446 rack->use_fixed_rate = 1; 24447 if (rack->rack_hibeta) 24448 rack_set_cc_pacing(rack); 24449 rack_log_pacing_delay_calc(rack, 24450 rack->r_ctl.rc_fixed_pacing_rate_ss, 24451 rack->r_ctl.rc_fixed_pacing_rate_ca, 24452 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 24453 __LINE__, NULL,0); 24454 break; 24455 24456 case TCP_RACK_PACE_RATE_SS: 24457 /* Set the fixed pacing rate in Bytes per second ca */ 24458 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 24459 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 24460 error = EPERM; 24461 break; 24462 } 24463 if (rack->dgp_on) { 24464 /* 24465 * We are already pacing another 24466 * way. 24467 */ 24468 error = EBUSY; 24469 break; 24470 } 24471 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 24472 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 24473 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 24474 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 24475 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 24476 rack->use_fixed_rate = 1; 24477 if (rack->rack_hibeta) 24478 rack_set_cc_pacing(rack); 24479 rack_log_pacing_delay_calc(rack, 24480 rack->r_ctl.rc_fixed_pacing_rate_ss, 24481 rack->r_ctl.rc_fixed_pacing_rate_ca, 24482 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 24483 __LINE__, NULL, 0); 24484 break; 24485 24486 case TCP_RACK_PACE_RATE_CA: 24487 /* Set the fixed pacing rate in Bytes per second ca */ 24488 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 24489 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 24490 error = EPERM; 24491 break; 24492 } 24493 if (rack->dgp_on) { 24494 /* 24495 * We are already pacing another 24496 * way. 
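 * (Fixed-rate pacing and DGP are mutually exclusive; DGP would have
 * to be disabled first, e.g. by setting TCP_RACK_PROFILE back to 0,
 * before a fixed rate in bytes per second is accepted here.)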
24497 */ 24498 error = EBUSY; 24499 break; 24500 } 24501 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 24502 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 24503 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 24504 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 24505 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 24506 rack->use_fixed_rate = 1; 24507 if (rack->rack_hibeta) 24508 rack_set_cc_pacing(rack); 24509 rack_log_pacing_delay_calc(rack, 24510 rack->r_ctl.rc_fixed_pacing_rate_ss, 24511 rack->r_ctl.rc_fixed_pacing_rate_ca, 24512 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 24513 __LINE__, NULL, 0); 24514 break; 24515 case TCP_RACK_GP_INCREASE_REC: 24516 RACK_OPTS_INC(tcp_gp_inc_rec); 24517 rack->r_ctl.rack_per_of_gp_rec = optval; 24518 rack_log_pacing_delay_calc(rack, 24519 rack->r_ctl.rack_per_of_gp_ss, 24520 rack->r_ctl.rack_per_of_gp_ca, 24521 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 24522 __LINE__, NULL, 0); 24523 break; 24524 case TCP_RACK_GP_INCREASE_CA: 24525 RACK_OPTS_INC(tcp_gp_inc_ca); 24526 ca = optval; 24527 if (ca < 100) { 24528 /* 24529 * We don't allow any reduction 24530 * over the GP b/w. 24531 */ 24532 error = EINVAL; 24533 break; 24534 } 24535 rack->r_ctl.rack_per_of_gp_ca = ca; 24536 rack_log_pacing_delay_calc(rack, 24537 rack->r_ctl.rack_per_of_gp_ss, 24538 rack->r_ctl.rack_per_of_gp_ca, 24539 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 24540 __LINE__, NULL, 0); 24541 break; 24542 case TCP_RACK_GP_INCREASE_SS: 24543 RACK_OPTS_INC(tcp_gp_inc_ss); 24544 ss = optval; 24545 if (ss < 100) { 24546 /* 24547 * We don't allow any reduction 24548 * over the GP b/w. 24549 */ 24550 error = EINVAL; 24551 break; 24552 } 24553 rack->r_ctl.rack_per_of_gp_ss = ss; 24554 rack_log_pacing_delay_calc(rack, 24555 rack->r_ctl.rack_per_of_gp_ss, 24556 rack->r_ctl.rack_per_of_gp_ca, 24557 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 24558 __LINE__, NULL, 0); 24559 break; 24560 case TCP_RACK_RR_CONF: 24561 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 24562 if (optval && optval <= 3) 24563 rack->r_rr_config = optval; 24564 else 24565 rack->r_rr_config = 0; 24566 break; 24567 case TCP_PACING_DND: /* URL:dnd */ 24568 if (optval > 0) 24569 rack->rc_pace_dnd = 1; 24570 else 24571 rack->rc_pace_dnd = 0; 24572 break; 24573 case TCP_HDWR_RATE_CAP: 24574 RACK_OPTS_INC(tcp_hdwr_rate_cap); 24575 if (optval) { 24576 if (rack->r_rack_hw_rate_caps == 0) 24577 rack->r_rack_hw_rate_caps = 1; 24578 else 24579 error = EALREADY; 24580 } else { 24581 rack->r_rack_hw_rate_caps = 0; 24582 } 24583 break; 24584 case TCP_DGP_UPPER_BOUNDS: 24585 { 24586 uint8_t val; 24587 val = optval & 0x0000ff; 24588 rack->r_ctl.rack_per_upper_bound_ca = val; 24589 val = (optval >> 16) & 0x0000ff; 24590 rack->r_ctl.rack_per_upper_bound_ss = val; 24591 break; 24592 } 24593 case TCP_SS_EEXIT: /* URL:eexit */ 24594 if (optval > 0) { 24595 rack->r_ctl.gp_rnd_thresh = optval & 0x0ff; 24596 if (optval & 0x10000) { 24597 rack->r_ctl.gate_to_fs = 1; 24598 } else { 24599 rack->r_ctl.gate_to_fs = 0; 24600 } 24601 if (optval & 0x20000) { 24602 rack->r_ctl.use_gp_not_last = 1; 24603 } else { 24604 rack->r_ctl.use_gp_not_last = 0; 24605 } 24606 if (optval & 0xfffc0000) { 24607 uint32_t v; 24608 24609 v = (optval >> 18) & 0x00003fff; 24610 if (v >= 1000) 24611 rack->r_ctl.gp_gain_req = v; 24612 } 24613 } else { 24614 /* We do not do ss early exit at all */ 24615 rack->rc_initial_ss_comp = 1; 24616 rack->r_ctl.gp_rnd_thresh = 0; 24617 } 24618 break; 24619 case TCP_RACK_SPLIT_LIMIT: 24620 RACK_OPTS_INC(tcp_split_limit); 24621 rack->r_ctl.rc_split_limit = 
optval; 24622 break; 24623 case TCP_BBR_HDWR_PACE: 24624 RACK_OPTS_INC(tcp_hdwr_pacing); 24625 if (optval){ 24626 if (rack->rack_hdrw_pacing == 0) { 24627 rack->rack_hdw_pace_ena = 1; 24628 rack->rack_attempt_hdwr_pace = 0; 24629 } else 24630 error = EALREADY; 24631 } else { 24632 rack->rack_hdw_pace_ena = 0; 24633 #ifdef RATELIMIT 24634 if (rack->r_ctl.crte != NULL) { 24635 rack->rack_hdrw_pacing = 0; 24636 rack->rack_attempt_hdwr_pace = 0; 24637 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 24638 rack->r_ctl.crte = NULL; 24639 } 24640 #endif 24641 } 24642 break; 24643 /* End Pacing related ones */ 24644 case TCP_RACK_PRR_SENDALOT: 24645 /* Allow PRR to send more than one seg */ 24646 RACK_OPTS_INC(tcp_rack_prr_sendalot); 24647 rack->r_ctl.rc_prr_sendalot = optval; 24648 break; 24649 case TCP_RACK_MIN_TO: 24650 /* Minimum time between rack t-o's in ms */ 24651 RACK_OPTS_INC(tcp_rack_min_to); 24652 rack->r_ctl.rc_min_to = optval; 24653 break; 24654 case TCP_RACK_EARLY_SEG: 24655 /* If early recovery max segments */ 24656 RACK_OPTS_INC(tcp_rack_early_seg); 24657 rack->r_ctl.rc_early_recovery_segs = optval; 24658 break; 24659 case TCP_RACK_ENABLE_HYSTART: 24660 { 24661 if (optval) { 24662 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 24663 if (rack_do_hystart > RACK_HYSTART_ON) 24664 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 24665 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 24666 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 24667 } else { 24668 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 24669 } 24670 } 24671 break; 24672 case TCP_RACK_REORD_THRESH: 24673 /* RACK reorder threshold (shift amount) */ 24674 RACK_OPTS_INC(tcp_rack_reord_thresh); 24675 if ((optval > 0) && (optval < 31)) 24676 rack->r_ctl.rc_reorder_shift = optval; 24677 else 24678 error = EINVAL; 24679 break; 24680 case TCP_RACK_REORD_FADE: 24681 /* Does reordering fade after ms time */ 24682 RACK_OPTS_INC(tcp_rack_reord_fade); 24683 rack->r_ctl.rc_reorder_fade = optval; 24684 break; 24685 case TCP_RACK_TLP_THRESH: 24686 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 24687 RACK_OPTS_INC(tcp_rack_tlp_thresh); 24688 if (optval) 24689 rack->r_ctl.rc_tlp_threshold = optval; 24690 else 24691 error = EINVAL; 24692 break; 24693 case TCP_BBR_USE_RACK_RR: 24694 RACK_OPTS_INC(tcp_rack_rr); 24695 if (optval) 24696 rack->use_rack_rr = 1; 24697 else 24698 rack->use_rack_rr = 0; 24699 break; 24700 case TCP_RACK_PKT_DELAY: 24701 /* RACK added ms i.e. rack-rtt + reord + N */ 24702 RACK_OPTS_INC(tcp_rack_pkt_delay); 24703 rack->r_ctl.rc_pkt_delay = optval; 24704 break; 24705 case TCP_DELACK: 24706 RACK_OPTS_INC(tcp_rack_delayed_ack); 24707 if (optval == 0) 24708 tp->t_delayed_ack = 0; 24709 else 24710 tp->t_delayed_ack = 1; 24711 if (tp->t_flags & TF_DELACK) { 24712 tp->t_flags &= ~TF_DELACK; 24713 tp->t_flags |= TF_ACKNOW; 24714 NET_EPOCH_ENTER(et); 24715 rack_output(tp); 24716 NET_EPOCH_EXIT(et); 24717 } 24718 break; 24719 24720 case TCP_BBR_RACK_RTT_USE: 24721 RACK_OPTS_INC(tcp_rack_rtt_use); 24722 if ((optval != USE_RTT_HIGH) && 24723 (optval != USE_RTT_LOW) && 24724 (optval != USE_RTT_AVG)) 24725 error = EINVAL; 24726 else 24727 rack->r_ctl.rc_rate_sample_method = optval; 24728 break; 24729 case TCP_HONOR_HPTS_MIN: 24730 RACK_OPTS_INC(tcp_honor_hpts); 24731 if (optval) { 24732 rack->r_use_hpts_min = 1; 24733 /* 24734 * Must be between 2 - 80% to be a reduction else 24735 * we keep the default (10%). 
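 * For example, a (hypothetical) optval of 25 would allow the hpts
 * minimum sleep to be reduced by at most 25%, while an optval of 1
 * or anything above 80 enables the feature but leaves the default
 * 10% cap in place.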
24736 */ 24737 if ((optval > 1) && (optval <= 80)) { 24738 rack->r_ctl.max_reduction = optval; 24739 } 24740 } else 24741 rack->r_use_hpts_min = 0; 24742 break; 24743 case TCP_REC_IS_DYN: /* URL:dynrec */ 24744 RACK_OPTS_INC(tcp_dyn_rec); 24745 if (optval) 24746 rack->rc_gp_no_rec_chg = 1; 24747 else 24748 rack->rc_gp_no_rec_chg = 0; 24749 break; 24750 case TCP_NO_TIMELY: 24751 RACK_OPTS_INC(tcp_notimely); 24752 if (optval) { 24753 rack->rc_skip_timely = 1; 24754 rack->r_ctl.rack_per_of_gp_rec = 90; 24755 rack->r_ctl.rack_per_of_gp_ca = 100; 24756 rack->r_ctl.rack_per_of_gp_ss = 250; 24757 } else { 24758 rack->rc_skip_timely = 0; 24759 } 24760 break; 24761 case TCP_GP_USE_LTBW: 24762 if (optval == 0) { 24763 rack->use_lesser_lt_bw = 0; 24764 rack->dis_lt_bw = 1; 24765 } else if (optval == 1) { 24766 rack->use_lesser_lt_bw = 1; 24767 rack->dis_lt_bw = 0; 24768 } else if (optval == 2) { 24769 rack->use_lesser_lt_bw = 0; 24770 rack->dis_lt_bw = 0; 24771 } 24772 break; 24773 case TCP_DATA_AFTER_CLOSE: 24774 RACK_OPTS_INC(tcp_data_after_close); 24775 if (optval) 24776 rack->rc_allow_data_af_clo = 1; 24777 else 24778 rack->rc_allow_data_af_clo = 0; 24779 break; 24780 default: 24781 break; 24782 } 24783 tcp_log_socket_option(tp, sopt_name, optval, error); 24784 return (error); 24785 } 24786 24787 static void 24788 rack_inherit(struct tcpcb *tp, struct inpcb *parent) 24789 { 24790 /* 24791 * A new connection has been created (tp) and 24792 * the parent is the inpcb given. We want to 24793 * apply a read-lock to the parent (we are already 24794 * holding a write lock on the tp) and copy anything 24795 * out of the rack specific data as long as its tfb is 24796 * the same as ours i.e. we are the same stack. Otherwise 24797 * we just return. 24798 */ 24799 struct tcpcb *par; 24800 struct tcp_rack *dest, *src; 24801 int cnt = 0; 24802 24803 par = intotcpcb(parent); 24804 if (par->t_fb != tp->t_fb) { 24805 /* Not the same stack */ 24806 tcp_log_socket_option(tp, 0, 0, 1); 24807 return; 24808 } 24809 /* Ok if we reach here lets setup the two rack pointers */ 24810 dest = (struct tcp_rack *)tp->t_fb_ptr; 24811 src = (struct tcp_rack *)par->t_fb_ptr; 24812 if ((src == NULL) || (dest == NULL)) { 24813 /* Huh? */ 24814 tcp_log_socket_option(tp, 0, 0, 2); 24815 return; 24816 } 24817 /* Now copy out anything we wish to inherit i.e. 
things in socket-options */ 24818 /* TCP_RACK_PROFILE we can't know but we can set DGP if its on */ 24819 if ((src->dgp_on) && (dest->dgp_on == 0)) { 24820 /* Profile 1 had to be set via sock opt */ 24821 rack_set_dgp(dest); 24822 cnt++; 24823 } 24824 /* TCP_RACK_SET_RXT_OPTIONS */ 24825 if (dest->full_size_rxt != src->full_size_rxt) { 24826 dest->full_size_rxt = src->full_size_rxt; 24827 cnt++; 24828 } 24829 if (dest->shape_rxt_to_pacing_min != src->shape_rxt_to_pacing_min) { 24830 dest->shape_rxt_to_pacing_min = src->shape_rxt_to_pacing_min; 24831 cnt++; 24832 } 24833 /* TCP_RACK_DSACK_OPT */ 24834 if (dest->rc_rack_tmr_std_based != src->rc_rack_tmr_std_based) { 24835 dest->rc_rack_tmr_std_based = src->rc_rack_tmr_std_based; 24836 cnt++; 24837 } 24838 if (dest->rc_rack_use_dsack != src->rc_rack_use_dsack) { 24839 dest->rc_rack_use_dsack = src->rc_rack_use_dsack; 24840 cnt++; 24841 } 24842 /* TCP_RACK_PACING_DIVISOR */ 24843 if (dest->r_ctl.pace_len_divisor != src->r_ctl.pace_len_divisor) { 24844 dest->r_ctl.pace_len_divisor = src->r_ctl.pace_len_divisor; 24845 cnt++; 24846 } 24847 /* TCP_RACK_HI_BETA */ 24848 if (src->rack_hibeta != dest->rack_hibeta) { 24849 cnt++; 24850 if (src->rack_hibeta) { 24851 dest->r_ctl.rc_saved_beta.beta = src->r_ctl.rc_saved_beta.beta; 24852 dest->rack_hibeta = 1; 24853 } else { 24854 dest->rack_hibeta = 0; 24855 } 24856 } 24857 /* TCP_RACK_TIMER_SLOP */ 24858 if (dest->r_ctl.timer_slop != src->r_ctl.timer_slop) { 24859 dest->r_ctl.timer_slop = src->r_ctl.timer_slop; 24860 cnt++; 24861 } 24862 /* TCP_RACK_PACING_BETA_ECN */ 24863 if (dest->r_ctl.rc_saved_beta.beta_ecn != src->r_ctl.rc_saved_beta.beta_ecn) { 24864 dest->r_ctl.rc_saved_beta.beta_ecn = src->r_ctl.rc_saved_beta.beta_ecn; 24865 cnt++; 24866 } 24867 if (dest->r_ctl.rc_saved_beta.newreno_flags != src->r_ctl.rc_saved_beta.newreno_flags) { 24868 dest->r_ctl.rc_saved_beta.newreno_flags = src->r_ctl.rc_saved_beta.newreno_flags; 24869 cnt++; 24870 } 24871 /* We do not do TCP_DEFER_OPTIONS */ 24872 /* TCP_RACK_MEASURE_CNT */ 24873 if (dest->r_ctl.req_measurements != src->r_ctl.req_measurements) { 24874 dest->r_ctl.req_measurements = src->r_ctl.req_measurements; 24875 cnt++; 24876 } 24877 /* TCP_HDWR_UP_ONLY */ 24878 if (dest->r_up_only != src->r_up_only) { 24879 dest->r_up_only = src->r_up_only; 24880 cnt++; 24881 } 24882 /* TCP_FILLCW_RATE_CAP */ 24883 if (dest->r_ctl.fillcw_cap != src->r_ctl.fillcw_cap) { 24884 dest->r_ctl.fillcw_cap = src->r_ctl.fillcw_cap; 24885 cnt++; 24886 } 24887 /* TCP_PACING_RATE_CAP */ 24888 if (dest->r_ctl.bw_rate_cap != src->r_ctl.bw_rate_cap) { 24889 dest->r_ctl.bw_rate_cap = src->r_ctl.bw_rate_cap; 24890 cnt++; 24891 } 24892 /* A listener can't set TCP_HYBRID_PACING */ 24893 /* TCP_SIDECHAN_DIS */ 24894 if (dest->r_ctl.side_chan_dis_mask != src->r_ctl.side_chan_dis_mask) { 24895 dest->r_ctl.side_chan_dis_mask = src->r_ctl.side_chan_dis_mask; 24896 cnt++; 24897 } 24898 /* TCP_SHARED_CWND_TIME_LIMIT */ 24899 if (dest->r_limit_scw != src->r_limit_scw) { 24900 dest->r_limit_scw = src->r_limit_scw; 24901 cnt++; 24902 } 24903 /* TCP_POLICER_DETECT */ 24904 if (dest->r_ctl.policer_rxt_threshold != src->r_ctl.policer_rxt_threshold) { 24905 dest->r_ctl.policer_rxt_threshold = src->r_ctl.policer_rxt_threshold; 24906 cnt++; 24907 } 24908 if (dest->r_ctl.policer_avg_threshold != src->r_ctl.policer_avg_threshold) { 24909 dest->r_ctl.policer_avg_threshold = src->r_ctl.policer_avg_threshold; 24910 cnt++; 24911 } 24912 if (dest->r_ctl.policer_med_threshold != 
src->r_ctl.policer_med_threshold) { 24913 dest->r_ctl.policer_med_threshold = src->r_ctl.policer_med_threshold; 24914 cnt++; 24915 } 24916 if (dest->policer_detect_on != src->policer_detect_on) { 24917 dest->policer_detect_on = src->policer_detect_on; 24918 cnt++; 24919 } 24920 24921 if (dest->r_ctl.saved_policer_val != src->r_ctl.saved_policer_val) { 24922 dest->r_ctl.saved_policer_val = src->r_ctl.saved_policer_val; 24923 cnt++; 24924 } 24925 /* TCP_POLICER_MSS */ 24926 if (dest->r_ctl.policer_del_mss != src->r_ctl.policer_del_mss) { 24927 dest->r_ctl.policer_del_mss = src->r_ctl.policer_del_mss; 24928 cnt++; 24929 } 24930 24931 if (dest->r_ctl.pol_bw_comp != src->r_ctl.pol_bw_comp) { 24932 dest->r_ctl.pol_bw_comp = src->r_ctl.pol_bw_comp; 24933 cnt++; 24934 } 24935 24936 if (dest->r_ctl.policer_alt_median != src->r_ctl.policer_alt_median) { 24937 dest->r_ctl.policer_alt_median = src->r_ctl.policer_alt_median; 24938 cnt++; 24939 } 24940 /* TCP_RACK_PACE_TO_FILL */ 24941 if (dest->rc_pace_to_cwnd != src->rc_pace_to_cwnd) { 24942 dest->rc_pace_to_cwnd = src->rc_pace_to_cwnd; 24943 cnt++; 24944 } 24945 if (dest->rc_pace_fill_if_rttin_range != src->rc_pace_fill_if_rttin_range) { 24946 dest->rc_pace_fill_if_rttin_range = src->rc_pace_fill_if_rttin_range; 24947 cnt++; 24948 } 24949 if (dest->rtt_limit_mul != src->rtt_limit_mul) { 24950 dest->rtt_limit_mul = src->rtt_limit_mul; 24951 cnt++; 24952 } 24953 /* TCP_RACK_NO_PUSH_AT_MAX */ 24954 if (dest->r_ctl.rc_no_push_at_mrtt != src->r_ctl.rc_no_push_at_mrtt) { 24955 dest->r_ctl.rc_no_push_at_mrtt = src->r_ctl.rc_no_push_at_mrtt; 24956 cnt++; 24957 } 24958 /* TCP_SHARED_CWND_ENABLE */ 24959 if (dest->rack_enable_scwnd != src->rack_enable_scwnd) { 24960 dest->rack_enable_scwnd = src->rack_enable_scwnd; 24961 cnt++; 24962 } 24963 /* TCP_USE_CMP_ACKS */ 24964 if (dest->r_use_cmp_ack != src->r_use_cmp_ack) { 24965 dest->r_use_cmp_ack = src->r_use_cmp_ack; 24966 cnt++; 24967 } 24968 24969 if (dest->r_mbuf_queue != src->r_mbuf_queue) { 24970 dest->r_mbuf_queue = src->r_mbuf_queue; 24971 cnt++; 24972 } 24973 /* TCP_RACK_MBUF_QUEUE */ 24974 if (dest->r_mbuf_queue != src->r_mbuf_queue) { 24975 dest->r_mbuf_queue = src->r_mbuf_queue; 24976 cnt++; 24977 } 24978 if (dest->r_mbuf_queue || dest->rc_always_pace || dest->r_use_cmp_ack) { 24979 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 24980 } else { 24981 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 24982 } 24983 if (dest->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) { 24984 tp->t_flags2 |= TF2_MBUF_ACKCMP; 24985 } 24986 /* TCP_RACK_NONRXT_CFG_RATE */ 24987 if (dest->rack_rec_nonrxt_use_cr != src->rack_rec_nonrxt_use_cr) { 24988 dest->rack_rec_nonrxt_use_cr = src->rack_rec_nonrxt_use_cr; 24989 cnt++; 24990 } 24991 /* TCP_NO_PRR */ 24992 if (dest->rack_no_prr != src->rack_no_prr) { 24993 dest->rack_no_prr = src->rack_no_prr; 24994 cnt++; 24995 } 24996 if (dest->no_prr_addback != src->no_prr_addback) { 24997 dest->no_prr_addback = src->no_prr_addback; 24998 cnt++; 24999 } 25000 /* RACK_CSPR_IS_FCC */ 25001 if (dest->cspr_is_fcc != src->cspr_is_fcc) { 25002 dest->cspr_is_fcc = src->cspr_is_fcc; 25003 cnt++; 25004 } 25005 /* TCP_TIMELY_DYN_ADJ */ 25006 if (dest->rc_gp_dyn_mul != src->rc_gp_dyn_mul) { 25007 dest->rc_gp_dyn_mul = src->rc_gp_dyn_mul; 25008 cnt++; 25009 } 25010 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { 25011 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; 25012 cnt++; 25013 } 25014 /* TCP_RACK_TLP_USE */ 25015 if (dest->rack_tlp_threshold_use != 
src->rack_tlp_threshold_use) { 25016 dest->rack_tlp_threshold_use = src->rack_tlp_threshold_use; 25017 cnt++; 25018 } 25019 /* we don't allow inheritence of TCP_RACK_PACE_ALWAYS */ 25020 /* TCP_BBR_RACK_INIT_RATE */ 25021 if (dest->r_ctl.init_rate != src->r_ctl.init_rate) { 25022 dest->r_ctl.init_rate = src->r_ctl.init_rate; 25023 cnt++; 25024 } 25025 /* TCP_RACK_FORCE_MSEG */ 25026 if (dest->rc_force_max_seg != src->rc_force_max_seg) { 25027 dest->rc_force_max_seg = src->rc_force_max_seg; 25028 cnt++; 25029 } 25030 /* TCP_RACK_PACE_MIN_SEG */ 25031 if (dest->r_ctl.rc_user_set_min_segs != src->r_ctl.rc_user_set_min_segs) { 25032 dest->r_ctl.rc_user_set_min_segs = src->r_ctl.rc_user_set_min_segs; 25033 cnt++; 25034 } 25035 /* we don't allow TCP_RACK_PACE_MAX_SEG */ 25036 /* TCP_RACK_PACE_RATE_REC, TCP_RACK_PACE_RATE_SS, TCP_RACK_PACE_RATE_CA */ 25037 if (dest->r_ctl.rc_fixed_pacing_rate_ca != src->r_ctl.rc_fixed_pacing_rate_ca) { 25038 dest->r_ctl.rc_fixed_pacing_rate_ca = src->r_ctl.rc_fixed_pacing_rate_ca; 25039 cnt++; 25040 } 25041 if (dest->r_ctl.rc_fixed_pacing_rate_ss != src->r_ctl.rc_fixed_pacing_rate_ss) { 25042 dest->r_ctl.rc_fixed_pacing_rate_ss = src->r_ctl.rc_fixed_pacing_rate_ss; 25043 cnt++; 25044 } 25045 if (dest->r_ctl.rc_fixed_pacing_rate_rec != src->r_ctl.rc_fixed_pacing_rate_rec) { 25046 dest->r_ctl.rc_fixed_pacing_rate_rec = src->r_ctl.rc_fixed_pacing_rate_rec; 25047 cnt++; 25048 } 25049 /* TCP_RACK_GP_INCREASE_REC, TCP_RACK_GP_INCREASE_CA, TCP_RACK_GP_INCREASE_SS */ 25050 if (dest->r_ctl.rack_per_of_gp_rec != src->r_ctl.rack_per_of_gp_rec) { 25051 dest->r_ctl.rack_per_of_gp_rec = src->r_ctl.rack_per_of_gp_rec; 25052 cnt++; 25053 } 25054 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { 25055 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; 25056 cnt++; 25057 } 25058 25059 if (dest->r_ctl.rack_per_of_gp_ss != src->r_ctl.rack_per_of_gp_ss) { 25060 dest->r_ctl.rack_per_of_gp_ss = src->r_ctl.rack_per_of_gp_ss; 25061 cnt++; 25062 } 25063 /* TCP_RACK_RR_CONF */ 25064 if (dest->r_rr_config != src->r_rr_config) { 25065 dest->r_rr_config = src->r_rr_config; 25066 cnt++; 25067 } 25068 /* TCP_PACING_DND */ 25069 if (dest->rc_pace_dnd != src->rc_pace_dnd) { 25070 dest->rc_pace_dnd = src->rc_pace_dnd; 25071 cnt++; 25072 } 25073 /* TCP_HDWR_RATE_CAP */ 25074 if (dest->r_rack_hw_rate_caps != src->r_rack_hw_rate_caps) { 25075 dest->r_rack_hw_rate_caps = src->r_rack_hw_rate_caps; 25076 cnt++; 25077 } 25078 /* TCP_DGP_UPPER_BOUNDS */ 25079 if (dest->r_ctl.rack_per_upper_bound_ca != src->r_ctl.rack_per_upper_bound_ca) { 25080 dest->r_ctl.rack_per_upper_bound_ca = src->r_ctl.rack_per_upper_bound_ca; 25081 cnt++; 25082 } 25083 if (dest->r_ctl.rack_per_upper_bound_ss != src->r_ctl.rack_per_upper_bound_ss) { 25084 dest->r_ctl.rack_per_upper_bound_ss = src->r_ctl.rack_per_upper_bound_ss; 25085 cnt++; 25086 } 25087 /* TCP_SS_EEXIT */ 25088 if (dest->r_ctl.gp_rnd_thresh != src->r_ctl.gp_rnd_thresh) { 25089 dest->r_ctl.gp_rnd_thresh = src->r_ctl.gp_rnd_thresh; 25090 cnt++; 25091 } 25092 if (dest->r_ctl.gate_to_fs != src->r_ctl.gate_to_fs) { 25093 dest->r_ctl.gate_to_fs = src->r_ctl.gate_to_fs; 25094 cnt++; 25095 } 25096 if (dest->r_ctl.use_gp_not_last != src->r_ctl.use_gp_not_last) { 25097 dest->r_ctl.use_gp_not_last = src->r_ctl.use_gp_not_last; 25098 cnt++; 25099 } 25100 if (dest->r_ctl.gp_gain_req != src->r_ctl.gp_gain_req) { 25101 dest->r_ctl.gp_gain_req = src->r_ctl.gp_gain_req; 25102 cnt++; 25103 } 25104 /* TCP_BBR_HDWR_PACE */ 25105 if 
(dest->rack_hdw_pace_ena != src->rack_hdw_pace_ena) { 25106 dest->rack_hdw_pace_ena = src->rack_hdw_pace_ena; 25107 cnt++; 25108 } 25109 if (dest->rack_attempt_hdwr_pace != src->rack_attempt_hdwr_pace) { 25110 dest->rack_attempt_hdwr_pace = src->rack_attempt_hdwr_pace; 25111 cnt++; 25112 } 25113 /* TCP_RACK_PRR_SENDALOT */ 25114 if (dest->r_ctl.rc_prr_sendalot != src->r_ctl.rc_prr_sendalot) { 25115 dest->r_ctl.rc_prr_sendalot = src->r_ctl.rc_prr_sendalot; 25116 cnt++; 25117 } 25118 /* TCP_RACK_MIN_TO */ 25119 if (dest->r_ctl.rc_min_to != src->r_ctl.rc_min_to) { 25120 dest->r_ctl.rc_min_to = src->r_ctl.rc_min_to; 25121 cnt++; 25122 } 25123 /* TCP_RACK_EARLY_SEG */ 25124 if (dest->r_ctl.rc_early_recovery_segs != src->r_ctl.rc_early_recovery_segs) { 25125 dest->r_ctl.rc_early_recovery_segs = src->r_ctl.rc_early_recovery_segs; 25126 cnt++; 25127 } 25128 /* TCP_RACK_ENABLE_HYSTART */ 25129 if (par->t_ccv.flags != tp->t_ccv.flags) { 25130 cnt++; 25131 if (par->t_ccv.flags & CCF_HYSTART_ALLOWED) { 25132 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 25133 if (rack_do_hystart > RACK_HYSTART_ON) 25134 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 25135 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 25136 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 25137 } else { 25138 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 25139 } 25140 } 25141 /* TCP_RACK_REORD_THRESH */ 25142 if (dest->r_ctl.rc_reorder_shift != src->r_ctl.rc_reorder_shift) { 25143 dest->r_ctl.rc_reorder_shift = src->r_ctl.rc_reorder_shift; 25144 cnt++; 25145 } 25146 /* TCP_RACK_REORD_FADE */ 25147 if (dest->r_ctl.rc_reorder_fade != src->r_ctl.rc_reorder_fade) { 25148 dest->r_ctl.rc_reorder_fade = src->r_ctl.rc_reorder_fade; 25149 cnt++; 25150 } 25151 /* TCP_RACK_TLP_THRESH */ 25152 if (dest->r_ctl.rc_tlp_threshold != src->r_ctl.rc_tlp_threshold) { 25153 dest->r_ctl.rc_tlp_threshold = src->r_ctl.rc_tlp_threshold; 25154 cnt++; 25155 } 25156 /* TCP_BBR_USE_RACK_RR */ 25157 if (dest->use_rack_rr != src->use_rack_rr) { 25158 dest->use_rack_rr = src->use_rack_rr; 25159 cnt++; 25160 } 25161 /* TCP_RACK_PKT_DELAY */ 25162 if (dest->r_ctl.rc_pkt_delay != src->r_ctl.rc_pkt_delay) { 25163 dest->r_ctl.rc_pkt_delay = src->r_ctl.rc_pkt_delay; 25164 cnt++; 25165 } 25166 /* TCP_DELACK will get copied via the main code if applicable */ 25167 /* TCP_BBR_RACK_RTT_USE */ 25168 if (dest->r_ctl.rc_rate_sample_method != src->r_ctl.rc_rate_sample_method) { 25169 dest->r_ctl.rc_rate_sample_method = src->r_ctl.rc_rate_sample_method; 25170 cnt++; 25171 } 25172 /* TCP_HONOR_HPTS_MIN */ 25173 if (dest->r_use_hpts_min != src->r_use_hpts_min) { 25174 dest->r_use_hpts_min = src->r_use_hpts_min; 25175 cnt++; 25176 } 25177 if (dest->r_ctl.max_reduction != src->r_ctl.max_reduction) { 25178 dest->r_ctl.max_reduction = src->r_ctl.max_reduction; 25179 cnt++; 25180 } 25181 /* TCP_REC_IS_DYN */ 25182 if (dest->rc_gp_no_rec_chg != src->rc_gp_no_rec_chg) { 25183 dest->rc_gp_no_rec_chg = src->rc_gp_no_rec_chg; 25184 cnt++; 25185 } 25186 if (dest->rc_skip_timely != src->rc_skip_timely) { 25187 dest->rc_skip_timely = src->rc_skip_timely; 25188 cnt++; 25189 } 25190 /* TCP_DATA_AFTER_CLOSE */ 25191 if (dest->rc_allow_data_af_clo != src->rc_allow_data_af_clo) { 25192 dest->rc_allow_data_af_clo = src->rc_allow_data_af_clo; 25193 cnt++; 25194 } 25195 /* TCP_GP_USE_LTBW */ 25196 if (src->use_lesser_lt_bw != dest->use_lesser_lt_bw) { 25197 dest->use_lesser_lt_bw = src->use_lesser_lt_bw; 25198 cnt++; 25199 } 25200 if (dest->dis_lt_bw != src->dis_lt_bw) { 25201 
dest->dis_lt_bw = src->dis_lt_bw; 25202 cnt++; 25203 } 25204 tcp_log_socket_option(tp, 0, cnt, 0); 25205 } 25206 25207 25208 static void 25209 rack_apply_deferred_options(struct tcp_rack *rack) 25210 { 25211 struct deferred_opt_list *dol, *sdol; 25212 uint32_t s_optval; 25213 25214 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) { 25215 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 25216 /* The disadvantage of deferral is that you lose the error return */ 25217 s_optval = (uint32_t)dol->optval; 25218 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval, NULL); 25219 free(dol, M_TCPDO); 25220 } 25221 } 25222 25223 static void 25224 rack_hw_tls_change(struct tcpcb *tp, int chg) 25225 { 25226 /* Update HW tls state */ 25227 struct tcp_rack *rack; 25228 25229 rack = (struct tcp_rack *)tp->t_fb_ptr; 25230 if (chg) 25231 rack->r_ctl.fsb.hw_tls = 1; 25232 else 25233 rack->r_ctl.fsb.hw_tls = 0; 25234 } 25235 25236 static int 25237 rack_pru_options(struct tcpcb *tp, int flags) 25238 { 25239 if (flags & PRUS_OOB) 25240 return (EOPNOTSUPP); 25241 return (0); 25242 } 25243 25244 static bool 25245 rack_wake_check(struct tcpcb *tp) 25246 { 25247 struct tcp_rack *rack; 25248 struct timeval tv; 25249 uint32_t cts; 25250 25251 rack = (struct tcp_rack *)tp->t_fb_ptr; 25252 if (rack->r_ctl.rc_hpts_flags) { 25253 cts = tcp_get_usecs(&tv); 25254 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == PACE_PKT_OUTPUT){ 25255 /* 25256 * Pacing timer is up, check if we are ready. 25257 */ 25258 if (TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) 25259 return (true); 25260 } else if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) != 0) { 25261 /* 25262 * A timer is up, check if we are ready. 25263 */ 25264 if (TSTMP_GEQ(cts, rack->r_ctl.rc_timer_exp)) 25265 return (true); 25266 } 25267 } 25268 return (false); 25269 } 25270 25271 static struct tcp_function_block __tcp_rack = { 25272 .tfb_tcp_block_name = __XSTRING(STACKNAME), 25273 .tfb_tcp_output = rack_output, 25274 .tfb_do_queued_segments = ctf_do_queued_segments, 25275 .tfb_do_segment_nounlock = rack_do_segment_nounlock, 25276 .tfb_tcp_do_segment = rack_do_segment, 25277 .tfb_tcp_ctloutput = rack_ctloutput, 25278 .tfb_tcp_fb_init = rack_init, 25279 .tfb_tcp_fb_fini = rack_fini, 25280 .tfb_tcp_timer_stop_all = rack_stopall, 25281 .tfb_tcp_rexmit_tmr = rack_remxt_tmr, 25282 .tfb_tcp_handoff_ok = rack_handoff_ok, 25283 .tfb_tcp_mtu_chg = rack_mtu_change, 25284 .tfb_pru_options = rack_pru_options, 25285 .tfb_hwtls_change = rack_hw_tls_change, 25286 .tfb_chg_query = rack_chg_query, 25287 .tfb_switch_failed = rack_switch_failed, 25288 .tfb_early_wake_check = rack_wake_check, 25289 .tfb_compute_pipe = rack_compute_pipe, 25290 .tfb_stack_info = rack_stack_information, 25291 .tfb_inherit = rack_inherit, 25292 .tfb_flags = TCP_FUNC_OUTPUT_CANDROP | TCP_FUNC_DEFAULT_OK, 25293 25294 }; 25295 25296 /* 25297 * rack_ctloutput() must drop the inpcb lock before performing copyin on 25298 * socket option arguments. When it re-acquires the lock after the copy, it 25299 * has to revalidate that the connection is still valid for the socket 25300 * option.
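 * The shape of that pattern, roughly sketched:
 *
 *	INP_WUNLOCK(inp);
 *	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
 *	INP_WLOCK(inp);
 *	(then re-check that the connection was not dropped before
 *	 applying optval)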
 */
static int
rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt)
{
	struct inpcb *inp = tptoinpcb(tp);
#ifdef INET
	struct ip *ip;
#endif
	struct tcp_rack *rack;
	struct tcp_hybrid_req hybrid;
	uint64_t loptval;
	int32_t error = 0, optval;

	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack == NULL) {
		INP_WUNLOCK(inp);
		return (EINVAL);
	}
#ifdef INET
	ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
#endif

	switch (sopt->sopt_level) {
#ifdef INET6
	case IPPROTO_IPV6:
		MPASS(inp->inp_vflag & INP_IPV6PROTO);
		switch (sopt->sopt_name) {
		case IPV6_USE_MIN_MTU:
			tcp6_use_min_mtu(tp);
			break;
		}
		INP_WUNLOCK(inp);
		return (0);
#endif
#ifdef INET
	case IPPROTO_IP:
		switch (sopt->sopt_name) {
		case IP_TOS:
			/*
			 * The DSCP codepoint has changed, update the fsb.
			 */
			ip->ip_tos = rack->rc_inp->inp_ip_tos;
			break;
		case IP_TTL:
			/*
			 * The TTL has changed, update the fsb.
			 */
			ip->ip_ttl = rack->rc_inp->inp_ip_ttl;
			break;
		}
		INP_WUNLOCK(inp);
		return (0);
#endif
#ifdef SO_PEERPRIO
	case SOL_SOCKET:
		switch (sopt->sopt_name) {
		case SO_PEERPRIO:		/* SC-URL:bs */
			/* Already read in and sanity checked in sosetopt(). */
			if (inp->inp_socket) {
				rack->client_bufferlvl = inp->inp_socket->so_peerprio;
			}
			break;
		}
		INP_WUNLOCK(inp);
		return (0);
#endif
	case IPPROTO_TCP:
		switch (sopt->sopt_name) {
		case TCP_RACK_TLP_REDUCE:	/* URL:tlp_reduce */
		/* Pacing related ones */
		case TCP_RACK_PACE_ALWAYS:	/* URL:pace_always */
		case TCP_BBR_RACK_INIT_RATE:	/* URL:irate */
		case TCP_RACK_PACE_MIN_SEG:	/* URL:pace_min_seg */
		case TCP_RACK_PACE_MAX_SEG:	/* URL:pace_max_seg */
		case TCP_RACK_FORCE_MSEG:	/* URL:force_max_seg */
		case TCP_RACK_PACE_RATE_CA:	/* URL:pr_ca */
		case TCP_RACK_PACE_RATE_SS:	/* URL:pr_ss */
		case TCP_RACK_PACE_RATE_REC:	/* URL:pr_rec */
		case TCP_RACK_GP_INCREASE_CA:	/* URL:gp_inc_ca */
		case TCP_RACK_GP_INCREASE_SS:	/* URL:gp_inc_ss */
		case TCP_RACK_GP_INCREASE_REC:	/* URL:gp_inc_rec */
		case TCP_RACK_RR_CONF:		/* URL:rrr_conf */
		case TCP_BBR_HDWR_PACE:		/* URL:hdwrpace */
		case TCP_HDWR_RATE_CAP:		/* URL:hdwrcap boolean */
		case TCP_PACING_RATE_CAP:	/* URL:cap -- used by side-channel */
		case TCP_HDWR_UP_ONLY:		/* URL:uponly -- hardware pacing boolean */
		case TCP_FILLCW_RATE_CAP:	/* URL:fillcw_cap */
		case TCP_RACK_PACING_BETA_ECN:	/* URL:pacing_beta_ecn */
		case TCP_RACK_PACE_TO_FILL:	/* URL:fillcw */
		/* End pacing related */
		case TCP_POLICER_DETECT:	/* URL:pol_det */
		case TCP_POLICER_MSS:		/* URL:pol_mss */
		case TCP_DELACK:		/* URL:delack (in base TCP i.e. tcp_hints along with cc etc.) */
		case TCP_RACK_PRR_SENDALOT:	/* URL:prr_sendalot */
		case TCP_RACK_MIN_TO:		/* URL:min_to */
		case TCP_RACK_EARLY_SEG:	/* URL:early_seg */
		case TCP_RACK_REORD_THRESH:	/* URL:reord_thresh */
		case TCP_RACK_REORD_FADE:	/* URL:reord_fade */
		case TCP_RACK_TLP_THRESH:	/* URL:tlp_thresh */
		case TCP_RACK_PKT_DELAY:	/* URL:pkt_delay */
		case TCP_RACK_TLP_USE:		/* URL:tlp_use */
		case TCP_BBR_RACK_RTT_USE:	/* URL:rttuse */
		case TCP_BBR_USE_RACK_RR:	/* URL:rackrr */
		case TCP_NO_PRR:		/* URL:noprr */
		case TCP_TIMELY_DYN_ADJ:	/* URL:dynamic */
		case TCP_DATA_AFTER_CLOSE:	/* no URL */
		case TCP_RACK_NONRXT_CFG_RATE:	/* URL:nonrxtcr */
		case TCP_SHARED_CWND_ENABLE:	/* URL:scwnd */
		case TCP_RACK_MBUF_QUEUE:	/* URL:mqueue */
		case TCP_RACK_NO_PUSH_AT_MAX:	/* URL:npush */
		case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */
		case TCP_RACK_PROFILE:		/* URL:profile */
		case TCP_SIDECHAN_DIS:		/* URL:scodm */
		case TCP_HYBRID_PACING:		/* URL:pacing=hybrid */
		case TCP_USE_CMP_ACKS:		/* URL:cmpack */
		case TCP_RACK_ABC_VAL:		/* URL:labc */
		case TCP_REC_ABC_VAL:		/* URL:reclabc */
		case TCP_RACK_MEASURE_CNT:	/* URL:measurecnt */
		case TCP_DEFER_OPTIONS:		/* URL:defer */
		case TCP_RACK_DSACK_OPT:	/* URL:dsack */
		case TCP_RACK_TIMER_SLOP:	/* URL:timer_slop */
		case TCP_RACK_ENABLE_HYSTART:	/* URL:hystart */
		case TCP_RACK_SET_RXT_OPTIONS:	/* URL:rxtsz */
		case TCP_RACK_HI_BETA:		/* URL:hibeta */
		case TCP_RACK_SPLIT_LIMIT:	/* URL:split */
		case TCP_SS_EEXIT:		/* URL:eexit */
		case TCP_DGP_UPPER_BOUNDS:	/* URL:upper */
		case TCP_RACK_PACING_DIVISOR:	/* URL:divisor */
		case TCP_PACING_DND:		/* URL:dnd */
		case TCP_NO_TIMELY:		/* URL:notimely */
		case RACK_CSPR_IS_FCC:		/* URL:csprisfcc */
		case TCP_HONOR_HPTS_MIN:	/* URL:hptsmin */
		case TCP_REC_IS_DYN:		/* URL:dynrec */
		case TCP_GP_USE_LTBW:		/* URL:useltbw */
			goto process_opt;
			break;
		default:
			/* Filter off all unknown options to the base stack */
			return (tcp_default_ctloutput(tp, sopt));
			break;
		}
	default:
		INP_WUNLOCK(inp);
		return (0);
	}
process_opt:
	INP_WUNLOCK(inp);
	if ((sopt->sopt_name == TCP_PACING_RATE_CAP) ||
	    (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) {
		error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval));
		/*
		 * We truncate it down to 32 bits for the socket-option trace;
		 * this means rates > 34Gbps won't show right, but that's
		 * probably ok.
		 */
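		/*
		 * (The caps are kept in bytes per second; UINT32_MAX
		 * bytes/sec is roughly 4.29 GB/sec, i.e. about 34.36
		 * Gbit/sec, which is where the 34Gbps figure above
		 * comes from.)
		 */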
		optval = (uint32_t)loptval;
	} else if (sopt->sopt_name == TCP_HYBRID_PACING) {
		error = sooptcopyin(sopt, &hybrid, sizeof(hybrid), sizeof(hybrid));
	} else {
		error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
		/* Save it in 64 bit form too */
		loptval = optval;
	}
	if (error)
		return (error);
	INP_WLOCK(inp);
	if (tp->t_fb != &__tcp_rack) {
		INP_WUNLOCK(inp);
		return (ENOPROTOOPT);
	}
	if (rack->defer_options && (rack->gp_ready == 0) &&
	    (sopt->sopt_name != TCP_DEFER_OPTIONS) &&
	    (sopt->sopt_name != TCP_HYBRID_PACING) &&
	    (sopt->sopt_name != TCP_RACK_SET_RXT_OPTIONS) &&
	    (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) &&
	    (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) {
		/* Options are being deferred */
		if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) {
			INP_WUNLOCK(inp);
			return (0);
		} else {
			/* No memory to defer, fail */
			INP_WUNLOCK(inp);
			return (ENOMEM);
		}
	}
	error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval, &hybrid);
	INP_WUNLOCK(inp);
	return (error);
}

static void
rack_fill_info(struct tcpcb *tp, struct tcp_info *ti)
{

	INP_WLOCK_ASSERT(tptoinpcb(tp));
	bzero(ti, sizeof(*ti));

	ti->tcpi_state = tp->t_state;
	if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
		ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tp->t_flags & TF_SACK_PERMIT)
		ti->tcpi_options |= TCPI_OPT_SACK;
	if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
		ti->tcpi_options |= TCPI_OPT_WSCALE;
		ti->tcpi_snd_wscale = tp->snd_scale;
		ti->tcpi_rcv_wscale = tp->rcv_scale;
	}
	if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))
		ti->tcpi_options |= TCPI_OPT_ECN;
	if (tp->t_flags & TF_FASTOPEN)
		ti->tcpi_options |= TCPI_OPT_TFO;
	/* t_rcvtime is still kept in ticks */
	ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick;
	/* Since we hold everything in precise useconds this is easy */
	ti->tcpi_rtt = tp->t_srtt;
	ti->tcpi_rttvar = tp->t_rttvar;
	ti->tcpi_rto = tp->t_rxtcur;
	ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
	ti->tcpi_snd_cwnd = tp->snd_cwnd;
	/*
	 * FreeBSD-specific extension fields for tcp_info.
	 */
	ti->tcpi_rcv_space = tp->rcv_wnd;
	ti->tcpi_rcv_nxt = tp->rcv_nxt;
	ti->tcpi_snd_wnd = tp->snd_wnd;
	ti->tcpi_snd_bwnd = 0;		/* Unused, kept for compat. */
	ti->tcpi_snd_nxt = tp->snd_nxt;
	ti->tcpi_snd_mss = tp->t_maxseg;
	ti->tcpi_rcv_mss = tp->t_maxseg;
	ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
	ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
	ti->tcpi_snd_zerowin = tp->t_sndzerowin;
	ti->tcpi_total_tlp = tp->t_sndtlppack;
	ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte;
	ti->tcpi_rttmin = tp->t_rttlow;
#ifdef NETFLIX_STATS
	memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo));
#endif
#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE) {
		ti->tcpi_options |= TCPI_OPT_TOE;
		tcp_offload_tcp_info(tp, ti);
	}
#endif
}

static int
rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt)
{
	struct inpcb *inp = tptoinpcb(tp);
	struct tcp_rack *rack;
	int32_t error, optval;
	uint64_t val, loptval;
	struct tcp_info ti;
	/*
	 * Because all our options are either boolean or an int, we can just
	 * pull everything into optval and then unlock and copy. If we ever
	 * add an option that is not an int, then this will have quite an
	 * impact on this routine.
	 */
	error = 0;
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack == NULL) {
		INP_WUNLOCK(inp);
		return (EINVAL);
	}
	switch (sopt->sopt_name) {
	case TCP_INFO:
		/* First get the info filled */
		rack_fill_info(tp, &ti);
		/* Fix up the rtt related fields if needed */
		INP_WUNLOCK(inp);
		error = sooptcopyout(sopt, &ti, sizeof ti);
		return (error);
	/*
	 * Beta is the congestion control value for NewReno that influences how
	 * much of a backoff happens when loss is detected. It is normally set
	 * to 50 for 50%, i.e. the cwnd is reduced to 50% of its previous value
	 * when you exit recovery.
	 */
	case TCP_RACK_PACING_BETA:
		break;
	/*
	 * Beta_ecn is the congestion control value for NewReno that influences
	 * how much of a backoff happens when an ECN mark is detected. It is
	 * normally set to 80 for 80%, i.e. the cwnd is reduced by 20% of its
	 * previous value when you exit recovery. Note that classic ECN has a
	 * beta of 50; it is only ABE ECN that uses this "less" value, but we
	 * do too with pacing :)
	 */

	case TCP_RACK_PACING_BETA_ECN:
		if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0)
			error = EINVAL;
		else if (rack->rc_pacing_cc_set == 0)
			optval = rack->r_ctl.rc_saved_beta.beta_ecn;
		else {
			/*
			 * Reach out into the CC data and report back what
			 * I have previously set. Yeah it looks hackish but
			 * we don't want to report the saved values.
			 */
			if (tp->t_ccv.cc_data)
				optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn;
			else
				error = EINVAL;
		}
		break;
	case TCP_RACK_DSACK_OPT:
		optval = 0;
		if (rack->rc_rack_tmr_std_based) {
			optval |= 1;
		}
		if (rack->rc_rack_use_dsack) {
			optval |= 2;
		}
		break;
	case TCP_RACK_ENABLE_HYSTART:
	{
		if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) {
			optval = RACK_HYSTART_ON;
			if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND)
				optval = RACK_HYSTART_ON_W_SC;
			if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH)
				optval = RACK_HYSTART_ON_W_SC_C;
		} else {
			optval = RACK_HYSTART_OFF;
		}
	}
	break;
	case TCP_RACK_DGP_IN_REC:
		error = EINVAL;
		break;
	case TCP_RACK_HI_BETA:
		optval = rack->rack_hibeta;
		break;
	case TCP_POLICER_MSS:
		optval = rack->r_ctl.policer_del_mss;
		break;
	case TCP_POLICER_DETECT:
		optval = rack->r_ctl.saved_policer_val;
		break;
	case TCP_DEFER_OPTIONS:
		optval = rack->defer_options;
		break;
	case TCP_RACK_MEASURE_CNT:
		optval = rack->r_ctl.req_measurements;
		break;
	case TCP_REC_ABC_VAL:
		optval = rack->r_use_labc_for_rec;
		break;
	case TCP_RACK_ABC_VAL:
		optval = rack->rc_labc;
		break;
	case TCP_HDWR_UP_ONLY:
		optval = rack->r_up_only;
		break;
	case TCP_FILLCW_RATE_CAP:
		loptval = rack->r_ctl.fillcw_cap;
		break;
	case TCP_PACING_RATE_CAP:
		loptval = rack->r_ctl.bw_rate_cap;
		break;
	case TCP_RACK_PROFILE:
		/* You cannot retrieve a profile, it's write only */
		error = EINVAL;
		break;
	case TCP_SIDECHAN_DIS:
		optval = rack->r_ctl.side_chan_dis_mask;
		break;
	case TCP_HYBRID_PACING:
		/* You cannot retrieve hybrid pacing information, it's write only */
		error = EINVAL;
		break;
	case TCP_USE_CMP_ACKS:
		optval = rack->r_use_cmp_ack;
		break;
	case TCP_RACK_PACE_TO_FILL:
		optval = rack->rc_pace_to_cwnd;
		break;
	case TCP_RACK_NO_PUSH_AT_MAX:
		optval = rack->r_ctl.rc_no_push_at_mrtt;
		break;
	case TCP_SHARED_CWND_ENABLE:
		optval = rack->rack_enable_scwnd;
		break;
	case TCP_RACK_NONRXT_CFG_RATE:
		optval = rack->rack_rec_nonrxt_use_cr;
		break;
	case TCP_NO_PRR:
		if (rack->rack_no_prr == 1)
			optval = 1;
		else if (rack->no_prr_addback == 1)
			optval = 2;
		else
			optval = 0;
		break;
	case TCP_GP_USE_LTBW:
		if (rack->dis_lt_bw) {
			/* It is not used */
			optval = 0;
		} else if (rack->use_lesser_lt_bw) {
			/* we use min() */
			optval = 1;
		} else {
			/* we use max() */
			optval = 2;
		}
		break;
	case TCP_RACK_DO_DETECTION:
		error = EINVAL;
		break;
	case TCP_RACK_MBUF_QUEUE:
		/* Now do we use the LRO mbuf-queue feature */
		optval = rack->r_mbuf_queue;
		break;
	case RACK_CSPR_IS_FCC:
		optval = rack->cspr_is_fcc;
		break;
	case TCP_TIMELY_DYN_ADJ:
		optval = rack->rc_gp_dyn_mul;
		break;
	case TCP_BBR_IWINTSO:
		error = EINVAL;
		break;
	case TCP_RACK_TLP_REDUCE:
		/* RACK TLP cwnd reduction (bool) */
		optval = rack->r_ctl.rc_tlp_cwnd_reduce;
		break;
	case TCP_BBR_RACK_INIT_RATE:
		val = rack->r_ctl.init_rate;
		/* convert to kbits per sec */
		val *= 8;
		val /= 1000;
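		/*
		 * For example, an init_rate of 1,250,000 bytes/sec
		 * reports here as 10,000 kbits/sec (10 Mbit/s).
		 */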
		optval = (uint32_t)val;
		break;
	case TCP_RACK_FORCE_MSEG:
		optval = rack->rc_force_max_seg;
		break;
	case TCP_RACK_PACE_MIN_SEG:
		optval = rack->r_ctl.rc_user_set_min_segs;
		break;
	case TCP_RACK_PACE_MAX_SEG:
		/* Max segments in a pace */
		optval = rack->rc_user_set_max_segs;
		break;
	case TCP_RACK_PACE_ALWAYS:
		/* Use the always pace method */
		optval = rack->rc_always_pace;
		break;
	case TCP_RACK_PRR_SENDALOT:
		/* Allow PRR to send more than one seg */
		optval = rack->r_ctl.rc_prr_sendalot;
		break;
	case TCP_RACK_MIN_TO:
		/* Minimum time between rack t-o's in ms */
		optval = rack->r_ctl.rc_min_to;
		break;
	case TCP_RACK_SPLIT_LIMIT:
		optval = rack->r_ctl.rc_split_limit;
		break;
	case TCP_RACK_EARLY_SEG:
		/* If early recovery max segments */
		optval = rack->r_ctl.rc_early_recovery_segs;
		break;
	case TCP_RACK_REORD_THRESH:
		/* RACK reorder threshold (shift amount) */
		optval = rack->r_ctl.rc_reorder_shift;
		break;
	case TCP_SS_EEXIT:
		if (rack->r_ctl.gp_rnd_thresh) {
			uint32_t v;

			v = rack->r_ctl.gp_gain_req;
			v <<= 17;
			optval = v | (rack->r_ctl.gp_rnd_thresh & 0xff);
			if (rack->r_ctl.gate_to_fs == 1)
				optval |= 0x10000;
		} else
			optval = 0;
		break;
	case TCP_RACK_REORD_FADE:
		/* Does reordering fade after ms time */
		optval = rack->r_ctl.rc_reorder_fade;
		break;
	case TCP_BBR_USE_RACK_RR:
		/* Do we use the rack cheat for rxt */
		optval = rack->use_rack_rr;
		break;
	case TCP_RACK_RR_CONF:
		optval = rack->r_rr_config;
		break;
	case TCP_HDWR_RATE_CAP:
		optval = rack->r_rack_hw_rate_caps;
		break;
	case TCP_BBR_HDWR_PACE:
		optval = rack->rack_hdw_pace_ena;
		break;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
		optval = rack->r_ctl.rc_tlp_threshold;
		break;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms i.e. rack-rtt + reord + N */
		optval = rack->r_ctl.rc_pkt_delay;
		break;
	case TCP_RACK_TLP_USE:
		optval = rack->rack_tlp_threshold_use;
		break;
	case TCP_PACING_DND:
		optval = rack->rc_pace_dnd;
		break;
	case TCP_RACK_PACE_RATE_CA:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
		break;
	case TCP_RACK_PACE_RATE_SS:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
		break;
	case TCP_RACK_PACE_RATE_REC:
		optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
		break;
	case TCP_DGP_UPPER_BOUNDS:
		optval = rack->r_ctl.rack_per_upper_bound_ss;
		optval <<= 16;
		optval |= rack->r_ctl.rack_per_upper_bound_ca;
		break;
	case TCP_RACK_GP_INCREASE_SS:
		optval = rack->r_ctl.rack_per_of_gp_ss;
		break;
	case TCP_RACK_GP_INCREASE_CA:
		optval = rack->r_ctl.rack_per_of_gp_ca;
		break;
	case TCP_RACK_PACING_DIVISOR:
		optval = rack->r_ctl.pace_len_divisor;
		break;
	case TCP_BBR_RACK_RTT_USE:
		optval = rack->r_ctl.rc_rate_sample_method;
		break;
	case TCP_DELACK:
		optval = tp->t_delayed_ack;
		break;
	case TCP_DATA_AFTER_CLOSE:
		optval = rack->rc_allow_data_af_clo;
		break;
	case TCP_SHARED_CWND_TIME_LIMIT:
		optval = rack->r_limit_scw;
		break;
	case TCP_HONOR_HPTS_MIN:
		if (rack->r_use_hpts_min)
			optval = rack->r_ctl.max_reduction;
		else
			optval = 0;
		break;
	case TCP_REC_IS_DYN:
		optval = rack->rc_gp_no_rec_chg;
		break;
	case TCP_NO_TIMELY:
		optval = rack->rc_skip_timely;
		break;
	case TCP_RACK_TIMER_SLOP:
		optval = rack->r_ctl.timer_slop;
		break;
	default:
		return (tcp_default_ctloutput(tp, sopt));
		break;
	}
	INP_WUNLOCK(inp);
	if (error == 0) {
		if ((sopt->sopt_name == TCP_PACING_RATE_CAP) ||
		    (sopt->sopt_name == TCP_FILLCW_RATE_CAP))
			error = sooptcopyout(sopt, &loptval, sizeof loptval);
		else
			error = sooptcopyout(sopt, &optval, sizeof optval);
	}
	return (error);
}

static int
rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt)
{
	if (sopt->sopt_dir == SOPT_SET) {
		return (rack_set_sockopt(tp, sopt));
	} else if (sopt->sopt_dir == SOPT_GET) {
		return (rack_get_sockopt(tp, sopt));
	} else {
		panic("%s: sopt_dir %d", __func__, sopt->sopt_dir);
	}
}

static const char *rack_stack_names[] = {
	__XSTRING(STACKNAME),
#ifdef STACKALIAS
	__XSTRING(STACKALIAS),
#endif
};

static int
rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
{
	memset(mem, 0, size);
	return (0);
}

static void
rack_dtor(void *mem, int32_t size, void *arg)
{

}

static bool rack_mod_inited = false;

static int
tcp_addrack(module_t mod, int32_t type, void *data)
{
	int32_t err = 0;
	int num_stacks;

	switch (type) {
	case MOD_LOAD:
		rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
		    sizeof(struct rack_sendmap),
		    rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);

		rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
		    sizeof(struct tcp_rack),
		    rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);

		sysctl_ctx_init(&rack_sysctl_ctx);
		rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
		    OID_AUTO,
#ifdef STACKALIAS
		    __XSTRING(STACKALIAS),
#else
		    __XSTRING(STACKNAME),
#endif
		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
		    "");
		if (rack_sysctl_root == NULL) {
			printf("Failed to add sysctl node\n");
			err = EFAULT;
			goto free_uma;
		}
		rack_init_sysctls();
		num_stacks = nitems(rack_stack_names);
		err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
		    rack_stack_names, &num_stacks);
		if (err) {
			printf("Failed to register %s stack name for "
			    "%s module\n", rack_stack_names[num_stacks],
			    __XSTRING(MODNAME));
			sysctl_ctx_free(&rack_sysctl_ctx);
free_uma:
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			rack_counter_destroy();
			printf("Failed to register rack module -- err:%d\n", err);
			return (err);
		}
		tcp_lro_reg_mbufq();
		rack_mod_inited = true;
		break;
	case MOD_QUIESCE:
		err = deregister_tcp_functions(&__tcp_rack, true, false);
		break;
	case MOD_UNLOAD:
		err = deregister_tcp_functions(&__tcp_rack, false, true);
		if (err == EBUSY)
			break;
		if (rack_mod_inited) {
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			sysctl_ctx_free(&rack_sysctl_ctx);
			rack_counter_destroy();
			rack_mod_inited = false;
		}
		tcp_lro_dereg_mbufq();
		err = 0;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (err);
}

static moduledata_t tcp_rack = {
	.name = __XSTRING(MODNAME),
	.evhand = tcp_addrack,
	.priv = 0
};

MODULE_VERSION(MODNAME, 1);
DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);

#endif /* defined(INET) || defined(INET6) */
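
/*
 * A minimal userland sketch (not part of this module) of exercising the
 * option handlers above.  It assumes the stack is registered under the
 * default STACKNAME "rack" and that <netinet/tcp.h> provides
 * TCP_FUNCTION_BLK, struct tcp_function_set and TCP_RACK_PACE_ALWAYS,
 * as on FreeBSD:
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <string.h>
 *
 *	static int
 *	use_rack_with_pacing(int fd)
 *	{
 *		struct tcp_function_set tfs;
 *		int on = 1;
 *
 *		memset(&tfs, 0, sizeof(tfs));
 *		strlcpy(tfs.function_set_name, "rack",
 *		    sizeof(tfs.function_set_name));
 *		// Move the connection onto this stack; after this,
 *		// TCP-level options are routed to rack_ctloutput().
 *		if (setsockopt(fd, IPPROTO_TCP, TCP_FUNCTION_BLK,
 *		    &tfs, sizeof(tfs)) == -1)
 *			return (-1);
 *		// URL:pace_always -- request pacing on every send.
 *		return (setsockopt(fd, IPPROTO_TCP, TCP_RACK_PACE_ALWAYS,
 *		    &on, sizeof(on)));
 *	}
 */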