/*-
 * Copyright (c) 2016-2020 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_ratelimit.h"
#include "opt_kern_tls.h"
#if defined(INET) || defined(INET6)
#include <sys/param.h>
#include <sys/arb.h>
#include <sys/module.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#ifdef STATS
#include <sys/qmath.h>
#include <sys/tree.h>
#include <sys/stats.h>		/* Must come after qmath.h and tree.h */
#else
#include <sys/tree.h>
#endif
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/tim_filter.h>
#include <sys/smp.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>
#include <sys/protosw.h>
#ifdef TCP_ACCOUNTING
#include <sys/sched.h>
#include <machine/cpu.h>
#endif
#include <vm/uma.h>

#include <net/route.h>
#include <net/route/nhop.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define	TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_syncache.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_ratelimit.h>
#include <netinet/tcp_accounting.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_newreno.h>
#include <netinet/tcp_fastopen.h>
#include <netinet/tcp_lro.h>
#ifdef NETFLIX_SHARED_CWND
#include <netinet/tcp_shared_cwnd.h>
#endif
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcp_ecn.h>

#include <netipsec/ipsec_support.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif				/* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
#include "sack_filter.h"
#include "tcp_rack.h"
#include "tailq_hash.h"
#include "rack_bbr_common.h"

uma_zone_t rack_zone;
uma_zone_t rack_pcb_zone;

#ifndef TICKS2SBT
#define	TICKS2SBT(__t)	(tick_sbt * ((sbintime_t)(__t)))
#endif

VNET_DECLARE(uint32_t, newreno_beta);
VNET_DECLARE(uint32_t, newreno_beta_ecn);
#define V_newreno_beta VNET(newreno_beta)
#define V_newreno_beta_ecn VNET(newreno_beta_ecn)

/*
 * STACKNAME is supplied by the stack build glue (e.g. "rack"), so
 * M_TCPFSB below expands to M_TCPFSBrack and the malloc tag string
 * to "tcp_fsb_rack".
 */
#define	M_TCPFSB	__CONCAT(M_TCPFSB, STACKNAME)
#define	M_TCPDO		__CONCAT(M_TCPDO, STACKNAME)

MALLOC_DEFINE(M_TCPFSB, "tcp_fsb_" __XSTRING(STACKNAME), "TCP fast send block");
MALLOC_DEFINE(M_TCPDO, "tcp_do_" __XSTRING(STACKNAME), "TCP deferred options");
MALLOC_DEFINE(M_TCPPCM, "tcp_pcm_" __XSTRING(STACKNAME), "TCP PCM measurement information");

struct sysctl_ctx_list rack_sysctl_ctx;
struct sysctl_oid *rack_sysctl_root;

#define CUM_ACKED 1
#define SACKED 2

/*
 * The RACK module incorporates a number of
 * TCP ideas that have been put out into the IETF
 * over the last few years:
 * - Matt Mathis's Rate Halving which slowly drops
 *   the congestion window so that the ack clock can
 *   be maintained during a recovery.
 * - Yuchung Cheng's RACK TCP (for which it is named) that
 *   will stop us using the number of dup acks and instead
 *   use time as the gauge of when we retransmit.
 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
 *   of Dukkipati et al.
 * RACK depends on SACK, so if an endpoint arrives that
 * cannot do SACK the state machine below will shuttle the
 * connection back to using the "default" TCP stack that is
 * in FreeBSD.
 *
 * To implement RACK the original TCP stack was first decomposed
 * into a functional state machine with individual states
 * for each of the possible TCP connection states. The do_segment
 * function's role in life is to mandate that the connection supports
 * SACK initially and then assure that the RACK state matches the
 * connection state before calling the state's do_segment function.
 * Each state is simplified due to the fact that the original
 * do_segment has been decomposed and we *know* what state we are in
 * (no switches on the state) and all tests for SACK are gone. This
 * greatly simplifies what each state does.
 *
 * TCP output is also overwritten with a new version since it
 * must maintain the new rack scoreboard.
 *
 */
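/*
 * Illustrative sketch only (not the actual dispatch code): one way to
 * picture the decomposition described above is a table of per-state
 * handlers, indexed by the TCP state, that do_segment hands the
 * segment to once SACK support and the rack state have been verified.
 * The handlers named here are forward declared further down in this
 * file; the table itself is hypothetical and kept under #if 0.
 */
#if 0
static int (* const example_state_handlers[TCP_NSTATES])(struct mbuf *,
    struct tcphdr *, struct socket *, struct tcpcb *, struct tcpopt *,
    int32_t, int32_t, uint32_t, int32_t, int32_t, uint8_t) = {
	[TCPS_SYN_SENT]		= rack_do_syn_sent,
	[TCPS_SYN_RECEIVED]	= rack_do_syn_recv,
	[TCPS_ESTABLISHED]	= rack_do_established,
	[TCPS_CLOSE_WAIT]	= rack_do_close_wait,
	[TCPS_FIN_WAIT_1]	= rack_do_fin_wait_1,
	[TCPS_CLOSING]		= rack_do_closing,
	[TCPS_LAST_ACK]		= rack_do_lastack,
	[TCPS_FIN_WAIT_2]	= rack_do_fin_wait_2,
};
#endif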
static int32_t rack_tlp_thresh = 1;
static int32_t rack_tlp_limit = 2;	/* No more than 2 TLPs w-out new data */
static int32_t rack_tlp_use_greater = 1;
static int32_t rack_reorder_thresh = 2;
static int32_t rack_reorder_fade = 60000000;	/* 0 - never fade, def 60,000,000
						 * - 60 seconds */
static uint16_t rack_policer_rxt_thresh = 0;	/* 499 = 49.9%, 0 is off */
static uint8_t rack_policer_avg_thresh = 0;	/* 3.2 */
static uint8_t rack_policer_med_thresh = 0;	/* 1 - 16 */
static uint16_t rack_policer_bucket_reserve = 20; /* How much % is reserved in the bucket */
static uint64_t rack_pol_min_bw = 125000;	/* 1mbps in Bytes per sec */
static uint32_t rack_policer_data_thresh = 64000;	/* 64,000 bytes must be sent before we engage */
static uint32_t rack_policing_do_bw_comp = 1;
static uint32_t rack_pcm_every_n_rounds = 100;
static uint32_t rack_pcm_blast = 0;
static uint32_t rack_pcm_is_enabled = 1;
static uint8_t rack_req_del_mss = 18;	/* How many segments need to be sent in a recovery episode to do policer_detection */
static uint8_t rack_ssthresh_rest_rto_rec = 0;	/* Do we restore ssthresh when we have rec -> rto -> rec */

static uint32_t rack_gp_gain_req = 1200;	/* Amount percent wise required to gain to record a round as "gaining" */
static uint32_t rack_rnd_cnt_req = 0x10005;	/* Default number of rounds if we are below rack_gp_gain_req where we exit ss */

static int32_t rack_rxt_scoreboard_clear_thresh = 2;
static int32_t rack_dnd_default = 0;	/* For rr_conf = 3, what is the default for dnd */
static int32_t rack_rxt_controls = 0;
static int32_t rack_fill_cw_state = 0;
static uint8_t rack_req_measurements = 1;
/* Attack threshold detections */
static uint32_t rack_highest_sack_thresh_seen = 0;
static uint32_t rack_highest_move_thresh_seen = 0;
static uint32_t rack_merge_out_sacks_on_attack = 0;
static int32_t rack_enable_hw_pacing = 0;	/* Due to CCSP keep it off by default */
static int32_t rack_hw_pace_extra_slots = 0;	/* 2 extra MSS time betweens */
static int32_t rack_hw_rate_caps = 0;		/* 1; */
static int32_t rack_hw_rate_cap_per = 0;	/* 0 -- off  */
static int32_t rack_hw_rate_min = 0;		/* 1500000; */
static int32_t rack_hw_rate_to_low = 0;		/* 1200000; */
static int32_t rack_hw_up_only = 0;
static int32_t rack_stats_gets_ms_rtt = 1;
static int32_t rack_prr_addbackmax = 2;
static int32_t rack_do_hystart = 0;
static int32_t rack_apply_rtt_with_reduced_conf = 0;
static int32_t rack_hibeta_setting = 0;
static int32_t rack_default_pacing_divisor = 250;
static uint16_t rack_pacing_min_seg = 0;
static int32_t rack_timely_off = 0;

static uint32_t sad_seg_size_per = 800;	/* 80.0 % */
static int32_t rack_pkt_delay = 1000;
static int32_t rack_send_a_lot_in_prr = 1;
static int32_t rack_min_to = 1000;	/* Number of microsecond min timeout */
static int32_t rack_verbose_logging = 0;
static int32_t rack_ignore_data_after_close = 1;
static int32_t rack_enable_shared_cwnd = 1;
static int32_t rack_use_cmp_acks = 1;
static int32_t rack_use_fsb = 1;
static int32_t rack_use_rfo = 1;
static int32_t rack_use_rsm_rfo = 1;
static int32_t rack_max_abc_post_recovery = 2;
static int32_t rack_client_low_buf = 0;
static int32_t rack_dsack_std_based = 0x3;	/* bit field: bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */
static int32_t rack_bw_multipler = 0;	/* Limit on fill cw's jump up to be this x gp_est */
#ifdef TCP_ACCOUNTING
static int32_t rack_tcp_accounting = 0;
#endif
static int32_t rack_limits_scwnd = 1;
static int32_t rack_enable_mqueue_for_nonpaced = 0;
static int32_t rack_hybrid_allow_set_maxseg = 0;
static int32_t rack_disable_prr = 0;
static int32_t use_rack_rr = 1;
static int32_t rack_non_rxt_use_cr = 0;	/* does a non-rxt in recovery use the configured rate (ss/ca)? */
static int32_t rack_persist_min = 250000;	/* 250usec */
static int32_t rack_persist_max = 2000000;	/* 2 Second in usec's */
static int32_t rack_honors_hpts_min_to = 1;	/* Do we honor the hpts minimum time out for pacing timers */
static uint32_t rack_max_reduce = 10;		/* Percent we can reduce slot by */
static int32_t rack_sack_not_required = 1;	/* set to one to allow non-sack to use rack */
static int32_t rack_limit_time_with_srtt = 0;
static int32_t rack_autosndbuf_inc = 20;	/* In percentage form */
static int32_t rack_enobuf_hw_boost_mult = 0;	/* How many times the hw rate we boost slot using time_between */
static int32_t rack_enobuf_hw_max = 12000;	/* 12 ms in usecs */
static int32_t rack_enobuf_hw_min = 10000;	/* 10 ms in usecs */
static int32_t rack_hw_rwnd_factor = 2;		/* How many max_segs the rwnd must be before we hold off sending */
static int32_t rack_hw_check_queue = 0;		/* Do we always pre-check queue depth of a hw queue */
static int32_t rack_full_buffer_discount = 10;
/*
 * Currently regular tcp has a rto_min of 30ms
 * the backoff goes 12 times so that ends up
 * being a total of 30ms * (2^12 - 1) = 122.850 seconds
 * before a connection is killed.
 */
static uint32_t rack_def_data_window = 20;
static uint32_t rack_goal_bdp = 2;
static uint32_t rack_min_srtts = 1;
static uint32_t rack_min_measure_usec = 0;
static int32_t rack_tlp_min = 10000;	/* 10ms */
static int32_t rack_rto_min = 30000;	/* 30,000 usec same as main freebsd */
static int32_t rack_rto_max = 4000000;	/* 4 seconds in usec's */
static const int32_t rack_free_cache = 2;
static int32_t rack_hptsi_segments = 40;
static int32_t rack_rate_sample_method = USE_RTT_LOW;
static int32_t rack_pace_every_seg = 0;
static int32_t rack_delayed_ack_time = 40000;	/* 40ms in usecs */
static int32_t rack_slot_reduction = 4;
static int32_t rack_wma_divisor = 8;		/* For WMA calculation */
static int32_t rack_cwnd_block_ends_measure = 0;
static int32_t rack_rwnd_block_ends_measure = 0;
static int32_t rack_def_profile = 0;

static int32_t rack_lower_cwnd_at_tlp = 0;
static int32_t rack_always_send_oldest = 0;
static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;

static uint16_t rack_per_of_gp_ss = 250;	/* 250 % slow-start */
static uint16_t rack_per_of_gp_ca = 200;	/* 200 % congestion-avoidance */
static uint16_t rack_per_of_gp_rec = 200;	/* 200 % of bw */

/* Probertt */
static uint16_t rack_per_of_gp_probertt = 60;	/* 60% of bw */
static uint16_t rack_per_of_gp_lowthresh = 40;	/* 40% is bottom */
static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */
static uint16_t rack_atexit_prtt_hbp = 130;	/* Clamp to 130% on exit prtt if highly buffered path */
static uint16_t rack_atexit_prtt = 130;		/* Clamp to 100% on exit prtt if non highly buffered path */

static uint32_t rack_max_drain_wait = 2;	/* How many gp srtt's before we give up draining */
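/*
 * Illustrative sketch only (not the actual probertt code): the three
 * probertt knobs above combine roughly as "start pacing at
 * rack_per_of_gp_probertt percent of goodput, shave
 * rack_per_of_gp_probertt_reduce percent off every gp_srtt, and never
 * fall below rack_per_of_gp_lowthresh percent".  The function name is
 * hypothetical and the block is not compiled.
 */
#if 0
static uint16_t
example_probertt_gp_percent(uint32_t gp_srtts_elapsed)
{
	uint32_t per;

	per = rack_per_of_gp_probertt;		/* e.g. 60% of goodput */
	if (per > (gp_srtts_elapsed * rack_per_of_gp_probertt_reduce))
		per -= gp_srtts_elapsed * rack_per_of_gp_probertt_reduce;
	else
		per = 0;
	if (per < rack_per_of_gp_lowthresh)	/* floor, e.g. 40% */
		per = rack_per_of_gp_lowthresh;
	return ((uint16_t)per);
}
#endif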
static uint32_t rack_must_drain = 1;		/* How many GP srtt's we *must* wait */
static uint32_t rack_probertt_use_min_rtt_entry = 1;	/* Use the min to calculate the goal else gp_srtt */
static uint32_t rack_probertt_use_min_rtt_exit = 0;
static uint32_t rack_probe_rtt_sets_cwnd = 0;
static uint32_t rack_probe_rtt_safety_val = 2000000;	/* No more than 2 sec in probe-rtt */
static uint32_t rack_time_between_probertt = 9600000;	/* 9.6 sec in usecs */
static uint32_t rack_probertt_gpsrtt_cnt_mul = 0;	/* How many srtt periods does probe-rtt last top fraction */
static uint32_t rack_probertt_gpsrtt_cnt_div = 0;	/* How many srtt periods does probe-rtt last bottom fraction */
static uint32_t rack_min_probertt_hold = 40000;		/* Equal to delayed ack time */
static uint32_t rack_probertt_filter_life = 10000000;
static uint32_t rack_probertt_lower_within = 10;
static uint32_t rack_min_rtt_movement = 250000;	/* Must move at least 250ms (in microseconds) to count as a lowering */
static int32_t rack_pace_one_seg = 0;		/* Shall we pace for less than 1.4Meg 1MSS at a time */
static int32_t rack_probertt_clear_is = 1;
static int32_t rack_max_drain_hbp = 1;		/* Extra drain times gpsrtt for highly buffered paths */
static int32_t rack_hbp_thresh = 3;		/* what is the divisor max_rtt/min_rtt to decide a hbp */

/* Part of pacing */
static int32_t rack_max_per_above = 30;		/* When we go to increment stop if above 100+this% */

/* Timely information:
 *
 * Here we have various control parameters on how
 * timely may change the multiplier. rack_gain_p5_ub
 * is associated with timely but not directly influencing
 * the rate decision like the other variables. It controls
 * the way fill-cw interacts with timely and caps how much
 * timely can boost the fill-cw b/w.
 *
 * The other values are various boost/shrink numbers as well
 * as potential caps when adjustments are made to the timely
 * gain (returned by rack_get_output_gain()). Remember too that
 * the gain returned can be overridden by other factors such as
 * probeRTT as well as fixed-rate-pacing.
 */
static int32_t rack_gain_p5_ub = 250;
static int32_t rack_gp_per_bw_mul_up = 2;	/* 2% */
static int32_t rack_gp_per_bw_mul_down = 4;	/* 4% */
static int32_t rack_gp_rtt_maxmul = 3;		/* 3 x maxmin */
static int32_t rack_gp_rtt_minmul = 1;		/* minrtt + (minrtt/mindiv) is lower rtt */
static int32_t rack_gp_rtt_mindiv = 4;		/* minrtt + (minrtt * minmul/mindiv) is lower rtt */
static int32_t rack_gp_decrease_per = 80;	/* Beta value of timely decrease (.8) = 80 */
static int32_t rack_gp_increase_per = 2;	/* 2% increase in multiplier */
static int32_t rack_per_lower_bound = 50;	/* Don't allow to drop below this multiplier */
static int32_t rack_per_upper_bound_ss = 0;	/* Don't allow SS to grow above this */
static int32_t rack_per_upper_bound_ca = 0;	/* Don't allow CA to grow above this */
static int32_t rack_do_dyn_mul = 0;		/* Are the rack gp multipliers dynamic */
static int32_t rack_gp_no_rec_chg = 1;		/* Prohibit recovery from reducing its multiplier */
static int32_t rack_timely_dec_clear = 6;	/* Do we clear decrement count at a value (6)? */
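/*
 * Illustrative sketch only (not the actual timely code): one way to
 * read the increase/decrease knobs above.  A timely "increase" bumps
 * the goodput multiplier by rack_gp_increase_per percent, a "decrease"
 * scales it by rack_gp_decrease_per (80 == multiply by 0.8), and the
 * result is clamped by rack_per_lower_bound and, when non-zero, by
 * rack_per_upper_bound_ss/_ca.  The function name is hypothetical and
 * the block is not compiled.
 */
#if 0
static uint16_t
example_timely_adjust(uint16_t mult, int increase, int in_ss)
{
	uint32_t new_mult;
	int32_t upper;

	if (increase)
		new_mult = mult + ((mult * rack_gp_increase_per) / 100);
	else
		new_mult = (mult * rack_gp_decrease_per) / 100;
	if (new_mult < (uint32_t)rack_per_lower_bound)
		new_mult = rack_per_lower_bound;
	upper = in_ss ? rack_per_upper_bound_ss : rack_per_upper_bound_ca;
	if (upper && (new_mult > (uint32_t)upper))
		new_mult = upper;
	return ((uint16_t)new_mult);
}
#endif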
static int32_t rack_timely_max_push_rise = 3;	/* One round of pushing */
static int32_t rack_timely_max_push_drop = 3;	/* Three rounds of pushing */
static int32_t rack_timely_min_segs = 4;	/* 4 segment minimum */
static int32_t rack_use_max_for_nobackoff = 0;
static int32_t rack_timely_int_timely_only = 0;	/* do interim timely's only use the timely algo (no b/w changes)? */
static int32_t rack_timely_no_stopping = 0;
static int32_t rack_down_raise_thresh = 100;
static int32_t rack_req_segs = 1;
static uint64_t rack_bw_rate_cap = 0;
static uint64_t rack_fillcw_bw_cap = 3750000;	/* Cap fillcw at 30Mbps */


/* Rack specific counters */
counter_u64_t rack_saw_enobuf;
counter_u64_t rack_saw_enobuf_hw;
counter_u64_t rack_saw_enetunreach;
counter_u64_t rack_persists_sends;
counter_u64_t rack_persists_acks;
counter_u64_t rack_persists_loss;
counter_u64_t rack_persists_lost_ends;
counter_u64_t rack_total_bytes;
#ifdef INVARIANTS
counter_u64_t rack_adjust_map_bw;
#endif
/* Tail loss probe counters */
counter_u64_t rack_tlp_tot;
counter_u64_t rack_tlp_newdata;
counter_u64_t rack_tlp_retran;
counter_u64_t rack_tlp_retran_bytes;
counter_u64_t rack_to_tot;
counter_u64_t rack_hot_alloc;
counter_u64_t tcp_policer_detected;
counter_u64_t rack_to_alloc;
counter_u64_t rack_to_alloc_hard;
counter_u64_t rack_to_alloc_emerg;
counter_u64_t rack_to_alloc_limited;
counter_u64_t rack_alloc_limited_conns;
counter_u64_t rack_split_limited;
counter_u64_t rack_rxt_clamps_cwnd;
counter_u64_t rack_rxt_clamps_cwnd_uniq;

counter_u64_t rack_multi_single_eq;
counter_u64_t rack_proc_non_comp_ack;

counter_u64_t rack_fto_send;
counter_u64_t rack_fto_rsm_send;
counter_u64_t rack_nfto_resend;
counter_u64_t rack_non_fto_send;
counter_u64_t rack_extended_rfo;

counter_u64_t rack_sack_proc_all;
counter_u64_t rack_sack_proc_short;
counter_u64_t rack_sack_proc_restart;
counter_u64_t rack_sack_attacks_detected;
counter_u64_t rack_sack_attacks_reversed;
counter_u64_t rack_sack_attacks_suspect;
counter_u64_t rack_sack_used_next_merge;
counter_u64_t rack_sack_splits;
counter_u64_t rack_sack_used_prev_merge;
counter_u64_t rack_sack_skipped_acked;
counter_u64_t rack_ack_total;
counter_u64_t rack_express_sack;
counter_u64_t rack_sack_total;
counter_u64_t rack_move_none;
counter_u64_t rack_move_some;

counter_u64_t rack_input_idle_reduces;
counter_u64_t rack_collapsed_win;
counter_u64_t rack_collapsed_win_seen;
counter_u64_t rack_collapsed_win_rxt;
counter_u64_t rack_collapsed_win_rxt_bytes;
counter_u64_t rack_try_scwnd;
counter_u64_t rack_hw_pace_init_fail;
counter_u64_t rack_hw_pace_lost;

counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];


#define	RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2)))

#define	RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do {	\
	(tv) = (value) + slop;					\
	if ((u_long)(tv) < (u_long)(tvmin))			\
		(tv) = (tvmin);					\
	if ((u_long)(tv) > (u_long)(tvmax))			\
		(tv) = (tvmax);					\
} while (0)
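/*
 * Illustrative sketch only: the two macros above are typically used
 * together, computing a retransmit value from the smoothed rtt and
 * rtt variance (kept in microseconds in rack) and clamping it to the
 * configured bounds, e.g.:
 *
 *	uint32_t rto;
 *
 *	RACK_TCPT_RANGESET(rto, RACK_REXMTVAL(tp),
 *	    rack_rto_min, rack_rto_max, 0);
 *
 * With t_srtt = 20000 and t_rttvar = 5000 that works out to
 * max(30000, 20000 + 4 * 5000) = 40000 usec.
 */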
static void
rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line);

static int
rack_process_ack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to,
    uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val, int32_t orig_tlen);
static int
rack_process_data(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static void
rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery);
static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
    uint8_t limit_type);
static struct rack_sendmap *
rack_check_recovery_mode(struct tcpcb *tp,
    uint32_t tsused);
static uint32_t
rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack);
static void
rack_cong_signal(struct tcpcb *tp,
    uint32_t type, uint32_t ack, int );
static void rack_counter_destroy(void);
static int
rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt);
static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
static void
rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override);
static void
rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
    int32_t drop_hdrlen, int32_t tlen, uint8_t iptos);
static void rack_dtor(void *mem, int32_t size, void *arg);
static void
rack_log_alt_to_to_cancel(struct tcp_rack *rack,
    uint32_t flex1, uint32_t flex2,
    uint32_t flex3, uint32_t flex4,
    uint32_t flex5, uint32_t flex6,
    uint16_t flex7, uint8_t mod);

static void
rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
    uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line,
    struct rack_sendmap *rsm, uint8_t quality);
static struct rack_sendmap *
rack_find_high_nonack(struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
static int rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt);
static void
rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
    tcp_seq th_ack, int line, uint8_t quality);
static void
rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm);

static uint32_t
rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss);
static int32_t rack_handoff_ok(struct tcpcb *tp);
static int32_t rack_init(struct tcpcb *tp, void **ptr);
static void rack_init_sysctls(void);

static void
rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
    struct tcphdr *th, int entered_rec, int dup_ack_struck,
    int *dsack_seen, int *sacks_seen);
static void
rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
    uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t ts,
    struct rack_sendmap *hintrsm, uint32_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls, int segsiz);

static uint64_t rack_get_gp_est(struct tcp_rack *rack);


static void
rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint32_t cts);
static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm);
static int32_t rack_output(struct tcpcb *tp);

static uint32_t
rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
    struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
    uint32_t cts, int *no_extra, int *moved_two, uint32_t segsiz);
static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq);
static void rack_remxt_tmr(struct tcpcb *tp);
static int rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt);
static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
static int32_t rack_stopall(struct tcpcb *tp);
static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
static uint32_t
rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint32_t add_flag, int segsiz);
static void
rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint64_t ts, uint32_t add_flag, int segsiz);
static int
rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack);
static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
static int
rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);

static void
rack_peg_rxt(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t segsiz);

static int
rack_do_closing(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_established(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_lastack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static void
rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts);
struct rack_sendmap *
tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t tsused);
static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt,
    uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt);
static void
tcp_rack_partialack(struct tcpcb *tp);
static int
rack_set_profile(struct tcp_rack *rack, int prof);
static void
rack_apply_deferred_options(struct tcp_rack *rack);

int32_t rack_clear_counter=0;

/*
 * Return the long term (lt) b/w estimate in bytes per second:
 * accumulated bytes over accumulated active send time (in usecs),
 * folding in the portion of a sample that is still in progress.
 */
static uint64_t
rack_get_lt_bw(struct tcp_rack *rack)
{
	struct timeval tv;
	uint64_t tim, bytes;

	tim = rack->r_ctl.lt_bw_time;
	bytes = rack->r_ctl.lt_bw_bytes;
	if (rack->lt_bw_up) {
		/* Include all the current bytes too */
		microuptime(&tv);
		bytes += (rack->rc_tp->snd_una - rack->r_ctl.lt_seq);
		tim += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark);
	}
	if ((bytes != 0) && (tim != 0))
		return ((bytes * (uint64_t)1000000) / tim);
	else
		return (0);
}
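/*
 * Worked example for the conversion above: 2,500,000 bytes accounted
 * over 1,000,000 usec of active send time gives
 * (2500000 * 1000000) / 1000000 = 2,500,000 bytes/sec (20 Mb/s).
 */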
static void
rack_swap_beta_values(struct tcp_rack *rack, uint8_t flex8)
{
	struct sockopt sopt;
	struct cc_newreno_opts opt;
	struct newreno old;
	struct tcpcb *tp;
	int error, failed = 0;

	tp = rack->rc_tp;
	if (tp->t_cc == NULL) {
		/* Tcb is leaving */
		return;
	}
	rack->rc_pacing_cc_set = 1;
	if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) {
		/* Not new-reno we can't play games with beta! */
		failed = 1;
		goto out;

	}
	if (CC_ALGO(tp)->ctl_output == NULL) {
		/* Huh, not using new-reno so no swaps? */
		failed = 2;
		goto out;
	}
	/* Get the current values out */
	sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
	sopt.sopt_dir = SOPT_GET;
	opt.name = CC_NEWRENO_BETA;
	error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
	if (error) {
		failed = 3;
		goto out;
	}
	old.beta = opt.val;
	opt.name = CC_NEWRENO_BETA_ECN;
	error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
	if (error) {
		failed = 4;
		goto out;
	}
	old.beta_ecn = opt.val;

	/* Now lets set in the values we have stored */
	sopt.sopt_dir = SOPT_SET;
	opt.name = CC_NEWRENO_BETA;
	opt.val = rack->r_ctl.rc_saved_beta.beta;
	error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
	if (error) {
		failed = 5;
		goto out;
	}
	opt.name = CC_NEWRENO_BETA_ECN;
	opt.val = rack->r_ctl.rc_saved_beta.beta_ecn;
	error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
	if (error) {
		failed = 6;
		goto out;
	}
	/* Save off the values for restoral */
	memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
out:
	if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
		union tcp_log_stackspecific log;
		struct timeval tv;
		struct newreno *ptr;

		ptr = ((struct newreno *)tp->t_ccv.cc_data);
		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.flex1 = ptr->beta;
		log.u_bbr.flex2 = ptr->beta_ecn;
		log.u_bbr.flex3 = ptr->newreno_flags;
		log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
		log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
		log.u_bbr.flex6 = failed;
		log.u_bbr.flex7 = rack->gp_ready;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->use_fixed_rate;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
		log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.flex8 = flex8;
		tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, error,
		    0, &log, false, NULL, NULL, 0, &tv);
	}
}

static void
rack_set_cc_pacing(struct tcp_rack *rack)
{
	if (rack->rc_pacing_cc_set)
		return;
	/*
	 * Use the swap utility placing in 3 for flex8 to id
	 * setting in a new set of values.
	 */
	rack->rc_pacing_cc_set = 1;
	rack_swap_beta_values(rack, 3);
}

static void
rack_undo_cc_pacing(struct tcp_rack *rack)
{
	if (rack->rc_pacing_cc_set == 0)
		return;
	/*
	 * Use the swap utility placing in 4 for flex8 to id a
	 * restoral of the old values.
	 */
	rack->rc_pacing_cc_set = 0;
	rack_swap_beta_values(rack, 4);
}

static void
rack_remove_pacing(struct tcp_rack *rack)
{
	if (rack->rc_pacing_cc_set)
		rack_undo_cc_pacing(rack);
	if (rack->r_ctl.pacing_method & RACK_REG_PACING)
		tcp_decrement_paced_conn();
	if (rack->r_ctl.pacing_method & RACK_DGP_PACING)
		tcp_dec_dgp_pacing_cnt();
	rack->rc_always_pace = 0;
	rack->r_ctl.pacing_method = RACK_PACING_NONE;
	rack->dgp_on = 0;
	rack->rc_hybrid_mode = 0;
	rack->use_fixed_rate = 0;
}

static void
rack_log_gpset(struct tcp_rack *rack, uint32_t seq_end, uint32_t ack_end_t,
    uint32_t send_end_t, int line, uint8_t mode, struct rack_sendmap *rsm)
{
	if (tcp_bblogging_on(rack->rc_tp) && (rack_verbose_logging != 0)) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		memset(&log, 0, sizeof(log));
		log.u_bbr.flex1 = seq_end;
		log.u_bbr.flex2 = rack->rc_tp->gput_seq;
		log.u_bbr.flex3 = ack_end_t;
		log.u_bbr.flex4 = rack->rc_tp->gput_ts;
		log.u_bbr.flex5 = send_end_t;
		log.u_bbr.flex6 = rack->rc_tp->gput_ack;
		log.u_bbr.flex7 = mode;
		log.u_bbr.flex8 = 69;
		log.u_bbr.rttProp = rack->r_ctl.rc_gp_cumack_ts;
		log.u_bbr.delRate = rack->r_ctl.rc_gp_output_ts;
		log.u_bbr.pkts_out = line;
		log.u_bbr.cwnd_gain = rack->app_limited_needs_set;
		log.u_bbr.pkt_epoch = rack->r_ctl.rc_app_limited_cnt;
		log.u_bbr.epoch = rack->r_ctl.current_round;
		log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost;
		if (rsm != NULL) {
			log.u_bbr.applimited = rsm->r_start;
			log.u_bbr.delivered = rsm->r_end;
			log.u_bbr.epoch = rsm->r_flags;
		}
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		TCP_LOG_EVENTP(rack->rc_tp, NULL,
		    &rack->rc_inp->inp_socket->so_rcv,
		    &rack->rc_inp->inp_socket->so_snd,
		    BBR_LOG_HPTSI_CALC, 0,
		    0, &log, false, &tv);
	}
}

/*
 * Sysctl handler: write 1 to zero the RACK stats counters, 2 to zero
 * the options array, 4 to zero the out-size array.
 */
static int
sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
{
	uint32_t stat;
	int32_t error;

	error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
	if (error || req->newptr == NULL)
		return error;

	error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
	if (error)
		return (error);
	if (stat == 1) {
#ifdef INVARIANTS
		printf("Clearing RACK counters\n");
#endif
		counter_u64_zero(rack_tlp_tot);
		counter_u64_zero(rack_tlp_newdata);
		counter_u64_zero(rack_tlp_retran);
		counter_u64_zero(rack_tlp_retran_bytes);
		counter_u64_zero(rack_to_tot);
		counter_u64_zero(rack_saw_enobuf);
		counter_u64_zero(rack_saw_enobuf_hw);
		counter_u64_zero(rack_saw_enetunreach);
		counter_u64_zero(rack_persists_sends);
		counter_u64_zero(rack_total_bytes);
		counter_u64_zero(rack_persists_acks);
		counter_u64_zero(rack_persists_loss);
		counter_u64_zero(rack_persists_lost_ends);
#ifdef INVARIANTS
		counter_u64_zero(rack_adjust_map_bw);
#endif
		counter_u64_zero(rack_to_alloc_hard);
		counter_u64_zero(rack_to_alloc_emerg);
		counter_u64_zero(rack_sack_proc_all);
		counter_u64_zero(rack_fto_send);
		counter_u64_zero(rack_fto_rsm_send);
		counter_u64_zero(rack_extended_rfo);
		counter_u64_zero(rack_hw_pace_init_fail);
		counter_u64_zero(rack_hw_pace_lost);
		counter_u64_zero(rack_non_fto_send);
		counter_u64_zero(rack_nfto_resend);
		counter_u64_zero(rack_sack_proc_short);
		counter_u64_zero(rack_sack_proc_restart);
		counter_u64_zero(rack_to_alloc);
		counter_u64_zero(rack_to_alloc_limited);
		counter_u64_zero(rack_alloc_limited_conns);
		counter_u64_zero(rack_split_limited);
		counter_u64_zero(rack_rxt_clamps_cwnd);
		counter_u64_zero(rack_rxt_clamps_cwnd_uniq);
		counter_u64_zero(rack_multi_single_eq);
		counter_u64_zero(rack_proc_non_comp_ack);
		counter_u64_zero(rack_sack_attacks_detected);
		counter_u64_zero(rack_sack_attacks_reversed);
		counter_u64_zero(rack_sack_attacks_suspect);
		counter_u64_zero(rack_sack_used_next_merge);
		counter_u64_zero(rack_sack_used_prev_merge);
		counter_u64_zero(rack_sack_splits);
		counter_u64_zero(rack_sack_skipped_acked);
		counter_u64_zero(rack_ack_total);
		counter_u64_zero(rack_express_sack);
		counter_u64_zero(rack_sack_total);
		counter_u64_zero(rack_move_none);
		counter_u64_zero(rack_move_some);
		counter_u64_zero(rack_try_scwnd);
		counter_u64_zero(rack_collapsed_win);
		counter_u64_zero(rack_collapsed_win_rxt);
		counter_u64_zero(rack_collapsed_win_seen);
		counter_u64_zero(rack_collapsed_win_rxt_bytes);
	} else if (stat == 2) {
#ifdef INVARIANTS
		printf("Clearing RACK option array\n");
#endif
		COUNTER_ARRAY_ZERO(rack_opts_arry, RACK_OPTS_SIZE);
	} else if (stat == 3) {
		printf("Rack has no stats counters to clear (use 1 to clear all stats in sysctl node)\n");
	} else if (stat == 4) {
#ifdef INVARIANTS
		printf("Clearing RACK out size array\n");
#endif
		COUNTER_ARRAY_ZERO(rack_out_size, TCP_MSS_ACCT_SIZE);
	}
	rack_clear_counter = 0;
	return (0);
}

static void
rack_init_sysctls(void)
{
	struct sysctl_oid *rack_counters;
	struct sysctl_oid *rack_attack;
	struct sysctl_oid *rack_pacing;
	struct sysctl_oid *rack_timely;
	struct sysctl_oid *rack_timers;
	struct sysctl_oid *rack_tlp;
	struct sysctl_oid *rack_misc;
	struct sysctl_oid *rack_features;
	struct sysctl_oid *rack_measure;
	struct sysctl_oid *rack_probertt;
	struct sysctl_oid *rack_hw_pacing;
	struct sysctl_oid *rack_policing;

	rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "sack_attack",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Rack Sack Attack Counters and Controls");
	rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "stats",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Rack Counters");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "rate_sample_method", CTLFLAG_RW,
	    &rack_rate_sample_method, USE_RTT_LOW,
	    "What method should we use for rate sampling 0=high, 1=low");
	/* Probe rtt related controls */
	rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "probertt",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "ProbeRTT related Controls");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "exit_per_hpb", CTLFLAG_RW,
	    &rack_atexit_prtt_hbp, 130,
	    "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW,
	    &rack_atexit_prtt, 130,
	    "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "gp_per_mul", CTLFLAG_RW,
	    &rack_per_of_gp_probertt, 60,
	    "What percentage of goodput do we pace at in probertt");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "gp_per_reduce", CTLFLAG_RW,
	    &rack_per_of_gp_probertt_reduce, 10,
	    "What percentage of goodput do we reduce every gp_srtt");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "gp_per_low", CTLFLAG_RW,
	    &rack_per_of_gp_lowthresh, 40,
	    "What percentage of goodput do we allow the multiplier to fall to");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "time_between", CTLFLAG_RW,
	    &rack_time_between_probertt, 96000000,
	    "How many useconds between the lowest rtt falling must pass before we enter probertt");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "safety", CTLFLAG_RW,
	    &rack_probe_rtt_safety_val, 2000000,
	    "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "sets_cwnd", CTLFLAG_RW,
	    &rack_probe_rtt_sets_cwnd, 0,
	    "Do we set the cwnd too (if always_lower is on)");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "maxdrainsrtts", CTLFLAG_RW,
	    &rack_max_drain_wait, 2,
	    "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "mustdrainsrtts", CTLFLAG_RW,
	    &rack_must_drain, 1,
	    "We must drain this many gp_srtt's waiting for flight to reach goal");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "goal_use_min_entry", CTLFLAG_RW,
	    &rack_probertt_use_min_rtt_entry, 1,
	    "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "goal_use_min_exit", CTLFLAG_RW,
	    &rack_probertt_use_min_rtt_exit, 0,
	    "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "length_div", CTLFLAG_RW,
	    &rack_probertt_gpsrtt_cnt_div, 0,
	    "How many recent goodput srtt periods plus hold time does probertt last (bottom of fraction)");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "length_mul", CTLFLAG_RW,
	    &rack_probertt_gpsrtt_cnt_mul, 0,
	    "How many recent goodput srtt periods plus hold time does probertt last (top of fraction)");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "holdtim_at_target", CTLFLAG_RW,
	    &rack_min_probertt_hold, 200000,
	    "What is the minimum time we hold probertt at target");
target"); 1002 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1003 SYSCTL_CHILDREN(rack_probertt), 1004 OID_AUTO, "filter_life", CTLFLAG_RW, 1005 &rack_probertt_filter_life, 10000000, 1006 "What is the time for the filters life in useconds"); 1007 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1008 SYSCTL_CHILDREN(rack_probertt), 1009 OID_AUTO, "lower_within", CTLFLAG_RW, 1010 &rack_probertt_lower_within, 10, 1011 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); 1012 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1013 SYSCTL_CHILDREN(rack_probertt), 1014 OID_AUTO, "must_move", CTLFLAG_RW, 1015 &rack_min_rtt_movement, 250, 1016 "How much is the minimum movement in rtt to count as a drop for probertt purposes"); 1017 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1018 SYSCTL_CHILDREN(rack_probertt), 1019 OID_AUTO, "clear_is_cnts", CTLFLAG_RW, 1020 &rack_probertt_clear_is, 1, 1021 "Do we clear I/S counts on exiting probe-rtt"); 1022 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1023 SYSCTL_CHILDREN(rack_probertt), 1024 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW, 1025 &rack_max_drain_hbp, 1, 1026 "How many extra drain gpsrtt's do we get in highly buffered paths"); 1027 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1028 SYSCTL_CHILDREN(rack_probertt), 1029 OID_AUTO, "hbp_threshold", CTLFLAG_RW, 1030 &rack_hbp_thresh, 3, 1031 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); 1032 /* Pacing related sysctls */ 1033 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1034 SYSCTL_CHILDREN(rack_sysctl_root), 1035 OID_AUTO, 1036 "pacing", 1037 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1038 "Pacing related Controls"); 1039 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1040 SYSCTL_CHILDREN(rack_pacing), 1041 OID_AUTO, "pcm_enabled", CTLFLAG_RW, 1042 &rack_pcm_is_enabled, 1, 1043 "Do we by default do PCM measurements?"); 1044 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1045 SYSCTL_CHILDREN(rack_pacing), 1046 OID_AUTO, "pcm_rnds", CTLFLAG_RW, 1047 &rack_pcm_every_n_rounds, 100, 1048 "How many rounds before we need to do a PCM measurement"); 1049 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1050 SYSCTL_CHILDREN(rack_pacing), 1051 OID_AUTO, "pcm_blast", CTLFLAG_RW, 1052 &rack_pcm_blast, 0, 1053 "Blast out the full cwnd/rwnd when doing a PCM measurement"); 1054 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1055 SYSCTL_CHILDREN(rack_pacing), 1056 OID_AUTO, "rnd_gp_gain", CTLFLAG_RW, 1057 &rack_gp_gain_req, 1200, 1058 "How much do we have to increase the GP to record the round 1200 = 120.0"); 1059 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1060 SYSCTL_CHILDREN(rack_pacing), 1061 OID_AUTO, "dgp_out_of_ss_at", CTLFLAG_RW, 1062 &rack_rnd_cnt_req, 0x10005, 1063 "How many rounds less than rnd_gp_gain will drop us out of SS"); 1064 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1065 SYSCTL_CHILDREN(rack_pacing), 1066 OID_AUTO, "no_timely", CTLFLAG_RW, 1067 &rack_timely_off, 0, 1068 "Do we not use timely in DGP?"); 1069 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1070 SYSCTL_CHILDREN(rack_pacing), 1071 OID_AUTO, "fullbufdisc", CTLFLAG_RW, 1072 &rack_full_buffer_discount, 10, 1073 "What percentage b/w reduction over the GP estimate for a full buffer (default=0 off)?"); 1074 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1075 SYSCTL_CHILDREN(rack_pacing), 1076 OID_AUTO, "fillcw", CTLFLAG_RW, 1077 &rack_fill_cw_state, 0, 1078 "Enable fillcw on new connections (default=0 off)?"); 1079 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1080 SYSCTL_CHILDREN(rack_pacing), 1081 OID_AUTO, "min_burst", CTLFLAG_RW, 1082 &rack_pacing_min_seg, 0, 1083 "What is the min burst size for pacing (0 disables)?"); 1084 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1085 SYSCTL_CHILDREN(rack_pacing), 1086 
OID_AUTO, "divisor", CTLFLAG_RW, 1087 &rack_default_pacing_divisor, 250, 1088 "What is the default divisor given to the rl code?"); 1089 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1090 SYSCTL_CHILDREN(rack_pacing), 1091 OID_AUTO, "fillcw_max_mult", CTLFLAG_RW, 1092 &rack_bw_multipler, 0, 1093 "What is the limit multiplier of the current gp_est that fillcw can increase the b/w too, 200 == 200% (0 = off)?"); 1094 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1095 SYSCTL_CHILDREN(rack_pacing), 1096 OID_AUTO, "max_pace_over", CTLFLAG_RW, 1097 &rack_max_per_above, 30, 1098 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)"); 1099 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1100 SYSCTL_CHILDREN(rack_pacing), 1101 OID_AUTO, "allow1mss", CTLFLAG_RW, 1102 &rack_pace_one_seg, 0, 1103 "Do we allow low b/w pacing of 1MSS instead of two (1.2Meg and less)?"); 1104 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1105 SYSCTL_CHILDREN(rack_pacing), 1106 OID_AUTO, "limit_wsrtt", CTLFLAG_RW, 1107 &rack_limit_time_with_srtt, 0, 1108 "Do we limit pacing time based on srtt"); 1109 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1110 SYSCTL_CHILDREN(rack_pacing), 1111 OID_AUTO, "gp_per_ss", CTLFLAG_RW, 1112 &rack_per_of_gp_ss, 250, 1113 "If non zero, what percentage of goodput to pace at in slow start"); 1114 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1115 SYSCTL_CHILDREN(rack_pacing), 1116 OID_AUTO, "gp_per_ca", CTLFLAG_RW, 1117 &rack_per_of_gp_ca, 150, 1118 "If non zero, what percentage of goodput to pace at in congestion avoidance"); 1119 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1120 SYSCTL_CHILDREN(rack_pacing), 1121 OID_AUTO, "gp_per_rec", CTLFLAG_RW, 1122 &rack_per_of_gp_rec, 200, 1123 "If non zero, what percentage of goodput to pace at in recovery"); 1124 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1125 SYSCTL_CHILDREN(rack_pacing), 1126 OID_AUTO, "pace_max_seg", CTLFLAG_RW, 1127 &rack_hptsi_segments, 40, 1128 "What size is the max for TSO segments in pacing and burst mitigation"); 1129 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1130 SYSCTL_CHILDREN(rack_pacing), 1131 OID_AUTO, "burst_reduces", CTLFLAG_RW, 1132 &rack_slot_reduction, 4, 1133 "When doing only burst mitigation what is the reduce divisor"); 1134 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1135 SYSCTL_CHILDREN(rack_sysctl_root), 1136 OID_AUTO, "use_pacing", CTLFLAG_RW, 1137 &rack_pace_every_seg, 0, 1138 "If set we use pacing, if clear we use only the original burst mitigation"); 1139 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1140 SYSCTL_CHILDREN(rack_pacing), 1141 OID_AUTO, "rate_cap", CTLFLAG_RW, 1142 &rack_bw_rate_cap, 0, 1143 "If set we apply this value to the absolute rate cap used by pacing"); 1144 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1145 SYSCTL_CHILDREN(rack_pacing), 1146 OID_AUTO, "fillcw_cap", CTLFLAG_RW, 1147 &rack_fillcw_bw_cap, 3750000, 1148 "Do we have an absolute cap on the amount of b/w fillcw can specify (0 = no)?"); 1149 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1150 SYSCTL_CHILDREN(rack_sysctl_root), 1151 OID_AUTO, "req_measure_cnt", CTLFLAG_RW, 1152 &rack_req_measurements, 1, 1153 "If doing dynamic pacing, how many measurements must be in before we start pacing?"); 1154 /* Hardware pacing */ 1155 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1156 SYSCTL_CHILDREN(rack_sysctl_root), 1157 OID_AUTO, 1158 "hdwr_pacing", 1159 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1160 "Pacing related Controls"); 1161 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1162 SYSCTL_CHILDREN(rack_hw_pacing), 1163 OID_AUTO, "rwnd_factor", CTLFLAG_RW, 1164 &rack_hw_rwnd_factor, 2, 1165 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold 
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "precheck", CTLFLAG_RW,
	    &rack_hw_check_queue, 0,
	    "Do we always precheck the hdwr pacing queue to avoid ENOBUF's?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW,
	    &rack_enobuf_hw_boost_mult, 0,
	    "By how many time_betweens should we boost the pacing time if we see an ENOBUFS?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "pace_enobuf_max", CTLFLAG_RW,
	    &rack_enobuf_hw_max, 2,
	    "What is the max boost of the pacing time if we see an ENOBUFS?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "pace_enobuf_min", CTLFLAG_RW,
	    &rack_enobuf_hw_min, 2,
	    "What is the min boost of the pacing time if we see an ENOBUFS?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "enable", CTLFLAG_RW,
	    &rack_enable_hw_pacing, 0,
	    "Should RACK attempt to use hw pacing?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "rate_cap", CTLFLAG_RW,
	    &rack_hw_rate_caps, 0,
	    "Does the highest hardware pacing rate cap the rate we will send at?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "uncap_per", CTLFLAG_RW,
	    &rack_hw_rate_cap_per, 0,
	    "If you go over b/w by this amount you will be uncapped (0 = never)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "rate_min", CTLFLAG_RW,
	    &rack_hw_rate_min, 0,
	    "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "rate_to_low", CTLFLAG_RW,
	    &rack_hw_rate_to_low, 0,
	    "If we fall below this rate, dis-engage hw pacing?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "up_only", CTLFLAG_RW,
	    &rack_hw_up_only, 0,
	    "Do we allow hw pacing to lower the rate selected?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "extra_mss_precise", CTLFLAG_RW,
	    &rack_hw_pace_extra_slots, 0,
	    "If the rates between software and hardware match precisely how many extra time_betweens do we get?");
	rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "timely",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Rack Timely RTT Controls");
	/* Timely based GP dynamics */
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "upper", CTLFLAG_RW,
	    &rack_gp_per_bw_mul_up, 2,
	    "Rack timely upper range for equal b/w (in percentage)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "lower", CTLFLAG_RW,
	    &rack_gp_per_bw_mul_down, 4,
	    "Rack timely lower range for equal b/w (in percentage)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "rtt_max_mul", CTLFLAG_RW,
	    &rack_gp_rtt_maxmul, 3,
	    "Rack timely multiplier of lowest rtt for rtt_max");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "rtt_min_div", CTLFLAG_RW,
	    &rack_gp_rtt_mindiv, 4,
	    "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "rtt_min_mul", CTLFLAG_RW,
	    &rack_gp_rtt_minmul, 1,
	    "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "decrease", CTLFLAG_RW,
	    &rack_gp_decrease_per, 80,
	    "Rack timely Beta value 80 = .8 (scaled by 100)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "increase", CTLFLAG_RW,
	    &rack_gp_increase_per, 2,
	    "Rack timely increase percentage of our GP multiplication factor");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "lowerbound", CTLFLAG_RW,
	    &rack_per_lower_bound, 50,
	    "Rack timely lowest percentage we allow GP multiplier to fall to");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "p5_upper", CTLFLAG_RW,
	    &rack_gain_p5_ub, 250,
	    "Profile 5 upper bound to timely gain");

	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "upperboundss", CTLFLAG_RW,
	    &rack_per_upper_bound_ss, 0,
	    "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "upperboundca", CTLFLAG_RW,
	    &rack_per_upper_bound_ca, 0,
	    "Rack timely highest percentage we allow GP multiplier in CA to raise to (0 is no upperbound)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "dynamicgp", CTLFLAG_RW,
	    &rack_do_dyn_mul, 0,
	    "Rack timely do we enable dynamic timely goodput by default");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "no_rec_red", CTLFLAG_RW,
	    &rack_gp_no_rec_chg, 1,
	    "Rack timely do we prohibit the recovery multiplier from being lowered");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "red_clear_cnt", CTLFLAG_RW,
	    &rack_timely_dec_clear, 6,
	    "Rack timely what threshold do we count to before another boost during b/w descent");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "max_push_rise", CTLFLAG_RW,
	    &rack_timely_max_push_rise, 3,
	    "Rack timely how many times do we push up with b/w increase");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "max_push_drop", CTLFLAG_RW,
	    &rack_timely_max_push_drop, 3,
	    "Rack timely how many times do we push back on b/w descent");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "min_segs", CTLFLAG_RW,
	    &rack_timely_min_segs, 4,
	    "Rack timely when setting the cwnd what is the min num segments");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "noback_max", CTLFLAG_RW,
	    &rack_use_max_for_nobackoff, 0,
	    "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "interim_timely_only", CTLFLAG_RW,
	    &rack_timely_int_timely_only, 0,
	    "Rack timely when doing interim timely's do we only do timely (no b/w consideration)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "nonstop", CTLFLAG_RW,
	    &rack_timely_no_stopping, 0,
	    "Rack timely don't stop increase");
stop increase"); 1329 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1330 SYSCTL_CHILDREN(rack_timely), 1331 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW, 1332 &rack_down_raise_thresh, 100, 1333 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)"); 1334 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1335 SYSCTL_CHILDREN(rack_timely), 1336 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW, 1337 &rack_req_segs, 1, 1338 "Bottom dragging if not these many segments outstanding and room"); 1339 1340 /* TLP and Rack related parameters */ 1341 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1342 SYSCTL_CHILDREN(rack_sysctl_root), 1343 OID_AUTO, 1344 "tlp", 1345 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1346 "TLP and Rack related Controls"); 1347 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1348 SYSCTL_CHILDREN(rack_tlp), 1349 OID_AUTO, "use_rrr", CTLFLAG_RW, 1350 &use_rack_rr, 1, 1351 "Do we use Rack Rapid Recovery"); 1352 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1353 SYSCTL_CHILDREN(rack_tlp), 1354 OID_AUTO, "post_rec_labc", CTLFLAG_RW, 1355 &rack_max_abc_post_recovery, 2, 1356 "Since we do early recovery, do we override the l_abc to a value, if so what?"); 1357 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1358 SYSCTL_CHILDREN(rack_tlp), 1359 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW, 1360 &rack_non_rxt_use_cr, 0, 1361 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk"); 1362 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1363 SYSCTL_CHILDREN(rack_tlp), 1364 OID_AUTO, "tlpmethod", CTLFLAG_RW, 1365 &rack_tlp_threshold_use, TLP_USE_TWO_ONE, 1366 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); 1367 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1368 SYSCTL_CHILDREN(rack_tlp), 1369 OID_AUTO, "limit", CTLFLAG_RW, 1370 &rack_tlp_limit, 2, 1371 "How many TLP's can be sent without sending new data"); 1372 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1373 SYSCTL_CHILDREN(rack_tlp), 1374 OID_AUTO, "use_greater", CTLFLAG_RW, 1375 &rack_tlp_use_greater, 1, 1376 "Should we use the rack_rtt time if its greater than srtt"); 1377 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1378 SYSCTL_CHILDREN(rack_tlp), 1379 OID_AUTO, "tlpminto", CTLFLAG_RW, 1380 &rack_tlp_min, 10000, 1381 "TLP minimum timeout per the specification (in microseconds)"); 1382 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1383 SYSCTL_CHILDREN(rack_tlp), 1384 OID_AUTO, "send_oldest", CTLFLAG_RW, 1385 &rack_always_send_oldest, 0, 1386 "Should we always send the oldest TLP and RACK-TLP"); 1387 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1388 SYSCTL_CHILDREN(rack_tlp), 1389 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW, 1390 &rack_lower_cwnd_at_tlp, 0, 1391 "When a TLP completes a retran should we enter recovery"); 1392 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1393 SYSCTL_CHILDREN(rack_tlp), 1394 OID_AUTO, "reorder_thresh", CTLFLAG_RW, 1395 &rack_reorder_thresh, 2, 1396 "What factor for rack will be added when seeing reordering (shift right)"); 1397 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1398 SYSCTL_CHILDREN(rack_tlp), 1399 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW, 1400 &rack_tlp_thresh, 1, 1401 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); 1402 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1403 SYSCTL_CHILDREN(rack_tlp), 1404 OID_AUTO, "reorder_fade", CTLFLAG_RW, 1405 &rack_reorder_fade, 60000000, 1406 "Does reorder detection fade, if so how many microseconds (0 means never)"); 1407 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1408 SYSCTL_CHILDREN(rack_tlp), 1409 OID_AUTO, "pktdelay", CTLFLAG_RW, 1410 &rack_pkt_delay, 1000, 1411 "Extra RACK time (in microseconds) besides reordering thresh"); 1412 1413 /* Timer related controls */ 1414 rack_timers 
= SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1415 SYSCTL_CHILDREN(rack_sysctl_root), 1416 OID_AUTO, 1417 "timers", 1418 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1419 "Timer related controls"); 1420 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1421 SYSCTL_CHILDREN(rack_timers), 1422 OID_AUTO, "reset_ssth_rec_rto", CTLFLAG_RW, 1423 &rack_ssthresh_rest_rto_rec, 0, 1424 "When doing recovery -> rto -> recovery do we reset SSthresh?"); 1425 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1426 SYSCTL_CHILDREN(rack_timers), 1427 OID_AUTO, "scoreboard_thresh", CTLFLAG_RW, 1428 &rack_rxt_scoreboard_clear_thresh, 2, 1429 "How many RTO's are allowed before we clear the scoreboard"); 1430 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1431 SYSCTL_CHILDREN(rack_timers), 1432 OID_AUTO, "honor_hpts_min", CTLFLAG_RW, 1433 &rack_honors_hpts_min_to, 1, 1434 "Do rack pacing timers honor hpts min timeout"); 1435 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1436 SYSCTL_CHILDREN(rack_timers), 1437 OID_AUTO, "hpts_max_reduce", CTLFLAG_RW, 1438 &rack_max_reduce, 10, 1439 "Max percentage we will reduce slot by for pacing when we are behind"); 1440 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1441 SYSCTL_CHILDREN(rack_timers), 1442 OID_AUTO, "persmin", CTLFLAG_RW, 1443 &rack_persist_min, 250000, 1444 "What is the minimum time in microseconds between persists"); 1445 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1446 SYSCTL_CHILDREN(rack_timers), 1447 OID_AUTO, "persmax", CTLFLAG_RW, 1448 &rack_persist_max, 2000000, 1449 "What is the largest delay in microseconds between persists"); 1450 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1451 SYSCTL_CHILDREN(rack_timers), 1452 OID_AUTO, "delayed_ack", CTLFLAG_RW, 1453 &rack_delayed_ack_time, 40000, 1454 "Delayed ack time (40ms in microseconds)"); 1455 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1456 SYSCTL_CHILDREN(rack_timers), 1457 OID_AUTO, "minrto", CTLFLAG_RW, 1458 &rack_rto_min, 30000, 1459 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP"); 1460 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1461 SYSCTL_CHILDREN(rack_timers), 1462 OID_AUTO, "maxrto", CTLFLAG_RW, 1463 &rack_rto_max, 4000000, 1464 "Maximum RTO in microseconds -- should be at least as large as min_rto"); 1465 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1466 SYSCTL_CHILDREN(rack_timers), 1467 OID_AUTO, "minto", CTLFLAG_RW, 1468 &rack_min_to, 1000, 1469 "Minimum rack timeout in microseconds"); 1470 /* Measure controls */ 1471 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1472 SYSCTL_CHILDREN(rack_sysctl_root), 1473 OID_AUTO, 1474 "measure", 1475 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1476 "Measure related controls"); 1477 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1478 SYSCTL_CHILDREN(rack_measure), 1479 OID_AUTO, "wma_divisor", CTLFLAG_RW, 1480 &rack_wma_divisor, 8, 1481 "When doing b/w calculation what is the divisor for the WMA"); 1482 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1483 SYSCTL_CHILDREN(rack_measure), 1484 OID_AUTO, "end_cwnd", CTLFLAG_RW, 1485 &rack_cwnd_block_ends_measure, 0, 1486 "Does a cwnd just-return end the measurement window (app limited)"); 1487 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1488 SYSCTL_CHILDREN(rack_measure), 1489 OID_AUTO, "end_rwnd", CTLFLAG_RW, 1490 &rack_rwnd_block_ends_measure, 0, 1491 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); 1492 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1493 SYSCTL_CHILDREN(rack_measure), 1494 OID_AUTO, "min_target", CTLFLAG_RW, 1495 &rack_def_data_window, 20, 1496 "What is the minimum target window (in mss) for a GP measurements"); 1497 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1498 SYSCTL_CHILDREN(rack_measure), 1499 OID_AUTO, "goal_bdp", CTLFLAG_RW, 1500 
&rack_goal_bdp, 2, 1501 "What is the goal BDP to measure"); 1502 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1503 SYSCTL_CHILDREN(rack_measure), 1504 OID_AUTO, "min_srtts", CTLFLAG_RW, 1505 &rack_min_srtts, 1, 1506 "What is the goal BDP to measure"); 1507 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1508 SYSCTL_CHILDREN(rack_measure), 1509 OID_AUTO, "min_measure_tim", CTLFLAG_RW, 1510 &rack_min_measure_usec, 0, 1511 "What is the Minimum time time for a measurement if 0, this is off"); 1512 /* Features */ 1513 rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1514 SYSCTL_CHILDREN(rack_sysctl_root), 1515 OID_AUTO, 1516 "features", 1517 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1518 "Feature controls"); 1519 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1520 SYSCTL_CHILDREN(rack_features), 1521 OID_AUTO, "hybrid_set_maxseg", CTLFLAG_RW, 1522 &rack_hybrid_allow_set_maxseg, 0, 1523 "Should hybrid pacing allow the setmss command"); 1524 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1525 SYSCTL_CHILDREN(rack_features), 1526 OID_AUTO, "cmpack", CTLFLAG_RW, 1527 &rack_use_cmp_acks, 1, 1528 "Should RACK have LRO send compressed acks"); 1529 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1530 SYSCTL_CHILDREN(rack_features), 1531 OID_AUTO, "fsb", CTLFLAG_RW, 1532 &rack_use_fsb, 1, 1533 "Should RACK use the fast send block?"); 1534 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1535 SYSCTL_CHILDREN(rack_features), 1536 OID_AUTO, "rfo", CTLFLAG_RW, 1537 &rack_use_rfo, 1, 1538 "Should RACK use rack_fast_output()?"); 1539 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1540 SYSCTL_CHILDREN(rack_features), 1541 OID_AUTO, "rsmrfo", CTLFLAG_RW, 1542 &rack_use_rsm_rfo, 1, 1543 "Should RACK use rack_fast_rsm_output()?"); 1544 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1545 SYSCTL_CHILDREN(rack_features), 1546 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW, 1547 &rack_enable_mqueue_for_nonpaced, 0, 1548 "Should RACK use mbuf queuing for non-paced connections"); 1549 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1550 SYSCTL_CHILDREN(rack_features), 1551 OID_AUTO, "hystartplusplus", CTLFLAG_RW, 1552 &rack_do_hystart, 0, 1553 "Should RACK enable HyStart++ on connections?"); 1554 /* Policer detection */ 1555 rack_policing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1556 SYSCTL_CHILDREN(rack_sysctl_root), 1557 OID_AUTO, 1558 "policing", 1559 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1560 "policer detection"); 1561 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1562 SYSCTL_CHILDREN(rack_policing), 1563 OID_AUTO, "rxt_thresh", CTLFLAG_RW, 1564 &rack_policer_rxt_thresh, 0, 1565 "Percentage of retransmits we need to be a possible policer (499 = 49.9 percent)"); 1566 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1567 SYSCTL_CHILDREN(rack_policing), 1568 OID_AUTO, "avg_thresh", CTLFLAG_RW, 1569 &rack_policer_avg_thresh, 0, 1570 "What threshold of average retransmits needed to recover a lost packet (1 - 169 aka 21 = 2.1)?"); 1571 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1572 SYSCTL_CHILDREN(rack_policing), 1573 OID_AUTO, "med_thresh", CTLFLAG_RW, 1574 &rack_policer_med_thresh, 0, 1575 "What threshold of Median retransmits needed to recover a lost packet (1 - 16)?"); 1576 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1577 SYSCTL_CHILDREN(rack_policing), 1578 OID_AUTO, "data_thresh", CTLFLAG_RW, 1579 &rack_policer_data_thresh, 64000, 1580 "How many bytes must have gotten through before we can start doing policer detection?"); 1581 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1582 SYSCTL_CHILDREN(rack_policing), 1583 OID_AUTO, "bwcomp", CTLFLAG_RW, 1584 &rack_policing_do_bw_comp, 1, 1585 "Do we raise up low b/w so that at least pace_max_seg can be sent in the srtt?"); 1586 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1587 
SYSCTL_CHILDREN(rack_policing), 1588 OID_AUTO, "recmss", CTLFLAG_RW, 1589 &rack_req_del_mss, 18, 1590 "How many MSS must be delivered during recovery to engage policer detection?"); 1591 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1592 SYSCTL_CHILDREN(rack_policing), 1593 OID_AUTO, "res_div", CTLFLAG_RW, 1594 &rack_policer_bucket_reserve, 20, 1595 "What percentage is reserved in the policer bucket?"); 1596 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1597 SYSCTL_CHILDREN(rack_policing), 1598 OID_AUTO, "min_comp_bw", CTLFLAG_RW, 1599 &rack_pol_min_bw, 125000, 1600 "Do we have a min b/w for b/w compensation (0 = no)?"); 1601 /* Misc rack controls */ 1602 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1603 SYSCTL_CHILDREN(rack_sysctl_root), 1604 OID_AUTO, 1605 "misc", 1606 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1607 "Misc related controls"); 1608 #ifdef TCP_ACCOUNTING 1609 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1610 SYSCTL_CHILDREN(rack_misc), 1611 OID_AUTO, "tcp_acct", CTLFLAG_RW, 1612 &rack_tcp_accounting, 0, 1613 "Should we turn on TCP accounting for all rack sessions?"); 1614 #endif 1615 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1616 SYSCTL_CHILDREN(rack_misc), 1617 OID_AUTO, "dnd", CTLFLAG_RW, 1618 &rack_dnd_default, 0, 1619 "Do not disturb default for rack_rrr = 3"); 1620 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1621 SYSCTL_CHILDREN(rack_misc), 1622 OID_AUTO, "sad_seg_per", CTLFLAG_RW, 1623 &sad_seg_size_per, 800, 1624 "Percentage of segment size needed in a sack 800 = 80.0?"); 1625 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1626 SYSCTL_CHILDREN(rack_misc), 1627 OID_AUTO, "rxt_controls", CTLFLAG_RW, 1628 &rack_rxt_controls, 0, 1629 "Retransmit sending size controls (valid values 0, 1, 2 default=1)?"); 1630 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1631 SYSCTL_CHILDREN(rack_misc), 1632 OID_AUTO, "rack_hibeta", CTLFLAG_RW, 1633 &rack_hibeta_setting, 0, 1634 "Do we ue a high beta (80 instead of 50)?"); 1635 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1636 SYSCTL_CHILDREN(rack_misc), 1637 OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW, 1638 &rack_apply_rtt_with_reduced_conf, 0, 1639 "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?"); 1640 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1641 SYSCTL_CHILDREN(rack_misc), 1642 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW, 1643 &rack_dsack_std_based, 3, 1644 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?"); 1645 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1646 SYSCTL_CHILDREN(rack_misc), 1647 OID_AUTO, "prr_addback_max", CTLFLAG_RW, 1648 &rack_prr_addbackmax, 2, 1649 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?"); 1650 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1651 SYSCTL_CHILDREN(rack_misc), 1652 OID_AUTO, "stats_gets_ms", CTLFLAG_RW, 1653 &rack_stats_gets_ms_rtt, 1, 1654 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); 1655 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1656 SYSCTL_CHILDREN(rack_misc), 1657 OID_AUTO, "clientlowbuf", CTLFLAG_RW, 1658 &rack_client_low_buf, 0, 1659 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?"); 1660 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1661 SYSCTL_CHILDREN(rack_misc), 1662 OID_AUTO, "defprofile", CTLFLAG_RW, 1663 &rack_def_profile, 0, 1664 "Should RACK use a default profile (0=no, num == profile num)?"); 1665 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1666 SYSCTL_CHILDREN(rack_misc), 1667 OID_AUTO, "shared_cwnd", CTLFLAG_RW, 1668 &rack_enable_shared_cwnd, 1, 1669 "Should RACK try to use the shared cwnd on connections 
where allowed"); 1670 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1671 SYSCTL_CHILDREN(rack_misc), 1672 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW, 1673 &rack_limits_scwnd, 1, 1674 "Should RACK place low end time limits on the shared cwnd feature"); 1675 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1676 SYSCTL_CHILDREN(rack_misc), 1677 OID_AUTO, "no_prr", CTLFLAG_RW, 1678 &rack_disable_prr, 0, 1679 "Should RACK not use prr and only pace (must have pacing on)"); 1680 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1681 SYSCTL_CHILDREN(rack_misc), 1682 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1683 &rack_verbose_logging, 0, 1684 "Should RACK black box logging be verbose"); 1685 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1686 SYSCTL_CHILDREN(rack_misc), 1687 OID_AUTO, "data_after_close", CTLFLAG_RW, 1688 &rack_ignore_data_after_close, 1, 1689 "Do we hold off sending a RST until all pending data is ack'd"); 1690 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1691 SYSCTL_CHILDREN(rack_misc), 1692 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1693 &rack_sack_not_required, 1, 1694 "Do we allow rack to run on connections not supporting SACK"); 1695 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1696 SYSCTL_CHILDREN(rack_misc), 1697 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 1698 &rack_send_a_lot_in_prr, 1, 1699 "Send a lot in prr"); 1700 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1701 SYSCTL_CHILDREN(rack_misc), 1702 OID_AUTO, "autoscale", CTLFLAG_RW, 1703 &rack_autosndbuf_inc, 20, 1704 "What percentage should rack scale up its snd buffer by?"); 1705 1706 1707 /* Sack Attacker detection stuff */ 1708 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1709 SYSCTL_CHILDREN(rack_attack), 1710 OID_AUTO, "merge_out", CTLFLAG_RW, 1711 &rack_merge_out_sacks_on_attack, 0, 1712 "Do we merge the sendmap when we decide we are being attacked?"); 1713 1714 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1715 SYSCTL_CHILDREN(rack_attack), 1716 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 1717 &rack_highest_sack_thresh_seen, 0, 1718 "Highest sack to ack ratio seen"); 1719 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1720 SYSCTL_CHILDREN(rack_attack), 1721 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW, 1722 &rack_highest_move_thresh_seen, 0, 1723 "Highest move to non-move ratio seen"); 1724 rack_ack_total = counter_u64_alloc(M_WAITOK); 1725 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1726 SYSCTL_CHILDREN(rack_attack), 1727 OID_AUTO, "acktotal", CTLFLAG_RD, 1728 &rack_ack_total, 1729 "Total number of Ack's"); 1730 rack_express_sack = counter_u64_alloc(M_WAITOK); 1731 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1732 SYSCTL_CHILDREN(rack_attack), 1733 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1734 &rack_express_sack, 1735 "Total expresss number of Sack's"); 1736 rack_sack_total = counter_u64_alloc(M_WAITOK); 1737 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1738 SYSCTL_CHILDREN(rack_attack), 1739 OID_AUTO, "sacktotal", CTLFLAG_RD, 1740 &rack_sack_total, 1741 "Total number of SACKs"); 1742 rack_move_none = counter_u64_alloc(M_WAITOK); 1743 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1744 SYSCTL_CHILDREN(rack_attack), 1745 OID_AUTO, "move_none", CTLFLAG_RD, 1746 &rack_move_none, 1747 "Total number of SACK index reuse of positions under threshold"); 1748 rack_move_some = counter_u64_alloc(M_WAITOK); 1749 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1750 SYSCTL_CHILDREN(rack_attack), 1751 OID_AUTO, "move_some", CTLFLAG_RD, 1752 &rack_move_some, 1753 "Total number of SACK index reuse of positions over threshold"); 1754 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 1755 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1756 SYSCTL_CHILDREN(rack_attack), 1757 OID_AUTO, "attacks", 
CTLFLAG_RD, 1758 &rack_sack_attacks_detected, 1759 "Total number of SACK attackers that had sack disabled"); 1760 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 1761 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1762 SYSCTL_CHILDREN(rack_attack), 1763 OID_AUTO, "reversed", CTLFLAG_RD, 1764 &rack_sack_attacks_reversed, 1765 "Total number of SACK attackers that were later determined false positive"); 1766 rack_sack_attacks_suspect = counter_u64_alloc(M_WAITOK); 1767 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1768 SYSCTL_CHILDREN(rack_attack), 1769 OID_AUTO, "suspect", CTLFLAG_RD, 1770 &rack_sack_attacks_suspect, 1771 "Total number of SACKs that triggered early detection"); 1772 1773 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 1774 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1775 SYSCTL_CHILDREN(rack_attack), 1776 OID_AUTO, "nextmerge", CTLFLAG_RD, 1777 &rack_sack_used_next_merge, 1778 "Total number of times we used the next merge"); 1779 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 1780 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1781 SYSCTL_CHILDREN(rack_attack), 1782 OID_AUTO, "prevmerge", CTLFLAG_RD, 1783 &rack_sack_used_prev_merge, 1784 "Total number of times we used the prev merge"); 1785 /* Counters */ 1786 rack_total_bytes = counter_u64_alloc(M_WAITOK); 1787 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1788 SYSCTL_CHILDREN(rack_counters), 1789 OID_AUTO, "totalbytes", CTLFLAG_RD, 1790 &rack_total_bytes, 1791 "Total number of bytes sent"); 1792 rack_fto_send = counter_u64_alloc(M_WAITOK); 1793 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1794 SYSCTL_CHILDREN(rack_counters), 1795 OID_AUTO, "fto_send", CTLFLAG_RD, 1796 &rack_fto_send, "Total number of rack_fast_output sends"); 1797 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK); 1798 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1799 SYSCTL_CHILDREN(rack_counters), 1800 OID_AUTO, "fto_rsm_send", CTLFLAG_RD, 1801 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends"); 1802 rack_nfto_resend = counter_u64_alloc(M_WAITOK); 1803 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1804 SYSCTL_CHILDREN(rack_counters), 1805 OID_AUTO, "nfto_resend", CTLFLAG_RD, 1806 &rack_nfto_resend, "Total number of rack_output retransmissions"); 1807 rack_non_fto_send = counter_u64_alloc(M_WAITOK); 1808 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1809 SYSCTL_CHILDREN(rack_counters), 1810 OID_AUTO, "nfto_send", CTLFLAG_RD, 1811 &rack_non_fto_send, "Total number of rack_output first sends"); 1812 rack_extended_rfo = counter_u64_alloc(M_WAITOK); 1813 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1814 SYSCTL_CHILDREN(rack_counters), 1815 OID_AUTO, "rfo_extended", CTLFLAG_RD, 1816 &rack_extended_rfo, "Total number of times we extended rfo"); 1817 1818 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK); 1819 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1820 SYSCTL_CHILDREN(rack_counters), 1821 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD, 1822 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing"); 1823 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK); 1824 1825 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1826 SYSCTL_CHILDREN(rack_counters), 1827 OID_AUTO, "hwpace_lost", CTLFLAG_RD, 1828 &rack_hw_pace_lost, "Total number of times we failed to initialize hw pacing"); 1829 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 1830 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1831 SYSCTL_CHILDREN(rack_counters), 1832 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 1833 &rack_tlp_tot, 1834 "Total number of tail loss probe expirations"); 1835 rack_tlp_newdata = 
counter_u64_alloc(M_WAITOK); 1836 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1837 SYSCTL_CHILDREN(rack_counters), 1838 OID_AUTO, "tlp_new", CTLFLAG_RD, 1839 &rack_tlp_newdata, 1840 "Total number of tail loss probe sending new data"); 1841 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 1842 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1843 SYSCTL_CHILDREN(rack_counters), 1844 OID_AUTO, "tlp_retran", CTLFLAG_RD, 1845 &rack_tlp_retran, 1846 "Total number of tail loss probe sending retransmitted data"); 1847 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 1848 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1849 SYSCTL_CHILDREN(rack_counters), 1850 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 1851 &rack_tlp_retran_bytes, 1852 "Total bytes of tail loss probe sending retransmitted data"); 1853 rack_to_tot = counter_u64_alloc(M_WAITOK); 1854 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1855 SYSCTL_CHILDREN(rack_counters), 1856 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 1857 &rack_to_tot, 1858 "Total number of times the rack to expired"); 1859 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 1860 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1861 SYSCTL_CHILDREN(rack_counters), 1862 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 1863 &rack_saw_enobuf, 1864 "Total number of times a sends returned enobuf for non-hdwr paced connections"); 1865 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK); 1866 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1867 SYSCTL_CHILDREN(rack_counters), 1868 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD, 1869 &rack_saw_enobuf_hw, 1870 "Total number of times a send returned enobuf for hdwr paced connections"); 1871 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 1872 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1873 SYSCTL_CHILDREN(rack_counters), 1874 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 1875 &rack_saw_enetunreach, 1876 "Total number of times a send received a enetunreachable"); 1877 rack_hot_alloc = counter_u64_alloc(M_WAITOK); 1878 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1879 SYSCTL_CHILDREN(rack_counters), 1880 OID_AUTO, "alloc_hot", CTLFLAG_RD, 1881 &rack_hot_alloc, 1882 "Total allocations from the top of our list"); 1883 tcp_policer_detected = counter_u64_alloc(M_WAITOK); 1884 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1885 SYSCTL_CHILDREN(rack_counters), 1886 OID_AUTO, "policer_detected", CTLFLAG_RD, 1887 &tcp_policer_detected, 1888 "Total policer_detections"); 1889 1890 rack_to_alloc = counter_u64_alloc(M_WAITOK); 1891 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1892 SYSCTL_CHILDREN(rack_counters), 1893 OID_AUTO, "allocs", CTLFLAG_RD, 1894 &rack_to_alloc, 1895 "Total allocations of tracking structures"); 1896 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 1897 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1898 SYSCTL_CHILDREN(rack_counters), 1899 OID_AUTO, "allochard", CTLFLAG_RD, 1900 &rack_to_alloc_hard, 1901 "Total allocations done with sleeping the hard way"); 1902 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 1903 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1904 SYSCTL_CHILDREN(rack_counters), 1905 OID_AUTO, "allocemerg", CTLFLAG_RD, 1906 &rack_to_alloc_emerg, 1907 "Total allocations done from emergency cache"); 1908 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 1909 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1910 SYSCTL_CHILDREN(rack_counters), 1911 OID_AUTO, "alloc_limited", CTLFLAG_RD, 1912 &rack_to_alloc_limited, 1913 "Total allocations dropped due to limit"); 1914 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 1915 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1916 SYSCTL_CHILDREN(rack_counters), 
1917 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 1918 &rack_alloc_limited_conns, 1919 "Connections with allocations dropped due to limit"); 1920 rack_split_limited = counter_u64_alloc(M_WAITOK); 1921 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1922 SYSCTL_CHILDREN(rack_counters), 1923 OID_AUTO, "split_limited", CTLFLAG_RD, 1924 &rack_split_limited, 1925 "Split allocations dropped due to limit"); 1926 rack_rxt_clamps_cwnd = counter_u64_alloc(M_WAITOK); 1927 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1928 SYSCTL_CHILDREN(rack_counters), 1929 OID_AUTO, "rxt_clamps_cwnd", CTLFLAG_RD, 1930 &rack_rxt_clamps_cwnd, 1931 "Number of times that excessive rxt clamped the cwnd down"); 1932 rack_rxt_clamps_cwnd_uniq = counter_u64_alloc(M_WAITOK); 1933 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1934 SYSCTL_CHILDREN(rack_counters), 1935 OID_AUTO, "rxt_clamps_cwnd_uniq", CTLFLAG_RD, 1936 &rack_rxt_clamps_cwnd_uniq, 1937 "Number of connections that have had excessive rxt clamped the cwnd down"); 1938 rack_persists_sends = counter_u64_alloc(M_WAITOK); 1939 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1940 SYSCTL_CHILDREN(rack_counters), 1941 OID_AUTO, "persist_sends", CTLFLAG_RD, 1942 &rack_persists_sends, 1943 "Number of times we sent a persist probe"); 1944 rack_persists_acks = counter_u64_alloc(M_WAITOK); 1945 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1946 SYSCTL_CHILDREN(rack_counters), 1947 OID_AUTO, "persist_acks", CTLFLAG_RD, 1948 &rack_persists_acks, 1949 "Number of times a persist probe was acked"); 1950 rack_persists_loss = counter_u64_alloc(M_WAITOK); 1951 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1952 SYSCTL_CHILDREN(rack_counters), 1953 OID_AUTO, "persist_loss", CTLFLAG_RD, 1954 &rack_persists_loss, 1955 "Number of times we detected a lost persist probe (no ack)"); 1956 rack_persists_lost_ends = counter_u64_alloc(M_WAITOK); 1957 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1958 SYSCTL_CHILDREN(rack_counters), 1959 OID_AUTO, "persist_loss_ends", CTLFLAG_RD, 1960 &rack_persists_lost_ends, 1961 "Number of lost persist probe (no ack) that the run ended with a PERSIST abort"); 1962 #ifdef INVARIANTS 1963 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK); 1964 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1965 SYSCTL_CHILDREN(rack_counters), 1966 OID_AUTO, "map_adjust_req", CTLFLAG_RD, 1967 &rack_adjust_map_bw, 1968 "Number of times we hit the case where the sb went up and down on a sendmap entry"); 1969 #endif 1970 rack_multi_single_eq = counter_u64_alloc(M_WAITOK); 1971 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1972 SYSCTL_CHILDREN(rack_counters), 1973 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD, 1974 &rack_multi_single_eq, 1975 "Number of compressed acks total represented"); 1976 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK); 1977 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1978 SYSCTL_CHILDREN(rack_counters), 1979 OID_AUTO, "cmp_ack_not", CTLFLAG_RD, 1980 &rack_proc_non_comp_ack, 1981 "Number of non compresseds acks that we processed"); 1982 1983 1984 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 1985 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1986 SYSCTL_CHILDREN(rack_counters), 1987 OID_AUTO, "sack_long", CTLFLAG_RD, 1988 &rack_sack_proc_all, 1989 "Total times we had to walk whole list for sack processing"); 1990 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 1991 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1992 SYSCTL_CHILDREN(rack_counters), 1993 OID_AUTO, "sack_restart", CTLFLAG_RD, 1994 &rack_sack_proc_restart, 1995 "Total times we had to walk whole list due to a restart"); 1996 rack_sack_proc_short = 
counter_u64_alloc(M_WAITOK); 1997 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1998 SYSCTL_CHILDREN(rack_counters), 1999 OID_AUTO, "sack_short", CTLFLAG_RD, 2000 &rack_sack_proc_short, 2001 "Total times we took shortcut for sack processing"); 2002 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK); 2003 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2004 SYSCTL_CHILDREN(rack_attack), 2005 OID_AUTO, "skipacked", CTLFLAG_RD, 2006 &rack_sack_skipped_acked, 2007 "Total number of times we skipped previously sacked"); 2008 rack_sack_splits = counter_u64_alloc(M_WAITOK); 2009 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2010 SYSCTL_CHILDREN(rack_attack), 2011 OID_AUTO, "ofsplit", CTLFLAG_RD, 2012 &rack_sack_splits, 2013 "Total number of times we did the old fashion tree split"); 2014 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 2015 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2016 SYSCTL_CHILDREN(rack_counters), 2017 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 2018 &rack_input_idle_reduces, 2019 "Total number of idle reductions on input"); 2020 rack_collapsed_win_seen = counter_u64_alloc(M_WAITOK); 2021 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2022 SYSCTL_CHILDREN(rack_counters), 2023 OID_AUTO, "collapsed_win_seen", CTLFLAG_RD, 2024 &rack_collapsed_win_seen, 2025 "Total number of collapsed window events seen (where our window shrinks)"); 2026 2027 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 2028 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2029 SYSCTL_CHILDREN(rack_counters), 2030 OID_AUTO, "collapsed_win", CTLFLAG_RD, 2031 &rack_collapsed_win, 2032 "Total number of collapsed window events where we mark packets"); 2033 rack_collapsed_win_rxt = counter_u64_alloc(M_WAITOK); 2034 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2035 SYSCTL_CHILDREN(rack_counters), 2036 OID_AUTO, "collapsed_win_rxt", CTLFLAG_RD, 2037 &rack_collapsed_win_rxt, 2038 "Total number of packets that were retransmitted"); 2039 rack_collapsed_win_rxt_bytes = counter_u64_alloc(M_WAITOK); 2040 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2041 SYSCTL_CHILDREN(rack_counters), 2042 OID_AUTO, "collapsed_win_bytes", CTLFLAG_RD, 2043 &rack_collapsed_win_rxt_bytes, 2044 "Total number of bytes that were retransmitted"); 2045 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 2046 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 2047 SYSCTL_CHILDREN(rack_counters), 2048 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 2049 &rack_try_scwnd, 2050 "Total number of scwnd attempts"); 2051 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 2052 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 2053 OID_AUTO, "outsize", CTLFLAG_RD, 2054 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 2055 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 2056 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 2057 OID_AUTO, "opts", CTLFLAG_RD, 2058 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 2059 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 2060 SYSCTL_CHILDREN(rack_sysctl_root), 2061 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 2062 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 2063 } 2064 2065 static uint32_t 2066 rc_init_window(struct tcp_rack *rack) 2067 { 2068 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 2069 2070 } 2071 2072 static uint64_t 2073 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 2074 { 2075 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 2076 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 2077 else if (rack->r_ctl.cwnd_to_use < 
rack->rc_tp->snd_ssthresh) 2078 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 2079 else 2080 return (rack->r_ctl.rc_fixed_pacing_rate_ca); 2081 } 2082 2083 static void 2084 rack_log_hybrid_bw(struct tcp_rack *rack, uint32_t seq, uint64_t cbw, uint64_t tim, 2085 uint64_t data, uint8_t mod, uint16_t aux, 2086 struct tcp_sendfile_track *cur, int line) 2087 { 2088 #ifdef TCP_REQUEST_TRK 2089 int do_log = 0; 2090 2091 /* 2092 * The rate cap one is noisy and only should come out when normal BB logging 2093 * is enabled, the other logs (not RATE_CAP and NOT CAP_CALC) only come out 2094 * once per chunk and make up the BBpoint that can be turned on by the client. 2095 */ 2096 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) { 2097 /* 2098 * The very noisy two need to only come out when 2099 * we have verbose logging on. 2100 */ 2101 if (rack_verbose_logging != 0) 2102 do_log = tcp_bblogging_on(rack->rc_tp); 2103 else 2104 do_log = 0; 2105 } else if (mod != HYBRID_LOG_BW_MEASURE) { 2106 /* 2107 * All other less noisy logs here except the measure which 2108 * also needs to come out on the point and the log. 2109 */ 2110 do_log = tcp_bblogging_on(rack->rc_tp); 2111 } else { 2112 do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING); 2113 } 2114 2115 if (do_log) { 2116 union tcp_log_stackspecific log; 2117 struct timeval tv; 2118 uint64_t lt_bw; 2119 2120 /* Convert our ms to a microsecond */ 2121 memset(&log, 0, sizeof(log)); 2122 2123 log.u_bbr.cwnd_gain = line; 2124 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2125 log.u_bbr.rttProp = tim; 2126 log.u_bbr.bw_inuse = cbw; 2127 log.u_bbr.delRate = rack_get_gp_est(rack); 2128 lt_bw = rack_get_lt_bw(rack); 2129 log.u_bbr.flex1 = seq; 2130 log.u_bbr.pacing_gain = aux; 2131 /* lt_bw = < flex3 | flex2 > */ 2132 log.u_bbr.flex2 = (uint32_t)(lt_bw & 0x00000000ffffffff); 2133 log.u_bbr.flex3 = (uint32_t)((lt_bw >> 32) & 0x00000000ffffffff); 2134 /* Record the last obtained us rtt in inflight */ 2135 if (cur == NULL) { 2136 /* Make sure we are looking at the right log if an overide comes in */ 2137 cur = rack->r_ctl.rc_last_sft; 2138 } 2139 if (rack->r_ctl.rack_rs.rs_flags != RACK_RTT_EMPTY) 2140 log.u_bbr.inflight = rack->r_ctl.rack_rs.rs_us_rtt; 2141 else { 2142 /* Use the last known rtt i.e. 
the rack-rtt */ 2143 log.u_bbr.inflight = rack->rc_rack_rtt; 2144 } 2145 if (cur != NULL) { 2146 uint64_t off; 2147 2148 log.u_bbr.cur_del_rate = cur->deadline; 2149 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) { 2150 /* start = < lost | pkt_epoch > */ 2151 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); 2152 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2153 log.u_bbr.flex6 = cur->start_seq; 2154 log.u_bbr.pkts_out = cur->end_seq; 2155 } else { 2156 /* start = < lost | pkt_epoch > */ 2157 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); 2158 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2159 /* end = < pkts_out | flex6 > */ 2160 log.u_bbr.flex6 = (uint32_t)(cur->end & 0x00000000ffffffff); 2161 log.u_bbr.pkts_out = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); 2162 } 2163 /* first_send = <lt_epoch | epoch> */ 2164 log.u_bbr.epoch = (uint32_t)(cur->first_send & 0x00000000ffffffff); 2165 log.u_bbr.lt_epoch = (uint32_t)((cur->first_send >> 32) & 0x00000000ffffffff); 2166 /* localtime = <delivered | applimited>*/ 2167 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); 2168 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 2169 #ifdef TCP_REQUEST_TRK 2170 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 2171 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 2172 #endif 2173 log.u_bbr.inhpts = 1; 2174 log.u_bbr.flex4 = (uint32_t)(rack->rc_tp->t_sndbytes - cur->sent_at_fs); 2175 log.u_bbr.flex5 = (uint32_t)(rack->rc_tp->t_snd_rxt_bytes - cur->rxt_at_fs); 2176 log.u_bbr.flex7 = (uint16_t)cur->hybrid_flags; 2177 } else { 2178 log.u_bbr.flex7 = 0xffff; 2179 log.u_bbr.cur_del_rate = 0xffffffffffffffff; 2180 } 2181 /* 2182 * Compose bbr_state to be a bit wise 0000ADHF 2183 * where A is the always_pace flag 2184 * where D is the dgp_on flag 2185 * where H is the hybrid_mode on flag 2186 * where F is the use_fixed_rate flag. 2187 */ 2188 log.u_bbr.bbr_state = rack->rc_always_pace; 2189 log.u_bbr.bbr_state <<= 1; 2190 log.u_bbr.bbr_state |= rack->dgp_on; 2191 log.u_bbr.bbr_state <<= 1; 2192 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 2193 log.u_bbr.bbr_state <<= 1; 2194 log.u_bbr.bbr_state |= rack->use_fixed_rate; 2195 log.u_bbr.flex8 = mod; 2196 tcp_log_event(rack->rc_tp, NULL, 2197 &rack->rc_inp->inp_socket->so_rcv, 2198 &rack->rc_inp->inp_socket->so_snd, 2199 TCP_HYBRID_PACING_LOG, 0, 2200 0, &log, false, NULL, __func__, __LINE__, &tv); 2201 2202 } 2203 #endif 2204 } 2205 2206 #ifdef TCP_REQUEST_TRK 2207 static void 2208 rack_log_hybrid_sends(struct tcp_rack *rack, struct tcp_sendfile_track *cur, int line) 2209 { 2210 if (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) { 2211 union tcp_log_stackspecific log; 2212 struct timeval tv; 2213 uint64_t off; 2214 2215 /* Convert our ms to a microsecond */ 2216 memset(&log, 0, sizeof(log)); 2217 2218 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2219 log.u_bbr.delRate = cur->sent_at_fs; 2220 2221 if ((cur->flags & TCP_TRK_TRACK_FLG_LSND) == 0) { 2222 /* 2223 * We did not get a new Rules Applied to set so 2224 * no overlapping send occured, this means the 2225 * current byte counts are correct. 2226 */ 2227 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 2228 log.u_bbr.rttProp = rack->rc_tp->t_snd_rxt_bytes; 2229 } else { 2230 /* 2231 * Overlapping send case, we switched to a new 2232 * send and did a rules applied. 
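* Because of that, the byte counts below come from the snapshots taken
* at the last send (sent_at_ls / rxt_at_ls) rather than from the
* connection-lifetime totals used in the non-overlapping case above.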
2233 */ 2234 log.u_bbr.cur_del_rate = cur->sent_at_ls; 2235 log.u_bbr.rttProp = cur->rxt_at_ls; 2236 } 2237 log.u_bbr.bw_inuse = cur->rxt_at_fs; 2238 log.u_bbr.cwnd_gain = line; 2239 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 2240 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 2241 /* start = < flex1 | flex2 > */ 2242 log.u_bbr.flex2 = (uint32_t)(cur->start & 0x00000000ffffffff); 2243 log.u_bbr.flex1 = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2244 /* end = < flex3 | flex4 > */ 2245 log.u_bbr.flex4 = (uint32_t)(cur->end & 0x00000000ffffffff); 2246 log.u_bbr.flex3 = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); 2247 2248 /* localtime = <delivered | applimited>*/ 2249 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); 2250 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 2251 /* client timestamp = <lt_epoch | epoch>*/ 2252 log.u_bbr.epoch = (uint32_t)(cur->timestamp & 0x00000000ffffffff); 2253 log.u_bbr.lt_epoch = (uint32_t)((cur->timestamp >> 32) & 0x00000000ffffffff); 2254 /* now set all the flags in */ 2255 log.u_bbr.pkts_out = cur->hybrid_flags; 2256 log.u_bbr.lost = cur->playout_ms; 2257 log.u_bbr.flex6 = cur->flags; 2258 /* 2259 * Last send time = <flex5 | pkt_epoch> note we do not distinguish cases 2260 * where a false retransmit occurred so first_send <-> lastsend may 2261 * include longer time then it actually took if we have a false rxt. 2262 */ 2263 log.u_bbr.pkt_epoch = (uint32_t)(rack->r_ctl.last_tmit_time_acked & 0x00000000ffffffff); 2264 log.u_bbr.flex5 = (uint32_t)((rack->r_ctl.last_tmit_time_acked >> 32) & 0x00000000ffffffff); 2265 /* 2266 * Compose bbr_state to be a bit wise 0000ADHF 2267 * where A is the always_pace flag 2268 * where D is the dgp_on flag 2269 * where H is the hybrid_mode on flag 2270 * where F is the use_fixed_rate flag. 
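* As an illustrative example, a connection that is always pacing with
* dgp_on set, but with hybrid mode and the fixed rate both off, would
* log bbr_state = 0b1100 (0xc) after the shifts below.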
2271 */ 2272 log.u_bbr.bbr_state = rack->rc_always_pace; 2273 log.u_bbr.bbr_state <<= 1; 2274 log.u_bbr.bbr_state |= rack->dgp_on; 2275 log.u_bbr.bbr_state <<= 1; 2276 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 2277 log.u_bbr.bbr_state <<= 1; 2278 log.u_bbr.bbr_state |= rack->use_fixed_rate; 2279 2280 log.u_bbr.flex8 = HYBRID_LOG_SENT_LOST; 2281 tcp_log_event(rack->rc_tp, NULL, 2282 &rack->rc_inp->inp_socket->so_rcv, 2283 &rack->rc_inp->inp_socket->so_snd, 2284 TCP_HYBRID_PACING_LOG, 0, 2285 0, &log, false, NULL, __func__, __LINE__, &tv); 2286 } 2287 } 2288 #endif 2289 2290 static inline uint64_t 2291 rack_compensate_for_linerate(struct tcp_rack *rack, uint64_t bw) 2292 { 2293 uint64_t ret_bw, ether; 2294 uint64_t u_segsiz; 2295 2296 ether = rack->rc_tp->t_maxseg + sizeof(struct tcphdr); 2297 if (rack->r_is_v6){ 2298 #ifdef INET6 2299 ether += sizeof(struct ip6_hdr); 2300 #endif 2301 ether += 14; /* eheader size 6+6+2 */ 2302 } else { 2303 #ifdef INET 2304 ether += sizeof(struct ip); 2305 #endif 2306 ether += 14; /* eheader size 6+6+2 */ 2307 } 2308 u_segsiz = (uint64_t)min(ctf_fixed_maxseg(rack->rc_tp), rack->r_ctl.rc_pace_min_segs); 2309 ret_bw = bw; 2310 ret_bw *= ether; 2311 ret_bw /= u_segsiz; 2312 return (ret_bw); 2313 } 2314 2315 static void 2316 rack_rate_cap_bw(struct tcp_rack *rack, uint64_t *bw, int *capped) 2317 { 2318 #ifdef TCP_REQUEST_TRK 2319 struct timeval tv; 2320 uint64_t timenow, timeleft, lenleft, lengone, calcbw; 2321 #endif 2322 2323 if (rack->r_ctl.bw_rate_cap == 0) 2324 return; 2325 #ifdef TCP_REQUEST_TRK 2326 if (rack->rc_catch_up && rack->rc_hybrid_mode && 2327 (rack->r_ctl.rc_last_sft != NULL)) { 2328 /* 2329 * We have a dynamic cap. The original target 2330 * is in bw_rate_cap, but we need to look at 2331 * how long it is until we hit the deadline. 2332 */ 2333 struct tcp_sendfile_track *ent; 2334 2335 ent = rack->r_ctl.rc_last_sft; 2336 microuptime(&tv); 2337 timenow = tcp_tv_to_lusectick(&tv); 2338 if (timenow >= ent->deadline) { 2339 /* No time left we do DGP only */ 2340 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2341 0, 0, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__); 2342 rack->r_ctl.bw_rate_cap = 0; 2343 return; 2344 } 2345 /* We have the time */ 2346 timeleft = rack->r_ctl.rc_last_sft->deadline - timenow; 2347 if (timeleft < HPTS_MSEC_IN_SEC) { 2348 /* If there is less than a ms left just use DGPs rate */ 2349 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2350 0, timeleft, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__); 2351 rack->r_ctl.bw_rate_cap = 0; 2352 return; 2353 } 2354 /* 2355 * Now lets find the amount of data left to send. 2356 * 2357 * Now ideally we want to use the end_seq to figure out how much more 2358 * but it might not be possible (only if we have the TRACK_FG_COMP on the entry.. 2359 */ 2360 if (ent->flags & TCP_TRK_TRACK_FLG_COMP) { 2361 if (SEQ_GT(ent->end_seq, rack->rc_tp->snd_una)) 2362 lenleft = ent->end_seq - rack->rc_tp->snd_una; 2363 else { 2364 /* TSNH, we should catch it at the send */ 2365 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2366 0, timeleft, 0, HYBRID_LOG_CAPERROR, 0, ent, __LINE__); 2367 rack->r_ctl.bw_rate_cap = 0; 2368 return; 2369 } 2370 } else { 2371 /* 2372 * The hard way, figure out how much is gone and then 2373 * take that away from the total the client asked for 2374 * (thats off by tls overhead if this is tls). 
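* As a rough illustration (numbers made up): if the request spans
* 1,000,000 bytes (end - start) and 600,000 of them are already acked
* past start_seq, lenleft is 400,000; with 2,000,000 usecs left to the
* deadline the calculation below yields calcbw = 200,000 bytes/sec,
* which rack_compensate_for_linerate() then inflates for header overhead.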
2375 */ 2376 if (SEQ_GT(rack->rc_tp->snd_una, ent->start_seq)) 2377 lengone = rack->rc_tp->snd_una - ent->start_seq; 2378 else 2379 lengone = 0; 2380 if (lengone < (ent->end - ent->start)) 2381 lenleft = (ent->end - ent->start) - lengone; 2382 else { 2383 /* TSNH, we should catch it at the send */ 2384 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2385 0, timeleft, lengone, HYBRID_LOG_CAPERROR, 0, ent, __LINE__); 2386 rack->r_ctl.bw_rate_cap = 0; 2387 return; 2388 } 2389 } 2390 if (lenleft == 0) { 2391 /* We have it all sent */ 2392 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2393 0, timeleft, lenleft, HYBRID_LOG_ALLSENT, 0, ent, __LINE__); 2394 if (rack->r_ctl.bw_rate_cap) 2395 goto normal_ratecap; 2396 else 2397 return; 2398 } 2399 calcbw = lenleft * HPTS_USEC_IN_SEC; 2400 calcbw /= timeleft; 2401 /* Now we must compensate for IP/TCP overhead */ 2402 calcbw = rack_compensate_for_linerate(rack, calcbw); 2403 /* Update the bit rate cap */ 2404 rack->r_ctl.bw_rate_cap = calcbw; 2405 if ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && 2406 (rack_hybrid_allow_set_maxseg == 1) && 2407 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { 2408 /* Lets set in a smaller mss possibly here to match our rate-cap */ 2409 uint32_t orig_max; 2410 2411 orig_max = rack->r_ctl.rc_pace_max_segs; 2412 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; 2413 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, calcbw, ctf_fixed_maxseg(rack->rc_tp)); 2414 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5); 2415 } 2416 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2417 calcbw, timeleft, lenleft, HYBRID_LOG_CAP_CALC, 0, ent, __LINE__); 2418 if ((calcbw > 0) && (*bw > calcbw)) { 2419 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2420 *bw, ent->deadline, lenleft, HYBRID_LOG_RATE_CAP, 0, ent, __LINE__); 2421 *capped = 1; 2422 *bw = calcbw; 2423 } 2424 return; 2425 } 2426 normal_ratecap: 2427 #endif 2428 if ((rack->r_ctl.bw_rate_cap > 0) && (*bw > rack->r_ctl.bw_rate_cap)) { 2429 #ifdef TCP_REQUEST_TRK 2430 if (rack->rc_hybrid_mode && 2431 rack->rc_catch_up && 2432 (rack->r_ctl.rc_last_sft != NULL) && 2433 (rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && 2434 (rack_hybrid_allow_set_maxseg == 1) && 2435 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { 2436 /* Lets set in a smaller mss possibly here to match our rate-cap */ 2437 uint32_t orig_max; 2438 2439 orig_max = rack->r_ctl.rc_pace_max_segs; 2440 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; 2441 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, rack->r_ctl.bw_rate_cap, ctf_fixed_maxseg(rack->rc_tp)); 2442 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5); 2443 } 2444 #endif 2445 *capped = 1; 2446 *bw = rack->r_ctl.bw_rate_cap; 2447 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2448 *bw, 0, 0, 2449 HYBRID_LOG_RATE_CAP, 1, NULL, __LINE__); 2450 } 2451 } 2452 2453 static uint64_t 2454 rack_get_gp_est(struct tcp_rack *rack) 2455 { 2456 uint64_t bw, lt_bw, ret_bw; 2457 2458 if (rack->rc_gp_filled == 0) { 2459 /* 2460 * We have yet no b/w measurement, 2461 * if we have a user set initial bw 2462 * return it. If we don't have that and 2463 * we have an srtt, use the tcp IW (10) to 2464 * calculate a fictional b/w over the SRTT 2465 * which is more or less a guess. 
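(For instance, assuming a 10 * 1460 byte initial window and a 100 ms SRTT, that guess works out to roughly 146,000 bytes/sec, a little over 1 Mbit/s.)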
Note 2466 * we don't use our IW from rack on purpose 2467 * so if we have like IW=30, we are not 2468 * calculating a "huge" b/w. 2469 */ 2470 uint64_t srtt; 2471 2472 if (rack->dis_lt_bw == 1) 2473 lt_bw = 0; 2474 else 2475 lt_bw = rack_get_lt_bw(rack); 2476 if (lt_bw) { 2477 /* 2478 * No goodput bw but a long-term b/w does exist 2479 * lets use that. 2480 */ 2481 ret_bw = lt_bw; 2482 goto compensate; 2483 } 2484 if (rack->r_ctl.init_rate) 2485 return (rack->r_ctl.init_rate); 2486 2487 /* Ok lets come up with the IW guess, if we have a srtt */ 2488 if (rack->rc_tp->t_srtt == 0) { 2489 /* 2490 * Go with old pacing method 2491 * i.e. burst mitigation only. 2492 */ 2493 return (0); 2494 } 2495 /* Ok lets get the initial TCP win (not racks) */ 2496 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 2497 srtt = (uint64_t)rack->rc_tp->t_srtt; 2498 bw *= (uint64_t)USECS_IN_SECOND; 2499 bw /= srtt; 2500 ret_bw = bw; 2501 goto compensate; 2502 2503 } 2504 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 2505 /* Averaging is done, we can return the value */ 2506 bw = rack->r_ctl.gp_bw; 2507 } else { 2508 /* Still doing initial average must calculate */ 2509 bw = rack->r_ctl.gp_bw / max(rack->r_ctl.num_measurements, 1); 2510 } 2511 if (rack->dis_lt_bw) { 2512 /* We are not using lt-bw */ 2513 ret_bw = bw; 2514 goto compensate; 2515 } 2516 lt_bw = rack_get_lt_bw(rack); 2517 if (lt_bw == 0) { 2518 /* If we don't have one then equate it to the gp_bw */ 2519 lt_bw = rack->r_ctl.gp_bw; 2520 } 2521 if (rack->use_lesser_lt_bw) { 2522 if (lt_bw < bw) 2523 ret_bw = lt_bw; 2524 else 2525 ret_bw = bw; 2526 } else { 2527 if (lt_bw > bw) 2528 ret_bw = lt_bw; 2529 else 2530 ret_bw = bw; 2531 } 2532 /* 2533 * Now lets compensate based on the TCP/IP overhead. Our 2534 * Goodput estimate does not include this so we must pace out 2535 * a bit faster since our pacing calculations do. The pacing 2536 * calculations use the base ETHERNET_SEGMENT_SIZE and the segsiz 2537 * we are using to do this, so we do that here in the opposite 2538 * direction as well. This means that if we are tunneled and the 2539 * segsiz is say 1200 bytes we will get quite a boost, but its 2540 * compensated for in the pacing time the opposite way. 2541 */ 2542 compensate: 2543 ret_bw = rack_compensate_for_linerate(rack, ret_bw); 2544 return(ret_bw); 2545 } 2546 2547 2548 static uint64_t 2549 rack_get_bw(struct tcp_rack *rack) 2550 { 2551 uint64_t bw; 2552 2553 if (rack->use_fixed_rate) { 2554 /* Return the fixed pacing rate */ 2555 return (rack_get_fixed_pacing_bw(rack)); 2556 } 2557 bw = rack_get_gp_est(rack); 2558 return (bw); 2559 } 2560 2561 static uint16_t 2562 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 2563 { 2564 if (rack->use_fixed_rate) { 2565 return (100); 2566 } else if (rack->in_probe_rtt && (rsm == NULL)) 2567 return (rack->r_ctl.rack_per_of_gp_probertt); 2568 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 2569 rack->r_ctl.rack_per_of_gp_rec)) { 2570 if (rsm) { 2571 /* a retransmission always use the recovery rate */ 2572 return (rack->r_ctl.rack_per_of_gp_rec); 2573 } else if (rack->rack_rec_nonrxt_use_cr) { 2574 /* Directed to use the configured rate */ 2575 goto configured_rate; 2576 } else if (rack->rack_no_prr && 2577 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 2578 /* No PRR, lets just use the b/w estimate only */ 2579 return (100); 2580 } else { 2581 /* 2582 * Here we may have a non-retransmit but we 2583 * have no overrides, so just use the recovery 2584 * rate (prr is in effect). 
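* Whatever percentage we return is applied by rack_get_output_bw() as
* bw_est = bw * gain / 100, so e.g. a gain of 150 paces at 1.5 times
* the b/w estimate while 100 paces at exactly the estimate.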
2585 */ 2586 return (rack->r_ctl.rack_per_of_gp_rec); 2587 } 2588 } 2589 configured_rate: 2590 /* For the configured rate we look at our cwnd vs the ssthresh */ 2591 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 2592 return (rack->r_ctl.rack_per_of_gp_ss); 2593 else 2594 return (rack->r_ctl.rack_per_of_gp_ca); 2595 } 2596 2597 static void 2598 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6) 2599 { 2600 /* 2601 * Types of logs (mod value) 2602 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. 2603 * 2 = a dsack round begins, persist is reset to 16. 2604 * 3 = a dsack round ends 2605 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh 2606 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack 2607 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. 2608 */ 2609 if (tcp_bblogging_on(rack->rc_tp)) { 2610 union tcp_log_stackspecific log; 2611 struct timeval tv; 2612 2613 memset(&log, 0, sizeof(log)); 2614 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; 2615 log.u_bbr.flex1 <<= 1; 2616 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; 2617 log.u_bbr.flex1 <<= 1; 2618 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; 2619 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; 2620 log.u_bbr.flex3 = rack->r_ctl.num_dsack; 2621 log.u_bbr.flex4 = flex4; 2622 log.u_bbr.flex5 = flex5; 2623 log.u_bbr.flex6 = flex6; 2624 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; 2625 log.u_bbr.flex8 = mod; 2626 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2627 log.u_bbr.epoch = rack->r_ctl.current_round; 2628 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2629 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2630 &rack->rc_inp->inp_socket->so_rcv, 2631 &rack->rc_inp->inp_socket->so_snd, 2632 RACK_DSACK_HANDLING, 0, 2633 0, &log, false, &tv); 2634 } 2635 } 2636 2637 static void 2638 rack_log_hdwr_pacing(struct tcp_rack *rack, 2639 uint64_t rate, uint64_t hw_rate, int line, 2640 int error, uint16_t mod) 2641 { 2642 if (tcp_bblogging_on(rack->rc_tp)) { 2643 union tcp_log_stackspecific log; 2644 struct timeval tv; 2645 const struct ifnet *ifp; 2646 2647 memset(&log, 0, sizeof(log)); 2648 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 2649 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 2650 if (rack->r_ctl.crte) { 2651 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 2652 } else if (rack->rc_inp->inp_route.ro_nh && 2653 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 2654 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 2655 } else 2656 ifp = NULL; 2657 if (ifp) { 2658 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff); 2659 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff); 2660 } 2661 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2662 log.u_bbr.bw_inuse = rate; 2663 log.u_bbr.flex5 = line; 2664 log.u_bbr.flex6 = error; 2665 log.u_bbr.flex7 = mod; 2666 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 2667 log.u_bbr.flex8 = rack->use_fixed_rate; 2668 log.u_bbr.flex8 <<= 1; 2669 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 2670 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 2671 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 2672 if (rack->r_ctl.crte) 2673 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 2674 else 2675 log.u_bbr.cur_del_rate = 0; 2676 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 2677 log.u_bbr.epoch = rack->r_ctl.current_round; 2678 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2679 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2680 &rack->rc_inp->inp_socket->so_rcv, 
2681 &rack->rc_inp->inp_socket->so_snd, 2682 BBR_LOG_HDWR_PACE, 0, 2683 0, &log, false, &tv); 2684 } 2685 } 2686 2687 static uint64_t 2688 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) 2689 { 2690 /* 2691 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 2692 */ 2693 uint64_t bw_est, high_rate; 2694 uint64_t gain; 2695 2696 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2697 bw_est = bw * gain; 2698 bw_est /= (uint64_t)100; 2699 /* Never fall below the minimum (def 64kbps) */ 2700 if (bw_est < RACK_MIN_BW) 2701 bw_est = RACK_MIN_BW; 2702 if (rack->r_rack_hw_rate_caps) { 2703 /* Rate caps are in place */ 2704 if (rack->r_ctl.crte != NULL) { 2705 /* We have a hdwr rate already */ 2706 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2707 if (bw_est >= high_rate) { 2708 /* We are capping bw at the highest rate table entry */ 2709 if (rack_hw_rate_cap_per && 2710 (((high_rate * (100 + rack_hw_rate_cap_per)) / 100) < bw_est)) { 2711 rack->r_rack_hw_rate_caps = 0; 2712 goto done; 2713 } 2714 rack_log_hdwr_pacing(rack, 2715 bw_est, high_rate, __LINE__, 2716 0, 3); 2717 bw_est = high_rate; 2718 if (capped) 2719 *capped = 1; 2720 } 2721 } else if ((rack->rack_hdrw_pacing == 0) && 2722 (rack->rack_hdw_pace_ena) && 2723 (rack->rack_attempt_hdwr_pace == 0) && 2724 (rack->rc_inp->inp_route.ro_nh != NULL) && 2725 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2726 /* 2727 * Special case, we have not yet attempted hardware 2728 * pacing, and yet we may, when we do, find out if we are 2729 * above the highest rate. We need to know the maxbw for the interface 2730 * in question (if it supports ratelimiting). We get back 2731 * a 0, if the interface is not found in the RL lists. 2732 */ 2733 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2734 if (high_rate) { 2735 /* Yep, we have a rate is it above this rate? */ 2736 if (bw_est > high_rate) { 2737 bw_est = high_rate; 2738 if (capped) 2739 *capped = 1; 2740 } 2741 } 2742 } 2743 } 2744 done: 2745 return (bw_est); 2746 } 2747 2748 static void 2749 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2750 { 2751 if (tcp_bblogging_on(rack->rc_tp)) { 2752 union tcp_log_stackspecific log; 2753 struct timeval tv; 2754 2755 if (rack->sack_attack_disable > 0) 2756 goto log_anyway; 2757 if ((mod != 1) && (rack_verbose_logging == 0)) { 2758 /* 2759 * We get 3 values currently for mod 2760 * 1 - We are retransmitting and this tells the reason. 2761 * 2 - We are clearing a dup-ack count. 2762 * 3 - We are incrementing a dup-ack count. 2763 * 2764 * The clear/increment are only logged 2765 * if you have BBverbose on. 
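* A mod of 1 (the retransmit reason) is always logged when BB logging
* is on and records the rsm's flags, dup-ack count and start/end
* sequence in flex3 through flex6 below.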
2766 */ 2767 return; 2768 } 2769 log_anyway: 2770 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2771 log.u_bbr.flex1 = tsused; 2772 log.u_bbr.flex2 = thresh; 2773 log.u_bbr.flex3 = rsm->r_flags; 2774 log.u_bbr.flex4 = rsm->r_dupack; 2775 log.u_bbr.flex5 = rsm->r_start; 2776 log.u_bbr.flex6 = rsm->r_end; 2777 log.u_bbr.flex8 = mod; 2778 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2779 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2780 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2781 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2782 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2783 log.u_bbr.pacing_gain = rack->r_must_retran; 2784 log.u_bbr.epoch = rack->r_ctl.current_round; 2785 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2786 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2787 &rack->rc_inp->inp_socket->so_rcv, 2788 &rack->rc_inp->inp_socket->so_snd, 2789 BBR_LOG_SETTINGS_CHG, 0, 2790 0, &log, false, &tv); 2791 } 2792 } 2793 2794 static void 2795 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2796 { 2797 if (tcp_bblogging_on(rack->rc_tp)) { 2798 union tcp_log_stackspecific log; 2799 struct timeval tv; 2800 2801 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2802 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2803 log.u_bbr.flex2 = to; 2804 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2805 log.u_bbr.flex4 = slot; 2806 log.u_bbr.flex5 = rack->rc_tp->t_hpts_slot; 2807 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2808 log.u_bbr.flex7 = rack->rc_in_persist; 2809 log.u_bbr.flex8 = which; 2810 if (rack->rack_no_prr) 2811 log.u_bbr.pkts_out = 0; 2812 else 2813 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2814 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2815 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2816 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2817 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2818 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2819 log.u_bbr.pacing_gain = rack->r_must_retran; 2820 log.u_bbr.cwnd_gain = rack->rack_deferred_inited; 2821 log.u_bbr.pkt_epoch = rack->rc_has_collapsed; 2822 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2823 log.u_bbr.lost = rack_rto_min; 2824 log.u_bbr.epoch = rack->r_ctl.roundends; 2825 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2826 log.u_bbr.bw_inuse <<= 32; 2827 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2828 log.u_bbr.applimited = rack->rc_tp->t_flags2; 2829 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2830 &rack->rc_inp->inp_socket->so_rcv, 2831 &rack->rc_inp->inp_socket->so_snd, 2832 BBR_LOG_TIMERSTAR, 0, 2833 0, &log, false, &tv); 2834 } 2835 } 2836 2837 static void 2838 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2839 { 2840 if (tcp_bblogging_on(rack->rc_tp)) { 2841 union tcp_log_stackspecific log; 2842 struct timeval tv; 2843 2844 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2845 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2846 log.u_bbr.flex8 = to_num; 2847 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2848 log.u_bbr.flex2 = rack->rc_rack_rtt; 2849 if (rsm == NULL) 2850 log.u_bbr.flex3 = 0; 2851 else 2852 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2853 if (rack->rack_no_prr) 2854 log.u_bbr.flex5 = 0; 2855 else 2856 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2857 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2858 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2859 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2860 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 
2861 log.u_bbr.pacing_gain = rack->r_must_retran; 2862 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2863 log.u_bbr.bw_inuse <<= 32; 2864 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2865 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2866 &rack->rc_inp->inp_socket->so_rcv, 2867 &rack->rc_inp->inp_socket->so_snd, 2868 BBR_LOG_RTO, 0, 2869 0, &log, false, &tv); 2870 } 2871 } 2872 2873 static void 2874 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, 2875 struct rack_sendmap *prev, 2876 struct rack_sendmap *rsm, 2877 struct rack_sendmap *next, 2878 int flag, uint32_t th_ack, int line) 2879 { 2880 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2881 union tcp_log_stackspecific log; 2882 struct timeval tv; 2883 2884 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2885 log.u_bbr.flex8 = flag; 2886 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2887 log.u_bbr.cur_del_rate = (uint64_t)prev; 2888 log.u_bbr.delRate = (uint64_t)rsm; 2889 log.u_bbr.rttProp = (uint64_t)next; 2890 log.u_bbr.flex7 = 0; 2891 if (prev) { 2892 log.u_bbr.flex1 = prev->r_start; 2893 log.u_bbr.flex2 = prev->r_end; 2894 log.u_bbr.flex7 |= 0x4; 2895 } 2896 if (rsm) { 2897 log.u_bbr.flex3 = rsm->r_start; 2898 log.u_bbr.flex4 = rsm->r_end; 2899 log.u_bbr.flex7 |= 0x2; 2900 } 2901 if (next) { 2902 log.u_bbr.flex5 = next->r_start; 2903 log.u_bbr.flex6 = next->r_end; 2904 log.u_bbr.flex7 |= 0x1; 2905 } 2906 log.u_bbr.applimited = line; 2907 log.u_bbr.pkts_out = th_ack; 2908 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2909 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2910 if (rack->rack_no_prr) 2911 log.u_bbr.lost = 0; 2912 else 2913 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2914 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2915 log.u_bbr.bw_inuse <<= 32; 2916 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2917 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2918 &rack->rc_inp->inp_socket->so_rcv, 2919 &rack->rc_inp->inp_socket->so_snd, 2920 TCP_LOG_MAPCHG, 0, 2921 0, &log, false, &tv); 2922 } 2923 } 2924 2925 static void 2926 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2927 struct rack_sendmap *rsm, int conf) 2928 { 2929 if (tcp_bblogging_on(tp)) { 2930 union tcp_log_stackspecific log; 2931 struct timeval tv; 2932 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2933 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2934 log.u_bbr.flex1 = t; 2935 log.u_bbr.flex2 = len; 2936 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2937 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2938 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2939 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2940 log.u_bbr.flex7 = conf; 2941 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2942 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2943 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2944 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2945 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2946 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2947 if (rsm) { 2948 log.u_bbr.pkt_epoch = rsm->r_start; 2949 log.u_bbr.lost = rsm->r_end; 2950 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2951 /* We loose any upper of the 24 bits */ 2952 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; 2953 } else { 2954 /* Its a SYN */ 2955 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2956 log.u_bbr.lost = 0; 2957 log.u_bbr.cwnd_gain = 0; 2958 log.u_bbr.pacing_gain = 0; 2959 } 2960 /* Write out general bits of interest rrs here */ 2961 log.u_bbr.use_lt_bw = 
rack->rc_highly_buffered; 2962 log.u_bbr.use_lt_bw <<= 1; 2963 log.u_bbr.use_lt_bw |= rack->forced_ack; 2964 log.u_bbr.use_lt_bw <<= 1; 2965 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2966 log.u_bbr.use_lt_bw <<= 1; 2967 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2968 log.u_bbr.use_lt_bw <<= 1; 2969 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2970 log.u_bbr.use_lt_bw <<= 1; 2971 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 2972 log.u_bbr.use_lt_bw <<= 1; 2973 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2974 log.u_bbr.use_lt_bw <<= 1; 2975 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; 2976 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; 2977 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; 2978 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; 2979 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; 2980 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; 2981 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 2982 log.u_bbr.bw_inuse <<= 32; 2983 if (rsm) 2984 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 2985 TCP_LOG_EVENTP(tp, NULL, 2986 &rack->rc_inp->inp_socket->so_rcv, 2987 &rack->rc_inp->inp_socket->so_snd, 2988 BBR_LOG_BBRRTT, 0, 2989 0, &log, false, &tv); 2990 2991 2992 } 2993 } 2994 2995 static void 2996 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 2997 { 2998 /* 2999 * Log the rtt sample we are 3000 * applying to the srtt algorithm in 3001 * useconds. 3002 */ 3003 if (tcp_bblogging_on(rack->rc_tp)) { 3004 union tcp_log_stackspecific log; 3005 struct timeval tv; 3006 3007 /* Convert our ms to a microsecond */ 3008 memset(&log, 0, sizeof(log)); 3009 log.u_bbr.flex1 = rtt; 3010 log.u_bbr.flex2 = rack->r_ctl.ack_count; 3011 log.u_bbr.flex3 = rack->r_ctl.sack_count; 3012 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 3013 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra; 3014 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 3015 log.u_bbr.flex7 = 1; 3016 log.u_bbr.flex8 = rack->sack_attack_disable; 3017 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3018 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3019 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3020 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3021 log.u_bbr.pacing_gain = rack->r_must_retran; 3022 /* 3023 * We capture in delRate the upper 32 bits as 3024 * the confidence level we had declared, and the 3025 * lower 32 bits as the actual RTT using the arrival 3026 * timestamp. 
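 * For example (purely illustrative numbers), a confidence of 1 with a
 * 30000 usec rs_us_rtt would be stored as ((uint64_t)1 << 32) | 30000.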
3027 */ 3028 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 3029 log.u_bbr.delRate <<= 32; 3030 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 3031 /* Lets capture all the things that make up t_rtxcur */ 3032 log.u_bbr.applimited = rack_rto_min; 3033 log.u_bbr.epoch = rack_rto_max; 3034 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 3035 log.u_bbr.lost = rack_rto_min; 3036 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 3037 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 3038 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 3039 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 3040 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 3041 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3042 &rack->rc_inp->inp_socket->so_rcv, 3043 &rack->rc_inp->inp_socket->so_snd, 3044 TCP_LOG_RTT, 0, 3045 0, &log, false, &tv); 3046 } 3047 } 3048 3049 static void 3050 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 3051 { 3052 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3053 union tcp_log_stackspecific log; 3054 struct timeval tv; 3055 3056 /* Convert our ms to a microsecond */ 3057 memset(&log, 0, sizeof(log)); 3058 log.u_bbr.flex1 = rtt; 3059 log.u_bbr.flex2 = send_time; 3060 log.u_bbr.flex3 = ack_time; 3061 log.u_bbr.flex4 = where; 3062 log.u_bbr.flex7 = 2; 3063 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3064 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3065 log.u_bbr.bw_inuse <<= 32; 3066 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3067 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3068 &rack->rc_inp->inp_socket->so_rcv, 3069 &rack->rc_inp->inp_socket->so_snd, 3070 TCP_LOG_RTT, 0, 3071 0, &log, false, &tv); 3072 } 3073 } 3074 3075 3076 static void 3077 rack_log_rtt_sendmap(struct tcp_rack *rack, uint32_t idx, uint64_t tsv, uint32_t tsecho) 3078 { 3079 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3080 union tcp_log_stackspecific log; 3081 struct timeval tv; 3082 3083 /* Convert our ms to a microsecond */ 3084 memset(&log, 0, sizeof(log)); 3085 log.u_bbr.flex1 = idx; 3086 log.u_bbr.flex2 = rack_ts_to_msec(tsv); 3087 log.u_bbr.flex3 = tsecho; 3088 log.u_bbr.flex7 = 3; 3089 log.u_bbr.rttProp = tsv; 3090 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3091 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3092 log.u_bbr.bw_inuse <<= 32; 3093 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3094 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3095 &rack->rc_inp->inp_socket->so_rcv, 3096 &rack->rc_inp->inp_socket->so_snd, 3097 TCP_LOG_RTT, 0, 3098 0, &log, false, &tv); 3099 } 3100 } 3101 3102 3103 static inline void 3104 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 3105 { 3106 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3107 union tcp_log_stackspecific log; 3108 struct timeval tv; 3109 3110 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3111 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3112 log.u_bbr.flex1 = line; 3113 log.u_bbr.flex2 = tick; 3114 log.u_bbr.flex3 = tp->t_maxunacktime; 3115 log.u_bbr.flex4 = tp->t_acktime; 3116 log.u_bbr.flex8 = event; 3117 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3118 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3119 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3120 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3121 log.u_bbr.pacing_gain = rack->r_must_retran; 3122 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3123 log.u_bbr.bw_inuse <<= 32; 3124 log.u_bbr.bw_inuse |= 
rack->r_ctl.rc_considered_lost; 3125 TCP_LOG_EVENTP(tp, NULL, 3126 &rack->rc_inp->inp_socket->so_rcv, 3127 &rack->rc_inp->inp_socket->so_snd, 3128 BBR_LOG_PROGRESS, 0, 3129 0, &log, false, &tv); 3130 } 3131 } 3132 3133 static void 3134 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv, int line) 3135 { 3136 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3137 union tcp_log_stackspecific log; 3138 3139 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3140 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3141 log.u_bbr.flex1 = slot; 3142 if (rack->rack_no_prr) 3143 log.u_bbr.flex2 = 0; 3144 else 3145 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 3146 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3147 log.u_bbr.flex5 = rack->r_ctl.ack_during_sd; 3148 log.u_bbr.flex6 = line; 3149 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 3150 log.u_bbr.flex8 = rack->rc_in_persist; 3151 log.u_bbr.timeStamp = cts; 3152 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3153 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3154 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3155 log.u_bbr.pacing_gain = rack->r_must_retran; 3156 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3157 &rack->rc_inp->inp_socket->so_rcv, 3158 &rack->rc_inp->inp_socket->so_snd, 3159 BBR_LOG_BBRSND, 0, 3160 0, &log, false, tv); 3161 } 3162 } 3163 3164 static void 3165 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 3166 { 3167 if (tcp_bblogging_on(rack->rc_tp)) { 3168 union tcp_log_stackspecific log; 3169 struct timeval tv; 3170 3171 memset(&log, 0, sizeof(log)); 3172 log.u_bbr.flex1 = did_out; 3173 log.u_bbr.flex2 = nxt_pkt; 3174 log.u_bbr.flex3 = way_out; 3175 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3176 if (rack->rack_no_prr) 3177 log.u_bbr.flex5 = 0; 3178 else 3179 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3180 log.u_bbr.flex6 = nsegs; 3181 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 3182 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 3183 log.u_bbr.flex7 <<= 1; 3184 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 3185 log.u_bbr.flex7 <<= 1; 3186 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 3187 log.u_bbr.flex8 = rack->rc_in_persist; 3188 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3189 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3190 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3191 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 3192 log.u_bbr.use_lt_bw <<= 1; 3193 log.u_bbr.use_lt_bw |= rack->r_might_revert; 3194 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3195 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3196 log.u_bbr.pacing_gain = rack->r_must_retran; 3197 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3198 log.u_bbr.bw_inuse <<= 32; 3199 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3200 log.u_bbr.epoch = rack->rc_inp->inp_socket->so_snd.sb_hiwat; 3201 log.u_bbr.lt_epoch = rack->rc_inp->inp_socket->so_rcv.sb_hiwat; 3202 log.u_bbr.lost = rack->rc_tp->t_srtt; 3203 log.u_bbr.pkt_epoch = rack->rc_tp->rfbuf_cnt; 3204 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3205 &rack->rc_inp->inp_socket->so_rcv, 3206 &rack->rc_inp->inp_socket->so_snd, 3207 BBR_LOG_DOSEG_DONE, 0, 3208 0, &log, false, &tv); 3209 } 3210 } 3211 3212 static void 3213 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 
3214 { 3215 if (tcp_bblogging_on(rack->rc_tp)) { 3216 union tcp_log_stackspecific log; 3217 struct timeval tv; 3218 3219 memset(&log, 0, sizeof(log)); 3220 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 3221 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 3222 log.u_bbr.flex4 = arg1; 3223 log.u_bbr.flex5 = arg2; 3224 log.u_bbr.flex7 = rack->r_ctl.rc_user_set_min_segs; 3225 log.u_bbr.flex6 = arg3; 3226 log.u_bbr.flex8 = frm; 3227 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3228 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3229 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3230 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 3231 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3232 log.u_bbr.pacing_gain = rack->r_must_retran; 3233 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv, 3234 &tptosocket(tp)->so_snd, 3235 TCP_HDWR_PACE_SIZE, 0, 0, &log, false, &tv); 3236 } 3237 } 3238 3239 static void 3240 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, 3241 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 3242 { 3243 if (tcp_bblogging_on(rack->rc_tp)) { 3244 union tcp_log_stackspecific log; 3245 struct timeval tv; 3246 3247 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3248 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3249 log.u_bbr.flex1 = slot; 3250 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 3251 log.u_bbr.flex4 = reason; 3252 if (rack->rack_no_prr) 3253 log.u_bbr.flex5 = 0; 3254 else 3255 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3256 log.u_bbr.flex7 = hpts_calling; 3257 log.u_bbr.flex8 = rack->rc_in_persist; 3258 log.u_bbr.lt_epoch = cwnd_to_use; 3259 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3260 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3261 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3262 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3263 log.u_bbr.pacing_gain = rack->r_must_retran; 3264 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 3265 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3266 log.u_bbr.bw_inuse <<= 32; 3267 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3268 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3269 &rack->rc_inp->inp_socket->so_rcv, 3270 &rack->rc_inp->inp_socket->so_snd, 3271 BBR_LOG_JUSTRET, 0, 3272 tlen, &log, false, &tv); 3273 } 3274 } 3275 3276 static void 3277 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 3278 struct timeval *tv, uint32_t flags_on_entry) 3279 { 3280 if (tcp_bblogging_on(rack->rc_tp)) { 3281 union tcp_log_stackspecific log; 3282 3283 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3284 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3285 log.u_bbr.flex1 = line; 3286 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 3287 log.u_bbr.flex3 = flags_on_entry; 3288 log.u_bbr.flex4 = us_cts; 3289 if (rack->rack_no_prr) 3290 log.u_bbr.flex5 = 0; 3291 else 3292 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3293 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 3294 log.u_bbr.flex7 = hpts_removed; 3295 log.u_bbr.flex8 = 1; 3296 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 3297 log.u_bbr.timeStamp = us_cts; 3298 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3299 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3300 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3301 log.u_bbr.pacing_gain = rack->r_must_retran; 3302 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3303 log.u_bbr.bw_inuse <<= 32; 3304 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3305 
TCP_LOG_EVENTP(rack->rc_tp, NULL, 3306 &rack->rc_inp->inp_socket->so_rcv, 3307 &rack->rc_inp->inp_socket->so_snd, 3308 BBR_LOG_TIMERCANC, 0, 3309 0, &log, false, tv); 3310 } 3311 } 3312 3313 static void 3314 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 3315 uint32_t flex1, uint32_t flex2, 3316 uint32_t flex3, uint32_t flex4, 3317 uint32_t flex5, uint32_t flex6, 3318 uint16_t flex7, uint8_t mod) 3319 { 3320 if (tcp_bblogging_on(rack->rc_tp)) { 3321 union tcp_log_stackspecific log; 3322 struct timeval tv; 3323 3324 if (mod == 1) { 3325 /* No you can't use 1, its for the real to cancel */ 3326 return; 3327 } 3328 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3329 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3330 log.u_bbr.flex1 = flex1; 3331 log.u_bbr.flex2 = flex2; 3332 log.u_bbr.flex3 = flex3; 3333 log.u_bbr.flex4 = flex4; 3334 log.u_bbr.flex5 = flex5; 3335 log.u_bbr.flex6 = flex6; 3336 log.u_bbr.flex7 = flex7; 3337 log.u_bbr.flex8 = mod; 3338 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3339 &rack->rc_inp->inp_socket->so_rcv, 3340 &rack->rc_inp->inp_socket->so_snd, 3341 BBR_LOG_TIMERCANC, 0, 3342 0, &log, false, &tv); 3343 } 3344 } 3345 3346 static void 3347 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 3348 { 3349 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3350 union tcp_log_stackspecific log; 3351 struct timeval tv; 3352 3353 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3354 log.u_bbr.flex1 = timers; 3355 log.u_bbr.flex2 = ret; 3356 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 3357 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3358 log.u_bbr.flex5 = cts; 3359 if (rack->rack_no_prr) 3360 log.u_bbr.flex6 = 0; 3361 else 3362 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; 3363 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3364 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3365 log.u_bbr.pacing_gain = rack->r_must_retran; 3366 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3367 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3368 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3369 &rack->rc_inp->inp_socket->so_rcv, 3370 &rack->rc_inp->inp_socket->so_snd, 3371 BBR_LOG_TO_PROCESS, 0, 3372 0, &log, false, &tv); 3373 } 3374 } 3375 3376 static void 3377 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line) 3378 { 3379 if (tcp_bblogging_on(rack->rc_tp)) { 3380 union tcp_log_stackspecific log; 3381 struct timeval tv; 3382 3383 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3384 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 3385 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 3386 if (rack->rack_no_prr) 3387 log.u_bbr.flex3 = 0; 3388 else 3389 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 3390 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 3391 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 3392 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 3393 log.u_bbr.flex7 = line; 3394 log.u_bbr.flex8 = frm; 3395 log.u_bbr.pkts_out = orig_cwnd; 3396 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3397 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3398 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 3399 log.u_bbr.use_lt_bw <<= 1; 3400 log.u_bbr.use_lt_bw |= rack->r_might_revert; 3401 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3402 &rack->rc_inp->inp_socket->so_rcv, 3403 &rack->rc_inp->inp_socket->so_snd, 3404 BBR_LOG_BBRUPD, 0, 3405 0, &log, false, &tv); 3406 } 3407 } 3408 3409 #ifdef TCP_SAD_DETECTION 3410 static void 3411 rack_log_sad(struct tcp_rack *rack, int event) 3412 { 3413 if (tcp_bblogging_on(rack->rc_tp)) { 3414 union 
tcp_log_stackspecific log; 3415 struct timeval tv; 3416 3417 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3418 log.u_bbr.flex1 = rack->r_ctl.sack_count; 3419 log.u_bbr.flex2 = rack->r_ctl.ack_count; 3420 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra; 3421 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 3422 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced; 3423 log.u_bbr.flex6 = tcp_sack_to_ack_thresh; 3424 log.u_bbr.pkts_out = tcp_sack_to_move_thresh; 3425 log.u_bbr.lt_epoch = (tcp_force_detection << 8); 3426 log.u_bbr.lt_epoch |= rack->do_detection; 3427 log.u_bbr.applimited = tcp_map_minimum; 3428 log.u_bbr.flex7 = rack->sack_attack_disable; 3429 log.u_bbr.flex8 = event; 3430 log.u_bbr.bbr_state = rack->rc_suspicious; 3431 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3432 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3433 log.u_bbr.delivered = tcp_sad_decay_val; 3434 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3435 &rack->rc_inp->inp_socket->so_rcv, 3436 &rack->rc_inp->inp_socket->so_snd, 3437 TCP_SAD_DETECT, 0, 3438 0, &log, false, &tv); 3439 } 3440 } 3441 #endif 3442 3443 static void 3444 rack_counter_destroy(void) 3445 { 3446 counter_u64_free(rack_total_bytes); 3447 counter_u64_free(rack_fto_send); 3448 counter_u64_free(rack_fto_rsm_send); 3449 counter_u64_free(rack_nfto_resend); 3450 counter_u64_free(rack_hw_pace_init_fail); 3451 counter_u64_free(rack_hw_pace_lost); 3452 counter_u64_free(rack_non_fto_send); 3453 counter_u64_free(rack_extended_rfo); 3454 counter_u64_free(rack_ack_total); 3455 counter_u64_free(rack_express_sack); 3456 counter_u64_free(rack_sack_total); 3457 counter_u64_free(rack_move_none); 3458 counter_u64_free(rack_move_some); 3459 counter_u64_free(rack_sack_attacks_detected); 3460 counter_u64_free(rack_sack_attacks_reversed); 3461 counter_u64_free(rack_sack_attacks_suspect); 3462 counter_u64_free(rack_sack_used_next_merge); 3463 counter_u64_free(rack_sack_used_prev_merge); 3464 counter_u64_free(rack_tlp_tot); 3465 counter_u64_free(rack_tlp_newdata); 3466 counter_u64_free(rack_tlp_retran); 3467 counter_u64_free(rack_tlp_retran_bytes); 3468 counter_u64_free(rack_to_tot); 3469 counter_u64_free(rack_saw_enobuf); 3470 counter_u64_free(rack_saw_enobuf_hw); 3471 counter_u64_free(rack_saw_enetunreach); 3472 counter_u64_free(rack_hot_alloc); 3473 counter_u64_free(tcp_policer_detected); 3474 counter_u64_free(rack_to_alloc); 3475 counter_u64_free(rack_to_alloc_hard); 3476 counter_u64_free(rack_to_alloc_emerg); 3477 counter_u64_free(rack_to_alloc_limited); 3478 counter_u64_free(rack_alloc_limited_conns); 3479 counter_u64_free(rack_split_limited); 3480 counter_u64_free(rack_multi_single_eq); 3481 counter_u64_free(rack_rxt_clamps_cwnd); 3482 counter_u64_free(rack_rxt_clamps_cwnd_uniq); 3483 counter_u64_free(rack_proc_non_comp_ack); 3484 counter_u64_free(rack_sack_proc_all); 3485 counter_u64_free(rack_sack_proc_restart); 3486 counter_u64_free(rack_sack_proc_short); 3487 counter_u64_free(rack_sack_skipped_acked); 3488 counter_u64_free(rack_sack_splits); 3489 counter_u64_free(rack_input_idle_reduces); 3490 counter_u64_free(rack_collapsed_win); 3491 counter_u64_free(rack_collapsed_win_rxt); 3492 counter_u64_free(rack_collapsed_win_rxt_bytes); 3493 counter_u64_free(rack_collapsed_win_seen); 3494 counter_u64_free(rack_try_scwnd); 3495 counter_u64_free(rack_persists_sends); 3496 counter_u64_free(rack_persists_acks); 3497 counter_u64_free(rack_persists_loss); 3498 counter_u64_free(rack_persists_lost_ends); 3499 #ifdef INVARIANTS 3500 counter_u64_free(rack_adjust_map_bw); 
3501 #endif 3502 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 3503 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 3504 } 3505 3506 static struct rack_sendmap * 3507 rack_alloc(struct tcp_rack *rack) 3508 { 3509 struct rack_sendmap *rsm; 3510 3511 /* 3512 * First get the top of the list it in 3513 * theory is the "hottest" rsm we have, 3514 * possibly just freed by ack processing. 3515 */ 3516 if (rack->rc_free_cnt > rack_free_cache) { 3517 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 3518 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3519 counter_u64_add(rack_hot_alloc, 1); 3520 rack->rc_free_cnt--; 3521 return (rsm); 3522 } 3523 /* 3524 * Once we get under our free cache we probably 3525 * no longer have a "hot" one available. Lets 3526 * get one from UMA. 3527 */ 3528 rsm = uma_zalloc(rack_zone, M_NOWAIT); 3529 if (rsm) { 3530 rack->r_ctl.rc_num_maps_alloced++; 3531 counter_u64_add(rack_to_alloc, 1); 3532 return (rsm); 3533 } 3534 /* 3535 * Dig in to our aux rsm's (the last two) since 3536 * UMA failed to get us one. 3537 */ 3538 if (rack->rc_free_cnt) { 3539 counter_u64_add(rack_to_alloc_emerg, 1); 3540 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 3541 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3542 rack->rc_free_cnt--; 3543 return (rsm); 3544 } 3545 return (NULL); 3546 } 3547 3548 static struct rack_sendmap * 3549 rack_alloc_full_limit(struct tcp_rack *rack) 3550 { 3551 if ((V_tcp_map_entries_limit > 0) && 3552 (rack->do_detection == 0) && 3553 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 3554 counter_u64_add(rack_to_alloc_limited, 1); 3555 if (!rack->alloc_limit_reported) { 3556 rack->alloc_limit_reported = 1; 3557 counter_u64_add(rack_alloc_limited_conns, 1); 3558 } 3559 return (NULL); 3560 } 3561 return (rack_alloc(rack)); 3562 } 3563 3564 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 3565 static struct rack_sendmap * 3566 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 3567 { 3568 struct rack_sendmap *rsm; 3569 3570 if (limit_type) { 3571 /* currently there is only one limit type */ 3572 if (rack->r_ctl.rc_split_limit > 0 && 3573 (rack->do_detection == 0) && 3574 rack->r_ctl.rc_num_split_allocs >= rack->r_ctl.rc_split_limit) { 3575 counter_u64_add(rack_split_limited, 1); 3576 if (!rack->alloc_limit_reported) { 3577 rack->alloc_limit_reported = 1; 3578 counter_u64_add(rack_alloc_limited_conns, 1); 3579 } 3580 return (NULL); 3581 #ifdef TCP_SAD_DETECTION 3582 } else if ((tcp_sad_limit != 0) && 3583 (rack->do_detection == 1) && 3584 (rack->r_ctl.rc_num_split_allocs >= tcp_sad_limit)) { 3585 counter_u64_add(rack_split_limited, 1); 3586 if (!rack->alloc_limit_reported) { 3587 rack->alloc_limit_reported = 1; 3588 counter_u64_add(rack_alloc_limited_conns, 1); 3589 } 3590 return (NULL); 3591 #endif 3592 } 3593 } 3594 3595 /* allocate and mark in the limit type, if set */ 3596 rsm = rack_alloc(rack); 3597 if (rsm != NULL && limit_type) { 3598 rsm->r_limit_type = limit_type; 3599 rack->r_ctl.rc_num_split_allocs++; 3600 } 3601 return (rsm); 3602 } 3603 3604 static void 3605 rack_free_trim(struct tcp_rack *rack) 3606 { 3607 struct rack_sendmap *rsm; 3608 3609 /* 3610 * Free up all the tail entries until 3611 * we get our list down to the limit. 
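 * The free list is a small per-connection cache of sendmap blocks,
 * bounded by rack_free_cache; anything beyond the bound goes back to
 * the rack_zone UMA zone. We trim from the tail because the head holds
 * the most recently freed ("hottest") entries, which rack_alloc() hands
 * out first.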
3612 */ 3613 while (rack->rc_free_cnt > rack_free_cache) { 3614 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); 3615 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3616 rack->rc_free_cnt--; 3617 rack->r_ctl.rc_num_maps_alloced--; 3618 uma_zfree(rack_zone, rsm); 3619 } 3620 } 3621 3622 static void 3623 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 3624 { 3625 if (rsm->r_flags & RACK_APP_LIMITED) { 3626 if (rack->r_ctl.rc_app_limited_cnt > 0) { 3627 rack->r_ctl.rc_app_limited_cnt--; 3628 } 3629 } 3630 if (rsm->r_limit_type) { 3631 /* currently there is only one limit type */ 3632 rack->r_ctl.rc_num_split_allocs--; 3633 } 3634 if (rsm == rack->r_ctl.rc_first_appl) { 3635 rack->r_ctl.cleared_app_ack_seq = rsm->r_start + (rsm->r_end - rsm->r_start); 3636 rack->r_ctl.cleared_app_ack = 1; 3637 if (rack->r_ctl.rc_app_limited_cnt == 0) 3638 rack->r_ctl.rc_first_appl = NULL; 3639 else 3640 rack->r_ctl.rc_first_appl = tqhash_find(rack->r_ctl.tqh, rsm->r_nseq_appl); 3641 } 3642 if (rsm == rack->r_ctl.rc_resend) 3643 rack->r_ctl.rc_resend = NULL; 3644 if (rsm == rack->r_ctl.rc_end_appl) 3645 rack->r_ctl.rc_end_appl = NULL; 3646 if (rack->r_ctl.rc_tlpsend == rsm) 3647 rack->r_ctl.rc_tlpsend = NULL; 3648 if (rack->r_ctl.rc_sacklast == rsm) 3649 rack->r_ctl.rc_sacklast = NULL; 3650 memset(rsm, 0, sizeof(struct rack_sendmap)); 3651 /* Make sure we are not going to overrun our count limit of 0xff */ 3652 if ((rack->rc_free_cnt + 1) > RACK_FREE_CNT_MAX) { 3653 rack_free_trim(rack); 3654 } 3655 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); 3656 rack->rc_free_cnt++; 3657 } 3658 3659 static uint32_t 3660 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack) 3661 { 3662 uint64_t srtt, bw, len, tim; 3663 uint32_t segsiz, def_len, minl; 3664 3665 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3666 def_len = rack_def_data_window * segsiz; 3667 if (rack->rc_gp_filled == 0) { 3668 /* 3669 * We have no measurement (IW is in flight?) so 3670 * we can only guess using our data_window sysctl 3671 * value (usually 20MSS). 3672 */ 3673 return (def_len); 3674 } 3675 /* 3676 * Now we have a number of factors to consider. 3677 * 3678 * 1) We have a desired BDP which is usually 3679 * at least 2. 3680 * 2) We have a minimum number of rtt's usually 1 SRTT 3681 * but we allow it too to be more. 3682 * 3) We want to make sure a measurement last N useconds (if 3683 * we have set rack_min_measure_usec. 3684 * 3685 * We handle the first concern here by trying to create a data 3686 * window of max(rack_def_data_window, DesiredBDP). The 3687 * second concern we handle in not letting the measurement 3688 * window end normally until at least the required SRTT's 3689 * have gone by which is done further below in 3690 * rack_enough_for_measurement(). Finally the third concern 3691 * we also handle here by calculating how long that time 3692 * would take at the current BW and then return the 3693 * max of our first calculation and that length. Note 3694 * that if rack_min_measure_usec is 0, we don't deal 3695 * with concern 3. Also for both Concern 1 and 3 an 3696 * application limited period could end the measurement 3697 * earlier. 3698 * 3699 * So lets calculate the BDP with the "known" b/w using 3700 * the SRTT has our rtt and then multiply it by the 3701 * goal. 
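 * As a rough worked example (numbers purely illustrative): with a
 * measured b/w of 12,500,000 bytes/sec and an SRTT of 40,000 usec,
 * bw * srtt / HPTS_USEC_IN_SEC is a BDP of 500,000 bytes; a goal
 * (rack_goal_bdp) of 2 makes the candidate window 1,000,000 bytes,
 * which is then rounded up to a segment boundary and checked against
 * def_len and the rack_min_measure_usec floor computed below.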
3702 */
3703 bw = rack_get_bw(rack);
3704 srtt = (uint64_t)tp->t_srtt;
3705 len = bw * srtt;
3706 len /= (uint64_t)HPTS_USEC_IN_SEC;
3707 len *= max(1, rack_goal_bdp);
3708 /* Now we need to round up to the nearest MSS */
3709 len = roundup(len, segsiz);
3710 if (rack_min_measure_usec) {
3711 /* Now calculate our min length for this b/w */
3712 tim = rack_min_measure_usec;
3713 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC;
3714 if (minl == 0)
3715 minl = 1;
3716 minl = roundup(minl, segsiz);
3717 if (len < minl)
3718 len = minl;
3719 }
3720 /*
3721 * Now if we have a very small window we want
3722 * to attempt to get the window that is
3723 * as small as possible. This happens on
3724 * low b/w connections and we don't want to
3725 * span huge numbers of rtt's between measurements.
3726 *
3727 * We basically include 2 over our "MIN window" so
3728 * that the measurement can be shortened (possibly) by
3729 * an ack'ed packet.
3730 */
3731 if (len < def_len)
3732 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz)));
3733 else
3734 return (max((uint32_t)len, def_len));
3735
3736 }
3737
3738 static int
3739 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality)
3740 {
3741 uint32_t tim, srtts, segsiz;
3742
3743 /*
3744 * Has enough time passed for the GP measurement to be valid?
3745 */
3746 if (SEQ_LT(th_ack, tp->gput_seq)) {
3747 /* Not enough bytes yet */
3748 return (0);
3749 }
3750 if ((tp->snd_max == tp->snd_una) ||
3751 (th_ack == tp->snd_max)){
3752 /*
3753 * All is acked; the quality of an all-acked measurement is
3754 * usually low or medium, but we in theory could split
3755 * all acked into two cases, where you got
3756 * a significant amount of your window and
3757 * where you did not. For now we leave it
3758 * but it is something to contemplate in the
3759 * future. The danger here is that delayed ack
3760 * is affecting the last byte (which is a 50:50 chance).
3761 */
3762 *quality = RACK_QUALITY_ALLACKED;
3763 return (1);
3764 }
3765 if (SEQ_GEQ(th_ack, tp->gput_ack)) {
3766 /*
3767 * We obtained our entire window of data we wanted,
3768 * so no matter if we are in recovery or not
3769 * it's ok, since expanding the window does not
3770 * make things fuzzy (or at least not as much).
3771 */
3772 *quality = RACK_QUALITY_HIGH;
3773 return (1);
3774 }
3775 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
3776 if (SEQ_LT(th_ack, tp->gput_ack) &&
3777 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
3778 /* Not enough bytes yet */
3779 return (0);
3780 }
3781 if (rack->r_ctl.rc_first_appl &&
3782 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) {
3783 /*
3784 * We are up to the app limited send point, so
3785 * we have to measure irrespective of the time.
3786 */
3787 *quality = RACK_QUALITY_APPLIMITED;
3788 return (1);
3789 }
3790 /* Now what about time? */
3791 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts);
3792 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts;
3793 if ((tim >= srtts) && (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) {
3794 /*
3795 * We do not allow a measurement if we are in recovery
3796 * that would shrink the goodput window we wanted.
3797 * This is to prevent cloudiness of when the last send
3798 * was actually made.
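 * Summarizing the exits of this function: an all-acked window reports
 * RACK_QUALITY_ALLACKED, covering the full gput window reports
 * RACK_QUALITY_HIGH, reaching the app-limited point reports
 * RACK_QUALITY_APPLIMITED, this time-based exit also reports
 * RACK_QUALITY_HIGH, and anything else returns 0 so the measurement
 * keeps running.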
3799 */ 3800 *quality = RACK_QUALITY_HIGH; 3801 return (1); 3802 } 3803 /* Nope not even a full SRTT has passed */ 3804 return (0); 3805 } 3806 3807 static void 3808 rack_log_timely(struct tcp_rack *rack, 3809 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd, 3810 uint64_t up_bnd, int line, uint8_t method) 3811 { 3812 if (tcp_bblogging_on(rack->rc_tp)) { 3813 union tcp_log_stackspecific log; 3814 struct timeval tv; 3815 3816 memset(&log, 0, sizeof(log)); 3817 log.u_bbr.flex1 = logged; 3818 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; 3819 log.u_bbr.flex2 <<= 4; 3820 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; 3821 log.u_bbr.flex2 <<= 4; 3822 log.u_bbr.flex2 |= rack->rc_gp_incr; 3823 log.u_bbr.flex2 <<= 4; 3824 log.u_bbr.flex2 |= rack->rc_gp_bwred; 3825 log.u_bbr.flex3 = rack->rc_gp_incr; 3826 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3827 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; 3828 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; 3829 log.u_bbr.flex7 = rack->rc_gp_bwred; 3830 log.u_bbr.flex8 = method; 3831 log.u_bbr.cur_del_rate = cur_bw; 3832 log.u_bbr.delRate = low_bnd; 3833 log.u_bbr.bw_inuse = up_bnd; 3834 log.u_bbr.rttProp = rack_get_bw(rack); 3835 log.u_bbr.pkt_epoch = line; 3836 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3837 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3838 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3839 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3840 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3841 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; 3842 log.u_bbr.cwnd_gain <<= 1; 3843 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; 3844 log.u_bbr.cwnd_gain <<= 1; 3845 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 3846 log.u_bbr.cwnd_gain <<= 1; 3847 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 3848 log.u_bbr.lost = rack->r_ctl.rc_loss_count; 3849 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3850 &rack->rc_inp->inp_socket->so_rcv, 3851 &rack->rc_inp->inp_socket->so_snd, 3852 TCP_TIMELY_WORK, 0, 3853 0, &log, false, &tv); 3854 } 3855 } 3856 3857 static int 3858 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) 3859 { 3860 /* 3861 * Before we increase we need to know if 3862 * the estimate just made was less than 3863 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est) 3864 * 3865 * If we already are pacing at a fast enough 3866 * rate to push us faster there is no sense of 3867 * increasing. 3868 * 3869 * We first caculate our actual pacing rate (ss or ca multiplier 3870 * times our cur_bw). 3871 * 3872 * Then we take the last measured rate and multipy by our 3873 * maximum pacing overage to give us a max allowable rate. 3874 * 3875 * If our act_rate is smaller than our max_allowable rate 3876 * then we should increase. Else we should hold steady. 3877 * 3878 */ 3879 uint64_t act_rate, max_allow_rate; 3880 3881 if (rack_timely_no_stopping) 3882 return (1); 3883 3884 if ((cur_bw == 0) || (last_bw_est == 0)) { 3885 /* 3886 * Initial startup case or 3887 * everything is acked case. 3888 */ 3889 rack_log_timely(rack, mult, cur_bw, 0, 0, 3890 __LINE__, 9); 3891 return (1); 3892 } 3893 if (mult <= 100) { 3894 /* 3895 * We can always pace at or slightly above our rate. 
3896 */ 3897 rack_log_timely(rack, mult, cur_bw, 0, 0, 3898 __LINE__, 9); 3899 return (1); 3900 } 3901 act_rate = cur_bw * (uint64_t)mult; 3902 act_rate /= 100; 3903 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100); 3904 max_allow_rate /= 100; 3905 if (act_rate < max_allow_rate) { 3906 /* 3907 * Here the rate we are actually pacing at 3908 * is smaller than 10% above our last measurement. 3909 * This means we are pacing below what we would 3910 * like to try to achieve (plus some wiggle room). 3911 */ 3912 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3913 __LINE__, 9); 3914 return (1); 3915 } else { 3916 /* 3917 * Here we are already pacing at least rack_max_per_above(10%) 3918 * what we are getting back. This indicates most likely 3919 * that we are being limited (cwnd/rwnd/app) and can't 3920 * get any more b/w. There is no sense of trying to 3921 * raise up the pacing rate its not speeding us up 3922 * and we already are pacing faster than we are getting. 3923 */ 3924 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3925 __LINE__, 8); 3926 return (0); 3927 } 3928 } 3929 3930 static void 3931 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack) 3932 { 3933 /* 3934 * When we drag bottom, we want to assure 3935 * that no multiplier is below 1.0, if so 3936 * we want to restore it to at least that. 3937 */ 3938 if (rack->r_ctl.rack_per_of_gp_rec < 100) { 3939 /* This is unlikely we usually do not touch recovery */ 3940 rack->r_ctl.rack_per_of_gp_rec = 100; 3941 } 3942 if (rack->r_ctl.rack_per_of_gp_ca < 100) { 3943 rack->r_ctl.rack_per_of_gp_ca = 100; 3944 } 3945 if (rack->r_ctl.rack_per_of_gp_ss < 100) { 3946 rack->r_ctl.rack_per_of_gp_ss = 100; 3947 } 3948 } 3949 3950 static void 3951 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack) 3952 { 3953 if (rack->r_ctl.rack_per_of_gp_ca > 100) { 3954 rack->r_ctl.rack_per_of_gp_ca = 100; 3955 } 3956 if (rack->r_ctl.rack_per_of_gp_ss > 100) { 3957 rack->r_ctl.rack_per_of_gp_ss = 100; 3958 } 3959 } 3960 3961 static void 3962 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override) 3963 { 3964 int32_t calc, logged, plus; 3965 3966 logged = 0; 3967 3968 if (rack->rc_skip_timely) 3969 return; 3970 if (override) { 3971 /* 3972 * override is passed when we are 3973 * loosing b/w and making one last 3974 * gasp at trying to not loose out 3975 * to a new-reno flow. 3976 */ 3977 goto extra_boost; 3978 } 3979 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */ 3980 if (rack->rc_gp_incr && 3981 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { 3982 /* 3983 * Reset and get 5 strokes more before the boost. Note 3984 * that the count is 0 based so we have to add one. 
3985 */ 3986 extra_boost: 3987 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3988 rack->rc_gp_timely_inc_cnt = 0; 3989 } else 3990 plus = (uint32_t)rack_gp_increase_per; 3991 /* Must be at least 1% increase for true timely increases */ 3992 if ((plus < 1) && 3993 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3994 plus = 1; 3995 if (rack->rc_gp_saw_rec && 3996 (rack->rc_gp_no_rec_chg == 0) && 3997 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3998 rack->r_ctl.rack_per_of_gp_rec)) { 3999 /* We have been in recovery ding it too */ 4000 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 4001 if (calc > 0xffff) 4002 calc = 0xffff; 4003 logged |= 1; 4004 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 4005 if (rack->r_ctl.rack_per_upper_bound_ca && 4006 (rack->rc_dragged_bottom == 0) && 4007 (rack->r_ctl.rack_per_of_gp_rec > rack->r_ctl.rack_per_upper_bound_ca)) 4008 rack->r_ctl.rack_per_of_gp_rec = rack->r_ctl.rack_per_upper_bound_ca; 4009 } 4010 if (rack->rc_gp_saw_ca && 4011 (rack->rc_gp_saw_ss == 0) && 4012 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 4013 rack->r_ctl.rack_per_of_gp_ca)) { 4014 /* In CA */ 4015 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 4016 if (calc > 0xffff) 4017 calc = 0xffff; 4018 logged |= 2; 4019 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 4020 if (rack->r_ctl.rack_per_upper_bound_ca && 4021 (rack->rc_dragged_bottom == 0) && 4022 (rack->r_ctl.rack_per_of_gp_ca > rack->r_ctl.rack_per_upper_bound_ca)) 4023 rack->r_ctl.rack_per_of_gp_ca = rack->r_ctl.rack_per_upper_bound_ca; 4024 } 4025 if (rack->rc_gp_saw_ss && 4026 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 4027 rack->r_ctl.rack_per_of_gp_ss)) { 4028 /* In SS */ 4029 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 4030 if (calc > 0xffff) 4031 calc = 0xffff; 4032 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 4033 if (rack->r_ctl.rack_per_upper_bound_ss && 4034 (rack->rc_dragged_bottom == 0) && 4035 (rack->r_ctl.rack_per_of_gp_ss > rack->r_ctl.rack_per_upper_bound_ss)) 4036 rack->r_ctl.rack_per_of_gp_ss = rack->r_ctl.rack_per_upper_bound_ss; 4037 logged |= 4; 4038 } 4039 if (logged && 4040 (rack->rc_gp_incr == 0)){ 4041 /* Go into increment mode */ 4042 rack->rc_gp_incr = 1; 4043 rack->rc_gp_timely_inc_cnt = 0; 4044 } 4045 if (rack->rc_gp_incr && 4046 logged && 4047 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 4048 rack->rc_gp_timely_inc_cnt++; 4049 } 4050 rack_log_timely(rack, logged, plus, 0, 0, 4051 __LINE__, 1); 4052 } 4053 4054 static uint32_t 4055 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 4056 { 4057 /*- 4058 * norm_grad = rtt_diff / minrtt; 4059 * new_per = curper * (1 - B * norm_grad) 4060 * 4061 * B = rack_gp_decrease_per (default 80%) 4062 * rtt_dif = input var current rtt-diff 4063 * curper = input var current percentage 4064 * minrtt = from rack filter 4065 * 4066 * In order to do the floating point calculations above we 4067 * do an integer conversion. 
The code looks confusing so let me 4068 * translate it into something that use more variables and 4069 * is clearer for us humans :) 4070 * 4071 * uint64_t norm_grad, inverse, reduce_by, final_result; 4072 * uint32_t perf; 4073 * 4074 * norm_grad = (((uint64_t)rtt_diff * 1000000) / 4075 * (uint64_t)get_filter_small(&rack->r_ctl.rc_gp_min_rtt)); 4076 * inverse = ((uint64_t)rack_gp_decrease * (uint64_t)1000000) * norm_grad; 4077 * inverse /= 1000000; 4078 * reduce_by = (1000000 - inverse); 4079 * final_result = (cur_per * reduce_by) / 1000000; 4080 * perf = (uint32_t)final_result; 4081 */ 4082 uint64_t perf; 4083 4084 perf = (((uint64_t)curper * ((uint64_t)1000000 - 4085 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 4086 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 4087 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 4088 (uint64_t)1000000)) / 4089 (uint64_t)1000000); 4090 if (perf > curper) { 4091 /* TSNH */ 4092 perf = curper - 1; 4093 } 4094 return ((uint32_t)perf); 4095 } 4096 4097 static uint32_t 4098 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 4099 { 4100 /* 4101 * highrttthresh 4102 * result = curper * (1 - (B * ( 1 - ------ )) 4103 * gp_srtt 4104 * 4105 * B = rack_gp_decrease_per (default .8 i.e. 80) 4106 * highrttthresh = filter_min * rack_gp_rtt_maxmul 4107 */ 4108 uint64_t perf; 4109 uint32_t highrttthresh; 4110 4111 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 4112 4113 perf = (((uint64_t)curper * ((uint64_t)1000000 - 4114 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 4115 ((uint64_t)highrttthresh * (uint64_t)1000000) / 4116 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 4117 if (tcp_bblogging_on(rack->rc_tp)) { 4118 uint64_t log1; 4119 4120 log1 = rtt; 4121 log1 <<= 32; 4122 log1 |= highrttthresh; 4123 rack_log_timely(rack, 4124 rack_gp_decrease_per, 4125 (uint64_t)curper, 4126 log1, 4127 perf, 4128 __LINE__, 4129 15); 4130 } 4131 return (perf); 4132 } 4133 4134 static void 4135 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 4136 { 4137 uint64_t logvar, logvar2, logvar3; 4138 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 4139 4140 if (rack->rc_skip_timely) 4141 return; 4142 if (rack->rc_gp_incr) { 4143 /* Turn off increment counting */ 4144 rack->rc_gp_incr = 0; 4145 rack->rc_gp_timely_inc_cnt = 0; 4146 } 4147 ss_red = ca_red = rec_red = 0; 4148 logged = 0; 4149 /* Calculate the reduction value */ 4150 if (rtt_diff < 0) { 4151 rtt_diff *= -1; 4152 } 4153 /* Must be at least 1% reduction */ 4154 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 4155 /* We have been in recovery ding it too */ 4156 if (timely_says == 2) { 4157 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 4158 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 4159 if (alt < new_per) 4160 val = alt; 4161 else 4162 val = new_per; 4163 } else 4164 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 4165 if (rack->r_ctl.rack_per_of_gp_rec > val) { 4166 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 4167 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 4168 } else { 4169 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 4170 rec_red = 0; 4171 } 4172 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 4173 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 4174 logged |= 1; 4175 } 4176 if (rack->rc_gp_saw_ss) { 4177 /* Sent in SS */ 4178 if 
(timely_says == 2) { 4179 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 4180 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 4181 if (alt < new_per) 4182 val = alt; 4183 else 4184 val = new_per; 4185 } else 4186 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 4187 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 4188 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 4189 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 4190 } else { 4191 ss_red = new_per; 4192 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 4193 logvar = new_per; 4194 logvar <<= 32; 4195 logvar |= alt; 4196 logvar2 = (uint32_t)rtt; 4197 logvar2 <<= 32; 4198 logvar2 |= (uint32_t)rtt_diff; 4199 logvar3 = rack_gp_rtt_maxmul; 4200 logvar3 <<= 32; 4201 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4202 rack_log_timely(rack, timely_says, 4203 logvar2, logvar3, 4204 logvar, __LINE__, 10); 4205 } 4206 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 4207 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 4208 logged |= 4; 4209 } else if (rack->rc_gp_saw_ca) { 4210 /* Sent in CA */ 4211 if (timely_says == 2) { 4212 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 4213 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 4214 if (alt < new_per) 4215 val = alt; 4216 else 4217 val = new_per; 4218 } else 4219 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 4220 if (rack->r_ctl.rack_per_of_gp_ca > val) { 4221 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 4222 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val; 4223 } else { 4224 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 4225 ca_red = 0; 4226 logvar = new_per; 4227 logvar <<= 32; 4228 logvar |= alt; 4229 logvar2 = (uint32_t)rtt; 4230 logvar2 <<= 32; 4231 logvar2 |= (uint32_t)rtt_diff; 4232 logvar3 = rack_gp_rtt_maxmul; 4233 logvar3 <<= 32; 4234 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4235 rack_log_timely(rack, timely_says, 4236 logvar2, logvar3, 4237 logvar, __LINE__, 10); 4238 } 4239 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 4240 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 4241 logged |= 2; 4242 } 4243 if (rack->rc_gp_timely_dec_cnt < 0x7) { 4244 rack->rc_gp_timely_dec_cnt++; 4245 if (rack_timely_dec_clear && 4246 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 4247 rack->rc_gp_timely_dec_cnt = 0; 4248 } 4249 logvar = ss_red; 4250 logvar <<= 32; 4251 logvar |= ca_red; 4252 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 4253 __LINE__, 2); 4254 } 4255 4256 static void 4257 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 4258 uint32_t rtt, uint32_t line, uint8_t reas) 4259 { 4260 if (tcp_bblogging_on(rack->rc_tp)) { 4261 union tcp_log_stackspecific log; 4262 struct timeval tv; 4263 4264 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4265 log.u_bbr.flex1 = line; 4266 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 4267 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 4268 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 4269 log.u_bbr.flex5 = rtt; 4270 log.u_bbr.flex6 = rack->rc_highly_buffered; 4271 log.u_bbr.flex6 <<= 1; 4272 log.u_bbr.flex6 |= rack->forced_ack; 4273 log.u_bbr.flex6 <<= 1; 4274 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 4275 log.u_bbr.flex6 <<= 1; 4276 log.u_bbr.flex6 |= rack->in_probe_rtt; 4277 log.u_bbr.flex6 <<= 1; 4278 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 
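/*
 * After the shifts above, flex6 holds (from high bit to low):
 * rc_highly_buffered, forced_ack, rc_gp_dyn_mul, in_probe_rtt and
 * measure_saw_probe_rtt.
 */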
4279 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; 4280 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 4281 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 4282 log.u_bbr.flex8 = reas; 4283 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4284 log.u_bbr.delRate = rack_get_bw(rack); 4285 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 4286 log.u_bbr.cur_del_rate <<= 32; 4287 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 4288 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 4289 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 4290 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 4291 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 4292 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 4293 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 4294 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 4295 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4296 log.u_bbr.rttProp = us_cts; 4297 log.u_bbr.rttProp <<= 32; 4298 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 4299 TCP_LOG_EVENTP(rack->rc_tp, NULL, 4300 &rack->rc_inp->inp_socket->so_rcv, 4301 &rack->rc_inp->inp_socket->so_snd, 4302 BBR_LOG_RTT_SHRINKS, 0, 4303 0, &log, false, &rack->r_ctl.act_rcv_time); 4304 } 4305 } 4306 4307 static void 4308 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 4309 { 4310 uint64_t bwdp; 4311 4312 bwdp = rack_get_bw(rack); 4313 bwdp *= (uint64_t)rtt; 4314 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 4315 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 4316 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) { 4317 /* 4318 * A window protocol must be able to have 4 packets 4319 * outstanding as the floor in order to function 4320 * (especially considering delayed ack :D). 4321 */ 4322 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 4323 } 4324 } 4325 4326 static void 4327 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 4328 { 4329 /** 4330 * ProbeRTT is a bit different in rack_pacing than in 4331 * BBR. It is like BBR in that it uses the lowering of 4332 * the RTT as a signal that we saw something new and 4333 * counts from there for how long between. But it is 4334 * different in that its quite simple. It does not 4335 * play with the cwnd and wait until we get down 4336 * to N segments outstanding and hold that for 4337 * 200ms. Instead it just sets the pacing reduction 4338 * rate to a set percentage (70 by default) and hold 4339 * that for a number of recent GP Srtt's. 4340 */ 4341 uint32_t segsiz; 4342 4343 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 4344 if (rack->rc_gp_dyn_mul == 0) 4345 return; 4346 4347 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 4348 /* We are idle */ 4349 return; 4350 } 4351 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 4352 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 4353 /* 4354 * Stop the goodput now, the idea here is 4355 * that future measurements with in_probe_rtt 4356 * won't register if they are not greater so 4357 * we want to get what info (if any) is available 4358 * now. 
4359 */ 4360 rack_do_goodput_measurement(rack->rc_tp, rack, 4361 rack->rc_tp->snd_una, __LINE__, 4362 RACK_QUALITY_PROBERTT); 4363 } 4364 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 4365 rack->r_ctl.rc_time_probertt_entered = us_cts; 4366 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 4367 rack->r_ctl.rc_pace_min_segs); 4368 rack->in_probe_rtt = 1; 4369 rack->measure_saw_probe_rtt = 1; 4370 rack->r_ctl.rc_time_probertt_starts = 0; 4371 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 4372 if (rack_probertt_use_min_rtt_entry) 4373 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 4374 else 4375 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 4376 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4377 __LINE__, RACK_RTTS_ENTERPROBE); 4378 } 4379 4380 static void 4381 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 4382 { 4383 struct rack_sendmap *rsm; 4384 uint32_t segsiz; 4385 4386 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 4387 rack->r_ctl.rc_pace_min_segs); 4388 rack->in_probe_rtt = 0; 4389 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 4390 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 4391 /* 4392 * Stop the goodput now, the idea here is 4393 * that future measurements with in_probe_rtt 4394 * won't register if they are not greater so 4395 * we want to get what info (if any) is available 4396 * now. 4397 */ 4398 rack_do_goodput_measurement(rack->rc_tp, rack, 4399 rack->rc_tp->snd_una, __LINE__, 4400 RACK_QUALITY_PROBERTT); 4401 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 4402 /* 4403 * We don't have enough data to make a measurement. 4404 * So lets just stop and start here after exiting 4405 * probe-rtt. We probably are not interested in 4406 * the results anyway. 4407 */ 4408 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 4409 } 4410 /* 4411 * Measurements through the current snd_max are going 4412 * to be limited by the slower pacing rate. 4413 * 4414 * We need to mark these as app-limited so we 4415 * don't collapse the b/w. 4416 */ 4417 rsm = tqhash_max(rack->r_ctl.tqh); 4418 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 4419 if (rack->r_ctl.rc_app_limited_cnt == 0) 4420 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 4421 else { 4422 /* 4423 * Go out to the end app limited and mark 4424 * this new one as next and move the end_appl up 4425 * to this guy. 4426 */ 4427 if (rack->r_ctl.rc_end_appl) 4428 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 4429 rack->r_ctl.rc_end_appl = rsm; 4430 } 4431 rsm->r_flags |= RACK_APP_LIMITED; 4432 rack->r_ctl.rc_app_limited_cnt++; 4433 } 4434 /* 4435 * Now, we need to examine our pacing rate multipliers. 4436 * If its under 100%, we need to kick it back up to 4437 * 100%. We also don't let it be over our "max" above 4438 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 4439 * Note setting clamp_atexit_prtt to 0 has the effect 4440 * of setting CA/SS to 100% always at exit (which is 4441 * the default behavior). 4442 */ 4443 if (rack_probertt_clear_is) { 4444 rack->rc_gp_incr = 0; 4445 rack->rc_gp_bwred = 0; 4446 rack->rc_gp_timely_inc_cnt = 0; 4447 rack->rc_gp_timely_dec_cnt = 0; 4448 } 4449 /* Do we do any clamping at exit? 
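 * If the path looked highly buffered we clamp the CA/SS multipliers to
 * rack_atexit_prtt_hbp, otherwise to rack_atexit_prtt; leaving either
 * knob at 0 skips that clamp.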
*/ 4450 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 4451 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 4452 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 4453 } 4454 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 4455 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 4456 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 4457 } 4458 /* 4459 * Lets set rtt_diff to 0, so that we will get a "boost" 4460 * after exiting. 4461 */ 4462 rack->r_ctl.rc_rtt_diff = 0; 4463 4464 /* Clear all flags so we start fresh */ 4465 rack->rc_tp->t_bytes_acked = 0; 4466 rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 4467 /* 4468 * If configured to, set the cwnd and ssthresh to 4469 * our targets. 4470 */ 4471 if (rack_probe_rtt_sets_cwnd) { 4472 uint64_t ebdp; 4473 uint32_t setto; 4474 4475 /* Set ssthresh so we get into CA once we hit our target */ 4476 if (rack_probertt_use_min_rtt_exit == 1) { 4477 /* Set to min rtt */ 4478 rack_set_prtt_target(rack, segsiz, 4479 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 4480 } else if (rack_probertt_use_min_rtt_exit == 2) { 4481 /* Set to current gp rtt */ 4482 rack_set_prtt_target(rack, segsiz, 4483 rack->r_ctl.rc_gp_srtt); 4484 } else if (rack_probertt_use_min_rtt_exit == 3) { 4485 /* Set to entry gp rtt */ 4486 rack_set_prtt_target(rack, segsiz, 4487 rack->r_ctl.rc_entry_gp_rtt); 4488 } else { 4489 uint64_t sum; 4490 uint32_t setval; 4491 4492 sum = rack->r_ctl.rc_entry_gp_rtt; 4493 sum *= 10; 4494 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 4495 if (sum >= 20) { 4496 /* 4497 * A highly buffered path needs 4498 * cwnd space for timely to work. 4499 * Lets set things up as if 4500 * we are heading back here again. 4501 */ 4502 setval = rack->r_ctl.rc_entry_gp_rtt; 4503 } else if (sum >= 15) { 4504 /* 4505 * Lets take the smaller of the 4506 * two since we are just somewhat 4507 * buffered. 4508 */ 4509 setval = rack->r_ctl.rc_gp_srtt; 4510 if (setval > rack->r_ctl.rc_entry_gp_rtt) 4511 setval = rack->r_ctl.rc_entry_gp_rtt; 4512 } else { 4513 /* 4514 * Here we are not highly buffered 4515 * and should pick the min we can to 4516 * keep from causing loss. 
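 * (Recall sum above is 10 * rc_entry_gp_rtt / rc_gp_srtt, so reaching
 * this branch means the RTT at probe-rtt entry was less than 1.5 times
 * the current goodput srtt.)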
4517 */ 4518 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4519 } 4520 rack_set_prtt_target(rack, segsiz, 4521 setval); 4522 } 4523 if (rack_probe_rtt_sets_cwnd > 1) { 4524 /* There is a percentage here to boost */ 4525 ebdp = rack->r_ctl.rc_target_probertt_flight; 4526 ebdp *= rack_probe_rtt_sets_cwnd; 4527 ebdp /= 100; 4528 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 4529 } else 4530 setto = rack->r_ctl.rc_target_probertt_flight; 4531 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 4532 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 4533 /* Enforce a min */ 4534 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 4535 } 4536 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 4537 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 4538 } 4539 rack_log_rtt_shrinks(rack, us_cts, 4540 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4541 __LINE__, RACK_RTTS_EXITPROBE); 4542 /* Clear times last so log has all the info */ 4543 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 4544 rack->r_ctl.rc_time_probertt_entered = us_cts; 4545 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 4546 rack->r_ctl.rc_time_of_last_probertt = us_cts; 4547 } 4548 4549 static void 4550 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 4551 { 4552 /* Check in on probe-rtt */ 4553 4554 if (rack->rc_gp_filled == 0) { 4555 /* We do not do p-rtt unless we have gp measurements */ 4556 return; 4557 } 4558 if (rack->in_probe_rtt) { 4559 uint64_t no_overflow; 4560 uint32_t endtime, must_stay; 4561 4562 if (rack->r_ctl.rc_went_idle_time && 4563 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 4564 /* 4565 * We went idle during prtt, just exit now. 4566 */ 4567 rack_exit_probertt(rack, us_cts); 4568 } else if (rack_probe_rtt_safety_val && 4569 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 4570 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 4571 /* 4572 * Probe RTT safety value triggered! 4573 */ 4574 rack_log_rtt_shrinks(rack, us_cts, 4575 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4576 __LINE__, RACK_RTTS_SAFETY); 4577 rack_exit_probertt(rack, us_cts); 4578 } 4579 /* Calculate the max we will wait */ 4580 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 4581 if (rack->rc_highly_buffered) 4582 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 4583 /* Calculate the min we must wait */ 4584 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 4585 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 4586 TSTMP_LT(us_cts, endtime)) { 4587 uint32_t calc; 4588 /* Do we lower more? 
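 * Each full gp_srtt spent in probe-rtt walks the probe-rtt pacing
 * percentage down by another rack_per_of_gp_probertt_reduce, but never
 * below rack_per_of_gp_lowthresh.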
*/ 4589 no_exit: 4590 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 4591 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 4592 else 4593 calc = 0; 4594 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 4595 if (calc) { 4596 /* Maybe */ 4597 calc *= rack_per_of_gp_probertt_reduce; 4598 if (calc > rack_per_of_gp_probertt) 4599 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 4600 else 4601 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 4602 /* Limit it too */ 4603 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 4604 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 4605 } 4606 /* We must reach target or the time set */ 4607 return; 4608 } 4609 if (rack->r_ctl.rc_time_probertt_starts == 0) { 4610 if ((TSTMP_LT(us_cts, must_stay) && 4611 rack->rc_highly_buffered) || 4612 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 4613 rack->r_ctl.rc_target_probertt_flight)) { 4614 /* We are not past the must_stay time */ 4615 goto no_exit; 4616 } 4617 rack_log_rtt_shrinks(rack, us_cts, 4618 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4619 __LINE__, RACK_RTTS_REACHTARGET); 4620 rack->r_ctl.rc_time_probertt_starts = us_cts; 4621 if (rack->r_ctl.rc_time_probertt_starts == 0) 4622 rack->r_ctl.rc_time_probertt_starts = 1; 4623 /* Restore back to our rate we want to pace at in prtt */ 4624 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 4625 } 4626 /* 4627 * Setup our end time, some number of gp_srtts plus 200ms. 4628 */ 4629 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 4630 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 4631 if (rack_probertt_gpsrtt_cnt_div) 4632 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 4633 else 4634 endtime = 0; 4635 endtime += rack_min_probertt_hold; 4636 endtime += rack->r_ctl.rc_time_probertt_starts; 4637 if (TSTMP_GEQ(us_cts, endtime)) { 4638 /* yes, exit probertt */ 4639 rack_exit_probertt(rack, us_cts); 4640 } 4641 4642 } else if ((rack->rc_skip_timely == 0) && 4643 (TSTMP_GT(us_cts, rack->r_ctl.rc_lower_rtt_us_cts)) && 4644 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt)) { 4645 /* Go into probertt, its been too long since we went lower */ 4646 rack_enter_probertt(rack, us_cts); 4647 } 4648 } 4649 4650 static void 4651 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 4652 uint32_t rtt, int32_t rtt_diff) 4653 { 4654 uint64_t cur_bw, up_bnd, low_bnd, subfr; 4655 uint32_t losses; 4656 4657 if ((rack->rc_gp_dyn_mul == 0) || 4658 (rack->use_fixed_rate) || 4659 (rack->in_probe_rtt) || 4660 (rack->rc_always_pace == 0)) { 4661 /* No dynamic GP multiplier in play */ 4662 return; 4663 } 4664 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 4665 cur_bw = rack_get_bw(rack); 4666 /* Calculate our up and down range */ 4667 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 4668 up_bnd /= 100; 4669 up_bnd += rack->r_ctl.last_gp_comp_bw; 4670 4671 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 4672 subfr /= 100; 4673 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 4674 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 4675 /* 4676 * This is the case where our RTT is above 4677 * the max target and we have been configured 4678 * to just do timely no bonus up stuff in that case. 4679 * 4680 * There are two configurations, set to 1, and we 4681 * just do timely if we are over our max. 
If it's
4682			 * set above 1 then we slam the multipliers down
4683			 * to 100 and then decrement per timely.
4684			 */
4685			rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
4686				__LINE__, 3);
4687			if (rack->r_ctl.rc_no_push_at_mrtt > 1)
4688				rack_validate_multipliers_at_or_below_100(rack);
4689			rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
4690		} else if ((timely_says != 0) && (last_bw_est < low_bnd) && !losses) {
4691			/*
4692			 * We are decreasing; this is a bit complicated. It
4693			 * means we are losing ground. This could be
4694			 * because another flow entered and we are competing
4695			 * for b/w with it. This will push the RTT up which
4696			 * makes timely unusable unless we want to get shoved
4697			 * into a corner and just be backed off (the age
4698			 * old problem with delay based CC).
4699			 *
4700			 * On the other hand if it was a route change we
4701			 * would like to stay somewhat contained and not
4702			 * blow out the buffers.
4703			 */
4704			rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
4705				__LINE__, 3);
4706			rack->r_ctl.last_gp_comp_bw = cur_bw;
4707			if (rack->rc_gp_bwred == 0) {
4708				/* Go into reduction counting */
4709				rack->rc_gp_bwred = 1;
4710				rack->rc_gp_timely_dec_cnt = 0;
4711			}
4712			if (rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) {
4713				/*
4714				 * Push another time with a faster pacing
4715				 * to try to gain back (we include override to
4716				 * get a full raise factor).
4717				 */
4718				if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) ||
4719				    (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) ||
4720				    (timely_says == 0) ||
4721				    (rack_down_raise_thresh == 0)) {
4722					/*
4723					 * Do an override up in b/w if we were
4724					 * below the threshold or if the threshold
4725					 * is zero we always do the raise.
4726					 */
4727					rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1);
4728				} else {
4729					/* Log it stays the same */
4730					rack_log_timely(rack, 0, last_bw_est, low_bnd, 0,
4731						__LINE__, 11);
4732				}
4733				rack->rc_gp_timely_dec_cnt++;
4734				/* We are not incrementing really no-count */
4735				rack->rc_gp_incr = 0;
4736				rack->rc_gp_timely_inc_cnt = 0;
4737			} else {
4738				/*
4739				 * Let's just use the RTT
4740				 * information and give up
4741				 * pushing.
4742				 */
4743				goto use_timely;
4744			}
4745		} else if ((timely_says != 2) &&
4746		    !losses &&
4747		    (last_bw_est > up_bnd)) {
4748			/*
4749			 * We are increasing b/w; let's keep going, updating
4750			 * our b/w and ignoring any timely input, unless
4751			 * of course we are at our max raise (if there is one).
4752			 */
4753
4754			rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
4755				__LINE__, 3);
4756			rack->r_ctl.last_gp_comp_bw = cur_bw;
4757			if (rack->rc_gp_saw_ss &&
4758			    rack->r_ctl.rack_per_upper_bound_ss &&
4759			    (rack->r_ctl.rack_per_of_gp_ss == rack->r_ctl.rack_per_upper_bound_ss)) {
4760				/*
4761				 * In cases where we can't go higher
4762				 * we should just use timely.
4763				 */
4764				goto use_timely;
4765			}
4766			if (rack->rc_gp_saw_ca &&
4767			    rack->r_ctl.rack_per_upper_bound_ca &&
4768			    (rack->r_ctl.rack_per_of_gp_ca == rack->r_ctl.rack_per_upper_bound_ca)) {
4769				/*
4770				 * In cases where we can't go higher
4771				 * we should just use timely.
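				 * (Here rack_per_of_gp_ca has already hit rack_per_upper_bound_ca, so a
				 * bandwidth-driven raise would be clipped; fall back to the normal
				 * timely increase/decrease handling instead.)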
4772 */ 4773 goto use_timely; 4774 } 4775 rack->rc_gp_bwred = 0; 4776 rack->rc_gp_timely_dec_cnt = 0; 4777 /* You get a set number of pushes if timely is trying to reduce */ 4778 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { 4779 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4780 } else { 4781 /* Log it stays the same */ 4782 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0, 4783 __LINE__, 12); 4784 } 4785 return; 4786 } else { 4787 /* 4788 * We are staying between the lower and upper range bounds 4789 * so use timely to decide. 4790 */ 4791 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4792 __LINE__, 3); 4793 use_timely: 4794 if (timely_says) { 4795 rack->rc_gp_incr = 0; 4796 rack->rc_gp_timely_inc_cnt = 0; 4797 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && 4798 !losses && 4799 (last_bw_est < low_bnd)) { 4800 /* We are loosing ground */ 4801 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4802 rack->rc_gp_timely_dec_cnt++; 4803 /* We are not incrementing really no-count */ 4804 rack->rc_gp_incr = 0; 4805 rack->rc_gp_timely_inc_cnt = 0; 4806 } else 4807 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4808 } else { 4809 rack->rc_gp_bwred = 0; 4810 rack->rc_gp_timely_dec_cnt = 0; 4811 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4812 } 4813 } 4814 } 4815 4816 static int32_t 4817 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt) 4818 { 4819 int32_t timely_says; 4820 uint64_t log_mult, log_rtt_a_diff; 4821 4822 log_rtt_a_diff = rtt; 4823 log_rtt_a_diff <<= 32; 4824 log_rtt_a_diff |= (uint32_t)rtt_diff; 4825 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * 4826 rack_gp_rtt_maxmul)) { 4827 /* Reduce the b/w multiplier */ 4828 timely_says = 2; 4829 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 4830 log_mult <<= 32; 4831 log_mult |= prev_rtt; 4832 rack_log_timely(rack, timely_says, log_mult, 4833 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4834 log_rtt_a_diff, __LINE__, 4); 4835 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4836 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4837 max(rack_gp_rtt_mindiv , 1)))) { 4838 /* Increase the b/w multiplier */ 4839 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4840 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4841 max(rack_gp_rtt_mindiv , 1)); 4842 log_mult <<= 32; 4843 log_mult |= prev_rtt; 4844 timely_says = 0; 4845 rack_log_timely(rack, timely_says, log_mult , 4846 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4847 log_rtt_a_diff, __LINE__, 5); 4848 } else { 4849 /* 4850 * Use a gradient to find it the timely gradient 4851 * is: 4852 * grad = rc_rtt_diff / min_rtt; 4853 * 4854 * anything below or equal to 0 will be 4855 * a increase indication. Anything above 4856 * zero is a decrease. Note we take care 4857 * of the actual gradient calculation 4858 * in the reduction (its not needed for 4859 * increase). 
4860 */ 4861 log_mult = prev_rtt; 4862 if (rtt_diff <= 0) { 4863 /* 4864 * Rttdiff is less than zero, increase the 4865 * b/w multiplier (its 0 or negative) 4866 */ 4867 timely_says = 0; 4868 rack_log_timely(rack, timely_says, log_mult, 4869 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4870 } else { 4871 /* Reduce the b/w multiplier */ 4872 timely_says = 1; 4873 rack_log_timely(rack, timely_says, log_mult, 4874 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4875 } 4876 } 4877 return (timely_says); 4878 } 4879 4880 static __inline int 4881 rack_in_gp_window(struct tcpcb *tp, struct rack_sendmap *rsm) 4882 { 4883 if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && 4884 SEQ_LEQ(rsm->r_end, tp->gput_ack)) { 4885 /** 4886 * This covers the case that the 4887 * resent is completely inside 4888 * the gp range or up to it. 4889 * |----------------| 4890 * |-----| <or> 4891 * |----| 4892 * <or> |---| 4893 */ 4894 return (1); 4895 } else if (SEQ_LT(rsm->r_start, tp->gput_seq) && 4896 SEQ_GT(rsm->r_end, tp->gput_seq)){ 4897 /** 4898 * This covers the case of 4899 * |--------------| 4900 * |-------->| 4901 */ 4902 return (1); 4903 } else if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && 4904 SEQ_LT(rsm->r_start, tp->gput_ack) && 4905 SEQ_GEQ(rsm->r_end, tp->gput_ack)) { 4906 4907 /** 4908 * This covers the case of 4909 * |--------------| 4910 * |-------->| 4911 */ 4912 return (1); 4913 } 4914 return (0); 4915 } 4916 4917 static __inline void 4918 rack_mark_in_gp_win(struct tcpcb *tp, struct rack_sendmap *rsm) 4919 { 4920 4921 if ((tp->t_flags & TF_GPUTINPROG) == 0) 4922 return; 4923 /* 4924 * We have a Goodput measurement in progress. Mark 4925 * the send if its within the window. If its not 4926 * in the window make sure it does not have the mark. 4927 */ 4928 if (rack_in_gp_window(tp, rsm)) 4929 rsm->r_flags |= RACK_IN_GP_WIN; 4930 else 4931 rsm->r_flags &= ~RACK_IN_GP_WIN; 4932 } 4933 4934 static __inline void 4935 rack_clear_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) 4936 { 4937 /* A GP measurement is ending, clear all marks on the send map*/ 4938 struct rack_sendmap *rsm = NULL; 4939 4940 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 4941 if (rsm == NULL) { 4942 rsm = tqhash_min(rack->r_ctl.tqh); 4943 } 4944 /* Nothing left? */ 4945 while ((rsm != NULL) && (SEQ_GEQ(tp->gput_ack, rsm->r_start))){ 4946 rsm->r_flags &= ~RACK_IN_GP_WIN; 4947 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4948 } 4949 } 4950 4951 4952 static __inline void 4953 rack_tend_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) 4954 { 4955 struct rack_sendmap *rsm = NULL; 4956 4957 if (tp->snd_una == tp->snd_max) { 4958 /* Nothing outstanding yet, nothing to do here */ 4959 return; 4960 } 4961 if (SEQ_GT(tp->gput_seq, tp->snd_una)) { 4962 /* 4963 * We are measuring ahead of some outstanding 4964 * data. We need to walk through up until we get 4965 * to gp_seq marking so that no rsm is set incorrectly 4966 * with RACK_IN_GP_WIN. 4967 */ 4968 rsm = tqhash_min(rack->r_ctl.tqh); 4969 while (rsm != NULL) { 4970 rack_mark_in_gp_win(tp, rsm); 4971 if (SEQ_GEQ(rsm->r_end, tp->gput_seq)) 4972 break; 4973 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4974 } 4975 } 4976 if (rsm == NULL) { 4977 /* 4978 * Need to find the GP seq, if rsm is 4979 * set we stopped as we hit it. 
4980 */ 4981 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 4982 if (rsm == NULL) 4983 return; 4984 rack_mark_in_gp_win(tp, rsm); 4985 } 4986 /* 4987 * Now we may need to mark already sent rsm, ahead of 4988 * gput_seq in the window since they may have been sent 4989 * *before* we started our measurment. The rsm, if non-null 4990 * has been marked (note if rsm would have been NULL we would have 4991 * returned in the previous block). So we go to the next, and continue 4992 * until we run out of entries or we exceed the gp_ack value. 4993 */ 4994 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4995 while (rsm) { 4996 rack_mark_in_gp_win(tp, rsm); 4997 if (SEQ_GT(rsm->r_end, tp->gput_ack)) 4998 break; 4999 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 5000 } 5001 } 5002 5003 static void 5004 rack_log_gp_calc(struct tcp_rack *rack, uint32_t add_part, uint32_t sub_part, uint32_t srtt, uint64_t meas_bw, uint64_t utim, uint8_t meth, uint32_t line) 5005 { 5006 if (tcp_bblogging_on(rack->rc_tp)) { 5007 union tcp_log_stackspecific log; 5008 struct timeval tv; 5009 5010 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5011 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5012 log.u_bbr.flex1 = add_part; 5013 log.u_bbr.flex2 = sub_part; 5014 log.u_bbr.flex3 = rack_wma_divisor; 5015 log.u_bbr.flex4 = srtt; 5016 log.u_bbr.flex7 = (uint16_t)line; 5017 log.u_bbr.flex8 = meth; 5018 log.u_bbr.delRate = rack->r_ctl.gp_bw; 5019 log.u_bbr.cur_del_rate = meas_bw; 5020 log.u_bbr.rttProp = utim; 5021 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5022 &rack->rc_inp->inp_socket->so_rcv, 5023 &rack->rc_inp->inp_socket->so_snd, 5024 BBR_LOG_THRESH_CALC, 0, 5025 0, &log, false, &rack->r_ctl.act_rcv_time); 5026 } 5027 } 5028 5029 static void 5030 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 5031 tcp_seq th_ack, int line, uint8_t quality) 5032 { 5033 uint64_t tim, bytes_ps, stim, utim; 5034 uint32_t segsiz, bytes, reqbytes, us_cts; 5035 int32_t gput, new_rtt_diff, timely_says; 5036 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 5037 int did_add = 0; 5038 5039 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 5040 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 5041 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 5042 tim = us_cts - tp->gput_ts; 5043 else 5044 tim = 0; 5045 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 5046 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 5047 else 5048 stim = 0; 5049 /* 5050 * Use the larger of the send time or ack time. This prevents us 5051 * from being influenced by ack artifacts to come up with too 5052 * high of measurement. Note that since we are spanning over many more 5053 * bytes in most of our measurements hopefully that is less likely to 5054 * occur. 5055 */ 5056 if (tim > stim) 5057 utim = max(tim, 1); 5058 else 5059 utim = max(stim, 1); 5060 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 5061 rack_log_gpset(rack, th_ack, us_cts, rack->r_ctl.rc_gp_cumack_ts, __LINE__, 3, NULL); 5062 if ((tim == 0) && (stim == 0)) { 5063 /* 5064 * Invalid measurement time, maybe 5065 * all on one ack/one send? 5066 */ 5067 bytes = 0; 5068 bytes_ps = 0; 5069 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5070 0, 0, 0, 10, __LINE__, NULL, quality); 5071 goto skip_measurement; 5072 } 5073 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 5074 /* We never made a us_rtt measurement? 
*/ 5075 bytes = 0; 5076 bytes_ps = 0; 5077 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5078 0, 0, 0, 10, __LINE__, NULL, quality); 5079 goto skip_measurement; 5080 } 5081 /* 5082 * Calculate the maximum possible b/w this connection 5083 * could have. We base our calculation on the lowest 5084 * rtt we have seen during the measurement and the 5085 * largest rwnd the client has given us in that time. This 5086 * forms a BDP that is the maximum that we could ever 5087 * get to the client. Anything larger is not valid. 5088 * 5089 * I originally had code here that rejected measurements 5090 * where the time was less than 1/2 the latest us_rtt. 5091 * But after thinking on that I realized its wrong since 5092 * say you had a 150Mbps or even 1Gbps link, and you 5093 * were a long way away.. example I am in Europe (100ms rtt) 5094 * talking to my 1Gbps link in S.C. Now measuring say 150,000 5095 * bytes my time would be 1.2ms, and yet my rtt would say 5096 * the measurement was invalid the time was < 50ms. The 5097 * same thing is true for 150Mb (8ms of time). 5098 * 5099 * A better way I realized is to look at what the maximum 5100 * the connection could possibly do. This is gated on 5101 * the lowest RTT we have seen and the highest rwnd. 5102 * We should in theory never exceed that, if we are 5103 * then something on the path is storing up packets 5104 * and then feeding them all at once to our endpoint 5105 * messing up our measurement. 5106 */ 5107 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 5108 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 5109 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 5110 if (SEQ_LT(th_ack, tp->gput_seq)) { 5111 /* No measurement can be made */ 5112 bytes = 0; 5113 bytes_ps = 0; 5114 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5115 0, 0, 0, 10, __LINE__, NULL, quality); 5116 goto skip_measurement; 5117 } else 5118 bytes = (th_ack - tp->gput_seq); 5119 bytes_ps = (uint64_t)bytes; 5120 /* 5121 * Don't measure a b/w for pacing unless we have gotten at least 5122 * an initial windows worth of data in this measurement interval. 5123 * 5124 * Small numbers of bytes get badly influenced by delayed ack and 5125 * other artifacts. Note we take the initial window or our 5126 * defined minimum GP (defaulting to 10 which hopefully is the 5127 * IW). 5128 */ 5129 if (rack->rc_gp_filled == 0) { 5130 /* 5131 * The initial estimate is special. We 5132 * have blasted out an IW worth of packets 5133 * without a real valid ack ts results. We 5134 * then setup the app_limited_needs_set flag, 5135 * this should get the first ack in (probably 2 5136 * MSS worth) to be recorded as the timestamp. 5137 * We thus allow a smaller number of bytes i.e. 5138 * IW - 2MSS. 5139 */ 5140 reqbytes -= (2 * segsiz); 5141 /* Also lets fill previous for our first measurement to be neutral */ 5142 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 5143 } 5144 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 5145 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5146 rack->r_ctl.rc_app_limited_cnt, 5147 0, 0, 10, __LINE__, NULL, quality); 5148 goto skip_measurement; 5149 } 5150 /* 5151 * We now need to calculate the Timely like status so 5152 * we can update (possibly) the b/w multipliers. 
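	 * (rc_rtt_diff below is a 1/8-weighted moving average of gp_srtt minus
	 * the previous gp_srtt; a measurement that overlapped probe-rtt is
	 * skipped so the deliberately lowered RTT is not counted as the path
	 * improving.)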
5153 */ 5154 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 5155 if (rack->rc_gp_filled == 0) { 5156 /* No previous reading */ 5157 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 5158 } else { 5159 if (rack->measure_saw_probe_rtt == 0) { 5160 /* 5161 * We don't want a probertt to be counted 5162 * since it will be negative incorrectly. We 5163 * expect to be reducing the RTT when we 5164 * pace at a slower rate. 5165 */ 5166 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 5167 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 5168 } 5169 } 5170 timely_says = rack_make_timely_judgement(rack, 5171 rack->r_ctl.rc_gp_srtt, 5172 rack->r_ctl.rc_rtt_diff, 5173 rack->r_ctl.rc_prev_gp_srtt 5174 ); 5175 bytes_ps *= HPTS_USEC_IN_SEC; 5176 bytes_ps /= utim; 5177 if (bytes_ps > rack->r_ctl.last_max_bw) { 5178 /* 5179 * Something is on path playing 5180 * since this b/w is not possible based 5181 * on our BDP (highest rwnd and lowest rtt 5182 * we saw in the measurement window). 5183 * 5184 * Another option here would be to 5185 * instead skip the measurement. 5186 */ 5187 rack_log_pacing_delay_calc(rack, bytes, reqbytes, 5188 bytes_ps, rack->r_ctl.last_max_bw, 0, 5189 11, __LINE__, NULL, quality); 5190 bytes_ps = rack->r_ctl.last_max_bw; 5191 } 5192 /* We store gp for b/w in bytes per second */ 5193 if (rack->rc_gp_filled == 0) { 5194 /* Initial measurement */ 5195 if (bytes_ps) { 5196 rack->r_ctl.gp_bw = bytes_ps; 5197 rack->rc_gp_filled = 1; 5198 rack->r_ctl.num_measurements = 1; 5199 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 5200 } else { 5201 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5202 rack->r_ctl.rc_app_limited_cnt, 5203 0, 0, 10, __LINE__, NULL, quality); 5204 } 5205 if (tcp_in_hpts(rack->rc_tp) && 5206 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 5207 /* 5208 * Ok we can't trust the pacer in this case 5209 * where we transition from un-paced to paced. 5210 * Or for that matter when the burst mitigation 5211 * was making a wild guess and got it wrong. 5212 * Stop the pacer and clear up all the aggregate 5213 * delays etc. 5214 */ 5215 tcp_hpts_remove(rack->rc_tp); 5216 rack->r_ctl.rc_hpts_flags = 0; 5217 rack->r_ctl.rc_last_output_to = 0; 5218 } 5219 did_add = 2; 5220 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { 5221 /* Still a small number run an average */ 5222 rack->r_ctl.gp_bw += bytes_ps; 5223 addpart = rack->r_ctl.num_measurements; 5224 rack->r_ctl.num_measurements++; 5225 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 5226 /* We have collected enough to move forward */ 5227 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; 5228 } 5229 rack_set_pace_segments(tp, rack, __LINE__, NULL); 5230 did_add = 3; 5231 } else { 5232 /* 5233 * We want to take 1/wma of the goodput and add in to 7/8th 5234 * of the old value weighted by the srtt. So if your measurement 5235 * period is say 2 SRTT's long you would get 1/4 as the 5236 * value, if it was like 1/2 SRTT then you would get 1/16th. 5237 * 5238 * But we must be careful not to take too much i.e. if the 5239 * srtt is say 20ms and the measurement is taken over 5240 * 400ms our weight would be 400/20 i.e. 20. On the 5241 * other hand if we get a measurement over 1ms with a 5242 * 10ms rtt we only want to take a much smaller portion. 
5243 */ 5244 uint8_t meth; 5245 5246 if (rack->r_ctl.num_measurements < 0xff) { 5247 rack->r_ctl.num_measurements++; 5248 } 5249 srtt = (uint64_t)tp->t_srtt; 5250 if (srtt == 0) { 5251 /* 5252 * Strange why did t_srtt go back to zero? 5253 */ 5254 if (rack->r_ctl.rc_rack_min_rtt) 5255 srtt = rack->r_ctl.rc_rack_min_rtt; 5256 else 5257 srtt = HPTS_USEC_IN_MSEC; 5258 } 5259 /* 5260 * XXXrrs: Note for reviewers, in playing with 5261 * dynamic pacing I discovered this GP calculation 5262 * as done originally leads to some undesired results. 5263 * Basically you can get longer measurements contributing 5264 * too much to the WMA. Thus I changed it if you are doing 5265 * dynamic adjustments to only do the aportioned adjustment 5266 * if we have a very small (time wise) measurement. Longer 5267 * measurements just get there weight (defaulting to 1/8) 5268 * add to the WMA. We may want to think about changing 5269 * this to always do that for both sides i.e. dynamic 5270 * and non-dynamic... but considering lots of folks 5271 * were playing with this I did not want to change the 5272 * calculation per.se. without your thoughts.. Lawerence? 5273 * Peter?? 5274 */ 5275 if (rack->rc_gp_dyn_mul == 0) { 5276 subpart = rack->r_ctl.gp_bw * utim; 5277 subpart /= (srtt * 8); 5278 if (subpart < (rack->r_ctl.gp_bw / 2)) { 5279 /* 5280 * The b/w update takes no more 5281 * away then 1/2 our running total 5282 * so factor it in. 5283 */ 5284 addpart = bytes_ps * utim; 5285 addpart /= (srtt * 8); 5286 meth = 1; 5287 } else { 5288 /* 5289 * Don't allow a single measurement 5290 * to account for more than 1/2 of the 5291 * WMA. This could happen on a retransmission 5292 * where utim becomes huge compared to 5293 * srtt (multiple retransmissions when using 5294 * the sending rate which factors in all the 5295 * transmissions from the first one). 5296 */ 5297 subpart = rack->r_ctl.gp_bw / 2; 5298 addpart = bytes_ps / 2; 5299 meth = 2; 5300 } 5301 rack_log_gp_calc(rack, addpart, subpart, srtt, bytes_ps, utim, meth, __LINE__); 5302 resid_bw = rack->r_ctl.gp_bw - subpart; 5303 rack->r_ctl.gp_bw = resid_bw + addpart; 5304 did_add = 1; 5305 } else { 5306 if ((utim / srtt) <= 1) { 5307 /* 5308 * The b/w update was over a small period 5309 * of time. The idea here is to prevent a small 5310 * measurement time period from counting 5311 * too much. So we scale it based on the 5312 * time so it attributes less than 1/rack_wma_divisor 5313 * of its measurement. 5314 */ 5315 subpart = rack->r_ctl.gp_bw * utim; 5316 subpart /= (srtt * rack_wma_divisor); 5317 addpart = bytes_ps * utim; 5318 addpart /= (srtt * rack_wma_divisor); 5319 meth = 3; 5320 } else { 5321 /* 5322 * The scaled measurement was long 5323 * enough so lets just add in the 5324 * portion of the measurement i.e. 1/rack_wma_divisor 5325 */ 5326 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; 5327 addpart = bytes_ps / rack_wma_divisor; 5328 meth = 4; 5329 } 5330 if ((rack->measure_saw_probe_rtt == 0) || 5331 (bytes_ps > rack->r_ctl.gp_bw)) { 5332 /* 5333 * For probe-rtt we only add it in 5334 * if its larger, all others we just 5335 * add in. 5336 */ 5337 did_add = 1; 5338 rack_log_gp_calc(rack, addpart, subpart, srtt, bytes_ps, utim, meth, __LINE__); 5339 resid_bw = rack->r_ctl.gp_bw - subpart; 5340 rack->r_ctl.gp_bw = resid_bw + addpart; 5341 } 5342 } 5343 rack_set_pace_segments(tp, rack, __LINE__, NULL); 5344 } 5345 /* 5346 * We only watch the growth of the GP during the initial startup 5347 * or first-slowstart that ensues. 
If we ever needed to watch 5348 * growth of gp outside of that period all we need to do is 5349 * remove the first clause of this if (rc_initial_ss_comp). 5350 */ 5351 if ((rack->rc_initial_ss_comp == 0) && 5352 (rack->r_ctl.num_measurements >= RACK_REQ_AVG)) { 5353 uint64_t gp_est; 5354 5355 gp_est = bytes_ps; 5356 if (tcp_bblogging_on(rack->rc_tp)) { 5357 union tcp_log_stackspecific log; 5358 struct timeval tv; 5359 5360 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5361 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5362 log.u_bbr.flex1 = rack->r_ctl.current_round; 5363 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; 5364 log.u_bbr.delRate = gp_est; 5365 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; 5366 log.u_bbr.flex8 = 41; 5367 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5368 0, &log, false, NULL, __func__, __LINE__,&tv); 5369 } 5370 if ((rack->r_ctl.num_measurements == RACK_REQ_AVG) || 5371 (rack->r_ctl.last_gpest == 0)) { 5372 /* 5373 * The round we get our measurement averaging going 5374 * is the base round so it always is the source point 5375 * for when we had our first increment. From there on 5376 * we only record the round that had a rise. 5377 */ 5378 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; 5379 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; 5380 } else if (gp_est >= rack->r_ctl.last_gpest) { 5381 /* 5382 * Test to see if its gone up enough 5383 * to set the round count up to now. Note 5384 * that on the seeding of the 4th measurement we 5385 */ 5386 gp_est *= 1000; 5387 gp_est /= rack->r_ctl.last_gpest; 5388 if ((uint32_t)gp_est > rack->r_ctl.gp_gain_req) { 5389 /* 5390 * We went up enough to record the round. 5391 */ 5392 if (tcp_bblogging_on(rack->rc_tp)) { 5393 union tcp_log_stackspecific log; 5394 struct timeval tv; 5395 5396 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5397 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5398 log.u_bbr.flex1 = rack->r_ctl.current_round; 5399 log.u_bbr.flex2 = (uint32_t)gp_est; 5400 log.u_bbr.flex3 = rack->r_ctl.gp_gain_req; 5401 log.u_bbr.delRate = gp_est; 5402 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; 5403 log.u_bbr.flex8 = 42; 5404 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5405 0, &log, false, NULL, __func__, __LINE__,&tv); 5406 } 5407 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; 5408 if (rack->r_ctl.use_gp_not_last == 1) 5409 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; 5410 else 5411 rack->r_ctl.last_gpest = bytes_ps; 5412 } 5413 } 5414 } 5415 if ((rack->gp_ready == 0) && 5416 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 5417 /* We have enough measurements now */ 5418 rack->gp_ready = 1; 5419 if (rack->dgp_on || 5420 rack->rack_hibeta) 5421 rack_set_cc_pacing(rack); 5422 if (rack->defer_options) 5423 rack_apply_deferred_options(rack); 5424 } 5425 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 5426 rack_get_bw(rack), 22, did_add, NULL, quality); 5427 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 5428 5429 if ((rack->measure_saw_probe_rtt == 0) && 5430 rack->rc_gp_rtt_set) { 5431 if (rack->rc_skip_timely == 0) { 5432 rack_update_multiplier(rack, timely_says, bytes_ps, 5433 rack->r_ctl.rc_gp_srtt, 5434 rack->r_ctl.rc_rtt_diff); 5435 } 5436 } 5437 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 5438 rack_get_bw(rack), 3, line, NULL, quality); 5439 rack_log_pacing_delay_calc(rack, 5440 bytes, /* flex2 */ 5441 tim, /* flex1 */ 5442 bytes_ps, /* bw_inuse */ 5443 rack->r_ctl.gp_bw, /* delRate */ 
5444 rack_get_lt_bw(rack), /* rttProp */ 5445 20, line, NULL, 0); 5446 /* reset the gp srtt and setup the new prev */ 5447 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 5448 /* Record the lost count for the next measurement */ 5449 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 5450 skip_measurement: 5451 /* 5452 * We restart our diffs based on the gpsrtt in the 5453 * measurement window. 5454 */ 5455 rack->rc_gp_rtt_set = 0; 5456 rack->rc_gp_saw_rec = 0; 5457 rack->rc_gp_saw_ca = 0; 5458 rack->rc_gp_saw_ss = 0; 5459 rack->rc_dragged_bottom = 0; 5460 if (quality == RACK_QUALITY_HIGH) { 5461 /* 5462 * Gput in the stats world is in kbps where bytes_ps is 5463 * bytes per second so we do ((x * 8)/ 1000). 5464 */ 5465 gput = (int32_t)((bytes_ps << 3) / (uint64_t)1000); 5466 #ifdef STATS 5467 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 5468 gput); 5469 /* 5470 * XXXLAS: This is a temporary hack, and should be 5471 * chained off VOI_TCP_GPUT when stats(9) grows an 5472 * API to deal with chained VOIs. 5473 */ 5474 if (tp->t_stats_gput_prev > 0) 5475 stats_voi_update_abs_s32(tp->t_stats, 5476 VOI_TCP_GPUT_ND, 5477 ((gput - tp->t_stats_gput_prev) * 100) / 5478 tp->t_stats_gput_prev); 5479 #endif 5480 tp->t_stats_gput_prev = gput; 5481 } 5482 tp->t_flags &= ~TF_GPUTINPROG; 5483 /* 5484 * Now are we app limited now and there is space from where we 5485 * were to where we want to go? 5486 * 5487 * We don't do the other case i.e. non-applimited here since 5488 * the next send will trigger us picking up the missing data. 5489 */ 5490 if (rack->r_ctl.rc_first_appl && 5491 TCPS_HAVEESTABLISHED(tp->t_state) && 5492 rack->r_ctl.rc_app_limited_cnt && 5493 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 5494 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > 5495 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 5496 /* 5497 * Yep there is enough outstanding to make a measurement here. 5498 */ 5499 struct rack_sendmap *rsm; 5500 5501 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 5502 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 5503 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 5504 rack->app_limited_needs_set = 0; 5505 tp->gput_seq = th_ack; 5506 if (rack->in_probe_rtt) 5507 rack->measure_saw_probe_rtt = 1; 5508 else if ((rack->measure_saw_probe_rtt) && 5509 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 5510 rack->measure_saw_probe_rtt = 0; 5511 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { 5512 /* There is a full window to gain info from */ 5513 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 5514 } else { 5515 /* We can only measure up to the applimited point */ 5516 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); 5517 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 5518 /* 5519 * We don't have enough to make a measurement. 5520 */ 5521 tp->t_flags &= ~TF_GPUTINPROG; 5522 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 5523 0, 0, 0, 6, __LINE__, NULL, quality); 5524 return; 5525 } 5526 } 5527 if (tp->t_state >= TCPS_FIN_WAIT_1) { 5528 /* 5529 * We will get no more data into the SB 5530 * this means we need to have the data available 5531 * before we start a measurement. 5532 */ 5533 if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) { 5534 /* Nope not enough data. 
*/ 5535 return; 5536 } 5537 } 5538 tp->t_flags |= TF_GPUTINPROG; 5539 /* 5540 * Now we need to find the timestamp of the send at tp->gput_seq 5541 * for the send based measurement. 5542 */ 5543 rack->r_ctl.rc_gp_cumack_ts = 0; 5544 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 5545 if (rsm) { 5546 /* Ok send-based limit is set */ 5547 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 5548 /* 5549 * Move back to include the earlier part 5550 * so our ack time lines up right (this may 5551 * make an overlapping measurement but thats 5552 * ok). 5553 */ 5554 tp->gput_seq = rsm->r_start; 5555 } 5556 if (rsm->r_flags & RACK_ACKED) { 5557 struct rack_sendmap *nrsm; 5558 5559 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 5560 tp->gput_seq = rsm->r_end; 5561 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); 5562 if (nrsm) 5563 rsm = nrsm; 5564 else { 5565 rack->app_limited_needs_set = 1; 5566 } 5567 } else 5568 rack->app_limited_needs_set = 1; 5569 /* We always go from the first send */ 5570 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; 5571 } else { 5572 /* 5573 * If we don't find the rsm due to some 5574 * send-limit set the current time, which 5575 * basically disables the send-limit. 5576 */ 5577 struct timeval tv; 5578 5579 microuptime(&tv); 5580 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 5581 } 5582 rack_tend_gp_marks(tp, rack); 5583 rack_log_pacing_delay_calc(rack, 5584 tp->gput_seq, 5585 tp->gput_ack, 5586 (uint64_t)rsm, 5587 tp->gput_ts, 5588 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 5589 9, 5590 __LINE__, rsm, quality); 5591 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 5592 } else { 5593 /* 5594 * To make sure proper timestamp merging occurs, we need to clear 5595 * all GP marks if we don't start a measurement. 5596 */ 5597 rack_clear_gp_marks(tp, rack); 5598 } 5599 } 5600 5601 /* 5602 * CC wrapper hook functions 5603 */ 5604 static void 5605 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 5606 uint16_t type, int32_t post_recovery) 5607 { 5608 uint32_t prior_cwnd, acked; 5609 struct tcp_log_buffer *lgb = NULL; 5610 uint8_t labc_to_use, quality; 5611 5612 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5613 tp->t_ccv.nsegs = nsegs; 5614 acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una); 5615 if ((post_recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 5616 uint32_t max; 5617 5618 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 5619 if (tp->t_ccv.bytes_this_ack > max) { 5620 tp->t_ccv.bytes_this_ack = max; 5621 } 5622 } 5623 #ifdef STATS 5624 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 5625 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 5626 #endif 5627 if ((th_ack == tp->snd_max) && rack->lt_bw_up) { 5628 /* 5629 * We will ack all the data, time to end any 5630 * lt_bw_up we have running until something 5631 * new is sent. Note we need to use the actual 5632 * ack_rcv_time which with pacing may be different. 
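		 * (lt_bw_bytes and lt_bw_time accumulate the long-term delivered
		 * rate; the open interval is closed out here since everything sent
		 * so far is now acked, and lt_bw_up stays clear until new data is
		 * transmitted.)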
5633 */ 5634 uint64_t tmark; 5635 5636 rack->r_ctl.lt_bw_bytes += (tp->snd_max - rack->r_ctl.lt_seq); 5637 rack->r_ctl.lt_seq = tp->snd_max; 5638 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 5639 if (tmark >= rack->r_ctl.lt_timemark) { 5640 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 5641 } 5642 rack->r_ctl.lt_timemark = tmark; 5643 rack->lt_bw_up = 0; 5644 } 5645 quality = RACK_QUALITY_NONE; 5646 if ((tp->t_flags & TF_GPUTINPROG) && 5647 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { 5648 /* Measure the Goodput */ 5649 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); 5650 } 5651 /* Which way our we limited, if not cwnd limited no advance in CA */ 5652 if (tp->snd_cwnd <= tp->snd_wnd) 5653 tp->t_ccv.flags |= CCF_CWND_LIMITED; 5654 else 5655 tp->t_ccv.flags &= ~CCF_CWND_LIMITED; 5656 if (tp->snd_cwnd > tp->snd_ssthresh) { 5657 tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack, 5658 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 5659 /* For the setting of a window past use the actual scwnd we are using */ 5660 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 5661 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 5662 tp->t_ccv.flags |= CCF_ABC_SENTAWND; 5663 } 5664 } else { 5665 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 5666 tp->t_bytes_acked = 0; 5667 } 5668 prior_cwnd = tp->snd_cwnd; 5669 if ((post_recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 5670 (rack_client_low_buf && rack->client_bufferlvl && 5671 (rack->client_bufferlvl < rack_client_low_buf))) 5672 labc_to_use = rack->rc_labc; 5673 else 5674 labc_to_use = rack_max_abc_post_recovery; 5675 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5676 union tcp_log_stackspecific log; 5677 struct timeval tv; 5678 5679 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5680 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5681 log.u_bbr.flex1 = th_ack; 5682 log.u_bbr.flex2 = tp->t_ccv.flags; 5683 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 5684 log.u_bbr.flex4 = tp->t_ccv.nsegs; 5685 log.u_bbr.flex5 = labc_to_use; 5686 log.u_bbr.flex6 = prior_cwnd; 5687 log.u_bbr.flex7 = V_tcp_do_newsack; 5688 log.u_bbr.flex8 = 1; 5689 lgb = tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5690 0, &log, false, NULL, __func__, __LINE__,&tv); 5691 } 5692 if (CC_ALGO(tp)->ack_received != NULL) { 5693 /* XXXLAS: Find a way to live without this */ 5694 tp->t_ccv.curack = th_ack; 5695 tp->t_ccv.labc = labc_to_use; 5696 tp->t_ccv.flags |= CCF_USE_LOCAL_ABC; 5697 CC_ALGO(tp)->ack_received(&tp->t_ccv, type); 5698 } 5699 if (lgb) { 5700 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; 5701 } 5702 if (rack->r_must_retran) { 5703 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { 5704 /* 5705 * We now are beyond the rxt point so lets disable 5706 * the flag. 5707 */ 5708 rack->r_ctl.rc_out_at_rto = 0; 5709 rack->r_must_retran = 0; 5710 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { 5711 /* 5712 * Only decrement the rc_out_at_rto if the cwnd advances 5713 * at least a whole segment. Otherwise next time the peer 5714 * acks, we won't be able to send this generaly happens 5715 * when we are in Congestion Avoidance. 
5716 */ 5717 if (acked <= rack->r_ctl.rc_out_at_rto){ 5718 rack->r_ctl.rc_out_at_rto -= acked; 5719 } else { 5720 rack->r_ctl.rc_out_at_rto = 0; 5721 } 5722 } 5723 } 5724 #ifdef STATS 5725 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); 5726 #endif 5727 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { 5728 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; 5729 } 5730 if ((rack->rc_initial_ss_comp == 0) && 5731 (tp->snd_cwnd >= tp->snd_ssthresh)) { 5732 /* 5733 * The cwnd has grown beyond ssthresh we have 5734 * entered ca and completed our first Slowstart. 5735 */ 5736 rack->rc_initial_ss_comp = 1; 5737 } 5738 } 5739 5740 static void 5741 tcp_rack_partialack(struct tcpcb *tp) 5742 { 5743 struct tcp_rack *rack; 5744 5745 rack = (struct tcp_rack *)tp->t_fb_ptr; 5746 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5747 /* 5748 * If we are doing PRR and have enough 5749 * room to send <or> we are pacing and prr 5750 * is disabled we will want to see if we 5751 * can send data (by setting r_wanted_output to 5752 * true). 5753 */ 5754 if ((rack->r_ctl.rc_prr_sndcnt > 0) || 5755 rack->rack_no_prr) 5756 rack->r_wanted_output = 1; 5757 } 5758 5759 static inline uint64_t 5760 rack_get_rxt_per(uint64_t snds, uint64_t rxts) 5761 { 5762 uint64_t rxt_per; 5763 5764 if (snds > 0) { 5765 rxt_per = rxts * 1000; 5766 rxt_per /= snds; 5767 } else { 5768 /* This is an unlikely path */ 5769 if (rxts) { 5770 /* Its the max it was all re-transmits */ 5771 rxt_per = 0xffffffffffffffff; 5772 } else { 5773 rxt_per = 0; 5774 } 5775 } 5776 return (rxt_per); 5777 } 5778 5779 static void 5780 policer_detection_log(struct tcp_rack *rack, uint32_t flex1, uint32_t flex2, uint32_t flex3, uint32_t flex4, uint8_t flex8) 5781 { 5782 if (tcp_bblogging_on(rack->rc_tp)) { 5783 union tcp_log_stackspecific log; 5784 struct timeval tv; 5785 5786 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5787 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5788 log.u_bbr.flex1 = flex1; 5789 log.u_bbr.flex2 = flex2; 5790 log.u_bbr.flex3 = flex3; 5791 log.u_bbr.flex4 = flex4; 5792 log.u_bbr.flex5 = rack->r_ctl.current_policer_bucket; 5793 log.u_bbr.flex6 = rack->r_ctl.policer_bucket_size; 5794 log.u_bbr.flex7 = 0; 5795 log.u_bbr.flex8 = flex8; 5796 log.u_bbr.bw_inuse = rack->r_ctl.policer_bw; 5797 log.u_bbr.applimited = rack->r_ctl.current_round; 5798 log.u_bbr.epoch = rack->r_ctl.policer_max_seg; 5799 log.u_bbr.delivered = (uint32_t)rack->r_ctl.bytes_acked_in_recovery; 5800 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 5801 log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes; 5802 log.u_bbr.rttProp = rack->r_ctl.gp_bw; 5803 log.u_bbr.bbr_state = rack->rc_policer_detected; 5804 log.u_bbr.bbr_substate = 0; 5805 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 5806 log.u_bbr.use_lt_bw = rack->policer_detect_on; 5807 log.u_bbr.lt_epoch = 0; 5808 log.u_bbr.pkts_out = 0; 5809 tcp_log_event(rack->rc_tp, NULL, NULL, NULL, TCP_POLICER_DET, 0, 5810 0, &log, false, NULL, NULL, 0, &tv); 5811 } 5812 5813 } 5814 5815 static void 5816 policer_detection(struct tcpcb *tp, struct tcp_rack *rack, int post_recovery) 5817 { 5818 /* 5819 * Rack excess rxt accounting is turned on. If we 5820 * are above a threshold of rxt's in at least N 5821 * rounds, then back off the cwnd and ssthresh 5822 * to fit into the long-term b/w. 
5823 */ 5824 5825 uint32_t pkts, mid, med, alt_med, avg, segsiz, tot_retran_pkt_count = 0; 5826 uint32_t cnt_of_mape_rxt = 0; 5827 uint64_t snds, rxts, rxt_per, tim, del, del_bw; 5828 int i; 5829 struct timeval tv; 5830 5831 5832 /* 5833 * First is there enough packets delivered during recovery to make 5834 * a determiniation of b/w? 5835 */ 5836 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 5837 if ((rack->rc_policer_detected == 0) && 5838 (rack->r_ctl.policer_del_mss > 0) && 5839 ((uint32_t)rack->r_ctl.policer_del_mss > ((rack->r_ctl.bytes_acked_in_recovery + segsiz - 1)/segsiz))) { 5840 /* 5841 * Not enough data sent in recovery for initial detection. Once 5842 * we have deteced a policer we allow less than the threshold (polcer_del_mss) 5843 * amount of data in a recovery to let us fall through and double check 5844 * our policer settings and possibly expand or collapse the bucket size and 5845 * the polcier b/w. 5846 * 5847 * Once you are declared to be policed. this block of code cannot be 5848 * reached, instead blocks further down will re-check the policer detection 5849 * triggers and possibly reset the measurements if somehow we have let the 5850 * policer bucket size grow too large. 5851 */ 5852 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5853 policer_detection_log(rack, rack->r_ctl.policer_del_mss, 5854 ((rack->r_ctl.bytes_acked_in_recovery + segsiz - 1)/segsiz), 5855 rack->r_ctl.bytes_acked_in_recovery, segsiz, 18); 5856 } 5857 return; 5858 } 5859 tcp_get_usecs(&tv); 5860 tim = tcp_tv_to_lusectick(&tv) - rack->r_ctl.time_entered_recovery; 5861 del = rack->r_ctl.bytes_acked_in_recovery; 5862 if (tim > 0) 5863 del_bw = (del * (uint64_t)1000000) / tim; 5864 else 5865 del_bw = 0; 5866 /* B/W compensation? */ 5867 5868 if (rack->r_ctl.pol_bw_comp && ((rack->r_ctl.policer_bw > 0) || 5869 (del_bw > 0))) { 5870 /* 5871 * Sanity check now that the data is in. How long does it 5872 * take for us to pace out two of our policer_max_seg's? 5873 * 5874 * If it is longer than the RTT then we are set 5875 * too slow, maybe because of not enough data 5876 * sent during recovery. 5877 */ 5878 uint64_t lentime, res, srtt, max_delbw, alt_bw; 5879 5880 srtt = (uint64_t)rack_grab_rtt(tp, rack); 5881 if ((tp->t_srtt > 0) && (srtt > tp->t_srtt)) 5882 srtt = tp->t_srtt; 5883 lentime = rack->r_ctl.policer_max_seg * (uint64_t)HPTS_USEC_IN_SEC * 2; 5884 if (del_bw > rack->r_ctl.policer_bw) { 5885 max_delbw = del_bw; 5886 } else { 5887 max_delbw = rack->r_ctl.policer_bw; 5888 } 5889 res = lentime / max_delbw; 5890 if ((srtt > 0) && (res > srtt)) { 5891 /* 5892 * At this rate we can not get two policer_maxsegs 5893 * out before the ack arrives back. 5894 * 5895 * Lets at least get it raised up so that 5896 * we can be a bit faster than that if possible. 5897 */ 5898 lentime = (rack->r_ctl.policer_max_seg * 2); 5899 tim = srtt; 5900 alt_bw = (lentime * (uint64_t)HPTS_USEC_IN_SEC) / tim; 5901 if (alt_bw > max_delbw) { 5902 uint64_t cap_alt_bw; 5903 5904 cap_alt_bw = (max_delbw + (max_delbw * rack->r_ctl.pol_bw_comp)); 5905 if ((rack_pol_min_bw > 0) && (cap_alt_bw < rack_pol_min_bw)) { 5906 /* We place a min on the cap which defaults to 1Mbps */ 5907 cap_alt_bw = rack_pol_min_bw; 5908 } 5909 if (alt_bw <= cap_alt_bw) { 5910 /* It should be */ 5911 del_bw = alt_bw; 5912 policer_detection_log(rack, 5913 (uint32_t)tim, 5914 rack->r_ctl.policer_max_seg, 5915 0, 5916 0, 5917 16); 5918 } else { 5919 /* 5920 * This is an odd case where likely the RTT is very very 5921 * low. 
And yet it is still being policed. We don't want 5922 * to get more than (rack_policing_do_bw_comp+1) x del-rate 5923 * where del-rate is what we got in recovery for either the 5924 * first Policer Detection(PD) or this PD we are on now. 5925 */ 5926 del_bw = cap_alt_bw; 5927 policer_detection_log(rack, 5928 (uint32_t)tim, 5929 rack->r_ctl.policer_max_seg, 5930 (uint32_t)max_delbw, 5931 (rack->r_ctl.pol_bw_comp + 1), 5932 16); 5933 } 5934 } 5935 } 5936 } 5937 snds = tp->t_sndbytes - rack->r_ctl.last_policer_sndbytes; 5938 rxts = tp->t_snd_rxt_bytes - rack->r_ctl.last_policer_snd_rxt_bytes; 5939 rxt_per = rack_get_rxt_per(snds, rxts); 5940 /* Figure up the average and median */ 5941 for(i = 0; i < RETRAN_CNT_SIZE; i++) { 5942 if (rack->r_ctl.rc_cnt_of_retran[i] > 0) { 5943 tot_retran_pkt_count += (i + 1) * rack->r_ctl.rc_cnt_of_retran[i]; 5944 cnt_of_mape_rxt += rack->r_ctl.rc_cnt_of_retran[i]; 5945 } 5946 } 5947 if (cnt_of_mape_rxt) 5948 avg = (tot_retran_pkt_count * 10)/cnt_of_mape_rxt; 5949 else 5950 avg = 0; 5951 alt_med = med = 0; 5952 mid = tot_retran_pkt_count/2; 5953 for(i = 0; i < RETRAN_CNT_SIZE; i++) { 5954 pkts = (i + 1) * rack->r_ctl.rc_cnt_of_retran[i]; 5955 if (mid > pkts) { 5956 mid -= pkts; 5957 continue; 5958 } 5959 med = (i + 1); 5960 break; 5961 } 5962 mid = cnt_of_mape_rxt / 2; 5963 for(i = 0; i < RETRAN_CNT_SIZE; i++) { 5964 if (mid > rack->r_ctl.rc_cnt_of_retran[i]) { 5965 mid -= rack->r_ctl.rc_cnt_of_retran[i]; 5966 continue; 5967 } 5968 alt_med = (i + 1); 5969 break; 5970 } 5971 if (rack->r_ctl.policer_alt_median) { 5972 /* Swap the medians */ 5973 uint32_t swap; 5974 5975 swap = med; 5976 med = alt_med; 5977 alt_med = swap; 5978 } 5979 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5980 union tcp_log_stackspecific log; 5981 struct timeval tv; 5982 5983 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5984 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5985 log.u_bbr.flex1 = avg; 5986 log.u_bbr.flex2 = med; 5987 log.u_bbr.flex3 = (uint32_t)rxt_per; 5988 log.u_bbr.flex4 = rack->r_ctl.policer_avg_threshold; 5989 log.u_bbr.flex5 = rack->r_ctl.policer_med_threshold; 5990 log.u_bbr.flex6 = rack->r_ctl.policer_rxt_threshold; 5991 log.u_bbr.flex7 = rack->r_ctl.policer_alt_median; 5992 log.u_bbr.flex8 = 1; 5993 log.u_bbr.delivered = rack->r_ctl.policer_bucket_size; 5994 log.u_bbr.applimited = rack->r_ctl.current_round; 5995 log.u_bbr.epoch = rack->r_ctl.policer_max_seg; 5996 log.u_bbr.bw_inuse = del_bw; 5997 log.u_bbr.cur_del_rate = rxts; 5998 log.u_bbr.delRate = snds; 5999 log.u_bbr.rttProp = rack->r_ctl.gp_bw; 6000 log.u_bbr.bbr_state = rack->rc_policer_detected; 6001 log.u_bbr.bbr_substate = 0; 6002 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 6003 log.u_bbr.use_lt_bw = rack->policer_detect_on; 6004 log.u_bbr.lt_epoch = (uint32_t)tim; 6005 log.u_bbr.pkts_out = rack->r_ctl.bytes_acked_in_recovery; 6006 tcp_log_event(tp, NULL, NULL, NULL, TCP_POLICER_DET, 0, 6007 0, &log, false, NULL, NULL, 0, &tv); 6008 } 6009 if (med == RETRAN_CNT_SIZE) { 6010 /* 6011 * If the median is the maximum, then what we 6012 * likely have here is a network breakage. Either that 6013 * or we are so unlucky that all of our traffic is being 6014 * dropped and having to be retransmitted the maximum times 6015 * and this just is not how a policer works. 6016 * 6017 * If it is truely a policer eventually we will come 6018 * through and it won't be the maximum. 6019 */ 6020 return; 6021 } 6022 /* Has enough rounds progressed for us to re-measure? 
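	 * (rxt_per is retransmitted bytes per 1000 bytes sent since the last
	 * policer measurement; avg is ten times the mean number of retransmissions
	 * per retransmitted packet; med and alt_med are two median estimates of
	 * that count, selectable via policer_alt_median.)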
*/
6023	if ((rxt_per >= (uint64_t)rack->r_ctl.policer_rxt_threshold) &&
6024	    (avg >= rack->r_ctl.policer_avg_threshold) &&
6025	    (med >= rack->r_ctl.policer_med_threshold)) {
6026		/*
6027		 * We hit all thresholds that indicate we are
6028		 * being policed. Now we may be doing this from a rack timeout
6029		 * which then means the rest of recovery will hopefully go
6030		 * smoother as we pace. At the end of recovery we will
6031		 * fall back in here and reset the values using the
6032		 * results of the entire recovery episode (we could also
6033		 * hit this as we exit recovery as well which means only
6034		 * one time in here).
6035		 *
6036		 * This is done explicitly so that if we hit the thresholds
6037		 * again in a second recovery we overwrite the values. We do
6038		 * that because over time, as we pace the policer_bucket_size may
6039		 * continue to grow. This then provides more and more times when
6040		 * we are not pacing to the policer rate. This lets us compensate
6041		 * for when we hit a false positive and those flows continue to
6042		 * increase. However if it's a real policer we will then get over its
6043		 * limit, over time, again and thus end up back here hitting the
6044		 * thresholds again.
6045		 *
6046		 * The alternative to this is to instead whenever we pace due to
6047		 * policing in rack_policed_sending we could add the amount paced (len) to the
6048		 * idle_snd_una value (which decreases the amount in last_amount_before_rec
6049		 * since that is always [th_ack - idle_snd_una]). This would then prevent
6050		 * the policer_bucket_size from growing in additional recovery episodes,
6051		 * which would then mean false positives would be pretty much stuck
6052		 * after things got back to normal (assuming that what caused the
6053		 * false positive was a small network outage).
6054		 *
6055		 */
6056		tcp_trace_point(rack->rc_tp, TCP_TP_POLICER_DET);
6057		if (rack->rc_policer_detected == 0) {
6058			/*
6059			 * Increment the stat that tells us we identified
6060			 * a policer only once. Note that if we ever allow
6061			 * the flag to be cleared (reverted) then we need
6062			 * to adjust this to not do multi-counting.
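			 * (As a result tcp_policer_detected counts connections on which
			 * a policer was detected at least once, not individual detection
			 * events.)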
6063 */ 6064 counter_u64_add(tcp_policer_detected, 1); 6065 } 6066 rack->r_ctl.last_policer_sndbytes = tp->t_sndbytes; 6067 rack->r_ctl.last_policer_snd_rxt_bytes = tp->t_snd_rxt_bytes; 6068 rack->r_ctl.policer_bw = del_bw; 6069 rack->r_ctl.policer_max_seg = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, 6070 rack->r_ctl.policer_bw, 6071 min(ctf_fixed_maxseg(rack->rc_tp), 6072 rack->r_ctl.rc_pace_min_segs), 6073 0, NULL, 6074 NULL, rack->r_ctl.pace_len_divisor); 6075 /* Now what about the policer bucket size */ 6076 rack->r_ctl.policer_bucket_size = rack->r_ctl.last_amount_before_rec; 6077 if (rack->r_ctl.policer_bucket_size < rack->r_ctl.policer_max_seg) { 6078 /* We must be able to send our max-seg or else chaos ensues */ 6079 rack->r_ctl.policer_bucket_size = rack->r_ctl.policer_max_seg * 2; 6080 } 6081 if (rack->rc_policer_detected == 0) 6082 rack->r_ctl.current_policer_bucket = 0; 6083 if (tcp_bblogging_on(rack->rc_tp)) { 6084 union tcp_log_stackspecific log; 6085 struct timeval tv; 6086 6087 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 6088 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 6089 log.u_bbr.flex1 = avg; 6090 log.u_bbr.flex2 = med; 6091 log.u_bbr.flex3 = rxt_per; 6092 log.u_bbr.flex4 = rack->r_ctl.policer_avg_threshold; 6093 log.u_bbr.flex5 = rack->r_ctl.policer_med_threshold; 6094 log.u_bbr.flex6 = rack->r_ctl.policer_rxt_threshold; 6095 log.u_bbr.flex7 = rack->r_ctl.policer_alt_median; 6096 log.u_bbr.flex8 = 2; 6097 log.u_bbr.applimited = rack->r_ctl.current_round; 6098 log.u_bbr.bw_inuse = del_bw; 6099 log.u_bbr.delivered = rack->r_ctl.policer_bucket_size; 6100 log.u_bbr.cur_del_rate = rxts; 6101 log.u_bbr.delRate = snds; 6102 log.u_bbr.rttProp = rack->r_ctl.gp_bw; 6103 log.u_bbr.bbr_state = rack->rc_policer_detected; 6104 log.u_bbr.bbr_substate = 0; 6105 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 6106 log.u_bbr.use_lt_bw = rack->policer_detect_on; 6107 log.u_bbr.epoch = rack->r_ctl.policer_max_seg; 6108 log.u_bbr.lt_epoch = (uint32_t)tim; 6109 log.u_bbr.pkts_out = rack->r_ctl.bytes_acked_in_recovery; 6110 tcp_log_event(tp, NULL, NULL, NULL, TCP_POLICER_DET, 0, 6111 0, &log, false, NULL, NULL, 0, &tv); 6112 /* 6113 * Put out an added log, 19, for the sole purpose 6114 * of getting the txt/rxt so that we can benchmark 6115 * in read-bbrlog the ongoing rxt rate after our 6116 * policer invocation in the HYSTART announcments. 6117 */ 6118 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 6119 log.u_bbr.timeStamp = tcp_tv_to_usectick(&tv); 6120 log.u_bbr.flex1 = alt_med; 6121 log.u_bbr.flex8 = 19; 6122 log.u_bbr.cur_del_rate = tp->t_sndbytes; 6123 log.u_bbr.delRate = tp->t_snd_rxt_bytes; 6124 tcp_log_event(tp, NULL, NULL, NULL, TCP_POLICER_DET, 0, 6125 0, &log, false, NULL, NULL, 0, &tv); 6126 } 6127 /* Turn off any fast output, thats ended */ 6128 rack->r_fast_output = 0; 6129 /* Mark the time for credits */ 6130 rack->r_ctl.last_sendtime = tcp_get_u64_usecs(NULL); 6131 if (rack->r_rr_config < 2) { 6132 /* 6133 * We need to be stricter on the RR config so 6134 * the pacing has priority. 6135 */ 6136 rack->r_rr_config = 2; 6137 } 6138 policer_detection_log(rack, 6139 rack->r_ctl.idle_snd_una, 6140 rack->r_ctl.ack_for_idle, 6141 0, 6142 (uint32_t)tim, 6143 14); 6144 rack->rc_policer_detected = 1; 6145 } else if ((rack->rc_policer_detected == 1) && 6146 (post_recovery == 1)) { 6147 /* 6148 * If we are exiting recovery and have already detected 6149 * we need to possibly update the values. 6150 * 6151 * First: Update the idle -> recovery sent value. 
6152 */ 6153 uint32_t srtt; 6154 6155 if (rack->r_ctl.last_amount_before_rec > rack->r_ctl.policer_bucket_size) { 6156 rack->r_ctl.policer_bucket_size = rack->r_ctl.last_amount_before_rec; 6157 } 6158 srtt = (uint64_t)rack_grab_rtt(tp, rack); 6159 if ((tp->t_srtt > 0) && (srtt > tp->t_srtt)) 6160 srtt = tp->t_srtt; 6161 if ((srtt != 0) && 6162 (tim < (uint64_t)srtt)) { 6163 /* 6164 * Not long enough. 6165 */ 6166 if (rack_verbose_logging) 6167 policer_detection_log(rack, 6168 (uint32_t)tim, 6169 0, 6170 0, 6171 0, 6172 15); 6173 return; 6174 } 6175 /* 6176 * Finally update the b/w if its grown. 6177 */ 6178 if (del_bw > rack->r_ctl.policer_bw) { 6179 rack->r_ctl.policer_bw = del_bw; 6180 rack->r_ctl.policer_max_seg = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, 6181 rack->r_ctl.policer_bw, 6182 min(ctf_fixed_maxseg(rack->rc_tp), 6183 rack->r_ctl.rc_pace_min_segs), 6184 0, NULL, 6185 NULL, rack->r_ctl.pace_len_divisor); 6186 if (rack->r_ctl.policer_bucket_size < rack->r_ctl.policer_max_seg) { 6187 /* We must be able to send our max-seg or else chaos ensues */ 6188 rack->r_ctl.policer_bucket_size = rack->r_ctl.policer_max_seg * 2; 6189 } 6190 } 6191 policer_detection_log(rack, 6192 rack->r_ctl.idle_snd_una, 6193 rack->r_ctl.ack_for_idle, 6194 0, 6195 (uint32_t)tim, 6196 3); 6197 } 6198 } 6199 6200 static void 6201 rack_exit_recovery(struct tcpcb *tp, struct tcp_rack *rack, int how) 6202 { 6203 /* now check with the policer if on */ 6204 if (rack->policer_detect_on == 1) { 6205 policer_detection(tp, rack, 1); 6206 } 6207 /* 6208 * Now exit recovery, note we must do the idle set after the policer_detection 6209 * to get the amount acked prior to recovery correct. 6210 */ 6211 rack->r_ctl.idle_snd_una = tp->snd_una; 6212 EXIT_RECOVERY(tp->t_flags); 6213 } 6214 6215 static void 6216 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack) 6217 { 6218 struct tcp_rack *rack; 6219 uint32_t orig_cwnd; 6220 6221 orig_cwnd = tp->snd_cwnd; 6222 INP_WLOCK_ASSERT(tptoinpcb(tp)); 6223 rack = (struct tcp_rack *)tp->t_fb_ptr; 6224 /* only alert CC if we alerted when we entered */ 6225 if (CC_ALGO(tp)->post_recovery != NULL) { 6226 tp->t_ccv.curack = th_ack; 6227 CC_ALGO(tp)->post_recovery(&tp->t_ccv); 6228 if (tp->snd_cwnd < tp->snd_ssthresh) { 6229 /* 6230 * Rack has burst control and pacing 6231 * so lets not set this any lower than 6232 * snd_ssthresh per RFC-6582 (option 2). 6233 */ 6234 tp->snd_cwnd = tp->snd_ssthresh; 6235 } 6236 } 6237 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 6238 union tcp_log_stackspecific log; 6239 struct timeval tv; 6240 6241 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 6242 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 6243 log.u_bbr.flex1 = th_ack; 6244 log.u_bbr.flex2 = tp->t_ccv.flags; 6245 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 6246 log.u_bbr.flex4 = tp->t_ccv.nsegs; 6247 log.u_bbr.flex5 = V_tcp_abc_l_var; 6248 log.u_bbr.flex6 = orig_cwnd; 6249 log.u_bbr.flex7 = V_tcp_do_newsack; 6250 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 6251 log.u_bbr.flex8 = 2; 6252 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 6253 0, &log, false, NULL, __func__, __LINE__, &tv); 6254 } 6255 if ((rack->rack_no_prr == 0) && 6256 (rack->no_prr_addback == 0) && 6257 (rack->r_ctl.rc_prr_sndcnt > 0)) { 6258 /* 6259 * Suck the next prr cnt back into cwnd, but 6260 * only do that if we are not application limited. 
6261 */ 6262 if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) { 6263 /* 6264 * We are allowed to add back to the cwnd the amount we did 6265 * not get out if: 6266 * a) no_prr_addback is off. 6267 * b) we are not app limited 6268 * c) we are doing prr 6269 * <and> 6270 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 6271 */ 6272 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 6273 rack->r_ctl.rc_prr_sndcnt); 6274 } 6275 rack->r_ctl.rc_prr_sndcnt = 0; 6276 rack_log_to_prr(rack, 1, 0, __LINE__); 6277 } 6278 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 6279 tp->snd_recover = tp->snd_una; 6280 if (rack->r_ctl.dsack_persist) { 6281 rack->r_ctl.dsack_persist--; 6282 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6283 rack->r_ctl.num_dsack = 0; 6284 } 6285 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6286 } 6287 if (rack->rto_from_rec == 1) { 6288 rack->rto_from_rec = 0; 6289 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) 6290 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; 6291 } 6292 rack_exit_recovery(tp, rack, 1); 6293 } 6294 6295 static void 6296 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line) 6297 { 6298 struct tcp_rack *rack; 6299 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 6300 6301 INP_WLOCK_ASSERT(tptoinpcb(tp)); 6302 #ifdef STATS 6303 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 6304 #endif 6305 if (IN_RECOVERY(tp->t_flags) == 0) { 6306 in_rec_at_entry = 0; 6307 ssthresh_enter = tp->snd_ssthresh; 6308 cwnd_enter = tp->snd_cwnd; 6309 } else 6310 in_rec_at_entry = 1; 6311 rack = (struct tcp_rack *)tp->t_fb_ptr; 6312 switch (type) { 6313 case CC_NDUPACK: 6314 tp->t_flags &= ~TF_WASFRECOVERY; 6315 tp->t_flags &= ~TF_WASCRECOVERY; 6316 if (!IN_FASTRECOVERY(tp->t_flags)) { 6317 struct rack_sendmap *rsm; 6318 struct timeval tv; 6319 uint32_t segsiz; 6320 6321 /* Check if this is the end of the initial Start-up i.e. initial slow-start */ 6322 if (rack->rc_initial_ss_comp == 0) { 6323 /* Yep it is the end of the initial slowstart */ 6324 rack->rc_initial_ss_comp = 1; 6325 } 6326 microuptime(&tv); 6327 rack->r_ctl.time_entered_recovery = tcp_tv_to_lusectick(&tv); 6328 if (SEQ_GEQ(ack, tp->snd_una)) { 6329 /* 6330 * The ack is above snd_una. Lets see 6331 * if we can establish a postive distance from 6332 * our idle mark. 6333 */ 6334 rack->r_ctl.ack_for_idle = ack; 6335 if (SEQ_GT(ack, rack->r_ctl.idle_snd_una)) { 6336 rack->r_ctl.last_amount_before_rec = ack - rack->r_ctl.idle_snd_una; 6337 } else { 6338 /* No data thru yet */ 6339 rack->r_ctl.last_amount_before_rec = 0; 6340 } 6341 } else if (SEQ_GT(tp->snd_una, rack->r_ctl.idle_snd_una)) { 6342 /* 6343 * The ack is out of order and behind the snd_una. It may 6344 * have contained SACK information which we processed else 6345 * we would have rejected it. 6346 */ 6347 rack->r_ctl.ack_for_idle = tp->snd_una; 6348 rack->r_ctl.last_amount_before_rec = tp->snd_una - rack->r_ctl.idle_snd_una; 6349 } else { 6350 rack->r_ctl.ack_for_idle = ack; 6351 rack->r_ctl.last_amount_before_rec = 0; 6352 } 6353 if (rack->rc_policer_detected) { 6354 /* 6355 * If we are being policed and we have a loss, it 6356 * means our bucket is now empty. This can happen 6357 * where some other flow on the same host sends 6358 * that this connection is not aware of. 
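 * When that happens we restart the token accounting: the current
 * bucket estimate is zeroed and, if more data made it through before
 * this recovery than the bucket was thought to hold, the bucket size
 * estimate is raised to that amount (the code just below).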
6359 */ 6360 rack->r_ctl.current_policer_bucket = 0; 6361 if (rack_verbose_logging) 6362 policer_detection_log(rack, rack->r_ctl.last_amount_before_rec, 0, 0, 0, 4); 6363 if (rack->r_ctl.last_amount_before_rec > rack->r_ctl.policer_bucket_size) { 6364 rack->r_ctl.policer_bucket_size = rack->r_ctl.last_amount_before_rec; 6365 } 6366 } 6367 memset(rack->r_ctl.rc_cnt_of_retran, 0, sizeof(rack->r_ctl.rc_cnt_of_retran)); 6368 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 6369 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 6370 /* 6371 * Go through the outstanding and re-peg 6372 * any that should have been left in the 6373 * retransmit list (on a double recovery). 6374 */ 6375 if (rsm->r_act_rxt_cnt > 0) { 6376 rack_peg_rxt(rack, rsm, segsiz); 6377 } 6378 } 6379 rack->r_ctl.bytes_acked_in_recovery = 0; 6380 rack->r_ctl.rc_prr_delivered = 0; 6381 rack->r_ctl.rc_prr_out = 0; 6382 rack->r_fast_output = 0; 6383 if (rack->rack_no_prr == 0) { 6384 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 6385 rack_log_to_prr(rack, 2, in_rec_at_entry, line); 6386 } 6387 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 6388 tp->snd_recover = tp->snd_max; 6389 if (tp->t_flags2 & TF2_ECN_PERMIT) 6390 tp->t_flags2 |= TF2_ECN_SND_CWR; 6391 } 6392 break; 6393 case CC_ECN: 6394 if (!IN_CONGRECOVERY(tp->t_flags) || 6395 /* 6396 * Allow ECN reaction on ACK to CWR, if 6397 * that data segment was also CE marked. 6398 */ 6399 SEQ_GEQ(ack, tp->snd_recover)) { 6400 EXIT_CONGRECOVERY(tp->t_flags); 6401 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 6402 rack->r_fast_output = 0; 6403 tp->snd_recover = tp->snd_max + 1; 6404 if (tp->t_flags2 & TF2_ECN_PERMIT) 6405 tp->t_flags2 |= TF2_ECN_SND_CWR; 6406 } 6407 break; 6408 case CC_RTO: 6409 tp->t_dupacks = 0; 6410 tp->t_bytes_acked = 0; 6411 rack->r_fast_output = 0; 6412 if (IN_RECOVERY(tp->t_flags)) 6413 rack_exit_recovery(tp, rack, 2); 6414 rack->r_ctl.bytes_acked_in_recovery = 0; 6415 rack->r_ctl.time_entered_recovery = 0; 6416 orig_cwnd = tp->snd_cwnd; 6417 rack_log_to_prr(rack, 16, orig_cwnd, line); 6418 if (CC_ALGO(tp)->cong_signal == NULL) { 6419 /* TSNH */ 6420 tp->snd_ssthresh = max(2, 6421 min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 6422 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 6423 tp->snd_cwnd = ctf_fixed_maxseg(tp); 6424 } 6425 if (tp->t_flags2 & TF2_ECN_PERMIT) 6426 tp->t_flags2 |= TF2_ECN_SND_CWR; 6427 break; 6428 case CC_RTO_ERR: 6429 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 6430 /* RTO was unnecessary, so reset everything. 
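 * That is, cwnd, ssthresh and snd_recover are restored from their
 * *_prev snapshots and, if the spurious RTO interrupted fast or
 * congestion recovery, that state is re-entered via the
 * TF_WASFRECOVERY/TF_WASCRECOVERY flags.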
*/ 6431 tp->snd_cwnd = tp->snd_cwnd_prev; 6432 tp->snd_ssthresh = tp->snd_ssthresh_prev; 6433 tp->snd_recover = tp->snd_recover_prev; 6434 if (tp->t_flags & TF_WASFRECOVERY) { 6435 ENTER_FASTRECOVERY(tp->t_flags); 6436 tp->t_flags &= ~TF_WASFRECOVERY; 6437 } 6438 if (tp->t_flags & TF_WASCRECOVERY) { 6439 ENTER_CONGRECOVERY(tp->t_flags); 6440 tp->t_flags &= ~TF_WASCRECOVERY; 6441 } 6442 tp->snd_nxt = tp->snd_max; 6443 tp->t_badrxtwin = 0; 6444 break; 6445 } 6446 if ((CC_ALGO(tp)->cong_signal != NULL) && 6447 (type != CC_RTO)){ 6448 tp->t_ccv.curack = ack; 6449 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type); 6450 } 6451 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 6452 rack_log_to_prr(rack, 15, cwnd_enter, line); 6453 rack->r_ctl.dsack_byte_cnt = 0; 6454 rack->r_ctl.retran_during_recovery = 0; 6455 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 6456 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 6457 rack->r_ent_rec_ns = 1; 6458 } 6459 } 6460 6461 static inline void 6462 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 6463 { 6464 uint32_t i_cwnd; 6465 6466 INP_WLOCK_ASSERT(tptoinpcb(tp)); 6467 6468 if (CC_ALGO(tp)->after_idle != NULL) 6469 CC_ALGO(tp)->after_idle(&tp->t_ccv); 6470 6471 if (tp->snd_cwnd == 1) 6472 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 6473 else 6474 i_cwnd = rc_init_window(rack); 6475 6476 /* 6477 * Being idle is no different than the initial window. If the cc 6478 * clamps it down below the initial window raise it to the initial 6479 * window. 6480 */ 6481 if (tp->snd_cwnd < i_cwnd) { 6482 tp->snd_cwnd = i_cwnd; 6483 } 6484 } 6485 6486 /* 6487 * Indicate whether this ack should be delayed. We can delay the ack if 6488 * following conditions are met: 6489 * - There is no delayed ack timer in progress. 6490 * - Our last ack wasn't a 0-sized window. We never want to delay 6491 * the ack that opens up a 0-sized window. 6492 * - LRO wasn't used for this segment. We make sure by checking that the 6493 * segment size is not larger than the MSS. 6494 * - Delayed acks are enabled or this is a half-synchronized T/TCP 6495 * connection. 6496 */ 6497 #define DELAY_ACK(tp, tlen) \ 6498 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 6499 ((tp->t_flags & TF_DELACK) == 0) && \ 6500 (tlen <= tp->t_maxseg) && \ 6501 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 6502 6503 static struct rack_sendmap * 6504 rack_find_lowest_rsm(struct tcp_rack *rack) 6505 { 6506 struct rack_sendmap *rsm; 6507 6508 /* 6509 * Walk the time-order transmitted list looking for an rsm that is 6510 * not acked. This will be the one that was sent the longest time 6511 * ago that is still outstanding. 6512 */ 6513 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 6514 if (rsm->r_flags & RACK_ACKED) { 6515 continue; 6516 } 6517 goto finish; 6518 } 6519 finish: 6520 return (rsm); 6521 } 6522 6523 static struct rack_sendmap * 6524 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm) 6525 { 6526 struct rack_sendmap *prsm; 6527 6528 /* 6529 * Walk the sequence order list backward until we hit and arrive at 6530 * the highest seq not acked. In theory when this is called it 6531 * should be the last segment (which it was not). 
6532 */
6533 prsm = rsm;
6534
6535 TQHASH_FOREACH_REVERSE_FROM(prsm, rack->r_ctl.tqh) {
6536 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
6537 continue;
6538 }
6539 return (prsm);
6540 }
6541 return (NULL);
6542 }
6543
6544 static uint32_t
6545 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts, int line, int log_allowed)
6546 {
6547 int32_t lro;
6548 uint32_t thresh;
6549
6550 /*
6551 * lro is the flag we use to determine if we have seen reordering.
6552 * If it gets set we have seen reordering. The reorder logic either
6553 * works in one of two ways:
6554 *
6555 * If reorder-fade is configured, then we track the last time we saw
6556 * re-ordering occur. If we reach the point where enough time has
6557 * passed we no longer consider reordering to be occurring.
6558 *
6559 * Or if reorder-fade is 0, then once we see reordering we consider
6560 * the connection to always be subject to reordering and just set lro
6561 * to 1.
6562 *
6563 * In the end if lro is non-zero we add the extra time for
6564 * reordering in.
6565 */
6566 if (srtt == 0)
6567 srtt = 1;
6568 if (rack->r_ctl.rc_reorder_ts) {
6569 if (rack->r_ctl.rc_reorder_fade) {
6570 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
6571 lro = cts - rack->r_ctl.rc_reorder_ts;
6572 if (lro == 0) {
6573 /*
6574 * No time has passed since the last
6575 * reorder, mark it as reordering.
6576 */
6577 lro = 1;
6578 }
6579 } else {
6580 /* Negative time? */
6581 lro = 0;
6582 }
6583 if (lro > rack->r_ctl.rc_reorder_fade) {
6584 /* Turn off reordering seen too */
6585 rack->r_ctl.rc_reorder_ts = 0;
6586 lro = 0;
6587 }
6588 } else {
6589 /* Reordering does not fade */
6590 lro = 1;
6591 }
6592 } else {
6593 lro = 0;
6594 }
6595 if (rack->rc_rack_tmr_std_based == 0) {
6596 thresh = srtt + rack->r_ctl.rc_pkt_delay;
6597 } else {
6598 /* Standards based pkt-delay is 1/4 srtt */
6599 thresh = srtt + (srtt >> 2);
6600 }
6601 if (lro && (rack->rc_rack_tmr_std_based == 0)) {
6602 /* It must be set, if not you get 1/4 rtt */
6603 if (rack->r_ctl.rc_reorder_shift)
6604 thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
6605 else
6606 thresh += (srtt >> 2);
6607 }
6608 if (rack->rc_rack_use_dsack &&
6609 lro &&
6610 (rack->r_ctl.num_dsack > 0)) {
6611 /*
6612 * We only increase the reordering window if we
6613 * have seen reordering <and> we have a DSACK count.
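 *
 * Worked example with illustrative numbers (none are defaults):
 * srtt = 40000 usecs, a non std-based timer with rc_pkt_delay = 1000,
 * reordering seen with rc_reorder_shift = 2 and num_dsack = 1 gives
 * thresh = 40000 + 1000 + (40000 >> 2) + 1 * (40000 >> 2) = 61000,
 * which is then capped below at 2 * srtt and at rack_rto_max.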
6614 */ 6615 thresh += rack->r_ctl.num_dsack * (srtt >> 2); 6616 if (log_allowed) 6617 rack_log_dsack_event(rack, 4, line, srtt, thresh); 6618 } 6619 /* SRTT * 2 is the ceiling */ 6620 if (thresh > (srtt * 2)) { 6621 thresh = srtt * 2; 6622 } 6623 /* And we don't want it above the RTO max either */ 6624 if (thresh > rack_rto_max) { 6625 thresh = rack_rto_max; 6626 } 6627 if (log_allowed) 6628 rack_log_dsack_event(rack, 6, line, srtt, thresh); 6629 return (thresh); 6630 } 6631 6632 static uint32_t 6633 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 6634 struct rack_sendmap *rsm, uint32_t srtt) 6635 { 6636 struct rack_sendmap *prsm; 6637 uint32_t thresh, len; 6638 int segsiz; 6639 6640 if (srtt == 0) 6641 srtt = 1; 6642 if (rack->r_ctl.rc_tlp_threshold) 6643 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 6644 else 6645 thresh = (srtt * 2); 6646 6647 /* Get the previous sent packet, if any */ 6648 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 6649 len = rsm->r_end - rsm->r_start; 6650 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 6651 /* Exactly like the ID */ 6652 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { 6653 uint32_t alt_thresh; 6654 /* 6655 * Compensate for delayed-ack with the d-ack time. 6656 */ 6657 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6658 if (alt_thresh > thresh) 6659 thresh = alt_thresh; 6660 } 6661 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 6662 /* 2.1 behavior */ 6663 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 6664 if (prsm && (len <= segsiz)) { 6665 /* 6666 * Two packets outstanding, thresh should be (2*srtt) + 6667 * possible inter-packet delay (if any). 6668 */ 6669 uint32_t inter_gap = 0; 6670 int idx, nidx; 6671 6672 idx = rsm->r_rtr_cnt - 1; 6673 nidx = prsm->r_rtr_cnt - 1; 6674 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 6675 /* Yes it was sent later (or at the same time) */ 6676 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 6677 } 6678 thresh += inter_gap; 6679 } else if (len <= segsiz) { 6680 /* 6681 * Possibly compensate for delayed-ack. 6682 */ 6683 uint32_t alt_thresh; 6684 6685 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6686 if (alt_thresh > thresh) 6687 thresh = alt_thresh; 6688 } 6689 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 6690 /* 2.2 behavior */ 6691 if (len <= segsiz) { 6692 uint32_t alt_thresh; 6693 /* 6694 * Compensate for delayed-ack with the d-ack time. 6695 */ 6696 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6697 if (alt_thresh > thresh) 6698 thresh = alt_thresh; 6699 } 6700 } 6701 /* Not above an RTO */ 6702 if (thresh > tp->t_rxtcur) { 6703 thresh = tp->t_rxtcur; 6704 } 6705 /* Not above a RTO max */ 6706 if (thresh > rack_rto_max) { 6707 thresh = rack_rto_max; 6708 } 6709 /* Apply user supplied min TLP */ 6710 if (thresh < rack_tlp_min) { 6711 thresh = rack_tlp_min; 6712 } 6713 return (thresh); 6714 } 6715 6716 static uint32_t 6717 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 6718 { 6719 /* 6720 * We want the rack_rtt which is the 6721 * last rtt we measured. However if that 6722 * does not exist we fallback to the srtt (which 6723 * we probably will never do) and then as a last 6724 * resort we use RACK_INITIAL_RTO if no srtt is 6725 * yet set. 
6726 */ 6727 if (rack->rc_rack_rtt) 6728 return (rack->rc_rack_rtt); 6729 else if (tp->t_srtt == 0) 6730 return (RACK_INITIAL_RTO); 6731 return (tp->t_srtt); 6732 } 6733 6734 static struct rack_sendmap * 6735 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 6736 { 6737 /* 6738 * Check to see that we don't need to fall into recovery. We will 6739 * need to do so if our oldest transmit is past the time we should 6740 * have had an ack. 6741 */ 6742 struct tcp_rack *rack; 6743 struct rack_sendmap *rsm; 6744 int32_t idx; 6745 uint32_t srtt, thresh; 6746 6747 rack = (struct tcp_rack *)tp->t_fb_ptr; 6748 if (tqhash_empty(rack->r_ctl.tqh)) { 6749 return (NULL); 6750 } 6751 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6752 if (rsm == NULL) 6753 return (NULL); 6754 6755 6756 if (rsm->r_flags & RACK_ACKED) { 6757 rsm = rack_find_lowest_rsm(rack); 6758 if (rsm == NULL) 6759 return (NULL); 6760 } 6761 idx = rsm->r_rtr_cnt - 1; 6762 srtt = rack_grab_rtt(tp, rack); 6763 thresh = rack_calc_thresh_rack(rack, srtt, tsused, __LINE__, 1); 6764 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 6765 return (NULL); 6766 } 6767 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 6768 return (NULL); 6769 } 6770 /* Ok if we reach here we are over-due and this guy can be sent */ 6771 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 6772 return (rsm); 6773 } 6774 6775 static uint32_t 6776 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 6777 { 6778 int32_t t; 6779 int32_t tt; 6780 uint32_t ret_val; 6781 6782 t = (tp->t_srtt + (tp->t_rttvar << 2)); 6783 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 6784 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 6785 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 6786 ret_val = (uint32_t)tt; 6787 return (ret_val); 6788 } 6789 6790 static uint32_t 6791 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 6792 { 6793 /* 6794 * Start the FR timer, we do this based on getting the first one in 6795 * the rc_tmap. Note that if its NULL we must stop the timer. in all 6796 * events we need to stop the running timer (if its running) before 6797 * starting the new one. 6798 */ 6799 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 6800 uint32_t srtt_cur; 6801 int32_t idx; 6802 int32_t is_tlp_timer = 0; 6803 struct rack_sendmap *rsm; 6804 6805 if (rack->t_timers_stopped) { 6806 /* All timers have been stopped none are to run */ 6807 return (0); 6808 } 6809 if (rack->rc_in_persist) { 6810 /* We can't start any timer in persists */ 6811 return (rack_get_persists_timer_val(tp, rack)); 6812 } 6813 rack->rc_on_min_to = 0; 6814 if ((tp->t_state < TCPS_ESTABLISHED) || 6815 (rack->sack_attack_disable > 0) || 6816 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 6817 goto activate_rxt; 6818 } 6819 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6820 if ((rsm == NULL) || sup_rack) { 6821 /* Nothing on the send map or no rack */ 6822 activate_rxt: 6823 time_since_sent = 0; 6824 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6825 if (rsm) { 6826 /* 6827 * Should we discount the RTX timer any? 6828 * 6829 * We want to discount it the smallest amount. 6830 * If a timer (Rack/TLP or RXT) has gone off more 6831 * recently thats the discount we want to use (now - timer time). 6832 * If the retransmit of the oldest packet was more recent then 6833 * we want to use that (now - oldest-packet-last_transmit_time). 
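 * For example (illustrative numbers): with t_rxtcur at 200000 usecs
 * and the later of those two events 60000 usecs ago, the RXT timer
 * below is armed for 200000 - 60000 = 140000 usecs; had more than
 * t_rxtcur already elapsed, rc_min_to would be used instead.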
6834 * 6835 */ 6836 idx = rsm->r_rtr_cnt - 1; 6837 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 6838 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 6839 else 6840 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 6841 if (TSTMP_GT(cts, tstmp_touse)) 6842 time_since_sent = cts - tstmp_touse; 6843 } 6844 if (SEQ_LT(tp->snd_una, tp->snd_max) || 6845 sbavail(&tptosocket(tp)->so_snd)) { 6846 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 6847 to = tp->t_rxtcur; 6848 if (to > time_since_sent) 6849 to -= time_since_sent; 6850 else 6851 to = rack->r_ctl.rc_min_to; 6852 if (to == 0) 6853 to = 1; 6854 /* Special case for KEEPINIT */ 6855 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6856 (TP_KEEPINIT(tp) != 0) && 6857 rsm) { 6858 /* 6859 * We have to put a ceiling on the rxt timer 6860 * of the keep-init timeout. 6861 */ 6862 uint32_t max_time, red; 6863 6864 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 6865 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 6866 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 6867 if (red < max_time) 6868 max_time -= red; 6869 else 6870 max_time = 1; 6871 } 6872 /* Reduce timeout to the keep value if needed */ 6873 if (max_time < to) 6874 to = max_time; 6875 } 6876 return (to); 6877 } 6878 return (0); 6879 } 6880 if (rsm->r_flags & RACK_ACKED) { 6881 rsm = rack_find_lowest_rsm(rack); 6882 if (rsm == NULL) { 6883 /* No lowest? */ 6884 goto activate_rxt; 6885 } 6886 } 6887 if (rack->sack_attack_disable) { 6888 /* 6889 * We don't want to do 6890 * any TLP's if you are an attacker. 6891 * Though if you are doing what 6892 * is expected you may still have 6893 * SACK-PASSED marks. 6894 */ 6895 goto activate_rxt; 6896 } 6897 /* Convert from ms to usecs */ 6898 if ((rsm->r_flags & RACK_SACK_PASSED) || 6899 (rsm->r_flags & RACK_RWND_COLLAPSED) || 6900 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 6901 if ((tp->t_flags & TF_SENTFIN) && 6902 ((tp->snd_max - tp->snd_una) == 1) && 6903 (rsm->r_flags & RACK_HAS_FIN)) { 6904 /* 6905 * We don't start a rack timer if all we have is a 6906 * FIN outstanding. 6907 */ 6908 goto activate_rxt; 6909 } 6910 if ((rack->use_rack_rr == 0) && 6911 (IN_FASTRECOVERY(tp->t_flags)) && 6912 (rack->rack_no_prr == 0) && 6913 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 6914 /* 6915 * We are not cheating, in recovery and 6916 * not enough ack's to yet get our next 6917 * retransmission out. 6918 * 6919 * Note that classified attackers do not 6920 * get to use the rack-cheat. 6921 */ 6922 goto activate_tlp; 6923 } 6924 srtt = rack_grab_rtt(tp, rack); 6925 thresh = rack_calc_thresh_rack(rack, srtt, cts, __LINE__, 1); 6926 idx = rsm->r_rtr_cnt - 1; 6927 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 6928 if (SEQ_GEQ(exp, cts)) { 6929 to = exp - cts; 6930 if (to < rack->r_ctl.rc_min_to) { 6931 to = rack->r_ctl.rc_min_to; 6932 if (rack->r_rr_config == 3) 6933 rack->rc_on_min_to = 1; 6934 } 6935 } else { 6936 to = rack->r_ctl.rc_min_to; 6937 if (rack->r_rr_config == 3) 6938 rack->rc_on_min_to = 1; 6939 } 6940 } else { 6941 /* Ok we need to do a TLP not RACK */ 6942 activate_tlp: 6943 if ((rack->rc_tlp_in_progress != 0) && 6944 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 6945 /* 6946 * The previous send was a TLP and we have sent 6947 * N TLP's without sending new data. 6948 */ 6949 goto activate_rxt; 6950 } 6951 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 6952 if (rsm == NULL) { 6953 /* We found no rsm to TLP with. 
*/ 6954 goto activate_rxt; 6955 } 6956 if (rsm->r_flags & RACK_HAS_FIN) { 6957 /* If its a FIN we dont do TLP */ 6958 rsm = NULL; 6959 goto activate_rxt; 6960 } 6961 idx = rsm->r_rtr_cnt - 1; 6962 time_since_sent = 0; 6963 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 6964 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 6965 else 6966 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 6967 if (TSTMP_GT(cts, tstmp_touse)) 6968 time_since_sent = cts - tstmp_touse; 6969 is_tlp_timer = 1; 6970 if (tp->t_srtt) { 6971 if ((rack->rc_srtt_measure_made == 0) && 6972 (tp->t_srtt == 1)) { 6973 /* 6974 * If another stack as run and set srtt to 1, 6975 * then the srtt was 0, so lets use the initial. 6976 */ 6977 srtt = RACK_INITIAL_RTO; 6978 } else { 6979 srtt_cur = tp->t_srtt; 6980 srtt = srtt_cur; 6981 } 6982 } else 6983 srtt = RACK_INITIAL_RTO; 6984 /* 6985 * If the SRTT is not keeping up and the 6986 * rack RTT has spiked we want to use 6987 * the last RTT not the smoothed one. 6988 */ 6989 if (rack_tlp_use_greater && 6990 tp->t_srtt && 6991 (srtt < rack_grab_rtt(tp, rack))) { 6992 srtt = rack_grab_rtt(tp, rack); 6993 } 6994 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 6995 if (thresh > time_since_sent) { 6996 to = thresh - time_since_sent; 6997 } else { 6998 to = rack->r_ctl.rc_min_to; 6999 rack_log_alt_to_to_cancel(rack, 7000 thresh, /* flex1 */ 7001 time_since_sent, /* flex2 */ 7002 tstmp_touse, /* flex3 */ 7003 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 7004 (uint32_t)rsm->r_tim_lastsent[idx], 7005 srtt, 7006 idx, 99); 7007 } 7008 if (to < rack_tlp_min) { 7009 to = rack_tlp_min; 7010 } 7011 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) { 7012 /* 7013 * If the TLP time works out to larger than the max 7014 * RTO lets not do TLP.. just RTO. 7015 */ 7016 goto activate_rxt; 7017 } 7018 } 7019 if (is_tlp_timer == 0) { 7020 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 7021 } else { 7022 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 7023 } 7024 if (to == 0) 7025 to = 1; 7026 return (to); 7027 } 7028 7029 static void 7030 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, tcp_seq snd_una) 7031 { 7032 if (rack->rc_in_persist == 0) { 7033 if (tp->t_flags & TF_GPUTINPROG) { 7034 /* 7035 * Stop the goodput now, the calling of the 7036 * measurement function clears the flag. 
7037 */ 7038 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, 7039 RACK_QUALITY_PERSIST); 7040 } 7041 #ifdef NETFLIX_SHARED_CWND 7042 if (rack->r_ctl.rc_scw) { 7043 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 7044 rack->rack_scwnd_is_idle = 1; 7045 } 7046 #endif 7047 rack->r_ctl.rc_went_idle_time = cts; 7048 if (rack->r_ctl.rc_went_idle_time == 0) 7049 rack->r_ctl.rc_went_idle_time = 1; 7050 if (rack->lt_bw_up) { 7051 /* Suspend our LT BW measurement */ 7052 uint64_t tmark; 7053 7054 rack->r_ctl.lt_bw_bytes += (snd_una - rack->r_ctl.lt_seq); 7055 rack->r_ctl.lt_seq = snd_una; 7056 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 7057 if (tmark >= rack->r_ctl.lt_timemark) { 7058 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 7059 } 7060 rack->r_ctl.lt_timemark = tmark; 7061 rack->lt_bw_up = 0; 7062 rack->r_persist_lt_bw_off = 1; 7063 } 7064 rack_timer_cancel(tp, rack, cts, __LINE__); 7065 rack->r_ctl.persist_lost_ends = 0; 7066 rack->probe_not_answered = 0; 7067 rack->forced_ack = 0; 7068 tp->t_rxtshift = 0; 7069 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7070 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 7071 rack->rc_in_persist = 1; 7072 } 7073 } 7074 7075 static void 7076 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7077 { 7078 if (tcp_in_hpts(rack->rc_tp)) { 7079 tcp_hpts_remove(rack->rc_tp); 7080 rack->r_ctl.rc_hpts_flags = 0; 7081 } 7082 #ifdef NETFLIX_SHARED_CWND 7083 if (rack->r_ctl.rc_scw) { 7084 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 7085 rack->rack_scwnd_is_idle = 0; 7086 } 7087 #endif 7088 if (rack->rc_gp_dyn_mul && 7089 (rack->use_fixed_rate == 0) && 7090 (rack->rc_always_pace)) { 7091 /* 7092 * Do we count this as if a probe-rtt just 7093 * finished? 7094 */ 7095 uint32_t time_idle, idle_min; 7096 7097 time_idle = cts - rack->r_ctl.rc_went_idle_time; 7098 idle_min = rack_min_probertt_hold; 7099 if (rack_probertt_gpsrtt_cnt_div) { 7100 uint64_t extra; 7101 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 7102 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 7103 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 7104 idle_min += (uint32_t)extra; 7105 } 7106 if (time_idle >= idle_min) { 7107 /* Yes, we count it as a probe-rtt. 
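 * Illustrative numbers (the sysctl values are assumptions): with
 * rack_min_probertt_hold at 200000 usecs, rc_gp_srtt at 50000 and a
 * gpsrtt cnt mul/div of 4/1, idle_min becomes 200000 + 200000 =
 * 400000 usecs, so only an idle gap at least that long is treated as
 * a completed probe-rtt.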
*/ 7108 uint32_t us_cts; 7109 7110 us_cts = tcp_get_usecs(NULL); 7111 if (rack->in_probe_rtt == 0) { 7112 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 7113 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 7114 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 7115 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 7116 } else { 7117 rack_exit_probertt(rack, us_cts); 7118 } 7119 } 7120 } 7121 if (rack->r_persist_lt_bw_off) { 7122 /* Continue where we left off */ 7123 rack->r_ctl.lt_timemark = tcp_get_u64_usecs(NULL); 7124 rack->lt_bw_up = 1; 7125 rack->r_persist_lt_bw_off = 0; 7126 } 7127 rack->r_ctl.idle_snd_una = tp->snd_una; 7128 rack->rc_in_persist = 0; 7129 rack->r_ctl.rc_went_idle_time = 0; 7130 tp->t_rxtshift = 0; 7131 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7132 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 7133 rack->r_ctl.rc_agg_delayed = 0; 7134 rack->r_early = 0; 7135 rack->r_late = 0; 7136 rack->r_ctl.rc_agg_early = 0; 7137 } 7138 7139 static void 7140 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 7141 struct hpts_diag *diag, struct timeval *tv) 7142 { 7143 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 7144 union tcp_log_stackspecific log; 7145 7146 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 7147 log.u_bbr.flex1 = diag->p_nxt_slot; 7148 log.u_bbr.flex2 = diag->p_cur_slot; 7149 log.u_bbr.flex3 = diag->slot_req; 7150 log.u_bbr.flex4 = diag->inp_hptsslot; 7151 log.u_bbr.flex5 = diag->slot_remaining; 7152 log.u_bbr.flex6 = diag->need_new_to; 7153 log.u_bbr.flex7 = diag->p_hpts_active; 7154 log.u_bbr.flex8 = diag->p_on_min_sleep; 7155 /* Hijack other fields as needed */ 7156 log.u_bbr.epoch = diag->have_slept; 7157 log.u_bbr.lt_epoch = diag->yet_to_sleep; 7158 log.u_bbr.pkts_out = diag->co_ret; 7159 log.u_bbr.applimited = diag->hpts_sleep_time; 7160 log.u_bbr.delivered = diag->p_prev_slot; 7161 log.u_bbr.inflight = diag->p_runningslot; 7162 log.u_bbr.bw_inuse = diag->wheel_slot; 7163 log.u_bbr.rttProp = diag->wheel_cts; 7164 log.u_bbr.timeStamp = cts; 7165 log.u_bbr.delRate = diag->maxslots; 7166 log.u_bbr.cur_del_rate = diag->p_curtick; 7167 log.u_bbr.cur_del_rate <<= 32; 7168 log.u_bbr.cur_del_rate |= diag->p_lasttick; 7169 TCP_LOG_EVENTP(rack->rc_tp, NULL, 7170 &rack->rc_inp->inp_socket->so_rcv, 7171 &rack->rc_inp->inp_socket->so_snd, 7172 BBR_LOG_HPTSDIAG, 0, 7173 0, &log, false, tv); 7174 } 7175 7176 } 7177 7178 static void 7179 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 7180 { 7181 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 7182 union tcp_log_stackspecific log; 7183 struct timeval tv; 7184 7185 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 7186 log.u_bbr.flex1 = sb->sb_flags; 7187 log.u_bbr.flex2 = len; 7188 log.u_bbr.flex3 = sb->sb_state; 7189 log.u_bbr.flex8 = type; 7190 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 7191 TCP_LOG_EVENTP(rack->rc_tp, NULL, 7192 &rack->rc_inp->inp_socket->so_rcv, 7193 &rack->rc_inp->inp_socket->so_snd, 7194 TCP_LOG_SB_WAKE, 0, 7195 len, &log, false, &tv); 7196 } 7197 } 7198 7199 static void 7200 rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 7201 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 7202 { 7203 struct hpts_diag diag; 7204 struct inpcb *inp = tptoinpcb(tp); 7205 struct timeval tv; 7206 uint32_t delayed_ack = 0; 7207 uint32_t hpts_timeout; 7208 uint32_t entry_slot = slot; 7209 uint8_t stopped; 7210 uint32_t left = 0; 7211 
uint32_t us_cts; 7212 7213 if ((tp->t_state == TCPS_CLOSED) || 7214 (tp->t_state == TCPS_LISTEN)) { 7215 return; 7216 } 7217 if (tcp_in_hpts(tp)) { 7218 /* Already on the pacer */ 7219 return; 7220 } 7221 stopped = rack->rc_tmr_stopped; 7222 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 7223 left = rack->r_ctl.rc_timer_exp - cts; 7224 } 7225 rack->r_ctl.rc_timer_exp = 0; 7226 rack->r_ctl.rc_hpts_flags = 0; 7227 us_cts = tcp_get_usecs(&tv); 7228 /* Now early/late accounting */ 7229 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0); 7230 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { 7231 /* 7232 * We have a early carry over set, 7233 * we can always add more time so we 7234 * can always make this compensation. 7235 * 7236 * Note if ack's are allowed to wake us do not 7237 * penalize the next timer for being awoke 7238 * by an ack aka the rc_agg_early (non-paced mode). 7239 */ 7240 slot += rack->r_ctl.rc_agg_early; 7241 rack->r_early = 0; 7242 rack->r_ctl.rc_agg_early = 0; 7243 } 7244 if ((rack->r_late) && 7245 ((rack->r_use_hpts_min == 0) || (rack->dgp_on == 0))) { 7246 /* 7247 * This is harder, we can 7248 * compensate some but it 7249 * really depends on what 7250 * the current pacing time is. 7251 */ 7252 if (rack->r_ctl.rc_agg_delayed >= slot) { 7253 /* 7254 * We can't compensate for it all. 7255 * And we have to have some time 7256 * on the clock. We always have a min 7257 * 10 slots (10 x 10 i.e. 100 usecs). 7258 */ 7259 if (slot <= HPTS_TICKS_PER_SLOT) { 7260 /* We gain delay */ 7261 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); 7262 slot = HPTS_TICKS_PER_SLOT; 7263 } else { 7264 /* We take off some */ 7265 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); 7266 slot = HPTS_TICKS_PER_SLOT; 7267 } 7268 } else { 7269 slot -= rack->r_ctl.rc_agg_delayed; 7270 rack->r_ctl.rc_agg_delayed = 0; 7271 /* Make sure we have 100 useconds at minimum */ 7272 if (slot < HPTS_TICKS_PER_SLOT) { 7273 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; 7274 slot = HPTS_TICKS_PER_SLOT; 7275 } 7276 if (rack->r_ctl.rc_agg_delayed == 0) 7277 rack->r_late = 0; 7278 } 7279 } else if (rack->r_late) { 7280 /* r_use_hpts_min is on and so is DGP */ 7281 uint32_t max_red; 7282 7283 max_red = (slot * rack->r_ctl.max_reduction) / 100; 7284 if (max_red >= rack->r_ctl.rc_agg_delayed) { 7285 slot -= rack->r_ctl.rc_agg_delayed; 7286 rack->r_ctl.rc_agg_delayed = 0; 7287 } else { 7288 slot -= max_red; 7289 rack->r_ctl.rc_agg_delayed -= max_red; 7290 } 7291 } 7292 if ((rack->r_use_hpts_min == 1) && 7293 (slot > 0) && 7294 (rack->dgp_on == 1)) { 7295 /* 7296 * We are enforcing a min pacing timer 7297 * based on our hpts min timeout. 7298 */ 7299 uint32_t min; 7300 7301 min = get_hpts_min_sleep_time(); 7302 if (min > slot) { 7303 slot = min; 7304 } 7305 } 7306 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 7307 #ifdef TCP_SAD_DETECTION 7308 if (rack->sack_attack_disable && 7309 (rack->r_ctl.ack_during_sd > 0) && 7310 (slot < tcp_sad_pacing_interval)) { 7311 /* 7312 * We have a potential attacker on 7313 * the line. We have possibly some 7314 * (or now) pacing time set. We want to 7315 * slow down the processing of sacks by some 7316 * amount (if it is an attacker). Set the default 7317 * slot for attackers in place (unless the original 7318 * interval is longer). Its stored in 7319 * micro-seconds, so lets convert to msecs. 
7320 */ 7321 slot = tcp_sad_pacing_interval; 7322 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv, __LINE__); 7323 rack->r_ctl.ack_during_sd = 0; 7324 } 7325 #endif 7326 if (tp->t_flags & TF_DELACK) { 7327 delayed_ack = TICKS_2_USEC(tcp_delacktime); 7328 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 7329 } 7330 if (delayed_ack && ((hpts_timeout == 0) || 7331 (delayed_ack < hpts_timeout))) 7332 hpts_timeout = delayed_ack; 7333 else 7334 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 7335 /* 7336 * If no timers are going to run and we will fall off the hptsi 7337 * wheel, we resort to a keep-alive timer if its configured. 7338 */ 7339 if ((hpts_timeout == 0) && 7340 (slot == 0)) { 7341 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 7342 (tp->t_state <= TCPS_CLOSING)) { 7343 /* 7344 * Ok we have no timer (persists, rack, tlp, rxt or 7345 * del-ack), we don't have segments being paced. So 7346 * all that is left is the keepalive timer. 7347 */ 7348 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 7349 /* Get the established keep-alive time */ 7350 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 7351 } else { 7352 /* 7353 * Get the initial setup keep-alive time, 7354 * note that this is probably not going to 7355 * happen, since rack will be running a rxt timer 7356 * if a SYN of some sort is outstanding. It is 7357 * actually handled in rack_timeout_rxt(). 7358 */ 7359 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 7360 } 7361 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 7362 if (rack->in_probe_rtt) { 7363 /* 7364 * We want to instead not wake up a long time from 7365 * now but to wake up about the time we would 7366 * exit probe-rtt and initiate a keep-alive ack. 7367 * This will get us out of probe-rtt and update 7368 * our min-rtt. 7369 */ 7370 hpts_timeout = rack_min_probertt_hold; 7371 } 7372 } 7373 } 7374 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 7375 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 7376 /* 7377 * RACK, TLP, persists and RXT timers all are restartable 7378 * based on actions input .. i.e we received a packet (ack 7379 * or sack) and that changes things (rw, or snd_una etc). 7380 * Thus we can restart them with a new value. For 7381 * keep-alive, delayed_ack we keep track of what was left 7382 * and restart the timer with a smaller value. 7383 */ 7384 if (left < hpts_timeout) 7385 hpts_timeout = left; 7386 } 7387 if (hpts_timeout) { 7388 /* 7389 * Hack alert for now we can't time-out over 2,147,483 7390 * seconds (a bit more than 596 hours), which is probably ok 7391 * :). 7392 */ 7393 if (hpts_timeout > 0x7ffffffe) 7394 hpts_timeout = 0x7ffffffe; 7395 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 7396 } 7397 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); 7398 if ((rack->gp_ready == 0) && 7399 (rack->use_fixed_rate == 0) && 7400 (hpts_timeout < slot) && 7401 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 7402 /* 7403 * We have no good estimate yet for the 7404 * old clunky burst mitigation or the 7405 * real pacing. And the tlp or rxt is smaller 7406 * than the pacing calculation. Lets not 7407 * pace that long since we know the calculation 7408 * so far is not accurate. 7409 */ 7410 slot = hpts_timeout; 7411 } 7412 /** 7413 * Turn off all the flags for queuing by default. The 7414 * flags have important meanings to what happens when 7415 * LRO interacts with the transport. Most likely (by default now) 7416 * mbuf_queueing and ack compression are on. 
So the transport 7417 * has a couple of flags that control what happens (if those 7418 * are not on then these flags won't have any effect since it 7419 * won't go through the queuing LRO path). 7420 * 7421 * TF2_MBUF_QUEUE_READY - This flags says that I am busy 7422 * pacing output, so don't disturb. But 7423 * it also means LRO can wake me if there 7424 * is a SACK arrival. 7425 * 7426 * TF2_DONT_SACK_QUEUE - This flag is used in conjunction 7427 * with the above flag (QUEUE_READY) and 7428 * when present it says don't even wake me 7429 * if a SACK arrives. 7430 * 7431 * The idea behind these flags is that if we are pacing we 7432 * set the MBUF_QUEUE_READY and only get woken up if 7433 * a SACK arrives (which could change things) or if 7434 * our pacing timer expires. If, however, we have a rack 7435 * timer running, then we don't even want a sack to wake 7436 * us since the rack timer has to expire before we can send. 7437 * 7438 * Other cases should usually have none of the flags set 7439 * so LRO can call into us. 7440 */ 7441 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE|TF2_MBUF_QUEUE_READY); 7442 if (slot) { 7443 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 7444 rack->r_ctl.rc_last_output_to = us_cts + slot; 7445 /* 7446 * A pacing timer (slot) is being set, in 7447 * such a case we cannot send (we are blocked by 7448 * the timer). So lets tell LRO that it should not 7449 * wake us unless there is a SACK. Note this only 7450 * will be effective if mbuf queueing is on or 7451 * compressed acks are being processed. 7452 */ 7453 tp->t_flags2 |= TF2_MBUF_QUEUE_READY; 7454 /* 7455 * But wait if we have a Rack timer running 7456 * even a SACK should not disturb us (with 7457 * the exception of r_rr_config 3). 7458 */ 7459 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) || 7460 (IN_RECOVERY(tp->t_flags))) { 7461 if (rack->r_rr_config != 3) 7462 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; 7463 else if (rack->rc_pace_dnd) { 7464 /* 7465 * When DND is on, we only let a sack 7466 * interrupt us if we are not in recovery. 7467 * 7468 * If DND is off, then we never hit here 7469 * and let all sacks wake us up. 7470 * 7471 */ 7472 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; 7473 } 7474 } 7475 /* For sack attackers we want to ignore sack */ 7476 if (rack->sack_attack_disable == 1) { 7477 tp->t_flags2 |= (TF2_DONT_SACK_QUEUE | 7478 TF2_MBUF_QUEUE_READY); 7479 } else if (rack->rc_ack_can_sendout_data) { 7480 /* 7481 * Ahh but wait, this is that special case 7482 * where the pacing timer can be disturbed 7483 * backout the changes (used for non-paced 7484 * burst limiting). 7485 */ 7486 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE | 7487 TF2_MBUF_QUEUE_READY); 7488 } 7489 if ((rack->use_rack_rr) && 7490 (rack->r_rr_config < 2) && 7491 ((hpts_timeout) && (hpts_timeout < slot))) { 7492 /* 7493 * Arrange for the hpts to kick back in after the 7494 * t-o if the t-o does not cause a send. 7495 */ 7496 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout), 7497 __LINE__, &diag); 7498 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 7499 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 7500 } else { 7501 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(slot), 7502 __LINE__, &diag); 7503 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 7504 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 7505 } 7506 } else if (hpts_timeout) { 7507 /* 7508 * With respect to t_flags2(?) here, lets let any new acks wake 7509 * us up here. Since we are not pacing (no pacing timer), output 7510 * can happen so we should let it. 
If its a Rack timer, then any inbound 7511 * packet probably won't change the sending (we will be blocked) 7512 * but it may change the prr stats so letting it in (the set defaults 7513 * at the start of this block) are good enough. 7514 */ 7515 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 7516 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout), 7517 __LINE__, &diag); 7518 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 7519 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 7520 } else { 7521 /* No timer starting */ 7522 #ifdef INVARIANTS 7523 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 7524 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 7525 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 7526 } 7527 #endif 7528 } 7529 rack->rc_tmr_stopped = 0; 7530 if (slot) 7531 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv, __LINE__); 7532 } 7533 7534 static void 7535 rack_mark_lost(struct tcpcb *tp, 7536 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts) 7537 { 7538 struct rack_sendmap *nrsm; 7539 uint32_t thresh, exp; 7540 7541 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(tp, rack), cts, __LINE__, 0); 7542 nrsm = rsm; 7543 TAILQ_FOREACH_FROM(nrsm, &rack->r_ctl.rc_tmap, r_tnext) { 7544 if ((nrsm->r_flags & RACK_SACK_PASSED) == 0) { 7545 /* Got up to all that were marked sack-passed */ 7546 break; 7547 } 7548 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { 7549 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; 7550 if (TSTMP_LT(exp, cts) || (exp == cts)) { 7551 /* We now consider it lost */ 7552 nrsm->r_flags |= RACK_WAS_LOST; 7553 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; 7554 } else { 7555 /* Past here it won't be lost so stop */ 7556 break; 7557 } 7558 } 7559 } 7560 } 7561 7562 /* 7563 * RACK Timer, here we simply do logging and house keeping. 7564 * the normal rack_output() function will call the 7565 * appropriate thing to check if we need to do a RACK retransmit. 7566 * We return 1, saying don't proceed with rack_output only 7567 * when all timers have been stopped (destroyed PCB?). 7568 */ 7569 static int 7570 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7571 { 7572 /* 7573 * This timer simply provides an internal trigger to send out data. 7574 * The check_recovery_mode call will see if there are needed 7575 * retransmissions, if so we will enter fast-recovery. The output 7576 * call may or may not do the same thing depending on sysctl 7577 * settings. 7578 */ 7579 struct rack_sendmap *rsm; 7580 7581 counter_u64_add(rack_to_tot, 1); 7582 if (rack->r_state && (rack->r_state != tp->t_state)) 7583 rack_set_state(tp, rack); 7584 rack->rc_on_min_to = 0; 7585 rsm = rack_check_recovery_mode(tp, cts); 7586 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 7587 if (rsm) { 7588 /* We need to stroke any lost that are now declared as lost */ 7589 rack_mark_lost(tp, rack, rsm, cts); 7590 rack->r_ctl.rc_resend = rsm; 7591 rack->r_timer_override = 1; 7592 if (rack->use_rack_rr) { 7593 /* 7594 * Don't accumulate extra pacing delay 7595 * we are allowing the rack timer to 7596 * over-ride pacing i.e. rrr takes precedence 7597 * if the pacing interval is longer than the rrr 7598 * time (in other words we get the min pacing 7599 * time versus rrr pacing time). 
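 * Clearing PACE_PKT_OUTPUT below drops the pending pacing slot, so
 * the queued rack retransmission is not held to the remainder of the
 * pacing interval.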
7600 */
7601 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
7602 }
7603 }
7604 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK;
7605 if (rsm == NULL) {
7606 /* restart a timer and return 1 */
7607 rack_start_hpts_timer(rack, tp, cts,
7608 0, 0, 0);
7609 return (1);
7610 }
7611 if ((rack->policer_detect_on == 1) &&
7612 (rack->rc_policer_detected == 0)) {
7613 /*
7614 * We do this early, if we have not yet
7615 * detected a policer, to attempt to detect
7616 * one quicker. Normally we want to do this
7617 * as recovery exits (and we will again).
7618 */
7619 policer_detection(tp, rack, 0);
7620 }
7621 return (0);
7622 }
7623
7624
7625
7626 static void
7627 rack_adjust_orig_mlen(struct rack_sendmap *rsm)
7628 {
7629
7630 if ((M_TRAILINGROOM(rsm->m) != rsm->orig_t_space)) {
7631 /*
7632 * The trailing space changed; mbufs can grow
7633 * at the tail but they can't shrink from
7634 * it, KASSERT that. Adjust the orig_m_len to
7635 * compensate for this change.
7636 */
7637 KASSERT((rsm->orig_t_space > M_TRAILINGROOM(rsm->m)),
7638 ("mbuf:%p rsm:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n",
7639 rsm->m,
7640 rsm,
7641 (intmax_t)M_TRAILINGROOM(rsm->m),
7642 rsm->orig_t_space,
7643 rsm->orig_m_len,
7644 rsm->m->m_len));
7645 rsm->orig_m_len += (rsm->orig_t_space - M_TRAILINGROOM(rsm->m));
7646 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
7647 }
7648 if (rsm->m->m_len < rsm->orig_m_len) {
7649 /*
7650 * Mbuf shrank, trimmed off the top by an ack, our
7651 * offset changes.
7652 */
7653 KASSERT((rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)),
7654 ("mbuf:%p len:%u rsm:%p oml:%u soff:%u\n",
7655 rsm->m, rsm->m->m_len,
7656 rsm, rsm->orig_m_len,
7657 rsm->soff));
7658 if (rsm->soff >= (rsm->orig_m_len - rsm->m->m_len))
7659 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len);
7660 else
7661 rsm->soff = 0;
7662 rsm->orig_m_len = rsm->m->m_len;
7663 #ifdef INVARIANTS
7664 } else if (rsm->m->m_len > rsm->orig_m_len) {
7665 panic("rsm:%p m:%p m_len grew outside of t_space compensation",
7666 rsm, rsm->m);
7667 #endif
7668 }
7669 }
7670
7671 static void
7672 rack_setup_offset_for_rsm(struct tcp_rack *rack, struct rack_sendmap *src_rsm, struct rack_sendmap *rsm)
7673 {
7674 struct mbuf *m;
7675 uint32_t soff;
7676
7677 if (src_rsm->m &&
7678 ((src_rsm->orig_m_len != src_rsm->m->m_len) ||
7679 (M_TRAILINGROOM(src_rsm->m) != src_rsm->orig_t_space))) {
7680 /* Fix up the orig_m_len and possibly the mbuf offset */
7681 rack_adjust_orig_mlen(src_rsm);
7682 }
7683 m = src_rsm->m;
7684 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start);
7685 while (soff >= m->m_len) {
7686 /* Move out past this mbuf */
7687 soff -= m->m_len;
7688 m = m->m_next;
7689 KASSERT((m != NULL),
7690 ("rsm:%p nrsm:%p hit at soff:%u null m",
7691 src_rsm, rsm, soff));
7692 if (m == NULL) {
7693 /* This should *not* happen, which is why there is a kassert */
7694 src_rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd,
7695 (src_rsm->r_start - rack->rc_tp->snd_una),
7696 &src_rsm->soff);
7697 src_rsm->orig_m_len = src_rsm->m->m_len;
7698 src_rsm->orig_t_space = M_TRAILINGROOM(src_rsm->m);
7699 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd,
7700 (rsm->r_start - rack->rc_tp->snd_una),
7701 &rsm->soff);
7702 rsm->orig_m_len = rsm->m->m_len;
7703 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
7704 return;
7705 }
7706 }
7707 rsm->m = m;
7708 rsm->soff = soff;
7709 rsm->orig_m_len = m->m_len;
7710 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
7711 }
7712
7713 static __inline void
7714 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm,
7715 struct
rack_sendmap *rsm, uint32_t start) 7716 { 7717 int idx; 7718 7719 nrsm->r_start = start; 7720 nrsm->r_end = rsm->r_end; 7721 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 7722 nrsm->r_act_rxt_cnt = rsm->r_act_rxt_cnt; 7723 nrsm->r_flags = rsm->r_flags; 7724 nrsm->r_dupack = rsm->r_dupack; 7725 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; 7726 nrsm->r_rtr_bytes = 0; 7727 nrsm->r_fas = rsm->r_fas; 7728 nrsm->r_bas = rsm->r_bas; 7729 tqhash_update_end(rack->r_ctl.tqh, rsm, nrsm->r_start); 7730 nrsm->r_just_ret = rsm->r_just_ret; 7731 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 7732 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 7733 } 7734 /* Now if we have SYN flag we keep it on the left edge */ 7735 if (nrsm->r_flags & RACK_HAS_SYN) 7736 nrsm->r_flags &= ~RACK_HAS_SYN; 7737 /* Now if we have a FIN flag we keep it on the right edge */ 7738 if (rsm->r_flags & RACK_HAS_FIN) 7739 rsm->r_flags &= ~RACK_HAS_FIN; 7740 /* Push bit must go to the right edge as well */ 7741 if (rsm->r_flags & RACK_HAD_PUSH) 7742 rsm->r_flags &= ~RACK_HAD_PUSH; 7743 /* Clone over the state of the hw_tls flag */ 7744 nrsm->r_hw_tls = rsm->r_hw_tls; 7745 /* 7746 * Now we need to find nrsm's new location in the mbuf chain 7747 * we basically calculate a new offset, which is soff + 7748 * how much is left in original rsm. Then we walk out the mbuf 7749 * chain to find the righ position, it may be the same mbuf 7750 * or maybe not. 7751 */ 7752 KASSERT(((rsm->m != NULL) || 7753 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 7754 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 7755 if (rsm->m) 7756 rack_setup_offset_for_rsm(rack, rsm, nrsm); 7757 } 7758 7759 static struct rack_sendmap * 7760 rack_merge_rsm(struct tcp_rack *rack, 7761 struct rack_sendmap *l_rsm, 7762 struct rack_sendmap *r_rsm) 7763 { 7764 /* 7765 * We are merging two ack'd RSM's, 7766 * the l_rsm is on the left (lower seq 7767 * values) and the r_rsm is on the right 7768 * (higher seq value). The simplest way 7769 * to merge these is to move the right 7770 * one into the left. I don't think there 7771 * is any reason we need to try to find 7772 * the oldest (or last oldest retransmitted). 7773 */ 7774 rack_log_map_chg(rack->rc_tp, rack, NULL, 7775 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 7776 tqhash_update_end(rack->r_ctl.tqh, l_rsm, r_rsm->r_end); 7777 if (l_rsm->r_dupack < r_rsm->r_dupack) 7778 l_rsm->r_dupack = r_rsm->r_dupack; 7779 if (r_rsm->r_rtr_bytes) 7780 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 7781 if (r_rsm->r_in_tmap) { 7782 /* This really should not happen */ 7783 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 7784 r_rsm->r_in_tmap = 0; 7785 } 7786 7787 /* Now the flags */ 7788 if (r_rsm->r_flags & RACK_HAS_FIN) 7789 l_rsm->r_flags |= RACK_HAS_FIN; 7790 if (r_rsm->r_flags & RACK_TLP) 7791 l_rsm->r_flags |= RACK_TLP; 7792 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 7793 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 7794 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 7795 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 7796 /* 7797 * If both are app-limited then let the 7798 * free lower the count. If right is app 7799 * limited and left is not, transfer. 7800 */ 7801 l_rsm->r_flags |= RACK_APP_LIMITED; 7802 r_rsm->r_flags &= ~RACK_APP_LIMITED; 7803 if (r_rsm == rack->r_ctl.rc_first_appl) 7804 rack->r_ctl.rc_first_appl = l_rsm; 7805 } 7806 tqhash_remove(rack->r_ctl.tqh, r_rsm, REMOVE_TYPE_MERGE); 7807 /* 7808 * We keep the largest value, which is the newest 7809 * send. 
We do this in case a segment that is
7810 * joined together and not part of a GP estimate
7811 * later gets expanded into the GP estimate.
7812 *
7813 * We prohibit the merging of unlike kinds, i.e.
7814 * all pieces that are in the GP estimate can be
7815 * merged and all pieces that are not in a GP estimate
7816 * can be merged, but not dissimilar pieces. Combine
7817 * this with taking the highest here and we should
7818 * be ok unless of course the client reneges. Then
7819 * all bets are off.
7820 */
7821 if (l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] <
7822 r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]) {
7823 l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] = r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)];
7824 }
7825 /*
7826 * When merging two RSM's we also need to consider the ack time and keep
7827 * the newest. If the ack gets merged into a measurement then that is the
7828 * one we will want to be using.
7829 */
7830 if (l_rsm->r_ack_arrival < r_rsm->r_ack_arrival)
7831 l_rsm->r_ack_arrival = r_rsm->r_ack_arrival;
7832
7833 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) {
7834 /* Transfer the split limit to the map we free */
7835 r_rsm->r_limit_type = l_rsm->r_limit_type;
7836 l_rsm->r_limit_type = 0;
7837 }
7838 rack_free(rack, r_rsm);
7839 l_rsm->r_flags |= RACK_MERGED;
7840 return (l_rsm);
7841 }
7842
7843 /*
7844 * TLP Timer, here we simply set up what segment we want to
7845 * have the TLP expire on, the normal rack_output() will then
7846 * send it out.
7847 *
7848 * We return 1, saying don't proceed with rack_output only
7849 * when all timers have been stopped (destroyed PCB?).
7850 */
7851 static int
7852 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp)
7853 {
7854 /*
7855 * Tail Loss Probe.
7856 */
7857 struct rack_sendmap *rsm = NULL;
7858 int insret __diagused;
7859 struct socket *so = tptosocket(tp);
7860 uint32_t amm;
7861 uint32_t out, avail;
7862 int collapsed_win = 0;
7863
7864 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) {
7865 /* It's not time yet */
7866 return (0);
7867 }
7868 if (ctf_progress_timeout_check(tp, true)) {
7869 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
7870 return (-ETIMEDOUT); /* tcp_drop() */
7871 }
7872 /*
7873 * A TLP timer has expired. We have been idle for 2 rtts. So we now
7874 * need to figure out how to force a full MSS segment out.
7875 */
7876 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL);
7877 rack->r_ctl.retran_during_recovery = 0;
7878 rack->r_might_revert = 0;
7879 rack->r_ctl.dsack_byte_cnt = 0;
7880 counter_u64_add(rack_tlp_tot, 1);
7881 if (rack->r_state && (rack->r_state != tp->t_state))
7882 rack_set_state(tp, rack);
7883 avail = sbavail(&so->so_snd);
7884 out = tp->snd_max - tp->snd_una;
7885 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) {
7886 /* special case, we need a retransmission */
7887 collapsed_win = 1;
7888 goto need_retran;
7889 }
7890 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) {
7891 rack->r_ctl.dsack_persist--;
7892 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) {
7893 rack->r_ctl.num_dsack = 0;
7894 }
7895 rack_log_dsack_event(rack, 1, __LINE__, 0, 0);
7896 }
7897 if ((tp->t_flags & TF_GPUTINPROG) &&
7898 (rack->r_ctl.rc_tlp_cnt_out == 1)) {
7899 /*
7900 * If this is the second TLP in a row
7901 * and we are doing a measurement, it's
7902 * time to abandon the measurement.
7903 * Something is likely broken on
7904 * the client's network and measuring a
7905 * broken network does us no good.
7906 */ 7907 tp->t_flags &= ~TF_GPUTINPROG; 7908 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7909 rack->r_ctl.rc_gp_srtt /*flex1*/, 7910 tp->gput_seq, 7911 0, 0, 18, __LINE__, NULL, 0); 7912 } 7913 /* 7914 * Check our send oldest always settings, and if 7915 * there is an oldest to send jump to the need_retran. 7916 */ 7917 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 7918 goto need_retran; 7919 7920 if (avail > out) { 7921 /* New data is available */ 7922 amm = avail - out; 7923 if (amm > ctf_fixed_maxseg(tp)) { 7924 amm = ctf_fixed_maxseg(tp); 7925 if ((amm + out) > tp->snd_wnd) { 7926 /* We are rwnd limited */ 7927 goto need_retran; 7928 } 7929 } else if (amm < ctf_fixed_maxseg(tp)) { 7930 /* not enough to fill a MTU */ 7931 goto need_retran; 7932 } 7933 if (IN_FASTRECOVERY(tp->t_flags)) { 7934 /* Unlikely */ 7935 if (rack->rack_no_prr == 0) { 7936 if (out + amm <= tp->snd_wnd) { 7937 rack->r_ctl.rc_prr_sndcnt = amm; 7938 rack->r_ctl.rc_tlp_new_data = amm; 7939 rack_log_to_prr(rack, 4, 0, __LINE__); 7940 } 7941 } else 7942 goto need_retran; 7943 } else { 7944 /* Set the send-new override */ 7945 if (out + amm <= tp->snd_wnd) 7946 rack->r_ctl.rc_tlp_new_data = amm; 7947 else 7948 goto need_retran; 7949 } 7950 rack->r_ctl.rc_tlpsend = NULL; 7951 counter_u64_add(rack_tlp_newdata, 1); 7952 goto send; 7953 } 7954 need_retran: 7955 /* 7956 * Ok we need to arrange the last un-acked segment to be re-sent, or 7957 * optionally the first un-acked segment. 7958 */ 7959 if (collapsed_win == 0) { 7960 if (rack_always_send_oldest) 7961 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 7962 else { 7963 rsm = tqhash_max(rack->r_ctl.tqh); 7964 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 7965 rsm = rack_find_high_nonack(rack, rsm); 7966 } 7967 } 7968 if (rsm == NULL) { 7969 #ifdef TCP_BLACKBOX 7970 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 7971 #endif 7972 goto out; 7973 } 7974 } else { 7975 /* 7976 * We had a collapsed window, lets find 7977 * the point before the collapse. 7978 */ 7979 if (SEQ_GT((rack->r_ctl.last_collapse_point - 1), rack->rc_tp->snd_una)) 7980 rsm = tqhash_find(rack->r_ctl.tqh, (rack->r_ctl.last_collapse_point - 1)); 7981 else { 7982 rsm = tqhash_min(rack->r_ctl.tqh); 7983 } 7984 if (rsm == NULL) { 7985 /* Huh */ 7986 goto out; 7987 } 7988 } 7989 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 7990 /* 7991 * We need to split this the last segment in two. 7992 */ 7993 struct rack_sendmap *nrsm; 7994 7995 nrsm = rack_alloc_full_limit(rack); 7996 if (nrsm == NULL) { 7997 /* 7998 * No memory to split, we will just exit and punt 7999 * off to the RXT timer. 
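 * (When the allocation does succeed, the clone created below covers
 * only the final ctf_fixed_maxseg() bytes of the chosen segment, and
 * that clone is what the TLP will retransmit.)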
8000 */ 8001 goto out; 8002 } 8003 rack_clone_rsm(rack, nrsm, rsm, 8004 (rsm->r_end - ctf_fixed_maxseg(tp))); 8005 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 8006 #ifndef INVARIANTS 8007 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 8008 #else 8009 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 8010 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 8011 nrsm, insret, rack, rsm); 8012 } 8013 #endif 8014 if (rsm->r_in_tmap) { 8015 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8016 nrsm->r_in_tmap = 1; 8017 } 8018 rsm = nrsm; 8019 } 8020 rack->r_ctl.rc_tlpsend = rsm; 8021 send: 8022 /* Make sure output path knows we are doing a TLP */ 8023 *doing_tlp = 1; 8024 rack->r_timer_override = 1; 8025 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 8026 return (0); 8027 out: 8028 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 8029 return (0); 8030 } 8031 8032 /* 8033 * Delayed ack Timer, here we simply need to setup the 8034 * ACK_NOW flag and remove the DELACK flag. From there 8035 * the output routine will send the ack out. 8036 * 8037 * We only return 1, saying don't proceed, if all timers 8038 * are stopped (destroyed PCB?). 8039 */ 8040 static int 8041 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 8042 { 8043 8044 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 8045 tp->t_flags &= ~TF_DELACK; 8046 tp->t_flags |= TF_ACKNOW; 8047 KMOD_TCPSTAT_INC(tcps_delack); 8048 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 8049 return (0); 8050 } 8051 8052 static inline int 8053 rack_send_ack_challange(struct tcp_rack *rack) 8054 { 8055 struct tcptemp *t_template; 8056 8057 t_template = tcpip_maketemplate(rack->rc_inp); 8058 if (t_template) { 8059 if (rack->forced_ack == 0) { 8060 rack->forced_ack = 1; 8061 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 8062 } else { 8063 rack->probe_not_answered = 1; 8064 } 8065 tcp_respond(rack->rc_tp, t_template->tt_ipgen, 8066 &t_template->tt_t, (struct mbuf *)NULL, 8067 rack->rc_tp->rcv_nxt, rack->rc_tp->snd_una - 1, 0); 8068 free(t_template, M_TEMP); 8069 /* This does send an ack so kill any D-ack timer */ 8070 if (rack->rc_tp->t_flags & TF_DELACK) 8071 rack->rc_tp->t_flags &= ~TF_DELACK; 8072 return(1); 8073 } else 8074 return (0); 8075 8076 } 8077 8078 /* 8079 * Persists timer, here we simply send the 8080 * same thing as a keepalive will. 8081 * the one byte send. 8082 * 8083 * We only return 1, saying don't proceed, if all timers 8084 * are stopped (destroyed PCB?). 8085 */ 8086 static int 8087 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 8088 { 8089 int32_t retval = 1; 8090 8091 if (rack->rc_in_persist == 0) 8092 return (0); 8093 if (ctf_progress_timeout_check(tp, false)) { 8094 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 8095 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 8096 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 8097 return (-ETIMEDOUT); /* tcp_drop() */ 8098 } 8099 /* 8100 * Persistence timer into zero window. Force a byte to be output, if 8101 * possible. 8102 */ 8103 KMOD_TCPSTAT_INC(tcps_persisttimeo); 8104 /* 8105 * Hack: if the peer is dead/unreachable, we do not time out if the 8106 * window is closed. After a full backoff, drop the connection if 8107 * the idle time (no responses to probes) reaches the maximum 8108 * backoff that we would use if retransmitting. 
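	 * Concretely, the check below only drops once t_rxtshift has
	 * reached V_tcp_retries and the idle time exceeds either
	 * tcp_maxpersistidle or the fully backed-off RTO
	 * (RACK_REXMTVAL(tp) * tcp_totbackoff).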
8109 */ 8110 if (tp->t_rxtshift >= V_tcp_retries && 8111 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 8112 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 8113 KMOD_TCPSTAT_INC(tcps_persistdrop); 8114 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 8115 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 8116 retval = -ETIMEDOUT; /* tcp_drop() */ 8117 goto out; 8118 } 8119 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 8120 tp->snd_una == tp->snd_max) 8121 rack_exit_persist(tp, rack, cts); 8122 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 8123 /* 8124 * If the user has closed the socket then drop a persisting 8125 * connection after a much reduced timeout. 8126 */ 8127 if (tp->t_state > TCPS_CLOSE_WAIT && 8128 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 8129 KMOD_TCPSTAT_INC(tcps_persistdrop); 8130 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 8131 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 8132 retval = -ETIMEDOUT; /* tcp_drop() */ 8133 goto out; 8134 } 8135 if (rack_send_ack_challange(rack)) { 8136 /* only set it if we were answered */ 8137 if (rack->probe_not_answered) { 8138 counter_u64_add(rack_persists_loss, 1); 8139 rack->r_ctl.persist_lost_ends++; 8140 } 8141 counter_u64_add(rack_persists_sends, 1); 8142 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 8143 } 8144 if (tp->t_rxtshift < V_tcp_retries) 8145 tp->t_rxtshift++; 8146 out: 8147 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 8148 rack_start_hpts_timer(rack, tp, cts, 8149 0, 0, 0); 8150 return (retval); 8151 } 8152 8153 /* 8154 * If a keepalive goes off, we had no other timers 8155 * happening. We always return 1 here since this 8156 * routine either drops the connection or sends 8157 * out a segment with respond. 8158 */ 8159 static int 8160 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 8161 { 8162 struct inpcb *inp = tptoinpcb(tp); 8163 8164 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 8165 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 8166 /* 8167 * Keep-alive timer went off; send something or drop connection if 8168 * idle for too long. 8169 */ 8170 KMOD_TCPSTAT_INC(tcps_keeptimeo); 8171 if (tp->t_state < TCPS_ESTABLISHED) 8172 goto dropit; 8173 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 8174 tp->t_state <= TCPS_CLOSING) { 8175 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 8176 goto dropit; 8177 /* 8178 * Send a packet designed to force a response if the peer is 8179 * up and reachable: either an ACK if the connection is 8180 * still alive, or an RST if the peer has closed the 8181 * connection due to timeout or reboot. Using sequence 8182 * number tp->snd_una-1 causes the transmitted zero-length 8183 * segment to lie outside the receive window; by the 8184 * protocol spec, this requires the correspondent TCP to 8185 * respond. 8186 */ 8187 KMOD_TCPSTAT_INC(tcps_keepprobe); 8188 rack_send_ack_challange(rack); 8189 } 8190 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 8191 return (1); 8192 dropit: 8193 KMOD_TCPSTAT_INC(tcps_keepdrops); 8194 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 8195 return (-ETIMEDOUT); /* tcp_drop() */ 8196 } 8197 8198 /* 8199 * Retransmit helper function, clear up all the ack 8200 * flags and take care of important book keeping. 8201 */ 8202 static void 8203 rack_remxt_tmr(struct tcpcb *tp) 8204 { 8205 /* 8206 * The retransmit timer went off, all sack'd blocks must be 8207 * un-acked. 
8208 */ 8209 struct rack_sendmap *rsm, *trsm = NULL; 8210 struct tcp_rack *rack; 8211 8212 rack = (struct tcp_rack *)tp->t_fb_ptr; 8213 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 8214 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 8215 rack->r_timer_override = 1; 8216 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 8217 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 8218 rack->r_late = 0; 8219 rack->r_early = 0; 8220 rack->r_ctl.rc_agg_delayed = 0; 8221 rack->r_ctl.rc_agg_early = 0; 8222 if (rack->r_state && (rack->r_state != tp->t_state)) 8223 rack_set_state(tp, rack); 8224 if (tp->t_rxtshift <= rack_rxt_scoreboard_clear_thresh) { 8225 /* 8226 * We do not clear the scoreboard until we have had 8227 * more than rack_rxt_scoreboard_clear_thresh time-outs. 8228 */ 8229 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 8230 if (rack->r_ctl.rc_resend != NULL) 8231 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 8232 8233 return; 8234 } 8235 /* 8236 * Ideally we would like to be able to 8237 * mark SACK-PASS on anything not acked here. 8238 * 8239 * However, if we do that we would burst out 8240 * all that data 1ms apart. This would be unwise, 8241 * so for now we will just let the normal rxt timer 8242 * and tlp timer take care of it. 8243 * 8244 * Also we really need to stick them back in sequence 8245 * order. This way we send in the proper order and any 8246 * sacks that come floating in will "re-ack" the data. 8247 * To do this we zap the tmap with an INIT and then 8248 * walk through and place every rsm in the tail queue 8249 * hash table back in its seq ordered place. 8250 */ 8251 TAILQ_INIT(&rack->r_ctl.rc_tmap); 8252 8253 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 8254 rsm->r_dupack = 0; 8255 if (rack_verbose_logging) 8256 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8257 /* We must re-add it back to the tlist */ 8258 if (trsm == NULL) { 8259 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8260 } else { 8261 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 8262 } 8263 rsm->r_in_tmap = 1; 8264 trsm = rsm; 8265 if (rsm->r_flags & RACK_ACKED) 8266 rsm->r_flags |= RACK_WAS_ACKED; 8267 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED | RACK_WAS_LOST); 8268 rsm->r_flags |= RACK_MUST_RXT; 8269 } 8270 /* zero the lost since it's all gone */ 8271 rack->r_ctl.rc_considered_lost = 0; 8272 /* Clear the count (we just un-acked them) */ 8273 rack->r_ctl.rc_sacked = 0; 8274 rack->r_ctl.rc_sacklast = NULL; 8275 /* Clear the tlp rtx mark */ 8276 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); 8277 if (rack->r_ctl.rc_resend != NULL) 8278 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 8279 rack->r_ctl.rc_prr_sndcnt = 0; 8280 rack_log_to_prr(rack, 6, 0, __LINE__); 8281 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); 8282 if (rack->r_ctl.rc_resend != NULL) 8283 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 8284 if ((((tp->t_flags & TF_SACK_PERMIT) == 0) 8285 #ifdef TCP_SAD_DETECTION 8286 || (rack->sack_attack_disable != 0) 8287 #endif 8288 ) && ((tp->t_flags & TF_SENTFIN) == 0)) { 8289 /* 8290 * For non-sack customers new data 8291 * needs to go out as retransmits until 8292 * we retransmit up to snd_max. 
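		 * (r_must_retran and rc_out_at_rto are consumed in
		 * rack_update_rsm(): as each RACK_MUST_RXT segment is
		 * resent, rc_out_at_rto shrinks and the flag is cleared
		 * once rc_snd_max_at_rto has been covered.)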
8293 */ 8294 rack->r_must_retran = 1; 8295 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 8296 rack->r_ctl.rc_sacked); 8297 } 8298 } 8299 8300 static void 8301 rack_convert_rtts(struct tcpcb *tp) 8302 { 8303 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC); 8304 tp->t_rxtcur = RACK_REXMTVAL(tp); 8305 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 8306 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 8307 } 8308 if (tp->t_rxtcur > rack_rto_max) { 8309 tp->t_rxtcur = rack_rto_max; 8310 } 8311 } 8312 8313 static void 8314 rack_cc_conn_init(struct tcpcb *tp) 8315 { 8316 struct tcp_rack *rack; 8317 uint32_t srtt; 8318 8319 rack = (struct tcp_rack *)tp->t_fb_ptr; 8320 srtt = tp->t_srtt; 8321 cc_conn_init(tp); 8322 /* 8323 * Now convert to rack's internal format, 8324 * if required. 8325 */ 8326 if ((srtt == 0) && (tp->t_srtt != 0)) 8327 rack_convert_rtts(tp); 8328 /* 8329 * We want a chance to stay in slowstart as 8330 * we create a connection. TCP spec says that 8331 * initially ssthresh is infinite. For our 8332 * purposes that is the snd_wnd. 8333 */ 8334 if (tp->snd_ssthresh < tp->snd_wnd) { 8335 tp->snd_ssthresh = tp->snd_wnd; 8336 } 8337 /* 8338 * We also want to assure a IW worth of 8339 * data can get inflight. 8340 */ 8341 if (rc_init_window(rack) < tp->snd_cwnd) 8342 tp->snd_cwnd = rc_init_window(rack); 8343 } 8344 8345 /* 8346 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 8347 * we will setup to retransmit the lowest seq number outstanding. 8348 */ 8349 static int 8350 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 8351 { 8352 struct inpcb *inp = tptoinpcb(tp); 8353 int32_t rexmt; 8354 int32_t retval = 0; 8355 bool isipv6; 8356 8357 if ((tp->t_flags & TF_GPUTINPROG) && 8358 (tp->t_rxtshift)) { 8359 /* 8360 * We have had a second timeout 8361 * measurements on successive rxt's are not profitable. 8362 * It is unlikely to be of any use (the network is 8363 * broken or the client went away). 8364 */ 8365 tp->t_flags &= ~TF_GPUTINPROG; 8366 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 8367 rack->r_ctl.rc_gp_srtt /*flex1*/, 8368 tp->gput_seq, 8369 0, 0, 18, __LINE__, NULL, 0); 8370 } 8371 if (ctf_progress_timeout_check(tp, false)) { 8372 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 8373 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 8374 return (-ETIMEDOUT); /* tcp_drop() */ 8375 } 8376 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 8377 rack->r_ctl.retran_during_recovery = 0; 8378 rack->rc_ack_required = 1; 8379 rack->r_ctl.dsack_byte_cnt = 0; 8380 if (IN_RECOVERY(tp->t_flags) && 8381 (rack->rto_from_rec == 0)) { 8382 /* 8383 * Mark that we had a rto while in recovery 8384 * and save the ssthresh so if we go back 8385 * into recovery we will have a chance 8386 * to slowstart back to the level. 8387 */ 8388 rack->rto_from_rec = 1; 8389 rack->r_ctl.rto_ssthresh = tp->snd_ssthresh; 8390 } 8391 if (IN_FASTRECOVERY(tp->t_flags)) 8392 tp->t_flags |= TF_WASFRECOVERY; 8393 else 8394 tp->t_flags &= ~TF_WASFRECOVERY; 8395 if (IN_CONGRECOVERY(tp->t_flags)) 8396 tp->t_flags |= TF_WASCRECOVERY; 8397 else 8398 tp->t_flags &= ~TF_WASCRECOVERY; 8399 if (TCPS_HAVEESTABLISHED(tp->t_state) && 8400 (tp->snd_una == tp->snd_max)) { 8401 /* Nothing outstanding .. 
nothing to do */ 8402 return (0); 8403 } 8404 if (rack->r_ctl.dsack_persist) { 8405 rack->r_ctl.dsack_persist--; 8406 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 8407 rack->r_ctl.num_dsack = 0; 8408 } 8409 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 8410 } 8411 /* 8412 * Rack can only run one timer at a time, so we cannot 8413 * run a KEEPINIT (gating SYN sending) and a retransmit 8414 * timer for the SYN. So if we are in a front state and 8415 * have a KEEPINIT timer we need to check the first transmit 8416 * against now to see if we have exceeded the KEEPINIT time 8417 * (if one is set). 8418 */ 8419 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 8420 (TP_KEEPINIT(tp) != 0)) { 8421 struct rack_sendmap *rsm; 8422 8423 rsm = tqhash_min(rack->r_ctl.tqh); 8424 if (rsm) { 8425 /* Ok we have something outstanding to test keepinit with */ 8426 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 8427 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 8428 /* We have exceeded the KEEPINIT time */ 8429 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 8430 goto drop_it; 8431 } 8432 } 8433 } 8434 /* 8435 * Retransmission timer went off. Message has not been acked within 8436 * retransmit interval. Back off to a longer retransmit interval 8437 * and retransmit one segment. 8438 */ 8439 if ((rack->r_ctl.rc_resend == NULL) || 8440 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 8441 /* 8442 * If the rwnd collapsed on 8443 * the one we are retransmitting 8444 * it does not count against the 8445 * rxt count. 8446 */ 8447 tp->t_rxtshift++; 8448 } 8449 rack_remxt_tmr(tp); 8450 if (tp->t_rxtshift > V_tcp_retries) { 8451 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 8452 drop_it: 8453 tp->t_rxtshift = V_tcp_retries; 8454 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 8455 /* XXXGL: previously t_softerror was casted to uint16_t */ 8456 MPASS(tp->t_softerror >= 0); 8457 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; 8458 goto out; /* tcp_drop() */ 8459 } 8460 if (tp->t_state == TCPS_SYN_SENT) { 8461 /* 8462 * If the SYN was retransmitted, indicate CWND to be limited 8463 * to 1 segment in cc_conn_init(). 8464 */ 8465 tp->snd_cwnd = 1; 8466 } else if (tp->t_rxtshift == 1) { 8467 /* 8468 * first retransmit; record ssthresh and cwnd so they can be 8469 * recovered if this turns out to be a "bad" retransmit. A 8470 * retransmit is considered "bad" if an ACK for this segment 8471 * is received within RTT/2 interval; the assumption here is 8472 * that the ACK was already in flight. See "On Estimating 8473 * End-to-End Network Path Properties" by Allman and Paxson 8474 * for more details. 
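		 * In this stack srtt is kept in microseconds, so the
		 * window below works out to ticks + USEC_2_TICKS(t_srtt) / 2.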
8475 */ 8476 tp->snd_cwnd_prev = tp->snd_cwnd; 8477 tp->snd_ssthresh_prev = tp->snd_ssthresh; 8478 tp->snd_recover_prev = tp->snd_recover; 8479 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 8480 tp->t_flags |= TF_PREVVALID; 8481 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 8482 tp->t_flags &= ~TF_PREVVALID; 8483 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 8484 if ((tp->t_state == TCPS_SYN_SENT) || 8485 (tp->t_state == TCPS_SYN_RECEIVED)) 8486 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 8487 else 8488 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 8489 8490 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 8491 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 8492 /* 8493 * We enter the path for PLMTUD if connection is established or, if 8494 * connection is FIN_WAIT_1 status, reason for the last is that if 8495 * amount of data we send is very small, we could send it in couple 8496 * of packets and process straight to FIN. In that case we won't 8497 * catch ESTABLISHED state. 8498 */ 8499 #ifdef INET6 8500 isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false; 8501 #else 8502 isipv6 = false; 8503 #endif 8504 if (((V_tcp_pmtud_blackhole_detect == 1) || 8505 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 8506 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 8507 ((tp->t_state == TCPS_ESTABLISHED) || 8508 (tp->t_state == TCPS_FIN_WAIT_1))) { 8509 /* 8510 * Idea here is that at each stage of mtu probe (usually, 8511 * 1448 -> 1188 -> 524) should be given 2 chances to recover 8512 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 8513 * should take care of that. 8514 */ 8515 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 8516 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 8517 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 8518 tp->t_rxtshift % 2 == 0)) { 8519 /* 8520 * Enter Path MTU Black-hole Detection mechanism: - 8521 * Disable Path MTU Discovery (IP "DF" bit). - 8522 * Reduce MTU to lower value than what we negotiated 8523 * with peer. 8524 */ 8525 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 8526 /* Record that we may have found a black hole. */ 8527 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 8528 /* Keep track of previous MSS. */ 8529 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 8530 } 8531 8532 /* 8533 * Reduce the MSS to blackhole value or to the 8534 * default in an attempt to retransmit. 8535 */ 8536 #ifdef INET6 8537 if (isipv6 && 8538 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 8539 /* Use the sysctl tuneable blackhole MSS. */ 8540 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 8541 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 8542 } else if (isipv6) { 8543 /* Use the default MSS. */ 8544 tp->t_maxseg = V_tcp_v6mssdflt; 8545 /* 8546 * Disable Path MTU Discovery when we switch 8547 * to minmss. 8548 */ 8549 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 8550 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 8551 } 8552 #endif 8553 #if defined(INET6) && defined(INET) 8554 else 8555 #endif 8556 #ifdef INET 8557 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 8558 /* Use the sysctl tuneable blackhole MSS. */ 8559 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 8560 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 8561 } else { 8562 /* Use the default MSS. */ 8563 tp->t_maxseg = V_tcp_mssdflt; 8564 /* 8565 * Disable Path MTU Discovery when we switch 8566 * to minmss. 
8567 */ 8568 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 8569 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 8570 } 8571 #endif 8572 } else { 8573 /* 8574 * If further retransmissions are still unsuccessful 8575 * with a lowered MTU, maybe this isn't a blackhole 8576 * and we restore the previous MSS and blackhole 8577 * detection flags. The limit '6' is determined by 8578 * giving each probe stage (1448, 1188, 524) 2 8579 * chances to recover. 8580 */ 8581 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 8582 (tp->t_rxtshift >= 6)) { 8583 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 8584 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 8585 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 8586 if (tp->t_maxseg < V_tcp_mssdflt) { 8587 /* 8588 * The MSS is so small we should not 8589 * process incoming SACK's since we are 8590 * subject to attack in such a case. 8591 */ 8592 tp->t_flags2 |= TF2_PROC_SACK_PROHIBIT; 8593 } else { 8594 tp->t_flags2 &= ~TF2_PROC_SACK_PROHIBIT; 8595 } 8596 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 8597 } 8598 } 8599 } 8600 /* 8601 * Disable RFC1323 and SACK if we haven't got any response to 8602 * our third SYN to work-around some broken terminal servers 8603 * (most of which have hopefully been retired) that have bad VJ 8604 * header compression code which trashes TCP segments containing 8605 * unknown-to-them TCP options. 8606 */ 8607 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 8608 (tp->t_rxtshift == 3)) 8609 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 8610 /* 8611 * If we backed off this far, our srtt estimate is probably bogus. 8612 * Clobber it so we'll take the next rtt measurement as our srtt; 8613 * move the current srtt into rttvar to keep the current retransmit 8614 * times until then. 8615 */ 8616 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 8617 #ifdef INET6 8618 if ((inp->inp_vflag & INP_IPV6) != 0) 8619 in6_losing(inp); 8620 else 8621 #endif 8622 in_losing(inp); 8623 tp->t_rttvar += tp->t_srtt; 8624 tp->t_srtt = 0; 8625 } 8626 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 8627 tp->snd_recover = tp->snd_max; 8628 tp->t_flags |= TF_ACKNOW; 8629 tp->t_rtttime = 0; 8630 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__); 8631 out: 8632 return (retval); 8633 } 8634 8635 static int 8636 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp) 8637 { 8638 int32_t ret = 0; 8639 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 8640 8641 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 8642 (tp->t_flags & TF_GPUTINPROG)) { 8643 /* 8644 * We have a goodput in progress 8645 * and we have entered a late state. 8646 * Do we have enough data in the sb 8647 * to handle the GPUT request? 8648 */ 8649 uint32_t bytes; 8650 8651 bytes = tp->gput_ack - tp->gput_seq; 8652 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 8653 bytes += tp->gput_seq - tp->snd_una; 8654 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 8655 /* 8656 * There are not enough bytes in the socket 8657 * buffer that have been sent to cover this 8658 * measurement. Cancel it. 
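			 * (Being in FIN_WAIT_1 or later, no new data will
			 * arrive from the application to let the
			 * measurement complete.)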
8659 */ 8660 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 8661 rack->r_ctl.rc_gp_srtt /*flex1*/, 8662 tp->gput_seq, 8663 0, 0, 18, __LINE__, NULL, 0); 8664 tp->t_flags &= ~TF_GPUTINPROG; 8665 } 8666 } 8667 if (timers == 0) { 8668 return (0); 8669 } 8670 if (tp->t_state == TCPS_LISTEN) { 8671 /* no timers on listen sockets */ 8672 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 8673 return (0); 8674 return (1); 8675 } 8676 if ((timers & PACE_TMR_RACK) && 8677 rack->rc_on_min_to) { 8678 /* 8679 * For the rack timer when we 8680 * are on a min-timeout (which means rrr_conf = 3) 8681 * we don't want to check the timer. It may 8682 * be going off for a pace and thats ok we 8683 * want to send the retransmit (if its ready). 8684 * 8685 * If its on a normal rack timer (non-min) then 8686 * we will check if its expired. 8687 */ 8688 goto skip_time_check; 8689 } 8690 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 8691 uint32_t left; 8692 8693 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 8694 ret = -1; 8695 rack_log_to_processing(rack, cts, ret, 0); 8696 return (0); 8697 } 8698 if (hpts_calling == 0) { 8699 /* 8700 * A user send or queued mbuf (sack) has called us? We 8701 * return 0 and let the pacing guards 8702 * deal with it if they should or 8703 * should not cause a send. 8704 */ 8705 ret = -2; 8706 rack_log_to_processing(rack, cts, ret, 0); 8707 return (0); 8708 } 8709 /* 8710 * Ok our timer went off early and we are not paced false 8711 * alarm, go back to sleep. We make sure we don't have 8712 * no-sack wakeup on since we no longer have a PKT_OUTPUT 8713 * flag in place. 8714 */ 8715 rack->rc_tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE; 8716 ret = -3; 8717 left = rack->r_ctl.rc_timer_exp - cts; 8718 tcp_hpts_insert(tp, HPTS_MS_TO_SLOTS(left)); 8719 rack_log_to_processing(rack, cts, ret, left); 8720 return (1); 8721 } 8722 skip_time_check: 8723 rack->rc_tmr_stopped = 0; 8724 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 8725 if (timers & PACE_TMR_DELACK) { 8726 ret = rack_timeout_delack(tp, rack, cts); 8727 } else if (timers & PACE_TMR_RACK) { 8728 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8729 rack->r_fast_output = 0; 8730 ret = rack_timeout_rack(tp, rack, cts); 8731 } else if (timers & PACE_TMR_TLP) { 8732 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8733 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); 8734 } else if (timers & PACE_TMR_RXT) { 8735 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8736 rack->r_fast_output = 0; 8737 ret = rack_timeout_rxt(tp, rack, cts); 8738 } else if (timers & PACE_TMR_PERSIT) { 8739 ret = rack_timeout_persist(tp, rack, cts); 8740 } else if (timers & PACE_TMR_KEEP) { 8741 ret = rack_timeout_keepalive(tp, rack, cts); 8742 } 8743 rack_log_to_processing(rack, cts, ret, timers); 8744 return (ret); 8745 } 8746 8747 static void 8748 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 8749 { 8750 struct timeval tv; 8751 uint32_t us_cts, flags_on_entry; 8752 uint8_t hpts_removed = 0; 8753 8754 flags_on_entry = rack->r_ctl.rc_hpts_flags; 8755 us_cts = tcp_get_usecs(&tv); 8756 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 8757 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 8758 ((tp->snd_max - tp->snd_una) == 0))) { 8759 tcp_hpts_remove(rack->rc_tp); 8760 hpts_removed = 1; 8761 /* If we were not delayed cancel out the flag. 
*/ 8762 if ((tp->snd_max - tp->snd_una) == 0) 8763 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 8764 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 8765 } 8766 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 8767 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 8768 if (tcp_in_hpts(rack->rc_tp) && 8769 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 8770 /* 8771 * Canceling timer's when we have no output being 8772 * paced. We also must remove ourselves from the 8773 * hpts. 8774 */ 8775 tcp_hpts_remove(rack->rc_tp); 8776 hpts_removed = 1; 8777 } 8778 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 8779 } 8780 if (hpts_removed == 0) 8781 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 8782 } 8783 8784 static int 8785 rack_stopall(struct tcpcb *tp) 8786 { 8787 struct tcp_rack *rack; 8788 8789 rack = (struct tcp_rack *)tp->t_fb_ptr; 8790 rack->t_timers_stopped = 1; 8791 8792 tcp_hpts_remove(tp); 8793 8794 return (0); 8795 } 8796 8797 static void 8798 rack_stop_all_timers(struct tcpcb *tp, struct tcp_rack *rack) 8799 { 8800 /* 8801 * Assure no timers are running. 8802 */ 8803 if (tcp_timer_active(tp, TT_PERSIST)) { 8804 /* We enter in persists, set the flag appropriately */ 8805 rack->rc_in_persist = 1; 8806 } 8807 if (tcp_in_hpts(rack->rc_tp)) { 8808 tcp_hpts_remove(rack->rc_tp); 8809 } 8810 } 8811 8812 /* 8813 * We maintain an array fo 16 (RETRAN_CNT_SIZE) entries. This 8814 * array is zeroed at the start of recovery. Each time a segment 8815 * is retransmitted, we translate that into a number of packets 8816 * (based on segsiz) and based on how many times its been retransmitted 8817 * increment by the number of packets the counter that represents 8818 * retansmitted N times. Index 0 is retransmitted 1 time, index 1 8819 * is retransmitted 2 times etc. 8820 * 8821 * So for example when we send a 4344 byte transmission with a 1448 8822 * byte segsize, and its the third time we have retransmitted this 8823 * segment, we would add to the rc_cnt_of_retran[2] the value of 8824 * 3. That represents 3 MSS were retransmitted 3 times (index is 8825 * the number of times retranmitted minus 1). 8826 */ 8827 static void 8828 rack_peg_rxt(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t segsiz) 8829 { 8830 int idx; 8831 uint32_t peg; 8832 8833 peg = ((rsm->r_end - rsm->r_start) + segsiz) - 1; 8834 peg /= segsiz; 8835 idx = rsm->r_act_rxt_cnt - 1; 8836 if (idx >= RETRAN_CNT_SIZE) 8837 idx = RETRAN_CNT_SIZE - 1; 8838 /* Max of a uint16_t retransmits in a bucket */ 8839 if ((rack->r_ctl.rc_cnt_of_retran[idx] + peg) < 0xffff) 8840 rack->r_ctl.rc_cnt_of_retran[idx] += peg; 8841 else 8842 rack->r_ctl.rc_cnt_of_retran[idx] = 0xffff; 8843 } 8844 8845 /* 8846 * We maintain an array fo 16 (RETRAN_CNT_SIZE) entries. This 8847 * array is zeroed at the start of recovery. Each time a segment 8848 * is retransmitted, we translate that into a number of packets 8849 * (based on segsiz) and based on how many times its been retransmitted 8850 * increment by the number of packets the counter that represents 8851 * retansmitted N times. Index 0 is retransmitted 1 time, index 1 8852 * is retransmitted 2 times etc. 8853 * 8854 * The rack_unpeg_rxt is used when we go to retransmit a segment 8855 * again. 
Basically, if the segment had previously been
8856  * retransmitted, say, 3 times (as in the example in the comment
8857  * above rack_peg_rxt()), then prior to incrementing
8858  * r_act_rxt_cnt we call rack_unpeg_rxt(), which subtracts back
8859  * the previous add from its last retransmission (in this
8860  * example r_act_rxt_cnt would have been 2, for 2
8861  * retransmissions). So we would have subtracted 3 from
8862  * rc_cnt_of_retran[1] to remove those 3 segments. You will see
8863  * this in rack_update_rsm() below where we do:
8864  * if (rsm->r_act_rxt_cnt > 0) {
8865  * rack_unpeg_rxt(rack, rsm, segsiz);
8866  * }
8867  * rsm->r_act_rxt_cnt++;
8868  * rack_peg_rxt(rack, rsm, segsiz);
8869  *
8870  * This effectively moves the count from rc_cnt_of_retran[1] to
8871  * rc_cnt_of_retran[2].
8872  */
8873 static void
8874 rack_unpeg_rxt(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t segsiz)
8875 {
8876 	int idx;
8877 	uint32_t peg;
8878 
8879 	idx = rsm->r_act_rxt_cnt - 1;
8880 	if (idx >= RETRAN_CNT_SIZE)
8881 		idx = RETRAN_CNT_SIZE - 1;
8882 	peg = ((rsm->r_end - rsm->r_start) + segsiz) - 1;
8883 	peg /= segsiz;
8884 	if (peg < rack->r_ctl.rc_cnt_of_retran[idx])
8885 		rack->r_ctl.rc_cnt_of_retran[idx] -= peg;
8886 	else {
8887 		/* TSNH */
8888 		rack->r_ctl.rc_cnt_of_retran[idx] = 0;
8889 	}
8890 }
8891 
8892 static void
8893 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
8894     struct rack_sendmap *rsm, uint64_t ts, uint32_t add_flag, int segsiz)
8895 {
8896 	int32_t idx;
8897 
8898 	rsm->r_rtr_cnt++;
8899 	if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) {
8900 		rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS;
8901 		rsm->r_flags |= RACK_OVERMAX;
8902 	}
8903 	if (rsm->r_act_rxt_cnt > 0) {
8904 		/* Drop the count back for this, it's retransmitting again */
8905 		rack_unpeg_rxt(rack, rsm, segsiz);
8906 	}
8907 	rsm->r_act_rxt_cnt++;
8908 	/* Peg the count/index */
8909 	rack_peg_rxt(rack, rsm, segsiz);
8910 	rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
8911 	rsm->r_dupack = 0;
8912 	if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) {
8913 		rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start);
8914 		rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start);
8915 	}
8916 	if (rsm->r_flags & RACK_WAS_LOST) {
8917 		/*
8918 		 * We retransmitted it, putting it back in flight;
8919 		 * remove the lost designation and reduce the
8920 		 * bytes considered lost.
8921 		 */
8922 		rsm->r_flags &= ~RACK_WAS_LOST;
8923 		KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)),
8924 			("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
8925 		if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start))
8926 			rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start;
8927 		else
8928 			rack->r_ctl.rc_considered_lost = 0;
8929 	}
8930 	idx = rsm->r_rtr_cnt - 1;
8931 	rsm->r_tim_lastsent[idx] = ts;
8932 	/*
8933 	 * Here we don't add in the len of send, since it's already
8934 	 * in snduna <->snd_max.
8935 */ 8936 rsm->r_fas = ctf_flight_size(rack->rc_tp, 8937 rack->r_ctl.rc_sacked); 8938 if (rsm->r_flags & RACK_ACKED) { 8939 /* Problably MTU discovery messing with us */ 8940 rsm->r_flags &= ~RACK_ACKED; 8941 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8942 } 8943 if (rsm->r_in_tmap) { 8944 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8945 rsm->r_in_tmap = 0; 8946 } 8947 /* Lets make sure it really is in or not the GP window */ 8948 rack_mark_in_gp_win(tp, rsm); 8949 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8950 rsm->r_in_tmap = 1; 8951 rsm->r_bas = (uint8_t)(((rsm->r_end - rsm->r_start) + segsiz - 1) / segsiz); 8952 /* Take off the must retransmit flag, if its on */ 8953 if (rsm->r_flags & RACK_MUST_RXT) { 8954 if (rack->r_must_retran) 8955 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 8956 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 8957 /* 8958 * We have retransmitted all we need. Clear 8959 * any must retransmit flags. 8960 */ 8961 rack->r_must_retran = 0; 8962 rack->r_ctl.rc_out_at_rto = 0; 8963 } 8964 rsm->r_flags &= ~RACK_MUST_RXT; 8965 } 8966 /* Remove any collapsed flag */ 8967 rsm->r_flags &= ~RACK_RWND_COLLAPSED; 8968 if (rsm->r_flags & RACK_SACK_PASSED) { 8969 /* We have retransmitted due to the SACK pass */ 8970 rsm->r_flags &= ~RACK_SACK_PASSED; 8971 rsm->r_flags |= RACK_WAS_SACKPASS; 8972 } 8973 } 8974 8975 static uint32_t 8976 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 8977 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint32_t add_flag, int segsiz) 8978 { 8979 /* 8980 * We (re-)transmitted starting at rsm->r_start for some length 8981 * (possibly less than r_end. 8982 */ 8983 struct rack_sendmap *nrsm; 8984 int insret __diagused; 8985 uint32_t c_end; 8986 int32_t len; 8987 8988 len = *lenp; 8989 c_end = rsm->r_start + len; 8990 if (SEQ_GEQ(c_end, rsm->r_end)) { 8991 /* 8992 * We retransmitted the whole piece or more than the whole 8993 * slopping into the next rsm. 8994 */ 8995 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); 8996 if (c_end == rsm->r_end) { 8997 *lenp = 0; 8998 return (0); 8999 } else { 9000 int32_t act_len; 9001 9002 /* Hangs over the end return whats left */ 9003 act_len = rsm->r_end - rsm->r_start; 9004 *lenp = (len - act_len); 9005 return (rsm->r_end); 9006 } 9007 /* We don't get out of this block. */ 9008 } 9009 /* 9010 * Here we retransmitted less than the whole thing which means we 9011 * have to split this into what was transmitted and what was not. 9012 */ 9013 nrsm = rack_alloc_full_limit(rack); 9014 if (nrsm == NULL) { 9015 /* 9016 * We can't get memory, so lets not proceed. 9017 */ 9018 *lenp = 0; 9019 return (0); 9020 } 9021 /* 9022 * So here we are going to take the original rsm and make it what we 9023 * retransmitted. nrsm will be the tail portion we did not 9024 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 9025 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 9026 * 1, 6 and the new piece will be 6, 11. 
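	 * (Remember that r_end is exclusive, i.e. it is the first
	 * sequence number of the next block; see the note on r_start
	 * and r_end in rack_log_output() below.)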
9027 */ 9028 rack_clone_rsm(rack, nrsm, rsm, c_end); 9029 nrsm->r_dupack = 0; 9030 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 9031 #ifndef INVARIANTS 9032 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 9033 #else 9034 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 9035 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 9036 nrsm, insret, rack, rsm); 9037 } 9038 #endif 9039 if (rsm->r_in_tmap) { 9040 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 9041 nrsm->r_in_tmap = 1; 9042 } 9043 rsm->r_flags &= (~RACK_HAS_FIN); 9044 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); 9045 /* Log a split of rsm into rsm and nrsm */ 9046 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 9047 *lenp = 0; 9048 return (0); 9049 } 9050 9051 static void 9052 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 9053 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts, 9054 struct rack_sendmap *hintrsm, uint32_t add_flag, struct mbuf *s_mb, 9055 uint32_t s_moff, int hw_tls, int segsiz) 9056 { 9057 struct tcp_rack *rack; 9058 struct rack_sendmap *rsm, *nrsm; 9059 int insret __diagused; 9060 9061 register uint32_t snd_max, snd_una; 9062 9063 /* 9064 * Add to the RACK log of packets in flight or retransmitted. If 9065 * there is a TS option we will use the TS echoed, if not we will 9066 * grab a TS. 9067 * 9068 * Retransmissions will increment the count and move the ts to its 9069 * proper place. Note that if options do not include TS's then we 9070 * won't be able to effectively use the ACK for an RTT on a retran. 9071 * 9072 * Notes about r_start and r_end. Lets consider a send starting at 9073 * sequence 1 for 10 bytes. In such an example the r_start would be 9074 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 9075 * This means that r_end is actually the first sequence for the next 9076 * slot (11). 9077 * 9078 */ 9079 /* 9080 * If err is set what do we do XXXrrs? should we not add the thing? 9081 * -- i.e. return if err != 0 or should we pretend we sent it? -- 9082 * i.e. proceed with add ** do this for now. 9083 */ 9084 INP_WLOCK_ASSERT(tptoinpcb(tp)); 9085 if (err) 9086 /* 9087 * We don't log errors -- we could but snd_max does not 9088 * advance in this case either. 9089 */ 9090 return; 9091 9092 if (th_flags & TH_RST) { 9093 /* 9094 * We don't log resets and we return immediately from 9095 * sending 9096 */ 9097 return; 9098 } 9099 rack = (struct tcp_rack *)tp->t_fb_ptr; 9100 snd_una = tp->snd_una; 9101 snd_max = tp->snd_max; 9102 if (th_flags & (TH_SYN | TH_FIN)) { 9103 /* 9104 * The call to rack_log_output is made before bumping 9105 * snd_max. This means we can record one extra byte on a SYN 9106 * or FIN if seq_out is adding more on and a FIN is present 9107 * (and we are not resending). 9108 */ 9109 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 9110 len++; 9111 if (th_flags & TH_FIN) 9112 len++; 9113 } 9114 if (SEQ_LEQ((seq_out + len), snd_una)) { 9115 /* Are sending an old segment to induce an ack (keep-alive)? */ 9116 return; 9117 } 9118 if (SEQ_LT(seq_out, snd_una)) { 9119 /* huh? should we panic? */ 9120 uint32_t end; 9121 9122 end = seq_out + len; 9123 seq_out = snd_una; 9124 if (SEQ_GEQ(end, seq_out)) 9125 len = end - seq_out; 9126 else 9127 len = 0; 9128 } 9129 if (len == 0) { 9130 /* We don't log zero window probes */ 9131 return; 9132 } 9133 if (IN_FASTRECOVERY(tp->t_flags)) { 9134 rack->r_ctl.rc_prr_out += len; 9135 } 9136 /* First question is it a retransmission or new? 
*/ 9137 if (seq_out == snd_max) { 9138 /* Its new */ 9139 rack_chk_req_and_hybrid_on_out(rack, seq_out, len, cts); 9140 again: 9141 rsm = rack_alloc(rack); 9142 if (rsm == NULL) { 9143 /* 9144 * Hmm out of memory and the tcb got destroyed while 9145 * we tried to wait. 9146 */ 9147 return; 9148 } 9149 if (th_flags & TH_FIN) { 9150 rsm->r_flags = RACK_HAS_FIN|add_flag; 9151 } else { 9152 rsm->r_flags = add_flag; 9153 } 9154 if (hw_tls) 9155 rsm->r_hw_tls = 1; 9156 rsm->r_tim_lastsent[0] = cts; 9157 rsm->r_rtr_cnt = 1; 9158 rsm->r_act_rxt_cnt = 0; 9159 rsm->r_rtr_bytes = 0; 9160 if (th_flags & TH_SYN) { 9161 /* The data space is one beyond snd_una */ 9162 rsm->r_flags |= RACK_HAS_SYN; 9163 } 9164 rsm->r_start = seq_out; 9165 rsm->r_end = rsm->r_start + len; 9166 rack_mark_in_gp_win(tp, rsm); 9167 rsm->r_dupack = 0; 9168 /* 9169 * save off the mbuf location that 9170 * sndmbuf_noadv returned (which is 9171 * where we started copying from).. 9172 */ 9173 rsm->m = s_mb; 9174 rsm->soff = s_moff; 9175 /* 9176 * Here we do add in the len of send, since its not yet 9177 * reflected in in snduna <->snd_max 9178 */ 9179 rsm->r_fas = (ctf_flight_size(rack->rc_tp, 9180 rack->r_ctl.rc_sacked) + 9181 (rsm->r_end - rsm->r_start)); 9182 if ((rack->rc_initial_ss_comp == 0) && 9183 (rack->r_ctl.ss_hi_fs < rsm->r_fas)) { 9184 rack->r_ctl.ss_hi_fs = rsm->r_fas; 9185 } 9186 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ 9187 if (rsm->m) { 9188 if (rsm->m->m_len <= rsm->soff) { 9189 /* 9190 * XXXrrs Question, will this happen? 9191 * 9192 * If sbsndptr is set at the correct place 9193 * then s_moff should always be somewhere 9194 * within rsm->m. But if the sbsndptr was 9195 * off then that won't be true. If it occurs 9196 * we need to walkout to the correct location. 9197 */ 9198 struct mbuf *lm; 9199 9200 lm = rsm->m; 9201 while (lm->m_len <= rsm->soff) { 9202 rsm->soff -= lm->m_len; 9203 lm = lm->m_next; 9204 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", 9205 __func__, rack, s_moff, s_mb, rsm->soff)); 9206 } 9207 rsm->m = lm; 9208 } 9209 rsm->orig_m_len = rsm->m->m_len; 9210 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 9211 } else { 9212 rsm->orig_m_len = 0; 9213 rsm->orig_t_space = 0; 9214 } 9215 rsm->r_bas = (uint8_t)((len + segsiz - 1) / segsiz); 9216 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9217 /* Log a new rsm */ 9218 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); 9219 #ifndef INVARIANTS 9220 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 9221 #else 9222 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 9223 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 9224 nrsm, insret, rack, rsm); 9225 } 9226 #endif 9227 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 9228 rsm->r_in_tmap = 1; 9229 if (rsm->r_flags & RACK_IS_PCM) { 9230 rack->r_ctl.pcm_i.send_time = cts; 9231 rack->r_ctl.pcm_i.eseq = rsm->r_end; 9232 /* First time through we set the start too */ 9233 if (rack->pcm_in_progress == 0) 9234 rack->r_ctl.pcm_i.sseq = rsm->r_start; 9235 } 9236 /* 9237 * Special case detection, is there just a single 9238 * packet outstanding when we are not in recovery? 9239 * 9240 * If this is true mark it so. 
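		 * (The r_one_out_nr flag set on the prior rsm is later
		 * checked in tcp_rack_xmit_timer() to lower the
		 * confidence of an RTT sample taken from such a lone
		 * outstanding packet.)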
9241 */ 9242 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 9243 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 9244 struct rack_sendmap *prsm; 9245 9246 prsm = tqhash_prev(rack->r_ctl.tqh, rsm); 9247 if (prsm) 9248 prsm->r_one_out_nr = 1; 9249 } 9250 return; 9251 } 9252 /* 9253 * If we reach here its a retransmission and we need to find it. 9254 */ 9255 more: 9256 if (hintrsm && (hintrsm->r_start == seq_out)) { 9257 rsm = hintrsm; 9258 hintrsm = NULL; 9259 } else { 9260 /* No hints sorry */ 9261 rsm = NULL; 9262 } 9263 if ((rsm) && (rsm->r_start == seq_out)) { 9264 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); 9265 if (len == 0) { 9266 return; 9267 } else { 9268 goto more; 9269 } 9270 } 9271 /* Ok it was not the last pointer go through it the hard way. */ 9272 refind: 9273 rsm = tqhash_find(rack->r_ctl.tqh, seq_out); 9274 if (rsm) { 9275 if (rsm->r_start == seq_out) { 9276 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); 9277 if (len == 0) { 9278 return; 9279 } else { 9280 goto refind; 9281 } 9282 } 9283 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 9284 /* Transmitted within this piece */ 9285 /* 9286 * Ok we must split off the front and then let the 9287 * update do the rest 9288 */ 9289 nrsm = rack_alloc_full_limit(rack); 9290 if (nrsm == NULL) { 9291 rack_update_rsm(tp, rack, rsm, cts, add_flag, segsiz); 9292 return; 9293 } 9294 /* 9295 * copy rsm to nrsm and then trim the front of rsm 9296 * to not include this part. 9297 */ 9298 rack_clone_rsm(rack, nrsm, rsm, seq_out); 9299 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 9300 #ifndef INVARIANTS 9301 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 9302 #else 9303 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 9304 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 9305 nrsm, insret, rack, rsm); 9306 } 9307 #endif 9308 if (rsm->r_in_tmap) { 9309 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 9310 nrsm->r_in_tmap = 1; 9311 } 9312 rsm->r_flags &= (~RACK_HAS_FIN); 9313 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag, segsiz); 9314 if (len == 0) { 9315 return; 9316 } else if (len > 0) 9317 goto refind; 9318 } 9319 } 9320 /* 9321 * Hmm not found in map did they retransmit both old and on into the 9322 * new? 9323 */ 9324 if (seq_out == tp->snd_max) { 9325 goto again; 9326 } else if (SEQ_LT(seq_out, tp->snd_max)) { 9327 #ifdef INVARIANTS 9328 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 9329 seq_out, len, tp->snd_una, tp->snd_max); 9330 printf("Starting Dump of all rack entries\n"); 9331 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 9332 printf("rsm:%p start:%u end:%u\n", 9333 rsm, rsm->r_start, rsm->r_end); 9334 } 9335 printf("Dump complete\n"); 9336 panic("seq_out not found rack:%p tp:%p", 9337 rack, tp); 9338 #endif 9339 } else { 9340 #ifdef INVARIANTS 9341 /* 9342 * Hmm beyond sndmax? (only if we are using the new rtt-pack 9343 * flag) 9344 */ 9345 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 9346 seq_out, len, tp->snd_max, tp); 9347 #endif 9348 } 9349 } 9350 9351 /* 9352 * Record one of the RTT updates from an ack into 9353 * our sample structure. 
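 * The confidence argument is roughly: 2 - the sample came from a
 * SACK'd segment, 1 - a cum-ack we trust, and 0 - a sample we do
 * not trust for buffer-level determination (just-return, app-limited
 * or a lone outstanding packet).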
9354  */
9355 
9356 static void
9357 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt,
9358     int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt)
9359 {
9360 	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
9361 	    (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) {
9362 		rack->r_ctl.rack_rs.rs_rtt_lowest = rtt;
9363 	}
9364 	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
9365 	    (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) {
9366 		rack->r_ctl.rack_rs.rs_rtt_highest = rtt;
9367 	}
9368 	if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
9369 		if (us_rtt < rack->r_ctl.rc_gp_lowrtt)
9370 			rack->r_ctl.rc_gp_lowrtt = us_rtt;
9371 		if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd)
9372 			rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
9373 	}
9374 	if ((confidence == 1) &&
9375 	    ((rsm == NULL) ||
9376 	    (rsm->r_just_ret) ||
9377 	    (rsm->r_one_out_nr &&
9378 	    len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) {
9379 		/*
9380 		 * If the rsm had a just-return hit on it then we
9381 		 * can't trust the rtt measurement for buffer
9382 		 * determination. Note that a confidence of 2
9383 		 * indicates the sample was SACK'd, which overrides
9384 		 * the r_just_ret or the r_one_out_nr checks. If it
9385 		 * was a CUM-ACK and we had only two segments
9386 		 * outstanding, but got an ack for only one, then
9387 		 * that also lowers our confidence (hence the len
9388 		 * check above).
9389 		 */
9390 		confidence = 0;
9391 	}
9392 	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
9393 	    (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) {
9394 		if (rack->r_ctl.rack_rs.confidence == 0) {
9395 			/*
9396 			 * We take anything with no current confidence
9397 			 * saved.
9398 			 */
9399 			rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
9400 			rack->r_ctl.rack_rs.confidence = confidence;
9401 			rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
9402 		} else if (confidence != 0) {
9403 			/*
9404 			 * Once we have a confident number,
9405 			 * we can update it with a smaller
9406 			 * value since this confident number
9407 			 * may include the DSACK time until
9408 			 * the next segment (the second one) arrived.
9409 			 */
9410 			rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
9411 			rack->r_ctl.rack_rs.confidence = confidence;
9412 			rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
9413 		}
9414 	}
9415 	rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence);
9416 	rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID;
9417 	rack->r_ctl.rack_rs.rs_rtt_tot += rtt;
9418 	rack->r_ctl.rack_rs.rs_rtt_cnt++;
9419 }
9420 
9421 /*
9422  * Collect new round-trip time estimate
9423  * and update averages and current timeout.
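 * The per-ack aggregate gathered above (lowest/highest/average,
 * selected by rc_rate_sample_method) feeds both the goodput srtt
 * (rc_gp_srtt) and the classic srtt/rttvar pair used for the RTO.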
9424 */ 9425 static void 9426 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 9427 { 9428 int32_t delta; 9429 int32_t rtt; 9430 9431 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 9432 /* No valid sample */ 9433 return; 9434 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 9435 /* We are to use the lowest RTT seen in a single ack */ 9436 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 9437 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 9438 /* We are to use the highest RTT seen in a single ack */ 9439 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 9440 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 9441 /* We are to use the average RTT seen in a single ack */ 9442 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 9443 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 9444 } else { 9445 #ifdef INVARIANTS 9446 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 9447 #endif 9448 return; 9449 } 9450 if (rtt == 0) 9451 rtt = 1; 9452 if (rack->rc_gp_rtt_set == 0) { 9453 /* 9454 * With no RTT we have to accept 9455 * even one we are not confident of. 9456 */ 9457 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 9458 rack->rc_gp_rtt_set = 1; 9459 } else if (rack->r_ctl.rack_rs.confidence) { 9460 /* update the running gp srtt */ 9461 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 9462 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 9463 } 9464 if (rack->r_ctl.rack_rs.confidence) { 9465 /* 9466 * record the low and high for highly buffered path computation, 9467 * we only do this if we are confident (not a retransmission). 9468 */ 9469 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 9470 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 9471 } 9472 if (rack->rc_highly_buffered == 0) { 9473 /* 9474 * Currently once we declare a path has 9475 * highly buffered there is no going 9476 * back, which may be a problem... 9477 */ 9478 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 9479 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 9480 rack->r_ctl.rc_highest_us_rtt, 9481 rack->r_ctl.rc_lowest_us_rtt, 9482 RACK_RTTS_SEEHBP); 9483 rack->rc_highly_buffered = 1; 9484 } 9485 } 9486 } 9487 if ((rack->r_ctl.rack_rs.confidence) || 9488 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 9489 /* 9490 * If we are highly confident of it <or> it was 9491 * never retransmitted we accept it as the last us_rtt. 9492 */ 9493 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 9494 /* The lowest rtt can be set if its was not retransmited */ 9495 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 9496 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 9497 if (rack->r_ctl.rc_lowest_us_rtt == 0) 9498 rack->r_ctl.rc_lowest_us_rtt = 1; 9499 } 9500 } 9501 rack = (struct tcp_rack *)tp->t_fb_ptr; 9502 if (tp->t_srtt != 0) { 9503 /* 9504 * We keep a simple srtt in microseconds, like our rtt 9505 * measurement. We don't need to do any tricks with shifting 9506 * etc. Instead we just add in 1/8th of the new measurement 9507 * and subtract out 1/8 of the old srtt. We do the same with 9508 * the variance after finding the absolute value of the 9509 * difference between this sample and the current srtt. 
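		 * For example (illustrative numbers only): with t_srtt =
		 * 40000us and a new rtt = 48000us, delta = -8000; srtt
		 * becomes 40000 - 5000 + 6000 = 41000us, while rttvar
		 * loses 1/8th of itself and gains |delta| / 8 = 1000us.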
9510 */ 9511 delta = tp->t_srtt - rtt; 9512 /* Take off 1/8th of the current sRTT */ 9513 tp->t_srtt -= (tp->t_srtt >> 3); 9514 /* Add in 1/8th of the new RTT just measured */ 9515 tp->t_srtt += (rtt >> 3); 9516 if (tp->t_srtt <= 0) 9517 tp->t_srtt = 1; 9518 /* Now lets make the absolute value of the variance */ 9519 if (delta < 0) 9520 delta = -delta; 9521 /* Subtract out 1/8th */ 9522 tp->t_rttvar -= (tp->t_rttvar >> 3); 9523 /* Add in 1/8th of the new variance we just saw */ 9524 tp->t_rttvar += (delta >> 3); 9525 if (tp->t_rttvar <= 0) 9526 tp->t_rttvar = 1; 9527 } else { 9528 /* 9529 * No rtt measurement yet - use the unsmoothed rtt. Set the 9530 * variance to half the rtt (so our first retransmit happens 9531 * at 3*rtt). 9532 */ 9533 tp->t_srtt = rtt; 9534 tp->t_rttvar = rtt >> 1; 9535 } 9536 rack->rc_srtt_measure_made = 1; 9537 KMOD_TCPSTAT_INC(tcps_rttupdated); 9538 if (tp->t_rttupdated < UCHAR_MAX) 9539 tp->t_rttupdated++; 9540 #ifdef STATS 9541 if (rack_stats_gets_ms_rtt == 0) { 9542 /* Send in the microsecond rtt used for rxt timeout purposes */ 9543 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 9544 } else if (rack_stats_gets_ms_rtt == 1) { 9545 /* Send in the millisecond rtt used for rxt timeout purposes */ 9546 int32_t ms_rtt; 9547 9548 /* Round up */ 9549 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 9550 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 9551 } else if (rack_stats_gets_ms_rtt == 2) { 9552 /* Send in the millisecond rtt has close to the path RTT as we can get */ 9553 int32_t ms_rtt; 9554 9555 /* Round up */ 9556 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 9557 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 9558 } else { 9559 /* Send in the microsecond rtt has close to the path RTT as we can get */ 9560 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 9561 } 9562 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_PATHRTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 9563 #endif 9564 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 9565 /* 9566 * the retransmit should happen at rtt + 4 * rttvar. Because of the 9567 * way we do the smoothing, srtt and rttvar will each average +1/2 9568 * tick of bias. When we compute the retransmit timer, we want 1/2 9569 * tick of rounding and 1 extra tick because of +-1/2 tick 9570 * uncertainty in the firing of the timer. The bias will give us 9571 * exactly the 1.5 tick we need. But, because the bias is 9572 * statistical, we have to test that we don't drop below the minimum 9573 * feasible timer (which is 2 ticks). 9574 */ 9575 tp->t_rxtshift = 0; 9576 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 9577 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 9578 rack_log_rtt_sample(rack, rtt); 9579 tp->t_softerror = 0; 9580 } 9581 9582 9583 static void 9584 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 9585 { 9586 /* 9587 * Apply to filter the inbound us-rtt at us_cts. 
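	 * If the new value is a meaningfully lower rtt (by more than
	 * rack_min_rtt_movement) and we are already close to when
	 * probe-rtt would fire, we may enter probe-rtt early on the
	 * theory that a competing flow is probably probing right now.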
9588 */ 9589 uint32_t old_rtt; 9590 9591 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 9592 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 9593 us_rtt, us_cts); 9594 if (old_rtt > us_rtt) { 9595 /* We just hit a new lower rtt time */ 9596 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 9597 __LINE__, RACK_RTTS_NEWRTT); 9598 /* 9599 * Only count it if its lower than what we saw within our 9600 * calculated range. 9601 */ 9602 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 9603 if (rack_probertt_lower_within && 9604 rack->rc_gp_dyn_mul && 9605 (rack->use_fixed_rate == 0) && 9606 (rack->rc_always_pace)) { 9607 /* 9608 * We are seeing a new lower rtt very close 9609 * to the time that we would have entered probe-rtt. 9610 * This is probably due to the fact that a peer flow 9611 * has entered probe-rtt. Lets go in now too. 9612 */ 9613 uint32_t val; 9614 9615 val = rack_probertt_lower_within * rack_time_between_probertt; 9616 val /= 100; 9617 if ((rack->in_probe_rtt == 0) && 9618 (rack->rc_skip_timely == 0) && 9619 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 9620 rack_enter_probertt(rack, us_cts); 9621 } 9622 } 9623 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 9624 } 9625 } 9626 } 9627 9628 static int 9629 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 9630 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 9631 { 9632 uint32_t us_rtt; 9633 int32_t i, all; 9634 uint32_t t, len_acked; 9635 9636 if ((rsm->r_flags & RACK_ACKED) || 9637 (rsm->r_flags & RACK_WAS_ACKED)) 9638 /* Already done */ 9639 return (0); 9640 if (rsm->r_no_rtt_allowed) { 9641 /* Not allowed */ 9642 return (0); 9643 } 9644 if (ack_type == CUM_ACKED) { 9645 if (SEQ_GT(th_ack, rsm->r_end)) { 9646 len_acked = rsm->r_end - rsm->r_start; 9647 all = 1; 9648 } else { 9649 len_acked = th_ack - rsm->r_start; 9650 all = 0; 9651 } 9652 } else { 9653 len_acked = rsm->r_end - rsm->r_start; 9654 all = 0; 9655 } 9656 if (rsm->r_rtr_cnt == 1) { 9657 9658 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 9659 if ((int)t <= 0) 9660 t = 1; 9661 if (!tp->t_rttlow || tp->t_rttlow > t) 9662 tp->t_rttlow = t; 9663 if (!rack->r_ctl.rc_rack_min_rtt || 9664 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9665 rack->r_ctl.rc_rack_min_rtt = t; 9666 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9667 rack->r_ctl.rc_rack_min_rtt = 1; 9668 } 9669 } 9670 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 9671 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9672 else 9673 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9674 if (us_rtt == 0) 9675 us_rtt = 1; 9676 if (CC_ALGO(tp)->rttsample != NULL) { 9677 /* Kick the RTT to the CC */ 9678 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 9679 } 9680 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 9681 if (ack_type == SACKED) { 9682 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 9683 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 9684 } else { 9685 /* 9686 * We need to setup what our confidence 9687 * is in this ack. 9688 * 9689 * If the rsm was app limited and it is 9690 * less than a mss in length (the end 9691 * of the send) then we have a gap. 
If we 9692 * were app limited but say we were sending 9693 * multiple MSS's then we are more confident 9694 * int it. 9695 * 9696 * When we are not app-limited then we see if 9697 * the rsm is being included in the current 9698 * measurement, we tell this by the app_limited_needs_set 9699 * flag. 9700 * 9701 * Note that being cwnd blocked is not applimited 9702 * as well as the pacing delay between packets which 9703 * are sending only 1 or 2 MSS's also will show up 9704 * in the RTT. We probably need to examine this algorithm 9705 * a bit more and enhance it to account for the delay 9706 * between rsm's. We could do that by saving off the 9707 * pacing delay of each rsm (in an rsm) and then 9708 * factoring that in somehow though for now I am 9709 * not sure how :) 9710 */ 9711 int calc_conf = 0; 9712 9713 if (rsm->r_flags & RACK_APP_LIMITED) { 9714 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 9715 calc_conf = 0; 9716 else 9717 calc_conf = 1; 9718 } else if (rack->app_limited_needs_set == 0) { 9719 calc_conf = 1; 9720 } else { 9721 calc_conf = 0; 9722 } 9723 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 9724 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 9725 calc_conf, rsm, rsm->r_rtr_cnt); 9726 } 9727 if ((rsm->r_flags & RACK_TLP) && 9728 (!IN_FASTRECOVERY(tp->t_flags))) { 9729 /* Segment was a TLP and our retrans matched */ 9730 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 9731 rack_cong_signal(tp, CC_NDUPACK, th_ack, __LINE__); 9732 } 9733 } 9734 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9735 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9736 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { 9737 /* New more recent rack_tmit_time */ 9738 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 9739 if (rack->r_ctl.rc_rack_tmit_time == 0) 9740 rack->r_ctl.rc_rack_tmit_time = 1; 9741 rack->rc_rack_rtt = t; 9742 } 9743 return (1); 9744 } 9745 /* 9746 * We clear the soft/rxtshift since we got an ack. 9747 * There is no assurance we will call the commit() function 9748 * so we need to clear these to avoid incorrect handling. 9749 */ 9750 tp->t_rxtshift = 0; 9751 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 9752 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 9753 tp->t_softerror = 0; 9754 if (to && (to->to_flags & TOF_TS) && 9755 (ack_type == CUM_ACKED) && 9756 (to->to_tsecr) && 9757 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 9758 /* 9759 * Now which timestamp does it match? In this block the ACK 9760 * must be coming from a previous transmission. 9761 */ 9762 for (i = 0; i < rsm->r_rtr_cnt; i++) { 9763 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 9764 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 9765 if ((int)t <= 0) 9766 t = 1; 9767 if (CC_ALGO(tp)->rttsample != NULL) { 9768 /* 9769 * Kick the RTT to the CC, here 9770 * we lie a bit in that we know the 9771 * retransmission is correct even though 9772 * we retransmitted. This is because 9773 * we match the timestamps. 9774 */ 9775 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) 9776 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; 9777 else 9778 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; 9779 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 9780 } 9781 if ((i + 1) < rsm->r_rtr_cnt) { 9782 /* 9783 * The peer ack'd from our previous 9784 * transmission. 
We have a spurious 9785 * retransmission and thus we dont 9786 * want to update our rack_rtt. 9787 * 9788 * Hmm should there be a CC revert here? 9789 * 9790 */ 9791 return (0); 9792 } 9793 if (!tp->t_rttlow || tp->t_rttlow > t) 9794 tp->t_rttlow = t; 9795 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9796 rack->r_ctl.rc_rack_min_rtt = t; 9797 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9798 rack->r_ctl.rc_rack_min_rtt = 1; 9799 } 9800 } 9801 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9802 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9803 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { 9804 /* New more recent rack_tmit_time */ 9805 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 9806 if (rack->r_ctl.rc_rack_tmit_time == 0) 9807 rack->r_ctl.rc_rack_tmit_time = 1; 9808 rack->rc_rack_rtt = t; 9809 } 9810 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); 9811 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, 9812 rsm->r_rtr_cnt); 9813 return (1); 9814 } 9815 } 9816 /* If we are logging log out the sendmap */ 9817 if (tcp_bblogging_on(rack->rc_tp)) { 9818 for (i = 0; i < rsm->r_rtr_cnt; i++) { 9819 rack_log_rtt_sendmap(rack, i, rsm->r_tim_lastsent[i], to->to_tsecr); 9820 } 9821 } 9822 goto ts_not_found; 9823 } else { 9824 /* 9825 * Ok its a SACK block that we retransmitted. or a windows 9826 * machine without timestamps. We can tell nothing from the 9827 * time-stamp since its not there or the time the peer last 9828 * received a segment that moved forward its cum-ack point. 9829 */ 9830 ts_not_found: 9831 i = rsm->r_rtr_cnt - 1; 9832 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 9833 if ((int)t <= 0) 9834 t = 1; 9835 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9836 /* 9837 * We retransmitted and the ack came back in less 9838 * than the smallest rtt we have observed. We most 9839 * likely did an improper retransmit as outlined in 9840 * 6.2 Step 2 point 2 in the rack-draft so we 9841 * don't want to update our rack_rtt. We in 9842 * theory (in future) might want to think about reverting our 9843 * cwnd state but we won't for now. 9844 */ 9845 return (0); 9846 } else if (rack->r_ctl.rc_rack_min_rtt) { 9847 /* 9848 * We retransmitted it and the retransmit did the 9849 * job. 9850 */ 9851 if (!rack->r_ctl.rc_rack_min_rtt || 9852 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9853 rack->r_ctl.rc_rack_min_rtt = t; 9854 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9855 rack->r_ctl.rc_rack_min_rtt = 1; 9856 } 9857 } 9858 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9859 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9860 (uint32_t)rsm->r_tim_lastsent[i]))) { 9861 /* New more recent rack_tmit_time */ 9862 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; 9863 if (rack->r_ctl.rc_rack_tmit_time == 0) 9864 rack->r_ctl.rc_rack_tmit_time = 1; 9865 rack->rc_rack_rtt = t; 9866 } 9867 return (1); 9868 } 9869 } 9870 return (0); 9871 } 9872 9873 /* 9874 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 
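 *
 * In outline (describing the loop below, not adding behavior): walk
 * the transmit map (rc_tmap) backwards from rsm, skipping rsm itself,
 * entries already ACKED and entries whose receive window collapsed;
 * if the rack threshold has already expired (measured from the sacked
 * rsm's last send time), flag the entry RACK_WAS_LOST and account its
 * bytes as considered lost; set RACK_SACK_PASSED on it, and stop at
 * the first entry that already has RACK_SACK_PASSED, since everything
 * earlier was handled on a prior call.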
9875 */ 9876 static void 9877 rack_log_sack_passed(struct tcpcb *tp, 9878 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts) 9879 { 9880 struct rack_sendmap *nrsm; 9881 uint32_t thresh; 9882 9883 /* Get our rxt threshold for lost consideration */ 9884 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(tp, rack), cts, __LINE__, 0); 9885 /* Now start looking at rsm's */ 9886 nrsm = rsm; 9887 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 9888 rack_head, r_tnext) { 9889 if (nrsm == rsm) { 9890 /* Skip original segment he is acked */ 9891 continue; 9892 } 9893 if (nrsm->r_flags & RACK_ACKED) { 9894 /* 9895 * Skip ack'd segments, though we 9896 * should not see these, since tmap 9897 * should not have ack'd segments. 9898 */ 9899 continue; 9900 } 9901 if (nrsm->r_flags & RACK_RWND_COLLAPSED) { 9902 /* 9903 * If the peer dropped the rwnd on 9904 * these then we don't worry about them. 9905 */ 9906 continue; 9907 } 9908 /* Check lost state */ 9909 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { 9910 uint32_t exp; 9911 9912 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; 9913 if (TSTMP_LT(exp, cts) || (exp == cts)) { 9914 /* We consider it lost */ 9915 nrsm->r_flags |= RACK_WAS_LOST; 9916 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; 9917 } 9918 } 9919 if (nrsm->r_flags & RACK_SACK_PASSED) { 9920 /* 9921 * We found one that is already marked 9922 * passed, we have been here before and 9923 * so all others below this are marked. 9924 */ 9925 break; 9926 } 9927 nrsm->r_flags |= RACK_SACK_PASSED; 9928 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 9929 } 9930 } 9931 9932 static void 9933 rack_need_set_test(struct tcpcb *tp, 9934 struct tcp_rack *rack, 9935 struct rack_sendmap *rsm, 9936 tcp_seq th_ack, 9937 int line, 9938 int use_which) 9939 { 9940 struct rack_sendmap *s_rsm; 9941 9942 if ((tp->t_flags & TF_GPUTINPROG) && 9943 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 9944 /* 9945 * We were app limited, and this ack 9946 * butts up or goes beyond the point where we want 9947 * to start our next measurement. We need 9948 * to record the new gput_ts as here and 9949 * possibly update the start sequence. 9950 */ 9951 uint32_t seq, ts; 9952 9953 if (rsm->r_rtr_cnt > 1) { 9954 /* 9955 * This is a retransmit, can we 9956 * really make any assessment at this 9957 * point? We are not really sure of 9958 * the timestamp, is it this or the 9959 * previous transmission? 9960 * 9961 * Lets wait for something better that 9962 * is not retransmitted. 9963 */ 9964 return; 9965 } 9966 seq = tp->gput_seq; 9967 ts = tp->gput_ts; 9968 rack->app_limited_needs_set = 0; 9969 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 9970 /* Do we start at a new end? */ 9971 if ((use_which == RACK_USE_BEG) && 9972 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { 9973 /* 9974 * When we get an ACK that just eats 9975 * up some of the rsm, we set RACK_USE_BEG 9976 * since whats at r_start (i.e. th_ack) 9977 * is left unacked and thats where the 9978 * measurement now starts. 9979 */ 9980 tp->gput_seq = rsm->r_start; 9981 } 9982 if ((use_which == RACK_USE_END) && 9983 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 9984 /* 9985 * We use the end when the cumack 9986 * is moving forward and completely 9987 * deleting the rsm passed so basically 9988 * r_end holds th_ack. 9989 * 9990 * For SACK's we also want to use the end 9991 * since this piece just got sacked and 9992 * we want to target anything after that 9993 * in our measurement. 
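 *
 * As a quick reference (this also covers the END_OR_THACK case
 * handled a little further below), the new measurement start resolves
 * roughly to:
 *
 *	RACK_USE_BEG          -> gput_seq = rsm->r_start
 *	RACK_USE_END          -> gput_seq = rsm->r_end
 *	RACK_USE_END_OR_THACK -> gput_seq = the later of th_ack and
 *	                         rsm->r_end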
9994 */ 9995 tp->gput_seq = rsm->r_end; 9996 } 9997 if (use_which == RACK_USE_END_OR_THACK) { 9998 /* 9999 * special case for ack moving forward, 10000 * not a sack, we need to move all the 10001 * way up to where this ack cum-ack moves 10002 * to. 10003 */ 10004 if (SEQ_GT(th_ack, rsm->r_end)) 10005 tp->gput_seq = th_ack; 10006 else 10007 tp->gput_seq = rsm->r_end; 10008 } 10009 if (SEQ_LT(tp->gput_seq, tp->snd_max)) 10010 s_rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 10011 else 10012 s_rsm = NULL; 10013 /* 10014 * Pick up the correct send time if we can the rsm passed in 10015 * may be equal to s_rsm if the RACK_USE_BEG was set. For the other 10016 * two cases (RACK_USE_THACK or RACK_USE_END) most likely we will 10017 * find a different seq i.e. the next send up. 10018 * 10019 * If that has not been sent, s_rsm will be NULL and we must 10020 * arrange it so this function will get called again by setting 10021 * app_limited_needs_set. 10022 */ 10023 if (s_rsm) 10024 rack->r_ctl.rc_gp_output_ts = s_rsm->r_tim_lastsent[0]; 10025 else { 10026 /* If we hit here we have to have *not* sent tp->gput_seq */ 10027 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; 10028 /* Set it up so we will go through here again */ 10029 rack->app_limited_needs_set = 1; 10030 } 10031 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { 10032 /* 10033 * We moved beyond this guy's range, re-calculate 10034 * the new end point. 10035 */ 10036 if (rack->rc_gp_filled == 0) { 10037 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 10038 } else { 10039 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 10040 } 10041 } 10042 /* 10043 * We are moving the goal post, we may be able to clear the 10044 * measure_saw_probe_rtt flag. 10045 */ 10046 if ((rack->in_probe_rtt == 0) && 10047 (rack->measure_saw_probe_rtt) && 10048 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 10049 rack->measure_saw_probe_rtt = 0; 10050 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 10051 seq, tp->gput_seq, 10052 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | 10053 (uint64_t)rack->r_ctl.rc_gp_output_ts), 10054 5, line, NULL, 0); 10055 if (rack->rc_gp_filled && 10056 ((tp->gput_ack - tp->gput_seq) < 10057 max(rc_init_window(rack), (MIN_GP_WIN * 10058 ctf_fixed_maxseg(tp))))) { 10059 uint32_t ideal_amount; 10060 10061 ideal_amount = rack_get_measure_window(tp, rack); 10062 if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) { 10063 /* 10064 * There is no sense of continuing this measurement 10065 * because its too small to gain us anything we 10066 * trust. Skip it and that way we can start a new 10067 * measurement quicker. 10068 */ 10069 tp->t_flags &= ~TF_GPUTINPROG; 10070 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 10071 0, 0, 10072 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | 10073 (uint64_t)rack->r_ctl.rc_gp_output_ts), 10074 6, __LINE__, NULL, 0); 10075 } else { 10076 /* 10077 * Reset the window further out. 
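 *
 * i.e. keep the measurement end pegged one "ideal" window beyond the
 * (possibly advanced) start:
 *
 *	tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
 *
 * which is what the assignment just below does via ideal_amount.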
10078 */ 10079 tp->gput_ack = tp->gput_seq + ideal_amount; 10080 } 10081 } 10082 rack_tend_gp_marks(tp, rack); 10083 rack_log_gpset(rack, tp->gput_ack, 0, 0, line, 2, rsm); 10084 } 10085 } 10086 10087 static inline int 10088 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) 10089 { 10090 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { 10091 /* Behind our TLP definition or right at */ 10092 return (0); 10093 } 10094 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { 10095 /* The start is beyond or right at our end of TLP definition */ 10096 return (0); 10097 } 10098 /* It has to be a sub-part of the original TLP recorded */ 10099 return (1); 10100 } 10101 10102 static uint32_t 10103 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 10104 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, 10105 int *no_extra, 10106 int *moved_two, uint32_t segsiz) 10107 { 10108 uint32_t start, end, changed = 0; 10109 struct rack_sendmap stack_map; 10110 struct rack_sendmap *rsm, *nrsm, *prev, *next; 10111 int insret __diagused; 10112 int32_t used_ref = 1; 10113 int moved = 0; 10114 #ifdef TCP_SAD_DETECTION 10115 int allow_segsiz; 10116 int first_time_through = 1; 10117 #endif 10118 int noextra = 0; 10119 int can_use_hookery = 0; 10120 10121 start = sack->start; 10122 end = sack->end; 10123 rsm = *prsm; 10124 10125 #ifdef TCP_SAD_DETECTION 10126 /* 10127 * There are a strange number of proxys and meddle boxes in the world 10128 * that seem to cut up segments on different boundaries. This gets us 10129 * smaller sacks that are still ok in terms of it being an attacker. 10130 * We use the base segsiz to calculate an allowable smallness but 10131 * also enforce a min on the segsiz in case it is an attacker playing 10132 * games with MSS. So basically if the sack arrives and it is 10133 * larger than a worse case 960 bytes, we don't classify the guy 10134 * as supicious. 10135 */ 10136 allow_segsiz = max(segsiz, 1200) * sad_seg_size_per; 10137 allow_segsiz /= 1000; 10138 #endif 10139 do_rest_ofb: 10140 if ((rsm == NULL) || 10141 (SEQ_LT(end, rsm->r_start)) || 10142 (SEQ_GEQ(start, rsm->r_end)) || 10143 (SEQ_LT(start, rsm->r_start))) { 10144 /* 10145 * We are not in the right spot, 10146 * find the correct spot in the tree. 10147 */ 10148 used_ref = 0; 10149 rsm = tqhash_find(rack->r_ctl.tqh, start); 10150 moved++; 10151 } 10152 if (rsm == NULL) { 10153 /* TSNH */ 10154 goto out; 10155 } 10156 #ifdef TCP_SAD_DETECTION 10157 /* Now we must check for suspicous activity */ 10158 if ((first_time_through == 1) && 10159 ((end - start) < min((rsm->r_end - rsm->r_start), allow_segsiz)) && 10160 ((rsm->r_flags & RACK_PMTU_CHG) == 0) && 10161 ((rsm->r_flags & RACK_TLP) == 0)) { 10162 /* 10163 * Its less than a full MSS or the segment being acked 10164 * this should only happen if the rsm in question had the 10165 * r_just_ret flag set <and> the end matches the end of 10166 * the rsm block. 10167 * 10168 * Note we do not look at segments that have had TLP's on 10169 * them since we can get un-reported rwnd collapses that 10170 * basically we TLP on and then we get back a sack block 10171 * that goes from the start to only a small way. 
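 *
 * Worked example of the cutoff computed above: with sad_seg_size_per
 * at 800 (80%, which is what yields the 960 byte figure mentioned
 * here) we have
 *
 *	allow_segsiz = max(segsiz, 1200) * 800 / 1000;
 *
 * so a 1448 byte segsiz gives a 1158 byte cutoff and anything at or
 * below 1200 floors at 960. Only a sack block smaller than both this
 * cutoff and the rsm itself is examined as possibly suspicious, and
 * rsm's with RACK_TLP or RACK_PMTU_CHG set are exempt.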
10172 * 10173 */ 10174 int loss, ok; 10175 10176 ok = 0; 10177 if (SEQ_GEQ(end, rsm->r_end)) { 10178 if (rsm->r_just_ret == 1) { 10179 /* This was at the end of a send which is ok */ 10180 ok = 1; 10181 } else { 10182 /* A bit harder was it the end of our segment */ 10183 int segs, len; 10184 10185 len = (rsm->r_end - rsm->r_start); 10186 segs = len / segsiz; 10187 segs *= segsiz; 10188 if ((segs + (rsm->r_end - start)) == len) { 10189 /* 10190 * So this last bit was the 10191 * end of our send if we cut it 10192 * up into segsiz pieces so its ok. 10193 */ 10194 ok = 1; 10195 } 10196 } 10197 } 10198 if (ok == 0) { 10199 /* 10200 * This guy is doing something suspicious 10201 * lets start detection. 10202 */ 10203 if (rack->rc_suspicious == 0) { 10204 tcp_trace_point(rack->rc_tp, TCP_TP_SAD_SUSPECT); 10205 counter_u64_add(rack_sack_attacks_suspect, 1); 10206 rack->rc_suspicious = 1; 10207 rack_log_sad(rack, 4); 10208 if (tcp_bblogging_on(rack->rc_tp)) { 10209 union tcp_log_stackspecific log; 10210 struct timeval tv; 10211 10212 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 10213 log.u_bbr.flex1 = end; 10214 log.u_bbr.flex2 = start; 10215 log.u_bbr.flex3 = rsm->r_end; 10216 log.u_bbr.flex4 = rsm->r_start; 10217 log.u_bbr.flex5 = segsiz; 10218 log.u_bbr.flex6 = rsm->r_fas; 10219 log.u_bbr.flex7 = rsm->r_bas; 10220 log.u_bbr.flex8 = 5; 10221 log.u_bbr.pkts_out = rsm->r_flags; 10222 log.u_bbr.bbr_state = rack->rc_suspicious; 10223 log.u_bbr.bbr_substate = rsm->r_just_ret; 10224 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 10225 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 10226 TCP_LOG_EVENTP(rack->rc_tp, NULL, 10227 &rack->rc_inp->inp_socket->so_rcv, 10228 &rack->rc_inp->inp_socket->so_snd, 10229 TCP_SAD_DETECTION, 0, 10230 0, &log, false, &tv); 10231 } 10232 } 10233 /* You loose some ack count every time you sack 10234 * a small bit that is not butting to the end of 10235 * what we have sent. This is because we never 10236 * send small bits unless its the end of the sb. 10237 * Anyone sending a sack that is not at the end 10238 * is thus very very suspicious. 10239 */ 10240 loss = (segsiz/2) / (end - start); 10241 if (loss < rack->r_ctl.ack_count) 10242 rack->r_ctl.ack_count -= loss; 10243 else 10244 rack->r_ctl.ack_count = 0; 10245 } 10246 } 10247 first_time_through = 0; 10248 #endif 10249 /* Ok we have an ACK for some piece of this rsm */ 10250 if (rsm->r_start != start) { 10251 if ((rsm->r_flags & RACK_ACKED) == 0) { 10252 /* 10253 * Before any splitting or hookery is 10254 * done is it a TLP of interest i.e. rxt? 10255 */ 10256 if ((rsm->r_flags & RACK_TLP) && 10257 (rsm->r_rtr_cnt > 1)) { 10258 /* 10259 * We are splitting a rxt TLP, check 10260 * if we need to save off the start/end 10261 */ 10262 if (rack->rc_last_tlp_acked_set && 10263 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 10264 /* 10265 * We already turned this on since we are inside 10266 * the previous one was a partially sack now we 10267 * are getting another one (maybe all of it). 10268 * 10269 */ 10270 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 10271 /* 10272 * Lets make sure we have all of it though. 
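 *
 * ("All of it" meaning: widen the recorded TLP block to the union of
 * the sacked ranges -- the sequence-space minimum of the starts and
 * maximum of the ends -- which is what the two checks just below do.)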
10273 */ 10274 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 10275 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10276 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10277 rack->r_ctl.last_tlp_acked_end); 10278 } 10279 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 10280 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10281 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10282 rack->r_ctl.last_tlp_acked_end); 10283 } 10284 } else { 10285 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10286 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10287 rack->rc_last_tlp_past_cumack = 0; 10288 rack->rc_last_tlp_acked_set = 1; 10289 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 10290 } 10291 } 10292 /** 10293 * Need to split this in two pieces the before and after, 10294 * the before remains in the map, the after must be 10295 * added. In other words we have: 10296 * rsm |--------------| 10297 * sackblk |-------> 10298 * rsm will become 10299 * rsm |---| 10300 * and nrsm will be the sacked piece 10301 * nrsm |----------| 10302 * 10303 * But before we start down that path lets 10304 * see if the sack spans over on top of 10305 * the next guy and it is already sacked. 10306 * 10307 */ 10308 /* 10309 * Hookery can only be used if the two entries 10310 * are in the same bucket and neither one of 10311 * them staddle the bucket line. 10312 */ 10313 next = tqhash_next(rack->r_ctl.tqh, rsm); 10314 if (next && 10315 (rsm->bindex == next->bindex) && 10316 ((rsm->r_flags & RACK_STRADDLE) == 0) && 10317 ((next->r_flags & RACK_STRADDLE) == 0) && 10318 ((rsm->r_flags & RACK_IS_PCM) == 0) && 10319 ((next->r_flags & RACK_IS_PCM) == 0) && 10320 (rsm->r_flags & RACK_IN_GP_WIN) && 10321 (next->r_flags & RACK_IN_GP_WIN)) 10322 can_use_hookery = 1; 10323 else 10324 can_use_hookery = 0; 10325 if (next && can_use_hookery && 10326 (next->r_flags & RACK_ACKED) && 10327 SEQ_GEQ(end, next->r_start)) { 10328 /** 10329 * So the next one is already acked, and 10330 * we can thus by hookery use our stack_map 10331 * to reflect the piece being sacked and 10332 * then adjust the two tree entries moving 10333 * the start and ends around. So we start like: 10334 * rsm |------------| (not-acked) 10335 * next |-----------| (acked) 10336 * sackblk |--------> 10337 * We want to end like so: 10338 * rsm |------| (not-acked) 10339 * next |-----------------| (acked) 10340 * nrsm |-----| 10341 * Where nrsm is a temporary stack piece we 10342 * use to update all the gizmos. 10343 */ 10344 /* Copy up our fudge block */ 10345 noextra++; 10346 nrsm = &stack_map; 10347 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 10348 /* Now adjust our tree blocks */ 10349 tqhash_update_end(rack->r_ctl.tqh, rsm, start); 10350 next->r_start = start; 10351 rsm->r_flags |= RACK_SHUFFLED; 10352 next->r_flags |= RACK_SHUFFLED; 10353 /* Now we must adjust back where next->m is */ 10354 rack_setup_offset_for_rsm(rack, rsm, next); 10355 /* 10356 * Which timestamp do we keep? It is rather 10357 * important in GP measurements to have the 10358 * accurate end of the send window. 10359 * 10360 * We keep the largest value, which is the newest 10361 * send. We do this in case a segment that is 10362 * joined together and not part of a GP estimate 10363 * later gets expanded into the GP estimate. 10364 * 10365 * We prohibit the merging of unlike kinds i.e. 
10366 * all pieces that are in the GP estimate can be 10367 * merged and all pieces that are not in a GP estimate 10368 * can be merged, but not disimilar pieces. Combine 10369 * this with taking the highest here and we should 10370 * be ok unless of course the client reneges. Then 10371 * all bets are off. 10372 */ 10373 if (next->r_tim_lastsent[(next->r_rtr_cnt-1)] < 10374 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) 10375 next->r_tim_lastsent[(next->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]; 10376 /* 10377 * And we must keep the newest ack arrival time. 10378 */ 10379 if (next->r_ack_arrival < 10380 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) 10381 next->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10382 10383 10384 /* We don't need to adjust rsm, it did not change */ 10385 /* Clear out the dup ack count of the remainder */ 10386 rsm->r_dupack = 0; 10387 rsm->r_just_ret = 0; 10388 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 10389 /* Now lets make sure our fudge block is right */ 10390 nrsm->r_start = start; 10391 /* Now lets update all the stats and such */ 10392 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 10393 if (rack->app_limited_needs_set) 10394 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 10395 changed += (nrsm->r_end - nrsm->r_start); 10396 /* You get a count for acking a whole segment or more */ 10397 if ((nrsm->r_end - nrsm->r_start) >= segsiz) 10398 rack->r_ctl.ack_count += ((nrsm->r_end - nrsm->r_start) / segsiz); 10399 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 10400 if (rsm->r_flags & RACK_WAS_LOST) { 10401 int my_chg; 10402 10403 my_chg = (nrsm->r_end - nrsm->r_start); 10404 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 10405 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 10406 if (my_chg <= rack->r_ctl.rc_considered_lost) 10407 rack->r_ctl.rc_considered_lost -= my_chg; 10408 else 10409 rack->r_ctl.rc_considered_lost = 0; 10410 } 10411 if (nrsm->r_flags & RACK_SACK_PASSED) { 10412 rack->r_ctl.rc_reorder_ts = cts; 10413 if (rack->r_ctl.rc_reorder_ts == 0) 10414 rack->r_ctl.rc_reorder_ts = 1; 10415 } 10416 /* 10417 * Now we want to go up from rsm (the 10418 * one left un-acked) to the next one 10419 * in the tmap. We do this so when 10420 * we walk backwards we include marking 10421 * sack-passed on rsm (The one passed in 10422 * is skipped since it is generally called 10423 * on something sacked before removing it 10424 * from the tmap). 10425 */ 10426 if (rsm->r_in_tmap) { 10427 nrsm = TAILQ_NEXT(rsm, r_tnext); 10428 /* 10429 * Now that we have the next 10430 * one walk backwards from there. 10431 */ 10432 if (nrsm && nrsm->r_in_tmap) 10433 rack_log_sack_passed(tp, rack, nrsm, cts); 10434 } 10435 /* Now are we done? */ 10436 if (SEQ_LT(end, next->r_end) || 10437 (end == next->r_end)) { 10438 /* Done with block */ 10439 goto out; 10440 } 10441 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); 10442 counter_u64_add(rack_sack_used_next_merge, 1); 10443 /* Postion for the next block */ 10444 start = next->r_end; 10445 rsm = tqhash_next(rack->r_ctl.tqh, next); 10446 if (rsm == NULL) 10447 goto out; 10448 } else { 10449 /** 10450 * We can't use any hookery here, so we 10451 * need to split the map. We enter like 10452 * so: 10453 * rsm |--------| 10454 * sackblk |-----> 10455 * We will add the new block nrsm and 10456 * that will be the new portion, and then 10457 * fall through after reseting rsm. 
So we 10458 * split and look like this: 10459 * rsm |----| 10460 * sackblk |-----> 10461 * nrsm |---| 10462 * We then fall through reseting 10463 * rsm to nrsm, so the next block 10464 * picks it up. 10465 */ 10466 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 10467 if (nrsm == NULL) { 10468 /* 10469 * failed XXXrrs what can we do but loose the sack 10470 * info? 10471 */ 10472 goto out; 10473 } 10474 counter_u64_add(rack_sack_splits, 1); 10475 rack_clone_rsm(rack, nrsm, rsm, start); 10476 moved++; 10477 rsm->r_just_ret = 0; 10478 #ifndef INVARIANTS 10479 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 10480 #else 10481 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 10482 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 10483 nrsm, insret, rack, rsm); 10484 } 10485 #endif 10486 if (rsm->r_in_tmap) { 10487 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 10488 nrsm->r_in_tmap = 1; 10489 } 10490 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 10491 rsm->r_flags &= (~RACK_HAS_FIN); 10492 /* Position us to point to the new nrsm that starts the sack blk */ 10493 rsm = nrsm; 10494 } 10495 } else { 10496 /* Already sacked this piece */ 10497 counter_u64_add(rack_sack_skipped_acked, 1); 10498 moved++; 10499 if (end == rsm->r_end) { 10500 /* Done with block */ 10501 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 10502 goto out; 10503 } else if (SEQ_LT(end, rsm->r_end)) { 10504 /* A partial sack to a already sacked block */ 10505 moved++; 10506 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 10507 goto out; 10508 } else { 10509 /* 10510 * The end goes beyond this guy 10511 * reposition the start to the 10512 * next block. 10513 */ 10514 start = rsm->r_end; 10515 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 10516 if (rsm == NULL) 10517 goto out; 10518 } 10519 } 10520 } 10521 if (SEQ_GEQ(end, rsm->r_end)) { 10522 /** 10523 * The end of this block is either beyond this guy or right 10524 * at this guy. I.e.: 10525 * rsm --- |-----| 10526 * end |-----| 10527 * <or> 10528 * end |---------| 10529 */ 10530 if ((rsm->r_flags & RACK_ACKED) == 0) { 10531 /* 10532 * Is it a TLP of interest? 10533 */ 10534 if ((rsm->r_flags & RACK_TLP) && 10535 (rsm->r_rtr_cnt > 1)) { 10536 /* 10537 * We are splitting a rxt TLP, check 10538 * if we need to save off the start/end 10539 */ 10540 if (rack->rc_last_tlp_acked_set && 10541 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 10542 /* 10543 * We already turned this on since we are inside 10544 * the previous one was a partially sack now we 10545 * are getting another one (maybe all of it). 10546 */ 10547 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 10548 /* 10549 * Lets make sure we have all of it though. 
10550 */ 10551 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 10552 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10553 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10554 rack->r_ctl.last_tlp_acked_end); 10555 } 10556 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 10557 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10558 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10559 rack->r_ctl.last_tlp_acked_end); 10560 } 10561 } else { 10562 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10563 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10564 rack->rc_last_tlp_past_cumack = 0; 10565 rack->rc_last_tlp_acked_set = 1; 10566 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 10567 } 10568 } 10569 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 10570 changed += (rsm->r_end - rsm->r_start); 10571 /* You get a count for acking a whole segment or more */ 10572 if ((rsm->r_end - rsm->r_start) >= segsiz) 10573 rack->r_ctl.ack_count += ((rsm->r_end - rsm->r_start) / segsiz); 10574 if (rsm->r_flags & RACK_WAS_LOST) { 10575 int my_chg; 10576 10577 my_chg = (rsm->r_end - rsm->r_start); 10578 rsm->r_flags &= ~RACK_WAS_LOST; 10579 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 10580 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 10581 if (my_chg <= rack->r_ctl.rc_considered_lost) 10582 rack->r_ctl.rc_considered_lost -= my_chg; 10583 else 10584 rack->r_ctl.rc_considered_lost = 0; 10585 } 10586 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 10587 if (rsm->r_in_tmap) /* should be true */ 10588 rack_log_sack_passed(tp, rack, rsm, cts); 10589 /* Is Reordering occuring? */ 10590 if (rsm->r_flags & RACK_SACK_PASSED) { 10591 rsm->r_flags &= ~RACK_SACK_PASSED; 10592 rack->r_ctl.rc_reorder_ts = cts; 10593 if (rack->r_ctl.rc_reorder_ts == 0) 10594 rack->r_ctl.rc_reorder_ts = 1; 10595 } 10596 if (rack->app_limited_needs_set) 10597 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 10598 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10599 rsm->r_flags |= RACK_ACKED; 10600 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); 10601 if (rsm->r_in_tmap) { 10602 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10603 rsm->r_in_tmap = 0; 10604 } 10605 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); 10606 } else { 10607 counter_u64_add(rack_sack_skipped_acked, 1); 10608 moved++; 10609 } 10610 if (end == rsm->r_end) { 10611 /* This block only - done, setup for next */ 10612 goto out; 10613 } 10614 /* 10615 * There is more not coverend by this rsm move on 10616 * to the next block in the tail queue hash table. 10617 */ 10618 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); 10619 start = rsm->r_end; 10620 rsm = nrsm; 10621 if (rsm == NULL) 10622 goto out; 10623 goto do_rest_ofb; 10624 } 10625 /** 10626 * The end of this sack block is smaller than 10627 * our rsm i.e.: 10628 * rsm --- |-----| 10629 * end |--| 10630 */ 10631 if ((rsm->r_flags & RACK_ACKED) == 0) { 10632 /* 10633 * Is it a TLP of interest? 
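 *
 * A "TLP of interest" is one that was itself retransmitted (RACK_TLP
 * with r_rtr_cnt > 1); we remember the acked range in
 * last_tlp_acked_start/end so a later DSACK for it is not mistaken
 * for reordering.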
10634 */ 10635 if ((rsm->r_flags & RACK_TLP) && 10636 (rsm->r_rtr_cnt > 1)) { 10637 /* 10638 * We are splitting a rxt TLP, check 10639 * if we need to save off the start/end 10640 */ 10641 if (rack->rc_last_tlp_acked_set && 10642 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 10643 /* 10644 * We already turned this on since we are inside 10645 * the previous one was a partially sack now we 10646 * are getting another one (maybe all of it). 10647 */ 10648 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 10649 /* 10650 * Lets make sure we have all of it though. 10651 */ 10652 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 10653 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10654 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10655 rack->r_ctl.last_tlp_acked_end); 10656 } 10657 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 10658 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10659 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10660 rack->r_ctl.last_tlp_acked_end); 10661 } 10662 } else { 10663 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10664 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10665 rack->rc_last_tlp_past_cumack = 0; 10666 rack->rc_last_tlp_acked_set = 1; 10667 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 10668 } 10669 } 10670 /* 10671 * Hookery can only be used if the two entries 10672 * are in the same bucket and neither one of 10673 * them staddle the bucket line. 10674 */ 10675 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10676 if (prev && 10677 (rsm->bindex == prev->bindex) && 10678 ((rsm->r_flags & RACK_STRADDLE) == 0) && 10679 ((prev->r_flags & RACK_STRADDLE) == 0) && 10680 ((rsm->r_flags & RACK_IS_PCM) == 0) && 10681 ((prev->r_flags & RACK_IS_PCM) == 0) && 10682 (rsm->r_flags & RACK_IN_GP_WIN) && 10683 (prev->r_flags & RACK_IN_GP_WIN)) 10684 can_use_hookery = 1; 10685 else 10686 can_use_hookery = 0; 10687 if (prev && can_use_hookery && 10688 (prev->r_flags & RACK_ACKED)) { 10689 /** 10690 * Goal, we want the right remainder of rsm to shrink 10691 * in place and span from (rsm->r_start = end) to rsm->r_end. 10692 * We want to expand prev to go all the way 10693 * to prev->r_end <- end. 10694 * so in the tree we have before: 10695 * prev |--------| (acked) 10696 * rsm |-------| (non-acked) 10697 * sackblk |-| 10698 * We churn it so we end up with 10699 * prev |----------| (acked) 10700 * rsm |-----| (non-acked) 10701 * nrsm |-| (temporary) 10702 * 10703 * Note if either prev/rsm is a TLP we don't 10704 * do this. 10705 */ 10706 noextra++; 10707 nrsm = &stack_map; 10708 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 10709 tqhash_update_end(rack->r_ctl.tqh, prev, end); 10710 rsm->r_start = end; 10711 rsm->r_flags |= RACK_SHUFFLED; 10712 prev->r_flags |= RACK_SHUFFLED; 10713 /* Now adjust nrsm (stack copy) to be 10714 * the one that is the small 10715 * piece that was "sacked". 10716 */ 10717 nrsm->r_end = end; 10718 rsm->r_dupack = 0; 10719 /* 10720 * Which timestamp do we keep? It is rather 10721 * important in GP measurements to have the 10722 * accurate end of the send window. 10723 * 10724 * We keep the largest value, which is the newest 10725 * send. We do this in case a segment that is 10726 * joined together and not part of a GP estimate 10727 * later gets expanded into the GP estimate. 10728 * 10729 * We prohibit the merging of unlike kinds i.e. 
10730 * all pieces that are in the GP estimate can be 10731 * merged and all pieces that are not in a GP estimate 10732 * can be merged, but not disimilar pieces. Combine 10733 * this with taking the highest here and we should 10734 * be ok unless of course the client reneges. Then 10735 * all bets are off. 10736 */ 10737 if(prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] < 10738 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) { 10739 prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 10740 } 10741 /* 10742 * And we must keep the newest ack arrival time. 10743 */ 10744 10745 if(prev->r_ack_arrival < 10746 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) 10747 prev->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10748 10749 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 10750 /* 10751 * Now that the rsm has had its start moved forward 10752 * lets go ahead and get its new place in the world. 10753 */ 10754 rack_setup_offset_for_rsm(rack, prev, rsm); 10755 /* 10756 * Now nrsm is our new little piece 10757 * that is acked (which was merged 10758 * to prev). Update the rtt and changed 10759 * based on that. Also check for reordering. 10760 */ 10761 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 10762 if (rack->app_limited_needs_set) 10763 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 10764 changed += (nrsm->r_end - nrsm->r_start); 10765 /* You get a count for acking a whole segment or more */ 10766 if ((nrsm->r_end - nrsm->r_start) >= segsiz) 10767 rack->r_ctl.ack_count += ((nrsm->r_end - nrsm->r_start) / segsiz); 10768 10769 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 10770 if (rsm->r_flags & RACK_WAS_LOST) { 10771 int my_chg; 10772 10773 my_chg = (nrsm->r_end - nrsm->r_start); 10774 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 10775 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 10776 if (my_chg <= rack->r_ctl.rc_considered_lost) 10777 rack->r_ctl.rc_considered_lost -= my_chg; 10778 else 10779 rack->r_ctl.rc_considered_lost = 0; 10780 } 10781 if (nrsm->r_flags & RACK_SACK_PASSED) { 10782 rack->r_ctl.rc_reorder_ts = cts; 10783 if (rack->r_ctl.rc_reorder_ts == 0) 10784 rack->r_ctl.rc_reorder_ts = 1; 10785 } 10786 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); 10787 rsm = prev; 10788 counter_u64_add(rack_sack_used_prev_merge, 1); 10789 } else { 10790 /** 10791 * This is the case where our previous 10792 * block is not acked either, so we must 10793 * split the block in two. 10794 */ 10795 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 10796 if (nrsm == NULL) { 10797 /* failed rrs what can we do but loose the sack info? */ 10798 goto out; 10799 } 10800 if ((rsm->r_flags & RACK_TLP) && 10801 (rsm->r_rtr_cnt > 1)) { 10802 /* 10803 * We are splitting a rxt TLP, check 10804 * if we need to save off the start/end 10805 */ 10806 if (rack->rc_last_tlp_acked_set && 10807 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 10808 /* 10809 * We already turned this on since this block is inside 10810 * the previous one was a partially sack now we 10811 * are getting another one (maybe all of it). 10812 */ 10813 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 10814 /* 10815 * Lets make sure we have all of it though. 
10816 */
10817 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
10818 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
10819 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
10820 rack->r_ctl.last_tlp_acked_end);
10821 }
10822 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
10823 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
10824 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
10825 rack->r_ctl.last_tlp_acked_end);
10826 }
10827 } else {
10828 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
10829 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
10830 rack->rc_last_tlp_acked_set = 1;
10831 rack->rc_last_tlp_past_cumack = 0;
10832 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
10833 }
10834 }
10835 /**
10836 * In this case nrsm becomes
10837 * nrsm->r_start = end;
10838 * nrsm->r_end = rsm->r_end;
10839 * which is un-acked.
10840 * <and>
10841 * rsm->r_end = nrsm->r_start;
10842 * i.e. the remaining un-acked
10843 * piece is left on the left
10844 * hand side.
10845 *
10846 * So we start like this
10847 * rsm |----------| (not acked)
10848 * sackblk |---|
10849 * build it so we have
10850 * rsm |---| (acked)
10851 * nrsm |------| (not acked)
10852 */
10853 counter_u64_add(rack_sack_splits, 1);
10854 rack_clone_rsm(rack, nrsm, rsm, end);
10855 moved++;
10856 rsm->r_flags &= (~RACK_HAS_FIN);
10857 rsm->r_just_ret = 0;
10858 #ifndef INVARIANTS
10859 (void)tqhash_insert(rack->r_ctl.tqh, nrsm);
10860 #else
10861 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
10862 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
10863 nrsm, insret, rack, rsm);
10864 }
10865 #endif
10866 if (rsm->r_in_tmap) {
10867 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
10868 nrsm->r_in_tmap = 1;
10869 }
10870 nrsm->r_dupack = 0;
10871 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
10872 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
10873 changed += (rsm->r_end - rsm->r_start);
10874 /* You get a count for acking a whole segment or more */
10875 if ((rsm->r_end - rsm->r_start) >= segsiz)
10876 rack->r_ctl.ack_count += ((rsm->r_end - rsm->r_start) / segsiz);
10877 if (rsm->r_flags & RACK_WAS_LOST) {
10878 int my_chg;
10879
10880 my_chg = (rsm->r_end - rsm->r_start);
10881 rsm->r_flags &= ~RACK_WAS_LOST;
10882 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg),
10883 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
10884 if (my_chg <= rack->r_ctl.rc_considered_lost)
10885 rack->r_ctl.rc_considered_lost -= my_chg;
10886 else
10887 rack->r_ctl.rc_considered_lost = 0;
10888 }
10889 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
10890
10891 if (rsm->r_in_tmap) /* should be true */
10892 rack_log_sack_passed(tp, rack, rsm, cts);
10893 /* Is Reordering occurring?
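 *
 * The inference: RACK_SACK_PASSED means a later send was sacked
 * before this one, and now this one has been sacked too, so that
 * earlier pass-over looks like reordering rather than loss. Record
 * when we saw it in rc_reorder_ts (0 means "unset", hence the bump
 * to 1 below).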
*/ 10894 if (rsm->r_flags & RACK_SACK_PASSED) { 10895 rsm->r_flags &= ~RACK_SACK_PASSED; 10896 rack->r_ctl.rc_reorder_ts = cts; 10897 if (rack->r_ctl.rc_reorder_ts == 0) 10898 rack->r_ctl.rc_reorder_ts = 1; 10899 } 10900 if (rack->app_limited_needs_set) 10901 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 10902 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10903 rsm->r_flags |= RACK_ACKED; 10904 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); 10905 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__); 10906 if (rsm->r_in_tmap) { 10907 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10908 rsm->r_in_tmap = 0; 10909 } 10910 } 10911 } else if (start != end){ 10912 /* 10913 * The block was already acked. 10914 */ 10915 counter_u64_add(rack_sack_skipped_acked, 1); 10916 moved++; 10917 } 10918 out: 10919 if (rsm && 10920 ((rsm->r_flags & RACK_TLP) == 0) && 10921 (rsm->r_flags & RACK_ACKED)) { 10922 /* 10923 * Now can we merge where we worked 10924 * with either the previous or 10925 * next block? 10926 */ 10927 next = tqhash_next(rack->r_ctl.tqh, rsm); 10928 while (next) { 10929 if (next->r_flags & RACK_TLP) 10930 break; 10931 /* Only allow merges between ones in or out of GP window */ 10932 if ((next->r_flags & RACK_IN_GP_WIN) && 10933 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { 10934 break; 10935 } 10936 if ((rsm->r_flags & RACK_IN_GP_WIN) && 10937 ((next->r_flags & RACK_IN_GP_WIN) == 0)) { 10938 break; 10939 } 10940 if (rsm->bindex != next->bindex) 10941 break; 10942 if (rsm->r_flags & RACK_STRADDLE) 10943 break; 10944 if (rsm->r_flags & RACK_IS_PCM) 10945 break; 10946 if (next->r_flags & RACK_STRADDLE) 10947 break; 10948 if (next->r_flags & RACK_IS_PCM) 10949 break; 10950 if (next->r_flags & RACK_ACKED) { 10951 /* yep this and next can be merged */ 10952 rsm = rack_merge_rsm(rack, rsm, next); 10953 noextra++; 10954 next = tqhash_next(rack->r_ctl.tqh, rsm); 10955 } else 10956 break; 10957 } 10958 /* Now what about the previous? */ 10959 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10960 while (prev) { 10961 if (prev->r_flags & RACK_TLP) 10962 break; 10963 /* Only allow merges between ones in or out of GP window */ 10964 if ((prev->r_flags & RACK_IN_GP_WIN) && 10965 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { 10966 break; 10967 } 10968 if ((rsm->r_flags & RACK_IN_GP_WIN) && 10969 ((prev->r_flags & RACK_IN_GP_WIN) == 0)) { 10970 break; 10971 } 10972 if (rsm->bindex != prev->bindex) 10973 break; 10974 if (rsm->r_flags & RACK_STRADDLE) 10975 break; 10976 if (rsm->r_flags & RACK_IS_PCM) 10977 break; 10978 if (prev->r_flags & RACK_STRADDLE) 10979 break; 10980 if (prev->r_flags & RACK_IS_PCM) 10981 break; 10982 if (prev->r_flags & RACK_ACKED) { 10983 /* yep the previous and this can be merged */ 10984 rsm = rack_merge_rsm(rack, prev, rsm); 10985 noextra++; 10986 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10987 } else 10988 break; 10989 } 10990 } 10991 if (used_ref == 0) { 10992 counter_u64_add(rack_sack_proc_all, 1); 10993 } else { 10994 counter_u64_add(rack_sack_proc_short, 1); 10995 } 10996 /* Save off the next one for quick reference. */ 10997 nrsm = tqhash_find(rack->r_ctl.tqh, end); 10998 *prsm = rack->r_ctl.rc_sacklast = nrsm; 10999 /* Pass back the moved. 
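 *
 * Roughly: *moved_two reports how many times this walk had to
 * reposition or skip over already-handled map entries, *no_extra
 * reports the merge/"hookery" operations that avoided allocating
 * additional sendmap entries, and the return value is the count of
 * newly sacked bytes; the caller feeds these into its sack
 * accounting.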
*/ 11000 *moved_two = moved; 11001 *no_extra = noextra; 11002 if (IN_RECOVERY(tp->t_flags)) { 11003 rack->r_ctl.bytes_acked_in_recovery += changed; 11004 } 11005 return (changed); 11006 } 11007 11008 static void inline 11009 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 11010 { 11011 struct rack_sendmap *tmap; 11012 11013 tmap = NULL; 11014 while (rsm && (rsm->r_flags & RACK_ACKED)) { 11015 /* Its no longer sacked, mark it so */ 11016 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 11017 #ifdef INVARIANTS 11018 if (rsm->r_in_tmap) { 11019 panic("rack:%p rsm:%p flags:0x%x in tmap?", 11020 rack, rsm, rsm->r_flags); 11021 } 11022 #endif 11023 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 11024 /* Rebuild it into our tmap */ 11025 if (tmap == NULL) { 11026 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 11027 tmap = rsm; 11028 } else { 11029 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 11030 tmap = rsm; 11031 } 11032 tmap->r_in_tmap = 1; 11033 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 11034 } 11035 /* 11036 * Now lets possibly clear the sack filter so we start 11037 * recognizing sacks that cover this area. 11038 */ 11039 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 11040 11041 } 11042 11043 static void 11044 rack_do_decay(struct tcp_rack *rack) 11045 { 11046 struct timeval res; 11047 11048 #define timersub(tvp, uvp, vvp) \ 11049 do { \ 11050 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ 11051 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ 11052 if ((vvp)->tv_usec < 0) { \ 11053 (vvp)->tv_sec--; \ 11054 (vvp)->tv_usec += 1000000; \ 11055 } \ 11056 } while (0) 11057 11058 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res); 11059 #undef timersub 11060 11061 rack->r_ctl.input_pkt++; 11062 if ((rack->rc_in_persist) || 11063 (res.tv_sec >= 1) || 11064 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) { 11065 /* 11066 * Check for decay of non-SAD, 11067 * we want all SAD detection metrics to 11068 * decay 1/4 per second (or more) passed. 11069 * Current default is 800 so it decays 11070 * 80% every second. 11071 */ 11072 #ifdef TCP_SAD_DETECTION 11073 uint32_t pkt_delta; 11074 11075 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt; 11076 #endif 11077 /* Update our saved tracking values */ 11078 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt; 11079 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 11080 /* Now do we escape without decay? */ 11081 #ifdef TCP_SAD_DETECTION 11082 if (rack->rc_in_persist || 11083 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) || 11084 (pkt_delta < tcp_sad_low_pps)){ 11085 /* 11086 * We don't decay idle connections 11087 * or ones that have a low input pps. 
11088 */ 11089 return; 11090 } 11091 /* Decay the counters */ 11092 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count, 11093 tcp_sad_decay_val); 11094 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count, 11095 tcp_sad_decay_val); 11096 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra, 11097 tcp_sad_decay_val); 11098 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move, 11099 tcp_sad_decay_val); 11100 #endif 11101 } 11102 } 11103 11104 static void inline 11105 rack_rsm_sender_update(struct tcp_rack *rack, struct tcpcb *tp, struct rack_sendmap *rsm, uint8_t from) 11106 { 11107 /* 11108 * We look at advancing the end send time for our GP 11109 * measurement tracking only as the cumulative acknowledgment 11110 * moves forward. You might wonder about this, why not 11111 * at every transmission or retransmission within the 11112 * GP window update the rc_gp_cumack_ts? Well its rather 11113 * nuanced but basically the GP window *may* expand (as 11114 * it does below) or worse and harder to track it may shrink. 11115 * 11116 * This last makes it impossible to track at the time of 11117 * the send, since you may set forward your rc_gp_cumack_ts 11118 * when you send, because that send *is* in your currently 11119 * "guessed" window, but then it shrinks. Now which was 11120 * the send time of the last bytes in the window, by the 11121 * time you ask that question that part of the sendmap 11122 * is freed. So you don't know and you will have too 11123 * long of send window. Instead by updating the time 11124 * marker only when the cumack advances this assures us 11125 * that we will have only the sends in the window of our 11126 * GP measurement. 11127 * 11128 * Another complication from this is the 11129 * merging of sendmap entries. During SACK processing this 11130 * can happen to conserve the sendmap size. That breaks 11131 * everything down in tracking the send window of the GP 11132 * estimate. So to prevent that and keep it working with 11133 * a tiny bit more limited merging, we only allow like 11134 * types to be merged. I.e. if two sends are in the GP window 11135 * then its ok to merge them together. If two sends are not 11136 * in the GP window its ok to merge them together too. Though 11137 * one send in and one send out cannot be merged. We combine 11138 * this with never allowing the shrinking of the GP window when 11139 * we are in recovery so that we can properly calculate the 11140 * sending times. 11141 * 11142 * This all of course seems complicated, because it is.. :) 11143 * 11144 * The cum-ack is being advanced upon the sendmap. 11145 * If we are not doing a GP estimate don't 11146 * proceed. 11147 */ 11148 uint64_t ts; 11149 11150 if ((tp->t_flags & TF_GPUTINPROG) == 0) 11151 return; 11152 /* 11153 * If this sendmap entry is going 11154 * beyond the measurement window we had picked, 11155 * expand the measurement window by that much. 11156 */ 11157 if (SEQ_GT(rsm->r_end, tp->gput_ack)) { 11158 tp->gput_ack = rsm->r_end; 11159 } 11160 /* 11161 * If we have not setup a ack, then we 11162 * have no idea if the newly acked pieces 11163 * will be "in our seq measurement range". If 11164 * it is when we clear the app_limited_needs_set 11165 * flag the timestamp will be updated. 
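 *
 * In short, rc_gp_cumack_ts only advances when a measurement is in
 * progress (TF_GPUTINPROG), we are not still waiting to pin the
 * measurement start (app_limited_needs_set), the rsm lies inside the
 * GP window, and its most recent send time is newer than what we have
 * recorded already -- summarizing the early returns around this
 * comment and the update below:
 *
 *	if (rack_in_gp_window(tp, rsm) &&
 *	    rsm->r_tim_lastsent[rsm->r_rtr_cnt - 1] > rack->r_ctl.rc_gp_cumack_ts)
 *		rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[rsm->r_rtr_cnt - 1];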
11166 */
11167 if (rack->app_limited_needs_set)
11168 return;
11169 /*
11170 * Finally, we grab out the latest timestamp
11171 * that this packet was sent and then see
11172 * if:
11173 * a) The packet touches our newly defined GP range.
11174 * b) The time is greater (newer) than the
11175 * one we currently have. If so we update
11176 * our sending end time window.
11177 *
11178 * Note we *do not* do this at send time. The reason
11179 * is that if you do you *may* pick up a newer timestamp
11180 * for a range you are not going to measure. We project
11181 * out how far and then sometimes modify that to be
11182 * smaller. If that occurs then you will have a send
11183 * that does not belong to the range included.
11184 */
11185 if ((ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) <=
11186 rack->r_ctl.rc_gp_cumack_ts)
11187 return;
11188 if (rack_in_gp_window(tp, rsm)) {
11189 rack->r_ctl.rc_gp_cumack_ts = ts;
11190 rack_log_gpset(rack, tp->gput_ack, (uint32_t)ts, rsm->r_end,
11191 __LINE__, from, rsm);
11192 }
11193 }
11194
11195 static void
11196 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to, uint64_t acktime)
11197 {
11198 struct rack_sendmap *rsm;
11199 /*
11200 * The ACK point is advancing to th_ack, we must drop off
11201 * the packets in the rack log and calculate any eligible
11202 * RTT's.
11203 */
11204
11205 if (sack_filter_blks_used(&rack->r_ctl.rack_sf)) {
11206 /*
11207 * If we have some sack blocks in the filter
11208 * lets prune them out by calling sfb with no blocks.
11209 */
11210 sack_filter_blks(tp, &rack->r_ctl.rack_sf, NULL, 0, th_ack);
11211 }
11212 if (SEQ_GT(th_ack, tp->snd_una)) {
11213 /* Clear any app ack remembered settings */
11214 rack->r_ctl.cleared_app_ack = 0;
11215 }
11216 rack->r_wanted_output = 1;
11217 if (SEQ_GT(th_ack, tp->snd_una))
11218 rack->r_ctl.last_cumack_advance = acktime;
11219
11220 /* Tend any TLP that has been marked for 1/2 the seq space (it's old) */
11221 if ((rack->rc_last_tlp_acked_set == 1) &&
11222 (rack->rc_last_tlp_past_cumack == 1) &&
11223 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) {
11224 /*
11225 * We have reached the point where our last rack
11226 * tlp retransmit sequence is ahead of the cum-ack.
11227 * This can only happen when the cum-ack moves all
11228 * the way around (it's been a full 2^31 + 1 bytes
11229 * or more since we sent a retransmitted TLP). Lets
11230 * turn off the valid flag since it's not really valid.
11231 *
11232 * Note since SACKs also turn on this event we have
11233 * a complication, we have to wait to age it out until
11234 * the cum-ack has gone past the TLP before checking, which is
11235 * what the next else clause does.
11236 */
11237 rack_log_dsack_event(rack, 9, __LINE__,
11238 rack->r_ctl.last_tlp_acked_start,
11239 rack->r_ctl.last_tlp_acked_end);
11240 rack->rc_last_tlp_acked_set = 0;
11241 rack->rc_last_tlp_past_cumack = 0;
11242 } else if ((rack->rc_last_tlp_acked_set == 1) &&
11243 (rack->rc_last_tlp_past_cumack == 0) &&
11244 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) {
11245 /*
11246 * It is safe to start aging TLP's out.
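 *
 * Aging is two-staged: once the cum-ack reaches the recorded TLP end
 * we only set rc_last_tlp_past_cumack here; the record itself is not
 * invalidated until the cum-ack has wrapped far enough that the TLP
 * start is again "ahead" of it (half the sequence space), which is
 * what the branch above handles.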
11247 */ 11248 rack->rc_last_tlp_past_cumack = 1; 11249 } 11250 /* We do the same for the tlp send seq as well */ 11251 if ((rack->rc_last_sent_tlp_seq_valid == 1) && 11252 (rack->rc_last_sent_tlp_past_cumack == 1) && 11253 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { 11254 rack_log_dsack_event(rack, 9, __LINE__, 11255 rack->r_ctl.last_sent_tlp_seq, 11256 (rack->r_ctl.last_sent_tlp_seq + 11257 rack->r_ctl.last_sent_tlp_len)); 11258 rack->rc_last_sent_tlp_seq_valid = 0; 11259 rack->rc_last_sent_tlp_past_cumack = 0; 11260 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && 11261 (rack->rc_last_sent_tlp_past_cumack == 0) && 11262 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { 11263 /* 11264 * It is safe to start aging TLP's send. 11265 */ 11266 rack->rc_last_sent_tlp_past_cumack = 1; 11267 } 11268 more: 11269 rsm = tqhash_min(rack->r_ctl.tqh); 11270 if (rsm == NULL) { 11271 if ((th_ack - 1) == tp->iss) { 11272 /* 11273 * For the SYN incoming case we will not 11274 * have called tcp_output for the sending of 11275 * the SYN, so there will be no map. All 11276 * other cases should probably be a panic. 11277 */ 11278 return; 11279 } 11280 if (tp->t_flags & TF_SENTFIN) { 11281 /* if we sent a FIN we often will not have map */ 11282 return; 11283 } 11284 #ifdef INVARIANTS 11285 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u\n", 11286 tp, 11287 tp->t_state, th_ack, rack, 11288 tp->snd_una, tp->snd_max); 11289 #endif 11290 return; 11291 } 11292 if (SEQ_LT(th_ack, rsm->r_start)) { 11293 /* Huh map is missing this */ 11294 #ifdef INVARIANTS 11295 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 11296 rsm->r_start, 11297 th_ack, tp->t_state, rack->r_state); 11298 #endif 11299 return; 11300 } 11301 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); 11302 11303 /* Now was it a retransmitted TLP? */ 11304 if ((rsm->r_flags & RACK_TLP) && 11305 (rsm->r_rtr_cnt > 1)) { 11306 /* 11307 * Yes, this rsm was a TLP and retransmitted, remember that 11308 * since if a DSACK comes back on this we don't want 11309 * to think of it as a reordered segment. This may 11310 * get updated again with possibly even other TLPs 11311 * in flight, but thats ok. Only when we don't send 11312 * a retransmitted TLP for 1/2 the sequences space 11313 * will it get turned off (above). 11314 */ 11315 if (rack->rc_last_tlp_acked_set && 11316 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 11317 /* 11318 * We already turned this on since the end matches, 11319 * the previous one was a partially ack now we 11320 * are getting another one (maybe all of it). 11321 */ 11322 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 11323 /* 11324 * Lets make sure we have all of it though. 
11325 */ 11326 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 11327 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 11328 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 11329 rack->r_ctl.last_tlp_acked_end); 11330 } 11331 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 11332 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 11333 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 11334 rack->r_ctl.last_tlp_acked_end); 11335 } 11336 } else { 11337 rack->rc_last_tlp_past_cumack = 1; 11338 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 11339 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 11340 rack->rc_last_tlp_acked_set = 1; 11341 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 11342 } 11343 } 11344 /* Now do we consume the whole thing? */ 11345 rack->r_ctl.last_tmit_time_acked = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 11346 if (SEQ_GEQ(th_ack, rsm->r_end)) { 11347 /* Its all consumed. */ 11348 uint32_t left; 11349 uint8_t newly_acked; 11350 11351 if (rsm->r_flags & RACK_WAS_LOST) { 11352 /* 11353 * This can happen when we marked it as lost 11354 * and yet before retransmitting we get an ack 11355 * which can happen due to reordering. 11356 */ 11357 rsm->r_flags &= ~RACK_WAS_LOST; 11358 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), 11359 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 11360 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) 11361 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; 11362 else 11363 rack->r_ctl.rc_considered_lost = 0; 11364 } 11365 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 11366 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 11367 rsm->r_rtr_bytes = 0; 11368 /* 11369 * Record the time of highest cumack sent if its in our measurement 11370 * window and possibly bump out the end. 11371 */ 11372 rack_rsm_sender_update(rack, tp, rsm, 4); 11373 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); 11374 if (rsm->r_in_tmap) { 11375 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 11376 rsm->r_in_tmap = 0; 11377 } 11378 newly_acked = 1; 11379 if (((rsm->r_flags & RACK_ACKED) == 0) && 11380 (IN_RECOVERY(tp->t_flags))) { 11381 rack->r_ctl.bytes_acked_in_recovery += (rsm->r_end - rsm->r_start); 11382 } 11383 if (rsm->r_flags & RACK_ACKED) { 11384 /* 11385 * It was acked on the scoreboard -- remove 11386 * it from total 11387 */ 11388 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 11389 newly_acked = 0; 11390 } else if (rsm->r_flags & RACK_SACK_PASSED) { 11391 /* 11392 * There are segments ACKED on the 11393 * scoreboard further up. We are seeing 11394 * reordering. 11395 */ 11396 rsm->r_flags &= ~RACK_SACK_PASSED; 11397 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 11398 rsm->r_flags |= RACK_ACKED; 11399 rack->r_ctl.rc_reorder_ts = cts; 11400 if (rack->r_ctl.rc_reorder_ts == 0) 11401 rack->r_ctl.rc_reorder_ts = 1; 11402 if (rack->r_ent_rec_ns) { 11403 /* 11404 * We have sent no more, and we saw an sack 11405 * then ack arrive. 
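 * Flag a possible revert; rack_handle_might_revert() will undo the
 * congestion-state change if nothing still outstanding carries
 * RACK_SACK_PASSED, i.e. the recovery was triggered purely by
 * reordering.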
11406 */ 11407 rack->r_might_revert = 1; 11408 } 11409 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); 11410 } else { 11411 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); 11412 } 11413 if ((rsm->r_flags & RACK_TO_REXT) && 11414 (tp->t_flags & TF_RCVD_TSTMP) && 11415 (to->to_flags & TOF_TS) && 11416 (to->to_tsecr != 0) && 11417 (tp->t_flags & TF_PREVVALID)) { 11418 /* 11419 * We can use the timestamp to see 11420 * if this retransmission was from the 11421 * first transmit. If so we made a mistake. 11422 */ 11423 tp->t_flags &= ~TF_PREVVALID; 11424 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 11425 /* The first transmit is what this ack is for */ 11426 rack_cong_signal(tp, CC_RTO_ERR, th_ack, __LINE__); 11427 } 11428 } 11429 left = th_ack - rsm->r_end; 11430 if (rack->app_limited_needs_set && newly_acked) 11431 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 11432 /* Free back to zone */ 11433 rack_free(rack, rsm); 11434 if (left) { 11435 goto more; 11436 } 11437 /* Check for reneging */ 11438 rsm = tqhash_min(rack->r_ctl.tqh); 11439 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 11440 /* 11441 * The peer has moved snd_una up to 11442 * the edge of this send, i.e. one 11443 * that it had previously acked. The only 11444 * way that can be true if the peer threw 11445 * away data (space issues) that it had 11446 * previously sacked (else it would have 11447 * given us snd_una up to (rsm->r_end). 11448 * We need to undo the acked markings here. 11449 * 11450 * Note we have to look to make sure th_ack is 11451 * our rsm->r_start in case we get an old ack 11452 * where th_ack is behind snd_una. 11453 */ 11454 rack_peer_reneges(rack, rsm, th_ack); 11455 } 11456 return; 11457 } 11458 if (rsm->r_flags & RACK_ACKED) { 11459 /* 11460 * It was acked on the scoreboard -- remove it from 11461 * total for the part being cum-acked. 11462 */ 11463 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 11464 } else { 11465 if (((rsm->r_flags & RACK_ACKED) == 0) && 11466 (IN_RECOVERY(tp->t_flags))) { 11467 rack->r_ctl.bytes_acked_in_recovery += (th_ack - rsm->r_start); 11468 } 11469 rack_update_pcm_ack(rack, 1, rsm->r_start, th_ack); 11470 } 11471 /* And what about the lost flag? */ 11472 if (rsm->r_flags & RACK_WAS_LOST) { 11473 /* 11474 * This can happen when we marked it as lost 11475 * and yet before retransmitting we get an ack 11476 * which can happen due to reordering. In this 11477 * case its only a partial ack of the send. 11478 */ 11479 KASSERT((rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)), 11480 ("rsm:%p rack:%p rc_considered_lost goes negative th_ack:%u", rsm, rack, th_ack)); 11481 if (rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)) 11482 rack->r_ctl.rc_considered_lost -= th_ack - rsm->r_start; 11483 else 11484 rack->r_ctl.rc_considered_lost = 0; 11485 } 11486 /* 11487 * Clear the dup ack count for 11488 * the piece that remains. 11489 */ 11490 rsm->r_dupack = 0; 11491 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 11492 if (rsm->r_rtr_bytes) { 11493 /* 11494 * It was retransmitted adjust the 11495 * sack holes for what was acked. 11496 */ 11497 int ack_am; 11498 11499 ack_am = (th_ack - rsm->r_start); 11500 if (ack_am >= rsm->r_rtr_bytes) { 11501 rack->r_ctl.rc_holes_rxt -= ack_am; 11502 rsm->r_rtr_bytes -= ack_am; 11503 } 11504 } 11505 /* 11506 * Update where the piece starts and record 11507 * the time of send of highest cumack sent if 11508 * its in our GP range. 
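 * For this partial ack that means: re-sync orig_m_len/orig_t_space
 * if the head mbuf changed underneath us, advance soff by the newly
 * acked bytes, let tqhash_trim() move r_start up to th_ack, and then
 * walk the mbuf chain so rsm->m/rsm->soff reference the first byte
 * that is still unacknowledged.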
11509 */ 11510 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 11511 /* Now we need to move our offset forward too */ 11512 if (rsm->m && 11513 ((rsm->orig_m_len != rsm->m->m_len) || 11514 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { 11515 /* Fix up the orig_m_len and possibly the mbuf offset */ 11516 rack_adjust_orig_mlen(rsm); 11517 } 11518 rsm->soff += (th_ack - rsm->r_start); 11519 rack_rsm_sender_update(rack, tp, rsm, 5); 11520 /* The trim will move th_ack into r_start for us */ 11521 tqhash_trim(rack->r_ctl.tqh, th_ack); 11522 /* Now do we need to move the mbuf fwd too? */ 11523 { 11524 struct mbuf *m; 11525 uint32_t soff; 11526 11527 m = rsm->m; 11528 soff = rsm->soff; 11529 if (m) { 11530 while (soff >= m->m_len) { 11531 soff -= m->m_len; 11532 KASSERT((m->m_next != NULL), 11533 (" rsm:%p off:%u soff:%u m:%p", 11534 rsm, rsm->soff, soff, m)); 11535 m = m->m_next; 11536 if (m == NULL) { 11537 /* 11538 * This is a fall-back that prevents a panic. In reality 11539 * we should be able to walk the mbuf's and find our place. 11540 * At this point snd_una has not been updated with the sbcut() yet 11541 * but tqhash_trim did update rsm->r_start so the offset calcuation 11542 * should work fine. This is undesirable since we will take cache 11543 * hits to access the socket buffer. And even more puzzling is that 11544 * it happens occasionally. It should not :( 11545 */ 11546 m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 11547 (rsm->r_start - tp->snd_una), 11548 &soff); 11549 break; 11550 } 11551 } 11552 /* 11553 * Now save in our updated values. 11554 */ 11555 rsm->m = m; 11556 rsm->soff = soff; 11557 rsm->orig_m_len = rsm->m->m_len; 11558 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 11559 } 11560 } 11561 if (rack->app_limited_needs_set && 11562 SEQ_GEQ(th_ack, tp->gput_seq)) 11563 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 11564 } 11565 11566 static void 11567 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 11568 { 11569 struct rack_sendmap *rsm; 11570 int sack_pass_fnd = 0; 11571 11572 if (rack->r_might_revert) { 11573 /* 11574 * Ok we have reordering, have not sent anything, we 11575 * might want to revert the congestion state if nothing 11576 * further has SACK_PASSED on it. Lets check. 11577 * 11578 * We also get here when we have DSACKs come in for 11579 * all the data that we FR'd. Note that a rxt or tlp 11580 * timer clears this from happening. 11581 */ 11582 11583 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 11584 if (rsm->r_flags & RACK_SACK_PASSED) { 11585 sack_pass_fnd = 1; 11586 break; 11587 } 11588 } 11589 if (sack_pass_fnd == 0) { 11590 /* 11591 * We went into recovery 11592 * incorrectly due to reordering! 11593 */ 11594 int orig_cwnd; 11595 11596 rack->r_ent_rec_ns = 0; 11597 orig_cwnd = tp->snd_cwnd; 11598 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 11599 tp->snd_recover = tp->snd_una; 11600 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 11601 if (IN_RECOVERY(tp->t_flags)) { 11602 rack_exit_recovery(tp, rack, 3); 11603 if ((rack->rto_from_rec == 1) && (rack_ssthresh_rest_rto_rec != 0) ){ 11604 /* 11605 * We were in recovery, had an RTO 11606 * and then re-entered recovery (more sack's arrived) 11607 * and we have properly recorded the old ssthresh from 11608 * the first recovery. We want to be able to slow-start 11609 * back to this level. The ssthresh from the timeout 11610 * and then back into recovery will end up most likely 11611 * to be min(cwnd=1mss, 2mss). 
Which makes it basically 11612 * so we get no slow-start after our RTO. 11613 */ 11614 rack->rto_from_rec = 0; 11615 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) 11616 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; 11617 } 11618 } 11619 rack->r_ctl.bytes_acked_in_recovery = 0; 11620 rack->r_ctl.time_entered_recovery = 0; 11621 } 11622 rack->r_might_revert = 0; 11623 } 11624 } 11625 11626 #ifdef TCP_SAD_DETECTION 11627 11628 static void 11629 rack_merge_out_sacks(struct tcp_rack *rack) 11630 { 11631 struct rack_sendmap *cur, *next, *rsm, *trsm = NULL; 11632 11633 cur = tqhash_min(rack->r_ctl.tqh); 11634 while(cur) { 11635 next = tqhash_next(rack->r_ctl.tqh, cur); 11636 /* 11637 * The idea is to go through all and merge back 11638 * together the pieces sent together, 11639 */ 11640 if ((next != NULL) && 11641 (cur->r_tim_lastsent[0] == next->r_tim_lastsent[0])) { 11642 rack_merge_rsm(rack, cur, next); 11643 } else { 11644 cur = next; 11645 } 11646 } 11647 /* 11648 * now treat it like a rxt event, everything is outstanding 11649 * and sent nothing acvked and dupacks are all zero. If this 11650 * is not an attacker it will have to dupack its way through 11651 * it all. 11652 */ 11653 TAILQ_INIT(&rack->r_ctl.rc_tmap); 11654 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 11655 rsm->r_dupack = 0; 11656 /* We must re-add it back to the tlist */ 11657 if (trsm == NULL) { 11658 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 11659 } else { 11660 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 11661 } 11662 rsm->r_in_tmap = 1; 11663 trsm = rsm; 11664 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED); 11665 } 11666 sack_filter_clear(&rack->r_ctl.rack_sf, rack->rc_tp->snd_una); 11667 } 11668 11669 static void 11670 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack, uint32_t bytes_this_ack, uint32_t segsiz) 11671 { 11672 int do_detection = 0; 11673 11674 if (rack->sack_attack_disable || rack->rc_suspicious) { 11675 /* 11676 * If we have been disabled we must detect 11677 * to possibly reverse it. Or if the guy has 11678 * sent in suspicious sacks we want to do detection too. 11679 */ 11680 do_detection = 1; 11681 11682 } else if ((rack->do_detection || tcp_force_detection) && 11683 (tcp_sack_to_ack_thresh > 0) && 11684 (tcp_sack_to_move_thresh > 0) && 11685 (rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum)) { 11686 /* 11687 * We only detect here if: 11688 * 1) System wide forcing is on <or> do_detection is on 11689 * <and> 11690 * 2) We have thresholds for move and ack (set one to 0 and we are off) 11691 * <and> 11692 * 3) We have maps allocated larger than our min (500). 11693 */ 11694 do_detection = 1; 11695 } 11696 if (do_detection > 0) { 11697 /* 11698 * We have thresholds set to find 11699 * possible attackers and disable sack. 11700 * Check them. 
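 * Both ratios are scaled by 1000:
 *   ackratio  = sack_count * 1000 / ack_count
 *   moveratio = sack_moved_extra * 1000 /
 *               (sack_moved_extra + sack_noextra_move)
 * Detection only trips when *both* exceed their thresholds
 * (tcp_sack_to_ack_thresh and tcp_sack_to_move_thresh). As an
 * illustrative example, 3000 sacks counted against 1000 acks
 * yields an ackratio of 3000.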
11701 */ 11702 uint64_t ackratio, moveratio, movetotal; 11703 11704 /* Log detecting */ 11705 rack_log_sad(rack, 1); 11706 /* Do we establish a ack ratio */ 11707 if ((rack->r_ctl.sack_count > tcp_map_minimum) || 11708 (rack->rc_suspicious == 1) || 11709 (rack->sack_attack_disable > 0)) { 11710 ackratio = (uint64_t)(rack->r_ctl.sack_count); 11711 ackratio *= (uint64_t)(1000); 11712 if (rack->r_ctl.ack_count) 11713 ackratio /= (uint64_t)(rack->r_ctl.ack_count); 11714 else { 11715 /* We can hit this due to ack totals degregation (via small sacks) */ 11716 ackratio = 1000; 11717 } 11718 } else { 11719 /* 11720 * No ack ratio needed if we have not 11721 * seen more sacks then the number of map entries. 11722 * The exception to that is if we have disabled sack then 11723 * we need to find a ratio. 11724 */ 11725 ackratio = 0; 11726 } 11727 11728 if ((rack->sack_attack_disable == 0) && 11729 (ackratio > rack_highest_sack_thresh_seen)) 11730 rack_highest_sack_thresh_seen = (uint32_t)ackratio; 11731 /* Do we establish a move ratio? */ 11732 if ((rack->r_ctl.sack_moved_extra > tcp_map_minimum) || 11733 (rack->rc_suspicious == 1) || 11734 (rack->sack_attack_disable > 0)) { 11735 /* 11736 * We need to have more sack moves than maps 11737 * allocated to have a move ratio considered. 11738 */ 11739 movetotal = rack->r_ctl.sack_moved_extra; 11740 movetotal += rack->r_ctl.sack_noextra_move; 11741 moveratio = rack->r_ctl.sack_moved_extra; 11742 moveratio *= (uint64_t)1000; 11743 if (movetotal) 11744 moveratio /= movetotal; 11745 else { 11746 /* No moves, thats pretty good */ 11747 moveratio = 0; 11748 } 11749 } else { 11750 /* 11751 * Not enough moves have occured to consider 11752 * if we are out of whack in that ratio. 11753 * The exception to that is if we have disabled sack then 11754 * we need to find a ratio. 11755 */ 11756 moveratio = 0; 11757 } 11758 if ((rack->sack_attack_disable == 0) && 11759 (moveratio > rack_highest_move_thresh_seen)) 11760 rack_highest_move_thresh_seen = (uint32_t)moveratio; 11761 /* Now the tests */ 11762 if (rack->sack_attack_disable == 0) { 11763 /* Not disabled, do we need to disable? 
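 * If we do disable, the cwnd is clamped to the current flight size
 * and (when rack_merge_out_sacks_on_attack is set) the scoreboard
 * is merged back into per-send blocks.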
*/ 11764 if ((ackratio > tcp_sack_to_ack_thresh) && 11765 (moveratio > tcp_sack_to_move_thresh)) { 11766 /* Disable sack processing */ 11767 tcp_trace_point(rack->rc_tp, TCP_TP_SAD_TRIGGERED); 11768 rack->sack_attack_disable = 1; 11769 /* set it so we have the built in delay */ 11770 rack->r_ctl.ack_during_sd = 1; 11771 if (rack_merge_out_sacks_on_attack) 11772 rack_merge_out_sacks(rack); 11773 counter_u64_add(rack_sack_attacks_detected, 1); 11774 tcp_trace_point(rack->rc_tp, TCP_TP_SAD_TRIGGERED); 11775 /* Clamp the cwnd at flight size */ 11776 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd; 11777 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 11778 rack_log_sad(rack, 2); 11779 } 11780 } else { 11781 /* We are sack-disabled check for false positives */ 11782 if ((ackratio <= tcp_restoral_thresh) || 11783 ((rack_merge_out_sacks_on_attack == 0) && 11784 (rack->rc_suspicious == 0) && 11785 (rack->r_ctl.rc_num_maps_alloced <= (tcp_map_minimum/2)))) { 11786 rack->sack_attack_disable = 0; 11787 rack_log_sad(rack, 3); 11788 /* Restart counting */ 11789 rack->r_ctl.sack_count = 0; 11790 rack->r_ctl.sack_moved_extra = 0; 11791 rack->r_ctl.sack_noextra_move = 1; 11792 rack->rc_suspicious = 0; 11793 rack->r_ctl.ack_count = max(1, 11794 (bytes_this_ack / segsiz)); 11795 11796 counter_u64_add(rack_sack_attacks_reversed, 1); 11797 /* Restore the cwnd */ 11798 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd) 11799 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd; 11800 } 11801 } 11802 } 11803 } 11804 #endif 11805 11806 static int 11807 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 11808 { 11809 11810 uint32_t am, l_end; 11811 int was_tlp = 0; 11812 11813 if (SEQ_GT(end, start)) 11814 am = end - start; 11815 else 11816 am = 0; 11817 if ((rack->rc_last_tlp_acked_set ) && 11818 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && 11819 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { 11820 /* 11821 * The DSACK is because of a TLP which we don't 11822 * do anything with the reordering window over since 11823 * it was not reordering that caused the DSACK but 11824 * our previous retransmit TLP. 11825 */ 11826 rack_log_dsack_event(rack, 7, __LINE__, start, end); 11827 was_tlp = 1; 11828 goto skip_dsack_round; 11829 } 11830 if (rack->rc_last_sent_tlp_seq_valid) { 11831 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; 11832 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && 11833 (SEQ_LEQ(end, l_end))) { 11834 /* 11835 * This dsack is from the last sent TLP, ignore it 11836 * for reordering purposes. 11837 */ 11838 rack_log_dsack_event(rack, 7, __LINE__, start, end); 11839 was_tlp = 1; 11840 goto skip_dsack_round; 11841 } 11842 } 11843 if (rack->rc_dsack_round_seen == 0) { 11844 rack->rc_dsack_round_seen = 1; 11845 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; 11846 rack->r_ctl.num_dsack++; 11847 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ 11848 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); 11849 } 11850 skip_dsack_round: 11851 /* 11852 * We keep track of how many DSACK blocks we get 11853 * after a recovery incident. 11854 */ 11855 rack->r_ctl.dsack_byte_cnt += am; 11856 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 11857 rack->r_ctl.retran_during_recovery && 11858 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 11859 /* 11860 * False recovery most likely culprit is reordering. If 11861 * nothing else is missing we need to revert. 
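 * Having at least as many DSACK'd bytes as we retransmitted during
 * the recovery strongly suggests every retransmission duplicated
 * data that had already arrived, so the recovery itself was
 * spurious and the congestion response can be reverted.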
11862 */ 11863 rack->r_might_revert = 1; 11864 rack_handle_might_revert(rack->rc_tp, rack); 11865 rack->r_might_revert = 0; 11866 rack->r_ctl.retran_during_recovery = 0; 11867 rack->r_ctl.dsack_byte_cnt = 0; 11868 } 11869 return (was_tlp); 11870 } 11871 11872 static uint32_t 11873 do_rack_compute_pipe(struct tcpcb *tp, struct tcp_rack *rack, uint32_t snd_una) 11874 { 11875 return (((tp->snd_max - snd_una) - 11876 (rack->r_ctl.rc_sacked + rack->r_ctl.rc_considered_lost)) + rack->r_ctl.rc_holes_rxt); 11877 } 11878 11879 static int32_t 11880 rack_compute_pipe(struct tcpcb *tp) 11881 { 11882 return ((int32_t)do_rack_compute_pipe(tp, 11883 (struct tcp_rack *)tp->t_fb_ptr, 11884 tp->snd_una)); 11885 } 11886 11887 static void 11888 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 11889 { 11890 /* Deal with changed and PRR here (in recovery only) */ 11891 uint32_t pipe, snd_una; 11892 11893 rack->r_ctl.rc_prr_delivered += changed; 11894 11895 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 11896 /* 11897 * It is all outstanding, we are application limited 11898 * and thus we don't need more room to send anything. 11899 * Note we use tp->snd_una here and not th_ack because 11900 * the data as yet not been cut from the sb. 11901 */ 11902 rack->r_ctl.rc_prr_sndcnt = 0; 11903 return; 11904 } 11905 /* Compute prr_sndcnt */ 11906 if (SEQ_GT(tp->snd_una, th_ack)) { 11907 snd_una = tp->snd_una; 11908 } else { 11909 snd_una = th_ack; 11910 } 11911 pipe = do_rack_compute_pipe(tp, rack, snd_una); 11912 if (pipe > tp->snd_ssthresh) { 11913 long sndcnt; 11914 11915 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 11916 if (rack->r_ctl.rc_prr_recovery_fs > 0) 11917 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 11918 else { 11919 rack->r_ctl.rc_prr_sndcnt = 0; 11920 rack_log_to_prr(rack, 9, 0, __LINE__); 11921 sndcnt = 0; 11922 } 11923 sndcnt++; 11924 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 11925 sndcnt -= rack->r_ctl.rc_prr_out; 11926 else 11927 sndcnt = 0; 11928 rack->r_ctl.rc_prr_sndcnt = sndcnt; 11929 rack_log_to_prr(rack, 10, 0, __LINE__); 11930 } else { 11931 uint32_t limit; 11932 11933 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 11934 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 11935 else 11936 limit = 0; 11937 if (changed > limit) 11938 limit = changed; 11939 limit += ctf_fixed_maxseg(tp); 11940 if (tp->snd_ssthresh > pipe) { 11941 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 11942 rack_log_to_prr(rack, 11, 0, __LINE__); 11943 } else { 11944 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 11945 rack_log_to_prr(rack, 12, 0, __LINE__); 11946 } 11947 } 11948 } 11949 11950 static void 11951 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck, 11952 int *dsack_seen, int *sacks_seen) 11953 { 11954 uint32_t changed; 11955 struct tcp_rack *rack; 11956 struct rack_sendmap *rsm; 11957 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 11958 register uint32_t th_ack; 11959 int32_t i, j, k, num_sack_blks = 0; 11960 uint32_t cts, acked, ack_point; 11961 int loop_start = 0, moved_two = 0, no_extra = 0; 11962 uint32_t tsused; 11963 uint32_t segsiz, o_cnt; 11964 11965 11966 INP_WLOCK_ASSERT(tptoinpcb(tp)); 11967 if (tcp_get_flags(th) & TH_RST) { 11968 /* We don't log resets */ 11969 return; 11970 } 11971 rack = (struct tcp_rack *)tp->t_fb_ptr; 11972 cts = tcp_get_usecs(NULL); 11973 rsm = tqhash_min(rack->r_ctl.tqh); 11974 
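	/*
	 * "changed" accumulates the bytes this ACK newly accounts for:
	 * the cum-ack advance over the first sendmap entry plus whatever
	 * the SACK blocks below credit. It later feeds rack_update_prr()
	 * as the delivered amount and, when non-zero, cancels the pending
	 * rack timer.
	 */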
changed = 0; 11975 th_ack = th->th_ack; 11976 if (rack->sack_attack_disable == 0) 11977 rack_do_decay(rack); 11978 segsiz = ctf_fixed_maxseg(rack->rc_tp); 11979 if (BYTES_THIS_ACK(tp, th) >= segsiz) { 11980 /* 11981 * You only get credit for 11982 * MSS and greater (and you get extra 11983 * credit for larger cum-ack moves). 11984 */ 11985 int ac; 11986 11987 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 11988 rack->r_ctl.ack_count += ac; 11989 counter_u64_add(rack_ack_total, ac); 11990 } 11991 if (rack->r_ctl.ack_count > 0xfff00000) { 11992 /* 11993 * reduce the number to keep us under 11994 * a uint32_t. 11995 */ 11996 rack->r_ctl.ack_count /= 2; 11997 rack->r_ctl.sack_count /= 2; 11998 } 11999 if (SEQ_GT(th_ack, tp->snd_una)) { 12000 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 12001 tp->t_acktime = ticks; 12002 } 12003 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 12004 changed = th_ack - rsm->r_start; 12005 if (changed) { 12006 rack_process_to_cumack(tp, rack, th_ack, cts, to, 12007 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); 12008 } 12009 if ((to->to_flags & TOF_SACK) == 0) { 12010 /* We are done nothing left and no sack. */ 12011 rack_handle_might_revert(tp, rack); 12012 /* 12013 * For cases where we struck a dup-ack 12014 * with no SACK, add to the changes so 12015 * PRR will work right. 12016 */ 12017 if (dup_ack_struck && (changed == 0)) { 12018 changed += ctf_fixed_maxseg(rack->rc_tp); 12019 } 12020 goto out; 12021 } 12022 /* Sack block processing */ 12023 if (SEQ_GT(th_ack, tp->snd_una)) 12024 ack_point = th_ack; 12025 else 12026 ack_point = tp->snd_una; 12027 for (i = 0; i < to->to_nsacks; i++) { 12028 bcopy((to->to_sacks + i * TCPOLEN_SACK), 12029 &sack, sizeof(sack)); 12030 sack.start = ntohl(sack.start); 12031 sack.end = ntohl(sack.end); 12032 if (SEQ_GT(sack.end, sack.start) && 12033 SEQ_GT(sack.start, ack_point) && 12034 SEQ_LT(sack.start, tp->snd_max) && 12035 SEQ_GT(sack.end, ack_point) && 12036 SEQ_LEQ(sack.end, tp->snd_max)) { 12037 sack_blocks[num_sack_blks] = sack; 12038 num_sack_blks++; 12039 } else if (SEQ_LEQ(sack.start, th_ack) && 12040 SEQ_LEQ(sack.end, th_ack)) { 12041 int was_tlp; 12042 12043 if (dsack_seen != NULL) 12044 *dsack_seen = 1; 12045 was_tlp = rack_note_dsack(rack, sack.start, sack.end); 12046 /* 12047 * Its a D-SACK block. 12048 */ 12049 tcp_record_dsack(tp, sack.start, sack.end, was_tlp); 12050 } 12051 } 12052 if (rack->rc_dsack_round_seen) { 12053 /* Is the dsack roound over? */ 12054 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { 12055 /* Yes it is */ 12056 rack->rc_dsack_round_seen = 0; 12057 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 12058 } 12059 } 12060 /* 12061 * Sort the SACK blocks so we can update the rack scoreboard with 12062 * just one pass. 12063 */ 12064 o_cnt = num_sack_blks; 12065 num_sack_blks = sack_filter_blks(tp, &rack->r_ctl.rack_sf, sack_blocks, 12066 num_sack_blks, th->th_ack); 12067 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 12068 if (sacks_seen != NULL) 12069 *sacks_seen = num_sack_blks; 12070 if (num_sack_blks == 0) { 12071 /* Nothing to sack, but we need to update counts */ 12072 if ((o_cnt == 1) && 12073 (*dsack_seen != 1)) 12074 rack->r_ctl.sack_count++; 12075 else if (o_cnt > 1) 12076 rack->r_ctl.sack_count++; 12077 goto out_with_totals; 12078 } 12079 if (rack->sack_attack_disable) { 12080 /* 12081 * An attacker disablement is in place, for 12082 * every sack block that is not at least a full MSS 12083 * count up sack_count. 
12084 */ 12085 for (i = 0; i < num_sack_blks; i++) { 12086 if ((sack_blocks[i].end - sack_blocks[i].start) < segsiz) { 12087 rack->r_ctl.sack_count++; 12088 } 12089 if (rack->r_ctl.sack_count > 0xfff00000) { 12090 /* 12091 * reduce the number to keep us under 12092 * a uint32_t. 12093 */ 12094 rack->r_ctl.ack_count /= 2; 12095 rack->r_ctl.sack_count /= 2; 12096 } 12097 } 12098 goto out; 12099 } 12100 /* Its a sack of some sort */ 12101 rack->r_ctl.sack_count += num_sack_blks; 12102 if (rack->r_ctl.sack_count > 0xfff00000) { 12103 /* 12104 * reduce the number to keep us under 12105 * a uint32_t. 12106 */ 12107 rack->r_ctl.ack_count /= 2; 12108 rack->r_ctl.sack_count /= 2; 12109 } 12110 if (num_sack_blks < 2) { 12111 /* Only one, we don't need to sort */ 12112 goto do_sack_work; 12113 } 12114 /* Sort the sacks */ 12115 for (i = 0; i < num_sack_blks; i++) { 12116 for (j = i + 1; j < num_sack_blks; j++) { 12117 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 12118 sack = sack_blocks[i]; 12119 sack_blocks[i] = sack_blocks[j]; 12120 sack_blocks[j] = sack; 12121 } 12122 } 12123 } 12124 /* 12125 * Now are any of the sack block ends the same (yes some 12126 * implementations send these)? 12127 */ 12128 again: 12129 if (num_sack_blks == 0) 12130 goto out_with_totals; 12131 if (num_sack_blks > 1) { 12132 for (i = 0; i < num_sack_blks; i++) { 12133 for (j = i + 1; j < num_sack_blks; j++) { 12134 if (sack_blocks[i].end == sack_blocks[j].end) { 12135 /* 12136 * Ok these two have the same end we 12137 * want the smallest end and then 12138 * throw away the larger and start 12139 * again. 12140 */ 12141 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 12142 /* 12143 * The second block covers 12144 * more area use that 12145 */ 12146 sack_blocks[i].start = sack_blocks[j].start; 12147 } 12148 /* 12149 * Now collapse out the dup-sack and 12150 * lower the count 12151 */ 12152 for (k = (j + 1); k < num_sack_blks; k++) { 12153 sack_blocks[j].start = sack_blocks[k].start; 12154 sack_blocks[j].end = sack_blocks[k].end; 12155 j++; 12156 } 12157 num_sack_blks--; 12158 goto again; 12159 } 12160 } 12161 } 12162 } 12163 do_sack_work: 12164 /* 12165 * First lets look to see if 12166 * we have retransmitted and 12167 * can use the transmit next? 12168 */ 12169 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 12170 if (rsm && 12171 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 12172 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 12173 /* 12174 * We probably did the FR and the next 12175 * SACK in continues as we would expect. 12176 */ 12177 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &no_extra, &moved_two, segsiz); 12178 if (acked) { 12179 rack->r_wanted_output = 1; 12180 changed += acked; 12181 } 12182 if (num_sack_blks == 1) { 12183 /* 12184 * This is what we would expect from 12185 * a normal implementation to happen 12186 * after we have retransmitted the FR, 12187 * i.e the sack-filter pushes down 12188 * to 1 block and the next to be retransmitted 12189 * is the sequence in the sack block (has more 12190 * are acked). Count this as ACK'd data to boost 12191 * up the chances of recovering any false positives. 12192 */ 12193 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp)); 12194 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 12195 counter_u64_add(rack_express_sack, 1); 12196 if (rack->r_ctl.ack_count > 0xfff00000) { 12197 /* 12198 * reduce the number to keep us under 12199 * a uint32_t. 
12200 */ 12201 rack->r_ctl.ack_count /= 2; 12202 rack->r_ctl.sack_count /= 2; 12203 } 12204 if (moved_two) { 12205 /* 12206 * If we did not get a SACK for at least a MSS and 12207 * had to move at all, or if we moved more than our 12208 * threshold, it counts against the "extra" move. 12209 */ 12210 rack->r_ctl.sack_moved_extra += moved_two; 12211 rack->r_ctl.sack_noextra_move += no_extra; 12212 counter_u64_add(rack_move_some, 1); 12213 } else { 12214 /* 12215 * else we did not have to move 12216 * any more than we would expect. 12217 */ 12218 rack->r_ctl.sack_noextra_move += no_extra; 12219 rack->r_ctl.sack_noextra_move++; 12220 counter_u64_add(rack_move_none, 1); 12221 } 12222 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) || 12223 (rack->r_ctl.sack_noextra_move > 0xfff00000)) { 12224 rack->r_ctl.sack_moved_extra /= 2; 12225 rack->r_ctl.sack_noextra_move /= 2; 12226 } 12227 goto out_with_totals; 12228 } else { 12229 /* 12230 * Start the loop through the 12231 * rest of blocks, past the first block. 12232 */ 12233 loop_start = 1; 12234 } 12235 } 12236 counter_u64_add(rack_sack_total, 1); 12237 rsm = rack->r_ctl.rc_sacklast; 12238 for (i = loop_start; i < num_sack_blks; i++) { 12239 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &no_extra, &moved_two, segsiz); 12240 if (acked) { 12241 rack->r_wanted_output = 1; 12242 changed += acked; 12243 } 12244 if (moved_two) { 12245 /* 12246 * If we did not get a SACK for at least a MSS and 12247 * had to move at all, or if we moved more than our 12248 * threshold, it counts against the "extra" move. 12249 */ 12250 rack->r_ctl.sack_moved_extra += moved_two; 12251 rack->r_ctl.sack_noextra_move += no_extra; 12252 counter_u64_add(rack_move_some, 1); 12253 } else { 12254 /* 12255 * else we did not have to move 12256 * any more than we would expect. 12257 */ 12258 rack->r_ctl.sack_noextra_move += no_extra; 12259 rack->r_ctl.sack_noextra_move++; 12260 counter_u64_add(rack_move_none, 1); 12261 } 12262 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) || 12263 (rack->r_ctl.sack_noextra_move > 0xfff00000)) { 12264 rack->r_ctl.sack_moved_extra /= 2; 12265 rack->r_ctl.sack_noextra_move /= 2; 12266 } 12267 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) { 12268 /* 12269 * If the SACK was not a full MSS then 12270 * we add to sack_count the number of 12271 * MSS's (or possibly more than 12272 * a MSS if its a TSO send) we had to skip by. 12273 */ 12274 rack->r_ctl.sack_count += moved_two; 12275 if (rack->r_ctl.sack_count > 0xfff00000) { 12276 rack->r_ctl.ack_count /= 2; 12277 rack->r_ctl.sack_count /= 2; 12278 } 12279 counter_u64_add(rack_sack_total, moved_two); 12280 } 12281 /* 12282 * Now we need to setup for the next 12283 * round. First we make sure we won't 12284 * exceed the size of our uint32_t on 12285 * the various counts, and then clear out 12286 * moved_two. 12287 */ 12288 moved_two = 0; 12289 no_extra = 0; 12290 } 12291 out_with_totals: 12292 if (num_sack_blks > 1) { 12293 /* 12294 * You get an extra stroke if 12295 * you have more than one sack-blk, this 12296 * could be where we are skipping forward 12297 * and the sack-filter is still working, or 12298 * it could be an attacker constantly 12299 * moving us. 
12300 */ 12301 rack->r_ctl.sack_moved_extra++; 12302 counter_u64_add(rack_move_some, 1); 12303 } 12304 out: 12305 #ifdef TCP_SAD_DETECTION 12306 rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp)); 12307 #endif 12308 if (changed) { 12309 /* Something changed cancel the rack timer */ 12310 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12311 } 12312 tsused = tcp_get_usecs(NULL); 12313 rsm = tcp_rack_output(tp, rack, tsused); 12314 if ((!IN_FASTRECOVERY(tp->t_flags)) && 12315 rsm && 12316 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { 12317 /* Enter recovery */ 12318 entered_recovery = 1; 12319 rack_cong_signal(tp, CC_NDUPACK, th_ack, __LINE__); 12320 /* 12321 * When we enter recovery we need to assure we send 12322 * one packet. 12323 */ 12324 if (rack->rack_no_prr == 0) { 12325 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 12326 rack_log_to_prr(rack, 8, 0, __LINE__); 12327 } 12328 rack->r_timer_override = 1; 12329 rack->r_early = 0; 12330 rack->r_ctl.rc_agg_early = 0; 12331 } else if (IN_FASTRECOVERY(tp->t_flags) && 12332 rsm && 12333 (rack->r_rr_config == 3)) { 12334 /* 12335 * Assure we can output and we get no 12336 * remembered pace time except the retransmit. 12337 */ 12338 rack->r_timer_override = 1; 12339 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 12340 rack->r_ctl.rc_resend = rsm; 12341 } 12342 if (IN_FASTRECOVERY(tp->t_flags) && 12343 (rack->rack_no_prr == 0) && 12344 (entered_recovery == 0)) { 12345 rack_update_prr(tp, rack, changed, th_ack); 12346 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 12347 ((tcp_in_hpts(rack->rc_tp) == 0) && 12348 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 12349 /* 12350 * If you are pacing output you don't want 12351 * to override. 12352 */ 12353 rack->r_early = 0; 12354 rack->r_ctl.rc_agg_early = 0; 12355 rack->r_timer_override = 1; 12356 } 12357 } 12358 } 12359 12360 static void 12361 rack_strike_dupack(struct tcp_rack *rack, tcp_seq th_ack) 12362 { 12363 struct rack_sendmap *rsm; 12364 12365 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 12366 while (rsm) { 12367 /* 12368 * We need to skip anything already set 12369 * to be retransmitted. 12370 */ 12371 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 12372 (rsm->r_flags & RACK_MUST_RXT)) { 12373 rsm = TAILQ_NEXT(rsm, r_tnext); 12374 continue; 12375 } 12376 break; 12377 } 12378 if (rsm && (rsm->r_dupack < 0xff)) { 12379 rsm->r_dupack++; 12380 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 12381 struct timeval tv; 12382 uint32_t cts; 12383 /* 12384 * Here we see if we need to retransmit. For 12385 * a SACK type connection if enough time has passed 12386 * we will get a return of the rsm. For a non-sack 12387 * connection we will get the rsm returned if the 12388 * dupack value is 3 or more. 12389 */ 12390 cts = tcp_get_usecs(&tv); 12391 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); 12392 if (rack->r_ctl.rc_resend != NULL) { 12393 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { 12394 rack_cong_signal(rack->rc_tp, CC_NDUPACK, 12395 th_ack, __LINE__); 12396 } 12397 rack->r_wanted_output = 1; 12398 rack->r_timer_override = 1; 12399 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 12400 } 12401 } else { 12402 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 12403 } 12404 } 12405 } 12406 12407 static void 12408 rack_check_bottom_drag(struct tcpcb *tp, 12409 struct tcp_rack *rack, 12410 struct socket *so) 12411 { 12412 /* 12413 * So what is dragging bottom? 
12414 * 12415 * Dragging bottom means you were under pacing and had a 12416 * delay in processing inbound acks waiting on our pacing 12417 * timer to expire. While you were waiting all of the acknowledgments 12418 * for the packets you sent have arrived. This means we are pacing 12419 * way underneath the bottleneck to the point where our Goodput 12420 * measurements stop working, since they require more than one 12421 * ack (usually at least 8 packets worth with multiple acks so we can 12422 * gauge the inter-ack times). If that occurs we have a real problem 12423 * since we are stuck in a hole that we can't get out of without 12424 * something speeding us up. 12425 * 12426 * We also check to see if we are widdling down to just one segment 12427 * outstanding. If this occurs and we have room to send in our cwnd/rwnd 12428 * then we are adding the delayed ack interval into our measurments and 12429 * we need to speed up slightly. 12430 */ 12431 uint32_t segsiz, minseg; 12432 12433 segsiz = ctf_fixed_maxseg(tp); 12434 minseg = segsiz; 12435 if (tp->snd_max == tp->snd_una) { 12436 /* 12437 * We are doing dynamic pacing and we are way 12438 * under. Basically everything got acked while 12439 * we were still waiting on the pacer to expire. 12440 * 12441 * This means we need to boost the b/w in 12442 * addition to any earlier boosting of 12443 * the multiplier. 12444 */ 12445 uint64_t lt_bw; 12446 12447 tcp_trace_point(rack->rc_tp, TCP_TP_PACED_BOTTOM); 12448 lt_bw = rack_get_lt_bw(rack); 12449 rack->rc_dragged_bottom = 1; 12450 rack_validate_multipliers_at_or_above100(rack); 12451 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && 12452 (rack->dis_lt_bw == 0) && 12453 (rack->use_lesser_lt_bw == 0) && 12454 (lt_bw > 0)) { 12455 /* 12456 * Lets use the long-term b/w we have 12457 * been getting as a base. 12458 */ 12459 if (rack->rc_gp_filled == 0) { 12460 if (lt_bw > ONE_POINT_TWO_MEG) { 12461 /* 12462 * If we have no measurement 12463 * don't let us set in more than 12464 * 1.2Mbps. If we are still too 12465 * low after pacing with this we 12466 * will hopefully have a max b/w 12467 * available to sanity check things. 12468 */ 12469 lt_bw = ONE_POINT_TWO_MEG; 12470 } 12471 rack->r_ctl.rc_rtt_diff = 0; 12472 rack->r_ctl.gp_bw = lt_bw; 12473 rack->rc_gp_filled = 1; 12474 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 12475 rack->r_ctl.num_measurements = RACK_REQ_AVG; 12476 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 12477 } else if (lt_bw > rack->r_ctl.gp_bw) { 12478 rack->r_ctl.rc_rtt_diff = 0; 12479 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 12480 rack->r_ctl.num_measurements = RACK_REQ_AVG; 12481 rack->r_ctl.gp_bw = lt_bw; 12482 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 12483 } else 12484 rack_increase_bw_mul(rack, -1, 0, 0, 1); 12485 if ((rack->gp_ready == 0) && 12486 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 12487 /* We have enough measurements now */ 12488 rack->gp_ready = 1; 12489 if (rack->dgp_on || 12490 rack->rack_hibeta) 12491 rack_set_cc_pacing(rack); 12492 if (rack->defer_options) 12493 rack_apply_deferred_options(rack); 12494 } 12495 } else { 12496 /* 12497 * zero rtt possibly?, settle for just an old increase. 
12498 */ 12499 rack_increase_bw_mul(rack, -1, 0, 0, 1); 12500 } 12501 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 12502 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), 12503 minseg)) && 12504 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && 12505 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && 12506 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= 12507 (segsiz * rack_req_segs))) { 12508 /* 12509 * We are doing dynamic GP pacing and 12510 * we have everything except 1MSS or less 12511 * bytes left out. We are still pacing away. 12512 * And there is data that could be sent, This 12513 * means we are inserting delayed ack time in 12514 * our measurements because we are pacing too slow. 12515 */ 12516 rack_validate_multipliers_at_or_above100(rack); 12517 rack->rc_dragged_bottom = 1; 12518 rack_increase_bw_mul(rack, -1, 0, 0, 1); 12519 } 12520 } 12521 12522 #ifdef TCP_REQUEST_TRK 12523 static void 12524 rack_log_hybrid(struct tcp_rack *rack, uint32_t seq, 12525 struct tcp_sendfile_track *cur, uint8_t mod, int line, int err) 12526 { 12527 int do_log; 12528 12529 do_log = tcp_bblogging_on(rack->rc_tp); 12530 if (do_log == 0) { 12531 if ((do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) )== 0) 12532 return; 12533 /* We only allow the three below with point logging on */ 12534 if ((mod != HYBRID_LOG_RULES_APP) && 12535 (mod != HYBRID_LOG_RULES_SET) && 12536 (mod != HYBRID_LOG_REQ_COMP)) 12537 return; 12538 12539 } 12540 if (do_log) { 12541 union tcp_log_stackspecific log; 12542 struct timeval tv; 12543 12544 /* Convert our ms to a microsecond */ 12545 memset(&log, 0, sizeof(log)); 12546 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 12547 log.u_bbr.flex1 = seq; 12548 log.u_bbr.cwnd_gain = line; 12549 if (cur != NULL) { 12550 uint64_t off; 12551 12552 log.u_bbr.flex2 = cur->start_seq; 12553 log.u_bbr.flex3 = cur->end_seq; 12554 log.u_bbr.flex4 = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 12555 log.u_bbr.flex5 = (uint32_t)(cur->localtime & 0x00000000ffffffff); 12556 log.u_bbr.flex6 = cur->flags; 12557 log.u_bbr.pkts_out = cur->hybrid_flags; 12558 log.u_bbr.rttProp = cur->timestamp; 12559 log.u_bbr.cur_del_rate = cur->cspr; 12560 log.u_bbr.bw_inuse = cur->start; 12561 log.u_bbr.applimited = (uint32_t)(cur->end & 0x00000000ffffffff); 12562 log.u_bbr.delivered = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff) ; 12563 log.u_bbr.epoch = (uint32_t)(cur->deadline & 0x00000000ffffffff); 12564 log.u_bbr.lt_epoch = (uint32_t)((cur->deadline >> 32) & 0x00000000ffffffff) ; 12565 log.u_bbr.inhpts = 1; 12566 #ifdef TCP_REQUEST_TRK 12567 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 12568 log.u_bbr.use_lt_bw = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 12569 #endif 12570 } else { 12571 log.u_bbr.flex2 = err; 12572 } 12573 /* 12574 * Fill in flex7 to be CHD (catchup|hybrid|DGP) 12575 */ 12576 log.u_bbr.flex7 = rack->rc_catch_up; 12577 log.u_bbr.flex7 <<= 1; 12578 log.u_bbr.flex7 |= rack->rc_hybrid_mode; 12579 log.u_bbr.flex7 <<= 1; 12580 log.u_bbr.flex7 |= rack->dgp_on; 12581 /* 12582 * Compose bbr_state to be a bit wise 0000ADHF 12583 * where A is the always_pace flag 12584 * where D is the dgp_on flag 12585 * where H is the hybrid_mode on flag 12586 * where F is the use_fixed_rate flag. 
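 * For example (illustrative values): always_pace=1, dgp_on=1,
 * hybrid_mode=0 and use_fixed_rate=0 encode as 0b1100 (0xC).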
12587 */ 12588 log.u_bbr.bbr_state = rack->rc_always_pace; 12589 log.u_bbr.bbr_state <<= 1; 12590 log.u_bbr.bbr_state |= rack->dgp_on; 12591 log.u_bbr.bbr_state <<= 1; 12592 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 12593 log.u_bbr.bbr_state <<= 1; 12594 log.u_bbr.bbr_state |= rack->use_fixed_rate; 12595 log.u_bbr.flex8 = mod; 12596 log.u_bbr.delRate = rack->r_ctl.bw_rate_cap; 12597 log.u_bbr.bbr_substate = rack->r_ctl.client_suggested_maxseg; 12598 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 12599 log.u_bbr.pkt_epoch = rack->rc_tp->tcp_hybrid_start; 12600 log.u_bbr.lost = rack->rc_tp->tcp_hybrid_error; 12601 log.u_bbr.pacing_gain = (uint16_t)rack->rc_tp->tcp_hybrid_stop; 12602 tcp_log_event(rack->rc_tp, NULL, 12603 &rack->rc_inp->inp_socket->so_rcv, 12604 &rack->rc_inp->inp_socket->so_snd, 12605 TCP_HYBRID_PACING_LOG, 0, 12606 0, &log, false, NULL, __func__, __LINE__, &tv); 12607 } 12608 } 12609 #endif 12610 12611 #ifdef TCP_REQUEST_TRK 12612 static void 12613 rack_set_dgp_hybrid_mode(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts) 12614 { 12615 struct tcp_sendfile_track *rc_cur, *orig_ent; 12616 struct tcpcb *tp; 12617 int err = 0; 12618 12619 orig_ent = rack->r_ctl.rc_last_sft; 12620 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, seq); 12621 if (rc_cur == NULL) { 12622 /* If not in the beginning what about the end piece */ 12623 if (rack->rc_hybrid_mode) 12624 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); 12625 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, (seq + len - 1)); 12626 } else { 12627 err = 12345; 12628 } 12629 /* If we find no parameters we are in straight DGP mode */ 12630 if(rc_cur == NULL) { 12631 /* None found for this seq, just DGP for now */ 12632 if (rack->rc_hybrid_mode) { 12633 rack->r_ctl.client_suggested_maxseg = 0; 12634 rack->rc_catch_up = 0; 12635 if (rack->cspr_is_fcc == 0) 12636 rack->r_ctl.bw_rate_cap = 0; 12637 else 12638 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 12639 } 12640 if (rack->rc_hybrid_mode) { 12641 rack_log_hybrid(rack, (seq + len - 1), NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); 12642 } 12643 if (rack->r_ctl.rc_last_sft) { 12644 rack->r_ctl.rc_last_sft = NULL; 12645 } 12646 return; 12647 } 12648 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_WASSET) == 0) { 12649 /* This entry was never setup for hybrid pacing on/off etc */ 12650 if (rack->rc_hybrid_mode) { 12651 rack->r_ctl.client_suggested_maxseg = 0; 12652 rack->rc_catch_up = 0; 12653 rack->r_ctl.bw_rate_cap = 0; 12654 } 12655 if (rack->r_ctl.rc_last_sft) { 12656 rack->r_ctl.rc_last_sft = NULL; 12657 } 12658 if ((rc_cur->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { 12659 rc_cur->flags |= TCP_TRK_TRACK_FLG_FSND; 12660 rc_cur->first_send = cts; 12661 rc_cur->sent_at_fs = rack->rc_tp->t_sndbytes; 12662 rc_cur->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; 12663 } 12664 return; 12665 } 12666 /* 12667 * Ok if we have a new entry *or* have never 12668 * set up an entry we need to proceed. If 12669 * we have already set it up this entry we 12670 * just continue along with what we already 12671 * setup. 
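 * Setting an entry up below means: capping the pacing rate from the
 * client-supplied cspr (compensated for line-rate overhead), taking
 * any hinted maximum segment size, and, when catch-up mode is
 * requested, computing a deadline of
 *   base_time + (len * HPTS_USEC_IN_SEC) / cspr
 * where base_time is the request's arrival time (or the time we
 * start sending, for a duplicate timestamp) and len includes an
 * estimate of TLS overhead when TLS is in use.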
12672 */ 12673 tp = rack->rc_tp; 12674 if ((rack->r_ctl.rc_last_sft != NULL) && 12675 (rack->r_ctl.rc_last_sft == rc_cur)) { 12676 /* Its already in place */ 12677 if (rack->rc_hybrid_mode) 12678 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_ISSAME, __LINE__, 0); 12679 return; 12680 } 12681 if (rack->rc_hybrid_mode == 0) { 12682 rack->r_ctl.rc_last_sft = rc_cur; 12683 if (orig_ent) { 12684 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; 12685 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; 12686 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; 12687 } 12688 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0); 12689 return; 12690 } 12691 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CSPR) && rc_cur->cspr){ 12692 /* Compensate for all the header overhead's */ 12693 if (rack->cspr_is_fcc == 0) 12694 rack->r_ctl.bw_rate_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); 12695 else 12696 rack->r_ctl.fillcw_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); 12697 } else { 12698 if (rack->rc_hybrid_mode) { 12699 if (rack->cspr_is_fcc == 0) 12700 rack->r_ctl.bw_rate_cap = 0; 12701 else 12702 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 12703 } 12704 } 12705 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_H_MS) 12706 rack->r_ctl.client_suggested_maxseg = rc_cur->hint_maxseg; 12707 else 12708 rack->r_ctl.client_suggested_maxseg = 0; 12709 if (rc_cur->timestamp == rack->r_ctl.last_tm_mark) { 12710 /* 12711 * It is the same timestamp as the previous one 12712 * add the hybrid flag that will indicate we use 12713 * sendtime not arrival time for catch-up mode. 12714 */ 12715 rc_cur->hybrid_flags |= TCP_HYBRID_PACING_SENDTIME; 12716 } 12717 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CU) && 12718 (rc_cur->cspr > 0)) { 12719 uint64_t len; 12720 12721 rack->rc_catch_up = 1; 12722 /* 12723 * Calculate the deadline time, first set the 12724 * time to when the request arrived. 12725 */ 12726 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_SENDTIME) { 12727 /* 12728 * For cases where its a duplicate tm (we received more 12729 * than one request for a tm) we want to use now, the point 12730 * where we are just sending the first bit of the request. 12731 */ 12732 rc_cur->deadline = cts; 12733 } else { 12734 /* 12735 * Here we have a different tm from the last request 12736 * so we want to use arrival time as our base. 12737 */ 12738 rc_cur->deadline = rc_cur->localtime; 12739 } 12740 /* 12741 * Next calculate the length and compensate for 12742 * TLS if need be. 12743 */ 12744 len = rc_cur->end - rc_cur->start; 12745 if (tp->t_inpcb.inp_socket->so_snd.sb_tls_info) { 12746 /* 12747 * This session is doing TLS. Take a swag guess 12748 * at the overhead. 12749 */ 12750 len += tcp_estimate_tls_overhead(tp->t_inpcb.inp_socket, len); 12751 } 12752 /* 12753 * Now considering the size, and the cspr, what is the time that 12754 * would be required at the cspr rate. Here we use the raw 12755 * cspr value since the client only looks at the raw data. We 12756 * do use len which includes TLS overhead, but not the TCP/IP etc. 12757 * That will get made up for in the CU pacing rate set. 12758 */ 12759 len *= HPTS_USEC_IN_SEC; 12760 len /= rc_cur->cspr; 12761 rc_cur->deadline += len; 12762 } else { 12763 rack->rc_catch_up = 0; 12764 rc_cur->deadline = 0; 12765 } 12766 if (rack->r_ctl.client_suggested_maxseg != 0) { 12767 /* 12768 * We need to reset the max pace segs if we have a 12769 * client_suggested_maxseg. 
12770 */ 12771 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12772 } 12773 if (orig_ent) { 12774 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; 12775 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; 12776 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; 12777 } 12778 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0); 12779 /* Remember it for next time and for CU mode */ 12780 rack->r_ctl.rc_last_sft = rc_cur; 12781 rack->r_ctl.last_tm_mark = rc_cur->timestamp; 12782 } 12783 #endif 12784 12785 static void 12786 rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts) 12787 { 12788 #ifdef TCP_REQUEST_TRK 12789 struct tcp_sendfile_track *ent; 12790 12791 ent = rack->r_ctl.rc_last_sft; 12792 if ((ent == NULL) || 12793 (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) || 12794 (SEQ_GEQ(seq, ent->end_seq))) { 12795 /* Time to update the track. */ 12796 rack_set_dgp_hybrid_mode(rack, seq, len, cts); 12797 ent = rack->r_ctl.rc_last_sft; 12798 } 12799 /* Out of all */ 12800 if (ent == NULL) { 12801 return; 12802 } 12803 if (SEQ_LT(ent->end_seq, (seq + len))) { 12804 /* 12805 * This is the case where our end_seq guess 12806 * was wrong. This is usually due to TLS having 12807 * more bytes then our guess. It could also be the 12808 * case that the client sent in two requests closely 12809 * and the SB is full of both so we are sending part 12810 * of each (end|beg). In such a case lets move this 12811 * guys end to match the end of this send. That 12812 * way it will complete when all of it is acked. 12813 */ 12814 ent->end_seq = (seq + len); 12815 if (rack->rc_hybrid_mode) 12816 rack_log_hybrid_bw(rack, seq, len, 0, 0, HYBRID_LOG_EXTEND, 0, ent, __LINE__); 12817 } 12818 /* Now validate we have set the send time of this one */ 12819 if ((ent->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { 12820 ent->flags |= TCP_TRK_TRACK_FLG_FSND; 12821 ent->first_send = cts; 12822 ent->sent_at_fs = rack->rc_tp->t_sndbytes; 12823 ent->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; 12824 } 12825 #endif 12826 } 12827 12828 static void 12829 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount) 12830 { 12831 /* 12832 * The fast output path is enabled and we 12833 * have moved the cumack forward. Lets see if 12834 * we can expand forward the fast path length by 12835 * that amount. What we would ideally like to 12836 * do is increase the number of bytes in the 12837 * fast path block (left_to_send) by the 12838 * acked amount. However we have to gate that 12839 * by two factors: 12840 * 1) The amount outstanding and the rwnd of the peer 12841 * (i.e. we don't want to exceed the rwnd of the peer). 12842 * <and> 12843 * 2) The amount of data left in the socket buffer (i.e. 12844 * we can't send beyond what is in the buffer). 12845 * 12846 * Note that this does not take into account any increase 12847 * in the cwnd. We will only extend the fast path by 12848 * what was acked. 
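 * As an illustrative example: with 3000 bytes newly acked, a
 * left_to_send of 10000, 20000 not-yet-sent bytes in the socket
 * buffer and only 12000 bytes of peer window left beyond what is
 * outstanding, gating_val is min(20000, 12000) = 12000; the proposed
 * 13000 byte fast-path length exceeds that, so left_to_send is not
 * extended.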
12849 */ 12850 uint32_t new_total, gating_val; 12851 12852 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; 12853 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), 12854 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); 12855 if (new_total <= gating_val) { 12856 /* We can increase left_to_send by the acked amount */ 12857 counter_u64_add(rack_extended_rfo, 1); 12858 rack->r_ctl.fsb.left_to_send = new_total; 12859 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))), 12860 ("rack:%p left_to_send:%u sbavail:%u out:%u", 12861 rack, rack->r_ctl.fsb.left_to_send, 12862 sbavail(&rack->rc_inp->inp_socket->so_snd), 12863 (tp->snd_max - tp->snd_una))); 12864 12865 } 12866 } 12867 12868 static void 12869 rack_adjust_sendmap_head(struct tcp_rack *rack, struct sockbuf *sb) 12870 { 12871 /* 12872 * Here any sendmap entry that points to the 12873 * beginning mbuf must be adjusted to the correct 12874 * offset. This must be called with: 12875 * 1) The socket buffer locked 12876 * 2) snd_una adjusted to its new position. 12877 * 12878 * Note that (2) implies rack_ack_received has also 12879 * been called and all the sbcut's have been done. 12880 * 12881 * We grab the first mbuf in the socket buffer and 12882 * then go through the front of the sendmap, recalculating 12883 * the stored offset for any sendmap entry that has 12884 * that mbuf. We must use the sb functions to do this 12885 * since its possible an add was done has well as 12886 * the subtraction we may have just completed. This should 12887 * not be a penalty though, since we just referenced the sb 12888 * to go in and trim off the mbufs that we freed (of course 12889 * there will be a penalty for the sendmap references though). 12890 * 12891 * Note also with INVARIANT on, we validate with a KASSERT 12892 * that the first sendmap entry has a soff of 0. 12893 * 12894 */ 12895 struct mbuf *m; 12896 struct rack_sendmap *rsm; 12897 tcp_seq snd_una; 12898 #ifdef INVARIANTS 12899 int first_processed = 0; 12900 #endif 12901 12902 snd_una = rack->rc_tp->snd_una; 12903 SOCKBUF_LOCK_ASSERT(sb); 12904 m = sb->sb_mb; 12905 rsm = tqhash_min(rack->r_ctl.tqh); 12906 if ((rsm == NULL) || (m == NULL)) { 12907 /* Nothing outstanding */ 12908 return; 12909 } 12910 /* The very first RSM's mbuf must point to the head mbuf in the sb */ 12911 KASSERT((rsm->m == m), 12912 ("Rack:%p sb:%p rsm:%p -- first rsm mbuf not aligned to sb", 12913 rack, sb, rsm)); 12914 while (rsm->m && (rsm->m == m)) { 12915 /* one to adjust */ 12916 #ifdef INVARIANTS 12917 struct mbuf *tm; 12918 uint32_t soff; 12919 12920 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); 12921 if ((rsm->orig_m_len != m->m_len) || 12922 (rsm->orig_t_space != M_TRAILINGROOM(m))){ 12923 rack_adjust_orig_mlen(rsm); 12924 } 12925 if (first_processed == 0) { 12926 KASSERT((rsm->soff == 0), 12927 ("Rack:%p rsm:%p -- rsm at head but soff not zero", 12928 rack, rsm)); 12929 first_processed = 1; 12930 } 12931 if ((rsm->soff != soff) || (rsm->m != tm)) { 12932 /* 12933 * This is not a fatal error, we anticipate it 12934 * might happen (the else code), so we count it here 12935 * so that under invariant we can see that it really 12936 * does happen. 
12937 */ 12938 counter_u64_add(rack_adjust_map_bw, 1); 12939 } 12940 rsm->m = tm; 12941 rsm->soff = soff; 12942 if (tm) { 12943 rsm->orig_m_len = rsm->m->m_len; 12944 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 12945 } else { 12946 rsm->orig_m_len = 0; 12947 rsm->orig_t_space = 0; 12948 } 12949 #else 12950 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); 12951 if (rsm->m) { 12952 rsm->orig_m_len = rsm->m->m_len; 12953 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 12954 } else { 12955 rsm->orig_m_len = 0; 12956 rsm->orig_t_space = 0; 12957 } 12958 #endif 12959 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 12960 if (rsm == NULL) 12961 break; 12962 } 12963 } 12964 12965 #ifdef TCP_REQUEST_TRK 12966 static inline void 12967 rack_req_check_for_comp(struct tcp_rack *rack, tcp_seq th_ack) 12968 { 12969 struct tcp_sendfile_track *ent; 12970 int i; 12971 12972 if ((rack->rc_hybrid_mode == 0) && 12973 (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) == 0)) { 12974 /* 12975 * Just do normal completions hybrid pacing is not on 12976 * and CLDL is off as well. 12977 */ 12978 tcp_req_check_for_comp(rack->rc_tp, th_ack); 12979 return; 12980 } 12981 /* 12982 * Originally I was just going to find the th_ack associated 12983 * with an entry. But then I realized a large strech ack could 12984 * in theory ack two or more requests at once. So instead we 12985 * need to find all entries that are completed by th_ack not 12986 * just a single entry and do our logging. 12987 */ 12988 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); 12989 while (ent != NULL) { 12990 /* 12991 * We may be doing hybrid pacing or CLDL and need more details possibly 12992 * so we do it manually instead of calling 12993 * tcp_req_check_for_comp() 12994 */ 12995 uint64_t laa, tim, data, cbw, ftim; 12996 12997 /* Ok this ack frees it */ 12998 rack_log_hybrid(rack, th_ack, 12999 ent, HYBRID_LOG_REQ_COMP, __LINE__, 0); 13000 rack_log_hybrid_sends(rack, ent, __LINE__); 13001 /* calculate the time based on the ack arrival */ 13002 data = ent->end - ent->start; 13003 laa = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 13004 if (ent->flags & TCP_TRK_TRACK_FLG_FSND) { 13005 if (ent->first_send > ent->localtime) 13006 ftim = ent->first_send; 13007 else 13008 ftim = ent->localtime; 13009 } else { 13010 /* TSNH */ 13011 ftim = ent->localtime; 13012 } 13013 if (laa > ent->localtime) 13014 tim = laa - ftim; 13015 else 13016 tim = 0; 13017 cbw = data * HPTS_USEC_IN_SEC; 13018 if (tim > 0) 13019 cbw /= tim; 13020 else 13021 cbw = 0; 13022 rack_log_hybrid_bw(rack, th_ack, cbw, tim, data, HYBRID_LOG_BW_MEASURE, 0, ent, __LINE__); 13023 /* 13024 * Check to see if we are freeing what we are pointing to send wise 13025 * if so be sure to NULL the pointer so we know we are no longer 13026 * set to anything. 
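 * Dropping rc_last_sft here also clears, when hybrid pacing is
 * active, catch-up mode, the rate cap and any client-suggested
 * segment size that were tied to the completed request.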
13027 */ 13028 if (ent == rack->r_ctl.rc_last_sft) { 13029 rack->r_ctl.rc_last_sft = NULL; 13030 if (rack->rc_hybrid_mode) { 13031 rack->rc_catch_up = 0; 13032 if (rack->cspr_is_fcc == 0) 13033 rack->r_ctl.bw_rate_cap = 0; 13034 else 13035 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 13036 rack->r_ctl.client_suggested_maxseg = 0; 13037 } 13038 } 13039 /* Generate the log that the tcp_netflix call would have */ 13040 tcp_req_log_req_info(rack->rc_tp, ent, 13041 i, TCP_TRK_REQ_LOG_FREED, 0, 0); 13042 /* Free it and see if there is another one */ 13043 tcp_req_free_a_slot(rack->rc_tp, ent); 13044 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); 13045 } 13046 } 13047 #endif 13048 13049 13050 /* 13051 * Return value of 1, we do not need to call rack_process_data(). 13052 * return value of 0, rack_process_data can be called. 13053 * For ret_val if its 0 the TCP is locked, if its non-zero 13054 * its unlocked and probably unsafe to touch the TCB. 13055 */ 13056 static int 13057 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 13058 struct tcpcb *tp, struct tcpopt *to, 13059 uint32_t tiwin, int32_t tlen, 13060 int32_t * ofia, int32_t thflags, int32_t *ret_val, int32_t orig_tlen) 13061 { 13062 int32_t ourfinisacked = 0; 13063 int32_t nsegs, acked_amount; 13064 int32_t acked; 13065 struct mbuf *mfree; 13066 struct tcp_rack *rack; 13067 int32_t under_pacing = 0; 13068 int32_t post_recovery = 0; 13069 uint32_t p_cwnd; 13070 13071 INP_WLOCK_ASSERT(tptoinpcb(tp)); 13072 13073 rack = (struct tcp_rack *)tp->t_fb_ptr; 13074 if (SEQ_GT(th->th_ack, tp->snd_max)) { 13075 __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val, 13076 &rack->r_ctl.challenge_ack_ts, 13077 &rack->r_ctl.challenge_ack_cnt); 13078 rack->r_wanted_output = 1; 13079 return (1); 13080 } 13081 if (rack->gp_ready && 13082 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 13083 under_pacing = 1; 13084 } 13085 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 13086 int in_rec, dup_ack_struck = 0; 13087 int dsack_seen = 0, sacks_seen = 0; 13088 13089 in_rec = IN_FASTRECOVERY(tp->t_flags); 13090 if (rack->rc_in_persist) { 13091 tp->t_rxtshift = 0; 13092 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 13093 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 13094 } 13095 13096 if ((th->th_ack == tp->snd_una) && 13097 (tiwin == tp->snd_wnd) && 13098 (orig_tlen == 0) && 13099 ((to->to_flags & TOF_SACK) == 0)) { 13100 rack_strike_dupack(rack, th->th_ack); 13101 dup_ack_struck = 1; 13102 } 13103 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), 13104 dup_ack_struck, &dsack_seen, &sacks_seen); 13105 if ((rack->sack_attack_disable > 0) && 13106 (th->th_ack == tp->snd_una) && 13107 (tiwin == tp->snd_wnd) && 13108 (orig_tlen == 0) && 13109 (dsack_seen == 0) && 13110 (sacks_seen > 0)) { 13111 /* 13112 * If sacks have been disabled we may 13113 * want to strike a dup-ack "ignoring" the 13114 * sack as long as the sack was not a "dsack". Note 13115 * that if no sack is sent (TOF_SACK is off) then the 13116 * normal dsack code above rack_log_ack() would have 13117 * already struck. So this is just to catch the case 13118 * were we are ignoring sacks from this guy due to 13119 * it being a suspected attacker. 
			 */
			rack_strike_dupack(rack, th->th_ack);
		}

	}
	if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
		/*
		 * Old ack, behind (or duplicate to) the last one rcv'd
		 * Note: We mark reordering as occurring if the ack is
		 * less than snd_una and we have not closed our window.
		 */
		if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) {
			rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
			if (rack->r_ctl.rc_reorder_ts == 0)
				rack->r_ctl.rc_reorder_ts = 1;
		}
		return (0);
	}
	/*
	 * If we reach this point, ACK is not a duplicate, i.e., it ACKs
	 * something we sent.
	 */
	if (tp->t_flags & TF_NEEDSYN) {
		/*
		 * T/TCP: Connection was half-synchronized, and our SYN has
		 * been ACK'd (so connection is now fully synchronized). Go
		 * to non-starred state, increment snd_una for ACK of SYN,
		 * and check if we can do window scaling.
		 */
		tp->t_flags &= ~TF_NEEDSYN;
		tp->snd_una++;
		/* Do window scaling? */
		if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
		    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
			tp->rcv_scale = tp->request_r_scale;
			/* Send window already scaled. */
		}
	}
	nsegs = max(1, m->m_pkthdr.lro_nsegs);

	acked = BYTES_THIS_ACK(tp, th);
	if (acked) {
		/*
		 * Any time we move the cum-ack forward clear
		 * keep-alive tied probe-not-answered. The
		 * persists clears its own on entry.
		 */
		rack->probe_not_answered = 0;
	}
	KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
	KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
	/*
	 * If we just performed our first retransmit, and the ACK arrives
	 * within our recovery window, then it was a mistake to do the
	 * retransmit in the first place. Recover our original cwnd and
	 * ssthresh, and proceed to transmit where we left off.
	 */
	if ((tp->t_flags & TF_PREVVALID) &&
	    ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
		tp->t_flags &= ~TF_PREVVALID;
		if (tp->t_rxtshift == 1 &&
		    (int)(ticks - tp->t_badrxtwin) < 0)
			rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__);
	}
	if (acked) {
		/* assure we are not backed off */
		tp->t_rxtshift = 0;
		RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
		    rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
		rack->rc_tlp_in_progress = 0;
		rack->r_ctl.rc_tlp_cnt_out = 0;
		/*
		 * If it is the RXT timer we want to
		 * stop it, so we can restart a TLP.
		 */
		if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
			rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
#ifdef TCP_REQUEST_TRK
		rack_req_check_for_comp(rack, th->th_ack);
#endif
	}
	/*
	 * If we have a timestamp reply, update smoothed round trip time. If
	 * no timestamp is present but transmit timer is running and timed
	 * sequence number was acked, update smoothed round trip time. Since
	 * we now have an rtt measurement, cancel the timer backoff (cf.,
	 * Phil Karn's retransmit alg.). Recompute the initial retransmit
	 * timer.
	 *
	 * Some boxes send broken timestamp replies during the SYN+ACK
	 * phase, ignore timestamps of 0 or we could calculate a huge RTT
	 * and blow up the retransmit timer.
13212 */ 13213 /* 13214 * If all outstanding data is acked, stop retransmit timer and 13215 * remember to restart (more output or persist). If there is more 13216 * data to be acked, restart retransmit timer, using current 13217 * (possibly backed-off) value. 13218 */ 13219 if (acked == 0) { 13220 if (ofia) 13221 *ofia = ourfinisacked; 13222 return (0); 13223 } 13224 if (IN_RECOVERY(tp->t_flags)) { 13225 if (SEQ_LT(th->th_ack, tp->snd_recover) && 13226 (SEQ_LT(th->th_ack, tp->snd_max))) { 13227 tcp_rack_partialack(tp); 13228 } else { 13229 rack_post_recovery(tp, th->th_ack); 13230 post_recovery = 1; 13231 /* 13232 * Grab the segsiz, multiply by 2 and add the snd_cwnd 13233 * that is the max the CC should add if we are exiting 13234 * recovery and doing a late add. 13235 */ 13236 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 13237 p_cwnd <<= 1; 13238 p_cwnd += tp->snd_cwnd; 13239 } 13240 } else if ((rack->rto_from_rec == 1) && 13241 SEQ_GEQ(th->th_ack, tp->snd_recover)) { 13242 /* 13243 * We were in recovery, hit a rxt timeout 13244 * and never re-entered recovery. The timeout(s) 13245 * made up all the lost data. In such a case 13246 * we need to clear the rto_from_rec flag. 13247 */ 13248 rack->rto_from_rec = 0; 13249 } 13250 /* 13251 * Let the congestion control algorithm update congestion control 13252 * related information. This typically means increasing the 13253 * congestion window. 13254 */ 13255 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, post_recovery); 13256 if (post_recovery && 13257 (tp->snd_cwnd > p_cwnd)) { 13258 /* Must be non-newreno (cubic) getting too ahead of itself */ 13259 tp->snd_cwnd = p_cwnd; 13260 } 13261 SOCKBUF_LOCK(&so->so_snd); 13262 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 13263 tp->snd_wnd -= acked_amount; 13264 mfree = sbcut_locked(&so->so_snd, acked_amount); 13265 if ((sbused(&so->so_snd) == 0) && 13266 (acked > acked_amount) && 13267 (tp->t_state >= TCPS_FIN_WAIT_1) && 13268 (tp->t_flags & TF_SENTFIN)) { 13269 /* 13270 * We must be sure our fin 13271 * was sent and acked (we can be 13272 * in FIN_WAIT_1 without having 13273 * sent the fin). 13274 */ 13275 ourfinisacked = 1; 13276 } 13277 tp->snd_una = th->th_ack; 13278 /* wakeups? */ 13279 if (acked_amount && sbavail(&so->so_snd)) 13280 rack_adjust_sendmap_head(rack, &so->so_snd); 13281 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 13282 /* NB: sowwakeup_locked() does an implicit unlock. 
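	 * It releases the send-socket-buffer lock taken above, so the
	 * sockbuf must not be touched again here without relocking.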
*/ 13283 sowwakeup_locked(so); 13284 m_freem(mfree); 13285 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 13286 tp->snd_recover = tp->snd_una; 13287 13288 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { 13289 tp->snd_nxt = tp->snd_max; 13290 } 13291 if (under_pacing && 13292 (rack->use_fixed_rate == 0) && 13293 (rack->in_probe_rtt == 0) && 13294 rack->rc_gp_dyn_mul && 13295 rack->rc_always_pace) { 13296 /* Check if we are dragging bottom */ 13297 rack_check_bottom_drag(tp, rack, so); 13298 } 13299 if (tp->snd_una == tp->snd_max) { 13300 /* Nothing left outstanding */ 13301 tp->t_flags &= ~TF_PREVVALID; 13302 rack->r_ctl.idle_snd_una = tp->snd_una; 13303 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 13304 if (rack->r_ctl.rc_went_idle_time == 0) 13305 rack->r_ctl.rc_went_idle_time = 1; 13306 rack->r_ctl.retran_during_recovery = 0; 13307 rack->r_ctl.dsack_byte_cnt = 0; 13308 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 13309 if (sbavail(&tptosocket(tp)->so_snd) == 0) 13310 tp->t_acktime = 0; 13311 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13312 rack->rc_suspicious = 0; 13313 /* Set need output so persist might get set */ 13314 rack->r_wanted_output = 1; 13315 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 13316 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13317 (sbavail(&so->so_snd) == 0) && 13318 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 13319 /* 13320 * The socket was gone and the 13321 * peer sent data (now or in the past), time to 13322 * reset him. 13323 */ 13324 *ret_val = 1; 13325 /* tcp_close will kill the inp pre-log the Reset */ 13326 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 13327 tp = tcp_close(tp); 13328 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 13329 return (1); 13330 } 13331 } 13332 if (ofia) 13333 *ofia = ourfinisacked; 13334 return (0); 13335 } 13336 13337 13338 static void 13339 rack_log_collapse(struct tcp_rack *rack, uint32_t cnt, uint32_t split, uint32_t out, int line, 13340 int dir, uint32_t flags, struct rack_sendmap *rsm) 13341 { 13342 if (tcp_bblogging_on(rack->rc_tp)) { 13343 union tcp_log_stackspecific log; 13344 struct timeval tv; 13345 13346 memset(&log, 0, sizeof(log)); 13347 log.u_bbr.flex1 = cnt; 13348 log.u_bbr.flex2 = split; 13349 log.u_bbr.flex3 = out; 13350 log.u_bbr.flex4 = line; 13351 log.u_bbr.flex5 = rack->r_must_retran; 13352 log.u_bbr.flex6 = flags; 13353 log.u_bbr.flex7 = rack->rc_has_collapsed; 13354 log.u_bbr.flex8 = dir; /* 13355 * 1 is collapsed, 0 is uncollapsed, 13356 * 2 is log of a rsm being marked, 3 is a split. 13357 */ 13358 if (rsm == NULL) 13359 log.u_bbr.rttProp = 0; 13360 else 13361 log.u_bbr.rttProp = (uint64_t)rsm; 13362 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 13363 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 13364 TCP_LOG_EVENTP(rack->rc_tp, NULL, 13365 &rack->rc_inp->inp_socket->so_rcv, 13366 &rack->rc_inp->inp_socket->so_snd, 13367 TCP_RACK_LOG_COLLAPSE, 0, 13368 0, &log, false, &tv); 13369 } 13370 } 13371 13372 static void 13373 rack_collapsed_window(struct tcp_rack *rack, uint32_t out, tcp_seq th_ack, int line) 13374 { 13375 /* 13376 * Here all we do is mark the collapsed point and set the flag. 13377 * This may happen again and again, but there is no 13378 * sense splitting our map until we know where the 13379 * peer finally lands in the collapse. 
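	 * The collapse point recorded below is th_ack plus the advertised
	 * window, i.e. the first sequence number the shrunken window no
	 * longer covers, while high_collapse_point remembers how far we
	 * had already sent (snd_max) when the collapse was seen.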
13380 */ 13381 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); 13382 if ((rack->rc_has_collapsed == 0) || 13383 (rack->r_ctl.last_collapse_point != (th_ack + rack->rc_tp->snd_wnd))) 13384 counter_u64_add(rack_collapsed_win_seen, 1); 13385 rack->r_ctl.last_collapse_point = th_ack + rack->rc_tp->snd_wnd; 13386 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max; 13387 rack->rc_has_collapsed = 1; 13388 rack->r_collapse_point_valid = 1; 13389 rack_log_collapse(rack, 0, th_ack, rack->r_ctl.last_collapse_point, line, 1, 0, NULL); 13390 } 13391 13392 static void 13393 rack_un_collapse_window(struct tcp_rack *rack, int line) 13394 { 13395 struct rack_sendmap *nrsm, *rsm; 13396 int cnt = 0, split = 0; 13397 int insret __diagused; 13398 13399 13400 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); 13401 rack->rc_has_collapsed = 0; 13402 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); 13403 if (rsm == NULL) { 13404 /* Nothing to do maybe the peer ack'ed it all */ 13405 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 13406 return; 13407 } 13408 /* Now do we need to split this one? */ 13409 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) { 13410 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 13411 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm); 13412 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 13413 if (nrsm == NULL) { 13414 /* We can't get a rsm, mark all? */ 13415 nrsm = rsm; 13416 goto no_split; 13417 } 13418 /* Clone it */ 13419 split = 1; 13420 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point); 13421 #ifndef INVARIANTS 13422 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 13423 #else 13424 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 13425 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 13426 nrsm, insret, rack, rsm); 13427 } 13428 #endif 13429 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 13430 rack->r_ctl.last_collapse_point, __LINE__); 13431 if (rsm->r_in_tmap) { 13432 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 13433 nrsm->r_in_tmap = 1; 13434 } 13435 /* 13436 * Set in the new RSM as the 13437 * collapsed starting point 13438 */ 13439 rsm = nrsm; 13440 } 13441 13442 no_split: 13443 TQHASH_FOREACH_FROM(nrsm, rack->r_ctl.tqh, rsm) { 13444 cnt++; 13445 nrsm->r_flags |= RACK_RWND_COLLAPSED; 13446 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm); 13447 cnt++; 13448 } 13449 if (cnt) { 13450 counter_u64_add(rack_collapsed_win, 1); 13451 } 13452 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 13453 } 13454 13455 static void 13456 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 13457 int32_t tlen, int32_t tfo_syn) 13458 { 13459 if (DELAY_ACK(tp, tlen) || tfo_syn) { 13460 rack_timer_cancel(tp, rack, 13461 rack->r_ctl.rc_rcvtime, __LINE__); 13462 tp->t_flags |= TF_DELACK; 13463 } else { 13464 rack->r_wanted_output = 1; 13465 tp->t_flags |= TF_ACKNOW; 13466 } 13467 } 13468 13469 static void 13470 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack) 13471 { 13472 /* 13473 * If fast output is in progress, lets validate that 13474 * the new window did not shrink on us and make it 13475 * so fast output should end. 13476 */ 13477 if (rack->r_fast_output) { 13478 uint32_t out; 13479 13480 /* 13481 * Calculate what we will send if left as is 13482 * and compare that to our send window. 
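		 * Illustrative example (hypothetical numbers): with 10,000
		 * bytes outstanding, 4,000 bytes staged in the fsb and a
		 * new window of 12,000, the sum 14,000 exceeds the window,
		 * so left_to_send is trimmed to 2,000 and fast output
		 * continues; were the remainder smaller than one full
		 * segment, or the window already met by what is
		 * outstanding, fast output would be turned off instead.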
13483 */ 13484 out = ctf_outstanding(tp); 13485 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 13486 /* ok we have an issue */ 13487 if (out >= tp->snd_wnd) { 13488 /* Turn off fast output the window is met or collapsed */ 13489 rack->r_fast_output = 0; 13490 } else { 13491 /* we have some room left */ 13492 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 13493 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 13494 /* If not at least 1 full segment never mind */ 13495 rack->r_fast_output = 0; 13496 } 13497 } 13498 } 13499 } 13500 } 13501 13502 /* 13503 * Return value of 1, the TCB is unlocked and most 13504 * likely gone, return value of 0, the TCP is still 13505 * locked. 13506 */ 13507 static int 13508 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 13509 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 13510 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 13511 { 13512 /* 13513 * Update window information. Don't look at window if no ACK: TAC's 13514 * send garbage on first SYN. 13515 */ 13516 int32_t nsegs; 13517 int32_t tfo_syn; 13518 struct tcp_rack *rack; 13519 13520 INP_WLOCK_ASSERT(tptoinpcb(tp)); 13521 13522 rack = (struct tcp_rack *)tp->t_fb_ptr; 13523 nsegs = max(1, m->m_pkthdr.lro_nsegs); 13524 if ((thflags & TH_ACK) && 13525 (SEQ_LT(tp->snd_wl1, th->th_seq) || 13526 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 13527 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 13528 /* keep track of pure window updates */ 13529 if (tlen == 0 && 13530 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 13531 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 13532 tp->snd_wnd = tiwin; 13533 rack_validate_fo_sendwin_up(tp, rack); 13534 tp->snd_wl1 = th->th_seq; 13535 tp->snd_wl2 = th->th_ack; 13536 if (tp->snd_wnd > tp->max_sndwnd) 13537 tp->max_sndwnd = tp->snd_wnd; 13538 rack->r_wanted_output = 1; 13539 } else if (thflags & TH_ACK) { 13540 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 13541 tp->snd_wnd = tiwin; 13542 rack_validate_fo_sendwin_up(tp, rack); 13543 tp->snd_wl1 = th->th_seq; 13544 tp->snd_wl2 = th->th_ack; 13545 } 13546 } 13547 if (tp->snd_wnd < ctf_outstanding(tp)) 13548 /* The peer collapsed the window */ 13549 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); 13550 else if (rack->rc_has_collapsed) 13551 rack_un_collapse_window(rack, __LINE__); 13552 if ((rack->r_collapse_point_valid) && 13553 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point))) 13554 rack->r_collapse_point_valid = 0; 13555 /* Was persist timer active and now we have window space? */ 13556 if ((rack->rc_in_persist != 0) && 13557 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 13558 rack->r_ctl.rc_pace_min_segs))) { 13559 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 13560 tp->snd_nxt = tp->snd_max; 13561 /* Make sure we output to start the timer */ 13562 rack->r_wanted_output = 1; 13563 } 13564 /* Do we enter persists? */ 13565 if ((rack->rc_in_persist == 0) && 13566 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 13567 TCPS_HAVEESTABLISHED(tp->t_state) && 13568 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 13569 sbavail(&tptosocket(tp)->so_snd) && 13570 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 13571 /* 13572 * Here the rwnd is less than 13573 * the pacing size, we are established, 13574 * nothing is outstanding, and there is 13575 * data to send. Enter persists. 
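		 * For example (illustrative numbers): with rc_high_rwnd/2
		 * at 32,768 and a pacing minimum of 1,448 bytes, an
		 * advertised window below 1,448, nothing outstanding and
		 * more than a window's worth of data queued sends us into
		 * persist here.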
		 */
		rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una);
	}
	if (tp->t_flags2 & TF2_DROP_AF_DATA) {
		m_freem(m);
		return (0);
	}
	/*
	 * We don't process the URG bit; just drag the urgent
	 * pointer (rcv_up) along with rcv_nxt.
	 */
	tp->rcv_up = tp->rcv_nxt;

	/*
	 * Process the segment text, merging it into the TCP sequencing
	 * queue, and arranging for acknowledgment of receipt if necessary.
	 * This process logically involves adjusting tp->rcv_wnd as data is
	 * presented to the user (this happens in tcp_usrreq.c, case
	 * PRU_RCVD). If a FIN has already been received on this connection
	 * then we just ignore the text.
	 */
	tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
	    (tp->t_flags & TF_FASTOPEN));
	if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) &&
	    TCPS_HAVERCVDFIN(tp->t_state) == 0) {
		tcp_seq save_start = th->th_seq;
		tcp_seq save_rnxt = tp->rcv_nxt;
		int save_tlen = tlen;

		m_adj(m, drop_hdrlen);	/* delayed header drop */
		/*
		 * Insert segment which includes th into TCP reassembly
		 * queue with control block tp. Set thflags to whether
		 * reassembly now includes a segment with FIN. This handles
		 * the common case inline (segment is the next to be
		 * received on an established connection, and the queue is
		 * empty), avoiding linkage into and removal from the queue
		 * and repetition of various conversions. Set DELACK for
		 * segments received in order, but ack immediately when
		 * segments are out of order (so fast retransmit can work).
		 */
		if (th->th_seq == tp->rcv_nxt &&
		    SEGQ_EMPTY(tp) &&
		    (TCPS_HAVEESTABLISHED(tp->t_state) ||
		    tfo_syn)) {
#ifdef NETFLIX_SB_LIMITS
			u_int mcnt, appended;

			if (so->so_rcv.sb_shlim) {
				mcnt = m_memcnt(m);
				appended = 0;
				if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
				    CFO_NOSLEEP, NULL) == false) {
					counter_u64_add(tcp_sb_shlim_fails, 1);
					m_freem(m);
					return (0);
				}
			}
#endif
			rack_handle_delayed_ack(tp, rack, tlen, tfo_syn);
			tp->rcv_nxt += tlen;
			if (tlen &&
			    ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
			    (tp->t_fbyte_in == 0)) {
				tp->t_fbyte_in = ticks;
				if (tp->t_fbyte_in == 0)
					tp->t_fbyte_in = 1;
				if (tp->t_fbyte_out && tp->t_fbyte_in)
					tp->t_flags2 |= TF2_FBYTES_COMPLETE;
			}
			thflags = tcp_get_flags(th) & TH_FIN;
			KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs);
			KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen);
			SOCKBUF_LOCK(&so->so_rcv);
			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
				m_freem(m);
			} else {
				int32_t newsize;

				if (tlen > 0) {
					newsize = tcp_autorcvbuf(m, th, so, tp, tlen);
					if (newsize)
						if (!sbreserve_locked(so, SO_RCV, newsize, NULL))
							so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
				}
#ifdef NETFLIX_SB_LIMITS
				appended =
#endif
					sbappendstream_locked(&so->so_rcv, m, 0);
			}
			rack_log_wakeup(tp, rack, &so->so_rcv, tlen, 1);
			/* NB: sorwakeup_locked() does an implicit unlock. */
			sorwakeup_locked(so);
#ifdef NETFLIX_SB_LIMITS
			if (so->so_rcv.sb_shlim && appended != mcnt)
				counter_fo_release(so->so_rcv.sb_shlim,
				    mcnt - appended);
#endif
		} else {
			/*
			 * XXX: Due to the header drop above "th" is
			 * theoretically invalid by now.
Fortunately 13678 * m_adj() doesn't actually frees any mbufs when 13679 * trimming from the head. 13680 */ 13681 tcp_seq temp = save_start; 13682 13683 thflags = tcp_reass(tp, th, &temp, &tlen, m); 13684 tp->t_flags |= TF_ACKNOW; 13685 if (tp->t_flags & TF_WAKESOR) { 13686 tp->t_flags &= ~TF_WAKESOR; 13687 /* NB: sorwakeup_locked() does an implicit unlock. */ 13688 sorwakeup_locked(so); 13689 } 13690 } 13691 if ((tp->t_flags & TF_SACK_PERMIT) && 13692 (save_tlen > 0) && 13693 TCPS_HAVEESTABLISHED(tp->t_state)) { 13694 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 13695 /* 13696 * DSACK actually handled in the fastpath 13697 * above. 13698 */ 13699 tcp_update_sack_list(tp, save_start, 13700 save_start + save_tlen); 13701 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 13702 if ((tp->rcv_numsacks >= 1) && 13703 (tp->sackblks[0].end == save_start)) { 13704 /* 13705 * Partial overlap, recorded at todrop 13706 * above. 13707 */ 13708 tcp_update_sack_list(tp, 13709 tp->sackblks[0].start, 13710 tp->sackblks[0].end); 13711 } else { 13712 tcp_update_dsack_list(tp, save_start, 13713 save_start + save_tlen); 13714 } 13715 } else if (tlen >= save_tlen) { 13716 /* Update of sackblks. */ 13717 tcp_update_dsack_list(tp, save_start, 13718 save_start + save_tlen); 13719 } else if (tlen > 0) { 13720 tcp_update_dsack_list(tp, save_start, 13721 save_start + tlen); 13722 } 13723 } 13724 } else { 13725 m_freem(m); 13726 thflags &= ~TH_FIN; 13727 } 13728 13729 /* 13730 * If FIN is received ACK the FIN and let the user know that the 13731 * connection is closing. 13732 */ 13733 if (thflags & TH_FIN) { 13734 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 13735 /* The socket upcall is handled by socantrcvmore. */ 13736 socantrcvmore(so); 13737 /* 13738 * If connection is half-synchronized (ie NEEDSYN 13739 * flag on) then delay ACK, so it may be piggybacked 13740 * when SYN is sent. Otherwise, since we received a 13741 * FIN then no more input can be expected, send ACK 13742 * now. 13743 */ 13744 if (tp->t_flags & TF_NEEDSYN) { 13745 rack_timer_cancel(tp, rack, 13746 rack->r_ctl.rc_rcvtime, __LINE__); 13747 tp->t_flags |= TF_DELACK; 13748 } else { 13749 tp->t_flags |= TF_ACKNOW; 13750 } 13751 tp->rcv_nxt++; 13752 } 13753 switch (tp->t_state) { 13754 /* 13755 * In SYN_RECEIVED and ESTABLISHED STATES enter the 13756 * CLOSE_WAIT state. 13757 */ 13758 case TCPS_SYN_RECEIVED: 13759 tp->t_starttime = ticks; 13760 /* FALLTHROUGH */ 13761 case TCPS_ESTABLISHED: 13762 rack_timer_cancel(tp, rack, 13763 rack->r_ctl.rc_rcvtime, __LINE__); 13764 tcp_state_change(tp, TCPS_CLOSE_WAIT); 13765 break; 13766 13767 /* 13768 * If still in FIN_WAIT_1 STATE FIN has not been 13769 * acked so enter the CLOSING state. 13770 */ 13771 case TCPS_FIN_WAIT_1: 13772 rack_timer_cancel(tp, rack, 13773 rack->r_ctl.rc_rcvtime, __LINE__); 13774 tcp_state_change(tp, TCPS_CLOSING); 13775 break; 13776 13777 /* 13778 * In FIN_WAIT_2 state enter the TIME_WAIT state, 13779 * starting the time-wait timer, turning off the 13780 * other standard timers. 13781 */ 13782 case TCPS_FIN_WAIT_2: 13783 rack_timer_cancel(tp, rack, 13784 rack->r_ctl.rc_rcvtime, __LINE__); 13785 tcp_twstart(tp); 13786 return (1); 13787 } 13788 } 13789 /* 13790 * Return any desired output. 
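	 * That is, flag the output path to run if we owe an immediate ACK
	 * or if there is unsent data in the socket buffer beyond snd_max.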
13791 */ 13792 if ((tp->t_flags & TF_ACKNOW) || 13793 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 13794 rack->r_wanted_output = 1; 13795 } 13796 return (0); 13797 } 13798 13799 /* 13800 * Here nothing is really faster, its just that we 13801 * have broken out the fast-data path also just like 13802 * the fast-ack. 13803 */ 13804 static int 13805 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 13806 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13807 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 13808 { 13809 int32_t nsegs; 13810 int32_t newsize = 0; /* automatic sockbuf scaling */ 13811 struct tcp_rack *rack; 13812 #ifdef NETFLIX_SB_LIMITS 13813 u_int mcnt, appended; 13814 #endif 13815 13816 /* 13817 * If last ACK falls within this segment's sequence numbers, record 13818 * the timestamp. NOTE that the test is modified according to the 13819 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 13820 */ 13821 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 13822 return (0); 13823 } 13824 if (tiwin && tiwin != tp->snd_wnd) { 13825 return (0); 13826 } 13827 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 13828 return (0); 13829 } 13830 if (__predict_false((to->to_flags & TOF_TS) && 13831 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 13832 return (0); 13833 } 13834 if (__predict_false((th->th_ack != tp->snd_una))) { 13835 return (0); 13836 } 13837 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 13838 return (0); 13839 } 13840 if ((to->to_flags & TOF_TS) != 0 && 13841 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 13842 tp->ts_recent_age = tcp_ts_getticks(); 13843 tp->ts_recent = to->to_tsval; 13844 } 13845 rack = (struct tcp_rack *)tp->t_fb_ptr; 13846 /* 13847 * This is a pure, in-sequence data packet with nothing on the 13848 * reassembly queue and we have enough buffer space to take it. 13849 */ 13850 nsegs = max(1, m->m_pkthdr.lro_nsegs); 13851 13852 #ifdef NETFLIX_SB_LIMITS 13853 if (so->so_rcv.sb_shlim) { 13854 mcnt = m_memcnt(m); 13855 appended = 0; 13856 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 13857 CFO_NOSLEEP, NULL) == false) { 13858 counter_u64_add(tcp_sb_shlim_fails, 1); 13859 m_freem(m); 13860 return (1); 13861 } 13862 } 13863 #endif 13864 /* Clean receiver SACK report if present */ 13865 if (tp->rcv_numsacks) 13866 tcp_clean_sackreport(tp); 13867 KMOD_TCPSTAT_INC(tcps_preddat); 13868 tp->rcv_nxt += tlen; 13869 if (tlen && 13870 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 13871 (tp->t_fbyte_in == 0)) { 13872 tp->t_fbyte_in = ticks; 13873 if (tp->t_fbyte_in == 0) 13874 tp->t_fbyte_in = 1; 13875 if (tp->t_fbyte_out && tp->t_fbyte_in) 13876 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 13877 } 13878 /* 13879 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 13880 */ 13881 tp->snd_wl1 = th->th_seq; 13882 /* 13883 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 13884 */ 13885 tp->rcv_up = tp->rcv_nxt; 13886 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 13887 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 13888 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 13889 13890 /* Add data to socket buffer. */ 13891 SOCKBUF_LOCK(&so->so_rcv); 13892 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13893 m_freem(m); 13894 } else { 13895 /* 13896 * Set new socket buffer size. Give up when limit is 13897 * reached. 
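		 * (A failed sbreserve_locked() below only turns off
		 * auto-sizing; the mbuf chain is still appended.)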
13898 */ 13899 if (newsize) 13900 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 13901 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 13902 m_adj(m, drop_hdrlen); /* delayed header drop */ 13903 #ifdef NETFLIX_SB_LIMITS 13904 appended = 13905 #endif 13906 sbappendstream_locked(&so->so_rcv, m, 0); 13907 ctf_calc_rwin(so, tp); 13908 } 13909 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 13910 /* NB: sorwakeup_locked() does an implicit unlock. */ 13911 sorwakeup_locked(so); 13912 #ifdef NETFLIX_SB_LIMITS 13913 if (so->so_rcv.sb_shlim && mcnt != appended) 13914 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 13915 #endif 13916 rack_handle_delayed_ack(tp, rack, tlen, 0); 13917 if (tp->snd_una == tp->snd_max) 13918 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 13919 return (1); 13920 } 13921 13922 /* 13923 * This subfunction is used to try to highly optimize the 13924 * fast path. We again allow window updates that are 13925 * in sequence to remain in the fast-path. We also add 13926 * in the __predict's to attempt to help the compiler. 13927 * Note that if we return a 0, then we can *not* process 13928 * it and the caller should push the packet into the 13929 * slow-path. 13930 */ 13931 static int 13932 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 13933 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13934 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 13935 { 13936 int32_t acked; 13937 int32_t nsegs; 13938 int32_t under_pacing = 0; 13939 struct tcp_rack *rack; 13940 13941 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 13942 /* Old ack, behind (or duplicate to) the last one rcv'd */ 13943 return (0); 13944 } 13945 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 13946 /* Above what we have sent? */ 13947 return (0); 13948 } 13949 if (__predict_false(tiwin == 0)) { 13950 /* zero window */ 13951 return (0); 13952 } 13953 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 13954 /* We need a SYN or a FIN, unlikely.. */ 13955 return (0); 13956 } 13957 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 13958 /* Timestamp is behind .. old ack with seq wrap? */ 13959 return (0); 13960 } 13961 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 13962 /* Still recovering */ 13963 return (0); 13964 } 13965 rack = (struct tcp_rack *)tp->t_fb_ptr; 13966 if (rack->r_ctl.rc_sacked) { 13967 /* We have sack holes on our scoreboard */ 13968 return (0); 13969 } 13970 /* Ok if we reach here, we can process a fast-ack */ 13971 if (rack->gp_ready && 13972 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 13973 under_pacing = 1; 13974 } 13975 nsegs = max(1, m->m_pkthdr.lro_nsegs); 13976 rack_log_ack(tp, to, th, 0, 0, NULL, NULL); 13977 /* Did the window get updated? */ 13978 if (tiwin != tp->snd_wnd) { 13979 tp->snd_wnd = tiwin; 13980 rack_validate_fo_sendwin_up(tp, rack); 13981 tp->snd_wl1 = th->th_seq; 13982 if (tp->snd_wnd > tp->max_sndwnd) 13983 tp->max_sndwnd = tp->snd_wnd; 13984 } 13985 /* Do we exit persists? */ 13986 if ((rack->rc_in_persist != 0) && 13987 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 13988 rack->r_ctl.rc_pace_min_segs))) { 13989 rack_exit_persist(tp, rack, cts); 13990 } 13991 /* Do we enter persists? 
*/ 13992 if ((rack->rc_in_persist == 0) && 13993 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 13994 TCPS_HAVEESTABLISHED(tp->t_state) && 13995 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 13996 sbavail(&tptosocket(tp)->so_snd) && 13997 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 13998 /* 13999 * Here the rwnd is less than 14000 * the pacing size, we are established, 14001 * nothing is outstanding, and there is 14002 * data to send. Enter persists. 14003 */ 14004 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, th->th_ack); 14005 } 14006 /* 14007 * If last ACK falls within this segment's sequence numbers, record 14008 * the timestamp. NOTE that the test is modified according to the 14009 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 14010 */ 14011 if ((to->to_flags & TOF_TS) != 0 && 14012 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 14013 tp->ts_recent_age = tcp_ts_getticks(); 14014 tp->ts_recent = to->to_tsval; 14015 } 14016 /* 14017 * This is a pure ack for outstanding data. 14018 */ 14019 KMOD_TCPSTAT_INC(tcps_predack); 14020 14021 /* 14022 * "bad retransmit" recovery. 14023 */ 14024 if ((tp->t_flags & TF_PREVVALID) && 14025 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 14026 tp->t_flags &= ~TF_PREVVALID; 14027 if (tp->t_rxtshift == 1 && 14028 (int)(ticks - tp->t_badrxtwin) < 0) 14029 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 14030 } 14031 /* 14032 * Recalculate the transmit timer / rtt. 14033 * 14034 * Some boxes send broken timestamp replies during the SYN+ACK 14035 * phase, ignore timestamps of 0 or we could calculate a huge RTT 14036 * and blow up the retransmit timer. 14037 */ 14038 acked = BYTES_THIS_ACK(tp, th); 14039 14040 #ifdef TCP_HHOOK 14041 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 14042 hhook_run_tcp_est_in(tp, th, to); 14043 #endif 14044 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 14045 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 14046 if (acked) { 14047 struct mbuf *mfree; 14048 14049 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 14050 SOCKBUF_LOCK(&so->so_snd); 14051 mfree = sbcut_locked(&so->so_snd, acked); 14052 tp->snd_una = th->th_ack; 14053 /* Note we want to hold the sb lock through the sendmap adjust */ 14054 rack_adjust_sendmap_head(rack, &so->so_snd); 14055 /* Wake up the socket if we have room to write more */ 14056 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 14057 sowwakeup_locked(so); 14058 m_freem(mfree); 14059 tp->t_rxtshift = 0; 14060 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 14061 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 14062 rack->rc_tlp_in_progress = 0; 14063 rack->r_ctl.rc_tlp_cnt_out = 0; 14064 /* 14065 * If it is the RXT timer we want to 14066 * stop it, so we can restart a TLP. 14067 */ 14068 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 14069 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 14070 14071 #ifdef TCP_REQUEST_TRK 14072 rack_req_check_for_comp(rack, th->th_ack); 14073 #endif 14074 } 14075 /* 14076 * Let the congestion control algorithm update congestion control 14077 * related information. This typically means increasing the 14078 * congestion window. 
14079 */ 14080 if (tp->snd_wnd < ctf_outstanding(tp)) { 14081 /* The peer collapsed the window */ 14082 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); 14083 } else if (rack->rc_has_collapsed) 14084 rack_un_collapse_window(rack, __LINE__); 14085 if ((rack->r_collapse_point_valid) && 14086 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point))) 14087 rack->r_collapse_point_valid = 0; 14088 /* 14089 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 14090 */ 14091 tp->snd_wl2 = th->th_ack; 14092 tp->t_dupacks = 0; 14093 m_freem(m); 14094 /* ND6_HINT(tp); *//* Some progress has been made. */ 14095 14096 /* 14097 * If all outstanding data are acked, stop retransmit timer, 14098 * otherwise restart timer using current (possibly backed-off) 14099 * value. If process is waiting for space, wakeup/selwakeup/signal. 14100 * If data are ready to send, let tcp_output decide between more 14101 * output or persist. 14102 */ 14103 if (under_pacing && 14104 (rack->use_fixed_rate == 0) && 14105 (rack->in_probe_rtt == 0) && 14106 rack->rc_gp_dyn_mul && 14107 rack->rc_always_pace) { 14108 /* Check if we are dragging bottom */ 14109 rack_check_bottom_drag(tp, rack, so); 14110 } 14111 if (tp->snd_una == tp->snd_max) { 14112 tp->t_flags &= ~TF_PREVVALID; 14113 rack->r_ctl.retran_during_recovery = 0; 14114 rack->rc_suspicious = 0; 14115 rack->r_ctl.dsack_byte_cnt = 0; 14116 rack->r_ctl.idle_snd_una = tp->snd_una; 14117 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 14118 if (rack->r_ctl.rc_went_idle_time == 0) 14119 rack->r_ctl.rc_went_idle_time = 1; 14120 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 14121 if (sbavail(&tptosocket(tp)->so_snd) == 0) 14122 tp->t_acktime = 0; 14123 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 14124 } 14125 if (acked && rack->r_fast_output) 14126 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 14127 if (sbavail(&so->so_snd)) { 14128 rack->r_wanted_output = 1; 14129 } 14130 return (1); 14131 } 14132 14133 /* 14134 * Return value of 1, the TCB is unlocked and most 14135 * likely gone, return value of 0, the TCP is still 14136 * locked. 14137 */ 14138 static int 14139 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 14140 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14141 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14142 { 14143 int32_t ret_val = 0; 14144 int32_t orig_tlen = tlen; 14145 int32_t todrop; 14146 int32_t ourfinisacked = 0; 14147 struct tcp_rack *rack; 14148 14149 INP_WLOCK_ASSERT(tptoinpcb(tp)); 14150 14151 ctf_calc_rwin(so, tp); 14152 /* 14153 * If the state is SYN_SENT: if seg contains an ACK, but not for our 14154 * SYN, drop the input. if seg contains a RST, then drop the 14155 * connection. if seg does not contain SYN, then drop it. Otherwise 14156 * this is an acceptable SYN segment initialize tp->rcv_nxt and 14157 * tp->irs if seg contains ack then advance tp->snd_una if seg 14158 * contains an ECE and ECN support is enabled, the stream is ECN 14159 * capable. if SYN has been acked change to ESTABLISHED else 14160 * SYN_RCVD state arrange for segment to be acked (eventually) 14161 * continue processing rest of data/controls. 
14162 */ 14163 if ((thflags & TH_ACK) && 14164 (SEQ_LEQ(th->th_ack, tp->iss) || 14165 SEQ_GT(th->th_ack, tp->snd_max))) { 14166 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 14167 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14168 return (1); 14169 } 14170 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 14171 TCP_PROBE5(connect__refused, NULL, tp, 14172 mtod(m, const char *), tp, th); 14173 tp = tcp_drop(tp, ECONNREFUSED); 14174 ctf_do_drop(m, tp); 14175 return (1); 14176 } 14177 if (thflags & TH_RST) { 14178 ctf_do_drop(m, tp); 14179 return (1); 14180 } 14181 if (!(thflags & TH_SYN)) { 14182 ctf_do_drop(m, tp); 14183 return (1); 14184 } 14185 tp->irs = th->th_seq; 14186 tcp_rcvseqinit(tp); 14187 rack = (struct tcp_rack *)tp->t_fb_ptr; 14188 if (thflags & TH_ACK) { 14189 int tfo_partial = 0; 14190 14191 KMOD_TCPSTAT_INC(tcps_connects); 14192 soisconnected(so); 14193 #ifdef MAC 14194 mac_socketpeer_set_from_mbuf(m, so); 14195 #endif 14196 /* Do window scaling on this connection? */ 14197 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 14198 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 14199 tp->rcv_scale = tp->request_r_scale; 14200 } 14201 tp->rcv_adv += min(tp->rcv_wnd, 14202 TCP_MAXWIN << tp->rcv_scale); 14203 /* 14204 * If not all the data that was sent in the TFO SYN 14205 * has been acked, resend the remainder right away. 14206 */ 14207 if ((tp->t_flags & TF_FASTOPEN) && 14208 (tp->snd_una != tp->snd_max)) { 14209 /* Was it a partial ack? */ 14210 if (SEQ_LT(th->th_ack, tp->snd_max)) 14211 tfo_partial = 1; 14212 } 14213 /* 14214 * If there's data, delay ACK; if there's also a FIN ACKNOW 14215 * will be turned on later. 14216 */ 14217 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 14218 rack_timer_cancel(tp, rack, 14219 rack->r_ctl.rc_rcvtime, __LINE__); 14220 tp->t_flags |= TF_DELACK; 14221 } else { 14222 rack->r_wanted_output = 1; 14223 tp->t_flags |= TF_ACKNOW; 14224 } 14225 14226 tcp_ecn_input_syn_sent(tp, thflags, iptos); 14227 14228 if (SEQ_GT(th->th_ack, tp->snd_una)) { 14229 /* 14230 * We advance snd_una for the 14231 * fast open case. If th_ack is 14232 * acknowledging data beyond 14233 * snd_una we can't just call 14234 * ack-processing since the 14235 * data stream in our send-map 14236 * will start at snd_una + 1 (one 14237 * beyond the SYN). If its just 14238 * equal we don't need to do that 14239 * and there is no send_map. 14240 */ 14241 tp->snd_una++; 14242 if (tfo_partial && (SEQ_GT(tp->snd_max, tp->snd_una))) { 14243 /* 14244 * We sent a SYN with data, and thus have a 14245 * sendmap entry with a SYN set. Lets find it 14246 * and take off the send bit and the byte and 14247 * set it up to be what we send (send it next). 14248 */ 14249 struct rack_sendmap *rsm; 14250 14251 rsm = tqhash_min(rack->r_ctl.tqh); 14252 if (rsm) { 14253 if (rsm->r_flags & RACK_HAS_SYN) { 14254 rsm->r_flags &= ~RACK_HAS_SYN; 14255 rsm->r_start++; 14256 } 14257 rack->r_ctl.rc_resend = rsm; 14258 } 14259 } 14260 } 14261 /* 14262 * Received <SYN,ACK> in SYN_SENT[*] state. 
Transitions: 14263 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 14264 */ 14265 tp->t_starttime = ticks; 14266 if (tp->t_flags & TF_NEEDFIN) { 14267 tcp_state_change(tp, TCPS_FIN_WAIT_1); 14268 tp->t_flags &= ~TF_NEEDFIN; 14269 thflags &= ~TH_SYN; 14270 } else { 14271 tcp_state_change(tp, TCPS_ESTABLISHED); 14272 TCP_PROBE5(connect__established, NULL, tp, 14273 mtod(m, const char *), tp, th); 14274 rack_cc_conn_init(tp); 14275 } 14276 } else { 14277 /* 14278 * Received initial SYN in SYN-SENT[*] state => simultaneous 14279 * open. If segment contains CC option and there is a 14280 * cached CC, apply TAO test. If it succeeds, connection is * 14281 * half-synchronized. Otherwise, do 3-way handshake: 14282 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 14283 * there was no CC option, clear cached CC value. 14284 */ 14285 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); 14286 tcp_state_change(tp, TCPS_SYN_RECEIVED); 14287 } 14288 /* 14289 * Advance th->th_seq to correspond to first data byte. If data, 14290 * trim to stay within window, dropping FIN if necessary. 14291 */ 14292 th->th_seq++; 14293 if (tlen > tp->rcv_wnd) { 14294 todrop = tlen - tp->rcv_wnd; 14295 m_adj(m, -todrop); 14296 tlen = tp->rcv_wnd; 14297 thflags &= ~TH_FIN; 14298 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 14299 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 14300 } 14301 tp->snd_wl1 = th->th_seq - 1; 14302 tp->rcv_up = th->th_seq; 14303 /* 14304 * Client side of transaction: already sent SYN and data. If the 14305 * remote host used T/TCP to validate the SYN, our data will be 14306 * ACK'd; if so, enter normal data segment processing in the middle 14307 * of step 5, ack processing. Otherwise, goto step 6. 14308 */ 14309 if (thflags & TH_ACK) { 14310 /* For syn-sent we need to possibly update the rtt */ 14311 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 14312 uint32_t t, mcts; 14313 14314 mcts = tcp_ts_getticks(); 14315 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 14316 if (!tp->t_rttlow || tp->t_rttlow > t) 14317 tp->t_rttlow = t; 14318 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 14319 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 14320 tcp_rack_xmit_timer_commit(rack, tp); 14321 } 14322 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) 14323 return (ret_val); 14324 /* We may have changed to FIN_WAIT_1 above */ 14325 if (tp->t_state == TCPS_FIN_WAIT_1) { 14326 /* 14327 * In FIN_WAIT_1 STATE in addition to the processing 14328 * for the ESTABLISHED state if our FIN is now 14329 * acknowledged then enter FIN_WAIT_2. 14330 */ 14331 if (ourfinisacked) { 14332 /* 14333 * If we can't receive any more data, then 14334 * closing user can proceed. Starting the 14335 * timer is contrary to the specification, 14336 * but if we don't get a FIN we'll hang 14337 * forever. 14338 * 14339 * XXXjl: we should release the tp also, and 14340 * use a compressed state. 14341 */ 14342 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 14343 soisdisconnected(so); 14344 tcp_timer_activate(tp, TT_2MSL, 14345 (tcp_fast_finwait2_recycle ? 14346 tcp_finwait2_timeout : 14347 TP_MAXIDLE(tp))); 14348 } 14349 tcp_state_change(tp, TCPS_FIN_WAIT_2); 14350 } 14351 } 14352 } 14353 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14354 tiwin, thflags, nxt_pkt)); 14355 } 14356 14357 /* 14358 * Return value of 1, the TCB is unlocked and most 14359 * likely gone, return value of 0, the TCP is still 14360 * locked. 
14361 */ 14362 static int 14363 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 14364 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14365 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14366 { 14367 struct tcp_rack *rack; 14368 int32_t orig_tlen = tlen; 14369 int32_t ret_val = 0; 14370 int32_t ourfinisacked = 0; 14371 14372 rack = (struct tcp_rack *)tp->t_fb_ptr; 14373 ctf_calc_rwin(so, tp); 14374 if ((thflags & TH_RST) || 14375 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14376 return (__ctf_process_rst(m, th, so, tp, 14377 &rack->r_ctl.challenge_ack_ts, 14378 &rack->r_ctl.challenge_ack_cnt)); 14379 if ((thflags & TH_ACK) && 14380 (SEQ_LEQ(th->th_ack, tp->snd_una) || 14381 SEQ_GT(th->th_ack, tp->snd_max))) { 14382 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 14383 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14384 return (1); 14385 } 14386 if (tp->t_flags & TF_FASTOPEN) { 14387 /* 14388 * When a TFO connection is in SYN_RECEIVED, the 14389 * only valid packets are the initial SYN, a 14390 * retransmit/copy of the initial SYN (possibly with 14391 * a subset of the original data), a valid ACK, a 14392 * FIN, or a RST. 14393 */ 14394 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 14395 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 14396 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14397 return (1); 14398 } else if (thflags & TH_SYN) { 14399 /* non-initial SYN is ignored */ 14400 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 14401 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 14402 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 14403 ctf_do_drop(m, NULL); 14404 return (0); 14405 } 14406 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 14407 ctf_do_drop(m, NULL); 14408 return (0); 14409 } 14410 } 14411 14412 /* 14413 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14414 * it's less than ts_recent, drop it. 14415 */ 14416 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14417 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14418 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14419 return (ret_val); 14420 } 14421 /* 14422 * In the SYN-RECEIVED state, validate that the packet belongs to 14423 * this connection before trimming the data to fit the receive 14424 * window. Check the sequence number versus IRS since we know the 14425 * sequence numbers haven't wrapped. This is a partial fix for the 14426 * "LAND" DoS attack. 14427 */ 14428 if (SEQ_LT(th->th_seq, tp->irs)) { 14429 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 14430 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14431 return (1); 14432 } 14433 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14434 &rack->r_ctl.challenge_ack_ts, 14435 &rack->r_ctl.challenge_ack_cnt)) { 14436 return (ret_val); 14437 } 14438 /* 14439 * If last ACK falls within this segment's sequence numbers, record 14440 * its timestamp. NOTE: 1) That the test incorporates suggestions 14441 * from the latest proposal of the tcplw@cray.com list (Braden 14442 * 1993/04/26). 2) That updating only on newer timestamps interferes 14443 * with our earlier PAWS tests, so this check should be solely 14444 * predicated on the sequence space of this segment. 
3) That we 14445 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14446 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14447 * SEG.Len, This modified check allows us to overcome RFC1323's 14448 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14449 * p.869. In such cases, we can still calculate the RTT correctly 14450 * when RCV.NXT == Last.ACK.Sent. 14451 */ 14452 if ((to->to_flags & TOF_TS) != 0 && 14453 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14454 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14455 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14456 tp->ts_recent_age = tcp_ts_getticks(); 14457 tp->ts_recent = to->to_tsval; 14458 } 14459 tp->snd_wnd = tiwin; 14460 rack_validate_fo_sendwin_up(tp, rack); 14461 /* 14462 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14463 * is on (half-synchronized state), then queue data for later 14464 * processing; else drop segment and return. 14465 */ 14466 if ((thflags & TH_ACK) == 0) { 14467 if (tp->t_flags & TF_FASTOPEN) { 14468 rack_cc_conn_init(tp); 14469 } 14470 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14471 tiwin, thflags, nxt_pkt)); 14472 } 14473 KMOD_TCPSTAT_INC(tcps_connects); 14474 if (tp->t_flags & TF_SONOTCONN) { 14475 tp->t_flags &= ~TF_SONOTCONN; 14476 soisconnected(so); 14477 } 14478 /* Do window scaling? */ 14479 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 14480 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 14481 tp->rcv_scale = tp->request_r_scale; 14482 } 14483 /* 14484 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 14485 * FIN-WAIT-1 14486 */ 14487 tp->t_starttime = ticks; 14488 if ((tp->t_flags & TF_FASTOPEN) && tp->t_tfo_pending) { 14489 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 14490 tp->t_tfo_pending = NULL; 14491 } 14492 if (tp->t_flags & TF_NEEDFIN) { 14493 tcp_state_change(tp, TCPS_FIN_WAIT_1); 14494 tp->t_flags &= ~TF_NEEDFIN; 14495 } else { 14496 tcp_state_change(tp, TCPS_ESTABLISHED); 14497 TCP_PROBE5(accept__established, NULL, tp, 14498 mtod(m, const char *), tp, th); 14499 /* 14500 * TFO connections call cc_conn_init() during SYN 14501 * processing. Calling it again here for such connections 14502 * is not harmless as it would undo the snd_cwnd reduction 14503 * that occurs when a TFO SYN|ACK is retransmitted. 14504 */ 14505 if (!(tp->t_flags & TF_FASTOPEN)) 14506 rack_cc_conn_init(tp); 14507 } 14508 /* 14509 * Account for the ACK of our SYN prior to 14510 * regular ACK processing below, except for 14511 * simultaneous SYN, which is handled later. 14512 */ 14513 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 14514 tp->snd_una++; 14515 /* 14516 * If segment contains data or ACK, will call tcp_reass() later; if 14517 * not, do so now to pass queued data to user. 14518 */ 14519 if (tlen == 0 && (thflags & TH_FIN) == 0) { 14520 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 14521 (struct mbuf *)0); 14522 if (tp->t_flags & TF_WAKESOR) { 14523 tp->t_flags &= ~TF_WAKESOR; 14524 /* NB: sorwakeup_locked() does an implicit unlock. 
			 */
			sorwakeup_locked(so);
		}
	}
	tp->snd_wl1 = th->th_seq - 1;
	/* For syn-recv we need to possibly update the rtt */
	if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
		uint32_t t, mcts;

		mcts = tcp_ts_getticks();
		t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC;
		if (!tp->t_rttlow || tp->t_rttlow > t)
			tp->t_rttlow = t;
		rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5);
		tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2);
		tcp_rack_xmit_timer_commit(rack, tp);
	}
	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) {
		return (ret_val);
	}
	if (tp->t_state == TCPS_FIN_WAIT_1) {
		/* We could have gone to FIN_WAIT_1 (or EST) above */
		/*
		 * In FIN_WAIT_1 STATE in addition to the processing for the
		 * ESTABLISHED state if our FIN is now acknowledged then
		 * enter FIN_WAIT_2.
		 */
		if (ourfinisacked) {
			/*
			 * If we can't receive any more data, then closing
			 * user can proceed. Starting the timer is contrary
			 * to the specification, but if we don't get a FIN
			 * we'll hang forever.
			 *
			 * XXXjl: we should release the tp also, and use a
			 * compressed state.
			 */
			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
				soisdisconnected(so);
				tcp_timer_activate(tp, TT_2MSL,
				    (tcp_fast_finwait2_recycle ?
				    tcp_finwait2_timeout :
				    TP_MAXIDLE(tp)));
			}
			tcp_state_change(tp, TCPS_FIN_WAIT_2);
		}
	}
	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
	    tiwin, thflags, nxt_pkt));
}

/*
 * Return value of 1, the TCB is unlocked and most
 * likely gone, return value of 0, the TCP is still
 * locked.
 */
static int
rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
    uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
{
	int32_t ret_val = 0;
	int32_t orig_tlen = tlen;
	struct tcp_rack *rack;

	/*
	 * Header prediction: check for the two common cases of a
	 * uni-directional data xfer. If the packet has no control flags,
	 * is in-sequence, the window didn't change and we're not
	 * retransmitting, it's a candidate. If the length is zero and the
	 * ack moved forward, we're the sender side of the xfer. Just free
	 * the data acked & wake any higher level process that was blocked
	 * waiting for space. If the length is non-zero and the ack didn't
	 * move, we're the receiver side. If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to the socket
	 * buffer and note that we need a delayed ack. Make sure that the
	 * hidden state-flags are also off. Since we check for
	 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN.
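	 * The gate below encodes exactly that: no SACK blocks in the
	 * options, a bare ACK (no SYN/FIN/RST), an empty reassembly queue
	 * and an in-sequence segment; anything else falls through to the
	 * full slow-path processing.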
14602 */ 14603 rack = (struct tcp_rack *)tp->t_fb_ptr; 14604 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 14605 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) && 14606 __predict_true(SEGQ_EMPTY(tp)) && 14607 __predict_true(th->th_seq == tp->rcv_nxt)) { 14608 if (tlen == 0) { 14609 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 14610 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 14611 return (0); 14612 } 14613 } else { 14614 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 14615 tiwin, nxt_pkt, iptos)) { 14616 return (0); 14617 } 14618 } 14619 } 14620 ctf_calc_rwin(so, tp); 14621 14622 if ((thflags & TH_RST) || 14623 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14624 return (__ctf_process_rst(m, th, so, tp, 14625 &rack->r_ctl.challenge_ack_ts, 14626 &rack->r_ctl.challenge_ack_cnt)); 14627 14628 /* 14629 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14630 * synchronized state. 14631 */ 14632 if (thflags & TH_SYN) { 14633 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14634 return (ret_val); 14635 } 14636 /* 14637 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14638 * it's less than ts_recent, drop it. 14639 */ 14640 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14641 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14642 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14643 return (ret_val); 14644 } 14645 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14646 &rack->r_ctl.challenge_ack_ts, 14647 &rack->r_ctl.challenge_ack_cnt)) { 14648 return (ret_val); 14649 } 14650 /* 14651 * If last ACK falls within this segment's sequence numbers, record 14652 * its timestamp. NOTE: 1) That the test incorporates suggestions 14653 * from the latest proposal of the tcplw@cray.com list (Braden 14654 * 1993/04/26). 2) That updating only on newer timestamps interferes 14655 * with our earlier PAWS tests, so this check should be solely 14656 * predicated on the sequence space of this segment. 3) That we 14657 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14658 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14659 * SEG.Len, This modified check allows us to overcome RFC1323's 14660 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14661 * p.869. In such cases, we can still calculate the RTT correctly 14662 * when RCV.NXT == Last.ACK.Sent. 14663 */ 14664 if ((to->to_flags & TOF_TS) != 0 && 14665 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14666 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14667 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14668 tp->ts_recent_age = tcp_ts_getticks(); 14669 tp->ts_recent = to->to_tsval; 14670 } 14671 /* 14672 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14673 * is on (half-synchronized state), then queue data for later 14674 * processing; else drop segment and return. 14675 */ 14676 if ((thflags & TH_ACK) == 0) { 14677 if (tp->t_flags & TF_NEEDSYN) { 14678 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14679 tiwin, thflags, nxt_pkt)); 14680 14681 } else if (tp->t_flags & TF_ACKNOW) { 14682 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 14683 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 14684 return (ret_val); 14685 } else { 14686 ctf_do_drop(m, NULL); 14687 return (0); 14688 } 14689 } 14690 /* 14691 * Ack processing. 
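	 * rack_process_ack() returns non-zero when rack_process_data()
	 * must not be called; ret_val then tells our caller whether the
	 * TCB is still locked (0) or has been unlocked (non-zero).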
14692 */ 14693 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val, orig_tlen)) { 14694 return (ret_val); 14695 } 14696 if (sbavail(&so->so_snd)) { 14697 if (ctf_progress_timeout_check(tp, true)) { 14698 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 14699 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14700 return (1); 14701 } 14702 } 14703 /* State changes only happen in rack_process_data() */ 14704 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14705 tiwin, thflags, nxt_pkt)); 14706 } 14707 14708 /* 14709 * Return value of 1, the TCB is unlocked and most 14710 * likely gone, return value of 0, the TCP is still 14711 * locked. 14712 */ 14713 static int 14714 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 14715 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14716 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14717 { 14718 int32_t ret_val = 0; 14719 int32_t orig_tlen = tlen; 14720 struct tcp_rack *rack; 14721 14722 rack = (struct tcp_rack *)tp->t_fb_ptr; 14723 ctf_calc_rwin(so, tp); 14724 if ((thflags & TH_RST) || 14725 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14726 return (__ctf_process_rst(m, th, so, tp, 14727 &rack->r_ctl.challenge_ack_ts, 14728 &rack->r_ctl.challenge_ack_cnt)); 14729 /* 14730 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14731 * synchronized state. 14732 */ 14733 if (thflags & TH_SYN) { 14734 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14735 return (ret_val); 14736 } 14737 /* 14738 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14739 * it's less than ts_recent, drop it. 14740 */ 14741 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14742 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14743 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14744 return (ret_val); 14745 } 14746 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14747 &rack->r_ctl.challenge_ack_ts, 14748 &rack->r_ctl.challenge_ack_cnt)) { 14749 return (ret_val); 14750 } 14751 /* 14752 * If last ACK falls within this segment's sequence numbers, record 14753 * its timestamp. NOTE: 1) That the test incorporates suggestions 14754 * from the latest proposal of the tcplw@cray.com list (Braden 14755 * 1993/04/26). 2) That updating only on newer timestamps interferes 14756 * with our earlier PAWS tests, so this check should be solely 14757 * predicated on the sequence space of this segment. 3) That we 14758 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14759 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14760 * SEG.Len, This modified check allows us to overcome RFC1323's 14761 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14762 * p.869. In such cases, we can still calculate the RTT correctly 14763 * when RCV.NXT == Last.ACK.Sent. 14764 */ 14765 if ((to->to_flags & TOF_TS) != 0 && 14766 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14767 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14768 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14769 tp->ts_recent_age = tcp_ts_getticks(); 14770 tp->ts_recent = to->to_tsval; 14771 } 14772 /* 14773 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14774 * is on (half-synchronized state), then queue data for later 14775 * processing; else drop segment and return. 
14776 */ 14777 if ((thflags & TH_ACK) == 0) { 14778 if (tp->t_flags & TF_NEEDSYN) { 14779 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14780 tiwin, thflags, nxt_pkt)); 14781 14782 } else if (tp->t_flags & TF_ACKNOW) { 14783 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 14784 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 14785 return (ret_val); 14786 } else { 14787 ctf_do_drop(m, NULL); 14788 return (0); 14789 } 14790 } 14791 /* 14792 * Ack processing. 14793 */ 14794 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val, orig_tlen)) { 14795 return (ret_val); 14796 } 14797 if (sbavail(&so->so_snd)) { 14798 if (ctf_progress_timeout_check(tp, true)) { 14799 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 14800 tp, tick, PROGRESS_DROP, __LINE__); 14801 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14802 return (1); 14803 } 14804 } 14805 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14806 tiwin, thflags, nxt_pkt)); 14807 } 14808 14809 static int 14810 rack_check_data_after_close(struct mbuf *m, 14811 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 14812 { 14813 struct tcp_rack *rack; 14814 14815 rack = (struct tcp_rack *)tp->t_fb_ptr; 14816 if (rack->rc_allow_data_af_clo == 0) { 14817 close_now: 14818 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 14819 /* tcp_close will kill the inp pre-log the Reset */ 14820 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 14821 tp = tcp_close(tp); 14822 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 14823 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 14824 return (1); 14825 } 14826 if (sbavail(&so->so_snd) == 0) 14827 goto close_now; 14828 /* Ok we allow data that is ignored and a followup reset */ 14829 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 14830 tp->rcv_nxt = th->th_seq + *tlen; 14831 tp->t_flags2 |= TF2_DROP_AF_DATA; 14832 rack->r_wanted_output = 1; 14833 *tlen = 0; 14834 return (0); 14835 } 14836 14837 /* 14838 * Return value of 1, the TCB is unlocked and most 14839 * likely gone, return value of 0, the TCP is still 14840 * locked. 14841 */ 14842 static int 14843 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 14844 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14845 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14846 { 14847 int32_t ret_val = 0; 14848 int32_t orig_tlen = tlen; 14849 int32_t ourfinisacked = 0; 14850 struct tcp_rack *rack; 14851 14852 rack = (struct tcp_rack *)tp->t_fb_ptr; 14853 ctf_calc_rwin(so, tp); 14854 14855 if ((thflags & TH_RST) || 14856 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14857 return (__ctf_process_rst(m, th, so, tp, 14858 &rack->r_ctl.challenge_ack_ts, 14859 &rack->r_ctl.challenge_ack_cnt)); 14860 /* 14861 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14862 * synchronized state. 14863 */ 14864 if (thflags & TH_SYN) { 14865 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14866 return (ret_val); 14867 } 14868 /* 14869 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14870 * it's less than ts_recent, drop it. 
14871 */ 14872 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14873 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14874 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14875 return (ret_val); 14876 } 14877 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14878 &rack->r_ctl.challenge_ack_ts, 14879 &rack->r_ctl.challenge_ack_cnt)) { 14880 return (ret_val); 14881 } 14882 /* 14883 * If new data are received on a connection after the user processes 14884 * are gone, then RST the other end. 14885 */ 14886 if ((tp->t_flags & TF_CLOSED) && tlen && 14887 rack_check_data_after_close(m, tp, &tlen, th, so)) 14888 return (1); 14889 /* 14890 * If last ACK falls within this segment's sequence numbers, record 14891 * its timestamp. NOTE: 1) That the test incorporates suggestions 14892 * from the latest proposal of the tcplw@cray.com list (Braden 14893 * 1993/04/26). 2) That updating only on newer timestamps interferes 14894 * with our earlier PAWS tests, so this check should be solely 14895 * predicated on the sequence space of this segment. 3) That we 14896 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14897 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14898 * SEG.Len, This modified check allows us to overcome RFC1323's 14899 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14900 * p.869. In such cases, we can still calculate the RTT correctly 14901 * when RCV.NXT == Last.ACK.Sent. 14902 */ 14903 if ((to->to_flags & TOF_TS) != 0 && 14904 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14905 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14906 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14907 tp->ts_recent_age = tcp_ts_getticks(); 14908 tp->ts_recent = to->to_tsval; 14909 } 14910 /* 14911 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14912 * is on (half-synchronized state), then queue data for later 14913 * processing; else drop segment and return. 14914 */ 14915 if ((thflags & TH_ACK) == 0) { 14916 if (tp->t_flags & TF_NEEDSYN) { 14917 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14918 tiwin, thflags, nxt_pkt)); 14919 } else if (tp->t_flags & TF_ACKNOW) { 14920 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 14921 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 14922 return (ret_val); 14923 } else { 14924 ctf_do_drop(m, NULL); 14925 return (0); 14926 } 14927 } 14928 /* 14929 * Ack processing. 14930 */ 14931 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 14932 return (ret_val); 14933 } 14934 if (ourfinisacked) { 14935 /* 14936 * If we can't receive any more data, then closing user can 14937 * proceed. Starting the timer is contrary to the 14938 * specification, but if we don't get a FIN we'll hang 14939 * forever. 14940 * 14941 * XXXjl: we should release the tp also, and use a 14942 * compressed state. 14943 */ 14944 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 14945 soisdisconnected(so); 14946 tcp_timer_activate(tp, TT_2MSL, 14947 (tcp_fast_finwait2_recycle ? 
14948 tcp_finwait2_timeout : 14949 TP_MAXIDLE(tp))); 14950 } 14951 tcp_state_change(tp, TCPS_FIN_WAIT_2); 14952 } 14953 if (sbavail(&so->so_snd)) { 14954 if (ctf_progress_timeout_check(tp, true)) { 14955 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 14956 tp, tick, PROGRESS_DROP, __LINE__); 14957 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14958 return (1); 14959 } 14960 } 14961 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14962 tiwin, thflags, nxt_pkt)); 14963 } 14964 14965 /* 14966 * Return value of 1, the TCB is unlocked and most 14967 * likely gone, return value of 0, the TCP is still 14968 * locked. 14969 */ 14970 static int 14971 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 14972 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14973 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14974 { 14975 int32_t ret_val = 0; 14976 int32_t orig_tlen = tlen; 14977 int32_t ourfinisacked = 0; 14978 struct tcp_rack *rack; 14979 14980 rack = (struct tcp_rack *)tp->t_fb_ptr; 14981 ctf_calc_rwin(so, tp); 14982 14983 if ((thflags & TH_RST) || 14984 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14985 return (__ctf_process_rst(m, th, so, tp, 14986 &rack->r_ctl.challenge_ack_ts, 14987 &rack->r_ctl.challenge_ack_cnt)); 14988 /* 14989 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14990 * synchronized state. 14991 */ 14992 if (thflags & TH_SYN) { 14993 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14994 return (ret_val); 14995 } 14996 /* 14997 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14998 * it's less than ts_recent, drop it. 14999 */ 15000 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 15001 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 15002 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 15003 return (ret_val); 15004 } 15005 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 15006 &rack->r_ctl.challenge_ack_ts, 15007 &rack->r_ctl.challenge_ack_cnt)) { 15008 return (ret_val); 15009 } 15010 /* 15011 * If last ACK falls within this segment's sequence numbers, record 15012 * its timestamp. NOTE: 1) That the test incorporates suggestions 15013 * from the latest proposal of the tcplw@cray.com list (Braden 15014 * 1993/04/26). 2) That updating only on newer timestamps interferes 15015 * with our earlier PAWS tests, so this check should be solely 15016 * predicated on the sequence space of this segment. 3) That we 15017 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 15018 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 15019 * SEG.Len, This modified check allows us to overcome RFC1323's 15020 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 15021 * p.869. In such cases, we can still calculate the RTT correctly 15022 * when RCV.NXT == Last.ACK.Sent. 15023 */ 15024 if ((to->to_flags & TOF_TS) != 0 && 15025 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 15026 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 15027 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 15028 tp->ts_recent_age = tcp_ts_getticks(); 15029 tp->ts_recent = to->to_tsval; 15030 } 15031 /* 15032 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 15033 * is on (half-synchronized state), then queue data for later 15034 * processing; else drop segment and return. 
15035 */ 15036 if ((thflags & TH_ACK) == 0) { 15037 if (tp->t_flags & TF_NEEDSYN) { 15038 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 15039 tiwin, thflags, nxt_pkt)); 15040 } else if (tp->t_flags & TF_ACKNOW) { 15041 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 15042 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 15043 return (ret_val); 15044 } else { 15045 ctf_do_drop(m, NULL); 15046 return (0); 15047 } 15048 } 15049 /* 15050 * Ack processing. 15051 */ 15052 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 15053 return (ret_val); 15054 } 15055 if (ourfinisacked) { 15056 tcp_twstart(tp); 15057 m_freem(m); 15058 return (1); 15059 } 15060 if (sbavail(&so->so_snd)) { 15061 if (ctf_progress_timeout_check(tp, true)) { 15062 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 15063 tp, tick, PROGRESS_DROP, __LINE__); 15064 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 15065 return (1); 15066 } 15067 } 15068 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 15069 tiwin, thflags, nxt_pkt)); 15070 } 15071 15072 /* 15073 * Return value of 1, the TCB is unlocked and most 15074 * likely gone, return value of 0, the TCP is still 15075 * locked. 15076 */ 15077 static int 15078 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 15079 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 15080 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 15081 { 15082 int32_t ret_val = 0; 15083 int32_t orig_tlen; 15084 int32_t ourfinisacked = 0; 15085 struct tcp_rack *rack; 15086 15087 rack = (struct tcp_rack *)tp->t_fb_ptr; 15088 ctf_calc_rwin(so, tp); 15089 15090 if ((thflags & TH_RST) || 15091 (tp->t_fin_is_rst && (thflags & TH_FIN))) 15092 return (__ctf_process_rst(m, th, so, tp, 15093 &rack->r_ctl.challenge_ack_ts, 15094 &rack->r_ctl.challenge_ack_cnt)); 15095 /* 15096 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 15097 * synchronized state. 15098 */ 15099 if (thflags & TH_SYN) { 15100 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 15101 return (ret_val); 15102 } 15103 /* 15104 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 15105 * it's less than ts_recent, drop it. 15106 */ 15107 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 15108 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 15109 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 15110 return (ret_val); 15111 } 15112 orig_tlen = tlen; 15113 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 15114 &rack->r_ctl.challenge_ack_ts, 15115 &rack->r_ctl.challenge_ack_cnt)) { 15116 return (ret_val); 15117 } 15118 /* 15119 * If last ACK falls within this segment's sequence numbers, record 15120 * its timestamp. NOTE: 1) That the test incorporates suggestions 15121 * from the latest proposal of the tcplw@cray.com list (Braden 15122 * 1993/04/26). 2) That updating only on newer timestamps interferes 15123 * with our earlier PAWS tests, so this check should be solely 15124 * predicated on the sequence space of this segment. 3) That we 15125 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 15126 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 15127 * SEG.Len, This modified check allows us to overcome RFC1323's 15128 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 15129 * p.869. In such cases, we can still calculate the RTT correctly 15130 * when RCV.NXT == Last.ACK.Sent. 
15131 */ 15132 if ((to->to_flags & TOF_TS) != 0 && 15133 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 15134 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 15135 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 15136 tp->ts_recent_age = tcp_ts_getticks(); 15137 tp->ts_recent = to->to_tsval; 15138 } 15139 /* 15140 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 15141 * is on (half-synchronized state), then queue data for later 15142 * processing; else drop segment and return. 15143 */ 15144 if ((thflags & TH_ACK) == 0) { 15145 if (tp->t_flags & TF_NEEDSYN) { 15146 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 15147 tiwin, thflags, nxt_pkt)); 15148 } else if (tp->t_flags & TF_ACKNOW) { 15149 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 15150 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 15151 return (ret_val); 15152 } else { 15153 ctf_do_drop(m, NULL); 15154 return (0); 15155 } 15156 } 15157 /* 15158 * case TCPS_LAST_ACK: Ack processing. 15159 */ 15160 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 15161 return (ret_val); 15162 } 15163 if (ourfinisacked) { 15164 tp = tcp_close(tp); 15165 ctf_do_drop(m, tp); 15166 return (1); 15167 } 15168 if (sbavail(&so->so_snd)) { 15169 if (ctf_progress_timeout_check(tp, true)) { 15170 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 15171 tp, tick, PROGRESS_DROP, __LINE__); 15172 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 15173 return (1); 15174 } 15175 } 15176 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 15177 tiwin, thflags, nxt_pkt)); 15178 } 15179 15180 /* 15181 * Return value of 1, the TCB is unlocked and most 15182 * likely gone, return value of 0, the TCP is still 15183 * locked. 15184 */ 15185 static int 15186 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 15187 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 15188 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 15189 { 15190 int32_t ret_val = 0; 15191 int32_t orig_tlen = tlen; 15192 int32_t ourfinisacked = 0; 15193 struct tcp_rack *rack; 15194 15195 rack = (struct tcp_rack *)tp->t_fb_ptr; 15196 ctf_calc_rwin(so, tp); 15197 15198 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 15199 if ((thflags & TH_RST) || 15200 (tp->t_fin_is_rst && (thflags & TH_FIN))) 15201 return (__ctf_process_rst(m, th, so, tp, 15202 &rack->r_ctl.challenge_ack_ts, 15203 &rack->r_ctl.challenge_ack_cnt)); 15204 /* 15205 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 15206 * synchronized state. 15207 */ 15208 if (thflags & TH_SYN) { 15209 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 15210 return (ret_val); 15211 } 15212 /* 15213 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 15214 * it's less than ts_recent, drop it. 15215 */ 15216 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 15217 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 15218 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 15219 return (ret_val); 15220 } 15221 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 15222 &rack->r_ctl.challenge_ack_ts, 15223 &rack->r_ctl.challenge_ack_cnt)) { 15224 return (ret_val); 15225 } 15226 /* 15227 * If new data are received on a connection after the user processes 15228 * are gone, then RST the other end. 
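 *
 * rack_check_data_after_close() below either resets the connection
 * (when data after close is not allowed, or nothing is left in the
 * send buffer) or swallows the data and arranges a follow-up reset.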
15229 */ 15230 if ((tp->t_flags & TF_CLOSED) && tlen && 15231 rack_check_data_after_close(m, tp, &tlen, th, so)) 15232 return (1); 15233 /* 15234 * If last ACK falls within this segment's sequence numbers, record 15235 * its timestamp. NOTE: 1) That the test incorporates suggestions 15236 * from the latest proposal of the tcplw@cray.com list (Braden 15237 * 1993/04/26). 2) That updating only on newer timestamps interferes 15238 * with our earlier PAWS tests, so this check should be solely 15239 * predicated on the sequence space of this segment. 3) That we 15240 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 15241 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 15242 * SEG.Len, This modified check allows us to overcome RFC1323's 15243 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 15244 * p.869. In such cases, we can still calculate the RTT correctly 15245 * when RCV.NXT == Last.ACK.Sent. 15246 */ 15247 if ((to->to_flags & TOF_TS) != 0 && 15248 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 15249 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 15250 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 15251 tp->ts_recent_age = tcp_ts_getticks(); 15252 tp->ts_recent = to->to_tsval; 15253 } 15254 /* 15255 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 15256 * is on (half-synchronized state), then queue data for later 15257 * processing; else drop segment and return. 15258 */ 15259 if ((thflags & TH_ACK) == 0) { 15260 if (tp->t_flags & TF_NEEDSYN) { 15261 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 15262 tiwin, thflags, nxt_pkt)); 15263 } else if (tp->t_flags & TF_ACKNOW) { 15264 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 15265 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 15266 return (ret_val); 15267 } else { 15268 ctf_do_drop(m, NULL); 15269 return (0); 15270 } 15271 } 15272 /* 15273 * Ack processing. 
15274 */ 15275 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 15276 return (ret_val); 15277 } 15278 if (sbavail(&so->so_snd)) { 15279 if (ctf_progress_timeout_check(tp, true)) { 15280 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 15281 tp, tick, PROGRESS_DROP, __LINE__); 15282 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 15283 return (1); 15284 } 15285 } 15286 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 15287 tiwin, thflags, nxt_pkt)); 15288 } 15289 15290 static void inline 15291 rack_clear_rate_sample(struct tcp_rack *rack) 15292 { 15293 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 15294 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 15295 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 15296 } 15297 15298 static void 15299 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 15300 { 15301 uint64_t bw_est, rate_wanted; 15302 int chged = 0; 15303 uint32_t user_max, orig_min, orig_max; 15304 15305 #ifdef TCP_REQUEST_TRK 15306 if (rack->rc_hybrid_mode && 15307 (rack->r_ctl.rc_pace_max_segs != 0) && 15308 (rack_hybrid_allow_set_maxseg == 1) && 15309 (rack->r_ctl.rc_last_sft != NULL)) { 15310 rack->r_ctl.rc_last_sft->hybrid_flags &= ~TCP_HYBRID_PACING_SETMSS; 15311 return; 15312 } 15313 #endif 15314 orig_min = rack->r_ctl.rc_pace_min_segs; 15315 orig_max = rack->r_ctl.rc_pace_max_segs; 15316 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 15317 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 15318 chged = 1; 15319 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 15320 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 15321 if (user_max != rack->r_ctl.rc_pace_max_segs) 15322 chged = 1; 15323 } 15324 if (rack->rc_force_max_seg) { 15325 rack->r_ctl.rc_pace_max_segs = user_max; 15326 } else if (rack->use_fixed_rate) { 15327 bw_est = rack_get_bw(rack); 15328 if ((rack->r_ctl.crte == NULL) || 15329 (bw_est != rack->r_ctl.crte->rate)) { 15330 rack->r_ctl.rc_pace_max_segs = user_max; 15331 } else { 15332 /* We are pacing right at the hardware rate */ 15333 uint32_t segsiz, pace_one; 15334 15335 if (rack_pace_one_seg || 15336 (rack->r_ctl.rc_user_set_min_segs == 1)) 15337 pace_one = 1; 15338 else 15339 pace_one = 0; 15340 segsiz = min(ctf_fixed_maxseg(tp), 15341 rack->r_ctl.rc_pace_min_segs); 15342 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor( 15343 tp, bw_est, segsiz, pace_one, 15344 rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); 15345 } 15346 } else if (rack->rc_always_pace) { 15347 if (rack->r_ctl.gp_bw || 15348 rack->r_ctl.init_rate) { 15349 /* We have a rate of some sort set */ 15350 uint32_t orig; 15351 15352 bw_est = rack_get_bw(rack); 15353 orig = rack->r_ctl.rc_pace_max_segs; 15354 if (fill_override) 15355 rate_wanted = *fill_override; 15356 else 15357 rate_wanted = rack_get_gp_est(rack); 15358 if (rate_wanted) { 15359 /* We have something */ 15360 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 15361 rate_wanted, 15362 ctf_fixed_maxseg(rack->rc_tp)); 15363 } else 15364 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 15365 if (orig != rack->r_ctl.rc_pace_max_segs) 15366 chged = 1; 15367 } else if ((rack->r_ctl.gp_bw == 0) && 15368 (rack->r_ctl.rc_pace_max_segs == 0)) { 15369 /* 15370 * If we have nothing limit us to bursting 15371 * out IW sized pieces. 
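 *
 * In this case rc_init_window() (the same initial-window value used
 * to seed cwnd) becomes the pacing burst cap until a goodput or
 * initial rate estimate is available.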
15372 */ 15373 chged = 1; 15374 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 15375 } 15376 } 15377 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 15378 chged = 1; 15379 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 15380 } 15381 if (chged) 15382 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 15383 } 15384 15385 15386 static void 15387 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack, int32_t flags) 15388 { 15389 #ifdef INET6 15390 struct ip6_hdr *ip6 = NULL; 15391 #endif 15392 #ifdef INET 15393 struct ip *ip = NULL; 15394 #endif 15395 struct udphdr *udp = NULL; 15396 15397 /* Ok lets fill in the fast block, it can only be used with no IP options! */ 15398 #ifdef INET6 15399 if (rack->r_is_v6) { 15400 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 15401 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 15402 if (tp->t_port) { 15403 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 15404 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 15405 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 15406 udp->uh_dport = tp->t_port; 15407 rack->r_ctl.fsb.udp = udp; 15408 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 15409 } else 15410 { 15411 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 15412 rack->r_ctl.fsb.udp = NULL; 15413 } 15414 tcpip_fillheaders(rack->rc_inp, 15415 tp->t_port, 15416 ip6, rack->r_ctl.fsb.th); 15417 rack->r_ctl.fsb.hoplimit = in6_selecthlim(rack->rc_inp, NULL); 15418 } else 15419 #endif /* INET6 */ 15420 #ifdef INET 15421 { 15422 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 15423 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 15424 if (tp->t_port) { 15425 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 15426 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 15427 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 15428 udp->uh_dport = tp->t_port; 15429 rack->r_ctl.fsb.udp = udp; 15430 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 15431 } else 15432 { 15433 rack->r_ctl.fsb.udp = NULL; 15434 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 15435 } 15436 tcpip_fillheaders(rack->rc_inp, 15437 tp->t_port, 15438 ip, rack->r_ctl.fsb.th); 15439 rack->r_ctl.fsb.hoplimit = tptoinpcb(tp)->inp_ip_ttl; 15440 } 15441 #endif 15442 rack->r_ctl.fsb.recwin = lmin(lmax(sbspace(&tptosocket(tp)->so_rcv), 0), 15443 (long)TCP_MAXWIN << tp->rcv_scale); 15444 rack->r_fsb_inited = 1; 15445 } 15446 15447 static int 15448 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 15449 { 15450 /* 15451 * Allocate the larger of spaces V6 if available else just 15452 * V4 and include udphdr (overbook) 15453 */ 15454 #ifdef INET6 15455 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 15456 #else 15457 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 15458 #endif 15459 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 15460 M_TCPFSB, M_NOWAIT|M_ZERO); 15461 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 15462 return (ENOMEM); 15463 } 15464 rack->r_fsb_inited = 0; 15465 return (0); 15466 } 15467 15468 static void 15469 rack_log_hystart_event(struct tcp_rack *rack, uint32_t high_seq, uint8_t mod) 15470 { 15471 /* 15472 * Types of logs (mod value) 15473 * 20 - Initial round setup 15474 * 21 - Rack declares a new round. 
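 *
 * The flex fields below carry the current round, the sequence where
 * it ends, the high_seq argument and snd_max at the time of the log.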
15475 */ 15476 struct tcpcb *tp; 15477 15478 tp = rack->rc_tp; 15479 if (tcp_bblogging_on(tp)) { 15480 union tcp_log_stackspecific log; 15481 struct timeval tv; 15482 15483 memset(&log, 0, sizeof(log)); 15484 log.u_bbr.flex1 = rack->r_ctl.current_round; 15485 log.u_bbr.flex2 = rack->r_ctl.roundends; 15486 log.u_bbr.flex3 = high_seq; 15487 log.u_bbr.flex4 = tp->snd_max; 15488 log.u_bbr.flex8 = mod; 15489 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15490 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 15491 log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes; 15492 TCP_LOG_EVENTP(tp, NULL, 15493 &tptosocket(tp)->so_rcv, 15494 &tptosocket(tp)->so_snd, 15495 TCP_HYSTART, 0, 15496 0, &log, false, &tv); 15497 } 15498 } 15499 15500 static void 15501 rack_deferred_init(struct tcpcb *tp, struct tcp_rack *rack) 15502 { 15503 rack->rack_deferred_inited = 1; 15504 rack->r_ctl.roundends = tp->snd_max; 15505 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 15506 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 15507 } 15508 15509 static void 15510 rack_init_retransmit_value(struct tcp_rack *rack, int ctl) 15511 { 15512 /* Retransmit bit controls. 15513 * 15514 * The setting of these values control one of 15515 * three settings you can have and dictate 15516 * how rack does retransmissions. Note this 15517 * is in *any* mode i.e. pacing on or off DGP 15518 * fixed rate pacing, or just bursting rack. 15519 * 15520 * 1 - Use full sized retransmits i.e. limit 15521 * the size to whatever the pace_max_segments 15522 * size is. 15523 * 15524 * 2 - Use pacer min granularity as a guide to 15525 * the size combined with the current calculated 15526 * goodput b/w measurement. So for example if 15527 * the goodput is measured at 20Mbps we would 15528 * calculate 8125 (pacer minimum 250usec in 15529 * that b/w) and then round it up to the next 15530 * MSS i.e. for 1448 mss 6 MSS or 8688 bytes. 15531 * 15532 * 0 - The rack default 1 MSS (anything not 0/1/2 15533 * fall here too if we are setting via rack_init()). 
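 *
 * In code these settings simply map onto two flags: ctl == 1 sets
 * rack->full_size_rxt, ctl == 2 sets rack->shape_rxt_to_pacing_min,
 * and anything else clears both (the 1 MSS default).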
15534 * 15535 */ 15536 if (ctl == 1) { 15537 rack->full_size_rxt = 1; 15538 rack->shape_rxt_to_pacing_min = 0; 15539 } else if (ctl == 2) { 15540 rack->full_size_rxt = 0; 15541 rack->shape_rxt_to_pacing_min = 1; 15542 } else { 15543 rack->full_size_rxt = 0; 15544 rack->shape_rxt_to_pacing_min = 0; 15545 } 15546 } 15547 15548 static void 15549 rack_log_chg_info(struct tcpcb *tp, struct tcp_rack *rack, uint8_t mod, 15550 uint32_t flex1, 15551 uint32_t flex2, 15552 uint32_t flex3) 15553 { 15554 if (tcp_bblogging_on(rack->rc_tp)) { 15555 union tcp_log_stackspecific log; 15556 struct timeval tv; 15557 15558 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15559 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15560 log.u_bbr.flex8 = mod; 15561 log.u_bbr.flex1 = flex1; 15562 log.u_bbr.flex2 = flex2; 15563 log.u_bbr.flex3 = flex3; 15564 tcp_log_event(tp, NULL, NULL, NULL, TCP_CHG_QUERY, 0, 15565 0, &log, false, NULL, __func__, __LINE__, &tv); 15566 } 15567 } 15568 15569 static int 15570 rack_chg_query(struct tcpcb *tp, struct tcp_query_resp *reqr) 15571 { 15572 struct tcp_rack *rack; 15573 struct rack_sendmap *rsm; 15574 int i; 15575 15576 15577 rack = (struct tcp_rack *)tp->t_fb_ptr; 15578 switch (reqr->req) { 15579 case TCP_QUERY_SENDMAP: 15580 if ((reqr->req_param == tp->snd_max) || 15581 (tp->snd_max == tp->snd_una)){ 15582 /* Unlikely */ 15583 return (0); 15584 } 15585 rsm = tqhash_find(rack->r_ctl.tqh, reqr->req_param); 15586 if (rsm == NULL) { 15587 /* Can't find that seq -- unlikely */ 15588 return (0); 15589 } 15590 reqr->sendmap_start = rsm->r_start; 15591 reqr->sendmap_end = rsm->r_end; 15592 reqr->sendmap_send_cnt = rsm->r_rtr_cnt; 15593 reqr->sendmap_fas = rsm->r_fas; 15594 if (reqr->sendmap_send_cnt > SNDMAP_NRTX) 15595 reqr->sendmap_send_cnt = SNDMAP_NRTX; 15596 for(i=0; i<reqr->sendmap_send_cnt; i++) 15597 reqr->sendmap_time[i] = rsm->r_tim_lastsent[i]; 15598 reqr->sendmap_ack_arrival = rsm->r_ack_arrival; 15599 reqr->sendmap_flags = rsm->r_flags & SNDMAP_MASK; 15600 reqr->sendmap_r_rtr_bytes = rsm->r_rtr_bytes; 15601 reqr->sendmap_dupacks = rsm->r_dupack; 15602 rack_log_chg_info(tp, rack, 1, 15603 rsm->r_start, 15604 rsm->r_end, 15605 rsm->r_flags); 15606 return(1); 15607 break; 15608 case TCP_QUERY_TIMERS_UP: 15609 if (rack->r_ctl.rc_hpts_flags == 0) { 15610 /* no timers up */ 15611 return (0); 15612 } 15613 reqr->timer_hpts_flags = rack->r_ctl.rc_hpts_flags; 15614 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 15615 reqr->timer_pacing_to = rack->r_ctl.rc_last_output_to; 15616 } 15617 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 15618 reqr->timer_timer_exp = rack->r_ctl.rc_timer_exp; 15619 } 15620 rack_log_chg_info(tp, rack, 2, 15621 rack->r_ctl.rc_hpts_flags, 15622 rack->r_ctl.rc_last_output_to, 15623 rack->r_ctl.rc_timer_exp); 15624 return (1); 15625 break; 15626 case TCP_QUERY_RACK_TIMES: 15627 /* Reordering items */ 15628 reqr->rack_num_dsacks = rack->r_ctl.num_dsack; 15629 reqr->rack_reorder_ts = rack->r_ctl.rc_reorder_ts; 15630 /* Timerstamps and timers */ 15631 reqr->rack_rxt_last_time = rack->r_ctl.rc_tlp_rxt_last_time; 15632 reqr->rack_min_rtt = rack->r_ctl.rc_rack_min_rtt; 15633 reqr->rack_rtt = rack->rc_rack_rtt; 15634 reqr->rack_tmit_time = rack->r_ctl.rc_rack_tmit_time; 15635 reqr->rack_srtt_measured = rack->rc_srtt_measure_made; 15636 /* PRR data */ 15637 reqr->rack_sacked = rack->r_ctl.rc_sacked; 15638 reqr->rack_holes_rxt = rack->r_ctl.rc_holes_rxt; 15639 reqr->rack_prr_delivered = rack->r_ctl.rc_prr_delivered; 15640 reqr->rack_prr_recovery_fs = 
rack->r_ctl.rc_prr_recovery_fs; 15641 reqr->rack_prr_sndcnt = rack->r_ctl.rc_prr_sndcnt; 15642 reqr->rack_prr_out = rack->r_ctl.rc_prr_out; 15643 /* TLP and persists info */ 15644 reqr->rack_tlp_out = rack->rc_tlp_in_progress; 15645 reqr->rack_tlp_cnt_out = rack->r_ctl.rc_tlp_cnt_out; 15646 if (rack->rc_in_persist) { 15647 reqr->rack_time_went_idle = rack->r_ctl.rc_went_idle_time; 15648 reqr->rack_in_persist = 1; 15649 } else { 15650 reqr->rack_time_went_idle = 0; 15651 reqr->rack_in_persist = 0; 15652 } 15653 if (rack->r_wanted_output) 15654 reqr->rack_wanted_output = 1; 15655 else 15656 reqr->rack_wanted_output = 0; 15657 return (1); 15658 break; 15659 default: 15660 return (-EINVAL); 15661 } 15662 } 15663 15664 static void 15665 rack_switch_failed(struct tcpcb *tp) 15666 { 15667 /* 15668 * This method gets called if a stack switch was 15669 * attempted and it failed. We are left 15670 * but our hpts timers were stopped and we 15671 * need to validate time units and t_flags2. 15672 */ 15673 struct tcp_rack *rack; 15674 struct timeval tv; 15675 uint32_t cts; 15676 uint32_t toval; 15677 struct hpts_diag diag; 15678 15679 rack = (struct tcp_rack *)tp->t_fb_ptr; 15680 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC); 15681 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 15682 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 15683 else 15684 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 15685 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 15686 tp->t_flags2 |= TF2_MBUF_ACKCMP; 15687 if (tp->t_in_hpts > IHPTS_NONE) { 15688 /* Strange */ 15689 return; 15690 } 15691 cts = tcp_get_usecs(&tv); 15692 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 15693 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 15694 toval = rack->r_ctl.rc_last_output_to - cts; 15695 } else { 15696 /* one slot please */ 15697 toval = HPTS_TICKS_PER_SLOT; 15698 } 15699 } else if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 15700 if (TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 15701 toval = rack->r_ctl.rc_timer_exp - cts; 15702 } else { 15703 /* one slot please */ 15704 toval = HPTS_TICKS_PER_SLOT; 15705 } 15706 } else 15707 toval = HPTS_TICKS_PER_SLOT; 15708 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(toval), 15709 __LINE__, &diag); 15710 rack_log_hpts_diag(rack, cts, &diag, &tv); 15711 } 15712 15713 static int 15714 rack_init_outstanding(struct tcpcb *tp, struct tcp_rack *rack, uint32_t us_cts, void *ptr) 15715 { 15716 struct rack_sendmap *rsm, *ersm; 15717 int insret __diagused; 15718 /* 15719 * When initing outstanding, we must be quite careful 15720 * to not refer to tp->t_fb_ptr. This has the old rack 15721 * pointer in it, not the "new" one (when we are doing 15722 * a stack switch). 
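 *
 * The ptr argument is the newly allocated rack block; if we fail to
 * allocate a sendmap entry below it is freed back to rack_pcb_zone
 * and ENOMEM is returned so rack_init() can fail cleanly.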
15723 */ 15724 15725 15726 if (tp->t_fb->tfb_chg_query == NULL) { 15727 /* Create a send map for the current outstanding data */ 15728 15729 rsm = rack_alloc(rack); 15730 if (rsm == NULL) { 15731 uma_zfree(rack_pcb_zone, ptr); 15732 return (ENOMEM); 15733 } 15734 rsm->r_no_rtt_allowed = 1; 15735 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 15736 rsm->r_rtr_cnt = 1; 15737 rsm->r_rtr_bytes = 0; 15738 if (tp->t_flags & TF_SENTFIN) 15739 rsm->r_flags |= RACK_HAS_FIN; 15740 rsm->r_end = tp->snd_max; 15741 if (tp->snd_una == tp->iss) { 15742 /* The data space is one beyond snd_una */ 15743 rsm->r_flags |= RACK_HAS_SYN; 15744 rsm->r_start = tp->iss; 15745 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); 15746 } else 15747 rsm->r_start = tp->snd_una; 15748 rsm->r_dupack = 0; 15749 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 15750 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 15751 if (rsm->m) { 15752 rsm->orig_m_len = rsm->m->m_len; 15753 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 15754 } else { 15755 rsm->orig_m_len = 0; 15756 rsm->orig_t_space = 0; 15757 } 15758 } else { 15759 /* 15760 * This can happen if we have a stand-alone FIN or 15761 * SYN. 15762 */ 15763 rsm->m = NULL; 15764 rsm->orig_m_len = 0; 15765 rsm->orig_t_space = 0; 15766 rsm->soff = 0; 15767 } 15768 #ifdef INVARIANTS 15769 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 15770 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p", 15771 insret, rack, rsm); 15772 } 15773 #else 15774 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 15775 #endif 15776 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 15777 rsm->r_in_tmap = 1; 15778 } else { 15779 /* We have a query mechanism, lets use it */ 15780 struct tcp_query_resp qr; 15781 int i; 15782 tcp_seq at; 15783 15784 at = tp->snd_una; 15785 while (at != tp->snd_max) { 15786 memset(&qr, 0, sizeof(qr)); 15787 qr.req = TCP_QUERY_SENDMAP; 15788 qr.req_param = at; 15789 if ((*tp->t_fb->tfb_chg_query)(tp, &qr) == 0) 15790 break; 15791 /* Move forward */ 15792 at = qr.sendmap_end; 15793 /* Now lets build the entry for this one */ 15794 rsm = rack_alloc(rack); 15795 if (rsm == NULL) { 15796 uma_zfree(rack_pcb_zone, ptr); 15797 return (ENOMEM); 15798 } 15799 memset(rsm, 0, sizeof(struct rack_sendmap)); 15800 /* Now configure the rsm and insert it */ 15801 rsm->r_dupack = qr.sendmap_dupacks; 15802 rsm->r_start = qr.sendmap_start; 15803 rsm->r_end = qr.sendmap_end; 15804 if (qr.sendmap_fas) 15805 rsm->r_fas = qr.sendmap_end; 15806 else 15807 rsm->r_fas = rsm->r_start - tp->snd_una; 15808 /* 15809 * We have carefully aligned the bits 15810 * so that all we have to do is copy over 15811 * the bits with the mask. 
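 *
 * That is, the flag bits returned in qr.sendmap_flags use the same
 * bit positions as rsm->r_flags, so a single AND with SNDMAP_MASK
 * below is all that is needed to import them.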
15812 */ 15813 rsm->r_flags = qr.sendmap_flags & SNDMAP_MASK; 15814 rsm->r_rtr_bytes = qr.sendmap_r_rtr_bytes; 15815 rsm->r_rtr_cnt = qr.sendmap_send_cnt; 15816 rsm->r_ack_arrival = qr.sendmap_ack_arrival; 15817 for (i=0 ; i<rsm->r_rtr_cnt; i++) 15818 rsm->r_tim_lastsent[i] = qr.sendmap_time[i]; 15819 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 15820 (rsm->r_start - tp->snd_una), &rsm->soff); 15821 if (rsm->m) { 15822 rsm->orig_m_len = rsm->m->m_len; 15823 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 15824 } else { 15825 rsm->orig_m_len = 0; 15826 rsm->orig_t_space = 0; 15827 } 15828 #ifdef INVARIANTS 15829 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 15830 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p", 15831 insret, rack, rsm); 15832 } 15833 #else 15834 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 15835 #endif 15836 if ((rsm->r_flags & RACK_ACKED) == 0) { 15837 TAILQ_FOREACH(ersm, &rack->r_ctl.rc_tmap, r_tnext) { 15838 if (ersm->r_tim_lastsent[(ersm->r_rtr_cnt-1)] > 15839 rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) { 15840 /* 15841 * If the existing ersm was sent at 15842 * a later time than the new one, then 15843 * the new one should appear ahead of this 15844 * ersm. 15845 */ 15846 rsm->r_in_tmap = 1; 15847 TAILQ_INSERT_BEFORE(ersm, rsm, r_tnext); 15848 break; 15849 } 15850 } 15851 if (rsm->r_in_tmap == 0) { 15852 /* 15853 * Not found so shove it on the tail. 15854 */ 15855 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 15856 rsm->r_in_tmap = 1; 15857 } 15858 } else { 15859 if ((rack->r_ctl.rc_sacklast == NULL) || 15860 (SEQ_GT(rsm->r_end, rack->r_ctl.rc_sacklast->r_end))) { 15861 rack->r_ctl.rc_sacklast = rsm; 15862 } 15863 } 15864 rack_log_chg_info(tp, rack, 3, 15865 rsm->r_start, 15866 rsm->r_end, 15867 rsm->r_flags); 15868 } 15869 } 15870 return (0); 15871 } 15872 15873 static void 15874 rack_translate_policer_detect(struct tcp_rack *rack, uint32_t optval) 15875 { 15876 /* 15877 * P = Percent of retransmits 499 = 49.9% 15878 * A = Average number 1 (.1%) -> 169 (16.9%) 15879 * M = Median number of retrans 1 - 16 15880 * MMMM MMMM AAAA AAAA PPPP PPPP PPPP PPPP 15881 * 15882 */ 15883 uint16_t per, upp; 15884 15885 per = optval & 0x0000ffff; 15886 rack->r_ctl.policer_rxt_threshold = (uint32_t)(per & 0xffff); 15887 upp = ((optval & 0xffff0000) >> 16); 15888 rack->r_ctl.policer_avg_threshold = (0x00ff & upp); 15889 rack->r_ctl.policer_med_threshold = ((upp >> 8) & 0x00ff); 15890 if ((rack->r_ctl.policer_rxt_threshold > 0) && 15891 (rack->r_ctl.policer_avg_threshold > 0) && 15892 (rack->r_ctl.policer_med_threshold > 0)) { 15893 rack->policer_detect_on = 1; 15894 } else { 15895 rack->policer_detect_on = 0; 15896 } 15897 rack->r_ctl.saved_policer_val = optval; 15898 policer_detection_log(rack, optval, 15899 rack->r_ctl.policer_avg_threshold, 15900 rack->r_ctl.policer_med_threshold, 15901 rack->r_ctl.policer_rxt_threshold, 11); 15902 } 15903 15904 static int32_t 15905 rack_init(struct tcpcb *tp, void **ptr) 15906 { 15907 struct inpcb *inp = tptoinpcb(tp); 15908 struct tcp_rack *rack = NULL; 15909 uint32_t iwin, snt, us_cts; 15910 size_t sz; 15911 int err, no_query; 15912 15913 tcp_hpts_init(tp); 15914 15915 /* 15916 * First are we the initial or are we a switched stack? 15917 * If we are initing via tcp_newtcppcb the ptr passed 15918 * will be tp->t_fb_ptr. If its a stack switch that 15919 * has a previous stack we can query it will be a local 15920 * var that will in the end be set into t_fb_ptr. 
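 *
 * That distinction is recorded in no_query below: when we are the
 * initial stack (ptr == &tp->t_fb_ptr) there is no previous stack
 * to query, so no_query is 1; on a stack switch it stays 0 and the
 * old stack can be queried for its sendmap and timers.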
15921 */ 15922 if (ptr == &tp->t_fb_ptr) 15923 no_query = 1; 15924 else 15925 no_query = 0; 15926 *ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 15927 if (*ptr == NULL) { 15928 /* 15929 * We need to allocate memory but can't. The INP and INP_INFO 15930 * locks are held and they are recursive (this happens during 15931 * setup), so a scheme to drop the locks fails :( 15932 * 15933 */ 15934 return(ENOMEM); 15935 } 15936 memset(*ptr, 0, sizeof(struct tcp_rack)); 15937 rack = (struct tcp_rack *)*ptr; 15938 rack->r_ctl.tqh = malloc(sizeof(struct tailq_hash), M_TCPFSB, M_NOWAIT); 15939 if (rack->r_ctl.tqh == NULL) { 15940 uma_zfree(rack_pcb_zone, rack); 15941 return(ENOMEM); 15942 } 15943 tqhash_init(rack->r_ctl.tqh); 15944 TAILQ_INIT(&rack->r_ctl.rc_free); 15945 TAILQ_INIT(&rack->r_ctl.rc_tmap); 15946 rack->rc_tp = tp; 15947 rack->rc_inp = inp; 15948 /* Set the flag */ 15949 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0; 15950 /* Probably not needed but let's be sure */ 15951 rack_clear_rate_sample(rack); 15952 /* 15953 * Save off the default values, socket options will poke 15954 * at these if pacing is not on or we have not yet 15955 * reached where pacing is on (gp_ready/fixed enabled). 15956 * When they get set into the CC module (when gp_ready 15957 * is enabled or we enable fixed) then we will set these 15958 * values into the CC and place in here the old values 15959 * so we have a restoral. Then we will set the flag 15960 * rc_pacing_cc_set. That way whenever we turn off pacing 15961 * or switch off this stack, we will know to go restore 15962 * the saved values. 15963 * 15964 * We specifically put into the beta the ecn value for pacing. 15965 */ 15966 rack->rc_new_rnd_needed = 1; 15967 rack->r_ctl.rc_split_limit = V_tcp_map_split_limit; 15968 /* We want ABE-like behavior as well */ 15969 15970 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; 15971 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 15972 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 15973 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 15974 rack->r_ctl.policer_del_mss = rack_req_del_mss; 15975 if ((rack_policer_rxt_thresh > 0) && 15976 (rack_policer_avg_thresh > 0) && 15977 (rack_policer_med_thresh > 0)) { 15978 rack->r_ctl.policer_rxt_threshold = rack_policer_rxt_thresh; 15979 rack->r_ctl.policer_avg_threshold = rack_policer_avg_thresh; 15980 rack->r_ctl.policer_med_threshold = rack_policer_med_thresh; 15981 rack->policer_detect_on = 1; 15982 } else { 15983 rack->policer_detect_on = 0; 15984 } 15985 if (rack_fill_cw_state) 15986 rack->rc_pace_to_cwnd = 1; 15987 if (rack_pacing_min_seg) 15988 rack->r_ctl.rc_user_set_min_segs = rack_pacing_min_seg; 15989 if (use_rack_rr) 15990 rack->use_rack_rr = 1; 15991 if (rack_dnd_default) { 15992 rack->rc_pace_dnd = 1; 15993 } 15994 if (V_tcp_delack_enabled) 15995 tp->t_delayed_ack = 1; 15996 else 15997 tp->t_delayed_ack = 0; 15998 #ifdef TCP_ACCOUNTING 15999 if (rack_tcp_accounting) { 16000 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 16001 } 16002 #endif 16003 rack->r_ctl.pcm_i.cnt_alloc = RACK_DEFAULT_PCM_ARRAY; 16004 sz = (sizeof(struct rack_pcm_stats) * rack->r_ctl.pcm_i.cnt_alloc); 16005 rack->r_ctl.pcm_s = malloc(sz,M_TCPPCM, M_NOWAIT); 16006 if (rack->r_ctl.pcm_s == NULL) { 16007 rack->r_ctl.pcm_i.cnt_alloc = 0; 16008 } 16009 #ifdef NETFLIX_STATS 16010 rack->r_ctl.side_chan_dis_mask = tcp_sidechannel_disable_mask; 16011 #endif 16012 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss; 16013 rack->r_ctl.rack_per_upper_bound_ca =
(uint8_t)rack_per_upper_bound_ca; 16014 if (rack_enable_shared_cwnd) 16015 rack->rack_enable_scwnd = 1; 16016 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; 16017 rack->rc_user_set_max_segs = rack_hptsi_segments; 16018 rack->r_ctl.max_reduction = rack_max_reduce; 16019 rack->rc_force_max_seg = 0; 16020 TAILQ_INIT(&rack->r_ctl.opt_list); 16021 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn; 16022 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn; 16023 if (rack_hibeta_setting) { 16024 rack->rack_hibeta = 1; 16025 if ((rack_hibeta_setting >= 50) && 16026 (rack_hibeta_setting <= 100)) { 16027 rack->r_ctl.rc_saved_beta.beta = rack_hibeta_setting; 16028 rack->r_ctl.saved_hibeta = rack_hibeta_setting; 16029 } 16030 } else { 16031 rack->r_ctl.saved_hibeta = 50; 16032 } 16033 /* 16034 * We initialize to all ones so we never match 0 16035 * just in case the client sends in 0, it hopefully 16036 * will never have all 1's in ms :-) 16037 */ 16038 rack->r_ctl.last_tm_mark = 0xffffffffffffffff; 16039 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 16040 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 16041 rack->r_ctl.pol_bw_comp = rack_policing_do_bw_comp; 16042 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 16043 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 16044 rack->r_ctl.rc_highest_us_rtt = 0; 16045 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 16046 rack->pcm_enabled = rack_pcm_is_enabled; 16047 if (rack_fillcw_bw_cap) 16048 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 16049 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 16050 if (rack_use_cmp_acks) 16051 rack->r_use_cmp_ack = 1; 16052 if (rack_disable_prr) 16053 rack->rack_no_prr = 1; 16054 if (rack_gp_no_rec_chg) 16055 rack->rc_gp_no_rec_chg = 1; 16056 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 16057 rack->r_ctl.pacing_method |= RACK_REG_PACING; 16058 rack->rc_always_pace = 1; 16059 if (rack->rack_hibeta) 16060 rack_set_cc_pacing(rack); 16061 } else 16062 rack->rc_always_pace = 0; 16063 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 16064 rack->r_mbuf_queue = 1; 16065 else 16066 rack->r_mbuf_queue = 0; 16067 rack_set_pace_segments(tp, rack, __LINE__, NULL); 16068 if (rack_limits_scwnd) 16069 rack->r_limit_scw = 1; 16070 else 16071 rack->r_limit_scw = 0; 16072 rack_init_retransmit_value(rack, rack_rxt_controls); 16073 rack->rc_labc = V_tcp_abc_l_var; 16074 if (rack_honors_hpts_min_to) 16075 rack->r_use_hpts_min = 1; 16076 if (tp->snd_una != 0) { 16077 rack->r_ctl.idle_snd_una = tp->snd_una; 16078 rack->rc_sendvars_notset = 0; 16079 /* 16080 * Make sure any TCP timers are not running. 16081 */ 16082 tcp_timer_stop(tp); 16083 } else { 16084 /* 16085 * Server side, we are called from the 16086 * syn-cache. This means none of the 16087 * snd_una/max are set yet so we have 16088 * to defer this until the first send. 
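 *
 * (The sequence-number based pieces are picked up later through the
 * deferred init path, rack_deferred_init(), once snd_una/snd_max
 * are valid.)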
16089 */ 16090 rack->rc_sendvars_notset = 1; 16091 } 16092 16093 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; 16094 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 16095 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 16096 rack->r_ctl.rc_min_to = rack_min_to; 16097 microuptime(&rack->r_ctl.act_rcv_time); 16098 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 16099 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; 16100 if (rack_hw_up_only) 16101 rack->r_up_only = 1; 16102 if (rack_do_dyn_mul) { 16103 /* When dynamic adjustment is on CA needs to start at 100% */ 16104 rack->rc_gp_dyn_mul = 1; 16105 if (rack_do_dyn_mul >= 100) 16106 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 16107 } else 16108 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 16109 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 16110 if (rack_timely_off) { 16111 rack->rc_skip_timely = 1; 16112 } 16113 if (rack->rc_skip_timely) { 16114 rack->r_ctl.rack_per_of_gp_rec = 90; 16115 rack->r_ctl.rack_per_of_gp_ca = 100; 16116 rack->r_ctl.rack_per_of_gp_ss = 250; 16117 } 16118 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 16119 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 16120 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 16121 16122 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 16123 rack_probertt_filter_life); 16124 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 16125 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 16126 rack->r_ctl.rc_time_of_last_probertt = us_cts; 16127 rack->r_ctl.rc_went_idle_time = us_cts; 16128 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks() - (tcp_ack_war_time_window + 1); 16129 rack->r_ctl.rc_time_probertt_starts = 0; 16130 16131 rack->r_ctl.gp_rnd_thresh = rack_rnd_cnt_req & 0xff; 16132 if (rack_rnd_cnt_req & 0x10000) 16133 rack->r_ctl.gate_to_fs = 1; 16134 rack->r_ctl.gp_gain_req = rack_gp_gain_req; 16135 if ((rack_rnd_cnt_req & 0x100) > 0) { 16136 16137 } 16138 if (rack_dsack_std_based & 0x1) { 16139 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 16140 rack->rc_rack_tmr_std_based = 1; 16141 } 16142 if (rack_dsack_std_based & 0x2) { 16143 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 16144 rack->rc_rack_use_dsack = 1; 16145 } 16146 /* We require at least one measurement, even if the sysctl is 0 */ 16147 if (rack_req_measurements) 16148 rack->r_ctl.req_measurements = rack_req_measurements; 16149 else 16150 rack->r_ctl.req_measurements = 1; 16151 if (rack_enable_hw_pacing) 16152 rack->rack_hdw_pace_ena = 1; 16153 if (rack_hw_rate_caps) 16154 rack->r_rack_hw_rate_caps = 1; 16155 #ifdef TCP_SAD_DETECTION 16156 rack->do_detection = 1; 16157 #else 16158 rack->do_detection = 0; 16159 #endif 16160 if (rack_non_rxt_use_cr) 16161 rack->rack_rec_nonrxt_use_cr = 1; 16162 /* Lets setup the fsb block */ 16163 err = rack_init_fsb(tp, rack); 16164 if (err) { 16165 uma_zfree(rack_pcb_zone, *ptr); 16166 *ptr = NULL; 16167 return (err); 16168 } 16169 if (rack_do_hystart) { 16170 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 16171 if (rack_do_hystart > 1) 16172 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 16173 if (rack_do_hystart > 2) 16174 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 16175 } 16176 /* Log what we will do with queries */ 16177 rack_log_chg_info(tp, rack, 7, 16178 no_query, 0, 0); 16179 if (rack_def_profile) 16180 rack_set_profile(rack, rack_def_profile); 16181 /* Cancel the GP 
measurement in progress */ 16182 tp->t_flags &= ~TF_GPUTINPROG; 16183 if ((tp->t_state != TCPS_CLOSED) && 16184 (tp->t_state != TCPS_TIME_WAIT)) { 16185 /* 16186 * We are already open, we may 16187 * need to adjust a few things. 16188 */ 16189 if (SEQ_GT(tp->snd_max, tp->iss)) 16190 snt = tp->snd_max - tp->iss; 16191 else 16192 snt = 0; 16193 iwin = rc_init_window(rack); 16194 if ((snt < iwin) && 16195 (no_query == 1)) { 16196 /* We are not past the initial window 16197 * on the first init (i.e. a stack switch 16198 * has not yet occurred) so we need to make 16199 * sure cwnd and ssthresh are correct. 16200 */ 16201 if (tp->snd_cwnd < iwin) 16202 tp->snd_cwnd = iwin; 16203 /* 16204 * If we are within the initial window 16205 * we want ssthresh to be unlimited. Setting 16206 * it to the rwnd (which the default stack does 16207 * and older racks) is not really a good idea 16208 * since we want to be in SS and grow both the 16209 * cwnd and the rwnd (via dynamic rwnd growth). If 16210 * we set it to the rwnd then as the peer grows its 16211 * rwnd we will be stuck in CA and never hit SS. 16212 * 16213 * It's far better to raise it up high (this takes the 16214 * risk that there has been a loss already, probably 16215 * we should have an indicator in all stacks of loss 16216 * but we don't), but considering the normal use this 16217 * is a risk worth taking. The consequences of not 16218 * hitting SS are far worse than going one more time 16219 * into it early on (before we have sent even an IW). 16220 * It is highly unlikely that we will have had a loss 16221 * before getting the IW out. 16222 */ 16223 tp->snd_ssthresh = 0xffffffff; 16224 } 16225 /* 16226 * Any init based on sequence numbers 16227 * should be done in the deferred init path 16228 * since we can be CLOSED and not have them 16229 * inited when rack_init() is called. We 16230 * are not closed so let's call it. 16231 */ 16232 rack_deferred_init(tp, rack); 16233 } 16234 if ((tp->t_state != TCPS_CLOSED) && 16235 (tp->t_state != TCPS_TIME_WAIT) && 16236 (no_query == 0) && 16237 (tp->snd_una != tp->snd_max)) { 16238 err = rack_init_outstanding(tp, rack, us_cts, *ptr); 16239 if (err) { 16240 *ptr = NULL; 16241 return(err); 16242 } 16243 } 16244 rack_stop_all_timers(tp, rack); 16245 /* Setup all the t_flags2 */ 16246 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 16247 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 16248 else 16249 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 16250 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 16251 tp->t_flags2 |= TF2_MBUF_ACKCMP; 16252 /* 16253 * Timers in Rack are kept in microseconds so let's 16254 * convert any initial incoming variables 16255 * from ticks into usecs. Note that we 16256 * also change the values of t_srtt and t_rttvar, if 16257 * they are non-zero. They are kept with a 5 16258 * bit decimal so we have to carefully convert 16259 * these to get the full precision.
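 *
 * As a rough illustration (assuming hz is 1000, so one tick is
 * 1000 usec): a t_srtt stored as 8 << 5 in tick units would come
 * out of the conversion as roughly 8000 usec of smoothed RTT.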
16260 */ 16261 rack_convert_rtts(tp); 16262 rack_log_hystart_event(rack, rack->r_ctl.roundends, 20); 16263 if ((tptoinpcb(tp)->inp_flags & INP_DROPPED) == 0) { 16264 /* We do not start any timers on DROPPED connections */ 16265 if (tp->t_fb->tfb_chg_query == NULL) { 16266 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 16267 } else { 16268 struct tcp_query_resp qr; 16269 int ret; 16270 16271 memset(&qr, 0, sizeof(qr)); 16272 16273 /* Get the misc time stamps and such for rack */ 16274 qr.req = TCP_QUERY_RACK_TIMES; 16275 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); 16276 if (ret == 1) { 16277 rack->r_ctl.rc_reorder_ts = qr.rack_reorder_ts; 16278 rack->r_ctl.num_dsack = qr.rack_num_dsacks; 16279 rack->r_ctl.rc_tlp_rxt_last_time = qr.rack_rxt_last_time; 16280 rack->r_ctl.rc_rack_min_rtt = qr.rack_min_rtt; 16281 rack->rc_rack_rtt = qr.rack_rtt; 16282 rack->r_ctl.rc_rack_tmit_time = qr.rack_tmit_time; 16283 rack->r_ctl.rc_sacked = qr.rack_sacked; 16284 rack->r_ctl.rc_holes_rxt = qr.rack_holes_rxt; 16285 rack->r_ctl.rc_prr_delivered = qr.rack_prr_delivered; 16286 rack->r_ctl.rc_prr_recovery_fs = qr.rack_prr_recovery_fs; 16287 rack->r_ctl.rc_prr_sndcnt = qr.rack_prr_sndcnt; 16288 rack->r_ctl.rc_prr_out = qr.rack_prr_out; 16289 if (qr.rack_tlp_out) { 16290 rack->rc_tlp_in_progress = 1; 16291 rack->r_ctl.rc_tlp_cnt_out = qr.rack_tlp_cnt_out; 16292 } else { 16293 rack->rc_tlp_in_progress = 0; 16294 rack->r_ctl.rc_tlp_cnt_out = 0; 16295 } 16296 if (qr.rack_srtt_measured) 16297 rack->rc_srtt_measure_made = 1; 16298 if (qr.rack_in_persist == 1) { 16299 rack->r_ctl.rc_went_idle_time = qr.rack_time_went_idle; 16300 #ifdef NETFLIX_SHARED_CWND 16301 if (rack->r_ctl.rc_scw) { 16302 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 16303 rack->rack_scwnd_is_idle = 1; 16304 } 16305 #endif 16306 rack->r_ctl.persist_lost_ends = 0; 16307 rack->probe_not_answered = 0; 16308 rack->forced_ack = 0; 16309 tp->t_rxtshift = 0; 16310 rack->rc_in_persist = 1; 16311 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 16312 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 16313 } 16314 if (qr.rack_wanted_output) 16315 rack->r_wanted_output = 1; 16316 rack_log_chg_info(tp, rack, 6, 16317 qr.rack_min_rtt, 16318 qr.rack_rtt, 16319 qr.rack_reorder_ts); 16320 } 16321 /* Get the old stack timers */ 16322 qr.req_param = 0; 16323 qr.req = TCP_QUERY_TIMERS_UP; 16324 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); 16325 if (ret) { 16326 /* 16327 * non-zero return means we have a timer('s) 16328 * to start. Zero means no timer (no keepalive 16329 * I suppose). 
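 *
 * Below, tov becomes the time until the pacing deadline if one is
 * pending, otherwise the time until the timer expiration, with a
 * single HPTS slot as the floor when the deadline has already
 * passed; the connection is then inserted into hpts for that long.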
16330 */ 16331 uint32_t tov = 0; 16332 16333 rack->r_ctl.rc_hpts_flags = qr.timer_hpts_flags; 16334 if (qr.timer_hpts_flags & PACE_PKT_OUTPUT) { 16335 rack->r_ctl.rc_last_output_to = qr.timer_pacing_to; 16336 if (TSTMP_GT(qr.timer_pacing_to, us_cts)) 16337 tov = qr.timer_pacing_to - us_cts; 16338 else 16339 tov = HPTS_TICKS_PER_SLOT; 16340 } 16341 if (qr.timer_hpts_flags & PACE_TMR_MASK) { 16342 rack->r_ctl.rc_timer_exp = qr.timer_timer_exp; 16343 if (tov == 0) { 16344 if (TSTMP_GT(qr.timer_timer_exp, us_cts)) 16345 tov = qr.timer_timer_exp - us_cts; 16346 else 16347 tov = HPTS_TICKS_PER_SLOT; 16348 } 16349 } 16350 rack_log_chg_info(tp, rack, 4, 16351 rack->r_ctl.rc_hpts_flags, 16352 rack->r_ctl.rc_last_output_to, 16353 rack->r_ctl.rc_timer_exp); 16354 if (tov) { 16355 struct hpts_diag diag; 16356 16357 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(tov), 16358 __LINE__, &diag); 16359 rack_log_hpts_diag(rack, us_cts, &diag, &rack->r_ctl.act_rcv_time); 16360 } 16361 } 16362 } 16363 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, 16364 __LINE__, RACK_RTTS_INIT); 16365 } 16366 return (0); 16367 } 16368 16369 static int 16370 rack_handoff_ok(struct tcpcb *tp) 16371 { 16372 if ((tp->t_state == TCPS_CLOSED) || 16373 (tp->t_state == TCPS_LISTEN)) { 16374 /* Sure no problem though it may not stick */ 16375 return (0); 16376 } 16377 if ((tp->t_state == TCPS_SYN_SENT) || 16378 (tp->t_state == TCPS_SYN_RECEIVED)) { 16379 /* 16380 * We really don't know if you support sack, 16381 * you have to get to ESTAB or beyond to tell. 16382 */ 16383 return (EAGAIN); 16384 } 16385 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { 16386 /* 16387 * Rack will only send a FIN after all data is acknowledged. 16388 * So in this case we have more data outstanding. We can't 16389 * switch stacks until either all data and only the FIN 16390 * is left (in which case rack_init() now knows how 16391 * to deal with that) <or> all is acknowledged and we 16392 * are only left with incoming data, though why you 16393 * would want to switch to rack after all data is acknowledged 16394 * I have no idea (rrs)! 16395 */ 16396 return (EAGAIN); 16397 } 16398 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 16399 return (0); 16400 } 16401 /* 16402 * If we reach here we don't do SACK on this connection so we can 16403 * never do rack. 
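 *
 * To summarize the whole function: CLOSED and LISTEN hand off freely (0),
 * SYN_SENT/SYN_RECEIVED and a sent FIN with more than just the FIN still
 * outstanding return EAGAIN (try again later), a connection with SACK
 * negotiated (or rack_sack_not_required set) returns 0, and anything
 * else lands here.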
16404 */ 16405 return (EINVAL); 16406 } 16407 16408 static void 16409 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 16410 { 16411 16412 if (tp->t_fb_ptr) { 16413 uint32_t cnt_free = 0; 16414 struct tcp_rack *rack; 16415 struct rack_sendmap *rsm; 16416 16417 tcp_handle_orphaned_packets(tp); 16418 tp->t_flags &= ~TF_FORCEDATA; 16419 rack = (struct tcp_rack *)tp->t_fb_ptr; 16420 rack_log_pacing_delay_calc(rack, 16421 0, 16422 0, 16423 0, 16424 rack_get_gp_est(rack), /* delRate */ 16425 rack_get_lt_bw(rack), /* rttProp */ 16426 20, __LINE__, NULL, 0); 16427 #ifdef NETFLIX_SHARED_CWND 16428 if (rack->r_ctl.rc_scw) { 16429 uint32_t limit; 16430 16431 if (rack->r_limit_scw) 16432 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 16433 else 16434 limit = 0; 16435 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 16436 rack->r_ctl.rc_scw_index, 16437 limit); 16438 rack->r_ctl.rc_scw = NULL; 16439 } 16440 #endif 16441 if (rack->r_ctl.fsb.tcp_ip_hdr) { 16442 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); 16443 rack->r_ctl.fsb.tcp_ip_hdr = NULL; 16444 rack->r_ctl.fsb.th = NULL; 16445 } 16446 if (rack->rc_always_pace == 1) { 16447 rack_remove_pacing(rack); 16448 } 16449 /* Clean up any options if they were not applied */ 16450 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 16451 struct deferred_opt_list *dol; 16452 16453 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 16454 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 16455 free(dol, M_TCPDO); 16456 } 16457 /* rack does not use force data but other stacks may clear it */ 16458 if (rack->r_ctl.crte != NULL) { 16459 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 16460 rack->rack_hdrw_pacing = 0; 16461 rack->r_ctl.crte = NULL; 16462 } 16463 #ifdef TCP_BLACKBOX 16464 tcp_log_flowend(tp); 16465 #endif 16466 /* 16467 * Lets take a different approach to purging just 16468 * get each one and free it like a cum-ack would and 16469 * not use a foreach loop. 
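 *
 * Concretely: repeatedly pop tqhash_min() (and then TAILQ_FIRST of the
 * rc_free list) and free each rsm individually, so we never walk a
 * structure whose entries are being freed underneath the iterator.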
16470 */ 16471 rsm = tqhash_min(rack->r_ctl.tqh); 16472 while (rsm) { 16473 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); 16474 rack->r_ctl.rc_num_maps_alloced--; 16475 uma_zfree(rack_zone, rsm); 16476 rsm = tqhash_min(rack->r_ctl.tqh); 16477 } 16478 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 16479 while (rsm) { 16480 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 16481 rack->r_ctl.rc_num_maps_alloced--; 16482 rack->rc_free_cnt--; 16483 cnt_free++; 16484 uma_zfree(rack_zone, rsm); 16485 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 16486 } 16487 if (rack->r_ctl.pcm_s != NULL) { 16488 free(rack->r_ctl.pcm_s, M_TCPPCM); 16489 rack->r_ctl.pcm_s = NULL; 16490 rack->r_ctl.pcm_i.cnt_alloc = 0; 16491 rack->r_ctl.pcm_i.cnt = 0; 16492 } 16493 if ((rack->r_ctl.rc_num_maps_alloced > 0) && 16494 (tcp_bblogging_on(tp))) { 16495 union tcp_log_stackspecific log; 16496 struct timeval tv; 16497 16498 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16499 log.u_bbr.flex8 = 10; 16500 log.u_bbr.flex1 = rack->r_ctl.rc_num_maps_alloced; 16501 log.u_bbr.flex2 = rack->rc_free_cnt; 16502 log.u_bbr.flex3 = cnt_free; 16503 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16504 rsm = tqhash_min(rack->r_ctl.tqh); 16505 log.u_bbr.delRate = (uint64_t)rsm; 16506 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 16507 log.u_bbr.cur_del_rate = (uint64_t)rsm; 16508 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 16509 log.u_bbr.pkt_epoch = __LINE__; 16510 (void)tcp_log_event(tp, NULL, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 16511 0, &log, false, NULL, NULL, 0, &tv); 16512 } 16513 KASSERT((rack->r_ctl.rc_num_maps_alloced == 0), 16514 ("rack:%p num_aloc:%u after freeing all?", 16515 rack, 16516 rack->r_ctl.rc_num_maps_alloced)); 16517 rack->rc_free_cnt = 0; 16518 free(rack->r_ctl.tqh, M_TCPFSB); 16519 rack->r_ctl.tqh = NULL; 16520 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 16521 tp->t_fb_ptr = NULL; 16522 } 16523 /* Make sure snd_nxt is correctly set */ 16524 tp->snd_nxt = tp->snd_max; 16525 } 16526 16527 static void 16528 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 16529 { 16530 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 16531 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0; 16532 } 16533 switch (tp->t_state) { 16534 case TCPS_SYN_SENT: 16535 rack->r_state = TCPS_SYN_SENT; 16536 rack->r_substate = rack_do_syn_sent; 16537 break; 16538 case TCPS_SYN_RECEIVED: 16539 rack->r_state = TCPS_SYN_RECEIVED; 16540 rack->r_substate = rack_do_syn_recv; 16541 break; 16542 case TCPS_ESTABLISHED: 16543 rack_set_pace_segments(tp, rack, __LINE__, NULL); 16544 rack->r_state = TCPS_ESTABLISHED; 16545 rack->r_substate = rack_do_established; 16546 break; 16547 case TCPS_CLOSE_WAIT: 16548 rack->r_state = TCPS_CLOSE_WAIT; 16549 rack->r_substate = rack_do_close_wait; 16550 break; 16551 case TCPS_FIN_WAIT_1: 16552 rack_set_pace_segments(tp, rack, __LINE__, NULL); 16553 rack->r_state = TCPS_FIN_WAIT_1; 16554 rack->r_substate = rack_do_fin_wait_1; 16555 break; 16556 case TCPS_CLOSING: 16557 rack_set_pace_segments(tp, rack, __LINE__, NULL); 16558 rack->r_state = TCPS_CLOSING; 16559 rack->r_substate = rack_do_closing; 16560 break; 16561 case TCPS_LAST_ACK: 16562 rack_set_pace_segments(tp, rack, __LINE__, NULL); 16563 rack->r_state = TCPS_LAST_ACK; 16564 rack->r_substate = rack_do_lastack; 16565 break; 16566 case TCPS_FIN_WAIT_2: 16567 rack->r_state = TCPS_FIN_WAIT_2; 16568 rack->r_substate = rack_do_fin_wait_2; 16569 break; 16570 case TCPS_LISTEN: 16571 case TCPS_CLOSED: 16572 case TCPS_TIME_WAIT: 
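/*
 * LISTEN, CLOSED and TIME_WAIT intentionally fall through to the
 * default: no rack substate handler is installed for those states.
 */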
16573 default: 16574 break; 16575 }; 16576 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 16577 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 16578 16579 } 16580 16581 static void 16582 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 16583 { 16584 /* 16585 * We received an ack, and then did not 16586 * call send or were bounced out due to the 16587 * hpts was running. Now a timer is up as well, is 16588 * it the right timer? 16589 */ 16590 struct rack_sendmap *rsm; 16591 int tmr_up; 16592 16593 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 16594 if (tcp_in_hpts(rack->rc_tp) == 0) { 16595 /* 16596 * Ok we probably need some timer up, but no 16597 * matter what the mask we are not in hpts. We 16598 * may have received an old ack and thus did nothing. 16599 */ 16600 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16601 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 16602 return; 16603 } 16604 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 16605 return; 16606 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 16607 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 16608 (tmr_up == PACE_TMR_RXT)) { 16609 /* Should be an RXT */ 16610 return; 16611 } 16612 if (rsm == NULL) { 16613 /* Nothing outstanding? */ 16614 if (tp->t_flags & TF_DELACK) { 16615 if (tmr_up == PACE_TMR_DELACK) 16616 /* We are supposed to have delayed ack up and we do */ 16617 return; 16618 } else if (sbavail(&tptosocket(tp)->so_snd) && (tmr_up == PACE_TMR_RXT)) { 16619 /* 16620 * if we hit enobufs then we would expect the possibility 16621 * of nothing outstanding and the RXT up (and the hptsi timer). 16622 */ 16623 return; 16624 } else if (((V_tcp_always_keepalive || 16625 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 16626 (tp->t_state <= TCPS_CLOSING)) && 16627 (tmr_up == PACE_TMR_KEEP) && 16628 (tp->snd_max == tp->snd_una)) { 16629 /* We should have keep alive up and we do */ 16630 return; 16631 } 16632 } 16633 if (SEQ_GT(tp->snd_max, tp->snd_una) && 16634 ((tmr_up == PACE_TMR_TLP) || 16635 (tmr_up == PACE_TMR_RACK) || 16636 (tmr_up == PACE_TMR_RXT))) { 16637 /* 16638 * Either a Rack, TLP or RXT is fine if we 16639 * have outstanding data. 16640 */ 16641 return; 16642 } else if (tmr_up == PACE_TMR_DELACK) { 16643 /* 16644 * If the delayed ack was going to go off 16645 * before the rtx/tlp/rack timer were going to 16646 * expire, then that would be the timer in control. 16647 * Note we don't check the time here trusting the 16648 * code is correct. 16649 */ 16650 return; 16651 } 16652 /* 16653 * Ok the timer originally started is not what we want now. 16654 * We will force the hpts to be stopped if any, and restart 16655 * with the slot set to what was in the saved slot. 
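 *
 * If a pacing deadline was pending and still lies in the future, the
 * remaining time is first credited to rc_agg_early (and r_early is set)
 * so the pacer can account for being pulled off the wheel ahead of
 * schedule, then the hpts entry is removed and the timers restarted.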
16656 */ 16657 if (tcp_in_hpts(rack->rc_tp)) { 16658 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 16659 uint32_t us_cts; 16660 16661 us_cts = tcp_get_usecs(NULL); 16662 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 16663 rack->r_early = 1; 16664 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 16665 } 16666 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 16667 } 16668 tcp_hpts_remove(rack->rc_tp); 16669 } 16670 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16671 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 16672 } 16673 16674 16675 static void 16676 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts) 16677 { 16678 if ((SEQ_LT(tp->snd_wl1, seq) || 16679 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || 16680 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { 16681 /* keep track of pure window updates */ 16682 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) 16683 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 16684 tp->snd_wnd = tiwin; 16685 rack_validate_fo_sendwin_up(tp, rack); 16686 tp->snd_wl1 = seq; 16687 tp->snd_wl2 = ack; 16688 if (tp->snd_wnd > tp->max_sndwnd) 16689 tp->max_sndwnd = tp->snd_wnd; 16690 rack->r_wanted_output = 1; 16691 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { 16692 tp->snd_wnd = tiwin; 16693 rack_validate_fo_sendwin_up(tp, rack); 16694 tp->snd_wl1 = seq; 16695 tp->snd_wl2 = ack; 16696 } else { 16697 /* Not a valid win update */ 16698 return; 16699 } 16700 if (tp->snd_wnd > tp->max_sndwnd) 16701 tp->max_sndwnd = tp->snd_wnd; 16702 /* Do we exit persists? */ 16703 if ((rack->rc_in_persist != 0) && 16704 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 16705 rack->r_ctl.rc_pace_min_segs))) { 16706 rack_exit_persist(tp, rack, cts); 16707 } 16708 /* Do we enter persists? */ 16709 if ((rack->rc_in_persist == 0) && 16710 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 16711 TCPS_HAVEESTABLISHED(tp->t_state) && 16712 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 16713 sbavail(&tptosocket(tp)->so_snd) && 16714 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 16715 /* 16716 * Here the rwnd is less than 16717 * the pacing size, we are established, 16718 * nothing is outstanding, and there is 16719 * data to send. Enter persists. 
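 *
 * The threshold used in both directions is min(rc_high_rwnd/2,
 * rc_pace_min_segs): we leave persists above once the offered window
 * reaches it, and only enter here when the window has fallen below it
 * while more data is queued than the window can cover.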
16720 */ 16721 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, ack); 16722 } 16723 } 16724 16725 static void 16726 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq) 16727 { 16728 16729 if (tcp_bblogging_on(rack->rc_tp)) { 16730 struct inpcb *inp = tptoinpcb(tp); 16731 union tcp_log_stackspecific log; 16732 struct timeval ltv; 16733 char tcp_hdr_buf[60]; 16734 struct tcphdr *th; 16735 struct timespec ts; 16736 uint32_t orig_snd_una; 16737 uint8_t xx = 0; 16738 16739 #ifdef TCP_REQUEST_TRK 16740 struct tcp_sendfile_track *tcp_req; 16741 16742 if (SEQ_GT(ae->ack, tp->snd_una)) { 16743 tcp_req = tcp_req_find_req_for_seq(tp, (ae->ack-1)); 16744 } else { 16745 tcp_req = tcp_req_find_req_for_seq(tp, ae->ack); 16746 } 16747 #endif 16748 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16749 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 16750 if (rack->rack_no_prr == 0) 16751 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16752 else 16753 log.u_bbr.flex1 = 0; 16754 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 16755 log.u_bbr.use_lt_bw <<= 1; 16756 log.u_bbr.use_lt_bw |= rack->r_might_revert; 16757 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 16758 log.u_bbr.bbr_state = rack->rc_free_cnt; 16759 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 16760 log.u_bbr.pkts_out = tp->t_maxseg; 16761 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 16762 log.u_bbr.flex7 = 1; 16763 log.u_bbr.lost = ae->flags; 16764 log.u_bbr.cwnd_gain = ackval; 16765 log.u_bbr.pacing_gain = 0x2; 16766 if (ae->flags & TSTMP_HDWR) { 16767 /* Record the hardware timestamp if present */ 16768 log.u_bbr.flex3 = M_TSTMP; 16769 ts.tv_sec = ae->timestamp / 1000000000; 16770 ts.tv_nsec = ae->timestamp % 1000000000; 16771 ltv.tv_sec = ts.tv_sec; 16772 ltv.tv_usec = ts.tv_nsec / 1000; 16773 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 16774 } else if (ae->flags & TSTMP_LRO) { 16775 /* Record the LRO the arrival timestamp */ 16776 log.u_bbr.flex3 = M_TSTMP_LRO; 16777 ts.tv_sec = ae->timestamp / 1000000000; 16778 ts.tv_nsec = ae->timestamp % 1000000000; 16779 ltv.tv_sec = ts.tv_sec; 16780 ltv.tv_usec = ts.tv_nsec / 1000; 16781 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 16782 } 16783 log.u_bbr.timeStamp = tcp_get_usecs(<v); 16784 /* Log the rcv time */ 16785 log.u_bbr.delRate = ae->timestamp; 16786 #ifdef TCP_REQUEST_TRK 16787 log.u_bbr.applimited = tp->t_tcpreq_closed; 16788 log.u_bbr.applimited <<= 8; 16789 log.u_bbr.applimited |= tp->t_tcpreq_open; 16790 log.u_bbr.applimited <<= 8; 16791 log.u_bbr.applimited |= tp->t_tcpreq_req; 16792 if (tcp_req) { 16793 /* Copy out any client req info */ 16794 /* seconds */ 16795 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); 16796 /* useconds */ 16797 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); 16798 log.u_bbr.rttProp = tcp_req->timestamp; 16799 log.u_bbr.cur_del_rate = tcp_req->start; 16800 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { 16801 log.u_bbr.flex8 |= 1; 16802 } else { 16803 log.u_bbr.flex8 |= 2; 16804 log.u_bbr.bw_inuse = tcp_req->end; 16805 } 16806 log.u_bbr.flex6 = tcp_req->start_seq; 16807 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { 16808 log.u_bbr.flex8 |= 4; 16809 log.u_bbr.epoch = tcp_req->end_seq; 16810 } 16811 } 16812 #endif 16813 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf)); 16814 th = (struct tcphdr *)tcp_hdr_buf; 16815 th->th_seq = ae->seq; 16816 th->th_ack = ae->ack; 16817 th->th_win = ae->win; 16818 /* Now fill in the ports */ 16819 th->th_sport = inp->inp_fport; 
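/*
 * Note the ports are deliberately swapped relative to the inpcb: the
 * synthetic header is built to look like the inbound segment, so the
 * peer's (foreign) port goes in as the source and our local port as
 * the destination.
 */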
16820 th->th_dport = inp->inp_lport; 16821 tcp_set_flags(th, ae->flags); 16822 /* Now do we have a timestamp option? */ 16823 if (ae->flags & HAS_TSTMP) { 16824 u_char *cp; 16825 uint32_t val; 16826 16827 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); 16828 cp = (u_char *)(th + 1); 16829 *cp = TCPOPT_NOP; 16830 cp++; 16831 *cp = TCPOPT_NOP; 16832 cp++; 16833 *cp = TCPOPT_TIMESTAMP; 16834 cp++; 16835 *cp = TCPOLEN_TIMESTAMP; 16836 cp++; 16837 val = htonl(ae->ts_value); 16838 bcopy((char *)&val, 16839 (char *)cp, sizeof(uint32_t)); 16840 val = htonl(ae->ts_echo); 16841 bcopy((char *)&val, 16842 (char *)(cp + 4), sizeof(uint32_t)); 16843 } else 16844 th->th_off = (sizeof(struct tcphdr) >> 2); 16845 16846 /* 16847 * For sane logging we need to play a little trick. 16848 * If the ack were fully processed we would have moved 16849 * snd_una to high_seq, but since compressed acks are 16850 * processed in two phases, at this point (logging) snd_una 16851 * won't be advanced. So we would see multiple acks showing 16852 * the advancement. We can prevent that by "pretending" that 16853 * snd_una was advanced and then un-advancing it so that the 16854 * logging code has the right value for tlb_snd_una. 16855 */ 16856 if (tp->snd_una != high_seq) { 16857 orig_snd_una = tp->snd_una; 16858 tp->snd_una = high_seq; 16859 xx = 1; 16860 } else 16861 xx = 0; 16862 TCP_LOG_EVENTP(tp, th, 16863 &tptosocket(tp)->so_rcv, 16864 &tptosocket(tp)->so_snd, TCP_LOG_IN, 0, 16865 0, &log, true, <v); 16866 if (xx) { 16867 tp->snd_una = orig_snd_una; 16868 } 16869 } 16870 16871 } 16872 16873 static void 16874 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts) 16875 { 16876 uint32_t us_rtt; 16877 /* 16878 * A persist or keep-alive was forced out, update our 16879 * min rtt time. Note now worry about lost responses. 16880 * When a subsequent keep-alive or persist times out 16881 * and forced_ack is still on, then the last probe 16882 * was not responded to. In such cases we have a 16883 * sysctl that controls the behavior. Either we apply 16884 * the rtt but with reduced confidence (0). Or we just 16885 * plain don't apply the rtt estimate. Having data flow 16886 * will clear the probe_not_answered flag i.e. cum-ack 16887 * move forward <or> exiting and reentering persists. 16888 */ 16889 16890 rack->forced_ack = 0; 16891 rack->rc_tp->t_rxtshift = 0; 16892 if ((rack->rc_in_persist && 16893 (tiwin == rack->rc_tp->snd_wnd)) || 16894 (rack->rc_in_persist == 0)) { 16895 /* 16896 * In persists only apply the RTT update if this is 16897 * a response to our window probe. And that 16898 * means the rwnd sent must match the current 16899 * snd_wnd. If it does not, then we got a 16900 * window update ack instead. For keepalive 16901 * we allow the answer no matter what the window. 16902 * 16903 * Note that if the probe_not_answered is set then 16904 * the forced_ack_ts is the oldest one i.e. the first 16905 * probe sent that might have been lost. This assures 16906 * us that if we do calculate an RTT it is longer not 16907 * some short thing. 
16908 */ 16909 if (rack->rc_in_persist) 16910 counter_u64_add(rack_persists_acks, 1); 16911 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 16912 if (us_rtt == 0) 16913 us_rtt = 1; 16914 if (rack->probe_not_answered == 0) { 16915 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 16916 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); 16917 } else { 16918 /* We have a retransmitted probe here too */ 16919 if (rack_apply_rtt_with_reduced_conf) { 16920 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 16921 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1); 16922 } 16923 } 16924 } 16925 } 16926 16927 static void 16928 rack_new_round_starts(struct tcpcb *tp, struct tcp_rack *rack, uint32_t high_seq) 16929 { 16930 /* 16931 * The next send has occurred mark the end of the round 16932 * as when that data gets acknowledged. We can 16933 * also do common things we might need to do when 16934 * a round begins. 16935 */ 16936 rack->r_ctl.roundends = tp->snd_max; 16937 rack->rc_new_rnd_needed = 0; 16938 rack_log_hystart_event(rack, tp->snd_max, 4); 16939 } 16940 16941 16942 static void 16943 rack_log_pcm(struct tcp_rack *rack, uint8_t mod, uint32_t flex1, uint32_t flex2, 16944 uint32_t flex3) 16945 { 16946 if (tcp_bblogging_on(rack->rc_tp)) { 16947 union tcp_log_stackspecific log; 16948 struct timeval tv; 16949 16950 (void)tcp_get_usecs(&tv); 16951 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16952 log.u_bbr.timeStamp = tcp_tv_to_usectick(&tv); 16953 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16954 log.u_bbr.flex8 = mod; 16955 log.u_bbr.flex1 = flex1; 16956 log.u_bbr.flex2 = flex2; 16957 log.u_bbr.flex3 = flex3; 16958 log.u_bbr.flex4 = rack_pcm_every_n_rounds; 16959 log.u_bbr.flex5 = rack->r_ctl.pcm_idle_rounds; 16960 log.u_bbr.bbr_substate = rack->pcm_needed; 16961 log.u_bbr.bbr_substate <<= 1; 16962 log.u_bbr.bbr_substate |= rack->pcm_in_progress; 16963 log.u_bbr.bbr_substate <<= 1; 16964 log.u_bbr.bbr_substate |= rack->pcm_enabled; /* bits are NIE for Needed, Inprogress, Enabled */ 16965 (void)tcp_log_event(rack->rc_tp, NULL, NULL, NULL, TCP_PCM_MEASURE, ERRNO_UNK, 16966 0, &log, false, NULL, NULL, 0, &tv); 16967 } 16968 } 16969 16970 static void 16971 rack_new_round_setup(struct tcpcb *tp, struct tcp_rack *rack, uint32_t high_seq) 16972 { 16973 /* 16974 * The round (current_round) has ended. We now 16975 * setup for the next round by incrementing the 16976 * round numnber and doing any round specific 16977 * things. 16978 */ 16979 rack_log_hystart_event(rack, high_seq, 21); 16980 rack->r_ctl.current_round++; 16981 /* New round (current_round) begins at next send */ 16982 rack->rc_new_rnd_needed = 1; 16983 if ((rack->pcm_enabled == 1) && 16984 (rack->pcm_needed == 0) && 16985 (rack->pcm_in_progress == 0)) { 16986 /* 16987 * If we have enabled PCM, then we need to 16988 * check if the round has adanced to the state 16989 * where one is required. 
16990 */ 16991 int rnds; 16992 16993 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; 16994 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { 16995 rack->pcm_needed = 1; 16996 rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_round ); 16997 } else if (rack_verbose_logging) { 16998 rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_round ); 16999 } 17000 } 17001 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { 17002 /* We have hystart enabled; send the round info in */ 17003 if (CC_ALGO(tp)->newround != NULL) { 17004 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); 17005 } 17006 } 17007 /* 17008 * For DGP an initial startup check. We want to validate 17009 * that we are not just pushing on slow-start and just 17010 * not gaining.. i.e. filling buffers without getting any 17011 * boost in b/w during the initial slow-start. 17012 */ 17013 if (rack->dgp_on && 17014 (rack->rc_initial_ss_comp == 0) && 17015 (tp->snd_cwnd < tp->snd_ssthresh) && 17016 (rack->r_ctl.num_measurements >= RACK_REQ_AVG) && 17017 (rack->r_ctl.gp_rnd_thresh > 0) && 17018 ((rack->r_ctl.current_round - rack->r_ctl.last_rnd_of_gp_rise) >= rack->r_ctl.gp_rnd_thresh)) { 17019 17020 /* 17021 * We are in the initial SS and we have had rack_rnd_cnt_req rounds (def:5) where 17022 * we have not gained the required amount in the gp_est (120.0% aka 1200). Lets 17023 * exit SS. 17024 * 17025 * Pick up the flight size now as we enter slowstart (not the 17026 * cwnd which may be inflated). 17027 */ 17028 rack->rc_initial_ss_comp = 1; 17029 17030 if (tcp_bblogging_on(rack->rc_tp)) { 17031 union tcp_log_stackspecific log; 17032 struct timeval tv; 17033 17034 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 17035 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 17036 log.u_bbr.flex1 = rack->r_ctl.current_round; 17037 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; 17038 log.u_bbr.flex3 = rack->r_ctl.gp_rnd_thresh; 17039 log.u_bbr.flex4 = rack->r_ctl.gate_to_fs; 17040 log.u_bbr.flex5 = rack->r_ctl.ss_hi_fs; 17041 log.u_bbr.flex8 = 40; 17042 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 17043 0, &log, false, NULL, __func__, __LINE__,&tv); 17044 } 17045 if ((rack->r_ctl.gate_to_fs == 1) && 17046 (tp->snd_cwnd > rack->r_ctl.ss_hi_fs)) { 17047 tp->snd_cwnd = rack->r_ctl.ss_hi_fs; 17048 } 17049 tp->snd_ssthresh = tp->snd_cwnd - 1; 17050 /* Turn off any fast output running */ 17051 rack->r_fast_output = 0; 17052 } 17053 } 17054 17055 static int 17056 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) 17057 { 17058 /* 17059 * Handle a "special" compressed ack mbuf. Each incoming 17060 * ack has only four possible dispositions: 17061 * 17062 * A) It moves the cum-ack forward 17063 * B) It is behind the cum-ack. 17064 * C) It is a window-update ack. 17065 * D) It is a dup-ack. 17066 * 17067 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES 17068 * in the incoming mbuf. We also need to still pay attention 17069 * to nxt_pkt since there may be another packet after this 17070 * one.
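 *
 * The entry count below is simply m->m_len / sizeof(struct tcp_ackent),
 * and each entry is classified against high_seq (a local running copy of
 * snd_una) into one of the four cases above before any per-ack work is
 * done.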
17071 */ 17072 #ifdef TCP_ACCOUNTING 17073 uint64_t ts_val; 17074 uint64_t rdstc; 17075 #endif 17076 int segsiz; 17077 struct timespec ts; 17078 struct tcp_rack *rack; 17079 struct tcp_ackent *ae; 17080 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack; 17081 int cnt, i, did_out, ourfinisacked = 0; 17082 struct tcpopt to_holder, *to = NULL; 17083 #ifdef TCP_ACCOUNTING 17084 int win_up_req = 0; 17085 #endif 17086 int nsegs = 0; 17087 int under_pacing = 0; 17088 int post_recovery = 0; 17089 #ifdef TCP_ACCOUNTING 17090 sched_pin(); 17091 #endif 17092 rack = (struct tcp_rack *)tp->t_fb_ptr; 17093 if (rack->gp_ready && 17094 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) 17095 under_pacing = 1; 17096 17097 if (rack->r_state != tp->t_state) 17098 rack_set_state(tp, rack); 17099 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 17100 (tp->t_flags & TF_GPUTINPROG)) { 17101 /* 17102 * We have a goodput in progress 17103 * and we have entered a late state. 17104 * Do we have enough data in the sb 17105 * to handle the GPUT request? 17106 */ 17107 uint32_t bytes; 17108 17109 bytes = tp->gput_ack - tp->gput_seq; 17110 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 17111 bytes += tp->gput_seq - tp->snd_una; 17112 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 17113 /* 17114 * There are not enough bytes in the socket 17115 * buffer that have been sent to cover this 17116 * measurement. Cancel it. 17117 */ 17118 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 17119 rack->r_ctl.rc_gp_srtt /*flex1*/, 17120 tp->gput_seq, 17121 0, 0, 18, __LINE__, NULL, 0); 17122 tp->t_flags &= ~TF_GPUTINPROG; 17123 } 17124 } 17125 to = &to_holder; 17126 to->to_flags = 0; 17127 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 17128 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 17129 cnt = m->m_len / sizeof(struct tcp_ackent); 17130 counter_u64_add(rack_multi_single_eq, cnt); 17131 high_seq = tp->snd_una; 17132 the_win = tp->snd_wnd; 17133 win_seq = tp->snd_wl1; 17134 win_upd_ack = tp->snd_wl2; 17135 cts = tcp_tv_to_usectick(tv); 17136 ms_cts = tcp_tv_to_mssectick(tv); 17137 rack->r_ctl.rc_rcvtime = cts; 17138 segsiz = ctf_fixed_maxseg(tp); 17139 if ((rack->rc_gp_dyn_mul) && 17140 (rack->use_fixed_rate == 0) && 17141 (rack->rc_always_pace)) { 17142 /* Check in on probertt */ 17143 rack_check_probe_rtt(rack, cts); 17144 } 17145 for (i = 0; i < cnt; i++) { 17146 #ifdef TCP_ACCOUNTING 17147 ts_val = get_cyclecount(); 17148 #endif 17149 rack_clear_rate_sample(rack); 17150 ae = ((mtod(m, struct tcp_ackent *)) + i); 17151 if (ae->flags & TH_FIN) 17152 rack_log_pacing_delay_calc(rack, 17153 0, 17154 0, 17155 0, 17156 rack_get_gp_est(rack), /* delRate */ 17157 rack_get_lt_bw(rack), /* rttProp */ 17158 20, __LINE__, NULL, 0); 17159 /* Setup the window */ 17160 tiwin = ae->win << tp->snd_scale; 17161 if (tiwin > rack->r_ctl.rc_high_rwnd) 17162 rack->r_ctl.rc_high_rwnd = tiwin; 17163 /* figure out the type of ack */ 17164 if (SEQ_LT(ae->ack, high_seq)) { 17165 /* Case B*/ 17166 ae->ack_val_set = ACK_BEHIND; 17167 } else if (SEQ_GT(ae->ack, high_seq)) { 17168 /* Case A */ 17169 ae->ack_val_set = ACK_CUMACK; 17170 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ 17171 /* Case D */ 17172 ae->ack_val_set = ACK_DUPACK; 17173 } else { 17174 /* Case C */ 17175 ae->ack_val_set = ACK_RWND; 17176 } 17177 if (rack->sack_attack_disable > 0) { 17178 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__); 17179 rack->r_ctl.ack_during_sd++; 17180 } 17181 
rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 17182 /* Validate timestamp */ 17183 if (ae->flags & HAS_TSTMP) { 17184 /* Setup for a timestamp */ 17185 to->to_flags = TOF_TS; 17186 ae->ts_echo -= tp->ts_offset; 17187 to->to_tsecr = ae->ts_echo; 17188 to->to_tsval = ae->ts_value; 17189 /* 17190 * If echoed timestamp is later than the current time, fall back to 17191 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 17192 * were used when this connection was established. 17193 */ 17194 if (TSTMP_GT(ae->ts_echo, ms_cts)) 17195 to->to_tsecr = 0; 17196 if (tp->ts_recent && 17197 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 17198 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 17199 #ifdef TCP_ACCOUNTING 17200 rdstc = get_cyclecount(); 17201 if (rdstc > ts_val) { 17202 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17203 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 17204 } 17205 } 17206 #endif 17207 continue; 17208 } 17209 } 17210 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 17211 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 17212 tp->ts_recent_age = tcp_ts_getticks(); 17213 tp->ts_recent = ae->ts_value; 17214 } 17215 } else { 17216 /* Setup for a no options */ 17217 to->to_flags = 0; 17218 } 17219 /* Update the rcv time and perform idle reduction possibly */ 17220 if (tp->t_idle_reduce && 17221 (tp->snd_max == tp->snd_una) && 17222 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 17223 counter_u64_add(rack_input_idle_reduces, 1); 17224 rack_cc_after_idle(rack, tp); 17225 } 17226 tp->t_rcvtime = ticks; 17227 /* Now what about ECN of a chain of pure ACKs? */ 17228 if (tcp_ecn_input_segment(tp, ae->flags, 0, 17229 tcp_packets_this_ack(tp, ae->ack), 17230 ae->codepoint)) 17231 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__); 17232 #ifdef TCP_ACCOUNTING 17233 /* Count for the specific type of ack in */ 17234 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17235 tp->tcp_cnt_counters[ae->ack_val_set]++; 17236 } 17237 #endif 17238 /* 17239 * Note how we could move up these in the determination 17240 * above, but we don't so that way the timestamp checks (and ECN) 17241 * is done first before we do any processing on the ACK. 17242 * The non-compressed path through the code has this 17243 * weakness (noted by @jtl) that it actually does some 17244 * processing before verifying the timestamp information. 17245 * We don't take that path here which is why we set 17246 * the ack_val_set first, do the timestamp and ecn 17247 * processing, and then look at what we have setup. 
*/ 17249 if (ae->ack_val_set == ACK_BEHIND) { 17250 /* 17251 * Case B: flag reordering, if the window is not closed, 17252 * or it could be a keep-alive or persist probe 17253 */ 17254 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { 17255 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 17256 if (rack->r_ctl.rc_reorder_ts == 0) 17257 rack->r_ctl.rc_reorder_ts = 1; 17258 } 17259 } else if (ae->ack_val_set == ACK_DUPACK) { 17260 /* Case D */ 17261 rack_strike_dupack(rack, ae->ack); 17262 } else if (ae->ack_val_set == ACK_RWND) { 17263 /* Case C */ 17264 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 17265 ts.tv_sec = ae->timestamp / 1000000000; 17266 ts.tv_nsec = ae->timestamp % 1000000000; 17267 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 17268 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 17269 } else { 17270 rack->r_ctl.act_rcv_time = *tv; 17271 } 17272 if (rack->forced_ack) { 17273 rack_handle_probe_response(rack, tiwin, 17274 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 17275 } 17276 #ifdef TCP_ACCOUNTING 17277 win_up_req = 1; 17278 #endif 17279 win_upd_ack = ae->ack; 17280 win_seq = ae->seq; 17281 the_win = tiwin; 17282 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts); 17283 } else { 17284 /* Case A */ 17285 if (SEQ_GT(ae->ack, tp->snd_max)) { 17286 /* 17287 * We just send an ack since the incoming 17288 * ack is beyond the largest seq we sent. 17289 */ 17290 if ((tp->t_flags & TF_ACKNOW) == 0) { 17291 ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt); 17292 if (tp->t_flags & TF_ACKNOW) 17293 rack->r_wanted_output = 1; 17294 } 17295 } else { 17296 nsegs++; 17297 /* If the window changed setup to update */ 17298 if (tiwin != tp->snd_wnd) { 17299 win_upd_ack = ae->ack; 17300 win_seq = ae->seq; 17301 the_win = tiwin; 17302 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts); 17303 } 17304 #ifdef TCP_ACCOUNTING 17305 /* Account for the acks */ 17306 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17307 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); 17308 } 17309 #endif 17310 high_seq = ae->ack; 17311 /* Setup our act_rcv_time */ 17312 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 17313 ts.tv_sec = ae->timestamp / 1000000000; 17314 ts.tv_nsec = ae->timestamp % 1000000000; 17315 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 17316 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 17317 } else { 17318 rack->r_ctl.act_rcv_time = *tv; 17319 } 17320 rack_process_to_cumack(tp, rack, ae->ack, cts, to, 17321 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); 17322 #ifdef TCP_REQUEST_TRK 17323 rack_req_check_for_comp(rack, high_seq); 17324 #endif 17325 if (rack->rc_dsack_round_seen) { 17326 /* Is the dsack round over?
*/ 17327 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { 17328 /* Yes it is */ 17329 rack->rc_dsack_round_seen = 0; 17330 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 17331 } 17332 } 17333 } 17334 } 17335 /* And lets be sure to commit the rtt measurements for this ack */ 17336 tcp_rack_xmit_timer_commit(rack, tp); 17337 #ifdef TCP_ACCOUNTING 17338 rdstc = get_cyclecount(); 17339 if (rdstc > ts_val) { 17340 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17341 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 17342 if (ae->ack_val_set == ACK_CUMACK) 17343 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); 17344 } 17345 } 17346 #endif 17347 } 17348 #ifdef TCP_ACCOUNTING 17349 ts_val = get_cyclecount(); 17350 #endif 17351 /* Tend to any collapsed window */ 17352 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) { 17353 /* The peer collapsed the window */ 17354 rack_collapsed_window(rack, (tp->snd_max - high_seq), high_seq, __LINE__); 17355 } else if (rack->rc_has_collapsed) 17356 rack_un_collapse_window(rack, __LINE__); 17357 if ((rack->r_collapse_point_valid) && 17358 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point))) 17359 rack->r_collapse_point_valid = 0; 17360 acked_amount = acked = (high_seq - tp->snd_una); 17361 if (acked) { 17362 /* 17363 * The draft (v3) calls for us to use SEQ_GEQ, but that 17364 * causes issues when we are just going app limited. Lets 17365 * instead use SEQ_GT <or> where its equal but more data 17366 * is outstanding. 17367 * 17368 * Also make sure we are on the last ack of a series. We 17369 * have to have all the ack's processed in queue to know 17370 * if there is something left outstanding. 17371 * 17372 */ 17373 if (SEQ_GEQ(high_seq, rack->r_ctl.roundends) && 17374 (rack->rc_new_rnd_needed == 0) && 17375 (nxt_pkt == 0)) { 17376 /* 17377 * We have crossed into a new round with 17378 * this th_ack value. 17379 */ 17380 rack_new_round_setup(tp, rack, high_seq); 17381 } 17382 /* 17383 * Clear the probe not answered flag 17384 * since cum-ack moved forward. 17385 */ 17386 rack->probe_not_answered = 0; 17387 if (rack->sack_attack_disable == 0) 17388 rack_do_decay(rack); 17389 if (acked >= segsiz) { 17390 /* 17391 * You only get credit for 17392 * MSS and greater (and you get extra 17393 * credit for larger cum-ack moves). 17394 */ 17395 int ac; 17396 17397 ac = acked / segsiz; 17398 rack->r_ctl.ack_count += ac; 17399 counter_u64_add(rack_ack_total, ac); 17400 } 17401 if (rack->r_ctl.ack_count > 0xfff00000) { 17402 /* 17403 * reduce the number to keep us under 17404 * a uint32_t. 17405 */ 17406 rack->r_ctl.ack_count /= 2; 17407 rack->r_ctl.sack_count /= 2; 17408 } 17409 if (tp->t_flags & TF_NEEDSYN) { 17410 /* 17411 * T/TCP: Connection was half-synchronized, and our SYN has 17412 * been ACK'd (so connection is now fully synchronized). Go 17413 * to non-starred state, increment snd_una for ACK of SYN, 17414 * and check if we can do window scaling. 17415 */ 17416 tp->t_flags &= ~TF_NEEDSYN; 17417 tp->snd_una++; 17418 acked_amount = acked = (high_seq - tp->snd_una); 17419 } 17420 if (acked > sbavail(&so->so_snd)) 17421 acked_amount = sbavail(&so->so_snd); 17422 #ifdef TCP_SAD_DETECTION 17423 /* 17424 * We only care on a cum-ack move if we are in a sack-disabled 17425 * state. We have already added in to the ack_count, and we never 17426 * would disable on a cum-ack move, so we only care to do the 17427 * detection if it may "undo" it, i.e. we were in disabled already. 
17428 */ 17429 if (rack->sack_attack_disable) 17430 rack_do_detection(tp, rack, acked_amount, segsiz); 17431 #endif 17432 if (IN_FASTRECOVERY(tp->t_flags) && 17433 (rack->rack_no_prr == 0)) 17434 rack_update_prr(tp, rack, acked_amount, high_seq); 17435 if (IN_RECOVERY(tp->t_flags)) { 17436 if (SEQ_LT(high_seq, tp->snd_recover) && 17437 (SEQ_LT(high_seq, tp->snd_max))) { 17438 tcp_rack_partialack(tp); 17439 } else { 17440 rack_post_recovery(tp, high_seq); 17441 post_recovery = 1; 17442 } 17443 } else if ((rack->rto_from_rec == 1) && 17444 SEQ_GEQ(high_seq, tp->snd_recover)) { 17445 /* 17446 * We were in recovery, hit a rxt timeout 17447 * and never re-entered recovery. The timeout(s) 17448 * made up all the lost data. In such a case 17449 * we need to clear the rto_from_rec flag. 17450 */ 17451 rack->rto_from_rec = 0; 17452 } 17453 /* Handle the rack-log-ack part (sendmap) */ 17454 if ((sbused(&so->so_snd) == 0) && 17455 (acked > acked_amount) && 17456 (tp->t_state >= TCPS_FIN_WAIT_1) && 17457 (tp->t_flags & TF_SENTFIN)) { 17458 /* 17459 * We must be sure our fin 17460 * was sent and acked (we can be 17461 * in FIN_WAIT_1 without having 17462 * sent the fin). 17463 */ 17464 ourfinisacked = 1; 17465 /* 17466 * Lets make sure snd_una is updated 17467 * since most likely acked_amount = 0 (it 17468 * should be). 17469 */ 17470 tp->snd_una = high_seq; 17471 } 17472 /* Did we make a RTO error? */ 17473 if ((tp->t_flags & TF_PREVVALID) && 17474 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 17475 tp->t_flags &= ~TF_PREVVALID; 17476 if (tp->t_rxtshift == 1 && 17477 (int)(ticks - tp->t_badrxtwin) < 0) 17478 rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__); 17479 } 17480 /* Handle the data in the socket buffer */ 17481 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 17482 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 17483 if (acked_amount > 0) { 17484 uint32_t p_cwnd; 17485 struct mbuf *mfree; 17486 17487 if (post_recovery) { 17488 /* 17489 * Grab the segsiz, multiply by 2 and add the snd_cwnd 17490 * that is the max the CC should add if we are exiting 17491 * recovery and doing a late add. 
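 *
 * In other words the post-recovery cap is p_cwnd = snd_cwnd +
 * 2 * min(ctf_fixed_maxseg(tp), rc_pace_min_segs); if the CC module
 * (e.g. cubic) pushes snd_cwnd past that on this ack it is clipped
 * back down to p_cwnd right after rack_ack_received().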
17492 */ 17493 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 17494 p_cwnd <<= 1; 17495 p_cwnd += tp->snd_cwnd; 17496 } 17497 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, post_recovery); 17498 if (post_recovery && (tp->snd_cwnd > p_cwnd)) { 17499 /* Must be non-newreno (cubic) getting too ahead of itself */ 17500 tp->snd_cwnd = p_cwnd; 17501 } 17502 SOCKBUF_LOCK(&so->so_snd); 17503 mfree = sbcut_locked(&so->so_snd, acked_amount); 17504 tp->snd_una = high_seq; 17505 /* Note we want to hold the sb lock through the sendmap adjust */ 17506 rack_adjust_sendmap_head(rack, &so->so_snd); 17507 /* Wake up the socket if we have room to write more */ 17508 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 17509 sowwakeup_locked(so); 17510 m_freem(mfree); 17511 } 17512 /* update progress */ 17513 tp->t_acktime = ticks; 17514 rack_log_progress_event(rack, tp, tp->t_acktime, 17515 PROGRESS_UPDATE, __LINE__); 17516 /* Clear out shifts and such */ 17517 tp->t_rxtshift = 0; 17518 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 17519 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 17520 rack->rc_tlp_in_progress = 0; 17521 rack->r_ctl.rc_tlp_cnt_out = 0; 17522 /* Send recover and snd_nxt must be dragged along */ 17523 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 17524 tp->snd_recover = tp->snd_una; 17525 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 17526 tp->snd_nxt = tp->snd_max; 17527 /* 17528 * If the RXT timer is running we want to 17529 * stop it, so we can restart a TLP (or new RXT). 17530 */ 17531 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 17532 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 17533 tp->snd_wl2 = high_seq; 17534 tp->t_dupacks = 0; 17535 if (under_pacing && 17536 (rack->use_fixed_rate == 0) && 17537 (rack->in_probe_rtt == 0) && 17538 rack->rc_gp_dyn_mul && 17539 rack->rc_always_pace) { 17540 /* Check if we are dragging bottom */ 17541 rack_check_bottom_drag(tp, rack, so); 17542 } 17543 if (tp->snd_una == tp->snd_max) { 17544 tp->t_flags &= ~TF_PREVVALID; 17545 rack->r_ctl.retran_during_recovery = 0; 17546 rack->rc_suspicious = 0; 17547 rack->r_ctl.dsack_byte_cnt = 0; 17548 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 17549 if (rack->r_ctl.rc_went_idle_time == 0) 17550 rack->r_ctl.rc_went_idle_time = 1; 17551 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 17552 if (sbavail(&tptosocket(tp)->so_snd) == 0) 17553 tp->t_acktime = 0; 17554 /* Set so we might enter persists... */ 17555 rack->r_wanted_output = 1; 17556 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 17557 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 17558 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 17559 (sbavail(&so->so_snd) == 0) && 17560 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 17561 /* 17562 * The socket was gone and the 17563 * peer sent data (not now in the past), time to 17564 * reset him. 
17565 */ 17566 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 17567 /* tcp_close will kill the inp pre-log the Reset */ 17568 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 17569 #ifdef TCP_ACCOUNTING 17570 rdstc = get_cyclecount(); 17571 if (rdstc > ts_val) { 17572 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17573 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 17574 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 17575 } 17576 } 17577 #endif 17578 m_freem(m); 17579 tp = tcp_close(tp); 17580 if (tp == NULL) { 17581 #ifdef TCP_ACCOUNTING 17582 sched_unpin(); 17583 #endif 17584 return (1); 17585 } 17586 /* 17587 * We would normally do drop-with-reset which would 17588 * send back a reset. We can't since we don't have 17589 * all the needed bits. Instead lets arrange for 17590 * a call to tcp_output(). That way since we 17591 * are in the closed state we will generate a reset. 17592 * 17593 * Note if tcp_accounting is on we don't unpin since 17594 * we do that after the goto label. 17595 */ 17596 goto send_out_a_rst; 17597 } 17598 if ((sbused(&so->so_snd) == 0) && 17599 (tp->t_state >= TCPS_FIN_WAIT_1) && 17600 (tp->t_flags & TF_SENTFIN)) { 17601 /* 17602 * If we can't receive any more data, then closing user can 17603 * proceed. Starting the timer is contrary to the 17604 * specification, but if we don't get a FIN we'll hang 17605 * forever. 17606 * 17607 */ 17608 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 17609 soisdisconnected(so); 17610 tcp_timer_activate(tp, TT_2MSL, 17611 (tcp_fast_finwait2_recycle ? 17612 tcp_finwait2_timeout : 17613 TP_MAXIDLE(tp))); 17614 } 17615 if (ourfinisacked == 0) { 17616 /* 17617 * We don't change to fin-wait-2 if we have our fin acked 17618 * which means we are probably in TCPS_CLOSING. 17619 */ 17620 tcp_state_change(tp, TCPS_FIN_WAIT_2); 17621 } 17622 } 17623 } 17624 /* Wake up the socket if we have room to write more */ 17625 if (sbavail(&so->so_snd)) { 17626 rack->r_wanted_output = 1; 17627 if (ctf_progress_timeout_check(tp, true)) { 17628 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 17629 tp, tick, PROGRESS_DROP, __LINE__); 17630 /* 17631 * We cheat here and don't send a RST, we should send one 17632 * when the pacer drops the connection. 
17633 */ 17634 #ifdef TCP_ACCOUNTING 17635 rdstc = get_cyclecount(); 17636 if (rdstc > ts_val) { 17637 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17638 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 17639 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 17640 } 17641 } 17642 sched_unpin(); 17643 #endif 17644 (void)tcp_drop(tp, ETIMEDOUT); 17645 m_freem(m); 17646 return (1); 17647 } 17648 } 17649 if (ourfinisacked) { 17650 switch(tp->t_state) { 17651 case TCPS_CLOSING: 17652 #ifdef TCP_ACCOUNTING 17653 rdstc = get_cyclecount(); 17654 if (rdstc > ts_val) { 17655 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17656 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 17657 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 17658 } 17659 } 17660 sched_unpin(); 17661 #endif 17662 tcp_twstart(tp); 17663 m_freem(m); 17664 return (1); 17665 break; 17666 case TCPS_LAST_ACK: 17667 #ifdef TCP_ACCOUNTING 17668 rdstc = get_cyclecount(); 17669 if (rdstc > ts_val) { 17670 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17671 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 17672 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 17673 } 17674 } 17675 sched_unpin(); 17676 #endif 17677 tp = tcp_close(tp); 17678 ctf_do_drop(m, tp); 17679 return (1); 17680 break; 17681 case TCPS_FIN_WAIT_1: 17682 #ifdef TCP_ACCOUNTING 17683 rdstc = get_cyclecount(); 17684 if (rdstc > ts_val) { 17685 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17686 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 17687 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 17688 } 17689 } 17690 #endif 17691 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 17692 soisdisconnected(so); 17693 tcp_timer_activate(tp, TT_2MSL, 17694 (tcp_fast_finwait2_recycle ? 17695 tcp_finwait2_timeout : 17696 TP_MAXIDLE(tp))); 17697 } 17698 tcp_state_change(tp, TCPS_FIN_WAIT_2); 17699 break; 17700 default: 17701 break; 17702 } 17703 } 17704 if (rack->r_fast_output) { 17705 /* 17706 * We re doing fast output.. can we expand that? 
17707 */ 17708 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 17709 } 17710 #ifdef TCP_ACCOUNTING 17711 rdstc = get_cyclecount(); 17712 if (rdstc > ts_val) { 17713 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17714 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 17715 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 17716 } 17717 } 17718 17719 } else if (win_up_req) { 17720 rdstc = get_cyclecount(); 17721 if (rdstc > ts_val) { 17722 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17723 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 17724 } 17725 } 17726 #endif 17727 } 17728 /* Now is there a next packet, if so we are done */ 17729 m_freem(m); 17730 did_out = 0; 17731 if (nxt_pkt) { 17732 #ifdef TCP_ACCOUNTING 17733 sched_unpin(); 17734 #endif 17735 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 17736 return (0); 17737 } 17738 rack_handle_might_revert(tp, rack); 17739 ctf_calc_rwin(so, tp); 17740 if ((rack->r_wanted_output != 0) || 17741 (rack->r_fast_output != 0) || 17742 (tp->t_flags & TF_ACKNOW )) { 17743 send_out_a_rst: 17744 if (tcp_output(tp) < 0) { 17745 #ifdef TCP_ACCOUNTING 17746 sched_unpin(); 17747 #endif 17748 return (1); 17749 } 17750 did_out = 1; 17751 } 17752 if (tp->t_flags2 & TF2_HPTS_CALLS) 17753 tp->t_flags2 &= ~TF2_HPTS_CALLS; 17754 rack_free_trim(rack); 17755 #ifdef TCP_ACCOUNTING 17756 sched_unpin(); 17757 #endif 17758 rack_timer_audit(tp, rack, &so->so_snd); 17759 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 17760 return (0); 17761 } 17762 17763 #define TCP_LRO_TS_OPTION \ 17764 ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \ 17765 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP) 17766 17767 static int 17768 rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 17769 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, int32_t nxt_pkt, 17770 struct timeval *tv) 17771 { 17772 struct inpcb *inp = tptoinpcb(tp); 17773 struct socket *so = tptosocket(tp); 17774 #ifdef TCP_ACCOUNTING 17775 uint64_t ts_val; 17776 #endif 17777 int32_t thflags, retval, did_out = 0; 17778 int32_t way_out = 0; 17779 /* 17780 * cts - is the current time from tv (caller gets ts) in microseconds. 17781 * ms_cts - is the current time from tv in milliseconds. 17782 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. 17783 */ 17784 uint32_t cts, us_cts, ms_cts; 17785 uint32_t tiwin; 17786 struct timespec ts; 17787 struct tcpopt to; 17788 struct tcp_rack *rack; 17789 struct rack_sendmap *rsm; 17790 int32_t prev_state = 0; 17791 int no_output = 0; 17792 int slot_remaining = 0; 17793 #ifdef TCP_ACCOUNTING 17794 int ack_val_set = 0xf; 17795 #endif 17796 int nsegs; 17797 17798 NET_EPOCH_ASSERT(); 17799 INP_WLOCK_ASSERT(inp); 17800 17801 /* 17802 * tv passed from common code is from either M_TSTMP_LRO or 17803 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 17804 */ 17805 rack = (struct tcp_rack *)tp->t_fb_ptr; 17806 if (rack->rack_deferred_inited == 0) { 17807 /* 17808 * If we are the connecting socket we will 17809 * hit rack_init() when no sequence numbers 17810 * are setup. This makes it so we must defer 17811 * some initialization. Call that now. 17812 */ 17813 rack_deferred_init(tp, rack); 17814 } 17815 /* 17816 * Check to see if we need to skip any output plans. This 17817 * can happen in the non-LRO path where we are pacing and 17818 * must process the ack coming in but need to defer sending 17819 * anything becase a pacing timer is running. 
17820 */ 17821 us_cts = tcp_tv_to_usectick(tv); 17822 if (m->m_flags & M_ACKCMP) { 17823 /* 17824 * All compressed ack's are ack's by definition so 17825 * remove any ack required flag and then do the processing. 17826 */ 17827 rack->rc_ack_required = 0; 17828 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 17829 } 17830 thflags = tcp_get_flags(th); 17831 if ((rack->rc_always_pace == 1) && 17832 (rack->rc_ack_can_sendout_data == 0) && 17833 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 17834 (TSTMP_LT(us_cts, rack->r_ctl.rc_last_output_to))) { 17835 /* 17836 * Ok conditions are right for queuing the packets 17837 * but we do have to check the flags in the inp, it 17838 * could be, if a sack is present, we want to be awoken and 17839 * so should process the packets. 17840 */ 17841 slot_remaining = rack->r_ctl.rc_last_output_to - us_cts; 17842 if (rack->rc_tp->t_flags2 & TF2_DONT_SACK_QUEUE) { 17843 no_output = 1; 17844 } else { 17845 /* 17846 * If there is no options, or just a 17847 * timestamp option, we will want to queue 17848 * the packets. This is the same that LRO does 17849 * and will need to change with accurate ECN. 17850 */ 17851 uint32_t *ts_ptr; 17852 int optlen; 17853 17854 optlen = (th->th_off << 2) - sizeof(struct tcphdr); 17855 ts_ptr = (uint32_t *)(th + 1); 17856 if ((optlen == 0) || 17857 ((optlen == TCPOLEN_TSTAMP_APPA) && 17858 (*ts_ptr == TCP_LRO_TS_OPTION))) 17859 no_output = 1; 17860 } 17861 if ((no_output == 1) && (slot_remaining < tcp_min_hptsi_time)) { 17862 /* 17863 * It is unrealistic to think we can pace in less than 17864 * the minimum granularity of the pacer (def:250usec). So 17865 * if we have less than that time remaining we should go 17866 * ahead and allow output to be "early". We will attempt to 17867 * make up for it in any pacing time we try to apply on 17868 * the outbound packet. 17869 */ 17870 no_output = 0; 17871 } 17872 } 17873 /* 17874 * If there is a RST or FIN lets dump out the bw 17875 * with a FIN the connection may go on but we 17876 * may not. 17877 */ 17878 if ((thflags & TH_FIN) || (thflags & TH_RST)) 17879 rack_log_pacing_delay_calc(rack, 17880 rack->r_ctl.gp_bw, 17881 0, 17882 0, 17883 rack_get_gp_est(rack), /* delRate */ 17884 rack_get_lt_bw(rack), /* rttProp */ 17885 20, __LINE__, NULL, 0); 17886 if (m->m_flags & M_ACKCMP) { 17887 panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp); 17888 } 17889 cts = tcp_tv_to_usectick(tv); 17890 ms_cts = tcp_tv_to_mssectick(tv); 17891 nsegs = m->m_pkthdr.lro_nsegs; 17892 counter_u64_add(rack_proc_non_comp_ack, 1); 17893 #ifdef TCP_ACCOUNTING 17894 sched_pin(); 17895 if (thflags & TH_ACK) 17896 ts_val = get_cyclecount(); 17897 #endif 17898 if ((m->m_flags & M_TSTMP) || 17899 (m->m_flags & M_TSTMP_LRO)) { 17900 mbuf_tstmp2timespec(m, &ts); 17901 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 17902 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 17903 } else 17904 rack->r_ctl.act_rcv_time = *tv; 17905 kern_prefetch(rack, &prev_state); 17906 prev_state = 0; 17907 /* 17908 * Unscale the window into a 32-bit value. For the SYN_SENT state 17909 * the scale is zero. 17910 */ 17911 tiwin = th->th_win << tp->snd_scale; 17912 #ifdef TCP_ACCOUNTING 17913 if (thflags & TH_ACK) { 17914 /* 17915 * We have a tradeoff here. We can either do what we are 17916 * doing i.e. pinning to this CPU and then doing the accounting 17917 * <or> we could do a critical enter, setup the rdtsc and cpu 17918 * as in below, and then validate we are on the same CPU on 17919 * exit. 
I have chosen not to do the critical enter since 17920 * that often will gain you a context switch, and instead lock 17921 * us (line above this if) to the same CPU with sched_pin(). This 17922 * means we may be context switched out for a higher priority 17923 * interrupt but we won't be moved to another CPU. 17924 * 17925 * If this occurs (which it won't very often since we most likely 17926 * are running this code in interrupt context and only a higher 17927 * priority will bump us ... clock?) we will falsely add the 17928 * interrupt processing time on top of the ack processing 17929 * time. This is ok since it's a rare event. 17930 */ 17931 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin, 17932 ctf_fixed_maxseg(tp)); 17933 } 17934 #endif 17935 /* 17936 * Parse options on any incoming segment. 17937 */ 17938 memset(&to, 0, sizeof(to)); 17939 tcp_dooptions(&to, (u_char *)(th + 1), 17940 (th->th_off << 2) - sizeof(struct tcphdr), 17941 (thflags & TH_SYN) ? TO_SYN : 0); 17942 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 17943 __func__)); 17944 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 17945 __func__)); 17946 if (tp->t_flags2 & TF2_PROC_SACK_PROHIBIT) { 17947 /* 17948 * We don't look at SACKs from the 17949 * peer because the MSS is too small which 17950 * can subject us to an attack. 17951 */ 17952 to.to_flags &= ~TOF_SACK; 17953 } 17954 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 17955 (tp->t_flags & TF_GPUTINPROG)) { 17956 /* 17957 * We have a goodput in progress 17958 * and we have entered a late state. 17959 * Do we have enough data in the sb 17960 * to handle the GPUT request? 17961 */ 17962 uint32_t bytes; 17963 17964 bytes = tp->gput_ack - tp->gput_seq; 17965 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 17966 bytes += tp->gput_seq - tp->snd_una; 17967 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 17968 /* 17969 * There are not enough bytes in the socket 17970 * buffer that have been sent to cover this 17971 * measurement. Cancel it.
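 *
 * That is, bytes = (gput_ack - gput_seq), plus (gput_seq - snd_una)
 * when the measurement window starts beyond snd_una; once that total
 * exceeds sbavail() the measurement can never complete, so
 * TF_GPUTINPROG is cleared.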
17972 */ 17973 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 17974 rack->r_ctl.rc_gp_srtt /*flex1*/, 17975 tp->gput_seq, 17976 0, 0, 18, __LINE__, NULL, 0); 17977 tp->t_flags &= ~TF_GPUTINPROG; 17978 } 17979 } 17980 if (tcp_bblogging_on(rack->rc_tp)) { 17981 union tcp_log_stackspecific log; 17982 struct timeval ltv; 17983 #ifdef TCP_REQUEST_TRK 17984 struct tcp_sendfile_track *tcp_req; 17985 17986 if (SEQ_GT(th->th_ack, tp->snd_una)) { 17987 tcp_req = tcp_req_find_req_for_seq(tp, (th->th_ack-1)); 17988 } else { 17989 tcp_req = tcp_req_find_req_for_seq(tp, th->th_ack); 17990 } 17991 #endif 17992 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 17993 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 17994 if (rack->rack_no_prr == 0) 17995 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 17996 else 17997 log.u_bbr.flex1 = 0; 17998 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 17999 log.u_bbr.use_lt_bw <<= 1; 18000 log.u_bbr.use_lt_bw |= rack->r_might_revert; 18001 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 18002 log.u_bbr.bbr_state = rack->rc_free_cnt; 18003 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18004 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 18005 log.u_bbr.flex3 = m->m_flags; 18006 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 18007 log.u_bbr.lost = thflags; 18008 log.u_bbr.pacing_gain = 0x1; 18009 #ifdef TCP_ACCOUNTING 18010 log.u_bbr.cwnd_gain = ack_val_set; 18011 #endif 18012 log.u_bbr.flex7 = 2; 18013 if (m->m_flags & M_TSTMP) { 18014 /* Record the hardware timestamp if present */ 18015 mbuf_tstmp2timespec(m, &ts); 18016 ltv.tv_sec = ts.tv_sec; 18017 ltv.tv_usec = ts.tv_nsec / 1000; 18018 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 18019 } else if (m->m_flags & M_TSTMP_LRO) { 18020 /* Record the LRO the arrival timestamp */ 18021 mbuf_tstmp2timespec(m, &ts); 18022 ltv.tv_sec = ts.tv_sec; 18023 ltv.tv_usec = ts.tv_nsec / 1000; 18024 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 18025 } 18026 log.u_bbr.timeStamp = tcp_get_usecs(<v); 18027 /* Log the rcv time */ 18028 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp; 18029 #ifdef TCP_REQUEST_TRK 18030 log.u_bbr.applimited = tp->t_tcpreq_closed; 18031 log.u_bbr.applimited <<= 8; 18032 log.u_bbr.applimited |= tp->t_tcpreq_open; 18033 log.u_bbr.applimited <<= 8; 18034 log.u_bbr.applimited |= tp->t_tcpreq_req; 18035 if (tcp_req) { 18036 /* Copy out any client req info */ 18037 /* seconds */ 18038 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); 18039 /* useconds */ 18040 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); 18041 log.u_bbr.rttProp = tcp_req->timestamp; 18042 log.u_bbr.cur_del_rate = tcp_req->start; 18043 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { 18044 log.u_bbr.flex8 |= 1; 18045 } else { 18046 log.u_bbr.flex8 |= 2; 18047 log.u_bbr.bw_inuse = tcp_req->end; 18048 } 18049 log.u_bbr.flex6 = tcp_req->start_seq; 18050 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { 18051 log.u_bbr.flex8 |= 4; 18052 log.u_bbr.epoch = tcp_req->end_seq; 18053 } 18054 } 18055 #endif 18056 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, 18057 tlen, &log, true, <v); 18058 } 18059 /* Remove ack required flag if set, we have one */ 18060 if (thflags & TH_ACK) 18061 rack->rc_ack_required = 0; 18062 if (rack->sack_attack_disable > 0) { 18063 rack->r_ctl.ack_during_sd++; 18064 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__); 18065 } 18066 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) { 18067 way_out = 4; 18068 retval = 0; 18069 m_freem(m); 18070 goto 
done_with_input; 18071 } 18072 /* 18073 * If a segment with the ACK-bit set arrives in the SYN-SENT state 18074 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9. 18075 */ 18076 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 18077 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 18078 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 18079 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 18080 #ifdef TCP_ACCOUNTING 18081 sched_unpin(); 18082 #endif 18083 return (1); 18084 } 18085 /* 18086 * If timestamps were negotiated during SYN/ACK and a 18087 * segment without a timestamp is received, silently drop 18088 * the segment, unless it is a RST segment or missing timestamps are 18089 * tolerated. 18090 * See section 3.2 of RFC 7323. 18091 */ 18092 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 18093 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 18094 way_out = 5; 18095 retval = 0; 18096 m_freem(m); 18097 goto done_with_input; 18098 } 18099 /* 18100 * Segment received on connection. Reset idle time and keep-alive 18101 * timer. XXX: This should be done after segment validation to 18102 * ignore broken/spoofed segs. 18103 */ 18104 if (tp->t_idle_reduce && 18105 (tp->snd_max == tp->snd_una) && 18106 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 18107 counter_u64_add(rack_input_idle_reduces, 1); 18108 rack_cc_after_idle(rack, tp); 18109 } 18110 tp->t_rcvtime = ticks; 18111 #ifdef STATS 18112 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 18113 #endif 18114 if (tiwin > rack->r_ctl.rc_high_rwnd) 18115 rack->r_ctl.rc_high_rwnd = tiwin; 18116 /* 18117 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 18118 * this to occur after we've validated the segment. 18119 */ 18120 if (tcp_ecn_input_segment(tp, thflags, tlen, 18121 tcp_packets_this_ack(tp, th->th_ack), 18122 iptos)) 18123 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__); 18124 18125 /* 18126 * If echoed timestamp is later than the current time, fall back to 18127 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 18128 * were used when this connection was established. 18129 */ 18130 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 18131 to.to_tsecr -= tp->ts_offset; 18132 if (TSTMP_GT(to.to_tsecr, ms_cts)) 18133 to.to_tsecr = 0; 18134 } 18135 if ((rack->r_rcvpath_rtt_up == 1) && 18136 (to.to_flags & TOF_TS) && 18137 (TSTMP_GEQ(to.to_tsecr, rack->r_ctl.last_rcv_tstmp_for_rtt))) { 18138 uint32_t rtt = 0; 18139 18140 /* 18141 * We are receiving only and thus not sending 18142 * data to do an RTT. We set a flag when we first 18143 * sent this TS to the peer. We now have it back 18144 * and have an RTT to share. We log it as a conf 18145 * 4, we are not so sure about it.. since we 18146 * may have lost an ack. 18147 */ 18148 if (TSTMP_GT(cts, rack->r_ctl.last_time_of_arm_rcv)) 18149 rtt = (cts - rack->r_ctl.last_time_of_arm_rcv); 18150 rack->r_rcvpath_rtt_up = 0; 18151 /* Submit and commit the timer */ 18152 if (rtt > 0) { 18153 tcp_rack_xmit_timer(rack, rtt, 0, rtt, 4, NULL, 1); 18154 tcp_rack_xmit_timer_commit(rack, tp); 18155 } 18156 } 18157 /* 18158 * If its the first time in we need to take care of options and 18159 * verify we can do SACK for rack! 
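 *
 * Descriptive note: the r_state == 0 block below handles the first
 * segment we process. On a SYN/ACK it records the window scale,
 * timestamp, MSS, SACK-permitted and TFO options, and if SACK was
 * not negotiated (and rack_sack_not_required is clear) the
 * connection is handed back to the default TCP stack via
 * tcp_switch_back_to_default() before any RACK state is relied on.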
18160 */ 18161 if (rack->r_state == 0) { 18162 /* Should be init'd by rack_init() */ 18163 KASSERT(rack->rc_inp != NULL, 18164 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 18165 if (rack->rc_inp == NULL) { 18166 rack->rc_inp = inp; 18167 } 18168 18169 /* 18170 * Process options only when we get SYN/ACK back. The SYN 18171 * case for incoming connections is handled in tcp_syncache. 18172 * According to RFC1323 the window field in a SYN (i.e., a 18173 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 18174 * this is traditional behavior, may need to be cleaned up. 18175 */ 18176 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 18177 /* Handle parallel SYN for ECN */ 18178 tcp_ecn_input_parallel_syn(tp, thflags, iptos); 18179 if ((to.to_flags & TOF_SCALE) && 18180 (tp->t_flags & TF_REQ_SCALE)) { 18181 tp->t_flags |= TF_RCVD_SCALE; 18182 tp->snd_scale = to.to_wscale; 18183 } else 18184 tp->t_flags &= ~TF_REQ_SCALE; 18185 /* 18186 * Initial send window. It will be updated with the 18187 * next incoming segment to the scaled value. 18188 */ 18189 tp->snd_wnd = th->th_win; 18190 rack_validate_fo_sendwin_up(tp, rack); 18191 if ((to.to_flags & TOF_TS) && 18192 (tp->t_flags & TF_REQ_TSTMP)) { 18193 tp->t_flags |= TF_RCVD_TSTMP; 18194 tp->ts_recent = to.to_tsval; 18195 tp->ts_recent_age = cts; 18196 } else 18197 tp->t_flags &= ~TF_REQ_TSTMP; 18198 if (to.to_flags & TOF_MSS) { 18199 tcp_mss(tp, to.to_mss); 18200 } 18201 if ((tp->t_flags & TF_SACK_PERMIT) && 18202 (to.to_flags & TOF_SACKPERM) == 0) 18203 tp->t_flags &= ~TF_SACK_PERMIT; 18204 if (tp->t_flags & TF_FASTOPEN) { 18205 if (to.to_flags & TOF_FASTOPEN) { 18206 uint16_t mss; 18207 18208 if (to.to_flags & TOF_MSS) 18209 mss = to.to_mss; 18210 else 18211 if ((inp->inp_vflag & INP_IPV6) != 0) 18212 mss = TCP6_MSS; 18213 else 18214 mss = TCP_MSS; 18215 tcp_fastopen_update_cache(tp, mss, 18216 to.to_tfo_len, to.to_tfo_cookie); 18217 } else 18218 tcp_fastopen_disable_path(tp); 18219 } 18220 } 18221 /* 18222 * At this point we are at the initial call. Here we decide 18223 * if we are doing RACK or not. We do this by seeing if 18224 * TF_SACK_PERMIT is set and the sack-not-required is clear. 18225 * The code now does do dup-ack counting so if you don't 18226 * switch back you won't get rack & TLP, but you will still 18227 * get this stack. 18228 */ 18229 18230 if ((rack_sack_not_required == 0) && 18231 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 18232 tcp_switch_back_to_default(tp); 18233 (*tp->t_fb->tfb_tcp_do_segment)(tp, m, th, drop_hdrlen, 18234 tlen, iptos); 18235 #ifdef TCP_ACCOUNTING 18236 sched_unpin(); 18237 #endif 18238 return (1); 18239 } 18240 tcp_set_hpts(tp); 18241 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 18242 } 18243 if (thflags & TH_FIN) 18244 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 18245 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 18246 if ((rack->rc_gp_dyn_mul) && 18247 (rack->use_fixed_rate == 0) && 18248 (rack->rc_always_pace)) { 18249 /* Check in on probertt */ 18250 rack_check_probe_rtt(rack, cts); 18251 } 18252 rack_clear_rate_sample(rack); 18253 if ((rack->forced_ack) && 18254 ((tcp_get_flags(th) & TH_RST) == 0)) { 18255 rack_handle_probe_response(rack, tiwin, us_cts); 18256 } 18257 /* 18258 * This is the one exception case where we set the rack state 18259 * always. All other times (timers etc) we must have a rack-state 18260 * set (so we assure we have done the checks above for SACK). 
18261 */ 18262 rack->r_ctl.rc_rcvtime = cts; 18263 if (rack->r_state != tp->t_state) 18264 rack_set_state(tp, rack); 18265 if (SEQ_GT(th->th_ack, tp->snd_una) && 18266 (rsm = tqhash_min(rack->r_ctl.tqh)) != NULL) 18267 kern_prefetch(rsm, &prev_state); 18268 prev_state = rack->r_state; 18269 if ((thflags & TH_RST) && 18270 ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 18271 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || 18272 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq))) { 18273 /* The connection will be killed by a reset check the tracepoint */ 18274 tcp_trace_point(rack->rc_tp, TCP_TP_RESET_RCV); 18275 } 18276 retval = (*rack->r_substate) (m, th, so, 18277 tp, &to, drop_hdrlen, 18278 tlen, tiwin, thflags, nxt_pkt, iptos); 18279 if (retval == 0) { 18280 /* 18281 * If retval is 1 the tcb is unlocked and most likely the tp 18282 * is gone. 18283 */ 18284 INP_WLOCK_ASSERT(inp); 18285 if ((rack->rc_gp_dyn_mul) && 18286 (rack->rc_always_pace) && 18287 (rack->use_fixed_rate == 0) && 18288 rack->in_probe_rtt && 18289 (rack->r_ctl.rc_time_probertt_starts == 0)) { 18290 /* 18291 * If we are going for target, lets recheck before 18292 * we output. 18293 */ 18294 rack_check_probe_rtt(rack, cts); 18295 } 18296 if (rack->set_pacing_done_a_iw == 0) { 18297 /* How much has been acked? */ 18298 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 18299 /* We have enough to set in the pacing segment size */ 18300 rack->set_pacing_done_a_iw = 1; 18301 rack_set_pace_segments(tp, rack, __LINE__, NULL); 18302 } 18303 } 18304 tcp_rack_xmit_timer_commit(rack, tp); 18305 #ifdef TCP_ACCOUNTING 18306 /* 18307 * If we set the ack_val_se to what ack processing we are doing 18308 * we also want to track how many cycles we burned. Note 18309 * the bits after tcp_output we let be "free". This is because 18310 * we are also tracking the tcp_output times as well. Note the 18311 * use of 0xf here since we only have 11 counter (0 - 0xa) and 18312 * 0xf cannot be returned and is what we initialize it too to 18313 * indicate we are not doing the tabulations. 18314 */ 18315 if (ack_val_set != 0xf) { 18316 uint64_t crtsc; 18317 18318 crtsc = get_cyclecount(); 18319 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18320 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 18321 } 18322 } 18323 #endif 18324 if ((nxt_pkt == 0) && (no_output == 0)) { 18325 if ((rack->r_wanted_output != 0) || 18326 (tp->t_flags & TF_ACKNOW) || 18327 (rack->r_fast_output != 0)) { 18328 18329 do_output_now: 18330 if (tcp_output(tp) < 0) { 18331 #ifdef TCP_ACCOUNTING 18332 sched_unpin(); 18333 #endif 18334 return (1); 18335 } 18336 did_out = 1; 18337 } 18338 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 18339 rack_free_trim(rack); 18340 } else if ((nxt_pkt == 0) && (tp->t_flags & TF_ACKNOW)) { 18341 goto do_output_now; 18342 } else if ((no_output == 1) && 18343 (nxt_pkt == 0) && 18344 (tcp_in_hpts(rack->rc_tp) == 0)) { 18345 /* 18346 * We are not in hpts and we had a pacing timer up. Use 18347 * the remaining time (slot_remaining) to restart the timer. 
18348 */ 18349 KASSERT ((slot_remaining != 0), ("slot remaining is zero for rack:%p tp:%p", rack, tp)); 18350 rack_start_hpts_timer(rack, tp, cts, slot_remaining, 0, 0); 18351 rack_free_trim(rack); 18352 } 18353 /* Clear the flag, it may have been cleared by output but we may not have */ 18354 if ((nxt_pkt == 0) && (tp->t_flags2 & TF2_HPTS_CALLS)) 18355 tp->t_flags2 &= ~TF2_HPTS_CALLS; 18356 /* 18357 * The draft (v3) calls for us to use SEQ_GEQ, but that 18358 * causes issues when we are just going app limited. Lets 18359 * instead use SEQ_GT <or> where its equal but more data 18360 * is outstanding. 18361 * 18362 * Also make sure we are on the last ack of a series. We 18363 * have to have all the ack's processed in queue to know 18364 * if there is something left outstanding. 18365 */ 18366 if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends) && 18367 (rack->rc_new_rnd_needed == 0) && 18368 (nxt_pkt == 0)) { 18369 /* 18370 * We have crossed into a new round with 18371 * the new snd_unae. 18372 */ 18373 rack_new_round_setup(tp, rack, tp->snd_una); 18374 } 18375 if ((nxt_pkt == 0) && 18376 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 18377 (SEQ_GT(tp->snd_max, tp->snd_una) || 18378 (tp->t_flags & TF_DELACK) || 18379 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 18380 (tp->t_state <= TCPS_CLOSING)))) { 18381 /* We could not send (probably in the hpts but stopped the timer earlier)? */ 18382 if ((tp->snd_max == tp->snd_una) && 18383 ((tp->t_flags & TF_DELACK) == 0) && 18384 (tcp_in_hpts(rack->rc_tp)) && 18385 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 18386 /* keep alive not needed if we are hptsi output yet */ 18387 ; 18388 } else { 18389 int late = 0; 18390 if (tcp_in_hpts(tp)) { 18391 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 18392 us_cts = tcp_get_usecs(NULL); 18393 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 18394 rack->r_early = 1; 18395 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 18396 } else 18397 late = 1; 18398 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 18399 } 18400 tcp_hpts_remove(tp); 18401 } 18402 if (late && (did_out == 0)) { 18403 /* 18404 * We are late in the sending 18405 * and we did not call the output 18406 * (this probably should not happen). 18407 */ 18408 goto do_output_now; 18409 } 18410 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 18411 } 18412 way_out = 1; 18413 } else if (nxt_pkt == 0) { 18414 /* Do we have the correct timer running? */ 18415 rack_timer_audit(tp, rack, &so->so_snd); 18416 way_out = 2; 18417 } 18418 done_with_input: 18419 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); 18420 if (did_out) 18421 rack->r_wanted_output = 0; 18422 } 18423 18424 #ifdef TCP_ACCOUNTING 18425 sched_unpin(); 18426 #endif 18427 return (retval); 18428 } 18429 18430 static void 18431 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 18432 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 18433 { 18434 struct timeval tv; 18435 18436 /* First lets see if we have old packets */ 18437 if (!STAILQ_EMPTY(&tp->t_inqueue)) { 18438 if (ctf_do_queued_segments(tp, 1)) { 18439 m_freem(m); 18440 return; 18441 } 18442 } 18443 if (m->m_flags & M_TSTMP_LRO) { 18444 mbuf_tstmp2timeval(m, &tv); 18445 } else { 18446 /* Should not be should we kassert instead? 
*/ 18447 tcp_get_usecs(&tv); 18448 } 18449 if (rack_do_segment_nounlock(tp, m, th, drop_hdrlen, tlen, iptos, 0, 18450 &tv) == 0) { 18451 INP_WUNLOCK(tptoinpcb(tp)); 18452 } 18453 } 18454 18455 struct rack_sendmap * 18456 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 18457 { 18458 struct rack_sendmap *rsm = NULL; 18459 int32_t idx; 18460 uint32_t srtt = 0, thresh = 0, ts_low = 0; 18461 int no_sack = 0; 18462 18463 /* Return the next guy to be re-transmitted */ 18464 if (tqhash_empty(rack->r_ctl.tqh)) { 18465 return (NULL); 18466 } 18467 if (tp->t_flags & TF_SENTFIN) { 18468 /* retran the end FIN? */ 18469 return (NULL); 18470 } 18471 /* ok lets look at this one */ 18472 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 18473 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) { 18474 return (rsm); 18475 } 18476 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 18477 goto check_it; 18478 } 18479 rsm = rack_find_lowest_rsm(rack); 18480 if (rsm == NULL) { 18481 return (NULL); 18482 } 18483 check_it: 18484 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) || 18485 (rack->sack_attack_disable > 0)) { 18486 no_sack = 1; 18487 } 18488 if ((no_sack > 0) && 18489 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 18490 /* 18491 * No sack so we automatically do the 3 strikes and 18492 * retransmit (no rack timer would be started). 18493 */ 18494 return (rsm); 18495 } 18496 if (rsm->r_flags & RACK_ACKED) { 18497 return (NULL); 18498 } 18499 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 18500 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 18501 /* Its not yet ready */ 18502 return (NULL); 18503 } 18504 srtt = rack_grab_rtt(tp, rack); 18505 idx = rsm->r_rtr_cnt - 1; 18506 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 18507 thresh = rack_calc_thresh_rack(rack, srtt, tsused, __LINE__, 1); 18508 if ((tsused == ts_low) || 18509 (TSTMP_LT(tsused, ts_low))) { 18510 /* No time since sending */ 18511 return (NULL); 18512 } 18513 if ((tsused - ts_low) < thresh) { 18514 /* It has not been long enough yet */ 18515 return (NULL); 18516 } 18517 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 18518 ((rsm->r_flags & RACK_SACK_PASSED) && 18519 (rack->sack_attack_disable == 0))) { 18520 /* 18521 * We have passed the dup-ack threshold <or> 18522 * a SACK has indicated this is missing. 18523 * Note that if you are a declared attacker 18524 * it is only the dup-ack threshold that 18525 * will cause retransmits. 18526 */ 18527 /* log retransmit reason */ 18528 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 18529 rack->r_fast_output = 0; 18530 return (rsm); 18531 } 18532 return (NULL); 18533 } 18534 18535 static void 18536 rack_log_pacing_delay_calc (struct tcp_rack *rack, uint32_t len, uint32_t slot, 18537 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 18538 int line, struct rack_sendmap *rsm, uint8_t quality) 18539 { 18540 if (tcp_bblogging_on(rack->rc_tp)) { 18541 union tcp_log_stackspecific log; 18542 struct timeval tv; 18543 18544 if (rack_verbose_logging == 0) { 18545 /* 18546 * We are not verbose screen out all but 18547 * ones we always want. 
18548 */ 18549 if ((method != 2) && 18550 (method != 3) && 18551 (method != 7) && 18552 (method != 89) && 18553 (method != 14) && 18554 (method != 20)) { 18555 return; 18556 } 18557 } 18558 memset(&log, 0, sizeof(log)); 18559 log.u_bbr.flex1 = slot; 18560 log.u_bbr.flex2 = len; 18561 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 18562 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 18563 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 18564 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 18565 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 18566 log.u_bbr.use_lt_bw <<= 1; 18567 log.u_bbr.use_lt_bw |= rack->r_late; 18568 log.u_bbr.use_lt_bw <<= 1; 18569 log.u_bbr.use_lt_bw |= rack->r_early; 18570 log.u_bbr.use_lt_bw <<= 1; 18571 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 18572 log.u_bbr.use_lt_bw <<= 1; 18573 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 18574 log.u_bbr.use_lt_bw <<= 1; 18575 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 18576 log.u_bbr.use_lt_bw <<= 1; 18577 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 18578 log.u_bbr.use_lt_bw <<= 1; 18579 log.u_bbr.use_lt_bw |= rack->gp_ready; 18580 log.u_bbr.pkt_epoch = line; 18581 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 18582 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 18583 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 18584 log.u_bbr.bw_inuse = bw_est; 18585 log.u_bbr.delRate = bw; 18586 if (rack->r_ctl.gp_bw == 0) 18587 log.u_bbr.cur_del_rate = 0; 18588 else 18589 log.u_bbr.cur_del_rate = rack_get_bw(rack); 18590 log.u_bbr.rttProp = len_time; 18591 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 18592 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 18593 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 18594 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 18595 /* We are in slow start */ 18596 log.u_bbr.flex7 = 1; 18597 } else { 18598 /* we are on congestion avoidance */ 18599 log.u_bbr.flex7 = 0; 18600 } 18601 log.u_bbr.flex8 = method; 18602 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 18603 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18604 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 18605 log.u_bbr.cwnd_gain <<= 1; 18606 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 18607 log.u_bbr.cwnd_gain <<= 1; 18608 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 18609 log.u_bbr.bbr_substate = quality; 18610 log.u_bbr.bbr_state = rack->dgp_on; 18611 log.u_bbr.bbr_state <<= 1; 18612 log.u_bbr.bbr_state |= rack->rc_pace_to_cwnd; 18613 log.u_bbr.bbr_state <<= 2; 18614 TCP_LOG_EVENTP(rack->rc_tp, NULL, 18615 &rack->rc_inp->inp_socket->so_rcv, 18616 &rack->rc_inp->inp_socket->so_snd, 18617 BBR_LOG_HPTSI_CALC, 0, 18618 0, &log, false, &tv); 18619 } 18620 } 18621 18622 static uint32_t 18623 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 18624 { 18625 uint32_t new_tso, user_max, pace_one; 18626 18627 user_max = rack->rc_user_set_max_segs * mss; 18628 if (rack->rc_force_max_seg) { 18629 return (user_max); 18630 } 18631 if (rack->use_fixed_rate && 18632 ((rack->r_ctl.crte == NULL) || 18633 (bw != rack->r_ctl.crte->rate))) { 18634 /* Use the user mss since we are not exactly matched */ 18635 return (user_max); 18636 } 18637 if (rack_pace_one_seg || 18638 (rack->r_ctl.rc_user_set_min_segs == 1)) 18639 pace_one = 1; 18640 else 18641 pace_one = 0; 18642 18643 new_tso = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, bw, mss, 18644 pace_one, rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); 18645 if (new_tso > user_max) 18646 new_tso = user_max; 
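	/*
	 * Descriptive note: the checks below may raise new_tso back above
	 * user_max; a hybrid-mode client suggested segment count and the
	 * user set minimum segment count both override the cap applied
	 * just above.
	 */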
18647 if (rack->rc_hybrid_mode && rack->r_ctl.client_suggested_maxseg) {
18648 if (((uint32_t)rack->r_ctl.client_suggested_maxseg * mss) > new_tso)
18649 new_tso = (uint32_t)rack->r_ctl.client_suggested_maxseg * mss;
18650 }
18651 if (rack->r_ctl.rc_user_set_min_segs &&
18652 ((rack->r_ctl.rc_user_set_min_segs * mss) > new_tso))
18653 new_tso = rack->r_ctl.rc_user_set_min_segs * mss;
18654 return (new_tso);
18655 }
18656 
18657 static uint64_t
18658 rack_arrive_at_discounted_rate(struct tcp_rack *rack, uint64_t window_input, uint32_t *rate_set, uint32_t *gain_b)
18659 {
18660 uint64_t reduced_win;
18661 uint32_t gain;
18662 
18663 if (window_input < rc_init_window(rack)) {
18664 /*
18665 * The cwnd is collapsed to
18666 * nearly zero, maybe because of a time-out?
18667 * Lets drop back to the lt-bw.
18668 */
18669 reduced_win = rack_get_lt_bw(rack);
18670 /* Set the flag so the caller knows it is a rate and not a reduced window */
18671 *rate_set = 1;
18672 gain = 100;
18673 } else if (IN_RECOVERY(rack->rc_tp->t_flags)) {
18674 /*
18675 * If we are in recovery our cwnd needs to be less for
18676 * our pacing consideration.
18677 */
18678 if (rack->rack_hibeta == 0) {
18679 reduced_win = window_input / 2;
18680 gain = 50;
18681 } else {
18682 reduced_win = window_input * rack->r_ctl.saved_hibeta;
18683 reduced_win /= 100;
18684 gain = rack->r_ctl.saved_hibeta;
18685 }
18686 } else {
18687 /*
18688 * Apply Timely factor to increase/decrease the
18689 * amount we are pacing at.
18690 */
18691 gain = rack_get_output_gain(rack, NULL);
18692 if (gain > rack_gain_p5_ub) {
18693 gain = rack_gain_p5_ub;
18694 }
18695 reduced_win = window_input * gain;
18696 reduced_win /= 100;
18697 }
18698 if (gain_b != NULL)
18699 *gain_b = gain;
18700 /*
18701 * What is being returned here is a trimmed down
18702 * window value in all cases where rate_set is left
18703 * at 0. In one case we actually return the rate (lt_bw).
18704 * The "reduced_win" is returned as a slimmed down cwnd that
18705 * is then converted by the caller into a rate when rate_set
18706 * is 0.
18707 */
18708 return (reduced_win);
18709 }
18710 
18711 static int32_t
18712 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced)
18713 {
18714 uint64_t lentim, fill_bw;
18715 
18716 rack->r_via_fill_cw = 0;
18717 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use)
18718 return (slot);
18719 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd)
18720 return (slot);
18721 if (rack->r_ctl.rc_last_us_rtt == 0)
18722 return (slot);
18723 if (rack->rc_pace_fill_if_rttin_range &&
18724 (rack->r_ctl.rc_last_us_rtt >=
18725 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) {
18726 /* The rtt is huge, N * smallest, lets not fill */
18727 return (slot);
18728 }
18729 if (rack->r_ctl.fillcw_cap && *rate_wanted >= rack->r_ctl.fillcw_cap)
18730 return (slot);
18731 /*
18732 * First let's calculate the b/w based on the last us-rtt
18733 * and the smallest send window.
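 *
 * Illustrative example (made-up numbers): with a usable window of
 * 125000 bytes and rc_last_us_rtt of 20000 usecs the fill b/w is
 * 125000 * 1000000 / 20000 = 6,250,000 bytes/sec (~50 Mbps). For
 * a 15000 byte send the fill-cw slot computed further below is
 * 15000 * 1000000 / 6250000 = 2400 usecs, and it only replaces
 * the caller's slot when it is the smaller of the two.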
18734 */ 18735 fill_bw = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); 18736 if (rack->rc_fillcw_apply_discount) { 18737 uint32_t rate_set = 0; 18738 18739 fill_bw = rack_arrive_at_discounted_rate(rack, fill_bw, &rate_set, NULL); 18740 if (rate_set) { 18741 goto at_lt_bw; 18742 } 18743 } 18744 /* Take the rwnd if its smaller */ 18745 if (fill_bw > rack->rc_tp->snd_wnd) 18746 fill_bw = rack->rc_tp->snd_wnd; 18747 /* Now lets make it into a b/w */ 18748 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 18749 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 18750 /* Adjust to any cap */ 18751 if (rack->r_ctl.fillcw_cap && fill_bw >= rack->r_ctl.fillcw_cap) 18752 fill_bw = rack->r_ctl.fillcw_cap; 18753 18754 at_lt_bw: 18755 if (rack_bw_multipler > 0) { 18756 /* 18757 * We want to limit fill-cw to the some multiplier 18758 * of the max(lt_bw, gp_est). The normal default 18759 * is 0 for off, so a sysctl has enabled it. 18760 */ 18761 uint64_t lt_bw, gp, rate; 18762 18763 gp = rack_get_gp_est(rack); 18764 lt_bw = rack_get_lt_bw(rack); 18765 if (lt_bw > gp) 18766 rate = lt_bw; 18767 else 18768 rate = gp; 18769 rate *= rack_bw_multipler; 18770 rate /= 100; 18771 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 18772 union tcp_log_stackspecific log; 18773 struct timeval tv; 18774 18775 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18776 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 18777 log.u_bbr.flex1 = rack_bw_multipler; 18778 log.u_bbr.flex2 = len; 18779 log.u_bbr.cur_del_rate = gp; 18780 log.u_bbr.delRate = lt_bw; 18781 log.u_bbr.bw_inuse = rate; 18782 log.u_bbr.rttProp = fill_bw; 18783 log.u_bbr.flex8 = 44; 18784 tcp_log_event(rack->rc_tp, NULL, NULL, NULL, 18785 BBR_LOG_CWND, 0, 18786 0, &log, false, NULL, 18787 __func__, __LINE__, &tv); 18788 } 18789 if (fill_bw > rate) 18790 fill_bw = rate; 18791 } 18792 /* We are below the min b/w */ 18793 if (non_paced) 18794 *rate_wanted = fill_bw; 18795 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 18796 return (slot); 18797 rack->r_via_fill_cw = 1; 18798 if (rack->r_rack_hw_rate_caps && 18799 (rack->r_ctl.crte != NULL)) { 18800 uint64_t high_rate; 18801 18802 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 18803 if (fill_bw > high_rate) { 18804 /* We are capping bw at the highest rate table entry */ 18805 if (*rate_wanted > high_rate) { 18806 /* The original rate was also capped */ 18807 rack->r_via_fill_cw = 0; 18808 } 18809 rack_log_hdwr_pacing(rack, 18810 fill_bw, high_rate, __LINE__, 18811 0, 3); 18812 fill_bw = high_rate; 18813 if (capped) 18814 *capped = 1; 18815 } 18816 } else if ((rack->r_ctl.crte == NULL) && 18817 (rack->rack_hdrw_pacing == 0) && 18818 (rack->rack_hdw_pace_ena) && 18819 rack->r_rack_hw_rate_caps && 18820 (rack->rack_attempt_hdwr_pace == 0) && 18821 (rack->rc_inp->inp_route.ro_nh != NULL) && 18822 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 18823 /* 18824 * Ok we may have a first attempt that is greater than our top rate 18825 * lets check. 
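 *
 * (Descriptive note: tcp_hw_highest_rate_ifp() below presumably
 * reports the top entry of the interface's hardware rate table;
 * if the fill b/w exceeds it we clamp fill_bw to that rate and
 * mark the result as capped.)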
18826 */ 18827 uint64_t high_rate; 18828 18829 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 18830 if (high_rate) { 18831 if (fill_bw > high_rate) { 18832 fill_bw = high_rate; 18833 if (capped) 18834 *capped = 1; 18835 } 18836 } 18837 } 18838 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) { 18839 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 18840 fill_bw, 0, 0, HYBRID_LOG_RATE_CAP, 2, NULL, __LINE__); 18841 fill_bw = rack->r_ctl.bw_rate_cap; 18842 } 18843 /* 18844 * Ok fill_bw holds our mythical b/w to fill the cwnd 18845 * in an rtt (unless it was capped), what does that 18846 * time wise equate too? 18847 */ 18848 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 18849 lentim /= fill_bw; 18850 *rate_wanted = fill_bw; 18851 if (non_paced || (lentim < slot)) { 18852 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, 18853 0, lentim, 12, __LINE__, NULL, 0); 18854 return ((int32_t)lentim); 18855 } else 18856 return (slot); 18857 } 18858 18859 static uint32_t 18860 rack_policer_check_send(struct tcp_rack *rack, uint32_t len, uint32_t segsiz, uint32_t *needs) 18861 { 18862 uint64_t calc; 18863 18864 rack->rc_policer_should_pace = 0; 18865 calc = rack_policer_bucket_reserve * rack->r_ctl.policer_bucket_size; 18866 calc /= 100; 18867 /* 18868 * Now lets look at if we want more than is in the bucket <or> 18869 * we want more than is reserved in the bucket. 18870 */ 18871 if (rack_verbose_logging > 0) 18872 policer_detection_log(rack, len, segsiz, calc, rack->r_ctl.current_policer_bucket, 8); 18873 if ((calc > rack->r_ctl.current_policer_bucket) || 18874 (len >= (rack->r_ctl.current_policer_bucket - calc))) { 18875 /* 18876 * We may want to pace depending on if we are going 18877 * into the reserve or not. 18878 */ 18879 uint32_t newlen; 18880 18881 if (calc > rack->r_ctl.current_policer_bucket) { 18882 /* 18883 * This will eat into the reserve if we 18884 * don't have room at all some lines 18885 * below will catch it. 18886 */ 18887 newlen = rack->r_ctl.policer_max_seg; 18888 rack->rc_policer_should_pace = 1; 18889 } else { 18890 /* 18891 * We have all of the reserve plus something in the bucket 18892 * that we can give out. 18893 */ 18894 newlen = rack->r_ctl.current_policer_bucket - calc; 18895 if (newlen < rack->r_ctl.policer_max_seg) { 18896 /* 18897 * Into the reserve to get a full policer_max_seg 18898 * so we set the len to that and eat into 18899 * the reserve. If we go over the code 18900 * below will make us wait. 18901 */ 18902 newlen = rack->r_ctl.policer_max_seg; 18903 rack->rc_policer_should_pace = 1; 18904 } 18905 } 18906 if (newlen > rack->r_ctl.current_policer_bucket) { 18907 /* We have to wait some */ 18908 *needs = newlen - rack->r_ctl.current_policer_bucket; 18909 return (0); 18910 } 18911 if (rack_verbose_logging > 0) 18912 policer_detection_log(rack, len, segsiz, newlen, 0, 9); 18913 len = newlen; 18914 } /* else we have all len available above the reserve */ 18915 if (rack_verbose_logging > 0) 18916 policer_detection_log(rack, len, segsiz, calc, 0, 10); 18917 return (len); 18918 } 18919 18920 static uint32_t 18921 rack_policed_sending(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, uint32_t segsiz, int call_line) 18922 { 18923 /* 18924 * Given a send of len, and a token bucket set at current_policer_bucket_size 18925 * are we close enough to the end of the bucket that we need to pace? If so 18926 * calculate out a time and return it. Otherwise subtract the tokens from 18927 * the bucket. 
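 *
 * Illustrative example (made-up numbers, assuming the reserve
 * sysctl is 20, i.e. 20%): with a bucket size of 100000 bytes the
 * reserve is 20000. If 50000 bytes remain and we send 20000,
 * 50000 - 20000 = 30000 > 20000, so we just debit the bucket and
 * return 0. If only 25000 bytes remained, 25000 - 20000 = 5000 is
 * within the reserve, so with a policer_bw of 1,000,000 bytes/sec
 * we would instead return 20000 * 1000000 / 1000000 = 20000 usecs.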
18928 */ 18929 uint64_t calc; 18930 18931 if ((rack->r_ctl.policer_bw == 0) || 18932 (rack->r_ctl.policer_bucket_size < segsiz)) { 18933 /* 18934 * We should have an estimate here... 18935 */ 18936 return (0); 18937 } 18938 calc = (uint64_t)rack_policer_bucket_reserve * (uint64_t)rack->r_ctl.policer_bucket_size; 18939 calc /= 100; 18940 if ((rack->r_ctl.current_policer_bucket < len) || 18941 (rack->rc_policer_should_pace == 1) || 18942 ((rack->r_ctl.current_policer_bucket - len) <= (uint32_t)calc)) { 18943 /* we need to pace */ 18944 uint64_t lentim, res; 18945 uint32_t slot; 18946 18947 lentim = (uint64_t)len * (uint64_t)HPTS_USEC_IN_SEC; 18948 res = lentim / rack->r_ctl.policer_bw; 18949 slot = (uint32_t)res; 18950 if (rack->r_ctl.current_policer_bucket > len) 18951 rack->r_ctl.current_policer_bucket -= len; 18952 else 18953 rack->r_ctl.current_policer_bucket = 0; 18954 policer_detection_log(rack, len, slot, (uint32_t)rack_policer_bucket_reserve, call_line, 5); 18955 rack->rc_policer_should_pace = 0; 18956 return(slot); 18957 } 18958 /* Just take tokens out of the bucket and let rack do whatever it would have */ 18959 policer_detection_log(rack, len, 0, (uint32_t)rack_policer_bucket_reserve, call_line, 6); 18960 if (len < rack->r_ctl.current_policer_bucket) { 18961 rack->r_ctl.current_policer_bucket -= len; 18962 } else { 18963 rack->r_ctl.current_policer_bucket = 0; 18964 } 18965 return (0); 18966 } 18967 18968 18969 static int32_t 18970 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz, int line) 18971 { 18972 uint64_t srtt; 18973 int32_t slot = 0; 18974 int32_t minslot = 0; 18975 int can_start_hw_pacing = 1; 18976 int err; 18977 int pace_one; 18978 18979 if (rack_pace_one_seg || 18980 (rack->r_ctl.rc_user_set_min_segs == 1)) 18981 pace_one = 1; 18982 else 18983 pace_one = 0; 18984 if (rack->rc_policer_detected == 1) { 18985 /* 18986 * A policer has been detected and we 18987 * have all of our data (policer-bw and 18988 * policer bucket size) calculated. Call 18989 * into the function to find out if we are 18990 * overriding the time. 18991 */ 18992 slot = rack_policed_sending(rack, tp, len, segsiz, line); 18993 if (slot) { 18994 uint64_t logbw; 18995 18996 logbw = rack->r_ctl.current_policer_bucket; 18997 logbw <<= 32; 18998 logbw |= rack->r_ctl.policer_bucket_size; 18999 rack_log_pacing_delay_calc(rack, len, slot, rack->r_ctl.policer_bw, logbw, 0, 89, __LINE__, NULL, 0); 19000 return(slot); 19001 } 19002 } 19003 if (rack->rc_always_pace == 0) { 19004 /* 19005 * We use the most optimistic possible cwnd/srtt for 19006 * sending calculations. This will make our 19007 * calculation anticipate getting more through 19008 * quicker then possible. But thats ok we don't want 19009 * the peer to have a gap in data sending. 19010 */ 19011 uint64_t cwnd, tr_perms = 0; 19012 int32_t reduce = 0; 19013 19014 old_method: 19015 /* 19016 * We keep no precise pacing with the old method 19017 * instead we use the pacer to mitigate bursts. 
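 *
 * Illustrative example (made-up numbers): with cwnd = 100000 bytes
 * and srtt = 20000 usecs, tr_perms = 100000 * 1000 / 20000 = 5000
 * bytes per millisecond, so a 15000 byte send gets slot = 3 ms,
 * which (after the rack_slot_reduction trim and the
 * HPTS_USEC_IN_MSEC conversion below) becomes roughly a 3000 usec
 * pacing delay.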
19018 */ 19019 if (rack->r_ctl.rc_rack_min_rtt) 19020 srtt = rack->r_ctl.rc_rack_min_rtt; 19021 else 19022 srtt = max(tp->t_srtt, 1); 19023 if (rack->r_ctl.rc_rack_largest_cwnd) 19024 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 19025 else 19026 cwnd = rack->r_ctl.cwnd_to_use; 19027 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 19028 tr_perms = (cwnd * 1000) / srtt; 19029 if (tr_perms == 0) { 19030 tr_perms = ctf_fixed_maxseg(tp); 19031 } 19032 /* 19033 * Calculate how long this will take to drain, if 19034 * the calculation comes out to zero, thats ok we 19035 * will use send_a_lot to possibly spin around for 19036 * more increasing tot_len_this_send to the point 19037 * that its going to require a pace, or we hit the 19038 * cwnd. Which in that case we are just waiting for 19039 * a ACK. 19040 */ 19041 slot = len / tr_perms; 19042 /* Now do we reduce the time so we don't run dry? */ 19043 if (slot && rack_slot_reduction) { 19044 reduce = (slot / rack_slot_reduction); 19045 if (reduce < slot) { 19046 slot -= reduce; 19047 } else 19048 slot = 0; 19049 } 19050 slot *= HPTS_USEC_IN_MSEC; 19051 if (rack->rc_pace_to_cwnd) { 19052 uint64_t rate_wanted = 0; 19053 19054 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); 19055 rack->rc_ack_can_sendout_data = 1; 19056 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); 19057 } else 19058 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); 19059 /*******************************************************/ 19060 /* RRS: We insert non-paced call to stats here for len */ 19061 /*******************************************************/ 19062 } else { 19063 uint64_t bw_est, res, lentim, rate_wanted; 19064 uint32_t segs, oh; 19065 int capped = 0; 19066 int prev_fill; 19067 19068 if ((rack->r_rr_config == 1) && rsm) { 19069 return (rack->r_ctl.rc_min_to); 19070 } 19071 if (rack->use_fixed_rate) { 19072 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 19073 } else if ((rack->r_ctl.init_rate == 0) && 19074 (rack->r_ctl.gp_bw == 0)) { 19075 /* no way to yet do an estimate */ 19076 bw_est = rate_wanted = 0; 19077 } else if (rack->dgp_on) { 19078 bw_est = rack_get_bw(rack); 19079 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 19080 } else { 19081 uint32_t gain, rate_set = 0; 19082 19083 rate_wanted = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); 19084 rate_wanted = rack_arrive_at_discounted_rate(rack, rate_wanted, &rate_set, &gain); 19085 if (rate_set == 0) { 19086 if (rate_wanted > rack->rc_tp->snd_wnd) 19087 rate_wanted = rack->rc_tp->snd_wnd; 19088 /* Now lets make it into a b/w */ 19089 rate_wanted *= (uint64_t)HPTS_USEC_IN_SEC; 19090 rate_wanted /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 19091 } 19092 bw_est = rate_wanted; 19093 rack_log_pacing_delay_calc(rack, rack->rc_tp->snd_cwnd, 19094 rack->r_ctl.cwnd_to_use, 19095 rate_wanted, bw_est, 19096 rack->r_ctl.rc_last_us_rtt, 19097 88, __LINE__, NULL, gain); 19098 } 19099 if ((bw_est == 0) || (rate_wanted == 0) || 19100 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) { 19101 /* 19102 * No way yet to make a b/w estimate or 19103 * our raise is set incorrectly. 19104 */ 19105 goto old_method; 19106 } 19107 rack_rate_cap_bw(rack, &rate_wanted, &capped); 19108 /* We need to account for all the overheads */ 19109 segs = (len + segsiz - 1) / segsiz; 19110 /* 19111 * We need the diff between 1514 bytes (e-mtu with e-hdr) 19112 * and how much data we put in each packet. 
Yes this 19113 * means we may be off if we are larger than 1500 bytes 19114 * or smaller. But this just makes us more conservative. 19115 */ 19116 19117 oh = (tp->t_maxseg - segsiz) + sizeof(struct tcphdr); 19118 if (rack->r_is_v6) { 19119 #ifdef INET6 19120 oh += sizeof(struct ip6_hdr); 19121 #endif 19122 } else { 19123 #ifdef INET 19124 oh += sizeof(struct ip); 19125 #endif 19126 } 19127 /* We add a fixed 14 for the ethernet header */ 19128 oh += 14; 19129 segs *= oh; 19130 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 19131 res = lentim / rate_wanted; 19132 slot = (uint32_t)res; 19133 if (rack_hw_rate_min && 19134 (rate_wanted < rack_hw_rate_min)) { 19135 can_start_hw_pacing = 0; 19136 if (rack->r_ctl.crte) { 19137 /* 19138 * Ok we need to release it, we 19139 * have fallen too low. 19140 */ 19141 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 19142 rack->r_ctl.crte = NULL; 19143 rack->rack_attempt_hdwr_pace = 0; 19144 rack->rack_hdrw_pacing = 0; 19145 } 19146 } 19147 if (rack->r_ctl.crte && 19148 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { 19149 /* 19150 * We want more than the hardware can give us, 19151 * don't start any hw pacing. 19152 */ 19153 can_start_hw_pacing = 0; 19154 if (rack->r_rack_hw_rate_caps == 0) { 19155 /* 19156 * Ok we need to release it, we 19157 * want more than the card can give us and 19158 * no rate cap is in place. Set it up so 19159 * when we want less we can retry. 19160 */ 19161 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 19162 rack->r_ctl.crte = NULL; 19163 rack->rack_attempt_hdwr_pace = 0; 19164 rack->rack_hdrw_pacing = 0; 19165 } 19166 } 19167 if ((rack->r_ctl.crte != NULL) && (rack->rc_inp->inp_snd_tag == NULL)) { 19168 /* 19169 * We lost our rate somehow, this can happen 19170 * if the interface changed underneath us. 19171 */ 19172 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 19173 rack->r_ctl.crte = NULL; 19174 /* Lets re-allow attempting to setup pacing */ 19175 rack->rack_hdrw_pacing = 0; 19176 rack->rack_attempt_hdwr_pace = 0; 19177 rack_log_hdwr_pacing(rack, 19178 rate_wanted, bw_est, __LINE__, 19179 0, 6); 19180 } 19181 prev_fill = rack->r_via_fill_cw; 19182 if ((rack->rc_pace_to_cwnd) && 19183 (capped == 0) && 19184 (rack->dgp_on == 1) && 19185 (rack->use_fixed_rate == 0) && 19186 (rack->in_probe_rtt == 0) && 19187 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 19188 /* 19189 * We want to pace at our rate *or* faster to 19190 * fill the cwnd to the max if its not full. 19191 */ 19192 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); 19193 /* Re-check to make sure we are not exceeding our max b/w */ 19194 if ((rack->r_ctl.crte != NULL) && 19195 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { 19196 /* 19197 * We want more than the hardware can give us, 19198 * don't start any hw pacing. 19199 */ 19200 can_start_hw_pacing = 0; 19201 if (rack->r_rack_hw_rate_caps == 0) { 19202 /* 19203 * Ok we need to release it, we 19204 * want more than the card can give us and 19205 * no rate cap is in place. Set it up so 19206 * when we want less we can retry. 
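 *
 * (Note: clearing rack_attempt_hdwr_pace below is what re-arms the
 * attempt, so a later call with a smaller rate_wanted can try
 * tcp_set_pacing_rate() again.)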
19207 */ 19208 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 19209 rack->r_ctl.crte = NULL; 19210 rack->rack_attempt_hdwr_pace = 0; 19211 rack->rack_hdrw_pacing = 0; 19212 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 19213 } 19214 } 19215 } 19216 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 19217 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 19218 if ((rack->rack_hdw_pace_ena) && 19219 (can_start_hw_pacing > 0) && 19220 (rack->rack_hdrw_pacing == 0) && 19221 (rack->rack_attempt_hdwr_pace == 0)) { 19222 /* 19223 * Lets attempt to turn on hardware pacing 19224 * if we can. 19225 */ 19226 rack->rack_attempt_hdwr_pace = 1; 19227 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 19228 rack->rc_inp->inp_route.ro_nh->nh_ifp, 19229 rate_wanted, 19230 RS_PACING_GEQ, 19231 &err, &rack->r_ctl.crte_prev_rate); 19232 if (rack->r_ctl.crte) { 19233 rack->rack_hdrw_pacing = 1; 19234 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, segsiz, 19235 pace_one, rack->r_ctl.crte, 19236 NULL, rack->r_ctl.pace_len_divisor); 19237 rack_log_hdwr_pacing(rack, 19238 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 19239 err, 0); 19240 rack->r_ctl.last_hw_bw_req = rate_wanted; 19241 } else { 19242 counter_u64_add(rack_hw_pace_init_fail, 1); 19243 } 19244 } else if (rack->rack_hdrw_pacing && 19245 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 19246 /* Do we need to adjust our rate? */ 19247 const struct tcp_hwrate_limit_table *nrte; 19248 19249 if (rack->r_up_only && 19250 (rate_wanted < rack->r_ctl.crte->rate)) { 19251 /** 19252 * We have four possible states here 19253 * having to do with the previous time 19254 * and this time. 19255 * previous | this-time 19256 * A) 0 | 0 -- fill_cw not in the picture 19257 * B) 1 | 0 -- we were doing a fill-cw but now are not 19258 * C) 1 | 1 -- all rates from fill_cw 19259 * D) 0 | 1 -- we were doing non-fill and now we are filling 19260 * 19261 * For case A, C and D we don't allow a drop. But for 19262 * case B where we now our on our steady rate we do 19263 * allow a drop. 19264 * 19265 */ 19266 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 19267 goto done_w_hdwr; 19268 } 19269 if ((rate_wanted > rack->r_ctl.crte->rate) || 19270 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 19271 if (rack_hw_rate_to_low && 19272 (bw_est < rack_hw_rate_to_low)) { 19273 /* 19274 * The pacing rate is too low for hardware, but 19275 * do allow hardware pacing to be restarted. 19276 */ 19277 rack_log_hdwr_pacing(rack, 19278 bw_est, rack->r_ctl.crte->rate, __LINE__, 19279 0, 5); 19280 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 19281 rack->r_ctl.crte = NULL; 19282 rack->rack_attempt_hdwr_pace = 0; 19283 rack->rack_hdrw_pacing = 0; 19284 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 19285 goto done_w_hdwr; 19286 } 19287 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 19288 rack->rc_tp, 19289 rack->rc_inp->inp_route.ro_nh->nh_ifp, 19290 rate_wanted, 19291 RS_PACING_GEQ, 19292 &err, &rack->r_ctl.crte_prev_rate); 19293 if (nrte == NULL) { 19294 /* 19295 * Lost the rate, lets drop hardware pacing 19296 * period. 
19297 */ 19298 rack->rack_hdrw_pacing = 0; 19299 rack->r_ctl.crte = NULL; 19300 rack_log_hdwr_pacing(rack, 19301 rate_wanted, 0, __LINE__, 19302 err, 1); 19303 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 19304 counter_u64_add(rack_hw_pace_lost, 1); 19305 } else if (nrte != rack->r_ctl.crte) { 19306 rack->r_ctl.crte = nrte; 19307 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, 19308 segsiz, pace_one, rack->r_ctl.crte, 19309 NULL, rack->r_ctl.pace_len_divisor); 19310 rack_log_hdwr_pacing(rack, 19311 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 19312 err, 2); 19313 rack->r_ctl.last_hw_bw_req = rate_wanted; 19314 } 19315 } else { 19316 /* We just need to adjust the segment size */ 19317 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 19318 rack_log_hdwr_pacing(rack, 19319 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 19320 0, 4); 19321 rack->r_ctl.last_hw_bw_req = rate_wanted; 19322 } 19323 } 19324 } 19325 if (minslot && (minslot > slot)) { 19326 rack_log_pacing_delay_calc(rack, minslot, slot, rack->r_ctl.crte->rate, bw_est, lentim, 19327 98, __LINE__, NULL, 0); 19328 slot = minslot; 19329 } 19330 done_w_hdwr: 19331 if (rack_limit_time_with_srtt && 19332 (rack->use_fixed_rate == 0) && 19333 (rack->rack_hdrw_pacing == 0)) { 19334 /* 19335 * Sanity check, we do not allow the pacing delay 19336 * to be longer than the SRTT of the path. If it is 19337 * a slow path, then adding a packet should increase 19338 * the RTT and compensate for this i.e. the srtt will 19339 * be greater so the allowed pacing time will be greater. 19340 * 19341 * Note this restriction is not for where a peak rate 19342 * is set, we are doing fixed pacing or hardware pacing. 19343 */ 19344 if (rack->rc_tp->t_srtt) 19345 srtt = rack->rc_tp->t_srtt; 19346 else 19347 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 19348 if (srtt < (uint64_t)slot) { 19349 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); 19350 slot = srtt; 19351 } 19352 } 19353 /*******************************************************************/ 19354 /* RRS: We insert paced call to stats here for len and rate_wanted */ 19355 /*******************************************************************/ 19356 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); 19357 } 19358 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 19359 /* 19360 * If this rate is seeing enobufs when it 19361 * goes to send then either the nic is out 19362 * of gas or we are mis-estimating the time 19363 * somehow and not letting the queue empty 19364 * completely. Lets add to the pacing time. 19365 */ 19366 int hw_boost_delay; 19367 19368 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 19369 if (hw_boost_delay > rack_enobuf_hw_max) 19370 hw_boost_delay = rack_enobuf_hw_max; 19371 else if (hw_boost_delay < rack_enobuf_hw_min) 19372 hw_boost_delay = rack_enobuf_hw_min; 19373 slot += hw_boost_delay; 19374 } 19375 return (slot); 19376 } 19377 19378 static void 19379 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 19380 tcp_seq startseq, uint32_t sb_offset) 19381 { 19382 struct rack_sendmap *my_rsm = NULL; 19383 19384 if (tp->t_state < TCPS_ESTABLISHED) { 19385 /* 19386 * We don't start any measurements if we are 19387 * not at least established. 
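 *
 * (Background, stated briefly: a goodput measurement covers the
 * window [gput_seq, gput_ack); once gput_ack is cumulatively
 * acked, the bytes covered versus the elapsed time yield the b/w
 * sample that feeds the pacing estimate. The rest of this function
 * mainly picks suitable start and end points for that window.)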
19388 */ 19389 return; 19390 } 19391 if (tp->t_state >= TCPS_FIN_WAIT_1) { 19392 /* 19393 * We will get no more data into the SB 19394 * this means we need to have the data available 19395 * before we start a measurement. 19396 */ 19397 19398 if (sbavail(&tptosocket(tp)->so_snd) < 19399 max(rc_init_window(rack), 19400 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { 19401 /* Nope not enough data */ 19402 return; 19403 } 19404 } 19405 tp->t_flags |= TF_GPUTINPROG; 19406 rack->r_ctl.rc_gp_cumack_ts = 0; 19407 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 19408 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 19409 tp->gput_seq = startseq; 19410 rack->app_limited_needs_set = 0; 19411 if (rack->in_probe_rtt) 19412 rack->measure_saw_probe_rtt = 1; 19413 else if ((rack->measure_saw_probe_rtt) && 19414 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 19415 rack->measure_saw_probe_rtt = 0; 19416 if (rack->rc_gp_filled) 19417 tp->gput_ts = rack->r_ctl.last_cumack_advance; 19418 else { 19419 /* Special case initial measurement */ 19420 struct timeval tv; 19421 19422 tp->gput_ts = tcp_get_usecs(&tv); 19423 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 19424 } 19425 /* 19426 * We take a guess out into the future, 19427 * if we have no measurement and no 19428 * initial rate, we measure the first 19429 * initial-windows worth of data to 19430 * speed up getting some GP measurement and 19431 * thus start pacing. 19432 */ 19433 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 19434 rack->app_limited_needs_set = 1; 19435 tp->gput_ack = startseq + max(rc_init_window(rack), 19436 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 19437 rack_log_pacing_delay_calc(rack, 19438 tp->gput_seq, 19439 tp->gput_ack, 19440 0, 19441 tp->gput_ts, 19442 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 19443 9, 19444 __LINE__, NULL, 0); 19445 rack_tend_gp_marks(tp, rack); 19446 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 19447 return; 19448 } 19449 if (sb_offset) { 19450 /* 19451 * We are out somewhere in the sb 19452 * can we use the already outstanding data? 19453 */ 19454 19455 if (rack->r_ctl.rc_app_limited_cnt == 0) { 19456 /* 19457 * Yes first one is good and in this case 19458 * the tp->gput_ts is correctly set based on 19459 * the last ack that arrived (no need to 19460 * set things up when an ack comes in). 19461 */ 19462 my_rsm = tqhash_min(rack->r_ctl.tqh); 19463 if ((my_rsm == NULL) || 19464 (my_rsm->r_rtr_cnt != 1)) { 19465 /* retransmission? */ 19466 goto use_latest; 19467 } 19468 } else { 19469 if (rack->r_ctl.rc_first_appl == NULL) { 19470 /* 19471 * If rc_first_appl is NULL 19472 * then the cnt should be 0. 19473 * This is probably an error, maybe 19474 * a KASSERT would be approprate. 19475 */ 19476 goto use_latest; 19477 } 19478 /* 19479 * If we have a marker pointer to the last one that is 19480 * app limited we can use that, but we need to set 19481 * things up so that when it gets ack'ed we record 19482 * the ack time (if its not already acked). 19483 */ 19484 rack->app_limited_needs_set = 1; 19485 /* 19486 * We want to get to the rsm that is either 19487 * next with space i.e. over 1 MSS or the one 19488 * after that (after the app-limited). 
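 *
 * Illustrative example (made-up numbers): if the rsm after
 * rc_first_appl only covers 100 bytes (<= 1 MSS) we skip ahead to
 * the rsm after it; if it covers, say, 4000 bytes we instead start
 * the measurement one MSS into it, i.e. at
 * my_rsm->r_start + ctf_fixed_maxseg(tp) (the goto start_set case).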
19489 */ 19490 my_rsm = tqhash_next(rack->r_ctl.tqh, rack->r_ctl.rc_first_appl); 19491 if (my_rsm) { 19492 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 19493 /* Have to use the next one */ 19494 my_rsm = tqhash_next(rack->r_ctl.tqh, my_rsm); 19495 else { 19496 /* Use after the first MSS of it is acked */ 19497 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 19498 goto start_set; 19499 } 19500 } 19501 if ((my_rsm == NULL) || 19502 (my_rsm->r_rtr_cnt != 1)) { 19503 /* 19504 * Either its a retransmit or 19505 * the last is the app-limited one. 19506 */ 19507 goto use_latest; 19508 } 19509 } 19510 tp->gput_seq = my_rsm->r_start; 19511 start_set: 19512 if (my_rsm->r_flags & RACK_ACKED) { 19513 /* 19514 * This one has been acked use the arrival ack time 19515 */ 19516 struct rack_sendmap *nrsm; 19517 19518 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 19519 rack->app_limited_needs_set = 0; 19520 /* 19521 * Ok in this path we need to use the r_end now 19522 * since this guy is the starting ack. 19523 */ 19524 tp->gput_seq = my_rsm->r_end; 19525 /* 19526 * We also need to adjust up the sendtime 19527 * to the send of the next data after my_rsm. 19528 */ 19529 nrsm = tqhash_next(rack->r_ctl.tqh, my_rsm); 19530 if (nrsm != NULL) 19531 my_rsm = nrsm; 19532 else { 19533 /* 19534 * The next as not been sent, thats the 19535 * case for using the latest. 19536 */ 19537 goto use_latest; 19538 } 19539 } 19540 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; 19541 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 19542 rack->r_ctl.rc_gp_cumack_ts = 0; 19543 if ((rack->r_ctl.cleared_app_ack == 1) && 19544 (SEQ_GEQ(rack->r_ctl.cleared_app_ack, tp->gput_seq))) { 19545 /* 19546 * We just cleared an application limited period 19547 * so the next seq out needs to skip the first 19548 * ack. 19549 */ 19550 rack->app_limited_needs_set = 1; 19551 rack->r_ctl.cleared_app_ack = 0; 19552 } 19553 rack_log_pacing_delay_calc(rack, 19554 tp->gput_seq, 19555 tp->gput_ack, 19556 (uint64_t)my_rsm, 19557 tp->gput_ts, 19558 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 19559 9, 19560 __LINE__, my_rsm, 0); 19561 /* Now lets make sure all are marked as they should be */ 19562 rack_tend_gp_marks(tp, rack); 19563 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 19564 return; 19565 } 19566 19567 use_latest: 19568 /* 19569 * We don't know how long we may have been 19570 * idle or if this is the first-send. Lets 19571 * setup the flag so we will trim off 19572 * the first ack'd data so we get a true 19573 * measurement. 19574 */ 19575 rack->app_limited_needs_set = 1; 19576 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 19577 rack->r_ctl.rc_gp_cumack_ts = 0; 19578 /* Find this guy so we can pull the send time */ 19579 my_rsm = tqhash_find(rack->r_ctl.tqh, startseq); 19580 if (my_rsm) { 19581 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; 19582 if (my_rsm->r_flags & RACK_ACKED) { 19583 /* 19584 * Unlikely since its probably what was 19585 * just transmitted (but I am paranoid). 19586 */ 19587 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 19588 rack->app_limited_needs_set = 0; 19589 } 19590 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 19591 /* This also is unlikely */ 19592 tp->gput_seq = my_rsm->r_start; 19593 } 19594 } else { 19595 /* 19596 * TSNH unless we have some send-map limit, 19597 * and even at that it should not be hitting 19598 * that limit (we should have stopped sending). 
19599 */ 19600 struct timeval tv; 19601 19602 microuptime(&tv); 19603 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 19604 } 19605 rack_tend_gp_marks(tp, rack); 19606 rack_log_pacing_delay_calc(rack, 19607 tp->gput_seq, 19608 tp->gput_ack, 19609 (uint64_t)my_rsm, 19610 tp->gput_ts, 19611 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 19612 9, __LINE__, NULL, 0); 19613 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 19614 } 19615 19616 static inline uint32_t 19617 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 19618 uint32_t avail, int32_t sb_offset) 19619 { 19620 uint32_t len; 19621 uint32_t sendwin; 19622 19623 if (tp->snd_wnd > cwnd_to_use) 19624 sendwin = cwnd_to_use; 19625 else 19626 sendwin = tp->snd_wnd; 19627 if (ctf_outstanding(tp) >= tp->snd_wnd) { 19628 /* We never want to go over our peers rcv-window */ 19629 len = 0; 19630 } else { 19631 uint32_t flight; 19632 19633 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 19634 if (flight >= sendwin) { 19635 /* 19636 * We have in flight what we are allowed by cwnd (if 19637 * it was rwnd blocking it would have hit above out 19638 * >= tp->snd_wnd). 19639 */ 19640 return (0); 19641 } 19642 len = sendwin - flight; 19643 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 19644 /* We would send too much (beyond the rwnd) */ 19645 len = tp->snd_wnd - ctf_outstanding(tp); 19646 } 19647 if ((len + sb_offset) > avail) { 19648 /* 19649 * We don't have that much in the SB, how much is 19650 * there? 19651 */ 19652 len = avail - sb_offset; 19653 } 19654 } 19655 return (len); 19656 } 19657 19658 static void 19659 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 19660 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 19661 int rsm_is_null, int optlen, int line, uint16_t mode) 19662 { 19663 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 19664 union tcp_log_stackspecific log; 19665 struct timeval tv; 19666 19667 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 19668 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 19669 log.u_bbr.flex1 = error; 19670 log.u_bbr.flex2 = flags; 19671 log.u_bbr.flex3 = rsm_is_null; 19672 log.u_bbr.flex4 = ipoptlen; 19673 log.u_bbr.flex5 = tp->rcv_numsacks; 19674 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 19675 log.u_bbr.flex7 = optlen; 19676 log.u_bbr.flex8 = rack->r_fsb_inited; 19677 log.u_bbr.applimited = rack->r_fast_output; 19678 log.u_bbr.bw_inuse = rack_get_bw(rack); 19679 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 19680 log.u_bbr.cwnd_gain = mode; 19681 log.u_bbr.pkts_out = orig_len; 19682 log.u_bbr.lt_epoch = len; 19683 log.u_bbr.delivered = line; 19684 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 19685 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 19686 tcp_log_event(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 19687 len, &log, false, NULL, __func__, __LINE__, &tv); 19688 } 19689 } 19690 19691 19692 static struct mbuf * 19693 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 19694 struct rack_fast_send_blk *fsb, 19695 int32_t seglimit, int32_t segsize, int hw_tls) 19696 { 19697 #ifdef KERN_TLS 19698 struct ktls_session *tls, *ntls; 19699 #ifdef INVARIANTS 19700 struct mbuf *start; 19701 #endif 19702 #endif 19703 struct mbuf *m, *n, **np, *smb; 19704 struct mbuf *top; 19705 int32_t off, soff; 19706 int32_t len = *plen; 19707 int32_t fragsize; 19708 int32_t len_cp = 0; 19709 uint32_t mlen, 
frags; 19710 19711 soff = off = the_off; 19712 smb = m = the_m; 19713 np = ⊤ 19714 top = NULL; 19715 #ifdef KERN_TLS 19716 if (hw_tls && (m->m_flags & M_EXTPG)) 19717 tls = m->m_epg_tls; 19718 else 19719 tls = NULL; 19720 #ifdef INVARIANTS 19721 start = m; 19722 #endif 19723 #endif 19724 while (len > 0) { 19725 if (m == NULL) { 19726 *plen = len_cp; 19727 break; 19728 } 19729 #ifdef KERN_TLS 19730 if (hw_tls) { 19731 if (m->m_flags & M_EXTPG) 19732 ntls = m->m_epg_tls; 19733 else 19734 ntls = NULL; 19735 19736 /* 19737 * Avoid mixing TLS records with handshake 19738 * data or TLS records from different 19739 * sessions. 19740 */ 19741 if (tls != ntls) { 19742 MPASS(m != start); 19743 *plen = len_cp; 19744 break; 19745 } 19746 } 19747 #endif 19748 mlen = min(len, m->m_len - off); 19749 if (seglimit) { 19750 /* 19751 * For M_EXTPG mbufs, add 3 segments 19752 * + 1 in case we are crossing page boundaries 19753 * + 2 in case the TLS hdr/trailer are used 19754 * It is cheaper to just add the segments 19755 * than it is to take the cache miss to look 19756 * at the mbuf ext_pgs state in detail. 19757 */ 19758 if (m->m_flags & M_EXTPG) { 19759 fragsize = min(segsize, PAGE_SIZE); 19760 frags = 3; 19761 } else { 19762 fragsize = segsize; 19763 frags = 0; 19764 } 19765 19766 /* Break if we really can't fit anymore. */ 19767 if ((frags + 1) >= seglimit) { 19768 *plen = len_cp; 19769 break; 19770 } 19771 19772 /* 19773 * Reduce size if you can't copy the whole 19774 * mbuf. If we can't copy the whole mbuf, also 19775 * adjust len so the loop will end after this 19776 * mbuf. 19777 */ 19778 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 19779 mlen = (seglimit - frags - 1) * fragsize; 19780 len = mlen; 19781 *plen = len_cp + len; 19782 } 19783 frags += howmany(mlen, fragsize); 19784 if (frags == 0) 19785 frags++; 19786 seglimit -= frags; 19787 KASSERT(seglimit > 0, 19788 ("%s: seglimit went too low", __func__)); 19789 } 19790 n = m_get(M_NOWAIT, m->m_type); 19791 *np = n; 19792 if (n == NULL) 19793 goto nospace; 19794 n->m_len = mlen; 19795 soff += mlen; 19796 len_cp += n->m_len; 19797 if (m->m_flags & (M_EXT | M_EXTPG)) { 19798 n->m_data = m->m_data + off; 19799 mb_dupcl(n, m); 19800 } else { 19801 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 19802 (u_int)n->m_len); 19803 } 19804 len -= n->m_len; 19805 off = 0; 19806 m = m->m_next; 19807 np = &n->m_next; 19808 if (len || (soff == smb->m_len)) { 19809 /* 19810 * We have more so we move forward or 19811 * we have consumed the entire mbuf and 19812 * len has fell to 0. 19813 */ 19814 soff = 0; 19815 smb = m; 19816 } 19817 19818 } 19819 if (fsb != NULL) { 19820 fsb->m = smb; 19821 fsb->off = soff; 19822 if (smb) { 19823 /* 19824 * Save off the size of the mbuf. We do 19825 * this so that we can recognize when it 19826 * has been trimmed by sbcut() as acks 19827 * come in. 19828 */ 19829 fsb->o_m_len = smb->m_len; 19830 fsb->o_t_len = M_TRAILINGROOM(smb); 19831 } else { 19832 /* 19833 * This is the case where the next mbuf went to NULL. This 19834 * means with this copy we have sent everything in the sb. 19835 * In theory we could clear the fast_output flag, but lets 19836 * not since its possible that we could get more added 19837 * and acks that call the extend function which would let 19838 * us send more. 
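 *
 * (Descriptive note: the o_m_len/o_t_len values saved above are the
 * bookkeeping rack_fo_m_copym() later uses to notice that acks have
 * trimmed the front of the mbuf (m_len shrank) or that new data
 * grew into the trailing space, and to adjust fsb.off to match.)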
19839 */ 19840 fsb->o_m_len = 0; 19841 fsb->o_t_len = 0; 19842 } 19843 } 19844 return (top); 19845 nospace: 19846 if (top) 19847 m_freem(top); 19848 return (NULL); 19849 19850 } 19851 19852 /* 19853 * This is a copy of m_copym(), taking the TSO segment size/limit 19854 * constraints into account, and advancing the sndptr as it goes. 19855 */ 19856 static struct mbuf * 19857 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen, 19858 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff) 19859 { 19860 struct mbuf *m, *n; 19861 int32_t soff; 19862 19863 m = rack->r_ctl.fsb.m; 19864 if (M_TRAILINGROOM(m) != rack->r_ctl.fsb.o_t_len) { 19865 /* 19866 * The trailing space changed, mbufs can grow 19867 * at the tail but they can't shrink from 19868 * it, KASSERT that. Adjust the orig_m_len to 19869 * compensate for this change. 19870 */ 19871 KASSERT((rack->r_ctl.fsb.o_t_len > M_TRAILINGROOM(m)), 19872 ("mbuf:%p rack:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n", 19873 m, 19874 rack, 19875 (intmax_t)M_TRAILINGROOM(m), 19876 rack->r_ctl.fsb.o_t_len, 19877 rack->r_ctl.fsb.o_m_len, 19878 m->m_len)); 19879 rack->r_ctl.fsb.o_m_len += (rack->r_ctl.fsb.o_t_len - M_TRAILINGROOM(m)); 19880 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(m); 19881 } 19882 if (m->m_len < rack->r_ctl.fsb.o_m_len) { 19883 /* 19884 * Mbuf shrank, trimmed off the top by an ack, our 19885 * offset changes. 19886 */ 19887 KASSERT((rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len)), 19888 ("mbuf:%p len:%u rack:%p oml:%u soff:%u\n", 19889 m, m->m_len, 19890 rack, rack->r_ctl.fsb.o_m_len, 19891 rack->r_ctl.fsb.off)); 19892 19893 if (rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len- m->m_len)) 19894 rack->r_ctl.fsb.off -= (rack->r_ctl.fsb.o_m_len - m->m_len); 19895 else 19896 rack->r_ctl.fsb.off = 0; 19897 rack->r_ctl.fsb.o_m_len = m->m_len; 19898 #ifdef INVARIANTS 19899 } else if (m->m_len > rack->r_ctl.fsb.o_m_len) { 19900 panic("rack:%p m:%p m_len grew outside of t_space compensation", 19901 rack, m); 19902 #endif 19903 } 19904 soff = rack->r_ctl.fsb.off; 19905 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff)); 19906 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen)); 19907 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?", 19908 __FUNCTION__, 19909 rack, *plen, m, m->m_len)); 19910 /* Save off the right location before we copy and advance */ 19911 *s_soff = soff; 19912 *s_mb = rack->r_ctl.fsb.m; 19913 n = rack_fo_base_copym(m, soff, plen, 19914 &rack->r_ctl.fsb, 19915 seglimit, segsize, rack->r_ctl.fsb.hw_tls); 19916 return (n); 19917 } 19918 19919 /* Log the buffer level */ 19920 static void 19921 rack_log_queue_level(struct tcpcb *tp, struct tcp_rack *rack, 19922 int len, struct timeval *tv, 19923 uint32_t cts) 19924 { 19925 uint32_t p_rate = 0, p_queue = 0, err = 0; 19926 union tcp_log_stackspecific log; 19927 19928 #ifdef RATELIMIT 19929 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); 19930 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); 19931 #endif 19932 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 19933 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 19934 log.u_bbr.flex1 = p_rate; 19935 log.u_bbr.flex2 = p_queue; 19936 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; 19937 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; 19938 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; 19939 log.u_bbr.flex7 = 99; 19940 log.u_bbr.flex8 = 0; 19941 log.u_bbr.pkts_out = err; 19942 log.u_bbr.delRate = rack->r_ctl.crte->rate; 19943 log.u_bbr.timeStamp = 
cts; 19944 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 19945 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0, 19946 len, &log, false, NULL, __func__, __LINE__, tv); 19947 19948 } 19949 19950 static uint32_t 19951 rack_check_queue_level(struct tcp_rack *rack, struct tcpcb *tp, 19952 struct timeval *tv, uint32_t cts, int len, uint32_t segsiz) 19953 { 19954 uint64_t lentime = 0; 19955 #ifdef RATELIMIT 19956 uint32_t p_rate = 0, p_queue = 0, err; 19957 union tcp_log_stackspecific log; 19958 uint64_t bw; 19959 19960 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); 19961 /* Failed or queue is zero */ 19962 if (err || (p_queue == 0)) { 19963 lentime = 0; 19964 goto out; 19965 } 19966 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); 19967 if (err) { 19968 lentime = 0; 19969 goto out; 19970 } 19971 /* 19972 * If we reach here we have some bytes in 19973 * the queue. The number returned is a value 19974 * between 0 and 0xffff where ffff is full 19975 * and 0 is empty. So how best to make this into 19976 * something usable? 19977 * 19978 * The "safer" way is lets take the b/w gotten 19979 * from the query (which should be our b/w rate) 19980 * and pretend that a full send (our rc_pace_max_segs) 19981 * is outstanding. We factor it so its as if a full 19982 * number of our MSS segment is terms of full 19983 * ethernet segments are outstanding. 19984 */ 19985 bw = p_rate / 8; 19986 if (bw) { 19987 lentime = (rack->r_ctl.rc_pace_max_segs / segsiz); 19988 lentime *= ETHERNET_SEGMENT_SIZE; 19989 lentime *= (uint64_t)HPTS_USEC_IN_SEC; 19990 lentime /= bw; 19991 } else { 19992 /* TSNH -- KASSERT? */ 19993 lentime = 0; 19994 } 19995 out: 19996 if (tcp_bblogging_on(tp)) { 19997 memset(&log, 0, sizeof(log)); 19998 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 19999 log.u_bbr.flex1 = p_rate; 20000 log.u_bbr.flex2 = p_queue; 20001 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; 20002 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; 20003 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; 20004 log.u_bbr.flex7 = 99; 20005 log.u_bbr.flex8 = 0; 20006 log.u_bbr.pkts_out = err; 20007 log.u_bbr.delRate = rack->r_ctl.crte->rate; 20008 log.u_bbr.cur_del_rate = lentime; 20009 log.u_bbr.timeStamp = cts; 20010 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 20011 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0, 20012 len, &log, false, NULL, __func__, __LINE__,tv); 20013 } 20014 #endif 20015 return ((uint32_t)lentime); 20016 } 20017 20018 static int 20019 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm, 20020 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp) 20021 { 20022 /* 20023 * Enter the fast retransmit path. We are given that a sched_pin is 20024 * in place (if accounting is compliled in) and the cycle count taken 20025 * at the entry is in the ts_val. The concept her is that the rsm 20026 * now holds the mbuf offsets and such so we can directly transmit 20027 * without a lot of overhead, the len field is already set for 20028 * us to prohibit us from sending too much (usually its 1MSS). 
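 * Roughly the flow below is: clamp len against the pacing maximum
 * and any hardware TSO limits, build the TCP/IP header from the
 * prebuilt fsb template, copy the data straight out of rsm->m at
 * rsm->soff, checksum, hand the packet to ip{6,}_output and then
 * arm the pacing timer (stretching the slot if we hit ENOBUFS).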
20029 */ 20030 struct ip *ip = NULL; 20031 struct udphdr *udp = NULL; 20032 struct tcphdr *th = NULL; 20033 struct mbuf *m = NULL; 20034 struct inpcb *inp; 20035 uint8_t *cpto; 20036 struct tcp_log_buffer *lgb; 20037 #ifdef TCP_ACCOUNTING 20038 uint64_t crtsc; 20039 int cnt_thru = 1; 20040 #endif 20041 struct tcpopt to; 20042 u_char opt[TCP_MAXOLEN]; 20043 uint32_t hdrlen, optlen; 20044 int32_t slot, segsiz, max_val, tso = 0, error = 0, ulen = 0; 20045 uint16_t flags; 20046 uint32_t if_hw_tsomaxsegcount = 0, startseq; 20047 uint32_t if_hw_tsomaxsegsize; 20048 int32_t ip_sendflag = IP_NO_SND_TAG_RL; 20049 20050 #ifdef INET6 20051 struct ip6_hdr *ip6 = NULL; 20052 20053 if (rack->r_is_v6) { 20054 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 20055 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 20056 } else 20057 #endif /* INET6 */ 20058 { 20059 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 20060 hdrlen = sizeof(struct tcpiphdr); 20061 } 20062 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 20063 goto failed; 20064 } 20065 if (doing_tlp) { 20066 /* Its a TLP add the flag, it may already be there but be sure */ 20067 rsm->r_flags |= RACK_TLP; 20068 } else { 20069 /* If it was a TLP it is not not on this retransmit */ 20070 rsm->r_flags &= ~RACK_TLP; 20071 } 20072 startseq = rsm->r_start; 20073 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 20074 inp = rack->rc_inp; 20075 to.to_flags = 0; 20076 flags = tcp_outflags[tp->t_state]; 20077 if (flags & (TH_SYN|TH_RST)) { 20078 goto failed; 20079 } 20080 if (rsm->r_flags & RACK_HAS_FIN) { 20081 /* We can't send a FIN here */ 20082 goto failed; 20083 } 20084 if (flags & TH_FIN) { 20085 /* We never send a FIN */ 20086 flags &= ~TH_FIN; 20087 } 20088 if (tp->t_flags & TF_RCVD_TSTMP) { 20089 to.to_tsval = ms_cts + tp->ts_offset; 20090 to.to_tsecr = tp->ts_recent; 20091 to.to_flags = TOF_TS; 20092 } 20093 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 20094 /* TCP-MD5 (RFC2385). 
*/ 20095 if (tp->t_flags & TF_SIGNATURE) 20096 to.to_flags |= TOF_SIGNATURE; 20097 #endif 20098 optlen = tcp_addoptions(&to, opt); 20099 hdrlen += optlen; 20100 udp = rack->r_ctl.fsb.udp; 20101 if (udp) 20102 hdrlen += sizeof(struct udphdr); 20103 if (rack->r_ctl.rc_pace_max_segs) 20104 max_val = rack->r_ctl.rc_pace_max_segs; 20105 else if (rack->rc_user_set_max_segs) 20106 max_val = rack->rc_user_set_max_segs * segsiz; 20107 else 20108 max_val = len; 20109 if ((tp->t_flags & TF_TSO) && 20110 V_tcp_do_tso && 20111 (len > segsiz) && 20112 (tp->t_port == 0)) 20113 tso = 1; 20114 #ifdef INET6 20115 if (MHLEN < hdrlen + max_linkhdr) 20116 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 20117 else 20118 #endif 20119 m = m_gethdr(M_NOWAIT, MT_DATA); 20120 if (m == NULL) 20121 goto failed; 20122 m->m_data += max_linkhdr; 20123 m->m_len = hdrlen; 20124 th = rack->r_ctl.fsb.th; 20125 /* Establish the len to send */ 20126 if (len > max_val) 20127 len = max_val; 20128 if ((tso) && (len + optlen > segsiz)) { 20129 uint32_t if_hw_tsomax; 20130 int32_t max_len; 20131 20132 /* extract TSO information */ 20133 if_hw_tsomax = tp->t_tsomax; 20134 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 20135 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 20136 /* 20137 * Check if we should limit by maximum payload 20138 * length: 20139 */ 20140 if (if_hw_tsomax != 0) { 20141 /* compute maximum TSO length */ 20142 max_len = (if_hw_tsomax - hdrlen - 20143 max_linkhdr); 20144 if (max_len <= 0) { 20145 goto failed; 20146 } else if (len > max_len) { 20147 len = max_len; 20148 } 20149 } 20150 if (len <= segsiz) { 20151 /* 20152 * In case there are too many small fragments don't 20153 * use TSO: 20154 */ 20155 tso = 0; 20156 } 20157 } else { 20158 tso = 0; 20159 } 20160 if ((tso == 0) && (len > segsiz)) 20161 len = segsiz; 20162 (void)tcp_get_usecs(tv); 20163 if ((len == 0) || 20164 (len <= MHLEN - hdrlen - max_linkhdr)) { 20165 goto failed; 20166 } 20167 th->th_seq = htonl(rsm->r_start); 20168 th->th_ack = htonl(tp->rcv_nxt); 20169 /* 20170 * The PUSH bit should only be applied 20171 * if the full retransmission is made. If 20172 * we are sending less than this is the 20173 * left hand edge and should not have 20174 * the PUSH bit. 
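 * Concretely, PSH is re-added only when this send covers the whole
 * rsm that originally carried it (RACK_HAD_PUSH is set and
 * len == r_end - r_start), which is exactly the check made below.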
20175 */ 20176 if ((rsm->r_flags & RACK_HAD_PUSH) && 20177 (len == (rsm->r_end - rsm->r_start))) 20178 flags |= TH_PUSH; 20179 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 20180 if (th->th_win == 0) { 20181 tp->t_sndzerowin++; 20182 tp->t_flags |= TF_RXWIN0SENT; 20183 } else 20184 tp->t_flags &= ~TF_RXWIN0SENT; 20185 if (rsm->r_flags & RACK_TLP) { 20186 /* 20187 * TLP should not count in retran count, but 20188 * in its own bin 20189 */ 20190 counter_u64_add(rack_tlp_retran, 1); 20191 counter_u64_add(rack_tlp_retran_bytes, len); 20192 } else { 20193 tp->t_sndrexmitpack++; 20194 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 20195 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 20196 } 20197 #ifdef STATS 20198 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 20199 len); 20200 #endif 20201 if (rsm->m == NULL) 20202 goto failed; 20203 if (rsm->m && 20204 ((rsm->orig_m_len != rsm->m->m_len) || 20205 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { 20206 /* Fix up the orig_m_len and possibly the mbuf offset */ 20207 rack_adjust_orig_mlen(rsm); 20208 } 20209 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 20210 if (len <= segsiz) { 20211 /* 20212 * Must have ran out of mbufs for the copy 20213 * shorten it to no longer need tso. Lets 20214 * not put on sendalot since we are low on 20215 * mbufs. 20216 */ 20217 tso = 0; 20218 } 20219 if ((m->m_next == NULL) || (len <= 0)){ 20220 goto failed; 20221 } 20222 if (udp) { 20223 if (rack->r_is_v6) 20224 ulen = hdrlen + len - sizeof(struct ip6_hdr); 20225 else 20226 ulen = hdrlen + len - sizeof(struct ip); 20227 udp->uh_ulen = htons(ulen); 20228 } 20229 m->m_pkthdr.rcvif = (struct ifnet *)0; 20230 if (TCPS_HAVERCVDSYN(tp->t_state) && 20231 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 20232 int ect = tcp_ecn_output_established(tp, &flags, len, true); 20233 if ((tp->t_state == TCPS_SYN_RECEIVED) && 20234 (tp->t_flags2 & TF2_ECN_SND_ECE)) 20235 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 20236 #ifdef INET6 20237 if (rack->r_is_v6) { 20238 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 20239 ip6->ip6_flow |= htonl(ect << 20); 20240 } 20241 else 20242 #endif 20243 { 20244 ip->ip_tos &= ~IPTOS_ECN_MASK; 20245 ip->ip_tos |= ect; 20246 } 20247 } 20248 if (rack->r_ctl.crte != NULL) { 20249 /* See if we can send via the hw queue */ 20250 slot = rack_check_queue_level(rack, tp, tv, cts, len, segsiz); 20251 /* If there is nothing in queue (no pacing time) we can send via the hw queue */ 20252 if (slot == 0) 20253 ip_sendflag = 0; 20254 } 20255 tcp_set_flags(th, flags); 20256 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 20257 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 20258 if (to.to_flags & TOF_SIGNATURE) { 20259 /* 20260 * Calculate MD5 signature and put it into the place 20261 * determined before. 20262 * NOTE: since TCP options buffer doesn't point into 20263 * mbuf's data, calculate offset and use it. 20264 */ 20265 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 20266 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 20267 /* 20268 * Do not send segment if the calculation of MD5 20269 * digest has failed. 
20270 */ 20271 goto failed; 20272 } 20273 } 20274 #endif 20275 #ifdef INET6 20276 if (rack->r_is_v6) { 20277 if (tp->t_port) { 20278 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 20279 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 20280 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 20281 th->th_sum = htons(0); 20282 UDPSTAT_INC(udps_opackets); 20283 } else { 20284 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 20285 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 20286 th->th_sum = in6_cksum_pseudo(ip6, 20287 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 20288 0); 20289 } 20290 } 20291 #endif 20292 #if defined(INET6) && defined(INET) 20293 else 20294 #endif 20295 #ifdef INET 20296 { 20297 if (tp->t_port) { 20298 m->m_pkthdr.csum_flags = CSUM_UDP; 20299 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 20300 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 20301 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 20302 th->th_sum = htons(0); 20303 UDPSTAT_INC(udps_opackets); 20304 } else { 20305 m->m_pkthdr.csum_flags = CSUM_TCP; 20306 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 20307 th->th_sum = in_pseudo(ip->ip_src.s_addr, 20308 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 20309 IPPROTO_TCP + len + optlen)); 20310 } 20311 /* IP version must be set here for ipv4/ipv6 checking later */ 20312 KASSERT(ip->ip_v == IPVERSION, 20313 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 20314 } 20315 #endif 20316 if (tso) { 20317 /* 20318 * Here we use segsiz since we have no added options besides 20319 * any standard timestamp options (no DSACKs or SACKS are sent 20320 * via either fast-path). 20321 */ 20322 KASSERT(len > segsiz, 20323 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 20324 m->m_pkthdr.csum_flags |= CSUM_TSO; 20325 m->m_pkthdr.tso_segsz = segsiz; 20326 } 20327 #ifdef INET6 20328 if (rack->r_is_v6) { 20329 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 20330 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 20331 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 20332 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 20333 else 20334 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 20335 } 20336 #endif 20337 #if defined(INET) && defined(INET6) 20338 else 20339 #endif 20340 #ifdef INET 20341 { 20342 ip->ip_len = htons(m->m_pkthdr.len); 20343 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 20344 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 20345 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 20346 if (tp->t_port == 0 || len < V_tcp_minmss) { 20347 ip->ip_off |= htons(IP_DF); 20348 } 20349 } else { 20350 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 20351 } 20352 } 20353 #endif 20354 if (doing_tlp == 0) { 20355 /* Set we retransmitted */ 20356 rack->rc_gp_saw_rec = 1; 20357 } else { 20358 /* Its a TLP set ca or ss */ 20359 if (tp->snd_cwnd > tp->snd_ssthresh) { 20360 /* Set we sent in CA */ 20361 rack->rc_gp_saw_ca = 1; 20362 } else { 20363 /* Set we sent in SS */ 20364 rack->rc_gp_saw_ss = 1; 20365 } 20366 } 20367 /* Time to copy in our header */ 20368 cpto = mtod(m, uint8_t *); 20369 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 20370 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 20371 if (optlen) { 20372 bcopy(opt, th + 1, optlen); 20373 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 20374 } else { 20375 th->th_off = sizeof(struct tcphdr) >> 2; 20376 } 20377 if (tcp_bblogging_on(rack->rc_tp)) { 20378 union tcp_log_stackspecific log; 20379 20380 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 20381 
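			/*
			 * This retransmit covers a range the peer collapsed
			 * its window over; note it in the BB log and bump
			 * the collapsed-window retransmit counters.
			 */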
rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 20382 counter_u64_add(rack_collapsed_win_rxt, 1); 20383 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 20384 } 20385 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 20386 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 20387 if (rack->rack_no_prr) 20388 log.u_bbr.flex1 = 0; 20389 else 20390 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 20391 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 20392 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 20393 log.u_bbr.flex4 = max_val; 20394 /* Save off the early/late values */ 20395 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 20396 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 20397 log.u_bbr.bw_inuse = rack_get_bw(rack); 20398 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 20399 if (doing_tlp == 0) 20400 log.u_bbr.flex8 = 1; 20401 else 20402 log.u_bbr.flex8 = 2; 20403 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 20404 log.u_bbr.flex7 = 55; 20405 log.u_bbr.pkts_out = tp->t_maxseg; 20406 log.u_bbr.timeStamp = cts; 20407 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 20408 if (rsm && (rsm->r_rtr_cnt > 0)) { 20409 /* 20410 * When we have a retransmit we want to log the 20411 * burst at send and flight at send from before. 20412 */ 20413 log.u_bbr.flex5 = rsm->r_fas; 20414 log.u_bbr.bbr_substate = rsm->r_bas; 20415 } else { 20416 /* 20417 * This is currently unlikely until we do the 20418 * packet pair probes but I will add it for completeness. 20419 */ 20420 log.u_bbr.flex5 = log.u_bbr.inflight; 20421 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 20422 } 20423 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 20424 log.u_bbr.delivered = 0; 20425 log.u_bbr.rttProp = (uint64_t)rsm; 20426 log.u_bbr.delRate = rsm->r_flags; 20427 log.u_bbr.delRate <<= 31; 20428 log.u_bbr.delRate |= rack->r_must_retran; 20429 log.u_bbr.delRate <<= 1; 20430 log.u_bbr.delRate |= 1; 20431 log.u_bbr.pkt_epoch = __LINE__; 20432 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 20433 len, &log, false, NULL, __func__, __LINE__, tv); 20434 } else 20435 lgb = NULL; 20436 if ((rack->r_ctl.crte != NULL) && 20437 tcp_bblogging_on(tp)) { 20438 rack_log_queue_level(tp, rack, len, tv, cts); 20439 } 20440 #ifdef INET6 20441 if (rack->r_is_v6) { 20442 error = ip6_output(m, inp->in6p_outputopts, 20443 &inp->inp_route6, 20444 ip_sendflag, NULL, NULL, inp); 20445 } 20446 else 20447 #endif 20448 #ifdef INET 20449 { 20450 error = ip_output(m, NULL, 20451 &inp->inp_route, 20452 ip_sendflag, 0, inp); 20453 } 20454 #endif 20455 m = NULL; 20456 if (lgb) { 20457 lgb->tlb_errno = error; 20458 lgb = NULL; 20459 } 20460 /* Move snd_nxt to snd_max so we don't have false retransmissions */ 20461 tp->snd_nxt = tp->snd_max; 20462 if (error) { 20463 goto failed; 20464 } else if (rack->rc_hw_nobuf && (ip_sendflag != IP_NO_SND_TAG_RL)) { 20465 rack->rc_hw_nobuf = 0; 20466 rack->r_ctl.rc_agg_delayed = 0; 20467 rack->r_early = 0; 20468 rack->r_late = 0; 20469 rack->r_ctl.rc_agg_early = 0; 20470 } 20471 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 20472 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls, segsiz); 20473 if (doing_tlp) { 20474 rack->rc_tlp_in_progress = 1; 20475 rack->r_ctl.rc_tlp_cnt_out++; 20476 } 20477 if (error == 0) { 20478 counter_u64_add(rack_total_bytes, len); 20479 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 20480 if (doing_tlp) { 20481 rack->rc_last_sent_tlp_past_cumack 
= 0; 20482 rack->rc_last_sent_tlp_seq_valid = 1; 20483 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 20484 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 20485 } 20486 if (rack->r_ctl.rc_prr_sndcnt >= len) 20487 rack->r_ctl.rc_prr_sndcnt -= len; 20488 else 20489 rack->r_ctl.rc_prr_sndcnt = 0; 20490 } 20491 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 20492 rack->forced_ack = 0; /* If we send something zap the FA flag */ 20493 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 20494 rack->r_ctl.retran_during_recovery += len; 20495 { 20496 int idx; 20497 20498 idx = (len / segsiz) + 3; 20499 if (idx >= TCP_MSS_ACCT_ATIMER) 20500 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 20501 else 20502 counter_u64_add(rack_out_size[idx], 1); 20503 } 20504 if (tp->t_rtttime == 0) { 20505 tp->t_rtttime = ticks; 20506 tp->t_rtseq = startseq; 20507 KMOD_TCPSTAT_INC(tcps_segstimed); 20508 } 20509 counter_u64_add(rack_fto_rsm_send, 1); 20510 if (error && (error == ENOBUFS)) { 20511 if (rack->r_ctl.crte != NULL) { 20512 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); 20513 if (tcp_bblogging_on(rack->rc_tp)) 20514 rack_log_queue_level(tp, rack, len, tv, cts); 20515 } else 20516 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); 20517 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 20518 if (rack->rc_enobuf < 0x7f) 20519 rack->rc_enobuf++; 20520 if (slot < (10 * HPTS_USEC_IN_MSEC)) 20521 slot = 10 * HPTS_USEC_IN_MSEC; 20522 if (rack->r_ctl.crte != NULL) { 20523 counter_u64_add(rack_saw_enobuf_hw, 1); 20524 tcp_rl_log_enobuf(rack->r_ctl.crte); 20525 } 20526 counter_u64_add(rack_saw_enobuf, 1); 20527 } else { 20528 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz, __LINE__); 20529 } 20530 rack_start_hpts_timer(rack, tp, cts, slot, len, 0); 20531 #ifdef TCP_ACCOUNTING 20532 crtsc = get_cyclecount(); 20533 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 20534 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 20535 } 20536 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 20537 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 20538 } 20539 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 20540 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); 20541 } 20542 sched_unpin(); 20543 #endif 20544 return (0); 20545 failed: 20546 if (m) 20547 m_free(m); 20548 return (-1); 20549 } 20550 20551 static void 20552 rack_sndbuf_autoscale(struct tcp_rack *rack) 20553 { 20554 /* 20555 * Automatic sizing of send socket buffer. Often the send buffer 20556 * size is not optimally adjusted to the actual network conditions 20557 * at hand (delay bandwidth product). Setting the buffer size too 20558 * small limits throughput on links with high bandwidth and high 20559 * delay (eg. trans-continental/oceanic links). Setting the 20560 * buffer size too big consumes too much real kernel memory, 20561 * especially with many connections on busy servers. 20562 * 20563 * The criteria to step up the send buffer one notch are: 20564 * 1. receive window of remote host is larger than send buffer 20565 * (with a fudge factor of 5/4th); 20566 * 2. send buffer is filled to 7/8th with data (so we actually 20567 * have data to make use of it); 20568 * 3. send buffer fill has not hit maximal automatic size; 20569 * 4. our send window (slow start and cogestion controlled) is 20570 * larger than sent but unacknowledged data in send buffer. 
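 *
 *    As a rough illustration (numbers are only an example): with
 *    rack_autosndbuf_inc at 20 (treated as a percentage below) and
 *    a 64kB sb_hiwat, the step works out to about 13kB; it is never
 *    taken smaller than V_tcp_autosndbuf_inc, is added on top of
 *    the current sb_hiwat, and the result is capped at
 *    V_tcp_autosndbuf_max.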
20571 * 20572 * Note that the rack version moves things much faster since 20573 * we want to avoid hitting cache lines in the rack_fast_output() 20574 * path so this is called much less often and thus moves 20575 * the SB forward by a percentage. 20576 */ 20577 struct socket *so; 20578 struct tcpcb *tp; 20579 uint32_t sendwin, scaleup; 20580 20581 tp = rack->rc_tp; 20582 so = rack->rc_inp->inp_socket; 20583 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); 20584 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { 20585 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && 20586 sbused(&so->so_snd) >= 20587 (so->so_snd.sb_hiwat / 8 * 7) && 20588 sbused(&so->so_snd) < V_tcp_autosndbuf_max && 20589 sendwin >= (sbused(&so->so_snd) - 20590 (tp->snd_max - tp->snd_una))) { 20591 if (rack_autosndbuf_inc) 20592 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; 20593 else 20594 scaleup = V_tcp_autosndbuf_inc; 20595 if (scaleup < V_tcp_autosndbuf_inc) 20596 scaleup = V_tcp_autosndbuf_inc; 20597 scaleup += so->so_snd.sb_hiwat; 20598 if (scaleup > V_tcp_autosndbuf_max) 20599 scaleup = V_tcp_autosndbuf_max; 20600 if (!sbreserve_locked(so, SO_SND, scaleup, curthread)) 20601 so->so_snd.sb_flags &= ~SB_AUTOSIZE; 20602 } 20603 } 20604 } 20605 20606 static int 20607 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val, 20608 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err) 20609 { 20610 /* 20611 * Enter to do fast output. We are given that the sched_pin is 20612 * in place (if accounting is compiled in) and the cycle count taken 20613 * at entry is in place in ts_val. The idea here is that 20614 * we know how many more bytes needs to be sent (presumably either 20615 * during pacing or to fill the cwnd and that was greater than 20616 * the max-burst). We have how much to send and all the info we 20617 * need to just send. 
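 * The path below reuses the prebuilt fsb header, loops via the
 * "again" label until left_to_send (or the burst limit) is
 * exhausted, and finishes by computing a pacing delay and starting
 * the hpts timer.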
20618 */ 20619 #ifdef INET 20620 struct ip *ip = NULL; 20621 #endif 20622 struct udphdr *udp = NULL; 20623 struct tcphdr *th = NULL; 20624 struct mbuf *m, *s_mb; 20625 struct inpcb *inp; 20626 uint8_t *cpto; 20627 struct tcp_log_buffer *lgb; 20628 #ifdef TCP_ACCOUNTING 20629 uint64_t crtsc; 20630 #endif 20631 struct tcpopt to; 20632 u_char opt[TCP_MAXOLEN]; 20633 uint32_t hdrlen, optlen; 20634 #ifdef TCP_ACCOUNTING 20635 int cnt_thru = 1; 20636 #endif 20637 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0; 20638 uint16_t flags; 20639 uint32_t s_soff; 20640 uint32_t if_hw_tsomaxsegcount = 0, startseq; 20641 uint32_t if_hw_tsomaxsegsize; 20642 uint32_t add_flag = RACK_SENT_FP; 20643 #ifdef INET6 20644 struct ip6_hdr *ip6 = NULL; 20645 20646 if (rack->r_is_v6) { 20647 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 20648 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 20649 } else 20650 #endif /* INET6 */ 20651 { 20652 #ifdef INET 20653 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 20654 hdrlen = sizeof(struct tcpiphdr); 20655 #endif 20656 } 20657 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 20658 m = NULL; 20659 goto failed; 20660 } 20661 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 20662 startseq = tp->snd_max; 20663 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 20664 inp = rack->rc_inp; 20665 len = rack->r_ctl.fsb.left_to_send; 20666 to.to_flags = 0; 20667 flags = rack->r_ctl.fsb.tcp_flags; 20668 if (tp->t_flags & TF_RCVD_TSTMP) { 20669 to.to_tsval = ms_cts + tp->ts_offset; 20670 to.to_tsecr = tp->ts_recent; 20671 to.to_flags = TOF_TS; 20672 } 20673 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 20674 /* TCP-MD5 (RFC2385). */ 20675 if (tp->t_flags & TF_SIGNATURE) 20676 to.to_flags |= TOF_SIGNATURE; 20677 #endif 20678 optlen = tcp_addoptions(&to, opt); 20679 hdrlen += optlen; 20680 udp = rack->r_ctl.fsb.udp; 20681 if (udp) 20682 hdrlen += sizeof(struct udphdr); 20683 if (rack->r_ctl.rc_pace_max_segs) 20684 max_val = rack->r_ctl.rc_pace_max_segs; 20685 else if (rack->rc_user_set_max_segs) 20686 max_val = rack->rc_user_set_max_segs * segsiz; 20687 else 20688 max_val = len; 20689 if ((tp->t_flags & TF_TSO) && 20690 V_tcp_do_tso && 20691 (len > segsiz) && 20692 (tp->t_port == 0)) 20693 tso = 1; 20694 again: 20695 #ifdef INET6 20696 if (MHLEN < hdrlen + max_linkhdr) 20697 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 20698 else 20699 #endif 20700 m = m_gethdr(M_NOWAIT, MT_DATA); 20701 if (m == NULL) 20702 goto failed; 20703 m->m_data += max_linkhdr; 20704 m->m_len = hdrlen; 20705 th = rack->r_ctl.fsb.th; 20706 /* Establish the len to send */ 20707 if (len > max_val) 20708 len = max_val; 20709 if ((tso) && (len + optlen > segsiz)) { 20710 uint32_t if_hw_tsomax; 20711 int32_t max_len; 20712 20713 /* extract TSO information */ 20714 if_hw_tsomax = tp->t_tsomax; 20715 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 20716 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 20717 /* 20718 * Check if we should limit by maximum payload 20719 * length: 20720 */ 20721 if (if_hw_tsomax != 0) { 20722 /* compute maximum TSO length */ 20723 max_len = (if_hw_tsomax - hdrlen - 20724 max_linkhdr); 20725 if (max_len <= 0) { 20726 goto failed; 20727 } else if (len > max_len) { 20728 len = max_len; 20729 } 20730 } 20731 if (len <= segsiz) { 20732 /* 20733 * In case there are too many small fragments don't 20734 * use TSO: 20735 */ 20736 tso = 0; 20737 } 20738 } else { 20739 tso = 0; 20740 } 20741 if ((tso == 0) && (len > segsiz)) 20742 len = segsiz; 20743 
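	/*
	 * At this point len has been clamped by the pacing burst limit
	 * (max_val), the hardware TSO limits and, when TSO is off, a
	 * single segsiz; what follows only has to build and send it.
	 */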
(void)tcp_get_usecs(tv); 20744 if ((len == 0) || 20745 (len <= MHLEN - hdrlen - max_linkhdr)) { 20746 goto failed; 20747 } 20748 sb_offset = tp->snd_max - tp->snd_una; 20749 th->th_seq = htonl(tp->snd_max); 20750 th->th_ack = htonl(tp->rcv_nxt); 20751 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 20752 if (th->th_win == 0) { 20753 tp->t_sndzerowin++; 20754 tp->t_flags |= TF_RXWIN0SENT; 20755 } else 20756 tp->t_flags &= ~TF_RXWIN0SENT; 20757 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 20758 KMOD_TCPSTAT_INC(tcps_sndpack); 20759 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 20760 #ifdef STATS 20761 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 20762 len); 20763 #endif 20764 if (rack->r_ctl.fsb.m == NULL) 20765 goto failed; 20766 20767 /* s_mb and s_soff are saved for rack_log_output */ 20768 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 20769 &s_mb, &s_soff); 20770 if (len <= segsiz) { 20771 /* 20772 * Must have ran out of mbufs for the copy 20773 * shorten it to no longer need tso. Lets 20774 * not put on sendalot since we are low on 20775 * mbufs. 20776 */ 20777 tso = 0; 20778 } 20779 if (rack->r_ctl.fsb.rfo_apply_push && 20780 (len == rack->r_ctl.fsb.left_to_send)) { 20781 tcp_set_flags(th, flags | TH_PUSH); 20782 add_flag |= RACK_HAD_PUSH; 20783 } 20784 if ((m->m_next == NULL) || (len <= 0)){ 20785 goto failed; 20786 } 20787 if (udp) { 20788 if (rack->r_is_v6) 20789 ulen = hdrlen + len - sizeof(struct ip6_hdr); 20790 else 20791 ulen = hdrlen + len - sizeof(struct ip); 20792 udp->uh_ulen = htons(ulen); 20793 } 20794 m->m_pkthdr.rcvif = (struct ifnet *)0; 20795 if (TCPS_HAVERCVDSYN(tp->t_state) && 20796 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 20797 int ect = tcp_ecn_output_established(tp, &flags, len, false); 20798 if ((tp->t_state == TCPS_SYN_RECEIVED) && 20799 (tp->t_flags2 & TF2_ECN_SND_ECE)) 20800 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 20801 #ifdef INET6 20802 if (rack->r_is_v6) { 20803 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 20804 ip6->ip6_flow |= htonl(ect << 20); 20805 } 20806 else 20807 #endif 20808 { 20809 #ifdef INET 20810 ip->ip_tos &= ~IPTOS_ECN_MASK; 20811 ip->ip_tos |= ect; 20812 #endif 20813 } 20814 } 20815 tcp_set_flags(th, flags); 20816 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 20817 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 20818 if (to.to_flags & TOF_SIGNATURE) { 20819 /* 20820 * Calculate MD5 signature and put it into the place 20821 * determined before. 20822 * NOTE: since TCP options buffer doesn't point into 20823 * mbuf's data, calculate offset and use it. 20824 */ 20825 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 20826 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 20827 /* 20828 * Do not send segment if the calculation of MD5 20829 * digest has failed. 
20830 */ 20831 goto failed; 20832 } 20833 } 20834 #endif 20835 #ifdef INET6 20836 if (rack->r_is_v6) { 20837 if (tp->t_port) { 20838 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 20839 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 20840 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 20841 th->th_sum = htons(0); 20842 UDPSTAT_INC(udps_opackets); 20843 } else { 20844 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 20845 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 20846 th->th_sum = in6_cksum_pseudo(ip6, 20847 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 20848 0); 20849 } 20850 } 20851 #endif 20852 #if defined(INET6) && defined(INET) 20853 else 20854 #endif 20855 #ifdef INET 20856 { 20857 if (tp->t_port) { 20858 m->m_pkthdr.csum_flags = CSUM_UDP; 20859 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 20860 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 20861 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 20862 th->th_sum = htons(0); 20863 UDPSTAT_INC(udps_opackets); 20864 } else { 20865 m->m_pkthdr.csum_flags = CSUM_TCP; 20866 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 20867 th->th_sum = in_pseudo(ip->ip_src.s_addr, 20868 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 20869 IPPROTO_TCP + len + optlen)); 20870 } 20871 /* IP version must be set here for ipv4/ipv6 checking later */ 20872 KASSERT(ip->ip_v == IPVERSION, 20873 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 20874 } 20875 #endif 20876 if (tso) { 20877 /* 20878 * Here we use segsiz since we have no added options besides 20879 * any standard timestamp options (no DSACKs or SACKS are sent 20880 * via either fast-path). 20881 */ 20882 KASSERT(len > segsiz, 20883 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 20884 m->m_pkthdr.csum_flags |= CSUM_TSO; 20885 m->m_pkthdr.tso_segsz = segsiz; 20886 } 20887 #ifdef INET6 20888 if (rack->r_is_v6) { 20889 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 20890 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 20891 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 20892 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 20893 else 20894 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 20895 } 20896 #endif 20897 #if defined(INET) && defined(INET6) 20898 else 20899 #endif 20900 #ifdef INET 20901 { 20902 ip->ip_len = htons(m->m_pkthdr.len); 20903 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 20904 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 20905 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 20906 if (tp->t_port == 0 || len < V_tcp_minmss) { 20907 ip->ip_off |= htons(IP_DF); 20908 } 20909 } else { 20910 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 20911 } 20912 } 20913 #endif 20914 if (tp->snd_cwnd > tp->snd_ssthresh) { 20915 /* Set we sent in CA */ 20916 rack->rc_gp_saw_ca = 1; 20917 } else { 20918 /* Set we sent in SS */ 20919 rack->rc_gp_saw_ss = 1; 20920 } 20921 /* Time to copy in our header */ 20922 cpto = mtod(m, uint8_t *); 20923 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 20924 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 20925 if (optlen) { 20926 bcopy(opt, th + 1, optlen); 20927 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 20928 } else { 20929 th->th_off = sizeof(struct tcphdr) >> 2; 20930 } 20931 if ((rack->r_ctl.crte != NULL) && 20932 tcp_bblogging_on(tp)) { 20933 rack_log_queue_level(tp, rack, len, tv, cts); 20934 } 20935 if (tcp_bblogging_on(rack->rc_tp)) { 20936 union tcp_log_stackspecific log; 20937 20938 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 20939 log.u_bbr.inhpts = 
tcp_in_hpts(rack->rc_tp); 20940 if (rack->rack_no_prr) 20941 log.u_bbr.flex1 = 0; 20942 else 20943 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 20944 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 20945 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 20946 log.u_bbr.flex4 = max_val; 20947 /* Save off the early/late values */ 20948 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 20949 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 20950 log.u_bbr.bw_inuse = rack_get_bw(rack); 20951 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 20952 log.u_bbr.flex8 = 0; 20953 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 20954 log.u_bbr.flex7 = 44; 20955 log.u_bbr.pkts_out = tp->t_maxseg; 20956 log.u_bbr.timeStamp = cts; 20957 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 20958 log.u_bbr.flex5 = log.u_bbr.inflight; 20959 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 20960 log.u_bbr.delivered = 0; 20961 log.u_bbr.rttProp = 0; 20962 log.u_bbr.delRate = rack->r_must_retran; 20963 log.u_bbr.delRate <<= 1; 20964 log.u_bbr.pkt_epoch = __LINE__; 20965 /* For fast output no retrans so just inflight and how many mss we send */ 20966 log.u_bbr.flex5 = log.u_bbr.inflight; 20967 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 20968 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 20969 len, &log, false, NULL, __func__, __LINE__, tv); 20970 } else 20971 lgb = NULL; 20972 #ifdef INET6 20973 if (rack->r_is_v6) { 20974 error = ip6_output(m, inp->in6p_outputopts, 20975 &inp->inp_route6, 20976 0, NULL, NULL, inp); 20977 } 20978 #endif 20979 #if defined(INET) && defined(INET6) 20980 else 20981 #endif 20982 #ifdef INET 20983 { 20984 error = ip_output(m, NULL, 20985 &inp->inp_route, 20986 0, 0, inp); 20987 } 20988 #endif 20989 if (lgb) { 20990 lgb->tlb_errno = error; 20991 lgb = NULL; 20992 } 20993 if (error) { 20994 *send_err = error; 20995 m = NULL; 20996 goto failed; 20997 } else if (rack->rc_hw_nobuf) { 20998 rack->rc_hw_nobuf = 0; 20999 rack->r_ctl.rc_agg_delayed = 0; 21000 rack->r_early = 0; 21001 rack->r_late = 0; 21002 rack->r_ctl.rc_agg_early = 0; 21003 } 21004 if ((error == 0) && (rack->lt_bw_up == 0)) { 21005 /* Unlikely */ 21006 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(tv); 21007 rack->r_ctl.lt_seq = tp->snd_una; 21008 rack->lt_bw_up = 1; 21009 } else if ((error == 0) && 21010 (((tp->snd_max + len) - rack->r_ctl.lt_seq) > 0x7fffffff)) { 21011 /* 21012 * Need to record what we have since we are 21013 * approaching seq wrap. 
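 * Fold the bytes and elapsed time gathered so far into
 * lt_bw_bytes/lt_bw_time and restart lt_seq at snd_una, so the
 * outstanding delta stays comfortably below 2^31.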
21014 */ 21015 struct timeval tv; 21016 uint64_t tmark; 21017 21018 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); 21019 rack->r_ctl.lt_seq = tp->snd_una; 21020 tmark = tcp_get_u64_usecs(&tv); 21021 if (tmark > rack->r_ctl.lt_timemark) { 21022 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 21023 rack->r_ctl.lt_timemark = tmark; 21024 } 21025 } 21026 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 21027 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls, segsiz); 21028 m = NULL; 21029 if (tp->snd_una == tp->snd_max) { 21030 rack->r_ctl.rc_tlp_rxt_last_time = cts; 21031 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 21032 tp->t_acktime = ticks; 21033 } 21034 counter_u64_add(rack_total_bytes, len); 21035 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 21036 21037 rack->forced_ack = 0; /* If we send something zap the FA flag */ 21038 tot_len += len; 21039 if ((tp->t_flags & TF_GPUTINPROG) == 0) 21040 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 21041 tp->snd_max += len; 21042 tp->snd_nxt = tp->snd_max; 21043 if (rack->rc_new_rnd_needed) { 21044 rack_new_round_starts(tp, rack, tp->snd_max); 21045 } 21046 { 21047 int idx; 21048 21049 idx = (len / segsiz) + 3; 21050 if (idx >= TCP_MSS_ACCT_ATIMER) 21051 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 21052 else 21053 counter_u64_add(rack_out_size[idx], 1); 21054 } 21055 if (len <= rack->r_ctl.fsb.left_to_send) 21056 rack->r_ctl.fsb.left_to_send -= len; 21057 else 21058 rack->r_ctl.fsb.left_to_send = 0; 21059 if (rack->r_ctl.fsb.left_to_send < segsiz) { 21060 rack->r_fast_output = 0; 21061 rack->r_ctl.fsb.left_to_send = 0; 21062 /* At the end of fast_output scale up the sb */ 21063 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd); 21064 rack_sndbuf_autoscale(rack); 21065 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd); 21066 } 21067 if (tp->t_rtttime == 0) { 21068 tp->t_rtttime = ticks; 21069 tp->t_rtseq = startseq; 21070 KMOD_TCPSTAT_INC(tcps_segstimed); 21071 } 21072 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 21073 (max_val > len) && 21074 (tso == 0)) { 21075 max_val -= len; 21076 len = segsiz; 21077 th = rack->r_ctl.fsb.th; 21078 #ifdef TCP_ACCOUNTING 21079 cnt_thru++; 21080 #endif 21081 goto again; 21082 } 21083 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 21084 counter_u64_add(rack_fto_send, 1); 21085 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz, __LINE__); 21086 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0); 21087 #ifdef TCP_ACCOUNTING 21088 crtsc = get_cyclecount(); 21089 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21090 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 21091 } 21092 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21093 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 21094 } 21095 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21096 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz); 21097 } 21098 sched_unpin(); 21099 #endif 21100 return (0); 21101 failed: 21102 if (m) 21103 m_free(m); 21104 rack->r_fast_output = 0; 21105 return (-1); 21106 } 21107 21108 static inline void 21109 rack_setup_fast_output(struct tcpcb *tp, struct tcp_rack *rack, 21110 struct sockbuf *sb, 21111 int len, int orig_len, int segsiz, uint32_t pace_max_seg, 21112 bool hw_tls, 21113 uint16_t flags) 21114 { 21115 rack->r_fast_output = 1; 21116 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 21117 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 21118 
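	/*
	 * Snapshot the current mbuf length and (below) its trailing
	 * room; rack_fo_m_copym() compares against these later to
	 * notice when acks have trimmed the mbuf via sbcut() or when
	 * data was appended into the trailing space.
	 */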
rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); 21119 rack->r_ctl.fsb.tcp_flags = flags; 21120 rack->r_ctl.fsb.left_to_send = orig_len - len; 21121 if (rack->r_ctl.fsb.left_to_send < pace_max_seg) { 21122 /* Less than a full sized pace, lets not */ 21123 rack->r_fast_output = 0; 21124 return; 21125 } else { 21126 /* Round down to the nearest pace_max_seg */ 21127 rack->r_ctl.fsb.left_to_send = rounddown(rack->r_ctl.fsb.left_to_send, pace_max_seg); 21128 } 21129 if (hw_tls) 21130 rack->r_ctl.fsb.hw_tls = 1; 21131 else 21132 rack->r_ctl.fsb.hw_tls = 0; 21133 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 21134 ("rack:%p left_to_send:%u sbavail:%u out:%u", 21135 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 21136 (tp->snd_max - tp->snd_una))); 21137 if (rack->r_ctl.fsb.left_to_send < segsiz) 21138 rack->r_fast_output = 0; 21139 else { 21140 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 21141 rack->r_ctl.fsb.rfo_apply_push = 1; 21142 else 21143 rack->r_ctl.fsb.rfo_apply_push = 0; 21144 } 21145 } 21146 21147 static uint32_t 21148 rack_get_hpts_pacing_min_for_bw(struct tcp_rack *rack, int32_t segsiz) 21149 { 21150 uint64_t min_time; 21151 uint32_t maxlen; 21152 21153 min_time = (uint64_t)get_hpts_min_sleep_time(); 21154 maxlen = (uint32_t)((rack->r_ctl.gp_bw * min_time) / (uint64_t)HPTS_USEC_IN_SEC); 21155 maxlen = roundup(maxlen, segsiz); 21156 return (maxlen); 21157 } 21158 21159 static struct rack_sendmap * 21160 rack_check_collapsed(struct tcp_rack *rack, uint32_t cts) 21161 { 21162 struct rack_sendmap *rsm = NULL; 21163 int thresh; 21164 21165 restart: 21166 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); 21167 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) { 21168 /* Nothing, strange turn off validity */ 21169 rack->r_collapse_point_valid = 0; 21170 return (NULL); 21171 } 21172 /* Can we send it yet? */ 21173 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) { 21174 /* 21175 * Receiver window has not grown enough for 21176 * the segment to be put on the wire. 21177 */ 21178 return (NULL); 21179 } 21180 if (rsm->r_flags & RACK_ACKED) { 21181 /* 21182 * It has been sacked, lets move to the 21183 * next one if possible. 21184 */ 21185 rack->r_ctl.last_collapse_point = rsm->r_end; 21186 /* Are we done? */ 21187 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 21188 rack->r_ctl.high_collapse_point)) { 21189 rack->r_collapse_point_valid = 0; 21190 return (NULL); 21191 } 21192 goto restart; 21193 } 21194 /* Now has it been long enough ? */ 21195 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts, __LINE__, 1); 21196 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) { 21197 rack_log_collapse(rack, rsm->r_start, 21198 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 21199 thresh, __LINE__, 6, rsm->r_flags, rsm); 21200 return (rsm); 21201 } 21202 /* Not enough time */ 21203 rack_log_collapse(rack, rsm->r_start, 21204 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 21205 thresh, __LINE__, 7, rsm->r_flags, rsm); 21206 return (NULL); 21207 } 21208 21209 static void 21210 rack_credit_back_policer_idle_time(struct tcp_rack *rack, uint64_t idle_t, int line) 21211 { 21212 /* 21213 * We were idle some time (idle_t) and so our policer bucket 21214 * needs to grow. It can go no higher than policer_bucket_size. 
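 * The credit added below is just idle_t (usec) times policer_bw
 * scaled by HPTS_USEC_IN_SEC, i.e. roughly the bytes the policer
 * would have refilled while we were quiet, clamped so the bucket
 * never exceeds policer_bucket_size.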
21215 */ 21216 uint64_t len; 21217 21218 len = idle_t * rack->r_ctl.policer_bw; 21219 len /= HPTS_USEC_IN_SEC; 21220 rack->r_ctl.current_policer_bucket += (uint32_t)len; 21221 if (rack->r_ctl.policer_bucket_size < rack->r_ctl.current_policer_bucket) { 21222 rack->r_ctl.current_policer_bucket = rack->r_ctl.policer_bucket_size; 21223 } 21224 if (rack_verbose_logging > 0) 21225 policer_detection_log(rack, (uint32_t)len, line, (uint32_t)idle_t, 0, 7); 21226 } 21227 21228 static inline void 21229 rack_validate_sizes(struct tcp_rack *rack, int32_t *len, int32_t segsiz, uint32_t pace_max_seg) 21230 { 21231 if ((rack->full_size_rxt == 0) && 21232 (rack->shape_rxt_to_pacing_min == 0) && 21233 (*len >= segsiz)) { 21234 *len = segsiz; 21235 } else if (rack->shape_rxt_to_pacing_min && 21236 rack->gp_ready) { 21237 /* We use pacing min as shaping len req */ 21238 uint32_t maxlen; 21239 21240 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 21241 if (*len > maxlen) 21242 *len = maxlen; 21243 } else { 21244 /* 21245 * The else is full_size_rxt is on so send it all 21246 * note we do need to check this for exceeding 21247 * our max segment size due to the fact that 21248 * we do sometimes merge chunks together i.e. 21249 * we cannot just assume that we will never have 21250 * a chunk greater than pace_max_seg 21251 */ 21252 if (*len > pace_max_seg) 21253 *len = pace_max_seg; 21254 } 21255 } 21256 21257 static int 21258 rack_output(struct tcpcb *tp) 21259 { 21260 struct socket *so; 21261 uint32_t recwin; 21262 uint32_t sb_offset, s_moff = 0; 21263 int32_t len, error = 0; 21264 uint16_t flags; 21265 struct mbuf *m, *s_mb = NULL; 21266 struct mbuf *mb; 21267 uint32_t if_hw_tsomaxsegcount = 0; 21268 uint32_t if_hw_tsomaxsegsize; 21269 int32_t segsiz, minseg; 21270 long tot_len_this_send = 0; 21271 #ifdef INET 21272 struct ip *ip = NULL; 21273 #endif 21274 struct udphdr *udp = NULL; 21275 struct tcp_rack *rack; 21276 struct tcphdr *th; 21277 uint8_t pass = 0; 21278 uint8_t mark = 0; 21279 uint8_t check_done = 0; 21280 uint8_t wanted_cookie = 0; 21281 u_char opt[TCP_MAXOLEN]; 21282 unsigned ipoptlen, optlen, hdrlen, ulen=0; 21283 uint32_t rack_seq; 21284 21285 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 21286 unsigned ipsec_optlen = 0; 21287 21288 #endif 21289 int32_t idle, sendalot; 21290 uint32_t tot_idle; 21291 int32_t sub_from_prr = 0; 21292 volatile int32_t sack_rxmit; 21293 struct rack_sendmap *rsm = NULL; 21294 int32_t tso, mtu; 21295 struct tcpopt to; 21296 int32_t slot = 0; 21297 int32_t sup_rack = 0; 21298 uint32_t cts, ms_cts, delayed, early; 21299 uint32_t add_flag = RACK_SENT_SP; 21300 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */ 21301 uint8_t doing_tlp = 0; 21302 uint32_t cwnd_to_use, pace_max_seg; 21303 int32_t do_a_prefetch = 0; 21304 int32_t prefetch_rsm = 0; 21305 int32_t orig_len = 0; 21306 struct timeval tv; 21307 int32_t prefetch_so_done = 0; 21308 struct tcp_log_buffer *lgb; 21309 struct inpcb *inp = tptoinpcb(tp); 21310 struct sockbuf *sb; 21311 uint64_t ts_val = 0; 21312 #ifdef TCP_ACCOUNTING 21313 uint64_t crtsc; 21314 #endif 21315 #ifdef INET6 21316 struct ip6_hdr *ip6 = NULL; 21317 int32_t isipv6; 21318 #endif 21319 bool hpts_calling, hw_tls = false; 21320 21321 NET_EPOCH_ASSERT(); 21322 INP_WLOCK_ASSERT(inp); 21323 21324 /* setup and take the cache hits here */ 21325 rack = (struct tcp_rack *)tp->t_fb_ptr; 21326 #ifdef TCP_ACCOUNTING 21327 sched_pin(); 21328 ts_val = get_cyclecount(); 21329 #endif 21330 hpts_calling = !!(tp->t_flags2 & 
TF2_HPTS_CALLS); 21331 tp->t_flags2 &= ~TF2_HPTS_CALLS; 21332 #ifdef TCP_OFFLOAD 21333 if (tp->t_flags & TF_TOE) { 21334 #ifdef TCP_ACCOUNTING 21335 sched_unpin(); 21336 #endif 21337 return (tcp_offload_output(tp)); 21338 } 21339 #endif 21340 if (rack->rack_deferred_inited == 0) { 21341 /* 21342 * If we are the connecting socket we will 21343 * hit rack_init() when no sequence numbers 21344 * are setup. This makes it so we must defer 21345 * some initialization. Call that now. 21346 */ 21347 rack_deferred_init(tp, rack); 21348 } 21349 /* 21350 * For TFO connections in SYN_RECEIVED, only allow the initial 21351 * SYN|ACK and those sent by the retransmit timer. 21352 */ 21353 if ((tp->t_flags & TF_FASTOPEN) && 21354 (tp->t_state == TCPS_SYN_RECEIVED) && 21355 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 21356 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 21357 #ifdef TCP_ACCOUNTING 21358 sched_unpin(); 21359 #endif 21360 return (0); 21361 } 21362 #ifdef INET6 21363 if (rack->r_state) { 21364 /* Use the cache line loaded if possible */ 21365 isipv6 = rack->r_is_v6; 21366 } else { 21367 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 21368 } 21369 #endif 21370 early = 0; 21371 cts = tcp_get_usecs(&tv); 21372 ms_cts = tcp_tv_to_mssectick(&tv); 21373 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 21374 tcp_in_hpts(rack->rc_tp)) { 21375 /* 21376 * We are on the hpts for some timer but not hptsi output. 21377 * Remove from the hpts unconditionally. 21378 */ 21379 rack_timer_cancel(tp, rack, cts, __LINE__); 21380 } 21381 /* Are we pacing and late? */ 21382 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 21383 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 21384 /* We are delayed */ 21385 delayed = cts - rack->r_ctl.rc_last_output_to; 21386 } else { 21387 delayed = 0; 21388 } 21389 /* Do the timers, which may override the pacer */ 21390 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 21391 int retval; 21392 21393 retval = rack_process_timers(tp, rack, cts, hpts_calling, 21394 &doing_tlp); 21395 if (retval != 0) { 21396 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 21397 #ifdef TCP_ACCOUNTING 21398 sched_unpin(); 21399 #endif 21400 /* 21401 * If timers want tcp_drop(), then pass error out, 21402 * otherwise suppress it. 21403 */ 21404 return (retval < 0 ? retval : 0); 21405 } 21406 } 21407 if (rack->rc_in_persist) { 21408 if (tcp_in_hpts(rack->rc_tp) == 0) { 21409 /* Timer is not running */ 21410 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 21411 } 21412 #ifdef TCP_ACCOUNTING 21413 sched_unpin(); 21414 #endif 21415 return (0); 21416 } 21417 if ((rack->rc_ack_required == 1) && 21418 (rack->r_timer_override == 0)){ 21419 /* A timeout occurred and no ack has arrived */ 21420 if (tcp_in_hpts(rack->rc_tp) == 0) { 21421 /* Timer is not running */ 21422 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 21423 } 21424 #ifdef TCP_ACCOUNTING 21425 sched_unpin(); 21426 #endif 21427 return (0); 21428 } 21429 if ((rack->r_timer_override) || 21430 (rack->rc_ack_can_sendout_data) || 21431 (delayed) || 21432 (tp->t_state < TCPS_ESTABLISHED)) { 21433 rack->rc_ack_can_sendout_data = 0; 21434 if (tcp_in_hpts(rack->rc_tp)) 21435 tcp_hpts_remove(rack->rc_tp); 21436 } else if (tcp_in_hpts(rack->rc_tp)) { 21437 /* 21438 * On the hpts you can't pass even if ACKNOW is on, we will 21439 * when the hpts fires. 
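 * The cycles spent getting here are charged to SND_BLOCKED below
 * (when TCP_ACCOUNTING is compiled in) and the attempt is counted
 * under TCP_MSS_ACCT_INPACE; actual output resumes from the hpts
 * callout.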
21440 */ 21441 #ifdef TCP_ACCOUNTING 21442 crtsc = get_cyclecount(); 21443 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21444 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 21445 } 21446 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21447 tp->tcp_cnt_counters[SND_BLOCKED]++; 21448 } 21449 sched_unpin(); 21450 #endif 21451 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 21452 return (0); 21453 } 21454 /* Finish out both pacing early and late accounting */ 21455 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 21456 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 21457 early = rack->r_ctl.rc_last_output_to - cts; 21458 } else 21459 early = 0; 21460 if (delayed && (rack->rc_always_pace == 1)) { 21461 rack->r_ctl.rc_agg_delayed += delayed; 21462 rack->r_late = 1; 21463 } else if (early && (rack->rc_always_pace == 1)) { 21464 rack->r_ctl.rc_agg_early += early; 21465 rack->r_early = 1; 21466 } else if (rack->rc_always_pace == 0) { 21467 /* Non-paced we are not late */ 21468 rack->r_ctl.rc_agg_delayed = rack->r_ctl.rc_agg_early = 0; 21469 rack->r_early = rack->r_late = 0; 21470 } 21471 /* Now that early/late accounting is done turn off the flag */ 21472 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 21473 rack->r_wanted_output = 0; 21474 rack->r_timer_override = 0; 21475 if ((tp->t_state != rack->r_state) && 21476 TCPS_HAVEESTABLISHED(tp->t_state)) { 21477 rack_set_state(tp, rack); 21478 } 21479 if ((rack->r_fast_output) && 21480 (doing_tlp == 0) && 21481 (tp->rcv_numsacks == 0)) { 21482 int ret; 21483 21484 error = 0; 21485 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 21486 if (ret >= 0) 21487 return(ret); 21488 else if (error) { 21489 inp = rack->rc_inp; 21490 so = inp->inp_socket; 21491 sb = &so->so_snd; 21492 goto nomore; 21493 } 21494 } 21495 inp = rack->rc_inp; 21496 /* 21497 * For TFO connections in SYN_SENT or SYN_RECEIVED, 21498 * only allow the initial SYN or SYN|ACK and those sent 21499 * by the retransmit timer. 21500 */ 21501 if ((tp->t_flags & TF_FASTOPEN) && 21502 ((tp->t_state == TCPS_SYN_RECEIVED) || 21503 (tp->t_state == TCPS_SYN_SENT)) && 21504 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 21505 (tp->t_rxtshift == 0)) { /* not a retransmit */ 21506 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 21507 so = inp->inp_socket; 21508 sb = &so->so_snd; 21509 goto just_return_nolock; 21510 } 21511 /* 21512 * Determine length of data that should be transmitted, and flags 21513 * that will be used. If there is some data or critical controls 21514 * (SYN, RST) to send, then transmit; otherwise, investigate 21515 * further. 
21516 */ 21517 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 21518 if (tp->t_idle_reduce) { 21519 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 21520 rack_cc_after_idle(rack, tp); 21521 } 21522 tp->t_flags &= ~TF_LASTIDLE; 21523 if (idle) { 21524 if (tp->t_flags & TF_MORETOCOME) { 21525 tp->t_flags |= TF_LASTIDLE; 21526 idle = 0; 21527 } 21528 } 21529 if ((tp->snd_una == tp->snd_max) && 21530 rack->r_ctl.rc_went_idle_time && 21531 (cts > rack->r_ctl.rc_went_idle_time)) { 21532 tot_idle = (cts - rack->r_ctl.rc_went_idle_time); 21533 if (tot_idle > rack_min_probertt_hold) { 21534 /* Count as a probe rtt */ 21535 if (rack->in_probe_rtt == 0) { 21536 rack->r_ctl.rc_lower_rtt_us_cts = cts; 21537 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 21538 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 21539 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 21540 } else { 21541 rack_exit_probertt(rack, cts); 21542 } 21543 } 21544 } 21545 if(rack->policer_detect_on) { 21546 /* 21547 * If we are doing policer detetion we at a minium 21548 * record the time but if possible add back to 21549 * the bucket based on the idle time. 21550 */ 21551 uint64_t idle_t, u64_cts; 21552 21553 segsiz = min(ctf_fixed_maxseg(tp), 21554 rack->r_ctl.rc_pace_min_segs); 21555 u64_cts = tcp_tv_to_lusectick(&tv); 21556 if ((rack->rc_policer_detected == 1) && 21557 (rack->r_ctl.policer_bucket_size > segsiz) && 21558 (rack->r_ctl.policer_bw > 0) && 21559 (u64_cts > rack->r_ctl.last_sendtime)) { 21560 /* We are being policed add back the time */ 21561 idle_t = u64_cts - rack->r_ctl.last_sendtime; 21562 rack_credit_back_policer_idle_time(rack, idle_t, __LINE__); 21563 } 21564 rack->r_ctl.last_sendtime = u64_cts; 21565 } 21566 if (rack_use_fsb && 21567 (rack->r_ctl.fsb.tcp_ip_hdr) && 21568 (rack->r_fsb_inited == 0) && 21569 (rack->r_state != TCPS_CLOSED)) 21570 rack_init_fsb_block(tp, rack, tcp_outflags[tp->t_state]); 21571 if (rack->rc_sendvars_notset == 1) { 21572 rack->r_ctl.idle_snd_una = tp->snd_una; 21573 rack->rc_sendvars_notset = 0; 21574 /* 21575 * Make sure any TCP timers (keep-alive) is not running. 21576 */ 21577 tcp_timer_stop(tp); 21578 } 21579 if ((rack->rack_no_prr == 1) && 21580 (rack->rc_always_pace == 0)) { 21581 /* 21582 * Sanity check before sending, if we have 21583 * no-pacing enabled and prr is turned off that 21584 * is a logistics error. Correct this by turnning 21585 * prr back on. A user *must* set some form of 21586 * pacing in order to turn PRR off. We do this 21587 * in the output path so that we can avoid socket 21588 * option ordering issues that would occur if we 21589 * tried to do it while setting rack_no_prr on. 21590 */ 21591 rack->rack_no_prr = 0; 21592 } 21593 if ((rack->pcm_enabled == 1) && 21594 (rack->pcm_needed == 0) && 21595 (tot_idle > 0)) { 21596 /* 21597 * We have been idle some micro seconds. We need 21598 * to factor this in to see if a PCM is needed. 
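 * The idle time is converted into "rounds" by dividing by the
 * smoothed RTT; once those accumulated idle rounds plus the rounds
 * that have actually elapsed since the last measurement reach
 * rack_pcm_every_n_rounds, we flag that a PCM probe is needed.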
21599 */ 21600 uint32_t rtts_idle, rnds; 21601 21602 if (tp->t_srtt) 21603 rtts_idle = tot_idle / tp->t_srtt; 21604 else 21605 rtts_idle = 0; 21606 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; 21607 rack->r_ctl.pcm_idle_rounds += rtts_idle; 21608 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { 21609 rack->pcm_needed = 1; 21610 rack_log_pcm(rack, 8, rack->r_ctl.last_pcm_round, rtts_idle, rack->r_ctl.current_round ); 21611 } 21612 } 21613 again: 21614 sendalot = 0; 21615 cts = tcp_get_usecs(&tv); 21616 ms_cts = tcp_tv_to_mssectick(&tv); 21617 tso = 0; 21618 mtu = 0; 21619 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 21620 minseg = segsiz; 21621 if (rack->r_ctl.rc_pace_max_segs == 0) 21622 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 21623 else 21624 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 21625 if (TCPS_HAVEESTABLISHED(tp->t_state) && 21626 (rack->r_ctl.pcm_max_seg == 0)) { 21627 /* 21628 * We set in our first send so we know that the ctf_fixed_maxseg 21629 * has been fully set. If we do it in rack_init() we most likely 21630 * see 512 bytes so we end up at 5120, not desirable. 21631 */ 21632 rack->r_ctl.pcm_max_seg = rc_init_window(rack); 21633 if (rack->r_ctl.pcm_max_seg < (ctf_fixed_maxseg(tp) * 10)) { 21634 /* 21635 * Assure our initial PCM probe is at least 10 MSS. 21636 */ 21637 rack->r_ctl.pcm_max_seg = ctf_fixed_maxseg(tp) * 10; 21638 } 21639 } 21640 if ((rack->r_ctl.pcm_max_seg != 0) && (rack->pcm_needed == 1)) { 21641 uint32_t rw_avail, cwa; 21642 21643 if (tp->snd_wnd > ctf_outstanding(tp)) 21644 rw_avail = tp->snd_wnd - ctf_outstanding(tp); 21645 else 21646 rw_avail = 0; 21647 if (tp->snd_cwnd > ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked)) 21648 cwa = tp->snd_cwnd -ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 21649 else 21650 cwa = 0; 21651 if ((cwa >= rack->r_ctl.pcm_max_seg) && 21652 (rw_avail > rack->r_ctl.pcm_max_seg)) { 21653 /* Raise up the max seg for this trip through */ 21654 pace_max_seg = rack->r_ctl.pcm_max_seg; 21655 /* Disable any fast output */ 21656 rack->r_fast_output = 0; 21657 } 21658 if (rack_verbose_logging) { 21659 rack_log_pcm(rack, 4, 21660 cwa, rack->r_ctl.pcm_max_seg, rw_avail); 21661 } 21662 } 21663 sb_offset = tp->snd_max - tp->snd_una; 21664 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 21665 flags = tcp_outflags[tp->t_state]; 21666 while (rack->rc_free_cnt < rack_free_cache) { 21667 rsm = rack_alloc(rack); 21668 if (rsm == NULL) { 21669 if (hpts_calling) 21670 /* Retry in a ms */ 21671 slot = (1 * HPTS_USEC_IN_MSEC); 21672 so = inp->inp_socket; 21673 sb = &so->so_snd; 21674 goto just_return_nolock; 21675 } 21676 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 21677 rack->rc_free_cnt++; 21678 rsm = NULL; 21679 } 21680 sack_rxmit = 0; 21681 len = 0; 21682 rsm = NULL; 21683 if (flags & TH_RST) { 21684 SOCKBUF_LOCK(&inp->inp_socket->so_snd); 21685 so = inp->inp_socket; 21686 sb = &so->so_snd; 21687 goto send; 21688 } 21689 if (rack->r_ctl.rc_resend) { 21690 /* Retransmit timer */ 21691 rsm = rack->r_ctl.rc_resend; 21692 rack->r_ctl.rc_resend = NULL; 21693 len = rsm->r_end - rsm->r_start; 21694 sack_rxmit = 1; 21695 sendalot = 0; 21696 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 21697 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 21698 __func__, __LINE__, 21699 rsm->r_start, tp->snd_una, tp, rack, rsm)); 21700 sb_offset = rsm->r_start - tp->snd_una; 21701 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 21702 } else if 
(rack->r_collapse_point_valid &&
21703 ((rsm = rack_check_collapsed(rack, cts)) != NULL)) {
21704 /*
21705 * If an RSM is returned then enough time has passed
21706 * for us to retransmit it. Move up the collapse point,
21707 * since this rsm has its chance to retransmit now.
21708 */
21709 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_RXT);
21710 rack->r_ctl.last_collapse_point = rsm->r_end;
21711 /* Are we done? */
21712 if (SEQ_GEQ(rack->r_ctl.last_collapse_point,
21713 rack->r_ctl.high_collapse_point))
21714 rack->r_collapse_point_valid = 0;
21715 sack_rxmit = 1;
21716 /* We are not doing a TLP */
21717 doing_tlp = 0;
21718 len = rsm->r_end - rsm->r_start;
21719 sb_offset = rsm->r_start - tp->snd_una;
21720 sendalot = 0;
21721 rack_validate_sizes(rack, &len, segsiz, pace_max_seg);
21722 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) {
21723 /* We have a retransmit that takes precedence */
21724 if ((!IN_FASTRECOVERY(tp->t_flags)) &&
21725 ((rsm->r_flags & RACK_MUST_RXT) == 0) &&
21726 ((tp->t_flags & TF_WASFRECOVERY) == 0)) {
21727 /* Enter recovery if not induced by a time-out */
21728 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
21729 }
21730 #ifdef INVARIANTS
21731 if (SEQ_LT(rsm->r_start, tp->snd_una)) {
21732 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
21733 tp, rack, rsm, rsm->r_start, tp->snd_una);
21734 }
21735 #endif
21736 len = rsm->r_end - rsm->r_start;
21737 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
21738 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
21739 __func__, __LINE__,
21740 rsm->r_start, tp->snd_una, tp, rack, rsm));
21741 sb_offset = rsm->r_start - tp->snd_una;
21742 sendalot = 0;
21743 rack_validate_sizes(rack, &len, segsiz, pace_max_seg);
21744 if (len > 0) {
21745 sack_rxmit = 1;
21746 KMOD_TCPSTAT_INC(tcps_sack_rexmits);
21747 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes,
21748 min(len, segsiz));
21749 }
21750 } else if (rack->r_ctl.rc_tlpsend) {
21751 /* Tail loss probe */
21752 long cwin;
21753 long tlen;
21754
21755 /*
21756 * Check if we can do a TLP with a RACK'd packet;
21757 * this can happen if we are not doing the rack
21758 * cheat and we skipped to a TLP and it
21759 * went off.
21760 */
21761 rsm = rack->r_ctl.rc_tlpsend;
21762 /* We are doing a TLP, make sure the flag is present */
21763 rsm->r_flags |= RACK_TLP;
21764 rack->r_ctl.rc_tlpsend = NULL;
21765 sack_rxmit = 1;
21766 tlen = rsm->r_end - rsm->r_start;
21767 if (tlen > segsiz)
21768 tlen = segsiz;
21769 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
21770 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
21771 __func__, __LINE__,
21772 rsm->r_start, tp->snd_una, tp, rack, rsm));
21773 sb_offset = rsm->r_start - tp->snd_una;
21774 cwin = min(tp->snd_wnd, tlen);
21775 len = cwin;
21776 }
21777 if (rack->r_must_retran &&
21778 (doing_tlp == 0) &&
21779 (SEQ_GT(tp->snd_max, tp->snd_una)) &&
21780 (rsm == NULL)) {
21781 /*
21782 * There are two different ways that we
21783 * can get into this block:
21784 * a) This is a non-sack connection, we had a time-out
21785 * and thus r_must_retran was set and everything
21786 * left outstanding has been marked for retransmit.
21787 * b) The MTU of the path shrank, so that everything
21788 * was marked to be retransmitted with the smaller
21789 * mtu and r_must_retran was set.
21790 *
21791 * This means that we expect the sendmap (outstanding)
21792 * to all be marked must. We can use the tmap to
21793 * look at them.
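* Before walking the tmap below we first verify there is window space,
* i.e. the flight measured against rc_out_at_rto is still below
* min(snd_wnd, snd_cwnd); otherwise we just return and wait for acks.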
21794 * 21795 */ 21796 int sendwin, flight; 21797 21798 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 21799 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto); 21800 if (flight >= sendwin) { 21801 /* 21802 * We can't send yet. 21803 */ 21804 so = inp->inp_socket; 21805 sb = &so->so_snd; 21806 goto just_return_nolock; 21807 } 21808 /* 21809 * This is the case a/b mentioned above. All 21810 * outstanding/not-acked should be marked. 21811 * We can use the tmap to find them. 21812 */ 21813 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 21814 if (rsm == NULL) { 21815 /* TSNH */ 21816 rack->r_must_retran = 0; 21817 rack->r_ctl.rc_out_at_rto = 0; 21818 so = inp->inp_socket; 21819 sb = &so->so_snd; 21820 goto just_return_nolock; 21821 } 21822 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { 21823 /* 21824 * The first one does not have the flag, did we collapse 21825 * further up in our list? 21826 */ 21827 rack->r_must_retran = 0; 21828 rack->r_ctl.rc_out_at_rto = 0; 21829 rsm = NULL; 21830 sack_rxmit = 0; 21831 } else { 21832 sack_rxmit = 1; 21833 len = rsm->r_end - rsm->r_start; 21834 sb_offset = rsm->r_start - tp->snd_una; 21835 sendalot = 0; 21836 if ((rack->full_size_rxt == 0) && 21837 (rack->shape_rxt_to_pacing_min == 0) && 21838 (len >= segsiz)) 21839 len = segsiz; 21840 else if (rack->shape_rxt_to_pacing_min && 21841 rack->gp_ready) { 21842 /* We use pacing min as shaping len req */ 21843 uint32_t maxlen; 21844 21845 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 21846 if (len > maxlen) 21847 len = maxlen; 21848 } 21849 /* 21850 * Delay removing the flag RACK_MUST_RXT so 21851 * that the fastpath for retransmit will 21852 * work with this rsm. 21853 */ 21854 } 21855 } 21856 /* 21857 * Enforce a connection sendmap count limit if set 21858 * as long as we are not retransmiting. 21859 */ 21860 if ((rsm == NULL) && 21861 (rack->do_detection == 0) && 21862 (V_tcp_map_entries_limit > 0) && 21863 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 21864 counter_u64_add(rack_to_alloc_limited, 1); 21865 if (!rack->alloc_limit_reported) { 21866 rack->alloc_limit_reported = 1; 21867 counter_u64_add(rack_alloc_limited_conns, 1); 21868 } 21869 so = inp->inp_socket; 21870 sb = &so->so_snd; 21871 goto just_return_nolock; 21872 } 21873 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 21874 /* we are retransmitting the fin */ 21875 len--; 21876 if (len) { 21877 /* 21878 * When retransmitting data do *not* include the 21879 * FIN. This could happen from a TLP probe. 
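* (If len drops to zero here we are retransmitting the FIN by itself,
* so the flag is left set.)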
21880 */ 21881 flags &= ~TH_FIN; 21882 } 21883 } 21884 if (rsm && rack->r_fsb_inited && 21885 rack_use_rsm_rfo && 21886 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 21887 int ret; 21888 21889 if ((rack->rc_policer_detected == 1) && 21890 (rack->r_ctl.policer_bucket_size > segsiz) && 21891 (rack->r_ctl.policer_bw > 0)) { 21892 /* Check to see if there is room */ 21893 if (rack->r_ctl.current_policer_bucket < len) { 21894 goto skip_fast_output; 21895 } 21896 } 21897 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); 21898 if (ret == 0) 21899 return (0); 21900 } 21901 skip_fast_output: 21902 so = inp->inp_socket; 21903 sb = &so->so_snd; 21904 if (do_a_prefetch == 0) { 21905 kern_prefetch(sb, &do_a_prefetch); 21906 do_a_prefetch = 1; 21907 } 21908 #ifdef NETFLIX_SHARED_CWND 21909 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 21910 rack->rack_enable_scwnd) { 21911 /* We are doing cwnd sharing */ 21912 if (rack->gp_ready && 21913 (rack->rack_attempted_scwnd == 0) && 21914 (rack->r_ctl.rc_scw == NULL) && 21915 tp->t_lib) { 21916 /* The pcbid is in, lets make an attempt */ 21917 counter_u64_add(rack_try_scwnd, 1); 21918 rack->rack_attempted_scwnd = 1; 21919 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 21920 &rack->r_ctl.rc_scw_index, 21921 segsiz); 21922 } 21923 if (rack->r_ctl.rc_scw && 21924 (rack->rack_scwnd_is_idle == 1) && 21925 sbavail(&so->so_snd)) { 21926 /* we are no longer out of data */ 21927 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 21928 rack->rack_scwnd_is_idle = 0; 21929 } 21930 if (rack->r_ctl.rc_scw) { 21931 /* First lets update and get the cwnd */ 21932 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 21933 rack->r_ctl.rc_scw_index, 21934 tp->snd_cwnd, tp->snd_wnd, segsiz); 21935 } 21936 } 21937 #endif 21938 /* 21939 * Get standard flags, and add SYN or FIN if requested by 'hidden' 21940 * state flags. 21941 */ 21942 if (tp->t_flags & TF_NEEDFIN) 21943 flags |= TH_FIN; 21944 if (tp->t_flags & TF_NEEDSYN) 21945 flags |= TH_SYN; 21946 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 21947 void *end_rsm; 21948 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 21949 if (end_rsm) 21950 kern_prefetch(end_rsm, &prefetch_rsm); 21951 prefetch_rsm = 1; 21952 } 21953 SOCKBUF_LOCK(sb); 21954 if ((sack_rxmit == 0) && 21955 (TCPS_HAVEESTABLISHED(tp->t_state) || 21956 (tp->t_flags & TF_FASTOPEN))) { 21957 /* 21958 * We are not retransmitting (sack_rxmit is 0) so we 21959 * are sending new data. This is always based on snd_max. 21960 * Now in theory snd_max may be equal to snd_una, if so 21961 * then nothing is outstanding and the offset would be 0. 
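* In other words, below sb_offset = snd_max - snd_una when data is
* outstanding and the sb is non-empty, otherwise 0.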
21962 */ 21963 uint32_t avail; 21964 21965 avail = sbavail(sb); 21966 if (SEQ_GT(tp->snd_max, tp->snd_una) && avail) 21967 sb_offset = tp->snd_max - tp->snd_una; 21968 else 21969 sb_offset = 0; 21970 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { 21971 if (rack->r_ctl.rc_tlp_new_data) { 21972 /* TLP is forcing out new data */ 21973 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 21974 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 21975 } 21976 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { 21977 if (tp->snd_wnd > sb_offset) 21978 len = tp->snd_wnd - sb_offset; 21979 else 21980 len = 0; 21981 } else { 21982 len = rack->r_ctl.rc_tlp_new_data; 21983 } 21984 rack->r_ctl.rc_tlp_new_data = 0; 21985 } else { 21986 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); 21987 } 21988 if ((rack->r_ctl.crte == NULL) && 21989 IN_FASTRECOVERY(tp->t_flags) && 21990 (rack->full_size_rxt == 0) && 21991 (rack->shape_rxt_to_pacing_min == 0) && 21992 (len > segsiz)) { 21993 /* 21994 * For prr=off, we need to send only 1 MSS 21995 * at a time. We do this because another sack could 21996 * be arriving that causes us to send retransmits and 21997 * we don't want to be on a long pace due to a larger send 21998 * that keeps us from sending out the retransmit. 21999 */ 22000 len = segsiz; 22001 } else if (rack->shape_rxt_to_pacing_min && 22002 rack->gp_ready) { 22003 /* We use pacing min as shaping len req */ 22004 uint32_t maxlen; 22005 22006 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 22007 if (len > maxlen) 22008 len = maxlen; 22009 }/* The else is full_size_rxt is on so send it all */ 22010 } else { 22011 uint32_t outstanding; 22012 /* 22013 * We are inside of a Fast recovery episode, this 22014 * is caused by a SACK or 3 dup acks. At this point 22015 * we have sent all the retransmissions and we rely 22016 * on PRR to dictate what we will send in the form of 22017 * new data. 22018 */ 22019 22020 outstanding = tp->snd_max - tp->snd_una; 22021 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 22022 if (tp->snd_wnd > outstanding) { 22023 len = tp->snd_wnd - outstanding; 22024 /* Check to see if we have the data */ 22025 if ((sb_offset + len) > avail) { 22026 /* It does not all fit */ 22027 if (avail > sb_offset) 22028 len = avail - sb_offset; 22029 else 22030 len = 0; 22031 } 22032 } else { 22033 len = 0; 22034 } 22035 } else if (avail > sb_offset) { 22036 len = avail - sb_offset; 22037 } else { 22038 len = 0; 22039 } 22040 if (len > 0) { 22041 if (len > rack->r_ctl.rc_prr_sndcnt) { 22042 len = rack->r_ctl.rc_prr_sndcnt; 22043 } 22044 if (len > 0) { 22045 sub_from_prr = 1; 22046 } 22047 } 22048 if (len > segsiz) { 22049 /* 22050 * We should never send more than a MSS when 22051 * retransmitting or sending new data in prr 22052 * mode unless the override flag is on. Most 22053 * likely the PRR algorithm is not going to 22054 * let us send a lot as well :-) 22055 */ 22056 if (rack->r_ctl.rc_prr_sendalot == 0) { 22057 len = segsiz; 22058 } 22059 } else if (len < segsiz) { 22060 /* 22061 * Do we send any? The idea here is if the 22062 * send empty's the socket buffer we want to 22063 * do it. However if not then lets just wait 22064 * for our prr_sndcnt to get bigger. 
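* e.g. (illustrative numbers): with len = 500 and 2000 bytes left past
* sb_offset, leftinsb > len so we hold the send; only when this short
* send would drain the sb (leftinsb <= len) do we let it go.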
22065 */ 22066 long leftinsb; 22067 22068 leftinsb = sbavail(sb) - sb_offset; 22069 if (leftinsb > len) { 22070 /* This send does not empty the sb */ 22071 len = 0; 22072 } 22073 } 22074 } 22075 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 22076 /* 22077 * If you have not established 22078 * and are not doing FAST OPEN 22079 * no data please. 22080 */ 22081 if ((sack_rxmit == 0) && 22082 !(tp->t_flags & TF_FASTOPEN)) { 22083 len = 0; 22084 sb_offset = 0; 22085 } 22086 } 22087 if (prefetch_so_done == 0) { 22088 kern_prefetch(so, &prefetch_so_done); 22089 prefetch_so_done = 1; 22090 } 22091 orig_len = len; 22092 if ((rack->rc_policer_detected == 1) && 22093 (rack->r_ctl.policer_bucket_size > segsiz) && 22094 (rack->r_ctl.policer_bw > 0) && 22095 (len > 0)) { 22096 /* 22097 * Ok we believe we have a policer watching 22098 * what we send, can we send len? If not can 22099 * we tune it down to a smaller value? 22100 */ 22101 uint32_t plen, buck_needs; 22102 22103 plen = rack_policer_check_send(rack, len, segsiz, &buck_needs); 22104 if (plen == 0) { 22105 /* 22106 * We are not allowed to send. How long 22107 * do we need to pace for i.e. how long 22108 * before len is available to send? 22109 */ 22110 uint64_t lentime; 22111 22112 lentime = buck_needs; 22113 lentime *= HPTS_USEC_IN_SEC; 22114 lentime /= rack->r_ctl.policer_bw; 22115 slot = (uint32_t)lentime; 22116 tot_len_this_send = 0; 22117 SOCKBUF_UNLOCK(sb); 22118 if (rack_verbose_logging > 0) 22119 policer_detection_log(rack, len, slot, buck_needs, 0, 12); 22120 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 22121 rack_log_type_just_return(rack, cts, 0, slot, hpts_calling, 0, cwnd_to_use); 22122 goto just_return_clean; 22123 } 22124 if (plen < len) { 22125 sendalot = 0; 22126 len = plen; 22127 } 22128 } 22129 /* 22130 * Lop off SYN bit if it has already been sent. However, if this is 22131 * SYN-SENT state and if segment contains data and if we don't know 22132 * that foreign host supports TAO, suppress sending segment. 22133 */ 22134 if ((flags & TH_SYN) && 22135 SEQ_GT(tp->snd_max, tp->snd_una) && 22136 ((sack_rxmit == 0) && 22137 (tp->t_rxtshift == 0))) { 22138 /* 22139 * When sending additional segments following a TFO SYN|ACK, 22140 * do not include the SYN bit. 22141 */ 22142 if ((tp->t_flags & TF_FASTOPEN) && 22143 (tp->t_state == TCPS_SYN_RECEIVED)) 22144 flags &= ~TH_SYN; 22145 } 22146 /* 22147 * Be careful not to send data and/or FIN on SYN segments. This 22148 * measure is needed to prevent interoperability problems with not 22149 * fully conformant TCP implementations. 
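* Hence when TF_NOOPT is set on a SYN we clear both the payload
* (len = 0) and the FIN bit just below.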
22150 */ 22151 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 22152 len = 0; 22153 flags &= ~TH_FIN; 22154 } 22155 /* 22156 * On TFO sockets, ensure no data is sent in the following cases: 22157 * 22158 * - When retransmitting SYN|ACK on a passively-created socket 22159 * 22160 * - When retransmitting SYN on an actively created socket 22161 * 22162 * - When sending a zero-length cookie (cookie request) on an 22163 * actively created socket 22164 * 22165 * - When the socket is in the CLOSED state (RST is being sent) 22166 */ 22167 if ((tp->t_flags & TF_FASTOPEN) && 22168 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 22169 ((tp->t_state == TCPS_SYN_SENT) && 22170 (tp->t_tfo_client_cookie_len == 0)) || 22171 (flags & TH_RST))) { 22172 sack_rxmit = 0; 22173 len = 0; 22174 } 22175 /* Without fast-open there should never be data sent on a SYN */ 22176 if ((flags & TH_SYN) && !(tp->t_flags & TF_FASTOPEN)) { 22177 len = 0; 22178 } 22179 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 22180 /* We only send 1 MSS if we have a DSACK block */ 22181 add_flag |= RACK_SENT_W_DSACK; 22182 len = segsiz; 22183 } 22184 if (len <= 0) { 22185 /* 22186 * We have nothing to send, or the window shrank, or 22187 * is closed, do we need to go into persists? 22188 */ 22189 len = 0; 22190 if ((tp->snd_wnd == 0) && 22191 (TCPS_HAVEESTABLISHED(tp->t_state)) && 22192 (tp->snd_una == tp->snd_max) && 22193 (sb_offset < (int)sbavail(sb))) { 22194 rack_enter_persist(tp, rack, cts, tp->snd_una); 22195 } 22196 } else if ((rsm == NULL) && 22197 (doing_tlp == 0) && 22198 (len < pace_max_seg)) { 22199 /* 22200 * We are not sending a maximum sized segment for 22201 * some reason. Should we not send anything (think 22202 * sws or persists)? 22203 */ 22204 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 22205 (TCPS_HAVEESTABLISHED(tp->t_state)) && 22206 (len < minseg) && 22207 (len < (int)(sbavail(sb) - sb_offset))) { 22208 /* 22209 * Here the rwnd is less than 22210 * the minimum pacing size, this is not a retransmit, 22211 * we are established and 22212 * the send is not the last in the socket buffer 22213 * we send nothing, and we may enter persists 22214 * if nothing is outstanding. 22215 */ 22216 len = 0; 22217 if (tp->snd_max == tp->snd_una) { 22218 /* 22219 * Nothing out we can 22220 * go into persists. 22221 */ 22222 rack_enter_persist(tp, rack, cts, tp->snd_una); 22223 } 22224 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 22225 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 22226 (len < (int)(sbavail(sb) - sb_offset)) && 22227 (len < minseg)) { 22228 /* 22229 * Here we are not retransmitting, and 22230 * the cwnd is not so small that we could 22231 * not send at least a min size (rxt timer 22232 * not having gone off), We have 2 segments or 22233 * more already in flight, its not the tail end 22234 * of the socket buffer and the cwnd is blocking 22235 * us from sending out a minimum pacing segment size. 22236 * Lets not send anything. 22237 */ 22238 len = 0; 22239 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 22240 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 22241 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 22242 (len < (int)(sbavail(sb) - sb_offset)) && 22243 (TCPS_HAVEESTABLISHED(tp->t_state))) { 22244 /* 22245 * Here we have a send window but we have 22246 * filled it up and we can't send another pacing segment. 22247 * We also have in flight more than 2 segments 22248 * and we are not completing the sb i.e. 
we allow 22249 * the last bytes of the sb to go out even if 22250 * its not a full pacing segment. 22251 */ 22252 len = 0; 22253 } else if ((rack->r_ctl.crte != NULL) && 22254 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && 22255 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) && 22256 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && 22257 (len < (int)(sbavail(sb) - sb_offset))) { 22258 /* 22259 * Here we are doing hardware pacing, this is not a TLP, 22260 * we are not sending a pace max segment size, there is rwnd 22261 * room to send at least N pace_max_seg, the cwnd is greater 22262 * than or equal to a full pacing segments plus 4 mss and we have 2 or 22263 * more segments in flight and its not the tail of the socket buffer. 22264 * 22265 * We don't want to send instead we need to get more ack's in to 22266 * allow us to send a full pacing segment. Normally, if we are pacing 22267 * about the right speed, we should have finished our pacing 22268 * send as most of the acks have come back if we are at the 22269 * right rate. This is a bit fuzzy since return path delay 22270 * can delay the acks, which is why we want to make sure we 22271 * have cwnd space to have a bit more than a max pace segments in flight. 22272 * 22273 * If we have not gotten our acks back we are pacing at too high a 22274 * rate delaying will not hurt and will bring our GP estimate down by 22275 * injecting the delay. If we don't do this we will send 22276 * 2 MSS out in response to the acks being clocked in which 22277 * defeats the point of hw-pacing (i.e. to help us get 22278 * larger TSO's out). 22279 */ 22280 len = 0; 22281 } 22282 22283 } 22284 /* len will be >= 0 after this point. */ 22285 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 22286 rack_sndbuf_autoscale(rack); 22287 /* 22288 * Decide if we can use TCP Segmentation Offloading (if supported by 22289 * hardware). 22290 * 22291 * TSO may only be used if we are in a pure bulk sending state. The 22292 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 22293 * options prevent using TSO. With TSO the TCP header is the same 22294 * (except for the sequence number) for all generated packets. This 22295 * makes it impossible to transmit any options which vary per 22296 * generated segment or packet. 22297 * 22298 * IPv4 handling has a clear separation of ip options and ip header 22299 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 22300 * the right thing below to provide length of just ip options and thus 22301 * checking for ipoptlen is enough to decide if ip options are present. 22302 */ 22303 ipoptlen = 0; 22304 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 22305 /* 22306 * Pre-calculate here as we save another lookup into the darknesses 22307 * of IPsec that way and can actually decide if TSO is ok. 
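* The computed ipsec_optlen is folded into ipoptlen right after this,
* and since the TSO check requires ipoptlen == 0, any IPsec overhead
* also rules out TSO.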
22308 */ 22309 #ifdef INET6 22310 if (isipv6 && IPSEC_ENABLED(ipv6)) 22311 ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp); 22312 #ifdef INET 22313 else 22314 #endif 22315 #endif /* INET6 */ 22316 #ifdef INET 22317 if (IPSEC_ENABLED(ipv4)) 22318 ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp); 22319 #endif /* INET */ 22320 #endif 22321 22322 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 22323 ipoptlen += ipsec_optlen; 22324 #endif 22325 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && 22326 (tp->t_port == 0) && 22327 ((tp->t_flags & TF_SIGNATURE) == 0) && 22328 tp->rcv_numsacks == 0 && sack_rxmit == 0 && 22329 ipoptlen == 0) 22330 tso = 1; 22331 { 22332 uint32_t outstanding __unused; 22333 22334 outstanding = tp->snd_max - tp->snd_una; 22335 if (tp->t_flags & TF_SENTFIN) { 22336 /* 22337 * If we sent a fin, snd_max is 1 higher than 22338 * snd_una 22339 */ 22340 outstanding--; 22341 } 22342 if (sack_rxmit) { 22343 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 22344 flags &= ~TH_FIN; 22345 } 22346 } 22347 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), 22348 (long)TCP_MAXWIN << tp->rcv_scale); 22349 22350 /* 22351 * Sender silly window avoidance. We transmit under the following 22352 * conditions when len is non-zero: 22353 * 22354 * - We have a full segment (or more with TSO) - This is the last 22355 * buffer in a write()/send() and we are either idle or running 22356 * NODELAY - we've timed out (e.g. persist timer) - we have more 22357 * then 1/2 the maximum send window's worth of data (receiver may be 22358 * limited the window size) - we need to retransmit 22359 */ 22360 if (len) { 22361 if (len >= segsiz) { 22362 goto send; 22363 } 22364 /* 22365 * NOTE! on localhost connections an 'ack' from the remote 22366 * end may occur synchronously with the output and cause us 22367 * to flush a buffer queued with moretocome. XXX 22368 * 22369 */ 22370 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 22371 (idle || (tp->t_flags & TF_NODELAY)) && 22372 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 22373 (tp->t_flags & TF_NOPUSH) == 0) { 22374 pass = 2; 22375 goto send; 22376 } 22377 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 22378 pass = 22; 22379 goto send; 22380 } 22381 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 22382 pass = 4; 22383 goto send; 22384 } 22385 if (sack_rxmit) { 22386 pass = 6; 22387 goto send; 22388 } 22389 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && 22390 (ctf_outstanding(tp) < (segsiz * 2))) { 22391 /* 22392 * We have less than two MSS outstanding (delayed ack) 22393 * and our rwnd will not let us send a full sized 22394 * MSS. Lets go ahead and let this small segment 22395 * out because we want to try to have at least two 22396 * packets inflight to not be caught by delayed ack. 22397 */ 22398 pass = 12; 22399 goto send; 22400 } 22401 } 22402 /* 22403 * Sending of standalone window updates. 22404 * 22405 * Window updates are important when we close our window due to a 22406 * full socket buffer and are opening it again after the application 22407 * reads data from it. Once the window has opened again and the 22408 * remote end starts to send again the ACK clock takes over and 22409 * provides the most current window information. 22410 * 22411 * We must avoid the silly window syndrome whereas every read from 22412 * the receive buffer, no matter how small, causes a window update 22413 * to be sent. 
We also should avoid sending a flurry of window 22414 * updates when the socket buffer had queued a lot of data and the 22415 * application is doing small reads. 22416 * 22417 * Prevent a flurry of pointless window updates by only sending an 22418 * update when we can increase the advertized window by more than 22419 * 1/4th of the socket buffer capacity. When the buffer is getting 22420 * full or is very small be more aggressive and send an update 22421 * whenever we can increase by two mss sized segments. In all other 22422 * situations the ACK's to new incoming data will carry further 22423 * window increases. 22424 * 22425 * Don't send an independent window update if a delayed ACK is 22426 * pending (it will get piggy-backed on it) or the remote side 22427 * already has done a half-close and won't send more data. Skip 22428 * this if the connection is in T/TCP half-open state. 22429 */ 22430 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 22431 !(tp->t_flags & TF_DELACK) && 22432 !TCPS_HAVERCVDFIN(tp->t_state)) { 22433 /* 22434 * "adv" is the amount we could increase the window, taking 22435 * into account that we are limited by TCP_MAXWIN << 22436 * tp->rcv_scale. 22437 */ 22438 int32_t adv; 22439 int oldwin; 22440 22441 adv = recwin; 22442 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 22443 oldwin = (tp->rcv_adv - tp->rcv_nxt); 22444 if (adv > oldwin) 22445 adv -= oldwin; 22446 else { 22447 /* We can't increase the window */ 22448 adv = 0; 22449 } 22450 } else 22451 oldwin = 0; 22452 22453 /* 22454 * If the new window size ends up being the same as or less 22455 * than the old size when it is scaled, then don't force 22456 * a window update. 22457 */ 22458 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 22459 goto dontupdate; 22460 22461 if (adv >= (int32_t)(2 * segsiz) && 22462 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 22463 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 22464 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 22465 pass = 7; 22466 goto send; 22467 } 22468 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 22469 pass = 23; 22470 goto send; 22471 } 22472 } 22473 dontupdate: 22474 22475 /* 22476 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 22477 * is also a catch-all for the retransmit timer timeout case. 22478 */ 22479 if (tp->t_flags & TF_ACKNOW) { 22480 pass = 8; 22481 goto send; 22482 } 22483 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 22484 pass = 9; 22485 goto send; 22486 } 22487 /* 22488 * If our state indicates that FIN should be sent and we have not 22489 * yet done so, then we need to send. 22490 */ 22491 if ((flags & TH_FIN) && 22492 (tp->snd_max == tp->snd_una)) { 22493 pass = 11; 22494 goto send; 22495 } 22496 /* 22497 * No reason to send a segment, just return. 22498 */ 22499 just_return: 22500 SOCKBUF_UNLOCK(sb); 22501 just_return_nolock: 22502 { 22503 int app_limited = CTF_JR_SENT_DATA; 22504 22505 if ((tp->t_flags & TF_FASTOPEN) == 0 && 22506 (flags & TH_FIN) && 22507 (len == 0) && 22508 (sbused(sb) == (tp->snd_max - tp->snd_una)) && 22509 ((tp->snd_max - tp->snd_una) <= segsiz)) { 22510 /* 22511 * Ok less than or right at a MSS is 22512 * outstanding. The original FreeBSD stack would 22513 * have sent a FIN, which can speed things up for 22514 * a transactional application doing a MSG_WAITALL. 22515 * To speed things up since we do *not* send a FIN 22516 * if data is outstanding, we send a "challenge ack". 
22517 * The idea behind that is instead of having to have 22518 * the peer wait for the delayed-ack timer to run off 22519 * we send an ack that makes the peer send us an ack. 22520 */ 22521 rack_send_ack_challange(rack); 22522 } 22523 if (tot_len_this_send > 0) { 22524 rack->r_ctl.fsb.recwin = recwin; 22525 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz, __LINE__); 22526 if ((error == 0) && 22527 (rack->rc_policer_detected == 0) && 22528 rack_use_rfo && 22529 ((flags & (TH_SYN|TH_FIN)) == 0) && 22530 (ipoptlen == 0) && 22531 (tp->rcv_numsacks == 0) && 22532 rack->r_fsb_inited && 22533 TCPS_HAVEESTABLISHED(tp->t_state) && 22534 ((IN_RECOVERY(tp->t_flags)) == 0) && 22535 (rack->r_must_retran == 0) && 22536 ((tp->t_flags & TF_NEEDFIN) == 0) && 22537 (len > 0) && (orig_len > 0) && 22538 (orig_len > len) && 22539 ((orig_len - len) >= segsiz) && 22540 ((optlen == 0) || 22541 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 22542 /* We can send at least one more MSS using our fsb */ 22543 rack_setup_fast_output(tp, rack, sb, len, orig_len, 22544 segsiz, pace_max_seg, hw_tls, flags); 22545 } else 22546 rack->r_fast_output = 0; 22547 rack_log_fsb(rack, tp, so, flags, 22548 ipoptlen, orig_len, len, 0, 22549 1, optlen, __LINE__, 1); 22550 /* Assure when we leave that snd_nxt will point to top */ 22551 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 22552 tp->snd_nxt = tp->snd_max; 22553 } else { 22554 int end_window = 0; 22555 uint32_t seq = tp->gput_ack; 22556 22557 rsm = tqhash_max(rack->r_ctl.tqh); 22558 if (rsm) { 22559 /* 22560 * Mark the last sent that we just-returned (hinting 22561 * that delayed ack may play a role in any rtt measurement). 22562 */ 22563 rsm->r_just_ret = 1; 22564 } 22565 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 22566 rack->r_ctl.rc_agg_delayed = 0; 22567 rack->r_early = 0; 22568 rack->r_late = 0; 22569 rack->r_ctl.rc_agg_early = 0; 22570 if ((ctf_outstanding(tp) + 22571 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), 22572 minseg)) >= tp->snd_wnd) { 22573 /* We are limited by the rwnd */ 22574 app_limited = CTF_JR_RWND_LIMITED; 22575 if (IN_FASTRECOVERY(tp->t_flags)) 22576 rack->r_ctl.rc_prr_sndcnt = 0; 22577 } else if (ctf_outstanding(tp) >= sbavail(sb)) { 22578 /* We are limited by whats available -- app limited */ 22579 app_limited = CTF_JR_APP_LIMITED; 22580 if (IN_FASTRECOVERY(tp->t_flags)) 22581 rack->r_ctl.rc_prr_sndcnt = 0; 22582 } else if ((idle == 0) && 22583 ((tp->t_flags & TF_NODELAY) == 0) && 22584 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 22585 (len < segsiz)) { 22586 /* 22587 * No delay is not on and the 22588 * user is sending less than 1MSS. This 22589 * brings out SWS avoidance so we 22590 * don't send. Another app-limited case. 22591 */ 22592 app_limited = CTF_JR_APP_LIMITED; 22593 } else if (tp->t_flags & TF_NOPUSH) { 22594 /* 22595 * The user has requested no push of 22596 * the last segment and we are 22597 * at the last segment. Another app 22598 * limited case. 22599 */ 22600 app_limited = CTF_JR_APP_LIMITED; 22601 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) { 22602 /* Its the cwnd */ 22603 app_limited = CTF_JR_CWND_LIMITED; 22604 } else if (IN_FASTRECOVERY(tp->t_flags) && 22605 (rack->rack_no_prr == 0) && 22606 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { 22607 app_limited = CTF_JR_PRR; 22608 } else { 22609 /* Now why here are we not sending? 
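* None of the known limits (rwnd, app, cwnd, PRR) matched, so we fall
* through to CTF_JR_ASSESSING; with NOW and INVARIANTS defined this
* would panic so the missed case can be found.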
*/
22610 #ifdef NOW
22611 #ifdef INVARIANTS
22612 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use);
22613 #endif
22614 #endif
22615 app_limited = CTF_JR_ASSESSING;
22616 }
22617 /*
22618 * App limited in some fashion; for our pacing GP
22619 * measurements we don't want any gap (even cwnd).
22620 * Close down the measurement window.
22621 */
22622 if (rack_cwnd_block_ends_measure &&
22623 ((app_limited == CTF_JR_CWND_LIMITED) ||
22624 (app_limited == CTF_JR_PRR))) {
22625 /*
22626 * The reason we are not sending is
22627 * the cwnd (or prr). We have been configured
22628 * to end the measurement window in
22629 * this case.
22630 */
22631 end_window = 1;
22632 } else if (rack_rwnd_block_ends_measure &&
22633 (app_limited == CTF_JR_RWND_LIMITED)) {
22634 /*
22635 * We are rwnd limited and have been
22636 * configured to end the measurement
22637 * window in this case.
22638 */
22639 end_window = 1;
22640 } else if (app_limited == CTF_JR_APP_LIMITED) {
22641 /*
22642 * A true application limited period; we have
22643 * run out of data.
22644 */
22645 end_window = 1;
22646 } else if (app_limited == CTF_JR_ASSESSING) {
22647 /*
22648 * In the assessing case we hit the end of
22649 * the if/else and had no known reason.
22650 * This will panic us under invariants.
22651 *
22652 * If we get this out in logs we need to
22653 * investigate which reason we missed.
22654 */
22655 end_window = 1;
22656 }
22657 if (end_window) {
22658 uint8_t log = 0;
22659
22660 /* Adjust the Gput measurement */
22661 if ((tp->t_flags & TF_GPUTINPROG) &&
22662 SEQ_GT(tp->gput_ack, tp->snd_max)) {
22663 tp->gput_ack = tp->snd_max;
22664 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) {
22665 /*
22666 * There is not enough to measure.
22667 */
22668 tp->t_flags &= ~TF_GPUTINPROG;
22669 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
22670 rack->r_ctl.rc_gp_srtt /*flex1*/,
22671 tp->gput_seq,
22672 0, 0, 18, __LINE__, NULL, 0);
22673 } else
22674 log = 1;
22675 }
22676 /* Mark the last packet as app limited */
22677 rsm = tqhash_max(rack->r_ctl.tqh);
22678 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
22679 if (rack->r_ctl.rc_app_limited_cnt == 0)
22680 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
22681 else {
22682 /*
22683 * Go out to the end app limited and mark
22684 * this new one as next and move the end_appl up
22685 * to this guy.
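* That is, point the old tail's r_nseq_appl at this rsm's start and
* then make this rsm the new rc_end_appl.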
22686 */ 22687 if (rack->r_ctl.rc_end_appl) 22688 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 22689 rack->r_ctl.rc_end_appl = rsm; 22690 } 22691 rsm->r_flags |= RACK_APP_LIMITED; 22692 rack->r_ctl.rc_app_limited_cnt++; 22693 } 22694 if (log) 22695 rack_log_pacing_delay_calc(rack, 22696 rack->r_ctl.rc_app_limited_cnt, seq, 22697 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); 22698 } 22699 } 22700 /* Check if we need to go into persists or not */ 22701 if ((tp->snd_max == tp->snd_una) && 22702 TCPS_HAVEESTABLISHED(tp->t_state) && 22703 sbavail(sb) && 22704 (sbavail(sb) > tp->snd_wnd) && 22705 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 22706 /* Yes lets make sure to move to persist before timer-start */ 22707 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); 22708 } 22709 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 22710 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use); 22711 } 22712 just_return_clean: 22713 #ifdef NETFLIX_SHARED_CWND 22714 if ((sbavail(sb) == 0) && 22715 rack->r_ctl.rc_scw) { 22716 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 22717 rack->rack_scwnd_is_idle = 1; 22718 } 22719 #endif 22720 #ifdef TCP_ACCOUNTING 22721 if (tot_len_this_send > 0) { 22722 crtsc = get_cyclecount(); 22723 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22724 tp->tcp_cnt_counters[SND_OUT_DATA]++; 22725 } 22726 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22727 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 22728 } 22729 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22730 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 22731 } 22732 } else { 22733 crtsc = get_cyclecount(); 22734 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22735 tp->tcp_cnt_counters[SND_LIMITED]++; 22736 } 22737 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22738 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 22739 } 22740 } 22741 sched_unpin(); 22742 #endif 22743 return (0); 22744 22745 send: 22746 if ((rack->r_ctl.crte != NULL) && 22747 (rsm == NULL) && 22748 ((rack->rc_hw_nobuf == 1) || 22749 (rack_hw_check_queue && (check_done == 0)))) { 22750 /* 22751 * We only want to do this once with the hw_check_queue, 22752 * for the enobuf case we would only do it once if 22753 * we come around to again, the flag will be clear. 22754 */ 22755 check_done = 1; 22756 slot = rack_check_queue_level(rack, tp, &tv, cts, len, segsiz); 22757 if (slot) { 22758 rack->r_ctl.rc_agg_delayed = 0; 22759 rack->r_ctl.rc_agg_early = 0; 22760 rack->r_early = 0; 22761 rack->r_late = 0; 22762 SOCKBUF_UNLOCK(&so->so_snd); 22763 goto skip_all_send; 22764 } 22765 } 22766 if (rsm || sack_rxmit) 22767 counter_u64_add(rack_nfto_resend, 1); 22768 else 22769 counter_u64_add(rack_non_fto_send, 1); 22770 if ((flags & TH_FIN) && 22771 sbavail(sb)) { 22772 /* 22773 * We do not transmit a FIN 22774 * with data outstanding. We 22775 * need to make it so all data 22776 * is acked first. 22777 */ 22778 flags &= ~TH_FIN; 22779 if ((sbused(sb) == (tp->snd_max - tp->snd_una)) && 22780 ((tp->snd_max - tp->snd_una) <= segsiz)) { 22781 /* 22782 * Ok less than or right at a MSS is 22783 * outstanding. The original FreeBSD stack would 22784 * have sent a FIN, which can speed things up for 22785 * a transactional application doing a MSG_WAITALL. 22786 * To speed things up since we do *not* send a FIN 22787 * if data is outstanding, we send a "challenge ack". 
22788 * The idea behind that is instead of having to have 22789 * the peer wait for the delayed-ack timer to run off 22790 * we send an ack that makes the peer send us an ack. 22791 */ 22792 rack_send_ack_challange(rack); 22793 } 22794 } 22795 /* Enforce stack imposed max seg size if we have one */ 22796 if (pace_max_seg && 22797 (len > pace_max_seg)) { 22798 mark = 1; 22799 len = pace_max_seg; 22800 } 22801 if ((rsm == NULL) && 22802 (rack->pcm_in_progress == 0) && 22803 (rack->r_ctl.pcm_max_seg > 0) && 22804 (len >= rack->r_ctl.pcm_max_seg)) { 22805 /* It is large enough for a measurement */ 22806 add_flag |= RACK_IS_PCM; 22807 rack_log_pcm(rack, 5, len, rack->r_ctl.pcm_max_seg, add_flag); 22808 } else if (rack_verbose_logging) { 22809 rack_log_pcm(rack, 6, len, rack->r_ctl.pcm_max_seg, add_flag); 22810 } 22811 22812 SOCKBUF_LOCK_ASSERT(sb); 22813 if (len > 0) { 22814 if (len >= segsiz) 22815 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 22816 else 22817 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 22818 } 22819 /* 22820 * Before ESTABLISHED, force sending of initial options unless TCP 22821 * set not to do any options. NOTE: we assume that the IP/TCP header 22822 * plus TCP options always fit in a single mbuf, leaving room for a 22823 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 22824 * + optlen <= MCLBYTES 22825 */ 22826 optlen = 0; 22827 #ifdef INET6 22828 if (isipv6) 22829 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 22830 else 22831 #endif 22832 hdrlen = sizeof(struct tcpiphdr); 22833 22834 /* 22835 * Ok what seq are we sending from. If we have 22836 * no rsm to use, then we look at various bits, 22837 * if we are putting out a SYN it will be ISS. 22838 * If we are retransmitting a FIN it will 22839 * be snd_max-1 else its snd_max. 22840 */ 22841 if (rsm == NULL) { 22842 if (flags & TH_SYN) 22843 rack_seq = tp->iss; 22844 else if ((flags & TH_FIN) && 22845 (tp->t_flags & TF_SENTFIN)) 22846 rack_seq = tp->snd_max - 1; 22847 else 22848 rack_seq = tp->snd_max; 22849 } else { 22850 rack_seq = rsm->r_start; 22851 } 22852 /* 22853 * Compute options for segment. We only have to care about SYN and 22854 * established connection segments. Options for SYN-ACK segments 22855 * are handled in TCP syncache. 22856 */ 22857 to.to_flags = 0; 22858 if ((tp->t_flags & TF_NOOPT) == 0) { 22859 /* Maximum segment size. */ 22860 if (flags & TH_SYN) { 22861 to.to_mss = tcp_mssopt(&inp->inp_inc); 22862 if (tp->t_port) 22863 to.to_mss -= V_tcp_udp_tunneling_overhead; 22864 to.to_flags |= TOF_MSS; 22865 22866 /* 22867 * On SYN or SYN|ACK transmits on TFO connections, 22868 * only include the TFO option if it is not a 22869 * retransmit, as the presence of the TFO option may 22870 * have caused the original SYN or SYN|ACK to have 22871 * been dropped by a middlebox. 22872 */ 22873 if ((tp->t_flags & TF_FASTOPEN) && 22874 (tp->t_rxtshift == 0)) { 22875 if (tp->t_state == TCPS_SYN_RECEIVED) { 22876 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 22877 to.to_tfo_cookie = 22878 (u_int8_t *)&tp->t_tfo_cookie.server; 22879 to.to_flags |= TOF_FASTOPEN; 22880 wanted_cookie = 1; 22881 } else if (tp->t_state == TCPS_SYN_SENT) { 22882 to.to_tfo_len = 22883 tp->t_tfo_client_cookie_len; 22884 to.to_tfo_cookie = 22885 tp->t_tfo_cookie.client; 22886 to.to_flags |= TOF_FASTOPEN; 22887 wanted_cookie = 1; 22888 /* 22889 * If we wind up having more data to 22890 * send with the SYN than can fit in 22891 * one segment, don't send any more 22892 * until the SYN|ACK comes back from 22893 * the other end. 
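* (sendalot is cleared just below to enforce that.)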
22894 */ 22895 sendalot = 0; 22896 } 22897 } 22898 } 22899 /* Window scaling. */ 22900 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 22901 to.to_wscale = tp->request_r_scale; 22902 to.to_flags |= TOF_SCALE; 22903 } 22904 /* Timestamps. */ 22905 if ((tp->t_flags & TF_RCVD_TSTMP) || 22906 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 22907 uint32_t ts_to_use; 22908 22909 if ((rack->r_rcvpath_rtt_up == 1) && 22910 (ms_cts == rack->r_ctl.last_rcv_tstmp_for_rtt)) { 22911 /* 22912 * When we are doing a rcv_rtt probe all 22913 * other timestamps use the next msec. This 22914 * is safe since our previous ack is in the 22915 * air and we will just have a few more 22916 * on the next ms. This assures that only 22917 * the one ack has the ms_cts that was on 22918 * our ack-probe. 22919 */ 22920 ts_to_use = ms_cts + 1; 22921 } else { 22922 ts_to_use = ms_cts; 22923 } 22924 to.to_tsval = ts_to_use + tp->ts_offset; 22925 to.to_tsecr = tp->ts_recent; 22926 to.to_flags |= TOF_TS; 22927 if ((len == 0) && 22928 (TCPS_HAVEESTABLISHED(tp->t_state)) && 22929 ((ms_cts - rack->r_ctl.last_rcv_tstmp_for_rtt) > RCV_PATH_RTT_MS) && 22930 (tp->snd_una == tp->snd_max) && 22931 (flags & TH_ACK) && 22932 (sbavail(sb) == 0) && 22933 (rack->r_ctl.current_round != 0) && 22934 ((flags & (TH_SYN|TH_FIN)) == 0) && 22935 (rack->r_rcvpath_rtt_up == 0)) { 22936 rack->r_ctl.last_rcv_tstmp_for_rtt = ms_cts; 22937 rack->r_ctl.last_time_of_arm_rcv = cts; 22938 rack->r_rcvpath_rtt_up = 1; 22939 /* Subtract 1 from seq to force a response */ 22940 rack_seq--; 22941 } 22942 } 22943 /* Set receive buffer autosizing timestamp. */ 22944 if (tp->rfbuf_ts == 0 && 22945 (so->so_rcv.sb_flags & SB_AUTOSIZE)) { 22946 tp->rfbuf_ts = ms_cts; 22947 } 22948 /* Selective ACK's. */ 22949 if (tp->t_flags & TF_SACK_PERMIT) { 22950 if (flags & TH_SYN) 22951 to.to_flags |= TOF_SACKPERM; 22952 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 22953 tp->rcv_numsacks > 0) { 22954 to.to_flags |= TOF_SACK; 22955 to.to_nsacks = tp->rcv_numsacks; 22956 to.to_sacks = (u_char *)tp->sackblks; 22957 } 22958 } 22959 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 22960 /* TCP-MD5 (RFC2385). */ 22961 if (tp->t_flags & TF_SIGNATURE) 22962 to.to_flags |= TOF_SIGNATURE; 22963 #endif 22964 22965 /* Processing the options. */ 22966 hdrlen += optlen = tcp_addoptions(&to, opt); 22967 /* 22968 * If we wanted a TFO option to be added, but it was unable 22969 * to fit, ensure no data is sent. 22970 */ 22971 if ((tp->t_flags & TF_FASTOPEN) && wanted_cookie && 22972 !(to.to_flags & TOF_FASTOPEN)) 22973 len = 0; 22974 } 22975 if (tp->t_port) { 22976 if (V_tcp_udp_tunneling_port == 0) { 22977 /* The port was removed?? 
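* With no tunneling port configured we can no longer build the UDP
* encapsulation, so we bail out and fail the send with EHOSTUNREACH.
*/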
*/ 22978 SOCKBUF_UNLOCK(&so->so_snd); 22979 #ifdef TCP_ACCOUNTING 22980 crtsc = get_cyclecount(); 22981 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22982 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22983 } 22984 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22985 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22986 } 22987 sched_unpin(); 22988 #endif 22989 return (EHOSTUNREACH); 22990 } 22991 hdrlen += sizeof(struct udphdr); 22992 } 22993 #ifdef INET6 22994 if (isipv6) 22995 ipoptlen = ip6_optlen(inp); 22996 else 22997 #endif 22998 if (inp->inp_options) 22999 ipoptlen = inp->inp_options->m_len - 23000 offsetof(struct ipoption, ipopt_list); 23001 else 23002 ipoptlen = 0; 23003 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 23004 ipoptlen += ipsec_optlen; 23005 #endif 23006 23007 /* 23008 * Adjust data length if insertion of options will bump the packet 23009 * length beyond the t_maxseg length. Clear the FIN bit because we 23010 * cut off the tail of the segment. 23011 */ 23012 if (len + optlen + ipoptlen > tp->t_maxseg) { 23013 if (tso) { 23014 uint32_t if_hw_tsomax; 23015 uint32_t moff; 23016 int32_t max_len; 23017 23018 /* extract TSO information */ 23019 if_hw_tsomax = tp->t_tsomax; 23020 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 23021 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 23022 KASSERT(ipoptlen == 0, 23023 ("%s: TSO can't do IP options", __func__)); 23024 23025 /* 23026 * Check if we should limit by maximum payload 23027 * length: 23028 */ 23029 if (if_hw_tsomax != 0) { 23030 /* compute maximum TSO length */ 23031 max_len = (if_hw_tsomax - hdrlen - 23032 max_linkhdr); 23033 if (max_len <= 0) { 23034 len = 0; 23035 } else if (len > max_len) { 23036 sendalot = 1; 23037 len = max_len; 23038 mark = 2; 23039 } 23040 } 23041 /* 23042 * Prevent the last segment from being fractional 23043 * unless the send sockbuf can be emptied: 23044 */ 23045 max_len = (tp->t_maxseg - optlen); 23046 if ((sb_offset + len) < sbavail(sb)) { 23047 moff = len % (u_int)max_len; 23048 if (moff != 0) { 23049 mark = 3; 23050 len -= moff; 23051 } 23052 } 23053 /* 23054 * In case there are too many small fragments don't 23055 * use TSO: 23056 */ 23057 if (len <= max_len) { 23058 mark = 4; 23059 tso = 0; 23060 } 23061 /* 23062 * Send the FIN in a separate segment after the bulk 23063 * sending is done. We don't trust the TSO 23064 * implementations to clear the FIN flag on all but 23065 * the last segment. 23066 */ 23067 if (tp->t_flags & TF_NEEDFIN) { 23068 sendalot = 4; 23069 } 23070 } else { 23071 mark = 5; 23072 if (optlen + ipoptlen >= tp->t_maxseg) { 23073 /* 23074 * Since we don't have enough space to put 23075 * the IP header chain and the TCP header in 23076 * one packet as required by RFC 7112, don't 23077 * send it. Also ensure that at least one 23078 * byte of the payload can be put into the 23079 * TCP segment. 23080 */ 23081 SOCKBUF_UNLOCK(&so->so_snd); 23082 error = EMSGSIZE; 23083 sack_rxmit = 0; 23084 goto out; 23085 } 23086 len = tp->t_maxseg - optlen - ipoptlen; 23087 sendalot = 5; 23088 } 23089 } else { 23090 tso = 0; 23091 mark = 6; 23092 } 23093 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 23094 ("%s: len > IP_MAXPACKET", __func__)); 23095 #ifdef DIAGNOSTIC 23096 #ifdef INET6 23097 if (max_linkhdr + hdrlen > MCLBYTES) 23098 #else 23099 if (max_linkhdr + hdrlen > MHLEN) 23100 #endif 23101 panic("tcphdr too big"); 23102 #endif 23103 23104 /* 23105 * This KASSERT is here to catch edge cases at a well defined place. 23106 * Before, those had triggered (random) panic conditions further 23107 * down. 
23108 */ 23109 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 23110 if ((len == 0) && 23111 (flags & TH_FIN) && 23112 (sbused(sb))) { 23113 /* 23114 * We have outstanding data, don't send a fin by itself!. 23115 * 23116 * Check to see if we need to send a challenge ack. 23117 */ 23118 if ((sbused(sb) == (tp->snd_max - tp->snd_una)) && 23119 ((tp->snd_max - tp->snd_una) <= segsiz)) { 23120 /* 23121 * Ok less than or right at a MSS is 23122 * outstanding. The original FreeBSD stack would 23123 * have sent a FIN, which can speed things up for 23124 * a transactional application doing a MSG_WAITALL. 23125 * To speed things up since we do *not* send a FIN 23126 * if data is outstanding, we send a "challenge ack". 23127 * The idea behind that is instead of having to have 23128 * the peer wait for the delayed-ack timer to run off 23129 * we send an ack that makes the peer send us an ack. 23130 */ 23131 rack_send_ack_challange(rack); 23132 } 23133 goto just_return; 23134 } 23135 /* 23136 * Grab a header mbuf, attaching a copy of data to be transmitted, 23137 * and initialize the header from the template for sends on this 23138 * connection. 23139 */ 23140 hw_tls = tp->t_nic_ktls_xmit != 0; 23141 if (len) { 23142 uint32_t max_val; 23143 uint32_t moff; 23144 23145 if (pace_max_seg) 23146 max_val = pace_max_seg; 23147 else 23148 max_val = len; 23149 /* 23150 * We allow a limit on sending with hptsi. 23151 */ 23152 if (len > max_val) { 23153 mark = 7; 23154 len = max_val; 23155 } 23156 #ifdef INET6 23157 if (MHLEN < hdrlen + max_linkhdr) 23158 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 23159 else 23160 #endif 23161 m = m_gethdr(M_NOWAIT, MT_DATA); 23162 23163 if (m == NULL) { 23164 SOCKBUF_UNLOCK(sb); 23165 error = ENOBUFS; 23166 sack_rxmit = 0; 23167 goto out; 23168 } 23169 m->m_data += max_linkhdr; 23170 m->m_len = hdrlen; 23171 23172 /* 23173 * Start the m_copy functions from the closest mbuf to the 23174 * sb_offset in the socket buffer chain. 23175 */ 23176 mb = sbsndptr_noadv(sb, sb_offset, &moff); 23177 s_mb = mb; 23178 s_moff = moff; 23179 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 23180 m_copydata(mb, moff, (int)len, 23181 mtod(m, caddr_t)+hdrlen); 23182 /* 23183 * If we are not retransmitting advance the 23184 * sndptr to help remember the next place in 23185 * the sb. 23186 */ 23187 if (rsm == NULL) 23188 sbsndptr_adv(sb, mb, len); 23189 m->m_len += len; 23190 } else { 23191 struct sockbuf *msb; 23192 23193 /* 23194 * If we are not retransmitting pass in msb so 23195 * the socket buffer can be advanced. Otherwise 23196 * set it to NULL if its a retransmission since 23197 * we don't want to change the sb remembered 23198 * location. 23199 */ 23200 if (rsm == NULL) 23201 msb = sb; 23202 else 23203 msb = NULL; 23204 m->m_next = tcp_m_copym( 23205 mb, moff, &len, 23206 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 23207 ((rsm == NULL) ? hw_tls : 0) 23208 #ifdef NETFLIX_COPY_ARGS 23209 , &s_mb, &s_moff 23210 #endif 23211 ); 23212 if (len <= (tp->t_maxseg - optlen)) { 23213 /* 23214 * Must have ran out of mbufs for the copy 23215 * shorten it to no longer need tso. Lets 23216 * not put on sendalot since we are low on 23217 * mbufs. 
23218 */ 23219 tso = 0; 23220 } 23221 if (m->m_next == NULL) { 23222 SOCKBUF_UNLOCK(sb); 23223 (void)m_free(m); 23224 error = ENOBUFS; 23225 sack_rxmit = 0; 23226 goto out; 23227 } 23228 } 23229 if (sack_rxmit) { 23230 if (rsm && (rsm->r_flags & RACK_TLP)) { 23231 /* 23232 * TLP should not count in retran count, but 23233 * in its own bin 23234 */ 23235 counter_u64_add(rack_tlp_retran, 1); 23236 counter_u64_add(rack_tlp_retran_bytes, len); 23237 } else { 23238 tp->t_sndrexmitpack++; 23239 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 23240 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 23241 } 23242 #ifdef STATS 23243 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 23244 len); 23245 #endif 23246 } else { 23247 KMOD_TCPSTAT_INC(tcps_sndpack); 23248 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 23249 #ifdef STATS 23250 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 23251 len); 23252 #endif 23253 } 23254 /* 23255 * If we're sending everything we've got, set PUSH. (This 23256 * will keep happy those implementations which only give 23257 * data to the user when a buffer fills or a PUSH comes in.) 23258 */ 23259 if (sb_offset + len == sbused(sb) && 23260 sbused(sb) && 23261 !(flags & TH_SYN)) { 23262 flags |= TH_PUSH; 23263 add_flag |= RACK_HAD_PUSH; 23264 } 23265 23266 SOCKBUF_UNLOCK(sb); 23267 } else { 23268 SOCKBUF_UNLOCK(sb); 23269 if (tp->t_flags & TF_ACKNOW) 23270 KMOD_TCPSTAT_INC(tcps_sndacks); 23271 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 23272 KMOD_TCPSTAT_INC(tcps_sndctrl); 23273 else 23274 KMOD_TCPSTAT_INC(tcps_sndwinup); 23275 23276 m = m_gethdr(M_NOWAIT, MT_DATA); 23277 if (m == NULL) { 23278 error = ENOBUFS; 23279 sack_rxmit = 0; 23280 goto out; 23281 } 23282 #ifdef INET6 23283 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 23284 MHLEN >= hdrlen) { 23285 M_ALIGN(m, hdrlen); 23286 } else 23287 #endif 23288 m->m_data += max_linkhdr; 23289 m->m_len = hdrlen; 23290 } 23291 SOCKBUF_UNLOCK_ASSERT(sb); 23292 m->m_pkthdr.rcvif = (struct ifnet *)0; 23293 #ifdef MAC 23294 mac_inpcb_create_mbuf(inp, m); 23295 #endif 23296 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 23297 #ifdef INET6 23298 if (isipv6) 23299 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 23300 else 23301 #endif /* INET6 */ 23302 #ifdef INET 23303 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 23304 #endif 23305 th = rack->r_ctl.fsb.th; 23306 udp = rack->r_ctl.fsb.udp; 23307 if (udp) { 23308 #ifdef INET6 23309 if (isipv6) 23310 ulen = hdrlen + len - sizeof(struct ip6_hdr); 23311 else 23312 #endif /* INET6 */ 23313 ulen = hdrlen + len - sizeof(struct ip); 23314 udp->uh_ulen = htons(ulen); 23315 } 23316 } else { 23317 #ifdef INET6 23318 if (isipv6) { 23319 ip6 = mtod(m, struct ip6_hdr *); 23320 if (tp->t_port) { 23321 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 23322 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 23323 udp->uh_dport = tp->t_port; 23324 ulen = hdrlen + len - sizeof(struct ip6_hdr); 23325 udp->uh_ulen = htons(ulen); 23326 th = (struct tcphdr *)(udp + 1); 23327 } else 23328 th = (struct tcphdr *)(ip6 + 1); 23329 tcpip_fillheaders(inp, tp->t_port, ip6, th); 23330 } else 23331 #endif /* INET6 */ 23332 { 23333 #ifdef INET 23334 ip = mtod(m, struct ip *); 23335 if (tp->t_port) { 23336 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 23337 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 23338 udp->uh_dport = tp->t_port; 23339 ulen = hdrlen + len - sizeof(struct ip); 23340 udp->uh_ulen = htons(ulen); 23341 th = (struct tcphdr *)(udp + 1); 23342 } else 23343 
th = (struct tcphdr *)(ip + 1); 23344 tcpip_fillheaders(inp, tp->t_port, ip, th); 23345 #endif 23346 } 23347 } 23348 /* 23349 * If we are starting a connection, send ECN setup SYN packet. If we 23350 * are on a retransmit, we may resend those bits a number of times 23351 * as per RFC 3168. 23352 */ 23353 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { 23354 flags |= tcp_ecn_output_syn_sent(tp); 23355 } 23356 /* Also handle parallel SYN for ECN */ 23357 if (TCPS_HAVERCVDSYN(tp->t_state) && 23358 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 23359 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit); 23360 if ((tp->t_state == TCPS_SYN_RECEIVED) && 23361 (tp->t_flags2 & TF2_ECN_SND_ECE)) 23362 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 23363 #ifdef INET6 23364 if (isipv6) { 23365 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 23366 ip6->ip6_flow |= htonl(ect << 20); 23367 } 23368 else 23369 #endif 23370 { 23371 #ifdef INET 23372 ip->ip_tos &= ~IPTOS_ECN_MASK; 23373 ip->ip_tos |= ect; 23374 #endif 23375 } 23376 } 23377 th->th_seq = htonl(rack_seq); 23378 th->th_ack = htonl(tp->rcv_nxt); 23379 tcp_set_flags(th, flags); 23380 /* 23381 * Calculate receive window. Don't shrink window, but avoid silly 23382 * window syndrome. 23383 * If a RST segment is sent, advertise a window of zero. 23384 */ 23385 if (flags & TH_RST) { 23386 recwin = 0; 23387 } else { 23388 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 23389 recwin < (long)segsiz) { 23390 recwin = 0; 23391 } 23392 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 23393 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 23394 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 23395 } 23396 23397 /* 23398 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 23399 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 23400 * handled in syncache. 23401 */ 23402 if (flags & TH_SYN) 23403 th->th_win = htons((u_short) 23404 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 23405 else { 23406 /* Avoid shrinking window with window scaling. */ 23407 recwin = roundup2(recwin, 1 << tp->rcv_scale); 23408 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 23409 } 23410 /* 23411 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 23412 * window. This may cause the remote transmitter to stall. This 23413 * flag tells soreceive() to disable delayed acknowledgements when 23414 * draining the buffer. This can occur if the receiver is 23415 * attempting to read more data than can be buffered prior to 23416 * transmitting on the connection. 23417 */ 23418 if (th->th_win == 0) { 23419 tp->t_sndzerowin++; 23420 tp->t_flags |= TF_RXWIN0SENT; 23421 } else 23422 tp->t_flags &= ~TF_RXWIN0SENT; 23423 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 23424 /* Now are we using fsb?, if so copy the template data to the mbuf */ 23425 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 23426 uint8_t *cpto; 23427 23428 cpto = mtod(m, uint8_t *); 23429 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 23430 /* 23431 * We have just copied in: 23432 * IP/IP6 23433 * <optional udphdr> 23434 * tcphdr (no options) 23435 * 23436 * We need to grab the correct pointers into the mbuf 23437 * for both the tcp header, and possibly the udp header (if tunneling). 23438 * We do this by using the offset in the copy buffer and adding it 23439 * to the mbuf base pointer (cpto). 
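 * For example, with IPv4 and no UDP tunnel the TCP header lands at
 * cpto + sizeof(struct ip); with a tunnel it sits a further
 * sizeof(struct udphdr) beyond that, and the IPv6 template works the
 * same way with sizeof(struct ip6_hdr).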
23440 */ 23441 #ifdef INET6 23442 if (isipv6) 23443 ip6 = mtod(m, struct ip6_hdr *); 23444 else 23445 #endif /* INET6 */ 23446 #ifdef INET 23447 ip = mtod(m, struct ip *); 23448 #endif 23449 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 23450 /* If we have a udp header lets set it into the mbuf as well */ 23451 if (udp) 23452 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); 23453 } 23454 if (optlen) { 23455 bcopy(opt, th + 1, optlen); 23456 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 23457 } 23458 /* 23459 * Put TCP length in extended header, and then checksum extended 23460 * header and data. 23461 */ 23462 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 23463 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 23464 if (to.to_flags & TOF_SIGNATURE) { 23465 /* 23466 * Calculate MD5 signature and put it into the place 23467 * determined before. 23468 * NOTE: since TCP options buffer doesn't point into 23469 * mbuf's data, calculate offset and use it. 23470 */ 23471 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 23472 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 23473 /* 23474 * Do not send segment if the calculation of MD5 23475 * digest has failed. 23476 */ 23477 goto out; 23478 } 23479 } 23480 #endif 23481 #ifdef INET6 23482 if (isipv6) { 23483 /* 23484 * ip6_plen is not need to be filled now, and will be filled 23485 * in ip6_output. 23486 */ 23487 if (tp->t_port) { 23488 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 23489 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 23490 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 23491 th->th_sum = htons(0); 23492 UDPSTAT_INC(udps_opackets); 23493 } else { 23494 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 23495 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 23496 th->th_sum = in6_cksum_pseudo(ip6, 23497 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 23498 0); 23499 } 23500 } 23501 #endif 23502 #if defined(INET6) && defined(INET) 23503 else 23504 #endif 23505 #ifdef INET 23506 { 23507 if (tp->t_port) { 23508 m->m_pkthdr.csum_flags = CSUM_UDP; 23509 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 23510 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 23511 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 23512 th->th_sum = htons(0); 23513 UDPSTAT_INC(udps_opackets); 23514 } else { 23515 m->m_pkthdr.csum_flags = CSUM_TCP; 23516 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 23517 th->th_sum = in_pseudo(ip->ip_src.s_addr, 23518 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 23519 IPPROTO_TCP + len + optlen)); 23520 } 23521 /* IP version must be set here for ipv4/ipv6 checking later */ 23522 KASSERT(ip->ip_v == IPVERSION, 23523 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 23524 } 23525 #endif 23526 /* 23527 * Enable TSO and specify the size of the segments. The TCP pseudo 23528 * header checksum is always provided. XXX: Fixme: This is currently 23529 * not the case for IPv6. 23530 */ 23531 if (tso) { 23532 /* 23533 * Here we must use t_maxseg and the optlen since 23534 * the optlen may include SACK's (or DSACK). 
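 * For example, with a 1460 byte t_maxseg and 12 bytes of timestamp
 * options, each segment carved out of the TSO burst carries 1448
 * bytes of payload.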
23535 */ 23536 KASSERT(len > tp->t_maxseg - optlen, 23537 ("%s: len <= tso_segsz", __func__)); 23538 m->m_pkthdr.csum_flags |= CSUM_TSO; 23539 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 23540 } 23541 KASSERT(len + hdrlen == m_length(m, NULL), 23542 ("%s: mbuf chain different than expected: %d + %u != %u", 23543 __func__, len, hdrlen, m_length(m, NULL))); 23544 23545 #ifdef TCP_HHOOK 23546 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 23547 hhook_run_tcp_est_out(tp, th, &to, len, tso); 23548 #endif 23549 if ((rack->r_ctl.crte != NULL) && 23550 (rack->rc_hw_nobuf == 0) && 23551 tcp_bblogging_on(tp)) { 23552 rack_log_queue_level(tp, rack, len, &tv, cts); 23553 } 23554 /* We're getting ready to send; log now. */ 23555 if (tcp_bblogging_on(rack->rc_tp)) { 23556 union tcp_log_stackspecific log; 23557 23558 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 23559 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 23560 if (rack->rack_no_prr) 23561 log.u_bbr.flex1 = 0; 23562 else 23563 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 23564 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 23565 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 23566 log.u_bbr.flex4 = orig_len; 23567 /* Save off the early/late values */ 23568 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 23569 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 23570 log.u_bbr.bw_inuse = rack_get_bw(rack); 23571 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 23572 log.u_bbr.flex8 = 0; 23573 if (rsm) { 23574 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 23575 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 23576 counter_u64_add(rack_collapsed_win_rxt, 1); 23577 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 23578 } 23579 if (doing_tlp) 23580 log.u_bbr.flex8 = 2; 23581 else 23582 log.u_bbr.flex8 = 1; 23583 } else { 23584 if (doing_tlp) 23585 log.u_bbr.flex8 = 3; 23586 } 23587 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 23588 log.u_bbr.flex7 = mark; 23589 log.u_bbr.flex7 <<= 8; 23590 log.u_bbr.flex7 |= pass; 23591 log.u_bbr.pkts_out = tp->t_maxseg; 23592 log.u_bbr.timeStamp = cts; 23593 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 23594 if (rsm && (rsm->r_rtr_cnt > 0)) { 23595 /* 23596 * When we have a retransmit we want to log the 23597 * burst at send and flight at send from before. 23598 */ 23599 log.u_bbr.flex5 = rsm->r_fas; 23600 log.u_bbr.bbr_substate = rsm->r_bas; 23601 } else { 23602 /* 23603 * New transmits we log in flex5 the inflight again as 23604 * well as the number of segments in our send in the 23605 * substate field. 23606 */ 23607 log.u_bbr.flex5 = log.u_bbr.inflight; 23608 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 23609 } 23610 log.u_bbr.lt_epoch = cwnd_to_use; 23611 log.u_bbr.delivered = sendalot; 23612 log.u_bbr.rttProp = (uint64_t)rsm; 23613 log.u_bbr.pkt_epoch = __LINE__; 23614 if (rsm) { 23615 log.u_bbr.delRate = rsm->r_flags; 23616 log.u_bbr.delRate <<= 31; 23617 log.u_bbr.delRate |= rack->r_must_retran; 23618 log.u_bbr.delRate <<= 1; 23619 log.u_bbr.delRate |= (sack_rxmit & 0x00000001); 23620 } else { 23621 log.u_bbr.delRate = rack->r_must_retran; 23622 log.u_bbr.delRate <<= 1; 23623 log.u_bbr.delRate |= (sack_rxmit & 0x00000001); 23624 } 23625 lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 23626 len, &log, false, NULL, __func__, __LINE__, &tv); 23627 } else 23628 lgb = NULL; 23629 23630 /* 23631 * Fill in IP length and desired time to live and send to IP level. 
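 * The IPv6 path below sets ip6_plen and the hop limit; the IPv4 path
 * sets ip_len and, for PMTU discovery, the DF bit.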
23632 * There should be a better way to handle ttl and tos; we could keep 23633 * them in the template, but need a way to checksum without them. 23634 */ 23635 /* 23636 * m->m_pkthdr.len should have been set before cksum calcuration, 23637 * because in6_cksum() need it. 23638 */ 23639 #ifdef INET6 23640 if (isipv6) { 23641 /* 23642 * we separately set hoplimit for every segment, since the 23643 * user might want to change the value via setsockopt. Also, 23644 * desired default hop limit might be changed via Neighbor 23645 * Discovery. 23646 */ 23647 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); 23648 23649 /* 23650 * Set the packet size here for the benefit of DTrace 23651 * probes. ip6_output() will set it properly; it's supposed 23652 * to include the option header lengths as well. 23653 */ 23654 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 23655 23656 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 23657 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 23658 else 23659 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 23660 23661 if (tp->t_state == TCPS_SYN_SENT) 23662 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 23663 23664 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 23665 /* TODO: IPv6 IP6TOS_ECT bit on */ 23666 error = ip6_output(m, 23667 inp->in6p_outputopts, 23668 &inp->inp_route6, 23669 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 23670 NULL, NULL, inp); 23671 23672 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) 23673 mtu = inp->inp_route6.ro_nh->nh_mtu; 23674 } 23675 #endif /* INET6 */ 23676 #if defined(INET) && defined(INET6) 23677 else 23678 #endif 23679 #ifdef INET 23680 { 23681 ip->ip_len = htons(m->m_pkthdr.len); 23682 #ifdef INET6 23683 if (inp->inp_vflag & INP_IPV6PROTO) 23684 ip->ip_ttl = in6_selecthlim(inp, NULL); 23685 #endif /* INET6 */ 23686 rack->r_ctl.fsb.hoplimit = ip->ip_ttl; 23687 /* 23688 * If we do path MTU discovery, then we set DF on every 23689 * packet. This might not be the best thing to do according 23690 * to RFC3390 Section 2. However the tcp hostcache migitates 23691 * the problem so it affects only the first tcp connection 23692 * with a host. 23693 * 23694 * NB: Don't set DF on small MTU/MSS to have a safe 23695 * fallback. 23696 */ 23697 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 23698 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 23699 if (tp->t_port == 0 || len < V_tcp_minmss) { 23700 ip->ip_off |= htons(IP_DF); 23701 } 23702 } else { 23703 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 23704 } 23705 23706 if (tp->t_state == TCPS_SYN_SENT) 23707 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 23708 23709 TCP_PROBE5(send, NULL, tp, ip, tp, th); 23710 23711 error = ip_output(m, 23712 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 23713 inp->inp_options, 23714 #else 23715 NULL, 23716 #endif 23717 &inp->inp_route, 23718 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0, 23719 inp); 23720 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) 23721 mtu = inp->inp_route.ro_nh->nh_mtu; 23722 } 23723 #endif /* INET */ 23724 if (lgb) { 23725 lgb->tlb_errno = error; 23726 lgb = NULL; 23727 } 23728 23729 out: 23730 /* 23731 * In transmit state, time the transmission and arrange for the 23732 * retransmit. In persist state, just set snd_max. 
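 * On a successful send we also update the long term bandwidth
 * accounting, clear any forced-ACK state and debit the PRR send
 * count for the bytes that went out.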
23733 */ 23734 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 23735 rack_to_usec_ts(&tv), 23736 rsm, add_flag, s_mb, s_moff, hw_tls, segsiz); 23737 if (error == 0) { 23738 if (add_flag & RACK_IS_PCM) { 23739 /* We just launched a PCM */ 23740 /* rrs here log */ 23741 rack->pcm_in_progress = 1; 23742 rack->pcm_needed = 0; 23743 rack_log_pcm(rack, 7, len, rack->r_ctl.pcm_max_seg, add_flag); 23744 } 23745 if (rsm == NULL) { 23746 if (rack->lt_bw_up == 0) { 23747 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv); 23748 rack->r_ctl.lt_seq = tp->snd_una; 23749 rack->lt_bw_up = 1; 23750 } else if (((rack_seq + len) - rack->r_ctl.lt_seq) > 0x7fffffff) { 23751 /* 23752 * Need to record what we have since we are 23753 * approaching seq wrap. 23754 */ 23755 uint64_t tmark; 23756 23757 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); 23758 rack->r_ctl.lt_seq = tp->snd_una; 23759 tmark = tcp_get_u64_usecs(&tv); 23760 if (tmark > rack->r_ctl.lt_timemark) { 23761 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 23762 rack->r_ctl.lt_timemark = tmark; 23763 } 23764 } 23765 } 23766 rack->forced_ack = 0; /* If we send something zap the FA flag */ 23767 counter_u64_add(rack_total_bytes, len); 23768 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 23769 if (rsm && doing_tlp) { 23770 rack->rc_last_sent_tlp_past_cumack = 0; 23771 rack->rc_last_sent_tlp_seq_valid = 1; 23772 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 23773 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 23774 } 23775 if (rack->rc_hw_nobuf) { 23776 rack->rc_hw_nobuf = 0; 23777 rack->r_ctl.rc_agg_delayed = 0; 23778 rack->r_early = 0; 23779 rack->r_late = 0; 23780 rack->r_ctl.rc_agg_early = 0; 23781 } 23782 if (rsm && (doing_tlp == 0)) { 23783 /* Set we retransmitted */ 23784 rack->rc_gp_saw_rec = 1; 23785 } else { 23786 if (cwnd_to_use > tp->snd_ssthresh) { 23787 /* Set we sent in CA */ 23788 rack->rc_gp_saw_ca = 1; 23789 } else { 23790 /* Set we sent in SS */ 23791 rack->rc_gp_saw_ss = 1; 23792 } 23793 } 23794 if (TCPS_HAVEESTABLISHED(tp->t_state) && 23795 (tp->t_flags & TF_SACK_PERMIT) && 23796 tp->rcv_numsacks > 0) 23797 tcp_clean_dsack_blocks(tp); 23798 tot_len_this_send += len; 23799 if (len == 0) { 23800 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 23801 } else { 23802 int idx; 23803 23804 idx = (len / segsiz) + 3; 23805 if (idx >= TCP_MSS_ACCT_ATIMER) 23806 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 23807 else 23808 counter_u64_add(rack_out_size[idx], 1); 23809 } 23810 } 23811 if ((rack->rack_no_prr == 0) && 23812 sub_from_prr && 23813 (error == 0)) { 23814 if (rack->r_ctl.rc_prr_sndcnt >= len) 23815 rack->r_ctl.rc_prr_sndcnt -= len; 23816 else 23817 rack->r_ctl.rc_prr_sndcnt = 0; 23818 } 23819 sub_from_prr = 0; 23820 if (doing_tlp) { 23821 /* Make sure the TLP is added */ 23822 add_flag |= RACK_TLP; 23823 } else if (rsm) { 23824 /* If its a resend without TLP then it must not have the flag */ 23825 rsm->r_flags &= ~RACK_TLP; 23826 } 23827 23828 23829 if ((error == 0) && 23830 (len > 0) && 23831 (tp->snd_una == tp->snd_max)) 23832 rack->r_ctl.rc_tlp_rxt_last_time = cts; 23833 23834 { 23835 /* 23836 * This block is not associated with the above error == 0 test. 23837 * It is used to advance snd_max if we have a new transmit. 
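 * (SYN and FIN each consume one sequence number, so they bump
 * snd_max by one before len is added below.)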
23838 */ 23839 tcp_seq startseq = tp->snd_max; 23840 23841 23842 if (rsm && (doing_tlp == 0)) 23843 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 23844 if (error) 23845 /* We don't log or do anything with errors */ 23846 goto nomore; 23847 if (doing_tlp == 0) { 23848 if (rsm == NULL) { 23849 /* 23850 * Not a retransmission of some 23851 * sort, new data is going out so 23852 * clear our TLP count and flag. 23853 */ 23854 rack->rc_tlp_in_progress = 0; 23855 rack->r_ctl.rc_tlp_cnt_out = 0; 23856 } 23857 } else { 23858 /* 23859 * We have just sent a TLP, mark that it is true 23860 * and make sure our in progress is set so we 23861 * continue to check the count. 23862 */ 23863 rack->rc_tlp_in_progress = 1; 23864 rack->r_ctl.rc_tlp_cnt_out++; 23865 } 23866 /* 23867 * If we are retransmitting we are done, snd_max 23868 * does not get updated. 23869 */ 23870 if (sack_rxmit) 23871 goto nomore; 23872 if ((tp->snd_una == tp->snd_max) && (len > 0)) { 23873 /* 23874 * Update the time we just added data since 23875 * nothing was outstanding. 23876 */ 23877 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 23878 tp->t_acktime = ticks; 23879 } 23880 /* 23881 * Now for special SYN/FIN handling. 23882 */ 23883 if (flags & (TH_SYN | TH_FIN)) { 23884 if ((flags & TH_SYN) && 23885 ((tp->t_flags & TF_SENTSYN) == 0)) { 23886 tp->snd_max++; 23887 tp->t_flags |= TF_SENTSYN; 23888 } 23889 if ((flags & TH_FIN) && 23890 ((tp->t_flags & TF_SENTFIN) == 0)) { 23891 tp->snd_max++; 23892 tp->t_flags |= TF_SENTFIN; 23893 } 23894 } 23895 tp->snd_max += len; 23896 if (rack->rc_new_rnd_needed) { 23897 rack_new_round_starts(tp, rack, tp->snd_max); 23898 } 23899 /* 23900 * Time this transmission if not a retransmission and 23901 * not currently timing anything. 23902 * This is only relevant in case of switching back to 23903 * the base stack. 23904 */ 23905 if (tp->t_rtttime == 0) { 23906 tp->t_rtttime = ticks; 23907 tp->t_rtseq = startseq; 23908 KMOD_TCPSTAT_INC(tcps_segstimed); 23909 } 23910 if (len && 23911 ((tp->t_flags & TF_GPUTINPROG) == 0)) 23912 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 23913 /* 23914 * If we are doing FO we need to update the mbuf position and subtract 23915 * this happens when the peer sends us duplicate information and 23916 * we thus want to send a DSACK. 23917 * 23918 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 23919 * turned off? If not then we are going to echo multiple DSACK blocks 23920 * out (with the TSO), which we should not be doing. 23921 */ 23922 if (rack->r_fast_output && len) { 23923 if (rack->r_ctl.fsb.left_to_send > len) 23924 rack->r_ctl.fsb.left_to_send -= len; 23925 else 23926 rack->r_ctl.fsb.left_to_send = 0; 23927 if (rack->r_ctl.fsb.left_to_send < segsiz) 23928 rack->r_fast_output = 0; 23929 if (rack->r_fast_output) { 23930 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 23931 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 23932 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); 23933 } 23934 } 23935 if (rack_pcm_blast == 0) { 23936 if ((orig_len > len) && 23937 (add_flag & RACK_IS_PCM) && 23938 (len < pace_max_seg) && 23939 ((pace_max_seg - len) > segsiz)) { 23940 /* 23941 * We are doing a PCM measurement and we did 23942 * not get enough data in the TSO to meet the 23943 * burst requirement. 
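 * Trim off what was just sent and loop back to send so the
 * remainder of the burst goes out now.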
23944 */ 23945 uint32_t n_len; 23946 23947 n_len = (orig_len - len); 23948 orig_len -= len; 23949 pace_max_seg -= len; 23950 len = n_len; 23951 sb_offset = tp->snd_max - tp->snd_una; 23952 /* Re-lock for the next spin */ 23953 SOCKBUF_LOCK(sb); 23954 goto send; 23955 } 23956 } else { 23957 if ((orig_len > len) && 23958 (add_flag & RACK_IS_PCM) && 23959 ((orig_len - len) > segsiz)) { 23960 /* 23961 * We are doing a PCM measurement and we did 23962 * not get enough data in the TSO to meet the 23963 * burst requirement. 23964 */ 23965 uint32_t n_len; 23966 23967 n_len = (orig_len - len); 23968 orig_len -= len; 23969 len = n_len; 23970 sb_offset = tp->snd_max - tp->snd_una; 23971 /* Re-lock for the next spin */ 23972 SOCKBUF_LOCK(sb); 23973 goto send; 23974 } 23975 } 23976 } 23977 nomore: 23978 if (error) { 23979 rack->r_ctl.rc_agg_delayed = 0; 23980 rack->r_early = 0; 23981 rack->r_late = 0; 23982 rack->r_ctl.rc_agg_early = 0; 23983 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 23984 /* 23985 * Failures do not advance the seq counter above. For the 23986 * case of ENOBUFS we will fall out and retry in 1ms with 23987 * the hpts. Everything else will just have to retransmit 23988 * with the timer. 23989 * 23990 * In any case, we do not want to loop around for another 23991 * send without a good reason. 23992 */ 23993 sendalot = 0; 23994 switch (error) { 23995 case EPERM: 23996 case EACCES: 23997 tp->t_softerror = error; 23998 #ifdef TCP_ACCOUNTING 23999 crtsc = get_cyclecount(); 24000 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 24001 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 24002 } 24003 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 24004 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 24005 } 24006 sched_unpin(); 24007 #endif 24008 return (error); 24009 case ENOBUFS: 24010 /* 24011 * Pace us right away to retry in a some 24012 * time 24013 */ 24014 if (rack->r_ctl.crte != NULL) { 24015 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); 24016 if (tcp_bblogging_on(rack->rc_tp)) 24017 rack_log_queue_level(tp, rack, len, &tv, cts); 24018 } else 24019 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); 24020 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 24021 if (rack->rc_enobuf < 0x7f) 24022 rack->rc_enobuf++; 24023 if (slot < (10 * HPTS_USEC_IN_MSEC)) 24024 slot = 10 * HPTS_USEC_IN_MSEC; 24025 if (rack->r_ctl.crte != NULL) { 24026 counter_u64_add(rack_saw_enobuf_hw, 1); 24027 tcp_rl_log_enobuf(rack->r_ctl.crte); 24028 } 24029 counter_u64_add(rack_saw_enobuf, 1); 24030 goto enobufs; 24031 case EMSGSIZE: 24032 /* 24033 * For some reason the interface we used initially 24034 * to send segments changed to another or lowered 24035 * its MTU. If TSO was active we either got an 24036 * interface without TSO capabilits or TSO was 24037 * turned off. If we obtained mtu from ip_output() 24038 * then update it and try again. 
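 * tcp_mss_update() recomputes t_maxseg from the new MTU; if that
 * shrank the MSS we jump back to "again" and re-slice the send.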
24039 */ 24040 if (tso) 24041 tp->t_flags &= ~TF_TSO; 24042 if (mtu != 0) { 24043 int saved_mtu; 24044 24045 saved_mtu = tp->t_maxseg; 24046 tcp_mss_update(tp, -1, mtu, NULL, NULL); 24047 if (saved_mtu > tp->t_maxseg) { 24048 goto again; 24049 } 24050 } 24051 slot = 10 * HPTS_USEC_IN_MSEC; 24052 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 24053 #ifdef TCP_ACCOUNTING 24054 crtsc = get_cyclecount(); 24055 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 24056 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 24057 } 24058 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 24059 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 24060 } 24061 sched_unpin(); 24062 #endif 24063 return (error); 24064 case ENETUNREACH: 24065 counter_u64_add(rack_saw_enetunreach, 1); 24066 case EHOSTDOWN: 24067 case EHOSTUNREACH: 24068 case ENETDOWN: 24069 if (TCPS_HAVERCVDSYN(tp->t_state)) { 24070 tp->t_softerror = error; 24071 } 24072 /* FALLTHROUGH */ 24073 default: 24074 slot = 10 * HPTS_USEC_IN_MSEC; 24075 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 24076 #ifdef TCP_ACCOUNTING 24077 crtsc = get_cyclecount(); 24078 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 24079 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 24080 } 24081 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 24082 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 24083 } 24084 sched_unpin(); 24085 #endif 24086 return (error); 24087 } 24088 } else { 24089 rack->rc_enobuf = 0; 24090 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 24091 rack->r_ctl.retran_during_recovery += len; 24092 } 24093 KMOD_TCPSTAT_INC(tcps_sndtotal); 24094 24095 /* 24096 * Data sent (as far as we can tell). If this advertises a larger 24097 * window than any other segment, then remember the size of the 24098 * advertised window. Any pending ACK has now been sent. 24099 */ 24100 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 24101 tp->rcv_adv = tp->rcv_nxt + recwin; 24102 24103 tp->last_ack_sent = tp->rcv_nxt; 24104 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 24105 enobufs: 24106 if (sendalot) { 24107 /* Do we need to turn off sendalot? */ 24108 if (pace_max_seg && 24109 (tot_len_this_send >= pace_max_seg)) { 24110 /* We hit our max. */ 24111 sendalot = 0; 24112 } 24113 } 24114 if ((error == 0) && (flags & TH_FIN)) 24115 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 24116 if (flags & TH_RST) { 24117 /* 24118 * We don't send again after sending a RST. 24119 */ 24120 slot = 0; 24121 sendalot = 0; 24122 if (error == 0) 24123 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 24124 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) { 24125 /* 24126 * Get our pacing rate, if an error 24127 * occurred in sending (ENOBUF) we would 24128 * hit the else if with slot preset. Other 24129 * errors return. 24130 */ 24131 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz, __LINE__); 24132 } 24133 /* We have sent clear the flag */ 24134 rack->r_ent_rec_ns = 0; 24135 if (rack->r_must_retran) { 24136 if (rsm) { 24137 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 24138 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 24139 /* 24140 * We have retransmitted all. 24141 */ 24142 rack->r_must_retran = 0; 24143 rack->r_ctl.rc_out_at_rto = 0; 24144 } 24145 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 24146 /* 24147 * Sending new data will also kill 24148 * the loop. 
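 * (Everything that was outstanding at the RTO has now been
 * covered, so the must-retransmit state can be cleared.)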
24149 */ 24150 rack->r_must_retran = 0; 24151 rack->r_ctl.rc_out_at_rto = 0; 24152 } 24153 } 24154 rack->r_ctl.fsb.recwin = recwin; 24155 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 24156 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 24157 /* 24158 * We hit an RTO and now have past snd_max at the RTO 24159 * clear all the WAS flags. 24160 */ 24161 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 24162 } 24163 if (slot) { 24164 /* set the rack tcb into the slot N */ 24165 if ((error == 0) && 24166 rack_use_rfo && 24167 ((flags & (TH_SYN|TH_FIN)) == 0) && 24168 (rsm == NULL) && 24169 (ipoptlen == 0) && 24170 (tp->rcv_numsacks == 0) && 24171 (rack->rc_policer_detected == 0) && 24172 rack->r_fsb_inited && 24173 TCPS_HAVEESTABLISHED(tp->t_state) && 24174 ((IN_RECOVERY(tp->t_flags)) == 0) && 24175 (rack->r_must_retran == 0) && 24176 ((tp->t_flags & TF_NEEDFIN) == 0) && 24177 (len > 0) && (orig_len > 0) && 24178 (orig_len > len) && 24179 ((orig_len - len) >= segsiz) && 24180 ((optlen == 0) || 24181 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 24182 /* We can send at least one more MSS using our fsb */ 24183 rack_setup_fast_output(tp, rack, sb, len, orig_len, 24184 segsiz, pace_max_seg, hw_tls, flags); 24185 } else 24186 rack->r_fast_output = 0; 24187 rack_log_fsb(rack, tp, so, flags, 24188 ipoptlen, orig_len, len, error, 24189 (rsm == NULL), optlen, __LINE__, 2); 24190 } else if (sendalot) { 24191 int ret; 24192 24193 sack_rxmit = 0; 24194 if ((error == 0) && 24195 rack_use_rfo && 24196 ((flags & (TH_SYN|TH_FIN)) == 0) && 24197 (rsm == NULL) && 24198 (ipoptlen == 0) && 24199 (tp->rcv_numsacks == 0) && 24200 (rack->r_must_retran == 0) && 24201 rack->r_fsb_inited && 24202 TCPS_HAVEESTABLISHED(tp->t_state) && 24203 ((IN_RECOVERY(tp->t_flags)) == 0) && 24204 ((tp->t_flags & TF_NEEDFIN) == 0) && 24205 (len > 0) && (orig_len > 0) && 24206 (orig_len > len) && 24207 ((orig_len - len) >= segsiz) && 24208 ((optlen == 0) || 24209 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 24210 /* we can use fast_output for more */ 24211 rack_setup_fast_output(tp, rack, sb, len, orig_len, 24212 segsiz, pace_max_seg, hw_tls, flags); 24213 if (rack->r_fast_output) { 24214 error = 0; 24215 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 24216 if (ret >= 0) 24217 return (ret); 24218 else if (error) 24219 goto nomore; 24220 24221 } 24222 } 24223 goto again; 24224 } 24225 skip_all_send: 24226 /* Assure when we leave that snd_nxt will point to top */ 24227 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 24228 tp->snd_nxt = tp->snd_max; 24229 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 24230 #ifdef TCP_ACCOUNTING 24231 crtsc = get_cyclecount() - ts_val; 24232 if (tot_len_this_send) { 24233 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 24234 tp->tcp_cnt_counters[SND_OUT_DATA]++; 24235 } 24236 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 24237 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 24238 } 24239 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 24240 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 24241 } 24242 } else { 24243 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 24244 tp->tcp_cnt_counters[SND_OUT_ACK]++; 24245 } 24246 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 24247 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 24248 } 24249 } 24250 sched_unpin(); 24251 #endif 24252 if (error == ENOBUFS) 24253 error = 0; 24254 return (error); 24255 } 24256 24257 static void 24258 rack_update_seg(struct tcp_rack *rack) 
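/* Refresh the pacing max segment size and log a pacing calc if it changed. */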
24259 { 24260 uint32_t orig_val; 24261 24262 orig_val = rack->r_ctl.rc_pace_max_segs; 24263 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 24264 if (orig_val != rack->r_ctl.rc_pace_max_segs) 24265 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); 24266 } 24267 24268 static void 24269 rack_mtu_change(struct tcpcb *tp) 24270 { 24271 /* 24272 * The MSS may have changed 24273 */ 24274 struct tcp_rack *rack; 24275 struct rack_sendmap *rsm; 24276 24277 rack = (struct tcp_rack *)tp->t_fb_ptr; 24278 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 24279 /* 24280 * The MTU has changed we need to resend everything 24281 * since all we have sent is lost. We first fix 24282 * up the mtu though. 24283 */ 24284 rack_set_pace_segments(tp, rack, __LINE__, NULL); 24285 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 24286 rack_remxt_tmr(tp); 24287 rack->r_fast_output = 0; 24288 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 24289 rack->r_ctl.rc_sacked); 24290 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 24291 rack->r_must_retran = 1; 24292 /* Mark all inflight to needing to be rxt'd */ 24293 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 24294 rsm->r_flags |= (RACK_MUST_RXT|RACK_PMTU_CHG); 24295 } 24296 } 24297 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 24298 /* We don't use snd_nxt to retransmit */ 24299 tp->snd_nxt = tp->snd_max; 24300 } 24301 24302 static int 24303 rack_set_dgp(struct tcp_rack *rack) 24304 { 24305 if (rack->dgp_on == 1) 24306 return(0); 24307 if ((rack->use_fixed_rate == 1) && 24308 (rack->rc_always_pace == 1)) { 24309 /* 24310 * We are already pacing another 24311 * way. 24312 */ 24313 return (EBUSY); 24314 } 24315 if (rack->rc_always_pace == 1) { 24316 rack_remove_pacing(rack); 24317 } 24318 if (tcp_incr_dgp_pacing_cnt() == 0) 24319 return (ENOSPC); 24320 rack->r_ctl.pacing_method |= RACK_DGP_PACING; 24321 rack->rc_fillcw_apply_discount = 0; 24322 rack->dgp_on = 1; 24323 rack->rc_always_pace = 1; 24324 rack->rc_pace_dnd = 1; 24325 rack->use_fixed_rate = 0; 24326 if (rack->gp_ready) 24327 rack_set_cc_pacing(rack); 24328 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 24329 rack->rack_attempt_hdwr_pace = 0; 24330 /* rxt settings */ 24331 rack->full_size_rxt = 1; 24332 rack->shape_rxt_to_pacing_min = 0; 24333 /* cmpack=1 */ 24334 rack->r_use_cmp_ack = 1; 24335 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 24336 rack->r_use_cmp_ack) 24337 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 24338 /* scwnd=1 */ 24339 rack->rack_enable_scwnd = 1; 24340 /* dynamic=100 */ 24341 rack->rc_gp_dyn_mul = 1; 24342 /* gp_inc_ca */ 24343 rack->r_ctl.rack_per_of_gp_ca = 100; 24344 /* rrr_conf=3 */ 24345 rack->r_rr_config = 3; 24346 /* npush=2 */ 24347 rack->r_ctl.rc_no_push_at_mrtt = 2; 24348 /* fillcw=1 */ 24349 rack->rc_pace_to_cwnd = 1; 24350 rack->rc_pace_fill_if_rttin_range = 0; 24351 rack->rtt_limit_mul = 0; 24352 /* noprr=1 */ 24353 rack->rack_no_prr = 1; 24354 /* lscwnd=1 */ 24355 rack->r_limit_scw = 1; 24356 /* gp_inc_rec */ 24357 rack->r_ctl.rack_per_of_gp_rec = 90; 24358 return (0); 24359 } 24360 24361 static int 24362 rack_set_profile(struct tcp_rack *rack, int prof) 24363 { 24364 int err = EINVAL; 24365 if (prof == 1) { 24366 /* 24367 * Profile 1 is "standard" DGP. It ignores 24368 * client buffer level. 
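 * (Profile 6 below uses the same DGP base but also applies the
 * fill-cw discount.)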
24369 */ 24370 err = rack_set_dgp(rack); 24371 if (err) 24372 return (err); 24373 } else if (prof == 6) { 24374 err = rack_set_dgp(rack); 24375 if (err) 24376 return (err); 24377 /* 24378 * Profile 6 tweaks DGP so that it will apply to 24379 * fill-cw the same settings that profile5 does 24380 * to replace DGP. It gets then the max(dgp-rate, fillcw(discounted). 24381 */ 24382 rack->rc_fillcw_apply_discount = 1; 24383 } else if (prof == 0) { 24384 /* This changes things back to the default settings */ 24385 if (rack->rc_always_pace == 1) { 24386 rack_remove_pacing(rack); 24387 } else { 24388 /* Make sure any stray flags are off */ 24389 rack->dgp_on = 0; 24390 rack->rc_hybrid_mode = 0; 24391 rack->use_fixed_rate = 0; 24392 } 24393 err = 0; 24394 if (rack_fill_cw_state) 24395 rack->rc_pace_to_cwnd = 1; 24396 else 24397 rack->rc_pace_to_cwnd = 0; 24398 24399 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 24400 rack->r_ctl.pacing_method |= RACK_REG_PACING; 24401 rack->rc_always_pace = 1; 24402 if (rack->rack_hibeta) 24403 rack_set_cc_pacing(rack); 24404 } else 24405 rack->rc_always_pace = 0; 24406 if (rack_dsack_std_based & 0x1) { 24407 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 24408 rack->rc_rack_tmr_std_based = 1; 24409 } 24410 if (rack_dsack_std_based & 0x2) { 24411 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 24412 rack->rc_rack_use_dsack = 1; 24413 } 24414 if (rack_use_cmp_acks) 24415 rack->r_use_cmp_ack = 1; 24416 else 24417 rack->r_use_cmp_ack = 0; 24418 if (rack_disable_prr) 24419 rack->rack_no_prr = 1; 24420 else 24421 rack->rack_no_prr = 0; 24422 if (rack_gp_no_rec_chg) 24423 rack->rc_gp_no_rec_chg = 1; 24424 else 24425 rack->rc_gp_no_rec_chg = 0; 24426 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 24427 rack->r_mbuf_queue = 1; 24428 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 24429 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 24430 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 24431 } else { 24432 rack->r_mbuf_queue = 0; 24433 rack->rc_tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 24434 } 24435 if (rack_enable_shared_cwnd) 24436 rack->rack_enable_scwnd = 1; 24437 else 24438 rack->rack_enable_scwnd = 0; 24439 if (rack_do_dyn_mul) { 24440 /* When dynamic adjustment is on CA needs to start at 100% */ 24441 rack->rc_gp_dyn_mul = 1; 24442 if (rack_do_dyn_mul >= 100) 24443 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 24444 } else { 24445 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 24446 rack->rc_gp_dyn_mul = 0; 24447 } 24448 rack->r_rr_config = 0; 24449 rack->r_ctl.rc_no_push_at_mrtt = 0; 24450 rack->rc_pace_fill_if_rttin_range = 0; 24451 rack->rtt_limit_mul = 0; 24452 24453 if (rack_enable_hw_pacing) 24454 rack->rack_hdw_pace_ena = 1; 24455 else 24456 rack->rack_hdw_pace_ena = 0; 24457 if (rack_disable_prr) 24458 rack->rack_no_prr = 1; 24459 else 24460 rack->rack_no_prr = 0; 24461 if (rack_limits_scwnd) 24462 rack->r_limit_scw = 1; 24463 else 24464 rack->r_limit_scw = 0; 24465 rack_init_retransmit_value(rack, rack_rxt_controls); 24466 err = 0; 24467 } 24468 return (err); 24469 } 24470 24471 static int 24472 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 24473 { 24474 struct deferred_opt_list *dol; 24475 24476 dol = malloc(sizeof(struct deferred_opt_list), 24477 M_TCPDO, M_NOWAIT|M_ZERO); 24478 if (dol == NULL) { 24479 /* 24480 * No space yikes -- fail out.. 
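 * Returning 0 tells the caller the option was not queued.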
24481 */ 24482 return (0); 24483 } 24484 dol->optname = sopt_name; 24485 dol->optval = loptval; 24486 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 24487 return (1); 24488 } 24489 24490 static int 24491 process_hybrid_pacing(struct tcp_rack *rack, struct tcp_hybrid_req *hybrid) 24492 { 24493 #ifdef TCP_REQUEST_TRK 24494 struct tcp_sendfile_track *sft; 24495 struct timeval tv; 24496 tcp_seq seq; 24497 int err; 24498 24499 microuptime(&tv); 24500 24501 /* Make sure no fixed rate is on */ 24502 rack->use_fixed_rate = 0; 24503 rack->r_ctl.rc_fixed_pacing_rate_rec = 0; 24504 rack->r_ctl.rc_fixed_pacing_rate_ca = 0; 24505 rack->r_ctl.rc_fixed_pacing_rate_ss = 0; 24506 /* Now allocate or find our entry that will have these settings */ 24507 sft = tcp_req_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusectick(&tv), 0); 24508 if (sft == NULL) { 24509 rack->rc_tp->tcp_hybrid_error++; 24510 /* no space, where would it have gone? */ 24511 seq = rack->rc_tp->snd_una + rack->rc_tp->t_inpcb.inp_socket->so_snd.sb_ccc; 24512 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_ROOM, __LINE__, 0); 24513 return (ENOSPC); 24514 } 24515 /* mask our internal flags */ 24516 hybrid->hybrid_flags &= TCP_HYBRID_PACING_USER_MASK; 24517 /* The seq will be snd_una + everything in the buffer */ 24518 seq = sft->start_seq; 24519 if ((hybrid->hybrid_flags & TCP_HYBRID_PACING_ENABLE) == 0) { 24520 /* Disabling hybrid pacing */ 24521 if (rack->rc_hybrid_mode) { 24522 rack_set_profile(rack, 0); 24523 rack->rc_tp->tcp_hybrid_stop++; 24524 } 24525 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_TURNED_OFF, __LINE__, 0); 24526 return (0); 24527 } 24528 if (rack->dgp_on == 0) { 24529 /* 24530 * If we have not yet turned DGP on, do so 24531 * now setting pure DGP mode, no buffer level 24532 * response. 24533 */ 24534 if ((err = rack_set_profile(rack, 1)) != 0){ 24535 /* Failed to turn pacing on */ 24536 rack->rc_tp->tcp_hybrid_error++; 24537 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_NO_PACING, __LINE__, 0); 24538 return (err); 24539 } 24540 } 24541 /* 24542 * Now we must switch to hybrid mode as well which also 24543 * means moving to regular pacing. 24544 */ 24545 if (rack->rc_hybrid_mode == 0) { 24546 /* First time */ 24547 if (tcp_can_enable_pacing()) { 24548 rack->r_ctl.pacing_method |= RACK_REG_PACING; 24549 rack->rc_hybrid_mode = 1; 24550 } else { 24551 return (ENOSPC); 24552 } 24553 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) { 24554 /* 24555 * This should be true. 24556 */ 24557 tcp_dec_dgp_pacing_cnt(); 24558 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 24559 } 24560 } 24561 /* Now set in our flags */ 24562 sft->hybrid_flags = hybrid->hybrid_flags | TCP_HYBRID_PACING_WASSET; 24563 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_CSPR) 24564 sft->cspr = hybrid->cspr; 24565 else 24566 sft->cspr = 0; 24567 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_H_MS) 24568 sft->hint_maxseg = hybrid->hint_maxseg; 24569 else 24570 sft->hint_maxseg = 0; 24571 rack->rc_tp->tcp_hybrid_start++; 24572 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_RULES_SET, __LINE__,0); 24573 return (0); 24574 #else 24575 return (ENOTSUP); 24576 #endif 24577 } 24578 24579 static int 24580 rack_stack_information(struct tcpcb *tp, struct stack_specific_info *si) 24581 { 24582 /* 24583 * Gather rack specific information. 
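 * We report policer-detection state, the current round and the
 * transmitted/retransmitted byte counts.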
24584 */ 24585 struct tcp_rack *rack; 24586 24587 rack = (struct tcp_rack *)tp->t_fb_ptr; 24588 /* We pulled a SSI info log out what was there */ 24589 policer_detection_log(rack, rack->rc_highly_buffered, 0, 0, 0, 20); 24590 if (rack->policer_detect_on) { 24591 si->policer_detection_enabled = 1; 24592 if (rack->rc_policer_detected) { 24593 si->policer_detected = 1; 24594 si->policer_bucket_size = rack->r_ctl.policer_bucket_size; 24595 si->policer_last_bw = rack->r_ctl.policer_bw; 24596 } else { 24597 si->policer_detected = 0; 24598 si->policer_bucket_size = 0; 24599 si->policer_last_bw = 0; 24600 } 24601 si->current_round = rack->r_ctl.current_round; 24602 si->highly_buffered = rack->rc_highly_buffered; 24603 } 24604 si->bytes_transmitted = tp->t_sndbytes; 24605 si->bytes_retransmitted = tp->t_snd_rxt_bytes; 24606 return (0); 24607 } 24608 24609 static int 24610 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 24611 uint32_t optval, uint64_t loptval, struct tcp_hybrid_req *hybrid) 24612 24613 { 24614 struct epoch_tracker et; 24615 struct sockopt sopt; 24616 struct cc_newreno_opts opt; 24617 uint64_t val; 24618 int error = 0; 24619 uint16_t ca, ss; 24620 24621 switch (sopt_name) { 24622 case TCP_RACK_SET_RXT_OPTIONS: 24623 if ((optval >= 0) && (optval <= 2)) { 24624 rack_init_retransmit_value(rack, optval); 24625 } else { 24626 /* 24627 * You must send in 0, 1 or 2 all else is 24628 * invalid. 24629 */ 24630 error = EINVAL; 24631 } 24632 break; 24633 case TCP_RACK_DSACK_OPT: 24634 RACK_OPTS_INC(tcp_rack_dsack_opt); 24635 if (optval & 0x1) { 24636 rack->rc_rack_tmr_std_based = 1; 24637 } else { 24638 rack->rc_rack_tmr_std_based = 0; 24639 } 24640 if (optval & 0x2) { 24641 rack->rc_rack_use_dsack = 1; 24642 } else { 24643 rack->rc_rack_use_dsack = 0; 24644 } 24645 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); 24646 break; 24647 case TCP_RACK_PACING_DIVISOR: 24648 RACK_OPTS_INC(tcp_rack_pacing_divisor); 24649 if (optval == 0) { 24650 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; 24651 } else { 24652 if (optval < RL_MIN_DIVISOR) 24653 rack->r_ctl.pace_len_divisor = RL_MIN_DIVISOR; 24654 else 24655 rack->r_ctl.pace_len_divisor = optval; 24656 } 24657 break; 24658 case TCP_RACK_HI_BETA: 24659 RACK_OPTS_INC(tcp_rack_hi_beta); 24660 if (optval > 0) { 24661 rack->rack_hibeta = 1; 24662 if ((optval >= 50) && 24663 (optval <= 100)) { 24664 /* 24665 * User wants to set a custom beta. 24666 */ 24667 rack->r_ctl.saved_hibeta = optval; 24668 if (rack->rc_pacing_cc_set) 24669 rack_undo_cc_pacing(rack); 24670 rack->r_ctl.rc_saved_beta.beta = optval; 24671 } 24672 if (rack->rc_pacing_cc_set == 0) 24673 rack_set_cc_pacing(rack); 24674 } else { 24675 rack->rack_hibeta = 0; 24676 if (rack->rc_pacing_cc_set) 24677 rack_undo_cc_pacing(rack); 24678 } 24679 break; 24680 case TCP_RACK_PACING_BETA: 24681 error = EINVAL; 24682 break; 24683 case TCP_RACK_TIMER_SLOP: 24684 RACK_OPTS_INC(tcp_rack_timer_slop); 24685 rack->r_ctl.timer_slop = optval; 24686 if (rack->rc_tp->t_srtt) { 24687 /* 24688 * If we have an SRTT lets update t_rxtcur 24689 * to have the new slop. 24690 */ 24691 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 24692 rack_rto_min, rack_rto_max, 24693 rack->r_ctl.timer_slop); 24694 } 24695 break; 24696 case TCP_RACK_PACING_BETA_ECN: 24697 RACK_OPTS_INC(tcp_rack_beta_ecn); 24698 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 24699 /* This only works for newreno. 
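 * (The CC_NEWRENO_BETA_ECN control used below is specific to that
 * module.)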
*/ 24700 error = EINVAL; 24701 break; 24702 } 24703 if (rack->rc_pacing_cc_set) { 24704 /* 24705 * Set them into the real CC module 24706 * whats in the rack pcb is the old values 24707 * to be used on restoral/ 24708 */ 24709 sopt.sopt_dir = SOPT_SET; 24710 opt.name = CC_NEWRENO_BETA_ECN; 24711 opt.val = optval; 24712 if (CC_ALGO(tp)->ctl_output != NULL) 24713 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 24714 else 24715 error = ENOENT; 24716 } else { 24717 /* 24718 * Not pacing yet so set it into our local 24719 * rack pcb storage. 24720 */ 24721 rack->r_ctl.rc_saved_beta.beta_ecn = optval; 24722 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED; 24723 } 24724 break; 24725 case TCP_DEFER_OPTIONS: 24726 RACK_OPTS_INC(tcp_defer_opt); 24727 if (optval) { 24728 if (rack->gp_ready) { 24729 /* Too late */ 24730 error = EINVAL; 24731 break; 24732 } 24733 rack->defer_options = 1; 24734 } else 24735 rack->defer_options = 0; 24736 break; 24737 case TCP_RACK_MEASURE_CNT: 24738 RACK_OPTS_INC(tcp_rack_measure_cnt); 24739 if (optval && (optval <= 0xff)) { 24740 rack->r_ctl.req_measurements = optval; 24741 } else 24742 error = EINVAL; 24743 break; 24744 case TCP_REC_ABC_VAL: 24745 RACK_OPTS_INC(tcp_rec_abc_val); 24746 if (optval > 0) 24747 rack->r_use_labc_for_rec = 1; 24748 else 24749 rack->r_use_labc_for_rec = 0; 24750 break; 24751 case TCP_RACK_ABC_VAL: 24752 RACK_OPTS_INC(tcp_rack_abc_val); 24753 if ((optval > 0) && (optval < 255)) 24754 rack->rc_labc = optval; 24755 else 24756 error = EINVAL; 24757 break; 24758 case TCP_HDWR_UP_ONLY: 24759 RACK_OPTS_INC(tcp_pacing_up_only); 24760 if (optval) 24761 rack->r_up_only = 1; 24762 else 24763 rack->r_up_only = 0; 24764 break; 24765 case TCP_FILLCW_RATE_CAP: /* URL:fillcw_cap */ 24766 RACK_OPTS_INC(tcp_fillcw_rate_cap); 24767 rack->r_ctl.fillcw_cap = loptval; 24768 break; 24769 case TCP_PACING_RATE_CAP: 24770 RACK_OPTS_INC(tcp_pacing_rate_cap); 24771 if ((rack->dgp_on == 1) && 24772 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { 24773 /* 24774 * If we are doing DGP we need to switch 24775 * to using the pacing limit. 24776 */ 24777 if (tcp_can_enable_pacing() == 0) { 24778 error = ENOSPC; 24779 break; 24780 } 24781 /* 24782 * Now change up the flags and counts to be correct. 24783 */ 24784 rack->r_ctl.pacing_method |= RACK_REG_PACING; 24785 tcp_dec_dgp_pacing_cnt(); 24786 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 24787 } 24788 rack->r_ctl.bw_rate_cap = loptval; 24789 break; 24790 case TCP_HYBRID_PACING: 24791 if (hybrid == NULL) { 24792 error = EINVAL; 24793 break; 24794 } 24795 if (rack->r_ctl.side_chan_dis_mask & HYBRID_DIS_MASK) { 24796 error = EPERM; 24797 break; 24798 } 24799 error = process_hybrid_pacing(rack, hybrid); 24800 break; 24801 case TCP_SIDECHAN_DIS: /* URL:scodm */ 24802 if (optval) 24803 rack->r_ctl.side_chan_dis_mask = optval; 24804 else 24805 rack->r_ctl.side_chan_dis_mask = 0; 24806 break; 24807 case TCP_RACK_PROFILE: 24808 RACK_OPTS_INC(tcp_profile); 24809 error = rack_set_profile(rack, optval); 24810 break; 24811 case TCP_USE_CMP_ACKS: 24812 RACK_OPTS_INC(tcp_use_cmp_acks); 24813 if ((optval == 0) && (tp->t_flags2 & TF2_MBUF_ACKCMP)) { 24814 /* You can't turn it off once its on! 
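 * Compressed acks may already be queued for this connection, so
 * the setting is one way.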
*/ 24815 error = EINVAL; 24816 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { 24817 rack->r_use_cmp_ack = 1; 24818 rack->r_mbuf_queue = 1; 24819 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 24820 } 24821 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 24822 tp->t_flags2 |= TF2_MBUF_ACKCMP; 24823 break; 24824 case TCP_SHARED_CWND_TIME_LIMIT: 24825 RACK_OPTS_INC(tcp_lscwnd); 24826 if (optval) 24827 rack->r_limit_scw = 1; 24828 else 24829 rack->r_limit_scw = 0; 24830 break; 24831 case TCP_RACK_DGP_IN_REC: 24832 error = EINVAL; 24833 break; 24834 case TCP_POLICER_DETECT: /* URL:pol_det */ 24835 RACK_OPTS_INC(tcp_pol_detect); 24836 rack_translate_policer_detect(rack, optval); 24837 break; 24838 case TCP_POLICER_MSS: 24839 RACK_OPTS_INC(tcp_pol_mss); 24840 rack->r_ctl.policer_del_mss = (uint8_t)optval; 24841 if (optval & 0x00000100) { 24842 /* 24843 * Value is setup like so: 24844 * VVVV VVVV VVVV VVVV VVVV VVAI MMMM MMMM 24845 * Where MMMM MMMM is MSS setting 24846 * I (9th bit) is the Postive value that 24847 * says it is being set (if its 0 then the 24848 * upper bits 11 - 32 have no meaning. 24849 * This allows setting it off with 24850 * 0x000001MM. 24851 * 24852 * The 10th bit is used to turn on the 24853 * alternate median (not the expanded one). 24854 * 24855 */ 24856 rack->r_ctl.pol_bw_comp = (optval >> 10); 24857 } 24858 if (optval & 0x00000200) { 24859 rack->r_ctl.policer_alt_median = 1; 24860 } else { 24861 rack->r_ctl.policer_alt_median = 0; 24862 } 24863 break; 24864 case TCP_RACK_PACE_TO_FILL: 24865 RACK_OPTS_INC(tcp_fillcw); 24866 if (optval == 0) 24867 rack->rc_pace_to_cwnd = 0; 24868 else { 24869 rack->rc_pace_to_cwnd = 1; 24870 } 24871 if ((optval >= rack_gp_rtt_maxmul) && 24872 rack_gp_rtt_maxmul && 24873 (optval < 0xf)) { 24874 rack->rc_pace_fill_if_rttin_range = 1; 24875 rack->rtt_limit_mul = optval; 24876 } else { 24877 rack->rc_pace_fill_if_rttin_range = 0; 24878 rack->rtt_limit_mul = 0; 24879 } 24880 break; 24881 case TCP_RACK_NO_PUSH_AT_MAX: 24882 RACK_OPTS_INC(tcp_npush); 24883 if (optval == 0) 24884 rack->r_ctl.rc_no_push_at_mrtt = 0; 24885 else if (optval < 0xff) 24886 rack->r_ctl.rc_no_push_at_mrtt = optval; 24887 else 24888 error = EINVAL; 24889 break; 24890 case TCP_SHARED_CWND_ENABLE: 24891 RACK_OPTS_INC(tcp_rack_scwnd); 24892 if (optval == 0) 24893 rack->rack_enable_scwnd = 0; 24894 else 24895 rack->rack_enable_scwnd = 1; 24896 break; 24897 case TCP_RACK_MBUF_QUEUE: 24898 /* Now do we use the LRO mbuf-queue feature */ 24899 RACK_OPTS_INC(tcp_rack_mbufq); 24900 if (optval || rack->r_use_cmp_ack) 24901 rack->r_mbuf_queue = 1; 24902 else 24903 rack->r_mbuf_queue = 0; 24904 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 24905 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 24906 else 24907 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 24908 break; 24909 case TCP_RACK_NONRXT_CFG_RATE: 24910 RACK_OPTS_INC(tcp_rack_cfg_rate); 24911 if (optval == 0) 24912 rack->rack_rec_nonrxt_use_cr = 0; 24913 else 24914 rack->rack_rec_nonrxt_use_cr = 1; 24915 break; 24916 case TCP_NO_PRR: 24917 RACK_OPTS_INC(tcp_rack_noprr); 24918 if (optval == 0) 24919 rack->rack_no_prr = 0; 24920 else if (optval == 1) 24921 rack->rack_no_prr = 1; 24922 else if (optval == 2) 24923 rack->no_prr_addback = 1; 24924 else 24925 error = EINVAL; 24926 break; 24927 case RACK_CSPR_IS_FCC: /* URL:csprisfcc */ 24928 if (optval > 0) 24929 rack->cspr_is_fcc = 1; 24930 else 24931 rack->cspr_is_fcc = 0; 24932 break; 24933 case TCP_TIMELY_DYN_ADJ: 24934 RACK_OPTS_INC(tcp_timely_dyn); 24935 if 
(optval == 0) 24936 rack->rc_gp_dyn_mul = 0; 24937 else { 24938 rack->rc_gp_dyn_mul = 1; 24939 if (optval >= 100) { 24940 /* 24941 * If the user sets something 100 or more 24942 * its the gp_ca value. 24943 */ 24944 rack->r_ctl.rack_per_of_gp_ca = optval; 24945 } 24946 } 24947 break; 24948 case TCP_RACK_DO_DETECTION: 24949 RACK_OPTS_INC(tcp_rack_do_detection); 24950 if (optval == 0) 24951 rack->do_detection = 0; 24952 else 24953 rack->do_detection = 1; 24954 break; 24955 case TCP_RACK_TLP_USE: 24956 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 24957 error = EINVAL; 24958 break; 24959 } 24960 RACK_OPTS_INC(tcp_tlp_use); 24961 rack->rack_tlp_threshold_use = optval; 24962 break; 24963 case TCP_RACK_TLP_REDUCE: 24964 /* RACK TLP cwnd reduction (bool) */ 24965 RACK_OPTS_INC(tcp_rack_tlp_reduce); 24966 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 24967 break; 24968 /* Pacing related ones */ 24969 case TCP_RACK_PACE_ALWAYS: 24970 /* 24971 * zero is old rack method, 1 is new 24972 * method using a pacing rate. 24973 */ 24974 RACK_OPTS_INC(tcp_rack_pace_always); 24975 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 24976 error = EPERM; 24977 break; 24978 } 24979 if (optval > 0) { 24980 if (rack->rc_always_pace) { 24981 error = EALREADY; 24982 break; 24983 } else if (tcp_can_enable_pacing()) { 24984 rack->r_ctl.pacing_method |= RACK_REG_PACING; 24985 rack->rc_always_pace = 1; 24986 if (rack->rack_hibeta) 24987 rack_set_cc_pacing(rack); 24988 } 24989 else { 24990 error = ENOSPC; 24991 break; 24992 } 24993 } else { 24994 if (rack->rc_always_pace == 1) { 24995 rack_remove_pacing(rack); 24996 } 24997 } 24998 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 24999 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 25000 else 25001 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 25002 /* A rate may be set irate or other, if so set seg size */ 25003 rack_update_seg(rack); 25004 break; 25005 case TCP_BBR_RACK_INIT_RATE: 25006 RACK_OPTS_INC(tcp_initial_rate); 25007 val = optval; 25008 /* Change from kbits per second to bytes per second */ 25009 val *= 1000; 25010 val /= 8; 25011 rack->r_ctl.init_rate = val; 25012 if (rack->rc_always_pace) 25013 rack_update_seg(rack); 25014 break; 25015 case TCP_BBR_IWINTSO: 25016 error = EINVAL; 25017 break; 25018 case TCP_RACK_FORCE_MSEG: 25019 RACK_OPTS_INC(tcp_rack_force_max_seg); 25020 if (optval) 25021 rack->rc_force_max_seg = 1; 25022 else 25023 rack->rc_force_max_seg = 0; 25024 break; 25025 case TCP_RACK_PACE_MIN_SEG: 25026 RACK_OPTS_INC(tcp_rack_min_seg); 25027 rack->r_ctl.rc_user_set_min_segs = (0x0000ffff & optval); 25028 rack_set_pace_segments(tp, rack, __LINE__, NULL); 25029 break; 25030 case TCP_RACK_PACE_MAX_SEG: 25031 /* Max segments size in a pace in bytes */ 25032 RACK_OPTS_INC(tcp_rack_max_seg); 25033 if ((rack->dgp_on == 1) && 25034 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { 25035 /* 25036 * If we set a max-seg and are doing DGP then 25037 * we now fall under the pacing limits not the 25038 * DGP ones. 25039 */ 25040 if (tcp_can_enable_pacing() == 0) { 25041 error = ENOSPC; 25042 break; 25043 } 25044 /* 25045 * Now change up the flags and counts to be correct. 
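 * We move this connection from the DGP pacer count over to the
 * regular pacer count.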
25046 */ 25047 rack->r_ctl.pacing_method |= RACK_REG_PACING; 25048 tcp_dec_dgp_pacing_cnt(); 25049 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 25050 } 25051 if (optval <= MAX_USER_SET_SEG) 25052 rack->rc_user_set_max_segs = optval; 25053 else 25054 rack->rc_user_set_max_segs = MAX_USER_SET_SEG; 25055 rack_set_pace_segments(tp, rack, __LINE__, NULL); 25056 break; 25057 case TCP_RACK_PACE_RATE_REC: 25058 /* Set the fixed pacing rate in Bytes per second ca */ 25059 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 25060 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 25061 error = EPERM; 25062 break; 25063 } 25064 if (rack->dgp_on) { 25065 /* 25066 * We are already pacing another 25067 * way. 25068 */ 25069 error = EBUSY; 25070 break; 25071 } 25072 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 25073 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 25074 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 25075 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 25076 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 25077 rack->use_fixed_rate = 1; 25078 if (rack->rack_hibeta) 25079 rack_set_cc_pacing(rack); 25080 rack_log_pacing_delay_calc(rack, 25081 rack->r_ctl.rc_fixed_pacing_rate_ss, 25082 rack->r_ctl.rc_fixed_pacing_rate_ca, 25083 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 25084 __LINE__, NULL,0); 25085 break; 25086 25087 case TCP_RACK_PACE_RATE_SS: 25088 /* Set the fixed pacing rate in Bytes per second ca */ 25089 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 25090 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 25091 error = EPERM; 25092 break; 25093 } 25094 if (rack->dgp_on) { 25095 /* 25096 * We are already pacing another 25097 * way. 25098 */ 25099 error = EBUSY; 25100 break; 25101 } 25102 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 25103 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 25104 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 25105 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 25106 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 25107 rack->use_fixed_rate = 1; 25108 if (rack->rack_hibeta) 25109 rack_set_cc_pacing(rack); 25110 rack_log_pacing_delay_calc(rack, 25111 rack->r_ctl.rc_fixed_pacing_rate_ss, 25112 rack->r_ctl.rc_fixed_pacing_rate_ca, 25113 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 25114 __LINE__, NULL, 0); 25115 break; 25116 25117 case TCP_RACK_PACE_RATE_CA: 25118 /* Set the fixed pacing rate in Bytes per second ca */ 25119 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 25120 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 25121 error = EPERM; 25122 break; 25123 } 25124 if (rack->dgp_on) { 25125 /* 25126 * We are already pacing another 25127 * way. 
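 * A fixed rate cannot be combined with DGP; DGP must be turned
 * off first.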
25128 */ 25129 error = EBUSY; 25130 break; 25131 } 25132 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 25133 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 25134 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 25135 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 25136 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 25137 rack->use_fixed_rate = 1; 25138 if (rack->rack_hibeta) 25139 rack_set_cc_pacing(rack); 25140 rack_log_pacing_delay_calc(rack, 25141 rack->r_ctl.rc_fixed_pacing_rate_ss, 25142 rack->r_ctl.rc_fixed_pacing_rate_ca, 25143 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 25144 __LINE__, NULL, 0); 25145 break; 25146 case TCP_RACK_GP_INCREASE_REC: 25147 RACK_OPTS_INC(tcp_gp_inc_rec); 25148 rack->r_ctl.rack_per_of_gp_rec = optval; 25149 rack_log_pacing_delay_calc(rack, 25150 rack->r_ctl.rack_per_of_gp_ss, 25151 rack->r_ctl.rack_per_of_gp_ca, 25152 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 25153 __LINE__, NULL, 0); 25154 break; 25155 case TCP_RACK_GP_INCREASE_CA: 25156 RACK_OPTS_INC(tcp_gp_inc_ca); 25157 ca = optval; 25158 if (ca < 100) { 25159 /* 25160 * We don't allow any reduction 25161 * over the GP b/w. 25162 */ 25163 error = EINVAL; 25164 break; 25165 } 25166 rack->r_ctl.rack_per_of_gp_ca = ca; 25167 rack_log_pacing_delay_calc(rack, 25168 rack->r_ctl.rack_per_of_gp_ss, 25169 rack->r_ctl.rack_per_of_gp_ca, 25170 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 25171 __LINE__, NULL, 0); 25172 break; 25173 case TCP_RACK_GP_INCREASE_SS: 25174 RACK_OPTS_INC(tcp_gp_inc_ss); 25175 ss = optval; 25176 if (ss < 100) { 25177 /* 25178 * We don't allow any reduction 25179 * over the GP b/w. 25180 */ 25181 error = EINVAL; 25182 break; 25183 } 25184 rack->r_ctl.rack_per_of_gp_ss = ss; 25185 rack_log_pacing_delay_calc(rack, 25186 rack->r_ctl.rack_per_of_gp_ss, 25187 rack->r_ctl.rack_per_of_gp_ca, 25188 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 25189 __LINE__, NULL, 0); 25190 break; 25191 case TCP_RACK_RR_CONF: 25192 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 25193 if (optval && optval <= 3) 25194 rack->r_rr_config = optval; 25195 else 25196 rack->r_rr_config = 0; 25197 break; 25198 case TCP_PACING_DND: /* URL:dnd */ 25199 if (optval > 0) 25200 rack->rc_pace_dnd = 1; 25201 else 25202 rack->rc_pace_dnd = 0; 25203 break; 25204 case TCP_HDWR_RATE_CAP: 25205 RACK_OPTS_INC(tcp_hdwr_rate_cap); 25206 if (optval) { 25207 if (rack->r_rack_hw_rate_caps == 0) 25208 rack->r_rack_hw_rate_caps = 1; 25209 else 25210 error = EALREADY; 25211 } else { 25212 rack->r_rack_hw_rate_caps = 0; 25213 } 25214 break; 25215 case TCP_DGP_UPPER_BOUNDS: 25216 { 25217 uint8_t val; 25218 val = optval & 0x0000ff; 25219 rack->r_ctl.rack_per_upper_bound_ca = val; 25220 val = (optval >> 16) & 0x0000ff; 25221 rack->r_ctl.rack_per_upper_bound_ss = val; 25222 break; 25223 } 25224 case TCP_SS_EEXIT: /* URL:eexit */ 25225 if (optval > 0) { 25226 rack->r_ctl.gp_rnd_thresh = optval & 0x0ff; 25227 if (optval & 0x10000) { 25228 rack->r_ctl.gate_to_fs = 1; 25229 } else { 25230 rack->r_ctl.gate_to_fs = 0; 25231 } 25232 if (optval & 0x20000) { 25233 rack->r_ctl.use_gp_not_last = 1; 25234 } else { 25235 rack->r_ctl.use_gp_not_last = 0; 25236 } 25237 if (optval & 0xfffc0000) { 25238 uint32_t v; 25239 25240 v = (optval >> 18) & 0x00003fff; 25241 if (v >= 1000) 25242 rack->r_ctl.gp_gain_req = v; 25243 } 25244 } else { 25245 /* We do not do ss early exit at all */ 25246 rack->rc_initial_ss_comp = 1; 25247 rack->r_ctl.gp_rnd_thresh = 0; 25248 } 25249 break; 25250 case TCP_RACK_SPLIT_LIMIT: 25251 RACK_OPTS_INC(tcp_split_limit); 25252 rack->r_ctl.rc_split_limit = 
optval; 25253 break; 25254 case TCP_BBR_HDWR_PACE: 25255 RACK_OPTS_INC(tcp_hdwr_pacing); 25256 if (optval){ 25257 if (rack->rack_hdrw_pacing == 0) { 25258 rack->rack_hdw_pace_ena = 1; 25259 rack->rack_attempt_hdwr_pace = 0; 25260 } else 25261 error = EALREADY; 25262 } else { 25263 rack->rack_hdw_pace_ena = 0; 25264 #ifdef RATELIMIT 25265 if (rack->r_ctl.crte != NULL) { 25266 rack->rack_hdrw_pacing = 0; 25267 rack->rack_attempt_hdwr_pace = 0; 25268 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 25269 rack->r_ctl.crte = NULL; 25270 } 25271 #endif 25272 } 25273 break; 25274 /* End Pacing related ones */ 25275 case TCP_RACK_PRR_SENDALOT: 25276 /* Allow PRR to send more than one seg */ 25277 RACK_OPTS_INC(tcp_rack_prr_sendalot); 25278 rack->r_ctl.rc_prr_sendalot = optval; 25279 break; 25280 case TCP_RACK_MIN_TO: 25281 /* Minimum time between rack t-o's in ms */ 25282 RACK_OPTS_INC(tcp_rack_min_to); 25283 rack->r_ctl.rc_min_to = optval; 25284 break; 25285 case TCP_RACK_EARLY_SEG: 25286 /* If early recovery max segments */ 25287 RACK_OPTS_INC(tcp_rack_early_seg); 25288 rack->r_ctl.rc_early_recovery_segs = optval; 25289 break; 25290 case TCP_RACK_ENABLE_HYSTART: 25291 { 25292 if (optval) { 25293 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 25294 if (rack_do_hystart > RACK_HYSTART_ON) 25295 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 25296 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 25297 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 25298 } else { 25299 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 25300 } 25301 } 25302 break; 25303 case TCP_RACK_REORD_THRESH: 25304 /* RACK reorder threshold (shift amount) */ 25305 RACK_OPTS_INC(tcp_rack_reord_thresh); 25306 if ((optval > 0) && (optval < 31)) 25307 rack->r_ctl.rc_reorder_shift = optval; 25308 else 25309 error = EINVAL; 25310 break; 25311 case TCP_RACK_REORD_FADE: 25312 /* Does reordering fade after ms time */ 25313 RACK_OPTS_INC(tcp_rack_reord_fade); 25314 rack->r_ctl.rc_reorder_fade = optval; 25315 break; 25316 case TCP_RACK_TLP_THRESH: 25317 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 25318 RACK_OPTS_INC(tcp_rack_tlp_thresh); 25319 if (optval) 25320 rack->r_ctl.rc_tlp_threshold = optval; 25321 else 25322 error = EINVAL; 25323 break; 25324 case TCP_BBR_USE_RACK_RR: 25325 RACK_OPTS_INC(tcp_rack_rr); 25326 if (optval) 25327 rack->use_rack_rr = 1; 25328 else 25329 rack->use_rack_rr = 0; 25330 break; 25331 case TCP_RACK_PKT_DELAY: 25332 /* RACK added ms i.e. rack-rtt + reord + N */ 25333 RACK_OPTS_INC(tcp_rack_pkt_delay); 25334 rack->r_ctl.rc_pkt_delay = optval; 25335 break; 25336 case TCP_DELACK: 25337 RACK_OPTS_INC(tcp_rack_delayed_ack); 25338 if (optval == 0) 25339 tp->t_delayed_ack = 0; 25340 else 25341 tp->t_delayed_ack = 1; 25342 if (tp->t_flags & TF_DELACK) { 25343 tp->t_flags &= ~TF_DELACK; 25344 tp->t_flags |= TF_ACKNOW; 25345 NET_EPOCH_ENTER(et); 25346 rack_output(tp); 25347 NET_EPOCH_EXIT(et); 25348 } 25349 break; 25350 25351 case TCP_BBR_RACK_RTT_USE: 25352 RACK_OPTS_INC(tcp_rack_rtt_use); 25353 if ((optval != USE_RTT_HIGH) && 25354 (optval != USE_RTT_LOW) && 25355 (optval != USE_RTT_AVG)) 25356 error = EINVAL; 25357 else 25358 rack->r_ctl.rc_rate_sample_method = optval; 25359 break; 25360 case TCP_HONOR_HPTS_MIN: 25361 RACK_OPTS_INC(tcp_honor_hpts); 25362 if (optval) { 25363 rack->r_use_hpts_min = 1; 25364 /* 25365 * Must be between 2 - 80% to be a reduction else 25366 * we keep the default (10%). 
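 * For example, optval = 25 allows hpts to reduce the pacing
 * slot by at most 25% instead of the default 10%.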
25367 */ 25368 if ((optval > 1) && (optval <= 80)) { 25369 rack->r_ctl.max_reduction = optval; 25370 } 25371 } else 25372 rack->r_use_hpts_min = 0; 25373 break; 25374 case TCP_REC_IS_DYN: /* URL:dynrec */ 25375 RACK_OPTS_INC(tcp_dyn_rec); 25376 if (optval) 25377 rack->rc_gp_no_rec_chg = 1; 25378 else 25379 rack->rc_gp_no_rec_chg = 0; 25380 break; 25381 case TCP_NO_TIMELY: 25382 RACK_OPTS_INC(tcp_notimely); 25383 if (optval) { 25384 rack->rc_skip_timely = 1; 25385 rack->r_ctl.rack_per_of_gp_rec = 90; 25386 rack->r_ctl.rack_per_of_gp_ca = 100; 25387 rack->r_ctl.rack_per_of_gp_ss = 250; 25388 } else { 25389 rack->rc_skip_timely = 0; 25390 } 25391 break; 25392 case TCP_GP_USE_LTBW: 25393 if (optval == 0) { 25394 rack->use_lesser_lt_bw = 0; 25395 rack->dis_lt_bw = 1; 25396 } else if (optval == 1) { 25397 rack->use_lesser_lt_bw = 1; 25398 rack->dis_lt_bw = 0; 25399 } else if (optval == 2) { 25400 rack->use_lesser_lt_bw = 0; 25401 rack->dis_lt_bw = 0; 25402 } 25403 break; 25404 case TCP_DATA_AFTER_CLOSE: 25405 RACK_OPTS_INC(tcp_data_after_close); 25406 if (optval) 25407 rack->rc_allow_data_af_clo = 1; 25408 else 25409 rack->rc_allow_data_af_clo = 0; 25410 break; 25411 default: 25412 break; 25413 } 25414 tcp_log_socket_option(tp, sopt_name, optval, error); 25415 return (error); 25416 } 25417 25418 static void 25419 rack_inherit(struct tcpcb *tp, struct inpcb *parent) 25420 { 25421 /* 25422 * A new connection has been created (tp) and 25423 * the parent is the inpcb given. We want to 25424 * apply a read-lock to the parent (we are already 25425 * holding a write lock on the tp) and copy anything 25426 * out of the rack specific data as long as its tfb is 25427 * the same as ours i.e. we are the same stack. Otherwise 25428 * we just return. 25429 */ 25430 struct tcpcb *par; 25431 struct tcp_rack *dest, *src; 25432 int cnt = 0; 25433 25434 par = intotcpcb(parent); 25435 if (par->t_fb != tp->t_fb) { 25436 /* Not the same stack */ 25437 tcp_log_socket_option(tp, 0, 0, 1); 25438 return; 25439 } 25440 /* Ok if we reach here lets setup the two rack pointers */ 25441 dest = (struct tcp_rack *)tp->t_fb_ptr; 25442 src = (struct tcp_rack *)par->t_fb_ptr; 25443 if ((src == NULL) || (dest == NULL)) { 25444 /* Huh? */ 25445 tcp_log_socket_option(tp, 0, 0, 2); 25446 return; 25447 } 25448 /* Now copy out anything we wish to inherit i.e. 
things in socket-options */ 25449 /* TCP_RACK_PROFILE we can't know but we can set DGP if its on */ 25450 if ((src->dgp_on) && (dest->dgp_on == 0)) { 25451 /* Profile 1 had to be set via sock opt */ 25452 rack_set_dgp(dest); 25453 cnt++; 25454 } 25455 /* TCP_RACK_SET_RXT_OPTIONS */ 25456 if (dest->full_size_rxt != src->full_size_rxt) { 25457 dest->full_size_rxt = src->full_size_rxt; 25458 cnt++; 25459 } 25460 if (dest->shape_rxt_to_pacing_min != src->shape_rxt_to_pacing_min) { 25461 dest->shape_rxt_to_pacing_min = src->shape_rxt_to_pacing_min; 25462 cnt++; 25463 } 25464 /* TCP_RACK_DSACK_OPT */ 25465 if (dest->rc_rack_tmr_std_based != src->rc_rack_tmr_std_based) { 25466 dest->rc_rack_tmr_std_based = src->rc_rack_tmr_std_based; 25467 cnt++; 25468 } 25469 if (dest->rc_rack_use_dsack != src->rc_rack_use_dsack) { 25470 dest->rc_rack_use_dsack = src->rc_rack_use_dsack; 25471 cnt++; 25472 } 25473 /* TCP_RACK_PACING_DIVISOR */ 25474 if (dest->r_ctl.pace_len_divisor != src->r_ctl.pace_len_divisor) { 25475 dest->r_ctl.pace_len_divisor = src->r_ctl.pace_len_divisor; 25476 cnt++; 25477 } 25478 /* TCP_RACK_HI_BETA */ 25479 if (src->rack_hibeta != dest->rack_hibeta) { 25480 cnt++; 25481 if (src->rack_hibeta) { 25482 dest->r_ctl.rc_saved_beta.beta = src->r_ctl.rc_saved_beta.beta; 25483 dest->rack_hibeta = 1; 25484 } else { 25485 dest->rack_hibeta = 0; 25486 } 25487 } 25488 /* TCP_RACK_TIMER_SLOP */ 25489 if (dest->r_ctl.timer_slop != src->r_ctl.timer_slop) { 25490 dest->r_ctl.timer_slop = src->r_ctl.timer_slop; 25491 cnt++; 25492 } 25493 /* TCP_RACK_PACING_BETA_ECN */ 25494 if (dest->r_ctl.rc_saved_beta.beta_ecn != src->r_ctl.rc_saved_beta.beta_ecn) { 25495 dest->r_ctl.rc_saved_beta.beta_ecn = src->r_ctl.rc_saved_beta.beta_ecn; 25496 cnt++; 25497 } 25498 if (dest->r_ctl.rc_saved_beta.newreno_flags != src->r_ctl.rc_saved_beta.newreno_flags) { 25499 dest->r_ctl.rc_saved_beta.newreno_flags = src->r_ctl.rc_saved_beta.newreno_flags; 25500 cnt++; 25501 } 25502 /* We do not do TCP_DEFER_OPTIONS */ 25503 /* TCP_RACK_MEASURE_CNT */ 25504 if (dest->r_ctl.req_measurements != src->r_ctl.req_measurements) { 25505 dest->r_ctl.req_measurements = src->r_ctl.req_measurements; 25506 cnt++; 25507 } 25508 /* TCP_HDWR_UP_ONLY */ 25509 if (dest->r_up_only != src->r_up_only) { 25510 dest->r_up_only = src->r_up_only; 25511 cnt++; 25512 } 25513 /* TCP_FILLCW_RATE_CAP */ 25514 if (dest->r_ctl.fillcw_cap != src->r_ctl.fillcw_cap) { 25515 dest->r_ctl.fillcw_cap = src->r_ctl.fillcw_cap; 25516 cnt++; 25517 } 25518 /* TCP_PACING_RATE_CAP */ 25519 if (dest->r_ctl.bw_rate_cap != src->r_ctl.bw_rate_cap) { 25520 dest->r_ctl.bw_rate_cap = src->r_ctl.bw_rate_cap; 25521 cnt++; 25522 } 25523 /* A listener can't set TCP_HYBRID_PACING */ 25524 /* TCP_SIDECHAN_DIS */ 25525 if (dest->r_ctl.side_chan_dis_mask != src->r_ctl.side_chan_dis_mask) { 25526 dest->r_ctl.side_chan_dis_mask = src->r_ctl.side_chan_dis_mask; 25527 cnt++; 25528 } 25529 /* TCP_SHARED_CWND_TIME_LIMIT */ 25530 if (dest->r_limit_scw != src->r_limit_scw) { 25531 dest->r_limit_scw = src->r_limit_scw; 25532 cnt++; 25533 } 25534 /* TCP_POLICER_DETECT */ 25535 if (dest->r_ctl.policer_rxt_threshold != src->r_ctl.policer_rxt_threshold) { 25536 dest->r_ctl.policer_rxt_threshold = src->r_ctl.policer_rxt_threshold; 25537 cnt++; 25538 } 25539 if (dest->r_ctl.policer_avg_threshold != src->r_ctl.policer_avg_threshold) { 25540 dest->r_ctl.policer_avg_threshold = src->r_ctl.policer_avg_threshold; 25541 cnt++; 25542 } 25543 if (dest->r_ctl.policer_med_threshold != 
src->r_ctl.policer_med_threshold) { 25544 dest->r_ctl.policer_med_threshold = src->r_ctl.policer_med_threshold; 25545 cnt++; 25546 } 25547 if (dest->policer_detect_on != src->policer_detect_on) { 25548 dest->policer_detect_on = src->policer_detect_on; 25549 cnt++; 25550 } 25551 25552 if (dest->r_ctl.saved_policer_val != src->r_ctl.saved_policer_val) { 25553 dest->r_ctl.saved_policer_val = src->r_ctl.saved_policer_val; 25554 cnt++; 25555 } 25556 /* TCP_POLICER_MSS */ 25557 if (dest->r_ctl.policer_del_mss != src->r_ctl.policer_del_mss) { 25558 dest->r_ctl.policer_del_mss = src->r_ctl.policer_del_mss; 25559 cnt++; 25560 } 25561 25562 if (dest->r_ctl.pol_bw_comp != src->r_ctl.pol_bw_comp) { 25563 dest->r_ctl.pol_bw_comp = src->r_ctl.pol_bw_comp; 25564 cnt++; 25565 } 25566 25567 if (dest->r_ctl.policer_alt_median != src->r_ctl.policer_alt_median) { 25568 dest->r_ctl.policer_alt_median = src->r_ctl.policer_alt_median; 25569 cnt++; 25570 } 25571 /* TCP_RACK_PACE_TO_FILL */ 25572 if (dest->rc_pace_to_cwnd != src->rc_pace_to_cwnd) { 25573 dest->rc_pace_to_cwnd = src->rc_pace_to_cwnd; 25574 cnt++; 25575 } 25576 if (dest->rc_pace_fill_if_rttin_range != src->rc_pace_fill_if_rttin_range) { 25577 dest->rc_pace_fill_if_rttin_range = src->rc_pace_fill_if_rttin_range; 25578 cnt++; 25579 } 25580 if (dest->rtt_limit_mul != src->rtt_limit_mul) { 25581 dest->rtt_limit_mul = src->rtt_limit_mul; 25582 cnt++; 25583 } 25584 /* TCP_RACK_NO_PUSH_AT_MAX */ 25585 if (dest->r_ctl.rc_no_push_at_mrtt != src->r_ctl.rc_no_push_at_mrtt) { 25586 dest->r_ctl.rc_no_push_at_mrtt = src->r_ctl.rc_no_push_at_mrtt; 25587 cnt++; 25588 } 25589 /* TCP_SHARED_CWND_ENABLE */ 25590 if (dest->rack_enable_scwnd != src->rack_enable_scwnd) { 25591 dest->rack_enable_scwnd = src->rack_enable_scwnd; 25592 cnt++; 25593 } 25594 /* TCP_USE_CMP_ACKS */ 25595 if (dest->r_use_cmp_ack != src->r_use_cmp_ack) { 25596 dest->r_use_cmp_ack = src->r_use_cmp_ack; 25597 cnt++; 25598 } 25599 25600 if (dest->r_mbuf_queue != src->r_mbuf_queue) { 25601 dest->r_mbuf_queue = src->r_mbuf_queue; 25602 cnt++; 25603 } 25604 /* TCP_RACK_MBUF_QUEUE */ 25605 if (dest->r_mbuf_queue != src->r_mbuf_queue) { 25606 dest->r_mbuf_queue = src->r_mbuf_queue; 25607 cnt++; 25608 } 25609 if (dest->r_mbuf_queue || dest->rc_always_pace || dest->r_use_cmp_ack) { 25610 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 25611 } else { 25612 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 25613 } 25614 if (dest->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) { 25615 tp->t_flags2 |= TF2_MBUF_ACKCMP; 25616 } 25617 /* TCP_RACK_NONRXT_CFG_RATE */ 25618 if (dest->rack_rec_nonrxt_use_cr != src->rack_rec_nonrxt_use_cr) { 25619 dest->rack_rec_nonrxt_use_cr = src->rack_rec_nonrxt_use_cr; 25620 cnt++; 25621 } 25622 /* TCP_NO_PRR */ 25623 if (dest->rack_no_prr != src->rack_no_prr) { 25624 dest->rack_no_prr = src->rack_no_prr; 25625 cnt++; 25626 } 25627 if (dest->no_prr_addback != src->no_prr_addback) { 25628 dest->no_prr_addback = src->no_prr_addback; 25629 cnt++; 25630 } 25631 /* RACK_CSPR_IS_FCC */ 25632 if (dest->cspr_is_fcc != src->cspr_is_fcc) { 25633 dest->cspr_is_fcc = src->cspr_is_fcc; 25634 cnt++; 25635 } 25636 /* TCP_TIMELY_DYN_ADJ */ 25637 if (dest->rc_gp_dyn_mul != src->rc_gp_dyn_mul) { 25638 dest->rc_gp_dyn_mul = src->rc_gp_dyn_mul; 25639 cnt++; 25640 } 25641 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { 25642 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; 25643 cnt++; 25644 } 25645 /* TCP_RACK_DO_DETECTION */ 25646 if (dest->do_detection != src->do_detection) { 25647 
dest->do_detection = src->do_detection; 25648 cnt++; 25649 } 25650 /* TCP_RACK_TLP_USE */ 25651 if (dest->rack_tlp_threshold_use != src->rack_tlp_threshold_use) { 25652 dest->rack_tlp_threshold_use = src->rack_tlp_threshold_use; 25653 cnt++; 25654 } 25655 /* we don't allow inheritence of TCP_RACK_PACE_ALWAYS */ 25656 /* TCP_BBR_RACK_INIT_RATE */ 25657 if (dest->r_ctl.init_rate != src->r_ctl.init_rate) { 25658 dest->r_ctl.init_rate = src->r_ctl.init_rate; 25659 cnt++; 25660 } 25661 /* TCP_RACK_FORCE_MSEG */ 25662 if (dest->rc_force_max_seg != src->rc_force_max_seg) { 25663 dest->rc_force_max_seg = src->rc_force_max_seg; 25664 cnt++; 25665 } 25666 /* TCP_RACK_PACE_MIN_SEG */ 25667 if (dest->r_ctl.rc_user_set_min_segs != src->r_ctl.rc_user_set_min_segs) { 25668 dest->r_ctl.rc_user_set_min_segs = src->r_ctl.rc_user_set_min_segs; 25669 cnt++; 25670 } 25671 /* we don't allow TCP_RACK_PACE_MAX_SEG */ 25672 /* TCP_RACK_PACE_RATE_REC, TCP_RACK_PACE_RATE_SS, TCP_RACK_PACE_RATE_CA */ 25673 if (dest->r_ctl.rc_fixed_pacing_rate_ca != src->r_ctl.rc_fixed_pacing_rate_ca) { 25674 dest->r_ctl.rc_fixed_pacing_rate_ca = src->r_ctl.rc_fixed_pacing_rate_ca; 25675 cnt++; 25676 } 25677 if (dest->r_ctl.rc_fixed_pacing_rate_ss != src->r_ctl.rc_fixed_pacing_rate_ss) { 25678 dest->r_ctl.rc_fixed_pacing_rate_ss = src->r_ctl.rc_fixed_pacing_rate_ss; 25679 cnt++; 25680 } 25681 if (dest->r_ctl.rc_fixed_pacing_rate_rec != src->r_ctl.rc_fixed_pacing_rate_rec) { 25682 dest->r_ctl.rc_fixed_pacing_rate_rec = src->r_ctl.rc_fixed_pacing_rate_rec; 25683 cnt++; 25684 } 25685 /* TCP_RACK_GP_INCREASE_REC, TCP_RACK_GP_INCREASE_CA, TCP_RACK_GP_INCREASE_SS */ 25686 if (dest->r_ctl.rack_per_of_gp_rec != src->r_ctl.rack_per_of_gp_rec) { 25687 dest->r_ctl.rack_per_of_gp_rec = src->r_ctl.rack_per_of_gp_rec; 25688 cnt++; 25689 } 25690 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { 25691 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; 25692 cnt++; 25693 } 25694 25695 if (dest->r_ctl.rack_per_of_gp_ss != src->r_ctl.rack_per_of_gp_ss) { 25696 dest->r_ctl.rack_per_of_gp_ss = src->r_ctl.rack_per_of_gp_ss; 25697 cnt++; 25698 } 25699 /* TCP_RACK_RR_CONF */ 25700 if (dest->r_rr_config != src->r_rr_config) { 25701 dest->r_rr_config = src->r_rr_config; 25702 cnt++; 25703 } 25704 /* TCP_PACING_DND */ 25705 if (dest->rc_pace_dnd != src->rc_pace_dnd) { 25706 dest->rc_pace_dnd = src->rc_pace_dnd; 25707 cnt++; 25708 } 25709 /* TCP_HDWR_RATE_CAP */ 25710 if (dest->r_rack_hw_rate_caps != src->r_rack_hw_rate_caps) { 25711 dest->r_rack_hw_rate_caps = src->r_rack_hw_rate_caps; 25712 cnt++; 25713 } 25714 /* TCP_DGP_UPPER_BOUNDS */ 25715 if (dest->r_ctl.rack_per_upper_bound_ca != src->r_ctl.rack_per_upper_bound_ca) { 25716 dest->r_ctl.rack_per_upper_bound_ca = src->r_ctl.rack_per_upper_bound_ca; 25717 cnt++; 25718 } 25719 if (dest->r_ctl.rack_per_upper_bound_ss != src->r_ctl.rack_per_upper_bound_ss) { 25720 dest->r_ctl.rack_per_upper_bound_ss = src->r_ctl.rack_per_upper_bound_ss; 25721 cnt++; 25722 } 25723 /* TCP_SS_EEXIT */ 25724 if (dest->r_ctl.gp_rnd_thresh != src->r_ctl.gp_rnd_thresh) { 25725 dest->r_ctl.gp_rnd_thresh = src->r_ctl.gp_rnd_thresh; 25726 cnt++; 25727 } 25728 if (dest->r_ctl.gate_to_fs != src->r_ctl.gate_to_fs) { 25729 dest->r_ctl.gate_to_fs = src->r_ctl.gate_to_fs; 25730 cnt++; 25731 } 25732 if (dest->r_ctl.use_gp_not_last != src->r_ctl.use_gp_not_last) { 25733 dest->r_ctl.use_gp_not_last = src->r_ctl.use_gp_not_last; 25734 cnt++; 25735 } 25736 if (dest->r_ctl.gp_gain_req != src->r_ctl.gp_gain_req) { 25737 
dest->r_ctl.gp_gain_req = src->r_ctl.gp_gain_req; 25738 cnt++; 25739 } 25740 /* TCP_BBR_HDWR_PACE */ 25741 if (dest->rack_hdw_pace_ena != src->rack_hdw_pace_ena) { 25742 dest->rack_hdw_pace_ena = src->rack_hdw_pace_ena; 25743 cnt++; 25744 } 25745 if (dest->rack_attempt_hdwr_pace != src->rack_attempt_hdwr_pace) { 25746 dest->rack_attempt_hdwr_pace = src->rack_attempt_hdwr_pace; 25747 cnt++; 25748 } 25749 /* TCP_RACK_PRR_SENDALOT */ 25750 if (dest->r_ctl.rc_prr_sendalot != src->r_ctl.rc_prr_sendalot) { 25751 dest->r_ctl.rc_prr_sendalot = src->r_ctl.rc_prr_sendalot; 25752 cnt++; 25753 } 25754 /* TCP_RACK_MIN_TO */ 25755 if (dest->r_ctl.rc_min_to != src->r_ctl.rc_min_to) { 25756 dest->r_ctl.rc_min_to = src->r_ctl.rc_min_to; 25757 cnt++; 25758 } 25759 /* TCP_RACK_EARLY_SEG */ 25760 if (dest->r_ctl.rc_early_recovery_segs != src->r_ctl.rc_early_recovery_segs) { 25761 dest->r_ctl.rc_early_recovery_segs = src->r_ctl.rc_early_recovery_segs; 25762 cnt++; 25763 } 25764 /* TCP_RACK_ENABLE_HYSTART */ 25765 if (par->t_ccv.flags != tp->t_ccv.flags) { 25766 cnt++; 25767 if (par->t_ccv.flags & CCF_HYSTART_ALLOWED) { 25768 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 25769 if (rack_do_hystart > RACK_HYSTART_ON) 25770 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 25771 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 25772 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 25773 } else { 25774 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 25775 } 25776 } 25777 /* TCP_RACK_REORD_THRESH */ 25778 if (dest->r_ctl.rc_reorder_shift != src->r_ctl.rc_reorder_shift) { 25779 dest->r_ctl.rc_reorder_shift = src->r_ctl.rc_reorder_shift; 25780 cnt++; 25781 } 25782 /* TCP_RACK_REORD_FADE */ 25783 if (dest->r_ctl.rc_reorder_fade != src->r_ctl.rc_reorder_fade) { 25784 dest->r_ctl.rc_reorder_fade = src->r_ctl.rc_reorder_fade; 25785 cnt++; 25786 } 25787 /* TCP_RACK_TLP_THRESH */ 25788 if (dest->r_ctl.rc_tlp_threshold != src->r_ctl.rc_tlp_threshold) { 25789 dest->r_ctl.rc_tlp_threshold = src->r_ctl.rc_tlp_threshold; 25790 cnt++; 25791 } 25792 /* TCP_BBR_USE_RACK_RR */ 25793 if (dest->use_rack_rr != src->use_rack_rr) { 25794 dest->use_rack_rr = src->use_rack_rr; 25795 cnt++; 25796 } 25797 /* TCP_RACK_PKT_DELAY */ 25798 if (dest->r_ctl.rc_pkt_delay != src->r_ctl.rc_pkt_delay) { 25799 dest->r_ctl.rc_pkt_delay = src->r_ctl.rc_pkt_delay; 25800 cnt++; 25801 } 25802 /* TCP_DELACK will get copied via the main code if applicable */ 25803 /* TCP_BBR_RACK_RTT_USE */ 25804 if (dest->r_ctl.rc_rate_sample_method != src->r_ctl.rc_rate_sample_method) { 25805 dest->r_ctl.rc_rate_sample_method = src->r_ctl.rc_rate_sample_method; 25806 cnt++; 25807 } 25808 /* TCP_HONOR_HPTS_MIN */ 25809 if (dest->r_use_hpts_min != src->r_use_hpts_min) { 25810 dest->r_use_hpts_min = src->r_use_hpts_min; 25811 cnt++; 25812 } 25813 if (dest->r_ctl.max_reduction != src->r_ctl.max_reduction) { 25814 dest->r_ctl.max_reduction = src->r_ctl.max_reduction; 25815 cnt++; 25816 } 25817 /* TCP_REC_IS_DYN */ 25818 if (dest->rc_gp_no_rec_chg != src->rc_gp_no_rec_chg) { 25819 dest->rc_gp_no_rec_chg = src->rc_gp_no_rec_chg; 25820 cnt++; 25821 } 25822 if (dest->rc_skip_timely != src->rc_skip_timely) { 25823 dest->rc_skip_timely = src->rc_skip_timely; 25824 cnt++; 25825 } 25826 /* TCP_DATA_AFTER_CLOSE */ 25827 if (dest->rc_allow_data_af_clo != src->rc_allow_data_af_clo) { 25828 dest->rc_allow_data_af_clo = src->rc_allow_data_af_clo; 25829 cnt++; 25830 } 25831 /* TCP_GP_USE_LTBW */ 25832 if (src->use_lesser_lt_bw != dest->use_lesser_lt_bw) { 25833 
dest->use_lesser_lt_bw = src->use_lesser_lt_bw; 25834 cnt++; 25835 } 25836 if (dest->dis_lt_bw != src->dis_lt_bw) { 25837 dest->dis_lt_bw = src->dis_lt_bw; 25838 cnt++; 25839 } 25840 tcp_log_socket_option(tp, 0, cnt, 0); 25841 } 25842 25843 25844 static void 25845 rack_apply_deferred_options(struct tcp_rack *rack) 25846 { 25847 struct deferred_opt_list *dol, *sdol; 25848 uint32_t s_optval; 25849 25850 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) { 25851 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 25852 /* Disadvantage of deferal is you loose the error return */ 25853 s_optval = (uint32_t)dol->optval; 25854 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval, NULL); 25855 free(dol, M_TCPDO); 25856 } 25857 } 25858 25859 static void 25860 rack_hw_tls_change(struct tcpcb *tp, int chg) 25861 { 25862 /* Update HW tls state */ 25863 struct tcp_rack *rack; 25864 25865 rack = (struct tcp_rack *)tp->t_fb_ptr; 25866 if (chg) 25867 rack->r_ctl.fsb.hw_tls = 1; 25868 else 25869 rack->r_ctl.fsb.hw_tls = 0; 25870 } 25871 25872 static int 25873 rack_pru_options(struct tcpcb *tp, int flags) 25874 { 25875 if (flags & PRUS_OOB) 25876 return (EOPNOTSUPP); 25877 return (0); 25878 } 25879 25880 static bool 25881 rack_wake_check(struct tcpcb *tp) 25882 { 25883 struct tcp_rack *rack; 25884 struct timeval tv; 25885 uint32_t cts; 25886 25887 rack = (struct tcp_rack *)tp->t_fb_ptr; 25888 if (rack->r_ctl.rc_hpts_flags) { 25889 cts = tcp_get_usecs(&tv); 25890 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == PACE_PKT_OUTPUT){ 25891 /* 25892 * Pacing timer is up, check if we are ready. 25893 */ 25894 if (TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) 25895 return (true); 25896 } else if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) != 0) { 25897 /* 25898 * A timer is up, check if we are ready. 25899 */ 25900 if (TSTMP_GEQ(cts, rack->r_ctl.rc_timer_exp)) 25901 return (true); 25902 } 25903 } 25904 return (false); 25905 } 25906 25907 static struct tcp_function_block __tcp_rack = { 25908 .tfb_tcp_block_name = __XSTRING(STACKNAME), 25909 .tfb_tcp_output = rack_output, 25910 .tfb_do_queued_segments = ctf_do_queued_segments, 25911 .tfb_do_segment_nounlock = rack_do_segment_nounlock, 25912 .tfb_tcp_do_segment = rack_do_segment, 25913 .tfb_tcp_ctloutput = rack_ctloutput, 25914 .tfb_tcp_fb_init = rack_init, 25915 .tfb_tcp_fb_fini = rack_fini, 25916 .tfb_tcp_timer_stop_all = rack_stopall, 25917 .tfb_tcp_rexmit_tmr = rack_remxt_tmr, 25918 .tfb_tcp_handoff_ok = rack_handoff_ok, 25919 .tfb_tcp_mtu_chg = rack_mtu_change, 25920 .tfb_pru_options = rack_pru_options, 25921 .tfb_hwtls_change = rack_hw_tls_change, 25922 .tfb_chg_query = rack_chg_query, 25923 .tfb_switch_failed = rack_switch_failed, 25924 .tfb_early_wake_check = rack_wake_check, 25925 .tfb_compute_pipe = rack_compute_pipe, 25926 .tfb_stack_info = rack_stack_information, 25927 .tfb_inherit = rack_inherit, 25928 .tfb_flags = TCP_FUNC_OUTPUT_CANDROP, 25929 25930 }; 25931 25932 /* 25933 * rack_ctloutput() must drop the inpcb lock before performing copyin on 25934 * socket option arguments. When it re-acquires the lock after the copy, it 25935 * has to revalidate that the connection is still valid for the socket 25936 * option. 
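 * Concretely: sooptcopyin() can sleep, so by the time the lock is
 * re-taken the connection may be gone or may have been handed to a
 * different stack; that is why rack_set_sockopt() re-checks tp->t_fb
 * against &__tcp_rack after INP_WLOCK() before applying the option.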
25937 */ 25938 static int 25939 rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt) 25940 { 25941 struct inpcb *inp = tptoinpcb(tp); 25942 #ifdef INET 25943 struct ip *ip; 25944 #endif 25945 struct tcp_rack *rack; 25946 struct tcp_hybrid_req hybrid; 25947 uint64_t loptval; 25948 int32_t error = 0, optval; 25949 25950 rack = (struct tcp_rack *)tp->t_fb_ptr; 25951 if (rack == NULL) { 25952 INP_WUNLOCK(inp); 25953 return (EINVAL); 25954 } 25955 #ifdef INET 25956 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 25957 #endif 25958 25959 switch (sopt->sopt_level) { 25960 #ifdef INET6 25961 case IPPROTO_IPV6: 25962 MPASS(inp->inp_vflag & INP_IPV6PROTO); 25963 switch (sopt->sopt_name) { 25964 case IPV6_USE_MIN_MTU: 25965 tcp6_use_min_mtu(tp); 25966 break; 25967 } 25968 INP_WUNLOCK(inp); 25969 return (0); 25970 #endif 25971 #ifdef INET 25972 case IPPROTO_IP: 25973 switch (sopt->sopt_name) { 25974 case IP_TOS: 25975 /* 25976 * The DSCP codepoint has changed, update the fsb. 25977 */ 25978 ip->ip_tos = rack->rc_inp->inp_ip_tos; 25979 break; 25980 case IP_TTL: 25981 /* 25982 * The TTL has changed, update the fsb. 25983 */ 25984 ip->ip_ttl = rack->rc_inp->inp_ip_ttl; 25985 break; 25986 } 25987 INP_WUNLOCK(inp); 25988 return (0); 25989 #endif 25990 #ifdef SO_PEERPRIO 25991 case SOL_SOCKET: 25992 switch (sopt->sopt_name) { 25993 case SO_PEERPRIO: /* SC-URL:bs */ 25994 /* Already read in and sanity checked in sosetopt(). */ 25995 if (inp->inp_socket) { 25996 rack->client_bufferlvl = inp->inp_socket->so_peerprio; 25997 } 25998 break; 25999 } 26000 INP_WUNLOCK(inp); 26001 return (0); 26002 #endif 26003 case IPPROTO_TCP: 26004 switch (sopt->sopt_name) { 26005 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */ 26006 /* Pacing related ones */ 26007 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */ 26008 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */ 26009 case TCP_RACK_PACE_MIN_SEG: /* URL:pace_min_seg */ 26010 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */ 26011 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */ 26012 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */ 26013 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss*/ 26014 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */ 26015 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */ 26016 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */ 26017 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */ 26018 case TCP_RACK_RR_CONF: /* URL:rrr_conf */ 26019 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */ 26020 case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */ 26021 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */ 26022 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */ 26023 case TCP_FILLCW_RATE_CAP: /* URL:fillcw_cap */ 26024 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */ 26025 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */ 26026 /* End pacing related */ 26027 case TCP_POLICER_DETECT: /* URL:pol_det */ 26028 case TCP_POLICER_MSS: /* URL:pol_mss */ 26029 case TCP_DELACK: /* URL:delack (in base TCP i.e. 
tcp_hints along with cc etc ) */ 26030 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */ 26031 case TCP_RACK_MIN_TO: /* URL:min_to */ 26032 case TCP_RACK_EARLY_SEG: /* URL:early_seg */ 26033 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */ 26034 case TCP_RACK_REORD_FADE: /* URL:reord_fade */ 26035 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */ 26036 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */ 26037 case TCP_RACK_TLP_USE: /* URL:tlp_use */ 26038 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */ 26039 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */ 26040 case TCP_RACK_DO_DETECTION: /* URL:detect */ 26041 case TCP_NO_PRR: /* URL:noprr */ 26042 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */ 26043 case TCP_DATA_AFTER_CLOSE: /* no URL */ 26044 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */ 26045 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */ 26046 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */ 26047 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */ 26048 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */ 26049 case TCP_RACK_PROFILE: /* URL:profile */ 26050 case TCP_SIDECHAN_DIS: /* URL:scodm */ 26051 case TCP_HYBRID_PACING: /* URL:pacing=hybrid */ 26052 case TCP_USE_CMP_ACKS: /* URL:cmpack */ 26053 case TCP_RACK_ABC_VAL: /* URL:labc */ 26054 case TCP_REC_ABC_VAL: /* URL:reclabc */ 26055 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */ 26056 case TCP_DEFER_OPTIONS: /* URL:defer */ 26057 case TCP_RACK_DSACK_OPT: /* URL:dsack */ 26058 case TCP_RACK_TIMER_SLOP: /* URL:timer_slop */ 26059 case TCP_RACK_ENABLE_HYSTART: /* URL:hystart */ 26060 case TCP_RACK_SET_RXT_OPTIONS: /* URL:rxtsz */ 26061 case TCP_RACK_HI_BETA: /* URL:hibeta */ 26062 case TCP_RACK_SPLIT_LIMIT: /* URL:split */ 26063 case TCP_SS_EEXIT: /* URL:eexit */ 26064 case TCP_DGP_UPPER_BOUNDS: /* URL:upper */ 26065 case TCP_RACK_PACING_DIVISOR: /* URL:divisor */ 26066 case TCP_PACING_DND: /* URL:dnd */ 26067 case TCP_NO_TIMELY: /* URL:notimely */ 26068 case RACK_CSPR_IS_FCC: /* URL:csprisfcc */ 26069 case TCP_HONOR_HPTS_MIN: /* URL:hptsmin */ 26070 case TCP_REC_IS_DYN: /* URL:dynrec */ 26071 case TCP_GP_USE_LTBW: /* URL:useltbw */ 26072 goto process_opt; 26073 break; 26074 default: 26075 /* Filter off all unknown options to the base stack */ 26076 return (tcp_default_ctloutput(tp, sopt)); 26077 break; 26078 } 26079 default: 26080 INP_WUNLOCK(inp); 26081 return (0); 26082 } 26083 process_opt: 26084 INP_WUNLOCK(inp); 26085 if ((sopt->sopt_name == TCP_PACING_RATE_CAP) || 26086 (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) { 26087 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval)); 26088 /* 26089 * We truncate it down to 32 bits for the socket-option trace this 26090 * means rates > 34Gbps won't show right, but thats probably ok. 
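 * (2^32 bytes per second is roughly 34.4 Gbit/sec, which is where
 * the 34Gbps figure above comes from.)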
26091 */ 26092 optval = (uint32_t)loptval; 26093 } else if (sopt->sopt_name == TCP_HYBRID_PACING) { 26094 error = sooptcopyin(sopt, &hybrid, sizeof(hybrid), sizeof(hybrid)); 26095 } else { 26096 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 26097 /* Save it in 64 bit form too */ 26098 loptval = optval; 26099 } 26100 if (error) 26101 return (error); 26102 INP_WLOCK(inp); 26103 if (tp->t_fb != &__tcp_rack) { 26104 INP_WUNLOCK(inp); 26105 return (ENOPROTOOPT); 26106 } 26107 if (rack->defer_options && (rack->gp_ready == 0) && 26108 (sopt->sopt_name != TCP_DEFER_OPTIONS) && 26109 (sopt->sopt_name != TCP_HYBRID_PACING) && 26110 (sopt->sopt_name != TCP_RACK_SET_RXT_OPTIONS) && 26111 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) && 26112 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) { 26113 /* Options are being deferred */ 26114 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { 26115 INP_WUNLOCK(inp); 26116 return (0); 26117 } else { 26118 /* No memory to defer, fail */ 26119 INP_WUNLOCK(inp); 26120 return (ENOMEM); 26121 } 26122 } 26123 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval, &hybrid); 26124 INP_WUNLOCK(inp); 26125 return (error); 26126 } 26127 26128 static void 26129 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti) 26130 { 26131 26132 INP_WLOCK_ASSERT(tptoinpcb(tp)); 26133 bzero(ti, sizeof(*ti)); 26134 26135 ti->tcpi_state = tp->t_state; 26136 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP)) 26137 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; 26138 if (tp->t_flags & TF_SACK_PERMIT) 26139 ti->tcpi_options |= TCPI_OPT_SACK; 26140 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) { 26141 ti->tcpi_options |= TCPI_OPT_WSCALE; 26142 ti->tcpi_snd_wscale = tp->snd_scale; 26143 ti->tcpi_rcv_wscale = tp->rcv_scale; 26144 } 26145 if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT)) 26146 ti->tcpi_options |= TCPI_OPT_ECN; 26147 if (tp->t_flags & TF_FASTOPEN) 26148 ti->tcpi_options |= TCPI_OPT_TFO; 26149 /* still kept in ticks is t_rcvtime */ 26150 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick; 26151 /* Since we hold everything in precise useconds this is easy */ 26152 ti->tcpi_rtt = tp->t_srtt; 26153 ti->tcpi_rttvar = tp->t_rttvar; 26154 ti->tcpi_rto = tp->t_rxtcur; 26155 ti->tcpi_snd_ssthresh = tp->snd_ssthresh; 26156 ti->tcpi_snd_cwnd = tp->snd_cwnd; 26157 /* 26158 * FreeBSD-specific extension fields for tcp_info. 26159 */ 26160 ti->tcpi_rcv_space = tp->rcv_wnd; 26161 ti->tcpi_rcv_nxt = tp->rcv_nxt; 26162 ti->tcpi_snd_wnd = tp->snd_wnd; 26163 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. 
*/ 26164 ti->tcpi_snd_nxt = tp->snd_nxt; 26165 ti->tcpi_snd_mss = tp->t_maxseg; 26166 ti->tcpi_rcv_mss = tp->t_maxseg; 26167 ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack; 26168 ti->tcpi_rcv_ooopack = tp->t_rcvoopack; 26169 ti->tcpi_snd_zerowin = tp->t_sndzerowin; 26170 ti->tcpi_total_tlp = tp->t_sndtlppack; 26171 ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte; 26172 ti->tcpi_rttmin = tp->t_rttlow; 26173 #ifdef NETFLIX_STATS 26174 memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo)); 26175 #endif 26176 #ifdef TCP_OFFLOAD 26177 if (tp->t_flags & TF_TOE) { 26178 ti->tcpi_options |= TCPI_OPT_TOE; 26179 tcp_offload_tcp_info(tp, ti); 26180 } 26181 #endif 26182 } 26183 26184 static int 26185 rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt) 26186 { 26187 struct inpcb *inp = tptoinpcb(tp); 26188 struct tcp_rack *rack; 26189 int32_t error, optval; 26190 uint64_t val, loptval; 26191 struct tcp_info ti; 26192 /* 26193 * Because all our options are either boolean or an int, we can just 26194 * pull everything into optval and then unlock and copy. If we ever 26195 * add a option that is not a int, then this will have quite an 26196 * impact to this routine. 26197 */ 26198 error = 0; 26199 rack = (struct tcp_rack *)tp->t_fb_ptr; 26200 if (rack == NULL) { 26201 INP_WUNLOCK(inp); 26202 return (EINVAL); 26203 } 26204 switch (sopt->sopt_name) { 26205 case TCP_INFO: 26206 /* First get the info filled */ 26207 rack_fill_info(tp, &ti); 26208 /* Fix up the rtt related fields if needed */ 26209 INP_WUNLOCK(inp); 26210 error = sooptcopyout(sopt, &ti, sizeof ti); 26211 return (error); 26212 /* 26213 * Beta is the congestion control value for NewReno that influences how 26214 * much of a backoff happens when loss is detected. It is normally set 26215 * to 50 for 50% i.e. the cwnd is reduced to 50% of its previous value 26216 * when you exit recovery. 26217 */ 26218 case TCP_RACK_PACING_BETA: 26219 break; 26220 /* 26221 * Beta_ecn is the congestion control value for NewReno that influences how 26222 * much of a backoff happens when a ECN mark is detected. It is normally set 26223 * to 80 for 80% i.e. the cwnd is reduced by 20% of its previous value when 26224 * you exit recovery. Note that classic ECN has a beta of 50, it is only 26225 * ABE Ecn that uses this "less" value, but we do too with pacing :) 26226 */ 26227 26228 case TCP_RACK_PACING_BETA_ECN: 26229 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) 26230 error = EINVAL; 26231 else if (rack->rc_pacing_cc_set == 0) 26232 optval = rack->r_ctl.rc_saved_beta.beta_ecn; 26233 else { 26234 /* 26235 * Reach out into the CC data and report back what 26236 * I have previously set. Yeah it looks hackish but 26237 * we don't want to report the saved values. 
26238 */ 26239 if (tp->t_ccv.cc_data) 26240 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn; 26241 else 26242 error = EINVAL; 26243 } 26244 break; 26245 case TCP_RACK_DSACK_OPT: 26246 optval = 0; 26247 if (rack->rc_rack_tmr_std_based) { 26248 optval |= 1; 26249 } 26250 if (rack->rc_rack_use_dsack) { 26251 optval |= 2; 26252 } 26253 break; 26254 case TCP_RACK_ENABLE_HYSTART: 26255 { 26256 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { 26257 optval = RACK_HYSTART_ON; 26258 if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND) 26259 optval = RACK_HYSTART_ON_W_SC; 26260 if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH) 26261 optval = RACK_HYSTART_ON_W_SC_C; 26262 } else { 26263 optval = RACK_HYSTART_OFF; 26264 } 26265 } 26266 break; 26267 case TCP_RACK_DGP_IN_REC: 26268 error = EINVAL; 26269 break; 26270 case TCP_RACK_HI_BETA: 26271 optval = rack->rack_hibeta; 26272 break; 26273 case TCP_POLICER_MSS: 26274 optval = rack->r_ctl.policer_del_mss; 26275 break; 26276 case TCP_POLICER_DETECT: 26277 optval = rack->r_ctl.saved_policer_val; 26278 break; 26279 case TCP_DEFER_OPTIONS: 26280 optval = rack->defer_options; 26281 break; 26282 case TCP_RACK_MEASURE_CNT: 26283 optval = rack->r_ctl.req_measurements; 26284 break; 26285 case TCP_REC_ABC_VAL: 26286 optval = rack->r_use_labc_for_rec; 26287 break; 26288 case TCP_RACK_ABC_VAL: 26289 optval = rack->rc_labc; 26290 break; 26291 case TCP_HDWR_UP_ONLY: 26292 optval= rack->r_up_only; 26293 break; 26294 case TCP_FILLCW_RATE_CAP: 26295 loptval = rack->r_ctl.fillcw_cap; 26296 break; 26297 case TCP_PACING_RATE_CAP: 26298 loptval = rack->r_ctl.bw_rate_cap; 26299 break; 26300 case TCP_RACK_PROFILE: 26301 /* You cannot retrieve a profile, its write only */ 26302 error = EINVAL; 26303 break; 26304 case TCP_SIDECHAN_DIS: 26305 optval = rack->r_ctl.side_chan_dis_mask; 26306 break; 26307 case TCP_HYBRID_PACING: 26308 /* You cannot retrieve hybrid pacing information, its write only */ 26309 error = EINVAL; 26310 break; 26311 case TCP_USE_CMP_ACKS: 26312 optval = rack->r_use_cmp_ack; 26313 break; 26314 case TCP_RACK_PACE_TO_FILL: 26315 optval = rack->rc_pace_to_cwnd; 26316 break; 26317 case TCP_RACK_NO_PUSH_AT_MAX: 26318 optval = rack->r_ctl.rc_no_push_at_mrtt; 26319 break; 26320 case TCP_SHARED_CWND_ENABLE: 26321 optval = rack->rack_enable_scwnd; 26322 break; 26323 case TCP_RACK_NONRXT_CFG_RATE: 26324 optval = rack->rack_rec_nonrxt_use_cr; 26325 break; 26326 case TCP_NO_PRR: 26327 if (rack->rack_no_prr == 1) 26328 optval = 1; 26329 else if (rack->no_prr_addback == 1) 26330 optval = 2; 26331 else 26332 optval = 0; 26333 break; 26334 case TCP_GP_USE_LTBW: 26335 if (rack->dis_lt_bw) { 26336 /* It is not used */ 26337 optval = 0; 26338 } else if (rack->use_lesser_lt_bw) { 26339 /* we use min() */ 26340 optval = 1; 26341 } else { 26342 /* we use max() */ 26343 optval = 2; 26344 } 26345 break; 26346 case TCP_RACK_DO_DETECTION: 26347 optval = rack->do_detection; 26348 break; 26349 case TCP_RACK_MBUF_QUEUE: 26350 /* Now do we use the LRO mbuf-queue feature */ 26351 optval = rack->r_mbuf_queue; 26352 break; 26353 case RACK_CSPR_IS_FCC: 26354 optval = rack->cspr_is_fcc; 26355 break; 26356 case TCP_TIMELY_DYN_ADJ: 26357 optval = rack->rc_gp_dyn_mul; 26358 break; 26359 case TCP_BBR_IWINTSO: 26360 error = EINVAL; 26361 break; 26362 case TCP_RACK_TLP_REDUCE: 26363 /* RACK TLP cwnd reduction (bool) */ 26364 optval = rack->r_ctl.rc_tlp_cwnd_reduce; 26365 break; 26366 case TCP_BBR_RACK_INIT_RATE: 26367 val = rack->r_ctl.init_rate; 26368 /* convert to kbits per sec */ 26369 val *= 8; 26370 
val /= 1000; 26371 optval = (uint32_t)val; 26372 break; 26373 case TCP_RACK_FORCE_MSEG: 26374 optval = rack->rc_force_max_seg; 26375 break; 26376 case TCP_RACK_PACE_MIN_SEG: 26377 optval = rack->r_ctl.rc_user_set_min_segs; 26378 break; 26379 case TCP_RACK_PACE_MAX_SEG: 26380 /* Max segments in a pace */ 26381 optval = rack->rc_user_set_max_segs; 26382 break; 26383 case TCP_RACK_PACE_ALWAYS: 26384 /* Use the always pace method */ 26385 optval = rack->rc_always_pace; 26386 break; 26387 case TCP_RACK_PRR_SENDALOT: 26388 /* Allow PRR to send more than one seg */ 26389 optval = rack->r_ctl.rc_prr_sendalot; 26390 break; 26391 case TCP_RACK_MIN_TO: 26392 /* Minimum time between rack t-o's in ms */ 26393 optval = rack->r_ctl.rc_min_to; 26394 break; 26395 case TCP_RACK_SPLIT_LIMIT: 26396 optval = rack->r_ctl.rc_split_limit; 26397 break; 26398 case TCP_RACK_EARLY_SEG: 26399 /* If early recovery max segments */ 26400 optval = rack->r_ctl.rc_early_recovery_segs; 26401 break; 26402 case TCP_RACK_REORD_THRESH: 26403 /* RACK reorder threshold (shift amount) */ 26404 optval = rack->r_ctl.rc_reorder_shift; 26405 break; 26406 case TCP_SS_EEXIT: 26407 if (rack->r_ctl.gp_rnd_thresh) { 26408 uint32_t v; 26409 26410 v = rack->r_ctl.gp_gain_req; 26411 v <<= 18; 26412 optval = v | (rack->r_ctl.gp_rnd_thresh & 0xff); 26413 if (rack->r_ctl.gate_to_fs == 1) 26414 optval |= 0x10000; 26415 } else 26416 optval = 0; 26417 break; 26418 case TCP_RACK_REORD_FADE: 26419 /* Does reordering fade after ms time */ 26420 optval = rack->r_ctl.rc_reorder_fade; 26421 break; 26422 case TCP_BBR_USE_RACK_RR: 26423 /* Do we use the rack cheat for rxt */ 26424 optval = rack->use_rack_rr; 26425 break; 26426 case TCP_RACK_RR_CONF: 26427 optval = rack->r_rr_config; 26428 break; 26429 case TCP_HDWR_RATE_CAP: 26430 optval = rack->r_rack_hw_rate_caps; 26431 break; 26432 case TCP_BBR_HDWR_PACE: 26433 optval = rack->rack_hdw_pace_ena; 26434 break; 26435 case TCP_RACK_TLP_THRESH: 26436 /* RACK TLP threshold i.e. srtt+(srtt/N) */ 26437 optval = rack->r_ctl.rc_tlp_threshold; 26438 break; 26439 case TCP_RACK_PKT_DELAY: 26440 /* RACK added ms i.e.
rack-rtt + reord + N */ 26441 optval = rack->r_ctl.rc_pkt_delay; 26442 break; 26443 case TCP_RACK_TLP_USE: 26444 optval = rack->rack_tlp_threshold_use; 26445 break; 26446 case TCP_PACING_DND: 26447 optval = rack->rc_pace_dnd; 26448 break; 26449 case TCP_RACK_PACE_RATE_CA: 26450 optval = rack->r_ctl.rc_fixed_pacing_rate_ca; 26451 break; 26452 case TCP_RACK_PACE_RATE_SS: 26453 optval = rack->r_ctl.rc_fixed_pacing_rate_ss; 26454 break; 26455 case TCP_RACK_PACE_RATE_REC: 26456 optval = rack->r_ctl.rc_fixed_pacing_rate_rec; 26457 break; 26458 case TCP_DGP_UPPER_BOUNDS: 26459 optval = rack->r_ctl.rack_per_upper_bound_ss; 26460 optval <<= 16; 26461 optval |= rack->r_ctl.rack_per_upper_bound_ca; 26462 break; 26463 case TCP_RACK_GP_INCREASE_SS: 26464 optval = rack->r_ctl.rack_per_of_gp_ss; 26465 break; 26466 case TCP_RACK_GP_INCREASE_CA: 26467 optval = rack->r_ctl.rack_per_of_gp_ca; 26468 break; 26469 case TCP_RACK_PACING_DIVISOR: 26470 optval = rack->r_ctl.pace_len_divisor; 26471 break; 26472 case TCP_BBR_RACK_RTT_USE: 26473 optval = rack->r_ctl.rc_rate_sample_method; 26474 break; 26475 case TCP_DELACK: 26476 optval = tp->t_delayed_ack; 26477 break; 26478 case TCP_DATA_AFTER_CLOSE: 26479 optval = rack->rc_allow_data_af_clo; 26480 break; 26481 case TCP_SHARED_CWND_TIME_LIMIT: 26482 optval = rack->r_limit_scw; 26483 break; 26484 case TCP_HONOR_HPTS_MIN: 26485 if (rack->r_use_hpts_min) 26486 optval = rack->r_ctl.max_reduction; 26487 else 26488 optval = 0; 26489 break; 26490 case TCP_REC_IS_DYN: 26491 optval = rack->rc_gp_no_rec_chg; 26492 break; 26493 case TCP_NO_TIMELY: 26494 optval = rack->rc_skip_timely; 26495 break; 26496 case TCP_RACK_TIMER_SLOP: 26497 optval = rack->r_ctl.timer_slop; 26498 break; 26499 default: 26500 return (tcp_default_ctloutput(tp, sopt)); 26501 break; 26502 } 26503 INP_WUNLOCK(inp); 26504 if (error == 0) { 26505 if ((sopt->sopt_name == TCP_PACING_RATE_CAP) || 26506 (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) 26507 error = sooptcopyout(sopt, &loptval, sizeof loptval); 26508 else 26509 error = sooptcopyout(sopt, &optval, sizeof optval); 26510 } 26511 return (error); 26512 } 26513 26514 static int 26515 rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt) 26516 { 26517 if (sopt->sopt_dir == SOPT_SET) { 26518 return (rack_set_sockopt(tp, sopt)); 26519 } else if (sopt->sopt_dir == SOPT_GET) { 26520 return (rack_get_sockopt(tp, sopt)); 26521 } else { 26522 panic("%s: sopt_dir %d", __func__, sopt->sopt_dir); 26523 } 26524 } 26525 26526 static const char *rack_stack_names[] = { 26527 __XSTRING(STACKNAME), 26528 #ifdef STACKALIAS 26529 __XSTRING(STACKALIAS), 26530 #endif 26531 }; 26532 26533 static int 26534 rack_ctor(void *mem, int32_t size, void *arg, int32_t how) 26535 { 26536 memset(mem, 0, size); 26537 return (0); 26538 } 26539 26540 static void 26541 rack_dtor(void *mem, int32_t size, void *arg) 26542 { 26543 26544 } 26545 26546 static bool rack_mod_inited = false; 26547 26548 static int 26549 tcp_addrack(module_t mod, int32_t type, void *data) 26550 { 26551 int32_t err = 0; 26552 int num_stacks; 26553 26554 switch (type) { 26555 case MOD_LOAD: 26556 rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map", 26557 sizeof(struct rack_sendmap), 26558 rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0); 26559 26560 rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb", 26561 sizeof(struct tcp_rack), 26562 rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); 26563 26564 sysctl_ctx_init(&rack_sysctl_ctx); 26565 rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 26566
SYSCTL_STATIC_CHILDREN(_net_inet_tcp), 26567 OID_AUTO, 26568 #ifdef STACKALIAS 26569 __XSTRING(STACKALIAS), 26570 #else 26571 __XSTRING(STACKNAME), 26572 #endif 26573 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 26574 ""); 26575 if (rack_sysctl_root == NULL) { 26576 printf("Failed to add sysctl node\n"); 26577 err = EFAULT; 26578 goto free_uma; 26579 } 26580 rack_init_sysctls(); 26581 num_stacks = nitems(rack_stack_names); 26582 err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK, 26583 rack_stack_names, &num_stacks); 26584 if (err) { 26585 printf("Failed to register %s stack name for " 26586 "%s module\n", rack_stack_names[num_stacks], 26587 __XSTRING(MODNAME)); 26588 sysctl_ctx_free(&rack_sysctl_ctx); 26589 free_uma: 26590 uma_zdestroy(rack_zone); 26591 uma_zdestroy(rack_pcb_zone); 26592 rack_counter_destroy(); 26593 printf("Failed to register rack module -- err:%d\n", err); 26594 return (err); 26595 } 26596 tcp_lro_reg_mbufq(); 26597 rack_mod_inited = true; 26598 break; 26599 case MOD_QUIESCE: 26600 err = deregister_tcp_functions(&__tcp_rack, true, false); 26601 break; 26602 case MOD_UNLOAD: 26603 err = deregister_tcp_functions(&__tcp_rack, false, true); 26604 if (err == EBUSY) 26605 break; 26606 if (rack_mod_inited) { 26607 uma_zdestroy(rack_zone); 26608 uma_zdestroy(rack_pcb_zone); 26609 sysctl_ctx_free(&rack_sysctl_ctx); 26610 rack_counter_destroy(); 26611 rack_mod_inited = false; 26612 } 26613 tcp_lro_dereg_mbufq(); 26614 err = 0; 26615 break; 26616 default: 26617 return (EOPNOTSUPP); 26618 } 26619 return (err); 26620 } 26621 26622 static moduledata_t tcp_rack = { 26623 .name = __XSTRING(MODNAME), 26624 .evhand = tcp_addrack, 26625 .priv = 0 26626 }; 26627 26628 MODULE_VERSION(MODNAME, 1); 26629 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY); 26630 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1); 26631 26632 #endif /* #if !defined(INET) && !defined(INET6) */ 26633
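/*
 * Example usage, for illustration only (not part of this module): a
 * userspace application could hand a connected socket over to this
 * stack and then set one of the options handled by rack_set_sockopt()
 * above. This assumes the stack was registered under the default
 * STACKNAME "rack" and that <netinet/tcp.h> exposes TCP_FUNCTION_BLK
 * and the TCP_RACK_* option values:
 *
 *	struct tcp_function_set tfs = { .function_set_name = "rack" };
 *	int one = 1;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_FUNCTION_BLK, &tfs, sizeof(tfs));
 *	setsockopt(fd, IPPROTO_TCP, TCP_RACK_PACE_ALWAYS, &one, sizeof(one));
 */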