1 /*- 2 * Copyright (c) 2016-2020 Netflix, Inc. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 */ 26 27 #include <sys/cdefs.h> 28 #include "opt_inet.h" 29 #include "opt_inet6.h" 30 #include "opt_ipsec.h" 31 #include "opt_ratelimit.h" 32 #include "opt_kern_tls.h" 33 #if defined(INET) || defined(INET6) 34 #include <sys/param.h> 35 #include <sys/arb.h> 36 #include <sys/module.h> 37 #include <sys/kernel.h> 38 #ifdef TCP_HHOOK 39 #include <sys/hhook.h> 40 #endif 41 #include <sys/lock.h> 42 #include <sys/malloc.h> 43 #include <sys/mutex.h> 44 #include <sys/mbuf.h> 45 #include <sys/proc.h> /* for proc0 declaration */ 46 #include <sys/socket.h> 47 #include <sys/socketvar.h> 48 #include <sys/sysctl.h> 49 #include <sys/systm.h> 50 #ifdef STATS 51 #include <sys/qmath.h> 52 #include <sys/tree.h> 53 #include <sys/stats.h> /* Must come after qmath.h and tree.h */ 54 #else 55 #include <sys/tree.h> 56 #endif 57 #include <sys/refcount.h> 58 #include <sys/queue.h> 59 #include <sys/tim_filter.h> 60 #include <sys/smp.h> 61 #include <sys/kthread.h> 62 #include <sys/kern_prefetch.h> 63 #include <sys/protosw.h> 64 #ifdef TCP_ACCOUNTING 65 #include <sys/sched.h> 66 #include <machine/cpu.h> 67 #endif 68 #include <vm/uma.h> 69 70 #include <net/route.h> 71 #include <net/route/nhop.h> 72 #include <net/vnet.h> 73 74 #define TCPSTATES /* for logging */ 75 76 #include <netinet/in.h> 77 #include <netinet/in_kdtrace.h> 78 #include <netinet/in_pcb.h> 79 #include <netinet/ip.h> 80 #include <netinet/ip_var.h> 81 #include <netinet/ip6.h> 82 #include <netinet6/in6_pcb.h> 83 #include <netinet6/ip6_var.h> 84 #include <netinet/tcp.h> 85 #define TCPOUTFLAGS 86 #include <netinet/tcp_fsm.h> 87 #include <netinet/tcp_seq.h> 88 #include <netinet/tcp_timer.h> 89 #include <netinet/tcp_var.h> 90 #include <netinet/tcp_log_buf.h> 91 #include <netinet/tcp_syncache.h> 92 #include <netinet/tcp_hpts.h> 93 #include <netinet/tcp_ratelimit.h> 94 #include <netinet/tcp_accounting.h> 95 #include <netinet/tcpip.h> 96 #include <netinet/cc/cc.h> 97 #include <netinet/cc/cc_newreno.h> 98 #include <netinet/tcp_fastopen.h> 99 #include <netinet/tcp_lro.h> 100 #ifdef NETFLIX_SHARED_CWND 101 #include <netinet/tcp_shared_cwnd.h> 102 #endif 103 #ifdef TCP_OFFLOAD 104 #include <netinet/tcp_offload.h> 105 #endif 
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcp_ecn.h>

#include <netipsec/ipsec_support.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif				/* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
#include "sack_filter.h"
#include "tcp_rack.h"
#include "tailq_hash.h"
#include "rack_bbr_common.h"

uma_zone_t rack_zone;
uma_zone_t rack_pcb_zone;

#ifndef TICKS2SBT
#define TICKS2SBT(__t) (tick_sbt * ((sbintime_t)(__t)))
#endif

VNET_DECLARE(uint32_t, newreno_beta);
VNET_DECLARE(uint32_t, newreno_beta_ecn);
#define V_newreno_beta VNET(newreno_beta)
#define V_newreno_beta_ecn VNET(newreno_beta_ecn)

#define M_TCPFSB __CONCAT(M_TCPFSB, STACKNAME)
#define M_TCPDO __CONCAT(M_TCPDO, STACKNAME)

MALLOC_DEFINE(M_TCPFSB, "tcp_fsb_" __XSTRING(STACKNAME), "TCP fast send block");
MALLOC_DEFINE(M_TCPDO, "tcp_do_" __XSTRING(STACKNAME), "TCP deferred options");
MALLOC_DEFINE(M_TCPPCM, "tcp_pcm_" __XSTRING(STACKNAME), "TCP PCM measurement information");

struct sysctl_ctx_list rack_sysctl_ctx;
struct sysctl_oid *rack_sysctl_root;

#define CUM_ACKED 1
#define SACKED 2

/*
 * The RACK module incorporates a number of
 * TCP ideas that have been put out into the IETF
 * over the last few years:
 * - Matt Mathis's Rate Halving which slowly drops
 *   the congestion window so that the ack clock can
 *   be maintained during a recovery.
 * - Yuchung Cheng's RACK TCP (for which it is named) that
 *   will stop us using the number of dup acks and instead
 *   use time as the gauge of when we retransmit.
 * - Reorder Detection of RFC 4737 and the Tail-Loss probe draft
 *   of Dukkipati et al.
 * RACK depends on SACK, so if an endpoint arrives that
 * cannot do SACK the state machine below will shuttle the
 * connection back to using the "default" TCP stack that is
 * in FreeBSD.
 *
 * To implement RACK the original TCP stack was first decomposed
 * into a functional state machine with individual states
 * for each of the possible TCP connection states. The do_segment
 * function's role in life is to mandate that the connection supports
 * SACK initially and then ensure that the RACK state matches the
 * connection state before calling the state's do_segment function.
 * Each state is simplified due to the fact that the original
 * do_segment has been decomposed and we *know* what state we are in
 * (no switches on the state) and all tests for SACK are gone. This
 * greatly simplifies what each state does.
 *
 * TCP output is also overwritten with a new version since it
 * must maintain the new rack scoreboard.
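 *
 * As a rough, illustrative sketch (not the literal code, with the real
 * handlers' details omitted), the per-state dispatch described above
 * amounts to:
 *
 *	if ((tp->t_flags & TF_SACK_PERMIT) == 0)
 *		hand the connection back to the default stack;
 *	rack_set_state(tp, rack);		   keep RACK in sync with the tcb
 *	(*rack->r_substate)(m, th, so, tp, ...);   per-state do_segment
 *
 * where the state-specific handlers are the rack_do_*() functions
 * declared further below (rack_do_established(), rack_do_syn_sent(),
 * and so on).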
 *
 */
static int32_t rack_tlp_thresh = 1;
static int32_t rack_tlp_limit = 2;	/* No more than 2 TLPs w-out new data */
static int32_t rack_tlp_use_greater = 1;
static int32_t rack_reorder_thresh = 2;
static int32_t rack_reorder_fade = 60000000;	/* 0 - never fade, def 60,000,000
						 * - 60 seconds */
static uint32_t rack_pcm_every_n_rounds = 100;
static uint32_t rack_pcm_blast = 0;
static uint32_t rack_pcm_is_enabled = 1;
static uint8_t rack_ssthresh_rest_rto_rec = 0;	/* Do we restore ssthresh when we have rec -> rto -> rec */

static uint32_t rack_gp_gain_req = 1200;	/* Amount percent wise required to gain to record a round as "gaining" */
static uint32_t rack_rnd_cnt_req = 0x10005;	/* Default number of rounds if we are below rack_gp_gain_req where we exit ss */


static int32_t rack_rxt_scoreboard_clear_thresh = 2;
static int32_t rack_dnd_default = 0;	/* For rr_conf = 3, what is the default for dnd */
static int32_t rack_rxt_controls = 0;
static int32_t rack_fill_cw_state = 0;
static uint8_t rack_req_measurements = 1;
/* Attack threshold detections */
static uint32_t rack_highest_sack_thresh_seen = 0;
static uint32_t rack_highest_move_thresh_seen = 0;
static uint32_t rack_merge_out_sacks_on_attack = 0;
static int32_t rack_enable_hw_pacing = 0;	/* Due to CCSP keep it off by default */
static int32_t rack_hw_rate_caps = 0;	/* 1; */
static int32_t rack_hw_rate_cap_per = 0;	/* 0 -- off */
static int32_t rack_hw_rate_min = 0;	/* 1500000;*/
static int32_t rack_hw_rate_to_low = 0;	/* 1200000; */
static int32_t rack_hw_up_only = 0;
static int32_t rack_stats_gets_ms_rtt = 1;
static int32_t rack_prr_addbackmax = 2;
static int32_t rack_do_hystart = 0;
static int32_t rack_apply_rtt_with_reduced_conf = 0;
static int32_t rack_hibeta_setting = 0;
static int32_t rack_default_pacing_divisor = 250;
static uint16_t rack_pacing_min_seg = 0;
static int32_t rack_timely_off = 0;

static uint32_t sad_seg_size_per = 800;	/* 80.0 % */
static int32_t rack_pkt_delay = 1000;
static int32_t rack_send_a_lot_in_prr = 1;
static int32_t rack_min_to = 1000;	/* Number of microsecond min timeout */
static int32_t rack_verbose_logging = 0;
static int32_t rack_ignore_data_after_close = 1;
static int32_t rack_enable_shared_cwnd = 1;
static int32_t rack_use_cmp_acks = 1;
static int32_t rack_use_fsb = 1;
static int32_t rack_use_rfo = 1;
static int32_t rack_use_rsm_rfo = 1;
static int32_t rack_max_abc_post_recovery = 2;
static int32_t rack_client_low_buf = 0;
static int32_t rack_dsack_std_based = 0x3;	/* bit field bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */
static int32_t rack_bw_multipler = 0;	/* Limit on fill cw's jump up to be this x gp_est */
#ifdef TCP_ACCOUNTING
static int32_t rack_tcp_accounting = 0;
#endif
static int32_t rack_limits_scwnd = 1;
static int32_t rack_enable_mqueue_for_nonpaced = 0;
static int32_t rack_hybrid_allow_set_maxseg = 0;
static int32_t rack_disable_prr = 0;
static int32_t use_rack_rr = 1;
static int32_t rack_non_rxt_use_cr = 0;	/* does a non-rxt in recovery use the configured rate (ss/ca)?
					 */
static int32_t rack_persist_min = 250000;	/* 250usec */
static int32_t rack_persist_max = 2000000;	/* 2 Second in usec's */
static int32_t rack_honors_hpts_min_to = 1;	/* Do we honor the hpts minimum time out for pacing timers */
static uint32_t rack_max_reduce = 10;	/* Percent we can reduce pacing delay by */
static int32_t rack_sack_not_required = 1;	/* set to one to allow non-sack to use rack */
static int32_t rack_limit_time_with_srtt = 0;
static int32_t rack_autosndbuf_inc = 20;	/* In percentage form */
static int32_t rack_enobuf_hw_boost_mult = 0;	/* How many times the hw rate we boost pacing delay using time_between */
static int32_t rack_enobuf_hw_max = 12000;	/* 12 ms in usecs */
static int32_t rack_enobuf_hw_min = 10000;	/* 10 ms in usecs */
static int32_t rack_hw_rwnd_factor = 2;	/* How many max_segs the rwnd must be before we hold off sending */
static int32_t rack_hw_check_queue = 0;	/* Do we always pre-check queue depth of a hw queue */

/*
 * Currently regular tcp has a rto_min of 30ms; the backoff goes
 * 12 times so that ends up being a total of 122.850 seconds
 * before a connection is killed.
 */
static uint32_t rack_def_data_window = 20;
static uint32_t rack_goal_bdp = 2;
static uint32_t rack_min_srtts = 1;
static uint32_t rack_min_measure_usec = 0;
static int32_t rack_tlp_min = 10000;	/* 10ms */
static int32_t rack_rto_min = 30000;	/* 30,000 usec same as main freebsd */
static int32_t rack_rto_max = 4000000;	/* 4 seconds in usec's */
static const int32_t rack_free_cache = 2;
static int32_t rack_hptsi_segments = 40;
static int32_t rack_rate_sample_method = USE_RTT_LOW;
static int32_t rack_pace_every_seg = 0;
static int32_t rack_delayed_ack_time = 40000;	/* 40ms in usecs */
static int32_t rack_pacing_delay_reduction = 4;
static int32_t rack_wma_divisor = 8;	/* For WMA calculation */
static int32_t rack_cwnd_block_ends_measure = 0;
static int32_t rack_rwnd_block_ends_measure = 0;
static int32_t rack_def_profile = 0;

static int32_t rack_lower_cwnd_at_tlp = 0;
static int32_t rack_always_send_oldest = 0;
static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;

static uint16_t rack_per_of_gp_ss = 250;	/* 250 % slow-start */
static uint16_t rack_per_of_gp_ca = 200;	/* 200 % congestion-avoidance */
static uint16_t rack_per_of_gp_rec = 200;	/* 200 % of bw */

/* Probertt */
static uint16_t rack_per_of_gp_probertt = 60;	/* 60% of bw */
static uint16_t rack_per_of_gp_lowthresh = 40;	/* 40% is bottom */
static uint16_t rack_per_of_gp_probertt_reduce = 10;	/* 10% reduction */
static uint16_t rack_atexit_prtt_hbp = 130;	/* Clamp to 130% on exit prtt if highly buffered path */
static uint16_t rack_atexit_prtt = 130;	/* Clamp to 100% on exit prtt if non highly buffered path */

static uint32_t rack_max_drain_wait = 2;	/* How many gp srtt's before we give up draining */
static uint32_t rack_must_drain = 1;	/* How many GP srtt's we *must* wait */
static uint32_t rack_probertt_use_min_rtt_entry = 1;	/* Use the min to calculate the goal else gp_srtt */
static uint32_t rack_probertt_use_min_rtt_exit = 0;
static uint32_t rack_probe_rtt_sets_cwnd = 0;
static uint32_t rack_probe_rtt_safety_val = 2000000;	/* No more than 2 sec in probe-rtt */
static uint32_t rack_time_between_probertt = 9600000;	/* 9.6 sec in usecs */
static uint32_t rack_probertt_gpsrtt_cnt_mul = 0;	/* How many srtt periods does probe-rtt last top fraction */
static uint32_t rack_probertt_gpsrtt_cnt_div = 0;	/* How many srtt periods does probe-rtt last bottom fraction */
static uint32_t rack_min_probertt_hold = 40000;	/* Equal to delayed ack time */
static uint32_t rack_probertt_filter_life = 10000000;
static uint32_t rack_probertt_lower_within = 10;
static uint32_t rack_min_rtt_movement = 250000;	/* Must move at least 250ms (in microseconds) to count as a lowering */
static int32_t rack_pace_one_seg = 0;	/* Shall we pace for less than 1.4Meg 1MSS at a time */
static int32_t rack_probertt_clear_is = 1;
static int32_t rack_max_drain_hbp = 1;	/* Extra drain times gpsrtt for highly buffered paths */
static int32_t rack_hbp_thresh = 3;	/* what is the divisor max_rtt/min_rtt to decide a hbp */

/* Part of pacing */
static int32_t rack_max_per_above = 30;	/* When we go to increment stop if above 100+this% */

/* Timely information:
 *
 * Here we have various control parameters on how
 * timely may change the multiplier. rack_gain_p5_ub
 * is associated with timely but not directly influencing
 * the rate decision like the other variables. It controls
 * the way fill-cw interacts with timely and caps how much
 * timely can boost the fill-cw b/w.
 *
 * The other values are various boost/shrink numbers as well
 * as potential caps when adjustments are made to the timely
 * gain (returned by rack_get_output_gain()). Remember too that
 * the gain returned can be overridden by other factors such as
 * probeRTT as well as fixed-rate-pacing.
 */
static int32_t rack_gain_p5_ub = 250;
static int32_t rack_gp_per_bw_mul_up = 2;	/* 2% */
static int32_t rack_gp_per_bw_mul_down = 4;	/* 4% */
static int32_t rack_gp_rtt_maxmul = 3;	/* 3 x maxmin */
static int32_t rack_gp_rtt_minmul = 1;	/* minrtt + (minrtt/mindiv) is lower rtt */
static int32_t rack_gp_rtt_mindiv = 4;	/* minrtt + (minrtt * minmul/mindiv) is lower rtt */
static int32_t rack_gp_decrease_per = 80;	/* Beta value of timely decrease (.8) = 80 */
static int32_t rack_gp_increase_per = 2;	/* 2% increase in multiplier */
static int32_t rack_per_lower_bound = 50;	/* Don't allow to drop below this multiplier */
static int32_t rack_per_upper_bound_ss = 0;	/* Don't allow SS to grow above this */
static int32_t rack_per_upper_bound_ca = 0;	/* Don't allow CA to grow above this */
static int32_t rack_do_dyn_mul = 0;	/* Are the rack gp multipliers dynamic */
static int32_t rack_gp_no_rec_chg = 1;	/* Prohibit recovery from reducing its multiplier */
static int32_t rack_timely_dec_clear = 6;	/* Do we clear decrement count at a value (6)?
*/ 352 static int32_t rack_timely_max_push_rise = 3; /* One round of pushing */ 353 static int32_t rack_timely_max_push_drop = 3; /* Three round of pushing */ 354 static int32_t rack_timely_min_segs = 4; /* 4 segment minimum */ 355 static int32_t rack_timely_no_stopping = 0; 356 static int32_t rack_down_raise_thresh = 100; 357 static int32_t rack_req_segs = 1; 358 static uint64_t rack_bw_rate_cap = 0; 359 static uint64_t rack_fillcw_bw_cap = 3750000; /* Cap fillcw at 30Mbps */ 360 361 362 /* Rack specific counters */ 363 counter_u64_t rack_saw_enobuf; 364 counter_u64_t rack_saw_enobuf_hw; 365 counter_u64_t rack_saw_enetunreach; 366 counter_u64_t rack_persists_sends; 367 counter_u64_t rack_persists_acks; 368 counter_u64_t rack_persists_loss; 369 counter_u64_t rack_persists_lost_ends; 370 counter_u64_t rack_total_bytes; 371 #ifdef INVARIANTS 372 counter_u64_t rack_adjust_map_bw; 373 #endif 374 /* Tail loss probe counters */ 375 counter_u64_t rack_tlp_tot; 376 counter_u64_t rack_tlp_newdata; 377 counter_u64_t rack_tlp_retran; 378 counter_u64_t rack_tlp_retran_bytes; 379 counter_u64_t rack_to_tot; 380 counter_u64_t rack_hot_alloc; 381 counter_u64_t rack_to_alloc; 382 counter_u64_t rack_to_alloc_hard; 383 counter_u64_t rack_to_alloc_emerg; 384 counter_u64_t rack_to_alloc_limited; 385 counter_u64_t rack_alloc_limited_conns; 386 counter_u64_t rack_split_limited; 387 counter_u64_t rack_rxt_clamps_cwnd; 388 counter_u64_t rack_rxt_clamps_cwnd_uniq; 389 390 counter_u64_t rack_multi_single_eq; 391 counter_u64_t rack_proc_non_comp_ack; 392 393 counter_u64_t rack_fto_send; 394 counter_u64_t rack_fto_rsm_send; 395 counter_u64_t rack_nfto_resend; 396 counter_u64_t rack_non_fto_send; 397 counter_u64_t rack_extended_rfo; 398 399 counter_u64_t rack_sack_proc_all; 400 counter_u64_t rack_sack_proc_short; 401 counter_u64_t rack_sack_proc_restart; 402 counter_u64_t rack_sack_attacks_detected; 403 counter_u64_t rack_sack_attacks_reversed; 404 counter_u64_t rack_sack_attacks_suspect; 405 counter_u64_t rack_sack_used_next_merge; 406 counter_u64_t rack_sack_splits; 407 counter_u64_t rack_sack_used_prev_merge; 408 counter_u64_t rack_sack_skipped_acked; 409 counter_u64_t rack_ack_total; 410 counter_u64_t rack_express_sack; 411 counter_u64_t rack_sack_total; 412 counter_u64_t rack_move_none; 413 counter_u64_t rack_move_some; 414 415 counter_u64_t rack_input_idle_reduces; 416 counter_u64_t rack_collapsed_win; 417 counter_u64_t rack_collapsed_win_seen; 418 counter_u64_t rack_collapsed_win_rxt; 419 counter_u64_t rack_collapsed_win_rxt_bytes; 420 counter_u64_t rack_try_scwnd; 421 counter_u64_t rack_hw_pace_init_fail; 422 counter_u64_t rack_hw_pace_lost; 423 424 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE]; 425 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE]; 426 427 428 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2))) 429 430 #define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do { \ 431 (tv) = (value) + slop; \ 432 if ((u_long)(tv) < (u_long)(tvmin)) \ 433 (tv) = (tvmin); \ 434 if ((u_long)(tv) > (u_long)(tvmax)) \ 435 (tv) = (tvmax); \ 436 } while (0) 437 438 static void 439 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line); 440 441 static int 442 rack_process_ack(struct mbuf *m, struct tcphdr *th, 443 struct socket *so, struct tcpcb *tp, struct tcpopt *to, 444 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val, int32_t orig_tlen); 445 static int 446 rack_process_data(struct mbuf *m, struct tcphdr *th, 
447 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 448 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 449 static void 450 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, 451 uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery); 452 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack); 453 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack, 454 uint8_t limit_type); 455 static struct rack_sendmap * 456 rack_check_recovery_mode(struct tcpcb *tp, 457 uint32_t tsused); 458 static uint32_t 459 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack); 460 static void 461 rack_cong_signal(struct tcpcb *tp, 462 uint32_t type, uint32_t ack, int ); 463 static void rack_counter_destroy(void); 464 static int 465 rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt); 466 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how); 467 static void 468 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override); 469 static void 470 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 471 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos); 472 static void rack_dtor(void *mem, int32_t size, void *arg); 473 static void 474 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 475 uint32_t flex1, uint32_t flex2, 476 uint32_t flex3, uint32_t flex4, 477 uint32_t flex5, uint32_t flex6, 478 uint16_t flex7, uint8_t mod); 479 480 static void 481 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t pacing_delay, 482 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line, 483 struct rack_sendmap *rsm, uint8_t quality); 484 static struct rack_sendmap * 485 rack_find_high_nonack(struct tcp_rack *rack, 486 struct rack_sendmap *rsm); 487 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack); 488 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm); 489 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged); 490 static int rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt); 491 static void 492 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 493 tcp_seq th_ack, int line, uint8_t quality); 494 static void 495 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm); 496 497 static uint32_t 498 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss); 499 static int32_t rack_handoff_ok(struct tcpcb *tp); 500 static int32_t rack_init(struct tcpcb *tp, void **ptr); 501 static void rack_init_sysctls(void); 502 503 static void 504 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, 505 struct tcphdr *th, int entered_rec, int dup_ack_struck, 506 int *dsack_seen, int *sacks_seen); 507 static void 508 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 509 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t ts, 510 struct rack_sendmap *hintrsm, uint32_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls, int segsiz); 511 512 static uint64_t rack_get_gp_est(struct tcp_rack *rack); 513 514 515 static void 516 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack, 517 struct rack_sendmap *rsm, uint32_t cts); 518 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm); 519 static int32_t rack_output(struct tcpcb *tp); 520 521 static uint32_t 522 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, 523 struct sackblk *sack, struct 
tcpopt *to, struct rack_sendmap **prsm, 524 uint32_t cts, uint32_t segsiz); 525 static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq); 526 static void rack_remxt_tmr(struct tcpcb *tp); 527 static int rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt); 528 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack); 529 static int32_t rack_stopall(struct tcpcb *tp); 530 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line); 531 static uint32_t 532 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 533 struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint32_t add_flag, int segsiz); 534 static void 535 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 536 struct rack_sendmap *rsm, uint64_t ts, uint32_t add_flag, int segsiz); 537 static int 538 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 539 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack); 540 static int32_t tcp_addrack(module_t mod, int32_t type, void *data); 541 static int 542 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, 543 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 544 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 545 546 static int 547 rack_do_closing(struct mbuf *m, struct tcphdr *th, 548 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 549 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 550 static int 551 rack_do_established(struct mbuf *m, struct tcphdr *th, 552 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 553 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 554 static int 555 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, 556 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 557 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos); 558 static int 559 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, 560 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 561 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 562 static int 563 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, 564 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 565 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 566 static int 567 rack_do_lastack(struct mbuf *m, struct tcphdr *th, 568 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 569 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 570 static int 571 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, 572 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 573 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 574 static int 575 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, 576 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 577 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 578 static void rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts); 579 struct rack_sendmap * 580 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, 581 uint32_t tsused); 582 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, 583 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap 
*rsm, uint16_t rtrcnt); 584 static void 585 tcp_rack_partialack(struct tcpcb *tp); 586 static int 587 rack_set_profile(struct tcp_rack *rack, int prof); 588 static void 589 rack_apply_deferred_options(struct tcp_rack *rack); 590 591 int32_t rack_clear_counter=0; 592 593 static uint64_t 594 rack_get_lt_bw(struct tcp_rack *rack) 595 { 596 struct timeval tv; 597 uint64_t tim, bytes; 598 599 tim = rack->r_ctl.lt_bw_time; 600 bytes = rack->r_ctl.lt_bw_bytes; 601 if (rack->lt_bw_up) { 602 /* Include all the current bytes too */ 603 microuptime(&tv); 604 bytes += (rack->rc_tp->snd_una - rack->r_ctl.lt_seq); 605 tim += (tcp_tv_to_lusec(&tv) - rack->r_ctl.lt_timemark); 606 } 607 if ((bytes != 0) && (tim != 0)) 608 return ((bytes * (uint64_t)1000000) / tim); 609 else 610 return (0); 611 } 612 613 static void 614 rack_swap_beta_values(struct tcp_rack *rack, uint8_t flex8) 615 { 616 struct sockopt sopt; 617 struct cc_newreno_opts opt; 618 struct tcpcb *tp; 619 uint32_t old_beta; 620 uint32_t old_beta_ecn; 621 int error = 0, failed = 0; 622 623 tp = rack->rc_tp; 624 if (tp->t_cc == NULL) { 625 /* Tcb is leaving */ 626 return; 627 } 628 rack->rc_pacing_cc_set = 1; 629 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 630 /* Not new-reno we can't play games with beta! */ 631 failed = 1; 632 goto out; 633 634 } 635 if (CC_ALGO(tp)->ctl_output == NULL) { 636 /* Huh, not using new-reno so no swaps.? */ 637 failed = 2; 638 goto out; 639 } 640 /* Get the current values out */ 641 sopt.sopt_valsize = sizeof(struct cc_newreno_opts); 642 sopt.sopt_dir = SOPT_GET; 643 opt.name = CC_NEWRENO_BETA; 644 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 645 if (error) { 646 failed = 3; 647 goto out; 648 } 649 old_beta = opt.val; 650 opt.name = CC_NEWRENO_BETA_ECN; 651 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 652 if (error) { 653 failed = 4; 654 goto out; 655 } 656 old_beta_ecn = opt.val; 657 658 /* Now lets set in the values we have stored */ 659 sopt.sopt_dir = SOPT_SET; 660 opt.name = CC_NEWRENO_BETA; 661 opt.val = rack->r_ctl.rc_saved_beta; 662 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 663 if (error) { 664 failed = 5; 665 goto out; 666 } 667 opt.name = CC_NEWRENO_BETA_ECN; 668 opt.val = rack->r_ctl.rc_saved_beta_ecn; 669 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 670 if (error) { 671 failed = 6; 672 goto out; 673 } 674 /* Save off the values for restoral */ 675 rack->r_ctl.rc_saved_beta = old_beta; 676 rack->r_ctl.rc_saved_beta_ecn = old_beta_ecn; 677 out: 678 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 679 union tcp_log_stackspecific log; 680 struct timeval tv; 681 struct newreno *ptr; 682 683 ptr = ((struct newreno *)tp->t_ccv.cc_data); 684 memset(&log, 0, sizeof(log)); 685 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 686 log.u_bbr.flex1 = ptr->beta; 687 log.u_bbr.flex2 = ptr->beta_ecn; 688 log.u_bbr.flex3 = ptr->newreno_flags; 689 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta; 690 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta_ecn; 691 log.u_bbr.flex6 = failed; 692 log.u_bbr.flex7 = rack->gp_ready; 693 log.u_bbr.flex7 <<= 1; 694 log.u_bbr.flex7 |= rack->use_fixed_rate; 695 log.u_bbr.flex7 <<= 1; 696 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; 697 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 698 log.u_bbr.flex8 = flex8; 699 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, error, 700 0, &log, false, NULL, NULL, 0, &tv); 701 } 702 } 703 704 static void 705 rack_set_cc_pacing(struct tcp_rack *rack) 706 { 707 if (rack->rc_pacing_cc_set) 708 
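		/*
		 * rc_pacing_cc_set being non-zero means a previous pass has
		 * already attempted the beta swap via rack_swap_beta_values(),
		 * so there is nothing more to do here.
		 */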
return; 709 /* 710 * Use the swap utility placing in 3 for flex8 to id a 711 * set of a new set of values. 712 */ 713 rack->rc_pacing_cc_set = 1; 714 rack_swap_beta_values(rack, 3); 715 } 716 717 static void 718 rack_undo_cc_pacing(struct tcp_rack *rack) 719 { 720 if (rack->rc_pacing_cc_set == 0) 721 return; 722 /* 723 * Use the swap utility placing in 4 for flex8 to id a 724 * restoral of the old values. 725 */ 726 rack->rc_pacing_cc_set = 0; 727 rack_swap_beta_values(rack, 4); 728 } 729 730 static void 731 rack_remove_pacing(struct tcp_rack *rack) 732 { 733 if (rack->rc_pacing_cc_set) 734 rack_undo_cc_pacing(rack); 735 if (rack->r_ctl.pacing_method & RACK_REG_PACING) 736 tcp_decrement_paced_conn(); 737 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) 738 tcp_dec_dgp_pacing_cnt(); 739 rack->rc_always_pace = 0; 740 rack->r_ctl.pacing_method = RACK_PACING_NONE; 741 rack->dgp_on = 0; 742 rack->rc_hybrid_mode = 0; 743 rack->use_fixed_rate = 0; 744 } 745 746 static void 747 rack_log_gpset(struct tcp_rack *rack, uint32_t seq_end, uint32_t ack_end_t, 748 uint32_t send_end_t, int line, uint8_t mode, struct rack_sendmap *rsm) 749 { 750 if (tcp_bblogging_on(rack->rc_tp) && (rack_verbose_logging != 0)) { 751 union tcp_log_stackspecific log; 752 struct timeval tv; 753 754 memset(&log, 0, sizeof(log)); 755 log.u_bbr.flex1 = seq_end; 756 log.u_bbr.flex2 = rack->rc_tp->gput_seq; 757 log.u_bbr.flex3 = ack_end_t; 758 log.u_bbr.flex4 = rack->rc_tp->gput_ts; 759 log.u_bbr.flex5 = send_end_t; 760 log.u_bbr.flex6 = rack->rc_tp->gput_ack; 761 log.u_bbr.flex7 = mode; 762 log.u_bbr.flex8 = 69; 763 log.u_bbr.rttProp = rack->r_ctl.rc_gp_cumack_ts; 764 log.u_bbr.delRate = rack->r_ctl.rc_gp_output_ts; 765 log.u_bbr.pkts_out = line; 766 log.u_bbr.cwnd_gain = rack->app_limited_needs_set; 767 log.u_bbr.pkt_epoch = rack->r_ctl.rc_app_limited_cnt; 768 log.u_bbr.epoch = rack->r_ctl.current_round; 769 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 770 if (rsm != NULL) { 771 log.u_bbr.applimited = rsm->r_start; 772 log.u_bbr.delivered = rsm->r_end; 773 log.u_bbr.epoch = rsm->r_flags; 774 } 775 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 776 TCP_LOG_EVENTP(rack->rc_tp, NULL, 777 &rack->rc_inp->inp_socket->so_rcv, 778 &rack->rc_inp->inp_socket->so_snd, 779 BBR_LOG_HPTSI_CALC, 0, 780 0, &log, false, &tv); 781 } 782 } 783 784 static int 785 sysctl_rack_clear(SYSCTL_HANDLER_ARGS) 786 { 787 uint32_t stat; 788 int32_t error; 789 790 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t)); 791 if (error || req->newptr == NULL) 792 return error; 793 794 error = SYSCTL_IN(req, &stat, sizeof(uint32_t)); 795 if (error) 796 return (error); 797 if (stat == 1) { 798 #ifdef INVARIANTS 799 printf("Clearing RACK counters\n"); 800 #endif 801 counter_u64_zero(rack_tlp_tot); 802 counter_u64_zero(rack_tlp_newdata); 803 counter_u64_zero(rack_tlp_retran); 804 counter_u64_zero(rack_tlp_retran_bytes); 805 counter_u64_zero(rack_to_tot); 806 counter_u64_zero(rack_saw_enobuf); 807 counter_u64_zero(rack_saw_enobuf_hw); 808 counter_u64_zero(rack_saw_enetunreach); 809 counter_u64_zero(rack_persists_sends); 810 counter_u64_zero(rack_total_bytes); 811 counter_u64_zero(rack_persists_acks); 812 counter_u64_zero(rack_persists_loss); 813 counter_u64_zero(rack_persists_lost_ends); 814 #ifdef INVARIANTS 815 counter_u64_zero(rack_adjust_map_bw); 816 #endif 817 counter_u64_zero(rack_to_alloc_hard); 818 counter_u64_zero(rack_to_alloc_emerg); 819 counter_u64_zero(rack_sack_proc_all); 820 counter_u64_zero(rack_fto_send); 821 
counter_u64_zero(rack_fto_rsm_send); 822 counter_u64_zero(rack_extended_rfo); 823 counter_u64_zero(rack_hw_pace_init_fail); 824 counter_u64_zero(rack_hw_pace_lost); 825 counter_u64_zero(rack_non_fto_send); 826 counter_u64_zero(rack_nfto_resend); 827 counter_u64_zero(rack_sack_proc_short); 828 counter_u64_zero(rack_sack_proc_restart); 829 counter_u64_zero(rack_to_alloc); 830 counter_u64_zero(rack_to_alloc_limited); 831 counter_u64_zero(rack_alloc_limited_conns); 832 counter_u64_zero(rack_split_limited); 833 counter_u64_zero(rack_rxt_clamps_cwnd); 834 counter_u64_zero(rack_rxt_clamps_cwnd_uniq); 835 counter_u64_zero(rack_multi_single_eq); 836 counter_u64_zero(rack_proc_non_comp_ack); 837 counter_u64_zero(rack_sack_attacks_detected); 838 counter_u64_zero(rack_sack_attacks_reversed); 839 counter_u64_zero(rack_sack_attacks_suspect); 840 counter_u64_zero(rack_sack_used_next_merge); 841 counter_u64_zero(rack_sack_used_prev_merge); 842 counter_u64_zero(rack_sack_splits); 843 counter_u64_zero(rack_sack_skipped_acked); 844 counter_u64_zero(rack_ack_total); 845 counter_u64_zero(rack_express_sack); 846 counter_u64_zero(rack_sack_total); 847 counter_u64_zero(rack_move_none); 848 counter_u64_zero(rack_move_some); 849 counter_u64_zero(rack_try_scwnd); 850 counter_u64_zero(rack_collapsed_win); 851 counter_u64_zero(rack_collapsed_win_rxt); 852 counter_u64_zero(rack_collapsed_win_seen); 853 counter_u64_zero(rack_collapsed_win_rxt_bytes); 854 } else if (stat == 2) { 855 #ifdef INVARIANTS 856 printf("Clearing RACK option array\n"); 857 #endif 858 COUNTER_ARRAY_ZERO(rack_opts_arry, RACK_OPTS_SIZE); 859 } else if (stat == 3) { 860 printf("Rack has no stats counters to clear (use 1 to clear all stats in sysctl node)\n"); 861 } else if (stat == 4) { 862 #ifdef INVARIANTS 863 printf("Clearing RACK out size array\n"); 864 #endif 865 COUNTER_ARRAY_ZERO(rack_out_size, TCP_MSS_ACCT_SIZE); 866 } 867 rack_clear_counter = 0; 868 return (0); 869 } 870 871 static void 872 rack_init_sysctls(void) 873 { 874 struct sysctl_oid *rack_counters; 875 struct sysctl_oid *rack_attack; 876 struct sysctl_oid *rack_pacing; 877 struct sysctl_oid *rack_timely; 878 struct sysctl_oid *rack_timers; 879 struct sysctl_oid *rack_tlp; 880 struct sysctl_oid *rack_misc; 881 struct sysctl_oid *rack_features; 882 struct sysctl_oid *rack_measure; 883 struct sysctl_oid *rack_probertt; 884 struct sysctl_oid *rack_hw_pacing; 885 886 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 887 SYSCTL_CHILDREN(rack_sysctl_root), 888 OID_AUTO, 889 "sack_attack", 890 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 891 "Rack Sack Attack Counters and Controls"); 892 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 893 SYSCTL_CHILDREN(rack_sysctl_root), 894 OID_AUTO, 895 "stats", 896 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 897 "Rack Counters"); 898 SYSCTL_ADD_S32(&rack_sysctl_ctx, 899 SYSCTL_CHILDREN(rack_sysctl_root), 900 OID_AUTO, "rate_sample_method", CTLFLAG_RW, 901 &rack_rate_sample_method , USE_RTT_LOW, 902 "What method should we use for rate sampling 0=high, 1=low "); 903 /* Probe rtt related controls */ 904 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 905 SYSCTL_CHILDREN(rack_sysctl_root), 906 OID_AUTO, 907 "probertt", 908 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 909 "ProbeRTT related Controls"); 910 SYSCTL_ADD_U16(&rack_sysctl_ctx, 911 SYSCTL_CHILDREN(rack_probertt), 912 OID_AUTO, "exit_per_hpb", CTLFLAG_RW, 913 &rack_atexit_prtt_hbp, 130, 914 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%"); 915 SYSCTL_ADD_U16(&rack_sysctl_ctx, 916 
SYSCTL_CHILDREN(rack_probertt), 917 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW, 918 &rack_atexit_prtt, 130, 919 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%"); 920 SYSCTL_ADD_U16(&rack_sysctl_ctx, 921 SYSCTL_CHILDREN(rack_probertt), 922 OID_AUTO, "gp_per_mul", CTLFLAG_RW, 923 &rack_per_of_gp_probertt, 60, 924 "What percentage of goodput do we pace at in probertt"); 925 SYSCTL_ADD_U16(&rack_sysctl_ctx, 926 SYSCTL_CHILDREN(rack_probertt), 927 OID_AUTO, "gp_per_reduce", CTLFLAG_RW, 928 &rack_per_of_gp_probertt_reduce, 10, 929 "What percentage of goodput do we reduce every gp_srtt"); 930 SYSCTL_ADD_U16(&rack_sysctl_ctx, 931 SYSCTL_CHILDREN(rack_probertt), 932 OID_AUTO, "gp_per_low", CTLFLAG_RW, 933 &rack_per_of_gp_lowthresh, 40, 934 "What percentage of goodput do we allow the multiplier to fall to"); 935 SYSCTL_ADD_U32(&rack_sysctl_ctx, 936 SYSCTL_CHILDREN(rack_probertt), 937 OID_AUTO, "time_between", CTLFLAG_RW, 938 &rack_time_between_probertt, 96000000, 939 "How many useconds between the lowest rtt falling must past before we enter probertt"); 940 SYSCTL_ADD_U32(&rack_sysctl_ctx, 941 SYSCTL_CHILDREN(rack_probertt), 942 OID_AUTO, "safety", CTLFLAG_RW, 943 &rack_probe_rtt_safety_val, 2000000, 944 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)"); 945 SYSCTL_ADD_U32(&rack_sysctl_ctx, 946 SYSCTL_CHILDREN(rack_probertt), 947 OID_AUTO, "sets_cwnd", CTLFLAG_RW, 948 &rack_probe_rtt_sets_cwnd, 0, 949 "Do we set the cwnd too (if always_lower is on)"); 950 SYSCTL_ADD_U32(&rack_sysctl_ctx, 951 SYSCTL_CHILDREN(rack_probertt), 952 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW, 953 &rack_max_drain_wait, 2, 954 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal"); 955 SYSCTL_ADD_U32(&rack_sysctl_ctx, 956 SYSCTL_CHILDREN(rack_probertt), 957 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW, 958 &rack_must_drain, 1, 959 "We must drain this many gp_srtt's waiting for flight to reach goal"); 960 SYSCTL_ADD_U32(&rack_sysctl_ctx, 961 SYSCTL_CHILDREN(rack_probertt), 962 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW, 963 &rack_probertt_use_min_rtt_entry, 1, 964 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); 965 SYSCTL_ADD_U32(&rack_sysctl_ctx, 966 SYSCTL_CHILDREN(rack_probertt), 967 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW, 968 &rack_probertt_use_min_rtt_exit, 0, 969 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); 970 SYSCTL_ADD_U32(&rack_sysctl_ctx, 971 SYSCTL_CHILDREN(rack_probertt), 972 OID_AUTO, "length_div", CTLFLAG_RW, 973 &rack_probertt_gpsrtt_cnt_div, 0, 974 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)"); 975 SYSCTL_ADD_U32(&rack_sysctl_ctx, 976 SYSCTL_CHILDREN(rack_probertt), 977 OID_AUTO, "length_mul", CTLFLAG_RW, 978 &rack_probertt_gpsrtt_cnt_mul, 0, 979 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)"); 980 SYSCTL_ADD_U32(&rack_sysctl_ctx, 981 SYSCTL_CHILDREN(rack_probertt), 982 OID_AUTO, "holdtim_at_target", CTLFLAG_RW, 983 &rack_min_probertt_hold, 200000, 984 "What is the minimum time we hold probertt at target"); 985 SYSCTL_ADD_U32(&rack_sysctl_ctx, 986 SYSCTL_CHILDREN(rack_probertt), 987 OID_AUTO, "filter_life", CTLFLAG_RW, 988 &rack_probertt_filter_life, 10000000, 989 "What is the time for the filters life in useconds"); 990 SYSCTL_ADD_U32(&rack_sysctl_ctx, 991 SYSCTL_CHILDREN(rack_probertt), 992 OID_AUTO, "lower_within", CTLFLAG_RW, 993 
&rack_probertt_lower_within, 10, 994 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); 995 SYSCTL_ADD_U32(&rack_sysctl_ctx, 996 SYSCTL_CHILDREN(rack_probertt), 997 OID_AUTO, "must_move", CTLFLAG_RW, 998 &rack_min_rtt_movement, 250, 999 "How much is the minimum movement in rtt to count as a drop for probertt purposes"); 1000 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1001 SYSCTL_CHILDREN(rack_probertt), 1002 OID_AUTO, "clear_is_cnts", CTLFLAG_RW, 1003 &rack_probertt_clear_is, 1, 1004 "Do we clear I/S counts on exiting probe-rtt"); 1005 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1006 SYSCTL_CHILDREN(rack_probertt), 1007 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW, 1008 &rack_max_drain_hbp, 1, 1009 "How many extra drain gpsrtt's do we get in highly buffered paths"); 1010 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1011 SYSCTL_CHILDREN(rack_probertt), 1012 OID_AUTO, "hbp_threshold", CTLFLAG_RW, 1013 &rack_hbp_thresh, 3, 1014 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); 1015 /* Pacing related sysctls */ 1016 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1017 SYSCTL_CHILDREN(rack_sysctl_root), 1018 OID_AUTO, 1019 "pacing", 1020 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1021 "Pacing related Controls"); 1022 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1023 SYSCTL_CHILDREN(rack_pacing), 1024 OID_AUTO, "pcm_enabled", CTLFLAG_RW, 1025 &rack_pcm_is_enabled, 1, 1026 "Do we by default do PCM measurements?"); 1027 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1028 SYSCTL_CHILDREN(rack_pacing), 1029 OID_AUTO, "pcm_rnds", CTLFLAG_RW, 1030 &rack_pcm_every_n_rounds, 100, 1031 "How many rounds before we need to do a PCM measurement"); 1032 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1033 SYSCTL_CHILDREN(rack_pacing), 1034 OID_AUTO, "pcm_blast", CTLFLAG_RW, 1035 &rack_pcm_blast, 0, 1036 "Blast out the full cwnd/rwnd when doing a PCM measurement"); 1037 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1038 SYSCTL_CHILDREN(rack_pacing), 1039 OID_AUTO, "rnd_gp_gain", CTLFLAG_RW, 1040 &rack_gp_gain_req, 1200, 1041 "How much do we have to increase the GP to record the round 1200 = 120.0"); 1042 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1043 SYSCTL_CHILDREN(rack_pacing), 1044 OID_AUTO, "dgp_out_of_ss_at", CTLFLAG_RW, 1045 &rack_rnd_cnt_req, 0x10005, 1046 "How many rounds less than rnd_gp_gain will drop us out of SS"); 1047 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1048 SYSCTL_CHILDREN(rack_pacing), 1049 OID_AUTO, "no_timely", CTLFLAG_RW, 1050 &rack_timely_off, 0, 1051 "Do we not use timely in DGP?"); 1052 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1053 SYSCTL_CHILDREN(rack_pacing), 1054 OID_AUTO, "fillcw", CTLFLAG_RW, 1055 &rack_fill_cw_state, 0, 1056 "Enable fillcw on new connections (default=0 off)?"); 1057 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1058 SYSCTL_CHILDREN(rack_pacing), 1059 OID_AUTO, "min_burst", CTLFLAG_RW, 1060 &rack_pacing_min_seg, 0, 1061 "What is the min burst size for pacing (0 disables)?"); 1062 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1063 SYSCTL_CHILDREN(rack_pacing), 1064 OID_AUTO, "divisor", CTLFLAG_RW, 1065 &rack_default_pacing_divisor, 250, 1066 "What is the default divisor given to the rl code?"); 1067 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1068 SYSCTL_CHILDREN(rack_pacing), 1069 OID_AUTO, "fillcw_max_mult", CTLFLAG_RW, 1070 &rack_bw_multipler, 0, 1071 "What is the limit multiplier of the current gp_est that fillcw can increase the b/w too, 200 == 200% (0 = off)?"); 1072 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1073 SYSCTL_CHILDREN(rack_pacing), 1074 OID_AUTO, "max_pace_over", CTLFLAG_RW, 1075 &rack_max_per_above, 30, 1076 "What is the maximum allowable percentage that we can 
pace above (so 30 = 130% of our goal)"); 1077 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1078 SYSCTL_CHILDREN(rack_pacing), 1079 OID_AUTO, "allow1mss", CTLFLAG_RW, 1080 &rack_pace_one_seg, 0, 1081 "Do we allow low b/w pacing of 1MSS instead of two (1.2Meg and less)?"); 1082 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1083 SYSCTL_CHILDREN(rack_pacing), 1084 OID_AUTO, "limit_wsrtt", CTLFLAG_RW, 1085 &rack_limit_time_with_srtt, 0, 1086 "Do we limit pacing time based on srtt"); 1087 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1088 SYSCTL_CHILDREN(rack_pacing), 1089 OID_AUTO, "gp_per_ss", CTLFLAG_RW, 1090 &rack_per_of_gp_ss, 250, 1091 "If non zero, what percentage of goodput to pace at in slow start"); 1092 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1093 SYSCTL_CHILDREN(rack_pacing), 1094 OID_AUTO, "gp_per_ca", CTLFLAG_RW, 1095 &rack_per_of_gp_ca, 150, 1096 "If non zero, what percentage of goodput to pace at in congestion avoidance"); 1097 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1098 SYSCTL_CHILDREN(rack_pacing), 1099 OID_AUTO, "gp_per_rec", CTLFLAG_RW, 1100 &rack_per_of_gp_rec, 200, 1101 "If non zero, what percentage of goodput to pace at in recovery"); 1102 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1103 SYSCTL_CHILDREN(rack_pacing), 1104 OID_AUTO, "pace_max_seg", CTLFLAG_RW, 1105 &rack_hptsi_segments, 40, 1106 "What size is the max for TSO segments in pacing and burst mitigation"); 1107 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1108 SYSCTL_CHILDREN(rack_pacing), 1109 OID_AUTO, "burst_reduces", CTLFLAG_RW, 1110 &rack_pacing_delay_reduction, 4, 1111 "When doing only burst mitigation what is the reduce divisor"); 1112 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1113 SYSCTL_CHILDREN(rack_sysctl_root), 1114 OID_AUTO, "use_pacing", CTLFLAG_RW, 1115 &rack_pace_every_seg, 0, 1116 "If set we use pacing, if clear we use only the original burst mitigation"); 1117 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1118 SYSCTL_CHILDREN(rack_pacing), 1119 OID_AUTO, "rate_cap", CTLFLAG_RW, 1120 &rack_bw_rate_cap, 0, 1121 "If set we apply this value to the absolute rate cap used by pacing"); 1122 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1123 SYSCTL_CHILDREN(rack_pacing), 1124 OID_AUTO, "fillcw_cap", CTLFLAG_RW, 1125 &rack_fillcw_bw_cap, 3750000, 1126 "Do we have an absolute cap on the amount of b/w fillcw can specify (0 = no)?"); 1127 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1128 SYSCTL_CHILDREN(rack_sysctl_root), 1129 OID_AUTO, "req_measure_cnt", CTLFLAG_RW, 1130 &rack_req_measurements, 1, 1131 "If doing dynamic pacing, how many measurements must be in before we start pacing?"); 1132 /* Hardware pacing */ 1133 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1134 SYSCTL_CHILDREN(rack_sysctl_root), 1135 OID_AUTO, 1136 "hdwr_pacing", 1137 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1138 "Pacing related Controls"); 1139 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1140 SYSCTL_CHILDREN(rack_hw_pacing), 1141 OID_AUTO, "rwnd_factor", CTLFLAG_RW, 1142 &rack_hw_rwnd_factor, 2, 1143 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?"); 1144 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1145 SYSCTL_CHILDREN(rack_hw_pacing), 1146 OID_AUTO, "precheck", CTLFLAG_RW, 1147 &rack_hw_check_queue, 0, 1148 "Do we always precheck the hdwr pacing queue to avoid ENOBUF's?"); 1149 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1150 SYSCTL_CHILDREN(rack_hw_pacing), 1151 OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW, 1152 &rack_enobuf_hw_boost_mult, 0, 1153 "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?"); 1154 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1155 SYSCTL_CHILDREN(rack_hw_pacing), 1156 OID_AUTO, 
"pace_enobuf_max", CTLFLAG_RW, 1157 &rack_enobuf_hw_max, 2, 1158 "What is the max boost the pacing time if we see a ENOBUFS?"); 1159 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1160 SYSCTL_CHILDREN(rack_hw_pacing), 1161 OID_AUTO, "pace_enobuf_min", CTLFLAG_RW, 1162 &rack_enobuf_hw_min, 2, 1163 "What is the min boost the pacing time if we see a ENOBUFS?"); 1164 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1165 SYSCTL_CHILDREN(rack_hw_pacing), 1166 OID_AUTO, "enable", CTLFLAG_RW, 1167 &rack_enable_hw_pacing, 0, 1168 "Should RACK attempt to use hw pacing?"); 1169 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1170 SYSCTL_CHILDREN(rack_hw_pacing), 1171 OID_AUTO, "rate_cap", CTLFLAG_RW, 1172 &rack_hw_rate_caps, 0, 1173 "Does the highest hardware pacing rate cap the rate we will send at??"); 1174 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1175 SYSCTL_CHILDREN(rack_hw_pacing), 1176 OID_AUTO, "uncap_per", CTLFLAG_RW, 1177 &rack_hw_rate_cap_per, 0, 1178 "If you go over b/w by this amount you will be uncapped (0 = never)"); 1179 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1180 SYSCTL_CHILDREN(rack_hw_pacing), 1181 OID_AUTO, "rate_min", CTLFLAG_RW, 1182 &rack_hw_rate_min, 0, 1183 "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?"); 1184 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1185 SYSCTL_CHILDREN(rack_hw_pacing), 1186 OID_AUTO, "rate_to_low", CTLFLAG_RW, 1187 &rack_hw_rate_to_low, 0, 1188 "If we fall below this rate, dis-engage hw pacing?"); 1189 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1190 SYSCTL_CHILDREN(rack_hw_pacing), 1191 OID_AUTO, "up_only", CTLFLAG_RW, 1192 &rack_hw_up_only, 0, 1193 "Do we allow hw pacing to lower the rate selected?"); 1194 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1195 SYSCTL_CHILDREN(rack_sysctl_root), 1196 OID_AUTO, 1197 "timely", 1198 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1199 "Rack Timely RTT Controls"); 1200 /* Timely based GP dynmics */ 1201 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1202 SYSCTL_CHILDREN(rack_timely), 1203 OID_AUTO, "upper", CTLFLAG_RW, 1204 &rack_gp_per_bw_mul_up, 2, 1205 "Rack timely upper range for equal b/w (in percentage)"); 1206 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1207 SYSCTL_CHILDREN(rack_timely), 1208 OID_AUTO, "lower", CTLFLAG_RW, 1209 &rack_gp_per_bw_mul_down, 4, 1210 "Rack timely lower range for equal b/w (in percentage)"); 1211 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1212 SYSCTL_CHILDREN(rack_timely), 1213 OID_AUTO, "rtt_max_mul", CTLFLAG_RW, 1214 &rack_gp_rtt_maxmul, 3, 1215 "Rack timely multiplier of lowest rtt for rtt_max"); 1216 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1217 SYSCTL_CHILDREN(rack_timely), 1218 OID_AUTO, "rtt_min_div", CTLFLAG_RW, 1219 &rack_gp_rtt_mindiv, 4, 1220 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1221 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1222 SYSCTL_CHILDREN(rack_timely), 1223 OID_AUTO, "rtt_min_mul", CTLFLAG_RW, 1224 &rack_gp_rtt_minmul, 1, 1225 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1226 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1227 SYSCTL_CHILDREN(rack_timely), 1228 OID_AUTO, "decrease", CTLFLAG_RW, 1229 &rack_gp_decrease_per, 80, 1230 "Rack timely Beta value 80 = .8 (scaled by 100)"); 1231 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1232 SYSCTL_CHILDREN(rack_timely), 1233 OID_AUTO, "increase", CTLFLAG_RW, 1234 &rack_gp_increase_per, 2, 1235 "Rack timely increase perentage of our GP multiplication factor"); 1236 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1237 SYSCTL_CHILDREN(rack_timely), 1238 OID_AUTO, "lowerbound", CTLFLAG_RW, 1239 &rack_per_lower_bound, 50, 1240 "Rack timely lowest percentage we allow GP 
multiplier to fall to"); 1241 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1242 SYSCTL_CHILDREN(rack_timely), 1243 OID_AUTO, "p5_upper", CTLFLAG_RW, 1244 &rack_gain_p5_ub, 250, 1245 "Profile 5 upper bound to timely gain"); 1246 1247 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1248 SYSCTL_CHILDREN(rack_timely), 1249 OID_AUTO, "upperboundss", CTLFLAG_RW, 1250 &rack_per_upper_bound_ss, 0, 1251 "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)"); 1252 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1253 SYSCTL_CHILDREN(rack_timely), 1254 OID_AUTO, "upperboundca", CTLFLAG_RW, 1255 &rack_per_upper_bound_ca, 0, 1256 "Rack timely highest percentage we allow GP multiplier to CA raise to (0 is no upperbound)"); 1257 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1258 SYSCTL_CHILDREN(rack_timely), 1259 OID_AUTO, "dynamicgp", CTLFLAG_RW, 1260 &rack_do_dyn_mul, 0, 1261 "Rack timely do we enable dynmaic timely goodput by default"); 1262 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1263 SYSCTL_CHILDREN(rack_timely), 1264 OID_AUTO, "no_rec_red", CTLFLAG_RW, 1265 &rack_gp_no_rec_chg, 1, 1266 "Rack timely do we prohibit the recovery multiplier from being lowered"); 1267 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1268 SYSCTL_CHILDREN(rack_timely), 1269 OID_AUTO, "red_clear_cnt", CTLFLAG_RW, 1270 &rack_timely_dec_clear, 6, 1271 "Rack timely what threshold do we count to before another boost during b/w decent"); 1272 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1273 SYSCTL_CHILDREN(rack_timely), 1274 OID_AUTO, "max_push_rise", CTLFLAG_RW, 1275 &rack_timely_max_push_rise, 3, 1276 "Rack timely how many times do we push up with b/w increase"); 1277 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1278 SYSCTL_CHILDREN(rack_timely), 1279 OID_AUTO, "max_push_drop", CTLFLAG_RW, 1280 &rack_timely_max_push_drop, 3, 1281 "Rack timely how many times do we push back on b/w decent"); 1282 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1283 SYSCTL_CHILDREN(rack_timely), 1284 OID_AUTO, "min_segs", CTLFLAG_RW, 1285 &rack_timely_min_segs, 4, 1286 "Rack timely when setting the cwnd what is the min num segments"); 1287 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1288 SYSCTL_CHILDREN(rack_timely), 1289 OID_AUTO, "nonstop", CTLFLAG_RW, 1290 &rack_timely_no_stopping, 0, 1291 "Rack timely don't stop increase"); 1292 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1293 SYSCTL_CHILDREN(rack_timely), 1294 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW, 1295 &rack_down_raise_thresh, 100, 1296 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)"); 1297 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1298 SYSCTL_CHILDREN(rack_timely), 1299 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW, 1300 &rack_req_segs, 1, 1301 "Bottom dragging if not these many segments outstanding and room"); 1302 1303 /* TLP and Rack related parameters */ 1304 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1305 SYSCTL_CHILDREN(rack_sysctl_root), 1306 OID_AUTO, 1307 "tlp", 1308 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1309 "TLP and Rack related Controls"); 1310 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1311 SYSCTL_CHILDREN(rack_tlp), 1312 OID_AUTO, "use_rrr", CTLFLAG_RW, 1313 &use_rack_rr, 1, 1314 "Do we use Rack Rapid Recovery"); 1315 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1316 SYSCTL_CHILDREN(rack_tlp), 1317 OID_AUTO, "post_rec_labc", CTLFLAG_RW, 1318 &rack_max_abc_post_recovery, 2, 1319 "Since we do early recovery, do we override the l_abc to a value, if so what?"); 1320 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1321 SYSCTL_CHILDREN(rack_tlp), 1322 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW, 1323 &rack_non_rxt_use_cr, 0, 1324 "Do we use ss/ca rate if in recovery we are transmitting a new data 
chunk"); 1325 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1326 SYSCTL_CHILDREN(rack_tlp), 1327 OID_AUTO, "tlpmethod", CTLFLAG_RW, 1328 &rack_tlp_threshold_use, TLP_USE_TWO_ONE, 1329 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); 1330 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1331 SYSCTL_CHILDREN(rack_tlp), 1332 OID_AUTO, "limit", CTLFLAG_RW, 1333 &rack_tlp_limit, 2, 1334 "How many TLP's can be sent without sending new data"); 1335 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1336 SYSCTL_CHILDREN(rack_tlp), 1337 OID_AUTO, "use_greater", CTLFLAG_RW, 1338 &rack_tlp_use_greater, 1, 1339 "Should we use the rack_rtt time if its greater than srtt"); 1340 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1341 SYSCTL_CHILDREN(rack_tlp), 1342 OID_AUTO, "tlpminto", CTLFLAG_RW, 1343 &rack_tlp_min, 10000, 1344 "TLP minimum timeout per the specification (in microseconds)"); 1345 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1346 SYSCTL_CHILDREN(rack_tlp), 1347 OID_AUTO, "send_oldest", CTLFLAG_RW, 1348 &rack_always_send_oldest, 0, 1349 "Should we always send the oldest TLP and RACK-TLP"); 1350 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1351 SYSCTL_CHILDREN(rack_tlp), 1352 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW, 1353 &rack_lower_cwnd_at_tlp, 0, 1354 "When a TLP completes a retran should we enter recovery"); 1355 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1356 SYSCTL_CHILDREN(rack_tlp), 1357 OID_AUTO, "reorder_thresh", CTLFLAG_RW, 1358 &rack_reorder_thresh, 2, 1359 "What factor for rack will be added when seeing reordering (shift right)"); 1360 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1361 SYSCTL_CHILDREN(rack_tlp), 1362 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW, 1363 &rack_tlp_thresh, 1, 1364 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); 1365 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1366 SYSCTL_CHILDREN(rack_tlp), 1367 OID_AUTO, "reorder_fade", CTLFLAG_RW, 1368 &rack_reorder_fade, 60000000, 1369 "Does reorder detection fade, if so how many microseconds (0 means never)"); 1370 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1371 SYSCTL_CHILDREN(rack_tlp), 1372 OID_AUTO, "pktdelay", CTLFLAG_RW, 1373 &rack_pkt_delay, 1000, 1374 "Extra RACK time (in microseconds) besides reordering thresh"); 1375 1376 /* Timer related controls */ 1377 rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1378 SYSCTL_CHILDREN(rack_sysctl_root), 1379 OID_AUTO, 1380 "timers", 1381 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1382 "Timer related controls"); 1383 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1384 SYSCTL_CHILDREN(rack_timers), 1385 OID_AUTO, "reset_ssth_rec_rto", CTLFLAG_RW, 1386 &rack_ssthresh_rest_rto_rec, 0, 1387 "When doing recovery -> rto -> recovery do we reset SSthresh?"); 1388 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1389 SYSCTL_CHILDREN(rack_timers), 1390 OID_AUTO, "scoreboard_thresh", CTLFLAG_RW, 1391 &rack_rxt_scoreboard_clear_thresh, 2, 1392 "How many RTO's are allowed before we clear the scoreboard"); 1393 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1394 SYSCTL_CHILDREN(rack_timers), 1395 OID_AUTO, "honor_hpts_min", CTLFLAG_RW, 1396 &rack_honors_hpts_min_to, 1, 1397 "Do rack pacing timers honor hpts min timeout"); 1398 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1399 SYSCTL_CHILDREN(rack_timers), 1400 OID_AUTO, "hpts_max_reduce", CTLFLAG_RW, 1401 &rack_max_reduce, 10, 1402 "Max percentage we will reduce pacing delay by for pacing when we are behind"); 1403 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1404 SYSCTL_CHILDREN(rack_timers), 1405 OID_AUTO, "persmin", CTLFLAG_RW, 1406 &rack_persist_min, 250000, 1407 "What is the minimum time in microseconds between persists"); 1408 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1409 
SYSCTL_CHILDREN(rack_timers), 1410 OID_AUTO, "persmax", CTLFLAG_RW, 1411 &rack_persist_max, 2000000, 1412 "What is the largest delay in microseconds between persists"); 1413 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1414 SYSCTL_CHILDREN(rack_timers), 1415 OID_AUTO, "delayed_ack", CTLFLAG_RW, 1416 &rack_delayed_ack_time, 40000, 1417 "Delayed ack time (40ms in microseconds)"); 1418 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1419 SYSCTL_CHILDREN(rack_timers), 1420 OID_AUTO, "minrto", CTLFLAG_RW, 1421 &rack_rto_min, 30000, 1422 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP"); 1423 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1424 SYSCTL_CHILDREN(rack_timers), 1425 OID_AUTO, "maxrto", CTLFLAG_RW, 1426 &rack_rto_max, 4000000, 1427 "Maximum RTO in microseconds -- should be at least as large as min_rto"); 1428 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1429 SYSCTL_CHILDREN(rack_timers), 1430 OID_AUTO, "minto", CTLFLAG_RW, 1431 &rack_min_to, 1000, 1432 "Minimum rack timeout in microseconds"); 1433 /* Measure controls */ 1434 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1435 SYSCTL_CHILDREN(rack_sysctl_root), 1436 OID_AUTO, 1437 "measure", 1438 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1439 "Measure related controls"); 1440 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1441 SYSCTL_CHILDREN(rack_measure), 1442 OID_AUTO, "wma_divisor", CTLFLAG_RW, 1443 &rack_wma_divisor, 8, 1444 "When doing b/w calculation what is the divisor for the WMA"); 1445 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1446 SYSCTL_CHILDREN(rack_measure), 1447 OID_AUTO, "end_cwnd", CTLFLAG_RW, 1448 &rack_cwnd_block_ends_measure, 0, 1449 "Does a cwnd just-return end the measurement window (app limited)"); 1450 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1451 SYSCTL_CHILDREN(rack_measure), 1452 OID_AUTO, "end_rwnd", CTLFLAG_RW, 1453 &rack_rwnd_block_ends_measure, 0, 1454 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); 1455 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1456 SYSCTL_CHILDREN(rack_measure), 1457 OID_AUTO, "min_target", CTLFLAG_RW, 1458 &rack_def_data_window, 20, 1459 "What is the minimum target window (in mss) for a GP measurement"); 1460 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1461 SYSCTL_CHILDREN(rack_measure), 1462 OID_AUTO, "goal_bdp", CTLFLAG_RW, 1463 &rack_goal_bdp, 2, 1464 "What is the goal BDP to measure"); 1465 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1466 SYSCTL_CHILDREN(rack_measure), 1467 OID_AUTO, "min_srtts", CTLFLAG_RW, 1468 &rack_min_srtts, 1, 1469 "What is the minimum number of SRTTs we must see for a GP measurement"); 1470 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1471 SYSCTL_CHILDREN(rack_measure), 1472 OID_AUTO, "min_measure_tim", CTLFLAG_RW, 1473 &rack_min_measure_usec, 0, 1474 "What is the minimum time for a measurement (if 0, this is off)"); 1475 /* Features */ 1476 rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1477 SYSCTL_CHILDREN(rack_sysctl_root), 1478 OID_AUTO, 1479 "features", 1480 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1481 "Feature controls"); 1482 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1483 SYSCTL_CHILDREN(rack_features), 1484 OID_AUTO, "hybrid_set_maxseg", CTLFLAG_RW, 1485 &rack_hybrid_allow_set_maxseg, 0, 1486 "Should hybrid pacing allow the setmss command"); 1487 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1488 SYSCTL_CHILDREN(rack_features), 1489 OID_AUTO, "cmpack", CTLFLAG_RW, 1490 &rack_use_cmp_acks, 1, 1491 "Should RACK have LRO send compressed acks"); 1492 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1493 SYSCTL_CHILDREN(rack_features), 1494 OID_AUTO, "fsb", CTLFLAG_RW, 1495 &rack_use_fsb, 1, 1496 "Should RACK use the fast send block?"); 1497 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1498
SYSCTL_CHILDREN(rack_features), 1499 OID_AUTO, "rfo", CTLFLAG_RW, 1500 &rack_use_rfo, 1, 1501 "Should RACK use rack_fast_output()?"); 1502 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1503 SYSCTL_CHILDREN(rack_features), 1504 OID_AUTO, "rsmrfo", CTLFLAG_RW, 1505 &rack_use_rsm_rfo, 1, 1506 "Should RACK use rack_fast_rsm_output()?"); 1507 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1508 SYSCTL_CHILDREN(rack_features), 1509 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW, 1510 &rack_enable_mqueue_for_nonpaced, 0, 1511 "Should RACK use mbuf queuing for non-paced connections"); 1512 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1513 SYSCTL_CHILDREN(rack_features), 1514 OID_AUTO, "hystartplusplus", CTLFLAG_RW, 1515 &rack_do_hystart, 0, 1516 "Should RACK enable HyStart++ on connections?"); 1517 /* Misc rack controls */ 1518 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1519 SYSCTL_CHILDREN(rack_sysctl_root), 1520 OID_AUTO, 1521 "misc", 1522 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1523 "Misc related controls"); 1524 #ifdef TCP_ACCOUNTING 1525 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1526 SYSCTL_CHILDREN(rack_misc), 1527 OID_AUTO, "tcp_acct", CTLFLAG_RW, 1528 &rack_tcp_accounting, 0, 1529 "Should we turn on TCP accounting for all rack sessions?"); 1530 #endif 1531 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1532 SYSCTL_CHILDREN(rack_misc), 1533 OID_AUTO, "dnd", CTLFLAG_RW, 1534 &rack_dnd_default, 0, 1535 "Do not disturb default for rack_rrr = 3"); 1536 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1537 SYSCTL_CHILDREN(rack_misc), 1538 OID_AUTO, "sad_seg_per", CTLFLAG_RW, 1539 &sad_seg_size_per, 800, 1540 "Percentage of segment size needed in a sack 800 = 80.0?"); 1541 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1542 SYSCTL_CHILDREN(rack_misc), 1543 OID_AUTO, "rxt_controls", CTLFLAG_RW, 1544 &rack_rxt_controls, 0, 1545 "Retransmit sending size controls (valid values 0, 1, 2 default=1)?"); 1546 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1547 SYSCTL_CHILDREN(rack_misc), 1548 OID_AUTO, "rack_hibeta", CTLFLAG_RW, 1549 &rack_hibeta_setting, 0, 1550 "Do we ue a high beta (80 instead of 50)?"); 1551 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1552 SYSCTL_CHILDREN(rack_misc), 1553 OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW, 1554 &rack_apply_rtt_with_reduced_conf, 0, 1555 "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?"); 1556 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1557 SYSCTL_CHILDREN(rack_misc), 1558 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW, 1559 &rack_dsack_std_based, 3, 1560 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?"); 1561 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1562 SYSCTL_CHILDREN(rack_misc), 1563 OID_AUTO, "prr_addback_max", CTLFLAG_RW, 1564 &rack_prr_addbackmax, 2, 1565 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?"); 1566 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1567 SYSCTL_CHILDREN(rack_misc), 1568 OID_AUTO, "stats_gets_ms", CTLFLAG_RW, 1569 &rack_stats_gets_ms_rtt, 1, 1570 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); 1571 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1572 SYSCTL_CHILDREN(rack_misc), 1573 OID_AUTO, "clientlowbuf", CTLFLAG_RW, 1574 &rack_client_low_buf, 0, 1575 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?"); 1576 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1577 SYSCTL_CHILDREN(rack_misc), 1578 OID_AUTO, "defprofile", CTLFLAG_RW, 1579 &rack_def_profile, 0, 1580 "Should RACK use a default profile (0=no, num == profile num)?"); 1581 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1582 
SYSCTL_CHILDREN(rack_misc), 1583 OID_AUTO, "shared_cwnd", CTLFLAG_RW, 1584 &rack_enable_shared_cwnd, 1, 1585 "Should RACK try to use the shared cwnd on connections where allowed"); 1586 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1587 SYSCTL_CHILDREN(rack_misc), 1588 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW, 1589 &rack_limits_scwnd, 1, 1590 "Should RACK place low end time limits on the shared cwnd feature"); 1591 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1592 SYSCTL_CHILDREN(rack_misc), 1593 OID_AUTO, "no_prr", CTLFLAG_RW, 1594 &rack_disable_prr, 0, 1595 "Should RACK not use prr and only pace (must have pacing on)"); 1596 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1597 SYSCTL_CHILDREN(rack_misc), 1598 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1599 &rack_verbose_logging, 0, 1600 "Should RACK black box logging be verbose"); 1601 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1602 SYSCTL_CHILDREN(rack_misc), 1603 OID_AUTO, "data_after_close", CTLFLAG_RW, 1604 &rack_ignore_data_after_close, 1, 1605 "Do we hold off sending a RST until all pending data is ack'd"); 1606 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1607 SYSCTL_CHILDREN(rack_misc), 1608 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1609 &rack_sack_not_required, 1, 1610 "Do we allow rack to run on connections not supporting SACK"); 1611 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1612 SYSCTL_CHILDREN(rack_misc), 1613 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 1614 &rack_send_a_lot_in_prr, 1, 1615 "Send a lot in prr"); 1616 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1617 SYSCTL_CHILDREN(rack_misc), 1618 OID_AUTO, "autoscale", CTLFLAG_RW, 1619 &rack_autosndbuf_inc, 20, 1620 "What percentage should rack scale up its snd buffer by?"); 1621 1622 1623 /* Sack Attacker detection stuff */ 1624 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1625 SYSCTL_CHILDREN(rack_attack), 1626 OID_AUTO, "merge_out", CTLFLAG_RW, 1627 &rack_merge_out_sacks_on_attack, 0, 1628 "Do we merge the sendmap when we decide we are being attacked?"); 1629 1630 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1631 SYSCTL_CHILDREN(rack_attack), 1632 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 1633 &rack_highest_sack_thresh_seen, 0, 1634 "Highest sack to ack ratio seen"); 1635 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1636 SYSCTL_CHILDREN(rack_attack), 1637 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW, 1638 &rack_highest_move_thresh_seen, 0, 1639 "Highest move to non-move ratio seen"); 1640 rack_ack_total = counter_u64_alloc(M_WAITOK); 1641 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1642 SYSCTL_CHILDREN(rack_attack), 1643 OID_AUTO, "acktotal", CTLFLAG_RD, 1644 &rack_ack_total, 1645 "Total number of Ack's"); 1646 rack_express_sack = counter_u64_alloc(M_WAITOK); 1647 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1648 SYSCTL_CHILDREN(rack_attack), 1649 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1650 &rack_express_sack, 1651 "Total expresss number of Sack's"); 1652 rack_sack_total = counter_u64_alloc(M_WAITOK); 1653 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1654 SYSCTL_CHILDREN(rack_attack), 1655 OID_AUTO, "sacktotal", CTLFLAG_RD, 1656 &rack_sack_total, 1657 "Total number of SACKs"); 1658 rack_move_none = counter_u64_alloc(M_WAITOK); 1659 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1660 SYSCTL_CHILDREN(rack_attack), 1661 OID_AUTO, "move_none", CTLFLAG_RD, 1662 &rack_move_none, 1663 "Total number of SACK index reuse of positions under threshold"); 1664 rack_move_some = counter_u64_alloc(M_WAITOK); 1665 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1666 SYSCTL_CHILDREN(rack_attack), 1667 OID_AUTO, "move_some", CTLFLAG_RD, 1668 &rack_move_some, 1669 "Total number of SACK index reuse of positions over threshold"); 1670 
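	/*
	 * Note on the pattern used for every statistic in this function: a
	 * per-CPU counter is allocated with counter_u64_alloc(M_WAITOK) and
	 * then exported read-only with SYSCTL_ADD_COUNTER_U64 under the node
	 * it belongs to; the "clear" proc handler registered at the end of
	 * this function is the knob described as clearing them. A minimal
	 * sketch of the pattern, using a hypothetical counter name purely
	 * for illustration:
	 *
	 *	static counter_u64_t rack_example_ctr;
	 *	...
	 *	rack_example_ctr = counter_u64_alloc(M_WAITOK);
	 *	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	 *	    SYSCTL_CHILDREN(rack_counters),
	 *	    OID_AUTO, "example", CTLFLAG_RD,
	 *	    &rack_example_ctr,
	 *	    "Hypothetical counter shown only to illustrate the pattern");
	 */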
rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 1671 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1672 SYSCTL_CHILDREN(rack_attack), 1673 OID_AUTO, "attacks", CTLFLAG_RD, 1674 &rack_sack_attacks_detected, 1675 "Total number of SACK attackers that had sack disabled"); 1676 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 1677 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1678 SYSCTL_CHILDREN(rack_attack), 1679 OID_AUTO, "reversed", CTLFLAG_RD, 1680 &rack_sack_attacks_reversed, 1681 "Total number of SACK attackers that were later determined false positive"); 1682 rack_sack_attacks_suspect = counter_u64_alloc(M_WAITOK); 1683 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1684 SYSCTL_CHILDREN(rack_attack), 1685 OID_AUTO, "suspect", CTLFLAG_RD, 1686 &rack_sack_attacks_suspect, 1687 "Total number of SACKs that triggered early detection"); 1688 1689 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 1690 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1691 SYSCTL_CHILDREN(rack_attack), 1692 OID_AUTO, "nextmerge", CTLFLAG_RD, 1693 &rack_sack_used_next_merge, 1694 "Total number of times we used the next merge"); 1695 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 1696 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1697 SYSCTL_CHILDREN(rack_attack), 1698 OID_AUTO, "prevmerge", CTLFLAG_RD, 1699 &rack_sack_used_prev_merge, 1700 "Total number of times we used the prev merge"); 1701 /* Counters */ 1702 rack_total_bytes = counter_u64_alloc(M_WAITOK); 1703 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1704 SYSCTL_CHILDREN(rack_counters), 1705 OID_AUTO, "totalbytes", CTLFLAG_RD, 1706 &rack_total_bytes, 1707 "Total number of bytes sent"); 1708 rack_fto_send = counter_u64_alloc(M_WAITOK); 1709 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1710 SYSCTL_CHILDREN(rack_counters), 1711 OID_AUTO, "fto_send", CTLFLAG_RD, 1712 &rack_fto_send, "Total number of rack_fast_output sends"); 1713 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK); 1714 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1715 SYSCTL_CHILDREN(rack_counters), 1716 OID_AUTO, "fto_rsm_send", CTLFLAG_RD, 1717 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends"); 1718 rack_nfto_resend = counter_u64_alloc(M_WAITOK); 1719 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1720 SYSCTL_CHILDREN(rack_counters), 1721 OID_AUTO, "nfto_resend", CTLFLAG_RD, 1722 &rack_nfto_resend, "Total number of rack_output retransmissions"); 1723 rack_non_fto_send = counter_u64_alloc(M_WAITOK); 1724 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1725 SYSCTL_CHILDREN(rack_counters), 1726 OID_AUTO, "nfto_send", CTLFLAG_RD, 1727 &rack_non_fto_send, "Total number of rack_output first sends"); 1728 rack_extended_rfo = counter_u64_alloc(M_WAITOK); 1729 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1730 SYSCTL_CHILDREN(rack_counters), 1731 OID_AUTO, "rfo_extended", CTLFLAG_RD, 1732 &rack_extended_rfo, "Total number of times we extended rfo"); 1733 1734 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK); 1735 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1736 SYSCTL_CHILDREN(rack_counters), 1737 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD, 1738 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing"); 1739 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK); 1740 1741 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1742 SYSCTL_CHILDREN(rack_counters), 1743 OID_AUTO, "hwpace_lost", CTLFLAG_RD, 1744 &rack_hw_pace_lost, "Total number of times we failed to initialize hw pacing"); 1745 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 1746 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1747 
SYSCTL_CHILDREN(rack_counters), 1748 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 1749 &rack_tlp_tot, 1750 "Total number of tail loss probe expirations"); 1751 rack_tlp_newdata = counter_u64_alloc(M_WAITOK); 1752 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1753 SYSCTL_CHILDREN(rack_counters), 1754 OID_AUTO, "tlp_new", CTLFLAG_RD, 1755 &rack_tlp_newdata, 1756 "Total number of tail loss probe sending new data"); 1757 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 1758 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1759 SYSCTL_CHILDREN(rack_counters), 1760 OID_AUTO, "tlp_retran", CTLFLAG_RD, 1761 &rack_tlp_retran, 1762 "Total number of tail loss probe sending retransmitted data"); 1763 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 1764 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1765 SYSCTL_CHILDREN(rack_counters), 1766 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 1767 &rack_tlp_retran_bytes, 1768 "Total bytes of tail loss probe sending retransmitted data"); 1769 rack_to_tot = counter_u64_alloc(M_WAITOK); 1770 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1771 SYSCTL_CHILDREN(rack_counters), 1772 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 1773 &rack_to_tot, 1774 "Total number of times the rack timeout expired"); 1775 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 1776 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1777 SYSCTL_CHILDREN(rack_counters), 1778 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 1779 &rack_saw_enobuf, 1780 "Total number of times a send returned enobuf for non-hdwr paced connections"); 1781 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK); 1782 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1783 SYSCTL_CHILDREN(rack_counters), 1784 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD, 1785 &rack_saw_enobuf_hw, 1786 "Total number of times a send returned enobuf for hdwr paced connections"); 1787 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 1788 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1789 SYSCTL_CHILDREN(rack_counters), 1790 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 1791 &rack_saw_enetunreach, 1792 "Total number of times a send received an enetunreachable"); 1793 rack_hot_alloc = counter_u64_alloc(M_WAITOK); 1794 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1795 SYSCTL_CHILDREN(rack_counters), 1796 OID_AUTO, "alloc_hot", CTLFLAG_RD, 1797 &rack_hot_alloc, 1798 "Total allocations from the top of our list"); 1799 rack_to_alloc = counter_u64_alloc(M_WAITOK); 1800 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1801 SYSCTL_CHILDREN(rack_counters), 1802 OID_AUTO, "allocs", CTLFLAG_RD, 1803 &rack_to_alloc, 1804 "Total allocations of tracking structures"); 1805 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 1806 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1807 SYSCTL_CHILDREN(rack_counters), 1808 OID_AUTO, "allochard", CTLFLAG_RD, 1809 &rack_to_alloc_hard, 1810 "Total allocations done with sleeping the hard way"); 1811 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 1812 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1813 SYSCTL_CHILDREN(rack_counters), 1814 OID_AUTO, "allocemerg", CTLFLAG_RD, 1815 &rack_to_alloc_emerg, 1816 "Total allocations done from emergency cache"); 1817 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 1818 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1819 SYSCTL_CHILDREN(rack_counters), 1820 OID_AUTO, "alloc_limited", CTLFLAG_RD, 1821 &rack_to_alloc_limited, 1822 "Total allocations dropped due to limit"); 1823 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 1824 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1825 SYSCTL_CHILDREN(rack_counters), 1826 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 1827 &rack_alloc_limited_conns,
1828 "Connections with allocations dropped due to limit"); 1829 rack_split_limited = counter_u64_alloc(M_WAITOK); 1830 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1831 SYSCTL_CHILDREN(rack_counters), 1832 OID_AUTO, "split_limited", CTLFLAG_RD, 1833 &rack_split_limited, 1834 "Split allocations dropped due to limit"); 1835 rack_rxt_clamps_cwnd = counter_u64_alloc(M_WAITOK); 1836 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1837 SYSCTL_CHILDREN(rack_counters), 1838 OID_AUTO, "rxt_clamps_cwnd", CTLFLAG_RD, 1839 &rack_rxt_clamps_cwnd, 1840 "Number of times that excessive rxt clamped the cwnd down"); 1841 rack_rxt_clamps_cwnd_uniq = counter_u64_alloc(M_WAITOK); 1842 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1843 SYSCTL_CHILDREN(rack_counters), 1844 OID_AUTO, "rxt_clamps_cwnd_uniq", CTLFLAG_RD, 1845 &rack_rxt_clamps_cwnd_uniq, 1846 "Number of connections that have had excessive rxt clamped the cwnd down"); 1847 rack_persists_sends = counter_u64_alloc(M_WAITOK); 1848 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1849 SYSCTL_CHILDREN(rack_counters), 1850 OID_AUTO, "persist_sends", CTLFLAG_RD, 1851 &rack_persists_sends, 1852 "Number of times we sent a persist probe"); 1853 rack_persists_acks = counter_u64_alloc(M_WAITOK); 1854 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1855 SYSCTL_CHILDREN(rack_counters), 1856 OID_AUTO, "persist_acks", CTLFLAG_RD, 1857 &rack_persists_acks, 1858 "Number of times a persist probe was acked"); 1859 rack_persists_loss = counter_u64_alloc(M_WAITOK); 1860 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1861 SYSCTL_CHILDREN(rack_counters), 1862 OID_AUTO, "persist_loss", CTLFLAG_RD, 1863 &rack_persists_loss, 1864 "Number of times we detected a lost persist probe (no ack)"); 1865 rack_persists_lost_ends = counter_u64_alloc(M_WAITOK); 1866 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1867 SYSCTL_CHILDREN(rack_counters), 1868 OID_AUTO, "persist_loss_ends", CTLFLAG_RD, 1869 &rack_persists_lost_ends, 1870 "Number of lost persist probe (no ack) that the run ended with a PERSIST abort"); 1871 #ifdef INVARIANTS 1872 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK); 1873 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1874 SYSCTL_CHILDREN(rack_counters), 1875 OID_AUTO, "map_adjust_req", CTLFLAG_RD, 1876 &rack_adjust_map_bw, 1877 "Number of times we hit the case where the sb went up and down on a sendmap entry"); 1878 #endif 1879 rack_multi_single_eq = counter_u64_alloc(M_WAITOK); 1880 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1881 SYSCTL_CHILDREN(rack_counters), 1882 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD, 1883 &rack_multi_single_eq, 1884 "Number of compressed acks total represented"); 1885 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK); 1886 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1887 SYSCTL_CHILDREN(rack_counters), 1888 OID_AUTO, "cmp_ack_not", CTLFLAG_RD, 1889 &rack_proc_non_comp_ack, 1890 "Number of non compresseds acks that we processed"); 1891 1892 1893 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 1894 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1895 SYSCTL_CHILDREN(rack_counters), 1896 OID_AUTO, "sack_long", CTLFLAG_RD, 1897 &rack_sack_proc_all, 1898 "Total times we had to walk whole list for sack processing"); 1899 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 1900 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1901 SYSCTL_CHILDREN(rack_counters), 1902 OID_AUTO, "sack_restart", CTLFLAG_RD, 1903 &rack_sack_proc_restart, 1904 "Total times we had to walk whole list due to a restart"); 1905 rack_sack_proc_short = counter_u64_alloc(M_WAITOK); 1906 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1907 
SYSCTL_CHILDREN(rack_counters), 1908 OID_AUTO, "sack_short", CTLFLAG_RD, 1909 &rack_sack_proc_short, 1910 "Total times we took shortcut for sack processing"); 1911 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK); 1912 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1913 SYSCTL_CHILDREN(rack_attack), 1914 OID_AUTO, "skipacked", CTLFLAG_RD, 1915 &rack_sack_skipped_acked, 1916 "Total number of times we skipped previously sacked"); 1917 rack_sack_splits = counter_u64_alloc(M_WAITOK); 1918 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1919 SYSCTL_CHILDREN(rack_attack), 1920 OID_AUTO, "ofsplit", CTLFLAG_RD, 1921 &rack_sack_splits, 1922 "Total number of times we did the old fashion tree split"); 1923 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 1924 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1925 SYSCTL_CHILDREN(rack_counters), 1926 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 1927 &rack_input_idle_reduces, 1928 "Total number of idle reductions on input"); 1929 rack_collapsed_win_seen = counter_u64_alloc(M_WAITOK); 1930 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1931 SYSCTL_CHILDREN(rack_counters), 1932 OID_AUTO, "collapsed_win_seen", CTLFLAG_RD, 1933 &rack_collapsed_win_seen, 1934 "Total number of collapsed window events seen (where our window shrinks)"); 1935 1936 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 1937 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1938 SYSCTL_CHILDREN(rack_counters), 1939 OID_AUTO, "collapsed_win", CTLFLAG_RD, 1940 &rack_collapsed_win, 1941 "Total number of collapsed window events where we mark packets"); 1942 rack_collapsed_win_rxt = counter_u64_alloc(M_WAITOK); 1943 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1944 SYSCTL_CHILDREN(rack_counters), 1945 OID_AUTO, "collapsed_win_rxt", CTLFLAG_RD, 1946 &rack_collapsed_win_rxt, 1947 "Total number of packets that were retransmitted"); 1948 rack_collapsed_win_rxt_bytes = counter_u64_alloc(M_WAITOK); 1949 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1950 SYSCTL_CHILDREN(rack_counters), 1951 OID_AUTO, "collapsed_win_bytes", CTLFLAG_RD, 1952 &rack_collapsed_win_rxt_bytes, 1953 "Total number of bytes that were retransmitted"); 1954 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 1955 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1956 SYSCTL_CHILDREN(rack_counters), 1957 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 1958 &rack_try_scwnd, 1959 "Total number of scwnd attempts"); 1960 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1961 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1962 OID_AUTO, "outsize", CTLFLAG_RD, 1963 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 1964 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 1965 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1966 OID_AUTO, "opts", CTLFLAG_RD, 1967 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 1968 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 1969 SYSCTL_CHILDREN(rack_sysctl_root), 1970 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1971 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 1972 } 1973 1974 static uint32_t 1975 rc_init_window(struct tcp_rack *rack) 1976 { 1977 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 1978 1979 } 1980 1981 static uint64_t 1982 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 1983 { 1984 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 1985 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 1986 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1987 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 1988 else 1989 
return (rack->r_ctl.rc_fixed_pacing_rate_ca); 1990 } 1991 1992 static void 1993 rack_log_hybrid_bw(struct tcp_rack *rack, uint32_t seq, uint64_t cbw, uint64_t tim, 1994 uint64_t data, uint8_t mod, uint16_t aux, 1995 struct tcp_sendfile_track *cur, int line) 1996 { 1997 #ifdef TCP_REQUEST_TRK 1998 int do_log = 0; 1999 2000 /* 2001 * The rate cap one is noisy and only should come out when normal BB logging 2002 * is enabled, the other logs (not RATE_CAP and NOT CAP_CALC) only come out 2003 * once per chunk and make up the BBpoint that can be turned on by the client. 2004 */ 2005 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) { 2006 /* 2007 * The very noisy two need to only come out when 2008 * we have verbose logging on. 2009 */ 2010 if (rack_verbose_logging != 0) 2011 do_log = tcp_bblogging_on(rack->rc_tp); 2012 else 2013 do_log = 0; 2014 } else if (mod != HYBRID_LOG_BW_MEASURE) { 2015 /* 2016 * All other less noisy logs here except the measure which 2017 * also needs to come out on the point and the log. 2018 */ 2019 do_log = tcp_bblogging_on(rack->rc_tp); 2020 } else { 2021 do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING); 2022 } 2023 2024 if (do_log) { 2025 union tcp_log_stackspecific log; 2026 struct timeval tv; 2027 uint64_t lt_bw; 2028 2029 /* Convert our ms to a microsecond */ 2030 memset(&log, 0, sizeof(log)); 2031 2032 log.u_bbr.cwnd_gain = line; 2033 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2034 log.u_bbr.rttProp = tim; 2035 log.u_bbr.bw_inuse = cbw; 2036 log.u_bbr.delRate = rack_get_gp_est(rack); 2037 lt_bw = rack_get_lt_bw(rack); 2038 log.u_bbr.flex1 = seq; 2039 log.u_bbr.pacing_gain = aux; 2040 /* lt_bw = < flex3 | flex2 > */ 2041 log.u_bbr.flex2 = (uint32_t)(lt_bw & 0x00000000ffffffff); 2042 log.u_bbr.flex3 = (uint32_t)((lt_bw >> 32) & 0x00000000ffffffff); 2043 /* Record the last obtained us rtt in inflight */ 2044 if (cur == NULL) { 2045 /* Make sure we are looking at the right log if an overide comes in */ 2046 cur = rack->r_ctl.rc_last_sft; 2047 } 2048 if (rack->r_ctl.rack_rs.rs_flags != RACK_RTT_EMPTY) 2049 log.u_bbr.inflight = rack->r_ctl.rack_rs.rs_us_rtt; 2050 else { 2051 /* Use the last known rtt i.e. 
the rack-rtt */ 2052 log.u_bbr.inflight = rack->rc_rack_rtt; 2053 } 2054 if (cur != NULL) { 2055 uint64_t off; 2056 2057 log.u_bbr.cur_del_rate = cur->deadline; 2058 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) { 2059 /* start = < lost | pkt_epoch > */ 2060 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); 2061 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2062 log.u_bbr.flex6 = cur->start_seq; 2063 log.u_bbr.pkts_out = cur->end_seq; 2064 } else { 2065 /* start = < lost | pkt_epoch > */ 2066 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); 2067 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2068 /* end = < pkts_out | flex6 > */ 2069 log.u_bbr.flex6 = (uint32_t)(cur->end & 0x00000000ffffffff); 2070 log.u_bbr.pkts_out = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); 2071 } 2072 /* first_send = <lt_epoch | epoch> */ 2073 log.u_bbr.epoch = (uint32_t)(cur->first_send & 0x00000000ffffffff); 2074 log.u_bbr.lt_epoch = (uint32_t)((cur->first_send >> 32) & 0x00000000ffffffff); 2075 /* localtime = <delivered | applimited>*/ 2076 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); 2077 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 2078 #ifdef TCP_REQUEST_TRK 2079 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 2080 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 2081 #endif 2082 log.u_bbr.inhpts = 1; 2083 log.u_bbr.flex4 = (uint32_t)(rack->rc_tp->t_sndbytes - cur->sent_at_fs); 2084 log.u_bbr.flex5 = (uint32_t)(rack->rc_tp->t_snd_rxt_bytes - cur->rxt_at_fs); 2085 log.u_bbr.flex7 = (uint16_t)cur->hybrid_flags; 2086 } else { 2087 log.u_bbr.flex7 = 0xffff; 2088 log.u_bbr.cur_del_rate = 0xffffffffffffffff; 2089 } 2090 /* 2091 * Compose bbr_state to be a bit wise 0000ADHF 2092 * where A is the always_pace flag 2093 * where D is the dgp_on flag 2094 * where H is the hybrid_mode on flag 2095 * where F is the use_fixed_rate flag. 2096 */ 2097 log.u_bbr.bbr_state = rack->rc_always_pace; 2098 log.u_bbr.bbr_state <<= 1; 2099 log.u_bbr.bbr_state |= rack->dgp_on; 2100 log.u_bbr.bbr_state <<= 1; 2101 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 2102 log.u_bbr.bbr_state <<= 1; 2103 log.u_bbr.bbr_state |= rack->use_fixed_rate; 2104 log.u_bbr.flex8 = mod; 2105 tcp_log_event(rack->rc_tp, NULL, 2106 &rack->rc_inp->inp_socket->so_rcv, 2107 &rack->rc_inp->inp_socket->so_snd, 2108 TCP_HYBRID_PACING_LOG, 0, 2109 0, &log, false, NULL, __func__, __LINE__, &tv); 2110 2111 } 2112 #endif 2113 } 2114 2115 #ifdef TCP_REQUEST_TRK 2116 static void 2117 rack_log_hybrid_sends(struct tcp_rack *rack, struct tcp_sendfile_track *cur, int line) 2118 { 2119 if (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) { 2120 union tcp_log_stackspecific log; 2121 struct timeval tv; 2122 uint64_t off; 2123 2124 /* Convert our ms to a microsecond */ 2125 memset(&log, 0, sizeof(log)); 2126 2127 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2128 log.u_bbr.delRate = cur->sent_at_fs; 2129 2130 if ((cur->flags & TCP_TRK_TRACK_FLG_LSND) == 0) { 2131 /* 2132 * We did not get a new Rules Applied to set so 2133 * no overlapping send occured, this means the 2134 * current byte counts are correct. 2135 */ 2136 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 2137 log.u_bbr.rttProp = rack->rc_tp->t_snd_rxt_bytes; 2138 } else { 2139 /* 2140 * Overlapping send case, we switched to a new 2141 * send and did a rules applied. 
2142 */ 2143 log.u_bbr.cur_del_rate = cur->sent_at_ls; 2144 log.u_bbr.rttProp = cur->rxt_at_ls; 2145 } 2146 log.u_bbr.bw_inuse = cur->rxt_at_fs; 2147 log.u_bbr.cwnd_gain = line; 2148 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 2149 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 2150 /* start = < flex1 | flex2 > */ 2151 log.u_bbr.flex2 = (uint32_t)(cur->start & 0x00000000ffffffff); 2152 log.u_bbr.flex1 = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2153 /* end = < flex3 | flex4 > */ 2154 log.u_bbr.flex4 = (uint32_t)(cur->end & 0x00000000ffffffff); 2155 log.u_bbr.flex3 = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); 2156 2157 /* localtime = <delivered | applimited>*/ 2158 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); 2159 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 2160 /* client timestamp = <lt_epoch | epoch>*/ 2161 log.u_bbr.epoch = (uint32_t)(cur->timestamp & 0x00000000ffffffff); 2162 log.u_bbr.lt_epoch = (uint32_t)((cur->timestamp >> 32) & 0x00000000ffffffff); 2163 /* now set all the flags in */ 2164 log.u_bbr.pkts_out = cur->hybrid_flags; 2165 log.u_bbr.lost = cur->playout_ms; 2166 log.u_bbr.flex6 = cur->flags; 2167 /* 2168 * Last send time = <flex5 | pkt_epoch> note we do not distinguish cases 2169 * where a false retransmit occurred so first_send <-> lastsend may 2170 * include a longer time than it actually took if we have a false rxt. 2171 */ 2172 log.u_bbr.pkt_epoch = (uint32_t)(rack->r_ctl.last_tmit_time_acked & 0x00000000ffffffff); 2173 log.u_bbr.flex5 = (uint32_t)((rack->r_ctl.last_tmit_time_acked >> 32) & 0x00000000ffffffff); 2174 /* 2175 * Compose bbr_state to be a bit wise 0000ADHF 2176 * where A is the always_pace flag 2177 * where D is the dgp_on flag 2178 * where H is the hybrid_mode on flag 2179 * where F is the use_fixed_rate flag.
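 *
 * Concretely, the shift/or sequence below is equivalent to
 * bbr_state = (A << 3) | (D << 2) | (H << 1) | F; for example
 * always_pace=1, dgp_on=1, hybrid_mode=0, use_fixed_rate=0 logs 0xC.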
2180 */ 2181 log.u_bbr.bbr_state = rack->rc_always_pace; 2182 log.u_bbr.bbr_state <<= 1; 2183 log.u_bbr.bbr_state |= rack->dgp_on; 2184 log.u_bbr.bbr_state <<= 1; 2185 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 2186 log.u_bbr.bbr_state <<= 1; 2187 log.u_bbr.bbr_state |= rack->use_fixed_rate; 2188 2189 log.u_bbr.flex8 = HYBRID_LOG_SENT_LOST; 2190 tcp_log_event(rack->rc_tp, NULL, 2191 &rack->rc_inp->inp_socket->so_rcv, 2192 &rack->rc_inp->inp_socket->so_snd, 2193 TCP_HYBRID_PACING_LOG, 0, 2194 0, &log, false, NULL, __func__, __LINE__, &tv); 2195 } 2196 } 2197 #endif 2198 2199 static inline uint64_t 2200 rack_compensate_for_linerate(struct tcp_rack *rack, uint64_t bw) 2201 { 2202 uint64_t ret_bw, ether; 2203 uint64_t u_segsiz; 2204 2205 ether = rack->rc_tp->t_maxseg + sizeof(struct tcphdr); 2206 if (rack->r_is_v6){ 2207 #ifdef INET6 2208 ether += sizeof(struct ip6_hdr); 2209 #endif 2210 ether += 14; /* eheader size 6+6+2 */ 2211 } else { 2212 #ifdef INET 2213 ether += sizeof(struct ip); 2214 #endif 2215 ether += 14; /* eheader size 6+6+2 */ 2216 } 2217 u_segsiz = (uint64_t)min(ctf_fixed_maxseg(rack->rc_tp), rack->r_ctl.rc_pace_min_segs); 2218 ret_bw = bw; 2219 ret_bw *= ether; 2220 ret_bw /= u_segsiz; 2221 return (ret_bw); 2222 } 2223 2224 static void 2225 rack_rate_cap_bw(struct tcp_rack *rack, uint64_t *bw, int *capped) 2226 { 2227 #ifdef TCP_REQUEST_TRK 2228 struct timeval tv; 2229 uint64_t timenow, timeleft, lenleft, lengone, calcbw; 2230 #endif 2231 2232 if (rack->r_ctl.bw_rate_cap == 0) 2233 return; 2234 #ifdef TCP_REQUEST_TRK 2235 if (rack->rc_catch_up && rack->rc_hybrid_mode && 2236 (rack->r_ctl.rc_last_sft != NULL)) { 2237 /* 2238 * We have a dynamic cap. The original target 2239 * is in bw_rate_cap, but we need to look at 2240 * how long it is until we hit the deadline. 2241 */ 2242 struct tcp_sendfile_track *ent; 2243 2244 ent = rack->r_ctl.rc_last_sft; 2245 microuptime(&tv); 2246 timenow = tcp_tv_to_lusec(&tv); 2247 if (timenow >= ent->deadline) { 2248 /* No time left we do DGP only */ 2249 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2250 0, 0, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__); 2251 rack->r_ctl.bw_rate_cap = 0; 2252 return; 2253 } 2254 /* We have the time */ 2255 timeleft = rack->r_ctl.rc_last_sft->deadline - timenow; 2256 if (timeleft < HPTS_MSEC_IN_SEC) { 2257 /* If there is less than a ms left just use DGPs rate */ 2258 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2259 0, timeleft, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__); 2260 rack->r_ctl.bw_rate_cap = 0; 2261 return; 2262 } 2263 /* 2264 * Now lets find the amount of data left to send. 2265 * 2266 * Now ideally we want to use the end_seq to figure out how much more 2267 * but it might not be possible (only if we have the TRACK_FG_COMP on the entry.. 2268 */ 2269 if (ent->flags & TCP_TRK_TRACK_FLG_COMP) { 2270 if (SEQ_GT(ent->end_seq, rack->rc_tp->snd_una)) 2271 lenleft = ent->end_seq - rack->rc_tp->snd_una; 2272 else { 2273 /* TSNH, we should catch it at the send */ 2274 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2275 0, timeleft, 0, HYBRID_LOG_CAPERROR, 0, ent, __LINE__); 2276 rack->r_ctl.bw_rate_cap = 0; 2277 return; 2278 } 2279 } else { 2280 /* 2281 * The hard way, figure out how much is gone and then 2282 * take that away from the total the client asked for 2283 * (thats off by tls overhead if this is tls). 
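 *
 * Rough numeric illustration of the catch-up cap computed below (the
 * numbers are made up): with lenleft = 2,000,000 bytes still to send
 * and timeleft = 500,000 usec until the deadline,
 * calcbw = lenleft * HPTS_USEC_IN_SEC / timeleft = 4,000,000 bytes/sec
 * (about 32 Mbit/s) before rack_compensate_for_linerate() scales it up
 * to account for the per-segment TCP/IP/ethernet header overhead.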
2284 */ 2285 if (SEQ_GT(rack->rc_tp->snd_una, ent->start_seq)) 2286 lengone = rack->rc_tp->snd_una - ent->start_seq; 2287 else 2288 lengone = 0; 2289 if (lengone < (ent->end - ent->start)) 2290 lenleft = (ent->end - ent->start) - lengone; 2291 else { 2292 /* TSNH, we should catch it at the send */ 2293 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2294 0, timeleft, lengone, HYBRID_LOG_CAPERROR, 0, ent, __LINE__); 2295 rack->r_ctl.bw_rate_cap = 0; 2296 return; 2297 } 2298 } 2299 if (lenleft == 0) { 2300 /* We have it all sent */ 2301 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2302 0, timeleft, lenleft, HYBRID_LOG_ALLSENT, 0, ent, __LINE__); 2303 if (rack->r_ctl.bw_rate_cap) 2304 goto normal_ratecap; 2305 else 2306 return; 2307 } 2308 calcbw = lenleft * HPTS_USEC_IN_SEC; 2309 calcbw /= timeleft; 2310 /* Now we must compensate for IP/TCP overhead */ 2311 calcbw = rack_compensate_for_linerate(rack, calcbw); 2312 /* Update the bit rate cap */ 2313 rack->r_ctl.bw_rate_cap = calcbw; 2314 if ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && 2315 (rack_hybrid_allow_set_maxseg == 1) && 2316 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { 2317 /* Lets set in a smaller mss possibly here to match our rate-cap */ 2318 uint32_t orig_max; 2319 2320 orig_max = rack->r_ctl.rc_pace_max_segs; 2321 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; 2322 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, calcbw, ctf_fixed_maxseg(rack->rc_tp)); 2323 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5); 2324 } 2325 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2326 calcbw, timeleft, lenleft, HYBRID_LOG_CAP_CALC, 0, ent, __LINE__); 2327 if ((calcbw > 0) && (*bw > calcbw)) { 2328 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2329 *bw, ent->deadline, lenleft, HYBRID_LOG_RATE_CAP, 0, ent, __LINE__); 2330 *capped = 1; 2331 *bw = calcbw; 2332 } 2333 return; 2334 } 2335 normal_ratecap: 2336 #endif 2337 if ((rack->r_ctl.bw_rate_cap > 0) && (*bw > rack->r_ctl.bw_rate_cap)) { 2338 #ifdef TCP_REQUEST_TRK 2339 if (rack->rc_hybrid_mode && 2340 rack->rc_catch_up && 2341 (rack->r_ctl.rc_last_sft != NULL) && 2342 (rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && 2343 (rack_hybrid_allow_set_maxseg == 1) && 2344 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { 2345 /* Lets set in a smaller mss possibly here to match our rate-cap */ 2346 uint32_t orig_max; 2347 2348 orig_max = rack->r_ctl.rc_pace_max_segs; 2349 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; 2350 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, rack->r_ctl.bw_rate_cap, ctf_fixed_maxseg(rack->rc_tp)); 2351 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5); 2352 } 2353 #endif 2354 *capped = 1; 2355 *bw = rack->r_ctl.bw_rate_cap; 2356 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2357 *bw, 0, 0, 2358 HYBRID_LOG_RATE_CAP, 1, NULL, __LINE__); 2359 } 2360 } 2361 2362 static uint64_t 2363 rack_get_gp_est(struct tcp_rack *rack) 2364 { 2365 uint64_t bw, lt_bw, ret_bw; 2366 2367 if (rack->rc_gp_filled == 0) { 2368 /* 2369 * We have yet no b/w measurement, 2370 * if we have a user set initial bw 2371 * return it. If we don't have that and 2372 * we have an srtt, use the tcp IW (10) to 2373 * calculate a fictional b/w over the SRTT 2374 * which is more or less a guess. 
Note 2375 * we don't use our IW from rack on purpose 2376 * so if we have like IW=30, we are not 2377 * calculating a "huge" b/w. 2378 */ 2379 uint64_t srtt; 2380 2381 if (rack->dis_lt_bw == 1) 2382 lt_bw = 0; 2383 else 2384 lt_bw = rack_get_lt_bw(rack); 2385 if (lt_bw) { 2386 /* 2387 * No goodput bw but a long-term b/w does exist 2388 * lets use that. 2389 */ 2390 ret_bw = lt_bw; 2391 goto compensate; 2392 } 2393 if (rack->r_ctl.init_rate) 2394 return (rack->r_ctl.init_rate); 2395 2396 /* Ok lets come up with the IW guess, if we have a srtt */ 2397 if (rack->rc_tp->t_srtt == 0) { 2398 /* 2399 * Go with old pacing method 2400 * i.e. burst mitigation only. 2401 */ 2402 return (0); 2403 } 2404 /* Ok lets get the initial TCP win (not racks) */ 2405 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 2406 srtt = (uint64_t)rack->rc_tp->t_srtt; 2407 bw *= (uint64_t)USECS_IN_SECOND; 2408 bw /= srtt; 2409 ret_bw = bw; 2410 goto compensate; 2411 2412 } 2413 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 2414 /* Averaging is done, we can return the value */ 2415 bw = rack->r_ctl.gp_bw; 2416 } else { 2417 /* Still doing initial average must calculate */ 2418 bw = rack->r_ctl.gp_bw / max(rack->r_ctl.num_measurements, 1); 2419 } 2420 if (rack->dis_lt_bw) { 2421 /* We are not using lt-bw */ 2422 ret_bw = bw; 2423 goto compensate; 2424 } 2425 lt_bw = rack_get_lt_bw(rack); 2426 if (lt_bw == 0) { 2427 /* If we don't have one then equate it to the gp_bw */ 2428 lt_bw = rack->r_ctl.gp_bw; 2429 } 2430 if (rack->use_lesser_lt_bw) { 2431 if (lt_bw < bw) 2432 ret_bw = lt_bw; 2433 else 2434 ret_bw = bw; 2435 } else { 2436 if (lt_bw > bw) 2437 ret_bw = lt_bw; 2438 else 2439 ret_bw = bw; 2440 } 2441 /* 2442 * Now lets compensate based on the TCP/IP overhead. Our 2443 * Goodput estimate does not include this so we must pace out 2444 * a bit faster since our pacing calculations do. The pacing 2445 * calculations use the base ETHERNET_SEGMENT_SIZE and the segsiz 2446 * we are using to do this, so we do that here in the opposite 2447 * direction as well. This means that if we are tunneled and the 2448 * segsiz is say 1200 bytes we will get quite a boost, but its 2449 * compensated for in the pacing time the opposite way. 2450 */ 2451 compensate: 2452 ret_bw = rack_compensate_for_linerate(rack, ret_bw); 2453 return(ret_bw); 2454 } 2455 2456 2457 static uint64_t 2458 rack_get_bw(struct tcp_rack *rack) 2459 { 2460 uint64_t bw; 2461 2462 if (rack->use_fixed_rate) { 2463 /* Return the fixed pacing rate */ 2464 return (rack_get_fixed_pacing_bw(rack)); 2465 } 2466 bw = rack_get_gp_est(rack); 2467 return (bw); 2468 } 2469 2470 static uint16_t 2471 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 2472 { 2473 if (rack->use_fixed_rate) { 2474 return (100); 2475 } else if (rack->in_probe_rtt && (rsm == NULL)) 2476 return (rack->r_ctl.rack_per_of_gp_probertt); 2477 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 2478 rack->r_ctl.rack_per_of_gp_rec)) { 2479 if (rsm) { 2480 /* a retransmission always use the recovery rate */ 2481 return (rack->r_ctl.rack_per_of_gp_rec); 2482 } else if (rack->rack_rec_nonrxt_use_cr) { 2483 /* Directed to use the configured rate */ 2484 goto configured_rate; 2485 } else if (rack->rack_no_prr && 2486 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 2487 /* No PRR, lets just use the b/w estimate only */ 2488 return (100); 2489 } else { 2490 /* 2491 * Here we may have a non-retransmit but we 2492 * have no overrides, so just use the recovery 2493 * rate (prr is in effect). 
2494 */ 2495 return (rack->r_ctl.rack_per_of_gp_rec); 2496 } 2497 } 2498 configured_rate: 2499 /* For the configured rate we look at our cwnd vs the ssthresh */ 2500 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 2501 return (rack->r_ctl.rack_per_of_gp_ss); 2502 else 2503 return (rack->r_ctl.rack_per_of_gp_ca); 2504 } 2505 2506 static void 2507 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6) 2508 { 2509 /* 2510 * Types of logs (mod value) 2511 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. 2512 * 2 = a dsack round begins, persist is reset to 16. 2513 * 3 = a dsack round ends 2514 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh 2515 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack 2516 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. 2517 */ 2518 if (tcp_bblogging_on(rack->rc_tp)) { 2519 union tcp_log_stackspecific log; 2520 struct timeval tv; 2521 2522 memset(&log, 0, sizeof(log)); 2523 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; 2524 log.u_bbr.flex1 <<= 1; 2525 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; 2526 log.u_bbr.flex1 <<= 1; 2527 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; 2528 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; 2529 log.u_bbr.flex3 = rack->r_ctl.num_dsack; 2530 log.u_bbr.flex4 = flex4; 2531 log.u_bbr.flex5 = flex5; 2532 log.u_bbr.flex6 = flex6; 2533 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; 2534 log.u_bbr.flex8 = mod; 2535 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2536 log.u_bbr.epoch = rack->r_ctl.current_round; 2537 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2538 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2539 &rack->rc_inp->inp_socket->so_rcv, 2540 &rack->rc_inp->inp_socket->so_snd, 2541 RACK_DSACK_HANDLING, 0, 2542 0, &log, false, &tv); 2543 } 2544 } 2545 2546 static void 2547 rack_log_hdwr_pacing(struct tcp_rack *rack, 2548 uint64_t rate, uint64_t hw_rate, int line, 2549 int error, uint16_t mod) 2550 { 2551 if (tcp_bblogging_on(rack->rc_tp)) { 2552 union tcp_log_stackspecific log; 2553 struct timeval tv; 2554 const struct ifnet *ifp; 2555 uint64_t ifp64; 2556 2557 memset(&log, 0, sizeof(log)); 2558 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 2559 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 2560 if (rack->r_ctl.crte) { 2561 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 2562 } else if (rack->rc_inp->inp_route.ro_nh && 2563 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 2564 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 2565 } else 2566 ifp = NULL; 2567 if (ifp) { 2568 ifp64 = (uintptr_t)ifp; 2569 log.u_bbr.flex3 = ((ifp64 >> 32) & 0x00000000ffffffff); 2570 log.u_bbr.flex4 = (ifp64 & 0x00000000ffffffff); 2571 } 2572 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2573 log.u_bbr.bw_inuse = rate; 2574 log.u_bbr.flex5 = line; 2575 log.u_bbr.flex6 = error; 2576 log.u_bbr.flex7 = mod; 2577 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 2578 log.u_bbr.flex8 = rack->use_fixed_rate; 2579 log.u_bbr.flex8 <<= 1; 2580 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 2581 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 2582 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 2583 if (rack->r_ctl.crte) 2584 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 2585 else 2586 log.u_bbr.cur_del_rate = 0; 2587 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 2588 log.u_bbr.epoch = rack->r_ctl.current_round; 2589 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2590 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2591 
&rack->rc_inp->inp_socket->so_rcv, 2592 &rack->rc_inp->inp_socket->so_snd, 2593 BBR_LOG_HDWR_PACE, 0, 2594 0, &log, false, &tv); 2595 } 2596 } 2597 2598 static uint64_t 2599 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) 2600 { 2601 /* 2602 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 2603 */ 2604 uint64_t bw_est, high_rate; 2605 uint64_t gain; 2606 2607 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2608 bw_est = bw * gain; 2609 bw_est /= (uint64_t)100; 2610 /* Never fall below the minimum (def 64kbps) */ 2611 if (bw_est < RACK_MIN_BW) 2612 bw_est = RACK_MIN_BW; 2613 if (rack->r_rack_hw_rate_caps) { 2614 /* Rate caps are in place */ 2615 if (rack->r_ctl.crte != NULL) { 2616 /* We have a hdwr rate already */ 2617 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2618 if (bw_est >= high_rate) { 2619 /* We are capping bw at the highest rate table entry */ 2620 if (rack_hw_rate_cap_per && 2621 (((high_rate * (100 + rack_hw_rate_cap_per)) / 100) < bw_est)) { 2622 rack->r_rack_hw_rate_caps = 0; 2623 goto done; 2624 } 2625 rack_log_hdwr_pacing(rack, 2626 bw_est, high_rate, __LINE__, 2627 0, 3); 2628 bw_est = high_rate; 2629 if (capped) 2630 *capped = 1; 2631 } 2632 } else if ((rack->rack_hdrw_pacing == 0) && 2633 (rack->rack_hdw_pace_ena) && 2634 (rack->rack_attempt_hdwr_pace == 0) && 2635 (rack->rc_inp->inp_route.ro_nh != NULL) && 2636 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2637 /* 2638 * Special case, we have not yet attempted hardware 2639 * pacing, and yet we may, when we do, find out if we are 2640 * above the highest rate. We need to know the maxbw for the interface 2641 * in question (if it supports ratelimiting). We get back 2642 * a 0, if the interface is not found in the RL lists. 2643 */ 2644 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2645 if (high_rate) { 2646 /* Yep, we have a rate is it above this rate? */ 2647 if (bw_est > high_rate) { 2648 bw_est = high_rate; 2649 if (capped) 2650 *capped = 1; 2651 } 2652 } 2653 } 2654 } 2655 done: 2656 return (bw_est); 2657 } 2658 2659 static void 2660 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2661 { 2662 if (tcp_bblogging_on(rack->rc_tp)) { 2663 union tcp_log_stackspecific log; 2664 struct timeval tv; 2665 2666 if ((mod != 1) && (rack_verbose_logging == 0)) { 2667 /* 2668 * We get 3 values currently for mod 2669 * 1 - We are retransmitting and this tells the reason. 2670 * 2 - We are clearing a dup-ack count. 2671 * 3 - We are incrementing a dup-ack count. 2672 * 2673 * The clear/increment are only logged 2674 * if you have BBverbose on. 
2675 */ 2676 return; 2677 } 2678 memset(&log, 0, sizeof(log)); 2679 log.u_bbr.flex1 = tsused; 2680 log.u_bbr.flex2 = thresh; 2681 log.u_bbr.flex3 = rsm->r_flags; 2682 log.u_bbr.flex4 = rsm->r_dupack; 2683 log.u_bbr.flex5 = rsm->r_start; 2684 log.u_bbr.flex6 = rsm->r_end; 2685 log.u_bbr.flex8 = mod; 2686 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2687 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2688 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2689 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2690 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2691 log.u_bbr.pacing_gain = rack->r_must_retran; 2692 log.u_bbr.epoch = rack->r_ctl.current_round; 2693 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2694 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2695 &rack->rc_inp->inp_socket->so_rcv, 2696 &rack->rc_inp->inp_socket->so_snd, 2697 BBR_LOG_SETTINGS_CHG, 0, 2698 0, &log, false, &tv); 2699 } 2700 } 2701 2702 static void 2703 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t pacing_delay, uint8_t which) 2704 { 2705 if (tcp_bblogging_on(rack->rc_tp)) { 2706 union tcp_log_stackspecific log; 2707 struct timeval tv; 2708 2709 memset(&log, 0, sizeof(log)); 2710 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2711 log.u_bbr.flex2 = to; 2712 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2713 log.u_bbr.flex4 = pacing_delay; 2714 log.u_bbr.flex5 = rack->rc_tp->t_hpts_slot; 2715 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2716 log.u_bbr.flex7 = rack->rc_in_persist; 2717 log.u_bbr.flex8 = which; 2718 if (rack->rack_no_prr) 2719 log.u_bbr.pkts_out = 0; 2720 else 2721 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2722 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2723 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2724 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2725 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2726 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2727 log.u_bbr.pacing_gain = rack->r_must_retran; 2728 log.u_bbr.cwnd_gain = rack->rack_deferred_inited; 2729 log.u_bbr.pkt_epoch = rack->rc_has_collapsed; 2730 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2731 log.u_bbr.lost = rack_rto_min; 2732 log.u_bbr.epoch = rack->r_ctl.roundends; 2733 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2734 log.u_bbr.bw_inuse <<= 32; 2735 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2736 log.u_bbr.applimited = rack->rc_tp->t_flags2; 2737 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2738 &rack->rc_inp->inp_socket->so_rcv, 2739 &rack->rc_inp->inp_socket->so_snd, 2740 BBR_LOG_TIMERSTAR, 0, 2741 0, &log, false, &tv); 2742 } 2743 } 2744 2745 static void 2746 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2747 { 2748 if (tcp_bblogging_on(rack->rc_tp)) { 2749 union tcp_log_stackspecific log; 2750 struct timeval tv; 2751 2752 memset(&log, 0, sizeof(log)); 2753 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2754 log.u_bbr.flex8 = to_num; 2755 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2756 log.u_bbr.flex2 = rack->rc_rack_rtt; 2757 if (rsm == NULL) 2758 log.u_bbr.flex3 = 0; 2759 else 2760 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2761 if (rack->rack_no_prr) 2762 log.u_bbr.flex5 = 0; 2763 else 2764 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2765 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2766 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2767 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2768 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2769 log.u_bbr.pacing_gain = 
rack->r_must_retran; 2770 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2771 log.u_bbr.bw_inuse <<= 32; 2772 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2773 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2774 &rack->rc_inp->inp_socket->so_rcv, 2775 &rack->rc_inp->inp_socket->so_snd, 2776 BBR_LOG_RTO, 0, 2777 0, &log, false, &tv); 2778 } 2779 } 2780 2781 static void 2782 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, 2783 struct rack_sendmap *prev, 2784 struct rack_sendmap *rsm, 2785 struct rack_sendmap *next, 2786 int flag, uint32_t th_ack, int line) 2787 { 2788 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2789 union tcp_log_stackspecific log; 2790 struct timeval tv; 2791 2792 memset(&log, 0, sizeof(log)); 2793 log.u_bbr.flex8 = flag; 2794 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2795 log.u_bbr.cur_del_rate = (uintptr_t)prev; 2796 log.u_bbr.delRate = (uintptr_t)rsm; 2797 log.u_bbr.rttProp = (uintptr_t)next; 2798 log.u_bbr.flex7 = 0; 2799 if (prev) { 2800 log.u_bbr.flex1 = prev->r_start; 2801 log.u_bbr.flex2 = prev->r_end; 2802 log.u_bbr.flex7 |= 0x4; 2803 } 2804 if (rsm) { 2805 log.u_bbr.flex3 = rsm->r_start; 2806 log.u_bbr.flex4 = rsm->r_end; 2807 log.u_bbr.flex7 |= 0x2; 2808 } 2809 if (next) { 2810 log.u_bbr.flex5 = next->r_start; 2811 log.u_bbr.flex6 = next->r_end; 2812 log.u_bbr.flex7 |= 0x1; 2813 } 2814 log.u_bbr.applimited = line; 2815 log.u_bbr.pkts_out = th_ack; 2816 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2817 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2818 if (rack->rack_no_prr) 2819 log.u_bbr.lost = 0; 2820 else 2821 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2822 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2823 log.u_bbr.bw_inuse <<= 32; 2824 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2825 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2826 &rack->rc_inp->inp_socket->so_rcv, 2827 &rack->rc_inp->inp_socket->so_snd, 2828 TCP_LOG_MAPCHG, 0, 2829 0, &log, false, &tv); 2830 } 2831 } 2832 2833 static void 2834 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2835 struct rack_sendmap *rsm, int conf) 2836 { 2837 if (tcp_bblogging_on(tp)) { 2838 union tcp_log_stackspecific log; 2839 struct timeval tv; 2840 memset(&log, 0, sizeof(log)); 2841 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2842 log.u_bbr.flex1 = t; 2843 log.u_bbr.flex2 = len; 2844 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2845 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2846 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2847 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2848 log.u_bbr.flex7 = conf; 2849 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2850 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2851 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2852 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2853 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2854 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2855 if (rsm) { 2856 log.u_bbr.pkt_epoch = rsm->r_start; 2857 log.u_bbr.lost = rsm->r_end; 2858 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2859 /* We loose any upper of the 24 bits */ 2860 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; 2861 } else { 2862 /* Its a SYN */ 2863 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2864 log.u_bbr.lost = 0; 2865 log.u_bbr.cwnd_gain = 0; 2866 log.u_bbr.pacing_gain = 0; 2867 } 2868 /* Write out general bits of interest rrs here */ 2869 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; 2870 log.u_bbr.use_lt_bw <<= 1; 
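	/*
	 * The shift/or chain started just above and continued below packs
	 * eight state flags into use_lt_bw, one bit each, ending up as:
	 * bit 7 = rc_highly_buffered, 6 = forced_ack, 5 = rc_gp_dyn_mul,
	 * 4 = in_probe_rtt, 3 = measure_saw_probe_rtt,
	 * 2 = app_limited_needs_set, 1 = rc_gp_filled and
	 * 0 = rc_dragged_bottom.
	 */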
2871 log.u_bbr.use_lt_bw |= rack->forced_ack; 2872 log.u_bbr.use_lt_bw <<= 1; 2873 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2874 log.u_bbr.use_lt_bw <<= 1; 2875 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2876 log.u_bbr.use_lt_bw <<= 1; 2877 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2878 log.u_bbr.use_lt_bw <<= 1; 2879 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 2880 log.u_bbr.use_lt_bw <<= 1; 2881 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2882 log.u_bbr.use_lt_bw <<= 1; 2883 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; 2884 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; 2885 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; 2886 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; 2887 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; 2888 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; 2889 log.u_bbr.bw_inuse = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time); 2890 log.u_bbr.bw_inuse <<= 32; 2891 if (rsm) 2892 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 2893 TCP_LOG_EVENTP(tp, NULL, 2894 &rack->rc_inp->inp_socket->so_rcv, 2895 &rack->rc_inp->inp_socket->so_snd, 2896 BBR_LOG_BBRRTT, 0, 2897 0, &log, false, &tv); 2898 2899 2900 } 2901 } 2902 2903 static void 2904 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 2905 { 2906 /* 2907 * Log the rtt sample we are 2908 * applying to the srtt algorithm in 2909 * useconds. 2910 */ 2911 if (tcp_bblogging_on(rack->rc_tp)) { 2912 union tcp_log_stackspecific log; 2913 struct timeval tv; 2914 2915 /* Convert our ms to a microsecond */ 2916 memset(&log, 0, sizeof(log)); 2917 log.u_bbr.flex1 = rtt; 2918 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2919 log.u_bbr.flex7 = 1; 2920 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2921 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2922 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2923 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2924 log.u_bbr.pacing_gain = rack->r_must_retran; 2925 /* 2926 * We capture in delRate the upper 32 bits as 2927 * the confidence level we had declared, and the 2928 * lower 32 bits as the actual RTT using the arrival 2929 * timestamp. 
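 * Equivalently: delRate = ((uint64_t)confidence << 32) | rs_us_rtt.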
2930 */ 2931 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 2932 log.u_bbr.delRate <<= 32; 2933 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 2934 /* Lets capture all the things that make up t_rtxcur */ 2935 log.u_bbr.applimited = rack_rto_min; 2936 log.u_bbr.epoch = rack_rto_max; 2937 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 2938 log.u_bbr.lost = rack_rto_min; 2939 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 2940 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 2941 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 2942 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 2943 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 2944 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2945 &rack->rc_inp->inp_socket->so_rcv, 2946 &rack->rc_inp->inp_socket->so_snd, 2947 TCP_LOG_RTT, 0, 2948 0, &log, false, &tv); 2949 } 2950 } 2951 2952 static void 2953 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 2954 { 2955 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2956 union tcp_log_stackspecific log; 2957 struct timeval tv; 2958 2959 /* Convert our ms to a microsecond */ 2960 memset(&log, 0, sizeof(log)); 2961 log.u_bbr.flex1 = rtt; 2962 log.u_bbr.flex2 = send_time; 2963 log.u_bbr.flex3 = ack_time; 2964 log.u_bbr.flex4 = where; 2965 log.u_bbr.flex7 = 2; 2966 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2967 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2968 log.u_bbr.bw_inuse <<= 32; 2969 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2970 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2971 &rack->rc_inp->inp_socket->so_rcv, 2972 &rack->rc_inp->inp_socket->so_snd, 2973 TCP_LOG_RTT, 0, 2974 0, &log, false, &tv); 2975 } 2976 } 2977 2978 2979 static void 2980 rack_log_rtt_sendmap(struct tcp_rack *rack, uint32_t idx, uint64_t tsv, uint32_t tsecho) 2981 { 2982 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2983 union tcp_log_stackspecific log; 2984 struct timeval tv; 2985 2986 /* Convert our ms to a microsecond */ 2987 memset(&log, 0, sizeof(log)); 2988 log.u_bbr.flex1 = idx; 2989 log.u_bbr.flex2 = rack_ts_to_msec(tsv); 2990 log.u_bbr.flex3 = tsecho; 2991 log.u_bbr.flex7 = 3; 2992 log.u_bbr.rttProp = tsv; 2993 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2994 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2995 log.u_bbr.bw_inuse <<= 32; 2996 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2997 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2998 &rack->rc_inp->inp_socket->so_rcv, 2999 &rack->rc_inp->inp_socket->so_snd, 3000 TCP_LOG_RTT, 0, 3001 0, &log, false, &tv); 3002 } 3003 } 3004 3005 3006 static inline void 3007 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 3008 { 3009 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3010 union tcp_log_stackspecific log; 3011 struct timeval tv; 3012 3013 memset(&log, 0, sizeof(log)); 3014 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3015 log.u_bbr.flex1 = line; 3016 log.u_bbr.flex2 = tick; 3017 log.u_bbr.flex3 = tp->t_maxunacktime; 3018 log.u_bbr.flex4 = tp->t_acktime; 3019 log.u_bbr.flex8 = event; 3020 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3021 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3022 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3023 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3024 log.u_bbr.pacing_gain = rack->r_must_retran; 3025 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3026 log.u_bbr.bw_inuse <<= 32; 3027 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 
3028 TCP_LOG_EVENTP(tp, NULL, 3029 &rack->rc_inp->inp_socket->so_rcv, 3030 &rack->rc_inp->inp_socket->so_snd, 3031 BBR_LOG_PROGRESS, 0, 3032 0, &log, false, &tv); 3033 } 3034 } 3035 3036 static void 3037 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t pacing_delay, uint32_t cts, struct timeval *tv, int line) 3038 { 3039 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3040 union tcp_log_stackspecific log; 3041 3042 memset(&log, 0, sizeof(log)); 3043 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3044 log.u_bbr.flex1 = pacing_delay; 3045 if (rack->rack_no_prr) 3046 log.u_bbr.flex2 = 0; 3047 else 3048 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 3049 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3050 log.u_bbr.flex6 = line; 3051 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 3052 log.u_bbr.flex8 = rack->rc_in_persist; 3053 log.u_bbr.timeStamp = cts; 3054 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3055 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3056 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3057 log.u_bbr.pacing_gain = rack->r_must_retran; 3058 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3059 &rack->rc_inp->inp_socket->so_rcv, 3060 &rack->rc_inp->inp_socket->so_snd, 3061 BBR_LOG_BBRSND, 0, 3062 0, &log, false, tv); 3063 } 3064 } 3065 3066 static void 3067 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 3068 { 3069 if (tcp_bblogging_on(rack->rc_tp)) { 3070 union tcp_log_stackspecific log; 3071 struct timeval tv; 3072 3073 memset(&log, 0, sizeof(log)); 3074 log.u_bbr.flex1 = did_out; 3075 log.u_bbr.flex2 = nxt_pkt; 3076 log.u_bbr.flex3 = way_out; 3077 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3078 if (rack->rack_no_prr) 3079 log.u_bbr.flex5 = 0; 3080 else 3081 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3082 log.u_bbr.flex6 = nsegs; 3083 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 3084 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 3085 log.u_bbr.flex7 <<= 1; 3086 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 3087 log.u_bbr.flex7 <<= 1; 3088 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 3089 log.u_bbr.flex8 = rack->rc_in_persist; 3090 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3091 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3092 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3093 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 3094 log.u_bbr.use_lt_bw <<= 1; 3095 log.u_bbr.use_lt_bw |= rack->r_might_revert; 3096 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3097 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3098 log.u_bbr.pacing_gain = rack->r_must_retran; 3099 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3100 log.u_bbr.bw_inuse <<= 32; 3101 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3102 log.u_bbr.epoch = rack->rc_inp->inp_socket->so_snd.sb_hiwat; 3103 log.u_bbr.lt_epoch = rack->rc_inp->inp_socket->so_rcv.sb_hiwat; 3104 log.u_bbr.lost = rack->rc_tp->t_srtt; 3105 log.u_bbr.pkt_epoch = rack->rc_tp->rfbuf_cnt; 3106 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3107 &rack->rc_inp->inp_socket->so_rcv, 3108 &rack->rc_inp->inp_socket->so_snd, 3109 BBR_LOG_DOSEG_DONE, 0, 3110 0, &log, false, &tv); 3111 } 3112 } 3113 3114 static void 3115 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 3116 { 3117 if (tcp_bblogging_on(rack->rc_tp)) { 3118 union 
tcp_log_stackspecific log; 3119 struct timeval tv; 3120 3121 memset(&log, 0, sizeof(log)); 3122 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 3123 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 3124 log.u_bbr.flex4 = arg1; 3125 log.u_bbr.flex5 = arg2; 3126 log.u_bbr.flex7 = rack->r_ctl.rc_user_set_min_segs; 3127 log.u_bbr.flex6 = arg3; 3128 log.u_bbr.flex8 = frm; 3129 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3130 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3131 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3132 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 3133 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3134 log.u_bbr.pacing_gain = rack->r_must_retran; 3135 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv, 3136 &tptosocket(tp)->so_snd, 3137 TCP_HDWR_PACE_SIZE, 0, 0, &log, false, &tv); 3138 } 3139 } 3140 3141 static void 3142 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t pacing_delay, 3143 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 3144 { 3145 if (tcp_bblogging_on(rack->rc_tp)) { 3146 union tcp_log_stackspecific log; 3147 struct timeval tv; 3148 3149 memset(&log, 0, sizeof(log)); 3150 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3151 log.u_bbr.flex1 = pacing_delay; 3152 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 3153 log.u_bbr.flex4 = reason; 3154 if (rack->rack_no_prr) 3155 log.u_bbr.flex5 = 0; 3156 else 3157 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3158 log.u_bbr.flex7 = hpts_calling; 3159 log.u_bbr.flex8 = rack->rc_in_persist; 3160 log.u_bbr.lt_epoch = cwnd_to_use; 3161 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3162 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3163 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3164 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3165 log.u_bbr.pacing_gain = rack->r_must_retran; 3166 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 3167 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3168 log.u_bbr.bw_inuse <<= 32; 3169 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3170 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3171 &rack->rc_inp->inp_socket->so_rcv, 3172 &rack->rc_inp->inp_socket->so_snd, 3173 BBR_LOG_JUSTRET, 0, 3174 tlen, &log, false, &tv); 3175 } 3176 } 3177 3178 static void 3179 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 3180 struct timeval *tv, uint32_t flags_on_entry) 3181 { 3182 if (tcp_bblogging_on(rack->rc_tp)) { 3183 union tcp_log_stackspecific log; 3184 3185 memset(&log, 0, sizeof(log)); 3186 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3187 log.u_bbr.flex1 = line; 3188 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 3189 log.u_bbr.flex3 = flags_on_entry; 3190 log.u_bbr.flex4 = us_cts; 3191 if (rack->rack_no_prr) 3192 log.u_bbr.flex5 = 0; 3193 else 3194 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3195 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 3196 log.u_bbr.flex7 = hpts_removed; 3197 log.u_bbr.flex8 = 1; 3198 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 3199 log.u_bbr.timeStamp = us_cts; 3200 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3201 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3202 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3203 log.u_bbr.pacing_gain = rack->r_must_retran; 3204 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3205 log.u_bbr.bw_inuse <<= 32; 3206 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3207 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3208 &rack->rc_inp->inp_socket->so_rcv, 3209 
&rack->rc_inp->inp_socket->so_snd, 3210 BBR_LOG_TIMERCANC, 0, 3211 0, &log, false, tv); 3212 } 3213 } 3214 3215 static void 3216 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 3217 uint32_t flex1, uint32_t flex2, 3218 uint32_t flex3, uint32_t flex4, 3219 uint32_t flex5, uint32_t flex6, 3220 uint16_t flex7, uint8_t mod) 3221 { 3222 if (tcp_bblogging_on(rack->rc_tp)) { 3223 union tcp_log_stackspecific log; 3224 struct timeval tv; 3225 3226 if (mod == 1) { 3227 /* No you can't use 1, its for the real to cancel */ 3228 return; 3229 } 3230 memset(&log, 0, sizeof(log)); 3231 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3232 log.u_bbr.flex1 = flex1; 3233 log.u_bbr.flex2 = flex2; 3234 log.u_bbr.flex3 = flex3; 3235 log.u_bbr.flex4 = flex4; 3236 log.u_bbr.flex5 = flex5; 3237 log.u_bbr.flex6 = flex6; 3238 log.u_bbr.flex7 = flex7; 3239 log.u_bbr.flex8 = mod; 3240 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3241 &rack->rc_inp->inp_socket->so_rcv, 3242 &rack->rc_inp->inp_socket->so_snd, 3243 BBR_LOG_TIMERCANC, 0, 3244 0, &log, false, &tv); 3245 } 3246 } 3247 3248 static void 3249 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 3250 { 3251 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3252 union tcp_log_stackspecific log; 3253 struct timeval tv; 3254 3255 memset(&log, 0, sizeof(log)); 3256 log.u_bbr.flex1 = timers; 3257 log.u_bbr.flex2 = ret; 3258 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 3259 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3260 log.u_bbr.flex5 = cts; 3261 if (rack->rack_no_prr) 3262 log.u_bbr.flex6 = 0; 3263 else 3264 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; 3265 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3266 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3267 log.u_bbr.pacing_gain = rack->r_must_retran; 3268 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3269 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3270 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3271 &rack->rc_inp->inp_socket->so_rcv, 3272 &rack->rc_inp->inp_socket->so_snd, 3273 BBR_LOG_TO_PROCESS, 0, 3274 0, &log, false, &tv); 3275 } 3276 } 3277 3278 static void 3279 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line) 3280 { 3281 if (tcp_bblogging_on(rack->rc_tp)) { 3282 union tcp_log_stackspecific log; 3283 struct timeval tv; 3284 3285 memset(&log, 0, sizeof(log)); 3286 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 3287 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 3288 if (rack->rack_no_prr) 3289 log.u_bbr.flex3 = 0; 3290 else 3291 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 3292 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 3293 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 3294 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 3295 log.u_bbr.flex7 = line; 3296 log.u_bbr.flex8 = frm; 3297 log.u_bbr.pkts_out = orig_cwnd; 3298 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3299 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3300 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 3301 log.u_bbr.use_lt_bw <<= 1; 3302 log.u_bbr.use_lt_bw |= rack->r_might_revert; 3303 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3304 &rack->rc_inp->inp_socket->so_rcv, 3305 &rack->rc_inp->inp_socket->so_snd, 3306 BBR_LOG_BBRUPD, 0, 3307 0, &log, false, &tv); 3308 } 3309 } 3310 3311 static void 3312 rack_counter_destroy(void) 3313 { 3314 counter_u64_free(rack_total_bytes); 3315 counter_u64_free(rack_fto_send); 3316 counter_u64_free(rack_fto_rsm_send); 3317 counter_u64_free(rack_nfto_resend); 3318 counter_u64_free(rack_hw_pace_init_fail); 3319 
counter_u64_free(rack_hw_pace_lost); 3320 counter_u64_free(rack_non_fto_send); 3321 counter_u64_free(rack_extended_rfo); 3322 counter_u64_free(rack_ack_total); 3323 counter_u64_free(rack_express_sack); 3324 counter_u64_free(rack_sack_total); 3325 counter_u64_free(rack_move_none); 3326 counter_u64_free(rack_move_some); 3327 counter_u64_free(rack_sack_attacks_detected); 3328 counter_u64_free(rack_sack_attacks_reversed); 3329 counter_u64_free(rack_sack_attacks_suspect); 3330 counter_u64_free(rack_sack_used_next_merge); 3331 counter_u64_free(rack_sack_used_prev_merge); 3332 counter_u64_free(rack_tlp_tot); 3333 counter_u64_free(rack_tlp_newdata); 3334 counter_u64_free(rack_tlp_retran); 3335 counter_u64_free(rack_tlp_retran_bytes); 3336 counter_u64_free(rack_to_tot); 3337 counter_u64_free(rack_saw_enobuf); 3338 counter_u64_free(rack_saw_enobuf_hw); 3339 counter_u64_free(rack_saw_enetunreach); 3340 counter_u64_free(rack_hot_alloc); 3341 counter_u64_free(rack_to_alloc); 3342 counter_u64_free(rack_to_alloc_hard); 3343 counter_u64_free(rack_to_alloc_emerg); 3344 counter_u64_free(rack_to_alloc_limited); 3345 counter_u64_free(rack_alloc_limited_conns); 3346 counter_u64_free(rack_split_limited); 3347 counter_u64_free(rack_multi_single_eq); 3348 counter_u64_free(rack_rxt_clamps_cwnd); 3349 counter_u64_free(rack_rxt_clamps_cwnd_uniq); 3350 counter_u64_free(rack_proc_non_comp_ack); 3351 counter_u64_free(rack_sack_proc_all); 3352 counter_u64_free(rack_sack_proc_restart); 3353 counter_u64_free(rack_sack_proc_short); 3354 counter_u64_free(rack_sack_skipped_acked); 3355 counter_u64_free(rack_sack_splits); 3356 counter_u64_free(rack_input_idle_reduces); 3357 counter_u64_free(rack_collapsed_win); 3358 counter_u64_free(rack_collapsed_win_rxt); 3359 counter_u64_free(rack_collapsed_win_rxt_bytes); 3360 counter_u64_free(rack_collapsed_win_seen); 3361 counter_u64_free(rack_try_scwnd); 3362 counter_u64_free(rack_persists_sends); 3363 counter_u64_free(rack_persists_acks); 3364 counter_u64_free(rack_persists_loss); 3365 counter_u64_free(rack_persists_lost_ends); 3366 #ifdef INVARIANTS 3367 counter_u64_free(rack_adjust_map_bw); 3368 #endif 3369 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 3370 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 3371 } 3372 3373 static struct rack_sendmap * 3374 rack_alloc(struct tcp_rack *rack) 3375 { 3376 struct rack_sendmap *rsm; 3377 3378 /* 3379 * First get the top of the list it in 3380 * theory is the "hottest" rsm we have, 3381 * possibly just freed by ack processing. 3382 */ 3383 if (rack->rc_free_cnt > rack_free_cache) { 3384 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 3385 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3386 counter_u64_add(rack_hot_alloc, 1); 3387 rack->rc_free_cnt--; 3388 return (rsm); 3389 } 3390 /* 3391 * Once we get under our free cache we probably 3392 * no longer have a "hot" one available. Lets 3393 * get one from UMA. 3394 */ 3395 rsm = uma_zalloc(rack_zone, M_NOWAIT); 3396 if (rsm) { 3397 rack->r_ctl.rc_num_maps_alloced++; 3398 counter_u64_add(rack_to_alloc, 1); 3399 return (rsm); 3400 } 3401 /* 3402 * Dig in to our aux rsm's (the last two) since 3403 * UMA failed to get us one. 
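 * To summarize the fallback order in this function: the free list above
 * the cache floor is tried first, then UMA, and finally these reserved
 * free-list entries; only when all three fail do we return NULL.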
3404 */ 3405 if (rack->rc_free_cnt) { 3406 counter_u64_add(rack_to_alloc_emerg, 1); 3407 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 3408 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3409 rack->rc_free_cnt--; 3410 return (rsm); 3411 } 3412 return (NULL); 3413 } 3414 3415 static struct rack_sendmap * 3416 rack_alloc_full_limit(struct tcp_rack *rack) 3417 { 3418 if ((V_tcp_map_entries_limit > 0) && 3419 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 3420 counter_u64_add(rack_to_alloc_limited, 1); 3421 if (!rack->alloc_limit_reported) { 3422 rack->alloc_limit_reported = 1; 3423 counter_u64_add(rack_alloc_limited_conns, 1); 3424 } 3425 return (NULL); 3426 } 3427 return (rack_alloc(rack)); 3428 } 3429 3430 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 3431 static struct rack_sendmap * 3432 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 3433 { 3434 struct rack_sendmap *rsm; 3435 3436 if (limit_type) { 3437 /* currently there is only one limit type */ 3438 if (rack->r_ctl.rc_split_limit > 0 && 3439 rack->r_ctl.rc_num_split_allocs >= rack->r_ctl.rc_split_limit) { 3440 counter_u64_add(rack_split_limited, 1); 3441 if (!rack->alloc_limit_reported) { 3442 rack->alloc_limit_reported = 1; 3443 counter_u64_add(rack_alloc_limited_conns, 1); 3444 } 3445 return (NULL); 3446 } 3447 } 3448 3449 /* allocate and mark in the limit type, if set */ 3450 rsm = rack_alloc(rack); 3451 if (rsm != NULL && limit_type) { 3452 rsm->r_limit_type = limit_type; 3453 rack->r_ctl.rc_num_split_allocs++; 3454 } 3455 return (rsm); 3456 } 3457 3458 static void 3459 rack_free_trim(struct tcp_rack *rack) 3460 { 3461 struct rack_sendmap *rsm; 3462 3463 /* 3464 * Free up all the tail entries until 3465 * we get our list down to the limit. 
3466 */ 3467 while (rack->rc_free_cnt > rack_free_cache) { 3468 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); 3469 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3470 rack->rc_free_cnt--; 3471 rack->r_ctl.rc_num_maps_alloced--; 3472 uma_zfree(rack_zone, rsm); 3473 } 3474 } 3475 3476 static void 3477 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 3478 { 3479 if (rsm->r_flags & RACK_APP_LIMITED) { 3480 KASSERT((rack->r_ctl.rc_app_limited_cnt > 0), 3481 ("app_cnt %u, rsm %p", rack->r_ctl.rc_app_limited_cnt, rsm)); 3482 rack->r_ctl.rc_app_limited_cnt--; 3483 } 3484 if (rsm->r_limit_type) { 3485 /* currently there is only one limit type */ 3486 rack->r_ctl.rc_num_split_allocs--; 3487 } 3488 if (rsm == rack->r_ctl.rc_first_appl) { 3489 rack->r_ctl.cleared_app_ack_seq = rsm->r_end; 3490 rack->r_ctl.cleared_app_ack = 1; 3491 if (rack->r_ctl.rc_app_limited_cnt == 0) 3492 rack->r_ctl.rc_first_appl = NULL; 3493 else 3494 rack->r_ctl.rc_first_appl = tqhash_find(rack->r_ctl.tqh, rsm->r_nseq_appl); 3495 } 3496 if (rsm == rack->r_ctl.rc_resend) 3497 rack->r_ctl.rc_resend = NULL; 3498 if (rsm == rack->r_ctl.rc_end_appl) 3499 rack->r_ctl.rc_end_appl = NULL; 3500 if (rack->r_ctl.rc_tlpsend == rsm) 3501 rack->r_ctl.rc_tlpsend = NULL; 3502 if (rack->r_ctl.rc_sacklast == rsm) 3503 rack->r_ctl.rc_sacklast = NULL; 3504 memset(rsm, 0, sizeof(struct rack_sendmap)); 3505 /* Make sure we are not going to overrun our count limit of 0xff */ 3506 if ((rack->rc_free_cnt + 1) > RACK_FREE_CNT_MAX) { 3507 rack_free_trim(rack); 3508 } 3509 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); 3510 rack->rc_free_cnt++; 3511 } 3512 3513 static uint32_t 3514 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack) 3515 { 3516 uint64_t srtt, bw, len, tim; 3517 uint32_t segsiz, def_len, minl; 3518 3519 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3520 def_len = rack_def_data_window * segsiz; 3521 if (rack->rc_gp_filled == 0) { 3522 /* 3523 * We have no measurement (IW is in flight?) so 3524 * we can only guess using our data_window sysctl 3525 * value (usually 20MSS). 3526 */ 3527 return (def_len); 3528 } 3529 /* 3530 * Now we have a number of factors to consider. 3531 * 3532 * 1) We have a desired BDP which is usually 3533 * at least 2. 3534 * 2) We have a minimum number of rtt's usually 1 SRTT 3535 * but we allow it too to be more. 3536 * 3) We want to make sure a measurement last N useconds (if 3537 * we have set rack_min_measure_usec. 3538 * 3539 * We handle the first concern here by trying to create a data 3540 * window of max(rack_def_data_window, DesiredBDP). The 3541 * second concern we handle in not letting the measurement 3542 * window end normally until at least the required SRTT's 3543 * have gone by which is done further below in 3544 * rack_enough_for_measurement(). Finally the third concern 3545 * we also handle here by calculating how long that time 3546 * would take at the current BW and then return the 3547 * max of our first calculation and that length. Note 3548 * that if rack_min_measure_usec is 0, we don't deal 3549 * with concern 3. Also for both Concern 1 and 3 an 3550 * application limited period could end the measurement 3551 * earlier. 3552 * 3553 * So lets calculate the BDP with the "known" b/w using 3554 * the SRTT as our rtt and then multiply it by the goal. 
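 * A purely illustrative example (assumed numbers, not defaults from this
 * file): with a measured b/w of 12,500,000 bytes/sec, an SRTT of
 * 40,000 usec and a goal BDP of 2, the code below computes
 * len = 12,500,000 * 40,000 / 1,000,000 * 2 = 1,000,000 bytes, which
 * roundup() then pads to the next multiple of segsiz.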
3555 */
3556 bw = rack_get_bw(rack);
3557 srtt = (uint64_t)tp->t_srtt;
3558 len = bw * srtt;
3559 len /= (uint64_t)HPTS_USEC_IN_SEC;
3560 len *= max(1, rack_goal_bdp);
3561 /* Now we need to round up to the nearest MSS */
3562 len = roundup(len, segsiz);
3563 if (rack_min_measure_usec) {
3564 /* Now calculate our min length for this b/w */
3565 tim = rack_min_measure_usec;
3566 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC;
3567 if (minl == 0)
3568 minl = 1;
3569 minl = roundup(minl, segsiz);
3570 if (len < minl)
3571 len = minl;
3572 }
3573 /*
3574 * Now if we have a very small window we want
3575 * to attempt to get the window that is
3576 * as small as possible. This happens on
3577 * low b/w connections and we don't want to
3578 * span huge numbers of rtt's between measurements.
3579 *
3580 * We basically include 2 over our "MIN window" so
3581 * that the measurement can be shortened (possibly) by
3582 * an ack'ed packet.
3583 */
3584 if (len < def_len)
3585 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz)));
3586 else
3587 return (max((uint32_t)len, def_len));
3588
3589 }
3590
3591 static int
3592 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality)
3593 {
3594 uint32_t tim, srtts, segsiz;
3595
3596 /*
3597 * Has enough time passed for the GP measurement to be valid?
3598 */
3599 if (SEQ_LT(th_ack, tp->gput_seq)) {
3600 /* Not enough bytes yet */
3601 return (0);
3602 }
3603 if ((tp->snd_max == tp->snd_una) ||
3604 (th_ack == tp->snd_max)){
3605 /*
3606 * All is acked. The quality of an all-acked measurement is
3607 * usually low or medium, but in theory we could split
3608 * all acked into two cases, where you got
3609 * a significant amount of your window and
3610 * where you did not. For now we leave it
3611 * but it is something to contemplate in the
3612 * future. The danger here is that delayed ack
3613 * is affecting the last byte (which is a 50:50 chance).
3614 */
3615 *quality = RACK_QUALITY_ALLACKED;
3616 return (1);
3617 }
3618 if (SEQ_GEQ(th_ack, tp->gput_ack)) {
3619 /*
3620 * We obtained the entire window of data we wanted, so
3621 * no matter if we are in recovery or not
3622 * it's ok, since expanding the window does not
3623 * make things fuzzy (or at least not as much).
3624 */
3625 *quality = RACK_QUALITY_HIGH;
3626 return (1);
3627 }
3628 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
3629 if (SEQ_LT(th_ack, tp->gput_ack) &&
3630 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) {
3631 /* Not enough bytes yet */
3632 return (0);
3633 }
3634 if (rack->r_ctl.rc_first_appl &&
3635 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) {
3636 /*
3637 * We are up to the app limited send point so
3638 * we have to measure irrespective of the time.
3639 */
3640 *quality = RACK_QUALITY_APPLIMITED;
3641 return (1);
3642 }
3643 /* Now what about time? */
3644 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts);
3645 tim = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time) - tp->gput_ts;
3646 if ((tim >= srtts) && (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) {
3647 /*
3648 * We do not allow a measurement if we are in recovery
3649 * that would shrink the goodput window we wanted.
3650 * This is to prevent cloudiness of when the last send
3651 * was actually made.
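 * As an illustrative example (assuming rack_min_srtts is 1): with an
 * rc_gp_srtt of 30,000 usec, at least 30,000 usec must have elapsed
 * since gput_ts, and the connection must be out of recovery, before
 * this branch accepts the measurement as high quality.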
3652 */ 3653 *quality = RACK_QUALITY_HIGH; 3654 return (1); 3655 } 3656 /* Nope not even a full SRTT has passed */ 3657 return (0); 3658 } 3659 3660 static void 3661 rack_log_timely(struct tcp_rack *rack, 3662 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd, 3663 uint64_t up_bnd, int line, uint8_t method) 3664 { 3665 if (tcp_bblogging_on(rack->rc_tp)) { 3666 union tcp_log_stackspecific log; 3667 struct timeval tv; 3668 3669 memset(&log, 0, sizeof(log)); 3670 log.u_bbr.flex1 = logged; 3671 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; 3672 log.u_bbr.flex2 <<= 4; 3673 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; 3674 log.u_bbr.flex2 <<= 4; 3675 log.u_bbr.flex2 |= rack->rc_gp_incr; 3676 log.u_bbr.flex2 <<= 4; 3677 log.u_bbr.flex2 |= rack->rc_gp_bwred; 3678 log.u_bbr.flex3 = rack->rc_gp_incr; 3679 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3680 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; 3681 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; 3682 log.u_bbr.flex7 = rack->rc_gp_bwred; 3683 log.u_bbr.flex8 = method; 3684 log.u_bbr.cur_del_rate = cur_bw; 3685 log.u_bbr.delRate = low_bnd; 3686 log.u_bbr.bw_inuse = up_bnd; 3687 log.u_bbr.rttProp = rack_get_bw(rack); 3688 log.u_bbr.pkt_epoch = line; 3689 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3690 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3691 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3692 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3693 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3694 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; 3695 log.u_bbr.cwnd_gain <<= 1; 3696 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; 3697 log.u_bbr.cwnd_gain <<= 1; 3698 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 3699 log.u_bbr.cwnd_gain <<= 1; 3700 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 3701 log.u_bbr.lost = rack->r_ctl.rc_loss_count; 3702 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3703 &rack->rc_inp->inp_socket->so_rcv, 3704 &rack->rc_inp->inp_socket->so_snd, 3705 TCP_TIMELY_WORK, 0, 3706 0, &log, false, &tv); 3707 } 3708 } 3709 3710 static int 3711 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) 3712 { 3713 /* 3714 * Before we increase we need to know if 3715 * the estimate just made was less than 3716 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est) 3717 * 3718 * If we already are pacing at a fast enough 3719 * rate to push us faster there is no sense of 3720 * increasing. 3721 * 3722 * We first caculate our actual pacing rate (ss or ca multiplier 3723 * times our cur_bw). 3724 * 3725 * Then we take the last measured rate and multipy by our 3726 * maximum pacing overage to give us a max allowable rate. 3727 * 3728 * If our act_rate is smaller than our max_allowable rate 3729 * then we should increase. Else we should hold steady. 3730 * 3731 */ 3732 uint64_t act_rate, max_allow_rate; 3733 3734 if (rack_timely_no_stopping) 3735 return (1); 3736 3737 if ((cur_bw == 0) || (last_bw_est == 0)) { 3738 /* 3739 * Initial startup case or 3740 * everything is acked case. 3741 */ 3742 rack_log_timely(rack, mult, cur_bw, 0, 0, 3743 __LINE__, 9); 3744 return (1); 3745 } 3746 if (mult <= 100) { 3747 /* 3748 * We can always pace at or slightly above our rate. 
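 * Further down, a purely illustrative example of the check (assuming
 * rack_max_per_above is 10, as the comment there suggests): a cur_bw of
 * 1,000,000 bytes/sec with a mult of 150 gives an act_rate of 1,500,000,
 * while a last_bw_est of 1,200,000 gives a max_allow_rate of 1,320,000,
 * so the raise is refused.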
3749 */
3750 rack_log_timely(rack, mult, cur_bw, 0, 0,
3751 __LINE__, 9);
3752 return (1);
3753 }
3754 act_rate = cur_bw * (uint64_t)mult;
3755 act_rate /= 100;
3756 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100);
3757 max_allow_rate /= 100;
3758 if (act_rate < max_allow_rate) {
3759 /*
3760 * Here the rate we are actually pacing at
3761 * is smaller than 10% above our last measurement.
3762 * This means we are pacing below what we would
3763 * like to try to achieve (plus some wiggle room).
3764 */
3765 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
3766 __LINE__, 9);
3767 return (1);
3768 } else {
3769 /*
3770 * Here we are already pacing at least rack_max_per_above(10%)
3771 * what we are getting back. This indicates most likely
3772 * that we are being limited (cwnd/rwnd/app) and can't
3773 * get any more b/w. There is no sense in trying to
3774 * raise the pacing rate; it's not speeding us up
3775 * and we already are pacing faster than we are getting.
3776 */
3777 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
3778 __LINE__, 8);
3779 return (0);
3780 }
3781 }
3782
3783 static void
3784 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack)
3785 {
3786 /*
3787 * When we drag bottom, we want to ensure
3788 * that no multiplier is below 1.0; if one is,
3789 * we want to restore it to at least that.
3790 */
3791 if (rack->r_ctl.rack_per_of_gp_rec < 100) {
3792 /* This is unlikely; we usually do not touch recovery */
3793 rack->r_ctl.rack_per_of_gp_rec = 100;
3794 }
3795 if (rack->r_ctl.rack_per_of_gp_ca < 100) {
3796 rack->r_ctl.rack_per_of_gp_ca = 100;
3797 }
3798 if (rack->r_ctl.rack_per_of_gp_ss < 100) {
3799 rack->r_ctl.rack_per_of_gp_ss = 100;
3800 }
3801 }
3802
3803 static void
3804 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack)
3805 {
3806 if (rack->r_ctl.rack_per_of_gp_ca > 100) {
3807 rack->r_ctl.rack_per_of_gp_ca = 100;
3808 }
3809 if (rack->r_ctl.rack_per_of_gp_ss > 100) {
3810 rack->r_ctl.rack_per_of_gp_ss = 100;
3811 }
3812 }
3813
3814 static void
3815 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override)
3816 {
3817 int32_t calc, logged, plus;
3818
3819 logged = 0;
3820
3821 if (rack->rc_skip_timely)
3822 return;
3823 if (override) {
3824 /*
3825 * override is passed when we are
3826 * losing b/w and making one last
3827 * gasp at trying not to lose out
3828 * to a new-reno flow.
3829 */
3830 goto extra_boost;
3831 }
3832 /* In classic timely we boost by 5x if we have 5 increases in a row, let's not */
3833 if (rack->rc_gp_incr &&
3834 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) {
3835 /*
3836 * Reset and get 5 strokes more before the boost. Note
3837 * that the count is 0 based so we have to add one.
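 * (Illustratively, assuming rack_gp_increase_per is 2 and
 * RACK_TIMELY_CNT_BOOST is 5, the extra_boost path below adds
 * 2 * 5 = 10 percentage points in one step instead of the usual 2.)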
3838 */ 3839 extra_boost: 3840 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3841 rack->rc_gp_timely_inc_cnt = 0; 3842 } else 3843 plus = (uint32_t)rack_gp_increase_per; 3844 /* Must be at least 1% increase for true timely increases */ 3845 if ((plus < 1) && 3846 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3847 plus = 1; 3848 if (rack->rc_gp_saw_rec && 3849 (rack->rc_gp_no_rec_chg == 0) && 3850 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3851 rack->r_ctl.rack_per_of_gp_rec)) { 3852 /* We have been in recovery ding it too */ 3853 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 3854 if (calc > 0xffff) 3855 calc = 0xffff; 3856 logged |= 1; 3857 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 3858 if (rack->r_ctl.rack_per_upper_bound_ca && 3859 (rack->rc_dragged_bottom == 0) && 3860 (rack->r_ctl.rack_per_of_gp_rec > rack->r_ctl.rack_per_upper_bound_ca)) 3861 rack->r_ctl.rack_per_of_gp_rec = rack->r_ctl.rack_per_upper_bound_ca; 3862 } 3863 if (rack->rc_gp_saw_ca && 3864 (rack->rc_gp_saw_ss == 0) && 3865 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3866 rack->r_ctl.rack_per_of_gp_ca)) { 3867 /* In CA */ 3868 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 3869 if (calc > 0xffff) 3870 calc = 0xffff; 3871 logged |= 2; 3872 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 3873 if (rack->r_ctl.rack_per_upper_bound_ca && 3874 (rack->rc_dragged_bottom == 0) && 3875 (rack->r_ctl.rack_per_of_gp_ca > rack->r_ctl.rack_per_upper_bound_ca)) 3876 rack->r_ctl.rack_per_of_gp_ca = rack->r_ctl.rack_per_upper_bound_ca; 3877 } 3878 if (rack->rc_gp_saw_ss && 3879 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3880 rack->r_ctl.rack_per_of_gp_ss)) { 3881 /* In SS */ 3882 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 3883 if (calc > 0xffff) 3884 calc = 0xffff; 3885 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 3886 if (rack->r_ctl.rack_per_upper_bound_ss && 3887 (rack->rc_dragged_bottom == 0) && 3888 (rack->r_ctl.rack_per_of_gp_ss > rack->r_ctl.rack_per_upper_bound_ss)) 3889 rack->r_ctl.rack_per_of_gp_ss = rack->r_ctl.rack_per_upper_bound_ss; 3890 logged |= 4; 3891 } 3892 if (logged && 3893 (rack->rc_gp_incr == 0)){ 3894 /* Go into increment mode */ 3895 rack->rc_gp_incr = 1; 3896 rack->rc_gp_timely_inc_cnt = 0; 3897 } 3898 if (rack->rc_gp_incr && 3899 logged && 3900 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 3901 rack->rc_gp_timely_inc_cnt++; 3902 } 3903 rack_log_timely(rack, logged, plus, 0, 0, 3904 __LINE__, 1); 3905 } 3906 3907 static uint32_t 3908 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 3909 { 3910 /*- 3911 * norm_grad = rtt_diff / minrtt; 3912 * new_per = curper * (1 - B * norm_grad) 3913 * 3914 * B = rack_gp_decrease_per (default 80%) 3915 * rtt_dif = input var current rtt-diff 3916 * curper = input var current percentage 3917 * minrtt = from rack filter 3918 * 3919 * In order to do the floating point calculations above we 3920 * do an integer conversion. 
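 * A purely illustrative example with assumed inputs: curper = 200,
 * rtt_diff = 5,000 usec, minrtt = 50,000 usec and B = 80 gives
 * norm_grad = 0.1, a reduction factor of 1 - 0.8 * 0.1 = 0.92, and a
 * new_per of 184; the integer math below reaches the same result in
 * millionths.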
The code looks confusing so let me 3921 * translate it into something that use more variables and 3922 * is clearer for us humans :) 3923 * 3924 * uint64_t norm_grad, inverse, reduce_by, final_result; 3925 * uint32_t perf; 3926 * 3927 * norm_grad = (((uint64_t)rtt_diff * 1000000) / 3928 * (uint64_t)get_filter_small(&rack->r_ctl.rc_gp_min_rtt)); 3929 * inverse = ((uint64_t)rack_gp_decrease * (uint64_t)1000000) * norm_grad; 3930 * inverse /= 1000000; 3931 * reduce_by = (1000000 - inverse); 3932 * final_result = (cur_per * reduce_by) / 1000000; 3933 * perf = (uint32_t)final_result; 3934 */ 3935 uint64_t perf; 3936 3937 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3938 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 3939 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 3940 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 3941 (uint64_t)1000000)) / 3942 (uint64_t)1000000); 3943 if (perf > curper) { 3944 /* TSNH */ 3945 perf = curper - 1; 3946 } 3947 return ((uint32_t)perf); 3948 } 3949 3950 static uint32_t 3951 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 3952 { 3953 /* 3954 * highrttthresh 3955 * result = curper * (1 - (B * ( 1 - ------ )) 3956 * gp_srtt 3957 * 3958 * B = rack_gp_decrease_per (default .8 i.e. 80) 3959 * highrttthresh = filter_min * rack_gp_rtt_maxmul 3960 */ 3961 uint64_t perf; 3962 uint32_t highrttthresh; 3963 3964 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 3965 3966 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3967 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 3968 ((uint64_t)highrttthresh * (uint64_t)1000000) / 3969 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 3970 if (tcp_bblogging_on(rack->rc_tp)) { 3971 uint64_t log1; 3972 3973 log1 = rtt; 3974 log1 <<= 32; 3975 log1 |= highrttthresh; 3976 rack_log_timely(rack, 3977 rack_gp_decrease_per, 3978 (uint64_t)curper, 3979 log1, 3980 perf, 3981 __LINE__, 3982 15); 3983 } 3984 return (perf); 3985 } 3986 3987 static void 3988 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 3989 { 3990 uint64_t logvar, logvar2, logvar3; 3991 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 3992 3993 if (rack->rc_skip_timely) 3994 return; 3995 if (rack->rc_gp_incr) { 3996 /* Turn off increment counting */ 3997 rack->rc_gp_incr = 0; 3998 rack->rc_gp_timely_inc_cnt = 0; 3999 } 4000 ss_red = ca_red = rec_red = 0; 4001 logged = 0; 4002 /* Calculate the reduction value */ 4003 if (rtt_diff < 0) { 4004 rtt_diff *= -1; 4005 } 4006 /* Must be at least 1% reduction */ 4007 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 4008 /* We have been in recovery ding it too */ 4009 if (timely_says == 2) { 4010 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 4011 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 4012 if (alt < new_per) 4013 val = alt; 4014 else 4015 val = new_per; 4016 } else 4017 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 4018 if (rack->r_ctl.rack_per_of_gp_rec > val) { 4019 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 4020 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 4021 } else { 4022 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 4023 rec_red = 0; 4024 } 4025 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 4026 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 4027 logged |= 1; 4028 } 4029 if (rack->rc_gp_saw_ss) { 4030 /* Sent in SS */ 4031 if 
(timely_says == 2) { 4032 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 4033 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 4034 if (alt < new_per) 4035 val = alt; 4036 else 4037 val = new_per; 4038 } else 4039 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 4040 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 4041 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 4042 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 4043 } else { 4044 ss_red = new_per; 4045 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 4046 logvar = new_per; 4047 logvar <<= 32; 4048 logvar |= alt; 4049 logvar2 = (uint32_t)rtt; 4050 logvar2 <<= 32; 4051 logvar2 |= (uint32_t)rtt_diff; 4052 logvar3 = rack_gp_rtt_maxmul; 4053 logvar3 <<= 32; 4054 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4055 rack_log_timely(rack, timely_says, 4056 logvar2, logvar3, 4057 logvar, __LINE__, 10); 4058 } 4059 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 4060 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 4061 logged |= 4; 4062 } else if (rack->rc_gp_saw_ca) { 4063 /* Sent in CA */ 4064 if (timely_says == 2) { 4065 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 4066 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 4067 if (alt < new_per) 4068 val = alt; 4069 else 4070 val = new_per; 4071 } else 4072 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 4073 if (rack->r_ctl.rack_per_of_gp_ca > val) { 4074 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 4075 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val; 4076 } else { 4077 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 4078 ca_red = 0; 4079 logvar = new_per; 4080 logvar <<= 32; 4081 logvar |= alt; 4082 logvar2 = (uint32_t)rtt; 4083 logvar2 <<= 32; 4084 logvar2 |= (uint32_t)rtt_diff; 4085 logvar3 = rack_gp_rtt_maxmul; 4086 logvar3 <<= 32; 4087 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4088 rack_log_timely(rack, timely_says, 4089 logvar2, logvar3, 4090 logvar, __LINE__, 10); 4091 } 4092 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 4093 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 4094 logged |= 2; 4095 } 4096 if (rack->rc_gp_timely_dec_cnt < 0x7) { 4097 rack->rc_gp_timely_dec_cnt++; 4098 if (rack_timely_dec_clear && 4099 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 4100 rack->rc_gp_timely_dec_cnt = 0; 4101 } 4102 logvar = ss_red; 4103 logvar <<= 32; 4104 logvar |= ca_red; 4105 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 4106 __LINE__, 2); 4107 } 4108 4109 static void 4110 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 4111 uint32_t rtt, uint32_t line, uint8_t reas) 4112 { 4113 if (tcp_bblogging_on(rack->rc_tp)) { 4114 union tcp_log_stackspecific log; 4115 struct timeval tv; 4116 4117 memset(&log, 0, sizeof(log)); 4118 log.u_bbr.flex1 = line; 4119 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 4120 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 4121 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 4122 log.u_bbr.flex5 = rtt; 4123 log.u_bbr.flex6 = rack->rc_highly_buffered; 4124 log.u_bbr.flex6 <<= 1; 4125 log.u_bbr.flex6 |= rack->forced_ack; 4126 log.u_bbr.flex6 <<= 1; 4127 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 4128 log.u_bbr.flex6 <<= 1; 4129 log.u_bbr.flex6 |= rack->in_probe_rtt; 4130 log.u_bbr.flex6 <<= 1; 4131 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 4132 
log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; 4133 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 4134 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 4135 log.u_bbr.flex8 = reas; 4136 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4137 log.u_bbr.delRate = rack_get_bw(rack); 4138 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 4139 log.u_bbr.cur_del_rate <<= 32; 4140 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 4141 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 4142 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 4143 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 4144 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 4145 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 4146 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 4147 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 4148 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4149 log.u_bbr.rttProp = us_cts; 4150 log.u_bbr.rttProp <<= 32; 4151 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 4152 TCP_LOG_EVENTP(rack->rc_tp, NULL, 4153 &rack->rc_inp->inp_socket->so_rcv, 4154 &rack->rc_inp->inp_socket->so_snd, 4155 BBR_LOG_RTT_SHRINKS, 0, 4156 0, &log, false, &rack->r_ctl.act_rcv_time); 4157 } 4158 } 4159 4160 static void 4161 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 4162 { 4163 uint64_t bwdp; 4164 4165 bwdp = rack_get_bw(rack); 4166 bwdp *= (uint64_t)rtt; 4167 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 4168 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 4169 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) { 4170 /* 4171 * A window protocol must be able to have 4 packets 4172 * outstanding as the floor in order to function 4173 * (especially considering delayed ack :D). 4174 */ 4175 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 4176 } 4177 } 4178 4179 static void 4180 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 4181 { 4182 /** 4183 * ProbeRTT is a bit different in rack_pacing than in 4184 * BBR. It is like BBR in that it uses the lowering of 4185 * the RTT as a signal that we saw something new and 4186 * counts from there for how long between. But it is 4187 * different in that its quite simple. It does not 4188 * play with the cwnd and wait until we get down 4189 * to N segments outstanding and hold that for 4190 * 200ms. Instead it just sets the pacing reduction 4191 * rate to a set percentage (70 by default) and hold 4192 * that for a number of recent GP Srtt's. 4193 */ 4194 uint32_t segsiz; 4195 4196 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 4197 if (rack->rc_gp_dyn_mul == 0) 4198 return; 4199 4200 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 4201 /* We are idle */ 4202 return; 4203 } 4204 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 4205 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 4206 /* 4207 * Stop the goodput now, the idea here is 4208 * that future measurements with in_probe_rtt 4209 * won't register if they are not greater so 4210 * we want to get what info (if any) is available 4211 * now. 
4212 */ 4213 rack_do_goodput_measurement(rack->rc_tp, rack, 4214 rack->rc_tp->snd_una, __LINE__, 4215 RACK_QUALITY_PROBERTT); 4216 } 4217 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 4218 rack->r_ctl.rc_time_probertt_entered = us_cts; 4219 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 4220 rack->r_ctl.rc_pace_min_segs); 4221 rack->in_probe_rtt = 1; 4222 rack->measure_saw_probe_rtt = 1; 4223 rack->r_ctl.rc_time_probertt_starts = 0; 4224 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 4225 if (rack_probertt_use_min_rtt_entry) 4226 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 4227 else 4228 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 4229 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4230 __LINE__, RACK_RTTS_ENTERPROBE); 4231 } 4232 4233 static void 4234 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 4235 { 4236 struct rack_sendmap *rsm; 4237 uint32_t segsiz; 4238 4239 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 4240 rack->r_ctl.rc_pace_min_segs); 4241 rack->in_probe_rtt = 0; 4242 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 4243 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 4244 /* 4245 * Stop the goodput now, the idea here is 4246 * that future measurements with in_probe_rtt 4247 * won't register if they are not greater so 4248 * we want to get what info (if any) is available 4249 * now. 4250 */ 4251 rack_do_goodput_measurement(rack->rc_tp, rack, 4252 rack->rc_tp->snd_una, __LINE__, 4253 RACK_QUALITY_PROBERTT); 4254 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 4255 /* 4256 * We don't have enough data to make a measurement. 4257 * So lets just stop and start here after exiting 4258 * probe-rtt. We probably are not interested in 4259 * the results anyway. 4260 */ 4261 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 4262 } 4263 /* 4264 * Measurements through the current snd_max are going 4265 * to be limited by the slower pacing rate. 4266 * 4267 * We need to mark these as app-limited so we 4268 * don't collapse the b/w. 4269 */ 4270 rsm = tqhash_max(rack->r_ctl.tqh); 4271 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 4272 if (rack->r_ctl.rc_app_limited_cnt == 0) 4273 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 4274 else { 4275 /* 4276 * Go out to the end app limited and mark 4277 * this new one as next and move the end_appl up 4278 * to this guy. 4279 */ 4280 if (rack->r_ctl.rc_end_appl) 4281 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 4282 rack->r_ctl.rc_end_appl = rsm; 4283 } 4284 rsm->r_flags |= RACK_APP_LIMITED; 4285 rack->r_ctl.rc_app_limited_cnt++; 4286 } 4287 /* 4288 * Now, we need to examine our pacing rate multipliers. 4289 * If its under 100%, we need to kick it back up to 4290 * 100%. We also don't let it be over our "max" above 4291 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 4292 * Note setting clamp_atexit_prtt to 0 has the effect 4293 * of setting CA/SS to 100% always at exit (which is 4294 * the default behavior). 4295 */ 4296 if (rack_probertt_clear_is) { 4297 rack->rc_gp_incr = 0; 4298 rack->rc_gp_bwred = 0; 4299 rack->rc_gp_timely_inc_cnt = 0; 4300 rack->rc_gp_timely_dec_cnt = 0; 4301 } 4302 /* Do we do any clamping at exit? 
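 * (Illustratively: rack_atexit_prtt_hbp applies when the path looked
 * highly buffered on entry, rack_atexit_prtt when it did not; a
 * non-zero value pins the CA/SS multipliers to it on exit.)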
*/ 4303 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 4304 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 4305 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 4306 } 4307 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 4308 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 4309 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 4310 } 4311 /* 4312 * Lets set rtt_diff to 0, so that we will get a "boost" 4313 * after exiting. 4314 */ 4315 rack->r_ctl.rc_rtt_diff = 0; 4316 4317 /* Clear all flags so we start fresh */ 4318 rack->rc_tp->t_bytes_acked = 0; 4319 rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 4320 /* 4321 * If configured to, set the cwnd and ssthresh to 4322 * our targets. 4323 */ 4324 if (rack_probe_rtt_sets_cwnd) { 4325 uint64_t ebdp; 4326 uint32_t setto; 4327 4328 /* Set ssthresh so we get into CA once we hit our target */ 4329 if (rack_probertt_use_min_rtt_exit == 1) { 4330 /* Set to min rtt */ 4331 rack_set_prtt_target(rack, segsiz, 4332 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 4333 } else if (rack_probertt_use_min_rtt_exit == 2) { 4334 /* Set to current gp rtt */ 4335 rack_set_prtt_target(rack, segsiz, 4336 rack->r_ctl.rc_gp_srtt); 4337 } else if (rack_probertt_use_min_rtt_exit == 3) { 4338 /* Set to entry gp rtt */ 4339 rack_set_prtt_target(rack, segsiz, 4340 rack->r_ctl.rc_entry_gp_rtt); 4341 } else { 4342 uint64_t sum; 4343 uint32_t setval; 4344 4345 sum = rack->r_ctl.rc_entry_gp_rtt; 4346 sum *= 10; 4347 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 4348 if (sum >= 20) { 4349 /* 4350 * A highly buffered path needs 4351 * cwnd space for timely to work. 4352 * Lets set things up as if 4353 * we are heading back here again. 4354 */ 4355 setval = rack->r_ctl.rc_entry_gp_rtt; 4356 } else if (sum >= 15) { 4357 /* 4358 * Lets take the smaller of the 4359 * two since we are just somewhat 4360 * buffered. 4361 */ 4362 setval = rack->r_ctl.rc_gp_srtt; 4363 if (setval > rack->r_ctl.rc_entry_gp_rtt) 4364 setval = rack->r_ctl.rc_entry_gp_rtt; 4365 } else { 4366 /* 4367 * Here we are not highly buffered 4368 * and should pick the min we can to 4369 * keep from causing loss. 
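 * An illustrative example of the ratio above: an rc_entry_gp_rtt of
 * 60,000 usec against an rc_gp_srtt of 20,000 usec gives sum = 30,
 * so the >= 20 branch treats the path as highly buffered; here the
 * ratio is under 15, so we fall back to the filtered min RTT.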
4370 */ 4371 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4372 } 4373 rack_set_prtt_target(rack, segsiz, 4374 setval); 4375 } 4376 if (rack_probe_rtt_sets_cwnd > 1) { 4377 /* There is a percentage here to boost */ 4378 ebdp = rack->r_ctl.rc_target_probertt_flight; 4379 ebdp *= rack_probe_rtt_sets_cwnd; 4380 ebdp /= 100; 4381 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 4382 } else 4383 setto = rack->r_ctl.rc_target_probertt_flight; 4384 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 4385 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 4386 /* Enforce a min */ 4387 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 4388 } 4389 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 4390 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 4391 } 4392 rack_log_rtt_shrinks(rack, us_cts, 4393 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4394 __LINE__, RACK_RTTS_EXITPROBE); 4395 /* Clear times last so log has all the info */ 4396 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 4397 rack->r_ctl.rc_time_probertt_entered = us_cts; 4398 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 4399 rack->r_ctl.rc_time_of_last_probertt = us_cts; 4400 } 4401 4402 static void 4403 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 4404 { 4405 /* Check in on probe-rtt */ 4406 4407 if (rack->rc_gp_filled == 0) { 4408 /* We do not do p-rtt unless we have gp measurements */ 4409 return; 4410 } 4411 if (rack->in_probe_rtt) { 4412 uint64_t no_overflow; 4413 uint32_t endtime, must_stay; 4414 4415 if (rack->r_ctl.rc_went_idle_time && 4416 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 4417 /* 4418 * We went idle during prtt, just exit now. 4419 */ 4420 rack_exit_probertt(rack, us_cts); 4421 } else if (rack_probe_rtt_safety_val && 4422 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 4423 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 4424 /* 4425 * Probe RTT safety value triggered! 4426 */ 4427 rack_log_rtt_shrinks(rack, us_cts, 4428 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4429 __LINE__, RACK_RTTS_SAFETY); 4430 rack_exit_probertt(rack, us_cts); 4431 } 4432 /* Calculate the max we will wait */ 4433 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 4434 if (rack->rc_highly_buffered) 4435 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 4436 /* Calculate the min we must wait */ 4437 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 4438 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 4439 TSTMP_LT(us_cts, endtime)) { 4440 uint32_t calc; 4441 /* Do we lower more? 
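 * (Illustrative numbers only: if rack_per_of_gp_probertt is 70 and
 * rack_per_of_gp_probertt_reduce is 10, then two gp_srtt's into
 * probe-rtt calc becomes 2 * 10 = 20 and the multiplier drops to
 * 70 - 20 = 50, never going below rack_per_of_gp_lowthresh.)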
*/ 4442 no_exit: 4443 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 4444 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 4445 else 4446 calc = 0; 4447 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 4448 if (calc) { 4449 /* Maybe */ 4450 calc *= rack_per_of_gp_probertt_reduce; 4451 if (calc > rack_per_of_gp_probertt) 4452 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 4453 else 4454 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 4455 /* Limit it too */ 4456 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 4457 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 4458 } 4459 /* We must reach target or the time set */ 4460 return; 4461 } 4462 if (rack->r_ctl.rc_time_probertt_starts == 0) { 4463 if ((TSTMP_LT(us_cts, must_stay) && 4464 rack->rc_highly_buffered) || 4465 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 4466 rack->r_ctl.rc_target_probertt_flight)) { 4467 /* We are not past the must_stay time */ 4468 goto no_exit; 4469 } 4470 rack_log_rtt_shrinks(rack, us_cts, 4471 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4472 __LINE__, RACK_RTTS_REACHTARGET); 4473 rack->r_ctl.rc_time_probertt_starts = us_cts; 4474 if (rack->r_ctl.rc_time_probertt_starts == 0) 4475 rack->r_ctl.rc_time_probertt_starts = 1; 4476 /* Restore back to our rate we want to pace at in prtt */ 4477 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 4478 } 4479 /* 4480 * Setup our end time, some number of gp_srtts plus 200ms. 4481 */ 4482 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 4483 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 4484 if (rack_probertt_gpsrtt_cnt_div) 4485 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 4486 else 4487 endtime = 0; 4488 endtime += rack_min_probertt_hold; 4489 endtime += rack->r_ctl.rc_time_probertt_starts; 4490 if (TSTMP_GEQ(us_cts, endtime)) { 4491 /* yes, exit probertt */ 4492 rack_exit_probertt(rack, us_cts); 4493 } 4494 4495 } else if ((rack->rc_skip_timely == 0) && 4496 (TSTMP_GT(us_cts, rack->r_ctl.rc_lower_rtt_us_cts)) && 4497 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt)) { 4498 /* Go into probertt, its been too long since we went lower */ 4499 rack_enter_probertt(rack, us_cts); 4500 } 4501 } 4502 4503 static void 4504 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 4505 uint32_t rtt, int32_t rtt_diff) 4506 { 4507 uint64_t cur_bw, up_bnd, low_bnd, subfr; 4508 uint32_t losses; 4509 4510 if ((rack->rc_gp_dyn_mul == 0) || 4511 (rack->use_fixed_rate) || 4512 (rack->in_probe_rtt) || 4513 (rack->rc_always_pace == 0)) { 4514 /* No dynamic GP multiplier in play */ 4515 return; 4516 } 4517 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 4518 cur_bw = rack_get_bw(rack); 4519 /* Calculate our up and down range */ 4520 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 4521 up_bnd /= 100; 4522 up_bnd += rack->r_ctl.last_gp_comp_bw; 4523 4524 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 4525 subfr /= 100; 4526 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 4527 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 4528 /* 4529 * This is the case where our RTT is above 4530 * the max target and we have been configured 4531 * to just do timely no bonus up stuff in that case. 4532 * 4533 * There are two configurations, set to 1, and we 4534 * just do timely if we are over our max. 
If its 4535 * set above 1 then we slam the multipliers down 4536 * to 100 and then decrement per timely. 4537 */ 4538 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4539 __LINE__, 3); 4540 if (rack->r_ctl.rc_no_push_at_mrtt > 1) 4541 rack_validate_multipliers_at_or_below_100(rack); 4542 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4543 } else if ((timely_says != 0) && (last_bw_est < low_bnd) && !losses) { 4544 /* 4545 * We are decreasing this is a bit complicated this 4546 * means we are loosing ground. This could be 4547 * because another flow entered and we are competing 4548 * for b/w with it. This will push the RTT up which 4549 * makes timely unusable unless we want to get shoved 4550 * into a corner and just be backed off (the age 4551 * old problem with delay based CC). 4552 * 4553 * On the other hand if it was a route change we 4554 * would like to stay somewhat contained and not 4555 * blow out the buffers. 4556 */ 4557 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4558 __LINE__, 3); 4559 rack->r_ctl.last_gp_comp_bw = cur_bw; 4560 if (rack->rc_gp_bwred == 0) { 4561 /* Go into reduction counting */ 4562 rack->rc_gp_bwred = 1; 4563 rack->rc_gp_timely_dec_cnt = 0; 4564 } 4565 if (rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) { 4566 /* 4567 * Push another time with a faster pacing 4568 * to try to gain back (we include override to 4569 * get a full raise factor). 4570 */ 4571 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || 4572 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || 4573 (timely_says == 0) || 4574 (rack_down_raise_thresh == 0)) { 4575 /* 4576 * Do an override up in b/w if we were 4577 * below the threshold or if the threshold 4578 * is zero we always do the raise. 4579 */ 4580 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1); 4581 } else { 4582 /* Log it stays the same */ 4583 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0, 4584 __LINE__, 11); 4585 } 4586 rack->rc_gp_timely_dec_cnt++; 4587 /* We are not incrementing really no-count */ 4588 rack->rc_gp_incr = 0; 4589 rack->rc_gp_timely_inc_cnt = 0; 4590 } else { 4591 /* 4592 * Lets just use the RTT 4593 * information and give up 4594 * pushing. 4595 */ 4596 goto use_timely; 4597 } 4598 } else if ((timely_says != 2) && 4599 !losses && 4600 (last_bw_est > up_bnd)) { 4601 /* 4602 * We are increasing b/w lets keep going, updating 4603 * our b/w and ignoring any timely input, unless 4604 * of course we are at our max raise (if there is one). 4605 */ 4606 4607 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4608 __LINE__, 3); 4609 rack->r_ctl.last_gp_comp_bw = cur_bw; 4610 if (rack->rc_gp_saw_ss && 4611 rack->r_ctl.rack_per_upper_bound_ss && 4612 (rack->r_ctl.rack_per_of_gp_ss == rack->r_ctl.rack_per_upper_bound_ss)) { 4613 /* 4614 * In cases where we can't go higher 4615 * we should just use timely. 4616 */ 4617 goto use_timely; 4618 } 4619 if (rack->rc_gp_saw_ca && 4620 rack->r_ctl.rack_per_upper_bound_ca && 4621 (rack->r_ctl.rack_per_of_gp_ca == rack->r_ctl.rack_per_upper_bound_ca)) { 4622 /* 4623 * In cases where we can't go higher 4624 * we should just use timely. 
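 * (We got here because last_bw_est rose above up_bnd, i.e. it
 * beat last_gp_comp_bw by more than rack_gp_per_bw_mul_up
 * percent, but rack_per_of_gp_ca has already reached
 * rack_per_upper_bound_ca, so there is no headroom left for a
 * bandwidth-driven raise; fall back to the timely RTT-gradient
 * decision instead.)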
4625 */ 4626 goto use_timely; 4627 } 4628 rack->rc_gp_bwred = 0; 4629 rack->rc_gp_timely_dec_cnt = 0; 4630 /* You get a set number of pushes if timely is trying to reduce */ 4631 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { 4632 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4633 } else { 4634 /* Log it stays the same */ 4635 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0, 4636 __LINE__, 12); 4637 } 4638 return; 4639 } else { 4640 /* 4641 * We are staying between the lower and upper range bounds 4642 * so use timely to decide. 4643 */ 4644 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4645 __LINE__, 3); 4646 use_timely: 4647 if (timely_says) { 4648 rack->rc_gp_incr = 0; 4649 rack->rc_gp_timely_inc_cnt = 0; 4650 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && 4651 !losses && 4652 (last_bw_est < low_bnd)) { 4653 /* We are loosing ground */ 4654 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4655 rack->rc_gp_timely_dec_cnt++; 4656 /* We are not incrementing really no-count */ 4657 rack->rc_gp_incr = 0; 4658 rack->rc_gp_timely_inc_cnt = 0; 4659 } else 4660 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4661 } else { 4662 rack->rc_gp_bwred = 0; 4663 rack->rc_gp_timely_dec_cnt = 0; 4664 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4665 } 4666 } 4667 } 4668 4669 static int32_t 4670 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt) 4671 { 4672 int32_t timely_says; 4673 uint64_t log_mult, log_rtt_a_diff; 4674 4675 log_rtt_a_diff = rtt; 4676 log_rtt_a_diff <<= 32; 4677 log_rtt_a_diff |= (uint32_t)rtt_diff; 4678 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * 4679 rack_gp_rtt_maxmul)) { 4680 /* Reduce the b/w multiplier */ 4681 timely_says = 2; 4682 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 4683 log_mult <<= 32; 4684 log_mult |= prev_rtt; 4685 rack_log_timely(rack, timely_says, log_mult, 4686 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4687 log_rtt_a_diff, __LINE__, 4); 4688 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4689 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4690 max(rack_gp_rtt_mindiv , 1)))) { 4691 /* Increase the b/w multiplier */ 4692 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4693 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4694 max(rack_gp_rtt_mindiv , 1)); 4695 log_mult <<= 32; 4696 log_mult |= prev_rtt; 4697 timely_says = 0; 4698 rack_log_timely(rack, timely_says, log_mult , 4699 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4700 log_rtt_a_diff, __LINE__, 5); 4701 } else { 4702 /* 4703 * Use a gradient to find it the timely gradient 4704 * is: 4705 * grad = rc_rtt_diff / min_rtt; 4706 * 4707 * anything below or equal to 0 will be 4708 * a increase indication. Anything above 4709 * zero is a decrease. Note we take care 4710 * of the actual gradient calculation 4711 * in the reduction (its not needed for 4712 * increase). 
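 * For example (numbers purely illustrative): with a 10ms min_rtt
 * and rc_rtt_diff of +2ms the gradient is positive, so below we
 * set timely_says to 1 (reduce); with rc_rtt_diff of -1ms the
 * gradient is non-positive and timely_says is 0 (increase).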
4713 */ 4714 log_mult = prev_rtt; 4715 if (rtt_diff <= 0) { 4716 /* 4717 * Rttdiff is less than zero, increase the 4718 * b/w multiplier (its 0 or negative) 4719 */ 4720 timely_says = 0; 4721 rack_log_timely(rack, timely_says, log_mult, 4722 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4723 } else { 4724 /* Reduce the b/w multiplier */ 4725 timely_says = 1; 4726 rack_log_timely(rack, timely_says, log_mult, 4727 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4728 } 4729 } 4730 return (timely_says); 4731 } 4732 4733 static inline int 4734 rack_in_gp_window(struct tcpcb *tp, struct rack_sendmap *rsm) 4735 { 4736 if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && 4737 SEQ_LEQ(rsm->r_end, tp->gput_ack)) { 4738 /** 4739 * This covers the case that the 4740 * resent is completely inside 4741 * the gp range or up to it. 4742 * |----------------| 4743 * |-----| <or> 4744 * |----| 4745 * <or> |---| 4746 */ 4747 return (1); 4748 } else if (SEQ_LT(rsm->r_start, tp->gput_seq) && 4749 SEQ_GT(rsm->r_end, tp->gput_seq)){ 4750 /** 4751 * This covers the case of 4752 * |--------------| 4753 * |-------->| 4754 */ 4755 return (1); 4756 } else if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && 4757 SEQ_LT(rsm->r_start, tp->gput_ack) && 4758 SEQ_GEQ(rsm->r_end, tp->gput_ack)) { 4759 4760 /** 4761 * This covers the case of 4762 * |--------------| 4763 * |-------->| 4764 */ 4765 return (1); 4766 } 4767 return (0); 4768 } 4769 4770 static inline void 4771 rack_mark_in_gp_win(struct tcpcb *tp, struct rack_sendmap *rsm) 4772 { 4773 4774 if ((tp->t_flags & TF_GPUTINPROG) == 0) 4775 return; 4776 /* 4777 * We have a Goodput measurement in progress. Mark 4778 * the send if its within the window. If its not 4779 * in the window make sure it does not have the mark. 4780 */ 4781 if (rack_in_gp_window(tp, rsm)) 4782 rsm->r_flags |= RACK_IN_GP_WIN; 4783 else 4784 rsm->r_flags &= ~RACK_IN_GP_WIN; 4785 } 4786 4787 static inline void 4788 rack_clear_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) 4789 { 4790 /* A GP measurement is ending, clear all marks on the send map*/ 4791 struct rack_sendmap *rsm = NULL; 4792 4793 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 4794 if (rsm == NULL) { 4795 rsm = tqhash_min(rack->r_ctl.tqh); 4796 } 4797 /* Nothing left? */ 4798 while ((rsm != NULL) && (SEQ_GEQ(tp->gput_ack, rsm->r_start))){ 4799 rsm->r_flags &= ~RACK_IN_GP_WIN; 4800 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4801 } 4802 } 4803 4804 4805 static inline void 4806 rack_tend_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) 4807 { 4808 struct rack_sendmap *rsm = NULL; 4809 4810 if (tp->snd_una == tp->snd_max) { 4811 /* Nothing outstanding yet, nothing to do here */ 4812 return; 4813 } 4814 if (SEQ_GT(tp->gput_seq, tp->snd_una)) { 4815 /* 4816 * We are measuring ahead of some outstanding 4817 * data. We need to walk through up until we get 4818 * to gp_seq marking so that no rsm is set incorrectly 4819 * with RACK_IN_GP_WIN. 4820 */ 4821 rsm = tqhash_min(rack->r_ctl.tqh); 4822 while (rsm != NULL) { 4823 rack_mark_in_gp_win(tp, rsm); 4824 if (SEQ_GEQ(rsm->r_end, tp->gput_seq)) 4825 break; 4826 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4827 } 4828 } 4829 if (rsm == NULL) { 4830 /* 4831 * Need to find the GP seq, if rsm is 4832 * set we stopped as we hit it. 
4833 */ 4834 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 4835 if (rsm == NULL) 4836 return; 4837 rack_mark_in_gp_win(tp, rsm); 4838 } 4839 /* 4840 * Now we may need to mark already sent rsm, ahead of 4841 * gput_seq in the window since they may have been sent 4842 * *before* we started our measurment. The rsm, if non-null 4843 * has been marked (note if rsm would have been NULL we would have 4844 * returned in the previous block). So we go to the next, and continue 4845 * until we run out of entries or we exceed the gp_ack value. 4846 */ 4847 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4848 while (rsm) { 4849 rack_mark_in_gp_win(tp, rsm); 4850 if (SEQ_GT(rsm->r_end, tp->gput_ack)) 4851 break; 4852 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4853 } 4854 } 4855 4856 static void 4857 rack_log_gp_calc(struct tcp_rack *rack, uint32_t add_part, uint32_t sub_part, uint32_t srtt, uint64_t meas_bw, uint64_t utim, uint8_t meth, uint32_t line) 4858 { 4859 if (tcp_bblogging_on(rack->rc_tp)) { 4860 union tcp_log_stackspecific log; 4861 struct timeval tv; 4862 4863 memset(&log, 0, sizeof(log)); 4864 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4865 log.u_bbr.flex1 = add_part; 4866 log.u_bbr.flex2 = sub_part; 4867 log.u_bbr.flex3 = rack_wma_divisor; 4868 log.u_bbr.flex4 = srtt; 4869 log.u_bbr.flex7 = (uint16_t)line; 4870 log.u_bbr.flex8 = meth; 4871 log.u_bbr.delRate = rack->r_ctl.gp_bw; 4872 log.u_bbr.cur_del_rate = meas_bw; 4873 log.u_bbr.rttProp = utim; 4874 TCP_LOG_EVENTP(rack->rc_tp, NULL, 4875 &rack->rc_inp->inp_socket->so_rcv, 4876 &rack->rc_inp->inp_socket->so_snd, 4877 BBR_LOG_THRESH_CALC, 0, 4878 0, &log, false, &rack->r_ctl.act_rcv_time); 4879 } 4880 } 4881 4882 static void 4883 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 4884 tcp_seq th_ack, int line, uint8_t quality) 4885 { 4886 uint64_t tim, bytes_ps, stim, utim; 4887 uint32_t segsiz, bytes, reqbytes, us_cts; 4888 int32_t gput, new_rtt_diff, timely_says; 4889 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 4890 int did_add = 0; 4891 4892 us_cts = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time); 4893 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4894 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 4895 tim = us_cts - tp->gput_ts; 4896 else 4897 tim = 0; 4898 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 4899 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 4900 else 4901 stim = 0; 4902 /* 4903 * Use the larger of the send time or ack time. This prevents us 4904 * from being influenced by ack artifacts to come up with too 4905 * high of measurement. Note that since we are spanning over many more 4906 * bytes in most of our measurements hopefully that is less likely to 4907 * occur. 4908 */ 4909 if (tim > stim) 4910 utim = max(tim, 1); 4911 else 4912 utim = max(stim, 1); 4913 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 4914 rack_log_gpset(rack, th_ack, us_cts, rack->r_ctl.rc_gp_cumack_ts, __LINE__, 3, NULL); 4915 if ((tim == 0) && (stim == 0)) { 4916 /* 4917 * Invalid measurement time, maybe 4918 * all on one ack/one send? 4919 */ 4920 bytes = 0; 4921 bytes_ps = 0; 4922 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4923 0, 0, 0, 10, __LINE__, NULL, quality); 4924 goto skip_measurement; 4925 } 4926 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 4927 /* We never made a us_rtt measurement? 
*/ 4928 bytes = 0; 4929 bytes_ps = 0; 4930 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4931 0, 0, 0, 10, __LINE__, NULL, quality); 4932 goto skip_measurement; 4933 } 4934 /* 4935 * Calculate the maximum possible b/w this connection 4936 * could have. We base our calculation on the lowest 4937 * rtt we have seen during the measurement and the 4938 * largest rwnd the client has given us in that time. This 4939 * forms a BDP that is the maximum that we could ever 4940 * get to the client. Anything larger is not valid. 4941 * 4942 * I originally had code here that rejected measurements 4943 * where the time was less than 1/2 the latest us_rtt. 4944 * But after thinking on that I realized its wrong since 4945 * say you had a 150Mbps or even 1Gbps link, and you 4946 * were a long way away.. example I am in Europe (100ms rtt) 4947 * talking to my 1Gbps link in S.C. Now measuring say 150,000 4948 * bytes my time would be 1.2ms, and yet my rtt would say 4949 * the measurement was invalid the time was < 50ms. The 4950 * same thing is true for 150Mb (8ms of time). 4951 * 4952 * A better way I realized is to look at what the maximum 4953 * the connection could possibly do. This is gated on 4954 * the lowest RTT we have seen and the highest rwnd. 4955 * We should in theory never exceed that, if we are 4956 * then something on the path is storing up packets 4957 * and then feeding them all at once to our endpoint 4958 * messing up our measurement. 4959 */ 4960 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 4961 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 4962 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 4963 if (SEQ_LT(th_ack, tp->gput_seq)) { 4964 /* No measurement can be made */ 4965 bytes = 0; 4966 bytes_ps = 0; 4967 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4968 0, 0, 0, 10, __LINE__, NULL, quality); 4969 goto skip_measurement; 4970 } else 4971 bytes = (th_ack - tp->gput_seq); 4972 bytes_ps = (uint64_t)bytes; 4973 /* 4974 * Don't measure a b/w for pacing unless we have gotten at least 4975 * an initial windows worth of data in this measurement interval. 4976 * 4977 * Small numbers of bytes get badly influenced by delayed ack and 4978 * other artifacts. Note we take the initial window or our 4979 * defined minimum GP (defaulting to 10 which hopefully is the 4980 * IW). 4981 */ 4982 if (rack->rc_gp_filled == 0) { 4983 /* 4984 * The initial estimate is special. We 4985 * have blasted out an IW worth of packets 4986 * without a real valid ack ts results. We 4987 * then setup the app_limited_needs_set flag, 4988 * this should get the first ack in (probably 2 4989 * MSS worth) to be recorded as the timestamp. 4990 * We thus allow a smaller number of bytes i.e. 4991 * IW - 2MSS. 4992 */ 4993 reqbytes -= (2 * segsiz); 4994 /* Also lets fill previous for our first measurement to be neutral */ 4995 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4996 } 4997 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 4998 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4999 rack->r_ctl.rc_app_limited_cnt, 5000 0, 0, 10, __LINE__, NULL, quality); 5001 goto skip_measurement; 5002 } 5003 /* 5004 * We now need to calculate the Timely like status so 5005 * we can update (possibly) the b/w multipliers. 
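 * The rtt_diff below is kept as a 1/8-weighted moving average,
 * rc_rtt_diff = rc_rtt_diff - rc_rtt_diff/8 + new_rtt_diff/8,
 * i.e. roughly 7/8 of the old value plus 1/8 of the newest
 * gp_srtt delta (and it is left untouched when the window saw a
 * probe-rtt).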
5006 */ 5007 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 5008 if (rack->rc_gp_filled == 0) { 5009 /* No previous reading */ 5010 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 5011 } else { 5012 if (rack->measure_saw_probe_rtt == 0) { 5013 /* 5014 * We don't want a probertt to be counted 5015 * since it will be negative incorrectly. We 5016 * expect to be reducing the RTT when we 5017 * pace at a slower rate. 5018 */ 5019 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 5020 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 5021 } 5022 } 5023 timely_says = rack_make_timely_judgement(rack, 5024 rack->r_ctl.rc_gp_srtt, 5025 rack->r_ctl.rc_rtt_diff, 5026 rack->r_ctl.rc_prev_gp_srtt 5027 ); 5028 bytes_ps *= HPTS_USEC_IN_SEC; 5029 bytes_ps /= utim; 5030 if (bytes_ps > rack->r_ctl.last_max_bw) { 5031 /* 5032 * Something is on path playing 5033 * since this b/w is not possible based 5034 * on our BDP (highest rwnd and lowest rtt 5035 * we saw in the measurement window). 5036 * 5037 * Another option here would be to 5038 * instead skip the measurement. 5039 */ 5040 rack_log_pacing_delay_calc(rack, bytes, reqbytes, 5041 bytes_ps, rack->r_ctl.last_max_bw, 0, 5042 11, __LINE__, NULL, quality); 5043 bytes_ps = rack->r_ctl.last_max_bw; 5044 } 5045 /* We store gp for b/w in bytes per second */ 5046 if (rack->rc_gp_filled == 0) { 5047 /* Initial measurement */ 5048 if (bytes_ps) { 5049 rack->r_ctl.gp_bw = bytes_ps; 5050 rack->rc_gp_filled = 1; 5051 rack->r_ctl.num_measurements = 1; 5052 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 5053 } else { 5054 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5055 rack->r_ctl.rc_app_limited_cnt, 5056 0, 0, 10, __LINE__, NULL, quality); 5057 } 5058 if (tcp_in_hpts(rack->rc_tp) && 5059 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 5060 /* 5061 * Ok we can't trust the pacer in this case 5062 * where we transition from un-paced to paced. 5063 * Or for that matter when the burst mitigation 5064 * was making a wild guess and got it wrong. 5065 * Stop the pacer and clear up all the aggregate 5066 * delays etc. 5067 */ 5068 tcp_hpts_remove(rack->rc_tp); 5069 rack->r_ctl.rc_hpts_flags = 0; 5070 rack->r_ctl.rc_last_output_to = 0; 5071 } 5072 did_add = 2; 5073 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { 5074 /* Still a small number run an average */ 5075 rack->r_ctl.gp_bw += bytes_ps; 5076 addpart = rack->r_ctl.num_measurements; 5077 rack->r_ctl.num_measurements++; 5078 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 5079 /* We have collected enough to move forward */ 5080 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; 5081 } 5082 rack_set_pace_segments(tp, rack, __LINE__, NULL); 5083 did_add = 3; 5084 } else { 5085 /* 5086 * We want to take 1/wma of the goodput and add in to 7/8th 5087 * of the old value weighted by the srtt. So if your measurement 5088 * period is say 2 SRTT's long you would get 1/4 as the 5089 * value, if it was like 1/2 SRTT then you would get 1/16th. 5090 * 5091 * But we must be careful not to take too much i.e. if the 5092 * srtt is say 20ms and the measurement is taken over 5093 * 400ms our weight would be 400/20 i.e. 20. On the 5094 * other hand if we get a measurement over 1ms with a 5095 * 10ms rtt we only want to take a much smaller portion. 
5096 */ 5097 uint8_t meth; 5098 5099 if (rack->r_ctl.num_measurements < 0xff) { 5100 rack->r_ctl.num_measurements++; 5101 } 5102 srtt = (uint64_t)tp->t_srtt; 5103 if (srtt == 0) { 5104 /* 5105 * Strange why did t_srtt go back to zero? 5106 */ 5107 if (rack->r_ctl.rc_rack_min_rtt) 5108 srtt = rack->r_ctl.rc_rack_min_rtt; 5109 else 5110 srtt = HPTS_USEC_IN_MSEC; 5111 } 5112 /* 5113 * XXXrrs: Note for reviewers, in playing with 5114 * dynamic pacing I discovered this GP calculation 5115 * as done originally leads to some undesired results. 5116 * Basically you can get longer measurements contributing 5117 * too much to the WMA. Thus I changed it if you are doing 5118 * dynamic adjustments to only do the aportioned adjustment 5119 * if we have a very small (time wise) measurement. Longer 5120 * measurements just get there weight (defaulting to 1/8) 5121 * add to the WMA. We may want to think about changing 5122 * this to always do that for both sides i.e. dynamic 5123 * and non-dynamic... but considering lots of folks 5124 * were playing with this I did not want to change the 5125 * calculation per.se. without your thoughts.. Lawerence? 5126 * Peter?? 5127 */ 5128 if (rack->rc_gp_dyn_mul == 0) { 5129 subpart = rack->r_ctl.gp_bw * utim; 5130 subpart /= (srtt * 8); 5131 if (subpart < (rack->r_ctl.gp_bw / 2)) { 5132 /* 5133 * The b/w update takes no more 5134 * away then 1/2 our running total 5135 * so factor it in. 5136 */ 5137 addpart = bytes_ps * utim; 5138 addpart /= (srtt * 8); 5139 meth = 1; 5140 } else { 5141 /* 5142 * Don't allow a single measurement 5143 * to account for more than 1/2 of the 5144 * WMA. This could happen on a retransmission 5145 * where utim becomes huge compared to 5146 * srtt (multiple retransmissions when using 5147 * the sending rate which factors in all the 5148 * transmissions from the first one). 5149 */ 5150 subpart = rack->r_ctl.gp_bw / 2; 5151 addpart = bytes_ps / 2; 5152 meth = 2; 5153 } 5154 rack_log_gp_calc(rack, addpart, subpart, srtt, bytes_ps, utim, meth, __LINE__); 5155 resid_bw = rack->r_ctl.gp_bw - subpart; 5156 rack->r_ctl.gp_bw = resid_bw + addpart; 5157 did_add = 1; 5158 } else { 5159 if ((utim / srtt) <= 1) { 5160 /* 5161 * The b/w update was over a small period 5162 * of time. The idea here is to prevent a small 5163 * measurement time period from counting 5164 * too much. So we scale it based on the 5165 * time so it attributes less than 1/rack_wma_divisor 5166 * of its measurement. 5167 */ 5168 subpart = rack->r_ctl.gp_bw * utim; 5169 subpart /= (srtt * rack_wma_divisor); 5170 addpart = bytes_ps * utim; 5171 addpart /= (srtt * rack_wma_divisor); 5172 meth = 3; 5173 } else { 5174 /* 5175 * The scaled measurement was long 5176 * enough so lets just add in the 5177 * portion of the measurement i.e. 1/rack_wma_divisor 5178 */ 5179 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; 5180 addpart = bytes_ps / rack_wma_divisor; 5181 meth = 4; 5182 } 5183 if ((rack->measure_saw_probe_rtt == 0) || 5184 (bytes_ps > rack->r_ctl.gp_bw)) { 5185 /* 5186 * For probe-rtt we only add it in 5187 * if its larger, all others we just 5188 * add in. 5189 */ 5190 did_add = 1; 5191 rack_log_gp_calc(rack, addpart, subpart, srtt, bytes_ps, utim, meth, __LINE__); 5192 resid_bw = rack->r_ctl.gp_bw - subpart; 5193 rack->r_ctl.gp_bw = resid_bw + addpart; 5194 } 5195 } 5196 rack_set_pace_segments(tp, rack, __LINE__, NULL); 5197 } 5198 /* 5199 * We only watch the growth of the GP during the initial startup 5200 * or first-slowstart that ensues. 
If we ever needed to watch 5201 * growth of gp outside of that period all we need to do is 5202 * remove the first clause of this if (rc_initial_ss_comp). 5203 */ 5204 if ((rack->rc_initial_ss_comp == 0) && 5205 (rack->r_ctl.num_measurements >= RACK_REQ_AVG)) { 5206 uint64_t gp_est; 5207 5208 gp_est = bytes_ps; 5209 if (tcp_bblogging_on(rack->rc_tp)) { 5210 union tcp_log_stackspecific log; 5211 struct timeval tv; 5212 5213 memset(&log, 0, sizeof(log)); 5214 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5215 log.u_bbr.flex1 = rack->r_ctl.current_round; 5216 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; 5217 log.u_bbr.delRate = gp_est; 5218 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; 5219 log.u_bbr.flex8 = 41; 5220 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5221 0, &log, false, NULL, __func__, __LINE__,&tv); 5222 } 5223 if ((rack->r_ctl.num_measurements == RACK_REQ_AVG) || 5224 (rack->r_ctl.last_gpest == 0)) { 5225 /* 5226 * The round we get our measurement averaging going 5227 * is the base round so it always is the source point 5228 * for when we had our first increment. From there on 5229 * we only record the round that had a rise. 5230 */ 5231 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; 5232 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; 5233 } else if (gp_est >= rack->r_ctl.last_gpest) { 5234 /* 5235 * Test to see if its gone up enough 5236 * to set the round count up to now. Note 5237 * that on the seeding of the 4th measurement we 5238 */ 5239 gp_est *= 1000; 5240 gp_est /= rack->r_ctl.last_gpest; 5241 if ((uint32_t)gp_est > rack->r_ctl.gp_gain_req) { 5242 /* 5243 * We went up enough to record the round. 5244 */ 5245 if (tcp_bblogging_on(rack->rc_tp)) { 5246 union tcp_log_stackspecific log; 5247 struct timeval tv; 5248 5249 memset(&log, 0, sizeof(log)); 5250 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5251 log.u_bbr.flex1 = rack->r_ctl.current_round; 5252 log.u_bbr.flex2 = (uint32_t)gp_est; 5253 log.u_bbr.flex3 = rack->r_ctl.gp_gain_req; 5254 log.u_bbr.delRate = gp_est; 5255 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; 5256 log.u_bbr.flex8 = 42; 5257 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5258 0, &log, false, NULL, __func__, __LINE__,&tv); 5259 } 5260 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; 5261 if (rack->r_ctl.use_gp_not_last == 1) 5262 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; 5263 else 5264 rack->r_ctl.last_gpest = bytes_ps; 5265 } 5266 } 5267 } 5268 if ((rack->gp_ready == 0) && 5269 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 5270 /* We have enough measurements now */ 5271 rack->gp_ready = 1; 5272 if (rack->dgp_on || 5273 rack->rack_hibeta) 5274 rack_set_cc_pacing(rack); 5275 if (rack->defer_options) 5276 rack_apply_deferred_options(rack); 5277 } 5278 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 5279 rack_get_bw(rack), 22, did_add, NULL, quality); 5280 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 5281 5282 if ((rack->measure_saw_probe_rtt == 0) && 5283 rack->rc_gp_rtt_set) { 5284 if (rack->rc_skip_timely == 0) { 5285 rack_update_multiplier(rack, timely_says, bytes_ps, 5286 rack->r_ctl.rc_gp_srtt, 5287 rack->r_ctl.rc_rtt_diff); 5288 } 5289 } 5290 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 5291 rack_get_bw(rack), 3, line, NULL, quality); 5292 rack_log_pacing_delay_calc(rack, 5293 bytes, /* flex2 */ 5294 tim, /* flex1 */ 5295 bytes_ps, /* bw_inuse */ 5296 rack->r_ctl.gp_bw, /* delRate */ 5297 
rack_get_lt_bw(rack), /* rttProp */ 5298 20, line, NULL, 0); 5299 /* reset the gp srtt and setup the new prev */ 5300 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 5301 /* Record the lost count for the next measurement */ 5302 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 5303 skip_measurement: 5304 /* 5305 * We restart our diffs based on the gpsrtt in the 5306 * measurement window. 5307 */ 5308 rack->rc_gp_rtt_set = 0; 5309 rack->rc_gp_saw_rec = 0; 5310 rack->rc_gp_saw_ca = 0; 5311 rack->rc_gp_saw_ss = 0; 5312 rack->rc_dragged_bottom = 0; 5313 if (quality == RACK_QUALITY_HIGH) { 5314 /* 5315 * Gput in the stats world is in kbps where bytes_ps is 5316 * bytes per second so we do ((x * 8)/ 1000). 5317 */ 5318 gput = (int32_t)((bytes_ps << 3) / (uint64_t)1000); 5319 #ifdef STATS 5320 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 5321 gput); 5322 /* 5323 * XXXLAS: This is a temporary hack, and should be 5324 * chained off VOI_TCP_GPUT when stats(9) grows an 5325 * API to deal with chained VOIs. 5326 */ 5327 if (tp->t_stats_gput_prev > 0) 5328 stats_voi_update_abs_s32(tp->t_stats, 5329 VOI_TCP_GPUT_ND, 5330 ((gput - tp->t_stats_gput_prev) * 100) / 5331 tp->t_stats_gput_prev); 5332 #endif 5333 tp->t_stats_gput_prev = gput; 5334 } 5335 tp->t_flags &= ~TF_GPUTINPROG; 5336 /* 5337 * Now are we app limited now and there is space from where we 5338 * were to where we want to go? 5339 * 5340 * We don't do the other case i.e. non-applimited here since 5341 * the next send will trigger us picking up the missing data. 5342 */ 5343 if (rack->r_ctl.rc_first_appl && 5344 TCPS_HAVEESTABLISHED(tp->t_state) && 5345 rack->r_ctl.rc_app_limited_cnt && 5346 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 5347 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > 5348 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 5349 /* 5350 * Yep there is enough outstanding to make a measurement here. 5351 */ 5352 struct rack_sendmap *rsm; 5353 5354 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 5355 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 5356 tp->gput_ts = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time); 5357 rack->app_limited_needs_set = 0; 5358 tp->gput_seq = th_ack; 5359 if (rack->in_probe_rtt) 5360 rack->measure_saw_probe_rtt = 1; 5361 else if ((rack->measure_saw_probe_rtt) && 5362 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 5363 rack->measure_saw_probe_rtt = 0; 5364 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { 5365 /* There is a full window to gain info from */ 5366 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 5367 } else { 5368 /* We can only measure up to the applimited point */ 5369 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); 5370 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 5371 /* 5372 * We don't have enough to make a measurement. 5373 */ 5374 tp->t_flags &= ~TF_GPUTINPROG; 5375 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 5376 0, 0, 0, 6, __LINE__, NULL, quality); 5377 return; 5378 } 5379 } 5380 if (tp->t_state >= TCPS_FIN_WAIT_1) { 5381 /* 5382 * We will get no more data into the SB 5383 * this means we need to have the data available 5384 * before we start a measurement. 5385 */ 5386 if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) { 5387 /* Nope not enough data. 
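 * (At or past FIN_WAIT_1 no new bytes will be appended to the
 * send buffer, so unless the whole gput window is already queued
 * the measurement could stall short of gput_ack; give up rather
 * than start it.)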
*/ 5388 return; 5389 } 5390 } 5391 tp->t_flags |= TF_GPUTINPROG; 5392 /* 5393 * Now we need to find the timestamp of the send at tp->gput_seq 5394 * for the send based measurement. 5395 */ 5396 rack->r_ctl.rc_gp_cumack_ts = 0; 5397 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 5398 if (rsm) { 5399 /* Ok send-based limit is set */ 5400 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 5401 /* 5402 * Move back to include the earlier part 5403 * so our ack time lines up right (this may 5404 * make an overlapping measurement but thats 5405 * ok). 5406 */ 5407 tp->gput_seq = rsm->r_start; 5408 } 5409 if (rsm->r_flags & RACK_ACKED) { 5410 struct rack_sendmap *nrsm; 5411 5412 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 5413 tp->gput_seq = rsm->r_end; 5414 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); 5415 if (nrsm) 5416 rsm = nrsm; 5417 else { 5418 rack->app_limited_needs_set = 1; 5419 } 5420 } else 5421 rack->app_limited_needs_set = 1; 5422 /* We always go from the first send */ 5423 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; 5424 } else { 5425 /* 5426 * If we don't find the rsm due to some 5427 * send-limit set the current time, which 5428 * basically disables the send-limit. 5429 */ 5430 struct timeval tv; 5431 5432 microuptime(&tv); 5433 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 5434 } 5435 rack_tend_gp_marks(tp, rack); 5436 rack_log_pacing_delay_calc(rack, 5437 tp->gput_seq, 5438 tp->gput_ack, 5439 (uintptr_t)rsm, 5440 tp->gput_ts, 5441 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 5442 9, 5443 __LINE__, rsm, quality); 5444 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 5445 } else { 5446 /* 5447 * To make sure proper timestamp merging occurs, we need to clear 5448 * all GP marks if we don't start a measurement. 5449 */ 5450 rack_clear_gp_marks(tp, rack); 5451 } 5452 } 5453 5454 /* 5455 * CC wrapper hook functions 5456 */ 5457 static void 5458 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 5459 uint16_t type, int32_t post_recovery) 5460 { 5461 uint32_t prior_cwnd, acked; 5462 struct tcp_log_buffer *lgb = NULL; 5463 uint8_t labc_to_use, quality; 5464 5465 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5466 tp->t_ccv.nsegs = nsegs; 5467 acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una); 5468 if ((post_recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 5469 uint32_t max; 5470 5471 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 5472 if (tp->t_ccv.bytes_this_ack > max) { 5473 tp->t_ccv.bytes_this_ack = max; 5474 } 5475 } 5476 #ifdef STATS 5477 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 5478 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 5479 #endif 5480 if ((th_ack == tp->snd_max) && rack->lt_bw_up) { 5481 /* 5482 * We will ack all the data, time to end any 5483 * lt_bw_up we have running until something 5484 * new is sent. Note we need to use the actual 5485 * ack_rcv_time which with pacing may be different. 
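 * Below we fold the bytes newly covered (snd_max - lt_seq) into
 * lt_bw_bytes and the time since lt_timemark into lt_bw_time, so
 * the long-term estimate remains roughly lt_bw_bytes divided by
 * lt_bw_time when it is next queried.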
5486 */ 5487 uint64_t tmark; 5488 5489 rack->r_ctl.lt_bw_bytes += (tp->snd_max - rack->r_ctl.lt_seq); 5490 rack->r_ctl.lt_seq = tp->snd_max; 5491 tmark = tcp_tv_to_lusec(&rack->r_ctl.act_rcv_time); 5492 if (tmark >= rack->r_ctl.lt_timemark) { 5493 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 5494 } 5495 rack->r_ctl.lt_timemark = tmark; 5496 rack->lt_bw_up = 0; 5497 } 5498 quality = RACK_QUALITY_NONE; 5499 if ((tp->t_flags & TF_GPUTINPROG) && 5500 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { 5501 /* Measure the Goodput */ 5502 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); 5503 } 5504 /* Which way our we limited, if not cwnd limited no advance in CA */ 5505 if (tp->snd_cwnd <= tp->snd_wnd) 5506 tp->t_ccv.flags |= CCF_CWND_LIMITED; 5507 else 5508 tp->t_ccv.flags &= ~CCF_CWND_LIMITED; 5509 if (tp->snd_cwnd > tp->snd_ssthresh) { 5510 tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack, 5511 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 5512 /* For the setting of a window past use the actual scwnd we are using */ 5513 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 5514 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 5515 tp->t_ccv.flags |= CCF_ABC_SENTAWND; 5516 } 5517 } else { 5518 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 5519 tp->t_bytes_acked = 0; 5520 } 5521 prior_cwnd = tp->snd_cwnd; 5522 if ((post_recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 5523 (rack_client_low_buf && rack->client_bufferlvl && 5524 (rack->client_bufferlvl < rack_client_low_buf))) 5525 labc_to_use = rack->rc_labc; 5526 else 5527 labc_to_use = rack_max_abc_post_recovery; 5528 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5529 union tcp_log_stackspecific log; 5530 struct timeval tv; 5531 5532 memset(&log, 0, sizeof(log)); 5533 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5534 log.u_bbr.flex1 = th_ack; 5535 log.u_bbr.flex2 = tp->t_ccv.flags; 5536 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 5537 log.u_bbr.flex4 = tp->t_ccv.nsegs; 5538 log.u_bbr.flex5 = labc_to_use; 5539 log.u_bbr.flex6 = prior_cwnd; 5540 log.u_bbr.flex7 = V_tcp_do_newsack; 5541 log.u_bbr.flex8 = 1; 5542 lgb = tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5543 0, &log, false, NULL, __func__, __LINE__,&tv); 5544 } 5545 if (CC_ALGO(tp)->ack_received != NULL) { 5546 /* XXXLAS: Find a way to live without this */ 5547 tp->t_ccv.curack = th_ack; 5548 tp->t_ccv.labc = labc_to_use; 5549 tp->t_ccv.flags |= CCF_USE_LOCAL_ABC; 5550 CC_ALGO(tp)->ack_received(&tp->t_ccv, type); 5551 } 5552 if (lgb) { 5553 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; 5554 } 5555 if (rack->r_must_retran) { 5556 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { 5557 /* 5558 * We now are beyond the rxt point so lets disable 5559 * the flag. 5560 */ 5561 rack->r_ctl.rc_out_at_rto = 0; 5562 rack->r_must_retran = 0; 5563 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { 5564 /* 5565 * Only decrement the rc_out_at_rto if the cwnd advances 5566 * at least a whole segment. Otherwise next time the peer 5567 * acks, we won't be able to send this generaly happens 5568 * when we are in Congestion Avoidance. 
5569 */ 5570 if (acked <= rack->r_ctl.rc_out_at_rto){ 5571 rack->r_ctl.rc_out_at_rto -= acked; 5572 } else { 5573 rack->r_ctl.rc_out_at_rto = 0; 5574 } 5575 } 5576 } 5577 #ifdef STATS 5578 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); 5579 #endif 5580 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { 5581 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; 5582 } 5583 if ((rack->rc_initial_ss_comp == 0) && 5584 (tp->snd_cwnd >= tp->snd_ssthresh)) { 5585 /* 5586 * The cwnd has grown beyond ssthresh we have 5587 * entered ca and completed our first Slowstart. 5588 */ 5589 rack->rc_initial_ss_comp = 1; 5590 } 5591 } 5592 5593 static void 5594 tcp_rack_partialack(struct tcpcb *tp) 5595 { 5596 struct tcp_rack *rack; 5597 5598 rack = (struct tcp_rack *)tp->t_fb_ptr; 5599 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5600 /* 5601 * If we are doing PRR and have enough 5602 * room to send <or> we are pacing and prr 5603 * is disabled we will want to see if we 5604 * can send data (by setting r_wanted_output to 5605 * true). 5606 */ 5607 if ((rack->r_ctl.rc_prr_sndcnt > 0) || 5608 rack->rack_no_prr) 5609 rack->r_wanted_output = 1; 5610 } 5611 5612 static void 5613 rack_exit_recovery(struct tcpcb *tp, struct tcp_rack *rack, int how) 5614 { 5615 /* 5616 * Now exit recovery. 5617 */ 5618 EXIT_RECOVERY(tp->t_flags); 5619 } 5620 5621 static void 5622 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack) 5623 { 5624 struct tcp_rack *rack; 5625 uint32_t orig_cwnd; 5626 5627 orig_cwnd = tp->snd_cwnd; 5628 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5629 rack = (struct tcp_rack *)tp->t_fb_ptr; 5630 /* only alert CC if we alerted when we entered */ 5631 if (CC_ALGO(tp)->post_recovery != NULL) { 5632 tp->t_ccv.curack = th_ack; 5633 CC_ALGO(tp)->post_recovery(&tp->t_ccv); 5634 if (tp->snd_cwnd < tp->snd_ssthresh) { 5635 /* 5636 * Rack has burst control and pacing 5637 * so lets not set this any lower than 5638 * snd_ssthresh per RFC-6582 (option 2). 5639 */ 5640 tp->snd_cwnd = tp->snd_ssthresh; 5641 } 5642 } 5643 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5644 union tcp_log_stackspecific log; 5645 struct timeval tv; 5646 5647 memset(&log, 0, sizeof(log)); 5648 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5649 log.u_bbr.flex1 = th_ack; 5650 log.u_bbr.flex2 = tp->t_ccv.flags; 5651 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 5652 log.u_bbr.flex4 = tp->t_ccv.nsegs; 5653 log.u_bbr.flex5 = V_tcp_abc_l_var; 5654 log.u_bbr.flex6 = orig_cwnd; 5655 log.u_bbr.flex7 = V_tcp_do_newsack; 5656 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 5657 log.u_bbr.flex8 = 2; 5658 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5659 0, &log, false, NULL, __func__, __LINE__, &tv); 5660 } 5661 if ((rack->rack_no_prr == 0) && 5662 (rack->no_prr_addback == 0) && 5663 (rack->r_ctl.rc_prr_sndcnt > 0)) { 5664 /* 5665 * Suck the next prr cnt back into cwnd, but 5666 * only do that if we are not application limited. 5667 */ 5668 if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) { 5669 /* 5670 * We are allowed to add back to the cwnd the amount we did 5671 * not get out if: 5672 * a) no_prr_addback is off. 5673 * b) we are not app limited 5674 * c) we are doing prr 5675 * <and> 5676 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 
5677 */ 5678 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 5679 rack->r_ctl.rc_prr_sndcnt); 5680 } 5681 rack->r_ctl.rc_prr_sndcnt = 0; 5682 rack_log_to_prr(rack, 1, 0, __LINE__); 5683 } 5684 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 5685 tp->snd_recover = tp->snd_una; 5686 if (rack->r_ctl.dsack_persist) { 5687 rack->r_ctl.dsack_persist--; 5688 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 5689 rack->r_ctl.num_dsack = 0; 5690 } 5691 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 5692 } 5693 if (rack->rto_from_rec == 1) { 5694 rack->rto_from_rec = 0; 5695 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) 5696 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; 5697 } 5698 rack_exit_recovery(tp, rack, 1); 5699 } 5700 5701 static void 5702 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line) 5703 { 5704 struct tcp_rack *rack; 5705 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 5706 5707 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5708 #ifdef STATS 5709 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 5710 #endif 5711 if (IN_RECOVERY(tp->t_flags) == 0) { 5712 in_rec_at_entry = 0; 5713 ssthresh_enter = tp->snd_ssthresh; 5714 cwnd_enter = tp->snd_cwnd; 5715 } else 5716 in_rec_at_entry = 1; 5717 rack = (struct tcp_rack *)tp->t_fb_ptr; 5718 switch (type) { 5719 case CC_NDUPACK: 5720 tp->t_flags &= ~TF_WASFRECOVERY; 5721 tp->t_flags &= ~TF_WASCRECOVERY; 5722 if (!IN_FASTRECOVERY(tp->t_flags)) { 5723 /* Check if this is the end of the initial Start-up i.e. initial slow-start */ 5724 if (rack->rc_initial_ss_comp == 0) { 5725 /* Yep it is the end of the initial slowstart */ 5726 rack->rc_initial_ss_comp = 1; 5727 } 5728 rack->r_ctl.rc_prr_delivered = 0; 5729 rack->r_ctl.rc_prr_out = 0; 5730 rack->r_fast_output = 0; 5731 if (rack->rack_no_prr == 0) { 5732 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 5733 rack_log_to_prr(rack, 2, in_rec_at_entry, line); 5734 } 5735 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 5736 tp->snd_recover = tp->snd_max; 5737 if (tp->t_flags2 & TF2_ECN_PERMIT) 5738 tp->t_flags2 |= TF2_ECN_SND_CWR; 5739 } 5740 break; 5741 case CC_ECN: 5742 if (!IN_CONGRECOVERY(tp->t_flags) || 5743 /* 5744 * Allow ECN reaction on ACK to CWR, if 5745 * that data segment was also CE marked. 5746 */ 5747 SEQ_GEQ(ack, tp->snd_recover)) { 5748 EXIT_CONGRECOVERY(tp->t_flags); 5749 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 5750 rack->r_fast_output = 0; 5751 tp->snd_recover = tp->snd_max + 1; 5752 if (tp->t_flags2 & TF2_ECN_PERMIT) 5753 tp->t_flags2 |= TF2_ECN_SND_CWR; 5754 } 5755 break; 5756 case CC_RTO: 5757 tp->t_dupacks = 0; 5758 tp->t_bytes_acked = 0; 5759 rack->r_fast_output = 0; 5760 if (IN_RECOVERY(tp->t_flags)) 5761 rack_exit_recovery(tp, rack, 2); 5762 orig_cwnd = tp->snd_cwnd; 5763 rack_log_to_prr(rack, 16, orig_cwnd, line); 5764 if (CC_ALGO(tp)->cong_signal == NULL) { 5765 /* TSNH */ 5766 tp->snd_ssthresh = max(2, 5767 min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 5768 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 5769 tp->snd_cwnd = ctf_fixed_maxseg(tp); 5770 } 5771 if (tp->t_flags2 & TF2_ECN_PERMIT) 5772 tp->t_flags2 |= TF2_ECN_SND_CWR; 5773 break; 5774 case CC_RTO_ERR: 5775 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 5776 /* RTO was unnecessary, so reset everything. 
*/ 5777 tp->snd_cwnd = tp->snd_cwnd_prev; 5778 tp->snd_ssthresh = tp->snd_ssthresh_prev; 5779 tp->snd_recover = tp->snd_recover_prev; 5780 if (tp->t_flags & TF_WASFRECOVERY) { 5781 ENTER_FASTRECOVERY(tp->t_flags); 5782 tp->t_flags &= ~TF_WASFRECOVERY; 5783 } 5784 if (tp->t_flags & TF_WASCRECOVERY) { 5785 ENTER_CONGRECOVERY(tp->t_flags); 5786 tp->t_flags &= ~TF_WASCRECOVERY; 5787 } 5788 tp->snd_nxt = tp->snd_max; 5789 tp->t_badrxtwin = 0; 5790 break; 5791 } 5792 if ((CC_ALGO(tp)->cong_signal != NULL) && 5793 (type != CC_RTO)){ 5794 tp->t_ccv.curack = ack; 5795 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type); 5796 } 5797 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 5798 rack_log_to_prr(rack, 15, cwnd_enter, line); 5799 rack->r_ctl.dsack_byte_cnt = 0; 5800 rack->r_ctl.retran_during_recovery = 0; 5801 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 5802 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 5803 rack->r_ent_rec_ns = 1; 5804 } 5805 } 5806 5807 static inline void 5808 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 5809 { 5810 uint32_t i_cwnd; 5811 5812 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5813 5814 if (CC_ALGO(tp)->after_idle != NULL) 5815 CC_ALGO(tp)->after_idle(&tp->t_ccv); 5816 5817 if (tp->snd_cwnd == 1) 5818 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 5819 else 5820 i_cwnd = rc_init_window(rack); 5821 5822 /* 5823 * Being idle is no different than the initial window. If the cc 5824 * clamps it down below the initial window raise it to the initial 5825 * window. 5826 */ 5827 if (tp->snd_cwnd < i_cwnd) { 5828 tp->snd_cwnd = i_cwnd; 5829 } 5830 } 5831 5832 /* 5833 * Indicate whether this ack should be delayed. We can delay the ack if 5834 * following conditions are met: 5835 * - There is no delayed ack timer in progress. 5836 * - Our last ack wasn't a 0-sized window. We never want to delay 5837 * the ack that opens up a 0-sized window. 5838 * - LRO wasn't used for this segment. We make sure by checking that the 5839 * segment size is not larger than the MSS. 5840 * - Delayed acks are enabled or this is a half-synchronized T/TCP 5841 * connection. 5842 */ 5843 #define DELAY_ACK(tp, tlen) \ 5844 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 5845 ((tp->t_flags & TF_DELACK) == 0) && \ 5846 (tlen <= tp->t_maxseg) && \ 5847 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 5848 5849 static struct rack_sendmap * 5850 rack_find_lowest_rsm(struct tcp_rack *rack) 5851 { 5852 struct rack_sendmap *rsm; 5853 5854 /* 5855 * Walk the time-order transmitted list looking for an rsm that is 5856 * not acked. This will be the one that was sent the longest time 5857 * ago that is still outstanding. 5858 */ 5859 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 5860 if (rsm->r_flags & RACK_ACKED) { 5861 continue; 5862 } 5863 goto finish; 5864 } 5865 finish: 5866 return (rsm); 5867 } 5868 5869 static struct rack_sendmap * 5870 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm) 5871 { 5872 struct rack_sendmap *prsm; 5873 5874 /* 5875 * Walk the sequence order list backward until we hit and arrive at 5876 * the highest seq not acked. In theory when this is called it 5877 * should be the last segment (which it was not). 
*/ 5878 prsm = rsm; 5879 5880 5881 TQHASH_FOREACH_REVERSE_FROM(prsm, rack->r_ctl.tqh) { 5882 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) { 5883 continue; 5884 } 5885 return (prsm); 5886 } 5887 return (NULL); 5888 } 5889 5890 static uint32_t 5891 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts, int line, int log_allowed) 5892 { 5893 int32_t lro; 5894 uint32_t thresh; 5895 5896 /* 5897 * lro is the flag we use to determine if we have seen reordering. 5898 * If it gets set we have seen reordering. The reorder logic either 5899 * works in one of two ways: 5900 * 5901 * If reorder-fade is configured, then we track the last time we saw 5902 * re-ordering occur. If we reach the point where enough time has 5903 * passed we no longer consider reordering as occurring. 5904 * 5905 * Or if reorder-fade is 0, then once we see reordering we consider 5906 * the connection to always be subject to reordering and just set lro 5907 * to 1. 5908 * 5909 * In the end if lro is non-zero we add the extra time for 5910 * reordering in. 5911 */ 5912 if (srtt == 0) 5913 srtt = 1; 5914 if (rack->r_ctl.rc_reorder_ts) { 5915 if (rack->r_ctl.rc_reorder_fade) { 5916 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) { 5917 lro = cts - rack->r_ctl.rc_reorder_ts; 5918 if (lro == 0) { 5919 /* 5920 * No time has passed since the last 5921 * reorder, mark it as reordering. 5922 */ 5923 lro = 1; 5924 } 5925 } else { 5926 /* Negative time? */ 5927 lro = 0; 5928 } 5929 if (lro > rack->r_ctl.rc_reorder_fade) { 5930 /* Turn off reordering seen too */ 5931 rack->r_ctl.rc_reorder_ts = 0; 5932 lro = 0; 5933 } 5934 } else { 5935 /* Reordering does not fade */ 5936 lro = 1; 5937 } 5938 } else { 5939 lro = 0; 5940 } 5941 if (rack->rc_rack_tmr_std_based == 0) { 5942 thresh = srtt + rack->r_ctl.rc_pkt_delay; 5943 } else { 5944 /* Standards based pkt-delay is 1/4 srtt */ 5945 thresh = srtt + (srtt >> 2); 5946 } 5947 if (lro && (rack->rc_rack_tmr_std_based == 0)) { 5948 /* It must be set, if not you get 1/4 rtt */ 5949 if (rack->r_ctl.rc_reorder_shift) 5950 thresh += (srtt >> rack->r_ctl.rc_reorder_shift); 5951 else 5952 thresh += (srtt >> 2); 5953 } 5954 if (rack->rc_rack_use_dsack && 5955 lro && 5956 (rack->r_ctl.num_dsack > 0)) { 5957 /* 5958 * We only increase the reordering window if we 5959 * have seen reordering <and> we have a DSACK count.
5960 */ 5961 thresh += rack->r_ctl.num_dsack * (srtt >> 2); 5962 if (log_allowed) 5963 rack_log_dsack_event(rack, 4, line, srtt, thresh); 5964 } 5965 /* SRTT * 2 is the ceiling */ 5966 if (thresh > (srtt * 2)) { 5967 thresh = srtt * 2; 5968 } 5969 /* And we don't want it above the RTO max either */ 5970 if (thresh > rack_rto_max) { 5971 thresh = rack_rto_max; 5972 } 5973 if (log_allowed) 5974 rack_log_dsack_event(rack, 6, line, srtt, thresh); 5975 return (thresh); 5976 } 5977 5978 static uint32_t 5979 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 5980 struct rack_sendmap *rsm, uint32_t srtt) 5981 { 5982 struct rack_sendmap *prsm; 5983 uint32_t thresh, len; 5984 int segsiz; 5985 5986 if (srtt == 0) 5987 srtt = 1; 5988 if (rack->r_ctl.rc_tlp_threshold) 5989 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 5990 else 5991 thresh = (srtt * 2); 5992 5993 /* Get the previous sent packet, if any */ 5994 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 5995 len = rsm->r_end - rsm->r_start; 5996 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 5997 /* Exactly like the ID */ 5998 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { 5999 uint32_t alt_thresh; 6000 /* 6001 * Compensate for delayed-ack with the d-ack time. 6002 */ 6003 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6004 if (alt_thresh > thresh) 6005 thresh = alt_thresh; 6006 } 6007 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 6008 /* 2.1 behavior */ 6009 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 6010 if (prsm && (len <= segsiz)) { 6011 /* 6012 * Two packets outstanding, thresh should be (2*srtt) + 6013 * possible inter-packet delay (if any). 6014 */ 6015 uint32_t inter_gap = 0; 6016 int idx, nidx; 6017 6018 idx = rsm->r_rtr_cnt - 1; 6019 nidx = prsm->r_rtr_cnt - 1; 6020 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 6021 /* Yes it was sent later (or at the same time) */ 6022 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 6023 } 6024 thresh += inter_gap; 6025 } else if (len <= segsiz) { 6026 /* 6027 * Possibly compensate for delayed-ack. 6028 */ 6029 uint32_t alt_thresh; 6030 6031 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6032 if (alt_thresh > thresh) 6033 thresh = alt_thresh; 6034 } 6035 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 6036 /* 2.2 behavior */ 6037 if (len <= segsiz) { 6038 uint32_t alt_thresh; 6039 /* 6040 * Compensate for delayed-ack with the d-ack time. 6041 */ 6042 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6043 if (alt_thresh > thresh) 6044 thresh = alt_thresh; 6045 } 6046 } 6047 /* Not above an RTO */ 6048 if (thresh > tp->t_rxtcur) { 6049 thresh = tp->t_rxtcur; 6050 } 6051 /* Not above a RTO max */ 6052 if (thresh > rack_rto_max) { 6053 thresh = rack_rto_max; 6054 } 6055 /* Apply user supplied min TLP */ 6056 if (thresh < rack_tlp_min) { 6057 thresh = rack_tlp_min; 6058 } 6059 return (thresh); 6060 } 6061 6062 static uint32_t 6063 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 6064 { 6065 /* 6066 * We want the rack_rtt which is the 6067 * last rtt we measured. However if that 6068 * does not exist we fallback to the srtt (which 6069 * we probably will never do) and then as a last 6070 * resort we use RACK_INITIAL_RTO if no srtt is 6071 * yet set. 
6072 */ 6073 if (rack->rc_rack_rtt) 6074 return (rack->rc_rack_rtt); 6075 else if (tp->t_srtt == 0) 6076 return (RACK_INITIAL_RTO); 6077 return (tp->t_srtt); 6078 } 6079 6080 static struct rack_sendmap * 6081 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 6082 { 6083 /* 6084 * Check to see that we don't need to fall into recovery. We will 6085 * need to do so if our oldest transmit is past the time we should 6086 * have had an ack. 6087 */ 6088 struct tcp_rack *rack; 6089 struct rack_sendmap *rsm; 6090 int32_t idx; 6091 uint32_t srtt, thresh; 6092 6093 rack = (struct tcp_rack *)tp->t_fb_ptr; 6094 if (tqhash_empty(rack->r_ctl.tqh)) { 6095 return (NULL); 6096 } 6097 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6098 if (rsm == NULL) 6099 return (NULL); 6100 6101 6102 if (rsm->r_flags & RACK_ACKED) { 6103 rsm = rack_find_lowest_rsm(rack); 6104 if (rsm == NULL) 6105 return (NULL); 6106 } 6107 idx = rsm->r_rtr_cnt - 1; 6108 srtt = rack_grab_rtt(tp, rack); 6109 thresh = rack_calc_thresh_rack(rack, srtt, tsused, __LINE__, 1); 6110 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 6111 return (NULL); 6112 } 6113 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 6114 return (NULL); 6115 } 6116 /* Ok if we reach here we are over-due and this guy can be sent */ 6117 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 6118 return (rsm); 6119 } 6120 6121 static uint32_t 6122 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 6123 { 6124 int32_t t; 6125 int32_t tt; 6126 uint32_t ret_val; 6127 6128 t = (tp->t_srtt + (tp->t_rttvar << 2)); 6129 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 6130 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 6131 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 6132 ret_val = (uint32_t)tt; 6133 return (ret_val); 6134 } 6135 6136 static uint32_t 6137 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 6138 { 6139 /* 6140 * Start the FR timer, we do this based on getting the first one in 6141 * the rc_tmap. Note that if its NULL we must stop the timer. in all 6142 * events we need to stop the running timer (if its running) before 6143 * starting the new one. 6144 */ 6145 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 6146 uint32_t srtt_cur; 6147 int32_t idx; 6148 int32_t is_tlp_timer = 0; 6149 struct rack_sendmap *rsm; 6150 6151 if (rack->t_timers_stopped) { 6152 /* All timers have been stopped none are to run */ 6153 return (0); 6154 } 6155 if (rack->rc_in_persist) { 6156 /* We can't start any timer in persists */ 6157 return (rack_get_persists_timer_val(tp, rack)); 6158 } 6159 rack->rc_on_min_to = 0; 6160 if ((tp->t_state < TCPS_ESTABLISHED) || 6161 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 6162 goto activate_rxt; 6163 } 6164 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6165 if ((rsm == NULL) || sup_rack) { 6166 /* Nothing on the send map or no rack */ 6167 activate_rxt: 6168 time_since_sent = 0; 6169 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6170 if (rsm) { 6171 /* 6172 * Should we discount the RTX timer any? 6173 * 6174 * We want to discount it the smallest amount. 6175 * If a timer (Rack/TLP or RXT) has gone off more 6176 * recently thats the discount we want to use (now - timer time). 6177 * If the retransmit of the oldest packet was more recent then 6178 * we want to use that (now - oldest-packet-last_transmit_time). 
6179 * 6180 */ 6181 idx = rsm->r_rtr_cnt - 1; 6182 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 6183 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 6184 else 6185 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 6186 if (TSTMP_GT(cts, tstmp_touse)) 6187 time_since_sent = cts - tstmp_touse; 6188 } 6189 if (SEQ_LT(tp->snd_una, tp->snd_max) || 6190 sbavail(&tptosocket(tp)->so_snd)) { 6191 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 6192 to = tp->t_rxtcur; 6193 if (to > time_since_sent) 6194 to -= time_since_sent; 6195 else 6196 to = rack->r_ctl.rc_min_to; 6197 if (to == 0) 6198 to = 1; 6199 /* Special case for KEEPINIT */ 6200 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6201 (TP_KEEPINIT(tp) != 0) && 6202 rsm) { 6203 /* 6204 * We have to put a ceiling on the rxt timer 6205 * of the keep-init timeout. 6206 */ 6207 uint32_t max_time, red; 6208 6209 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 6210 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 6211 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 6212 if (red < max_time) 6213 max_time -= red; 6214 else 6215 max_time = 1; 6216 } 6217 /* Reduce timeout to the keep value if needed */ 6218 if (max_time < to) 6219 to = max_time; 6220 } 6221 return (to); 6222 } 6223 return (0); 6224 } 6225 if (rsm->r_flags & RACK_ACKED) { 6226 rsm = rack_find_lowest_rsm(rack); 6227 if (rsm == NULL) { 6228 /* No lowest? */ 6229 goto activate_rxt; 6230 } 6231 } 6232 /* Convert from ms to usecs */ 6233 if ((rsm->r_flags & RACK_SACK_PASSED) || 6234 (rsm->r_flags & RACK_RWND_COLLAPSED) || 6235 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 6236 if ((tp->t_flags & TF_SENTFIN) && 6237 ((tp->snd_max - tp->snd_una) == 1) && 6238 (rsm->r_flags & RACK_HAS_FIN)) { 6239 /* 6240 * We don't start a rack timer if all we have is a 6241 * FIN outstanding. 6242 */ 6243 goto activate_rxt; 6244 } 6245 if ((rack->use_rack_rr == 0) && 6246 (IN_FASTRECOVERY(tp->t_flags)) && 6247 (rack->rack_no_prr == 0) && 6248 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 6249 /* 6250 * We are not cheating, in recovery and 6251 * not enough ack's to yet get our next 6252 * retransmission out. 6253 * 6254 * Note that classified attackers do not 6255 * get to use the rack-cheat. 6256 */ 6257 goto activate_tlp; 6258 } 6259 srtt = rack_grab_rtt(tp, rack); 6260 thresh = rack_calc_thresh_rack(rack, srtt, cts, __LINE__, 1); 6261 idx = rsm->r_rtr_cnt - 1; 6262 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 6263 if (SEQ_GEQ(exp, cts)) { 6264 to = exp - cts; 6265 if (to < rack->r_ctl.rc_min_to) { 6266 to = rack->r_ctl.rc_min_to; 6267 if (rack->r_rr_config == 3) 6268 rack->rc_on_min_to = 1; 6269 } 6270 } else { 6271 to = rack->r_ctl.rc_min_to; 6272 if (rack->r_rr_config == 3) 6273 rack->rc_on_min_to = 1; 6274 } 6275 } else { 6276 /* Ok we need to do a TLP not RACK */ 6277 activate_tlp: 6278 if ((rack->rc_tlp_in_progress != 0) && 6279 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 6280 /* 6281 * The previous send was a TLP and we have sent 6282 * N TLP's without sending new data. 6283 */ 6284 goto activate_rxt; 6285 } 6286 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 6287 if (rsm == NULL) { 6288 /* We found no rsm to TLP with. 
*/ 6289 goto activate_rxt; 6290 } 6291 if (rsm->r_flags & RACK_HAS_FIN) { 6292 /* If it's a FIN we don't do TLP */ 6293 rsm = NULL; 6294 goto activate_rxt; 6295 } 6296 idx = rsm->r_rtr_cnt - 1; 6297 time_since_sent = 0; 6298 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 6299 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 6300 else 6301 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 6302 if (TSTMP_GT(cts, tstmp_touse)) 6303 time_since_sent = cts - tstmp_touse; 6304 is_tlp_timer = 1; 6305 if (tp->t_srtt) { 6306 if ((rack->rc_srtt_measure_made == 0) && 6307 (tp->t_srtt == 1)) { 6308 /* 6309 * If another stack has run and set srtt to 1, 6310 * then the srtt was 0, so let's use the initial. 6311 */ 6312 srtt = RACK_INITIAL_RTO; 6313 } else { 6314 srtt_cur = tp->t_srtt; 6315 srtt = srtt_cur; 6316 } 6317 } else 6318 srtt = RACK_INITIAL_RTO; 6319 /* 6320 * If the SRTT is not keeping up and the 6321 * rack RTT has spiked we want to use 6322 * the last RTT not the smoothed one. 6323 */ 6324 if (rack_tlp_use_greater && 6325 tp->t_srtt && 6326 (srtt < rack_grab_rtt(tp, rack))) { 6327 srtt = rack_grab_rtt(tp, rack); 6328 } 6329 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 6330 if (thresh > time_since_sent) { 6331 to = thresh - time_since_sent; 6332 } else { 6333 to = rack->r_ctl.rc_min_to; 6334 rack_log_alt_to_to_cancel(rack, 6335 thresh, /* flex1 */ 6336 time_since_sent, /* flex2 */ 6337 tstmp_touse, /* flex3 */ 6338 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 6339 (uint32_t)rsm->r_tim_lastsent[idx], 6340 srtt, 6341 idx, 99); 6342 } 6343 if (to < rack_tlp_min) { 6344 to = rack_tlp_min; 6345 } 6346 if (to > TICKS_2_USEC(tcp_rexmit_max)) { 6347 /* 6348 * If the TLP time works out to larger than the max 6349 * RTO let's not do TLP.. just RTO. 6350 */ 6351 goto activate_rxt; 6352 } 6353 } 6354 if (is_tlp_timer == 0) { 6355 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 6356 } else { 6357 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 6358 } 6359 if (to == 0) 6360 to = 1; 6361 return (to); 6362 } 6363 6364 static void 6365 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, tcp_seq snd_una) 6366 { 6367 if (rack->rc_in_persist == 0) { 6368 if (tp->t_flags & TF_GPUTINPROG) { 6369 /* 6370 * Stop the goodput now, the calling of the 6371 * measurement function clears the flag.
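* (A zero-window stall counts as elapsed time with no delivery, so
* letting the measurement continue would badly understate the goodput.)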
6372 */ 6373 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, 6374 RACK_QUALITY_PERSIST); 6375 } 6376 #ifdef NETFLIX_SHARED_CWND 6377 if (rack->r_ctl.rc_scw) { 6378 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 6379 rack->rack_scwnd_is_idle = 1; 6380 } 6381 #endif 6382 rack->r_ctl.rc_went_idle_time = cts; 6383 if (rack->r_ctl.rc_went_idle_time == 0) 6384 rack->r_ctl.rc_went_idle_time = 1; 6385 if (rack->lt_bw_up) { 6386 /* Suspend our LT BW measurement */ 6387 uint64_t tmark; 6388 6389 rack->r_ctl.lt_bw_bytes += (snd_una - rack->r_ctl.lt_seq); 6390 rack->r_ctl.lt_seq = snd_una; 6391 tmark = tcp_tv_to_lusec(&rack->r_ctl.act_rcv_time); 6392 if (tmark >= rack->r_ctl.lt_timemark) { 6393 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 6394 } 6395 rack->r_ctl.lt_timemark = tmark; 6396 rack->lt_bw_up = 0; 6397 rack->r_persist_lt_bw_off = 1; 6398 } 6399 rack_timer_cancel(tp, rack, cts, __LINE__); 6400 rack->r_ctl.persist_lost_ends = 0; 6401 rack->probe_not_answered = 0; 6402 rack->forced_ack = 0; 6403 tp->t_rxtshift = 0; 6404 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 6405 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 6406 rack->rc_in_persist = 1; 6407 } 6408 } 6409 6410 static void 6411 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6412 { 6413 if (tcp_in_hpts(rack->rc_tp)) { 6414 tcp_hpts_remove(rack->rc_tp); 6415 rack->r_ctl.rc_hpts_flags = 0; 6416 } 6417 #ifdef NETFLIX_SHARED_CWND 6418 if (rack->r_ctl.rc_scw) { 6419 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 6420 rack->rack_scwnd_is_idle = 0; 6421 } 6422 #endif 6423 if (rack->rc_gp_dyn_mul && 6424 (rack->use_fixed_rate == 0) && 6425 (rack->rc_always_pace)) { 6426 /* 6427 * Do we count this as if a probe-rtt just 6428 * finished? 6429 */ 6430 uint32_t time_idle, idle_min; 6431 6432 time_idle = cts - rack->r_ctl.rc_went_idle_time; 6433 idle_min = rack_min_probertt_hold; 6434 if (rack_probertt_gpsrtt_cnt_div) { 6435 uint64_t extra; 6436 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 6437 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 6438 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 6439 idle_min += (uint32_t)extra; 6440 } 6441 if (time_idle >= idle_min) { 6442 /* Yes, we count it as a probe-rtt. 
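* The idle period was at least as long as a probe-rtt hold, so below we
* either stamp fresh probe-rtt entry times (if we were not in probe-rtt)
* or complete the probe-rtt that was in progress.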
*/ 6443 uint32_t us_cts; 6444 6445 us_cts = tcp_get_usecs(NULL); 6446 if (rack->in_probe_rtt == 0) { 6447 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 6448 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 6449 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 6450 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 6451 } else { 6452 rack_exit_probertt(rack, us_cts); 6453 } 6454 } 6455 } 6456 if (rack->r_persist_lt_bw_off) { 6457 /* Continue where we left off */ 6458 rack->r_ctl.lt_timemark = tcp_get_u64_usecs(NULL); 6459 rack->lt_bw_up = 1; 6460 rack->r_persist_lt_bw_off = 0; 6461 } 6462 rack->rc_in_persist = 0; 6463 rack->r_ctl.rc_went_idle_time = 0; 6464 tp->t_rxtshift = 0; 6465 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 6466 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 6467 rack->r_ctl.rc_agg_delayed = 0; 6468 rack->r_early = 0; 6469 rack->r_late = 0; 6470 rack->r_ctl.rc_agg_early = 0; 6471 } 6472 6473 static void 6474 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 6475 struct hpts_diag *diag, struct timeval *tv) 6476 { 6477 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 6478 union tcp_log_stackspecific log; 6479 6480 memset(&log, 0, sizeof(log)); 6481 log.u_bbr.flex1 = diag->p_nxt_slot; 6482 log.u_bbr.flex2 = diag->p_cur_slot; 6483 log.u_bbr.flex3 = diag->slot_req; 6484 log.u_bbr.flex4 = diag->inp_hptsslot; 6485 log.u_bbr.flex5 = diag->time_remaining; 6486 log.u_bbr.flex6 = diag->need_new_to; 6487 log.u_bbr.flex7 = diag->p_hpts_active; 6488 log.u_bbr.flex8 = diag->p_on_min_sleep; 6489 /* Hijack other fields as needed */ 6490 log.u_bbr.epoch = diag->have_slept; 6491 log.u_bbr.lt_epoch = diag->yet_to_sleep; 6492 log.u_bbr.pkts_out = diag->co_ret; 6493 log.u_bbr.applimited = diag->hpts_sleep_time; 6494 log.u_bbr.delivered = diag->p_prev_slot; 6495 log.u_bbr.inflight = diag->p_runningslot; 6496 log.u_bbr.bw_inuse = diag->wheel_slot; 6497 log.u_bbr.rttProp = diag->wheel_cts; 6498 log.u_bbr.timeStamp = cts; 6499 log.u_bbr.delRate = diag->maxslots; 6500 TCP_LOG_EVENTP(rack->rc_tp, NULL, 6501 &rack->rc_inp->inp_socket->so_rcv, 6502 &rack->rc_inp->inp_socket->so_snd, 6503 BBR_LOG_HPTSDIAG, 0, 6504 0, &log, false, tv); 6505 } 6506 6507 } 6508 6509 static void 6510 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 6511 { 6512 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 6513 union tcp_log_stackspecific log; 6514 struct timeval tv; 6515 6516 memset(&log, 0, sizeof(log)); 6517 log.u_bbr.flex1 = sb->sb_flags; 6518 log.u_bbr.flex2 = len; 6519 log.u_bbr.flex3 = sb->sb_state; 6520 log.u_bbr.flex8 = type; 6521 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 6522 TCP_LOG_EVENTP(rack->rc_tp, NULL, 6523 &rack->rc_inp->inp_socket->so_rcv, 6524 &rack->rc_inp->inp_socket->so_snd, 6525 TCP_LOG_SB_WAKE, 0, 6526 len, &log, false, &tv); 6527 } 6528 } 6529 6530 static void 6531 rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 6532 int32_t usecs, uint32_t tot_len_this_send, int sup_rack) 6533 { 6534 struct hpts_diag diag; 6535 struct inpcb *inp = tptoinpcb(tp); 6536 struct timeval tv; 6537 uint32_t delayed_ack = 0; 6538 uint32_t hpts_timeout; 6539 uint32_t entry_usecs = usecs; 6540 uint8_t stopped; 6541 uint32_t left = 0; 6542 uint32_t us_cts; 6543 6544 if ((tp->t_state == TCPS_CLOSED) || 6545 (tp->t_state == TCPS_LISTEN)) { 6546 return; 6547 } 6548 if (tcp_in_hpts(tp)) { 6549 /* Already on the pacer */ 6550 return; 6551 } 
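/* Note any time left over from a previously stopped timer; the keep-alive/delayed-ack restart logic further down may re-arm with that remainder instead of a full timeout. */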
6552 stopped = rack->rc_tmr_stopped; 6553 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 6554 left = rack->r_ctl.rc_timer_exp - cts; 6555 } 6556 rack->r_ctl.rc_timer_exp = 0; 6557 rack->r_ctl.rc_hpts_flags = 0; 6558 us_cts = tcp_get_usecs(&tv); 6559 /* Now early/late accounting */ 6560 rack_log_pacing_delay_calc(rack, entry_usecs, usecs, 0, 0, 0, 26, __LINE__, NULL, 0); 6561 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { 6562 /* 6563 * We have an early carry over set, 6564 * we can always add more time so we 6565 * can always make this compensation. 6566 * 6567 * Note if ack's are allowed to wake us do not 6568 * penalize the next timer for being woken 6569 * by an ack aka the rc_agg_early (non-paced mode). 6570 */ 6571 usecs += rack->r_ctl.rc_agg_early; 6572 rack->r_early = 0; 6573 rack->r_ctl.rc_agg_early = 0; 6574 } 6575 if ((rack->r_late) && 6576 ((rack->r_use_hpts_min == 0) || (rack->dgp_on == 0))) { 6577 /* 6578 * This is harder, we can 6579 * compensate some but it 6580 * really depends on what 6581 * the current pacing time is. 6582 */ 6583 if (rack->r_ctl.rc_agg_delayed >= usecs) { 6584 /* 6585 * We can't compensate for it all. 6586 * And we have to have some time 6587 * on the clock. We always have a min 6588 * 10 HPTS timer units (10 x 10 i.e. 100 usecs). 6589 */ 6590 if (usecs <= HPTS_USECS_PER_SLOT) { 6591 /* We gain delay */ 6592 rack->r_ctl.rc_agg_delayed += (HPTS_USECS_PER_SLOT - usecs); 6593 usecs = HPTS_USECS_PER_SLOT; 6594 } else { 6595 /* We take off some */ 6596 rack->r_ctl.rc_agg_delayed -= (usecs - HPTS_USECS_PER_SLOT); 6597 usecs = HPTS_USECS_PER_SLOT; 6598 } 6599 } else { 6600 usecs -= rack->r_ctl.rc_agg_delayed; 6601 rack->r_ctl.rc_agg_delayed = 0; 6602 /* Make sure we have 100 useconds at minimum */ 6603 if (usecs < HPTS_USECS_PER_SLOT) { 6604 rack->r_ctl.rc_agg_delayed = HPTS_USECS_PER_SLOT - usecs; 6605 usecs = HPTS_USECS_PER_SLOT; 6606 } 6607 if (rack->r_ctl.rc_agg_delayed == 0) 6608 rack->r_late = 0; 6609 } 6610 } else if (rack->r_late) { 6611 /* r_use_hpts_min is on and so is DGP */ 6612 uint32_t max_red; 6613 6614 max_red = (usecs * rack->r_ctl.max_reduction) / 100; 6615 if (max_red >= rack->r_ctl.rc_agg_delayed) { 6616 usecs -= rack->r_ctl.rc_agg_delayed; 6617 rack->r_ctl.rc_agg_delayed = 0; 6618 } else { 6619 usecs -= max_red; 6620 rack->r_ctl.rc_agg_delayed -= max_red; 6621 } 6622 } 6623 if ((rack->r_use_hpts_min == 1) && 6624 (usecs > 0) && 6625 (rack->dgp_on == 1)) { 6626 /* 6627 * We are enforcing a min pacing timer 6628 * based on our hpts min timeout. 6629 */ 6630 uint32_t min; 6631 6632 min = get_hpts_min_sleep_time(); 6633 if (min > usecs) { 6634 usecs = min; 6635 } 6636 } 6637 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 6638 if (tp->t_flags & TF_DELACK) { 6639 delayed_ack = TICKS_2_USEC(tcp_delacktime); 6640 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 6641 } 6642 if (delayed_ack && ((hpts_timeout == 0) || 6643 (delayed_ack < hpts_timeout))) 6644 hpts_timeout = delayed_ack; 6645 else 6646 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 6647 /* 6648 * If no timers are going to run and we will fall off the hptsi 6649 * wheel, we resort to a keep-alive timer if it's configured. 6650 */ 6651 if ((hpts_timeout == 0) && 6652 (usecs == 0)) { 6653 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 6654 (tp->t_state <= TCPS_CLOSING)) { 6655 /* 6656 * Ok we have no timer (persists, rack, tlp, rxt or 6657 * del-ack), we don't have segments being paced.
So 6658 * all that is left is the keepalive timer. 6659 */ 6660 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 6661 /* Get the established keep-alive time */ 6662 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 6663 } else { 6664 /* 6665 * Get the initial setup keep-alive time, 6666 * note that this is probably not going to 6667 * happen, since rack will be running a rxt timer 6668 * if a SYN of some sort is outstanding. It is 6669 * actually handled in rack_timeout_rxt(). 6670 */ 6671 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 6672 } 6673 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 6674 if (rack->in_probe_rtt) { 6675 /* 6676 * We want to instead not wake up a long time from 6677 * now but to wake up about the time we would 6678 * exit probe-rtt and initiate a keep-alive ack. 6679 * This will get us out of probe-rtt and update 6680 * our min-rtt. 6681 */ 6682 hpts_timeout = rack_min_probertt_hold; 6683 } 6684 } 6685 } 6686 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 6687 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 6688 /* 6689 * RACK, TLP, persists and RXT timers all are restartable 6690 * based on actions input .. i.e we received a packet (ack 6691 * or sack) and that changes things (rw, or snd_una etc). 6692 * Thus we can restart them with a new value. For 6693 * keep-alive, delayed_ack we keep track of what was left 6694 * and restart the timer with a smaller value. 6695 */ 6696 if (left < hpts_timeout) 6697 hpts_timeout = left; 6698 } 6699 if (hpts_timeout) { 6700 /* 6701 * Hack alert for now we can't time-out over 2,147,483 6702 * seconds (a bit more than 596 hours), which is probably ok 6703 * :). 6704 */ 6705 if (hpts_timeout > 0x7ffffffe) 6706 hpts_timeout = 0x7ffffffe; 6707 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 6708 } 6709 rack_log_pacing_delay_calc(rack, entry_usecs, usecs, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); 6710 if ((rack->gp_ready == 0) && 6711 (rack->use_fixed_rate == 0) && 6712 (hpts_timeout < usecs) && 6713 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 6714 /* 6715 * We have no good estimate yet for the 6716 * old clunky burst mitigation or the 6717 * real pacing. And the tlp or rxt is smaller 6718 * than the pacing calculation. Lets not 6719 * pace that long since we know the calculation 6720 * so far is not accurate. 6721 */ 6722 usecs = hpts_timeout; 6723 } 6724 /** 6725 * Turn off all the flags for queuing by default. The 6726 * flags have important meanings to what happens when 6727 * LRO interacts with the transport. Most likely (by default now) 6728 * mbuf_queueing and ack compression are on. So the transport 6729 * has a couple of flags that control what happens (if those 6730 * are not on then these flags won't have any effect since it 6731 * won't go through the queuing LRO path). 6732 * 6733 * TF2_MBUF_QUEUE_READY - This flags says that I am busy 6734 * pacing output, so don't disturb. But 6735 * it also means LRO can wake me if there 6736 * is a SACK arrival. 6737 * 6738 * TF2_DONT_SACK_QUEUE - This flag is used in conjunction 6739 * with the above flag (QUEUE_READY) and 6740 * when present it says don't even wake me 6741 * if a SACK arrives. 6742 * 6743 * The idea behind these flags is that if we are pacing we 6744 * set the MBUF_QUEUE_READY and only get woken up if 6745 * a SACK arrives (which could change things) or if 6746 * our pacing timer expires. If, however, we have a rack 6747 * timer running, then we don't even want a sack to wake 6748 * us since the rack timer has to expire before we can send. 
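* In short: TF2_MBUF_QUEUE_READY defers wakeups for everything except
* SACK arrivals, and TF2_DONT_SACK_QUEUE defers even those.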
6749 * 6750 * Other cases should usually have none of the flags set 6751 * so LRO can call into us. 6752 */ 6753 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE|TF2_MBUF_QUEUE_READY); 6754 if (usecs) { 6755 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 6756 rack->r_ctl.rc_last_output_to = us_cts + usecs; 6757 /* 6758 * A pacing timer (usecs microseconds) is being set, in 6759 * such a case we cannot send (we are blocked by 6760 * the timer). So lets tell LRO that it should not 6761 * wake us unless there is a SACK. Note this only 6762 * will be effective if mbuf queueing is on or 6763 * compressed acks are being processed. 6764 */ 6765 tp->t_flags2 |= TF2_MBUF_QUEUE_READY; 6766 /* 6767 * But wait if we have a Rack timer running 6768 * even a SACK should not disturb us (with 6769 * the exception of r_rr_config 3). 6770 */ 6771 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) || 6772 (IN_RECOVERY(tp->t_flags))) { 6773 if (rack->r_rr_config != 3) 6774 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; 6775 else if (rack->rc_pace_dnd) { 6776 /* 6777 * When DND is on, we only let a sack 6778 * interrupt us if we are not in recovery. 6779 * 6780 * If DND is off, then we never hit here 6781 * and let all sacks wake us up. 6782 * 6783 */ 6784 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; 6785 } 6786 } 6787 if (rack->rc_ack_can_sendout_data) { 6788 /* 6789 * Ahh but wait, this is that special case 6790 * where the pacing timer can be disturbed 6791 * backout the changes (used for non-paced 6792 * burst limiting). 6793 */ 6794 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE | 6795 TF2_MBUF_QUEUE_READY); 6796 } 6797 if ((rack->use_rack_rr) && 6798 (rack->r_rr_config < 2) && 6799 ((hpts_timeout) && (hpts_timeout < usecs))) { 6800 /* 6801 * Arrange for the hpts to kick back in after the 6802 * t-o if the t-o does not cause a send. 6803 */ 6804 tcp_hpts_insert(tp, hpts_timeout, &diag); 6805 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6806 rack_log_to_start(rack, cts, hpts_timeout, usecs, 0); 6807 } else { 6808 tcp_hpts_insert(tp, usecs, &diag); 6809 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6810 rack_log_to_start(rack, cts, hpts_timeout, usecs, 1); 6811 } 6812 } else if (hpts_timeout) { 6813 /* 6814 * With respect to t_flags2(?) here, lets let any new acks wake 6815 * us up here. Since we are not pacing (no pacing timer), output 6816 * can happen so we should let it. If its a Rack timer, then any inbound 6817 * packet probably won't change the sending (we will be blocked) 6818 * but it may change the prr stats so letting it in (the set defaults 6819 * at the start of this block) are good enough. 
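* Clearing PACE_PKT_OUTPUT below records that only a protocol timer,
* not a pacing timer, is pending on the hpts wheel.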
6820 */ 6821 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 6822 tcp_hpts_insert(tp, hpts_timeout, &diag); 6823 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6824 rack_log_to_start(rack, cts, hpts_timeout, usecs, 0); 6825 } else { 6826 /* No timer starting */ 6827 #ifdef INVARIANTS 6828 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 6829 panic("tp:%p rack:%p tlts:%d cts:%u usecs:%u pto:%u -- no timer started?", 6830 tp, rack, tot_len_this_send, cts, usecs, hpts_timeout); 6831 } 6832 #endif 6833 } 6834 rack->rc_tmr_stopped = 0; 6835 if (usecs) 6836 rack_log_type_bbrsnd(rack, tot_len_this_send, usecs, us_cts, &tv, __LINE__); 6837 } 6838 6839 static void 6840 rack_mark_lost(struct tcpcb *tp, 6841 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts) 6842 { 6843 struct rack_sendmap *nrsm; 6844 uint32_t thresh, exp; 6845 6846 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(tp, rack), cts, __LINE__, 0); 6847 nrsm = rsm; 6848 TAILQ_FOREACH_FROM(nrsm, &rack->r_ctl.rc_tmap, r_tnext) { 6849 if ((nrsm->r_flags & RACK_SACK_PASSED) == 0) { 6850 /* Got up to all that were marked sack-passed */ 6851 break; 6852 } 6853 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { 6854 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; 6855 if (TSTMP_LT(exp, cts) || (exp == cts)) { 6856 /* We now consider it lost */ 6857 nrsm->r_flags |= RACK_WAS_LOST; 6858 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; 6859 } else { 6860 /* Past here it won't be lost so stop */ 6861 break; 6862 } 6863 } 6864 } 6865 } 6866 6867 static inline void 6868 rack_mark_nolonger_lost(struct tcp_rack *rack, struct rack_sendmap *rsm) 6869 { 6870 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), 6871 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 6872 rsm->r_flags &= ~RACK_WAS_LOST; 6873 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) 6874 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; 6875 else 6876 rack->r_ctl.rc_considered_lost = 0; 6877 } 6878 6879 /* 6880 * RACK Timer, here we simply do logging and house keeping. 6881 * the normal rack_output() function will call the 6882 * appropriate thing to check if we need to do a RACK retransmit. 6883 * We return 1, saying don't proceed with rack_output only 6884 * when all timers have been stopped (destroyed PCB?). 6885 */ 6886 static int 6887 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6888 { 6889 /* 6890 * This timer simply provides an internal trigger to send out data. 6891 * The check_recovery_mode call will see if there are needed 6892 * retransmissions, if so we will enter fast-recovery. The output 6893 * call may or may not do the same thing depending on sysctl 6894 * settings. 6895 */ 6896 struct rack_sendmap *rsm; 6897 6898 counter_u64_add(rack_to_tot, 1); 6899 if (rack->r_state && (rack->r_state != tp->t_state)) 6900 rack_set_state(tp, rack); 6901 rack->rc_on_min_to = 0; 6902 rsm = rack_check_recovery_mode(tp, cts); 6903 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 6904 if (rsm) { 6905 /* We need to stroke any lost that are now declared as lost */ 6906 rack_mark_lost(tp, rack, rsm, cts); 6907 rack->r_ctl.rc_resend = rsm; 6908 rack->r_timer_override = 1; 6909 if (rack->use_rack_rr) { 6910 /* 6911 * Don't accumulate extra pacing delay 6912 * we are allowing the rack timer to 6913 * over-ride pacing i.e. 
rrr takes precedence 6914 * if the pacing interval is longer than the rrr 6915 * time (in other words we get the min pacing 6916 * time versus rrr pacing time). 6917 */ 6918 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 6919 } 6920 } 6921 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 6922 if (rsm == NULL) { 6923 /* restart a timer and return 1 */ 6924 rack_start_hpts_timer(rack, tp, cts, 6925 0, 0, 0); 6926 return (1); 6927 } 6928 return (0); 6929 } 6930 6931 6932 6933 static void 6934 rack_adjust_orig_mlen(struct rack_sendmap *rsm) 6935 { 6936 6937 if ((M_TRAILINGROOM(rsm->m) != rsm->orig_t_space)) { 6938 /* 6939 * The trailing space changed, mbufs can grow 6940 * at the tail but they can't shrink from 6941 * it, KASSERT that. Adjust the orig_m_len to 6942 * compensate for this change. 6943 */ 6944 KASSERT((rsm->orig_t_space > M_TRAILINGROOM(rsm->m)), 6945 ("mbuf:%p rsm:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n", 6946 rsm->m, 6947 rsm, 6948 (intmax_t)M_TRAILINGROOM(rsm->m), 6949 rsm->orig_t_space, 6950 rsm->orig_m_len, 6951 rsm->m->m_len)); 6952 rsm->orig_m_len += (rsm->orig_t_space - M_TRAILINGROOM(rsm->m)); 6953 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 6954 } 6955 if (rsm->m->m_len < rsm->orig_m_len) { 6956 /* 6957 * Mbuf shrank, trimmed off the top by an ack, our 6958 * offset changes. 6959 */ 6960 KASSERT((rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)), 6961 ("mbuf:%p len:%u rsm:%p oml:%u soff:%u\n", 6962 rsm->m, rsm->m->m_len, 6963 rsm, rsm->orig_m_len, 6964 rsm->soff)); 6965 if (rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)) 6966 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); 6967 else 6968 rsm->soff = 0; 6969 rsm->orig_m_len = rsm->m->m_len; 6970 #ifdef INVARIANTS 6971 } else if (rsm->m->m_len > rsm->orig_m_len) { 6972 panic("rsm:%p m:%p m_len grew outside of t_space compensation", 6973 rsm, rsm->m); 6974 #endif 6975 } 6976 } 6977 6978 static void 6979 rack_setup_offset_for_rsm(struct tcp_rack *rack, struct rack_sendmap *src_rsm, struct rack_sendmap *rsm) 6980 { 6981 struct mbuf *m; 6982 uint32_t soff; 6983 6984 if (src_rsm->m && 6985 ((src_rsm->orig_m_len != src_rsm->m->m_len) || 6986 (M_TRAILINGROOM(src_rsm->m) != src_rsm->orig_t_space))) { 6987 /* Fix up the orig_m_len and possibly the mbuf offset */ 6988 rack_adjust_orig_mlen(src_rsm); 6989 } 6990 m = src_rsm->m; 6991 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); 6992 while (soff >= m->m_len) { 6993 /* Move out past this mbuf */ 6994 soff -= m->m_len; 6995 m = m->m_next; 6996 KASSERT((m != NULL), 6997 ("rsm:%p nrsm:%p hit at soff:%u null m", 6998 src_rsm, rsm, soff)); 6999 if (m == NULL) { 7000 /* This should *not* happen which is why there is a kassert */ 7001 src_rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 7002 (src_rsm->r_start - rack->rc_tp->snd_una), 7003 &src_rsm->soff); 7004 src_rsm->orig_m_len = src_rsm->m->m_len; 7005 src_rsm->orig_t_space = M_TRAILINGROOM(src_rsm->m); 7006 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 7007 (rsm->r_start - rack->rc_tp->snd_una), 7008 &rsm->soff); 7009 rsm->orig_m_len = rsm->m->m_len; 7010 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7011 return; 7012 } 7013 } 7014 rsm->m = m; 7015 rsm->soff = soff; 7016 rsm->orig_m_len = m->m_len; 7017 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7018 } 7019 7020 static inline void 7021 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 7022 struct rack_sendmap *rsm, uint32_t start) 7023 { 7024 int idx; 7025 7026 nrsm->r_start = start; 7027 nrsm->r_end = rsm->r_end; 7028 nrsm->r_rtr_cnt = 
rsm->r_rtr_cnt; 7029 nrsm->r_act_rxt_cnt = rsm->r_act_rxt_cnt; 7030 nrsm->r_flags = rsm->r_flags; 7031 nrsm->r_dupack = rsm->r_dupack; 7032 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; 7033 nrsm->r_rtr_bytes = 0; 7034 nrsm->r_fas = rsm->r_fas; 7035 nrsm->r_bas = rsm->r_bas; 7036 tqhash_update_end(rack->r_ctl.tqh, rsm, nrsm->r_start); 7037 nrsm->r_just_ret = rsm->r_just_ret; 7038 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 7039 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 7040 } 7041 /* Now if we have a SYN flag we keep it on the left edge */ 7042 if (nrsm->r_flags & RACK_HAS_SYN) 7043 nrsm->r_flags &= ~RACK_HAS_SYN; 7044 /* Now if we have a FIN flag we keep it on the right edge */ 7045 if (rsm->r_flags & RACK_HAS_FIN) 7046 rsm->r_flags &= ~RACK_HAS_FIN; 7047 /* Push bit must go to the right edge as well */ 7048 if (rsm->r_flags & RACK_HAD_PUSH) 7049 rsm->r_flags &= ~RACK_HAD_PUSH; 7050 /* Update the count if app limited */ 7051 if (nrsm->r_flags & RACK_APP_LIMITED) 7052 rack->r_ctl.rc_app_limited_cnt++; 7053 /* Clone over the state of the hw_tls flag */ 7054 nrsm->r_hw_tls = rsm->r_hw_tls; 7055 /* 7056 * Now we need to find nrsm's new location in the mbuf chain; 7057 * we basically calculate a new offset, which is soff + 7058 * how much is left in original rsm. Then we walk out the mbuf 7059 * chain to find the right position, it may be the same mbuf 7060 * or maybe not. 7061 */ 7062 KASSERT(((rsm->m != NULL) || 7063 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 7064 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 7065 if (rsm->m) 7066 rack_setup_offset_for_rsm(rack, rsm, nrsm); 7067 } 7068 7069 static struct rack_sendmap * 7070 rack_merge_rsm(struct tcp_rack *rack, 7071 struct rack_sendmap *l_rsm, 7072 struct rack_sendmap *r_rsm) 7073 { 7074 /* 7075 * We are merging two ack'd RSM's, 7076 * the l_rsm is on the left (lower seq 7077 * values) and the r_rsm is on the right 7078 * (higher seq value). The simplest way 7079 * to merge these is to move the right 7080 * one into the left. I don't think there 7081 * is any reason we need to try to find 7082 * the oldest (or last oldest retransmitted). 7083 */ 7084 rack_log_map_chg(rack->rc_tp, rack, NULL, 7085 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 7086 tqhash_update_end(rack->r_ctl.tqh, l_rsm, r_rsm->r_end); 7087 if (l_rsm->r_dupack < r_rsm->r_dupack) 7088 l_rsm->r_dupack = r_rsm->r_dupack; 7089 if (r_rsm->r_rtr_bytes) 7090 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 7091 if (r_rsm->r_in_tmap) { 7092 /* This really should not happen */ 7093 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 7094 r_rsm->r_in_tmap = 0; 7095 } 7096 7097 /* Now the flags */ 7098 if (r_rsm->r_flags & RACK_HAS_FIN) 7099 l_rsm->r_flags |= RACK_HAS_FIN; 7100 if (r_rsm->r_flags & RACK_TLP) 7101 l_rsm->r_flags |= RACK_TLP; 7102 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 7103 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 7104 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 7105 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 7106 /* 7107 * If both are app-limited then let the 7108 * free lower the count. If right is app 7109 * limited and left is not, transfer. 7110 */ 7111 l_rsm->r_flags |= RACK_APP_LIMITED; 7112 r_rsm->r_flags &= ~RACK_APP_LIMITED; 7113 if (r_rsm == rack->r_ctl.rc_first_appl) 7114 rack->r_ctl.rc_first_appl = l_rsm; 7115 } 7116 tqhash_remove(rack->r_ctl.tqh, r_rsm, REMOVE_TYPE_MERGE); 7117 /* 7118 * We keep the largest value, which is the newest 7119 * send.
We do this in case a segment that is 7120 * joined together and not part of a GP estimate 7121 * later gets expanded into the GP estimate. 7122 * 7123 * We prohibit the merging of unlike kinds, i.e. 7124 * all pieces that are in the GP estimate can be 7125 * merged and all pieces that are not in a GP estimate 7126 * can be merged, but not dissimilar pieces. Combine 7127 * this with taking the highest here and we should 7128 * be ok unless of course the client reneges. Then 7129 * all bets are off. 7130 */ 7131 if(l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] < 7132 r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]) { 7133 l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] = r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]; 7134 } 7135 /* 7136 * When merging two RSM's we also need to consider the ack time and keep 7137 * newest. If the ack gets merged into a measurement then that is the 7138 * one we will want to be using. 7139 */ 7140 if(l_rsm->r_ack_arrival < r_rsm->r_ack_arrival) 7141 l_rsm->r_ack_arrival = r_rsm->r_ack_arrival; 7142 7143 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 7144 /* Transfer the split limit to the map we free */ 7145 r_rsm->r_limit_type = l_rsm->r_limit_type; 7146 l_rsm->r_limit_type = 0; 7147 } 7148 rack_free(rack, r_rsm); 7149 l_rsm->r_flags |= RACK_MERGED; 7150 return (l_rsm); 7151 } 7152 7153 /* 7154 * TLP Timer, here we simply set up what segment we want to 7155 * have the TLP expire on, the normal rack_output() will then 7156 * send it out. 7157 * 7158 * We return 1, saying don't proceed with rack_output only 7159 * when all timers have been stopped (destroyed PCB?). 7160 */ 7161 static int 7162 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp) 7163 { 7164 /* 7165 * Tail Loss Probe. 7166 */ 7167 struct rack_sendmap *rsm = NULL; 7168 int insret __diagused; 7169 struct socket *so = tptosocket(tp); 7170 uint32_t amm; 7171 uint32_t out, avail; 7172 int collapsed_win = 0; 7173 7174 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 7175 /* It's not time yet */ 7176 return (0); 7177 } 7178 if (ctf_progress_timeout_check(tp, true)) { 7179 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7180 return (-ETIMEDOUT); /* tcp_drop() */ 7181 } 7182 /* 7183 * A TLP timer has expired. We have been idle for 2 rtts. So we now 7184 * need to figure out how to force a full MSS segment out. 7185 */ 7186 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); 7187 rack->r_ctl.retran_during_recovery = 0; 7188 rack->r_might_revert = 0; 7189 rack->r_ctl.dsack_byte_cnt = 0; 7190 counter_u64_add(rack_tlp_tot, 1); 7191 if (rack->r_state && (rack->r_state != tp->t_state)) 7192 rack_set_state(tp, rack); 7193 avail = sbavail(&so->so_snd); 7194 out = tp->snd_max - tp->snd_una; 7195 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) { 7196 /* special case, we need a retransmission */ 7197 collapsed_win = 1; 7198 goto need_retran; 7199 } 7200 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { 7201 rack->r_ctl.dsack_persist--; 7202 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 7203 rack->r_ctl.num_dsack = 0; 7204 } 7205 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 7206 } 7207 if ((tp->t_flags & TF_GPUTINPROG) && 7208 (rack->r_ctl.rc_tlp_cnt_out == 1)) { 7209 /* 7210 * If this is the second in-a-row 7211 * TLP and we are doing a measurement 7212 * it's time to abandon the measurement. 7213 * Something is likely broken on 7214 * the client's network and measuring a 7215 * broken network does us no good.
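* (Two TLPs in a row mean nothing new has been ACKed across the probes,
* so any goodput sample would mostly measure the stall, not the path.)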
7216 */ 7217 tp->t_flags &= ~TF_GPUTINPROG; 7218 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7219 rack->r_ctl.rc_gp_srtt /*flex1*/, 7220 tp->gput_seq, 7221 0, 0, 18, __LINE__, NULL, 0); 7222 } 7223 /* 7224 * Check our send oldest always settings, and if 7225 * there is an oldest to send jump to the need_retran. 7226 */ 7227 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 7228 goto need_retran; 7229 7230 if (avail > out) { 7231 /* New data is available */ 7232 amm = avail - out; 7233 if (amm > ctf_fixed_maxseg(tp)) { 7234 amm = ctf_fixed_maxseg(tp); 7235 if ((amm + out) > tp->snd_wnd) { 7236 /* We are rwnd limited */ 7237 goto need_retran; 7238 } 7239 } else if (amm < ctf_fixed_maxseg(tp)) { 7240 /* not enough to fill a MTU */ 7241 goto need_retran; 7242 } 7243 if (IN_FASTRECOVERY(tp->t_flags)) { 7244 /* Unlikely */ 7245 if (rack->rack_no_prr == 0) { 7246 if (out + amm <= tp->snd_wnd) { 7247 rack->r_ctl.rc_prr_sndcnt = amm; 7248 rack->r_ctl.rc_tlp_new_data = amm; 7249 rack_log_to_prr(rack, 4, 0, __LINE__); 7250 } 7251 } else 7252 goto need_retran; 7253 } else { 7254 /* Set the send-new override */ 7255 if (out + amm <= tp->snd_wnd) 7256 rack->r_ctl.rc_tlp_new_data = amm; 7257 else 7258 goto need_retran; 7259 } 7260 rack->r_ctl.rc_tlpsend = NULL; 7261 counter_u64_add(rack_tlp_newdata, 1); 7262 goto send; 7263 } 7264 need_retran: 7265 /* 7266 * Ok we need to arrange the last un-acked segment to be re-sent, or 7267 * optionally the first un-acked segment. 7268 */ 7269 if (collapsed_win == 0) { 7270 if (rack_always_send_oldest) 7271 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 7272 else { 7273 rsm = tqhash_max(rack->r_ctl.tqh); 7274 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 7275 rsm = rack_find_high_nonack(rack, rsm); 7276 } 7277 } 7278 if (rsm == NULL) { 7279 #ifdef TCP_BLACKBOX 7280 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 7281 #endif 7282 goto out; 7283 } 7284 } else { 7285 /* 7286 * We had a collapsed window, lets find 7287 * the point before the collapse. 7288 */ 7289 if (SEQ_GT((rack->r_ctl.last_collapse_point - 1), rack->rc_tp->snd_una)) 7290 rsm = tqhash_find(rack->r_ctl.tqh, (rack->r_ctl.last_collapse_point - 1)); 7291 else { 7292 rsm = tqhash_min(rack->r_ctl.tqh); 7293 } 7294 if (rsm == NULL) { 7295 /* Huh */ 7296 goto out; 7297 } 7298 } 7299 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 7300 /* 7301 * We need to split this the last segment in two. 7302 */ 7303 struct rack_sendmap *nrsm; 7304 7305 nrsm = rack_alloc_full_limit(rack); 7306 if (nrsm == NULL) { 7307 /* 7308 * No memory to split, we will just exit and punt 7309 * off to the RXT timer. 
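* Without a split the probe would have to resend the whole over-sized
* block, so falling back to the RXT timer is the conservative choice.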
7310 */ 7311 goto out; 7312 } 7313 rack_clone_rsm(rack, nrsm, rsm, 7314 (rsm->r_end - ctf_fixed_maxseg(tp))); 7315 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7316 #ifndef INVARIANTS 7317 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 7318 #else 7319 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 7320 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 7321 nrsm, insret, rack, rsm); 7322 } 7323 #endif 7324 if (rsm->r_in_tmap) { 7325 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7326 nrsm->r_in_tmap = 1; 7327 } 7328 rsm = nrsm; 7329 } 7330 rack->r_ctl.rc_tlpsend = rsm; 7331 send: 7332 /* Make sure output path knows we are doing a TLP */ 7333 *doing_tlp = 1; 7334 rack->r_timer_override = 1; 7335 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 7336 return (0); 7337 out: 7338 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 7339 return (0); 7340 } 7341 7342 /* 7343 * Delayed ack Timer, here we simply need to setup the 7344 * ACK_NOW flag and remove the DELACK flag. From there 7345 * the output routine will send the ack out. 7346 * 7347 * We only return 1, saying don't proceed, if all timers 7348 * are stopped (destroyed PCB?). 7349 */ 7350 static int 7351 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7352 { 7353 7354 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 7355 tp->t_flags &= ~TF_DELACK; 7356 tp->t_flags |= TF_ACKNOW; 7357 KMOD_TCPSTAT_INC(tcps_delack); 7358 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 7359 return (0); 7360 } 7361 7362 static inline int 7363 rack_send_ack_challange(struct tcp_rack *rack) 7364 { 7365 struct tcptemp *t_template; 7366 7367 t_template = tcpip_maketemplate(rack->rc_inp); 7368 if (t_template) { 7369 if (rack->forced_ack == 0) { 7370 rack->forced_ack = 1; 7371 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 7372 } else { 7373 rack->probe_not_answered = 1; 7374 } 7375 tcp_respond(rack->rc_tp, t_template->tt_ipgen, 7376 &t_template->tt_t, (struct mbuf *)NULL, 7377 rack->rc_tp->rcv_nxt, rack->rc_tp->snd_una - 1, 0); 7378 free(t_template, M_TEMP); 7379 /* This does send an ack so kill any D-ack timer */ 7380 if (rack->rc_tp->t_flags & TF_DELACK) 7381 rack->rc_tp->t_flags &= ~TF_DELACK; 7382 return(1); 7383 } else 7384 return (0); 7385 7386 } 7387 7388 /* 7389 * Persists timer, here we simply send the 7390 * same thing as a keepalive will. 7391 * the one byte send. 7392 * 7393 * We only return 1, saying don't proceed, if all timers 7394 * are stopped (destroyed PCB?). 7395 */ 7396 static int 7397 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7398 { 7399 int32_t retval = 1; 7400 7401 if (rack->rc_in_persist == 0) 7402 return (0); 7403 if (ctf_progress_timeout_check(tp, false)) { 7404 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 7405 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7406 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 7407 return (-ETIMEDOUT); /* tcp_drop() */ 7408 } 7409 /* 7410 * Persistence timer into zero window. Force a byte to be output, if 7411 * possible. 7412 */ 7413 KMOD_TCPSTAT_INC(tcps_persisttimeo); 7414 /* 7415 * Hack: if the peer is dead/unreachable, we do not time out if the 7416 * window is closed. After a full backoff, drop the connection if 7417 * the idle time (no responses to probes) reaches the maximum 7418 * backoff that we would use if retransmitting. 
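* Concretely, the check below drops once t_rxtshift has reached
* V_tcp_retries and the peer has been silent for at least
* tcp_maxpersistidle (or the equivalent fully backed-off retransmit time).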
7419 */ 7420 if (tp->t_rxtshift >= V_tcp_retries && 7421 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 7422 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 7423 KMOD_TCPSTAT_INC(tcps_persistdrop); 7424 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 7425 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 7426 retval = -ETIMEDOUT; /* tcp_drop() */ 7427 goto out; 7428 } 7429 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 7430 tp->snd_una == tp->snd_max) 7431 rack_exit_persist(tp, rack, cts); 7432 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 7433 /* 7434 * If the user has closed the socket then drop a persisting 7435 * connection after a much reduced timeout. 7436 */ 7437 if (tp->t_state > TCPS_CLOSE_WAIT && 7438 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 7439 KMOD_TCPSTAT_INC(tcps_persistdrop); 7440 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 7441 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 7442 retval = -ETIMEDOUT; /* tcp_drop() */ 7443 goto out; 7444 } 7445 if (rack_send_ack_challange(rack)) { 7446 /* only set it if we were answered */ 7447 if (rack->probe_not_answered) { 7448 counter_u64_add(rack_persists_loss, 1); 7449 rack->r_ctl.persist_lost_ends++; 7450 } 7451 counter_u64_add(rack_persists_sends, 1); 7452 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 7453 } 7454 if (tp->t_rxtshift < V_tcp_retries) 7455 tp->t_rxtshift++; 7456 out: 7457 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 7458 rack_start_hpts_timer(rack, tp, cts, 7459 0, 0, 0); 7460 return (retval); 7461 } 7462 7463 /* 7464 * If a keepalive goes off, we had no other timers 7465 * happening. We return 1 here since this 7466 * routine either sends out a probe segment or returns 7467 * a negative value so the connection can be dropped. 7468 */ 7469 static int 7470 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7471 { 7472 struct inpcb *inp = tptoinpcb(tp); 7473 7474 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 7475 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 7476 /* 7477 * Keep-alive timer went off; send something or drop connection if 7478 * idle for too long. 7479 */ 7480 KMOD_TCPSTAT_INC(tcps_keeptimeo); 7481 if (tp->t_state < TCPS_ESTABLISHED) 7482 goto dropit; 7483 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 7484 tp->t_state <= TCPS_CLOSING) { 7485 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 7486 goto dropit; 7487 /* 7488 * Send a packet designed to force a response if the peer is 7489 * up and reachable: either an ACK if the connection is 7490 * still alive, or an RST if the peer has closed the 7491 * connection due to timeout or reboot. Using sequence 7492 * number tp->snd_una-1 causes the transmitted zero-length 7493 * segment to lie outside the receive window; by the 7494 * protocol spec, this requires the correspondent TCP to 7495 * respond. 7496 */ 7497 KMOD_TCPSTAT_INC(tcps_keepprobe); 7498 rack_send_ack_challange(rack); 7499 } 7500 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 7501 return (1); 7502 dropit: 7503 KMOD_TCPSTAT_INC(tcps_keepdrops); 7504 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 7505 return (-ETIMEDOUT); /* tcp_drop() */ 7506 } 7507 7508 /* 7509 * Retransmit helper function, clear up all the ack 7510 * flags and take care of important bookkeeping. 7511 */ 7512 static void 7513 rack_remxt_tmr(struct tcpcb *tp) 7514 { 7515 /* 7516 * The retransmit timer went off, all sack'd blocks must be 7517 * un-acked.
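* After an RTO the SACK scoreboard can no longer be trusted (the peer
* may have reneged), so SACKed state is cleared and everything
* outstanding becomes eligible to be resent in order.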
7518 */ 7519 struct rack_sendmap *rsm, *trsm = NULL; 7520 struct tcp_rack *rack; 7521 7522 rack = (struct tcp_rack *)tp->t_fb_ptr; 7523 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 7524 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 7525 rack->r_timer_override = 1; 7526 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 7527 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 7528 rack->r_late = 0; 7529 rack->r_early = 0; 7530 rack->r_ctl.rc_agg_delayed = 0; 7531 rack->r_ctl.rc_agg_early = 0; 7532 if (rack->r_state && (rack->r_state != tp->t_state)) 7533 rack_set_state(tp, rack); 7534 if (tp->t_rxtshift <= rack_rxt_scoreboard_clear_thresh) { 7535 /* 7536 * We do not clear the scoreboard until we have had 7537 * more than rack_rxt_scoreboard_clear_thresh time-outs. 7538 */ 7539 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 7540 if (rack->r_ctl.rc_resend != NULL) 7541 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 7542 7543 return; 7544 } 7545 /* 7546 * Ideally we would like to be able to 7547 * mark SACK-PASS on anything not acked here. 7548 * 7549 * However, if we do that we would burst out 7550 * all that data 1ms apart. This would be unwise, 7551 * so for now we will just let the normal rxt timer 7552 * and tlp timer take care of it. 7553 * 7554 * Also we really need to stick them back in sequence 7555 * order. This way we send in the proper order and any 7556 * sacks that come floating in will "re-ack" the data. 7557 * To do this we zap the tmap with an INIT and then 7558 * walk through and place every rsm in the tail queue 7559 * hash table back in its seq ordered place. 7560 */ 7561 TAILQ_INIT(&rack->r_ctl.rc_tmap); 7562 7563 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 7564 rsm->r_dupack = 0; 7565 if (rack_verbose_logging) 7566 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7567 /* We must re-add it back to the tlist */ 7568 if (trsm == NULL) { 7569 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7570 } else { 7571 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 7572 } 7573 rsm->r_in_tmap = 1; 7574 trsm = rsm; 7575 if (rsm->r_flags & RACK_ACKED) 7576 rsm->r_flags |= RACK_WAS_ACKED; 7577 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED | RACK_WAS_LOST); 7578 rsm->r_flags |= RACK_MUST_RXT; 7579 } 7580 /* zero the lost since it's all gone */ 7581 rack->r_ctl.rc_considered_lost = 0; 7582 /* Clear the count (we just un-acked them) */ 7583 rack->r_ctl.rc_sacked = 0; 7584 rack->r_ctl.rc_sacklast = NULL; 7585 /* Clear the tlp rtx mark */ 7586 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); 7587 if (rack->r_ctl.rc_resend != NULL) 7588 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 7589 rack->r_ctl.rc_prr_sndcnt = 0; 7590 rack_log_to_prr(rack, 6, 0, __LINE__); 7591 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); 7592 if (rack->r_ctl.rc_resend != NULL) 7593 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 7594 if (((tp->t_flags & TF_SACK_PERMIT) == 0) && 7595 ((tp->t_flags & TF_SENTFIN) == 0)) { 7596 /* 7597 * For non-sack customers new data 7598 * needs to go out as retransmits until 7599 * we retransmit up to snd_max. 
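* Without SACK we cannot tell which outstanding bytes actually arrived,
* so r_must_retran forces everything out to the snd_max recorded at RTO
* time before any new data is sent.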
7600 */ 7601 rack->r_must_retran = 1; 7602 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 7603 rack->r_ctl.rc_sacked); 7604 } 7605 } 7606 7607 static void 7608 rack_convert_rtts(struct tcpcb *tp) 7609 { 7610 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC); 7611 tp->t_rxtcur = RACK_REXMTVAL(tp); 7612 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 7613 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 7614 } 7615 if (tp->t_rxtcur > rack_rto_max) { 7616 tp->t_rxtcur = rack_rto_max; 7617 } 7618 } 7619 7620 static void 7621 rack_cc_conn_init(struct tcpcb *tp) 7622 { 7623 struct tcp_rack *rack; 7624 uint32_t srtt; 7625 7626 rack = (struct tcp_rack *)tp->t_fb_ptr; 7627 srtt = tp->t_srtt; 7628 cc_conn_init(tp); 7629 /* 7630 * Now convert to rack's internal format, 7631 * if required. 7632 */ 7633 if ((srtt == 0) && (tp->t_srtt != 0)) 7634 rack_convert_rtts(tp); 7635 /* 7636 * We want a chance to stay in slowstart as 7637 * we create a connection. TCP spec says that 7638 * initially ssthresh is infinite. For our 7639 * purposes that is the snd_wnd. 7640 */ 7641 if (tp->snd_ssthresh < tp->snd_wnd) { 7642 tp->snd_ssthresh = tp->snd_wnd; 7643 } 7644 /* 7645 * We also want to assure an IW worth of 7646 * data can get inflight. 7647 */ 7648 if (rc_init_window(rack) < tp->snd_cwnd) 7649 tp->snd_cwnd = rc_init_window(rack); 7650 } 7651 7652 /* 7653 * Re-transmit timeout! If we decide to drop the PCB we return a negative value (for tcp_drop()), otherwise 7654 * we will set up to retransmit the lowest seq number outstanding. 7655 */ 7656 static int 7657 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7658 { 7659 struct inpcb *inp = tptoinpcb(tp); 7660 int32_t rexmt; 7661 int32_t retval = 0; 7662 bool isipv6; 7663 7664 if ((tp->t_flags & TF_GPUTINPROG) && 7665 (tp->t_rxtshift)) { 7666 /* 7667 * We have had a second timeout; 7668 * measurements on successive rxt's are not profitable. 7669 * It is unlikely to be of any use (the network is 7670 * broken or the client went away). 7671 */ 7672 tp->t_flags &= ~TF_GPUTINPROG; 7673 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7674 rack->r_ctl.rc_gp_srtt /*flex1*/, 7675 tp->gput_seq, 7676 0, 0, 18, __LINE__, NULL, 0); 7677 } 7678 if (ctf_progress_timeout_check(tp, false)) { 7679 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 7680 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7681 return (-ETIMEDOUT); /* tcp_drop() */ 7682 } 7683 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 7684 rack->r_ctl.retran_during_recovery = 0; 7685 rack->rc_ack_required = 1; 7686 rack->r_ctl.dsack_byte_cnt = 0; 7687 if (IN_RECOVERY(tp->t_flags) && 7688 (rack->rto_from_rec == 0)) { 7689 /* 7690 * Mark that we had an rto while in recovery 7691 * and save the ssthresh so if we go back 7692 * into recovery we will have a chance 7693 * to slowstart back to the level. 7694 */ 7695 rack->rto_from_rec = 1; 7696 rack->r_ctl.rto_ssthresh = tp->snd_ssthresh; 7697 } 7698 if (IN_FASTRECOVERY(tp->t_flags)) 7699 tp->t_flags |= TF_WASFRECOVERY; 7700 else 7701 tp->t_flags &= ~TF_WASFRECOVERY; 7702 if (IN_CONGRECOVERY(tp->t_flags)) 7703 tp->t_flags |= TF_WASCRECOVERY; 7704 else 7705 tp->t_flags &= ~TF_WASCRECOVERY; 7706 if (TCPS_HAVEESTABLISHED(tp->t_state) && 7707 (tp->snd_una == tp->snd_max)) { 7708 /* Nothing outstanding ..
nothing to do */ 7709 return (0); 7710 } 7711 if (rack->r_ctl.dsack_persist) { 7712 rack->r_ctl.dsack_persist--; 7713 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 7714 rack->r_ctl.num_dsack = 0; 7715 } 7716 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 7717 } 7718 /* 7719 * Rack can only run one timer at a time, so we cannot 7720 * run a KEEPINIT (gating SYN sending) and a retransmit 7721 * timer for the SYN. So if we are in a front state and 7722 * have a KEEPINIT timer we need to check the first transmit 7723 * against now to see if we have exceeded the KEEPINIT time 7724 * (if one is set). 7725 */ 7726 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 7727 (TP_KEEPINIT(tp) != 0)) { 7728 struct rack_sendmap *rsm; 7729 7730 rsm = tqhash_min(rack->r_ctl.tqh); 7731 if (rsm) { 7732 /* Ok we have something outstanding to test keepinit with */ 7733 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 7734 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 7735 /* We have exceeded the KEEPINIT time */ 7736 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 7737 goto drop_it; 7738 } 7739 } 7740 } 7741 /* 7742 * Retransmission timer went off. Message has not been acked within 7743 * retransmit interval. Back off to a longer retransmit interval 7744 * and retransmit one segment. 7745 */ 7746 if ((rack->r_ctl.rc_resend == NULL) || 7747 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 7748 /* 7749 * If the rwnd collapsed on 7750 * the one we are retransmitting 7751 * it does not count against the 7752 * rxt count. 7753 */ 7754 tp->t_rxtshift++; 7755 } 7756 rack_remxt_tmr(tp); 7757 if (tp->t_rxtshift > V_tcp_retries) { 7758 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 7759 drop_it: 7760 tp->t_rxtshift = V_tcp_retries; 7761 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 7762 /* XXXGL: previously t_softerror was casted to uint16_t */ 7763 MPASS(tp->t_softerror >= 0); 7764 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; 7765 goto out; /* tcp_drop() */ 7766 } 7767 if (tp->t_state == TCPS_SYN_SENT) { 7768 /* 7769 * If the SYN was retransmitted, indicate CWND to be limited 7770 * to 1 segment in cc_conn_init(). 7771 */ 7772 tp->snd_cwnd = 1; 7773 } else if (tp->t_rxtshift == 1) { 7774 /* 7775 * first retransmit; record ssthresh and cwnd so they can be 7776 * recovered if this turns out to be a "bad" retransmit. A 7777 * retransmit is considered "bad" if an ACK for this segment 7778 * is received within RTT/2 interval; the assumption here is 7779 * that the ACK was already in flight. See "On Estimating 7780 * End-to-End Network Path Properties" by Allman and Paxson 7781 * for more details. 
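* t_badrxtwin below is set to now plus srtt/2 to implement exactly that
* check.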
7782 */ 7783 tp->snd_cwnd_prev = tp->snd_cwnd; 7784 tp->snd_ssthresh_prev = tp->snd_ssthresh; 7785 tp->snd_recover_prev = tp->snd_recover; 7786 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 7787 tp->t_flags |= TF_PREVVALID; 7788 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 7789 tp->t_flags &= ~TF_PREVVALID; 7790 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 7791 if ((tp->t_state == TCPS_SYN_SENT) || 7792 (tp->t_state == TCPS_SYN_RECEIVED)) 7793 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 7794 else 7795 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 7796 7797 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 7798 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 7799 /* 7800 * We enter the path for PLMTUD if connection is established or, if 7801 * connection is FIN_WAIT_1 status, reason for the last is that if 7802 * amount of data we send is very small, we could send it in couple 7803 * of packets and process straight to FIN. In that case we won't 7804 * catch ESTABLISHED state. 7805 */ 7806 #ifdef INET6 7807 isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false; 7808 #else 7809 isipv6 = false; 7810 #endif 7811 if (((V_tcp_pmtud_blackhole_detect == 1) || 7812 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 7813 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 7814 ((tp->t_state == TCPS_ESTABLISHED) || 7815 (tp->t_state == TCPS_FIN_WAIT_1))) { 7816 /* 7817 * Idea here is that at each stage of mtu probe (usually, 7818 * 1448 -> 1188 -> 524) should be given 2 chances to recover 7819 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 7820 * should take care of that. 7821 */ 7822 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 7823 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 7824 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 7825 tp->t_rxtshift % 2 == 0)) { 7826 /* 7827 * Enter Path MTU Black-hole Detection mechanism: - 7828 * Disable Path MTU Discovery (IP "DF" bit). - 7829 * Reduce MTU to lower value than what we negotiated 7830 * with peer. 7831 */ 7832 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 7833 /* Record that we may have found a black hole. */ 7834 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 7835 /* Keep track of previous MSS. */ 7836 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 7837 } 7838 7839 /* 7840 * Reduce the MSS to blackhole value or to the 7841 * default in an attempt to retransmit. 7842 */ 7843 #ifdef INET6 7844 if (isipv6 && 7845 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 7846 /* Use the sysctl tuneable blackhole MSS. */ 7847 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 7848 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 7849 } else if (isipv6) { 7850 /* Use the default MSS. */ 7851 tp->t_maxseg = V_tcp_v6mssdflt; 7852 /* 7853 * Disable Path MTU Discovery when we switch 7854 * to minmss. 7855 */ 7856 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 7857 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 7858 } 7859 #endif 7860 #if defined(INET6) && defined(INET) 7861 else 7862 #endif 7863 #ifdef INET 7864 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 7865 /* Use the sysctl tuneable blackhole MSS. */ 7866 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 7867 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 7868 } else { 7869 /* Use the default MSS. */ 7870 tp->t_maxseg = V_tcp_mssdflt; 7871 /* 7872 * Disable Path MTU Discovery when we switch 7873 * to minmss. 
7874 */ 7875 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 7876 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 7877 } 7878 #endif 7879 } else { 7880 /* 7881 * If further retransmissions are still unsuccessful 7882 * with a lowered MTU, maybe this isn't a blackhole 7883 * and we restore the previous MSS and blackhole 7884 * detection flags. The limit '6' is determined by 7885 * giving each probe stage (1448, 1188, 524) 2 7886 * chances to recover. 7887 */ 7888 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 7889 (tp->t_rxtshift >= 6)) { 7890 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 7891 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 7892 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 7893 if (tp->t_maxseg < V_tcp_mssdflt) { 7894 /* 7895 * The MSS is so small we should not 7896 * process incoming SACK's since we are 7897 * subject to attack in such a case. 7898 */ 7899 tp->t_flags2 |= TF2_PROC_SACK_PROHIBIT; 7900 } else { 7901 tp->t_flags2 &= ~TF2_PROC_SACK_PROHIBIT; 7902 } 7903 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 7904 } 7905 } 7906 } 7907 /* 7908 * Disable RFC1323 and SACK if we haven't got any response to 7909 * our third SYN to work-around some broken terminal servers 7910 * (most of which have hopefully been retired) that have bad VJ 7911 * header compression code which trashes TCP segments containing 7912 * unknown-to-them TCP options. 7913 */ 7914 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 7915 (tp->t_rxtshift == 3)) 7916 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 7917 /* 7918 * If we backed off this far, our srtt estimate is probably bogus. 7919 * Clobber it so we'll take the next rtt measurement as our srtt; 7920 * move the current srtt into rttvar to keep the current retransmit 7921 * times until then. 7922 */ 7923 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 7924 #ifdef INET6 7925 if ((inp->inp_vflag & INP_IPV6) != 0) 7926 in6_losing(inp); 7927 else 7928 #endif 7929 in_losing(inp); 7930 tp->t_rttvar += tp->t_srtt; 7931 tp->t_srtt = 0; 7932 } 7933 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 7934 tp->snd_recover = tp->snd_max; 7935 tp->t_flags |= TF_ACKNOW; 7936 tp->t_rtttime = 0; 7937 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__); 7938 out: 7939 return (retval); 7940 } 7941 7942 static int 7943 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp) 7944 { 7945 int32_t ret = 0; 7946 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 7947 7948 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 7949 (tp->t_flags & TF_GPUTINPROG)) { 7950 /* 7951 * We have a goodput in progress 7952 * and we have entered a late state. 7953 * Do we have enough data in the sb 7954 * to handle the GPUT request? 7955 */ 7956 uint32_t bytes; 7957 7958 bytes = tp->gput_ack - tp->gput_seq; 7959 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 7960 bytes += tp->gput_seq - tp->snd_una; 7961 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 7962 /* 7963 * There are not enough bytes in the socket 7964 * buffer that have been sent to cover this 7965 * measurement. Cancel it. 
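* (The connection is closing, and the remaining socket-buffer data can
* no longer cover the measurement window, so it could never complete.)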
7966 */ 7967 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7968 rack->r_ctl.rc_gp_srtt /*flex1*/, 7969 tp->gput_seq, 7970 0, 0, 18, __LINE__, NULL, 0); 7971 tp->t_flags &= ~TF_GPUTINPROG; 7972 } 7973 } 7974 if (timers == 0) { 7975 return (0); 7976 } 7977 if (tp->t_state == TCPS_LISTEN) { 7978 /* no timers on listen sockets */ 7979 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 7980 return (0); 7981 return (1); 7982 } 7983 if ((timers & PACE_TMR_RACK) && 7984 rack->rc_on_min_to) { 7985 /* 7986 * For the rack timer when we 7987 * are on a min-timeout (which means rrr_conf = 3) 7988 * we don't want to check the timer. It may 7989 * be going off for a pace and thats ok we 7990 * want to send the retransmit (if its ready). 7991 * 7992 * If its on a normal rack timer (non-min) then 7993 * we will check if its expired. 7994 */ 7995 goto skip_time_check; 7996 } 7997 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 7998 uint32_t left; 7999 8000 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 8001 ret = -1; 8002 rack_log_to_processing(rack, cts, ret, 0); 8003 return (0); 8004 } 8005 if (hpts_calling == 0) { 8006 /* 8007 * A user send or queued mbuf (sack) has called us? We 8008 * return 0 and let the pacing guards 8009 * deal with it if they should or 8010 * should not cause a send. 8011 */ 8012 ret = -2; 8013 rack_log_to_processing(rack, cts, ret, 0); 8014 return (0); 8015 } 8016 /* 8017 * Ok our timer went off early and we are not paced false 8018 * alarm, go back to sleep. We make sure we don't have 8019 * no-sack wakeup on since we no longer have a PKT_OUTPUT 8020 * flag in place. 8021 */ 8022 rack->rc_tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE; 8023 ret = -3; 8024 left = rack->r_ctl.rc_timer_exp - cts; 8025 tcp_hpts_insert(tp, left, NULL); 8026 rack_log_to_processing(rack, cts, ret, left); 8027 return (1); 8028 } 8029 skip_time_check: 8030 rack->rc_tmr_stopped = 0; 8031 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 8032 if (timers & PACE_TMR_DELACK) { 8033 ret = rack_timeout_delack(tp, rack, cts); 8034 } else if (timers & PACE_TMR_RACK) { 8035 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8036 rack->r_fast_output = 0; 8037 ret = rack_timeout_rack(tp, rack, cts); 8038 } else if (timers & PACE_TMR_TLP) { 8039 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8040 rack->r_fast_output = 0; 8041 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); 8042 } else if (timers & PACE_TMR_RXT) { 8043 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8044 rack->r_fast_output = 0; 8045 ret = rack_timeout_rxt(tp, rack, cts); 8046 } else if (timers & PACE_TMR_PERSIT) { 8047 ret = rack_timeout_persist(tp, rack, cts); 8048 } else if (timers & PACE_TMR_KEEP) { 8049 ret = rack_timeout_keepalive(tp, rack, cts); 8050 } 8051 rack_log_to_processing(rack, cts, ret, timers); 8052 return (ret); 8053 } 8054 8055 static void 8056 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 8057 { 8058 struct timeval tv; 8059 uint32_t us_cts, flags_on_entry; 8060 uint8_t hpts_removed = 0; 8061 8062 flags_on_entry = rack->r_ctl.rc_hpts_flags; 8063 us_cts = tcp_get_usecs(&tv); 8064 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 8065 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 8066 ((tp->snd_max - tp->snd_una) == 0))) { 8067 tcp_hpts_remove(rack->rc_tp); 8068 hpts_removed = 1; 8069 /* If we were not delayed cancel out the flag. 
*/ 8070 if ((tp->snd_max - tp->snd_una) == 0) 8071 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 8072 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 8073 } 8074 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 8075 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 8076 if (tcp_in_hpts(rack->rc_tp) && 8077 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 8078 /* 8079 * Canceling timer's when we have no output being 8080 * paced. We also must remove ourselves from the 8081 * hpts. 8082 */ 8083 tcp_hpts_remove(rack->rc_tp); 8084 hpts_removed = 1; 8085 } 8086 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 8087 } 8088 if (hpts_removed == 0) 8089 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 8090 } 8091 8092 static int 8093 rack_stopall(struct tcpcb *tp) 8094 { 8095 struct tcp_rack *rack; 8096 8097 rack = (struct tcp_rack *)tp->t_fb_ptr; 8098 rack->t_timers_stopped = 1; 8099 8100 tcp_hpts_remove(tp); 8101 8102 return (0); 8103 } 8104 8105 static void 8106 rack_stop_all_timers(struct tcpcb *tp, struct tcp_rack *rack) 8107 { 8108 /* 8109 * Assure no timers are running. 8110 */ 8111 if (tcp_timer_active(tp, TT_PERSIST)) { 8112 /* We enter in persists, set the flag appropriately */ 8113 rack->rc_in_persist = 1; 8114 } 8115 if (tcp_in_hpts(rack->rc_tp)) { 8116 tcp_hpts_remove(rack->rc_tp); 8117 } 8118 } 8119 8120 static void 8121 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 8122 struct rack_sendmap *rsm, uint64_t ts, uint32_t add_flag, int segsiz) 8123 { 8124 int32_t idx; 8125 8126 rsm->r_rtr_cnt++; 8127 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 8128 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 8129 rsm->r_flags |= RACK_OVERMAX; 8130 } 8131 rsm->r_act_rxt_cnt++; 8132 /* Peg the count/index */ 8133 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8134 rsm->r_dupack = 0; 8135 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { 8136 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 8137 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 8138 } 8139 if (rsm->r_flags & RACK_WAS_LOST) { 8140 /* 8141 * We retransmitted it putting it back in flight 8142 * remove the lost desgination and reduce the 8143 * bytes considered lost. 8144 */ 8145 rack_mark_nolonger_lost(rack, rsm); 8146 } 8147 idx = rsm->r_rtr_cnt - 1; 8148 rsm->r_tim_lastsent[idx] = ts; 8149 /* 8150 * Here we don't add in the len of send, since its already 8151 * in snduna <->snd_max. 8152 */ 8153 rsm->r_fas = ctf_flight_size(rack->rc_tp, 8154 rack->r_ctl.rc_sacked); 8155 if (rsm->r_flags & RACK_ACKED) { 8156 /* Problably MTU discovery messing with us */ 8157 rsm->r_flags &= ~RACK_ACKED; 8158 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8159 } 8160 if (rsm->r_in_tmap) { 8161 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8162 rsm->r_in_tmap = 0; 8163 } 8164 /* Lets make sure it really is in or not the GP window */ 8165 rack_mark_in_gp_win(tp, rsm); 8166 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8167 rsm->r_in_tmap = 1; 8168 rsm->r_bas = (uint8_t)(((rsm->r_end - rsm->r_start) + segsiz - 1) / segsiz); 8169 /* Take off the must retransmit flag, if its on */ 8170 if (rsm->r_flags & RACK_MUST_RXT) { 8171 if (rack->r_must_retran) 8172 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 8173 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 8174 /* 8175 * We have retransmitted all we need. Clear 8176 * any must retransmit flags. 
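 *
 * A small worked example of this accounting (our reading of the
 * fields, which are set when the RTO fires elsewhere in this
 * file): say the RTO hit with snd_una = 1000 and snd_max = 4000,
 * so rc_snd_max_at_rto was presumably 4000 and rc_out_at_rto
 * 3000. Re-sending [1000,2000) drops rc_out_at_rto to 2000, and
 * re-sending [2000,4000) both zeroes it and satisfies the
 * r_end >= rc_snd_max_at_rto test, at which point the whole
 * must-retransmit episode is considered finished and the flags
 * are cleared just below.
 *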
8177 */ 8178 rack->r_must_retran = 0; 8179 rack->r_ctl.rc_out_at_rto = 0; 8180 } 8181 rsm->r_flags &= ~RACK_MUST_RXT; 8182 } 8183 /* Remove any collapsed flag */ 8184 rsm->r_flags &= ~RACK_RWND_COLLAPSED; 8185 if (rsm->r_flags & RACK_SACK_PASSED) { 8186 /* We have retransmitted due to the SACK pass */ 8187 rsm->r_flags &= ~RACK_SACK_PASSED; 8188 rsm->r_flags |= RACK_WAS_SACKPASS; 8189 } 8190 } 8191 8192 static uint32_t 8193 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 8194 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint32_t add_flag, int segsiz) 8195 { 8196 /* 8197 * We (re-)transmitted starting at rsm->r_start for some length 8198 * (possibly less than r_end. 8199 */ 8200 struct rack_sendmap *nrsm; 8201 int insret __diagused; 8202 uint32_t c_end; 8203 int32_t len; 8204 8205 len = *lenp; 8206 c_end = rsm->r_start + len; 8207 if (SEQ_GEQ(c_end, rsm->r_end)) { 8208 /* 8209 * We retransmitted the whole piece or more than the whole 8210 * slopping into the next rsm. 8211 */ 8212 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); 8213 if (c_end == rsm->r_end) { 8214 *lenp = 0; 8215 return (0); 8216 } else { 8217 int32_t act_len; 8218 8219 /* Hangs over the end return whats left */ 8220 act_len = rsm->r_end - rsm->r_start; 8221 *lenp = (len - act_len); 8222 return (rsm->r_end); 8223 } 8224 /* We don't get out of this block. */ 8225 } 8226 /* 8227 * Here we retransmitted less than the whole thing which means we 8228 * have to split this into what was transmitted and what was not. 8229 */ 8230 nrsm = rack_alloc_full_limit(rack); 8231 if (nrsm == NULL) { 8232 /* 8233 * We can't get memory, so lets not proceed. 8234 */ 8235 *lenp = 0; 8236 return (0); 8237 } 8238 /* 8239 * So here we are going to take the original rsm and make it what we 8240 * retransmitted. nrsm will be the tail portion we did not 8241 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 8242 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 8243 * 1, 6 and the new piece will be 6, 11. 8244 */ 8245 rack_clone_rsm(rack, nrsm, rsm, c_end); 8246 nrsm->r_dupack = 0; 8247 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 8248 #ifndef INVARIANTS 8249 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 8250 #else 8251 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 8252 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 8253 nrsm, insret, rack, rsm); 8254 } 8255 #endif 8256 if (rsm->r_in_tmap) { 8257 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8258 nrsm->r_in_tmap = 1; 8259 } 8260 rsm->r_flags &= (~RACK_HAS_FIN); 8261 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); 8262 /* Log a split of rsm into rsm and nrsm */ 8263 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 8264 *lenp = 0; 8265 return (0); 8266 } 8267 8268 static void 8269 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 8270 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts, 8271 struct rack_sendmap *hintrsm, uint32_t add_flag, struct mbuf *s_mb, 8272 uint32_t s_moff, int hw_tls, int segsiz) 8273 { 8274 struct tcp_rack *rack; 8275 struct rack_sendmap *rsm, *nrsm; 8276 int insret __diagused; 8277 8278 register uint32_t snd_max, snd_una; 8279 8280 /* 8281 * Add to the RACK log of packets in flight or retransmitted. If 8282 * there is a TS option we will use the TS echoed, if not we will 8283 * grab a TS. 8284 * 8285 * Retransmissions will increment the count and move the ts to its 8286 * proper place. 
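 *
 * One more illustration of how a retransmission spanning two map
 * entries is consumed by the update/split helpers used below
 * (hand-worked, not code): with entries A = [1000,2000) and
 * B = [2000,3000) and a retransmit of seq_out = 1000, len = 1500:
 *
 *	- rack_update_entry(A) covers all of A, returns 2000
 *	  and shrinks len to 500;
 *	- the loop re-finds B, rack_update_entry(B) splits it at
 *	  2500, marks [2000,2500) retransmitted and sets len to 0,
 *	  ending the walk.
 *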
Note that if options do not include TS's then we 8287 * won't be able to effectively use the ACK for an RTT on a retran. 8288 * 8289 * Notes about r_start and r_end. Lets consider a send starting at 8290 * sequence 1 for 10 bytes. In such an example the r_start would be 8291 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 8292 * This means that r_end is actually the first sequence for the next 8293 * slot (11). 8294 * 8295 */ 8296 /* 8297 * If err is set what do we do XXXrrs? should we not add the thing? 8298 * -- i.e. return if err != 0 or should we pretend we sent it? -- 8299 * i.e. proceed with add ** do this for now. 8300 */ 8301 INP_WLOCK_ASSERT(tptoinpcb(tp)); 8302 if (err) 8303 /* 8304 * We don't log errors -- we could but snd_max does not 8305 * advance in this case either. 8306 */ 8307 return; 8308 8309 if (th_flags & TH_RST) { 8310 /* 8311 * We don't log resets and we return immediately from 8312 * sending 8313 */ 8314 return; 8315 } 8316 rack = (struct tcp_rack *)tp->t_fb_ptr; 8317 snd_una = tp->snd_una; 8318 snd_max = tp->snd_max; 8319 if (th_flags & (TH_SYN | TH_FIN)) { 8320 /* 8321 * The call to rack_log_output is made before bumping 8322 * snd_max. This means we can record one extra byte on a SYN 8323 * or FIN if seq_out is adding more on and a FIN is present 8324 * (and we are not resending). 8325 */ 8326 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 8327 len++; 8328 if (th_flags & TH_FIN) 8329 len++; 8330 } 8331 if (SEQ_LEQ((seq_out + len), snd_una)) { 8332 /* Are sending an old segment to induce an ack (keep-alive)? */ 8333 return; 8334 } 8335 if (SEQ_LT(seq_out, snd_una)) { 8336 /* huh? should we panic? */ 8337 uint32_t end; 8338 8339 end = seq_out + len; 8340 seq_out = snd_una; 8341 if (SEQ_GEQ(end, seq_out)) 8342 len = end - seq_out; 8343 else 8344 len = 0; 8345 } 8346 if (len == 0) { 8347 /* We don't log zero window probes */ 8348 return; 8349 } 8350 if (IN_FASTRECOVERY(tp->t_flags)) { 8351 rack->r_ctl.rc_prr_out += len; 8352 } 8353 /* First question is it a retransmission or new? */ 8354 if (seq_out == snd_max) { 8355 /* Its new */ 8356 rack_chk_req_and_hybrid_on_out(rack, seq_out, len, cts); 8357 again: 8358 rsm = rack_alloc(rack); 8359 if (rsm == NULL) { 8360 /* 8361 * Hmm out of memory and the tcb got destroyed while 8362 * we tried to wait. 8363 */ 8364 return; 8365 } 8366 if (th_flags & TH_FIN) { 8367 rsm->r_flags = RACK_HAS_FIN|add_flag; 8368 } else { 8369 rsm->r_flags = add_flag; 8370 } 8371 if (hw_tls) 8372 rsm->r_hw_tls = 1; 8373 rsm->r_tim_lastsent[0] = cts; 8374 rsm->r_rtr_cnt = 1; 8375 rsm->r_act_rxt_cnt = 0; 8376 rsm->r_rtr_bytes = 0; 8377 if (th_flags & TH_SYN) { 8378 /* The data space is one beyond snd_una */ 8379 rsm->r_flags |= RACK_HAS_SYN; 8380 } 8381 rsm->r_start = seq_out; 8382 rsm->r_end = rsm->r_start + len; 8383 rack_mark_in_gp_win(tp, rsm); 8384 rsm->r_dupack = 0; 8385 /* 8386 * save off the mbuf location that 8387 * sndmbuf_noadv returned (which is 8388 * where we started copying from).. 
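 *
 * In other words rsm->m/rsm->soff name the mbuf in the socket
 * buffer chain, and the byte offset within it, where this rsm's
 * first byte lives. If the send pointer was stale the offset can
 * land past m_len, which is why the walk just below steps along
 * m_next subtracting each m_len. For example (made-up sizes),
 * with three 100-byte mbufs and soff = 250 we end up pointing at
 * the third mbuf with soff = 50.
 *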
8389 */ 8390 rsm->m = s_mb; 8391 rsm->soff = s_moff; 8392 /* 8393 * Here we do add in the len of send, since its not yet 8394 * reflected in in snduna <->snd_max 8395 */ 8396 rsm->r_fas = (ctf_flight_size(rack->rc_tp, 8397 rack->r_ctl.rc_sacked) + 8398 (rsm->r_end - rsm->r_start)); 8399 if ((rack->rc_initial_ss_comp == 0) && 8400 (rack->r_ctl.ss_hi_fs < rsm->r_fas)) { 8401 rack->r_ctl.ss_hi_fs = rsm->r_fas; 8402 } 8403 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ 8404 if (rsm->m) { 8405 if (rsm->m->m_len <= rsm->soff) { 8406 /* 8407 * XXXrrs Question, will this happen? 8408 * 8409 * If sbsndptr is set at the correct place 8410 * then s_moff should always be somewhere 8411 * within rsm->m. But if the sbsndptr was 8412 * off then that won't be true. If it occurs 8413 * we need to walkout to the correct location. 8414 */ 8415 struct mbuf *lm; 8416 8417 lm = rsm->m; 8418 while (lm->m_len <= rsm->soff) { 8419 rsm->soff -= lm->m_len; 8420 lm = lm->m_next; 8421 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", 8422 __func__, rack, s_moff, s_mb, rsm->soff)); 8423 } 8424 rsm->m = lm; 8425 } 8426 rsm->orig_m_len = rsm->m->m_len; 8427 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 8428 } else { 8429 rsm->orig_m_len = 0; 8430 rsm->orig_t_space = 0; 8431 } 8432 rsm->r_bas = (uint8_t)((len + segsiz - 1) / segsiz); 8433 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8434 /* Log a new rsm */ 8435 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); 8436 #ifndef INVARIANTS 8437 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 8438 #else 8439 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 8440 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 8441 nrsm, insret, rack, rsm); 8442 } 8443 #endif 8444 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8445 rsm->r_in_tmap = 1; 8446 if (rsm->r_flags & RACK_IS_PCM) { 8447 rack->r_ctl.pcm_i.send_time = cts; 8448 rack->r_ctl.pcm_i.eseq = rsm->r_end; 8449 /* First time through we set the start too */ 8450 if (rack->pcm_in_progress == 0) 8451 rack->r_ctl.pcm_i.sseq = rsm->r_start; 8452 } 8453 /* 8454 * Special case detection, is there just a single 8455 * packet outstanding when we are not in recovery? 8456 * 8457 * If this is true mark it so. 8458 */ 8459 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 8460 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 8461 struct rack_sendmap *prsm; 8462 8463 prsm = tqhash_prev(rack->r_ctl.tqh, rsm); 8464 if (prsm) 8465 prsm->r_one_out_nr = 1; 8466 } 8467 return; 8468 } 8469 /* 8470 * If we reach here its a retransmission and we need to find it. 8471 */ 8472 more: 8473 if (hintrsm && (hintrsm->r_start == seq_out)) { 8474 rsm = hintrsm; 8475 hintrsm = NULL; 8476 } else { 8477 /* No hints sorry */ 8478 rsm = NULL; 8479 } 8480 if ((rsm) && (rsm->r_start == seq_out)) { 8481 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); 8482 if (len == 0) { 8483 return; 8484 } else { 8485 goto more; 8486 } 8487 } 8488 /* Ok it was not the last pointer go through it the hard way. 
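 *
 * The lookup contract we are relying on here (as this function
 * uses it, stated informally): tqhash_find(tqh, seq) hands back
 * the sendmap entry whose [r_start, r_end) range covers seq, or
 * NULL if no such entry exists; the hint passed in by the caller
 * simply lets the common case skip that lookup when it already
 * has the right entry in hand.
 *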
*/ 8489 refind: 8490 rsm = tqhash_find(rack->r_ctl.tqh, seq_out); 8491 if (rsm) { 8492 if (rsm->r_start == seq_out) { 8493 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); 8494 if (len == 0) { 8495 return; 8496 } else { 8497 goto refind; 8498 } 8499 } 8500 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 8501 /* Transmitted within this piece */ 8502 /* 8503 * Ok we must split off the front and then let the 8504 * update do the rest 8505 */ 8506 nrsm = rack_alloc_full_limit(rack); 8507 if (nrsm == NULL) { 8508 rack_update_rsm(tp, rack, rsm, cts, add_flag, segsiz); 8509 return; 8510 } 8511 /* 8512 * copy rsm to nrsm and then trim the front of rsm 8513 * to not include this part. 8514 */ 8515 rack_clone_rsm(rack, nrsm, rsm, seq_out); 8516 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 8517 #ifndef INVARIANTS 8518 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 8519 #else 8520 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 8521 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 8522 nrsm, insret, rack, rsm); 8523 } 8524 #endif 8525 if (rsm->r_in_tmap) { 8526 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8527 nrsm->r_in_tmap = 1; 8528 } 8529 rsm->r_flags &= (~RACK_HAS_FIN); 8530 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag, segsiz); 8531 if (len == 0) { 8532 return; 8533 } else if (len > 0) 8534 goto refind; 8535 } 8536 } 8537 /* 8538 * Hmm not found in map did they retransmit both old and on into the 8539 * new? 8540 */ 8541 if (seq_out == tp->snd_max) { 8542 goto again; 8543 } else if (SEQ_LT(seq_out, tp->snd_max)) { 8544 #ifdef INVARIANTS 8545 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 8546 seq_out, len, tp->snd_una, tp->snd_max); 8547 printf("Starting Dump of all rack entries\n"); 8548 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 8549 printf("rsm:%p start:%u end:%u\n", 8550 rsm, rsm->r_start, rsm->r_end); 8551 } 8552 printf("Dump complete\n"); 8553 panic("seq_out not found rack:%p tp:%p", 8554 rack, tp); 8555 #endif 8556 } else { 8557 #ifdef INVARIANTS 8558 /* 8559 * Hmm beyond sndmax? (only if we are using the new rtt-pack 8560 * flag) 8561 */ 8562 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 8563 seq_out, len, tp->snd_max, tp); 8564 #endif 8565 } 8566 } 8567 8568 /* 8569 * Record one of the RTT updates from an ack into 8570 * our sample structure. 
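 *
 * The per-ack sample set (rack_rs) gathers, across every piece
 * newly acked by one incoming ACK, the lowest/highest/summed
 * tick RTT plus the best microsecond RTT and a confidence grade.
 * The grades as the callers below use them:
 *
 *	2 - the piece was SACKed (most trusted)
 *	1 - cum-acked and we were not app-limited on it
 *	0 - retransmitted, just-returned, or a tiny tail send
 *
 * tcp_rack_xmit_timer() feeds one piece in; the commit routine
 * that follows reduces the set to a single value.
 *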
8571 */ 8572 8573 static void 8574 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt, 8575 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt) 8576 { 8577 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 8578 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { 8579 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; 8580 } 8581 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 8582 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { 8583 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; 8584 } 8585 if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 8586 if (us_rtt < rack->r_ctl.rc_gp_lowrtt) 8587 rack->r_ctl.rc_gp_lowrtt = us_rtt; 8588 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd) 8589 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 8590 } 8591 if ((confidence == 1) && 8592 ((rsm == NULL) || 8593 (rsm->r_just_ret) || 8594 (rsm->r_one_out_nr && 8595 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) { 8596 /* 8597 * If the rsm had a just return 8598 * hit it then we can't trust the 8599 * rtt measurement for buffer deterimination 8600 * Note that a confidence of 2, indicates 8601 * SACK'd which overrides the r_just_ret or 8602 * the r_one_out_nr. If it was a CUM-ACK and 8603 * we had only two outstanding, but get an 8604 * ack for only 1. Then that also lowers our 8605 * confidence. 8606 */ 8607 confidence = 0; 8608 } 8609 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 8610 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) { 8611 if (rack->r_ctl.rack_rs.confidence == 0) { 8612 /* 8613 * We take anything with no current confidence 8614 * saved. 8615 */ 8616 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 8617 rack->r_ctl.rack_rs.confidence = confidence; 8618 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 8619 } else if (confidence != 0) { 8620 /* 8621 * Once we have a confident number, 8622 * we can update it with a smaller 8623 * value since this confident number 8624 * may include the DSACK time until 8625 * the next segment (the second one) arrived. 8626 */ 8627 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 8628 rack->r_ctl.rack_rs.confidence = confidence; 8629 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 8630 } 8631 } 8632 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence); 8633 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; 8634 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; 8635 rack->r_ctl.rack_rs.rs_rtt_cnt++; 8636 } 8637 8638 /* 8639 * Collect new round-trip time estimate 8640 * and update averages and current timeout. 
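 *
 * Which single value gets committed depends on
 * rc_rate_sample_method: the lowest, highest or average of the
 * samples gathered from this ACK. The goodput srtt is then a
 * 1/8-gain moving average of the microsecond sample, i.e.
 *
 *	rc_gp_srtt' = rc_gp_srtt - rc_gp_srtt/8 + us_rtt/8
 *	            = (7 * rc_gp_srtt + us_rtt) / 8   (roughly,
 *	              ignoring integer truncation)
 *
 * with unconfident samples only accepted while no RTT has been
 * established yet.
 *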
8641 */ 8642 static void 8643 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 8644 { 8645 int32_t delta; 8646 int32_t rtt; 8647 8648 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 8649 /* No valid sample */ 8650 return; 8651 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 8652 /* We are to use the lowest RTT seen in a single ack */ 8653 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 8654 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 8655 /* We are to use the highest RTT seen in a single ack */ 8656 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 8657 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 8658 /* We are to use the average RTT seen in a single ack */ 8659 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 8660 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 8661 } else { 8662 #ifdef INVARIANTS 8663 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 8664 #endif 8665 return; 8666 } 8667 if (rtt == 0) 8668 rtt = 1; 8669 if (rack->rc_gp_rtt_set == 0) { 8670 /* 8671 * With no RTT we have to accept 8672 * even one we are not confident of. 8673 */ 8674 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 8675 rack->rc_gp_rtt_set = 1; 8676 } else if (rack->r_ctl.rack_rs.confidence) { 8677 /* update the running gp srtt */ 8678 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 8679 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 8680 } 8681 if (rack->r_ctl.rack_rs.confidence) { 8682 /* 8683 * record the low and high for highly buffered path computation, 8684 * we only do this if we are confident (not a retransmission). 8685 */ 8686 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 8687 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 8688 } 8689 if (rack->rc_highly_buffered == 0) { 8690 /* 8691 * Currently once we declare a path has 8692 * highly buffered there is no going 8693 * back, which may be a problem... 8694 */ 8695 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 8696 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 8697 rack->r_ctl.rc_highest_us_rtt, 8698 rack->r_ctl.rc_lowest_us_rtt, 8699 RACK_RTTS_SEEHBP); 8700 rack->rc_highly_buffered = 1; 8701 } 8702 } 8703 } 8704 if ((rack->r_ctl.rack_rs.confidence) || 8705 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 8706 /* 8707 * If we are highly confident of it <or> it was 8708 * never retransmitted we accept it as the last us_rtt. 8709 */ 8710 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 8711 /* The lowest rtt can be set if its was not retransmited */ 8712 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 8713 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 8714 if (rack->r_ctl.rc_lowest_us_rtt == 0) 8715 rack->r_ctl.rc_lowest_us_rtt = 1; 8716 } 8717 } 8718 rack = (struct tcp_rack *)tp->t_fb_ptr; 8719 if (tp->t_srtt != 0) { 8720 /* 8721 * We keep a simple srtt in microseconds, like our rtt 8722 * measurement. We don't need to do any tricks with shifting 8723 * etc. Instead we just add in 1/8th of the new measurement 8724 * and subtract out 1/8 of the old srtt. We do the same with 8725 * the variance after finding the absolute value of the 8726 * difference between this sample and the current srtt. 
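 *
 * A worked example of the update below (values in usec, integer
 * truncation ignored): with t_srtt = 100000 and a new measurement
 * rtt = 80000,
 *
 *	delta  = 100000 - 80000 = 20000
 *	srtt   = 100000 - 100000/8 + 80000/8 = 97500
 *	rttvar = rttvar - rttvar/8 + 20000/8
 *
 * so each sample moves srtt one eighth of the way toward the new
 * measurement while rttvar tracks the mean deviation with the
 * same gain.
 *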
8727 */ 8728 delta = tp->t_srtt - rtt; 8729 /* Take off 1/8th of the current sRTT */ 8730 tp->t_srtt -= (tp->t_srtt >> 3); 8731 /* Add in 1/8th of the new RTT just measured */ 8732 tp->t_srtt += (rtt >> 3); 8733 if (tp->t_srtt <= 0) 8734 tp->t_srtt = 1; 8735 /* Now lets make the absolute value of the variance */ 8736 if (delta < 0) 8737 delta = -delta; 8738 /* Subtract out 1/8th */ 8739 tp->t_rttvar -= (tp->t_rttvar >> 3); 8740 /* Add in 1/8th of the new variance we just saw */ 8741 tp->t_rttvar += (delta >> 3); 8742 if (tp->t_rttvar <= 0) 8743 tp->t_rttvar = 1; 8744 } else { 8745 /* 8746 * No rtt measurement yet - use the unsmoothed rtt. Set the 8747 * variance to half the rtt (so our first retransmit happens 8748 * at 3*rtt). 8749 */ 8750 tp->t_srtt = rtt; 8751 tp->t_rttvar = rtt >> 1; 8752 } 8753 rack->rc_srtt_measure_made = 1; 8754 KMOD_TCPSTAT_INC(tcps_rttupdated); 8755 if (tp->t_rttupdated < UCHAR_MAX) 8756 tp->t_rttupdated++; 8757 #ifdef STATS 8758 if (rack_stats_gets_ms_rtt == 0) { 8759 /* Send in the microsecond rtt used for rxt timeout purposes */ 8760 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 8761 } else if (rack_stats_gets_ms_rtt == 1) { 8762 /* Send in the millisecond rtt used for rxt timeout purposes */ 8763 int32_t ms_rtt; 8764 8765 /* Round up */ 8766 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 8767 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 8768 } else if (rack_stats_gets_ms_rtt == 2) { 8769 /* Send in the millisecond rtt has close to the path RTT as we can get */ 8770 int32_t ms_rtt; 8771 8772 /* Round up */ 8773 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 8774 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 8775 } else { 8776 /* Send in the microsecond rtt has close to the path RTT as we can get */ 8777 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 8778 } 8779 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_PATHRTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 8780 #endif 8781 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_msec(&rack->r_ctl.act_rcv_time); 8782 /* 8783 * the retransmit should happen at rtt + 4 * rttvar. Because of the 8784 * way we do the smoothing, srtt and rttvar will each average +1/2 8785 * tick of bias. When we compute the retransmit timer, we want 1/2 8786 * tick of rounding and 1 extra tick because of +-1/2 tick 8787 * uncertainty in the firing of the timer. The bias will give us 8788 * exactly the 1.5 tick we need. But, because the bias is 8789 * statistical, we have to test that we don't drop below the minimum 8790 * feasible timer (which is 2 ticks). 8791 */ 8792 tp->t_rxtshift = 0; 8793 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 8794 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 8795 rack_log_rtt_sample(rack, rtt); 8796 tp->t_softerror = 0; 8797 } 8798 8799 8800 static void 8801 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 8802 { 8803 /* 8804 * Apply to filter the inbound us-rtt at us_cts. 8805 */ 8806 uint32_t old_rtt; 8807 8808 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 8809 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 8810 us_rtt, us_cts); 8811 if (old_rtt > us_rtt) { 8812 /* We just hit a new lower rtt time */ 8813 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 8814 __LINE__, RACK_RTTS_NEWRTT); 8815 /* 8816 * Only count it if its lower than what we saw within our 8817 * calculated range. 
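 *
 * Concretely (with made-up tuning values, the real ones come
 * from the rack sysctls): the new low has to undercut the old
 * filtered minimum by more than rack_min_rtt_movement to count
 * at all. If it does, and rack_probertt_lower_within were 10
 * with rack_time_between_probertt at 30 seconds, then
 *
 *	val = 10 * 30s / 100 = 3s
 *
 * and a qualifying new low seen within the last 3 seconds before
 * the next scheduled probe-rtt makes us enter probe-rtt early,
 * on the theory that a competing flow just drained the queue.
 *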
8818 */ 8819 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 8820 if (rack_probertt_lower_within && 8821 rack->rc_gp_dyn_mul && 8822 (rack->use_fixed_rate == 0) && 8823 (rack->rc_always_pace)) { 8824 /* 8825 * We are seeing a new lower rtt very close 8826 * to the time that we would have entered probe-rtt. 8827 * This is probably due to the fact that a peer flow 8828 * has entered probe-rtt. Lets go in now too. 8829 */ 8830 uint32_t val; 8831 8832 val = rack_probertt_lower_within * rack_time_between_probertt; 8833 val /= 100; 8834 if ((rack->in_probe_rtt == 0) && 8835 (rack->rc_skip_timely == 0) && 8836 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 8837 rack_enter_probertt(rack, us_cts); 8838 } 8839 } 8840 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 8841 } 8842 } 8843 } 8844 8845 static int 8846 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 8847 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 8848 { 8849 uint32_t us_rtt; 8850 int32_t i, all; 8851 uint32_t t, len_acked; 8852 8853 if ((rsm->r_flags & RACK_ACKED) || 8854 (rsm->r_flags & RACK_WAS_ACKED)) 8855 /* Already done */ 8856 return (0); 8857 if (rsm->r_no_rtt_allowed) { 8858 /* Not allowed */ 8859 return (0); 8860 } 8861 if (ack_type == CUM_ACKED) { 8862 if (SEQ_GT(th_ack, rsm->r_end)) { 8863 len_acked = rsm->r_end - rsm->r_start; 8864 all = 1; 8865 } else { 8866 len_acked = th_ack - rsm->r_start; 8867 all = 0; 8868 } 8869 } else { 8870 len_acked = rsm->r_end - rsm->r_start; 8871 all = 0; 8872 } 8873 if (rsm->r_rtr_cnt == 1) { 8874 8875 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 8876 if ((int)t <= 0) 8877 t = 1; 8878 if (!tp->t_rttlow || tp->t_rttlow > t) 8879 tp->t_rttlow = t; 8880 if (!rack->r_ctl.rc_rack_min_rtt || 8881 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8882 rack->r_ctl.rc_rack_min_rtt = t; 8883 if (rack->r_ctl.rc_rack_min_rtt == 0) { 8884 rack->r_ctl.rc_rack_min_rtt = 1; 8885 } 8886 } 8887 if (TSTMP_GT(tcp_tv_to_usec(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 8888 us_rtt = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8889 else 8890 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8891 if (us_rtt == 0) 8892 us_rtt = 1; 8893 if (CC_ALGO(tp)->rttsample != NULL) { 8894 /* Kick the RTT to the CC */ 8895 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 8896 } 8897 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usec(&rack->r_ctl.act_rcv_time)); 8898 if (ack_type == SACKED) { 8899 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 8900 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 8901 } else { 8902 /* 8903 * We need to setup what our confidence 8904 * is in this ack. 8905 * 8906 * If the rsm was app limited and it is 8907 * less than a mss in length (the end 8908 * of the send) then we have a gap. If we 8909 * were app limited but say we were sending 8910 * multiple MSS's then we are more confident 8911 * int it. 8912 * 8913 * When we are not app-limited then we see if 8914 * the rsm is being included in the current 8915 * measurement, we tell this by the app_limited_needs_set 8916 * flag. 8917 * 8918 * Note that being cwnd blocked is not applimited 8919 * as well as the pacing delay between packets which 8920 * are sending only 1 or 2 MSS's also will show up 8921 * in the RTT. 
We probably need to examine this algorithm 8922 * a bit more and enhance it to account for the delay 8923 * between rsm's. We could do that by saving off the 8924 * pacing delay of each rsm (in an rsm) and then 8925 * factoring that in somehow though for now I am 8926 * not sure how :) 8927 */ 8928 int calc_conf = 0; 8929 8930 if (rsm->r_flags & RACK_APP_LIMITED) { 8931 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 8932 calc_conf = 0; 8933 else 8934 calc_conf = 1; 8935 } else if (rack->app_limited_needs_set == 0) { 8936 calc_conf = 1; 8937 } else { 8938 calc_conf = 0; 8939 } 8940 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 8941 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 8942 calc_conf, rsm, rsm->r_rtr_cnt); 8943 } 8944 if ((rsm->r_flags & RACK_TLP) && 8945 (!IN_FASTRECOVERY(tp->t_flags))) { 8946 /* Segment was a TLP and our retrans matched */ 8947 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 8948 rack_cong_signal(tp, CC_NDUPACK, th_ack, __LINE__); 8949 } 8950 } 8951 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 8952 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 8953 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { 8954 /* New more recent rack_tmit_time */ 8955 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 8956 if (rack->r_ctl.rc_rack_tmit_time == 0) 8957 rack->r_ctl.rc_rack_tmit_time = 1; 8958 rack->rc_rack_rtt = t; 8959 } 8960 return (1); 8961 } 8962 /* 8963 * We clear the soft/rxtshift since we got an ack. 8964 * There is no assurance we will call the commit() function 8965 * so we need to clear these to avoid incorrect handling. 8966 */ 8967 tp->t_rxtshift = 0; 8968 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 8969 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 8970 tp->t_softerror = 0; 8971 if (to && (to->to_flags & TOF_TS) && 8972 (ack_type == CUM_ACKED) && 8973 (to->to_tsecr) && 8974 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 8975 /* 8976 * Now which timestamp does it match? In this block the ACK 8977 * must be coming from a previous transmission. 8978 */ 8979 for (i = 0; i < rsm->r_rtr_cnt; i++) { 8980 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 8981 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 8982 if ((int)t <= 0) 8983 t = 1; 8984 if (CC_ALGO(tp)->rttsample != NULL) { 8985 /* 8986 * Kick the RTT to the CC, here 8987 * we lie a bit in that we know the 8988 * retransmission is correct even though 8989 * we retransmitted. This is because 8990 * we match the timestamps. 8991 */ 8992 if (TSTMP_GT(tcp_tv_to_usec(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) 8993 us_rtt = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; 8994 else 8995 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; 8996 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 8997 } 8998 if ((i + 1) < rsm->r_rtr_cnt) { 8999 /* 9000 * The peer ack'd from our previous 9001 * transmission. We have a spurious 9002 * retransmission and thus we dont 9003 * want to update our rack_rtt. 9004 * 9005 * Hmm should there be a CC revert here? 
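 *
 * A concrete case: the segment was first sent around a timestamp
 * of 1000 ms and retransmitted at 1300 ms; the ACK's TSecr
 * echoes 1000, so it matches an earlier r_tim_lastsent[] slot
 * rather than the latest one. The peer therefore had the
 * original all along, the retransmission was spurious, and we
 * bail out here without touching rack_rtt (leaving the CC-revert
 * question above open).
 *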
9006 * 9007 */ 9008 return (0); 9009 } 9010 if (!tp->t_rttlow || tp->t_rttlow > t) 9011 tp->t_rttlow = t; 9012 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9013 rack->r_ctl.rc_rack_min_rtt = t; 9014 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9015 rack->r_ctl.rc_rack_min_rtt = 1; 9016 } 9017 } 9018 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9019 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9020 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { 9021 /* New more recent rack_tmit_time */ 9022 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 9023 if (rack->r_ctl.rc_rack_tmit_time == 0) 9024 rack->r_ctl.rc_rack_tmit_time = 1; 9025 rack->rc_rack_rtt = t; 9026 } 9027 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); 9028 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, 9029 rsm->r_rtr_cnt); 9030 return (1); 9031 } 9032 } 9033 /* If we are logging log out the sendmap */ 9034 if (tcp_bblogging_on(rack->rc_tp)) { 9035 for (i = 0; i < rsm->r_rtr_cnt; i++) { 9036 rack_log_rtt_sendmap(rack, i, rsm->r_tim_lastsent[i], to->to_tsecr); 9037 } 9038 } 9039 goto ts_not_found; 9040 } else { 9041 /* 9042 * Ok its a SACK block that we retransmitted. or a windows 9043 * machine without timestamps. We can tell nothing from the 9044 * time-stamp since its not there or the time the peer last 9045 * received a segment that moved forward its cum-ack point. 9046 */ 9047 ts_not_found: 9048 i = rsm->r_rtr_cnt - 1; 9049 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 9050 if ((int)t <= 0) 9051 t = 1; 9052 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9053 /* 9054 * We retransmitted and the ack came back in less 9055 * than the smallest rtt we have observed. We most 9056 * likely did an improper retransmit as outlined in 9057 * 6.2 Step 2 point 2 in the rack-draft so we 9058 * don't want to update our rack_rtt. We in 9059 * theory (in future) might want to think about reverting our 9060 * cwnd state but we won't for now. 9061 */ 9062 return (0); 9063 } else if (rack->r_ctl.rc_rack_min_rtt) { 9064 /* 9065 * We retransmitted it and the retransmit did the 9066 * job. 9067 */ 9068 if (!rack->r_ctl.rc_rack_min_rtt || 9069 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9070 rack->r_ctl.rc_rack_min_rtt = t; 9071 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9072 rack->r_ctl.rc_rack_min_rtt = 1; 9073 } 9074 } 9075 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9076 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9077 (uint32_t)rsm->r_tim_lastsent[i]))) { 9078 /* New more recent rack_tmit_time */ 9079 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; 9080 if (rack->r_ctl.rc_rack_tmit_time == 0) 9081 rack->r_ctl.rc_rack_tmit_time = 1; 9082 rack->rc_rack_rtt = t; 9083 } 9084 return (1); 9085 } 9086 } 9087 return (0); 9088 } 9089 9090 /* 9091 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 
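 *
 * The tmap is ordered by (re)transmission time, so starting just
 * before the newly SACKed entry and walking backwards visits
 * every entry that was sent earlier. Each still-unacked one gets
 * RACK_SACK_PASSED (a later send has been SACKed past it, the
 * core RACK reordering/loss signal) and, when the RACK threshold
 * has already elapsed since the SACKed send, is also counted as
 * lost. The walk stops at the first entry already marked, since
 * everything older was handled by a previous pass. E.g. with
 * sends A, B, C in that order and C newly SACKed, B and then A
 * are marked, unless A was already marked by an earlier SACK.
 *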
9092 */ 9093 static void 9094 rack_log_sack_passed(struct tcpcb *tp, 9095 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts) 9096 { 9097 struct rack_sendmap *nrsm; 9098 uint32_t thresh; 9099 9100 /* Get our rxt threshold for lost consideration */ 9101 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(tp, rack), cts, __LINE__, 0); 9102 /* Now start looking at rsm's */ 9103 nrsm = rsm; 9104 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 9105 rack_head, r_tnext) { 9106 if (nrsm == rsm) { 9107 /* Skip original segment he is acked */ 9108 continue; 9109 } 9110 if (nrsm->r_flags & RACK_ACKED) { 9111 /* 9112 * Skip ack'd segments, though we 9113 * should not see these, since tmap 9114 * should not have ack'd segments. 9115 */ 9116 continue; 9117 } 9118 if (nrsm->r_flags & RACK_RWND_COLLAPSED) { 9119 /* 9120 * If the peer dropped the rwnd on 9121 * these then we don't worry about them. 9122 */ 9123 continue; 9124 } 9125 /* Check lost state */ 9126 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { 9127 uint32_t exp; 9128 9129 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; 9130 if (TSTMP_LT(exp, cts) || (exp == cts)) { 9131 /* We consider it lost */ 9132 nrsm->r_flags |= RACK_WAS_LOST; 9133 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; 9134 } 9135 } 9136 if (nrsm->r_flags & RACK_SACK_PASSED) { 9137 /* 9138 * We found one that is already marked 9139 * passed, we have been here before and 9140 * so all others below this are marked. 9141 */ 9142 break; 9143 } 9144 nrsm->r_flags |= RACK_SACK_PASSED; 9145 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 9146 } 9147 } 9148 9149 static void 9150 rack_need_set_test(struct tcpcb *tp, 9151 struct tcp_rack *rack, 9152 struct rack_sendmap *rsm, 9153 tcp_seq th_ack, 9154 int line, 9155 int use_which) 9156 { 9157 struct rack_sendmap *s_rsm; 9158 9159 if ((tp->t_flags & TF_GPUTINPROG) && 9160 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 9161 /* 9162 * We were app limited, and this ack 9163 * butts up or goes beyond the point where we want 9164 * to start our next measurement. We need 9165 * to record the new gput_ts as here and 9166 * possibly update the start sequence. 9167 */ 9168 uint32_t seq, ts; 9169 9170 if (rsm->r_rtr_cnt > 1) { 9171 /* 9172 * This is a retransmit, can we 9173 * really make any assessment at this 9174 * point? We are not really sure of 9175 * the timestamp, is it this or the 9176 * previous transmission? 9177 * 9178 * Lets wait for something better that 9179 * is not retransmitted. 9180 */ 9181 return; 9182 } 9183 seq = tp->gput_seq; 9184 ts = tp->gput_ts; 9185 rack->app_limited_needs_set = 0; 9186 tp->gput_ts = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time); 9187 /* Do we start at a new end? */ 9188 if ((use_which == RACK_USE_BEG) && 9189 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { 9190 /* 9191 * When we get an ACK that just eats 9192 * up some of the rsm, we set RACK_USE_BEG 9193 * since whats at r_start (i.e. th_ack) 9194 * is left unacked and thats where the 9195 * measurement now starts. 9196 */ 9197 tp->gput_seq = rsm->r_start; 9198 } 9199 if ((use_which == RACK_USE_END) && 9200 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 9201 /* 9202 * We use the end when the cumack 9203 * is moving forward and completely 9204 * deleting the rsm passed so basically 9205 * r_end holds th_ack. 9206 * 9207 * For SACK's we also want to use the end 9208 * since this piece just got sacked and 9209 * we want to target anything after that 9210 * in our measurement. 
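 *
 * Summarizing the three cases handled below for an rsm covering
 * [r_start, r_end) (informal, see the code for the exact
 * sequence-number guards):
 *
 *	RACK_USE_BEG           gput_seq = rsm->r_start
 *	RACK_USE_END           gput_seq = rsm->r_end
 *	RACK_USE_END_OR_THACK  gput_seq = max(th_ack, rsm->r_end)
 *
 * i.e. the measurement restarts at the first byte that is still
 * unacked, or just beyond the piece that was sacked.
 *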
9211 */ 9212 tp->gput_seq = rsm->r_end; 9213 } 9214 if (use_which == RACK_USE_END_OR_THACK) { 9215 /* 9216 * special case for ack moving forward, 9217 * not a sack, we need to move all the 9218 * way up to where this ack cum-ack moves 9219 * to. 9220 */ 9221 if (SEQ_GT(th_ack, rsm->r_end)) 9222 tp->gput_seq = th_ack; 9223 else 9224 tp->gput_seq = rsm->r_end; 9225 } 9226 if (SEQ_LT(tp->gput_seq, tp->snd_max)) 9227 s_rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 9228 else 9229 s_rsm = NULL; 9230 /* 9231 * Pick up the correct send time if we can the rsm passed in 9232 * may be equal to s_rsm if the RACK_USE_BEG was set. For the other 9233 * two cases (RACK_USE_THACK or RACK_USE_END) most likely we will 9234 * find a different seq i.e. the next send up. 9235 * 9236 * If that has not been sent, s_rsm will be NULL and we must 9237 * arrange it so this function will get called again by setting 9238 * app_limited_needs_set. 9239 */ 9240 if (s_rsm) 9241 rack->r_ctl.rc_gp_output_ts = s_rsm->r_tim_lastsent[0]; 9242 else { 9243 /* If we hit here we have to have *not* sent tp->gput_seq */ 9244 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; 9245 /* Set it up so we will go through here again */ 9246 rack->app_limited_needs_set = 1; 9247 } 9248 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { 9249 /* 9250 * We moved beyond this guy's range, re-calculate 9251 * the new end point. 9252 */ 9253 if (rack->rc_gp_filled == 0) { 9254 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 9255 } else { 9256 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 9257 } 9258 } 9259 /* 9260 * We are moving the goal post, we may be able to clear the 9261 * measure_saw_probe_rtt flag. 9262 */ 9263 if ((rack->in_probe_rtt == 0) && 9264 (rack->measure_saw_probe_rtt) && 9265 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 9266 rack->measure_saw_probe_rtt = 0; 9267 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 9268 seq, tp->gput_seq, 9269 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | 9270 (uint64_t)rack->r_ctl.rc_gp_output_ts), 9271 5, line, NULL, 0); 9272 if (rack->rc_gp_filled && 9273 ((tp->gput_ack - tp->gput_seq) < 9274 max(rc_init_window(rack), (MIN_GP_WIN * 9275 ctf_fixed_maxseg(tp))))) { 9276 uint32_t ideal_amount; 9277 9278 ideal_amount = rack_get_measure_window(tp, rack); 9279 if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) { 9280 /* 9281 * There is no sense of continuing this measurement 9282 * because its too small to gain us anything we 9283 * trust. Skip it and that way we can start a new 9284 * measurement quicker. 9285 */ 9286 tp->t_flags &= ~TF_GPUTINPROG; 9287 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 9288 0, 0, 9289 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | 9290 (uint64_t)rack->r_ctl.rc_gp_output_ts), 9291 6, __LINE__, NULL, 0); 9292 } else { 9293 /* 9294 * Reset the window further out. 
9295 */ 9296 tp->gput_ack = tp->gput_seq + ideal_amount; 9297 } 9298 } 9299 rack_tend_gp_marks(tp, rack); 9300 rack_log_gpset(rack, tp->gput_ack, 0, 0, line, 2, rsm); 9301 } 9302 } 9303 9304 static inline int 9305 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) 9306 { 9307 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { 9308 /* Behind our TLP definition or right at */ 9309 return (0); 9310 } 9311 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { 9312 /* The start is beyond or right at our end of TLP definition */ 9313 return (0); 9314 } 9315 /* It has to be a sub-part of the original TLP recorded */ 9316 return (1); 9317 } 9318 9319 static uint32_t 9320 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 9321 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, 9322 uint32_t segsiz) 9323 { 9324 uint32_t start, end, changed = 0; 9325 struct rack_sendmap stack_map; 9326 struct rack_sendmap *rsm, *nrsm, *prev, *next; 9327 int insret __diagused; 9328 int32_t used_ref = 1; 9329 int can_use_hookery = 0; 9330 9331 start = sack->start; 9332 end = sack->end; 9333 rsm = *prsm; 9334 9335 do_rest_ofb: 9336 if ((rsm == NULL) || 9337 (SEQ_LT(end, rsm->r_start)) || 9338 (SEQ_GEQ(start, rsm->r_end)) || 9339 (SEQ_LT(start, rsm->r_start))) { 9340 /* 9341 * We are not in the right spot, 9342 * find the correct spot in the tree. 9343 */ 9344 used_ref = 0; 9345 rsm = tqhash_find(rack->r_ctl.tqh, start); 9346 } 9347 if (rsm == NULL) { 9348 /* TSNH */ 9349 goto out; 9350 } 9351 /* Ok we have an ACK for some piece of this rsm */ 9352 if (rsm->r_start != start) { 9353 if ((rsm->r_flags & RACK_ACKED) == 0) { 9354 /* 9355 * Before any splitting or hookery is 9356 * done is it a TLP of interest i.e. rxt? 9357 */ 9358 if ((rsm->r_flags & RACK_TLP) && 9359 (rsm->r_rtr_cnt > 1)) { 9360 /* 9361 * We are splitting a rxt TLP, check 9362 * if we need to save off the start/end 9363 */ 9364 if (rack->rc_last_tlp_acked_set && 9365 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9366 /* 9367 * We already turned this on since we are inside 9368 * the previous one was a partially sack now we 9369 * are getting another one (maybe all of it). 9370 * 9371 */ 9372 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9373 /* 9374 * Lets make sure we have all of it though. 9375 */ 9376 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9377 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9378 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9379 rack->r_ctl.last_tlp_acked_end); 9380 } 9381 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9382 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9383 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9384 rack->r_ctl.last_tlp_acked_end); 9385 } 9386 } else { 9387 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9388 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9389 rack->rc_last_tlp_past_cumack = 0; 9390 rack->rc_last_tlp_acked_set = 1; 9391 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9392 } 9393 } 9394 /** 9395 * Need to split this in two pieces the before and after, 9396 * the before remains in the map, the after must be 9397 * added. 
In other words we have: 9398 * rsm |--------------| 9399 * sackblk |-------> 9400 * rsm will become 9401 * rsm |---| 9402 * and nrsm will be the sacked piece 9403 * nrsm |----------| 9404 * 9405 * But before we start down that path lets 9406 * see if the sack spans over on top of 9407 * the next guy and it is already sacked. 9408 * 9409 */ 9410 /* 9411 * Hookery can only be used if the two entries 9412 * are in the same bucket and neither one of 9413 * them staddle the bucket line. 9414 */ 9415 next = tqhash_next(rack->r_ctl.tqh, rsm); 9416 if (next && 9417 (rsm->bindex == next->bindex) && 9418 ((rsm->r_flags & RACK_STRADDLE) == 0) && 9419 ((next->r_flags & RACK_STRADDLE) == 0) && 9420 ((rsm->r_flags & RACK_IS_PCM) == 0) && 9421 ((next->r_flags & RACK_IS_PCM) == 0) && 9422 (rsm->r_flags & RACK_IN_GP_WIN) && 9423 (next->r_flags & RACK_IN_GP_WIN)) 9424 can_use_hookery = 1; 9425 else 9426 can_use_hookery = 0; 9427 if (next && can_use_hookery && 9428 (next->r_flags & RACK_ACKED) && 9429 SEQ_GEQ(end, next->r_start)) { 9430 /** 9431 * So the next one is already acked, and 9432 * we can thus by hookery use our stack_map 9433 * to reflect the piece being sacked and 9434 * then adjust the two tree entries moving 9435 * the start and ends around. So we start like: 9436 * rsm |------------| (not-acked) 9437 * next |-----------| (acked) 9438 * sackblk |--------> 9439 * We want to end like so: 9440 * rsm |------| (not-acked) 9441 * next |-----------------| (acked) 9442 * nrsm |-----| 9443 * Where nrsm is a temporary stack piece we 9444 * use to update all the gizmos. 9445 */ 9446 /* Copy up our fudge block */ 9447 nrsm = &stack_map; 9448 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 9449 /* Now adjust our tree blocks */ 9450 tqhash_update_end(rack->r_ctl.tqh, rsm, start); 9451 next->r_start = start; 9452 rsm->r_flags |= RACK_SHUFFLED; 9453 next->r_flags |= RACK_SHUFFLED; 9454 /* Now we must adjust back where next->m is */ 9455 rack_setup_offset_for_rsm(rack, rsm, next); 9456 /* 9457 * Which timestamp do we keep? It is rather 9458 * important in GP measurements to have the 9459 * accurate end of the send window. 9460 * 9461 * We keep the largest value, which is the newest 9462 * send. We do this in case a segment that is 9463 * joined together and not part of a GP estimate 9464 * later gets expanded into the GP estimate. 9465 * 9466 * We prohibit the merging of unlike kinds i.e. 9467 * all pieces that are in the GP estimate can be 9468 * merged and all pieces that are not in a GP estimate 9469 * can be merged, but not disimilar pieces. Combine 9470 * this with taking the highest here and we should 9471 * be ok unless of course the client reneges. Then 9472 * all bets are off. 9473 */ 9474 if (next->r_tim_lastsent[(next->r_rtr_cnt-1)] < 9475 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) 9476 next->r_tim_lastsent[(next->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]; 9477 /* 9478 * And we must keep the newest ack arrival time. 
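 *
 * (As an aside, nrsm here is only a scratch copy on the stack;
 * it is never inserted into the tree or the tmap. It exists so
 * that the RTT/accounting calls below can be handed the newly
 * sacked middle piece as a [start, old r_end) range while the
 * two real entries simply have their boundaries shifted, saving
 * an allocation and an insert on this hot path.)
 *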
9479 */ 9480 if (next->r_ack_arrival < 9481 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) 9482 next->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9483 9484 9485 /* We don't need to adjust rsm, it did not change */ 9486 /* Clear out the dup ack count of the remainder */ 9487 rsm->r_dupack = 0; 9488 rsm->r_just_ret = 0; 9489 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9490 /* Now lets make sure our fudge block is right */ 9491 nrsm->r_start = start; 9492 /* Now lets update all the stats and such */ 9493 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 9494 if (rack->app_limited_needs_set) 9495 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 9496 changed += (nrsm->r_end - nrsm->r_start); 9497 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 9498 if (rsm->r_flags & RACK_WAS_LOST) { 9499 int my_chg; 9500 9501 /* 9502 * Note here we do not use our rack_mark_nolonger_lost() function 9503 * since we are moving our data pointer around and the 9504 * ack'ed side is already not considered lost. 9505 */ 9506 my_chg = (nrsm->r_end - nrsm->r_start); 9507 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 9508 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 9509 if (my_chg <= rack->r_ctl.rc_considered_lost) 9510 rack->r_ctl.rc_considered_lost -= my_chg; 9511 else 9512 rack->r_ctl.rc_considered_lost = 0; 9513 } 9514 if (nrsm->r_flags & RACK_SACK_PASSED) { 9515 rack->r_ctl.rc_reorder_ts = cts; 9516 if (rack->r_ctl.rc_reorder_ts == 0) 9517 rack->r_ctl.rc_reorder_ts = 1; 9518 } 9519 /* 9520 * Now we want to go up from rsm (the 9521 * one left un-acked) to the next one 9522 * in the tmap. We do this so when 9523 * we walk backwards we include marking 9524 * sack-passed on rsm (The one passed in 9525 * is skipped since it is generally called 9526 * on something sacked before removing it 9527 * from the tmap). 9528 */ 9529 if (rsm->r_in_tmap) { 9530 nrsm = TAILQ_NEXT(rsm, r_tnext); 9531 /* 9532 * Now that we have the next 9533 * one walk backwards from there. 9534 */ 9535 if (nrsm && nrsm->r_in_tmap) 9536 rack_log_sack_passed(tp, rack, nrsm, cts); 9537 } 9538 /* Now are we done? */ 9539 if (SEQ_LT(end, next->r_end) || 9540 (end == next->r_end)) { 9541 /* Done with block */ 9542 goto out; 9543 } 9544 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); 9545 counter_u64_add(rack_sack_used_next_merge, 1); 9546 /* Postion for the next block */ 9547 start = next->r_end; 9548 rsm = tqhash_next(rack->r_ctl.tqh, next); 9549 if (rsm == NULL) 9550 goto out; 9551 } else { 9552 /** 9553 * We can't use any hookery here, so we 9554 * need to split the map. We enter like 9555 * so: 9556 * rsm |--------| 9557 * sackblk |-----> 9558 * We will add the new block nrsm and 9559 * that will be the new portion, and then 9560 * fall through after reseting rsm. So we 9561 * split and look like this: 9562 * rsm |----| 9563 * sackblk |-----> 9564 * nrsm |---| 9565 * We then fall through reseting 9566 * rsm to nrsm, so the next block 9567 * picks it up. 9568 */ 9569 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 9570 if (nrsm == NULL) { 9571 /* 9572 * failed XXXrrs what can we do but loose the sack 9573 * info? 
9574 */ 9575 goto out; 9576 } 9577 counter_u64_add(rack_sack_splits, 1); 9578 rack_clone_rsm(rack, nrsm, rsm, start); 9579 rsm->r_just_ret = 0; 9580 #ifndef INVARIANTS 9581 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 9582 #else 9583 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 9584 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 9585 nrsm, insret, rack, rsm); 9586 } 9587 #endif 9588 if (rsm->r_in_tmap) { 9589 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 9590 nrsm->r_in_tmap = 1; 9591 } 9592 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 9593 rsm->r_flags &= (~RACK_HAS_FIN); 9594 /* Position us to point to the new nrsm that starts the sack blk */ 9595 rsm = nrsm; 9596 } 9597 } else { 9598 /* Already sacked this piece */ 9599 counter_u64_add(rack_sack_skipped_acked, 1); 9600 if (end == rsm->r_end) { 9601 /* Done with block */ 9602 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 9603 goto out; 9604 } else if (SEQ_LT(end, rsm->r_end)) { 9605 /* A partial sack to a already sacked block */ 9606 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 9607 goto out; 9608 } else { 9609 /* 9610 * The end goes beyond this guy 9611 * reposition the start to the 9612 * next block. 9613 */ 9614 start = rsm->r_end; 9615 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 9616 if (rsm == NULL) 9617 goto out; 9618 } 9619 } 9620 } 9621 if (SEQ_GEQ(end, rsm->r_end)) { 9622 /** 9623 * The end of this block is either beyond this guy or right 9624 * at this guy. I.e.: 9625 * rsm --- |-----| 9626 * end |-----| 9627 * <or> 9628 * end |---------| 9629 */ 9630 if ((rsm->r_flags & RACK_ACKED) == 0) { 9631 /* 9632 * Is it a TLP of interest? 9633 */ 9634 if ((rsm->r_flags & RACK_TLP) && 9635 (rsm->r_rtr_cnt > 1)) { 9636 /* 9637 * We are splitting a rxt TLP, check 9638 * if we need to save off the start/end 9639 */ 9640 if (rack->rc_last_tlp_acked_set && 9641 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9642 /* 9643 * We already turned this on since we are inside 9644 * the previous one was a partially sack now we 9645 * are getting another one (maybe all of it). 9646 */ 9647 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9648 /* 9649 * Lets make sure we have all of it though. 9650 */ 9651 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9652 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9653 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9654 rack->r_ctl.last_tlp_acked_end); 9655 } 9656 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9657 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9658 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9659 rack->r_ctl.last_tlp_acked_end); 9660 } 9661 } else { 9662 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9663 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9664 rack->rc_last_tlp_past_cumack = 0; 9665 rack->rc_last_tlp_acked_set = 1; 9666 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9667 } 9668 } 9669 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 9670 changed += (rsm->r_end - rsm->r_start); 9671 /* You get a count for acking a whole segment or more */ 9672 if (rsm->r_flags & RACK_WAS_LOST) { 9673 /* 9674 * Here we can use the inline function since 9675 * the rsm is truly marked lost and now no longer lost. 
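 *
 * This is the symmetric case to the open-coded adjustments on
 * the merge paths above: there only the stack copy's byte count
 * could be subtracted from rc_considered_lost, while here the
 * whole entry flips from lost to not-lost, so the helper below
 * can clear the flag and the byte accounting in one place. Note
 * also that "changed" accumulates the bytes newly sacked by this
 * ACK while rc_sacked tracks everything currently sacked above
 * the cum-ack point, which ctf_flight_size() appears to net out
 * when computing bytes in flight.
 *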
9676 */
9677 rack_mark_nolonger_lost(rack, rsm);
9678 }
9679 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
9680 if (rsm->r_in_tmap) /* should be true */
9681 rack_log_sack_passed(tp, rack, rsm, cts);
9682 /* Is Reordering occurring? */
9683 if (rsm->r_flags & RACK_SACK_PASSED) {
9684 rsm->r_flags &= ~RACK_SACK_PASSED;
9685 rack->r_ctl.rc_reorder_ts = cts;
9686 if (rack->r_ctl.rc_reorder_ts == 0)
9687 rack->r_ctl.rc_reorder_ts = 1;
9688 }
9689 if (rack->app_limited_needs_set)
9690 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
9691 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
9692 rsm->r_flags |= RACK_ACKED;
9693 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end);
9694 if (rsm->r_in_tmap) {
9695 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
9696 rsm->r_in_tmap = 0;
9697 }
9698 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__);
9699 } else {
9700 counter_u64_add(rack_sack_skipped_acked, 1);
9701 }
9702 if (end == rsm->r_end) {
9703 /* This block only - done, setup for next */
9704 goto out;
9705 }
9706 /*
9707 * There is more not covered by this rsm, move on
9708 * to the next block in the tail queue hash table.
9709 */
9710 nrsm = tqhash_next(rack->r_ctl.tqh, rsm);
9711 start = rsm->r_end;
9712 rsm = nrsm;
9713 if (rsm == NULL)
9714 goto out;
9715 goto do_rest_ofb;
9716 }
9717 /**
9718 * The end of this sack block is smaller than
9719 * our rsm i.e.:
9720 * rsm --- |-----|
9721 * end |--|
9722 */
9723 if ((rsm->r_flags & RACK_ACKED) == 0) {
9724 /*
9725 * Is it a TLP of interest?
9726 */
9727 if ((rsm->r_flags & RACK_TLP) &&
9728 (rsm->r_rtr_cnt > 1)) {
9729 /*
9730 * We are splitting a rxt TLP, check
9731 * if we need to save off the start/end
9732 */
9733 if (rack->rc_last_tlp_acked_set &&
9734 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
9735 /*
9736 * We already turned this on since we are inside
9737 * the previous one, which was partially sacked;
9738 * now we are getting another one (maybe all of it).
9739 */
9740 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
9741 /*
9742 * Lets make sure we have all of it though.
9743 */
9744 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
9745 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9746 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
9747 rack->r_ctl.last_tlp_acked_end);
9748 }
9749 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
9750 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9751 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
9752 rack->r_ctl.last_tlp_acked_end);
9753 }
9754 } else {
9755 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9756 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9757 rack->rc_last_tlp_past_cumack = 0;
9758 rack->rc_last_tlp_acked_set = 1;
9759 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
9760 }
9761 }
9762 /*
9763 * Hookery can only be used if the two entries
9764 * are in the same bucket and neither one of
9765 * them straddles the bucket line.
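 * (Illustrative sketch, hypothetical sequence numbers: if prev
 * covers 1000-2000 and is already ACKED while rsm covers 2000-3000
 * and the sack block ends at 2500, the code below just stretches
 * prev->r_end up to 2500 and advances rsm->r_start to 2500 rather
 * than allocating a brand new split entry.)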
9766 */ 9767 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 9768 if (prev && 9769 (rsm->bindex == prev->bindex) && 9770 ((rsm->r_flags & RACK_STRADDLE) == 0) && 9771 ((prev->r_flags & RACK_STRADDLE) == 0) && 9772 ((rsm->r_flags & RACK_IS_PCM) == 0) && 9773 ((prev->r_flags & RACK_IS_PCM) == 0) && 9774 (rsm->r_flags & RACK_IN_GP_WIN) && 9775 (prev->r_flags & RACK_IN_GP_WIN)) 9776 can_use_hookery = 1; 9777 else 9778 can_use_hookery = 0; 9779 if (prev && can_use_hookery && 9780 (prev->r_flags & RACK_ACKED)) { 9781 /** 9782 * Goal, we want the right remainder of rsm to shrink 9783 * in place and span from (rsm->r_start = end) to rsm->r_end. 9784 * We want to expand prev to go all the way 9785 * to prev->r_end <- end. 9786 * so in the tree we have before: 9787 * prev |--------| (acked) 9788 * rsm |-------| (non-acked) 9789 * sackblk |-| 9790 * We churn it so we end up with 9791 * prev |----------| (acked) 9792 * rsm |-----| (non-acked) 9793 * nrsm |-| (temporary) 9794 * 9795 * Note if either prev/rsm is a TLP we don't 9796 * do this. 9797 */ 9798 nrsm = &stack_map; 9799 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 9800 tqhash_update_end(rack->r_ctl.tqh, prev, end); 9801 rsm->r_start = end; 9802 rsm->r_flags |= RACK_SHUFFLED; 9803 prev->r_flags |= RACK_SHUFFLED; 9804 /* Now adjust nrsm (stack copy) to be 9805 * the one that is the small 9806 * piece that was "sacked". 9807 */ 9808 nrsm->r_end = end; 9809 rsm->r_dupack = 0; 9810 /* 9811 * Which timestamp do we keep? It is rather 9812 * important in GP measurements to have the 9813 * accurate end of the send window. 9814 * 9815 * We keep the largest value, which is the newest 9816 * send. We do this in case a segment that is 9817 * joined together and not part of a GP estimate 9818 * later gets expanded into the GP estimate. 9819 * 9820 * We prohibit the merging of unlike kinds i.e. 9821 * all pieces that are in the GP estimate can be 9822 * merged and all pieces that are not in a GP estimate 9823 * can be merged, but not disimilar pieces. Combine 9824 * this with taking the highest here and we should 9825 * be ok unless of course the client reneges. Then 9826 * all bets are off. 9827 */ 9828 if(prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] < 9829 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) { 9830 prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9831 } 9832 /* 9833 * And we must keep the newest ack arrival time. 9834 */ 9835 9836 if(prev->r_ack_arrival < 9837 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) 9838 prev->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9839 9840 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9841 /* 9842 * Now that the rsm has had its start moved forward 9843 * lets go ahead and get its new place in the world. 9844 */ 9845 rack_setup_offset_for_rsm(rack, prev, rsm); 9846 /* 9847 * Now nrsm is our new little piece 9848 * that is acked (which was merged 9849 * to prev). Update the rtt and changed 9850 * based on that. Also check for reordering. 9851 */ 9852 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 9853 if (rack->app_limited_needs_set) 9854 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 9855 changed += (nrsm->r_end - nrsm->r_start); 9856 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 9857 if (rsm->r_flags & RACK_WAS_LOST) { 9858 int my_chg; 9859 9860 /* 9861 * Note here we are using hookery again so we can't 9862 * use our rack_mark_nolonger_lost() function. 
9863 */
9864 my_chg = (nrsm->r_end - nrsm->r_start);
9865 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg),
9866 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
9867 if (my_chg <= rack->r_ctl.rc_considered_lost)
9868 rack->r_ctl.rc_considered_lost -= my_chg;
9869 else
9870 rack->r_ctl.rc_considered_lost = 0;
9871 }
9872 if (nrsm->r_flags & RACK_SACK_PASSED) {
9873 rack->r_ctl.rc_reorder_ts = cts;
9874 if (rack->r_ctl.rc_reorder_ts == 0)
9875 rack->r_ctl.rc_reorder_ts = 1;
9876 }
9877 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__);
9878 rsm = prev;
9879 counter_u64_add(rack_sack_used_prev_merge, 1);
9880 } else {
9881 /**
9882 * This is the case where our previous
9883 * block is not acked either, so we must
9884 * split the block in two.
9885 */
9886 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
9887 if (nrsm == NULL) {
9888 /* failed rrs what can we do but lose the sack info? */
9889 goto out;
9890 }
9891 if ((rsm->r_flags & RACK_TLP) &&
9892 (rsm->r_rtr_cnt > 1)) {
9893 /*
9894 * We are splitting a rxt TLP, check
9895 * if we need to save off the start/end
9896 */
9897 if (rack->rc_last_tlp_acked_set &&
9898 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
9899 /*
9900 * We already turned this on since this block is inside
9901 * the previous one, which was partially sacked;
9902 * now we are getting another one (maybe all of it).
9903 */
9904 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
9905 /*
9906 * Lets make sure we have all of it though.
9907 */
9908 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
9909 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9910 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
9911 rack->r_ctl.last_tlp_acked_end);
9912 }
9913 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
9914 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9915 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
9916 rack->r_ctl.last_tlp_acked_end);
9917 }
9918 } else {
9919 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9920 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9921 rack->rc_last_tlp_acked_set = 1;
9922 rack->rc_last_tlp_past_cumack = 0;
9923 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
9924 }
9925 }
9926 /**
9927 * In this case nrsm becomes
9928 * nrsm->r_start = end;
9929 * nrsm->r_end = rsm->r_end;
9930 * which is un-acked.
9931 * <and>
9932 * rsm->r_end = nrsm->r_start;
9933 * i.e. the remaining un-acked
9934 * piece is left on the left
9935 * hand side.
9936 *
9937 * So we start like this
9938 * rsm |----------| (not acked)
9939 * sackblk |---|
9940 * build it so we have
9941 * rsm |---| (acked)
9942 * nrsm |------| (not acked)
9943 */
9944 counter_u64_add(rack_sack_splits, 1);
9945 rack_clone_rsm(rack, nrsm, rsm, end);
9946 rsm->r_flags &= (~RACK_HAS_FIN);
9947 rsm->r_just_ret = 0;
9948 #ifndef INVARIANTS
9949 (void)tqhash_insert(rack->r_ctl.tqh, nrsm);
9950 #else
9951 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
9952 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
9953 nrsm, insret, rack, rsm);
9954 }
9955 #endif
9956 if (rsm->r_in_tmap) {
9957 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
9958 nrsm->r_in_tmap = 1;
9959 }
9960 nrsm->r_dupack = 0;
9961 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
9962 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
9963 changed += (rsm->r_end - rsm->r_start);
9964 if (rsm->r_flags & RACK_WAS_LOST) {
9965 /*
9966 * Here it is safe to use our function.
9967 */
9968 rack_mark_nolonger_lost(rack, rsm);
9969 }
9970 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
9971
9972 if (rsm->r_in_tmap) /* should be true */
9973 rack_log_sack_passed(tp, rack, rsm, cts);
9974 /* Is Reordering occurring? */
9975 if (rsm->r_flags & RACK_SACK_PASSED) {
9976 rsm->r_flags &= ~RACK_SACK_PASSED;
9977 rack->r_ctl.rc_reorder_ts = cts;
9978 if (rack->r_ctl.rc_reorder_ts == 0)
9979 rack->r_ctl.rc_reorder_ts = 1;
9980 }
9981 if (rack->app_limited_needs_set)
9982 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
9983 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
9984 rsm->r_flags |= RACK_ACKED;
9985 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end);
9986 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__);
9987 if (rsm->r_in_tmap) {
9988 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
9989 rsm->r_in_tmap = 0;
9990 }
9991 }
9992 } else if (start != end) {
9993 /*
9994 * The block was already acked.
9995 */
9996 counter_u64_add(rack_sack_skipped_acked, 1);
9997 }
9998 out:
9999 if (rsm &&
10000 ((rsm->r_flags & RACK_TLP) == 0) &&
10001 (rsm->r_flags & RACK_ACKED)) {
10002 /*
10003 * Now can we merge where we worked
10004 * with either the previous or
10005 * next block?
10006 */
10007 next = tqhash_next(rack->r_ctl.tqh, rsm);
10008 while (next) {
10009 if (next->r_flags & RACK_TLP)
10010 break;
10011 /* Only allow merges between ones in or out of GP window */
10012 if ((next->r_flags & RACK_IN_GP_WIN) &&
10013 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) {
10014 break;
10015 }
10016 if ((rsm->r_flags & RACK_IN_GP_WIN) &&
10017 ((next->r_flags & RACK_IN_GP_WIN) == 0)) {
10018 break;
10019 }
10020 if (rsm->bindex != next->bindex)
10021 break;
10022 if (rsm->r_flags & RACK_STRADDLE)
10023 break;
10024 if (rsm->r_flags & RACK_IS_PCM)
10025 break;
10026 if (next->r_flags & RACK_STRADDLE)
10027 break;
10028 if (next->r_flags & RACK_IS_PCM)
10029 break;
10030 if (next->r_flags & RACK_ACKED) {
10031 /* yep this and next can be merged */
10032 rsm = rack_merge_rsm(rack, rsm, next);
10033 next = tqhash_next(rack->r_ctl.tqh, rsm);
10034 } else
10035 break;
10036 }
10037 /* Now what about the previous?
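 * (Same rules as the forward merge above: no TLPs, both entries
 * ACKED, same hash bucket, neither straddling a bucket boundary,
 * no PCM probes, and matching RACK_IN_GP_WIN state.)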
*/ 10038 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10039 while (prev) { 10040 if (prev->r_flags & RACK_TLP) 10041 break; 10042 /* Only allow merges between ones in or out of GP window */ 10043 if ((prev->r_flags & RACK_IN_GP_WIN) && 10044 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { 10045 break; 10046 } 10047 if ((rsm->r_flags & RACK_IN_GP_WIN) && 10048 ((prev->r_flags & RACK_IN_GP_WIN) == 0)) { 10049 break; 10050 } 10051 if (rsm->bindex != prev->bindex) 10052 break; 10053 if (rsm->r_flags & RACK_STRADDLE) 10054 break; 10055 if (rsm->r_flags & RACK_IS_PCM) 10056 break; 10057 if (prev->r_flags & RACK_STRADDLE) 10058 break; 10059 if (prev->r_flags & RACK_IS_PCM) 10060 break; 10061 if (prev->r_flags & RACK_ACKED) { 10062 /* yep the previous and this can be merged */ 10063 rsm = rack_merge_rsm(rack, prev, rsm); 10064 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10065 } else 10066 break; 10067 } 10068 } 10069 if (used_ref == 0) { 10070 counter_u64_add(rack_sack_proc_all, 1); 10071 } else { 10072 counter_u64_add(rack_sack_proc_short, 1); 10073 } 10074 /* Save off the next one for quick reference. */ 10075 nrsm = tqhash_find(rack->r_ctl.tqh, end); 10076 *prsm = rack->r_ctl.rc_sacklast = nrsm; 10077 return (changed); 10078 } 10079 10080 static void inline 10081 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 10082 { 10083 struct rack_sendmap *tmap; 10084 10085 tmap = NULL; 10086 while (rsm && (rsm->r_flags & RACK_ACKED)) { 10087 /* Its no longer sacked, mark it so */ 10088 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 10089 #ifdef INVARIANTS 10090 if (rsm->r_in_tmap) { 10091 panic("rack:%p rsm:%p flags:0x%x in tmap?", 10092 rack, rsm, rsm->r_flags); 10093 } 10094 #endif 10095 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 10096 /* Rebuild it into our tmap */ 10097 if (tmap == NULL) { 10098 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10099 tmap = rsm; 10100 } else { 10101 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 10102 tmap = rsm; 10103 } 10104 tmap->r_in_tmap = 1; 10105 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 10106 } 10107 /* 10108 * Now lets possibly clear the sack filter so we start 10109 * recognizing sacks that cover this area. 10110 */ 10111 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 10112 10113 } 10114 10115 10116 static void inline 10117 rack_rsm_sender_update(struct tcp_rack *rack, struct tcpcb *tp, struct rack_sendmap *rsm, uint8_t from) 10118 { 10119 /* 10120 * We look at advancing the end send time for our GP 10121 * measurement tracking only as the cumulative acknowledgment 10122 * moves forward. You might wonder about this, why not 10123 * at every transmission or retransmission within the 10124 * GP window update the rc_gp_cumack_ts? Well its rather 10125 * nuanced but basically the GP window *may* expand (as 10126 * it does below) or worse and harder to track it may shrink. 10127 * 10128 * This last makes it impossible to track at the time of 10129 * the send, since you may set forward your rc_gp_cumack_ts 10130 * when you send, because that send *is* in your currently 10131 * "guessed" window, but then it shrinks. Now which was 10132 * the send time of the last bytes in the window, by the 10133 * time you ask that question that part of the sendmap 10134 * is freed. So you don't know and you will have too 10135 * long of send window. 
Instead, by updating the time
10136 * marker only when the cumack advances, this assures us
10137 * that we will have only the sends in the window of our
10138 * GP measurement.
10139 *
10140 * Another complication from this is the
10141 * merging of sendmap entries. During SACK processing this
10142 * can happen to conserve the sendmap size. That breaks
10143 * everything down in tracking the send window of the GP
10144 * estimate. So to prevent that and keep it working with
10145 * a tiny bit more limited merging, we only allow like
10146 * types to be merged. I.e. if two sends are in the GP window
10147 * then it's ok to merge them together. If two sends are not
10148 * in the GP window it's ok to merge them together too. Though
10149 * one send in and one send out cannot be merged. We combine
10150 * this with never allowing the shrinking of the GP window when
10151 * we are in recovery so that we can properly calculate the
10152 * sending times.
10153 *
10154 * This all of course seems complicated, because it is.. :)
10155 *
10156 * The cum-ack is being advanced upon the sendmap.
10157 * If we are not doing a GP estimate don't
10158 * proceed.
10159 */
10160 uint64_t ts;
10161
10162 if ((tp->t_flags & TF_GPUTINPROG) == 0)
10163 return;
10164 /*
10165 * If this sendmap entry is going
10166 * beyond the measurement window we had picked,
10167 * expand the measurement window by that much.
10168 */
10169 if (SEQ_GT(rsm->r_end, tp->gput_ack)) {
10170 tp->gput_ack = rsm->r_end;
10171 }
10172 /*
10173 * If we have not set up an ack, then we
10174 * have no idea if the newly acked pieces
10175 * will be "in our seq measurement range". If
10176 * it is when we clear the app_limited_needs_set
10177 * flag the timestamp will be updated.
10178 */
10179 if (rack->app_limited_needs_set)
10180 return;
10181 /*
10182 * Finally, we grab out the latest timestamp
10183 * that this packet was sent and then see
10184 * if:
10185 * a) The packet touches our newly defined GP range.
10186 * b) The time is greater (newer) than the
10187 * one we currently have. If so we update
10188 * our sending end time window.
10189 *
10190 * Note we *do not* do this at send time. The reason
10191 * is that if you do you *may* pick up a newer timestamp
10192 * for a range you are not going to measure. We project
10193 * out how far and then sometimes modify that to be
10194 * smaller. If that occurs then you will have a send
10195 * that does not belong to the range included.
10196 */
10197 if ((ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) <=
10198 rack->r_ctl.rc_gp_cumack_ts)
10199 return;
10200 if (rack_in_gp_window(tp, rsm)) {
10201 rack->r_ctl.rc_gp_cumack_ts = ts;
10202 rack_log_gpset(rack, tp->gput_ack, (uint32_t)ts, rsm->r_end,
10203 __LINE__, from, rsm);
10204 }
10205 }
10206
10207 static void
10208 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to, uint64_t acktime)
10209 {
10210 struct rack_sendmap *rsm;
10211 /*
10212 * The ACK point is advancing to th_ack, we must drop off
10213 * the packets in the rack log and calculate any eligible
10214 * RTTs.
10215 */
10216
10217 if (sack_filter_blks_used(&rack->r_ctl.rack_sf)) {
10218 /*
10219 * If we have some sack blocks in the filter
10220 * lets prune them out by calling sfb with no blocks.
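 * (Sketch with made-up numbers: if the filter still remembers a
 * block such as 1000-2000 and th_ack has advanced to 3500, the
 * call below with a NULL block list lets the filter drop entries
 * that now sit below the cumulative ACK.)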
10221 */ 10222 sack_filter_blks(tp, &rack->r_ctl.rack_sf, NULL, 0, th_ack); 10223 } 10224 if (SEQ_GT(th_ack, tp->snd_una)) { 10225 /* Clear any app ack remembered settings */ 10226 rack->r_ctl.cleared_app_ack = 0; 10227 } 10228 rack->r_wanted_output = 1; 10229 if (SEQ_GT(th_ack, tp->snd_una)) 10230 rack->r_ctl.last_cumack_advance = acktime; 10231 10232 /* Tend any TLP that has been marked for 1/2 the seq space (its old) */ 10233 if ((rack->rc_last_tlp_acked_set == 1)&& 10234 (rack->rc_last_tlp_past_cumack == 1) && 10235 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { 10236 /* 10237 * We have reached the point where our last rack 10238 * tlp retransmit sequence is ahead of the cum-ack. 10239 * This can only happen when the cum-ack moves all 10240 * the way around (its been a full 2^^31+1 bytes 10241 * or more since we sent a retransmitted TLP). Lets 10242 * turn off the valid flag since its not really valid. 10243 * 10244 * Note since sack's also turn on this event we have 10245 * a complication, we have to wait to age it out until 10246 * the cum-ack is by the TLP before checking which is 10247 * what the next else clause does. 10248 */ 10249 rack_log_dsack_event(rack, 9, __LINE__, 10250 rack->r_ctl.last_tlp_acked_start, 10251 rack->r_ctl.last_tlp_acked_end); 10252 rack->rc_last_tlp_acked_set = 0; 10253 rack->rc_last_tlp_past_cumack = 0; 10254 } else if ((rack->rc_last_tlp_acked_set == 1) && 10255 (rack->rc_last_tlp_past_cumack == 0) && 10256 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { 10257 /* 10258 * It is safe to start aging TLP's out. 10259 */ 10260 rack->rc_last_tlp_past_cumack = 1; 10261 } 10262 /* We do the same for the tlp send seq as well */ 10263 if ((rack->rc_last_sent_tlp_seq_valid == 1) && 10264 (rack->rc_last_sent_tlp_past_cumack == 1) && 10265 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { 10266 rack_log_dsack_event(rack, 9, __LINE__, 10267 rack->r_ctl.last_sent_tlp_seq, 10268 (rack->r_ctl.last_sent_tlp_seq + 10269 rack->r_ctl.last_sent_tlp_len)); 10270 rack->rc_last_sent_tlp_seq_valid = 0; 10271 rack->rc_last_sent_tlp_past_cumack = 0; 10272 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && 10273 (rack->rc_last_sent_tlp_past_cumack == 0) && 10274 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { 10275 /* 10276 * It is safe to start aging TLP's send. 10277 */ 10278 rack->rc_last_sent_tlp_past_cumack = 1; 10279 } 10280 more: 10281 rsm = tqhash_min(rack->r_ctl.tqh); 10282 if (rsm == NULL) { 10283 if ((th_ack - 1) == tp->iss) { 10284 /* 10285 * For the SYN incoming case we will not 10286 * have called tcp_output for the sending of 10287 * the SYN, so there will be no map. All 10288 * other cases should probably be a panic. 10289 */ 10290 return; 10291 } 10292 if (tp->t_flags & TF_SENTFIN) { 10293 /* if we sent a FIN we often will not have map */ 10294 return; 10295 } 10296 #ifdef INVARIANTS 10297 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u\n", 10298 tp, 10299 tp->t_state, th_ack, rack, 10300 tp->snd_una, tp->snd_max); 10301 #endif 10302 return; 10303 } 10304 if (SEQ_LT(th_ack, rsm->r_start)) { 10305 /* Huh map is missing this */ 10306 #ifdef INVARIANTS 10307 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 10308 rsm->r_start, 10309 th_ack, tp->t_state, rack->r_state); 10310 #endif 10311 return; 10312 } 10313 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); 10314 10315 /* Now was it a retransmitted TLP? 
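 * (If so, remember its start/end below so that a later DSACK
 * covering this range is attributed to the TLP retransmission
 * rather than counted as reordering.)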
*/ 10316 if ((rsm->r_flags & RACK_TLP) && 10317 (rsm->r_rtr_cnt > 1)) { 10318 /* 10319 * Yes, this rsm was a TLP and retransmitted, remember that 10320 * since if a DSACK comes back on this we don't want 10321 * to think of it as a reordered segment. This may 10322 * get updated again with possibly even other TLPs 10323 * in flight, but thats ok. Only when we don't send 10324 * a retransmitted TLP for 1/2 the sequences space 10325 * will it get turned off (above). 10326 */ 10327 if (rack->rc_last_tlp_acked_set && 10328 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 10329 /* 10330 * We already turned this on since the end matches, 10331 * the previous one was a partially ack now we 10332 * are getting another one (maybe all of it). 10333 */ 10334 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 10335 /* 10336 * Lets make sure we have all of it though. 10337 */ 10338 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 10339 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10340 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10341 rack->r_ctl.last_tlp_acked_end); 10342 } 10343 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 10344 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10345 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10346 rack->r_ctl.last_tlp_acked_end); 10347 } 10348 } else { 10349 rack->rc_last_tlp_past_cumack = 1; 10350 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10351 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10352 rack->rc_last_tlp_acked_set = 1; 10353 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 10354 } 10355 } 10356 /* Now do we consume the whole thing? */ 10357 rack->r_ctl.last_tmit_time_acked = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 10358 if (SEQ_GEQ(th_ack, rsm->r_end)) { 10359 /* Its all consumed. */ 10360 uint32_t left; 10361 uint8_t newly_acked; 10362 10363 if (rsm->r_flags & RACK_WAS_LOST) { 10364 /* 10365 * This can happen when we marked it as lost 10366 * and yet before retransmitting we get an ack 10367 * which can happen due to reordering. 10368 */ 10369 rack_mark_nolonger_lost(rack, rsm); 10370 } 10371 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 10372 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 10373 rsm->r_rtr_bytes = 0; 10374 /* 10375 * Record the time of highest cumack sent if its in our measurement 10376 * window and possibly bump out the end. 10377 */ 10378 rack_rsm_sender_update(rack, tp, rsm, 4); 10379 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); 10380 if (rsm->r_in_tmap) { 10381 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10382 rsm->r_in_tmap = 0; 10383 } 10384 newly_acked = 1; 10385 if (rsm->r_flags & RACK_ACKED) { 10386 /* 10387 * It was acked on the scoreboard -- remove 10388 * it from total 10389 */ 10390 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 10391 newly_acked = 0; 10392 } else if (rsm->r_flags & RACK_SACK_PASSED) { 10393 /* 10394 * There are segments ACKED on the 10395 * scoreboard further up. We are seeing 10396 * reordering. 10397 */ 10398 rsm->r_flags &= ~RACK_SACK_PASSED; 10399 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10400 rsm->r_flags |= RACK_ACKED; 10401 rack->r_ctl.rc_reorder_ts = cts; 10402 if (rack->r_ctl.rc_reorder_ts == 0) 10403 rack->r_ctl.rc_reorder_ts = 1; 10404 if (rack->r_ent_rec_ns) { 10405 /* 10406 * We have sent no more, and we saw an sack 10407 * then ack arrive. 
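 * (r_might_revert is only a hint; rack_handle_might_revert() will
 * undo the congestion state later only if nothing outstanding is
 * still marked RACK_SACK_PASSED.)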
10408 */ 10409 rack->r_might_revert = 1; 10410 } 10411 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); 10412 } else { 10413 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); 10414 } 10415 if ((rsm->r_flags & RACK_TO_REXT) && 10416 (tp->t_flags & TF_RCVD_TSTMP) && 10417 (to->to_flags & TOF_TS) && 10418 (to->to_tsecr != 0) && 10419 (tp->t_flags & TF_PREVVALID)) { 10420 /* 10421 * We can use the timestamp to see 10422 * if this retransmission was from the 10423 * first transmit. If so we made a mistake. 10424 */ 10425 tp->t_flags &= ~TF_PREVVALID; 10426 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 10427 /* The first transmit is what this ack is for */ 10428 rack_cong_signal(tp, CC_RTO_ERR, th_ack, __LINE__); 10429 } 10430 } 10431 left = th_ack - rsm->r_end; 10432 if (rack->app_limited_needs_set && newly_acked) 10433 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 10434 /* Free back to zone */ 10435 rack_free(rack, rsm); 10436 if (left) { 10437 goto more; 10438 } 10439 /* Check for reneging */ 10440 rsm = tqhash_min(rack->r_ctl.tqh); 10441 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 10442 /* 10443 * The peer has moved snd_una up to 10444 * the edge of this send, i.e. one 10445 * that it had previously acked. The only 10446 * way that can be true if the peer threw 10447 * away data (space issues) that it had 10448 * previously sacked (else it would have 10449 * given us snd_una up to (rsm->r_end). 10450 * We need to undo the acked markings here. 10451 * 10452 * Note we have to look to make sure th_ack is 10453 * our rsm->r_start in case we get an old ack 10454 * where th_ack is behind snd_una. 10455 */ 10456 rack_peer_reneges(rack, rsm, th_ack); 10457 } 10458 return; 10459 } 10460 if (rsm->r_flags & RACK_ACKED) { 10461 /* 10462 * It was acked on the scoreboard -- remove it from 10463 * total for the part being cum-acked. 10464 */ 10465 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 10466 } else { 10467 rack_update_pcm_ack(rack, 1, rsm->r_start, th_ack); 10468 } 10469 /* And what about the lost flag? */ 10470 if (rsm->r_flags & RACK_WAS_LOST) { 10471 /* 10472 * This can happen when we marked it as lost 10473 * and yet before retransmitting we get an ack 10474 * which can happen due to reordering. In this 10475 * case its only a partial ack of the send. 10476 */ 10477 rack_mark_nolonger_lost(rack, rsm); 10478 } 10479 /* 10480 * Clear the dup ack count for 10481 * the piece that remains. 10482 */ 10483 rsm->r_dupack = 0; 10484 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 10485 if (rsm->r_rtr_bytes) { 10486 /* 10487 * It was retransmitted adjust the 10488 * sack holes for what was acked. 10489 */ 10490 int ack_am; 10491 10492 ack_am = (th_ack - rsm->r_start); 10493 if (ack_am >= rsm->r_rtr_bytes) { 10494 rack->r_ctl.rc_holes_rxt -= ack_am; 10495 rsm->r_rtr_bytes -= ack_am; 10496 } 10497 } 10498 /* 10499 * Update where the piece starts and record 10500 * the time of send of highest cumack sent if 10501 * its in our GP range. 
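 * (Worked example, hypothetical numbers: if this rsm spans
 * 2000-5000 and th_ack is 3000, only the first 1000 bytes are
 * cum-acked, so soff advances by 1000 and the trim below moves
 * r_start up to 3000, leaving the 3000-5000 remainder in the map.)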
10502 */ 10503 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 10504 /* Now we need to move our offset forward too */ 10505 if (rsm->m && 10506 ((rsm->orig_m_len != rsm->m->m_len) || 10507 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { 10508 /* Fix up the orig_m_len and possibly the mbuf offset */ 10509 rack_adjust_orig_mlen(rsm); 10510 } 10511 rsm->soff += (th_ack - rsm->r_start); 10512 rack_rsm_sender_update(rack, tp, rsm, 5); 10513 /* The trim will move th_ack into r_start for us */ 10514 tqhash_trim(rack->r_ctl.tqh, th_ack); 10515 /* Now do we need to move the mbuf fwd too? */ 10516 { 10517 struct mbuf *m; 10518 uint32_t soff; 10519 10520 m = rsm->m; 10521 soff = rsm->soff; 10522 if (m) { 10523 while (soff >= m->m_len) { 10524 soff -= m->m_len; 10525 KASSERT((m->m_next != NULL), 10526 (" rsm:%p off:%u soff:%u m:%p", 10527 rsm, rsm->soff, soff, m)); 10528 m = m->m_next; 10529 if (m == NULL) { 10530 /* 10531 * This is a fall-back that prevents a panic. In reality 10532 * we should be able to walk the mbuf's and find our place. 10533 * At this point snd_una has not been updated with the sbcut() yet 10534 * but tqhash_trim did update rsm->r_start so the offset calcuation 10535 * should work fine. This is undesirable since we will take cache 10536 * hits to access the socket buffer. And even more puzzling is that 10537 * it happens occasionally. It should not :( 10538 */ 10539 m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 10540 (rsm->r_start - tp->snd_una), 10541 &soff); 10542 break; 10543 } 10544 } 10545 /* 10546 * Now save in our updated values. 10547 */ 10548 rsm->m = m; 10549 rsm->soff = soff; 10550 rsm->orig_m_len = rsm->m->m_len; 10551 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 10552 } 10553 } 10554 if (rack->app_limited_needs_set && 10555 SEQ_GEQ(th_ack, tp->gput_seq)) 10556 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 10557 } 10558 10559 static void 10560 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 10561 { 10562 struct rack_sendmap *rsm; 10563 int sack_pass_fnd = 0; 10564 10565 if (rack->r_might_revert) { 10566 /* 10567 * Ok we have reordering, have not sent anything, we 10568 * might want to revert the congestion state if nothing 10569 * further has SACK_PASSED on it. Lets check. 10570 * 10571 * We also get here when we have DSACKs come in for 10572 * all the data that we FR'd. Note that a rxt or tlp 10573 * timer clears this from happening. 10574 */ 10575 10576 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 10577 if (rsm->r_flags & RACK_SACK_PASSED) { 10578 sack_pass_fnd = 1; 10579 break; 10580 } 10581 } 10582 if (sack_pass_fnd == 0) { 10583 /* 10584 * We went into recovery 10585 * incorrectly due to reordering! 10586 */ 10587 int orig_cwnd; 10588 10589 rack->r_ent_rec_ns = 0; 10590 orig_cwnd = tp->snd_cwnd; 10591 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 10592 tp->snd_recover = tp->snd_una; 10593 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 10594 if (IN_RECOVERY(tp->t_flags)) { 10595 rack_exit_recovery(tp, rack, 3); 10596 if ((rack->rto_from_rec == 1) && (rack_ssthresh_rest_rto_rec != 0) ){ 10597 /* 10598 * We were in recovery, had an RTO 10599 * and then re-entered recovery (more sack's arrived) 10600 * and we have properly recorded the old ssthresh from 10601 * the first recovery. We want to be able to slow-start 10602 * back to this level. The ssthresh from the timeout 10603 * and then back into recovery will end up most likely 10604 * to be min(cwnd=1mss, 2mss). 
Which makes it basically 10605 * so we get no slow-start after our RTO. 10606 */ 10607 rack->rto_from_rec = 0; 10608 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) 10609 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; 10610 } 10611 } 10612 } 10613 rack->r_might_revert = 0; 10614 } 10615 } 10616 10617 10618 static int 10619 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 10620 { 10621 10622 uint32_t am, l_end; 10623 int was_tlp = 0; 10624 10625 if (SEQ_GT(end, start)) 10626 am = end - start; 10627 else 10628 am = 0; 10629 if ((rack->rc_last_tlp_acked_set ) && 10630 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && 10631 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { 10632 /* 10633 * The DSACK is because of a TLP which we don't 10634 * do anything with the reordering window over since 10635 * it was not reordering that caused the DSACK but 10636 * our previous retransmit TLP. 10637 */ 10638 rack_log_dsack_event(rack, 7, __LINE__, start, end); 10639 was_tlp = 1; 10640 goto skip_dsack_round; 10641 } 10642 if (rack->rc_last_sent_tlp_seq_valid) { 10643 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; 10644 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && 10645 (SEQ_LEQ(end, l_end))) { 10646 /* 10647 * This dsack is from the last sent TLP, ignore it 10648 * for reordering purposes. 10649 */ 10650 rack_log_dsack_event(rack, 7, __LINE__, start, end); 10651 was_tlp = 1; 10652 goto skip_dsack_round; 10653 } 10654 } 10655 if (rack->rc_dsack_round_seen == 0) { 10656 rack->rc_dsack_round_seen = 1; 10657 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; 10658 rack->r_ctl.num_dsack++; 10659 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ 10660 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); 10661 } 10662 skip_dsack_round: 10663 /* 10664 * We keep track of how many DSACK blocks we get 10665 * after a recovery incident. 10666 */ 10667 rack->r_ctl.dsack_byte_cnt += am; 10668 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 10669 rack->r_ctl.retran_during_recovery && 10670 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 10671 /* 10672 * False recovery most likely culprit is reordering. If 10673 * nothing else is missing we need to revert. 10674 */ 10675 rack->r_might_revert = 1; 10676 rack_handle_might_revert(rack->rc_tp, rack); 10677 rack->r_might_revert = 0; 10678 rack->r_ctl.retran_during_recovery = 0; 10679 rack->r_ctl.dsack_byte_cnt = 0; 10680 } 10681 return (was_tlp); 10682 } 10683 10684 static uint32_t 10685 do_rack_compute_pipe(struct tcpcb *tp, struct tcp_rack *rack, uint32_t snd_una) 10686 { 10687 return (((tp->snd_max - snd_una) - 10688 (rack->r_ctl.rc_sacked + rack->r_ctl.rc_considered_lost)) + rack->r_ctl.rc_holes_rxt); 10689 } 10690 10691 static int32_t 10692 rack_compute_pipe(struct tcpcb *tp) 10693 { 10694 return ((int32_t)do_rack_compute_pipe(tp, 10695 (struct tcp_rack *)tp->t_fb_ptr, 10696 tp->snd_una)); 10697 } 10698 10699 static void 10700 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 10701 { 10702 /* Deal with changed and PRR here (in recovery only) */ 10703 uint32_t pipe, snd_una; 10704 10705 rack->r_ctl.rc_prr_delivered += changed; 10706 10707 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 10708 /* 10709 * It is all outstanding, we are application limited 10710 * and thus we don't need more room to send anything. 10711 * Note we use tp->snd_una here and not th_ack because 10712 * the data as yet not been cut from the sb. 
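 * (E.g., with 30 kB in the socket buffer and all 30 kB already
 * outstanding there is nothing new PRR could release, so
 * rc_prr_sndcnt is simply forced to zero below. Otherwise the code
 * that follows does RFC 6937 style proportional rate reduction:
 * while pipe exceeds ssthresh, sndcnt is roughly
 * prr_delivered * ssthresh / recovery_fs minus what was already
 * sent during recovery (rc_prr_out).)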
10713 */ 10714 rack->r_ctl.rc_prr_sndcnt = 0; 10715 return; 10716 } 10717 /* Compute prr_sndcnt */ 10718 if (SEQ_GT(tp->snd_una, th_ack)) { 10719 snd_una = tp->snd_una; 10720 } else { 10721 snd_una = th_ack; 10722 } 10723 pipe = do_rack_compute_pipe(tp, rack, snd_una); 10724 if (pipe > tp->snd_ssthresh) { 10725 long sndcnt; 10726 10727 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 10728 if (rack->r_ctl.rc_prr_recovery_fs > 0) 10729 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 10730 else { 10731 rack->r_ctl.rc_prr_sndcnt = 0; 10732 rack_log_to_prr(rack, 9, 0, __LINE__); 10733 sndcnt = 0; 10734 } 10735 sndcnt++; 10736 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 10737 sndcnt -= rack->r_ctl.rc_prr_out; 10738 else 10739 sndcnt = 0; 10740 rack->r_ctl.rc_prr_sndcnt = sndcnt; 10741 rack_log_to_prr(rack, 10, 0, __LINE__); 10742 } else { 10743 uint32_t limit; 10744 10745 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 10746 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 10747 else 10748 limit = 0; 10749 if (changed > limit) 10750 limit = changed; 10751 limit += ctf_fixed_maxseg(tp); 10752 if (tp->snd_ssthresh > pipe) { 10753 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 10754 rack_log_to_prr(rack, 11, 0, __LINE__); 10755 } else { 10756 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 10757 rack_log_to_prr(rack, 12, 0, __LINE__); 10758 } 10759 } 10760 } 10761 10762 static void 10763 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck, 10764 int *dsack_seen, int *sacks_seen) 10765 { 10766 uint32_t changed; 10767 struct tcp_rack *rack; 10768 struct rack_sendmap *rsm; 10769 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 10770 register uint32_t th_ack; 10771 int32_t i, j, k, num_sack_blks = 0; 10772 uint32_t cts, acked, ack_point; 10773 int loop_start = 0; 10774 uint32_t tsused; 10775 uint32_t segsiz; 10776 10777 10778 INP_WLOCK_ASSERT(tptoinpcb(tp)); 10779 if (tcp_get_flags(th) & TH_RST) { 10780 /* We don't log resets */ 10781 return; 10782 } 10783 rack = (struct tcp_rack *)tp->t_fb_ptr; 10784 cts = tcp_get_usecs(NULL); 10785 rsm = tqhash_min(rack->r_ctl.tqh); 10786 changed = 0; 10787 th_ack = th->th_ack; 10788 segsiz = ctf_fixed_maxseg(rack->rc_tp); 10789 if (BYTES_THIS_ACK(tp, th) >= segsiz) { 10790 /* 10791 * You only get credit for 10792 * MSS and greater (and you get extra 10793 * credit for larger cum-ack moves). 10794 */ 10795 int ac; 10796 10797 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 10798 counter_u64_add(rack_ack_total, ac); 10799 } 10800 if (SEQ_GT(th_ack, tp->snd_una)) { 10801 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 10802 tp->t_acktime = ticks; 10803 } 10804 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 10805 changed = th_ack - rsm->r_start; 10806 if (changed) { 10807 rack_process_to_cumack(tp, rack, th_ack, cts, to, 10808 tcp_tv_to_lusec(&rack->r_ctl.act_rcv_time)); 10809 } 10810 if ((to->to_flags & TOF_SACK) == 0) { 10811 /* We are done nothing left and no sack. */ 10812 rack_handle_might_revert(tp, rack); 10813 /* 10814 * For cases where we struck a dup-ack 10815 * with no SACK, add to the changes so 10816 * PRR will work right. 
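 * (Hypothetical example: a pure duplicate ACK that carries no SACK
 * blocks credits one MSS to 'changed' below, so the PRR
 * bookkeeping in rack_update_prr() still sees some delivery and
 * can release data to send.)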
10817 */ 10818 if (dup_ack_struck && (changed == 0)) { 10819 changed += ctf_fixed_maxseg(rack->rc_tp); 10820 } 10821 goto out; 10822 } 10823 /* Sack block processing */ 10824 if (SEQ_GT(th_ack, tp->snd_una)) 10825 ack_point = th_ack; 10826 else 10827 ack_point = tp->snd_una; 10828 for (i = 0; i < to->to_nsacks; i++) { 10829 bcopy((to->to_sacks + i * TCPOLEN_SACK), 10830 &sack, sizeof(sack)); 10831 sack.start = ntohl(sack.start); 10832 sack.end = ntohl(sack.end); 10833 if (SEQ_GT(sack.end, sack.start) && 10834 SEQ_GT(sack.start, ack_point) && 10835 SEQ_LT(sack.start, tp->snd_max) && 10836 SEQ_GT(sack.end, ack_point) && 10837 SEQ_LEQ(sack.end, tp->snd_max)) { 10838 sack_blocks[num_sack_blks] = sack; 10839 num_sack_blks++; 10840 } else if (SEQ_LEQ(sack.start, th_ack) && 10841 SEQ_LEQ(sack.end, th_ack)) { 10842 int was_tlp; 10843 10844 if (dsack_seen != NULL) 10845 *dsack_seen = 1; 10846 was_tlp = rack_note_dsack(rack, sack.start, sack.end); 10847 /* 10848 * Its a D-SACK block. 10849 */ 10850 tcp_record_dsack(tp, sack.start, sack.end, was_tlp); 10851 } 10852 } 10853 if (rack->rc_dsack_round_seen) { 10854 /* Is the dsack roound over? */ 10855 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { 10856 /* Yes it is */ 10857 rack->rc_dsack_round_seen = 0; 10858 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 10859 } 10860 } 10861 /* 10862 * Sort the SACK blocks so we can update the rack scoreboard with 10863 * just one pass. 10864 */ 10865 num_sack_blks = sack_filter_blks(tp, &rack->r_ctl.rack_sf, sack_blocks, 10866 num_sack_blks, th->th_ack); 10867 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 10868 if (sacks_seen != NULL) 10869 *sacks_seen = num_sack_blks; 10870 if (num_sack_blks == 0) { 10871 /* Nothing to sack, but we need to update counts */ 10872 goto out_with_totals; 10873 } 10874 /* Its a sack of some sort */ 10875 if (num_sack_blks < 2) { 10876 /* Only one, we don't need to sort */ 10877 goto do_sack_work; 10878 } 10879 /* Sort the sacks */ 10880 for (i = 0; i < num_sack_blks; i++) { 10881 for (j = i + 1; j < num_sack_blks; j++) { 10882 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 10883 sack = sack_blocks[i]; 10884 sack_blocks[i] = sack_blocks[j]; 10885 sack_blocks[j] = sack; 10886 } 10887 } 10888 } 10889 /* 10890 * Now are any of the sack block ends the same (yes some 10891 * implementations send these)? 10892 */ 10893 again: 10894 if (num_sack_blks == 0) 10895 goto out_with_totals; 10896 if (num_sack_blks > 1) { 10897 for (i = 0; i < num_sack_blks; i++) { 10898 for (j = i + 1; j < num_sack_blks; j++) { 10899 if (sack_blocks[i].end == sack_blocks[j].end) { 10900 /* 10901 * Ok these two have the same end we 10902 * want the smallest end and then 10903 * throw away the larger and start 10904 * again. 10905 */ 10906 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 10907 /* 10908 * The second block covers 10909 * more area use that 10910 */ 10911 sack_blocks[i].start = sack_blocks[j].start; 10912 } 10913 /* 10914 * Now collapse out the dup-sack and 10915 * lower the count 10916 */ 10917 for (k = (j + 1); k < num_sack_blks; k++) { 10918 sack_blocks[j].start = sack_blocks[k].start; 10919 sack_blocks[j].end = sack_blocks[k].end; 10920 j++; 10921 } 10922 num_sack_blks--; 10923 goto again; 10924 } 10925 } 10926 } 10927 } 10928 do_sack_work: 10929 /* 10930 * First lets look to see if 10931 * we have retransmitted and 10932 * can use the transmit next? 
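 * (I.e., after a fast retransmit the oldest entry on the transmit
 * map is usually what the first (lowest) sack block now covers, so
 * that block can be handed straight to rack_proc_sack_blk() with a
 * cached starting point instead of searching the hash from
 * scratch.)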
10933 */ 10934 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 10935 if (rsm && 10936 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 10937 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 10938 /* 10939 * We probably did the FR and the next 10940 * SACK in continues as we would expect. 10941 */ 10942 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, segsiz); 10943 if (acked) { 10944 rack->r_wanted_output = 1; 10945 changed += acked; 10946 } 10947 if (num_sack_blks == 1) { 10948 /* 10949 * This is what we would expect from 10950 * a normal implementation to happen 10951 * after we have retransmitted the FR, 10952 * i.e the sack-filter pushes down 10953 * to 1 block and the next to be retransmitted 10954 * is the sequence in the sack block (has more 10955 * are acked). Count this as ACK'd data to boost 10956 * up the chances of recovering any false positives. 10957 */ 10958 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 10959 counter_u64_add(rack_express_sack, 1); 10960 goto out_with_totals; 10961 } else { 10962 /* 10963 * Start the loop through the 10964 * rest of blocks, past the first block. 10965 */ 10966 loop_start = 1; 10967 } 10968 } 10969 counter_u64_add(rack_sack_total, 1); 10970 rsm = rack->r_ctl.rc_sacklast; 10971 for (i = loop_start; i < num_sack_blks; i++) { 10972 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, segsiz); 10973 if (acked) { 10974 rack->r_wanted_output = 1; 10975 changed += acked; 10976 } 10977 } 10978 out_with_totals: 10979 if (num_sack_blks > 1) { 10980 /* 10981 * You get an extra stroke if 10982 * you have more than one sack-blk, this 10983 * could be where we are skipping forward 10984 * and the sack-filter is still working, or 10985 * it could be an attacker constantly 10986 * moving us. 10987 */ 10988 counter_u64_add(rack_move_some, 1); 10989 } 10990 out: 10991 if (changed) { 10992 /* Something changed cancel the rack timer */ 10993 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10994 } 10995 tsused = tcp_get_usecs(NULL); 10996 rsm = tcp_rack_output(tp, rack, tsused); 10997 if ((!IN_FASTRECOVERY(tp->t_flags)) && 10998 rsm && 10999 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { 11000 /* Enter recovery */ 11001 entered_recovery = 1; 11002 rack_cong_signal(tp, CC_NDUPACK, th_ack, __LINE__); 11003 /* 11004 * When we enter recovery we need to assure we send 11005 * one packet. 11006 */ 11007 if (rack->rack_no_prr == 0) { 11008 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 11009 rack_log_to_prr(rack, 8, 0, __LINE__); 11010 } 11011 rack->r_timer_override = 1; 11012 rack->r_early = 0; 11013 rack->r_ctl.rc_agg_early = 0; 11014 } else if (IN_FASTRECOVERY(tp->t_flags) && 11015 rsm && 11016 (rack->r_rr_config == 3)) { 11017 /* 11018 * Assure we can output and we get no 11019 * remembered pace time except the retransmit. 11020 */ 11021 rack->r_timer_override = 1; 11022 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 11023 rack->r_ctl.rc_resend = rsm; 11024 } 11025 if (IN_FASTRECOVERY(tp->t_flags) && 11026 (rack->rack_no_prr == 0) && 11027 (entered_recovery == 0)) { 11028 rack_update_prr(tp, rack, changed, th_ack); 11029 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 11030 ((tcp_in_hpts(rack->rc_tp) == 0) && 11031 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 11032 /* 11033 * If you are pacing output you don't want 11034 * to override. 
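 * (The check above only overrides the timer when the connection is
 * not sitting in HPTS and no paced output is pending, i.e.
 * PACE_PKT_OUTPUT is clear; if a pacing slot is already scheduled
 * we let it send the PRR allowance.)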
11035 */
11036 rack->r_early = 0;
11037 rack->r_ctl.rc_agg_early = 0;
11038 rack->r_timer_override = 1;
11039 }
11040 }
11041 }
11042
11043 static void
11044 rack_strike_dupack(struct tcp_rack *rack, tcp_seq th_ack)
11045 {
11046 struct rack_sendmap *rsm;
11047
11048 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
11049 while (rsm) {
11050 /*
11051 * We need to skip anything already set
11052 * to be retransmitted.
11053 */
11054 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
11055 (rsm->r_flags & RACK_MUST_RXT)) {
11056 rsm = TAILQ_NEXT(rsm, r_tnext);
11057 continue;
11058 }
11059 break;
11060 }
11061 if (rsm && (rsm->r_dupack < 0xff)) {
11062 rsm->r_dupack++;
11063 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) {
11064 struct timeval tv;
11065 uint32_t cts;
11066 /*
11067 * Here we see if we need to retransmit. For
11068 * a SACK type connection if enough time has passed
11069 * we will get a return of the rsm. For a non-sack
11070 * connection we will get the rsm returned if the
11071 * dupack value is 3 or more.
11072 */
11073 cts = tcp_get_usecs(&tv);
11074 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts);
11075 if (rack->r_ctl.rc_resend != NULL) {
11076 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) {
11077 rack_cong_signal(rack->rc_tp, CC_NDUPACK,
11078 th_ack, __LINE__);
11079 }
11080 rack->r_wanted_output = 1;
11081 rack->r_timer_override = 1;
11082 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3);
11083 }
11084 } else {
11085 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3);
11086 }
11087 }
11088 }
11089
11090 static void
11091 rack_check_bottom_drag(struct tcpcb *tp,
11092 struct tcp_rack *rack,
11093 struct socket *so)
11094 {
11095 /*
11096 * So what is dragging bottom?
11097 *
11098 * Dragging bottom means you were under pacing and had a
11099 * delay in processing inbound acks waiting on our pacing
11100 * timer to expire. While you were waiting all of the acknowledgments
11101 * for the packets you sent have arrived. This means we are pacing
11102 * way underneath the bottleneck to the point where our Goodput
11103 * measurements stop working, since they require more than one
11104 * ack (usually at least 8 packets worth with multiple acks so we can
11105 * gauge the inter-ack times). If that occurs we have a real problem
11106 * since we are stuck in a hole that we can't get out of without
11107 * something speeding us up.
11108 *
11109 * We also check to see if we are whittling down to just one segment
11110 * outstanding. If this occurs and we have room to send in our cwnd/rwnd
11111 * then we are adding the delayed ack interval into our measurements and
11112 * we need to speed up slightly.
11113 */
11114 uint32_t segsiz, minseg;
11115
11116 segsiz = ctf_fixed_maxseg(tp);
11117 minseg = segsiz;
11118 if (tp->snd_max == tp->snd_una) {
11119 /*
11120 * We are doing dynamic pacing and we are way
11121 * under. Basically everything got acked while
11122 * we were still waiting on the pacer to expire.
11123 *
11124 * This means we need to boost the b/w in
11125 * addition to any earlier boosting of
11126 * the multiplier.
11127 */
11128 uint64_t lt_bw;
11129
11130 tcp_trace_point(rack->rc_tp, TCP_TP_PACED_BOTTOM);
11131 lt_bw = rack_get_lt_bw(rack);
11132 rack->rc_dragged_bottom = 1;
11133 rack_validate_multipliers_at_or_above100(rack);
11134 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) &&
11135 (rack->dis_lt_bw == 0) &&
11136 (rack->use_lesser_lt_bw == 0) &&
11137 (lt_bw > 0)) {
11138 /*
11139 * Lets use the long-term b/w we have
11140 * been getting as a base.
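 * (For example, if the long-term estimate comes out above
 * ONE_POINT_TWO_MEG before any goodput measurement has been filled
 * in, it is clamped to that constant below so one early outlier
 * cannot lock in an overly aggressive pace.)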
11141 */ 11142 if (rack->rc_gp_filled == 0) { 11143 if (lt_bw > ONE_POINT_TWO_MEG) { 11144 /* 11145 * If we have no measurement 11146 * don't let us set in more than 11147 * 1.2Mbps. If we are still too 11148 * low after pacing with this we 11149 * will hopefully have a max b/w 11150 * available to sanity check things. 11151 */ 11152 lt_bw = ONE_POINT_TWO_MEG; 11153 } 11154 rack->r_ctl.rc_rtt_diff = 0; 11155 rack->r_ctl.gp_bw = lt_bw; 11156 rack->rc_gp_filled = 1; 11157 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 11158 rack->r_ctl.num_measurements = RACK_REQ_AVG; 11159 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 11160 } else if (lt_bw > rack->r_ctl.gp_bw) { 11161 rack->r_ctl.rc_rtt_diff = 0; 11162 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 11163 rack->r_ctl.num_measurements = RACK_REQ_AVG; 11164 rack->r_ctl.gp_bw = lt_bw; 11165 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 11166 } else 11167 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11168 if ((rack->gp_ready == 0) && 11169 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 11170 /* We have enough measurements now */ 11171 rack->gp_ready = 1; 11172 if (rack->dgp_on || 11173 rack->rack_hibeta) 11174 rack_set_cc_pacing(rack); 11175 if (rack->defer_options) 11176 rack_apply_deferred_options(rack); 11177 } 11178 } else { 11179 /* 11180 * zero rtt possibly?, settle for just an old increase. 11181 */ 11182 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11183 } 11184 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 11185 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), 11186 minseg)) && 11187 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && 11188 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && 11189 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= 11190 (segsiz * rack_req_segs))) { 11191 /* 11192 * We are doing dynamic GP pacing and 11193 * we have everything except 1MSS or less 11194 * bytes left out. We are still pacing away. 11195 * And there is data that could be sent, This 11196 * means we are inserting delayed ack time in 11197 * our measurements because we are pacing too slow. 
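 * (Concretely: with roughly one MSS left outstanding the peer may
 * sit on its delayed-ACK timer before responding, and that wait
 * gets folded into the goodput sample, so below we nudge the rate
 * up via rack_increase_bw_mul().)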
11198 */ 11199 rack_validate_multipliers_at_or_above100(rack); 11200 rack->rc_dragged_bottom = 1; 11201 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11202 } 11203 } 11204 11205 #ifdef TCP_REQUEST_TRK 11206 static void 11207 rack_log_hybrid(struct tcp_rack *rack, uint32_t seq, 11208 struct tcp_sendfile_track *cur, uint8_t mod, int line, int err) 11209 { 11210 int do_log; 11211 11212 do_log = tcp_bblogging_on(rack->rc_tp); 11213 if (do_log == 0) { 11214 if ((do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) )== 0) 11215 return; 11216 /* We only allow the three below with point logging on */ 11217 if ((mod != HYBRID_LOG_RULES_APP) && 11218 (mod != HYBRID_LOG_RULES_SET) && 11219 (mod != HYBRID_LOG_REQ_COMP)) 11220 return; 11221 11222 } 11223 if (do_log) { 11224 union tcp_log_stackspecific log; 11225 struct timeval tv; 11226 11227 /* Convert our ms to a microsecond */ 11228 memset(&log, 0, sizeof(log)); 11229 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 11230 log.u_bbr.flex1 = seq; 11231 log.u_bbr.cwnd_gain = line; 11232 if (cur != NULL) { 11233 uint64_t off; 11234 11235 log.u_bbr.flex2 = cur->start_seq; 11236 log.u_bbr.flex3 = cur->end_seq; 11237 log.u_bbr.flex4 = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 11238 log.u_bbr.flex5 = (uint32_t)(cur->localtime & 0x00000000ffffffff); 11239 log.u_bbr.flex6 = cur->flags; 11240 log.u_bbr.pkts_out = cur->hybrid_flags; 11241 log.u_bbr.rttProp = cur->timestamp; 11242 log.u_bbr.cur_del_rate = cur->cspr; 11243 log.u_bbr.bw_inuse = cur->start; 11244 log.u_bbr.applimited = (uint32_t)(cur->end & 0x00000000ffffffff); 11245 log.u_bbr.delivered = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff) ; 11246 log.u_bbr.epoch = (uint32_t)(cur->deadline & 0x00000000ffffffff); 11247 log.u_bbr.lt_epoch = (uint32_t)((cur->deadline >> 32) & 0x00000000ffffffff) ; 11248 log.u_bbr.inhpts = 1; 11249 #ifdef TCP_REQUEST_TRK 11250 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 11251 log.u_bbr.use_lt_bw = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 11252 #endif 11253 } else { 11254 log.u_bbr.flex2 = err; 11255 } 11256 /* 11257 * Fill in flex7 to be CHD (catchup|hybrid|DGP) 11258 */ 11259 log.u_bbr.flex7 = rack->rc_catch_up; 11260 log.u_bbr.flex7 <<= 1; 11261 log.u_bbr.flex7 |= rack->rc_hybrid_mode; 11262 log.u_bbr.flex7 <<= 1; 11263 log.u_bbr.flex7 |= rack->dgp_on; 11264 /* 11265 * Compose bbr_state to be a bit wise 0000ADHF 11266 * where A is the always_pace flag 11267 * where D is the dgp_on flag 11268 * where H is the hybrid_mode on flag 11269 * where F is the use_fixed_rate flag. 
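 * (Worked example: always_pace=1 and dgp_on=1 with hybrid and
 * fixed rate off shifts together to 0b1100, i.e. bbr_state == 0xc.)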
11270 */ 11271 log.u_bbr.bbr_state = rack->rc_always_pace; 11272 log.u_bbr.bbr_state <<= 1; 11273 log.u_bbr.bbr_state |= rack->dgp_on; 11274 log.u_bbr.bbr_state <<= 1; 11275 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 11276 log.u_bbr.bbr_state <<= 1; 11277 log.u_bbr.bbr_state |= rack->use_fixed_rate; 11278 log.u_bbr.flex8 = mod; 11279 log.u_bbr.delRate = rack->r_ctl.bw_rate_cap; 11280 log.u_bbr.bbr_substate = rack->r_ctl.client_suggested_maxseg; 11281 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 11282 log.u_bbr.pkt_epoch = rack->rc_tp->tcp_hybrid_start; 11283 log.u_bbr.lost = rack->rc_tp->tcp_hybrid_error; 11284 log.u_bbr.pacing_gain = (uint16_t)rack->rc_tp->tcp_hybrid_stop; 11285 tcp_log_event(rack->rc_tp, NULL, 11286 &rack->rc_inp->inp_socket->so_rcv, 11287 &rack->rc_inp->inp_socket->so_snd, 11288 TCP_HYBRID_PACING_LOG, 0, 11289 0, &log, false, NULL, __func__, __LINE__, &tv); 11290 } 11291 } 11292 #endif 11293 11294 #ifdef TCP_REQUEST_TRK 11295 static void 11296 rack_set_dgp_hybrid_mode(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts) 11297 { 11298 struct tcp_sendfile_track *rc_cur, *orig_ent; 11299 struct tcpcb *tp; 11300 int err = 0; 11301 11302 orig_ent = rack->r_ctl.rc_last_sft; 11303 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, seq); 11304 if (rc_cur == NULL) { 11305 /* If not in the beginning what about the end piece */ 11306 if (rack->rc_hybrid_mode) 11307 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); 11308 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, (seq + len - 1)); 11309 } else { 11310 err = 12345; 11311 } 11312 /* If we find no parameters we are in straight DGP mode */ 11313 if(rc_cur == NULL) { 11314 /* None found for this seq, just DGP for now */ 11315 if (rack->rc_hybrid_mode) { 11316 rack->r_ctl.client_suggested_maxseg = 0; 11317 rack->rc_catch_up = 0; 11318 if (rack->cspr_is_fcc == 0) 11319 rack->r_ctl.bw_rate_cap = 0; 11320 else 11321 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 11322 } 11323 if (rack->rc_hybrid_mode) { 11324 rack_log_hybrid(rack, (seq + len - 1), NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); 11325 } 11326 if (rack->r_ctl.rc_last_sft) { 11327 rack->r_ctl.rc_last_sft = NULL; 11328 } 11329 return; 11330 } 11331 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_WASSET) == 0) { 11332 /* This entry was never setup for hybrid pacing on/off etc */ 11333 if (rack->rc_hybrid_mode) { 11334 rack->r_ctl.client_suggested_maxseg = 0; 11335 rack->rc_catch_up = 0; 11336 rack->r_ctl.bw_rate_cap = 0; 11337 } 11338 if (rack->r_ctl.rc_last_sft) { 11339 rack->r_ctl.rc_last_sft = NULL; 11340 } 11341 if ((rc_cur->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { 11342 rc_cur->flags |= TCP_TRK_TRACK_FLG_FSND; 11343 rc_cur->first_send = cts; 11344 rc_cur->sent_at_fs = rack->rc_tp->t_sndbytes; 11345 rc_cur->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; 11346 } 11347 return; 11348 } 11349 /* 11350 * Ok if we have a new entry *or* have never 11351 * set up an entry we need to proceed. If 11352 * we have already set it up this entry we 11353 * just continue along with what we already 11354 * setup. 
11355 */ 11356 tp = rack->rc_tp; 11357 if ((rack->r_ctl.rc_last_sft != NULL) && 11358 (rack->r_ctl.rc_last_sft == rc_cur)) { 11359 /* It's already in place */ 11360 if (rack->rc_hybrid_mode) 11361 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_ISSAME, __LINE__, 0); 11362 return; 11363 } 11364 if (rack->rc_hybrid_mode == 0) { 11365 rack->r_ctl.rc_last_sft = rc_cur; 11366 if (orig_ent) { 11367 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; 11368 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; 11369 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; 11370 } 11371 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0); 11372 return; 11373 } 11374 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CSPR) && rc_cur->cspr) { 11375 /* Compensate for all the header overheads */ 11376 if (rack->cspr_is_fcc == 0) 11377 rack->r_ctl.bw_rate_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); 11378 else 11379 rack->r_ctl.fillcw_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); 11380 } else { 11381 if (rack->rc_hybrid_mode) { 11382 if (rack->cspr_is_fcc == 0) 11383 rack->r_ctl.bw_rate_cap = 0; 11384 else 11385 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 11386 } 11387 } 11388 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_H_MS) 11389 rack->r_ctl.client_suggested_maxseg = rc_cur->hint_maxseg; 11390 else 11391 rack->r_ctl.client_suggested_maxseg = 0; 11392 if (rc_cur->timestamp == rack->r_ctl.last_tm_mark) { 11393 /* 11394 * It is the same timestamp as the previous one, so 11395 * add the hybrid flag that indicates we use send 11396 * time, not arrival time, for catch-up mode. 11397 */ 11398 rc_cur->hybrid_flags |= TCP_HYBRID_PACING_SENDTIME; 11399 } 11400 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CU) && 11401 (rc_cur->cspr > 0)) { 11402 uint64_t len; 11403 11404 rack->rc_catch_up = 1; 11405 /* 11406 * Calculate the deadline time, first set the 11407 * time to when the request arrived. 11408 */ 11409 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_SENDTIME) { 11410 /* 11411 * For cases where it's a duplicate tm (we received more 11412 * than one request for a tm) we want to use now, the point 11413 * where we are just sending the first bit of the request. 11414 */ 11415 rc_cur->deadline = cts; 11416 } else { 11417 /* 11418 * Here we have a different tm from the last request 11419 * so we want to use arrival time as our base. 11420 */ 11421 rc_cur->deadline = rc_cur->localtime; 11422 } 11423 /* 11424 * Next calculate the length and compensate for 11425 * TLS if need be. 11426 */ 11427 len = rc_cur->end - rc_cur->start; 11428 if (tp->t_inpcb.inp_socket->so_snd.sb_tls_info) { 11429 /* 11430 * This session is doing TLS. Take a swag guess 11431 * at the overhead. 11432 */ 11433 len += tcp_estimate_tls_overhead(tp->t_inpcb.inp_socket, len); 11434 } 11435 /* 11436 * Now considering the size, and the cspr, what is the time that 11437 * would be required at the cspr rate. Here we use the raw 11438 * cspr value since the client only looks at the raw data. We 11439 * do use len which includes TLS overhead, but not the TCP/IP etc. 11440 * That will get made up for in the CU pacing rate set. 11441 */ 11442 len *= HPTS_USEC_IN_SEC; 11443 len /= rc_cur->cspr; 11444 rc_cur->deadline += len; 11445 } else { 11446 rack->rc_catch_up = 0; 11447 rc_cur->deadline = 0; 11448 } 11449 if (rack->r_ctl.client_suggested_maxseg != 0) { 11450 /* 11451 * We need to reset the max pace segs if we have a 11452 * client_suggested_maxseg.
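 * (Editorial note: the rack_set_pace_segments() call just below re-derives the pacing segment sizes so the new hint can take effect.)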
11453 */ 11454 rack_set_pace_segments(tp, rack, __LINE__, NULL); 11455 } 11456 if (orig_ent) { 11457 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; 11458 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; 11459 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; 11460 } 11461 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0); 11462 /* Remember it for next time and for CU mode */ 11463 rack->r_ctl.rc_last_sft = rc_cur; 11464 rack->r_ctl.last_tm_mark = rc_cur->timestamp; 11465 } 11466 #endif 11467 11468 static void 11469 rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts) 11470 { 11471 #ifdef TCP_REQUEST_TRK 11472 struct tcp_sendfile_track *ent; 11473 11474 ent = rack->r_ctl.rc_last_sft; 11475 if ((ent == NULL) || 11476 (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) || 11477 (SEQ_GEQ(seq, ent->end_seq))) { 11478 /* Time to update the track. */ 11479 rack_set_dgp_hybrid_mode(rack, seq, len, cts); 11480 ent = rack->r_ctl.rc_last_sft; 11481 } 11482 /* Out of all */ 11483 if (ent == NULL) { 11484 return; 11485 } 11486 if (SEQ_LT(ent->end_seq, (seq + len))) { 11487 /* 11488 * This is the case where our end_seq guess 11489 * was wrong. This is usually due to TLS having 11490 * more bytes than our guess. It could also be the 11491 * case that the client sent in two requests closely 11492 * and the SB is full of both so we are sending part 11493 * of each (end|beg). In such a case let's move this 11494 * entry's end to match the end of this send. That 11495 * way it will complete when all of it is acked. 11496 */ 11497 ent->end_seq = (seq + len); 11498 if (rack->rc_hybrid_mode) 11499 rack_log_hybrid_bw(rack, seq, len, 0, 0, HYBRID_LOG_EXTEND, 0, ent, __LINE__); 11500 } 11501 /* Now validate we have set the send time of this one */ 11502 if ((ent->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { 11503 ent->flags |= TCP_TRK_TRACK_FLG_FSND; 11504 ent->first_send = cts; 11505 ent->sent_at_fs = rack->rc_tp->t_sndbytes; 11506 ent->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; 11507 } 11508 #endif 11509 } 11510 11511 static void 11512 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount) 11513 { 11514 /* 11515 * The fast output path is enabled and we 11516 * have moved the cumack forward. Let's see if 11517 * we can expand forward the fast path length by 11518 * that amount. What we would ideally like to 11519 * do is increase the number of bytes in the 11520 * fast path block (left_to_send) by the 11521 * acked amount. However we have to gate that 11522 * by two factors: 11523 * 1) The amount outstanding and the rwnd of the peer 11524 * (i.e. we don't want to exceed the rwnd of the peer). 11525 * <and> 11526 * 2) The amount of data left in the socket buffer (i.e. 11527 * we can't send beyond what is in the buffer). 11528 * 11529 * Note that this does not take into account any increase 11530 * in the cwnd. We will only extend the fast path by 11531 * what was acked.
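 * A purely illustrative walk-through (numbers invented, not from the source): with acked_amount = 10000, left_to_send = 20000, 15000 bytes outstanding, sbavail = 50000 and snd_wnd = 40000, new_total is 30000 while gating_val = min(50000 - 15000, 40000 - 15000) = 25000, so the fast path block is left alone.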
11532 */ 11533 uint32_t new_total, gating_val; 11534 11535 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; 11536 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), 11537 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); 11538 if (new_total <= gating_val) { 11539 /* We can increase left_to_send by the acked amount */ 11540 counter_u64_add(rack_extended_rfo, 1); 11541 rack->r_ctl.fsb.left_to_send = new_total; 11542 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))), 11543 ("rack:%p left_to_send:%u sbavail:%u out:%u", 11544 rack, rack->r_ctl.fsb.left_to_send, 11545 sbavail(&rack->rc_inp->inp_socket->so_snd), 11546 (tp->snd_max - tp->snd_una))); 11547 11548 } 11549 } 11550 11551 static void 11552 rack_adjust_sendmap_head(struct tcp_rack *rack, struct sockbuf *sb) 11553 { 11554 /* 11555 * Here any sendmap entry that points to the 11556 * beginning mbuf must be adjusted to the correct 11557 * offset. This must be called with: 11558 * 1) The socket buffer locked 11559 * 2) snd_una adjusted to its new position. 11560 * 11561 * Note that (2) implies rack_ack_received has also 11562 * been called and all the sbcut's have been done. 11563 * 11564 * We grab the first mbuf in the socket buffer and 11565 * then go through the front of the sendmap, recalculating 11566 * the stored offset for any sendmap entry that has 11567 * that mbuf. We must use the sb functions to do this 11568 * since it's possible an add was done as well as 11569 * the subtraction we may have just completed. This should 11570 * not be a penalty though, since we just referenced the sb 11571 * to go in and trim off the mbufs that we freed (of course 11572 * there will be a penalty for the sendmap references though). 11573 * 11574 * Note also that with INVARIANTS on, we validate with a KASSERT 11575 * that the first sendmap entry has a soff of 0. 11576 * 11577 */ 11578 struct mbuf *m; 11579 struct rack_sendmap *rsm; 11580 tcp_seq snd_una; 11581 #ifdef INVARIANTS 11582 int first_processed = 0; 11583 #endif 11584 11585 snd_una = rack->rc_tp->snd_una; 11586 SOCKBUF_LOCK_ASSERT(sb); 11587 m = sb->sb_mb; 11588 rsm = tqhash_min(rack->r_ctl.tqh); 11589 if ((rsm == NULL) || (m == NULL)) { 11590 /* Nothing outstanding */ 11591 return; 11592 } 11593 /* The very first RSM's mbuf must point to the head mbuf in the sb */ 11594 KASSERT((rsm->m == m), 11595 ("Rack:%p sb:%p rsm:%p -- first rsm mbuf not aligned to sb", 11596 rack, sb, rsm)); 11597 while (rsm->m && (rsm->m == m)) { 11598 /* one to adjust */ 11599 #ifdef INVARIANTS 11600 struct mbuf *tm; 11601 uint32_t soff; 11602 11603 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); 11604 if ((rsm->orig_m_len != m->m_len) || 11605 (rsm->orig_t_space != M_TRAILINGROOM(m))) { 11606 rack_adjust_orig_mlen(rsm); 11607 } 11608 if (first_processed == 0) { 11609 KASSERT((rsm->soff == 0), 11610 ("Rack:%p rsm:%p -- rsm at head but soff not zero", 11611 rack, rsm)); 11612 first_processed = 1; 11613 } 11614 if ((rsm->soff != soff) || (rsm->m != tm)) { 11615 /* 11616 * This is not a fatal error, we anticipate it 11617 * might happen (the else code), so we count it here 11618 * so that under INVARIANTS we can see that it really 11619 * does happen.
11620 */ 11621 counter_u64_add(rack_adjust_map_bw, 1); 11622 } 11623 rsm->m = tm; 11624 rsm->soff = soff; 11625 if (tm) { 11626 rsm->orig_m_len = rsm->m->m_len; 11627 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 11628 } else { 11629 rsm->orig_m_len = 0; 11630 rsm->orig_t_space = 0; 11631 } 11632 #else 11633 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); 11634 if (rsm->m) { 11635 rsm->orig_m_len = rsm->m->m_len; 11636 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 11637 } else { 11638 rsm->orig_m_len = 0; 11639 rsm->orig_t_space = 0; 11640 } 11641 #endif 11642 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 11643 if (rsm == NULL) 11644 break; 11645 } 11646 } 11647 11648 #ifdef TCP_REQUEST_TRK 11649 static inline void 11650 rack_req_check_for_comp(struct tcp_rack *rack, tcp_seq th_ack) 11651 { 11652 struct tcp_sendfile_track *ent; 11653 int i; 11654 11655 if ((rack->rc_hybrid_mode == 0) && 11656 (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) == 0)) { 11657 /* 11658 * Just do normal completions; hybrid pacing is not on 11659 * and CLDL is off as well. 11660 */ 11661 tcp_req_check_for_comp(rack->rc_tp, th_ack); 11662 return; 11663 } 11664 /* 11665 * Originally I was just going to find the th_ack associated 11666 * with an entry. But then I realized a large stretch ack could 11667 * in theory ack two or more requests at once. So instead we 11668 * need to find all entries that are completed by th_ack not 11669 * just a single entry and do our logging. 11670 */ 11671 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); 11672 while (ent != NULL) { 11673 /* 11674 * We may be doing hybrid pacing or CLDL and possibly need 11675 * more details, so we do it manually instead of calling 11676 * tcp_req_check_for_comp(). 11677 */ 11678 uint64_t laa, tim, data, cbw, ftim; 11679 11680 /* Ok this ack frees it */ 11681 rack_log_hybrid(rack, th_ack, 11682 ent, HYBRID_LOG_REQ_COMP, __LINE__, 0); 11683 rack_log_hybrid_sends(rack, ent, __LINE__); 11684 /* calculate the time based on the ack arrival */ 11685 data = ent->end - ent->start; 11686 laa = tcp_tv_to_lusec(&rack->r_ctl.act_rcv_time); 11687 if (ent->flags & TCP_TRK_TRACK_FLG_FSND) { 11688 if (ent->first_send > ent->localtime) 11689 ftim = ent->first_send; 11690 else 11691 ftim = ent->localtime; 11692 } else { 11693 /* TSNH */ 11694 ftim = ent->localtime; 11695 } 11696 if (laa > ent->localtime) 11697 tim = laa - ftim; 11698 else 11699 tim = 0; 11700 cbw = data * HPTS_USEC_IN_SEC; 11701 if (tim > 0) 11702 cbw /= tim; 11703 else 11704 cbw = 0; 11705 rack_log_hybrid_bw(rack, th_ack, cbw, tim, data, HYBRID_LOG_BW_MEASURE, 0, ent, __LINE__); 11706 /* 11707 * Check to see if we are freeing what we are pointing to send-wise; 11708 * if so, be sure to NULL the pointer so we know we are no longer 11709 * set to anything.
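 * Note also (illustrative arithmetic only, not from the source): cbw above is bytes per second, e.g. data = 1,000,000 bytes completed over tim = 500,000 usec gives cbw = 1,000,000 * HPTS_USEC_IN_SEC / 500,000 = 2,000,000 bytes/sec.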
11710 */ 11711 if (ent == rack->r_ctl.rc_last_sft) { 11712 rack->r_ctl.rc_last_sft = NULL; 11713 if (rack->rc_hybrid_mode) { 11714 rack->rc_catch_up = 0; 11715 if (rack->cspr_is_fcc == 0) 11716 rack->r_ctl.bw_rate_cap = 0; 11717 else 11718 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 11719 rack->r_ctl.client_suggested_maxseg = 0; 11720 } 11721 } 11722 /* Generate the log that the tcp_netflix call would have */ 11723 tcp_req_log_req_info(rack->rc_tp, ent, 11724 i, TCP_TRK_REQ_LOG_FREED, 0, 0); 11725 /* Free it and see if there is another one */ 11726 tcp_req_free_a_slot(rack->rc_tp, ent); 11727 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); 11728 } 11729 } 11730 #endif 11731 11732 11733 /* 11734 * Return value of 1, we do not need to call rack_process_data(). 11735 * return value of 0, rack_process_data can be called. 11736 * For ret_val if its 0 the TCP is locked, if its non-zero 11737 * its unlocked and probably unsafe to touch the TCB. 11738 */ 11739 static int 11740 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 11741 struct tcpcb *tp, struct tcpopt *to, 11742 uint32_t tiwin, int32_t tlen, 11743 int32_t * ofia, int32_t thflags, int32_t *ret_val, int32_t orig_tlen) 11744 { 11745 int32_t ourfinisacked = 0; 11746 int32_t nsegs, acked_amount; 11747 int32_t acked; 11748 struct mbuf *mfree; 11749 struct tcp_rack *rack; 11750 int32_t under_pacing = 0; 11751 int32_t post_recovery = 0; 11752 uint32_t p_cwnd; 11753 11754 INP_WLOCK_ASSERT(tptoinpcb(tp)); 11755 11756 rack = (struct tcp_rack *)tp->t_fb_ptr; 11757 if (SEQ_GEQ(tp->snd_una, tp->iss + (65535 << tp->snd_scale))) { 11758 /* Checking SEG.ACK against ISS is definitely redundant. */ 11759 tp->t_flags2 |= TF2_NO_ISS_CHECK; 11760 } 11761 if (!V_tcp_insecure_ack) { 11762 tcp_seq seq_min; 11763 bool ghost_ack_check; 11764 11765 if (tp->t_flags2 & TF2_NO_ISS_CHECK) { 11766 /* Check for too old ACKs (RFC 5961, Section 5.2). */ 11767 seq_min = tp->snd_una - tp->max_sndwnd; 11768 ghost_ack_check = false; 11769 } else { 11770 if (SEQ_GT(tp->iss + 1, tp->snd_una - tp->max_sndwnd)) { 11771 /* Checking for ghost ACKs is stricter. */ 11772 seq_min = tp->iss + 1; 11773 ghost_ack_check = true; 11774 } else { 11775 /* 11776 * Checking for too old ACKs (RFC 5961, 11777 * Section 5.2) is stricter. 11778 */ 11779 seq_min = tp->snd_una - tp->max_sndwnd; 11780 ghost_ack_check = false; 11781 } 11782 } 11783 if (SEQ_LT(th->th_ack, seq_min)) { 11784 if (ghost_ack_check) 11785 TCPSTAT_INC(tcps_rcvghostack); 11786 else 11787 TCPSTAT_INC(tcps_rcvacktooold); 11788 /* Send challenge ACK. 
*/ 11789 ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val); 11790 rack->r_wanted_output = 1; 11791 return (1); 11792 } 11793 } 11794 if (SEQ_GT(th->th_ack, tp->snd_max)) { 11795 ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val); 11796 rack->r_wanted_output = 1; 11797 return (1); 11798 } 11799 if (rack->gp_ready && 11800 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 11801 under_pacing = 1; 11802 } 11803 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 11804 int in_rec, dup_ack_struck = 0; 11805 int dsack_seen = 0, sacks_seen = 0; 11806 11807 in_rec = IN_FASTRECOVERY(tp->t_flags); 11808 if (rack->rc_in_persist) { 11809 tp->t_rxtshift = 0; 11810 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 11811 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 11812 } 11813 11814 if ((th->th_ack == tp->snd_una) && 11815 (tiwin == tp->snd_wnd) && 11816 (orig_tlen == 0) && 11817 ((to->to_flags & TOF_SACK) == 0)) { 11818 rack_strike_dupack(rack, th->th_ack); 11819 dup_ack_struck = 1; 11820 } 11821 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), 11822 dup_ack_struck, &dsack_seen, &sacks_seen); 11823 11824 } 11825 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 11826 /* 11827 * Old ack, behind (or duplicate to) the last one rcv'd. 11828 * Note: We mark that reordering is occurring if it's 11829 * less than snd_una and we have not closed our window. 11830 */ 11831 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { 11832 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time); 11833 if (rack->r_ctl.rc_reorder_ts == 0) 11834 rack->r_ctl.rc_reorder_ts = 1; 11835 } 11836 return (0); 11837 } 11838 /* 11839 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 11840 * something we sent. 11841 */ 11842 if (tp->t_flags & TF_NEEDSYN) { 11843 /* 11844 * T/TCP: Connection was half-synchronized, and our SYN has 11845 * been ACK'd (so connection is now fully synchronized). Go 11846 * to non-starred state, increment snd_una for ACK of SYN, 11847 * and check if we can do window scaling. 11848 */ 11849 tp->t_flags &= ~TF_NEEDSYN; 11850 tp->snd_una++; 11851 /* Do window scaling? */ 11852 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11853 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11854 tp->rcv_scale = tp->request_r_scale; 11855 /* Send window already scaled. */ 11856 } 11857 } 11858 nsegs = max(1, m->m_pkthdr.lro_nsegs); 11859 11860 acked = BYTES_THIS_ACK(tp, th); 11861 if (acked) { 11862 /* 11863 * Any time we move the cum-ack forward, clear the 11864 * keep-alive tied probe-not-answered flag. The 11865 * persist timer clears its own on entry. 11866 */ 11867 rack->probe_not_answered = 0; 11868 } 11869 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 11870 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 11871 /* 11872 * If we just performed our first retransmit, and the ACK arrives 11873 * within our recovery window, then it was a mistake to do the 11874 * retransmit in the first place. Recover our original cwnd and 11875 * ssthresh, and proceed to transmit where we left off.
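 * (Editorial note: the check just below treats the ACK as being inside that window when ticks has not yet reached t_badrxtwin, i.e. (int)(ticks - tp->t_badrxtwin) < 0.)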
11876 */ 11877 if ((tp->t_flags & TF_PREVVALID) && 11878 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 11879 tp->t_flags &= ~TF_PREVVALID; 11880 if (tp->t_rxtshift == 1 && 11881 (int)(ticks - tp->t_badrxtwin) < 0) 11882 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 11883 } 11884 if (acked) { 11885 /* assure we are not backed off */ 11886 tp->t_rxtshift = 0; 11887 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 11888 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 11889 rack->rc_tlp_in_progress = 0; 11890 rack->r_ctl.rc_tlp_cnt_out = 0; 11891 /* 11892 * If it is the RXT timer we want to 11893 * stop it, so we can restart a TLP. 11894 */ 11895 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 11896 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11897 #ifdef TCP_REQUEST_TRK 11898 rack_req_check_for_comp(rack, th->th_ack); 11899 #endif 11900 } 11901 /* 11902 * If we have a timestamp reply, update smoothed round trip time. If 11903 * no timestamp is present but transmit timer is running and timed 11904 * sequence number was acked, update smoothed round trip time. Since 11905 * we now have an rtt measurement, cancel the timer backoff (cf., 11906 * Phil Karn's retransmit alg.). Recompute the initial retransmit 11907 * timer. 11908 * 11909 * Some boxes send broken timestamp replies during the SYN+ACK 11910 * phase, ignore timestamps of 0 or we could calculate a huge RTT 11911 * and blow up the retransmit timer. 11912 */ 11913 /* 11914 * If all outstanding data is acked, stop retransmit timer and 11915 * remember to restart (more output or persist). If there is more 11916 * data to be acked, restart retransmit timer, using current 11917 * (possibly backed-off) value. 11918 */ 11919 if (acked == 0) { 11920 if (ofia) 11921 *ofia = ourfinisacked; 11922 return (0); 11923 } 11924 if (IN_RECOVERY(tp->t_flags)) { 11925 if (SEQ_LT(th->th_ack, tp->snd_recover) && 11926 (SEQ_LT(th->th_ack, tp->snd_max))) { 11927 tcp_rack_partialack(tp); 11928 } else { 11929 rack_post_recovery(tp, th->th_ack); 11930 post_recovery = 1; 11931 /* 11932 * Grab the segsiz, multiply by 2 and add the snd_cwnd 11933 * that is the max the CC should add if we are exiting 11934 * recovery and doing a late add. 11935 */ 11936 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 11937 p_cwnd <<= 1; 11938 p_cwnd += tp->snd_cwnd; 11939 } 11940 } else if ((rack->rto_from_rec == 1) && 11941 SEQ_GEQ(th->th_ack, tp->snd_recover)) { 11942 /* 11943 * We were in recovery, hit a rxt timeout 11944 * and never re-entered recovery. The timeout(s) 11945 * made up all the lost data. In such a case 11946 * we need to clear the rto_from_rec flag. 11947 */ 11948 rack->rto_from_rec = 0; 11949 } 11950 /* 11951 * Let the congestion control algorithm update congestion control 11952 * related information. This typically means increasing the 11953 * congestion window. 
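 * When post_recovery was set above, p_cwnd bounds that increase: illustratively (numbers not from the source), if both ctf_fixed_maxseg() and rc_pace_min_segs come to 1448 then p_cwnd is the prior snd_cwnd plus 2 * 1448 = 2896 bytes, and any larger cwnd coming back from the CC is clipped right after the call.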
11954 */ 11955 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, post_recovery); 11956 if (post_recovery && 11957 (tp->snd_cwnd > p_cwnd)) { 11958 /* Must be non-newreno (cubic) getting too ahead of itself */ 11959 tp->snd_cwnd = p_cwnd; 11960 } 11961 SOCK_SENDBUF_LOCK(so); 11962 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 11963 tp->snd_wnd -= acked_amount; 11964 mfree = sbcut_locked(&so->so_snd, acked_amount); 11965 if ((sbused(&so->so_snd) == 0) && 11966 (acked > acked_amount) && 11967 (tp->t_state >= TCPS_FIN_WAIT_1) && 11968 (tp->t_flags & TF_SENTFIN)) { 11969 /* 11970 * We must be sure our fin 11971 * was sent and acked (we can be 11972 * in FIN_WAIT_1 without having 11973 * sent the fin). 11974 */ 11975 ourfinisacked = 1; 11976 } 11977 tp->snd_una = th->th_ack; 11978 /* wakeups? */ 11979 if (acked_amount && sbavail(&so->so_snd)) 11980 rack_adjust_sendmap_head(rack, &so->so_snd); 11981 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 11982 /* NB: sowwakeup_locked() does an implicit unlock. */ 11983 sowwakeup_locked(so); 11984 m_freem(mfree); 11985 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 11986 tp->snd_recover = tp->snd_una; 11987 11988 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { 11989 tp->snd_nxt = tp->snd_max; 11990 } 11991 if (under_pacing && 11992 (rack->use_fixed_rate == 0) && 11993 (rack->in_probe_rtt == 0) && 11994 rack->rc_gp_dyn_mul && 11995 rack->rc_always_pace) { 11996 /* Check if we are dragging bottom */ 11997 rack_check_bottom_drag(tp, rack, so); 11998 } 11999 if (tp->snd_una == tp->snd_max) { 12000 /* Nothing left outstanding */ 12001 tp->t_flags &= ~TF_PREVVALID; 12002 if (rack->r_ctl.rc_went_idle_time == 0) 12003 rack->r_ctl.rc_went_idle_time = 1; 12004 rack->r_ctl.retran_during_recovery = 0; 12005 rack->r_ctl.dsack_byte_cnt = 0; 12006 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 12007 if (sbavail(&tptosocket(tp)->so_snd) == 0) 12008 tp->t_acktime = 0; 12009 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12010 rack->rc_suspicious = 0; 12011 /* Set need output so persist might get set */ 12012 rack->r_wanted_output = 1; 12013 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 12014 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 12015 (sbavail(&so->so_snd) == 0) && 12016 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 12017 /* 12018 * The socket was gone and the 12019 * peer sent data (now or in the past), time to 12020 * reset him. 12021 */ 12022 *ret_val = 1; 12023 /* tcp_close will kill the inp pre-log the Reset */ 12024 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 12025 tp = tcp_close(tp); 12026 ctf_do_dropwithreset(m, tp, th, tlen); 12027 return (1); 12028 } 12029 } 12030 if (ofia) 12031 *ofia = ourfinisacked; 12032 return (0); 12033 } 12034 12035 12036 static void 12037 rack_log_collapse(struct tcp_rack *rack, uint32_t cnt, uint32_t split, uint32_t out, int line, 12038 int dir, uint32_t flags, struct rack_sendmap *rsm) 12039 { 12040 if (tcp_bblogging_on(rack->rc_tp)) { 12041 union tcp_log_stackspecific log; 12042 struct timeval tv; 12043 12044 memset(&log, 0, sizeof(log)); 12045 log.u_bbr.flex1 = cnt; 12046 log.u_bbr.flex2 = split; 12047 log.u_bbr.flex3 = out; 12048 log.u_bbr.flex4 = line; 12049 log.u_bbr.flex5 = rack->r_must_retran; 12050 log.u_bbr.flex6 = flags; 12051 log.u_bbr.flex7 = rack->rc_has_collapsed; 12052 log.u_bbr.flex8 = dir; /* 12053 * 1 is collapsed, 0 is uncollapsed, 12054 * 2 is log of a rsm being marked, 3 is a split. 
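 * (Editorial note: rack_un_collapse_window() below also logs with dir 4 as it walks the map marking each rsm RACK_RWND_COLLAPSED.)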
12055 */ 12056 if (rsm == NULL) 12057 log.u_bbr.rttProp = 0; 12058 else 12059 log.u_bbr.rttProp = (uintptr_t)rsm; 12060 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 12061 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 12062 TCP_LOG_EVENTP(rack->rc_tp, NULL, 12063 &rack->rc_inp->inp_socket->so_rcv, 12064 &rack->rc_inp->inp_socket->so_snd, 12065 TCP_RACK_LOG_COLLAPSE, 0, 12066 0, &log, false, &tv); 12067 } 12068 } 12069 12070 static void 12071 rack_collapsed_window(struct tcp_rack *rack, uint32_t out, tcp_seq th_ack, int line) 12072 { 12073 /* 12074 * Here all we do is mark the collapsed point and set the flag. 12075 * This may happen again and again, but there is no 12076 * sense splitting our map until we know where the 12077 * peer finally lands in the collapse. 12078 */ 12079 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); 12080 if ((rack->rc_has_collapsed == 0) || 12081 (rack->r_ctl.last_collapse_point != (th_ack + rack->rc_tp->snd_wnd))) 12082 counter_u64_add(rack_collapsed_win_seen, 1); 12083 rack->r_ctl.last_collapse_point = th_ack + rack->rc_tp->snd_wnd; 12084 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max; 12085 rack->rc_has_collapsed = 1; 12086 rack->r_collapse_point_valid = 1; 12087 rack_log_collapse(rack, 0, th_ack, rack->r_ctl.last_collapse_point, line, 1, 0, NULL); 12088 } 12089 12090 static void 12091 rack_un_collapse_window(struct tcp_rack *rack, int line) 12092 { 12093 struct rack_sendmap *nrsm, *rsm; 12094 int cnt = 0, split = 0; 12095 int insret __diagused; 12096 12097 12098 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); 12099 rack->rc_has_collapsed = 0; 12100 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); 12101 if (rsm == NULL) { 12102 /* Nothing to do maybe the peer ack'ed it all */ 12103 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 12104 return; 12105 } 12106 /* Now do we need to split this one? */ 12107 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) { 12108 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 12109 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm); 12110 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 12111 if (nrsm == NULL) { 12112 /* We can't get a rsm, mark all? 
*/ 12113 nrsm = rsm; 12114 goto no_split; 12115 } 12116 /* Clone it */ 12117 split = 1; 12118 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point); 12119 #ifndef INVARIANTS 12120 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 12121 #else 12122 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 12123 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 12124 nrsm, insret, rack, rsm); 12125 } 12126 #endif 12127 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 12128 rack->r_ctl.last_collapse_point, __LINE__); 12129 if (rsm->r_in_tmap) { 12130 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 12131 nrsm->r_in_tmap = 1; 12132 } 12133 /* 12134 * Set in the new RSM as the 12135 * collapsed starting point 12136 */ 12137 rsm = nrsm; 12138 } 12139 12140 no_split: 12141 TQHASH_FOREACH_FROM(nrsm, rack->r_ctl.tqh, rsm) { 12142 cnt++; 12143 nrsm->r_flags |= RACK_RWND_COLLAPSED; 12144 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm); 12145 cnt++; 12146 } 12147 if (cnt) { 12148 counter_u64_add(rack_collapsed_win, 1); 12149 } 12150 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 12151 } 12152 12153 static void 12154 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 12155 int32_t tlen, int32_t tfo_syn) 12156 { 12157 if (DELAY_ACK(tp, tlen) || tfo_syn) { 12158 rack_timer_cancel(tp, rack, 12159 rack->r_ctl.rc_rcvtime, __LINE__); 12160 tp->t_flags |= TF_DELACK; 12161 } else { 12162 rack->r_wanted_output = 1; 12163 tp->t_flags |= TF_ACKNOW; 12164 } 12165 } 12166 12167 static void 12168 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack) 12169 { 12170 /* 12171 * If fast output is in progress, lets validate that 12172 * the new window did not shrink on us and make it 12173 * so fast output should end. 12174 */ 12175 if (rack->r_fast_output) { 12176 uint32_t out; 12177 12178 /* 12179 * Calculate what we will send if left as is 12180 * and compare that to our send window. 12181 */ 12182 out = ctf_outstanding(tp); 12183 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 12184 /* ok we have an issue */ 12185 if (out >= tp->snd_wnd) { 12186 /* Turn off fast output the window is met or collapsed */ 12187 rack->r_fast_output = 0; 12188 } else { 12189 /* we have some room left */ 12190 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 12191 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 12192 /* If not at least 1 full segment never mind */ 12193 rack->r_fast_output = 0; 12194 } 12195 } 12196 } 12197 } 12198 } 12199 12200 /* 12201 * Return value of 1, the TCB is unlocked and most 12202 * likely gone, return value of 0, the TCP is still 12203 * locked. 12204 */ 12205 static int 12206 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 12207 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 12208 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 12209 { 12210 /* 12211 * Update window information. Don't look at window if no ACK: TAC's 12212 * send garbage on first SYN. 
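 * The three-clause test below is the classic window-update rule: take the advertised window only from a segment at least as new as the last one used, i.e. a newer sequence (snd_wl1 < seq), the same sequence with a newer ack (snd_wl2 < ack), or the same sequence and ack with a larger window.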
12213 */ 12214 int32_t nsegs; 12215 int32_t tfo_syn; 12216 struct tcp_rack *rack; 12217 12218 INP_WLOCK_ASSERT(tptoinpcb(tp)); 12219 12220 rack = (struct tcp_rack *)tp->t_fb_ptr; 12221 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12222 if ((thflags & TH_ACK) && 12223 (SEQ_LT(tp->snd_wl1, th->th_seq) || 12224 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 12225 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 12226 /* keep track of pure window updates */ 12227 if (tlen == 0 && 12228 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 12229 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 12230 tp->snd_wnd = tiwin; 12231 rack_validate_fo_sendwin_up(tp, rack); 12232 tp->snd_wl1 = th->th_seq; 12233 tp->snd_wl2 = th->th_ack; 12234 if (tp->snd_wnd > tp->max_sndwnd) 12235 tp->max_sndwnd = tp->snd_wnd; 12236 rack->r_wanted_output = 1; 12237 } else if (thflags & TH_ACK) { 12238 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 12239 tp->snd_wnd = tiwin; 12240 rack_validate_fo_sendwin_up(tp, rack); 12241 tp->snd_wl1 = th->th_seq; 12242 tp->snd_wl2 = th->th_ack; 12243 } 12244 } 12245 if (tp->snd_wnd < ctf_outstanding(tp)) 12246 /* The peer collapsed the window */ 12247 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); 12248 else if (rack->rc_has_collapsed) 12249 rack_un_collapse_window(rack, __LINE__); 12250 if ((rack->r_collapse_point_valid) && 12251 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point))) 12252 rack->r_collapse_point_valid = 0; 12253 /* Was persist timer active and now we have window space? */ 12254 if ((rack->rc_in_persist != 0) && 12255 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 12256 rack->r_ctl.rc_pace_min_segs))) { 12257 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 12258 tp->snd_nxt = tp->snd_max; 12259 /* Make sure we output to start the timer */ 12260 rack->r_wanted_output = 1; 12261 } 12262 /* Do we enter persists? */ 12263 if ((rack->rc_in_persist == 0) && 12264 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 12265 TCPS_HAVEESTABLISHED(tp->t_state) && 12266 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 12267 sbavail(&tptosocket(tp)->so_snd) && 12268 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 12269 /* 12270 * Here the rwnd is less than 12271 * the pacing size, we are established, 12272 * nothing is outstanding, and there is 12273 * data to send. Enter persists. 12274 */ 12275 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); 12276 } 12277 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 12278 m_freem(m); 12279 return (0); 12280 } 12281 /* 12282 * don't process the URG bit, ignore them drag 12283 * along the up. 12284 */ 12285 tp->rcv_up = tp->rcv_nxt; 12286 12287 /* 12288 * Process the segment text, merging it into the TCP sequencing 12289 * queue, and arranging for acknowledgment of receipt if necessary. 12290 * This process logically involves adjusting tp->rcv_wnd as data is 12291 * presented to the user (this happens in tcp_usrreq.c, case 12292 * PRU_RCVD). If a FIN has already been received on this connection 12293 * then we just ignore the text. 
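 * (Editorial note: tfo_syn, computed just below, marks a server-side TCP Fast Open connection still in SYN_RECEIVED so that data carried with the SYN gets appended or reassembled like ordinary payload.)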
12294 */ 12295 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 12296 (tp->t_flags & TF_FASTOPEN)); 12297 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 12298 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 12299 tcp_seq save_start = th->th_seq; 12300 tcp_seq save_rnxt = tp->rcv_nxt; 12301 int save_tlen = tlen; 12302 12303 m_adj(m, drop_hdrlen); /* delayed header drop */ 12304 /* 12305 * Insert segment which includes th into TCP reassembly 12306 * queue with control block tp. Set thflags to whether 12307 * reassembly now includes a segment with FIN. This handles 12308 * the common case inline (segment is the next to be 12309 * received on an established connection, and the queue is 12310 * empty), avoiding linkage into and removal from the queue 12311 * and repetition of various conversions. Set DELACK for 12312 * segments received in order, but ack immediately when 12313 * segments are out of order (so fast retransmit can work). 12314 */ 12315 if (th->th_seq == tp->rcv_nxt && 12316 SEGQ_EMPTY(tp) && 12317 (TCPS_HAVEESTABLISHED(tp->t_state) || 12318 tfo_syn)) { 12319 #ifdef NETFLIX_SB_LIMITS 12320 u_int mcnt, appended; 12321 12322 if (so->so_rcv.sb_shlim) { 12323 mcnt = m_memcnt(m); 12324 appended = 0; 12325 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 12326 CFO_NOSLEEP, NULL) == false) { 12327 counter_u64_add(tcp_sb_shlim_fails, 1); 12328 m_freem(m); 12329 return (0); 12330 } 12331 } 12332 #endif 12333 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); 12334 tp->rcv_nxt += tlen; 12335 if (tlen && 12336 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 12337 (tp->t_fbyte_in == 0)) { 12338 tp->t_fbyte_in = ticks; 12339 if (tp->t_fbyte_in == 0) 12340 tp->t_fbyte_in = 1; 12341 if (tp->t_fbyte_out && tp->t_fbyte_in) 12342 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 12343 } 12344 thflags = tcp_get_flags(th) & TH_FIN; 12345 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 12346 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 12347 SOCK_RECVBUF_LOCK(so); 12348 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 12349 m_freem(m); 12350 } else { 12351 int32_t newsize; 12352 12353 if (tlen > 0) { 12354 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 12355 if (newsize) 12356 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 12357 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 12358 } 12359 #ifdef NETFLIX_SB_LIMITS 12360 appended = 12361 #endif 12362 sbappendstream_locked(&so->so_rcv, m, 0); 12363 } 12364 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 12365 /* NB: sorwakeup_locked() does an implicit unlock. */ 12366 sorwakeup_locked(so); 12367 #ifdef NETFLIX_SB_LIMITS 12368 if (so->so_rcv.sb_shlim && appended != mcnt) 12369 counter_fo_release(so->so_rcv.sb_shlim, 12370 mcnt - appended); 12371 #endif 12372 } else { 12373 /* 12374 * XXX: Due to the header drop above "th" is 12375 * theoretically invalid by now. Fortunately 12376 * m_adj() doesn't actually frees any mbufs when 12377 * trimming from the head. 12378 */ 12379 tcp_seq temp = save_start; 12380 12381 thflags = tcp_reass(tp, th, &temp, &tlen, m); 12382 tp->t_flags |= TF_ACKNOW; 12383 if (tp->t_flags & TF_WAKESOR) { 12384 tp->t_flags &= ~TF_WAKESOR; 12385 /* NB: sorwakeup_locked() does an implicit unlock. */ 12386 sorwakeup_locked(so); 12387 } 12388 } 12389 if ((tp->t_flags & TF_SACK_PERMIT) && 12390 (save_tlen > 0) && 12391 TCPS_HAVEESTABLISHED(tp->t_state)) { 12392 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 12393 /* 12394 * DSACK actually handled in the fastpath 12395 * above. 
12396 */ 12397 tcp_update_sack_list(tp, save_start, 12398 save_start + save_tlen); 12399 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 12400 if ((tp->rcv_numsacks >= 1) && 12401 (tp->sackblks[0].end == save_start)) { 12402 /* 12403 * Partial overlap, recorded at todrop 12404 * above. 12405 */ 12406 tcp_update_sack_list(tp, 12407 tp->sackblks[0].start, 12408 tp->sackblks[0].end); 12409 } else { 12410 tcp_update_dsack_list(tp, save_start, 12411 save_start + save_tlen); 12412 } 12413 } else if (tlen >= save_tlen) { 12414 /* Update of sackblks. */ 12415 tcp_update_dsack_list(tp, save_start, 12416 save_start + save_tlen); 12417 } else if (tlen > 0) { 12418 tcp_update_dsack_list(tp, save_start, 12419 save_start + tlen); 12420 } 12421 } 12422 } else { 12423 m_freem(m); 12424 thflags &= ~TH_FIN; 12425 } 12426 12427 /* 12428 * If FIN is received ACK the FIN and let the user know that the 12429 * connection is closing. 12430 */ 12431 if (thflags & TH_FIN) { 12432 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 12433 /* The socket upcall is handled by socantrcvmore. */ 12434 socantrcvmore(so); 12435 /* 12436 * If connection is half-synchronized (ie NEEDSYN 12437 * flag on) then delay ACK, so it may be piggybacked 12438 * when SYN is sent. Otherwise, since we received a 12439 * FIN then no more input can be expected, send ACK 12440 * now. 12441 */ 12442 if (tp->t_flags & TF_NEEDSYN) { 12443 rack_timer_cancel(tp, rack, 12444 rack->r_ctl.rc_rcvtime, __LINE__); 12445 tp->t_flags |= TF_DELACK; 12446 } else { 12447 tp->t_flags |= TF_ACKNOW; 12448 } 12449 tp->rcv_nxt++; 12450 } 12451 switch (tp->t_state) { 12452 /* 12453 * In SYN_RECEIVED and ESTABLISHED STATES enter the 12454 * CLOSE_WAIT state. 12455 */ 12456 case TCPS_SYN_RECEIVED: 12457 tp->t_starttime = ticks; 12458 /* FALLTHROUGH */ 12459 case TCPS_ESTABLISHED: 12460 rack_timer_cancel(tp, rack, 12461 rack->r_ctl.rc_rcvtime, __LINE__); 12462 tcp_state_change(tp, TCPS_CLOSE_WAIT); 12463 break; 12464 12465 /* 12466 * If still in FIN_WAIT_1 STATE FIN has not been 12467 * acked so enter the CLOSING state. 12468 */ 12469 case TCPS_FIN_WAIT_1: 12470 rack_timer_cancel(tp, rack, 12471 rack->r_ctl.rc_rcvtime, __LINE__); 12472 tcp_state_change(tp, TCPS_CLOSING); 12473 break; 12474 12475 /* 12476 * In FIN_WAIT_2 state enter the TIME_WAIT state, 12477 * starting the time-wait timer, turning off the 12478 * other standard timers. 12479 */ 12480 case TCPS_FIN_WAIT_2: 12481 rack_timer_cancel(tp, rack, 12482 rack->r_ctl.rc_rcvtime, __LINE__); 12483 tcp_twstart(tp); 12484 return (1); 12485 } 12486 } 12487 /* 12488 * Return any desired output. 12489 */ 12490 if ((tp->t_flags & TF_ACKNOW) || 12491 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 12492 rack->r_wanted_output = 1; 12493 } 12494 return (0); 12495 } 12496 12497 /* 12498 * Here nothing is really faster, its just that we 12499 * have broken out the fast-data path also just like 12500 * the fast-ack. 12501 */ 12502 static int 12503 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 12504 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12505 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 12506 { 12507 int32_t nsegs; 12508 int32_t newsize = 0; /* automatic sockbuf scaling */ 12509 struct tcp_rack *rack; 12510 #ifdef NETFLIX_SB_LIMITS 12511 u_int mcnt, appended; 12512 #endif 12513 12514 /* 12515 * If last ACK falls within this segment's sequence numbers, record 12516 * the timestamp. 
NOTE that the test is modified according to the 12517 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 12518 */ 12519 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 12520 return (0); 12521 } 12522 if (tiwin && tiwin != tp->snd_wnd) { 12523 return (0); 12524 } 12525 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 12526 return (0); 12527 } 12528 if (__predict_false((to->to_flags & TOF_TS) && 12529 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 12530 return (0); 12531 } 12532 if (__predict_false((th->th_ack != tp->snd_una))) { 12533 return (0); 12534 } 12535 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 12536 return (0); 12537 } 12538 if ((to->to_flags & TOF_TS) != 0 && 12539 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 12540 tp->ts_recent_age = tcp_ts_getticks(); 12541 tp->ts_recent = to->to_tsval; 12542 } 12543 rack = (struct tcp_rack *)tp->t_fb_ptr; 12544 /* 12545 * This is a pure, in-sequence data packet with nothing on the 12546 * reassembly queue and we have enough buffer space to take it. 12547 */ 12548 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12549 12550 #ifdef NETFLIX_SB_LIMITS 12551 if (so->so_rcv.sb_shlim) { 12552 mcnt = m_memcnt(m); 12553 appended = 0; 12554 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 12555 CFO_NOSLEEP, NULL) == false) { 12556 counter_u64_add(tcp_sb_shlim_fails, 1); 12557 m_freem(m); 12558 return (1); 12559 } 12560 } 12561 #endif 12562 /* Clean receiver SACK report if present */ 12563 if (tp->rcv_numsacks) 12564 tcp_clean_sackreport(tp); 12565 KMOD_TCPSTAT_INC(tcps_preddat); 12566 tp->rcv_nxt += tlen; 12567 if (tlen && 12568 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 12569 (tp->t_fbyte_in == 0)) { 12570 tp->t_fbyte_in = ticks; 12571 if (tp->t_fbyte_in == 0) 12572 tp->t_fbyte_in = 1; 12573 if (tp->t_fbyte_out && tp->t_fbyte_in) 12574 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 12575 } 12576 /* 12577 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 12578 */ 12579 tp->snd_wl1 = th->th_seq; 12580 /* 12581 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 12582 */ 12583 tp->rcv_up = tp->rcv_nxt; 12584 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 12585 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 12586 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 12587 12588 /* Add data to socket buffer. */ 12589 SOCK_RECVBUF_LOCK(so); 12590 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 12591 m_freem(m); 12592 } else { 12593 /* 12594 * Set new socket buffer size. Give up when limit is 12595 * reached. 12596 */ 12597 if (newsize) 12598 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 12599 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 12600 m_adj(m, drop_hdrlen); /* delayed header drop */ 12601 #ifdef NETFLIX_SB_LIMITS 12602 appended = 12603 #endif 12604 sbappendstream_locked(&so->so_rcv, m, 0); 12605 ctf_calc_rwin(so, tp); 12606 } 12607 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 12608 /* NB: sorwakeup_locked() does an implicit unlock. */ 12609 sorwakeup_locked(so); 12610 #ifdef NETFLIX_SB_LIMITS 12611 if (so->so_rcv.sb_shlim && mcnt != appended) 12612 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 12613 #endif 12614 rack_handle_delayed_ack(tp, rack, tlen, 0); 12615 if (tp->snd_una == tp->snd_max) 12616 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 12617 return (1); 12618 } 12619 12620 /* 12621 * This subfunction is used to try to highly optimize the 12622 * fast path. We again allow window updates that are 12623 * in sequence to remain in the fast-path. We also add 12624 * in the __predict's to attempt to help the compiler. 
12625 * Note that if we return a 0, then we can *not* process 12626 * it and the caller should push the packet into the 12627 * slow-path. 12628 */ 12629 static int 12630 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 12631 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12632 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 12633 { 12634 int32_t acked; 12635 int32_t nsegs; 12636 int32_t under_pacing = 0; 12637 struct tcp_rack *rack; 12638 12639 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 12640 /* Old ack, behind (or duplicate to) the last one rcv'd */ 12641 return (0); 12642 } 12643 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 12644 /* Above what we have sent? */ 12645 return (0); 12646 } 12647 if (__predict_false(tiwin == 0)) { 12648 /* zero window */ 12649 return (0); 12650 } 12651 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 12652 /* We need a SYN or a FIN, unlikely.. */ 12653 return (0); 12654 } 12655 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 12656 /* Timestamp is behind .. old ack with seq wrap? */ 12657 return (0); 12658 } 12659 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 12660 /* Still recovering */ 12661 return (0); 12662 } 12663 rack = (struct tcp_rack *)tp->t_fb_ptr; 12664 if (rack->r_ctl.rc_sacked) { 12665 /* We have sack holes on our scoreboard */ 12666 return (0); 12667 } 12668 /* Ok if we reach here, we can process a fast-ack */ 12669 if (rack->gp_ready && 12670 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 12671 under_pacing = 1; 12672 } 12673 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12674 rack_log_ack(tp, to, th, 0, 0, NULL, NULL); 12675 /* Did the window get updated? */ 12676 if (tiwin != tp->snd_wnd) { 12677 tp->snd_wnd = tiwin; 12678 rack_validate_fo_sendwin_up(tp, rack); 12679 tp->snd_wl1 = th->th_seq; 12680 if (tp->snd_wnd > tp->max_sndwnd) 12681 tp->max_sndwnd = tp->snd_wnd; 12682 } 12683 /* Do we exit persists? */ 12684 if ((rack->rc_in_persist != 0) && 12685 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 12686 rack->r_ctl.rc_pace_min_segs))) { 12687 rack_exit_persist(tp, rack, cts); 12688 } 12689 /* Do we enter persists? */ 12690 if ((rack->rc_in_persist == 0) && 12691 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 12692 TCPS_HAVEESTABLISHED(tp->t_state) && 12693 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 12694 sbavail(&tptosocket(tp)->so_snd) && 12695 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 12696 /* 12697 * Here the rwnd is less than 12698 * the pacing size, we are established, 12699 * nothing is outstanding, and there is 12700 * data to send. Enter persists. 12701 */ 12702 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, th->th_ack); 12703 } 12704 /* 12705 * If last ACK falls within this segment's sequence numbers, record 12706 * the timestamp. NOTE that the test is modified according to the 12707 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 12708 */ 12709 if ((to->to_flags & TOF_TS) != 0 && 12710 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 12711 tp->ts_recent_age = tcp_ts_getticks(); 12712 tp->ts_recent = to->to_tsval; 12713 } 12714 /* 12715 * This is a pure ack for outstanding data. 12716 */ 12717 KMOD_TCPSTAT_INC(tcps_predack); 12718 12719 /* 12720 * "bad retransmit" recovery. 
12721 */ 12722 if ((tp->t_flags & TF_PREVVALID) && 12723 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 12724 tp->t_flags &= ~TF_PREVVALID; 12725 if (tp->t_rxtshift == 1 && 12726 (int)(ticks - tp->t_badrxtwin) < 0) 12727 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 12728 } 12729 /* 12730 * Recalculate the transmit timer / rtt. 12731 * 12732 * Some boxes send broken timestamp replies during the SYN+ACK 12733 * phase, ignore timestamps of 0 or we could calculate a huge RTT 12734 * and blow up the retransmit timer. 12735 */ 12736 acked = BYTES_THIS_ACK(tp, th); 12737 12738 #ifdef TCP_HHOOK 12739 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 12740 hhook_run_tcp_est_in(tp, th, to); 12741 #endif 12742 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 12743 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 12744 if (acked) { 12745 struct mbuf *mfree; 12746 12747 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 12748 SOCK_SENDBUF_LOCK(so); 12749 mfree = sbcut_locked(&so->so_snd, acked); 12750 tp->snd_una = th->th_ack; 12751 /* Note we want to hold the sb lock through the sendmap adjust */ 12752 rack_adjust_sendmap_head(rack, &so->so_snd); 12753 /* Wake up the socket if we have room to write more */ 12754 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 12755 sowwakeup_locked(so); 12756 m_freem(mfree); 12757 tp->t_rxtshift = 0; 12758 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 12759 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 12760 rack->rc_tlp_in_progress = 0; 12761 rack->r_ctl.rc_tlp_cnt_out = 0; 12762 /* 12763 * If it is the RXT timer we want to 12764 * stop it, so we can restart a TLP. 12765 */ 12766 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 12767 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12768 12769 #ifdef TCP_REQUEST_TRK 12770 rack_req_check_for_comp(rack, th->th_ack); 12771 #endif 12772 } 12773 /* 12774 * Let the congestion control algorithm update congestion control 12775 * related information. This typically means increasing the 12776 * congestion window. 12777 */ 12778 if (tp->snd_wnd < ctf_outstanding(tp)) { 12779 /* The peer collapsed the window */ 12780 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); 12781 } else if (rack->rc_has_collapsed) 12782 rack_un_collapse_window(rack, __LINE__); 12783 if ((rack->r_collapse_point_valid) && 12784 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point))) 12785 rack->r_collapse_point_valid = 0; 12786 /* 12787 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 12788 */ 12789 tp->snd_wl2 = th->th_ack; 12790 tp->t_dupacks = 0; 12791 m_freem(m); 12792 /* ND6_HINT(tp); *//* Some progress has been made. */ 12793 12794 /* 12795 * If all outstanding data are acked, stop retransmit timer, 12796 * otherwise restart timer using current (possibly backed-off) 12797 * value. If process is waiting for space, wakeup/selwakeup/signal. 12798 * If data are ready to send, let tcp_output decide between more 12799 * output or persist. 
12800 */ 12801 if (under_pacing && 12802 (rack->use_fixed_rate == 0) && 12803 (rack->in_probe_rtt == 0) && 12804 rack->rc_gp_dyn_mul && 12805 rack->rc_always_pace) { 12806 /* Check if we are dragging bottom */ 12807 rack_check_bottom_drag(tp, rack, so); 12808 } 12809 if (tp->snd_una == tp->snd_max) { 12810 tp->t_flags &= ~TF_PREVVALID; 12811 rack->r_ctl.retran_during_recovery = 0; 12812 rack->rc_suspicious = 0; 12813 rack->r_ctl.dsack_byte_cnt = 0; 12814 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 12815 if (rack->r_ctl.rc_went_idle_time == 0) 12816 rack->r_ctl.rc_went_idle_time = 1; 12817 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 12818 if (sbavail(&tptosocket(tp)->so_snd) == 0) 12819 tp->t_acktime = 0; 12820 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12821 } 12822 if (acked && rack->r_fast_output) 12823 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 12824 if (sbavail(&so->so_snd)) { 12825 rack->r_wanted_output = 1; 12826 } 12827 return (1); 12828 } 12829 12830 /* 12831 * Return value of 1, the TCB is unlocked and most 12832 * likely gone, return value of 0, the TCP is still 12833 * locked. 12834 */ 12835 static int 12836 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 12837 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12838 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12839 { 12840 int32_t ret_val = 0; 12841 int32_t orig_tlen = tlen; 12842 int32_t todrop; 12843 int32_t ourfinisacked = 0; 12844 struct tcp_rack *rack; 12845 12846 INP_WLOCK_ASSERT(tptoinpcb(tp)); 12847 12848 ctf_calc_rwin(so, tp); 12849 /* 12850 * If the state is SYN_SENT: if seg contains an ACK, but not for our 12851 * SYN, drop the input. if seg contains a RST, then drop the 12852 * connection. if seg does not contain SYN, then drop it. Otherwise 12853 * this is an acceptable SYN segment initialize tp->rcv_nxt and 12854 * tp->irs if seg contains ack then advance tp->snd_una if seg 12855 * contains an ECE and ECN support is enabled, the stream is ECN 12856 * capable. if SYN has been acked change to ESTABLISHED else 12857 * SYN_RCVD state arrange for segment to be acked (eventually) 12858 * continue processing rest of data/controls. 12859 */ 12860 if ((thflags & TH_ACK) && 12861 (SEQ_LEQ(th->th_ack, tp->iss) || 12862 SEQ_GT(th->th_ack, tp->snd_max))) { 12863 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 12864 ctf_do_dropwithreset(m, tp, th, tlen); 12865 return (1); 12866 } 12867 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 12868 TCP_PROBE5(connect__refused, NULL, tp, 12869 mtod(m, const char *), tp, th); 12870 tp = tcp_drop(tp, ECONNREFUSED); 12871 ctf_do_drop(m, tp); 12872 return (1); 12873 } 12874 if (thflags & TH_RST) { 12875 ctf_do_drop(m, tp); 12876 return (1); 12877 } 12878 if (!(thflags & TH_SYN)) { 12879 ctf_do_drop(m, tp); 12880 return (1); 12881 } 12882 tp->irs = th->th_seq; 12883 tcp_rcvseqinit(tp); 12884 rack = (struct tcp_rack *)tp->t_fb_ptr; 12885 if (thflags & TH_ACK) { 12886 int tfo_partial = 0; 12887 12888 KMOD_TCPSTAT_INC(tcps_connects); 12889 soisconnected(so); 12890 #ifdef MAC 12891 mac_socketpeer_set_from_mbuf(m, so); 12892 #endif 12893 /* Do window scaling on this connection? 
*/ 12894 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 12895 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 12896 tp->rcv_scale = tp->request_r_scale; 12897 } 12898 tp->rcv_adv += min(tp->rcv_wnd, 12899 TCP_MAXWIN << tp->rcv_scale); 12900 /* 12901 * If not all the data that was sent in the TFO SYN 12902 * has been acked, resend the remainder right away. 12903 */ 12904 if ((tp->t_flags & TF_FASTOPEN) && 12905 (tp->snd_una != tp->snd_max)) { 12906 /* Was it a partial ack? */ 12907 if (SEQ_LT(th->th_ack, tp->snd_max)) 12908 tfo_partial = 1; 12909 } 12910 /* 12911 * If there's data, delay ACK; if there's also a FIN ACKNOW 12912 * will be turned on later. 12913 */ 12914 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 12915 rack_timer_cancel(tp, rack, 12916 rack->r_ctl.rc_rcvtime, __LINE__); 12917 tp->t_flags |= TF_DELACK; 12918 } else { 12919 rack->r_wanted_output = 1; 12920 tp->t_flags |= TF_ACKNOW; 12921 } 12922 12923 tcp_ecn_input_syn_sent(tp, thflags, iptos); 12924 12925 if (SEQ_GT(th->th_ack, tp->snd_una)) { 12926 /* 12927 * We advance snd_una for the 12928 * fast open case. If th_ack is 12929 * acknowledging data beyond 12930 * snd_una we can't just call 12931 * ack-processing since the 12932 * data stream in our send-map 12933 * will start at snd_una + 1 (one 12934 * beyond the SYN). If its just 12935 * equal we don't need to do that 12936 * and there is no send_map. 12937 */ 12938 tp->snd_una++; 12939 if (tfo_partial && (SEQ_GT(tp->snd_max, tp->snd_una))) { 12940 /* 12941 * We sent a SYN with data, and thus have a 12942 * sendmap entry with a SYN set. Lets find it 12943 * and take off the send bit and the byte and 12944 * set it up to be what we send (send it next). 12945 */ 12946 struct rack_sendmap *rsm; 12947 12948 rsm = tqhash_min(rack->r_ctl.tqh); 12949 if (rsm) { 12950 if (rsm->r_flags & RACK_HAS_SYN) { 12951 rsm->r_flags &= ~RACK_HAS_SYN; 12952 rsm->r_start++; 12953 } 12954 rack->r_ctl.rc_resend = rsm; 12955 } 12956 } 12957 } 12958 /* 12959 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 12960 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 12961 */ 12962 tp->t_starttime = ticks; 12963 if (tp->t_flags & TF_NEEDFIN) { 12964 tcp_state_change(tp, TCPS_FIN_WAIT_1); 12965 tp->t_flags &= ~TF_NEEDFIN; 12966 thflags &= ~TH_SYN; 12967 } else { 12968 tcp_state_change(tp, TCPS_ESTABLISHED); 12969 TCP_PROBE5(connect__established, NULL, tp, 12970 mtod(m, const char *), tp, th); 12971 rack_cc_conn_init(tp); 12972 } 12973 } else { 12974 /* 12975 * Received initial SYN in SYN-SENT[*] state => simultaneous 12976 * open. If segment contains CC option and there is a 12977 * cached CC, apply TAO test. If it succeeds, connection is * 12978 * half-synchronized. Otherwise, do 3-way handshake: 12979 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 12980 * there was no CC option, clear cached CC value. 12981 */ 12982 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); 12983 tcp_state_change(tp, TCPS_SYN_RECEIVED); 12984 } 12985 /* 12986 * Advance th->th_seq to correspond to first data byte. If data, 12987 * trim to stay within window, dropping FIN if necessary. 12988 */ 12989 th->th_seq++; 12990 if (tlen > tp->rcv_wnd) { 12991 todrop = tlen - tp->rcv_wnd; 12992 m_adj(m, -todrop); 12993 tlen = tp->rcv_wnd; 12994 thflags &= ~TH_FIN; 12995 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 12996 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 12997 } 12998 tp->snd_wl1 = th->th_seq - 1; 12999 tp->rcv_up = th->th_seq; 13000 /* 13001 * Client side of transaction: already sent SYN and data. 
If the 13002 * remote host used T/TCP to validate the SYN, our data will be 13003 * ACK'd; if so, enter normal data segment processing in the middle 13004 * of step 5, ack processing. Otherwise, goto step 6. 13005 */ 13006 if (thflags & TH_ACK) { 13007 /* For syn-sent we need to possibly update the rtt */ 13008 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 13009 uint32_t t, mcts; 13010 13011 mcts = tcp_ts_getticks(); 13012 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 13013 if (!tp->t_rttlow || tp->t_rttlow > t) 13014 tp->t_rttlow = t; 13015 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 13016 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 13017 tcp_rack_xmit_timer_commit(rack, tp); 13018 } 13019 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) 13020 return (ret_val); 13021 /* We may have changed to FIN_WAIT_1 above */ 13022 if (tp->t_state == TCPS_FIN_WAIT_1) { 13023 /* 13024 * In FIN_WAIT_1 STATE in addition to the processing 13025 * for the ESTABLISHED state if our FIN is now 13026 * acknowledged then enter FIN_WAIT_2. 13027 */ 13028 if (ourfinisacked) { 13029 /* 13030 * If we can't receive any more data, then 13031 * closing user can proceed. Starting the 13032 * timer is contrary to the specification, 13033 * but if we don't get a FIN we'll hang 13034 * forever. 13035 * 13036 * XXXjl: we should release the tp also, and 13037 * use a compressed state. 13038 */ 13039 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13040 soisdisconnected(so); 13041 tcp_timer_activate(tp, TT_2MSL, 13042 (tcp_fast_finwait2_recycle ? 13043 tcp_finwait2_timeout : 13044 TP_MAXIDLE(tp))); 13045 } 13046 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13047 } 13048 } 13049 } 13050 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13051 tiwin, thflags, nxt_pkt)); 13052 } 13053 13054 /* 13055 * Return value of 1, the TCB is unlocked and most 13056 * likely gone, return value of 0, the TCP is still 13057 * locked. 13058 */ 13059 static int 13060 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 13061 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13062 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13063 { 13064 struct tcp_rack *rack; 13065 int32_t orig_tlen = tlen; 13066 int32_t ret_val = 0; 13067 int32_t ourfinisacked = 0; 13068 13069 rack = (struct tcp_rack *)tp->t_fb_ptr; 13070 ctf_calc_rwin(so, tp); 13071 if ((thflags & TH_RST) || 13072 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13073 return (ctf_process_rst(m, th, so, tp)); 13074 if ((thflags & TH_ACK) && 13075 (SEQ_LEQ(th->th_ack, tp->snd_una) || 13076 SEQ_GT(th->th_ack, tp->snd_max))) { 13077 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13078 ctf_do_dropwithreset(m, tp, th, tlen); 13079 return (1); 13080 } 13081 if (tp->t_flags & TF_FASTOPEN) { 13082 /* 13083 * When a TFO connection is in SYN_RECEIVED, the 13084 * only valid packets are the initial SYN, a 13085 * retransmit/copy of the initial SYN (possibly with 13086 * a subset of the original data), a valid ACK, a 13087 * FIN, or a RST. 
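 * (Editorial note, from the checks that follow: a segment with both SYN and ACK set is none of those and is answered with a reset, while a bare non-initial SYN is dropped whenever a retransmit, TLP or RACK timer is pending.)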
13088 */ 13089 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 13090 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13091 ctf_do_dropwithreset(m, tp, th, tlen); 13092 return (1); 13093 } else if (thflags & TH_SYN) { 13094 /* non-initial SYN is ignored */ 13095 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 13096 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 13097 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 13098 ctf_do_drop(m, NULL); 13099 return (0); 13100 } 13101 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 13102 ctf_do_drop(m, NULL); 13103 return (0); 13104 } 13105 } 13106 13107 /* 13108 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13109 * it's less than ts_recent, drop it. 13110 */ 13111 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13112 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13113 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13114 return (ret_val); 13115 } 13116 /* 13117 * In the SYN-RECEIVED state, validate that the packet belongs to 13118 * this connection before trimming the data to fit the receive 13119 * window. Check the sequence number versus IRS since we know the 13120 * sequence numbers haven't wrapped. This is a partial fix for the 13121 * "LAND" DoS attack. 13122 */ 13123 if (SEQ_LT(th->th_seq, tp->irs)) { 13124 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13125 ctf_do_dropwithreset(m, tp, th, tlen); 13126 return (1); 13127 } 13128 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13129 return (ret_val); 13130 } 13131 /* 13132 * If last ACK falls within this segment's sequence numbers, record 13133 * its timestamp. NOTE: 1) That the test incorporates suggestions 13134 * from the latest proposal of the tcplw@cray.com list (Braden 13135 * 1993/04/26). 2) That updating only on newer timestamps interferes 13136 * with our earlier PAWS tests, so this check should be solely 13137 * predicated on the sequence space of this segment. 3) That we 13138 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13139 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13140 * SEG.Len, This modified check allows us to overcome RFC1323's 13141 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13142 * p.869. In such cases, we can still calculate the RTT correctly 13143 * when RCV.NXT == Last.ACK.Sent. 13144 */ 13145 if ((to->to_flags & TOF_TS) != 0 && 13146 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13147 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13148 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13149 tp->ts_recent_age = tcp_ts_getticks(); 13150 tp->ts_recent = to->to_tsval; 13151 } 13152 tp->snd_wnd = tiwin; 13153 rack_validate_fo_sendwin_up(tp, rack); 13154 /* 13155 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13156 * is on (half-synchronized state), then queue data for later 13157 * processing; else drop segment and return. 13158 */ 13159 if ((thflags & TH_ACK) == 0) { 13160 if (tp->t_flags & TF_FASTOPEN) { 13161 rack_cc_conn_init(tp); 13162 } 13163 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13164 tiwin, thflags, nxt_pkt)); 13165 } 13166 KMOD_TCPSTAT_INC(tcps_connects); 13167 if (tp->t_flags & TF_SONOTCONN) { 13168 tp->t_flags &= ~TF_SONOTCONN; 13169 soisconnected(so); 13170 } 13171 /* Do window scaling? 
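 *
 * Scaling is applied only when the option was both requested and
 * received, i.e. TF_REQ_SCALE and TF_RCVD_SCALE are both set in the
 * test below.  As a quick illustration of the effect (standard
 * RFC 7323 behavior): with request_r_scale = 7 a 16-bit advertised
 * window of 65535 stands for 65535 << 7, roughly 8 MB of receive
 * window.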
*/ 13172 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 13173 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 13174 tp->rcv_scale = tp->request_r_scale; 13175 } 13176 /* 13177 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 13178 * FIN-WAIT-1 13179 */ 13180 tp->t_starttime = ticks; 13181 if ((tp->t_flags & TF_FASTOPEN) && tp->t_tfo_pending) { 13182 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 13183 tp->t_tfo_pending = NULL; 13184 } 13185 if (tp->t_flags & TF_NEEDFIN) { 13186 tcp_state_change(tp, TCPS_FIN_WAIT_1); 13187 tp->t_flags &= ~TF_NEEDFIN; 13188 } else { 13189 tcp_state_change(tp, TCPS_ESTABLISHED); 13190 TCP_PROBE5(accept__established, NULL, tp, 13191 mtod(m, const char *), tp, th); 13192 /* 13193 * TFO connections call cc_conn_init() during SYN 13194 * processing. Calling it again here for such connections 13195 * is not harmless as it would undo the snd_cwnd reduction 13196 * that occurs when a TFO SYN|ACK is retransmitted. 13197 */ 13198 if (!(tp->t_flags & TF_FASTOPEN)) 13199 rack_cc_conn_init(tp); 13200 } 13201 /* 13202 * Account for the ACK of our SYN prior to 13203 * regular ACK processing below, except for 13204 * simultaneous SYN, which is handled later. 13205 */ 13206 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 13207 tp->snd_una++; 13208 /* 13209 * If segment contains data or ACK, will call tcp_reass() later; if 13210 * not, do so now to pass queued data to user. 13211 */ 13212 if (tlen == 0 && (thflags & TH_FIN) == 0) { 13213 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 13214 (struct mbuf *)0); 13215 if (tp->t_flags & TF_WAKESOR) { 13216 tp->t_flags &= ~TF_WAKESOR; 13217 /* NB: sorwakeup_locked() does an implicit unlock. */ 13218 sorwakeup_locked(so); 13219 } 13220 } 13221 tp->snd_wl1 = th->th_seq - 1; 13222 /* For syn-recv we need to possibly update the rtt */ 13223 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 13224 uint32_t t, mcts; 13225 13226 mcts = tcp_ts_getticks(); 13227 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 13228 if (!tp->t_rttlow || tp->t_rttlow > t) 13229 tp->t_rttlow = t; 13230 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); 13231 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 13232 tcp_rack_xmit_timer_commit(rack, tp); 13233 } 13234 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13235 return (ret_val); 13236 } 13237 if (tp->t_state == TCPS_FIN_WAIT_1) { 13238 /* We could have went to FIN_WAIT_1 (or EST) above */ 13239 /* 13240 * In FIN_WAIT_1 STATE in addition to the processing for the 13241 * ESTABLISHED state if our FIN is now acknowledged then 13242 * enter FIN_WAIT_2. 13243 */ 13244 if (ourfinisacked) { 13245 /* 13246 * If we can't receive any more data, then closing 13247 * user can proceed. Starting the timer is contrary 13248 * to the specification, but if we don't get a FIN 13249 * we'll hang forever. 13250 * 13251 * XXXjl: we should release the tp also, and use a 13252 * compressed state. 13253 */ 13254 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13255 soisdisconnected(so); 13256 tcp_timer_activate(tp, TT_2MSL, 13257 (tcp_fast_finwait2_recycle ? 
13258 tcp_finwait2_timeout : 13259 TP_MAXIDLE(tp))); 13260 } 13261 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13262 } 13263 } 13264 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13265 tiwin, thflags, nxt_pkt)); 13266 } 13267 13268 /* 13269 * Return value of 1, the TCB is unlocked and most 13270 * likely gone, return value of 0, the TCP is still 13271 * locked. 13272 */ 13273 static int 13274 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 13275 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13276 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13277 { 13278 int32_t ret_val = 0; 13279 int32_t orig_tlen = tlen; 13280 struct tcp_rack *rack; 13281 13282 /* 13283 * Header prediction: check for the two common cases of a 13284 * uni-directional data xfer. If the packet has no control flags, 13285 * is in-sequence, the window didn't change and we're not 13286 * retransmitting, it's a candidate. If the length is zero and the 13287 * ack moved forward, we're the sender side of the xfer. Just free 13288 * the data acked & wake any higher level process that was blocked 13289 * waiting for space. If the length is non-zero and the ack didn't 13290 * move, we're the receiver side. If we're getting packets in-order 13291 * (the reassembly queue is empty), add the data toc The socket 13292 * buffer and note that we need a delayed ack. Make sure that the 13293 * hidden state-flags are also off. Since we check for 13294 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 13295 */ 13296 rack = (struct tcp_rack *)tp->t_fb_ptr; 13297 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 13298 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) && 13299 __predict_true(SEGQ_EMPTY(tp)) && 13300 __predict_true(th->th_seq == tp->rcv_nxt)) { 13301 if (tlen == 0) { 13302 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 13303 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 13304 return (0); 13305 } 13306 } else { 13307 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 13308 tiwin, nxt_pkt, iptos)) { 13309 return (0); 13310 } 13311 } 13312 } 13313 ctf_calc_rwin(so, tp); 13314 13315 if ((thflags & TH_RST) || 13316 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13317 return (ctf_process_rst(m, th, so, tp)); 13318 13319 /* 13320 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13321 * synchronized state. 13322 */ 13323 if (thflags & TH_SYN) { 13324 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13325 return (ret_val); 13326 } 13327 /* 13328 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13329 * it's less than ts_recent, drop it. 13330 */ 13331 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13332 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13333 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13334 return (ret_val); 13335 } 13336 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13337 return (ret_val); 13338 } 13339 /* 13340 * If last ACK falls within this segment's sequence numbers, record 13341 * its timestamp. NOTE: 1) That the test incorporates suggestions 13342 * from the latest proposal of the tcplw@cray.com list (Braden 13343 * 1993/04/26). 2) That updating only on newer timestamps interferes 13344 * with our earlier PAWS tests, so this check should be solely 13345 * predicated on the sequence space of this segment. 
3) That we 13346 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13347 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13348 * SEG.Len, This modified check allows us to overcome RFC1323's 13349 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13350 * p.869. In such cases, we can still calculate the RTT correctly 13351 * when RCV.NXT == Last.ACK.Sent. 13352 */ 13353 if ((to->to_flags & TOF_TS) != 0 && 13354 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13355 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13356 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13357 tp->ts_recent_age = tcp_ts_getticks(); 13358 tp->ts_recent = to->to_tsval; 13359 } 13360 /* 13361 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13362 * is on (half-synchronized state), then queue data for later 13363 * processing; else drop segment and return. 13364 */ 13365 if ((thflags & TH_ACK) == 0) { 13366 if (tp->t_flags & TF_NEEDSYN) { 13367 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13368 tiwin, thflags, nxt_pkt)); 13369 13370 } else if (tp->t_flags & TF_ACKNOW) { 13371 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13372 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13373 return (ret_val); 13374 } else { 13375 ctf_do_drop(m, NULL); 13376 return (0); 13377 } 13378 } 13379 /* 13380 * Ack processing. 13381 */ 13382 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val, orig_tlen)) { 13383 return (ret_val); 13384 } 13385 if (sbavail(&so->so_snd)) { 13386 if (ctf_progress_timeout_check(tp, true)) { 13387 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 13388 ctf_do_dropwithreset_conn(m, tp, th, tlen); 13389 return (1); 13390 } 13391 } 13392 /* State changes only happen in rack_process_data() */ 13393 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13394 tiwin, thflags, nxt_pkt)); 13395 } 13396 13397 /* 13398 * Return value of 1, the TCB is unlocked and most 13399 * likely gone, return value of 0, the TCP is still 13400 * locked. 13401 */ 13402 static int 13403 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 13404 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13405 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13406 { 13407 int32_t ret_val = 0; 13408 int32_t orig_tlen = tlen; 13409 13410 ctf_calc_rwin(so, tp); 13411 if ((thflags & TH_RST) || 13412 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13413 return (ctf_process_rst(m, th, so, tp)); 13414 /* 13415 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13416 * synchronized state. 13417 */ 13418 if (thflags & TH_SYN) { 13419 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13420 return (ret_val); 13421 } 13422 /* 13423 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13424 * it's less than ts_recent, drop it. 13425 */ 13426 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13427 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13428 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13429 return (ret_val); 13430 } 13431 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13432 return (ret_val); 13433 } 13434 /* 13435 * If last ACK falls within this segment's sequence numbers, record 13436 * its timestamp. NOTE: 1) That the test incorporates suggestions 13437 * from the latest proposal of the tcplw@cray.com list (Braden 13438 * 1993/04/26). 
2) That updating only on newer timestamps interferes 13439 * with our earlier PAWS tests, so this check should be solely 13440 * predicated on the sequence space of this segment. 3) That we 13441 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13442 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13443 * SEG.Len, This modified check allows us to overcome RFC1323's 13444 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13445 * p.869. In such cases, we can still calculate the RTT correctly 13446 * when RCV.NXT == Last.ACK.Sent. 13447 */ 13448 if ((to->to_flags & TOF_TS) != 0 && 13449 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13450 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13451 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13452 tp->ts_recent_age = tcp_ts_getticks(); 13453 tp->ts_recent = to->to_tsval; 13454 } 13455 /* 13456 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13457 * is on (half-synchronized state), then queue data for later 13458 * processing; else drop segment and return. 13459 */ 13460 if ((thflags & TH_ACK) == 0) { 13461 if (tp->t_flags & TF_NEEDSYN) { 13462 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13463 tiwin, thflags, nxt_pkt)); 13464 13465 } else if (tp->t_flags & TF_ACKNOW) { 13466 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13467 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13468 return (ret_val); 13469 } else { 13470 ctf_do_drop(m, NULL); 13471 return (0); 13472 } 13473 } 13474 /* 13475 * Ack processing. 13476 */ 13477 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val, orig_tlen)) { 13478 return (ret_val); 13479 } 13480 if (sbavail(&so->so_snd)) { 13481 if (ctf_progress_timeout_check(tp, true)) { 13482 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13483 tp, tick, PROGRESS_DROP, __LINE__); 13484 ctf_do_dropwithreset_conn(m, tp, th, tlen); 13485 return (1); 13486 } 13487 } 13488 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13489 tiwin, thflags, nxt_pkt)); 13490 } 13491 13492 static int 13493 rack_check_data_after_close(struct mbuf *m, 13494 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 13495 { 13496 struct tcp_rack *rack; 13497 13498 rack = (struct tcp_rack *)tp->t_fb_ptr; 13499 if (rack->rc_allow_data_af_clo == 0) { 13500 close_now: 13501 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 13502 /* tcp_close will kill the inp pre-log the Reset */ 13503 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 13504 tp = tcp_close(tp); 13505 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 13506 ctf_do_dropwithreset(m, tp, th, *tlen); 13507 return (1); 13508 } 13509 if (sbavail(&so->so_snd) == 0) 13510 goto close_now; 13511 /* Ok we allow data that is ignored and a followup reset */ 13512 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 13513 tp->rcv_nxt = th->th_seq + *tlen; 13514 tp->t_flags2 |= TF2_DROP_AF_DATA; 13515 rack->r_wanted_output = 1; 13516 *tlen = 0; 13517 return (0); 13518 } 13519 13520 /* 13521 * Return value of 1, the TCB is unlocked and most 13522 * likely gone, return value of 0, the TCP is still 13523 * locked. 
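 *
 * An illustrative caller pattern implied by this contract (a sketch
 * only; state_fn stands in for whichever rack_do_* handler the input
 * path dispatches to):
 *
 *	if ((*state_fn)(m, th, so, tp, &to, ...) == 1)
 *		return;		<- TCB unlocked, possibly freed
 *	... otherwise the TCB is still locked and safe to use ...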
13524 */ 13525 static int 13526 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 13527 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13528 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13529 { 13530 int32_t ret_val = 0; 13531 int32_t orig_tlen = tlen; 13532 int32_t ourfinisacked = 0; 13533 13534 ctf_calc_rwin(so, tp); 13535 13536 if ((thflags & TH_RST) || 13537 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13538 return (ctf_process_rst(m, th, so, tp)); 13539 /* 13540 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13541 * synchronized state. 13542 */ 13543 if (thflags & TH_SYN) { 13544 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13545 return (ret_val); 13546 } 13547 /* 13548 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13549 * it's less than ts_recent, drop it. 13550 */ 13551 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13552 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13553 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13554 return (ret_val); 13555 } 13556 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13557 return (ret_val); 13558 } 13559 /* 13560 * If new data are received on a connection after the user processes 13561 * are gone, then RST the other end. 13562 */ 13563 if ((tp->t_flags & TF_CLOSED) && tlen && 13564 rack_check_data_after_close(m, tp, &tlen, th, so)) 13565 return (1); 13566 /* 13567 * If last ACK falls within this segment's sequence numbers, record 13568 * its timestamp. NOTE: 1) That the test incorporates suggestions 13569 * from the latest proposal of the tcplw@cray.com list (Braden 13570 * 1993/04/26). 2) That updating only on newer timestamps interferes 13571 * with our earlier PAWS tests, so this check should be solely 13572 * predicated on the sequence space of this segment. 3) That we 13573 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13574 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13575 * SEG.Len, This modified check allows us to overcome RFC1323's 13576 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13577 * p.869. In such cases, we can still calculate the RTT correctly 13578 * when RCV.NXT == Last.ACK.Sent. 13579 */ 13580 if ((to->to_flags & TOF_TS) != 0 && 13581 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13582 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13583 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13584 tp->ts_recent_age = tcp_ts_getticks(); 13585 tp->ts_recent = to->to_tsval; 13586 } 13587 /* 13588 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13589 * is on (half-synchronized state), then queue data for later 13590 * processing; else drop segment and return. 13591 */ 13592 if ((thflags & TH_ACK) == 0) { 13593 if (tp->t_flags & TF_NEEDSYN) { 13594 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13595 tiwin, thflags, nxt_pkt)); 13596 } else if (tp->t_flags & TF_ACKNOW) { 13597 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13598 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13599 return (ret_val); 13600 } else { 13601 ctf_do_drop(m, NULL); 13602 return (0); 13603 } 13604 } 13605 /* 13606 * Ack processing. 13607 */ 13608 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13609 return (ret_val); 13610 } 13611 if (ourfinisacked) { 13612 /* 13613 * If we can't receive any more data, then closing user can 13614 * proceed. 
Starting the timer is contrary to the 13615 * specification, but if we don't get a FIN we'll hang 13616 * forever. 13617 * 13618 * XXXjl: we should release the tp also, and use a 13619 * compressed state. 13620 */ 13621 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13622 soisdisconnected(so); 13623 tcp_timer_activate(tp, TT_2MSL, 13624 (tcp_fast_finwait2_recycle ? 13625 tcp_finwait2_timeout : 13626 TP_MAXIDLE(tp))); 13627 } 13628 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13629 } 13630 if (sbavail(&so->so_snd)) { 13631 if (ctf_progress_timeout_check(tp, true)) { 13632 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13633 tp, tick, PROGRESS_DROP, __LINE__); 13634 ctf_do_dropwithreset_conn(m, tp, th, tlen); 13635 return (1); 13636 } 13637 } 13638 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13639 tiwin, thflags, nxt_pkt)); 13640 } 13641 13642 /* 13643 * Return value of 1, the TCB is unlocked and most 13644 * likely gone, return value of 0, the TCP is still 13645 * locked. 13646 */ 13647 static int 13648 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 13649 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13650 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13651 { 13652 int32_t ret_val = 0; 13653 int32_t orig_tlen = tlen; 13654 int32_t ourfinisacked = 0; 13655 13656 ctf_calc_rwin(so, tp); 13657 13658 if ((thflags & TH_RST) || 13659 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13660 return (ctf_process_rst(m, th, so, tp)); 13661 /* 13662 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13663 * synchronized state. 13664 */ 13665 if (thflags & TH_SYN) { 13666 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13667 return (ret_val); 13668 } 13669 /* 13670 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13671 * it's less than ts_recent, drop it. 13672 */ 13673 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13674 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13675 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13676 return (ret_val); 13677 } 13678 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13679 return (ret_val); 13680 } 13681 /* 13682 * If last ACK falls within this segment's sequence numbers, record 13683 * its timestamp. NOTE: 1) That the test incorporates suggestions 13684 * from the latest proposal of the tcplw@cray.com list (Braden 13685 * 1993/04/26). 2) That updating only on newer timestamps interferes 13686 * with our earlier PAWS tests, so this check should be solely 13687 * predicated on the sequence space of this segment. 3) That we 13688 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13689 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13690 * SEG.Len, This modified check allows us to overcome RFC1323's 13691 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13692 * p.869. In such cases, we can still calculate the RTT correctly 13693 * when RCV.NXT == Last.ACK.Sent. 13694 */ 13695 if ((to->to_flags & TOF_TS) != 0 && 13696 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13697 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13698 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13699 tp->ts_recent_age = tcp_ts_getticks(); 13700 tp->ts_recent = to->to_tsval; 13701 } 13702 /* 13703 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13704 * is on (half-synchronized state), then queue data for later 13705 * processing; else drop segment and return. 
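 *
 * Concretely, the branch below handles the no-ACK case as follows
 * (mirroring the code for quick reference):
 *	TF_NEEDSYN set -> hand the segment to rack_process_data()
 *	TF_ACKNOW set  -> drop the segment via ctf_do_dropafterack()
 *			  and request output
 *	otherwise      -> drop the segment silently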
13706 */ 13707 if ((thflags & TH_ACK) == 0) { 13708 if (tp->t_flags & TF_NEEDSYN) { 13709 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13710 tiwin, thflags, nxt_pkt)); 13711 } else if (tp->t_flags & TF_ACKNOW) { 13712 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13713 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13714 return (ret_val); 13715 } else { 13716 ctf_do_drop(m, NULL); 13717 return (0); 13718 } 13719 } 13720 /* 13721 * Ack processing. 13722 */ 13723 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13724 return (ret_val); 13725 } 13726 if (ourfinisacked) { 13727 tcp_twstart(tp); 13728 m_freem(m); 13729 return (1); 13730 } 13731 if (sbavail(&so->so_snd)) { 13732 if (ctf_progress_timeout_check(tp, true)) { 13733 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13734 tp, tick, PROGRESS_DROP, __LINE__); 13735 ctf_do_dropwithreset_conn(m, tp, th, tlen); 13736 return (1); 13737 } 13738 } 13739 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13740 tiwin, thflags, nxt_pkt)); 13741 } 13742 13743 /* 13744 * Return value of 1, the TCB is unlocked and most 13745 * likely gone, return value of 0, the TCP is still 13746 * locked. 13747 */ 13748 static int 13749 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 13750 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13751 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13752 { 13753 int32_t ret_val = 0; 13754 int32_t orig_tlen; 13755 int32_t ourfinisacked = 0; 13756 13757 ctf_calc_rwin(so, tp); 13758 13759 if ((thflags & TH_RST) || 13760 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13761 return (ctf_process_rst(m, th, so, tp)); 13762 /* 13763 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13764 * synchronized state. 13765 */ 13766 if (thflags & TH_SYN) { 13767 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13768 return (ret_val); 13769 } 13770 /* 13771 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13772 * it's less than ts_recent, drop it. 13773 */ 13774 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13775 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13776 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13777 return (ret_val); 13778 } 13779 orig_tlen = tlen; 13780 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13781 return (ret_val); 13782 } 13783 /* 13784 * If last ACK falls within this segment's sequence numbers, record 13785 * its timestamp. NOTE: 1) That the test incorporates suggestions 13786 * from the latest proposal of the tcplw@cray.com list (Braden 13787 * 1993/04/26). 2) That updating only on newer timestamps interferes 13788 * with our earlier PAWS tests, so this check should be solely 13789 * predicated on the sequence space of this segment. 3) That we 13790 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13791 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13792 * SEG.Len, This modified check allows us to overcome RFC1323's 13793 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13794 * p.869. In such cases, we can still calculate the RTT correctly 13795 * when RCV.NXT == Last.ACK.Sent. 
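 *
 * Worked example: a pure ACK with SEG.Len == 0 and SEG.SEQ equal to
 * Last.ACK.Sent satisfies the relaxed test with equality
 * (Last.ACK.Sent <= SEG.SEQ + 0), so ts_recent is still refreshed,
 * whereas the strict RFC1323 "<" form would skip the update.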
13796 */ 13797 if ((to->to_flags & TOF_TS) != 0 && 13798 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13799 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13800 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13801 tp->ts_recent_age = tcp_ts_getticks(); 13802 tp->ts_recent = to->to_tsval; 13803 } 13804 /* 13805 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13806 * is on (half-synchronized state), then queue data for later 13807 * processing; else drop segment and return. 13808 */ 13809 if ((thflags & TH_ACK) == 0) { 13810 if (tp->t_flags & TF_NEEDSYN) { 13811 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13812 tiwin, thflags, nxt_pkt)); 13813 } else if (tp->t_flags & TF_ACKNOW) { 13814 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13815 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13816 return (ret_val); 13817 } else { 13818 ctf_do_drop(m, NULL); 13819 return (0); 13820 } 13821 } 13822 /* 13823 * case TCPS_LAST_ACK: Ack processing. 13824 */ 13825 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13826 return (ret_val); 13827 } 13828 if (ourfinisacked) { 13829 tp = tcp_close(tp); 13830 ctf_do_drop(m, tp); 13831 return (1); 13832 } 13833 if (sbavail(&so->so_snd)) { 13834 if (ctf_progress_timeout_check(tp, true)) { 13835 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13836 tp, tick, PROGRESS_DROP, __LINE__); 13837 ctf_do_dropwithreset_conn(m, tp, th, tlen); 13838 return (1); 13839 } 13840 } 13841 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13842 tiwin, thflags, nxt_pkt)); 13843 } 13844 13845 /* 13846 * Return value of 1, the TCB is unlocked and most 13847 * likely gone, return value of 0, the TCP is still 13848 * locked. 13849 */ 13850 static int 13851 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 13852 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13853 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13854 { 13855 int32_t ret_val = 0; 13856 int32_t orig_tlen = tlen; 13857 int32_t ourfinisacked = 0; 13858 13859 ctf_calc_rwin(so, tp); 13860 13861 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 13862 if ((thflags & TH_RST) || 13863 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13864 return (ctf_process_rst(m, th, so, tp)); 13865 /* 13866 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13867 * synchronized state. 13868 */ 13869 if (thflags & TH_SYN) { 13870 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13871 return (ret_val); 13872 } 13873 /* 13874 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13875 * it's less than ts_recent, drop it. 13876 */ 13877 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13878 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13879 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13880 return (ret_val); 13881 } 13882 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13883 return (ret_val); 13884 } 13885 /* 13886 * If new data are received on a connection after the user processes 13887 * are gone, then RST the other end. 13888 */ 13889 if ((tp->t_flags & TF_CLOSED) && tlen && 13890 rack_check_data_after_close(m, tp, &tlen, th, so)) 13891 return (1); 13892 /* 13893 * If last ACK falls within this segment's sequence numbers, record 13894 * its timestamp. NOTE: 1) That the test incorporates suggestions 13895 * from the latest proposal of the tcplw@cray.com list (Braden 13896 * 1993/04/26). 
2) That updating only on newer timestamps interferes 13897 * with our earlier PAWS tests, so this check should be solely 13898 * predicated on the sequence space of this segment. 3) That we 13899 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13900 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13901 * SEG.Len, This modified check allows us to overcome RFC1323's 13902 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13903 * p.869. In such cases, we can still calculate the RTT correctly 13904 * when RCV.NXT == Last.ACK.Sent. 13905 */ 13906 if ((to->to_flags & TOF_TS) != 0 && 13907 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13908 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13909 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13910 tp->ts_recent_age = tcp_ts_getticks(); 13911 tp->ts_recent = to->to_tsval; 13912 } 13913 /* 13914 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13915 * is on (half-synchronized state), then queue data for later 13916 * processing; else drop segment and return. 13917 */ 13918 if ((thflags & TH_ACK) == 0) { 13919 if (tp->t_flags & TF_NEEDSYN) { 13920 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13921 tiwin, thflags, nxt_pkt)); 13922 } else if (tp->t_flags & TF_ACKNOW) { 13923 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13924 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13925 return (ret_val); 13926 } else { 13927 ctf_do_drop(m, NULL); 13928 return (0); 13929 } 13930 } 13931 /* 13932 * Ack processing. 13933 */ 13934 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13935 return (ret_val); 13936 } 13937 if (sbavail(&so->so_snd)) { 13938 if (ctf_progress_timeout_check(tp, true)) { 13939 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13940 tp, tick, PROGRESS_DROP, __LINE__); 13941 ctf_do_dropwithreset_conn(m, tp, th, tlen); 13942 return (1); 13943 } 13944 } 13945 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13946 tiwin, thflags, nxt_pkt)); 13947 } 13948 13949 static void inline 13950 rack_clear_rate_sample(struct tcp_rack *rack) 13951 { 13952 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 13953 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 13954 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 13955 } 13956 13957 static void 13958 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 13959 { 13960 uint64_t bw_est, rate_wanted; 13961 int chged = 0; 13962 uint32_t user_max, orig_min, orig_max; 13963 13964 #ifdef TCP_REQUEST_TRK 13965 if (rack->rc_hybrid_mode && 13966 (rack->r_ctl.rc_pace_max_segs != 0) && 13967 (rack_hybrid_allow_set_maxseg == 1) && 13968 (rack->r_ctl.rc_last_sft != NULL)) { 13969 rack->r_ctl.rc_last_sft->hybrid_flags &= ~TCP_HYBRID_PACING_SETMSS; 13970 return; 13971 } 13972 #endif 13973 orig_min = rack->r_ctl.rc_pace_min_segs; 13974 orig_max = rack->r_ctl.rc_pace_max_segs; 13975 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 13976 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 13977 chged = 1; 13978 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 13979 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 13980 if (user_max != rack->r_ctl.rc_pace_max_segs) 13981 chged = 1; 13982 } 13983 if (rack->rc_force_max_seg) { 13984 rack->r_ctl.rc_pace_max_segs = user_max; 13985 } else if (rack->use_fixed_rate) { 13986 bw_est = rack_get_bw(rack); 13987 if ((rack->r_ctl.crte == NULL) || 13988 (bw_est != rack->r_ctl.crte->rate)) { 
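/*
 * No hardware rate entry, or our b/w estimate no longer matches
 * its rate; fall back to the user-set fixed cap.
 */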
13989 rack->r_ctl.rc_pace_max_segs = user_max; 13990 } else { 13991 /* We are pacing right at the hardware rate */ 13992 uint32_t segsiz, pace_one; 13993 13994 if (rack_pace_one_seg || 13995 (rack->r_ctl.rc_user_set_min_segs == 1)) 13996 pace_one = 1; 13997 else 13998 pace_one = 0; 13999 segsiz = min(ctf_fixed_maxseg(tp), 14000 rack->r_ctl.rc_pace_min_segs); 14001 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor( 14002 tp, bw_est, segsiz, pace_one, 14003 rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); 14004 } 14005 } else if (rack->rc_always_pace) { 14006 if (rack->r_ctl.gp_bw || 14007 rack->r_ctl.init_rate) { 14008 /* We have a rate of some sort set */ 14009 uint32_t orig; 14010 14011 bw_est = rack_get_bw(rack); 14012 orig = rack->r_ctl.rc_pace_max_segs; 14013 if (fill_override) 14014 rate_wanted = *fill_override; 14015 else 14016 rate_wanted = rack_get_gp_est(rack); 14017 if (rate_wanted) { 14018 /* We have something */ 14019 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 14020 rate_wanted, 14021 ctf_fixed_maxseg(rack->rc_tp)); 14022 } else 14023 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 14024 if (orig != rack->r_ctl.rc_pace_max_segs) 14025 chged = 1; 14026 } else if ((rack->r_ctl.gp_bw == 0) && 14027 (rack->r_ctl.rc_pace_max_segs == 0)) { 14028 /* 14029 * If we have nothing limit us to bursting 14030 * out IW sized pieces. 14031 */ 14032 chged = 1; 14033 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 14034 } 14035 } 14036 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 14037 chged = 1; 14038 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 14039 } 14040 if (chged) 14041 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 14042 } 14043 14044 14045 static void 14046 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack, int32_t flags) 14047 { 14048 #ifdef INET6 14049 struct ip6_hdr *ip6 = NULL; 14050 #endif 14051 #ifdef INET 14052 struct ip *ip = NULL; 14053 #endif 14054 struct udphdr *udp = NULL; 14055 14056 /* Ok lets fill in the fast block, it can only be used with no IP options! 
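 *
 * For reference, the prefix assembled here uses the standard header
 * sizes (UDP only when tunneling, i.e. tp->t_port is set):
 *	IPv6: 40 (ip6_hdr) [+ 8 (udphdr)] + 20 (tcphdr) = 60 or 68 bytes
 *	IPv4: 40 (tcpiphdr: ip + tcp)     [+ 8 (udphdr)] = 40 or 48 bytes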
*/ 14057 #ifdef INET6 14058 if (rack->r_is_v6) { 14059 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 14060 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 14061 if (tp->t_port) { 14062 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 14063 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 14064 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 14065 udp->uh_dport = tp->t_port; 14066 rack->r_ctl.fsb.udp = udp; 14067 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 14068 } else 14069 { 14070 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 14071 rack->r_ctl.fsb.udp = NULL; 14072 } 14073 tcpip_fillheaders(rack->rc_inp, 14074 tp->t_port, 14075 ip6, rack->r_ctl.fsb.th); 14076 rack->r_ctl.fsb.hoplimit = in6_selecthlim(rack->rc_inp, NULL); 14077 } else 14078 #endif /* INET6 */ 14079 #ifdef INET 14080 { 14081 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 14082 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 14083 if (tp->t_port) { 14084 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 14085 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 14086 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 14087 udp->uh_dport = tp->t_port; 14088 rack->r_ctl.fsb.udp = udp; 14089 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 14090 } else 14091 { 14092 rack->r_ctl.fsb.udp = NULL; 14093 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 14094 } 14095 tcpip_fillheaders(rack->rc_inp, 14096 tp->t_port, 14097 ip, rack->r_ctl.fsb.th); 14098 rack->r_ctl.fsb.hoplimit = tptoinpcb(tp)->inp_ip_ttl; 14099 } 14100 #endif 14101 rack->r_ctl.fsb.recwin = lmin(lmax(sbspace(&tptosocket(tp)->so_rcv), 0), 14102 (long)TCP_MAXWIN << tp->rcv_scale); 14103 rack->r_fsb_inited = 1; 14104 } 14105 14106 static int 14107 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 14108 { 14109 /* 14110 * Allocate the larger of spaces V6 if available else just 14111 * V4 and include udphdr (overbook) 14112 */ 14113 #ifdef INET6 14114 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 14115 #else 14116 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 14117 #endif 14118 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 14119 M_TCPFSB, M_NOWAIT|M_ZERO); 14120 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 14121 return (ENOMEM); 14122 } 14123 rack->r_fsb_inited = 0; 14124 return (0); 14125 } 14126 14127 static void 14128 rack_log_hystart_event(struct tcp_rack *rack, uint32_t high_seq, uint8_t mod) 14129 { 14130 /* 14131 * Types of logs (mod value) 14132 * 20 - Initial round setup 14133 * 21 - Rack declares a new round. 
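 *
 * The resulting BB-log record (filled in by the code below) carries:
 *	flex1 = current_round, flex2 = roundends, flex3 = high_seq,
 *	flex4 = snd_max, flex8 = mod, with the cumulative sent and
 *	retransmitted byte counts in cur_del_rate/delRate.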
14134 */ 14135 struct tcpcb *tp; 14136 14137 tp = rack->rc_tp; 14138 if (tcp_bblogging_on(tp)) { 14139 union tcp_log_stackspecific log; 14140 struct timeval tv; 14141 14142 memset(&log, 0, sizeof(log)); 14143 log.u_bbr.flex1 = rack->r_ctl.current_round; 14144 log.u_bbr.flex2 = rack->r_ctl.roundends; 14145 log.u_bbr.flex3 = high_seq; 14146 log.u_bbr.flex4 = tp->snd_max; 14147 log.u_bbr.flex8 = mod; 14148 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14149 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 14150 log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes; 14151 TCP_LOG_EVENTP(tp, NULL, 14152 &tptosocket(tp)->so_rcv, 14153 &tptosocket(tp)->so_snd, 14154 TCP_HYSTART, 0, 14155 0, &log, false, &tv); 14156 } 14157 } 14158 14159 static void 14160 rack_deferred_init(struct tcpcb *tp, struct tcp_rack *rack) 14161 { 14162 rack->rack_deferred_inited = 1; 14163 rack->r_ctl.roundends = tp->snd_max; 14164 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 14165 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 14166 } 14167 14168 static void 14169 rack_init_retransmit_value(struct tcp_rack *rack, int ctl) 14170 { 14171 /* Retransmit bit controls. 14172 * 14173 * The setting of these values control one of 14174 * three settings you can have and dictate 14175 * how rack does retransmissions. Note this 14176 * is in *any* mode i.e. pacing on or off DGP 14177 * fixed rate pacing, or just bursting rack. 14178 * 14179 * 1 - Use full sized retransmits i.e. limit 14180 * the size to whatever the pace_max_segments 14181 * size is. 14182 * 14183 * 2 - Use pacer min granularity as a guide to 14184 * the size combined with the current calculated 14185 * goodput b/w measurement. So for example if 14186 * the goodput is measured at 20Mbps we would 14187 * calculate 8125 (pacer minimum 250usec in 14188 * that b/w) and then round it up to the next 14189 * MSS i.e. for 1448 mss 6 MSS or 8688 bytes. 14190 * 14191 * 0 - The rack default 1 MSS (anything not 0/1/2 14192 * fall here too if we are setting via rack_init()). 
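 *
 * Expressed in terms of the two flags set just below:
 *	ctl == 1 -> full_size_rxt = 1, shape_rxt_to_pacing_min = 0
 *	ctl == 2 -> full_size_rxt = 0, shape_rxt_to_pacing_min = 1
 *	default  -> both flags cleared (plain 1 MSS retransmits)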
14193 * 14194 */ 14195 if (ctl == 1) { 14196 rack->full_size_rxt = 1; 14197 rack->shape_rxt_to_pacing_min = 0; 14198 } else if (ctl == 2) { 14199 rack->full_size_rxt = 0; 14200 rack->shape_rxt_to_pacing_min = 1; 14201 } else { 14202 rack->full_size_rxt = 0; 14203 rack->shape_rxt_to_pacing_min = 0; 14204 } 14205 } 14206 14207 static void 14208 rack_log_chg_info(struct tcpcb *tp, struct tcp_rack *rack, uint8_t mod, 14209 uint32_t flex1, 14210 uint32_t flex2, 14211 uint32_t flex3) 14212 { 14213 if (tcp_bblogging_on(rack->rc_tp)) { 14214 union tcp_log_stackspecific log; 14215 struct timeval tv; 14216 14217 memset(&log, 0, sizeof(log)); 14218 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14219 log.u_bbr.flex8 = mod; 14220 log.u_bbr.flex1 = flex1; 14221 log.u_bbr.flex2 = flex2; 14222 log.u_bbr.flex3 = flex3; 14223 tcp_log_event(tp, NULL, NULL, NULL, TCP_CHG_QUERY, 0, 14224 0, &log, false, NULL, __func__, __LINE__, &tv); 14225 } 14226 } 14227 14228 static int 14229 rack_chg_query(struct tcpcb *tp, struct tcp_query_resp *reqr) 14230 { 14231 struct tcp_rack *rack; 14232 struct rack_sendmap *rsm; 14233 int i; 14234 14235 14236 rack = (struct tcp_rack *)tp->t_fb_ptr; 14237 switch (reqr->req) { 14238 case TCP_QUERY_SENDMAP: 14239 if ((reqr->req_param == tp->snd_max) || 14240 (tp->snd_max == tp->snd_una)){ 14241 /* Unlikely */ 14242 return (0); 14243 } 14244 rsm = tqhash_find(rack->r_ctl.tqh, reqr->req_param); 14245 if (rsm == NULL) { 14246 /* Can't find that seq -- unlikely */ 14247 return (0); 14248 } 14249 reqr->sendmap_start = rsm->r_start; 14250 reqr->sendmap_end = rsm->r_end; 14251 reqr->sendmap_send_cnt = rsm->r_rtr_cnt; 14252 reqr->sendmap_fas = rsm->r_fas; 14253 if (reqr->sendmap_send_cnt > SNDMAP_NRTX) 14254 reqr->sendmap_send_cnt = SNDMAP_NRTX; 14255 for(i=0; i<reqr->sendmap_send_cnt; i++) 14256 reqr->sendmap_time[i] = rsm->r_tim_lastsent[i]; 14257 reqr->sendmap_ack_arrival = rsm->r_ack_arrival; 14258 reqr->sendmap_flags = rsm->r_flags & SNDMAP_MASK; 14259 reqr->sendmap_r_rtr_bytes = rsm->r_rtr_bytes; 14260 reqr->sendmap_dupacks = rsm->r_dupack; 14261 rack_log_chg_info(tp, rack, 1, 14262 rsm->r_start, 14263 rsm->r_end, 14264 rsm->r_flags); 14265 return(1); 14266 break; 14267 case TCP_QUERY_TIMERS_UP: 14268 if (rack->r_ctl.rc_hpts_flags == 0) { 14269 /* no timers up */ 14270 return (0); 14271 } 14272 reqr->timer_hpts_flags = rack->r_ctl.rc_hpts_flags; 14273 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14274 reqr->timer_pacing_to = rack->r_ctl.rc_last_output_to; 14275 } 14276 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 14277 reqr->timer_timer_exp = rack->r_ctl.rc_timer_exp; 14278 } 14279 rack_log_chg_info(tp, rack, 2, 14280 rack->r_ctl.rc_hpts_flags, 14281 rack->r_ctl.rc_last_output_to, 14282 rack->r_ctl.rc_timer_exp); 14283 return (1); 14284 break; 14285 case TCP_QUERY_RACK_TIMES: 14286 /* Reordering items */ 14287 reqr->rack_num_dsacks = rack->r_ctl.num_dsack; 14288 reqr->rack_reorder_ts = rack->r_ctl.rc_reorder_ts; 14289 /* Timerstamps and timers */ 14290 reqr->rack_rxt_last_time = rack->r_ctl.rc_tlp_rxt_last_time; 14291 reqr->rack_min_rtt = rack->r_ctl.rc_rack_min_rtt; 14292 reqr->rack_rtt = rack->rc_rack_rtt; 14293 reqr->rack_tmit_time = rack->r_ctl.rc_rack_tmit_time; 14294 reqr->rack_srtt_measured = rack->rc_srtt_measure_made; 14295 /* PRR data */ 14296 reqr->rack_sacked = rack->r_ctl.rc_sacked; 14297 reqr->rack_holes_rxt = rack->r_ctl.rc_holes_rxt; 14298 reqr->rack_prr_delivered = rack->r_ctl.rc_prr_delivered; 14299 reqr->rack_prr_recovery_fs = rack->r_ctl.rc_prr_recovery_fs; 
14300 reqr->rack_prr_sndcnt = rack->r_ctl.rc_prr_sndcnt; 14301 reqr->rack_prr_out = rack->r_ctl.rc_prr_out; 14302 /* TLP and persists info */ 14303 reqr->rack_tlp_out = rack->rc_tlp_in_progress; 14304 reqr->rack_tlp_cnt_out = rack->r_ctl.rc_tlp_cnt_out; 14305 if (rack->rc_in_persist) { 14306 reqr->rack_time_went_idle = rack->r_ctl.rc_went_idle_time; 14307 reqr->rack_in_persist = 1; 14308 } else { 14309 reqr->rack_time_went_idle = 0; 14310 reqr->rack_in_persist = 0; 14311 } 14312 if (rack->r_wanted_output) 14313 reqr->rack_wanted_output = 1; 14314 else 14315 reqr->rack_wanted_output = 0; 14316 return (1); 14317 break; 14318 default: 14319 return (-EINVAL); 14320 } 14321 } 14322 14323 static void 14324 rack_switch_failed(struct tcpcb *tp) 14325 { 14326 /* 14327 * This method gets called if a stack switch was 14328 * attempted and it failed. We are left 14329 * but our hpts timers were stopped and we 14330 * need to validate time units and t_flags2. 14331 */ 14332 struct tcp_rack *rack; 14333 struct timeval tv; 14334 uint32_t cts; 14335 uint32_t toval; 14336 struct hpts_diag diag; 14337 14338 rack = (struct tcp_rack *)tp->t_fb_ptr; 14339 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC); 14340 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 14341 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 14342 else 14343 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 14344 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 14345 tp->t_flags2 |= TF2_MBUF_ACKCMP; 14346 if (tp->t_in_hpts > IHPTS_NONE) { 14347 /* Strange */ 14348 return; 14349 } 14350 cts = tcp_get_usecs(&tv); 14351 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14352 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 14353 toval = rack->r_ctl.rc_last_output_to - cts; 14354 } else { 14355 /* one slot please */ 14356 toval = HPTS_USECS_PER_SLOT; 14357 } 14358 } else if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 14359 if (TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 14360 toval = rack->r_ctl.rc_timer_exp - cts; 14361 } else { 14362 /* one slot please */ 14363 toval = HPTS_USECS_PER_SLOT; 14364 } 14365 } else 14366 toval = HPTS_USECS_PER_SLOT; 14367 tcp_hpts_insert(tp, toval, &diag); 14368 rack_log_hpts_diag(rack, cts, &diag, &tv); 14369 } 14370 14371 static int 14372 rack_init_outstanding(struct tcpcb *tp, struct tcp_rack *rack, uint32_t us_cts, void *ptr) 14373 { 14374 struct rack_sendmap *rsm, *ersm; 14375 int insret __diagused; 14376 /* 14377 * When initing outstanding, we must be quite careful 14378 * to not refer to tp->t_fb_ptr. This has the old rack 14379 * pointer in it, not the "new" one (when we are doing 14380 * a stack switch). 
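 *
 * Two paths follow: when the previous stack exposes no tfb_chg_query
 * method we synthesize a single sendmap entry spanning
 * snd_una..snd_max; otherwise we walk TCP_QUERY_SENDMAP answers from
 * snd_una up to snd_max and rebuild one entry per response.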
14381 */ 14382 14383 14384 if (tp->t_fb->tfb_chg_query == NULL) { 14385 /* Create a send map for the current outstanding data */ 14386 14387 rsm = rack_alloc(rack); 14388 if (rsm == NULL) { 14389 uma_zfree(rack_pcb_zone, ptr); 14390 return (ENOMEM); 14391 } 14392 rsm->r_no_rtt_allowed = 1; 14393 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 14394 rsm->r_rtr_cnt = 1; 14395 rsm->r_rtr_bytes = 0; 14396 if (tp->t_flags & TF_SENTFIN) 14397 rsm->r_flags |= RACK_HAS_FIN; 14398 rsm->r_end = tp->snd_max; 14399 if (tp->snd_una == tp->iss) { 14400 /* The data space is one beyond snd_una */ 14401 rsm->r_flags |= RACK_HAS_SYN; 14402 rsm->r_start = tp->iss; 14403 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); 14404 } else 14405 rsm->r_start = tp->snd_una; 14406 rsm->r_dupack = 0; 14407 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 14408 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 14409 if (rsm->m) { 14410 rsm->orig_m_len = rsm->m->m_len; 14411 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 14412 } else { 14413 rsm->orig_m_len = 0; 14414 rsm->orig_t_space = 0; 14415 } 14416 } else { 14417 /* 14418 * This can happen if we have a stand-alone FIN or 14419 * SYN. 14420 */ 14421 rsm->m = NULL; 14422 rsm->orig_m_len = 0; 14423 rsm->orig_t_space = 0; 14424 rsm->soff = 0; 14425 } 14426 #ifdef INVARIANTS 14427 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 14428 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p", 14429 insret, rack, rsm); 14430 } 14431 #else 14432 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 14433 #endif 14434 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 14435 rsm->r_in_tmap = 1; 14436 } else { 14437 /* We have a query mechanism, lets use it */ 14438 struct tcp_query_resp qr; 14439 int i; 14440 tcp_seq at; 14441 14442 at = tp->snd_una; 14443 while (at != tp->snd_max) { 14444 memset(&qr, 0, sizeof(qr)); 14445 qr.req = TCP_QUERY_SENDMAP; 14446 qr.req_param = at; 14447 if ((*tp->t_fb->tfb_chg_query)(tp, &qr) == 0) 14448 break; 14449 /* Move forward */ 14450 at = qr.sendmap_end; 14451 /* Now lets build the entry for this one */ 14452 rsm = rack_alloc(rack); 14453 if (rsm == NULL) { 14454 uma_zfree(rack_pcb_zone, ptr); 14455 return (ENOMEM); 14456 } 14457 memset(rsm, 0, sizeof(struct rack_sendmap)); 14458 /* Now configure the rsm and insert it */ 14459 rsm->r_dupack = qr.sendmap_dupacks; 14460 rsm->r_start = qr.sendmap_start; 14461 rsm->r_end = qr.sendmap_end; 14462 if (qr.sendmap_fas) 14463 rsm->r_fas = qr.sendmap_end; 14464 else 14465 rsm->r_fas = rsm->r_start - tp->snd_una; 14466 /* 14467 * We have carefully aligned the bits 14468 * so that all we have to do is copy over 14469 * the bits with the mask. 
14470 */ 14471 rsm->r_flags = qr.sendmap_flags & SNDMAP_MASK; 14472 rsm->r_rtr_bytes = qr.sendmap_r_rtr_bytes; 14473 rsm->r_rtr_cnt = qr.sendmap_send_cnt; 14474 rsm->r_ack_arrival = qr.sendmap_ack_arrival; 14475 for (i=0 ; i<rsm->r_rtr_cnt; i++) 14476 rsm->r_tim_lastsent[i] = qr.sendmap_time[i]; 14477 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 14478 (rsm->r_start - tp->snd_una), &rsm->soff); 14479 if (rsm->m) { 14480 rsm->orig_m_len = rsm->m->m_len; 14481 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 14482 } else { 14483 rsm->orig_m_len = 0; 14484 rsm->orig_t_space = 0; 14485 } 14486 #ifdef INVARIANTS 14487 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 14488 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p", 14489 insret, rack, rsm); 14490 } 14491 #else 14492 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 14493 #endif 14494 if ((rsm->r_flags & RACK_ACKED) == 0) { 14495 TAILQ_FOREACH(ersm, &rack->r_ctl.rc_tmap, r_tnext) { 14496 if (ersm->r_tim_lastsent[(ersm->r_rtr_cnt-1)] > 14497 rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) { 14498 /* 14499 * If the existing ersm was sent at 14500 * a later time than the new one, then 14501 * the new one should appear ahead of this 14502 * ersm. 14503 */ 14504 rsm->r_in_tmap = 1; 14505 TAILQ_INSERT_BEFORE(ersm, rsm, r_tnext); 14506 break; 14507 } 14508 } 14509 if (rsm->r_in_tmap == 0) { 14510 /* 14511 * Not found so shove it on the tail. 14512 */ 14513 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 14514 rsm->r_in_tmap = 1; 14515 } 14516 } else { 14517 if ((rack->r_ctl.rc_sacklast == NULL) || 14518 (SEQ_GT(rsm->r_end, rack->r_ctl.rc_sacklast->r_end))) { 14519 rack->r_ctl.rc_sacklast = rsm; 14520 } 14521 } 14522 rack_log_chg_info(tp, rack, 3, 14523 rsm->r_start, 14524 rsm->r_end, 14525 rsm->r_flags); 14526 } 14527 } 14528 return (0); 14529 } 14530 14531 14532 static int32_t 14533 rack_init(struct tcpcb *tp, void **ptr) 14534 { 14535 struct inpcb *inp = tptoinpcb(tp); 14536 struct tcp_rack *rack = NULL; 14537 uint32_t iwin, snt, us_cts; 14538 size_t sz; 14539 int err, no_query; 14540 14541 tcp_hpts_init(tp); 14542 14543 /* 14544 * First are we the initial or are we a switched stack? 14545 * If we are initing via tcp_newtcppcb the ptr passed 14546 * will be tp->t_fb_ptr. If its a stack switch that 14547 * has a previous stack we can query it will be a local 14548 * var that will in the end be set into t_fb_ptr. 14549 */ 14550 if (ptr == &tp->t_fb_ptr) 14551 no_query = 1; 14552 else 14553 no_query = 0; 14554 *ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 14555 if (*ptr == NULL) { 14556 /* 14557 * We need to allocate memory but cant. The INP and INP_INFO 14558 * locks and they are recursive (happens during setup. 
So a 14559 * scheme to drop the locks fails :( 14560 * 14561 */ 14562 return(ENOMEM); 14563 } 14564 memset(*ptr, 0, sizeof(struct tcp_rack)); 14565 rack = (struct tcp_rack *)*ptr; 14566 rack->r_ctl.tqh = malloc(sizeof(struct tailq_hash), M_TCPFSB, M_NOWAIT); 14567 if (rack->r_ctl.tqh == NULL) { 14568 uma_zfree(rack_pcb_zone, rack); 14569 return(ENOMEM); 14570 } 14571 tqhash_init(rack->r_ctl.tqh); 14572 TAILQ_INIT(&rack->r_ctl.rc_free); 14573 TAILQ_INIT(&rack->r_ctl.rc_tmap); 14574 rack->rc_tp = tp; 14575 rack->rc_inp = inp; 14576 /* Set the flag */ 14577 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0; 14578 /* Probably not needed but lets be sure */ 14579 rack_clear_rate_sample(rack); 14580 /* 14581 * Save off the default values, socket options will poke 14582 * at these if pacing is not on or we have not yet 14583 * reached where pacing is on (gp_ready/fixed enabled). 14584 * When they get set into the CC module (when gp_ready 14585 * is enabled or we enable fixed) then we will set these 14586 * values into the CC and place in here the old values 14587 * so we have a restoral. Then we will set the flag 14588 * rc_pacing_cc_set. That way whenever we turn off pacing 14589 * or switch off this stack, we will know to go restore 14590 * the saved values. 14591 * 14592 * We specifically put into the beta the ecn value for pacing. 14593 */ 14594 rack->rc_new_rnd_needed = 1; 14595 rack->r_ctl.rc_split_limit = V_tcp_map_split_limit; 14596 /* We want abe like behavior as well */ 14597 14598 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 14599 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 14600 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 14601 if (rack_fill_cw_state) 14602 rack->rc_pace_to_cwnd = 1; 14603 if (rack_pacing_min_seg) 14604 rack->r_ctl.rc_user_set_min_segs = rack_pacing_min_seg; 14605 if (use_rack_rr) 14606 rack->use_rack_rr = 1; 14607 if (rack_dnd_default) { 14608 rack->rc_pace_dnd = 1; 14609 } 14610 if (V_tcp_delack_enabled) 14611 tp->t_delayed_ack = 1; 14612 else 14613 tp->t_delayed_ack = 0; 14614 #ifdef TCP_ACCOUNTING 14615 if (rack_tcp_accounting) { 14616 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 14617 } 14618 #endif 14619 rack->r_ctl.pcm_i.cnt_alloc = RACK_DEFAULT_PCM_ARRAY; 14620 sz = (sizeof(struct rack_pcm_stats) * rack->r_ctl.pcm_i.cnt_alloc); 14621 rack->r_ctl.pcm_s = malloc(sz,M_TCPPCM, M_NOWAIT); 14622 if (rack->r_ctl.pcm_s == NULL) { 14623 rack->r_ctl.pcm_i.cnt_alloc = 0; 14624 } 14625 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss; 14626 rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_per_upper_bound_ca; 14627 if (rack_enable_shared_cwnd) 14628 rack->rack_enable_scwnd = 1; 14629 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; 14630 rack->rc_user_set_max_segs = rack_hptsi_segments; 14631 rack->r_ctl.max_reduction = rack_max_reduce; 14632 rack->rc_force_max_seg = 0; 14633 TAILQ_INIT(&rack->r_ctl.opt_list); 14634 rack->r_ctl.rc_saved_beta = V_newreno_beta_ecn; 14635 rack->r_ctl.rc_saved_beta_ecn = V_newreno_beta_ecn; 14636 if (rack_hibeta_setting) { 14637 rack->rack_hibeta = 1; 14638 if ((rack_hibeta_setting >= 50) && 14639 (rack_hibeta_setting <= 100)) { 14640 rack->r_ctl.rc_saved_beta = rack_hibeta_setting; 14641 rack->r_ctl.saved_hibeta = rack_hibeta_setting; 14642 } 14643 } else { 14644 rack->r_ctl.saved_hibeta = 50; 14645 } 14646 /* 14647 * We initialize to all ones so we never match 0 14648 * just in case the client sends in 0, it hopefully 14649 * will never have all 1's in ms :-) 14650 */ 14651 
rack->r_ctl.last_tm_mark = 0xffffffffffffffff; 14652 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 14653 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 14654 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 14655 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 14656 rack->r_ctl.rc_highest_us_rtt = 0; 14657 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 14658 rack->pcm_enabled = rack_pcm_is_enabled; 14659 if (rack_fillcw_bw_cap) 14660 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 14661 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 14662 if (rack_use_cmp_acks) 14663 rack->r_use_cmp_ack = 1; 14664 if (rack_disable_prr) 14665 rack->rack_no_prr = 1; 14666 if (rack_gp_no_rec_chg) 14667 rack->rc_gp_no_rec_chg = 1; 14668 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 14669 rack->r_ctl.pacing_method |= RACK_REG_PACING; 14670 rack->rc_always_pace = 1; 14671 if (rack->rack_hibeta) 14672 rack_set_cc_pacing(rack); 14673 } else 14674 rack->rc_always_pace = 0; 14675 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 14676 rack->r_mbuf_queue = 1; 14677 else 14678 rack->r_mbuf_queue = 0; 14679 rack_set_pace_segments(tp, rack, __LINE__, NULL); 14680 if (rack_limits_scwnd) 14681 rack->r_limit_scw = 1; 14682 else 14683 rack->r_limit_scw = 0; 14684 rack_init_retransmit_value(rack, rack_rxt_controls); 14685 rack->rc_labc = V_tcp_abc_l_var; 14686 if (rack_honors_hpts_min_to) 14687 rack->r_use_hpts_min = 1; 14688 if (tp->snd_una != 0) { 14689 rack->rc_sendvars_notset = 0; 14690 /* 14691 * Make sure any TCP timers are not running. 14692 */ 14693 tcp_timer_stop(tp); 14694 } else { 14695 /* 14696 * Server side, we are called from the 14697 * syn-cache. This means none of the 14698 * snd_una/max are set yet so we have 14699 * to defer this until the first send. 
14700 */ 14701 rack->rc_sendvars_notset = 1; 14702 } 14703 14704 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; 14705 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 14706 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 14707 rack->r_ctl.rc_min_to = rack_min_to; 14708 microuptime(&rack->r_ctl.act_rcv_time); 14709 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 14710 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; 14711 if (rack_hw_up_only) 14712 rack->r_up_only = 1; 14713 if (rack_do_dyn_mul) { 14714 /* When dynamic adjustment is on CA needs to start at 100% */ 14715 rack->rc_gp_dyn_mul = 1; 14716 if (rack_do_dyn_mul >= 100) 14717 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 14718 } else 14719 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 14720 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 14721 if (rack_timely_off) { 14722 rack->rc_skip_timely = 1; 14723 } 14724 if (rack->rc_skip_timely) { 14725 rack->r_ctl.rack_per_of_gp_rec = 90; 14726 rack->r_ctl.rack_per_of_gp_ca = 100; 14727 rack->r_ctl.rack_per_of_gp_ss = 250; 14728 } 14729 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 14730 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_msec(&rack->r_ctl.act_rcv_time); 14731 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_msec(&rack->r_ctl.act_rcv_time); 14732 14733 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 14734 rack_probertt_filter_life); 14735 us_cts = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time); 14736 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 14737 rack->r_ctl.rc_time_of_last_probertt = us_cts; 14738 rack->r_ctl.rc_went_idle_time = us_cts; 14739 rack->r_ctl.rc_time_probertt_starts = 0; 14740 14741 rack->r_ctl.gp_rnd_thresh = rack_rnd_cnt_req & 0xff; 14742 if (rack_rnd_cnt_req & 0x10000) 14743 rack->r_ctl.gate_to_fs = 1; 14744 rack->r_ctl.gp_gain_req = rack_gp_gain_req; 14745 if ((rack_rnd_cnt_req & 0x100) > 0) { 14746 14747 } 14748 if (rack_dsack_std_based & 0x1) { 14749 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 14750 rack->rc_rack_tmr_std_based = 1; 14751 } 14752 if (rack_dsack_std_based & 0x2) { 14753 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 14754 rack->rc_rack_use_dsack = 1; 14755 } 14756 /* We require at least one measurement, even if the sysctl is 0 */ 14757 if (rack_req_measurements) 14758 rack->r_ctl.req_measurements = rack_req_measurements; 14759 else 14760 rack->r_ctl.req_measurements = 1; 14761 if (rack_enable_hw_pacing) 14762 rack->rack_hdw_pace_ena = 1; 14763 if (rack_hw_rate_caps) 14764 rack->r_rack_hw_rate_caps = 1; 14765 if (rack_non_rxt_use_cr) 14766 rack->rack_rec_nonrxt_use_cr = 1; 14767 /* Lets setup the fsb block */ 14768 err = rack_init_fsb(tp, rack); 14769 if (err) { 14770 uma_zfree(rack_pcb_zone, *ptr); 14771 *ptr = NULL; 14772 return (err); 14773 } 14774 if (rack_do_hystart) { 14775 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 14776 if (rack_do_hystart > 1) 14777 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 14778 if (rack_do_hystart > 2) 14779 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 14780 } 14781 /* Log what we will do with queries */ 14782 rack_log_chg_info(tp, rack, 7, 14783 no_query, 0, 0); 14784 if (rack_def_profile) 14785 rack_set_profile(rack, rack_def_profile); 14786 /* Cancel the GP measurement in progress */ 14787 tp->t_flags &= ~TF_GPUTINPROG; 14788 if ((tp->t_state != TCPS_CLOSED) && 14789 (tp->t_state != TCPS_TIME_WAIT)) { 14790 /* 14791 * We are already open, we may 14792 * need to adjust a 
few things. 14793 */ 14794 if (SEQ_GT(tp->snd_max, tp->iss)) 14795 snt = tp->snd_max - tp->iss; 14796 else 14797 snt = 0; 14798 iwin = rc_init_window(rack); 14799 if ((snt < iwin) && 14800 (no_query == 1)) { 14801 /* We are not past the initial window 14802 * on the first init (i.e. a stack switch 14803 * has not yet occurred) so we need to make 14804 * sure cwnd and ssthresh are correct. 14805 */ 14806 if (tp->snd_cwnd < iwin) 14807 tp->snd_cwnd = iwin; 14808 /* 14809 * If we are within the initial window 14810 * we want ssthresh to be unlimited. Setting 14811 * it to the rwnd (which the default stack does 14812 * and older racks) is not really a good idea 14813 * since we want to be in SS and grow both the 14814 * cwnd and the rwnd (via dynamic rwnd growth). If 14815 * we set it to the rwnd then as the peer grows its 14816 * rwnd we will be stuck in CA and never hit SS. 14817 * 14818 * It's far better to raise it up high (this takes the 14819 * risk that there has been a loss already, probably 14820 * we should have an indicator in all stacks of loss 14821 * but we don't), but considering the normal use this 14822 * is a risk worth taking. The consequences of not 14823 * hitting SS are far worse than going one more time 14824 * into it early on (before we have sent even an IW). 14825 * It is highly unlikely that we will have had a loss 14826 * before getting the IW out. 14827 */ 14828 tp->snd_ssthresh = 0xffffffff; 14829 } 14830 /* 14831 * Any init based on sequence numbers 14832 * should be done in the deferred init path 14833 * since we can be CLOSED and not have them 14834 * inited when rack_init() is called. We 14835 * are not closed so let's call it. 14836 */ 14837 rack_deferred_init(tp, rack); 14838 } 14839 if ((tp->t_state != TCPS_CLOSED) && 14840 (tp->t_state != TCPS_TIME_WAIT) && 14841 (no_query == 0) && 14842 (tp->snd_una != tp->snd_max)) { 14843 err = rack_init_outstanding(tp, rack, us_cts, *ptr); 14844 if (err) { 14845 *ptr = NULL; 14846 return(err); 14847 } 14848 } 14849 rack_stop_all_timers(tp, rack); 14850 /* Setup all the t_flags2 */ 14851 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 14852 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 14853 else 14854 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 14855 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 14856 tp->t_flags2 |= TF2_MBUF_ACKCMP; 14857 /* 14858 * Timers in Rack are kept in microseconds so let's 14859 * convert any initial incoming variables 14860 * from ticks into usecs. Note that we 14861 * also change the values of t_srtt and t_rttvar, if 14862 * they are non-zero. They are kept with a 5 14863 * bit decimal so we have to carefully convert 14864 * these to get the full precision.
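 *
 * As an illustrative example (assuming hz is such that one tick is
 * 1000 usec): a t_srtt of 8 ticks carried with the 5 bit fraction is
 * stored as 8 << 5 = 256, and the full precision microsecond value
 * works out to (256 * 1000) >> 5 = 8000 usec.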
14865 */ 14866 rack_convert_rtts(tp); 14867 rack_log_hystart_event(rack, rack->r_ctl.roundends, 20); 14868 if ((tptoinpcb(tp)->inp_flags & INP_DROPPED) == 0) { 14869 /* We do not start any timers on DROPPED connections */ 14870 if (tp->t_fb->tfb_chg_query == NULL) { 14871 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 14872 } else { 14873 struct tcp_query_resp qr; 14874 int ret; 14875 14876 memset(&qr, 0, sizeof(qr)); 14877 14878 /* Get the misc time stamps and such for rack */ 14879 qr.req = TCP_QUERY_RACK_TIMES; 14880 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); 14881 if (ret == 1) { 14882 rack->r_ctl.rc_reorder_ts = qr.rack_reorder_ts; 14883 rack->r_ctl.num_dsack = qr.rack_num_dsacks; 14884 rack->r_ctl.rc_tlp_rxt_last_time = qr.rack_rxt_last_time; 14885 rack->r_ctl.rc_rack_min_rtt = qr.rack_min_rtt; 14886 rack->rc_rack_rtt = qr.rack_rtt; 14887 rack->r_ctl.rc_rack_tmit_time = qr.rack_tmit_time; 14888 rack->r_ctl.rc_sacked = qr.rack_sacked; 14889 rack->r_ctl.rc_holes_rxt = qr.rack_holes_rxt; 14890 rack->r_ctl.rc_prr_delivered = qr.rack_prr_delivered; 14891 rack->r_ctl.rc_prr_recovery_fs = qr.rack_prr_recovery_fs; 14892 rack->r_ctl.rc_prr_sndcnt = qr.rack_prr_sndcnt; 14893 rack->r_ctl.rc_prr_out = qr.rack_prr_out; 14894 if (qr.rack_tlp_out) { 14895 rack->rc_tlp_in_progress = 1; 14896 rack->r_ctl.rc_tlp_cnt_out = qr.rack_tlp_cnt_out; 14897 } else { 14898 rack->rc_tlp_in_progress = 0; 14899 rack->r_ctl.rc_tlp_cnt_out = 0; 14900 } 14901 if (qr.rack_srtt_measured) 14902 rack->rc_srtt_measure_made = 1; 14903 if (qr.rack_in_persist == 1) { 14904 rack->r_ctl.rc_went_idle_time = qr.rack_time_went_idle; 14905 #ifdef NETFLIX_SHARED_CWND 14906 if (rack->r_ctl.rc_scw) { 14907 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 14908 rack->rack_scwnd_is_idle = 1; 14909 } 14910 #endif 14911 rack->r_ctl.persist_lost_ends = 0; 14912 rack->probe_not_answered = 0; 14913 rack->forced_ack = 0; 14914 tp->t_rxtshift = 0; 14915 rack->rc_in_persist = 1; 14916 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 14917 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 14918 } 14919 if (qr.rack_wanted_output) 14920 rack->r_wanted_output = 1; 14921 rack_log_chg_info(tp, rack, 6, 14922 qr.rack_min_rtt, 14923 qr.rack_rtt, 14924 qr.rack_reorder_ts); 14925 } 14926 /* Get the old stack timers */ 14927 qr.req_param = 0; 14928 qr.req = TCP_QUERY_TIMERS_UP; 14929 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); 14930 if (ret) { 14931 /* 14932 * non-zero return means we have a timer('s) 14933 * to start. Zero means no timer (no keepalive 14934 * I suppose). 
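 *
 * For illustration: if the old stack still had a pacing wakeup
 * (PACE_PKT_OUTPUT) due 400 usec from now, tov becomes 400 and we
 * re-insert ourselves into hpts for that remaining time; if the
 * deadline has already passed we fall back to a single hpts slot.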
14935 */ 14936 uint32_t tov = 0; 14937 14938 rack->r_ctl.rc_hpts_flags = qr.timer_hpts_flags; 14939 if (qr.timer_hpts_flags & PACE_PKT_OUTPUT) { 14940 rack->r_ctl.rc_last_output_to = qr.timer_pacing_to; 14941 if (TSTMP_GT(qr.timer_pacing_to, us_cts)) 14942 tov = qr.timer_pacing_to - us_cts; 14943 else 14944 tov = HPTS_USECS_PER_SLOT; 14945 } 14946 if (qr.timer_hpts_flags & PACE_TMR_MASK) { 14947 rack->r_ctl.rc_timer_exp = qr.timer_timer_exp; 14948 if (tov == 0) { 14949 if (TSTMP_GT(qr.timer_timer_exp, us_cts)) 14950 tov = qr.timer_timer_exp - us_cts; 14951 else 14952 tov = HPTS_USECS_PER_SLOT; 14953 } 14954 } 14955 rack_log_chg_info(tp, rack, 4, 14956 rack->r_ctl.rc_hpts_flags, 14957 rack->r_ctl.rc_last_output_to, 14958 rack->r_ctl.rc_timer_exp); 14959 if (tov) { 14960 struct hpts_diag diag; 14961 14962 tcp_hpts_insert(tp, tov, &diag); 14963 rack_log_hpts_diag(rack, us_cts, &diag, &rack->r_ctl.act_rcv_time); 14964 } 14965 } 14966 } 14967 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, 14968 __LINE__, RACK_RTTS_INIT); 14969 } 14970 return (0); 14971 } 14972 14973 static int 14974 rack_handoff_ok(struct tcpcb *tp) 14975 { 14976 if ((tp->t_state == TCPS_CLOSED) || 14977 (tp->t_state == TCPS_LISTEN)) { 14978 /* Sure no problem though it may not stick */ 14979 return (0); 14980 } 14981 if ((tp->t_state == TCPS_SYN_SENT) || 14982 (tp->t_state == TCPS_SYN_RECEIVED)) { 14983 /* 14984 * We really don't know if you support sack, 14985 * you have to get to ESTAB or beyond to tell. 14986 */ 14987 return (EAGAIN); 14988 } 14989 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { 14990 /* 14991 * Rack will only send a FIN after all data is acknowledged. 14992 * So in this case we have more data outstanding. We can't 14993 * switch stacks until either all data and only the FIN 14994 * is left (in which case rack_init() now knows how 14995 * to deal with that) <or> all is acknowledged and we 14996 * are only left with incoming data, though why you 14997 * would want to switch to rack after all data is acknowledged 14998 * I have no idea (rrs)! 14999 */ 15000 return (EAGAIN); 15001 } 15002 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 15003 return (0); 15004 } 15005 /* 15006 * If we reach here we don't do SACK on this connection so we can 15007 * never do rack. 
15008 */ 15009 return (EINVAL); 15010 } 15011 15012 static void 15013 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 15014 { 15015 15016 if (tp->t_fb_ptr) { 15017 uint32_t cnt_free = 0; 15018 struct tcp_rack *rack; 15019 struct rack_sendmap *rsm; 15020 15021 tcp_handle_orphaned_packets(tp); 15022 tp->t_flags &= ~TF_FORCEDATA; 15023 rack = (struct tcp_rack *)tp->t_fb_ptr; 15024 rack_log_pacing_delay_calc(rack, 15025 0, 15026 0, 15027 0, 15028 rack_get_gp_est(rack), /* delRate */ 15029 rack_get_lt_bw(rack), /* rttProp */ 15030 20, __LINE__, NULL, 0); 15031 #ifdef NETFLIX_SHARED_CWND 15032 if (rack->r_ctl.rc_scw) { 15033 uint32_t limit; 15034 15035 if (rack->r_limit_scw) 15036 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 15037 else 15038 limit = 0; 15039 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 15040 rack->r_ctl.rc_scw_index, 15041 limit); 15042 rack->r_ctl.rc_scw = NULL; 15043 } 15044 #endif 15045 if (rack->r_ctl.fsb.tcp_ip_hdr) { 15046 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); 15047 rack->r_ctl.fsb.tcp_ip_hdr = NULL; 15048 rack->r_ctl.fsb.th = NULL; 15049 } 15050 if (rack->rc_always_pace == 1) { 15051 rack_remove_pacing(rack); 15052 } 15053 /* Clean up any options if they were not applied */ 15054 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 15055 struct deferred_opt_list *dol; 15056 15057 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 15058 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 15059 free(dol, M_TCPDO); 15060 } 15061 /* rack does not use force data but other stacks may clear it */ 15062 if (rack->r_ctl.crte != NULL) { 15063 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 15064 rack->rack_hdrw_pacing = 0; 15065 rack->r_ctl.crte = NULL; 15066 } 15067 #ifdef TCP_BLACKBOX 15068 tcp_log_flowend(tp); 15069 #endif 15070 /* 15071 * Lets take a different approach to purging just 15072 * get each one and free it like a cum-ack would and 15073 * not use a foreach loop. 
15074 */ 15075 rsm = tqhash_min(rack->r_ctl.tqh); 15076 while (rsm) { 15077 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); 15078 rack->r_ctl.rc_num_maps_alloced--; 15079 uma_zfree(rack_zone, rsm); 15080 rsm = tqhash_min(rack->r_ctl.tqh); 15081 } 15082 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15083 while (rsm) { 15084 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 15085 rack->r_ctl.rc_num_maps_alloced--; 15086 rack->rc_free_cnt--; 15087 cnt_free++; 15088 uma_zfree(rack_zone, rsm); 15089 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15090 } 15091 if (rack->r_ctl.pcm_s != NULL) { 15092 free(rack->r_ctl.pcm_s, M_TCPPCM); 15093 rack->r_ctl.pcm_s = NULL; 15094 rack->r_ctl.pcm_i.cnt_alloc = 0; 15095 rack->r_ctl.pcm_i.cnt = 0; 15096 } 15097 if ((rack->r_ctl.rc_num_maps_alloced > 0) && 15098 (tcp_bblogging_on(tp))) { 15099 union tcp_log_stackspecific log; 15100 struct timeval tv; 15101 15102 memset(&log, 0, sizeof(log)); 15103 log.u_bbr.flex8 = 10; 15104 log.u_bbr.flex1 = rack->r_ctl.rc_num_maps_alloced; 15105 log.u_bbr.flex2 = rack->rc_free_cnt; 15106 log.u_bbr.flex3 = cnt_free; 15107 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15108 rsm = tqhash_min(rack->r_ctl.tqh); 15109 log.u_bbr.delRate = (uintptr_t)rsm; 15110 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15111 log.u_bbr.cur_del_rate = (uintptr_t)rsm; 15112 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15113 log.u_bbr.pkt_epoch = __LINE__; 15114 (void)tcp_log_event(tp, NULL, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 15115 0, &log, false, NULL, NULL, 0, &tv); 15116 } 15117 KASSERT((rack->r_ctl.rc_num_maps_alloced == 0), 15118 ("rack:%p num_aloc:%u after freeing all?", 15119 rack, 15120 rack->r_ctl.rc_num_maps_alloced)); 15121 rack->rc_free_cnt = 0; 15122 free(rack->r_ctl.tqh, M_TCPFSB); 15123 rack->r_ctl.tqh = NULL; 15124 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 15125 tp->t_fb_ptr = NULL; 15126 } 15127 /* Make sure snd_nxt is correctly set */ 15128 tp->snd_nxt = tp->snd_max; 15129 } 15130 15131 static void 15132 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 15133 { 15134 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 15135 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0; 15136 } 15137 switch (tp->t_state) { 15138 case TCPS_SYN_SENT: 15139 rack->r_state = TCPS_SYN_SENT; 15140 rack->r_substate = rack_do_syn_sent; 15141 break; 15142 case TCPS_SYN_RECEIVED: 15143 rack->r_state = TCPS_SYN_RECEIVED; 15144 rack->r_substate = rack_do_syn_recv; 15145 break; 15146 case TCPS_ESTABLISHED: 15147 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15148 rack->r_state = TCPS_ESTABLISHED; 15149 rack->r_substate = rack_do_established; 15150 break; 15151 case TCPS_CLOSE_WAIT: 15152 rack->r_state = TCPS_CLOSE_WAIT; 15153 rack->r_substate = rack_do_close_wait; 15154 break; 15155 case TCPS_FIN_WAIT_1: 15156 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15157 rack->r_state = TCPS_FIN_WAIT_1; 15158 rack->r_substate = rack_do_fin_wait_1; 15159 break; 15160 case TCPS_CLOSING: 15161 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15162 rack->r_state = TCPS_CLOSING; 15163 rack->r_substate = rack_do_closing; 15164 break; 15165 case TCPS_LAST_ACK: 15166 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15167 rack->r_state = TCPS_LAST_ACK; 15168 rack->r_substate = rack_do_lastack; 15169 break; 15170 case TCPS_FIN_WAIT_2: 15171 rack->r_state = TCPS_FIN_WAIT_2; 15172 rack->r_substate = rack_do_fin_wait_2; 15173 break; 15174 case TCPS_LISTEN: 15175 case TCPS_CLOSED: 15176 case TCPS_TIME_WAIT: 15177 default: 
15178 break; 15179 }; 15180 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 15181 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 15182 15183 } 15184 15185 static void 15186 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 15187 { 15188 /* 15189 * We received an ack, and then did not 15190 * call send or were bounced out because the 15191 * hpts was running. Now a timer is up as well, is 15192 * it the right timer? 15193 */ 15194 struct rack_sendmap *rsm; 15195 int tmr_up; 15196 15197 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 15198 if (tcp_in_hpts(rack->rc_tp) == 0) { 15199 /* 15200 * Ok we probably need some timer up, but no 15201 * matter what the mask we are not in hpts. We 15202 * may have received an old ack and thus did nothing. 15203 */ 15204 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 15205 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 15206 return; 15207 } 15208 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 15209 return; 15210 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 15211 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 15212 (tmr_up == PACE_TMR_RXT)) { 15213 /* Should be an RXT */ 15214 return; 15215 } 15216 if (rsm == NULL) { 15217 /* Nothing outstanding? */ 15218 if (tp->t_flags & TF_DELACK) { 15219 if (tmr_up == PACE_TMR_DELACK) 15220 /* We are supposed to have delayed ack up and we do */ 15221 return; 15222 } else if (((V_tcp_always_keepalive || 15223 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 15224 (tp->t_state <= TCPS_CLOSING)) && 15225 (tmr_up == PACE_TMR_KEEP) && 15226 (tp->snd_max == tp->snd_una)) { 15227 /* We should have keep alive up and we do */ 15228 return; 15229 } 15230 } 15231 if (SEQ_GT(tp->snd_max, tp->snd_una) && 15232 ((tmr_up == PACE_TMR_TLP) || 15233 (tmr_up == PACE_TMR_RACK) || 15234 (tmr_up == PACE_TMR_RXT))) { 15235 /* 15236 * Either a Rack, TLP or RXT is fine if we 15237 * have outstanding data. 15238 */ 15239 return; 15240 } else if (tmr_up == PACE_TMR_DELACK) { 15241 /* 15242 * If the delayed ack was going to go off 15243 * before the rtx/tlp/rack timer was going to 15244 * expire, then that would be the timer in control. 15245 * Note we don't check the time here, trusting the 15246 * code is correct. 15247 */ 15248 return; 15249 } 15250 /* 15251 * Ok the timer originally started is not what we want now. 15252 * We will force the hpts to be stopped if any, and restart 15253 * with the slot set to what was in the saved slot.
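 *
 * For example, if a pacing wakeup (PACE_PKT_OUTPUT) was still
 * 1000 usec away when we tear it down, that 1000 usec is added to
 * rc_agg_early so the pacing calculations can account for having
 * been forced to send early.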
15254 */ 15255 if (tcp_in_hpts(rack->rc_tp)) { 15256 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 15257 uint32_t us_cts; 15258 15259 us_cts = tcp_get_usecs(NULL); 15260 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 15261 rack->r_early = 1; 15262 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 15263 } 15264 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 15265 } 15266 tcp_hpts_remove(rack->rc_tp); 15267 } 15268 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 15269 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 15270 } 15271 15272 15273 static void 15274 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts) 15275 { 15276 if ((SEQ_LT(tp->snd_wl1, seq) || 15277 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || 15278 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { 15279 /* keep track of pure window updates */ 15280 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) 15281 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 15282 tp->snd_wnd = tiwin; 15283 rack_validate_fo_sendwin_up(tp, rack); 15284 tp->snd_wl1 = seq; 15285 tp->snd_wl2 = ack; 15286 if (tp->snd_wnd > tp->max_sndwnd) 15287 tp->max_sndwnd = tp->snd_wnd; 15288 rack->r_wanted_output = 1; 15289 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { 15290 tp->snd_wnd = tiwin; 15291 rack_validate_fo_sendwin_up(tp, rack); 15292 tp->snd_wl1 = seq; 15293 tp->snd_wl2 = ack; 15294 } else { 15295 /* Not a valid win update */ 15296 return; 15297 } 15298 if (tp->snd_wnd > tp->max_sndwnd) 15299 tp->max_sndwnd = tp->snd_wnd; 15300 /* Do we exit persists? */ 15301 if ((rack->rc_in_persist != 0) && 15302 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 15303 rack->r_ctl.rc_pace_min_segs))) { 15304 rack_exit_persist(tp, rack, cts); 15305 } 15306 /* Do we enter persists? */ 15307 if ((rack->rc_in_persist == 0) && 15308 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 15309 TCPS_HAVEESTABLISHED(tp->t_state) && 15310 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 15311 sbavail(&tptosocket(tp)->so_snd) && 15312 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 15313 /* 15314 * Here the rwnd is less than 15315 * the pacing size, we are established, 15316 * nothing is outstanding, and there is 15317 * data to send. Enter persists. 
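 *
 * As a worked example, with, say, rc_high_rwnd = 64000 bytes and a
 * pacing minimum of 1448 bytes the threshold is min(64000 / 2, 1448)
 * = 1448, so we only drop into persists once the peer's window
 * shrinks below 1448 bytes and the send buffer holds more data than
 * that window.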
15318 */ 15319 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, ack); 15320 } 15321 } 15322 15323 static void 15324 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq) 15325 { 15326 15327 if (tcp_bblogging_on(rack->rc_tp)) { 15328 struct inpcb *inp = tptoinpcb(tp); 15329 union tcp_log_stackspecific log; 15330 struct timeval ltv; 15331 char tcp_hdr_buf[60]; 15332 struct tcphdr *th; 15333 struct timespec ts; 15334 uint32_t orig_snd_una; 15335 uint8_t xx = 0; 15336 15337 #ifdef TCP_REQUEST_TRK 15338 struct tcp_sendfile_track *tcp_req; 15339 15340 if (SEQ_GT(ae->ack, tp->snd_una)) { 15341 tcp_req = tcp_req_find_req_for_seq(tp, (ae->ack-1)); 15342 } else { 15343 tcp_req = tcp_req_find_req_for_seq(tp, ae->ack); 15344 } 15345 #endif 15346 memset(&log, 0, sizeof(log)); 15347 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 15348 if (rack->rack_no_prr == 0) 15349 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 15350 else 15351 log.u_bbr.flex1 = 0; 15352 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 15353 log.u_bbr.use_lt_bw <<= 1; 15354 log.u_bbr.use_lt_bw |= rack->r_might_revert; 15355 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 15356 log.u_bbr.bbr_state = rack->rc_free_cnt; 15357 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 15358 log.u_bbr.pkts_out = tp->t_maxseg; 15359 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 15360 log.u_bbr.flex7 = 1; 15361 log.u_bbr.lost = ae->flags; 15362 log.u_bbr.cwnd_gain = ackval; 15363 log.u_bbr.pacing_gain = 0x2; 15364 if (ae->flags & TSTMP_HDWR) { 15365 /* Record the hardware timestamp if present */ 15366 log.u_bbr.flex3 = M_TSTMP; 15367 ts.tv_sec = ae->timestamp / 1000000000; 15368 ts.tv_nsec = ae->timestamp % 1000000000; 15369 ltv.tv_sec = ts.tv_sec; 15370 ltv.tv_usec = ts.tv_nsec / 1000; 15371 log.u_bbr.lt_epoch = tcp_tv_to_usec(&ltv); 15372 } else if (ae->flags & TSTMP_LRO) { 15373 /* Record the LRO arrival timestamp */ 15374 log.u_bbr.flex3 = M_TSTMP_LRO; 15375 ts.tv_sec = ae->timestamp / 1000000000; 15376 ts.tv_nsec = ae->timestamp % 1000000000; 15377 ltv.tv_sec = ts.tv_sec; 15378 ltv.tv_usec = ts.tv_nsec / 1000; 15379 log.u_bbr.flex5 = tcp_tv_to_usec(&ltv); 15380 } 15381 log.u_bbr.timeStamp = tcp_get_usecs(&ltv); 15382 /* Log the rcv time */ 15383 log.u_bbr.delRate = ae->timestamp; 15384 #ifdef TCP_REQUEST_TRK 15385 log.u_bbr.applimited = tp->t_tcpreq_closed; 15386 log.u_bbr.applimited <<= 8; 15387 log.u_bbr.applimited |= tp->t_tcpreq_open; 15388 log.u_bbr.applimited <<= 8; 15389 log.u_bbr.applimited |= tp->t_tcpreq_req; 15390 if (tcp_req) { 15391 /* Copy out any client req info */ 15392 /* seconds */ 15393 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); 15394 /* useconds */ 15395 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); 15396 log.u_bbr.rttProp = tcp_req->timestamp; 15397 log.u_bbr.cur_del_rate = tcp_req->start; 15398 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { 15399 log.u_bbr.flex8 |= 1; 15400 } else { 15401 log.u_bbr.flex8 |= 2; 15402 log.u_bbr.bw_inuse = tcp_req->end; 15403 } 15404 log.u_bbr.flex6 = tcp_req->start_seq; 15405 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { 15406 log.u_bbr.flex8 |= 4; 15407 log.u_bbr.epoch = tcp_req->end_seq; 15408 } 15409 } 15410 #endif 15411 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf)); 15412 th = (struct tcphdr *)tcp_hdr_buf; 15413 th->th_seq = ae->seq; 15414 th->th_ack = ae->ack; 15415 th->th_win = ae->win; 15416 /* Now fill in the ports */ 15417 th->th_sport = inp->inp_fport; 15418 th->th_dport =
inp->inp_lport; 15419 tcp_set_flags(th, ae->flags); 15420 /* Now do we have a timestamp option? */ 15421 if (ae->flags & HAS_TSTMP) { 15422 u_char *cp; 15423 uint32_t val; 15424 15425 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); 15426 cp = (u_char *)(th + 1); 15427 *cp = TCPOPT_NOP; 15428 cp++; 15429 *cp = TCPOPT_NOP; 15430 cp++; 15431 *cp = TCPOPT_TIMESTAMP; 15432 cp++; 15433 *cp = TCPOLEN_TIMESTAMP; 15434 cp++; 15435 val = htonl(ae->ts_value); 15436 bcopy((char *)&val, 15437 (char *)cp, sizeof(uint32_t)); 15438 val = htonl(ae->ts_echo); 15439 bcopy((char *)&val, 15440 (char *)(cp + 4), sizeof(uint32_t)); 15441 } else 15442 th->th_off = (sizeof(struct tcphdr) >> 2); 15443 15444 /* 15445 * For sane logging we need to play a little trick. 15446 * If the ack were fully processed we would have moved 15447 * snd_una to high_seq, but since compressed acks are 15448 * processed in two phases, at this point (logging) snd_una 15449 * won't be advanced. So we would see multiple acks showing 15450 * the advancement. We can prevent that by "pretending" that 15451 * snd_una was advanced and then un-advancing it so that the 15452 * logging code has the right value for tlb_snd_una. 15453 */ 15454 if (tp->snd_una != high_seq) { 15455 orig_snd_una = tp->snd_una; 15456 tp->snd_una = high_seq; 15457 xx = 1; 15458 } else 15459 xx = 0; 15460 TCP_LOG_EVENTP(tp, th, 15461 &tptosocket(tp)->so_rcv, 15462 &tptosocket(tp)->so_snd, TCP_LOG_IN, 0, 15463 0, &log, true, &ltv); 15464 if (xx) { 15465 tp->snd_una = orig_snd_una; 15466 } 15467 } 15468 15469 } 15470 15471 static void 15472 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts) 15473 { 15474 uint32_t us_rtt; 15475 /* 15476 * A persist or keep-alive was forced out, update our 15477 * min rtt time. Note we now worry about lost responses. 15478 * When a subsequent keep-alive or persist times out 15479 * and forced_ack is still on, then the last probe 15480 * was not responded to. In such cases we have a 15481 * sysctl that controls the behavior: either we apply 15482 * the rtt but with reduced confidence (0), or we just 15483 * plain don't apply the rtt estimate. Having data flow 15484 * will clear the probe_not_answered flag i.e. cum-ack 15485 * move forward <or> exiting and reentering persists. 15486 */ 15487 15488 rack->forced_ack = 0; 15489 rack->rc_tp->t_rxtshift = 0; 15490 if ((rack->rc_in_persist && 15491 (tiwin == rack->rc_tp->snd_wnd)) || 15492 (rack->rc_in_persist == 0)) { 15493 /* 15494 * In persists only apply the RTT update if this is 15495 * a response to our window probe. And that 15496 * means the rwnd sent must match the current 15497 * snd_wnd. If it does not, then we got a 15498 * window update ack instead. For keepalive 15499 * we allow the answer no matter what the window. 15500 * 15501 * Note that if the probe_not_answered is set then 15502 * the forced_ack_ts is the oldest one i.e. the first 15503 * probe sent that might have been lost. This assures 15504 * us that if we do calculate an RTT it errs on the 15505 * long side rather than being some bogus short value.
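 *
 * For example, if the first (possibly lost) probe went out when
 * forced_ack_ts was recorded and the answer only arrives 600000 usec
 * later, the us_rtt computed below is 600000; with the reduced
 * confidence sysctl on it is fed to the RTT machinery with a
 * confidence of 0, otherwise it is simply discarded.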
15506 */ 15507 if (rack->rc_in_persist) 15508 counter_u64_add(rack_persists_acks, 1); 15509 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 15510 if (us_rtt == 0) 15511 us_rtt = 1; 15512 if (rack->probe_not_answered == 0) { 15513 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 15514 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); 15515 } else { 15516 /* We have a retransmitted probe here too */ 15517 if (rack_apply_rtt_with_reduced_conf) { 15518 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 15519 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1); 15520 } 15521 } 15522 } 15523 } 15524 15525 static void 15526 rack_new_round_starts(struct tcpcb *tp, struct tcp_rack *rack, uint32_t high_seq) 15527 { 15528 /* 15529 * The next send has occurred; mark the end of the round 15530 * as when that data gets acknowledged. We can 15531 * also do common things we might need to do when 15532 * a round begins. 15533 */ 15534 rack->r_ctl.roundends = tp->snd_max; 15535 rack->rc_new_rnd_needed = 0; 15536 rack_log_hystart_event(rack, tp->snd_max, 4); 15537 } 15538 15539 15540 static void 15541 rack_log_pcm(struct tcp_rack *rack, uint8_t mod, uint32_t flex1, uint32_t flex2, 15542 uint32_t flex3) 15543 { 15544 if (tcp_bblogging_on(rack->rc_tp)) { 15545 union tcp_log_stackspecific log; 15546 struct timeval tv; 15547 15548 (void)tcp_get_usecs(&tv); 15549 memset(&log, 0, sizeof(log)); 15550 log.u_bbr.timeStamp = tcp_tv_to_usec(&tv); 15551 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15552 log.u_bbr.flex8 = mod; 15553 log.u_bbr.flex1 = flex1; 15554 log.u_bbr.flex2 = flex2; 15555 log.u_bbr.flex3 = flex3; 15556 log.u_bbr.flex4 = rack_pcm_every_n_rounds; 15557 log.u_bbr.flex5 = rack->r_ctl.pcm_idle_rounds; 15558 log.u_bbr.bbr_substate = rack->pcm_needed; 15559 log.u_bbr.bbr_substate <<= 1; 15560 log.u_bbr.bbr_substate |= rack->pcm_in_progress; 15561 log.u_bbr.bbr_substate <<= 1; 15562 log.u_bbr.bbr_substate |= rack->pcm_enabled; /* bits are NIE for Needed, Inprogress, Enabled */ 15563 (void)tcp_log_event(rack->rc_tp, NULL, NULL, NULL, TCP_PCM_MEASURE, ERRNO_UNK, 15564 0, &log, false, NULL, NULL, 0, &tv); 15565 } 15566 } 15567 15568 static void 15569 rack_new_round_setup(struct tcpcb *tp, struct tcp_rack *rack, uint32_t high_seq) 15570 { 15571 /* 15572 * The round (current_round) has ended. We now 15573 * set up for the next round by incrementing the 15574 * round number and doing any round-specific 15575 * things. 15576 */ 15577 rack_log_hystart_event(rack, high_seq, 21); 15578 rack->r_ctl.current_round++; 15579 /* New round (current_round) begins at next send */ 15580 rack->rc_new_rnd_needed = 1; 15581 if ((rack->pcm_enabled == 1) && 15582 (rack->pcm_needed == 0) && 15583 (rack->pcm_in_progress == 0)) { 15584 /* 15585 * If we have enabled PCM, then we need to 15586 * check if the round has advanced to the state 15587 * where one is required.
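 *
 * For illustration, with rack_pcm_every_n_rounds at 100,
 * pcm_idle_rounds at 0 and last_pcm_round at 400, the check below
 * flags a measurement as needed once current_round reaches 500.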
15588 */ 15589 int rnds; 15590 15591 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; 15592 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { 15593 rack->pcm_needed = 1; 15594 rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_round ); 15595 } else if (rack_verbose_logging) { 15596 rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_round ); 15597 } 15598 } 15599 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { 15600 /* We have hystart enabled, send the round info in */ 15601 if (CC_ALGO(tp)->newround != NULL) { 15602 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); 15603 } 15604 } 15605 /* 15606 * For DGP an initial startup check. We want to validate 15607 * that we are not just pushing on slow-start and just 15608 * not gaining, i.e. filling buffers without getting any 15609 * boost in b/w during the initial slow-start. 15610 */ 15611 if (rack->dgp_on && 15612 (rack->rc_initial_ss_comp == 0) && 15613 (tp->snd_cwnd < tp->snd_ssthresh) && 15614 (rack->r_ctl.num_measurements >= RACK_REQ_AVG) && 15615 (rack->r_ctl.gp_rnd_thresh > 0) && 15616 ((rack->r_ctl.current_round - rack->r_ctl.last_rnd_of_gp_rise) >= rack->r_ctl.gp_rnd_thresh)) { 15617 15618 /* 15619 * We are in the initial SS and we have had rack_rnd_cnt_req rounds (def:5) where 15620 * we have not gained the required amount in the gp_est (120.0% aka 1200). Let's 15621 * exit SS. 15622 * 15623 * Pick up the flight size now as we enter slowstart (not the 15624 * cwnd which may be inflated). 15625 */ 15626 rack->rc_initial_ss_comp = 1; 15627 15628 if (tcp_bblogging_on(rack->rc_tp)) { 15629 union tcp_log_stackspecific log; 15630 struct timeval tv; 15631 15632 memset(&log, 0, sizeof(log)); 15633 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15634 log.u_bbr.flex1 = rack->r_ctl.current_round; 15635 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; 15636 log.u_bbr.flex3 = rack->r_ctl.gp_rnd_thresh; 15637 log.u_bbr.flex4 = rack->r_ctl.gate_to_fs; 15638 log.u_bbr.flex5 = rack->r_ctl.ss_hi_fs; 15639 log.u_bbr.flex8 = 40; 15640 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 15641 0, &log, false, NULL, __func__, __LINE__,&tv); 15642 } 15643 if ((rack->r_ctl.gate_to_fs == 1) && 15644 (tp->snd_cwnd > rack->r_ctl.ss_hi_fs)) { 15645 tp->snd_cwnd = rack->r_ctl.ss_hi_fs; 15646 } 15647 tp->snd_ssthresh = tp->snd_cwnd - 1; 15648 /* Turn off any fast output running */ 15649 rack->r_fast_output = 0; 15650 } 15651 } 15652 15653 static int 15654 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) 15655 { 15656 /* 15657 * Handle a "special" compressed ack mbuf. Each incoming 15658 * ack has only four possible dispositions: 15659 * 15660 * A) It moves the cum-ack forward 15661 * B) It is behind the cum-ack. 15662 * C) It is a window-update ack. 15663 * D) It is a dup-ack. 15664 * 15665 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES 15666 * in the incoming mbuf. We also need to still pay attention 15667 * to nxt_pkt since there may be another packet after this 15668 * one.
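 *
 * As a small illustration of the classification done below: with
 * the current cum-ack (high_seq) at 1000 and an unchanged window,
 * an entry acking 900 is ACK_BEHIND, one acking 1000 is ACK_DUPACK,
 * one acking 1000 with a new window is ACK_RWND, and one acking
 * 1500 is ACK_CUMACK.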
15669 */ 15670 #ifdef TCP_ACCOUNTING 15671 uint64_t ts_val; 15672 uint64_t rdstc; 15673 #endif 15674 int segsiz; 15675 struct timespec ts; 15676 struct tcp_rack *rack; 15677 struct tcp_ackent *ae; 15678 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack; 15679 int cnt, i, did_out, ourfinisacked = 0; 15680 struct tcpopt to_holder, *to = NULL; 15681 #ifdef TCP_ACCOUNTING 15682 int win_up_req = 0; 15683 #endif 15684 int nsegs = 0; 15685 int under_pacing = 0; 15686 int post_recovery = 0; 15687 #ifdef TCP_ACCOUNTING 15688 sched_pin(); 15689 #endif 15690 rack = (struct tcp_rack *)tp->t_fb_ptr; 15691 if (rack->gp_ready && 15692 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) 15693 under_pacing = 1; 15694 15695 if (rack->r_state != tp->t_state) 15696 rack_set_state(tp, rack); 15697 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 15698 (tp->t_flags & TF_GPUTINPROG)) { 15699 /* 15700 * We have a goodput in progress 15701 * and we have entered a late state. 15702 * Do we have enough data in the sb 15703 * to handle the GPUT request? 15704 */ 15705 uint32_t bytes; 15706 15707 bytes = tp->gput_ack - tp->gput_seq; 15708 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 15709 bytes += tp->gput_seq - tp->snd_una; 15710 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 15711 /* 15712 * There are not enough bytes in the socket 15713 * buffer that have been sent to cover this 15714 * measurement. Cancel it. 15715 */ 15716 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 15717 rack->r_ctl.rc_gp_srtt /*flex1*/, 15718 tp->gput_seq, 15719 0, 0, 18, __LINE__, NULL, 0); 15720 tp->t_flags &= ~TF_GPUTINPROG; 15721 } 15722 } 15723 to = &to_holder; 15724 to->to_flags = 0; 15725 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 15726 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 15727 cnt = m->m_len / sizeof(struct tcp_ackent); 15728 counter_u64_add(rack_multi_single_eq, cnt); 15729 high_seq = tp->snd_una; 15730 the_win = tp->snd_wnd; 15731 win_seq = tp->snd_wl1; 15732 win_upd_ack = tp->snd_wl2; 15733 cts = tcp_tv_to_usec(tv); 15734 ms_cts = tcp_tv_to_msec(tv); 15735 rack->r_ctl.rc_rcvtime = cts; 15736 segsiz = ctf_fixed_maxseg(tp); 15737 if ((rack->rc_gp_dyn_mul) && 15738 (rack->use_fixed_rate == 0) && 15739 (rack->rc_always_pace)) { 15740 /* Check in on probertt */ 15741 rack_check_probe_rtt(rack, cts); 15742 } 15743 for (i = 0; i < cnt; i++) { 15744 #ifdef TCP_ACCOUNTING 15745 ts_val = get_cyclecount(); 15746 #endif 15747 rack_clear_rate_sample(rack); 15748 ae = ((mtod(m, struct tcp_ackent *)) + i); 15749 if (ae->flags & TH_FIN) 15750 rack_log_pacing_delay_calc(rack, 15751 0, 15752 0, 15753 0, 15754 rack_get_gp_est(rack), /* delRate */ 15755 rack_get_lt_bw(rack), /* rttProp */ 15756 20, __LINE__, NULL, 0); 15757 /* Setup the window */ 15758 tiwin = ae->win << tp->snd_scale; 15759 if (tiwin > rack->r_ctl.rc_high_rwnd) 15760 rack->r_ctl.rc_high_rwnd = tiwin; 15761 /* figure out the type of ack */ 15762 if (SEQ_LT(ae->ack, high_seq)) { 15763 /* Case B*/ 15764 ae->ack_val_set = ACK_BEHIND; 15765 } else if (SEQ_GT(ae->ack, high_seq)) { 15766 /* Case A */ 15767 ae->ack_val_set = ACK_CUMACK; 15768 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ 15769 /* Case D */ 15770 ae->ack_val_set = ACK_DUPACK; 15771 } else { 15772 /* Case C */ 15773 ae->ack_val_set = ACK_RWND; 15774 } 15775 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__); 15776 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 15777 /* Validate timestamp */ 15778 if (ae->flags & 
HAS_TSTMP) { 15779 /* Setup for a timestamp */ 15780 to->to_flags = TOF_TS; 15781 ae->ts_echo -= tp->ts_offset; 15782 to->to_tsecr = ae->ts_echo; 15783 to->to_tsval = ae->ts_value; 15784 /* 15785 * If echoed timestamp is later than the current time, fall back to 15786 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 15787 * were used when this connection was established. 15788 */ 15789 if (TSTMP_GT(ae->ts_echo, ms_cts)) 15790 to->to_tsecr = 0; 15791 if (tp->ts_recent && 15792 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 15793 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 15794 #ifdef TCP_ACCOUNTING 15795 rdstc = get_cyclecount(); 15796 if (rdstc > ts_val) { 15797 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15798 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 15799 } 15800 } 15801 #endif 15802 continue; 15803 } 15804 } 15805 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 15806 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 15807 tp->ts_recent_age = tcp_ts_getticks(); 15808 tp->ts_recent = ae->ts_value; 15809 } 15810 } else { 15811 /* Setup for no options */ 15812 to->to_flags = 0; 15813 } 15814 /* Update the rcv time and possibly perform idle reduction */ 15815 if (tp->t_idle_reduce && 15816 (tp->snd_max == tp->snd_una) && 15817 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 15818 counter_u64_add(rack_input_idle_reduces, 1); 15819 rack_cc_after_idle(rack, tp); 15820 } 15821 tp->t_rcvtime = ticks; 15822 /* Now what about ECN of a chain of pure ACKs? */ 15823 if (tcp_ecn_input_segment(tp, ae->flags, 0, 15824 tcp_packets_this_ack(tp, ae->ack), 15825 ae->codepoint)) 15826 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__); 15827 #ifdef TCP_ACCOUNTING 15828 /* Count for the specific type of ack in */ 15829 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15830 tp->tcp_cnt_counters[ae->ack_val_set]++; 15831 } 15832 #endif 15833 /* 15834 * Note how we could move these up in the determination 15835 * above, but we don't, so that the timestamp checks (and ECN) 15836 * are done first before we do any processing on the ACK. 15837 * The non-compressed path through the code has this 15838 * weakness (noted by @jtl) that it actually does some 15839 * processing before verifying the timestamp information. 15840 * We don't take that path here, which is why we set 15841 * the ack_val_set first, do the timestamp and ecn 15842 * processing, and then look at what we have setup.
15843 */ 15844 if (ae->ack_val_set == ACK_BEHIND) { 15845 /* 15846 * Case B: flag reordering, if the window is not closed, 15847 * or it could be a keep-alive or persists probe 15848 */ 15849 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { 15850 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time); 15851 if (rack->r_ctl.rc_reorder_ts == 0) 15852 rack->r_ctl.rc_reorder_ts = 1; 15853 } 15854 } else if (ae->ack_val_set == ACK_DUPACK) { 15855 /* Case D */ 15856 rack_strike_dupack(rack, ae->ack); 15857 } else if (ae->ack_val_set == ACK_RWND) { 15858 /* Case C */ 15859 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 15860 ts.tv_sec = ae->timestamp / 1000000000; 15861 ts.tv_nsec = ae->timestamp % 1000000000; 15862 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 15863 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 15864 } else { 15865 rack->r_ctl.act_rcv_time = *tv; 15866 } 15867 if (rack->forced_ack) { 15868 rack_handle_probe_response(rack, tiwin, 15869 tcp_tv_to_usec(&rack->r_ctl.act_rcv_time)); 15870 } 15871 #ifdef TCP_ACCOUNTING 15872 win_up_req = 1; 15873 #endif 15874 win_upd_ack = ae->ack; 15875 win_seq = ae->seq; 15876 the_win = tiwin; 15877 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts); 15878 } else { 15879 /* Case A */ 15880 if (SEQ_GT(ae->ack, tp->snd_max)) { 15881 /* 15882 * We just send an ack since the incoming 15883 * ack is beyond the largest seq we sent. 15884 */ 15885 if ((tp->t_flags & TF_ACKNOW) == 0) { 15886 ctf_ack_war_checks(tp); 15887 if (tp->t_flags & TF_ACKNOW) 15888 rack->r_wanted_output = 1; 15889 } 15890 } else { 15891 nsegs++; 15892 /* If the window changed, setup to update */ 15893 if (tiwin != tp->snd_wnd) { 15894 win_upd_ack = ae->ack; 15895 win_seq = ae->seq; 15896 the_win = tiwin; 15897 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts); 15898 } 15899 #ifdef TCP_ACCOUNTING 15900 /* Account for the acks */ 15901 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15902 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); 15903 } 15904 #endif 15905 high_seq = ae->ack; 15906 /* Setup our act_rcv_time */ 15907 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 15908 ts.tv_sec = ae->timestamp / 1000000000; 15909 ts.tv_nsec = ae->timestamp % 1000000000; 15910 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 15911 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 15912 } else { 15913 rack->r_ctl.act_rcv_time = *tv; 15914 } 15915 rack_process_to_cumack(tp, rack, ae->ack, cts, to, 15916 tcp_tv_to_lusec(&rack->r_ctl.act_rcv_time)); 15917 #ifdef TCP_REQUEST_TRK 15918 rack_req_check_for_comp(rack, high_seq); 15919 #endif 15920 if (rack->rc_dsack_round_seen) { 15921 /* Is the dsack round over?
*/ 15922 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { 15923 /* Yes it is */ 15924 rack->rc_dsack_round_seen = 0; 15925 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 15926 } 15927 } 15928 } 15929 } 15930 /* And lets be sure to commit the rtt measurements for this ack */ 15931 tcp_rack_xmit_timer_commit(rack, tp); 15932 #ifdef TCP_ACCOUNTING 15933 rdstc = get_cyclecount(); 15934 if (rdstc > ts_val) { 15935 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15936 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 15937 if (ae->ack_val_set == ACK_CUMACK) 15938 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); 15939 } 15940 } 15941 #endif 15942 } 15943 #ifdef TCP_ACCOUNTING 15944 ts_val = get_cyclecount(); 15945 #endif 15946 /* Tend to any collapsed window */ 15947 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) { 15948 /* The peer collapsed the window */ 15949 rack_collapsed_window(rack, (tp->snd_max - high_seq), high_seq, __LINE__); 15950 } else if (rack->rc_has_collapsed) 15951 rack_un_collapse_window(rack, __LINE__); 15952 if ((rack->r_collapse_point_valid) && 15953 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point))) 15954 rack->r_collapse_point_valid = 0; 15955 acked_amount = acked = (high_seq - tp->snd_una); 15956 if (acked) { 15957 /* 15958 * The draft (v3) calls for us to use SEQ_GEQ, but that 15959 * causes issues when we are just going app limited. Lets 15960 * instead use SEQ_GT <or> where its equal but more data 15961 * is outstanding. 15962 * 15963 * Also make sure we are on the last ack of a series. We 15964 * have to have all the ack's processed in queue to know 15965 * if there is something left outstanding. 15966 * 15967 */ 15968 if (SEQ_GEQ(high_seq, rack->r_ctl.roundends) && 15969 (rack->rc_new_rnd_needed == 0) && 15970 (nxt_pkt == 0)) { 15971 /* 15972 * We have crossed into a new round with 15973 * this th_ack value. 15974 */ 15975 rack_new_round_setup(tp, rack, high_seq); 15976 } 15977 /* 15978 * Clear the probe not answered flag 15979 * since cum-ack moved forward. 15980 */ 15981 rack->probe_not_answered = 0; 15982 if (tp->t_flags & TF_NEEDSYN) { 15983 /* 15984 * T/TCP: Connection was half-synchronized, and our SYN has 15985 * been ACK'd (so connection is now fully synchronized). Go 15986 * to non-starred state, increment snd_una for ACK of SYN, 15987 * and check if we can do window scaling. 15988 */ 15989 tp->t_flags &= ~TF_NEEDSYN; 15990 tp->snd_una++; 15991 acked_amount = acked = (high_seq - tp->snd_una); 15992 } 15993 if (acked > sbavail(&so->so_snd)) 15994 acked_amount = sbavail(&so->so_snd); 15995 if (IN_FASTRECOVERY(tp->t_flags) && 15996 (rack->rack_no_prr == 0)) 15997 rack_update_prr(tp, rack, acked_amount, high_seq); 15998 if (IN_RECOVERY(tp->t_flags)) { 15999 if (SEQ_LT(high_seq, tp->snd_recover) && 16000 (SEQ_LT(high_seq, tp->snd_max))) { 16001 tcp_rack_partialack(tp); 16002 } else { 16003 rack_post_recovery(tp, high_seq); 16004 post_recovery = 1; 16005 } 16006 } else if ((rack->rto_from_rec == 1) && 16007 SEQ_GEQ(high_seq, tp->snd_recover)) { 16008 /* 16009 * We were in recovery, hit a rxt timeout 16010 * and never re-entered recovery. The timeout(s) 16011 * made up all the lost data. In such a case 16012 * we need to clear the rto_from_rec flag. 
16013 */ 16014 rack->rto_from_rec = 0; 16015 } 16016 /* Handle the rack-log-ack part (sendmap) */ 16017 if ((sbused(&so->so_snd) == 0) && 16018 (acked > acked_amount) && 16019 (tp->t_state >= TCPS_FIN_WAIT_1) && 16020 (tp->t_flags & TF_SENTFIN)) { 16021 /* 16022 * We must be sure our fin 16023 * was sent and acked (we can be 16024 * in FIN_WAIT_1 without having 16025 * sent the fin). 16026 */ 16027 ourfinisacked = 1; 16028 /* 16029 * Lets make sure snd_una is updated 16030 * since most likely acked_amount = 0 (it 16031 * should be). 16032 */ 16033 tp->snd_una = high_seq; 16034 } 16035 /* Did we make a RTO error? */ 16036 if ((tp->t_flags & TF_PREVVALID) && 16037 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 16038 tp->t_flags &= ~TF_PREVVALID; 16039 if (tp->t_rxtshift == 1 && 16040 (int)(ticks - tp->t_badrxtwin) < 0) 16041 rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__); 16042 } 16043 /* Handle the data in the socket buffer */ 16044 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 16045 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 16046 if (acked_amount > 0) { 16047 uint32_t p_cwnd; 16048 struct mbuf *mfree; 16049 16050 if (post_recovery) { 16051 /* 16052 * Grab the segsiz, multiply by 2 and add the snd_cwnd 16053 * that is the max the CC should add if we are exiting 16054 * recovery and doing a late add. 16055 */ 16056 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16057 p_cwnd <<= 1; 16058 p_cwnd += tp->snd_cwnd; 16059 } 16060 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, post_recovery); 16061 if (post_recovery && (tp->snd_cwnd > p_cwnd)) { 16062 /* Must be non-newreno (cubic) getting too ahead of itself */ 16063 tp->snd_cwnd = p_cwnd; 16064 } 16065 SOCK_SENDBUF_LOCK(so); 16066 mfree = sbcut_locked(&so->so_snd, acked_amount); 16067 tp->snd_una = high_seq; 16068 /* Note we want to hold the sb lock through the sendmap adjust */ 16069 rack_adjust_sendmap_head(rack, &so->so_snd); 16070 /* Wake up the socket if we have room to write more */ 16071 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 16072 sowwakeup_locked(so); 16073 m_freem(mfree); 16074 } 16075 /* update progress */ 16076 tp->t_acktime = ticks; 16077 rack_log_progress_event(rack, tp, tp->t_acktime, 16078 PROGRESS_UPDATE, __LINE__); 16079 /* Clear out shifts and such */ 16080 tp->t_rxtshift = 0; 16081 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 16082 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 16083 rack->rc_tlp_in_progress = 0; 16084 rack->r_ctl.rc_tlp_cnt_out = 0; 16085 /* Send recover and snd_nxt must be dragged along */ 16086 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 16087 tp->snd_recover = tp->snd_una; 16088 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 16089 tp->snd_nxt = tp->snd_max; 16090 /* 16091 * If the RXT timer is running we want to 16092 * stop it, so we can restart a TLP (or new RXT). 
16093 */ 16094 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 16095 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16096 tp->snd_wl2 = high_seq; 16097 tp->t_dupacks = 0; 16098 if (under_pacing && 16099 (rack->use_fixed_rate == 0) && 16100 (rack->in_probe_rtt == 0) && 16101 rack->rc_gp_dyn_mul && 16102 rack->rc_always_pace) { 16103 /* Check if we are dragging bottom */ 16104 rack_check_bottom_drag(tp, rack, so); 16105 } 16106 if (tp->snd_una == tp->snd_max) { 16107 tp->t_flags &= ~TF_PREVVALID; 16108 rack->r_ctl.retran_during_recovery = 0; 16109 rack->rc_suspicious = 0; 16110 rack->r_ctl.dsack_byte_cnt = 0; 16111 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 16112 if (rack->r_ctl.rc_went_idle_time == 0) 16113 rack->r_ctl.rc_went_idle_time = 1; 16114 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 16115 if (sbavail(&tptosocket(tp)->so_snd) == 0) 16116 tp->t_acktime = 0; 16117 /* Set so we might enter persists... */ 16118 rack->r_wanted_output = 1; 16119 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16120 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 16121 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 16122 (sbavail(&so->so_snd) == 0) && 16123 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 16124 /* 16125 * The socket was gone and the 16126 * peer sent data (not now in the past), time to 16127 * reset him. 16128 */ 16129 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16130 /* tcp_close will kill the inp pre-log the Reset */ 16131 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 16132 #ifdef TCP_ACCOUNTING 16133 rdstc = get_cyclecount(); 16134 if (rdstc > ts_val) { 16135 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16136 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16137 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16138 } 16139 } 16140 #endif 16141 m_freem(m); 16142 tp = tcp_close(tp); 16143 if (tp == NULL) { 16144 #ifdef TCP_ACCOUNTING 16145 sched_unpin(); 16146 #endif 16147 return (1); 16148 } 16149 /* 16150 * We would normally do drop-with-reset which would 16151 * send back a reset. We can't since we don't have 16152 * all the needed bits. Instead lets arrange for 16153 * a call to tcp_output(). That way since we 16154 * are in the closed state we will generate a reset. 16155 * 16156 * Note if tcp_accounting is on we don't unpin since 16157 * we do that after the goto label. 16158 */ 16159 goto send_out_a_rst; 16160 } 16161 if ((sbused(&so->so_snd) == 0) && 16162 (tp->t_state >= TCPS_FIN_WAIT_1) && 16163 (tp->t_flags & TF_SENTFIN)) { 16164 /* 16165 * If we can't receive any more data, then closing user can 16166 * proceed. Starting the timer is contrary to the 16167 * specification, but if we don't get a FIN we'll hang 16168 * forever. 16169 * 16170 */ 16171 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 16172 soisdisconnected(so); 16173 tcp_timer_activate(tp, TT_2MSL, 16174 (tcp_fast_finwait2_recycle ? 16175 tcp_finwait2_timeout : 16176 TP_MAXIDLE(tp))); 16177 } 16178 if (ourfinisacked == 0) { 16179 /* 16180 * We don't change to fin-wait-2 if we have our fin acked 16181 * which means we are probably in TCPS_CLOSING. 
16182 */ 16183 tcp_state_change(tp, TCPS_FIN_WAIT_2); 16184 } 16185 } 16186 } 16187 /* Wake up the socket if we have room to write more */ 16188 if (sbavail(&so->so_snd)) { 16189 rack->r_wanted_output = 1; 16190 if (ctf_progress_timeout_check(tp, true)) { 16191 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 16192 tp, tick, PROGRESS_DROP, __LINE__); 16193 /* 16194 * We cheat here and don't send a RST, we should send one 16195 * when the pacer drops the connection. 16196 */ 16197 #ifdef TCP_ACCOUNTING 16198 rdstc = get_cyclecount(); 16199 if (rdstc > ts_val) { 16200 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16201 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16202 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16203 } 16204 } 16205 sched_unpin(); 16206 #endif 16207 (void)tcp_drop(tp, ETIMEDOUT); 16208 m_freem(m); 16209 return (1); 16210 } 16211 } 16212 if (ourfinisacked) { 16213 switch(tp->t_state) { 16214 case TCPS_CLOSING: 16215 #ifdef TCP_ACCOUNTING 16216 rdstc = get_cyclecount(); 16217 if (rdstc > ts_val) { 16218 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16219 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16220 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16221 } 16222 } 16223 sched_unpin(); 16224 #endif 16225 tcp_twstart(tp); 16226 m_freem(m); 16227 return (1); 16228 break; 16229 case TCPS_LAST_ACK: 16230 #ifdef TCP_ACCOUNTING 16231 rdstc = get_cyclecount(); 16232 if (rdstc > ts_val) { 16233 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16234 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16235 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16236 } 16237 } 16238 sched_unpin(); 16239 #endif 16240 tp = tcp_close(tp); 16241 ctf_do_drop(m, tp); 16242 return (1); 16243 break; 16244 case TCPS_FIN_WAIT_1: 16245 #ifdef TCP_ACCOUNTING 16246 rdstc = get_cyclecount(); 16247 if (rdstc > ts_val) { 16248 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16249 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16250 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16251 } 16252 } 16253 #endif 16254 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 16255 soisdisconnected(so); 16256 tcp_timer_activate(tp, TT_2MSL, 16257 (tcp_fast_finwait2_recycle ? 16258 tcp_finwait2_timeout : 16259 TP_MAXIDLE(tp))); 16260 } 16261 tcp_state_change(tp, TCPS_FIN_WAIT_2); 16262 break; 16263 default: 16264 break; 16265 } 16266 } 16267 if (rack->r_fast_output) { 16268 /* 16269 * We re doing fast output.. can we expand that? 
16270 */ 16271 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 16272 } 16273 #ifdef TCP_ACCOUNTING 16274 rdstc = get_cyclecount(); 16275 if (rdstc > ts_val) { 16276 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16277 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16278 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16279 } 16280 } 16281 16282 } else if (win_up_req) { 16283 rdstc = get_cyclecount(); 16284 if (rdstc > ts_val) { 16285 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16286 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 16287 } 16288 } 16289 #endif 16290 } 16291 /* Now is there a next packet, if so we are done */ 16292 m_freem(m); 16293 did_out = 0; 16294 if (nxt_pkt) { 16295 #ifdef TCP_ACCOUNTING 16296 sched_unpin(); 16297 #endif 16298 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 16299 return (0); 16300 } 16301 rack_handle_might_revert(tp, rack); 16302 ctf_calc_rwin(so, tp); 16303 if ((rack->r_wanted_output != 0) || 16304 (rack->r_fast_output != 0) || 16305 (tp->t_flags & TF_ACKNOW )) { 16306 send_out_a_rst: 16307 if (tcp_output(tp) < 0) { 16308 #ifdef TCP_ACCOUNTING 16309 sched_unpin(); 16310 #endif 16311 return (1); 16312 } 16313 did_out = 1; 16314 } 16315 if (tp->t_flags2 & TF2_HPTS_CALLS) 16316 tp->t_flags2 &= ~TF2_HPTS_CALLS; 16317 rack_free_trim(rack); 16318 #ifdef TCP_ACCOUNTING 16319 sched_unpin(); 16320 #endif 16321 rack_timer_audit(tp, rack, &so->so_snd); 16322 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 16323 return (0); 16324 } 16325 16326 #define TCP_LRO_TS_OPTION \ 16327 ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \ 16328 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP) 16329 16330 static int 16331 rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 16332 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, int32_t nxt_pkt, 16333 struct timeval *tv) 16334 { 16335 struct inpcb *inp = tptoinpcb(tp); 16336 struct socket *so = tptosocket(tp); 16337 #ifdef TCP_ACCOUNTING 16338 uint64_t ts_val; 16339 #endif 16340 int32_t thflags, retval, did_out = 0; 16341 int32_t way_out = 0; 16342 /* 16343 * cts - is the current time from tv (caller gets ts) in microseconds. 16344 * ms_cts - is the current time from tv in milliseconds. 16345 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. 16346 */ 16347 uint32_t cts, us_cts, ms_cts; 16348 uint32_t tiwin; 16349 struct timespec ts; 16350 struct tcpopt to; 16351 struct tcp_rack *rack; 16352 struct rack_sendmap *rsm; 16353 int32_t prev_state = 0; 16354 int no_output = 0; 16355 int time_remaining = 0; 16356 #ifdef TCP_ACCOUNTING 16357 int ack_val_set = 0xf; 16358 #endif 16359 int nsegs; 16360 16361 NET_EPOCH_ASSERT(); 16362 INP_WLOCK_ASSERT(inp); 16363 16364 /* 16365 * tv passed from common code is from either M_TSTMP_LRO or 16366 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 16367 */ 16368 rack = (struct tcp_rack *)tp->t_fb_ptr; 16369 if (rack->rack_deferred_inited == 0) { 16370 /* 16371 * If we are the connecting socket we will 16372 * hit rack_init() when no sequence numbers 16373 * are setup. This makes it so we must defer 16374 * some initialization. Call that now. 16375 */ 16376 rack_deferred_init(tp, rack); 16377 } 16378 /* 16379 * Check to see if we need to skip any output plans. This 16380 * can happen in the non-LRO path where we are pacing and 16381 * must process the ack coming in but need to defer sending 16382 * anything becase a pacing timer is running. 
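 *
 * For example, if the pacer is due to wake us 700 usec from now and
 * an inbound ack carrying nothing but a timestamp option arrives,
 * the ack is processed but no_output stays set and nothing is sent
 * until the pacing timer fires; if only 100 usec (less than the
 * pacer's minimum granularity) remained, output is allowed early.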
16383 */ 16384 us_cts = tcp_tv_to_usec(tv); 16385 if (m->m_flags & M_ACKCMP) { 16386 /* 16387 * All compressed ack's are ack's by definition so 16388 * remove any ack required flag and then do the processing. 16389 */ 16390 rack->rc_ack_required = 0; 16391 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 16392 } 16393 thflags = tcp_get_flags(th); 16394 if ((rack->rc_always_pace == 1) && 16395 (rack->rc_ack_can_sendout_data == 0) && 16396 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16397 (TSTMP_LT(us_cts, rack->r_ctl.rc_last_output_to))) { 16398 /* 16399 * Ok conditions are right for queuing the packets 16400 * but we do have to check the flags in the inp, it 16401 * could be, if a sack is present, we want to be awoken and 16402 * so should process the packets. 16403 */ 16404 time_remaining = rack->r_ctl.rc_last_output_to - us_cts; 16405 if (rack->rc_tp->t_flags2 & TF2_DONT_SACK_QUEUE) { 16406 no_output = 1; 16407 } else { 16408 /* 16409 * If there is no options, or just a 16410 * timestamp option, we will want to queue 16411 * the packets. This is the same that LRO does 16412 * and will need to change with accurate ECN. 16413 */ 16414 uint32_t *ts_ptr; 16415 int optlen; 16416 16417 optlen = (th->th_off << 2) - sizeof(struct tcphdr); 16418 ts_ptr = (uint32_t *)(th + 1); 16419 if ((optlen == 0) || 16420 ((optlen == TCPOLEN_TSTAMP_APPA) && 16421 (*ts_ptr == TCP_LRO_TS_OPTION))) 16422 no_output = 1; 16423 } 16424 if ((no_output == 1) && (time_remaining < tcp_min_hptsi_time)) { 16425 /* 16426 * It is unrealistic to think we can pace in less than 16427 * the minimum granularity of the pacer (def:250usec). So 16428 * if we have less than that time remaining we should go 16429 * ahead and allow output to be "early". We will attempt to 16430 * make up for it in any pacing time we try to apply on 16431 * the outbound packet. 16432 */ 16433 no_output = 0; 16434 } 16435 } 16436 /* 16437 * If there is a RST or FIN lets dump out the bw 16438 * with a FIN the connection may go on but we 16439 * may not. 16440 */ 16441 if ((thflags & TH_FIN) || (thflags & TH_RST)) 16442 rack_log_pacing_delay_calc(rack, 16443 rack->r_ctl.gp_bw, 16444 0, 16445 0, 16446 rack_get_gp_est(rack), /* delRate */ 16447 rack_get_lt_bw(rack), /* rttProp */ 16448 20, __LINE__, NULL, 0); 16449 if (m->m_flags & M_ACKCMP) { 16450 panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp); 16451 } 16452 cts = tcp_tv_to_usec(tv); 16453 ms_cts = tcp_tv_to_msec(tv); 16454 nsegs = m->m_pkthdr.lro_nsegs; 16455 counter_u64_add(rack_proc_non_comp_ack, 1); 16456 #ifdef TCP_ACCOUNTING 16457 sched_pin(); 16458 if (thflags & TH_ACK) 16459 ts_val = get_cyclecount(); 16460 #endif 16461 if ((m->m_flags & M_TSTMP) || 16462 (m->m_flags & M_TSTMP_LRO)) { 16463 mbuf_tstmp2timespec(m, &ts); 16464 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 16465 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 16466 } else 16467 rack->r_ctl.act_rcv_time = *tv; 16468 kern_prefetch(rack, &prev_state); 16469 prev_state = 0; 16470 /* 16471 * Unscale the window into a 32-bit value. For the SYN_SENT state 16472 * the scale is zero. 16473 */ 16474 tiwin = th->th_win << tp->snd_scale; 16475 #ifdef TCP_ACCOUNTING 16476 if (thflags & TH_ACK) { 16477 /* 16478 * We have a tradeoff here. We can either do what we are 16479 * doing i.e. pinning to this CPU and then doing the accounting 16480 * <or> we could do a critical enter, setup the rdtsc and cpu 16481 * as in below, and then validate we are on the same CPU on 16482 * exit. 
I have chosen not to do the critical enter since
         * that often will gain you a context switch, and instead lock
         * us (line above this if) to the same CPU with sched_pin(). This
         * means we may be context switched out for a higher priority
         * interrupt but we won't be moved to another CPU.
         *
         * If this occurs (which it won't very often since we most likely
         * are running this code in interrupt context and only a higher
         * priority will bump us ... clock?) we will falsely add in
         * to the time the interrupt processing time plus the ack processing
         * time. This is ok since it's a rare event.
         */
        ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin,
            ctf_fixed_maxseg(tp));
    }
#endif
    /*
     * Parse options on any incoming segment.
     */
    memset(&to, 0, sizeof(to));
    tcp_dooptions(&to, (u_char *)(th + 1),
        (th->th_off << 2) - sizeof(struct tcphdr),
        (thflags & TH_SYN) ? TO_SYN : 0);
    KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
        __func__));
    KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
        __func__));
    if (tp->t_flags2 & TF2_PROC_SACK_PROHIBIT) {
        /*
         * We don't look at SACKs from the
         * peer because the MSS is too small which
         * can subject us to an attack.
         */
        to.to_flags &= ~TOF_SACK;
    }
    if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
        (tp->t_flags & TF_GPUTINPROG)) {
        /*
         * We have a goodput measurement in progress
         * and we have entered a late state.
         * Do we have enough data in the sb
         * to handle the GPUT request?
         */
        uint32_t bytes;

        bytes = tp->gput_ack - tp->gput_seq;
        if (SEQ_GT(tp->gput_seq, tp->snd_una))
            bytes += tp->gput_seq - tp->snd_una;
        if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
            /*
             * There are not enough bytes in the socket
             * buffer that have been sent to cover this
             * measurement. Cancel it.
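             * (The bytes needed are gput_ack - gput_seq, plus the
             * stretch from snd_una up to gput_seq when the measurement
             * starts beyond snd_una; if sbavail() on the send buffer
             * cannot cover that span the sample would end early and be
             * misleading, so it is dropped below.)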
             */
            rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
                rack->r_ctl.rc_gp_srtt /*flex1*/,
                tp->gput_seq,
                0, 0, 18, __LINE__, NULL, 0);
            tp->t_flags &= ~TF_GPUTINPROG;
        }
    }
    if (tcp_bblogging_on(rack->rc_tp)) {
        union tcp_log_stackspecific log;
        struct timeval ltv;
#ifdef TCP_REQUEST_TRK
        struct tcp_sendfile_track *tcp_req;

        if (SEQ_GT(th->th_ack, tp->snd_una)) {
            tcp_req = tcp_req_find_req_for_seq(tp, (th->th_ack-1));
        } else {
            tcp_req = tcp_req_find_req_for_seq(tp, th->th_ack);
        }
#endif
        memset(&log, 0, sizeof(log));
        log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
        if (rack->rack_no_prr == 0)
            log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
        else
            log.u_bbr.flex1 = 0;
        log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
        log.u_bbr.use_lt_bw <<= 1;
        log.u_bbr.use_lt_bw |= rack->r_might_revert;
        log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
        log.u_bbr.bbr_state = rack->rc_free_cnt;
        log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
        log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
        log.u_bbr.flex3 = m->m_flags;
        log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
        log.u_bbr.lost = thflags;
        log.u_bbr.pacing_gain = 0x1;
#ifdef TCP_ACCOUNTING
        log.u_bbr.cwnd_gain = ack_val_set;
#endif
        log.u_bbr.flex7 = 2;
        if (m->m_flags & M_TSTMP) {
            /* Record the hardware timestamp if present */
            mbuf_tstmp2timespec(m, &ts);
            ltv.tv_sec = ts.tv_sec;
            ltv.tv_usec = ts.tv_nsec / 1000;
            log.u_bbr.lt_epoch = tcp_tv_to_usec(&ltv);
        } else if (m->m_flags & M_TSTMP_LRO) {
            /* Record the LRO arrival timestamp */
            mbuf_tstmp2timespec(m, &ts);
            ltv.tv_sec = ts.tv_sec;
            ltv.tv_usec = ts.tv_nsec / 1000;
            log.u_bbr.flex5 = tcp_tv_to_usec(&ltv);
        }
        log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
        /* Log the rcv time */
        log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp;
#ifdef TCP_REQUEST_TRK
        log.u_bbr.applimited = tp->t_tcpreq_closed;
        log.u_bbr.applimited <<= 8;
        log.u_bbr.applimited |= tp->t_tcpreq_open;
        log.u_bbr.applimited <<= 8;
        log.u_bbr.applimited |= tp->t_tcpreq_req;
        if (tcp_req) {
            /* Copy out any client req info */
            /* seconds */
            log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC);
            /* useconds */
            log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC);
            log.u_bbr.rttProp = tcp_req->timestamp;
            log.u_bbr.cur_del_rate = tcp_req->start;
            if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) {
                log.u_bbr.flex8 |= 1;
            } else {
                log.u_bbr.flex8 |= 2;
                log.u_bbr.bw_inuse = tcp_req->end;
            }
            log.u_bbr.flex6 = tcp_req->start_seq;
            if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) {
                log.u_bbr.flex8 |= 4;
                log.u_bbr.epoch = tcp_req->end_seq;
            }
        }
#endif
        TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
            tlen, &log, true, &ltv);
    }
    /* Remove the ack required flag if set, we have one */
    if (thflags & TH_ACK)
        rack->rc_ack_required = 0;
    rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__);
    if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
        way_out = 4;
        retval = 0;
        m_freem(m);
        goto done_with_input;
    }
    /*
     * If a segment with the ACK-bit set arrives in the SYN-SENT state
16634 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9. 16635 */ 16636 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 16637 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 16638 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 16639 ctf_do_dropwithreset(m, tp, th, tlen); 16640 #ifdef TCP_ACCOUNTING 16641 sched_unpin(); 16642 #endif 16643 return (1); 16644 } 16645 /* 16646 * If timestamps were negotiated during SYN/ACK and a 16647 * segment without a timestamp is received, silently drop 16648 * the segment, unless it is a RST segment or missing timestamps are 16649 * tolerated. 16650 * See section 3.2 of RFC 7323. 16651 */ 16652 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 16653 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 16654 way_out = 5; 16655 retval = 0; 16656 m_freem(m); 16657 goto done_with_input; 16658 } 16659 /* 16660 * Segment received on connection. Reset idle time and keep-alive 16661 * timer. XXX: This should be done after segment validation to 16662 * ignore broken/spoofed segs. 16663 */ 16664 if (tp->t_idle_reduce && 16665 (tp->snd_max == tp->snd_una) && 16666 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 16667 counter_u64_add(rack_input_idle_reduces, 1); 16668 rack_cc_after_idle(rack, tp); 16669 } 16670 tp->t_rcvtime = ticks; 16671 #ifdef STATS 16672 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 16673 #endif 16674 if (tiwin > rack->r_ctl.rc_high_rwnd) 16675 rack->r_ctl.rc_high_rwnd = tiwin; 16676 /* 16677 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 16678 * this to occur after we've validated the segment. 16679 */ 16680 if (tcp_ecn_input_segment(tp, thflags, tlen, 16681 tcp_packets_this_ack(tp, th->th_ack), 16682 iptos)) 16683 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__); 16684 16685 /* 16686 * If echoed timestamp is later than the current time, fall back to 16687 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 16688 * were used when this connection was established. 16689 */ 16690 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 16691 to.to_tsecr -= tp->ts_offset; 16692 if (TSTMP_GT(to.to_tsecr, ms_cts)) 16693 to.to_tsecr = 0; 16694 } 16695 if ((rack->r_rcvpath_rtt_up == 1) && 16696 (to.to_flags & TOF_TS) && 16697 (TSTMP_GEQ(to.to_tsecr, rack->r_ctl.last_rcv_tstmp_for_rtt))) { 16698 uint32_t rtt = 0; 16699 16700 /* 16701 * We are receiving only and thus not sending 16702 * data to do an RTT. We set a flag when we first 16703 * sent this TS to the peer. We now have it back 16704 * and have an RTT to share. We log it as a conf 16705 * 4, we are not so sure about it.. since we 16706 * may have lost an ack. 16707 */ 16708 if (TSTMP_GT(cts, rack->r_ctl.last_time_of_arm_rcv)) 16709 rtt = (cts - rack->r_ctl.last_time_of_arm_rcv); 16710 rack->r_rcvpath_rtt_up = 0; 16711 /* Submit and commit the timer */ 16712 if (rtt > 0) { 16713 tcp_rack_xmit_timer(rack, rtt, 0, rtt, 4, NULL, 1); 16714 tcp_rack_xmit_timer_commit(rack, tp); 16715 } 16716 } 16717 /* 16718 * If its the first time in we need to take care of options and 16719 * verify we can do SACK for rack! 16720 */ 16721 if (rack->r_state == 0) { 16722 /* Should be init'd by rack_init() */ 16723 KASSERT(rack->rc_inp != NULL, 16724 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 16725 if (rack->rc_inp == NULL) { 16726 rack->rc_inp = inp; 16727 } 16728 16729 /* 16730 * Process options only when we get SYN/ACK back. 
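     * (For an active open this is where the peer's window scale,
     * timestamp, MSS, SACK-permitted and TCP fast open results from
     * the SYN/ACK are folded into the tcpcb, as the code below does.)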
The SYN 16731 * case for incoming connections is handled in tcp_syncache. 16732 * According to RFC1323 the window field in a SYN (i.e., a 16733 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 16734 * this is traditional behavior, may need to be cleaned up. 16735 */ 16736 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 16737 /* Handle parallel SYN for ECN */ 16738 tcp_ecn_input_parallel_syn(tp, thflags, iptos); 16739 if ((to.to_flags & TOF_SCALE) && 16740 (tp->t_flags & TF_REQ_SCALE)) { 16741 tp->t_flags |= TF_RCVD_SCALE; 16742 tp->snd_scale = to.to_wscale; 16743 } else 16744 tp->t_flags &= ~TF_REQ_SCALE; 16745 /* 16746 * Initial send window. It will be updated with the 16747 * next incoming segment to the scaled value. 16748 */ 16749 tp->snd_wnd = th->th_win; 16750 rack_validate_fo_sendwin_up(tp, rack); 16751 if ((to.to_flags & TOF_TS) && 16752 (tp->t_flags & TF_REQ_TSTMP)) { 16753 tp->t_flags |= TF_RCVD_TSTMP; 16754 tp->ts_recent = to.to_tsval; 16755 tp->ts_recent_age = cts; 16756 } else 16757 tp->t_flags &= ~TF_REQ_TSTMP; 16758 if (to.to_flags & TOF_MSS) { 16759 tcp_mss(tp, to.to_mss); 16760 } 16761 if ((tp->t_flags & TF_SACK_PERMIT) && 16762 (to.to_flags & TOF_SACKPERM) == 0) 16763 tp->t_flags &= ~TF_SACK_PERMIT; 16764 if (tp->t_flags & TF_FASTOPEN) { 16765 if (to.to_flags & TOF_FASTOPEN) { 16766 uint16_t mss; 16767 16768 if (to.to_flags & TOF_MSS) 16769 mss = to.to_mss; 16770 else 16771 if ((inp->inp_vflag & INP_IPV6) != 0) 16772 mss = TCP6_MSS; 16773 else 16774 mss = TCP_MSS; 16775 tcp_fastopen_update_cache(tp, mss, 16776 to.to_tfo_len, to.to_tfo_cookie); 16777 } else 16778 tcp_fastopen_disable_path(tp); 16779 } 16780 } 16781 /* 16782 * At this point we are at the initial call. Here we decide 16783 * if we are doing RACK or not. We do this by seeing if 16784 * TF_SACK_PERMIT is set and the sack-not-required is clear. 16785 * The code now does do dup-ack counting so if you don't 16786 * switch back you won't get rack & TLP, but you will still 16787 * get this stack. 16788 */ 16789 16790 if ((rack_sack_not_required == 0) && 16791 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 16792 tcp_switch_back_to_default(tp); 16793 (*tp->t_fb->tfb_tcp_do_segment)(tp, m, th, drop_hdrlen, 16794 tlen, iptos); 16795 #ifdef TCP_ACCOUNTING 16796 sched_unpin(); 16797 #endif 16798 return (1); 16799 } 16800 tcp_set_hpts(tp); 16801 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 16802 } 16803 if (thflags & TH_FIN) 16804 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 16805 us_cts = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time); 16806 if ((rack->rc_gp_dyn_mul) && 16807 (rack->use_fixed_rate == 0) && 16808 (rack->rc_always_pace)) { 16809 /* Check in on probertt */ 16810 rack_check_probe_rtt(rack, cts); 16811 } 16812 rack_clear_rate_sample(rack); 16813 if ((rack->forced_ack) && 16814 ((tcp_get_flags(th) & TH_RST) == 0)) { 16815 rack_handle_probe_response(rack, tiwin, us_cts); 16816 } 16817 /* 16818 * This is the one exception case where we set the rack state 16819 * always. All other times (timers etc) we must have a rack-state 16820 * set (so we assure we have done the checks above for SACK). 
16821 */ 16822 rack->r_ctl.rc_rcvtime = cts; 16823 if (rack->r_state != tp->t_state) 16824 rack_set_state(tp, rack); 16825 if (SEQ_GT(th->th_ack, tp->snd_una) && 16826 (rsm = tqhash_min(rack->r_ctl.tqh)) != NULL) 16827 kern_prefetch(rsm, &prev_state); 16828 prev_state = rack->r_state; 16829 if ((thflags & TH_RST) && 16830 ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 16831 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || 16832 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq))) { 16833 /* The connection will be killed by a reset check the tracepoint */ 16834 tcp_trace_point(rack->rc_tp, TCP_TP_RESET_RCV); 16835 } 16836 retval = (*rack->r_substate) (m, th, so, 16837 tp, &to, drop_hdrlen, 16838 tlen, tiwin, thflags, nxt_pkt, iptos); 16839 if (retval == 0) { 16840 /* 16841 * If retval is 1 the tcb is unlocked and most likely the tp 16842 * is gone. 16843 */ 16844 INP_WLOCK_ASSERT(inp); 16845 if ((rack->rc_gp_dyn_mul) && 16846 (rack->rc_always_pace) && 16847 (rack->use_fixed_rate == 0) && 16848 rack->in_probe_rtt && 16849 (rack->r_ctl.rc_time_probertt_starts == 0)) { 16850 /* 16851 * If we are going for target, lets recheck before 16852 * we output. 16853 */ 16854 rack_check_probe_rtt(rack, cts); 16855 } 16856 if (rack->set_pacing_done_a_iw == 0) { 16857 /* How much has been acked? */ 16858 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 16859 /* We have enough to set in the pacing segment size */ 16860 rack->set_pacing_done_a_iw = 1; 16861 rack_set_pace_segments(tp, rack, __LINE__, NULL); 16862 } 16863 } 16864 tcp_rack_xmit_timer_commit(rack, tp); 16865 #ifdef TCP_ACCOUNTING 16866 /* 16867 * If we set the ack_val_se to what ack processing we are doing 16868 * we also want to track how many cycles we burned. Note 16869 * the bits after tcp_output we let be "free". This is because 16870 * we are also tracking the tcp_output times as well. Note the 16871 * use of 0xf here since we only have 11 counter (0 - 0xa) and 16872 * 0xf cannot be returned and is what we initialize it too to 16873 * indicate we are not doing the tabulations. 16874 */ 16875 if (ack_val_set != 0xf) { 16876 uint64_t crtsc; 16877 16878 crtsc = get_cyclecount(); 16879 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16880 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 16881 } 16882 } 16883 #endif 16884 if ((nxt_pkt == 0) && (no_output == 0)) { 16885 if ((rack->r_wanted_output != 0) || 16886 (tp->t_flags & TF_ACKNOW) || 16887 (rack->r_fast_output != 0)) { 16888 16889 do_output_now: 16890 if (tcp_output(tp) < 0) { 16891 #ifdef TCP_ACCOUNTING 16892 sched_unpin(); 16893 #endif 16894 return (1); 16895 } 16896 did_out = 1; 16897 } 16898 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16899 rack_free_trim(rack); 16900 } else if ((nxt_pkt == 0) && (tp->t_flags & TF_ACKNOW)) { 16901 goto do_output_now; 16902 } else if ((no_output == 1) && 16903 (nxt_pkt == 0) && 16904 (tcp_in_hpts(rack->rc_tp) == 0)) { 16905 /* 16906 * We are not in hpts and we had a pacing timer up. Use 16907 * the remaining time (time_remaining) to restart the timer. 
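             * (time_remaining was computed earlier from
             * rc_last_output_to - us_cts on the path that set no_output,
             * so it should always be non-zero here; the KASSERT below
             * checks exactly that.)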
             */
            KASSERT((time_remaining != 0), ("time_remaining is zero for rack:%p tp:%p", rack, tp));
            rack_start_hpts_timer(rack, tp, cts, time_remaining, 0, 0);
            rack_free_trim(rack);
        }
        /* Clear the flag; output may already have cleared it, but it may not have */
        if ((nxt_pkt == 0) && (tp->t_flags2 & TF2_HPTS_CALLS))
            tp->t_flags2 &= ~TF2_HPTS_CALLS;
        /*
         * The draft (v3) calls for us to use SEQ_GEQ, but that
         * causes issues when we are just going app limited. Let's
         * instead use SEQ_GT <or> where it's equal but more data
         * is outstanding.
         *
         * Also make sure we are on the last ack of a series. We
         * have to have all the acks processed in queue to know
         * if there is something left outstanding.
         */
        if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends) &&
            (rack->rc_new_rnd_needed == 0) &&
            (nxt_pkt == 0)) {
            /*
             * We have crossed into a new round with
             * the new snd_una.
             */
            rack_new_round_setup(tp, rack, tp->snd_una);
        }
        if ((nxt_pkt == 0) &&
            ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) &&
            (SEQ_GT(tp->snd_max, tp->snd_una) ||
             (tp->t_flags & TF_DELACK) ||
             ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
              (tp->t_state <= TCPS_CLOSING)))) {
            /* We could not send (probably in the hpts but stopped the timer earlier)? */
            if ((tp->snd_max == tp->snd_una) &&
                ((tp->t_flags & TF_DELACK) == 0) &&
                (tcp_in_hpts(rack->rc_tp)) &&
                (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
                /* keep-alive not needed while a hptsi output event is still scheduled */
                ;
            } else {
                int late = 0;

                if (tcp_in_hpts(tp)) {
                    if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
                        us_cts = tcp_get_usecs(NULL);
                        if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
                            rack->r_early = 1;
                            rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts);
                        } else
                            late = 1;
                        rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
                    }
                    tcp_hpts_remove(tp);
                }
                if (late && (did_out == 0)) {
                    /*
                     * We are late in the sending
                     * and we did not call the output
                     * (this probably should not happen).
                     */
                    goto do_output_now;
                }
                rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
            }
            way_out = 1;
        } else if (nxt_pkt == 0) {
            /* Do we have the correct timer running? */
            rack_timer_audit(tp, rack, &so->so_snd);
            way_out = 2;
        }
done_with_input:
        rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs));
        if (did_out)
            rack->r_wanted_output = 0;
    }

#ifdef TCP_ACCOUNTING
    sched_unpin();
#endif
    return (retval);
}

static void
rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
    int32_t drop_hdrlen, int32_t tlen, uint8_t iptos)
{
    struct timeval tv;

    /* First let's see if we have old packets */
    if (!STAILQ_EMPTY(&tp->t_inqueue)) {
        if (ctf_do_queued_segments(tp, 1)) {
            m_freem(m);
            return;
        }
    }
    if (m->m_flags & M_TSTMP_LRO) {
        mbuf_tstmp2timeval(m, &tv);
    } else {
        /* Should not happen; should we kassert instead?
*/ 17007 tcp_get_usecs(&tv); 17008 } 17009 if (rack_do_segment_nounlock(tp, m, th, drop_hdrlen, tlen, iptos, 0, 17010 &tv) == 0) { 17011 INP_WUNLOCK(tptoinpcb(tp)); 17012 } 17013 } 17014 17015 struct rack_sendmap * 17016 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 17017 { 17018 struct rack_sendmap *rsm = NULL; 17019 int32_t idx; 17020 uint32_t srtt = 0, thresh = 0, ts_low = 0; 17021 17022 /* Return the next guy to be re-transmitted */ 17023 if (tqhash_empty(rack->r_ctl.tqh)) { 17024 return (NULL); 17025 } 17026 if (tp->t_flags & TF_SENTFIN) { 17027 /* retran the end FIN? */ 17028 return (NULL); 17029 } 17030 /* ok lets look at this one */ 17031 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 17032 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) { 17033 return (rsm); 17034 } 17035 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 17036 goto check_it; 17037 } 17038 rsm = rack_find_lowest_rsm(rack); 17039 if (rsm == NULL) { 17040 return (NULL); 17041 } 17042 check_it: 17043 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && 17044 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 17045 /* 17046 * No sack so we automatically do the 3 strikes and 17047 * retransmit (no rack timer would be started). 17048 */ 17049 return (rsm); 17050 } 17051 if (rsm->r_flags & RACK_ACKED) { 17052 return (NULL); 17053 } 17054 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 17055 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 17056 /* Its not yet ready */ 17057 return (NULL); 17058 } 17059 srtt = rack_grab_rtt(tp, rack); 17060 idx = rsm->r_rtr_cnt - 1; 17061 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 17062 thresh = rack_calc_thresh_rack(rack, srtt, tsused, __LINE__, 1); 17063 if ((tsused == ts_low) || 17064 (TSTMP_LT(tsused, ts_low))) { 17065 /* No time since sending */ 17066 return (NULL); 17067 } 17068 if ((tsused - ts_low) < thresh) { 17069 /* It has not been long enough yet */ 17070 return (NULL); 17071 } 17072 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 17073 ((rsm->r_flags & RACK_SACK_PASSED))) { 17074 /* 17075 * We have passed the dup-ack threshold <or> 17076 * a SACK has indicated this is missing. 17077 * Note that if you are a declared attacker 17078 * it is only the dup-ack threshold that 17079 * will cause retransmits. 17080 */ 17081 /* log retransmit reason */ 17082 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 17083 rack->r_fast_output = 0; 17084 return (rsm); 17085 } 17086 return (NULL); 17087 } 17088 17089 static void 17090 rack_log_pacing_delay_calc (struct tcp_rack *rack, uint32_t len, uint32_t pacing_delay, 17091 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 17092 int line, struct rack_sendmap *rsm, uint8_t quality) 17093 { 17094 if (tcp_bblogging_on(rack->rc_tp)) { 17095 union tcp_log_stackspecific log; 17096 struct timeval tv; 17097 17098 if (rack_verbose_logging == 0) { 17099 /* 17100 * We are not verbose screen out all but 17101 * ones we always want. 
17102 */ 17103 if ((method != 2) && 17104 (method != 3) && 17105 (method != 7) && 17106 (method != 89) && 17107 (method != 14) && 17108 (method != 20)) { 17109 return; 17110 } 17111 } 17112 memset(&log, 0, sizeof(log)); 17113 log.u_bbr.flex1 = pacing_delay; 17114 log.u_bbr.flex2 = len; 17115 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 17116 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 17117 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 17118 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 17119 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 17120 log.u_bbr.use_lt_bw <<= 1; 17121 log.u_bbr.use_lt_bw |= rack->r_late; 17122 log.u_bbr.use_lt_bw <<= 1; 17123 log.u_bbr.use_lt_bw |= rack->r_early; 17124 log.u_bbr.use_lt_bw <<= 1; 17125 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 17126 log.u_bbr.use_lt_bw <<= 1; 17127 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 17128 log.u_bbr.use_lt_bw <<= 1; 17129 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 17130 log.u_bbr.use_lt_bw <<= 1; 17131 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 17132 log.u_bbr.use_lt_bw <<= 1; 17133 log.u_bbr.use_lt_bw |= rack->gp_ready; 17134 log.u_bbr.pkt_epoch = line; 17135 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 17136 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 17137 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 17138 log.u_bbr.bw_inuse = bw_est; 17139 log.u_bbr.delRate = bw; 17140 if (rack->r_ctl.gp_bw == 0) 17141 log.u_bbr.cur_del_rate = 0; 17142 else 17143 log.u_bbr.cur_del_rate = rack_get_bw(rack); 17144 log.u_bbr.rttProp = len_time; 17145 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 17146 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 17147 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 17148 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 17149 /* We are in slow start */ 17150 log.u_bbr.flex7 = 1; 17151 } else { 17152 /* we are on congestion avoidance */ 17153 log.u_bbr.flex7 = 0; 17154 } 17155 log.u_bbr.flex8 = method; 17156 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 17157 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 17158 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 17159 log.u_bbr.cwnd_gain <<= 1; 17160 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 17161 log.u_bbr.cwnd_gain <<= 1; 17162 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 17163 log.u_bbr.cwnd_gain <<= 1; 17164 log.u_bbr.cwnd_gain |= rack->use_fixed_rate; 17165 log.u_bbr.cwnd_gain <<= 1; 17166 log.u_bbr.cwnd_gain |= rack->rc_always_pace; 17167 log.u_bbr.cwnd_gain <<= 1; 17168 log.u_bbr.cwnd_gain |= rack->gp_ready; 17169 log.u_bbr.bbr_substate = quality; 17170 log.u_bbr.bbr_state = rack->dgp_on; 17171 log.u_bbr.bbr_state <<= 1; 17172 log.u_bbr.bbr_state |= rack->rc_pace_to_cwnd; 17173 log.u_bbr.bbr_state <<= 2; 17174 TCP_LOG_EVENTP(rack->rc_tp, NULL, 17175 &rack->rc_inp->inp_socket->so_rcv, 17176 &rack->rc_inp->inp_socket->so_snd, 17177 BBR_LOG_HPTSI_CALC, 0, 17178 0, &log, false, &tv); 17179 } 17180 } 17181 17182 static uint32_t 17183 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 17184 { 17185 uint32_t new_tso, user_max, pace_one; 17186 17187 user_max = rack->rc_user_set_max_segs * mss; 17188 if (rack->rc_force_max_seg) { 17189 return (user_max); 17190 } 17191 if (rack->use_fixed_rate && 17192 ((rack->r_ctl.crte == NULL) || 17193 (bw != rack->r_ctl.crte->rate))) { 17194 /* Use the user mss since we are not exactly matched */ 17195 return (user_max); 17196 } 17197 if (rack_pace_one_seg || 17198 (rack->r_ctl.rc_user_set_min_segs == 1)) 
17199 pace_one = 1; 17200 else 17201 pace_one = 0; 17202 17203 new_tso = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, bw, mss, 17204 pace_one, rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); 17205 if (new_tso > user_max) 17206 new_tso = user_max; 17207 if (rack->rc_hybrid_mode && rack->r_ctl.client_suggested_maxseg) { 17208 if (((uint32_t)rack->r_ctl.client_suggested_maxseg * mss) > new_tso) 17209 new_tso = (uint32_t)rack->r_ctl.client_suggested_maxseg * mss; 17210 } 17211 if (rack->r_ctl.rc_user_set_min_segs && 17212 ((rack->r_ctl.rc_user_set_min_segs * mss) > new_tso)) 17213 new_tso = rack->r_ctl.rc_user_set_min_segs * mss; 17214 return (new_tso); 17215 } 17216 17217 static uint64_t 17218 rack_arrive_at_discounted_rate(struct tcp_rack *rack, uint64_t window_input, uint32_t *rate_set, uint32_t *gain_b) 17219 { 17220 uint64_t reduced_win; 17221 uint32_t gain; 17222 17223 if (window_input < rc_init_window(rack)) { 17224 /* 17225 * The cwnd is collapsed to 17226 * nearly zero, maybe because of a time-out? 17227 * Lets drop back to the lt-bw. 17228 */ 17229 reduced_win = rack_get_lt_bw(rack); 17230 /* Set the flag so the caller knows its a rate and not a reduced window */ 17231 *rate_set = 1; 17232 gain = 100; 17233 } else if (IN_RECOVERY(rack->rc_tp->t_flags)) { 17234 /* 17235 * If we are in recover our cwnd needs to be less for 17236 * our pacing consideration. 17237 */ 17238 if (rack->rack_hibeta == 0) { 17239 reduced_win = window_input / 2; 17240 gain = 50; 17241 } else { 17242 reduced_win = window_input * rack->r_ctl.saved_hibeta; 17243 reduced_win /= 100; 17244 gain = rack->r_ctl.saved_hibeta; 17245 } 17246 } else { 17247 /* 17248 * Apply Timely factor to increase/decrease the 17249 * amount we are pacing at. 17250 */ 17251 gain = rack_get_output_gain(rack, NULL); 17252 if (gain > rack_gain_p5_ub) { 17253 gain = rack_gain_p5_ub; 17254 } 17255 reduced_win = window_input * gain; 17256 reduced_win /= 100; 17257 } 17258 if (gain_b != NULL) 17259 *gain_b = gain; 17260 /* 17261 * What is being returned here is a trimmed down 17262 * window values in all cases where rate_set is left 17263 * at 0. In one case we actually return the rate (lt_bw). 17264 * the "reduced_win" is returned as a slimmed down cwnd that 17265 * is then calculated by the caller into a rate when rate_set 17266 * is 0. 17267 */ 17268 return (reduced_win); 17269 } 17270 17271 static int32_t 17272 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t pacing_delay, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced) 17273 { 17274 uint64_t lentim, fill_bw; 17275 17276 rack->r_via_fill_cw = 0; 17277 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) 17278 return (pacing_delay); 17279 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) 17280 return (pacing_delay); 17281 if (rack->r_ctl.rc_last_us_rtt == 0) 17282 return (pacing_delay); 17283 if (rack->rc_pace_fill_if_rttin_range && 17284 (rack->r_ctl.rc_last_us_rtt >= 17285 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { 17286 /* The rtt is huge, N * smallest, lets not fill */ 17287 return (pacing_delay); 17288 } 17289 if (rack->r_ctl.fillcw_cap && *rate_wanted >= rack->r_ctl.fillcw_cap) 17290 return (pacing_delay); 17291 /* 17292 * first lets calculate the b/w based on the last us-rtt 17293 * and the the smallest send window. 
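     * The window (in bytes) divided by the last measured RTT gives
     * the bandwidth that would just refill the cwnd in one RTT. For
     * example a 100000 byte window over a 10000 usec RTT works out
     * to 100000 * 1000000 / 10000 = 10000000 bytes/sec (10 MB/s).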
17294 */ 17295 fill_bw = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); 17296 if (rack->rc_fillcw_apply_discount) { 17297 uint32_t rate_set = 0; 17298 17299 fill_bw = rack_arrive_at_discounted_rate(rack, fill_bw, &rate_set, NULL); 17300 if (rate_set) { 17301 goto at_lt_bw; 17302 } 17303 } 17304 /* Take the rwnd if its smaller */ 17305 if (fill_bw > rack->rc_tp->snd_wnd) 17306 fill_bw = rack->rc_tp->snd_wnd; 17307 /* Now lets make it into a b/w */ 17308 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 17309 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 17310 /* Adjust to any cap */ 17311 if (rack->r_ctl.fillcw_cap && fill_bw >= rack->r_ctl.fillcw_cap) 17312 fill_bw = rack->r_ctl.fillcw_cap; 17313 17314 at_lt_bw: 17315 if (rack_bw_multipler > 0) { 17316 /* 17317 * We want to limit fill-cw to the some multiplier 17318 * of the max(lt_bw, gp_est). The normal default 17319 * is 0 for off, so a sysctl has enabled it. 17320 */ 17321 uint64_t lt_bw, gp, rate; 17322 17323 gp = rack_get_gp_est(rack); 17324 lt_bw = rack_get_lt_bw(rack); 17325 if (lt_bw > gp) 17326 rate = lt_bw; 17327 else 17328 rate = gp; 17329 rate *= rack_bw_multipler; 17330 rate /= 100; 17331 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 17332 union tcp_log_stackspecific log; 17333 struct timeval tv; 17334 17335 memset(&log, 0, sizeof(log)); 17336 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 17337 log.u_bbr.flex1 = rack_bw_multipler; 17338 log.u_bbr.flex2 = len; 17339 log.u_bbr.cur_del_rate = gp; 17340 log.u_bbr.delRate = lt_bw; 17341 log.u_bbr.bw_inuse = rate; 17342 log.u_bbr.rttProp = fill_bw; 17343 log.u_bbr.flex8 = 44; 17344 tcp_log_event(rack->rc_tp, NULL, NULL, NULL, 17345 BBR_LOG_CWND, 0, 17346 0, &log, false, NULL, 17347 __func__, __LINE__, &tv); 17348 } 17349 if (fill_bw > rate) 17350 fill_bw = rate; 17351 } 17352 /* We are below the min b/w */ 17353 if (non_paced) 17354 *rate_wanted = fill_bw; 17355 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 17356 return (pacing_delay); 17357 rack->r_via_fill_cw = 1; 17358 if (rack->r_rack_hw_rate_caps && 17359 (rack->r_ctl.crte != NULL)) { 17360 uint64_t high_rate; 17361 17362 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 17363 if (fill_bw > high_rate) { 17364 /* We are capping bw at the highest rate table entry */ 17365 if (*rate_wanted > high_rate) { 17366 /* The original rate was also capped */ 17367 rack->r_via_fill_cw = 0; 17368 } 17369 rack_log_hdwr_pacing(rack, 17370 fill_bw, high_rate, __LINE__, 17371 0, 3); 17372 fill_bw = high_rate; 17373 if (capped) 17374 *capped = 1; 17375 } 17376 } else if ((rack->r_ctl.crte == NULL) && 17377 (rack->rack_hdrw_pacing == 0) && 17378 (rack->rack_hdw_pace_ena) && 17379 rack->r_rack_hw_rate_caps && 17380 (rack->rack_attempt_hdwr_pace == 0) && 17381 (rack->rc_inp->inp_route.ro_nh != NULL) && 17382 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 17383 /* 17384 * Ok we may have a first attempt that is greater than our top rate 17385 * lets check. 
17386 */ 17387 uint64_t high_rate; 17388 17389 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 17390 if (high_rate) { 17391 if (fill_bw > high_rate) { 17392 fill_bw = high_rate; 17393 if (capped) 17394 *capped = 1; 17395 } 17396 } 17397 } 17398 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) { 17399 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 17400 fill_bw, 0, 0, HYBRID_LOG_RATE_CAP, 2, NULL, __LINE__); 17401 fill_bw = rack->r_ctl.bw_rate_cap; 17402 } 17403 /* 17404 * Ok fill_bw holds our mythical b/w to fill the cwnd 17405 * in an rtt (unless it was capped), what does that 17406 * time wise equate too? 17407 */ 17408 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 17409 lentim /= fill_bw; 17410 *rate_wanted = fill_bw; 17411 if (non_paced || (lentim < pacing_delay)) { 17412 rack_log_pacing_delay_calc(rack, len, pacing_delay, fill_bw, 17413 0, lentim, 12, __LINE__, NULL, 0); 17414 return ((int32_t)lentim); 17415 } else 17416 return (pacing_delay); 17417 } 17418 17419 static int32_t 17420 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz, int line) 17421 { 17422 uint64_t srtt; 17423 int32_t pacing_delay = 0; 17424 int can_start_hw_pacing = 1; 17425 int err; 17426 int pace_one; 17427 17428 if (rack_pace_one_seg || 17429 (rack->r_ctl.rc_user_set_min_segs == 1)) 17430 pace_one = 1; 17431 else 17432 pace_one = 0; 17433 if (rack->rc_always_pace == 0) { 17434 /* 17435 * We use the most optimistic possible cwnd/srtt for 17436 * sending calculations. This will make our 17437 * calculation anticipate getting more through 17438 * quicker then possible. But thats ok we don't want 17439 * the peer to have a gap in data sending. 17440 */ 17441 uint64_t cwnd, tr_perms = 0; 17442 int32_t reduce; 17443 17444 old_method: 17445 /* 17446 * We keep no precise pacing with the old method 17447 * instead we use the pacer to mitigate bursts. 17448 */ 17449 if (rack->r_ctl.rc_rack_min_rtt) 17450 srtt = rack->r_ctl.rc_rack_min_rtt; 17451 else 17452 srtt = max(tp->t_srtt, 1); 17453 if (rack->r_ctl.rc_rack_largest_cwnd) 17454 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 17455 else 17456 cwnd = rack->r_ctl.cwnd_to_use; 17457 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 17458 tr_perms = (cwnd * 1000) / srtt; 17459 if (tr_perms == 0) { 17460 tr_perms = ctf_fixed_maxseg(tp); 17461 } 17462 /* 17463 * Calculate how long this will take to drain, if 17464 * the calculation comes out to zero, thats ok we 17465 * will use send_a_lot to possibly spin around for 17466 * more increasing tot_len_this_send to the point 17467 * that its going to require a pace, or we hit the 17468 * cwnd. Which in that case we are just waiting for 17469 * a ACK. 17470 */ 17471 pacing_delay = len / tr_perms; 17472 /* Now do we reduce the time so we don't run dry? 
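         * tr_perms above is bytes per millisecond (cwnd * 1000 / srtt,
         * with srtt in usec), so pacing_delay comes out in milliseconds
         * and is converted to usec further down via HPTS_USEC_IN_MSEC.
         * For example cwnd = 100000 bytes and srtt = 10000 usec gives
         * tr_perms = 10000 bytes/ms, so a 15000 byte send paces out
         * over roughly 1 ms. The reduction below then shaves a fraction
         * of that off so the pacer wakes a little early and we don't
         * run dry.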
*/ 17473 if (pacing_delay && rack_pacing_delay_reduction) { 17474 reduce = (pacing_delay / rack_pacing_delay_reduction); 17475 if (reduce < pacing_delay) { 17476 pacing_delay -= reduce; 17477 } else 17478 pacing_delay = 0; 17479 } else 17480 reduce = 0; 17481 pacing_delay *= HPTS_USEC_IN_MSEC; 17482 if (rack->rc_pace_to_cwnd) { 17483 uint64_t rate_wanted = 0; 17484 17485 pacing_delay = pace_to_fill_cwnd(rack, pacing_delay, len, segsiz, NULL, &rate_wanted, 1); 17486 rack->rc_ack_can_sendout_data = 1; 17487 rack_log_pacing_delay_calc(rack, len, pacing_delay, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); 17488 } else 17489 rack_log_pacing_delay_calc(rack, len, pacing_delay, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); 17490 /*******************************************************/ 17491 /* RRS: We insert non-paced call to stats here for len */ 17492 /*******************************************************/ 17493 } else { 17494 uint64_t bw_est, res, lentim, rate_wanted; 17495 uint32_t segs, oh; 17496 int capped = 0; 17497 int prev_fill; 17498 17499 if ((rack->r_rr_config == 1) && rsm) { 17500 return (rack->r_ctl.rc_min_to); 17501 } 17502 if (rack->use_fixed_rate) { 17503 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 17504 } else if ((rack->r_ctl.init_rate == 0) && 17505 (rack->r_ctl.gp_bw == 0)) { 17506 /* no way to yet do an estimate */ 17507 bw_est = rate_wanted = 0; 17508 } else if (rack->dgp_on) { 17509 bw_est = rack_get_bw(rack); 17510 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 17511 } else { 17512 uint32_t gain, rate_set = 0; 17513 17514 rate_wanted = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); 17515 rate_wanted = rack_arrive_at_discounted_rate(rack, rate_wanted, &rate_set, &gain); 17516 if (rate_set == 0) { 17517 if (rate_wanted > rack->rc_tp->snd_wnd) 17518 rate_wanted = rack->rc_tp->snd_wnd; 17519 /* Now lets make it into a b/w */ 17520 rate_wanted *= (uint64_t)HPTS_USEC_IN_SEC; 17521 rate_wanted /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 17522 } 17523 bw_est = rate_wanted; 17524 rack_log_pacing_delay_calc(rack, rack->rc_tp->snd_cwnd, 17525 rack->r_ctl.cwnd_to_use, 17526 rate_wanted, bw_est, 17527 rack->r_ctl.rc_last_us_rtt, 17528 88, __LINE__, NULL, gain); 17529 } 17530 if (((bw_est == 0) || (rate_wanted == 0) || (rack->gp_ready == 0)) && 17531 (rack->use_fixed_rate == 0)) { 17532 /* 17533 * No way yet to make a b/w estimate or 17534 * our raise is set incorrectly. 17535 */ 17536 goto old_method; 17537 } 17538 rack_rate_cap_bw(rack, &rate_wanted, &capped); 17539 /* We need to account for all the overheads */ 17540 segs = (len + segsiz - 1) / segsiz; 17541 /* 17542 * We need the diff between 1514 bytes (e-mtu with e-hdr) 17543 * and how much data we put in each packet. Yes this 17544 * means we may be off if we are larger than 1500 bytes 17545 * or smaller. But this just makes us more conservative. 
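         * Roughly: oh ends up as (t_maxseg - segsiz) plus the TCP, IP
         * and ethernet header bytes, and segs * oh is added to len
         * before dividing by the wanted rate. With t_maxseg == segsiz
         * that is about 20 + 20 + 14 = 54 bytes of per-packet overhead
         * for IPv4, or 74 with an IPv6 header.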
17546 */ 17547 17548 oh = (tp->t_maxseg - segsiz) + sizeof(struct tcphdr); 17549 if (rack->r_is_v6) { 17550 #ifdef INET6 17551 oh += sizeof(struct ip6_hdr); 17552 #endif 17553 } else { 17554 #ifdef INET 17555 oh += sizeof(struct ip); 17556 #endif 17557 } 17558 /* We add a fixed 14 for the ethernet header */ 17559 oh += 14; 17560 segs *= oh; 17561 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 17562 res = lentim / rate_wanted; 17563 pacing_delay = (uint32_t)res; 17564 if (rack_hw_rate_min && 17565 (rate_wanted < rack_hw_rate_min)) { 17566 can_start_hw_pacing = 0; 17567 if (rack->r_ctl.crte) { 17568 /* 17569 * Ok we need to release it, we 17570 * have fallen too low. 17571 */ 17572 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17573 rack->r_ctl.crte = NULL; 17574 rack->rack_attempt_hdwr_pace = 0; 17575 rack->rack_hdrw_pacing = 0; 17576 } 17577 } 17578 if (rack->r_ctl.crte && 17579 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { 17580 /* 17581 * We want more than the hardware can give us, 17582 * don't start any hw pacing. 17583 */ 17584 can_start_hw_pacing = 0; 17585 if (rack->r_rack_hw_rate_caps == 0) { 17586 /* 17587 * Ok we need to release it, we 17588 * want more than the card can give us and 17589 * no rate cap is in place. Set it up so 17590 * when we want less we can retry. 17591 */ 17592 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17593 rack->r_ctl.crte = NULL; 17594 rack->rack_attempt_hdwr_pace = 0; 17595 rack->rack_hdrw_pacing = 0; 17596 } 17597 } 17598 if ((rack->r_ctl.crte != NULL) && (rack->rc_inp->inp_snd_tag == NULL)) { 17599 /* 17600 * We lost our rate somehow, this can happen 17601 * if the interface changed underneath us. 17602 */ 17603 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17604 rack->r_ctl.crte = NULL; 17605 /* Lets re-allow attempting to setup pacing */ 17606 rack->rack_hdrw_pacing = 0; 17607 rack->rack_attempt_hdwr_pace = 0; 17608 rack_log_hdwr_pacing(rack, 17609 rate_wanted, bw_est, __LINE__, 17610 0, 6); 17611 } 17612 prev_fill = rack->r_via_fill_cw; 17613 if ((rack->rc_pace_to_cwnd) && 17614 (capped == 0) && 17615 (rack->dgp_on == 1) && 17616 (rack->use_fixed_rate == 0) && 17617 (rack->in_probe_rtt == 0) && 17618 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 17619 /* 17620 * We want to pace at our rate *or* faster to 17621 * fill the cwnd to the max if its not full. 17622 */ 17623 pacing_delay = pace_to_fill_cwnd(rack, pacing_delay, (len+segs), segsiz, &capped, &rate_wanted, 0); 17624 /* Re-check to make sure we are not exceeding our max b/w */ 17625 if ((rack->r_ctl.crte != NULL) && 17626 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { 17627 /* 17628 * We want more than the hardware can give us, 17629 * don't start any hw pacing. 17630 */ 17631 can_start_hw_pacing = 0; 17632 if (rack->r_rack_hw_rate_caps == 0) { 17633 /* 17634 * Ok we need to release it, we 17635 * want more than the card can give us and 17636 * no rate cap is in place. Set it up so 17637 * when we want less we can retry. 
17638 */ 17639 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17640 rack->r_ctl.crte = NULL; 17641 rack->rack_attempt_hdwr_pace = 0; 17642 rack->rack_hdrw_pacing = 0; 17643 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 17644 } 17645 } 17646 } 17647 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 17648 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 17649 if ((rack->rack_hdw_pace_ena) && 17650 (can_start_hw_pacing > 0) && 17651 (rack->rack_hdrw_pacing == 0) && 17652 (rack->rack_attempt_hdwr_pace == 0)) { 17653 /* 17654 * Lets attempt to turn on hardware pacing 17655 * if we can. 17656 */ 17657 rack->rack_attempt_hdwr_pace = 1; 17658 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 17659 rack->rc_inp->inp_route.ro_nh->nh_ifp, 17660 rate_wanted, 17661 RS_PACING_GEQ, 17662 &err, &rack->r_ctl.crte_prev_rate); 17663 if (rack->r_ctl.crte) { 17664 rack->rack_hdrw_pacing = 1; 17665 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, segsiz, 17666 pace_one, rack->r_ctl.crte, 17667 NULL, rack->r_ctl.pace_len_divisor); 17668 rack_log_hdwr_pacing(rack, 17669 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 17670 err, 0); 17671 rack->r_ctl.last_hw_bw_req = rate_wanted; 17672 } else { 17673 counter_u64_add(rack_hw_pace_init_fail, 1); 17674 } 17675 } else if (rack->rack_hdrw_pacing && 17676 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 17677 /* Do we need to adjust our rate? */ 17678 const struct tcp_hwrate_limit_table *nrte; 17679 17680 if (rack->r_up_only && 17681 (rate_wanted < rack->r_ctl.crte->rate)) { 17682 /** 17683 * We have four possible states here 17684 * having to do with the previous time 17685 * and this time. 17686 * previous | this-time 17687 * A) 0 | 0 -- fill_cw not in the picture 17688 * B) 1 | 0 -- we were doing a fill-cw but now are not 17689 * C) 1 | 1 -- all rates from fill_cw 17690 * D) 0 | 1 -- we were doing non-fill and now we are filling 17691 * 17692 * For case A, C and D we don't allow a drop. But for 17693 * case B where we now our on our steady rate we do 17694 * allow a drop. 17695 * 17696 */ 17697 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 17698 goto done_w_hdwr; 17699 } 17700 if ((rate_wanted > rack->r_ctl.crte->rate) || 17701 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 17702 if (rack_hw_rate_to_low && 17703 (bw_est < rack_hw_rate_to_low)) { 17704 /* 17705 * The pacing rate is too low for hardware, but 17706 * do allow hardware pacing to be restarted. 17707 */ 17708 rack_log_hdwr_pacing(rack, 17709 bw_est, rack->r_ctl.crte->rate, __LINE__, 17710 0, 5); 17711 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17712 rack->r_ctl.crte = NULL; 17713 rack->rack_attempt_hdwr_pace = 0; 17714 rack->rack_hdrw_pacing = 0; 17715 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 17716 goto done_w_hdwr; 17717 } 17718 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 17719 rack->rc_tp, 17720 rack->rc_inp->inp_route.ro_nh->nh_ifp, 17721 rate_wanted, 17722 RS_PACING_GEQ, 17723 &err, &rack->r_ctl.crte_prev_rate); 17724 if (nrte == NULL) { 17725 /* 17726 * Lost the rate, lets drop hardware pacing 17727 * period. 
17728 */ 17729 rack->rack_hdrw_pacing = 0; 17730 rack->r_ctl.crte = NULL; 17731 rack_log_hdwr_pacing(rack, 17732 rate_wanted, 0, __LINE__, 17733 err, 1); 17734 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 17735 counter_u64_add(rack_hw_pace_lost, 1); 17736 } else if (nrte != rack->r_ctl.crte) { 17737 rack->r_ctl.crte = nrte; 17738 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, 17739 segsiz, pace_one, rack->r_ctl.crte, 17740 NULL, rack->r_ctl.pace_len_divisor); 17741 rack_log_hdwr_pacing(rack, 17742 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 17743 err, 2); 17744 rack->r_ctl.last_hw_bw_req = rate_wanted; 17745 } 17746 } else { 17747 /* We just need to adjust the segment size */ 17748 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 17749 rack_log_hdwr_pacing(rack, 17750 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 17751 0, 4); 17752 rack->r_ctl.last_hw_bw_req = rate_wanted; 17753 } 17754 } 17755 } 17756 done_w_hdwr: 17757 if (rack_limit_time_with_srtt && 17758 (rack->use_fixed_rate == 0) && 17759 (rack->rack_hdrw_pacing == 0)) { 17760 /* 17761 * Sanity check, we do not allow the pacing delay 17762 * to be longer than the SRTT of the path. If it is 17763 * a slow path, then adding a packet should increase 17764 * the RTT and compensate for this i.e. the srtt will 17765 * be greater so the allowed pacing time will be greater. 17766 * 17767 * Note this restriction is not for where a peak rate 17768 * is set, we are doing fixed pacing or hardware pacing. 17769 */ 17770 if (rack->rc_tp->t_srtt) 17771 srtt = rack->rc_tp->t_srtt; 17772 else 17773 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 17774 if (srtt < (uint64_t)pacing_delay) { 17775 rack_log_pacing_delay_calc(rack, srtt, pacing_delay, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); 17776 pacing_delay = srtt; 17777 } 17778 } 17779 /*******************************************************************/ 17780 /* RRS: We insert paced call to stats here for len and rate_wanted */ 17781 /*******************************************************************/ 17782 rack_log_pacing_delay_calc(rack, len, pacing_delay, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); 17783 } 17784 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 17785 /* 17786 * If this rate is seeing enobufs when it 17787 * goes to send then either the nic is out 17788 * of gas or we are mis-estimating the time 17789 * somehow and not letting the queue empty 17790 * completely. Lets add to the pacing time. 17791 */ 17792 int hw_boost_delay; 17793 17794 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 17795 if (hw_boost_delay > rack_enobuf_hw_max) 17796 hw_boost_delay = rack_enobuf_hw_max; 17797 else if (hw_boost_delay < rack_enobuf_hw_min) 17798 hw_boost_delay = rack_enobuf_hw_min; 17799 pacing_delay += hw_boost_delay; 17800 } 17801 return (pacing_delay); 17802 } 17803 17804 static void 17805 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 17806 tcp_seq startseq, uint32_t sb_offset) 17807 { 17808 struct rack_sendmap *my_rsm = NULL; 17809 17810 if (tp->t_state < TCPS_ESTABLISHED) { 17811 /* 17812 * We don't start any measurements if we are 17813 * not at least established. 17814 */ 17815 return; 17816 } 17817 if (tp->t_state >= TCPS_FIN_WAIT_1) { 17818 /* 17819 * We will get no more data into the SB 17820 * this means we need to have the data available 17821 * before we start a measurement. 
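         * (The check below wants at least an initial window's worth,
         * or MIN_GP_WIN segments, whichever is larger; a goodput
         * sample over less data than that would not be meaningful.)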
17822 */ 17823 17824 if (sbavail(&tptosocket(tp)->so_snd) < 17825 max(rc_init_window(rack), 17826 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { 17827 /* Nope not enough data */ 17828 return; 17829 } 17830 } 17831 tp->t_flags |= TF_GPUTINPROG; 17832 rack->r_ctl.rc_gp_cumack_ts = 0; 17833 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 17834 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 17835 tp->gput_seq = startseq; 17836 rack->app_limited_needs_set = 0; 17837 if (rack->in_probe_rtt) 17838 rack->measure_saw_probe_rtt = 1; 17839 else if ((rack->measure_saw_probe_rtt) && 17840 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 17841 rack->measure_saw_probe_rtt = 0; 17842 if (rack->rc_gp_filled) 17843 tp->gput_ts = rack->r_ctl.last_cumack_advance; 17844 else { 17845 /* Special case initial measurement */ 17846 struct timeval tv; 17847 17848 tp->gput_ts = tcp_get_usecs(&tv); 17849 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 17850 } 17851 /* 17852 * We take a guess out into the future, 17853 * if we have no measurement and no 17854 * initial rate, we measure the first 17855 * initial-windows worth of data to 17856 * speed up getting some GP measurement and 17857 * thus start pacing. 17858 */ 17859 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 17860 rack->app_limited_needs_set = 1; 17861 tp->gput_ack = startseq + max(rc_init_window(rack), 17862 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 17863 rack_log_pacing_delay_calc(rack, 17864 tp->gput_seq, 17865 tp->gput_ack, 17866 0, 17867 tp->gput_ts, 17868 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 17869 9, 17870 __LINE__, NULL, 0); 17871 rack_tend_gp_marks(tp, rack); 17872 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 17873 return; 17874 } 17875 if (sb_offset) { 17876 /* 17877 * We are out somewhere in the sb 17878 * can we use the already outstanding data? 17879 */ 17880 17881 if (rack->r_ctl.rc_app_limited_cnt == 0) { 17882 /* 17883 * Yes first one is good and in this case 17884 * the tp->gput_ts is correctly set based on 17885 * the last ack that arrived (no need to 17886 * set things up when an ack comes in). 17887 */ 17888 my_rsm = tqhash_min(rack->r_ctl.tqh); 17889 if ((my_rsm == NULL) || 17890 (my_rsm->r_rtr_cnt != 1)) { 17891 /* retransmission? */ 17892 goto use_latest; 17893 } 17894 } else { 17895 if (rack->r_ctl.rc_first_appl == NULL) { 17896 /* 17897 * If rc_first_appl is NULL 17898 * then the cnt should be 0. 17899 * This is probably an error, maybe 17900 * a KASSERT would be approprate. 17901 */ 17902 goto use_latest; 17903 } 17904 /* 17905 * If we have a marker pointer to the last one that is 17906 * app limited we can use that, but we need to set 17907 * things up so that when it gets ack'ed we record 17908 * the ack time (if its not already acked). 17909 */ 17910 rack->app_limited_needs_set = 1; 17911 /* 17912 * We want to get to the rsm that is either 17913 * next with space i.e. over 1 MSS or the one 17914 * after that (after the app-limited). 
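             * (That is: if the sendmap entry just past the last
             * app-limited one holds a full MSS or less we step over it
             * to the next entry; otherwise we start the measurement one
             * MSS into it, which is what the code below does.)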
17915 */ 17916 my_rsm = tqhash_next(rack->r_ctl.tqh, rack->r_ctl.rc_first_appl); 17917 if (my_rsm) { 17918 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 17919 /* Have to use the next one */ 17920 my_rsm = tqhash_next(rack->r_ctl.tqh, my_rsm); 17921 else { 17922 /* Use after the first MSS of it is acked */ 17923 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 17924 goto start_set; 17925 } 17926 } 17927 if ((my_rsm == NULL) || 17928 (my_rsm->r_rtr_cnt != 1)) { 17929 /* 17930 * Either its a retransmit or 17931 * the last is the app-limited one. 17932 */ 17933 goto use_latest; 17934 } 17935 } 17936 tp->gput_seq = my_rsm->r_start; 17937 start_set: 17938 if (my_rsm->r_flags & RACK_ACKED) { 17939 /* 17940 * This one has been acked use the arrival ack time 17941 */ 17942 struct rack_sendmap *nrsm; 17943 17944 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 17945 rack->app_limited_needs_set = 0; 17946 /* 17947 * Ok in this path we need to use the r_end now 17948 * since this guy is the starting ack. 17949 */ 17950 tp->gput_seq = my_rsm->r_end; 17951 /* 17952 * We also need to adjust up the sendtime 17953 * to the send of the next data after my_rsm. 17954 */ 17955 nrsm = tqhash_next(rack->r_ctl.tqh, my_rsm); 17956 if (nrsm != NULL) 17957 my_rsm = nrsm; 17958 else { 17959 /* 17960 * The next as not been sent, thats the 17961 * case for using the latest. 17962 */ 17963 goto use_latest; 17964 } 17965 } 17966 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; 17967 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 17968 rack->r_ctl.rc_gp_cumack_ts = 0; 17969 if ((rack->r_ctl.cleared_app_ack == 1) && 17970 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.cleared_app_ack_seq))) { 17971 /* 17972 * We just cleared an application limited period 17973 * so the next seq out needs to skip the first 17974 * ack. 17975 */ 17976 rack->app_limited_needs_set = 1; 17977 rack->r_ctl.cleared_app_ack = 0; 17978 } 17979 rack_log_pacing_delay_calc(rack, 17980 tp->gput_seq, 17981 tp->gput_ack, 17982 (uintptr_t)my_rsm, 17983 tp->gput_ts, 17984 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 17985 9, 17986 __LINE__, my_rsm, 0); 17987 /* Now lets make sure all are marked as they should be */ 17988 rack_tend_gp_marks(tp, rack); 17989 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 17990 return; 17991 } 17992 17993 use_latest: 17994 /* 17995 * We don't know how long we may have been 17996 * idle or if this is the first-send. Lets 17997 * setup the flag so we will trim off 17998 * the first ack'd data so we get a true 17999 * measurement. 18000 */ 18001 rack->app_limited_needs_set = 1; 18002 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 18003 rack->r_ctl.rc_gp_cumack_ts = 0; 18004 /* Find this guy so we can pull the send time */ 18005 my_rsm = tqhash_find(rack->r_ctl.tqh, startseq); 18006 if (my_rsm) { 18007 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; 18008 if (my_rsm->r_flags & RACK_ACKED) { 18009 /* 18010 * Unlikely since its probably what was 18011 * just transmitted (but I am paranoid). 18012 */ 18013 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 18014 rack->app_limited_needs_set = 0; 18015 } 18016 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 18017 /* This also is unlikely */ 18018 tp->gput_seq = my_rsm->r_start; 18019 } 18020 } else { 18021 /* 18022 * TSNH unless we have some send-map limit, 18023 * and even at that it should not be hitting 18024 * that limit (we should have stopped sending). 
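         * (TSNH: "this should not happen". If it does we fall back to
         * the current uptime as the measurement's output timestamp.)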
18025 */ 18026 struct timeval tv; 18027 18028 microuptime(&tv); 18029 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 18030 } 18031 rack_tend_gp_marks(tp, rack); 18032 rack_log_pacing_delay_calc(rack, 18033 tp->gput_seq, 18034 tp->gput_ack, 18035 (uintptr_t)my_rsm, 18036 tp->gput_ts, 18037 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 18038 9, __LINE__, NULL, 0); 18039 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 18040 } 18041 18042 static inline uint32_t 18043 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 18044 uint32_t avail, int32_t sb_offset) 18045 { 18046 uint32_t len; 18047 uint32_t sendwin; 18048 18049 if (tp->snd_wnd > cwnd_to_use) 18050 sendwin = cwnd_to_use; 18051 else 18052 sendwin = tp->snd_wnd; 18053 if (ctf_outstanding(tp) >= tp->snd_wnd) { 18054 /* We never want to go over our peers rcv-window */ 18055 len = 0; 18056 } else { 18057 uint32_t flight; 18058 18059 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 18060 if (flight >= sendwin) { 18061 /* 18062 * We have in flight what we are allowed by cwnd (if 18063 * it was rwnd blocking it would have hit above out 18064 * >= tp->snd_wnd). 18065 */ 18066 return (0); 18067 } 18068 len = sendwin - flight; 18069 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 18070 /* We would send too much (beyond the rwnd) */ 18071 len = tp->snd_wnd - ctf_outstanding(tp); 18072 } 18073 if ((len + sb_offset) > avail) { 18074 /* 18075 * We don't have that much in the SB, how much is 18076 * there? 18077 */ 18078 len = avail - sb_offset; 18079 } 18080 } 18081 return (len); 18082 } 18083 18084 static void 18085 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 18086 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 18087 int rsm_is_null, int optlen, int line, uint16_t mode) 18088 { 18089 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 18090 union tcp_log_stackspecific log; 18091 struct timeval tv; 18092 18093 memset(&log, 0, sizeof(log)); 18094 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18095 log.u_bbr.flex1 = error; 18096 log.u_bbr.flex2 = flags; 18097 log.u_bbr.flex3 = rsm_is_null; 18098 log.u_bbr.flex4 = ipoptlen; 18099 log.u_bbr.flex5 = tp->rcv_numsacks; 18100 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18101 log.u_bbr.flex7 = optlen; 18102 log.u_bbr.flex8 = rack->r_fsb_inited; 18103 log.u_bbr.applimited = rack->r_fast_output; 18104 log.u_bbr.bw_inuse = rack_get_bw(rack); 18105 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 18106 log.u_bbr.cwnd_gain = mode; 18107 log.u_bbr.pkts_out = orig_len; 18108 log.u_bbr.lt_epoch = len; 18109 log.u_bbr.delivered = line; 18110 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 18111 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18112 tcp_log_event(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 18113 len, &log, false, NULL, __func__, __LINE__, &tv); 18114 } 18115 } 18116 18117 18118 static struct mbuf * 18119 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 18120 struct rack_fast_send_blk *fsb, 18121 int32_t seglimit, int32_t segsize, int hw_tls) 18122 { 18123 #ifdef KERN_TLS 18124 struct ktls_session *tls, *ntls; 18125 #ifdef INVARIANTS 18126 struct mbuf *start; 18127 #endif 18128 #endif 18129 struct mbuf *m, *n, **np, *smb; 18130 struct mbuf *top; 18131 int32_t off, soff; 18132 int32_t len = *plen; 18133 int32_t fragsize; 18134 int32_t len_cp = 0; 18135 uint32_t mlen, frags; 18136 
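/*
 * In short: the loop below walks the source mbuf chain starting
 * at (the_m, the_off) and builds a new chain of up to *plen
 * bytes, cloning by reference (mb_dupcl) when the source has
 * external/EXTPG storage and copying the data otherwise. The
 * seglimit/segsize bookkeeping keeps the result within the TSO
 * segment budget, and the resting mbuf/offset is saved in the
 * fast-send block (if one is given) so a later call can resume.
 */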
18137 soff = off = the_off; 18138 smb = m = the_m; 18139 np = ⊤ 18140 top = NULL; 18141 #ifdef KERN_TLS 18142 if (hw_tls && (m->m_flags & M_EXTPG)) 18143 tls = m->m_epg_tls; 18144 else 18145 tls = NULL; 18146 #ifdef INVARIANTS 18147 start = m; 18148 #endif 18149 #endif 18150 while (len > 0) { 18151 if (m == NULL) { 18152 *plen = len_cp; 18153 break; 18154 } 18155 #ifdef KERN_TLS 18156 if (hw_tls) { 18157 if (m->m_flags & M_EXTPG) 18158 ntls = m->m_epg_tls; 18159 else 18160 ntls = NULL; 18161 18162 /* 18163 * Avoid mixing TLS records with handshake 18164 * data or TLS records from different 18165 * sessions. 18166 */ 18167 if (tls != ntls) { 18168 MPASS(m != start); 18169 *plen = len_cp; 18170 break; 18171 } 18172 } 18173 #endif 18174 mlen = min(len, m->m_len - off); 18175 if (seglimit) { 18176 /* 18177 * For M_EXTPG mbufs, add 3 segments 18178 * + 1 in case we are crossing page boundaries 18179 * + 2 in case the TLS hdr/trailer are used 18180 * It is cheaper to just add the segments 18181 * than it is to take the cache miss to look 18182 * at the mbuf ext_pgs state in detail. 18183 */ 18184 if (m->m_flags & M_EXTPG) { 18185 fragsize = min(segsize, PAGE_SIZE); 18186 frags = 3; 18187 } else { 18188 fragsize = segsize; 18189 frags = 0; 18190 } 18191 18192 /* Break if we really can't fit anymore. */ 18193 if ((frags + 1) >= seglimit) { 18194 *plen = len_cp; 18195 break; 18196 } 18197 18198 /* 18199 * Reduce size if you can't copy the whole 18200 * mbuf. If we can't copy the whole mbuf, also 18201 * adjust len so the loop will end after this 18202 * mbuf. 18203 */ 18204 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 18205 mlen = (seglimit - frags - 1) * fragsize; 18206 len = mlen; 18207 *plen = len_cp + len; 18208 } 18209 frags += howmany(mlen, fragsize); 18210 if (frags == 0) 18211 frags++; 18212 seglimit -= frags; 18213 KASSERT(seglimit > 0, 18214 ("%s: seglimit went too low", __func__)); 18215 } 18216 n = m_get(M_NOWAIT, m->m_type); 18217 *np = n; 18218 if (n == NULL) 18219 goto nospace; 18220 n->m_len = mlen; 18221 soff += mlen; 18222 len_cp += n->m_len; 18223 if (m->m_flags & (M_EXT | M_EXTPG)) { 18224 n->m_data = m->m_data + off; 18225 mb_dupcl(n, m); 18226 } else { 18227 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 18228 (u_int)n->m_len); 18229 } 18230 len -= n->m_len; 18231 off = 0; 18232 m = m->m_next; 18233 np = &n->m_next; 18234 if (len || (soff == smb->m_len)) { 18235 /* 18236 * We have more so we move forward or 18237 * we have consumed the entire mbuf and 18238 * len has fell to 0. 18239 */ 18240 soff = 0; 18241 smb = m; 18242 } 18243 18244 } 18245 if (fsb != NULL) { 18246 fsb->m = smb; 18247 fsb->off = soff; 18248 if (smb) { 18249 /* 18250 * Save off the size of the mbuf. We do 18251 * this so that we can recognize when it 18252 * has been trimmed by sbcut() as acks 18253 * come in. 18254 */ 18255 fsb->o_m_len = smb->m_len; 18256 fsb->o_t_len = M_TRAILINGROOM(smb); 18257 } else { 18258 /* 18259 * This is the case where the next mbuf went to NULL. This 18260 * means with this copy we have sent everything in the sb. 18261 * In theory we could clear the fast_output flag, but lets 18262 * not since its possible that we could get more added 18263 * and acks that call the extend function which would let 18264 * us send more. 
18265 */ 18266 fsb->o_m_len = 0; 18267 fsb->o_t_len = 0; 18268 } 18269 } 18270 return (top); 18271 nospace: 18272 if (top) 18273 m_freem(top); 18274 return (NULL); 18275 18276 } 18277 18278 /* 18279 * This is a copy of m_copym(), taking the TSO segment size/limit 18280 * constraints into account, and advancing the sndptr as it goes. 18281 */ 18282 static struct mbuf * 18283 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen, 18284 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff) 18285 { 18286 struct mbuf *m, *n; 18287 int32_t soff; 18288 18289 m = rack->r_ctl.fsb.m; 18290 if (M_TRAILINGROOM(m) != rack->r_ctl.fsb.o_t_len) { 18291 /* 18292 * The trailing space changed, mbufs can grow 18293 * at the tail but they can't shrink from 18294 * it, KASSERT that. Adjust the orig_m_len to 18295 * compensate for this change. 18296 */ 18297 KASSERT((rack->r_ctl.fsb.o_t_len > M_TRAILINGROOM(m)), 18298 ("mbuf:%p rack:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n", 18299 m, 18300 rack, 18301 (intmax_t)M_TRAILINGROOM(m), 18302 rack->r_ctl.fsb.o_t_len, 18303 rack->r_ctl.fsb.o_m_len, 18304 m->m_len)); 18305 rack->r_ctl.fsb.o_m_len += (rack->r_ctl.fsb.o_t_len - M_TRAILINGROOM(m)); 18306 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(m); 18307 } 18308 if (m->m_len < rack->r_ctl.fsb.o_m_len) { 18309 /* 18310 * Mbuf shrank, trimmed off the top by an ack, our 18311 * offset changes. 18312 */ 18313 KASSERT((rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len)), 18314 ("mbuf:%p len:%u rack:%p oml:%u soff:%u\n", 18315 m, m->m_len, 18316 rack, rack->r_ctl.fsb.o_m_len, 18317 rack->r_ctl.fsb.off)); 18318 18319 if (rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len- m->m_len)) 18320 rack->r_ctl.fsb.off -= (rack->r_ctl.fsb.o_m_len - m->m_len); 18321 else 18322 rack->r_ctl.fsb.off = 0; 18323 rack->r_ctl.fsb.o_m_len = m->m_len; 18324 #ifdef INVARIANTS 18325 } else if (m->m_len > rack->r_ctl.fsb.o_m_len) { 18326 panic("rack:%p m:%p m_len grew outside of t_space compensation", 18327 rack, m); 18328 #endif 18329 } 18330 soff = rack->r_ctl.fsb.off; 18331 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff)); 18332 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen)); 18333 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?", 18334 __FUNCTION__, 18335 rack, *plen, m, m->m_len)); 18336 /* Save off the right location before we copy and advance */ 18337 *s_soff = soff; 18338 *s_mb = rack->r_ctl.fsb.m; 18339 n = rack_fo_base_copym(m, soff, plen, 18340 &rack->r_ctl.fsb, 18341 seglimit, segsize, rack->r_ctl.fsb.hw_tls); 18342 return (n); 18343 } 18344 18345 /* Log the buffer level */ 18346 static void 18347 rack_log_queue_level(struct tcpcb *tp, struct tcp_rack *rack, 18348 int len, struct timeval *tv, 18349 uint32_t cts) 18350 { 18351 uint32_t p_rate = 0, p_queue = 0, err = 0; 18352 union tcp_log_stackspecific log; 18353 18354 #ifdef RATELIMIT 18355 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); 18356 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); 18357 #endif 18358 memset(&log, 0, sizeof(log)); 18359 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18360 log.u_bbr.flex1 = p_rate; 18361 log.u_bbr.flex2 = p_queue; 18362 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; 18363 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; 18364 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; 18365 log.u_bbr.flex7 = 99; 18366 log.u_bbr.flex8 = 0; 18367 log.u_bbr.pkts_out = err; 18368 log.u_bbr.delRate = rack->r_ctl.crte->rate; 18369 log.u_bbr.timeStamp = cts; 18370 
log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18371 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0, 18372 len, &log, false, NULL, __func__, __LINE__, tv); 18373 18374 } 18375 18376 static uint32_t 18377 rack_check_queue_level(struct tcp_rack *rack, struct tcpcb *tp, 18378 struct timeval *tv, uint32_t cts, int len, uint32_t segsiz) 18379 { 18380 uint64_t lentime = 0; 18381 #ifdef RATELIMIT 18382 uint32_t p_rate = 0, p_queue = 0, err; 18383 union tcp_log_stackspecific log; 18384 uint64_t bw; 18385 18386 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); 18387 /* Failed or queue is zero */ 18388 if (err || (p_queue == 0)) { 18389 lentime = 0; 18390 goto out; 18391 } 18392 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); 18393 if (err) { 18394 lentime = 0; 18395 goto out; 18396 } 18397 /* 18398 * If we reach here we have some bytes in 18399 * the queue. The number returned is a value 18400 * between 0 and 0xffff where ffff is full 18401 * and 0 is empty. So how best to make this into 18402 * something usable? 18403 * 18404 * The "safer" way is lets take the b/w gotten 18405 * from the query (which should be our b/w rate) 18406 * and pretend that a full send (our rc_pace_max_segs) 18407 * is outstanding. We factor it so its as if a full 18408 * number of our MSS segment is terms of full 18409 * ethernet segments are outstanding. 18410 */ 18411 bw = p_rate / 8; 18412 if (bw) { 18413 lentime = (rack->r_ctl.rc_pace_max_segs / segsiz); 18414 lentime *= ETHERNET_SEGMENT_SIZE; 18415 lentime *= (uint64_t)HPTS_USEC_IN_SEC; 18416 lentime /= bw; 18417 } else { 18418 /* TSNH -- KASSERT? */ 18419 lentime = 0; 18420 } 18421 out: 18422 if (tcp_bblogging_on(tp)) { 18423 memset(&log, 0, sizeof(log)); 18424 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18425 log.u_bbr.flex1 = p_rate; 18426 log.u_bbr.flex2 = p_queue; 18427 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; 18428 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; 18429 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; 18430 log.u_bbr.flex7 = 99; 18431 log.u_bbr.flex8 = 0; 18432 log.u_bbr.pkts_out = err; 18433 log.u_bbr.delRate = rack->r_ctl.crte->rate; 18434 log.u_bbr.cur_del_rate = lentime; 18435 log.u_bbr.timeStamp = cts; 18436 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18437 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0, 18438 len, &log, false, NULL, __func__, __LINE__,tv); 18439 } 18440 #endif 18441 return ((uint32_t)lentime); 18442 } 18443 18444 static int 18445 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm, 18446 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp) 18447 { 18448 /* 18449 * Enter the fast retransmit path. We are given that a sched_pin is 18450 * in place (if accounting is compliled in) and the cycle count taken 18451 * at the entry is in the ts_val. The concept her is that the rsm 18452 * now holds the mbuf offsets and such so we can directly transmit 18453 * without a lot of overhead, the len field is already set for 18454 * us to prohibit us from sending too much (usually its 1MSS). 
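 * When a hardware rate-limit entry (crte) is in use, this path
 * also consults rack_check_queue_level() above, which roughly
 * pretends a full pace_max_segs worth of wire-sized frames is
 * queued and converts that to microseconds at the queried rate.
 * As an illustrative example (values assumed, not from the code):
 * 45 segments * 1514 bytes at 12.5 MB/s works out to roughly
 * 5400 usec; a result of zero means the hardware queue is idle
 * and ip_sendflag is cleared so the packet may ride the hardware
 * queue.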
18455 */ 18456 struct ip *ip = NULL; 18457 struct udphdr *udp = NULL; 18458 struct tcphdr *th = NULL; 18459 struct mbuf *m = NULL; 18460 struct inpcb *inp; 18461 uint8_t *cpto; 18462 struct tcp_log_buffer *lgb; 18463 #ifdef TCP_ACCOUNTING 18464 uint64_t crtsc; 18465 int cnt_thru = 1; 18466 #endif 18467 struct tcpopt to; 18468 u_char opt[TCP_MAXOLEN]; 18469 uint32_t hdrlen, optlen; 18470 int32_t pacing_delay, segsiz, max_val, tso = 0, error = 0, ulen = 0; 18471 uint16_t flags; 18472 uint32_t if_hw_tsomaxsegcount = 0, startseq; 18473 uint32_t if_hw_tsomaxsegsize; 18474 int32_t ip_sendflag = IP_NO_SND_TAG_RL; 18475 18476 #ifdef INET6 18477 struct ip6_hdr *ip6 = NULL; 18478 18479 if (rack->r_is_v6) { 18480 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 18481 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 18482 } else 18483 #endif /* INET6 */ 18484 { 18485 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 18486 hdrlen = sizeof(struct tcpiphdr); 18487 } 18488 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 18489 goto failed; 18490 } 18491 if (doing_tlp) { 18492 /* Its a TLP add the flag, it may already be there but be sure */ 18493 rsm->r_flags |= RACK_TLP; 18494 } else { 18495 /* If it was a TLP it is not not on this retransmit */ 18496 rsm->r_flags &= ~RACK_TLP; 18497 } 18498 startseq = rsm->r_start; 18499 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 18500 inp = rack->rc_inp; 18501 to.to_flags = 0; 18502 flags = tcp_outflags[tp->t_state]; 18503 if (flags & (TH_SYN|TH_RST)) { 18504 goto failed; 18505 } 18506 if (rsm->r_flags & RACK_HAS_FIN) { 18507 /* We can't send a FIN here */ 18508 goto failed; 18509 } 18510 if (flags & TH_FIN) { 18511 /* We never send a FIN */ 18512 flags &= ~TH_FIN; 18513 } 18514 if (tp->t_flags & TF_RCVD_TSTMP) { 18515 to.to_tsval = ms_cts + tp->ts_offset; 18516 to.to_tsecr = tp->ts_recent; 18517 to.to_flags = TOF_TS; 18518 } 18519 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18520 /* TCP-MD5 (RFC2385). 
*/ 18521 if (tp->t_flags & TF_SIGNATURE) 18522 to.to_flags |= TOF_SIGNATURE; 18523 #endif 18524 optlen = tcp_addoptions(&to, opt); 18525 hdrlen += optlen; 18526 udp = rack->r_ctl.fsb.udp; 18527 if (udp) 18528 hdrlen += sizeof(struct udphdr); 18529 if (rack->r_ctl.rc_pace_max_segs) 18530 max_val = rack->r_ctl.rc_pace_max_segs; 18531 else if (rack->rc_user_set_max_segs) 18532 max_val = rack->rc_user_set_max_segs * segsiz; 18533 else 18534 max_val = len; 18535 if ((tp->t_flags & TF_TSO) && 18536 V_tcp_do_tso && 18537 (len > segsiz) && 18538 (tp->t_port == 0)) 18539 tso = 1; 18540 #ifdef INET6 18541 if (MHLEN < hdrlen + max_linkhdr) 18542 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 18543 else 18544 #endif 18545 m = m_gethdr(M_NOWAIT, MT_DATA); 18546 if (m == NULL) 18547 goto failed; 18548 m->m_data += max_linkhdr; 18549 m->m_len = hdrlen; 18550 th = rack->r_ctl.fsb.th; 18551 /* Establish the len to send */ 18552 if (len > max_val) 18553 len = max_val; 18554 if ((tso) && (len + optlen > segsiz)) { 18555 uint32_t if_hw_tsomax; 18556 int32_t max_len; 18557 18558 /* extract TSO information */ 18559 if_hw_tsomax = tp->t_tsomax; 18560 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 18561 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 18562 /* 18563 * Check if we should limit by maximum payload 18564 * length: 18565 */ 18566 if (if_hw_tsomax != 0) { 18567 /* compute maximum TSO length */ 18568 max_len = (if_hw_tsomax - hdrlen - 18569 max_linkhdr); 18570 if (max_len <= 0) { 18571 goto failed; 18572 } else if (len > max_len) { 18573 len = max_len; 18574 } 18575 } 18576 if (len <= segsiz) { 18577 /* 18578 * In case there are too many small fragments don't 18579 * use TSO: 18580 */ 18581 tso = 0; 18582 } 18583 } else { 18584 tso = 0; 18585 } 18586 if ((tso == 0) && (len > segsiz)) 18587 len = segsiz; 18588 (void)tcp_get_usecs(tv); 18589 if ((len == 0) || 18590 (len <= MHLEN - hdrlen - max_linkhdr)) { 18591 goto failed; 18592 } 18593 th->th_seq = htonl(rsm->r_start); 18594 th->th_ack = htonl(tp->rcv_nxt); 18595 /* 18596 * The PUSH bit should only be applied 18597 * if the full retransmission is made. If 18598 * we are sending less than this is the 18599 * left hand edge and should not have 18600 * the PUSH bit. 
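 * Put differently, a partial retransmit of the block keeps PSH
 * clear; it is only re-added when len covers the entire rsm and
 * the original send carried it (RACK_HAD_PUSH).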
18601 */ 18602 if ((rsm->r_flags & RACK_HAD_PUSH) && 18603 (len == (rsm->r_end - rsm->r_start))) 18604 flags |= TH_PUSH; 18605 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 18606 if (th->th_win == 0) { 18607 tp->t_sndzerowin++; 18608 tp->t_flags |= TF_RXWIN0SENT; 18609 } else 18610 tp->t_flags &= ~TF_RXWIN0SENT; 18611 if (rsm->r_flags & RACK_TLP) { 18612 /* 18613 * TLP should not count in retran count, but 18614 * in its own bin 18615 */ 18616 counter_u64_add(rack_tlp_retran, 1); 18617 counter_u64_add(rack_tlp_retran_bytes, len); 18618 } else { 18619 tp->t_sndrexmitpack++; 18620 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 18621 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 18622 } 18623 #ifdef STATS 18624 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 18625 len); 18626 #endif 18627 if (rsm->m == NULL) 18628 goto failed; 18629 if (rsm->m && 18630 ((rsm->orig_m_len != rsm->m->m_len) || 18631 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { 18632 /* Fix up the orig_m_len and possibly the mbuf offset */ 18633 rack_adjust_orig_mlen(rsm); 18634 } 18635 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 18636 if (len <= segsiz) { 18637 /* 18638 * Must have ran out of mbufs for the copy 18639 * shorten it to no longer need tso. Lets 18640 * not put on sendalot since we are low on 18641 * mbufs. 18642 */ 18643 tso = 0; 18644 } 18645 if ((m->m_next == NULL) || (len <= 0)){ 18646 goto failed; 18647 } 18648 if (udp) { 18649 if (rack->r_is_v6) 18650 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18651 else 18652 ulen = hdrlen + len - sizeof(struct ip); 18653 udp->uh_ulen = htons(ulen); 18654 } 18655 m->m_pkthdr.rcvif = (struct ifnet *)0; 18656 if (TCPS_HAVERCVDSYN(tp->t_state) && 18657 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 18658 int ect = tcp_ecn_output_established(tp, &flags, len, true); 18659 if ((tp->t_state == TCPS_SYN_RECEIVED) && 18660 (tp->t_flags2 & TF2_ECN_SND_ECE)) 18661 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 18662 #ifdef INET6 18663 if (rack->r_is_v6) { 18664 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 18665 ip6->ip6_flow |= htonl(ect << 20); 18666 } 18667 else 18668 #endif 18669 { 18670 ip->ip_tos &= ~IPTOS_ECN_MASK; 18671 ip->ip_tos |= ect; 18672 } 18673 } 18674 if (rack->r_ctl.crte != NULL) { 18675 /* See if we can send via the hw queue */ 18676 pacing_delay = rack_check_queue_level(rack, tp, tv, cts, len, segsiz); 18677 /* If there is nothing in queue (no pacing time) we can send via the hw queue */ 18678 if (pacing_delay == 0) 18679 ip_sendflag = 0; 18680 } 18681 tcp_set_flags(th, flags); 18682 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 18683 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18684 if (to.to_flags & TOF_SIGNATURE) { 18685 /* 18686 * Calculate MD5 signature and put it into the place 18687 * determined before. 18688 * NOTE: since TCP options buffer doesn't point into 18689 * mbuf's data, calculate offset and use it. 18690 */ 18691 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 18692 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 18693 /* 18694 * Do not send segment if the calculation of MD5 18695 * digest has failed. 
18696 */ 18697 goto failed; 18698 } 18699 } 18700 #endif 18701 #ifdef INET6 18702 if (rack->r_is_v6) { 18703 if (tp->t_port) { 18704 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 18705 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18706 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 18707 th->th_sum = htons(0); 18708 UDPSTAT_INC(udps_opackets); 18709 } else { 18710 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 18711 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18712 th->th_sum = in6_cksum_pseudo(ip6, 18713 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 18714 0); 18715 } 18716 } 18717 #endif 18718 #if defined(INET6) && defined(INET) 18719 else 18720 #endif 18721 #ifdef INET 18722 { 18723 if (tp->t_port) { 18724 m->m_pkthdr.csum_flags = CSUM_UDP; 18725 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18726 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 18727 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 18728 th->th_sum = htons(0); 18729 UDPSTAT_INC(udps_opackets); 18730 } else { 18731 m->m_pkthdr.csum_flags = CSUM_TCP; 18732 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18733 th->th_sum = in_pseudo(ip->ip_src.s_addr, 18734 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 18735 IPPROTO_TCP + len + optlen)); 18736 } 18737 /* IP version must be set here for ipv4/ipv6 checking later */ 18738 KASSERT(ip->ip_v == IPVERSION, 18739 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 18740 } 18741 #endif 18742 if (tso) { 18743 /* 18744 * Here we use segsiz since we have no added options besides 18745 * any standard timestamp options (no DSACKs or SACKS are sent 18746 * via either fast-path). 18747 */ 18748 KASSERT(len > segsiz, 18749 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 18750 m->m_pkthdr.csum_flags |= CSUM_TSO; 18751 m->m_pkthdr.tso_segsz = segsiz; 18752 } 18753 #ifdef INET6 18754 if (rack->r_is_v6) { 18755 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 18756 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 18757 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 18758 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18759 else 18760 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18761 } 18762 #endif 18763 #if defined(INET) && defined(INET6) 18764 else 18765 #endif 18766 #ifdef INET 18767 { 18768 ip->ip_len = htons(m->m_pkthdr.len); 18769 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 18770 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 18771 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18772 if (tp->t_port == 0 || len < V_tcp_minmss) { 18773 ip->ip_off |= htons(IP_DF); 18774 } 18775 } else { 18776 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18777 } 18778 } 18779 #endif 18780 if (doing_tlp == 0) { 18781 /* Set we retransmitted */ 18782 rack->rc_gp_saw_rec = 1; 18783 } else { 18784 /* Its a TLP set ca or ss */ 18785 if (tp->snd_cwnd > tp->snd_ssthresh) { 18786 /* Set we sent in CA */ 18787 rack->rc_gp_saw_ca = 1; 18788 } else { 18789 /* Set we sent in SS */ 18790 rack->rc_gp_saw_ss = 1; 18791 } 18792 } 18793 /* Time to copy in our header */ 18794 cpto = mtod(m, uint8_t *); 18795 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 18796 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 18797 if (optlen) { 18798 bcopy(opt, th + 1, optlen); 18799 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 18800 } else { 18801 th->th_off = sizeof(struct tcphdr) >> 2; 18802 } 18803 if (tcp_bblogging_on(rack->rc_tp)) { 18804 union tcp_log_stackspecific log; 18805 18806 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 18807 
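/*
 * This block had been marked as sitting beyond a collapsed peer
 * window; log that fact and count the retransmitted bytes in the
 * collapsed-window retransmit counters.
 */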
rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 18808 counter_u64_add(rack_collapsed_win_rxt, 1); 18809 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 18810 } 18811 memset(&log, 0, sizeof(log)); 18812 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18813 if (rack->rack_no_prr) 18814 log.u_bbr.flex1 = 0; 18815 else 18816 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 18817 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 18818 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 18819 log.u_bbr.flex4 = max_val; 18820 /* Save off the early/late values */ 18821 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18822 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 18823 log.u_bbr.bw_inuse = rack_get_bw(rack); 18824 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 18825 if (doing_tlp == 0) 18826 log.u_bbr.flex8 = 1; 18827 else 18828 log.u_bbr.flex8 = 2; 18829 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 18830 log.u_bbr.flex7 = 55; 18831 log.u_bbr.pkts_out = tp->t_maxseg; 18832 log.u_bbr.timeStamp = cts; 18833 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18834 if (rsm->r_rtr_cnt > 0) { 18835 /* 18836 * When we have a retransmit we want to log the 18837 * burst at send and flight at send from before. 18838 */ 18839 log.u_bbr.flex5 = rsm->r_fas; 18840 log.u_bbr.bbr_substate = rsm->r_bas; 18841 } else { 18842 /* 18843 * This is currently unlikely until we do the 18844 * packet pair probes but I will add it for completeness. 18845 */ 18846 log.u_bbr.flex5 = log.u_bbr.inflight; 18847 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 18848 } 18849 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 18850 log.u_bbr.delivered = 0; 18851 log.u_bbr.rttProp = (uintptr_t)rsm; 18852 log.u_bbr.delRate = rsm->r_flags; 18853 log.u_bbr.delRate <<= 31; 18854 log.u_bbr.delRate |= rack->r_must_retran; 18855 log.u_bbr.delRate <<= 1; 18856 log.u_bbr.delRate |= 1; 18857 log.u_bbr.pkt_epoch = __LINE__; 18858 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 18859 len, &log, false, NULL, __func__, __LINE__, tv); 18860 } else 18861 lgb = NULL; 18862 if ((rack->r_ctl.crte != NULL) && 18863 tcp_bblogging_on(tp)) { 18864 rack_log_queue_level(tp, rack, len, tv, cts); 18865 } 18866 #ifdef INET6 18867 if (rack->r_is_v6) { 18868 error = ip6_output(m, inp->in6p_outputopts, 18869 &inp->inp_route6, 18870 ip_sendflag, NULL, NULL, inp); 18871 } 18872 else 18873 #endif 18874 #ifdef INET 18875 { 18876 error = ip_output(m, NULL, 18877 &inp->inp_route, 18878 ip_sendflag, 0, inp); 18879 } 18880 #endif 18881 m = NULL; 18882 if (lgb) { 18883 lgb->tlb_errno = error; 18884 lgb = NULL; 18885 } 18886 /* Move snd_nxt to snd_max so we don't have false retransmissions */ 18887 tp->snd_nxt = tp->snd_max; 18888 if (error) { 18889 goto failed; 18890 } else if (rack->rc_hw_nobuf && (ip_sendflag != IP_NO_SND_TAG_RL)) { 18891 rack->rc_hw_nobuf = 0; 18892 rack->r_ctl.rc_agg_delayed = 0; 18893 rack->r_early = 0; 18894 rack->r_late = 0; 18895 rack->r_ctl.rc_agg_early = 0; 18896 } 18897 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 18898 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls, segsiz); 18899 if (doing_tlp) { 18900 rack->rc_tlp_in_progress = 1; 18901 rack->r_ctl.rc_tlp_cnt_out++; 18902 } 18903 if (error == 0) { 18904 counter_u64_add(rack_total_bytes, len); 18905 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 18906 if (doing_tlp) { 18907 rack->rc_last_sent_tlp_past_cumack = 0; 18908 
rack->rc_last_sent_tlp_seq_valid = 1; 18909 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 18910 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 18911 } 18912 if (rack->r_ctl.rc_prr_sndcnt >= len) 18913 rack->r_ctl.rc_prr_sndcnt -= len; 18914 else 18915 rack->r_ctl.rc_prr_sndcnt = 0; 18916 } 18917 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 18918 rack->forced_ack = 0; /* If we send something zap the FA flag */ 18919 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 18920 rack->r_ctl.retran_during_recovery += len; 18921 { 18922 int idx; 18923 18924 idx = (len / segsiz) + 3; 18925 if (idx >= TCP_MSS_ACCT_ATIMER) 18926 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 18927 else 18928 counter_u64_add(rack_out_size[idx], 1); 18929 } 18930 if (tp->t_rtttime == 0) { 18931 tp->t_rtttime = ticks; 18932 tp->t_rtseq = startseq; 18933 KMOD_TCPSTAT_INC(tcps_segstimed); 18934 } 18935 counter_u64_add(rack_fto_rsm_send, 1); 18936 if (error && (error == ENOBUFS)) { 18937 if (rack->r_ctl.crte != NULL) { 18938 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); 18939 if (tcp_bblogging_on(rack->rc_tp)) 18940 rack_log_queue_level(tp, rack, len, tv, cts); 18941 } else 18942 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); 18943 pacing_delay = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 18944 if (rack->rc_enobuf < 0x7f) 18945 rack->rc_enobuf++; 18946 if (pacing_delay < (10 * HPTS_USEC_IN_MSEC)) 18947 pacing_delay = 10 * HPTS_USEC_IN_MSEC; 18948 if (rack->r_ctl.crte != NULL) { 18949 counter_u64_add(rack_saw_enobuf_hw, 1); 18950 tcp_rl_log_enobuf(rack->r_ctl.crte); 18951 } 18952 counter_u64_add(rack_saw_enobuf, 1); 18953 } else { 18954 pacing_delay = rack_get_pacing_delay(rack, tp, len, NULL, segsiz, __LINE__); 18955 } 18956 rack_start_hpts_timer(rack, tp, cts, pacing_delay, len, 0); 18957 #ifdef TCP_ACCOUNTING 18958 crtsc = get_cyclecount(); 18959 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18960 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 18961 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 18962 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); 18963 } 18964 sched_unpin(); 18965 #endif 18966 return (0); 18967 failed: 18968 if (m) 18969 m_free(m); 18970 return (-1); 18971 } 18972 18973 static void 18974 rack_sndbuf_autoscale(struct tcp_rack *rack) 18975 { 18976 /* 18977 * Automatic sizing of send socket buffer. Often the send buffer 18978 * size is not optimally adjusted to the actual network conditions 18979 * at hand (delay bandwidth product). Setting the buffer size too 18980 * small limits throughput on links with high bandwidth and high 18981 * delay (eg. trans-continental/oceanic links). Setting the 18982 * buffer size too big consumes too much real kernel memory, 18983 * especially with many connections on busy servers. 18984 * 18985 * The criteria to step up the send buffer one notch are: 18986 * 1. receive window of remote host is larger than send buffer 18987 * (with a fudge factor of 5/4th); 18988 * 2. send buffer is filled to 7/8th with data (so we actually 18989 * have data to make use of it); 18990 * 3. send buffer fill has not hit maximal automatic size; 18991 * 4. our send window (slow start and cogestion controlled) is 18992 * larger than sent but unacknowledged data in send buffer. 18993 * 18994 * Note that the rack version moves things much faster since 18995 * we want to avoid hitting cache lines in the rack_fast_output() 18996 * path so this is called much less often and thus moves 18997 * the SB forward by a percentage. 
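 * A rough worked example (all values illustrative only): with
 * sb_hiwat at 64kB and rack_autosndbuf_inc set to 25 (percent),
 * the step is max(16kB, V_tcp_autosndbuf_inc) added on top of
 * the existing 64kB, with the result clamped to
 * V_tcp_autosndbuf_max before sbreserve_locked() is attempted.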
18998 */ 18999 struct socket *so; 19000 struct tcpcb *tp; 19001 uint32_t sendwin, scaleup; 19002 19003 tp = rack->rc_tp; 19004 so = rack->rc_inp->inp_socket; 19005 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); 19006 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { 19007 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && 19008 sbused(&so->so_snd) >= 19009 (so->so_snd.sb_hiwat / 8 * 7) && 19010 sbused(&so->so_snd) < V_tcp_autosndbuf_max && 19011 sendwin >= (sbused(&so->so_snd) - 19012 (tp->snd_max - tp->snd_una))) { 19013 if (rack_autosndbuf_inc) 19014 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; 19015 else 19016 scaleup = V_tcp_autosndbuf_inc; 19017 if (scaleup < V_tcp_autosndbuf_inc) 19018 scaleup = V_tcp_autosndbuf_inc; 19019 scaleup += so->so_snd.sb_hiwat; 19020 if (scaleup > V_tcp_autosndbuf_max) 19021 scaleup = V_tcp_autosndbuf_max; 19022 if (!sbreserve_locked(so, SO_SND, scaleup, curthread)) 19023 so->so_snd.sb_flags &= ~SB_AUTOSIZE; 19024 } 19025 } 19026 } 19027 19028 static int 19029 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val, 19030 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long *tot_len, int *send_err, int line) 19031 { 19032 /* 19033 * Enter to do fast output. We are given that the sched_pin is 19034 * in place (if accounting is compiled in) and the cycle count taken 19035 * at entry is in place in ts_val. The idea here is that 19036 * we know how many more bytes needs to be sent (presumably either 19037 * during pacing or to fill the cwnd and that was greater than 19038 * the max-burst). We have how much to send and all the info we 19039 * need to just send. 19040 */ 19041 #ifdef INET 19042 struct ip *ip = NULL; 19043 #endif 19044 struct udphdr *udp = NULL; 19045 struct tcphdr *th = NULL; 19046 struct mbuf *m, *s_mb; 19047 struct inpcb *inp; 19048 uint8_t *cpto; 19049 struct tcp_log_buffer *lgb; 19050 #ifdef TCP_ACCOUNTING 19051 uint64_t crtsc; 19052 #endif 19053 struct tcpopt to; 19054 u_char opt[TCP_MAXOLEN]; 19055 uint32_t hdrlen, optlen; 19056 #ifdef TCP_ACCOUNTING 19057 int cnt_thru = 1; 19058 #endif 19059 int32_t pacing_delay, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0; 19060 uint16_t flags; 19061 uint32_t s_soff; 19062 uint32_t if_hw_tsomaxsegcount = 0, startseq; 19063 uint32_t if_hw_tsomaxsegsize; 19064 uint32_t add_flag = RACK_SENT_FP; 19065 #ifdef INET6 19066 struct ip6_hdr *ip6 = NULL; 19067 19068 if (rack->r_is_v6) { 19069 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 19070 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 19071 } else 19072 #endif /* INET6 */ 19073 { 19074 #ifdef INET 19075 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 19076 hdrlen = sizeof(struct tcpiphdr); 19077 #endif 19078 } 19079 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 19080 m = NULL; 19081 goto failed; 19082 } 19083 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 19084 startseq = tp->snd_max; 19085 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 19086 inp = rack->rc_inp; 19087 len = rack->r_ctl.fsb.left_to_send; 19088 to.to_flags = 0; 19089 flags = rack->r_ctl.fsb.tcp_flags; 19090 if (tp->t_flags & TF_RCVD_TSTMP) { 19091 to.to_tsval = ms_cts + tp->ts_offset; 19092 to.to_tsecr = tp->ts_recent; 19093 to.to_flags = TOF_TS; 19094 } 19095 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 19096 /* TCP-MD5 (RFC2385). 
*/ 19097 if (tp->t_flags & TF_SIGNATURE) 19098 to.to_flags |= TOF_SIGNATURE; 19099 #endif 19100 optlen = tcp_addoptions(&to, opt); 19101 hdrlen += optlen; 19102 udp = rack->r_ctl.fsb.udp; 19103 if (udp) 19104 hdrlen += sizeof(struct udphdr); 19105 if (rack->r_ctl.rc_pace_max_segs) 19106 max_val = rack->r_ctl.rc_pace_max_segs; 19107 else if (rack->rc_user_set_max_segs) 19108 max_val = rack->rc_user_set_max_segs * segsiz; 19109 else 19110 max_val = len; 19111 if ((tp->t_flags & TF_TSO) && 19112 V_tcp_do_tso && 19113 (len > segsiz) && 19114 (tp->t_port == 0)) 19115 tso = 1; 19116 again: 19117 #ifdef INET6 19118 if (MHLEN < hdrlen + max_linkhdr) 19119 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 19120 else 19121 #endif 19122 m = m_gethdr(M_NOWAIT, MT_DATA); 19123 if (m == NULL) 19124 goto failed; 19125 m->m_data += max_linkhdr; 19126 m->m_len = hdrlen; 19127 th = rack->r_ctl.fsb.th; 19128 /* Establish the len to send */ 19129 if (len > max_val) 19130 len = max_val; 19131 if ((tso) && (len + optlen > segsiz)) { 19132 uint32_t if_hw_tsomax; 19133 int32_t max_len; 19134 19135 /* extract TSO information */ 19136 if_hw_tsomax = tp->t_tsomax; 19137 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 19138 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 19139 /* 19140 * Check if we should limit by maximum payload 19141 * length: 19142 */ 19143 if (if_hw_tsomax != 0) { 19144 /* compute maximum TSO length */ 19145 max_len = (if_hw_tsomax - hdrlen - 19146 max_linkhdr); 19147 if (max_len <= 0) { 19148 goto failed; 19149 } else if (len > max_len) { 19150 len = max_len; 19151 } 19152 } 19153 if (len <= segsiz) { 19154 /* 19155 * In case there are too many small fragments don't 19156 * use TSO: 19157 */ 19158 tso = 0; 19159 } 19160 } else { 19161 tso = 0; 19162 } 19163 if ((tso == 0) && (len > segsiz)) 19164 len = segsiz; 19165 (void)tcp_get_usecs(tv); 19166 if ((len == 0) || 19167 (len <= MHLEN - hdrlen - max_linkhdr)) { 19168 goto failed; 19169 } 19170 sb_offset = tp->snd_max - tp->snd_una; 19171 th->th_seq = htonl(tp->snd_max); 19172 th->th_ack = htonl(tp->rcv_nxt); 19173 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 19174 if (th->th_win == 0) { 19175 tp->t_sndzerowin++; 19176 tp->t_flags |= TF_RXWIN0SENT; 19177 } else 19178 tp->t_flags &= ~TF_RXWIN0SENT; 19179 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 19180 KMOD_TCPSTAT_INC(tcps_sndpack); 19181 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 19182 #ifdef STATS 19183 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 19184 len); 19185 #endif 19186 if (rack->r_ctl.fsb.m == NULL) 19187 goto failed; 19188 19189 /* s_mb and s_soff are saved for rack_log_output */ 19190 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 19191 &s_mb, &s_soff); 19192 if (len <= segsiz) { 19193 /* 19194 * Must have ran out of mbufs for the copy 19195 * shorten it to no longer need tso. Lets 19196 * not put on sendalot since we are low on 19197 * mbufs. 
19198 */ 19199 tso = 0; 19200 } 19201 if (rack->r_ctl.fsb.rfo_apply_push && 19202 (len == rack->r_ctl.fsb.left_to_send)) { 19203 flags |= TH_PUSH; 19204 add_flag |= RACK_HAD_PUSH; 19205 } 19206 if ((m->m_next == NULL) || (len <= 0)){ 19207 goto failed; 19208 } 19209 if (udp) { 19210 if (rack->r_is_v6) 19211 ulen = hdrlen + len - sizeof(struct ip6_hdr); 19212 else 19213 ulen = hdrlen + len - sizeof(struct ip); 19214 udp->uh_ulen = htons(ulen); 19215 } 19216 m->m_pkthdr.rcvif = (struct ifnet *)0; 19217 if (TCPS_HAVERCVDSYN(tp->t_state) && 19218 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 19219 int ect = tcp_ecn_output_established(tp, &flags, len, false); 19220 if ((tp->t_state == TCPS_SYN_RECEIVED) && 19221 (tp->t_flags2 & TF2_ECN_SND_ECE)) 19222 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 19223 #ifdef INET6 19224 if (rack->r_is_v6) { 19225 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 19226 ip6->ip6_flow |= htonl(ect << 20); 19227 } 19228 else 19229 #endif 19230 { 19231 #ifdef INET 19232 ip->ip_tos &= ~IPTOS_ECN_MASK; 19233 ip->ip_tos |= ect; 19234 #endif 19235 } 19236 } 19237 tcp_set_flags(th, flags); 19238 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 19239 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 19240 if (to.to_flags & TOF_SIGNATURE) { 19241 /* 19242 * Calculate MD5 signature and put it into the place 19243 * determined before. 19244 * NOTE: since TCP options buffer doesn't point into 19245 * mbuf's data, calculate offset and use it. 19246 */ 19247 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 19248 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 19249 /* 19250 * Do not send segment if the calculation of MD5 19251 * digest has failed. 19252 */ 19253 goto failed; 19254 } 19255 } 19256 #endif 19257 #ifdef INET6 19258 if (rack->r_is_v6) { 19259 if (tp->t_port) { 19260 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 19261 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 19262 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 19263 th->th_sum = htons(0); 19264 UDPSTAT_INC(udps_opackets); 19265 } else { 19266 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 19267 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 19268 th->th_sum = in6_cksum_pseudo(ip6, 19269 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 19270 0); 19271 } 19272 } 19273 #endif 19274 #if defined(INET6) && defined(INET) 19275 else 19276 #endif 19277 #ifdef INET 19278 { 19279 if (tp->t_port) { 19280 m->m_pkthdr.csum_flags = CSUM_UDP; 19281 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 19282 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 19283 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 19284 th->th_sum = htons(0); 19285 UDPSTAT_INC(udps_opackets); 19286 } else { 19287 m->m_pkthdr.csum_flags = CSUM_TCP; 19288 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 19289 th->th_sum = in_pseudo(ip->ip_src.s_addr, 19290 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 19291 IPPROTO_TCP + len + optlen)); 19292 } 19293 /* IP version must be set here for ipv4/ipv6 checking later */ 19294 KASSERT(ip->ip_v == IPVERSION, 19295 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 19296 } 19297 #endif 19298 if (tso) { 19299 /* 19300 * Here we use segsiz since we have no added options besides 19301 * any standard timestamp options (no DSACKs or SACKS are sent 19302 * via either fast-path). 
19303 */ 19304 KASSERT(len > segsiz, 19305 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 19306 m->m_pkthdr.csum_flags |= CSUM_TSO; 19307 m->m_pkthdr.tso_segsz = segsiz; 19308 } 19309 #ifdef INET6 19310 if (rack->r_is_v6) { 19311 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 19312 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 19313 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 19314 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 19315 else 19316 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 19317 } 19318 #endif 19319 #if defined(INET) && defined(INET6) 19320 else 19321 #endif 19322 #ifdef INET 19323 { 19324 ip->ip_len = htons(m->m_pkthdr.len); 19325 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 19326 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 19327 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 19328 if (tp->t_port == 0 || len < V_tcp_minmss) { 19329 ip->ip_off |= htons(IP_DF); 19330 } 19331 } else { 19332 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 19333 } 19334 } 19335 #endif 19336 if (tp->snd_cwnd > tp->snd_ssthresh) { 19337 /* Set we sent in CA */ 19338 rack->rc_gp_saw_ca = 1; 19339 } else { 19340 /* Set we sent in SS */ 19341 rack->rc_gp_saw_ss = 1; 19342 } 19343 /* Time to copy in our header */ 19344 cpto = mtod(m, uint8_t *); 19345 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 19346 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 19347 if (optlen) { 19348 bcopy(opt, th + 1, optlen); 19349 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 19350 } else { 19351 th->th_off = sizeof(struct tcphdr) >> 2; 19352 } 19353 if ((rack->r_ctl.crte != NULL) && 19354 tcp_bblogging_on(tp)) { 19355 rack_log_queue_level(tp, rack, len, tv, cts); 19356 } 19357 if (tcp_bblogging_on(rack->rc_tp)) { 19358 union tcp_log_stackspecific log; 19359 19360 memset(&log, 0, sizeof(log)); 19361 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 19362 if (rack->rack_no_prr) 19363 log.u_bbr.flex1 = 0; 19364 else 19365 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 19366 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 19367 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 19368 log.u_bbr.flex4 = max_val; 19369 /* Save off the early/late values */ 19370 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 19371 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 19372 log.u_bbr.bw_inuse = rack_get_bw(rack); 19373 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 19374 log.u_bbr.flex8 = 0; 19375 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 19376 log.u_bbr.flex7 = 44; 19377 log.u_bbr.pkts_out = tp->t_maxseg; 19378 log.u_bbr.timeStamp = cts; 19379 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 19380 log.u_bbr.flex5 = log.u_bbr.inflight; 19381 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 19382 log.u_bbr.delivered = rack->r_ctl.fsb.left_to_send; 19383 log.u_bbr.rttProp = 0; 19384 log.u_bbr.delRate = rack->r_must_retran; 19385 log.u_bbr.delRate <<= 1; 19386 log.u_bbr.pkt_epoch = line; 19387 /* For fast output no retrans so just inflight and how many mss we send */ 19388 log.u_bbr.flex5 = log.u_bbr.inflight; 19389 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 19390 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 19391 len, &log, false, NULL, __func__, __LINE__, tv); 19392 } else 19393 lgb = NULL; 19394 #ifdef INET6 19395 if (rack->r_is_v6) { 19396 error = ip6_output(m, inp->in6p_outputopts, 19397 &inp->inp_route6, 19398 0, NULL, NULL, inp); 19399 } 19400 #endif 19401 #if defined(INET) && defined(INET6) 19402 else 19403 #endif 
19404 #ifdef INET 19405 { 19406 error = ip_output(m, NULL, 19407 &inp->inp_route, 19408 0, 0, inp); 19409 } 19410 #endif 19411 if (lgb) { 19412 lgb->tlb_errno = error; 19413 lgb = NULL; 19414 } 19415 if (error) { 19416 *send_err = error; 19417 m = NULL; 19418 goto failed; 19419 } else if (rack->rc_hw_nobuf) { 19420 rack->rc_hw_nobuf = 0; 19421 rack->r_ctl.rc_agg_delayed = 0; 19422 rack->r_early = 0; 19423 rack->r_late = 0; 19424 rack->r_ctl.rc_agg_early = 0; 19425 } 19426 if ((error == 0) && (rack->lt_bw_up == 0)) { 19427 /* Unlikely */ 19428 rack->r_ctl.lt_timemark = tcp_tv_to_lusec(tv); 19429 rack->r_ctl.lt_seq = tp->snd_una; 19430 rack->lt_bw_up = 1; 19431 } else if ((error == 0) && 19432 (((tp->snd_max + len) - rack->r_ctl.lt_seq) > 0x7fffffff)) { 19433 /* 19434 * Need to record what we have since we are 19435 * approaching seq wrap. 19436 */ 19437 struct timeval tv; 19438 uint64_t tmark; 19439 19440 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); 19441 rack->r_ctl.lt_seq = tp->snd_una; 19442 tmark = tcp_get_u64_usecs(&tv); 19443 if (tmark > rack->r_ctl.lt_timemark) { 19444 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 19445 rack->r_ctl.lt_timemark = tmark; 19446 } 19447 } 19448 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 19449 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls, segsiz); 19450 if (tp->snd_una == tp->snd_max) { 19451 rack->r_ctl.rc_tlp_rxt_last_time = cts; 19452 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 19453 tp->t_acktime = ticks; 19454 } 19455 counter_u64_add(rack_total_bytes, len); 19456 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 19457 19458 rack->forced_ack = 0; /* If we send something zap the FA flag */ 19459 *tot_len += len; 19460 if ((tp->t_flags & TF_GPUTINPROG) == 0) 19461 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 19462 tp->snd_max += len; 19463 tp->snd_nxt = tp->snd_max; 19464 if (rack->rc_new_rnd_needed) { 19465 rack_new_round_starts(tp, rack, tp->snd_max); 19466 } 19467 { 19468 int idx; 19469 19470 idx = (len / segsiz) + 3; 19471 if (idx >= TCP_MSS_ACCT_ATIMER) 19472 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 19473 else 19474 counter_u64_add(rack_out_size[idx], 1); 19475 } 19476 if (len <= rack->r_ctl.fsb.left_to_send) 19477 rack->r_ctl.fsb.left_to_send -= len; 19478 else 19479 rack->r_ctl.fsb.left_to_send = 0; 19480 if (rack->r_ctl.fsb.left_to_send < segsiz) { 19481 rack->r_fast_output = 0; 19482 rack->r_ctl.fsb.left_to_send = 0; 19483 /* At the end of fast_output scale up the sb */ 19484 SOCK_SENDBUF_LOCK(rack->rc_inp->inp_socket); 19485 rack_sndbuf_autoscale(rack); 19486 SOCK_SENDBUF_UNLOCK(rack->rc_inp->inp_socket); 19487 } 19488 if (tp->t_rtttime == 0) { 19489 tp->t_rtttime = ticks; 19490 tp->t_rtseq = startseq; 19491 KMOD_TCPSTAT_INC(tcps_segstimed); 19492 } 19493 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 19494 (max_val > len) && 19495 (*tot_len < rack->r_ctl.rc_pace_max_segs) && 19496 (tso == 0)) { 19497 max_val -= len; 19498 len = segsiz; 19499 th = rack->r_ctl.fsb.th; 19500 #ifdef TCP_ACCOUNTING 19501 cnt_thru++; 19502 #endif 19503 goto again; 19504 } 19505 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 19506 counter_u64_add(rack_fto_send, 1); 19507 pacing_delay = rack_get_pacing_delay(rack, tp, *tot_len, NULL, segsiz, __LINE__); 19508 rack_start_hpts_timer(rack, tp, cts, pacing_delay, *tot_len, 0); 19509 #ifdef TCP_ACCOUNTING 19510 crtsc = get_cyclecount(); 19511 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 
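/* Charge this pass: trips through the send loop, cycles spent, and MSS sent. */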
19512 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 19513 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 19514 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((*tot_len + segsiz - 1) / segsiz); 19515 } 19516 sched_unpin(); 19517 #endif 19518 return (0); 19519 failed: 19520 if (m) 19521 m_free(m); 19522 rack->r_fast_output = 0; 19523 return (-1); 19524 } 19525 19526 static inline void 19527 rack_setup_fast_output(struct tcpcb *tp, struct tcp_rack *rack, 19528 struct sockbuf *sb, 19529 int len, int orig_len, int segsiz, uint32_t pace_max_seg, 19530 bool hw_tls, 19531 uint16_t flags) 19532 { 19533 rack->r_fast_output = 1; 19534 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19535 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19536 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); 19537 rack->r_ctl.fsb.tcp_flags = flags; 19538 rack->r_ctl.fsb.left_to_send = orig_len - len; 19539 if (rack->r_ctl.fsb.left_to_send < pace_max_seg) { 19540 /* Less than a full sized pace, lets not */ 19541 rack->r_fast_output = 0; 19542 return; 19543 } else { 19544 /* Round down to the nearest pace_max_seg */ 19545 rack->r_ctl.fsb.left_to_send = rounddown(rack->r_ctl.fsb.left_to_send, pace_max_seg); 19546 } 19547 if (hw_tls) 19548 rack->r_ctl.fsb.hw_tls = 1; 19549 else 19550 rack->r_ctl.fsb.hw_tls = 0; 19551 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19552 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19553 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19554 (tp->snd_max - tp->snd_una))); 19555 if (rack->r_ctl.fsb.left_to_send < segsiz) 19556 rack->r_fast_output = 0; 19557 else { 19558 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19559 rack->r_ctl.fsb.rfo_apply_push = 1; 19560 else 19561 rack->r_ctl.fsb.rfo_apply_push = 0; 19562 } 19563 } 19564 19565 static uint32_t 19566 rack_get_hpts_pacing_min_for_bw(struct tcp_rack *rack, int32_t segsiz) 19567 { 19568 uint64_t min_time; 19569 uint32_t maxlen; 19570 19571 min_time = (uint64_t)get_hpts_min_sleep_time(); 19572 maxlen = (uint32_t)((rack->r_ctl.gp_bw * min_time) / (uint64_t)HPTS_USEC_IN_SEC); 19573 maxlen = roundup(maxlen, segsiz); 19574 return (maxlen); 19575 } 19576 19577 static struct rack_sendmap * 19578 rack_check_collapsed(struct tcp_rack *rack, uint32_t cts) 19579 { 19580 struct rack_sendmap *rsm = NULL; 19581 int thresh; 19582 19583 restart: 19584 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); 19585 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) { 19586 /* Nothing, strange turn off validity */ 19587 rack->r_collapse_point_valid = 0; 19588 return (NULL); 19589 } 19590 /* Can we send it yet? */ 19591 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) { 19592 /* 19593 * Receiver window has not grown enough for 19594 * the segment to be put on the wire. 19595 */ 19596 return (NULL); 19597 } 19598 if (rsm->r_flags & RACK_ACKED) { 19599 /* 19600 * It has been sacked, lets move to the 19601 * next one if possible. 19602 */ 19603 rack->r_ctl.last_collapse_point = rsm->r_end; 19604 /* Are we done? */ 19605 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 19606 rack->r_ctl.high_collapse_point)) { 19607 rack->r_collapse_point_valid = 0; 19608 return (NULL); 19609 } 19610 goto restart; 19611 } 19612 /* Now has it been long enough ? 
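 * Eligibility is time based: the same reordering threshold used
 * for RACK loss detection (rack_calc_thresh_rack() with the
 * current RTT) must have elapsed since this block was last sent
 * before it is handed back for retransmission.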
*/ 19613 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts, __LINE__, 1); 19614 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) { 19615 rack_log_collapse(rack, rsm->r_start, 19616 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 19617 thresh, __LINE__, 6, rsm->r_flags, rsm); 19618 return (rsm); 19619 } 19620 /* Not enough time */ 19621 rack_log_collapse(rack, rsm->r_start, 19622 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 19623 thresh, __LINE__, 7, rsm->r_flags, rsm); 19624 return (NULL); 19625 } 19626 19627 static inline void 19628 rack_validate_sizes(struct tcp_rack *rack, int32_t *len, int32_t segsiz, uint32_t pace_max_seg) 19629 { 19630 if ((rack->full_size_rxt == 0) && 19631 (rack->shape_rxt_to_pacing_min == 0) && 19632 (*len >= segsiz)) { 19633 *len = segsiz; 19634 } else if (rack->shape_rxt_to_pacing_min && 19635 rack->gp_ready) { 19636 /* We use pacing min as shaping len req */ 19637 uint32_t maxlen; 19638 19639 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 19640 if (*len > maxlen) 19641 *len = maxlen; 19642 } else { 19643 /* 19644 * The else is full_size_rxt is on so send it all 19645 * note we do need to check this for exceeding 19646 * our max segment size due to the fact that 19647 * we do sometimes merge chunks together i.e. 19648 * we cannot just assume that we will never have 19649 * a chunk greater than pace_max_seg 19650 */ 19651 if (*len > pace_max_seg) 19652 *len = pace_max_seg; 19653 } 19654 } 19655 19656 static int 19657 rack_output(struct tcpcb *tp) 19658 { 19659 struct socket *so; 19660 uint32_t recwin; 19661 uint32_t sb_offset, s_moff = 0; 19662 int32_t len, error = 0; 19663 uint16_t flags; 19664 struct mbuf *m, *s_mb = NULL; 19665 struct mbuf *mb; 19666 uint32_t if_hw_tsomaxsegcount = 0; 19667 uint32_t if_hw_tsomaxsegsize; 19668 int32_t segsiz, minseg; 19669 long tot_len_this_send = 0; 19670 #ifdef INET 19671 struct ip *ip = NULL; 19672 #endif 19673 struct udphdr *udp = NULL; 19674 struct tcp_rack *rack; 19675 struct tcphdr *th; 19676 uint8_t pass = 0; 19677 uint8_t mark = 0; 19678 uint8_t check_done = 0; 19679 uint8_t wanted_cookie = 0; 19680 u_char opt[TCP_MAXOLEN]; 19681 unsigned ipoptlen, optlen, hdrlen, ulen=0; 19682 uint32_t rack_seq; 19683 19684 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 19685 unsigned ipsec_optlen = 0; 19686 19687 #endif 19688 int32_t idle, sendalot; 19689 uint32_t tot_idle; 19690 int32_t sub_from_prr = 0; 19691 volatile int32_t sack_rxmit; 19692 struct rack_sendmap *rsm = NULL; 19693 int32_t tso, mtu; 19694 struct tcpopt to; 19695 int32_t pacing_delay = 0; 19696 int32_t sup_rack = 0; 19697 uint32_t cts, ms_cts, delayed, early; 19698 uint32_t add_flag = RACK_SENT_SP; 19699 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */ 19700 uint8_t doing_tlp = 0; 19701 uint32_t cwnd_to_use, pace_max_seg; 19702 int32_t do_a_prefetch = 0; 19703 int32_t prefetch_rsm = 0; 19704 int32_t orig_len = 0; 19705 struct timeval tv; 19706 int32_t prefetch_so_done = 0; 19707 struct tcp_log_buffer *lgb; 19708 struct inpcb *inp = tptoinpcb(tp); 19709 struct sockbuf *sb; 19710 uint64_t ts_val = 0; 19711 #ifdef TCP_ACCOUNTING 19712 uint64_t crtsc; 19713 #endif 19714 #ifdef INET6 19715 struct ip6_hdr *ip6 = NULL; 19716 int32_t isipv6; 19717 #endif 19718 bool hpts_calling, hw_tls = false; 19719 19720 NET_EPOCH_ASSERT(); 19721 INP_WLOCK_ASSERT(inp); 19722 19723 /* setup and take the cache hits here */ 19724 rack = (struct tcp_rack *)tp->t_fb_ptr; 19725 #ifdef 
TCP_ACCOUNTING 19726 sched_pin(); 19727 ts_val = get_cyclecount(); 19728 #endif 19729 hpts_calling = !!(tp->t_flags2 & TF2_HPTS_CALLS); 19730 tp->t_flags2 &= ~TF2_HPTS_CALLS; 19731 #ifdef TCP_OFFLOAD 19732 if (tp->t_flags & TF_TOE) { 19733 #ifdef TCP_ACCOUNTING 19734 sched_unpin(); 19735 #endif 19736 return (tcp_offload_output(tp)); 19737 } 19738 #endif 19739 if (rack->rack_deferred_inited == 0) { 19740 /* 19741 * If we are the connecting socket we will 19742 * hit rack_init() when no sequence numbers 19743 * are setup. This makes it so we must defer 19744 * some initialization. Call that now. 19745 */ 19746 rack_deferred_init(tp, rack); 19747 } 19748 /* 19749 * For TFO connections in SYN_RECEIVED, only allow the initial 19750 * SYN|ACK and those sent by the retransmit timer. 19751 */ 19752 if ((tp->t_flags & TF_FASTOPEN) && 19753 (tp->t_state == TCPS_SYN_RECEIVED) && 19754 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 19755 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 19756 #ifdef TCP_ACCOUNTING 19757 sched_unpin(); 19758 #endif 19759 return (0); 19760 } 19761 #ifdef INET6 19762 if (rack->r_state) { 19763 /* Use the cache line loaded if possible */ 19764 isipv6 = rack->r_is_v6; 19765 } else { 19766 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 19767 } 19768 #endif 19769 early = 0; 19770 cts = tcp_get_usecs(&tv); 19771 ms_cts = tcp_tv_to_msec(&tv); 19772 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 19773 tcp_in_hpts(rack->rc_tp)) { 19774 /* 19775 * We are on the hpts for some timer but not hptsi output. 19776 * Remove from the hpts unconditionally. 19777 */ 19778 rack_timer_cancel(tp, rack, cts, __LINE__); 19779 } 19780 /* Are we pacing and late? */ 19781 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 19782 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 19783 /* We are delayed */ 19784 delayed = cts - rack->r_ctl.rc_last_output_to; 19785 } else { 19786 delayed = 0; 19787 } 19788 /* Do the timers, which may override the pacer */ 19789 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 19790 int retval; 19791 19792 retval = rack_process_timers(tp, rack, cts, hpts_calling, 19793 &doing_tlp); 19794 if (retval != 0) { 19795 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 19796 #ifdef TCP_ACCOUNTING 19797 sched_unpin(); 19798 #endif 19799 /* 19800 * If timers want tcp_drop(), then pass error out, 19801 * otherwise suppress it. 19802 */ 19803 return (retval < 0 ? retval : 0); 19804 } 19805 } 19806 if (rack->rc_in_persist) { 19807 if (tcp_in_hpts(rack->rc_tp) == 0) { 19808 /* Timer is not running */ 19809 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 19810 } 19811 #ifdef TCP_ACCOUNTING 19812 sched_unpin(); 19813 #endif 19814 return (0); 19815 } 19816 if ((rack->rc_ack_required == 1) && 19817 (rack->r_timer_override == 0)){ 19818 /* A timeout occurred and no ack has arrived */ 19819 if (tcp_in_hpts(rack->rc_tp) == 0) { 19820 /* Timer is not running */ 19821 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 19822 } 19823 #ifdef TCP_ACCOUNTING 19824 sched_unpin(); 19825 #endif 19826 return (0); 19827 } 19828 if ((rack->r_timer_override) || 19829 (rack->rc_ack_can_sendout_data) || 19830 (delayed) || 19831 (tp->t_state < TCPS_ESTABLISHED)) { 19832 rack->rc_ack_can_sendout_data = 0; 19833 if (tcp_in_hpts(rack->rc_tp)) 19834 tcp_hpts_remove(rack->rc_tp); 19835 } else if (tcp_in_hpts(rack->rc_tp)) { 19836 /* 19837 * On the hpts you can't pass even if ACKNOW is on, we will 19838 * when the hpts fires. 
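 * The connection simply stays queued for its pacing slot; the
 * cycles spent getting here are charged to SND_BLOCKED below and
 * the attempt is tallied in the TCP_MSS_ACCT_INPACE bucket.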
19839 */ 19840 #ifdef TCP_ACCOUNTING 19841 crtsc = get_cyclecount(); 19842 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19843 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 19844 tp->tcp_cnt_counters[SND_BLOCKED]++; 19845 } 19846 sched_unpin(); 19847 #endif 19848 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 19849 return (0); 19850 } 19851 /* Finish out both pacing early and late accounting */ 19852 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 19853 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 19854 early = rack->r_ctl.rc_last_output_to - cts; 19855 } else 19856 early = 0; 19857 if (delayed && (rack->rc_always_pace == 1)) { 19858 rack->r_ctl.rc_agg_delayed += delayed; 19859 rack->r_late = 1; 19860 } else if (early && (rack->rc_always_pace == 1)) { 19861 rack->r_ctl.rc_agg_early += early; 19862 rack->r_early = 1; 19863 } else if (rack->rc_always_pace == 0) { 19864 /* Non-paced we are not late */ 19865 rack->r_ctl.rc_agg_delayed = rack->r_ctl.rc_agg_early = 0; 19866 rack->r_early = rack->r_late = 0; 19867 } 19868 /* Now that early/late accounting is done turn off the flag */ 19869 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 19870 rack->r_wanted_output = 0; 19871 rack->r_timer_override = 0; 19872 if ((tp->t_state != rack->r_state) && 19873 TCPS_HAVEESTABLISHED(tp->t_state)) { 19874 rack_set_state(tp, rack); 19875 } 19876 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 19877 minseg = segsiz; 19878 if (rack->r_ctl.rc_pace_max_segs == 0) 19879 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 19880 else 19881 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 19882 if ((rack->r_fast_output) && 19883 (doing_tlp == 0) && 19884 (tp->rcv_numsacks == 0)) { 19885 int ret; 19886 19887 error = 0; 19888 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, &tot_len_this_send, &error, __LINE__); 19889 if (ret > 0) 19890 return(ret); 19891 else if (error) { 19892 inp = rack->rc_inp; 19893 so = inp->inp_socket; 19894 sb = &so->so_snd; 19895 goto nomore; 19896 } else { 19897 /* Return == 0, if there is more we can send tot_len wise fall through and send */ 19898 if (tot_len_this_send >= pace_max_seg) 19899 return (ret); 19900 #ifdef TCP_ACCOUNTING 19901 /* We need to re-pin since fast_output un-pined */ 19902 sched_pin(); 19903 ts_val = get_cyclecount(); 19904 #endif 19905 /* Fall back out so we can send any more that may bring us to pace_max_seg */ 19906 } 19907 } 19908 inp = rack->rc_inp; 19909 /* 19910 * For TFO connections in SYN_SENT or SYN_RECEIVED, 19911 * only allow the initial SYN or SYN|ACK and those sent 19912 * by the retransmit timer. 19913 */ 19914 if ((tp->t_flags & TF_FASTOPEN) && 19915 ((tp->t_state == TCPS_SYN_RECEIVED) || 19916 (tp->t_state == TCPS_SYN_SENT)) && 19917 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 19918 (tp->t_rxtshift == 0)) { /* not a retransmit */ 19919 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 19920 #ifdef TCP_ACCOUNTING 19921 sched_unpin(); 19922 #endif 19923 return (0); 19924 } 19925 /* 19926 * Determine length of data that should be transmitted, and flags 19927 * that will be used. If there is some data or critical controls 19928 * (SYN, RST) to send, then transmit; otherwise, investigate 19929 * further. 
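 * (The idle handling just below runs first: if the connection has been
 * idle for at least one retransmit timeout since the last segment was
 * received and t_idle_reduce is set, rack_cc_after_idle() is called so
 * the congestion window can be restarted before this send is sized.)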
19930 */ 19931 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 19932 if (tp->t_idle_reduce) { 19933 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 19934 rack_cc_after_idle(rack, tp); 19935 } 19936 tp->t_flags &= ~TF_LASTIDLE; 19937 if (idle) { 19938 if (tp->t_flags & TF_MORETOCOME) { 19939 tp->t_flags |= TF_LASTIDLE; 19940 idle = 0; 19941 } 19942 } 19943 if ((tp->snd_una == tp->snd_max) && 19944 rack->r_ctl.rc_went_idle_time && 19945 (cts > rack->r_ctl.rc_went_idle_time)) { 19946 tot_idle = (cts - rack->r_ctl.rc_went_idle_time); 19947 if (tot_idle > rack_min_probertt_hold) { 19948 /* Count as a probe rtt */ 19949 if (rack->in_probe_rtt == 0) { 19950 rack->r_ctl.rc_lower_rtt_us_cts = cts; 19951 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 19952 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 19953 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 19954 } else { 19955 rack_exit_probertt(rack, cts); 19956 } 19957 } 19958 } else 19959 tot_idle = 0; 19960 if (rack_use_fsb && 19961 (rack->r_ctl.fsb.tcp_ip_hdr) && 19962 (rack->r_fsb_inited == 0) && 19963 (rack->r_state != TCPS_CLOSED)) 19964 rack_init_fsb_block(tp, rack, tcp_outflags[tp->t_state]); 19965 if (rack->rc_sendvars_notset == 1) { 19966 rack->rc_sendvars_notset = 0; 19967 /* 19968 * Make sure any TCP timers (keep-alive) is not running. 19969 */ 19970 tcp_timer_stop(tp); 19971 } 19972 if ((rack->rack_no_prr == 1) && 19973 (rack->rc_always_pace == 0)) { 19974 /* 19975 * Sanity check before sending, if we have 19976 * no-pacing enabled and prr is turned off that 19977 * is a logistics error. Correct this by turnning 19978 * prr back on. A user *must* set some form of 19979 * pacing in order to turn PRR off. We do this 19980 * in the output path so that we can avoid socket 19981 * option ordering issues that would occur if we 19982 * tried to do it while setting rack_no_prr on. 19983 */ 19984 rack->rack_no_prr = 0; 19985 } 19986 if ((rack->pcm_enabled == 1) && 19987 (rack->pcm_needed == 0) && 19988 (tot_idle > 0)) { 19989 /* 19990 * We have been idle some micro seconds. We need 19991 * to factor this in to see if a PCM is needed. 19992 */ 19993 uint32_t rtts_idle, rnds; 19994 19995 if (tp->t_srtt) 19996 rtts_idle = tot_idle / tp->t_srtt; 19997 else 19998 rtts_idle = 0; 19999 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; 20000 rack->r_ctl.pcm_idle_rounds += rtts_idle; 20001 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { 20002 rack->pcm_needed = 1; 20003 rack_log_pcm(rack, 8, rack->r_ctl.last_pcm_round, rtts_idle, rack->r_ctl.current_round ); 20004 } 20005 } 20006 again: 20007 sendalot = 0; 20008 cts = tcp_get_usecs(&tv); 20009 ms_cts = tcp_tv_to_msec(&tv); 20010 tso = 0; 20011 mtu = 0; 20012 if (TCPS_HAVEESTABLISHED(tp->t_state) && 20013 (rack->r_ctl.pcm_max_seg == 0)) { 20014 /* 20015 * We set in our first send so we know that the ctf_fixed_maxseg 20016 * has been fully set. If we do it in rack_init() we most likely 20017 * see 512 bytes so we end up at 5120, not desirable. 20018 */ 20019 rack->r_ctl.pcm_max_seg = rc_init_window(rack); 20020 if (rack->r_ctl.pcm_max_seg < (ctf_fixed_maxseg(tp) * 10)) { 20021 /* 20022 * Assure our initial PCM probe is at least 10 MSS. 
20023 */ 20024 rack->r_ctl.pcm_max_seg = ctf_fixed_maxseg(tp) * 10; 20025 } 20026 } 20027 if ((rack->r_ctl.pcm_max_seg != 0) && (rack->pcm_needed == 1)) { 20028 uint32_t rw_avail, cwa; 20029 20030 if (tp->snd_wnd > ctf_outstanding(tp)) 20031 rw_avail = tp->snd_wnd - ctf_outstanding(tp); 20032 else 20033 rw_avail = 0; 20034 if (tp->snd_cwnd > ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked)) 20035 cwa = tp->snd_cwnd -ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 20036 else 20037 cwa = 0; 20038 if ((cwa >= rack->r_ctl.pcm_max_seg) && 20039 (rw_avail > rack->r_ctl.pcm_max_seg)) { 20040 /* Raise up the max seg for this trip through */ 20041 pace_max_seg = rack->r_ctl.pcm_max_seg; 20042 /* Disable any fast output */ 20043 rack->r_fast_output = 0; 20044 } 20045 if (rack_verbose_logging) { 20046 rack_log_pcm(rack, 4, 20047 cwa, rack->r_ctl.pcm_max_seg, rw_avail); 20048 } 20049 } 20050 sb_offset = tp->snd_max - tp->snd_una; 20051 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 20052 flags = tcp_outflags[tp->t_state]; 20053 while (rack->rc_free_cnt < rack_free_cache) { 20054 rsm = rack_alloc(rack); 20055 if (rsm == NULL) { 20056 if (hpts_calling) 20057 /* Retry in a ms */ 20058 pacing_delay = (1 * HPTS_USEC_IN_MSEC); 20059 so = inp->inp_socket; 20060 sb = &so->so_snd; 20061 goto just_return_nolock; 20062 } 20063 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 20064 rack->rc_free_cnt++; 20065 rsm = NULL; 20066 } 20067 sack_rxmit = 0; 20068 len = 0; 20069 rsm = NULL; 20070 if (flags & TH_RST) { 20071 SOCK_SENDBUF_LOCK(inp->inp_socket); 20072 so = inp->inp_socket; 20073 sb = &so->so_snd; 20074 goto send; 20075 } 20076 if (rack->r_ctl.rc_resend) { 20077 /* Retransmit timer */ 20078 rsm = rack->r_ctl.rc_resend; 20079 rack->r_ctl.rc_resend = NULL; 20080 len = rsm->r_end - rsm->r_start; 20081 sack_rxmit = 1; 20082 sendalot = 0; 20083 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 20084 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 20085 __func__, __LINE__, 20086 rsm->r_start, tp->snd_una, tp, rack, rsm)); 20087 sb_offset = rsm->r_start - tp->snd_una; 20088 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 20089 } else if (rack->r_collapse_point_valid && 20090 ((rsm = rack_check_collapsed(rack, cts)) != NULL)) { 20091 /* 20092 * If an RSM is returned then enough time has passed 20093 * for us to retransmit it. Move up the collapse point, 20094 * since this rsm has its chance to retransmit now. 20095 */ 20096 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_RXT); 20097 rack->r_ctl.last_collapse_point = rsm->r_end; 20098 /* Are we done? 
*/
20099 if (SEQ_GEQ(rack->r_ctl.last_collapse_point,
20100 rack->r_ctl.high_collapse_point))
20101 rack->r_collapse_point_valid = 0;
20102 sack_rxmit = 1;
20103 /* We are not doing a TLP */
20104 doing_tlp = 0;
20105 len = rsm->r_end - rsm->r_start;
20106 sb_offset = rsm->r_start - tp->snd_una;
20107 sendalot = 0;
20108 rack_validate_sizes(rack, &len, segsiz, pace_max_seg);
20109 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) {
20110 /* We have a retransmit that takes precedence */
20111 if ((!IN_FASTRECOVERY(tp->t_flags)) &&
20112 ((rsm->r_flags & RACK_MUST_RXT) == 0) &&
20113 ((tp->t_flags & TF_WASFRECOVERY) == 0)) {
20114 /* Enter recovery if not induced by a time-out */
20115 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
20116 }
20117 #ifdef INVARIANTS
20118 if (SEQ_LT(rsm->r_start, tp->snd_una)) {
20119 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
20120 tp, rack, rsm, rsm->r_start, tp->snd_una);
20121 }
20122 #endif
20123 len = rsm->r_end - rsm->r_start;
20124 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
20125 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
20126 __func__, __LINE__,
20127 rsm->r_start, tp->snd_una, tp, rack, rsm));
20128 sb_offset = rsm->r_start - tp->snd_una;
20129 sendalot = 0;
20130 rack_validate_sizes(rack, &len, segsiz, pace_max_seg);
20131 if (len > 0) {
20132 sack_rxmit = 1;
20133 KMOD_TCPSTAT_INC(tcps_sack_rexmits);
20134 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes,
20135 min(len, segsiz));
20136 }
20137 } else if (rack->r_ctl.rc_tlpsend) {
20138 /* Tail loss probe */
20139 long cwin;
20140 long tlen;
20141
20142 /*
20143 * Check if we can do a TLP with a RACK'd packet;
20144 * this can happen if we are not doing the rack
20145 * cheat and we skipped to a TLP and it
20146 * went off.
20147 */
20148 rsm = rack->r_ctl.rc_tlpsend;
20149 /* We are doing a TLP, make sure the flag is present */
20150 rsm->r_flags |= RACK_TLP;
20151 rack->r_ctl.rc_tlpsend = NULL;
20152 sack_rxmit = 1;
20153 tlen = rsm->r_end - rsm->r_start;
20154 if (tlen > segsiz)
20155 tlen = segsiz;
20156 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
20157 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
20158 __func__, __LINE__,
20159 rsm->r_start, tp->snd_una, tp, rack, rsm));
20160 sb_offset = rsm->r_start - tp->snd_una;
20161 cwin = min(tp->snd_wnd, tlen);
20162 len = cwin;
20163 }
20164 if (rack->r_must_retran &&
20165 (doing_tlp == 0) &&
20166 (SEQ_GT(tp->snd_max, tp->snd_una)) &&
20167 (rsm == NULL)) {
20168 /*
20169 * There are two different ways that we
20170 * can get into this block:
20171 * a) This is a non-sack connection, we had a time-out
20172 * and thus r_must_retran was set and everything
20173 * left outstanding has been marked for retransmit.
20174 * b) The MTU of the path shrank, so that everything
20175 * was marked to be retransmitted with the smaller
20176 * mtu and r_must_retran was set.
20177 *
20178 * This means that we expect the sendmap (outstanding)
20179 * to all be marked must. We can use the tmap to
20180 * look at them.
20181 *
20182 */
20183 int sendwin, flight;
20184
20185 sendwin = min(tp->snd_wnd, tp->snd_cwnd);
20186 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto);
20187 if (flight >= sendwin) {
20188 /*
20189 * We can't send yet.
20190 */
20191 so = inp->inp_socket;
20192 sb = &so->so_snd;
20193 goto just_return_nolock;
20194 }
20195 /*
20196 * This is the case a/b mentioned above. All
20197 * outstanding/not-acked should be marked.
20198 * We can use the tmap to find them.
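 * (Below we look at the head of rc_tmap; if even that entry no longer
 * carries RACK_MUST_RXT the forced-retransmit pass is finished and
 * r_must_retran is cleared, otherwise that block is what we resend.)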
20199 */ 20200 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 20201 if (rsm == NULL) { 20202 /* TSNH */ 20203 rack->r_must_retran = 0; 20204 rack->r_ctl.rc_out_at_rto = 0; 20205 so = inp->inp_socket; 20206 sb = &so->so_snd; 20207 goto just_return_nolock; 20208 } 20209 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { 20210 /* 20211 * The first one does not have the flag, did we collapse 20212 * further up in our list? 20213 */ 20214 rack->r_must_retran = 0; 20215 rack->r_ctl.rc_out_at_rto = 0; 20216 rsm = NULL; 20217 sack_rxmit = 0; 20218 } else { 20219 sack_rxmit = 1; 20220 len = rsm->r_end - rsm->r_start; 20221 sb_offset = rsm->r_start - tp->snd_una; 20222 sendalot = 0; 20223 if ((rack->full_size_rxt == 0) && 20224 (rack->shape_rxt_to_pacing_min == 0) && 20225 (len >= segsiz)) 20226 len = segsiz; 20227 else if (rack->shape_rxt_to_pacing_min && 20228 rack->gp_ready) { 20229 /* We use pacing min as shaping len req */ 20230 uint32_t maxlen; 20231 20232 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 20233 if (len > maxlen) 20234 len = maxlen; 20235 } 20236 /* 20237 * Delay removing the flag RACK_MUST_RXT so 20238 * that the fastpath for retransmit will 20239 * work with this rsm. 20240 */ 20241 } 20242 } 20243 /* 20244 * Enforce a connection sendmap count limit if set 20245 * as long as we are not retransmiting. 20246 */ 20247 if ((rsm == NULL) && 20248 (V_tcp_map_entries_limit > 0) && 20249 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 20250 counter_u64_add(rack_to_alloc_limited, 1); 20251 if (!rack->alloc_limit_reported) { 20252 rack->alloc_limit_reported = 1; 20253 counter_u64_add(rack_alloc_limited_conns, 1); 20254 } 20255 so = inp->inp_socket; 20256 sb = &so->so_snd; 20257 goto just_return_nolock; 20258 } 20259 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 20260 /* we are retransmitting the fin */ 20261 len--; 20262 if (len) { 20263 /* 20264 * When retransmitting data do *not* include the 20265 * FIN. This could happen from a TLP probe. 
20266 */ 20267 flags &= ~TH_FIN; 20268 } 20269 } 20270 if (rsm && rack->r_fsb_inited && 20271 rack_use_rsm_rfo && 20272 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 20273 int ret; 20274 20275 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); 20276 if (ret == 0) 20277 return (0); 20278 } 20279 so = inp->inp_socket; 20280 sb = &so->so_snd; 20281 if (do_a_prefetch == 0) { 20282 kern_prefetch(sb, &do_a_prefetch); 20283 do_a_prefetch = 1; 20284 } 20285 #ifdef NETFLIX_SHARED_CWND 20286 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 20287 rack->rack_enable_scwnd) { 20288 /* We are doing cwnd sharing */ 20289 if (rack->gp_ready && 20290 (rack->rack_attempted_scwnd == 0) && 20291 (rack->r_ctl.rc_scw == NULL) && 20292 tp->t_lib) { 20293 /* The pcbid is in, lets make an attempt */ 20294 counter_u64_add(rack_try_scwnd, 1); 20295 rack->rack_attempted_scwnd = 1; 20296 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 20297 &rack->r_ctl.rc_scw_index, 20298 segsiz); 20299 } 20300 if (rack->r_ctl.rc_scw && 20301 (rack->rack_scwnd_is_idle == 1) && 20302 sbavail(&so->so_snd)) { 20303 /* we are no longer out of data */ 20304 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 20305 rack->rack_scwnd_is_idle = 0; 20306 } 20307 if (rack->r_ctl.rc_scw) { 20308 /* First lets update and get the cwnd */ 20309 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 20310 rack->r_ctl.rc_scw_index, 20311 tp->snd_cwnd, tp->snd_wnd, segsiz); 20312 } 20313 } 20314 #endif 20315 /* 20316 * Get standard flags, and add SYN or FIN if requested by 'hidden' 20317 * state flags. 20318 */ 20319 if (tp->t_flags & TF_NEEDFIN) 20320 flags |= TH_FIN; 20321 if (tp->t_flags & TF_NEEDSYN) 20322 flags |= TH_SYN; 20323 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 20324 void *end_rsm; 20325 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 20326 if (end_rsm) 20327 kern_prefetch(end_rsm, &prefetch_rsm); 20328 prefetch_rsm = 1; 20329 } 20330 SOCK_SENDBUF_LOCK(so); 20331 if ((sack_rxmit == 0) && 20332 (TCPS_HAVEESTABLISHED(tp->t_state) || 20333 (tp->t_flags & TF_FASTOPEN))) { 20334 /* 20335 * We are not retransmitting (sack_rxmit is 0) so we 20336 * are sending new data. This is always based on snd_max. 20337 * Now in theory snd_max may be equal to snd_una, if so 20338 * then nothing is outstanding and the offset would be 0. 20339 */ 20340 uint32_t avail; 20341 20342 avail = sbavail(sb); 20343 if (SEQ_GT(tp->snd_max, tp->snd_una) && avail) 20344 sb_offset = tp->snd_max - tp->snd_una; 20345 else 20346 sb_offset = 0; 20347 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { 20348 if (rack->r_ctl.rc_tlp_new_data) { 20349 /* TLP is forcing out new data */ 20350 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 20351 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 20352 } 20353 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { 20354 if (tp->snd_wnd > sb_offset) 20355 len = tp->snd_wnd - sb_offset; 20356 else 20357 len = 0; 20358 } else { 20359 len = rack->r_ctl.rc_tlp_new_data; 20360 } 20361 rack->r_ctl.rc_tlp_new_data = 0; 20362 } else { 20363 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); 20364 } 20365 if ((rack->r_ctl.crte == NULL) && 20366 IN_FASTRECOVERY(tp->t_flags) && 20367 (rack->full_size_rxt == 0) && 20368 (rack->shape_rxt_to_pacing_min == 0) && 20369 (len > segsiz)) { 20370 /* 20371 * For prr=off, we need to send only 1 MSS 20372 * at a time. 
We do this because another sack could 20373 * be arriving that causes us to send retransmits and 20374 * we don't want to be on a long pace due to a larger send 20375 * that keeps us from sending out the retransmit. 20376 */ 20377 len = segsiz; 20378 } else if (rack->shape_rxt_to_pacing_min && 20379 rack->gp_ready) { 20380 /* We use pacing min as shaping len req */ 20381 uint32_t maxlen; 20382 20383 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 20384 if (len > maxlen) 20385 len = maxlen; 20386 }/* The else is full_size_rxt is on so send it all */ 20387 } else { 20388 uint32_t outstanding; 20389 /* 20390 * We are inside of a Fast recovery episode, this 20391 * is caused by a SACK or 3 dup acks. At this point 20392 * we have sent all the retransmissions and we rely 20393 * on PRR to dictate what we will send in the form of 20394 * new data. 20395 */ 20396 20397 outstanding = tp->snd_max - tp->snd_una; 20398 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 20399 if (tp->snd_wnd > outstanding) { 20400 len = tp->snd_wnd - outstanding; 20401 /* Check to see if we have the data */ 20402 if ((sb_offset + len) > avail) { 20403 /* It does not all fit */ 20404 if (avail > sb_offset) 20405 len = avail - sb_offset; 20406 else 20407 len = 0; 20408 } 20409 } else { 20410 len = 0; 20411 } 20412 } else if (avail > sb_offset) { 20413 len = avail - sb_offset; 20414 } else { 20415 len = 0; 20416 } 20417 if (len > 0) { 20418 if (len > rack->r_ctl.rc_prr_sndcnt) { 20419 len = rack->r_ctl.rc_prr_sndcnt; 20420 } 20421 if (len > 0) { 20422 sub_from_prr = 1; 20423 } 20424 } 20425 if (len > segsiz) { 20426 /* 20427 * We should never send more than a MSS when 20428 * retransmitting or sending new data in prr 20429 * mode unless the override flag is on. Most 20430 * likely the PRR algorithm is not going to 20431 * let us send a lot as well :-) 20432 */ 20433 if (rack->r_ctl.rc_prr_sendalot == 0) { 20434 len = segsiz; 20435 } 20436 } else if (len < segsiz) { 20437 /* 20438 * Do we send any? The idea here is if the 20439 * send empty's the socket buffer we want to 20440 * do it. However if not then lets just wait 20441 * for our prr_sndcnt to get bigger. 20442 */ 20443 long leftinsb; 20444 20445 leftinsb = sbavail(sb) - sb_offset; 20446 if (leftinsb > len) { 20447 /* This send does not empty the sb */ 20448 len = 0; 20449 } 20450 } 20451 } 20452 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 20453 /* 20454 * If you have not established 20455 * and are not doing FAST OPEN 20456 * no data please. 20457 */ 20458 if ((sack_rxmit == 0) && 20459 !(tp->t_flags & TF_FASTOPEN)) { 20460 len = 0; 20461 sb_offset = 0; 20462 } 20463 } 20464 if (prefetch_so_done == 0) { 20465 kern_prefetch(so, &prefetch_so_done); 20466 prefetch_so_done = 1; 20467 } 20468 orig_len = len; 20469 /* 20470 * Lop off SYN bit if it has already been sent. However, if this is 20471 * SYN-SENT state and if segment contains data and if we don't know 20472 * that foreign host supports TAO, suppress sending segment. 20473 */ 20474 if ((flags & TH_SYN) && 20475 SEQ_GT(tp->snd_max, tp->snd_una) && 20476 ((sack_rxmit == 0) && 20477 (tp->t_rxtshift == 0))) { 20478 /* 20479 * When sending additional segments following a TFO SYN|ACK, 20480 * do not include the SYN bit. 20481 */ 20482 if ((tp->t_flags & TF_FASTOPEN) && 20483 (tp->t_state == TCPS_SYN_RECEIVED)) 20484 flags &= ~TH_SYN; 20485 } 20486 /* 20487 * Be careful not to send data and/or FIN on SYN segments. 
This 20488 * measure is needed to prevent interoperability problems with not 20489 * fully conformant TCP implementations. 20490 */ 20491 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 20492 len = 0; 20493 flags &= ~TH_FIN; 20494 } 20495 /* 20496 * On TFO sockets, ensure no data is sent in the following cases: 20497 * 20498 * - When retransmitting SYN|ACK on a passively-created socket 20499 * 20500 * - When retransmitting SYN on an actively created socket 20501 * 20502 * - When sending a zero-length cookie (cookie request) on an 20503 * actively created socket 20504 * 20505 * - When the socket is in the CLOSED state (RST is being sent) 20506 */ 20507 if ((tp->t_flags & TF_FASTOPEN) && 20508 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 20509 ((tp->t_state == TCPS_SYN_SENT) && 20510 (tp->t_tfo_client_cookie_len == 0)) || 20511 (flags & TH_RST))) { 20512 sack_rxmit = 0; 20513 len = 0; 20514 } 20515 /* Without fast-open there should never be data sent on a SYN */ 20516 if ((flags & TH_SYN) && !(tp->t_flags & TF_FASTOPEN)) { 20517 len = 0; 20518 } 20519 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 20520 /* We only send 1 MSS if we have a DSACK block */ 20521 add_flag |= RACK_SENT_W_DSACK; 20522 len = segsiz; 20523 } 20524 if (len <= 0) { 20525 /* 20526 * We have nothing to send, or the window shrank, or 20527 * is closed, do we need to go into persists? 20528 */ 20529 len = 0; 20530 if ((tp->snd_wnd == 0) && 20531 (TCPS_HAVEESTABLISHED(tp->t_state)) && 20532 (tp->snd_una == tp->snd_max) && 20533 (sb_offset < (int)sbavail(sb))) { 20534 rack_enter_persist(tp, rack, cts, tp->snd_una); 20535 } 20536 } else if ((rsm == NULL) && 20537 (doing_tlp == 0) && 20538 (len < pace_max_seg)) { 20539 /* 20540 * We are not sending a maximum sized segment for 20541 * some reason. Should we not send anything (think 20542 * sws or persists)? 20543 */ 20544 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 20545 (TCPS_HAVEESTABLISHED(tp->t_state)) && 20546 (len < minseg) && 20547 (len < (int)(sbavail(sb) - sb_offset))) { 20548 /* 20549 * Here the rwnd is less than 20550 * the minimum pacing size, this is not a retransmit, 20551 * we are established and 20552 * the send is not the last in the socket buffer 20553 * we send nothing, and we may enter persists 20554 * if nothing is outstanding. 20555 */ 20556 len = 0; 20557 if (tp->snd_max == tp->snd_una) { 20558 /* 20559 * Nothing out we can 20560 * go into persists. 20561 */ 20562 rack_enter_persist(tp, rack, cts, tp->snd_una); 20563 } 20564 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 20565 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 20566 (len < (int)(sbavail(sb) - sb_offset)) && 20567 (len < minseg)) { 20568 /* 20569 * Here we are not retransmitting, and 20570 * the cwnd is not so small that we could 20571 * not send at least a min size (rxt timer 20572 * not having gone off), We have 2 segments or 20573 * more already in flight, its not the tail end 20574 * of the socket buffer and the cwnd is blocking 20575 * us from sending out a minimum pacing segment size. 20576 * Lets not send anything. 20577 */ 20578 len = 0; 20579 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 20580 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 20581 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 20582 (len < (int)(sbavail(sb) - sb_offset)) && 20583 (TCPS_HAVEESTABLISHED(tp->t_state))) { 20584 /* 20585 * Here we have a send window but we have 20586 * filled it up and we can't send another pacing segment. 
20587 * We also have in flight more than 2 segments 20588 * and we are not completing the sb i.e. we allow 20589 * the last bytes of the sb to go out even if 20590 * its not a full pacing segment. 20591 */ 20592 len = 0; 20593 } else if ((rack->r_ctl.crte != NULL) && 20594 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && 20595 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) && 20596 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && 20597 (len < (int)(sbavail(sb) - sb_offset))) { 20598 /* 20599 * Here we are doing hardware pacing, this is not a TLP, 20600 * we are not sending a pace max segment size, there is rwnd 20601 * room to send at least N pace_max_seg, the cwnd is greater 20602 * than or equal to a full pacing segments plus 4 mss and we have 2 or 20603 * more segments in flight and its not the tail of the socket buffer. 20604 * 20605 * We don't want to send instead we need to get more ack's in to 20606 * allow us to send a full pacing segment. Normally, if we are pacing 20607 * about the right speed, we should have finished our pacing 20608 * send as most of the acks have come back if we are at the 20609 * right rate. This is a bit fuzzy since return path delay 20610 * can delay the acks, which is why we want to make sure we 20611 * have cwnd space to have a bit more than a max pace segments in flight. 20612 * 20613 * If we have not gotten our acks back we are pacing at too high a 20614 * rate delaying will not hurt and will bring our GP estimate down by 20615 * injecting the delay. If we don't do this we will send 20616 * 2 MSS out in response to the acks being clocked in which 20617 * defeats the point of hw-pacing (i.e. to help us get 20618 * larger TSO's out). 20619 */ 20620 len = 0; 20621 } 20622 20623 } 20624 /* len will be >= 0 after this point. */ 20625 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 20626 rack_sndbuf_autoscale(rack); 20627 /* 20628 * Decide if we can use TCP Segmentation Offloading (if supported by 20629 * hardware). 20630 * 20631 * TSO may only be used if we are in a pure bulk sending state. The 20632 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 20633 * options prevent using TSO. With TSO the TCP header is the same 20634 * (except for the sequence number) for all generated packets. This 20635 * makes it impossible to transmit any options which vary per 20636 * generated segment or packet. 20637 * 20638 * IPv4 handling has a clear separation of ip options and ip header 20639 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 20640 * the right thing below to provide length of just ip options and thus 20641 * checking for ipoptlen is enough to decide if ip options are present. 20642 */ 20643 ipoptlen = 0; 20644 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 20645 /* 20646 * Pre-calculate here as we save another lookup into the darknesses 20647 * of IPsec that way and can actually decide if TSO is ok. 
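 * (Net effect of the check further down: TSO is only attempted for a
 * plain new-data send of more than one MSS, with no UDP tunneling
 * port, no TCP-MD5 signature, no SACK retransmission and no IP or
 * IPsec options, and only when the connection has TF_TSO set and the
 * global TSO sysctl is enabled.)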
20648 */ 20649 #ifdef INET6 20650 if (isipv6 && IPSEC_ENABLED(ipv6)) 20651 ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp); 20652 #ifdef INET 20653 else 20654 #endif 20655 #endif /* INET6 */ 20656 #ifdef INET 20657 if (IPSEC_ENABLED(ipv4)) 20658 ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp); 20659 #endif /* INET */ 20660 #endif 20661 20662 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 20663 ipoptlen += ipsec_optlen; 20664 #endif 20665 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && 20666 (tp->t_port == 0) && 20667 ((tp->t_flags & TF_SIGNATURE) == 0) && 20668 sack_rxmit == 0 && 20669 ipoptlen == 0) 20670 tso = 1; 20671 { 20672 uint32_t outstanding __unused; 20673 20674 outstanding = tp->snd_max - tp->snd_una; 20675 if (tp->t_flags & TF_SENTFIN) { 20676 /* 20677 * If we sent a fin, snd_max is 1 higher than 20678 * snd_una 20679 */ 20680 outstanding--; 20681 } 20682 if (sack_rxmit) { 20683 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 20684 flags &= ~TH_FIN; 20685 } 20686 } 20687 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), 20688 (long)TCP_MAXWIN << tp->rcv_scale); 20689 20690 /* 20691 * Sender silly window avoidance. We transmit under the following 20692 * conditions when len is non-zero: 20693 * 20694 * - We have a full segment (or more with TSO) - This is the last 20695 * buffer in a write()/send() and we are either idle or running 20696 * NODELAY - we've timed out (e.g. persist timer) - we have more 20697 * then 1/2 the maximum send window's worth of data (receiver may be 20698 * limited the window size) - we need to retransmit 20699 */ 20700 if (len) { 20701 if (len >= segsiz) { 20702 goto send; 20703 } 20704 /* 20705 * NOTE! on localhost connections an 'ack' from the remote 20706 * end may occur synchronously with the output and cause us 20707 * to flush a buffer queued with moretocome. XXX 20708 * 20709 */ 20710 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 20711 (idle || (tp->t_flags & TF_NODELAY)) && 20712 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 20713 (tp->t_flags & TF_NOPUSH) == 0) { 20714 pass = 2; 20715 goto send; 20716 } 20717 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 20718 pass = 22; 20719 goto send; 20720 } 20721 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 20722 pass = 4; 20723 goto send; 20724 } 20725 if (sack_rxmit) { 20726 pass = 6; 20727 goto send; 20728 } 20729 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && 20730 (ctf_outstanding(tp) < (segsiz * 2))) { 20731 /* 20732 * We have less than two MSS outstanding (delayed ack) 20733 * and our rwnd will not let us send a full sized 20734 * MSS. Lets go ahead and let this small segment 20735 * out because we want to try to have at least two 20736 * packets inflight to not be caught by delayed ack. 20737 */ 20738 pass = 12; 20739 goto send; 20740 } 20741 } 20742 /* 20743 * Sending of standalone window updates. 20744 * 20745 * Window updates are important when we close our window due to a 20746 * full socket buffer and are opening it again after the application 20747 * reads data from it. Once the window has opened again and the 20748 * remote end starts to send again the ACK clock takes over and 20749 * provides the most current window information. 20750 * 20751 * We must avoid the silly window syndrome whereas every read from 20752 * the receive buffer, no matter how small, causes a window update 20753 * to be sent. 
We also should avoid sending a flurry of window 20754 * updates when the socket buffer had queued a lot of data and the 20755 * application is doing small reads. 20756 * 20757 * Prevent a flurry of pointless window updates by only sending an 20758 * update when we can increase the advertized window by more than 20759 * 1/4th of the socket buffer capacity. When the buffer is getting 20760 * full or is very small be more aggressive and send an update 20761 * whenever we can increase by two mss sized segments. In all other 20762 * situations the ACK's to new incoming data will carry further 20763 * window increases. 20764 * 20765 * Don't send an independent window update if a delayed ACK is 20766 * pending (it will get piggy-backed on it) or the remote side 20767 * already has done a half-close and won't send more data. Skip 20768 * this if the connection is in T/TCP half-open state. 20769 */ 20770 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 20771 !(tp->t_flags & TF_DELACK) && 20772 !TCPS_HAVERCVDFIN(tp->t_state)) { 20773 /* 20774 * "adv" is the amount we could increase the window, taking 20775 * into account that we are limited by TCP_MAXWIN << 20776 * tp->rcv_scale. 20777 */ 20778 int32_t adv; 20779 int oldwin; 20780 20781 adv = recwin; 20782 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 20783 oldwin = (tp->rcv_adv - tp->rcv_nxt); 20784 if (adv > oldwin) 20785 adv -= oldwin; 20786 else { 20787 /* We can't increase the window */ 20788 adv = 0; 20789 } 20790 } else 20791 oldwin = 0; 20792 20793 /* 20794 * If the new window size ends up being the same as or less 20795 * than the old size when it is scaled, then don't force 20796 * a window update. 20797 */ 20798 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 20799 goto dontupdate; 20800 20801 if (adv >= (int32_t)(2 * segsiz) && 20802 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 20803 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 20804 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 20805 pass = 7; 20806 goto send; 20807 } 20808 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 20809 pass = 23; 20810 goto send; 20811 } 20812 } 20813 dontupdate: 20814 20815 /* 20816 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 20817 * is also a catch-all for the retransmit timer timeout case. 20818 */ 20819 if (tp->t_flags & TF_ACKNOW) { 20820 pass = 8; 20821 goto send; 20822 } 20823 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 20824 pass = 9; 20825 goto send; 20826 } 20827 /* 20828 * If our state indicates that FIN should be sent and we have not 20829 * yet done so, then we need to send. 20830 */ 20831 if ((flags & TH_FIN) && 20832 (tp->snd_max == tp->snd_una)) { 20833 pass = 11; 20834 goto send; 20835 } 20836 /* 20837 * No reason to send a segment, just return. 20838 */ 20839 just_return: 20840 SOCK_SENDBUF_UNLOCK(so); 20841 just_return_nolock: 20842 { 20843 int app_limited = CTF_JR_SENT_DATA; 20844 20845 if ((tp->t_flags & TF_FASTOPEN) == 0 && 20846 (flags & TH_FIN) && 20847 (len == 0) && 20848 (sbused(sb) == (tp->snd_max - tp->snd_una)) && 20849 ((tp->snd_max - tp->snd_una) <= segsiz)) { 20850 /* 20851 * Ok less than or right at a MSS is 20852 * outstanding. The original FreeBSD stack would 20853 * have sent a FIN, which can speed things up for 20854 * a transactional application doing a MSG_WAITALL. 20855 * To speed things up since we do *not* send a FIN 20856 * if data is outstanding, we send a "challenge ack". 
20857 * The idea behind that is instead of having to have 20858 * the peer wait for the delayed-ack timer to run off 20859 * we send an ack that makes the peer send us an ack. 20860 */ 20861 rack_send_ack_challange(rack); 20862 } 20863 if (tot_len_this_send > 0) { 20864 rack->r_ctl.fsb.recwin = recwin; 20865 pacing_delay = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz, __LINE__); 20866 if ((error == 0) && 20867 rack_use_rfo && 20868 ((flags & (TH_SYN|TH_FIN)) == 0) && 20869 (ipoptlen == 0) && 20870 rack->r_fsb_inited && 20871 TCPS_HAVEESTABLISHED(tp->t_state) && 20872 ((IN_RECOVERY(tp->t_flags)) == 0) && 20873 (doing_tlp == 0) && 20874 (rack->r_must_retran == 0) && 20875 ((tp->t_flags & TF_NEEDFIN) == 0) && 20876 (len > 0) && (orig_len > 0) && 20877 (orig_len > len) && 20878 ((orig_len - len) >= segsiz) && 20879 ((optlen == 0) || 20880 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 20881 /* We can send at least one more MSS using our fsb */ 20882 rack_setup_fast_output(tp, rack, sb, len, orig_len, 20883 segsiz, pace_max_seg, hw_tls, flags); 20884 } else 20885 rack->r_fast_output = 0; 20886 rack_log_fsb(rack, tp, so, flags, 20887 ipoptlen, orig_len, len, 0, 20888 1, optlen, __LINE__, 1); 20889 /* Assure when we leave that snd_nxt will point to top */ 20890 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 20891 tp->snd_nxt = tp->snd_max; 20892 } else { 20893 int end_window = 0; 20894 uint32_t seq = tp->gput_ack; 20895 20896 rsm = tqhash_max(rack->r_ctl.tqh); 20897 if (rsm) { 20898 /* 20899 * Mark the last sent that we just-returned (hinting 20900 * that delayed ack may play a role in any rtt measurement). 20901 */ 20902 rsm->r_just_ret = 1; 20903 } 20904 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 20905 rack->r_ctl.rc_agg_delayed = 0; 20906 rack->r_early = 0; 20907 rack->r_late = 0; 20908 rack->r_ctl.rc_agg_early = 0; 20909 if ((ctf_outstanding(tp) + 20910 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), 20911 minseg)) >= tp->snd_wnd) { 20912 /* We are limited by the rwnd */ 20913 app_limited = CTF_JR_RWND_LIMITED; 20914 if (IN_FASTRECOVERY(tp->t_flags)) 20915 rack->r_ctl.rc_prr_sndcnt = 0; 20916 } else if (ctf_outstanding(tp) >= sbavail(sb)) { 20917 /* We are limited by whats available -- app limited */ 20918 app_limited = CTF_JR_APP_LIMITED; 20919 if (IN_FASTRECOVERY(tp->t_flags)) 20920 rack->r_ctl.rc_prr_sndcnt = 0; 20921 } else if ((idle == 0) && 20922 ((tp->t_flags & TF_NODELAY) == 0) && 20923 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 20924 (len < segsiz)) { 20925 /* 20926 * No delay is not on and the 20927 * user is sending less than 1MSS. This 20928 * brings out SWS avoidance so we 20929 * don't send. Another app-limited case. 20930 */ 20931 app_limited = CTF_JR_APP_LIMITED; 20932 } else if (tp->t_flags & TF_NOPUSH) { 20933 /* 20934 * The user has requested no push of 20935 * the last segment and we are 20936 * at the last segment. Another app 20937 * limited case. 20938 */ 20939 app_limited = CTF_JR_APP_LIMITED; 20940 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) { 20941 /* Its the cwnd */ 20942 app_limited = CTF_JR_CWND_LIMITED; 20943 } else if (IN_FASTRECOVERY(tp->t_flags) && 20944 (rack->rack_no_prr == 0) && 20945 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { 20946 app_limited = CTF_JR_PRR; 20947 } else { 20948 /* Now why here are we not sending? 
*/
20949 #ifdef NOW
20950 #ifdef INVARIANTS
20951 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use);
20952 #endif
20953 #endif
20954 app_limited = CTF_JR_ASSESSING;
20955 }
20956 /*
20957 * App limited in some fashion; for our pacing GP
20958 * measurements we don't want any gap (even cwnd).
20959 * Close down the measurement window.
20960 */
20961 if (rack_cwnd_block_ends_measure &&
20962 ((app_limited == CTF_JR_CWND_LIMITED) ||
20963 (app_limited == CTF_JR_PRR))) {
20964 /*
20965 * The reason we are not sending is
20966 * the cwnd (or prr). We have been configured
20967 * to end the measurement window in
20968 * this case.
20969 */
20970 end_window = 1;
20971 } else if (rack_rwnd_block_ends_measure &&
20972 (app_limited == CTF_JR_RWND_LIMITED)) {
20973 /*
20974 * We are rwnd limited and have been
20975 * configured to end the measurement
20976 * window in this case.
20977 */
20978 end_window = 1;
20979 } else if (app_limited == CTF_JR_APP_LIMITED) {
20980 /*
20981 * A true application limited period; we have
20982 * run out of data.
20983 */
20984 end_window = 1;
20985 } else if (app_limited == CTF_JR_ASSESSING) {
20986 /*
20987 * In the assessing case we hit the end of
20988 * the if/else and had no known reason.
20989 * This will panic us under invariants.
20990 *
20991 * If we get this out in logs we need to
20992 * investigate which reason we missed.
20993 */
20994 end_window = 1;
20995 }
20996 if (end_window) {
20997 uint8_t log = 0;
20998
20999 /* Adjust the Gput measurement */
21000 if ((tp->t_flags & TF_GPUTINPROG) &&
21001 SEQ_GT(tp->gput_ack, tp->snd_max)) {
21002 tp->gput_ack = tp->snd_max;
21003 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) {
21004 /*
21005 * There is not enough to measure.
21006 */
21007 tp->t_flags &= ~TF_GPUTINPROG;
21008 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
21009 rack->r_ctl.rc_gp_srtt /*flex1*/,
21010 tp->gput_seq,
21011 0, 0, 18, __LINE__, NULL, 0);
21012 } else
21013 log = 1;
21014 }
21015 /* Mark the last packet as app limited */
21016 rsm = tqhash_max(rack->r_ctl.tqh);
21017 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
21018 if (rack->r_ctl.rc_app_limited_cnt == 0)
21019 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
21020 else {
21021 /*
21022 * Go out to the end app limited and mark
21023 * this new one as next and move the end_appl up
21024 * to this guy.
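 * (This builds a forward chain of app-limited marks: rc_first_appl
 * points at the oldest marked block, each block's r_nseq_appl records
 * where the next one starts, and rc_end_appl always tracks the newest
 * so the following mark can be linked onto it.)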
21025 */ 21026 if (rack->r_ctl.rc_end_appl) 21027 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 21028 rack->r_ctl.rc_end_appl = rsm; 21029 } 21030 rsm->r_flags |= RACK_APP_LIMITED; 21031 rack->r_ctl.rc_app_limited_cnt++; 21032 } 21033 if (log) 21034 rack_log_pacing_delay_calc(rack, 21035 rack->r_ctl.rc_app_limited_cnt, seq, 21036 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); 21037 } 21038 } 21039 /* Check if we need to go into persists or not */ 21040 if ((tp->snd_max == tp->snd_una) && 21041 TCPS_HAVEESTABLISHED(tp->t_state) && 21042 sbavail(sb) && 21043 (sbavail(sb) > tp->snd_wnd) && 21044 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 21045 /* Yes lets make sure to move to persist before timer-start */ 21046 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); 21047 } 21048 rack_start_hpts_timer(rack, tp, cts, pacing_delay, tot_len_this_send, sup_rack); 21049 rack_log_type_just_return(rack, cts, tot_len_this_send, pacing_delay, hpts_calling, app_limited, cwnd_to_use); 21050 } 21051 #ifdef NETFLIX_SHARED_CWND 21052 if ((sbavail(sb) == 0) && 21053 rack->r_ctl.rc_scw) { 21054 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 21055 rack->rack_scwnd_is_idle = 1; 21056 } 21057 #endif 21058 #ifdef TCP_ACCOUNTING 21059 if (tot_len_this_send > 0) { 21060 crtsc = get_cyclecount(); 21061 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21062 tp->tcp_cnt_counters[SND_OUT_DATA]++; 21063 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 21064 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 21065 } 21066 } else { 21067 crtsc = get_cyclecount(); 21068 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21069 tp->tcp_cnt_counters[SND_LIMITED]++; 21070 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 21071 } 21072 } 21073 sched_unpin(); 21074 #endif 21075 return (0); 21076 21077 send: 21078 if ((rack->r_ctl.crte != NULL) && 21079 (rsm == NULL) && 21080 ((rack->rc_hw_nobuf == 1) || 21081 (rack_hw_check_queue && (check_done == 0)))) { 21082 /* 21083 * We only want to do this once with the hw_check_queue, 21084 * for the enobuf case we would only do it once if 21085 * we come around to again, the flag will be clear. 21086 */ 21087 check_done = 1; 21088 pacing_delay = rack_check_queue_level(rack, tp, &tv, cts, len, segsiz); 21089 if (pacing_delay) { 21090 rack->r_ctl.rc_agg_delayed = 0; 21091 rack->r_ctl.rc_agg_early = 0; 21092 rack->r_early = 0; 21093 rack->r_late = 0; 21094 SOCK_SENDBUF_UNLOCK(so); 21095 goto skip_all_send; 21096 } 21097 } 21098 if (rsm || sack_rxmit) 21099 counter_u64_add(rack_nfto_resend, 1); 21100 else 21101 counter_u64_add(rack_non_fto_send, 1); 21102 if ((flags & TH_FIN) && 21103 sbavail(sb)) { 21104 /* 21105 * We do not transmit a FIN 21106 * with data outstanding. We 21107 * need to make it so all data 21108 * is acked first. 21109 */ 21110 flags &= ~TH_FIN; 21111 if (TCPS_HAVEESTABLISHED(tp->t_state) && 21112 (sbused(sb) == (tp->snd_max - tp->snd_una)) && 21113 ((tp->snd_max - tp->snd_una) <= segsiz)) { 21114 /* 21115 * Ok less than or right at a MSS is 21116 * outstanding. The original FreeBSD stack would 21117 * have sent a FIN, which can speed things up for 21118 * a transactional application doing a MSG_WAITALL. 21119 * To speed things up since we do *not* send a FIN 21120 * if data is outstanding, we send a "challenge ack". 21121 * The idea behind that is instead of having to have 21122 * the peer wait for the delayed-ack timer to run off 21123 * we send an ack that makes the peer send us an ack. 
21124 */ 21125 rack_send_ack_challange(rack); 21126 } 21127 } 21128 /* Enforce stack imposed max seg size if we have one */ 21129 if (pace_max_seg && 21130 (len > pace_max_seg)) { 21131 mark = 1; 21132 len = pace_max_seg; 21133 } 21134 if ((rsm == NULL) && 21135 (rack->pcm_in_progress == 0) && 21136 (rack->r_ctl.pcm_max_seg > 0) && 21137 (len >= rack->r_ctl.pcm_max_seg)) { 21138 /* It is large enough for a measurement */ 21139 add_flag |= RACK_IS_PCM; 21140 rack_log_pcm(rack, 5, len, rack->r_ctl.pcm_max_seg, add_flag); 21141 } else if (rack_verbose_logging) { 21142 rack_log_pcm(rack, 6, len, rack->r_ctl.pcm_max_seg, add_flag); 21143 } 21144 21145 SOCKBUF_LOCK_ASSERT(sb); 21146 if (len > 0) { 21147 if (len >= segsiz) 21148 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 21149 else 21150 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 21151 } 21152 /* 21153 * Before ESTABLISHED, force sending of initial options unless TCP 21154 * set not to do any options. NOTE: we assume that the IP/TCP header 21155 * plus TCP options always fit in a single mbuf, leaving room for a 21156 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 21157 * + optlen <= MCLBYTES 21158 */ 21159 optlen = 0; 21160 #ifdef INET6 21161 if (isipv6) 21162 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 21163 else 21164 #endif 21165 hdrlen = sizeof(struct tcpiphdr); 21166 21167 /* 21168 * Ok what seq are we sending from. If we have 21169 * no rsm to use, then we look at various bits, 21170 * if we are putting out a SYN it will be ISS. 21171 * If we are retransmitting a FIN it will 21172 * be snd_max-1 else its snd_max. 21173 */ 21174 if (rsm == NULL) { 21175 if (flags & TH_SYN) 21176 rack_seq = tp->iss; 21177 else if ((flags & TH_FIN) && 21178 (tp->t_flags & TF_SENTFIN)) 21179 rack_seq = tp->snd_max - 1; 21180 else 21181 rack_seq = tp->snd_max; 21182 } else { 21183 rack_seq = rsm->r_start; 21184 } 21185 /* 21186 * Compute options for segment. We only have to care about SYN and 21187 * established connection segments. Options for SYN-ACK segments 21188 * are handled in TCP syncache. 21189 */ 21190 to.to_flags = 0; 21191 if ((tp->t_flags & TF_NOOPT) == 0) { 21192 /* Maximum segment size. */ 21193 if (flags & TH_SYN) { 21194 to.to_mss = tcp_mssopt(&inp->inp_inc); 21195 if (tp->t_port) 21196 to.to_mss -= V_tcp_udp_tunneling_overhead; 21197 to.to_flags |= TOF_MSS; 21198 21199 /* 21200 * On SYN or SYN|ACK transmits on TFO connections, 21201 * only include the TFO option if it is not a 21202 * retransmit, as the presence of the TFO option may 21203 * have caused the original SYN or SYN|ACK to have 21204 * been dropped by a middlebox. 21205 */ 21206 if ((tp->t_flags & TF_FASTOPEN) && 21207 (tp->t_rxtshift == 0)) { 21208 if (tp->t_state == TCPS_SYN_RECEIVED) { 21209 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 21210 to.to_tfo_cookie = 21211 (u_int8_t *)&tp->t_tfo_cookie.server; 21212 to.to_flags |= TOF_FASTOPEN; 21213 wanted_cookie = 1; 21214 } else if (tp->t_state == TCPS_SYN_SENT) { 21215 to.to_tfo_len = 21216 tp->t_tfo_client_cookie_len; 21217 to.to_tfo_cookie = 21218 tp->t_tfo_cookie.client; 21219 to.to_flags |= TOF_FASTOPEN; 21220 wanted_cookie = 1; 21221 /* 21222 * If we wind up having more data to 21223 * send with the SYN than can fit in 21224 * one segment, don't send any more 21225 * until the SYN|ACK comes back from 21226 * the other end. 21227 */ 21228 sendalot = 0; 21229 } 21230 } 21231 } 21232 /* Window scaling. 
*/ 21233 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 21234 to.to_wscale = tp->request_r_scale; 21235 to.to_flags |= TOF_SCALE; 21236 } 21237 /* Timestamps. */ 21238 if ((tp->t_flags & TF_RCVD_TSTMP) || 21239 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 21240 uint32_t ts_to_use; 21241 21242 if ((rack->r_rcvpath_rtt_up == 1) && 21243 (ms_cts == rack->r_ctl.last_rcv_tstmp_for_rtt)) { 21244 /* 21245 * When we are doing a rcv_rtt probe all 21246 * other timestamps use the next msec. This 21247 * is safe since our previous ack is in the 21248 * air and we will just have a few more 21249 * on the next ms. This assures that only 21250 * the one ack has the ms_cts that was on 21251 * our ack-probe. 21252 */ 21253 ts_to_use = ms_cts + 1; 21254 } else { 21255 ts_to_use = ms_cts; 21256 } 21257 to.to_tsval = ts_to_use + tp->ts_offset; 21258 to.to_tsecr = tp->ts_recent; 21259 to.to_flags |= TOF_TS; 21260 if ((len == 0) && 21261 (TCPS_HAVEESTABLISHED(tp->t_state)) && 21262 ((ms_cts - rack->r_ctl.last_rcv_tstmp_for_rtt) > RCV_PATH_RTT_MS) && 21263 (tp->snd_una == tp->snd_max) && 21264 (flags & TH_ACK) && 21265 (sbavail(sb) == 0) && 21266 (rack->r_ctl.current_round != 0) && 21267 ((flags & (TH_SYN|TH_FIN)) == 0) && 21268 (rack->r_rcvpath_rtt_up == 0)) { 21269 rack->r_ctl.last_rcv_tstmp_for_rtt = ms_cts; 21270 rack->r_ctl.last_time_of_arm_rcv = cts; 21271 rack->r_rcvpath_rtt_up = 1; 21272 /* Subtract 1 from seq to force a response */ 21273 rack_seq--; 21274 } 21275 } 21276 /* Set receive buffer autosizing timestamp. */ 21277 if (tp->rfbuf_ts == 0 && 21278 (so->so_rcv.sb_flags & SB_AUTOSIZE)) { 21279 tp->rfbuf_ts = ms_cts; 21280 } 21281 /* Selective ACK's. */ 21282 if (tp->t_flags & TF_SACK_PERMIT) { 21283 if (flags & TH_SYN) 21284 to.to_flags |= TOF_SACKPERM; 21285 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 21286 tp->rcv_numsacks > 0) { 21287 to.to_flags |= TOF_SACK; 21288 to.to_nsacks = tp->rcv_numsacks; 21289 to.to_sacks = (u_char *)tp->sackblks; 21290 } 21291 } 21292 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 21293 /* TCP-MD5 (RFC2385). */ 21294 if (tp->t_flags & TF_SIGNATURE) 21295 to.to_flags |= TOF_SIGNATURE; 21296 #endif 21297 21298 /* Processing the options. */ 21299 hdrlen += optlen = tcp_addoptions(&to, opt); 21300 /* 21301 * If we wanted a TFO option to be added, but it was unable 21302 * to fit, ensure no data is sent. 21303 */ 21304 if ((tp->t_flags & TF_FASTOPEN) && wanted_cookie && 21305 !(to.to_flags & TOF_FASTOPEN)) 21306 len = 0; 21307 } 21308 if (tp->t_port) { 21309 if (V_tcp_udp_tunneling_port == 0) { 21310 /* The port was removed?? */ 21311 SOCK_SENDBUF_UNLOCK(so); 21312 #ifdef TCP_ACCOUNTING 21313 crtsc = get_cyclecount(); 21314 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21315 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 21316 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 21317 } 21318 sched_unpin(); 21319 #endif 21320 return (EHOSTUNREACH); 21321 } 21322 hdrlen += sizeof(struct udphdr); 21323 } 21324 #ifdef INET6 21325 if (isipv6) 21326 ipoptlen = ip6_optlen(inp); 21327 else 21328 #endif 21329 if (inp->inp_options) 21330 ipoptlen = inp->inp_options->m_len - 21331 offsetof(struct ipoption, ipopt_list); 21332 else 21333 ipoptlen = 0; 21334 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 21335 ipoptlen += ipsec_optlen; 21336 #endif 21337 21338 /* 21339 * Adjust data length if insertion of options will bump the packet 21340 * length beyond the t_maxseg length. Clear the FIN bit because we 21341 * cut off the tail of the segment. 
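 * Illustrative numbers for the TSO trimming below: with t_maxseg 1460
 * and optlen 12 the per-segment payload is 1448 bytes; a 10000-byte
 * send that does not drain the socket buffer is cut by
 * 10000 % 1448 = 1312 bytes down to 8688, so every segment the
 * hardware emits is full sized, and if what remains fits in a single
 * segment TSO is switched back off.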
21342 */ 21343 if (len + optlen + ipoptlen > tp->t_maxseg) { 21344 if (tso) { 21345 uint32_t if_hw_tsomax; 21346 uint32_t moff; 21347 int32_t max_len; 21348 21349 /* extract TSO information */ 21350 if_hw_tsomax = tp->t_tsomax; 21351 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 21352 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 21353 KASSERT(ipoptlen == 0, 21354 ("%s: TSO can't do IP options", __func__)); 21355 21356 /* 21357 * Check if we should limit by maximum payload 21358 * length: 21359 */ 21360 if (if_hw_tsomax != 0) { 21361 /* compute maximum TSO length */ 21362 max_len = (if_hw_tsomax - hdrlen - 21363 max_linkhdr); 21364 if (max_len <= 0) { 21365 len = 0; 21366 } else if (len > max_len) { 21367 if (doing_tlp == 0) 21368 sendalot = 1; 21369 len = max_len; 21370 mark = 2; 21371 } 21372 } 21373 /* 21374 * Prevent the last segment from being fractional 21375 * unless the send sockbuf can be emptied: 21376 */ 21377 max_len = (tp->t_maxseg - optlen); 21378 if ((sb_offset + len) < sbavail(sb)) { 21379 moff = len % (u_int)max_len; 21380 if (moff != 0) { 21381 mark = 3; 21382 len -= moff; 21383 } 21384 } 21385 /* 21386 * In case there are too many small fragments don't 21387 * use TSO: 21388 */ 21389 if (len <= max_len) { 21390 mark = 4; 21391 tso = 0; 21392 } 21393 /* 21394 * Send the FIN in a separate segment after the bulk 21395 * sending is done. We don't trust the TSO 21396 * implementations to clear the FIN flag on all but 21397 * the last segment. 21398 */ 21399 if (tp->t_flags & TF_NEEDFIN) { 21400 sendalot = 4; 21401 } 21402 } else { 21403 mark = 5; 21404 if (optlen + ipoptlen >= tp->t_maxseg) { 21405 /* 21406 * Since we don't have enough space to put 21407 * the IP header chain and the TCP header in 21408 * one packet as required by RFC 7112, don't 21409 * send it. Also ensure that at least one 21410 * byte of the payload can be put into the 21411 * TCP segment. 21412 */ 21413 SOCK_SENDBUF_UNLOCK(so); 21414 error = EMSGSIZE; 21415 sack_rxmit = 0; 21416 goto out; 21417 } 21418 len = tp->t_maxseg - optlen - ipoptlen; 21419 sendalot = 5; 21420 } 21421 } else { 21422 tso = 0; 21423 mark = 6; 21424 } 21425 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 21426 ("%s: len > IP_MAXPACKET", __func__)); 21427 #ifdef DIAGNOSTIC 21428 #ifdef INET6 21429 if (max_linkhdr + hdrlen > MCLBYTES) 21430 #else 21431 if (max_linkhdr + hdrlen > MHLEN) 21432 #endif 21433 panic("tcphdr too big"); 21434 #endif 21435 21436 /* 21437 * This KASSERT is here to catch edge cases at a well defined place. 21438 * Before, those had triggered (random) panic conditions further 21439 * down. 21440 */ 21441 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 21442 if ((len == 0) && 21443 (flags & TH_FIN) && 21444 (sbused(sb))) { 21445 /* 21446 * We have outstanding data, don't send a fin by itself!. 21447 * 21448 * Check to see if we need to send a challenge ack. 21449 */ 21450 if ((sbused(sb) == (tp->snd_max - tp->snd_una)) && 21451 ((tp->snd_max - tp->snd_una) <= segsiz)) { 21452 /* 21453 * Ok less than or right at a MSS is 21454 * outstanding. The original FreeBSD stack would 21455 * have sent a FIN, which can speed things up for 21456 * a transactional application doing a MSG_WAITALL. 21457 * To speed things up since we do *not* send a FIN 21458 * if data is outstanding, we send a "challenge ack". 21459 * The idea behind that is instead of having to have 21460 * the peer wait for the delayed-ack timer to run off 21461 * we send an ack that makes the peer send us an ack. 
21462 */ 21463 rack_send_ack_challange(rack); 21464 } 21465 goto just_return; 21466 } 21467 /* 21468 * Grab a header mbuf, attaching a copy of data to be transmitted, 21469 * and initialize the header from the template for sends on this 21470 * connection. 21471 */ 21472 hw_tls = tp->t_nic_ktls_xmit != 0; 21473 if (len) { 21474 uint32_t max_val; 21475 uint32_t moff; 21476 21477 if (pace_max_seg) 21478 max_val = pace_max_seg; 21479 else 21480 max_val = len; 21481 /* 21482 * We allow a limit on sending with hptsi. 21483 */ 21484 if (len > max_val) { 21485 mark = 7; 21486 len = max_val; 21487 } 21488 #ifdef INET6 21489 if (MHLEN < hdrlen + max_linkhdr) 21490 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 21491 else 21492 #endif 21493 m = m_gethdr(M_NOWAIT, MT_DATA); 21494 21495 if (m == NULL) { 21496 SOCK_SENDBUF_UNLOCK(so); 21497 error = ENOBUFS; 21498 sack_rxmit = 0; 21499 goto out; 21500 } 21501 m->m_data += max_linkhdr; 21502 m->m_len = hdrlen; 21503 21504 /* 21505 * Start the m_copy functions from the closest mbuf to the 21506 * sb_offset in the socket buffer chain. 21507 */ 21508 mb = sbsndptr_noadv(sb, sb_offset, &moff); 21509 s_mb = mb; 21510 s_moff = moff; 21511 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 21512 m_copydata(mb, moff, (int)len, 21513 mtod(m, caddr_t)+hdrlen); 21514 /* 21515 * If we are not retransmitting advance the 21516 * sndptr to help remember the next place in 21517 * the sb. 21518 */ 21519 if (rsm == NULL) 21520 sbsndptr_adv(sb, mb, len); 21521 m->m_len += len; 21522 } else { 21523 struct sockbuf *msb; 21524 21525 /* 21526 * If we are not retransmitting pass in msb so 21527 * the socket buffer can be advanced. Otherwise 21528 * set it to NULL if its a retransmission since 21529 * we don't want to change the sb remembered 21530 * location. 21531 */ 21532 if (rsm == NULL) 21533 msb = sb; 21534 else 21535 msb = NULL; 21536 m->m_next = tcp_m_copym( 21537 mb, moff, &len, 21538 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 21539 ((rsm == NULL) ? hw_tls : 0)); 21540 if (len <= (tp->t_maxseg - optlen)) { 21541 /* 21542 * Must have ran out of mbufs for the copy 21543 * shorten it to no longer need tso. Lets 21544 * not put on sendalot since we are low on 21545 * mbufs. 21546 */ 21547 tso = 0; 21548 } 21549 if (m->m_next == NULL) { 21550 SOCK_SENDBUF_UNLOCK(so); 21551 (void)m_free(m); 21552 error = ENOBUFS; 21553 sack_rxmit = 0; 21554 goto out; 21555 } 21556 } 21557 if (sack_rxmit) { 21558 if (rsm && (rsm->r_flags & RACK_TLP)) { 21559 /* 21560 * TLP should not count in retran count, but 21561 * in its own bin 21562 */ 21563 counter_u64_add(rack_tlp_retran, 1); 21564 counter_u64_add(rack_tlp_retran_bytes, len); 21565 } else { 21566 tp->t_sndrexmitpack++; 21567 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 21568 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 21569 } 21570 #ifdef STATS 21571 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 21572 len); 21573 #endif 21574 } else { 21575 KMOD_TCPSTAT_INC(tcps_sndpack); 21576 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 21577 #ifdef STATS 21578 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 21579 len); 21580 #endif 21581 } 21582 /* 21583 * If we're sending everything we've got, set PUSH. (This 21584 * will keep happy those implementations which only give 21585 * data to the user when a buffer fills or a PUSH comes in.) 
21586 */ 21587 if (sb_offset + len == sbused(sb) && 21588 sbused(sb) && 21589 !(flags & TH_SYN)) { 21590 flags |= TH_PUSH; 21591 add_flag |= RACK_HAD_PUSH; 21592 } 21593 SOCK_SENDBUF_UNLOCK(so); 21594 } else { 21595 SOCK_SENDBUF_UNLOCK(so); 21596 if (tp->t_flags & TF_ACKNOW) 21597 KMOD_TCPSTAT_INC(tcps_sndacks); 21598 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 21599 KMOD_TCPSTAT_INC(tcps_sndctrl); 21600 else 21601 KMOD_TCPSTAT_INC(tcps_sndwinup); 21602 21603 m = m_gethdr(M_NOWAIT, MT_DATA); 21604 if (m == NULL) { 21605 error = ENOBUFS; 21606 sack_rxmit = 0; 21607 goto out; 21608 } 21609 #ifdef INET6 21610 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 21611 MHLEN >= hdrlen) { 21612 M_ALIGN(m, hdrlen); 21613 } else 21614 #endif 21615 m->m_data += max_linkhdr; 21616 m->m_len = hdrlen; 21617 } 21618 SOCK_SENDBUF_UNLOCK_ASSERT(so); 21619 m->m_pkthdr.rcvif = (struct ifnet *)0; 21620 #ifdef MAC 21621 mac_inpcb_create_mbuf(inp, m); 21622 #endif 21623 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 21624 #ifdef INET6 21625 if (isipv6) 21626 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 21627 else 21628 #endif /* INET6 */ 21629 #ifdef INET 21630 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 21631 #endif 21632 th = rack->r_ctl.fsb.th; 21633 udp = rack->r_ctl.fsb.udp; 21634 if (udp) { 21635 #ifdef INET6 21636 if (isipv6) 21637 ulen = hdrlen + len - sizeof(struct ip6_hdr); 21638 else 21639 #endif /* INET6 */ 21640 ulen = hdrlen + len - sizeof(struct ip); 21641 udp->uh_ulen = htons(ulen); 21642 } 21643 } else { 21644 #ifdef INET6 21645 if (isipv6) { 21646 ip6 = mtod(m, struct ip6_hdr *); 21647 if (tp->t_port) { 21648 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 21649 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 21650 udp->uh_dport = tp->t_port; 21651 ulen = hdrlen + len - sizeof(struct ip6_hdr); 21652 udp->uh_ulen = htons(ulen); 21653 th = (struct tcphdr *)(udp + 1); 21654 } else 21655 th = (struct tcphdr *)(ip6 + 1); 21656 tcpip_fillheaders(inp, tp->t_port, ip6, th); 21657 } else 21658 #endif /* INET6 */ 21659 { 21660 #ifdef INET 21661 ip = mtod(m, struct ip *); 21662 if (tp->t_port) { 21663 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 21664 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 21665 udp->uh_dport = tp->t_port; 21666 ulen = hdrlen + len - sizeof(struct ip); 21667 udp->uh_ulen = htons(ulen); 21668 th = (struct tcphdr *)(udp + 1); 21669 } else 21670 th = (struct tcphdr *)(ip + 1); 21671 tcpip_fillheaders(inp, tp->t_port, ip, th); 21672 #endif 21673 } 21674 } 21675 /* 21676 * If we are starting a connection, send ECN setup SYN packet. If we 21677 * are on a retransmit, we may resend those bits a number of times 21678 * as per RFC 3168. 
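 * (With classic RFC 3168 signalling this means the initial SYN is
 * sent with CWR|ECE; if that SYN has to be retransmitted the helper
 * below decides whether the setup bits are carried again or dropped.)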
21679 */ 21680 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { 21681 flags |= tcp_ecn_output_syn_sent(tp); 21682 } 21683 /* Also handle parallel SYN for ECN */ 21684 if (TCPS_HAVERCVDSYN(tp->t_state) && 21685 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 21686 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit); 21687 if ((tp->t_state == TCPS_SYN_RECEIVED) && 21688 (tp->t_flags2 & TF2_ECN_SND_ECE)) 21689 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 21690 #ifdef INET6 21691 if (isipv6) { 21692 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 21693 ip6->ip6_flow |= htonl(ect << 20); 21694 } 21695 else 21696 #endif 21697 { 21698 #ifdef INET 21699 ip->ip_tos &= ~IPTOS_ECN_MASK; 21700 ip->ip_tos |= ect; 21701 #endif 21702 } 21703 } 21704 th->th_seq = htonl(rack_seq); 21705 th->th_ack = htonl(tp->rcv_nxt); 21706 tcp_set_flags(th, flags); 21707 /* 21708 * Calculate receive window. Don't shrink window, but avoid silly 21709 * window syndrome. 21710 * If a RST segment is sent, advertise a window of zero. 21711 */ 21712 if (flags & TH_RST) { 21713 recwin = 0; 21714 } else { 21715 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 21716 recwin < (long)segsiz) { 21717 recwin = 0; 21718 } 21719 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 21720 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 21721 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 21722 } 21723 21724 /* 21725 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 21726 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 21727 * handled in syncache. 21728 */ 21729 if (flags & TH_SYN) 21730 th->th_win = htons((u_short) 21731 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 21732 else { 21733 /* Avoid shrinking window with window scaling. */ 21734 recwin = roundup2(recwin, 1 << tp->rcv_scale); 21735 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 21736 } 21737 /* 21738 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 21739 * window. This may cause the remote transmitter to stall. This 21740 * flag tells soreceive() to disable delayed acknowledgements when 21741 * draining the buffer. This can occur if the receiver is 21742 * attempting to read more data than can be buffered prior to 21743 * transmitting on the connection. 21744 */ 21745 if (th->th_win == 0) { 21746 tp->t_sndzerowin++; 21747 tp->t_flags |= TF_RXWIN0SENT; 21748 } else 21749 tp->t_flags &= ~TF_RXWIN0SENT; 21750 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 21751 /* Now are we using fsb?, if so copy the template data to the mbuf */ 21752 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 21753 uint8_t *cpto; 21754 21755 cpto = mtod(m, uint8_t *); 21756 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 21757 /* 21758 * We have just copied in: 21759 * IP/IP6 21760 * <optional udphdr> 21761 * tcphdr (no options) 21762 * 21763 * We need to grab the correct pointers into the mbuf 21764 * for both the tcp header, and possibly the udp header (if tunneling). 21765 * We do this by using the offset in the copy buffer and adding it 21766 * to the mbuf base pointer (cpto). 
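 * Illustrative offsets: for IPv4 with no UDP tunnel the TCP header
 * sits sizeof(struct ip) == 20 bytes into the template, so th ends
 * up at cpto + 20; with a tunnel the 8-byte udphdr precedes it and
 * th lands at cpto + 28. Whatever relative offsets held in the
 * template hold in the copy.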
21767 */ 21768 #ifdef INET6 21769 if (isipv6) 21770 ip6 = mtod(m, struct ip6_hdr *); 21771 else 21772 #endif /* INET6 */ 21773 #ifdef INET 21774 ip = mtod(m, struct ip *); 21775 #endif 21776 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 21777 /* If we have a udp header lets set it into the mbuf as well */ 21778 if (udp) 21779 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); 21780 } 21781 if (optlen) { 21782 bcopy(opt, th + 1, optlen); 21783 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 21784 } 21785 /* 21786 * Put TCP length in extended header, and then checksum extended 21787 * header and data. 21788 */ 21789 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 21790 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 21791 if (to.to_flags & TOF_SIGNATURE) { 21792 /* 21793 * Calculate MD5 signature and put it into the place 21794 * determined before. 21795 * NOTE: since TCP options buffer doesn't point into 21796 * mbuf's data, calculate offset and use it. 21797 */ 21798 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 21799 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 21800 /* 21801 * Do not send segment if the calculation of MD5 21802 * digest has failed. 21803 */ 21804 goto out; 21805 } 21806 } 21807 #endif 21808 #ifdef INET6 21809 if (isipv6) { 21810 /* 21811 * ip6_plen is not need to be filled now, and will be filled 21812 * in ip6_output. 21813 */ 21814 if (tp->t_port) { 21815 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 21816 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 21817 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 21818 th->th_sum = htons(0); 21819 UDPSTAT_INC(udps_opackets); 21820 } else { 21821 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 21822 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 21823 th->th_sum = in6_cksum_pseudo(ip6, 21824 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 21825 0); 21826 } 21827 } 21828 #endif 21829 #if defined(INET6) && defined(INET) 21830 else 21831 #endif 21832 #ifdef INET 21833 { 21834 if (tp->t_port) { 21835 m->m_pkthdr.csum_flags = CSUM_UDP; 21836 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 21837 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 21838 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 21839 th->th_sum = htons(0); 21840 UDPSTAT_INC(udps_opackets); 21841 } else { 21842 m->m_pkthdr.csum_flags = CSUM_TCP; 21843 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 21844 th->th_sum = in_pseudo(ip->ip_src.s_addr, 21845 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 21846 IPPROTO_TCP + len + optlen)); 21847 } 21848 /* IP version must be set here for ipv4/ipv6 checking later */ 21849 KASSERT(ip->ip_v == IPVERSION, 21850 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 21851 } 21852 #endif 21853 /* 21854 * Enable TSO and specify the size of the segments. The TCP pseudo 21855 * header checksum is always provided. XXX: Fixme: This is currently 21856 * not the case for IPv6. 21857 */ 21858 if (tso) { 21859 /* 21860 * Here we must use t_maxseg and the optlen since 21861 * the optlen may include SACK's (or DSACK). 
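 * For example (values purely illustrative): with t_maxseg 1460 and
 * optlen 24 (12 bytes of timestamps plus a 12-byte padded SACK/DSACK
 * block), tso_segsz becomes 1460 - 24 = 1436, leaving room for the
 * options in every segment the NIC generates.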
21862 */ 21863 KASSERT(len > tp->t_maxseg - optlen, 21864 ("%s: len <= tso_segsz", __func__)); 21865 m->m_pkthdr.csum_flags |= CSUM_TSO; 21866 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 21867 } 21868 KASSERT(len + hdrlen == m_length(m, NULL), 21869 ("%s: mbuf chain different than expected: %d + %u != %u", 21870 __func__, len, hdrlen, m_length(m, NULL))); 21871 21872 #ifdef TCP_HHOOK 21873 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 21874 hhook_run_tcp_est_out(tp, th, &to, len, tso); 21875 #endif 21876 if ((rack->r_ctl.crte != NULL) && 21877 (rack->rc_hw_nobuf == 0) && 21878 tcp_bblogging_on(tp)) { 21879 rack_log_queue_level(tp, rack, len, &tv, cts); 21880 } 21881 /* We're getting ready to send; log now. */ 21882 if (tcp_bblogging_on(rack->rc_tp)) { 21883 union tcp_log_stackspecific log; 21884 21885 memset(&log, 0, sizeof(log)); 21886 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 21887 if (rack->rack_no_prr) 21888 log.u_bbr.flex1 = 0; 21889 else 21890 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 21891 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 21892 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 21893 log.u_bbr.flex4 = orig_len; 21894 /* Save off the early/late values */ 21895 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 21896 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 21897 log.u_bbr.bw_inuse = rack_get_bw(rack); 21898 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 21899 log.u_bbr.flex8 = 0; 21900 if (rsm) { 21901 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 21902 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 21903 counter_u64_add(rack_collapsed_win_rxt, 1); 21904 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 21905 } 21906 if (doing_tlp) 21907 log.u_bbr.flex8 = 2; 21908 else 21909 log.u_bbr.flex8 = 1; 21910 } else { 21911 if (doing_tlp) 21912 log.u_bbr.flex8 = 3; 21913 } 21914 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 21915 log.u_bbr.flex7 = mark; 21916 log.u_bbr.flex7 <<= 8; 21917 log.u_bbr.flex7 |= pass; 21918 log.u_bbr.pkts_out = tp->t_maxseg; 21919 log.u_bbr.timeStamp = cts; 21920 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 21921 if (rsm && (rsm->r_rtr_cnt > 0)) { 21922 /* 21923 * When we have a retransmit we want to log the 21924 * burst at send and flight at send from before. 21925 */ 21926 log.u_bbr.flex5 = rsm->r_fas; 21927 log.u_bbr.bbr_substate = rsm->r_bas; 21928 } else { 21929 /* 21930 * New transmits we log in flex5 the inflight again as 21931 * well as the number of segments in our send in the 21932 * substate field. 21933 */ 21934 log.u_bbr.flex5 = log.u_bbr.inflight; 21935 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 21936 } 21937 log.u_bbr.lt_epoch = cwnd_to_use; 21938 log.u_bbr.delivered = sendalot; 21939 log.u_bbr.rttProp = (uintptr_t)rsm; 21940 log.u_bbr.pkt_epoch = __LINE__; 21941 if (rsm) { 21942 log.u_bbr.delRate = rsm->r_flags; 21943 log.u_bbr.delRate <<= 31; 21944 log.u_bbr.delRate |= rack->r_must_retran; 21945 log.u_bbr.delRate <<= 1; 21946 log.u_bbr.delRate |= (sack_rxmit & 0x00000001); 21947 } else { 21948 log.u_bbr.delRate = rack->r_must_retran; 21949 log.u_bbr.delRate <<= 1; 21950 log.u_bbr.delRate |= (sack_rxmit & 0x00000001); 21951 } 21952 lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 21953 len, &log, false, NULL, __func__, __LINE__, &tv); 21954 } else 21955 lgb = NULL; 21956 21957 /* 21958 * Fill in IP length and desired time to live and send to IP level. 
21959 * There should be a better way to handle ttl and tos; we could keep 21960 * them in the template, but need a way to checksum without them. 21961 */ 21962 /* 21963 * m->m_pkthdr.len should have been set before cksum calculation, 21964 * because in6_cksum() needs it. 21965 */ 21966 #ifdef INET6 21967 if (isipv6) { 21968 /* 21969 * we separately set hoplimit for every segment, since the 21970 * user might want to change the value via setsockopt. Also, 21971 * desired default hop limit might be changed via Neighbor 21972 * Discovery. 21973 */ 21974 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); 21975 21976 /* 21977 * Set the packet size here for the benefit of DTrace 21978 * probes. ip6_output() will set it properly; it's supposed 21979 * to include the option header lengths as well. 21980 */ 21981 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 21982 21983 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 21984 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 21985 else 21986 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 21987 21988 if (tp->t_state == TCPS_SYN_SENT) 21989 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 21990 21991 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 21992 /* TODO: IPv6 IP6TOS_ECT bit on */ 21993 error = ip6_output(m, 21994 inp->in6p_outputopts, 21995 &inp->inp_route6, 21996 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 21997 NULL, NULL, inp); 21998 21999 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) 22000 mtu = inp->inp_route6.ro_nh->nh_mtu; 22001 } 22002 #endif /* INET6 */ 22003 #if defined(INET) && defined(INET6) 22004 else 22005 #endif 22006 #ifdef INET 22007 { 22008 ip->ip_len = htons(m->m_pkthdr.len); 22009 #ifdef INET6 22010 if (inp->inp_vflag & INP_IPV6PROTO) 22011 ip->ip_ttl = in6_selecthlim(inp, NULL); 22012 #endif /* INET6 */ 22013 rack->r_ctl.fsb.hoplimit = ip->ip_ttl; 22014 /* 22015 * If we do path MTU discovery, then we set DF on every 22016 * packet. This might not be the best thing to do according 22017 * to RFC3390 Section 2. However the tcp hostcache mitigates 22018 * the problem so it affects only the first tcp connection 22019 * with a host. 22020 * 22021 * NB: Don't set DF on small MTU/MSS to have a safe 22022 * fallback. 22023 */ 22024 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 22025 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 22026 if (tp->t_port == 0 || len < V_tcp_minmss) { 22027 ip->ip_off |= htons(IP_DF); 22028 } 22029 } else { 22030 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 22031 } 22032 22033 if (tp->t_state == TCPS_SYN_SENT) 22034 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 22035 22036 TCP_PROBE5(send, NULL, tp, ip, tp, th); 22037 22038 error = ip_output(m, 22039 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 22040 inp->inp_options, 22041 #else 22042 NULL, 22043 #endif 22044 &inp->inp_route, 22045 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0, 22046 inp); 22047 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) 22048 mtu = inp->inp_route.ro_nh->nh_mtu; 22049 } 22050 #endif /* INET */ 22051 if (lgb) { 22052 lgb->tlb_errno = error; 22053 lgb = NULL; 22054 } 22055 22056 out: 22057 /* 22058 * In transmit state, time the transmission and arrange for the 22059 * retransmit. In persist state, just set snd_max.
22060 */ 22061 if ((rsm == NULL) && doing_tlp) 22062 add_flag |= RACK_TLP; 22063 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 22064 rack_to_usec_ts(&tv), 22065 rsm, add_flag, s_mb, s_moff, hw_tls, segsiz); 22066 if (error == 0) { 22067 if (add_flag & RACK_IS_PCM) { 22068 /* We just launched a PCM */ 22069 /* rrs here log */ 22070 rack->pcm_in_progress = 1; 22071 rack->pcm_needed = 0; 22072 rack_log_pcm(rack, 7, len, rack->r_ctl.pcm_max_seg, add_flag); 22073 } 22074 if (rsm == NULL) { 22075 if (rack->lt_bw_up == 0) { 22076 rack->r_ctl.lt_timemark = tcp_tv_to_lusec(&tv); 22077 rack->r_ctl.lt_seq = tp->snd_una; 22078 rack->lt_bw_up = 1; 22079 } else if (((rack_seq + len) - rack->r_ctl.lt_seq) > 0x7fffffff) { 22080 /* 22081 * Need to record what we have since we are 22082 * approaching seq wrap. 22083 */ 22084 uint64_t tmark; 22085 22086 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); 22087 rack->r_ctl.lt_seq = tp->snd_una; 22088 tmark = tcp_get_u64_usecs(&tv); 22089 if (tmark > rack->r_ctl.lt_timemark) { 22090 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 22091 rack->r_ctl.lt_timemark = tmark; 22092 } 22093 } 22094 } 22095 rack->forced_ack = 0; /* If we send something zap the FA flag */ 22096 counter_u64_add(rack_total_bytes, len); 22097 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 22098 if (rsm && doing_tlp) { 22099 rack->rc_last_sent_tlp_past_cumack = 0; 22100 rack->rc_last_sent_tlp_seq_valid = 1; 22101 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 22102 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 22103 } 22104 if (rack->rc_hw_nobuf) { 22105 rack->rc_hw_nobuf = 0; 22106 rack->r_ctl.rc_agg_delayed = 0; 22107 rack->r_early = 0; 22108 rack->r_late = 0; 22109 rack->r_ctl.rc_agg_early = 0; 22110 } 22111 if (rsm && (doing_tlp == 0)) { 22112 /* Set we retransmitted */ 22113 rack->rc_gp_saw_rec = 1; 22114 } else { 22115 if (cwnd_to_use > tp->snd_ssthresh) { 22116 /* Set we sent in CA */ 22117 rack->rc_gp_saw_ca = 1; 22118 } else { 22119 /* Set we sent in SS */ 22120 rack->rc_gp_saw_ss = 1; 22121 } 22122 } 22123 if (TCPS_HAVEESTABLISHED(tp->t_state) && 22124 (tp->t_flags & TF_SACK_PERMIT) && 22125 tp->rcv_numsacks > 0) 22126 tcp_clean_dsack_blocks(tp); 22127 tot_len_this_send += len; 22128 if (len == 0) { 22129 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 22130 } else { 22131 int idx; 22132 22133 idx = (len / segsiz) + 3; 22134 if (idx >= TCP_MSS_ACCT_ATIMER) 22135 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 22136 else 22137 counter_u64_add(rack_out_size[idx], 1); 22138 } 22139 } 22140 if ((rack->rack_no_prr == 0) && 22141 sub_from_prr && 22142 (error == 0)) { 22143 if (rack->r_ctl.rc_prr_sndcnt >= len) 22144 rack->r_ctl.rc_prr_sndcnt -= len; 22145 else 22146 rack->r_ctl.rc_prr_sndcnt = 0; 22147 } 22148 sub_from_prr = 0; 22149 if (rsm != NULL) { 22150 if (doing_tlp) 22151 /* Make sure the TLP is added */ 22152 rsm->r_flags |= RACK_TLP; 22153 else 22154 /* If its a resend without TLP then it must not have the flag */ 22155 rsm->r_flags &= ~RACK_TLP; 22156 } 22157 if ((error == 0) && 22158 (len > 0) && 22159 (tp->snd_una == tp->snd_max)) 22160 rack->r_ctl.rc_tlp_rxt_last_time = cts; 22161 22162 { 22163 /* 22164 * This block is not associated with the above error == 0 test. 22165 * It is used to advance snd_max if we have a new transmit. 
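 * (Worked example, purely illustrative: a segment carrying 1448
 * bytes of new data plus a first-time FIN bumps snd_max by one for
 * the FIN and then by 1448 for the data below, i.e. 1449 in total,
 * since the FIN occupies one unit of sequence space.)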
22166 */ 22167 tcp_seq startseq = tp->snd_max; 22168 22169 22170 if (rsm && (doing_tlp == 0)) 22171 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 22172 if (error) 22173 /* We don't log or do anything with errors */ 22174 goto nomore; 22175 if (doing_tlp == 0) { 22176 if (rsm == NULL) { 22177 /* 22178 * Not a retransmission of some 22179 * sort, new data is going out so 22180 * clear our TLP count and flag. 22181 */ 22182 rack->rc_tlp_in_progress = 0; 22183 rack->r_ctl.rc_tlp_cnt_out = 0; 22184 } 22185 } else { 22186 /* 22187 * We have just sent a TLP, mark that it is true 22188 * and make sure our in progress is set so we 22189 * continue to check the count. 22190 */ 22191 rack->rc_tlp_in_progress = 1; 22192 rack->r_ctl.rc_tlp_cnt_out++; 22193 } 22194 /* 22195 * If we are retransmitting we are done, snd_max 22196 * does not get updated. 22197 */ 22198 if (sack_rxmit) 22199 goto nomore; 22200 if ((tp->snd_una == tp->snd_max) && (len > 0)) { 22201 /* 22202 * Update the time we just added data since 22203 * nothing was outstanding. 22204 */ 22205 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 22206 tp->t_acktime = ticks; 22207 } 22208 /* 22209 * Now for special SYN/FIN handling. 22210 */ 22211 if (flags & (TH_SYN | TH_FIN)) { 22212 if ((flags & TH_SYN) && 22213 ((tp->t_flags & TF_SENTSYN) == 0)) { 22214 tp->snd_max++; 22215 tp->t_flags |= TF_SENTSYN; 22216 } 22217 if ((flags & TH_FIN) && 22218 ((tp->t_flags & TF_SENTFIN) == 0)) { 22219 tp->snd_max++; 22220 tp->t_flags |= TF_SENTFIN; 22221 } 22222 } 22223 tp->snd_max += len; 22224 if (rack->rc_new_rnd_needed) { 22225 rack_new_round_starts(tp, rack, tp->snd_max); 22226 } 22227 /* 22228 * Time this transmission if not a retransmission and 22229 * not currently timing anything. 22230 * This is only relevant in case of switching back to 22231 * the base stack. 22232 */ 22233 if (tp->t_rtttime == 0) { 22234 tp->t_rtttime = ticks; 22235 tp->t_rtseq = startseq; 22236 KMOD_TCPSTAT_INC(tcps_segstimed); 22237 } 22238 if (len && 22239 ((tp->t_flags & TF_GPUTINPROG) == 0)) 22240 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 22241 /* 22242 * If we are doing FO we need to update the mbuf position and subtract 22243 * this happens when the peer sends us duplicate information and 22244 * we thus want to send a DSACK. 22245 * 22246 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 22247 * turned off? If not then we are going to echo multiple DSACK blocks 22248 * out (with the TSO), which we should not be doing. 22249 */ 22250 if (rack->r_fast_output && len) { 22251 if (rack->r_ctl.fsb.left_to_send > len) 22252 rack->r_ctl.fsb.left_to_send -= len; 22253 else 22254 rack->r_ctl.fsb.left_to_send = 0; 22255 if (rack->r_ctl.fsb.left_to_send < segsiz) 22256 rack->r_fast_output = 0; 22257 if (rack->r_fast_output) { 22258 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 22259 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 22260 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); 22261 } 22262 } 22263 if (rack_pcm_blast == 0) { 22264 if ((orig_len > len) && 22265 (add_flag & RACK_IS_PCM) && 22266 (len < pace_max_seg) && 22267 ((pace_max_seg - len) > segsiz)) { 22268 /* 22269 * We are doing a PCM measurement and we did 22270 * not get enough data in the TSO to meet the 22271 * burst requirement. 
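 * (Illustrative numbers: if the PCM burst wanted orig_len 28960 but
 * TSO limits clipped this send to len 14480, n_len becomes 14480,
 * pace_max_seg is reduced by what was just sent, and we jump back to
 * "send" so the remaining 14480 bytes still go out as part of the
 * burst.)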
22272 */ 22273 uint32_t n_len; 22274 22275 n_len = (orig_len - len); 22276 orig_len -= len; 22277 pace_max_seg -= len; 22278 len = n_len; 22279 sb_offset = tp->snd_max - tp->snd_una; 22280 /* Re-lock for the next spin */ 22281 SOCK_SENDBUF_LOCK(so); 22282 goto send; 22283 } 22284 } else { 22285 if ((orig_len > len) && 22286 (add_flag & RACK_IS_PCM) && 22287 ((orig_len - len) > segsiz)) { 22288 /* 22289 * We are doing a PCM measurement and we did 22290 * not get enough data in the TSO to meet the 22291 * burst requirement. 22292 */ 22293 uint32_t n_len; 22294 22295 n_len = (orig_len - len); 22296 orig_len -= len; 22297 len = n_len; 22298 sb_offset = tp->snd_max - tp->snd_una; 22299 /* Re-lock for the next spin */ 22300 SOCK_SENDBUF_LOCK(so); 22301 goto send; 22302 } 22303 } 22304 } 22305 nomore: 22306 if (error) { 22307 rack->r_ctl.rc_agg_delayed = 0; 22308 rack->r_early = 0; 22309 rack->r_late = 0; 22310 rack->r_ctl.rc_agg_early = 0; 22311 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 22312 /* 22313 * Failures do not advance the seq counter above. For the 22314 * case of ENOBUFS we will fall out and retry in 1ms with 22315 * the hpts. Everything else will just have to retransmit 22316 * with the timer. 22317 * 22318 * In any case, we do not want to loop around for another 22319 * send without a good reason. 22320 */ 22321 sendalot = 0; 22322 switch (error) { 22323 case EPERM: 22324 case EACCES: 22325 tp->t_softerror = error; 22326 #ifdef TCP_ACCOUNTING 22327 crtsc = get_cyclecount(); 22328 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22329 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22330 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22331 } 22332 sched_unpin(); 22333 #endif 22334 return (error); 22335 case ENOBUFS: 22336 /* 22337 * Pace us right away to retry in some 22338 * time. 22339 */ 22340 if (rack->r_ctl.crte != NULL) { 22341 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); 22342 if (tcp_bblogging_on(rack->rc_tp)) 22343 rack_log_queue_level(tp, rack, len, &tv, cts); 22344 } else 22345 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); 22346 pacing_delay = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 22347 if (rack->rc_enobuf < 0x7f) 22348 rack->rc_enobuf++; 22349 if (pacing_delay < (10 * HPTS_USEC_IN_MSEC)) 22350 pacing_delay = 10 * HPTS_USEC_IN_MSEC; 22351 if (rack->r_ctl.crte != NULL) { 22352 counter_u64_add(rack_saw_enobuf_hw, 1); 22353 tcp_rl_log_enobuf(rack->r_ctl.crte); 22354 } 22355 counter_u64_add(rack_saw_enobuf, 1); 22356 goto enobufs; 22357 case EMSGSIZE: 22358 /* 22359 * For some reason the interface we used initially 22360 * to send segments changed to another or lowered 22361 * its MTU. If TSO was active we either got an 22362 * interface without TSO capabilities or TSO was 22363 * turned off. If we obtained mtu from ip_output() 22364 * then update it and try again.
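 * (For instance, if the route moves from a 9000-byte to a 1500-byte
 * MTU path, tcp_mss_update() below shrinks t_maxseg to fit the new
 * MTU; because the saved value was larger we "goto again" and
 * re-segment rather than give up on the send.)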
22365 */ 22366 if (tso) 22367 tp->t_flags &= ~TF_TSO; 22368 if (mtu != 0) { 22369 int saved_mtu; 22370 22371 saved_mtu = tp->t_maxseg; 22372 tcp_mss_update(tp, -1, mtu, NULL, NULL); 22373 if (saved_mtu > tp->t_maxseg) { 22374 goto again; 22375 } 22376 } 22377 pacing_delay = 10 * HPTS_USEC_IN_MSEC; 22378 rack_start_hpts_timer(rack, tp, cts, pacing_delay, 0, 0); 22379 #ifdef TCP_ACCOUNTING 22380 crtsc = get_cyclecount(); 22381 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22382 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22383 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22384 } 22385 sched_unpin(); 22386 #endif 22387 return (error); 22388 case ENETUNREACH: 22389 counter_u64_add(rack_saw_enetunreach, 1); 22390 /* FALLTHROUGH */ 22391 case EHOSTDOWN: 22392 case EHOSTUNREACH: 22393 case ENETDOWN: 22394 if (TCPS_HAVERCVDSYN(tp->t_state)) { 22395 tp->t_softerror = error; 22396 error = 0; 22397 } 22398 /* FALLTHROUGH */ 22399 default: 22400 pacing_delay = 10 * HPTS_USEC_IN_MSEC; 22401 rack_start_hpts_timer(rack, tp, cts, pacing_delay, 0, 0); 22402 #ifdef TCP_ACCOUNTING 22403 crtsc = get_cyclecount(); 22404 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22405 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22406 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22407 } 22408 sched_unpin(); 22409 #endif 22410 return (error); 22411 } 22412 } else { 22413 rack->rc_enobuf = 0; 22414 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 22415 rack->r_ctl.retran_during_recovery += len; 22416 } 22417 KMOD_TCPSTAT_INC(tcps_sndtotal); 22418 22419 /* 22420 * Data sent (as far as we can tell). If this advertises a larger 22421 * window than any other segment, then remember the size of the 22422 * advertised window. Any pending ACK has now been sent. 22423 */ 22424 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 22425 tp->rcv_adv = tp->rcv_nxt + recwin; 22426 22427 tp->last_ack_sent = tp->rcv_nxt; 22428 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 22429 enobufs: 22430 if (sendalot) { 22431 /* Do we need to turn off sendalot? */ 22432 if (pace_max_seg && 22433 (tot_len_this_send >= pace_max_seg)) { 22434 /* We hit our max. */ 22435 sendalot = 0; 22436 } 22437 } 22438 if ((error == 0) && (flags & TH_FIN)) 22439 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 22440 if (flags & TH_RST) { 22441 /* 22442 * We don't send again after sending a RST. 22443 */ 22444 pacing_delay = 0; 22445 sendalot = 0; 22446 if (error == 0) 22447 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 22448 } else if ((pacing_delay == 0) && (sendalot == 0) && tot_len_this_send) { 22449 /* 22450 * Get our pacing rate, if an error 22451 * occurred in sending (ENOBUF) we would 22452 * hit the else if with slot preset. Other 22453 * errors return. 22454 */ 22455 pacing_delay = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz, __LINE__); 22456 } 22457 /* We have sent clear the flag */ 22458 rack->r_ent_rec_ns = 0; 22459 if (rack->r_must_retran) { 22460 if (rsm) { 22461 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 22462 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 22463 /* 22464 * We have retransmitted all. 22465 */ 22466 rack->r_must_retran = 0; 22467 rack->r_ctl.rc_out_at_rto = 0; 22468 } 22469 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 22470 /* 22471 * Sending new data will also kill 22472 * the loop. 
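 * (Example: after an RTO, rc_snd_max_at_rto records the old snd_max;
 * once either the retransmitted rsm reaches that point or new sends
 * push snd_max past it, r_must_retran is cleared and normal output
 * gating resumes.)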
22473 */ 22474 rack->r_must_retran = 0; 22475 rack->r_ctl.rc_out_at_rto = 0; 22476 } 22477 } 22478 rack->r_ctl.fsb.recwin = recwin; 22479 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 22480 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 22481 /* 22482 * We hit an RTO and now have past snd_max at the RTO 22483 * clear all the WAS flags. 22484 */ 22485 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 22486 } 22487 if (pacing_delay) { 22488 /* set the rack tcb into the slot N */ 22489 if ((error == 0) && 22490 rack_use_rfo && 22491 ((flags & (TH_SYN|TH_FIN)) == 0) && 22492 (rsm == NULL) && 22493 (ipoptlen == 0) && 22494 (doing_tlp == 0) && 22495 rack->r_fsb_inited && 22496 TCPS_HAVEESTABLISHED(tp->t_state) && 22497 ((IN_RECOVERY(tp->t_flags)) == 0) && 22498 (rack->r_must_retran == 0) && 22499 ((tp->t_flags & TF_NEEDFIN) == 0) && 22500 (len > 0) && (orig_len > 0) && 22501 (orig_len > len) && 22502 ((orig_len - len) >= segsiz) && 22503 ((optlen == 0) || 22504 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 22505 /* We can send at least one more MSS using our fsb */ 22506 rack_setup_fast_output(tp, rack, sb, len, orig_len, 22507 segsiz, pace_max_seg, hw_tls, flags); 22508 } else 22509 rack->r_fast_output = 0; 22510 rack_log_fsb(rack, tp, so, flags, 22511 ipoptlen, orig_len, len, error, 22512 (rsm == NULL), optlen, __LINE__, 2); 22513 } else if (sendalot) { 22514 int ret; 22515 22516 sack_rxmit = 0; 22517 if ((error == 0) && 22518 rack_use_rfo && 22519 ((flags & (TH_SYN|TH_FIN)) == 0) && 22520 (rsm == NULL) && 22521 (doing_tlp == 0) && 22522 (ipoptlen == 0) && 22523 (rack->r_must_retran == 0) && 22524 rack->r_fsb_inited && 22525 TCPS_HAVEESTABLISHED(tp->t_state) && 22526 ((IN_RECOVERY(tp->t_flags)) == 0) && 22527 ((tp->t_flags & TF_NEEDFIN) == 0) && 22528 (len > 0) && (orig_len > 0) && 22529 (orig_len > len) && 22530 ((orig_len - len) >= segsiz) && 22531 ((optlen == 0) || 22532 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 22533 /* we can use fast_output for more */ 22534 rack_setup_fast_output(tp, rack, sb, len, orig_len, 22535 segsiz, pace_max_seg, hw_tls, flags); 22536 if (rack->r_fast_output) { 22537 error = 0; 22538 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, &tot_len_this_send, &error, __LINE__); 22539 if (ret >= 0) 22540 return (ret); 22541 else if (error) 22542 goto nomore; 22543 22544 } 22545 } 22546 goto again; 22547 } 22548 skip_all_send: 22549 /* Assure when we leave that snd_nxt will point to top */ 22550 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 22551 tp->snd_nxt = tp->snd_max; 22552 rack_start_hpts_timer(rack, tp, cts, pacing_delay, tot_len_this_send, 0); 22553 #ifdef TCP_ACCOUNTING 22554 crtsc = get_cyclecount() - ts_val; 22555 if (tot_len_this_send) { 22556 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22557 tp->tcp_cnt_counters[SND_OUT_DATA]++; 22558 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 22559 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 22560 } 22561 } else { 22562 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22563 tp->tcp_cnt_counters[SND_OUT_ACK]++; 22564 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 22565 } 22566 } 22567 sched_unpin(); 22568 #endif 22569 if (error == ENOBUFS) 22570 error = 0; 22571 return (error); 22572 } 22573 22574 static void 22575 rack_update_seg(struct tcp_rack *rack) 22576 { 22577 uint32_t orig_val; 22578 22579 orig_val = rack->r_ctl.rc_pace_max_segs; 22580 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 22581 if (orig_val != 
rack->r_ctl.rc_pace_max_segs) 22582 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); 22583 } 22584 22585 static void 22586 rack_mtu_change(struct tcpcb *tp) 22587 { 22588 /* 22589 * The MSS may have changed 22590 */ 22591 struct tcp_rack *rack; 22592 struct rack_sendmap *rsm; 22593 22594 rack = (struct tcp_rack *)tp->t_fb_ptr; 22595 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 22596 /* 22597 * The MTU has changed we need to resend everything 22598 * since all we have sent is lost. We first fix 22599 * up the mtu though. 22600 */ 22601 rack_set_pace_segments(tp, rack, __LINE__, NULL); 22602 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 22603 rack_remxt_tmr(tp); 22604 rack->r_fast_output = 0; 22605 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 22606 rack->r_ctl.rc_sacked); 22607 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 22608 rack->r_must_retran = 1; 22609 /* Mark all inflight to needing to be rxt'd */ 22610 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 22611 rsm->r_flags |= (RACK_MUST_RXT|RACK_PMTU_CHG); 22612 } 22613 } 22614 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 22615 /* We don't use snd_nxt to retransmit */ 22616 tp->snd_nxt = tp->snd_max; 22617 } 22618 22619 static int 22620 rack_set_dgp(struct tcp_rack *rack) 22621 { 22622 if (rack->dgp_on == 1) 22623 return(0); 22624 if ((rack->use_fixed_rate == 1) && 22625 (rack->rc_always_pace == 1)) { 22626 /* 22627 * We are already pacing another 22628 * way. 22629 */ 22630 return (EBUSY); 22631 } 22632 if (rack->rc_always_pace == 1) { 22633 rack_remove_pacing(rack); 22634 } 22635 if (tcp_incr_dgp_pacing_cnt() == 0) 22636 return (ENOSPC); 22637 rack->r_ctl.pacing_method |= RACK_DGP_PACING; 22638 rack->rc_fillcw_apply_discount = 0; 22639 rack->dgp_on = 1; 22640 rack->rc_always_pace = 1; 22641 rack->rc_pace_dnd = 1; 22642 rack->use_fixed_rate = 0; 22643 if (rack->gp_ready) 22644 rack_set_cc_pacing(rack); 22645 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 22646 rack->rack_attempt_hdwr_pace = 0; 22647 /* rxt settings */ 22648 rack->full_size_rxt = 1; 22649 rack->shape_rxt_to_pacing_min = 0; 22650 /* cmpack=1 */ 22651 rack->r_use_cmp_ack = 1; 22652 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 22653 rack->r_use_cmp_ack) 22654 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 22655 /* scwnd=1 */ 22656 rack->rack_enable_scwnd = 1; 22657 /* dynamic=100 */ 22658 rack->rc_gp_dyn_mul = 1; 22659 /* gp_inc_ca */ 22660 rack->r_ctl.rack_per_of_gp_ca = 100; 22661 /* rrr_conf=3 */ 22662 rack->r_rr_config = 3; 22663 /* npush=2 */ 22664 rack->r_ctl.rc_no_push_at_mrtt = 2; 22665 /* fillcw=1 */ 22666 rack->rc_pace_to_cwnd = 1; 22667 rack->rc_pace_fill_if_rttin_range = 0; 22668 rack->rtt_limit_mul = 0; 22669 /* noprr=1 */ 22670 rack->rack_no_prr = 1; 22671 /* lscwnd=1 */ 22672 rack->r_limit_scw = 1; 22673 /* gp_inc_rec */ 22674 rack->r_ctl.rack_per_of_gp_rec = 90; 22675 return (0); 22676 } 22677 22678 static int 22679 rack_set_profile(struct tcp_rack *rack, int prof) 22680 { 22681 int err = EINVAL; 22682 if (prof == 1) { 22683 /* 22684 * Profile 1 is "standard" DGP. It ignores 22685 * client buffer level. 22686 */ 22687 err = rack_set_dgp(rack); 22688 if (err) 22689 return (err); 22690 } else if (prof == 6) { 22691 err = rack_set_dgp(rack); 22692 if (err) 22693 return (err); 22694 /* 22695 * Profile 6 tweaks DGP so that it will apply to 22696 * fill-cw the same settings that profile5 does 22697 * to replace DGP. It gets then the max(dgp-rate, fillcw(discounted). 
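 * (Usage sketch: setting the TCP_RACK_PROFILE socket option to 1
 * gives plain DGP, 6 gives DGP with the fill-cw discount
 * (rc_fillcw_apply_discount below), and 0 unwinds pacing and
 * restores the sysctl-driven defaults.)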
22698 */ 22699 rack->rc_fillcw_apply_discount = 1; 22700 } else if (prof == 0) { 22701 /* This changes things back to the default settings */ 22702 if (rack->rc_always_pace == 1) { 22703 rack_remove_pacing(rack); 22704 } else { 22705 /* Make sure any stray flags are off */ 22706 rack->dgp_on = 0; 22707 rack->rc_hybrid_mode = 0; 22708 rack->use_fixed_rate = 0; 22709 } 22710 err = 0; 22711 if (rack_fill_cw_state) 22712 rack->rc_pace_to_cwnd = 1; 22713 else 22714 rack->rc_pace_to_cwnd = 0; 22715 22716 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 22717 rack->r_ctl.pacing_method |= RACK_REG_PACING; 22718 rack->rc_always_pace = 1; 22719 if (rack->rack_hibeta) 22720 rack_set_cc_pacing(rack); 22721 } else 22722 rack->rc_always_pace = 0; 22723 if (rack_dsack_std_based & 0x1) { 22724 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 22725 rack->rc_rack_tmr_std_based = 1; 22726 } 22727 if (rack_dsack_std_based & 0x2) { 22728 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 22729 rack->rc_rack_use_dsack = 1; 22730 } 22731 if (rack_use_cmp_acks) 22732 rack->r_use_cmp_ack = 1; 22733 else 22734 rack->r_use_cmp_ack = 0; 22735 if (rack_disable_prr) 22736 rack->rack_no_prr = 1; 22737 else 22738 rack->rack_no_prr = 0; 22739 if (rack_gp_no_rec_chg) 22740 rack->rc_gp_no_rec_chg = 1; 22741 else 22742 rack->rc_gp_no_rec_chg = 0; 22743 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 22744 rack->r_mbuf_queue = 1; 22745 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 22746 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 22747 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 22748 } else { 22749 rack->r_mbuf_queue = 0; 22750 rack->rc_tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 22751 } 22752 if (rack_enable_shared_cwnd) 22753 rack->rack_enable_scwnd = 1; 22754 else 22755 rack->rack_enable_scwnd = 0; 22756 if (rack_do_dyn_mul) { 22757 /* When dynamic adjustment is on CA needs to start at 100% */ 22758 rack->rc_gp_dyn_mul = 1; 22759 if (rack_do_dyn_mul >= 100) 22760 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 22761 } else { 22762 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 22763 rack->rc_gp_dyn_mul = 0; 22764 } 22765 rack->r_rr_config = 0; 22766 rack->r_ctl.rc_no_push_at_mrtt = 0; 22767 rack->rc_pace_fill_if_rttin_range = 0; 22768 rack->rtt_limit_mul = 0; 22769 22770 if (rack_enable_hw_pacing) 22771 rack->rack_hdw_pace_ena = 1; 22772 else 22773 rack->rack_hdw_pace_ena = 0; 22774 if (rack_disable_prr) 22775 rack->rack_no_prr = 1; 22776 else 22777 rack->rack_no_prr = 0; 22778 if (rack_limits_scwnd) 22779 rack->r_limit_scw = 1; 22780 else 22781 rack->r_limit_scw = 0; 22782 rack_init_retransmit_value(rack, rack_rxt_controls); 22783 err = 0; 22784 } 22785 return (err); 22786 } 22787 22788 static int 22789 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 22790 { 22791 struct deferred_opt_list *dol; 22792 22793 dol = malloc(sizeof(struct deferred_opt_list), 22794 M_TCPDO, M_NOWAIT|M_ZERO); 22795 if (dol == NULL) { 22796 /* 22797 * No space yikes -- fail out.. 
22798 */ 22799 return (0); 22800 } 22801 dol->optname = sopt_name; 22802 dol->optval = loptval; 22803 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 22804 return (1); 22805 } 22806 22807 static int 22808 process_hybrid_pacing(struct tcp_rack *rack, struct tcp_hybrid_req *hybrid) 22809 { 22810 #ifdef TCP_REQUEST_TRK 22811 struct tcp_sendfile_track *sft; 22812 struct timeval tv; 22813 tcp_seq seq; 22814 int err; 22815 22816 microuptime(&tv); 22817 22818 /* Make sure no fixed rate is on */ 22819 rack->use_fixed_rate = 0; 22820 rack->r_ctl.rc_fixed_pacing_rate_rec = 0; 22821 rack->r_ctl.rc_fixed_pacing_rate_ca = 0; 22822 rack->r_ctl.rc_fixed_pacing_rate_ss = 0; 22823 /* Now allocate or find our entry that will have these settings */ 22824 sft = tcp_req_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusec(&tv), 0); 22825 if (sft == NULL) { 22826 rack->rc_tp->tcp_hybrid_error++; 22827 /* no space, where would it have gone? */ 22828 seq = rack->rc_tp->snd_una + rack->rc_tp->t_inpcb.inp_socket->so_snd.sb_ccc; 22829 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_ROOM, __LINE__, 0); 22830 return (ENOSPC); 22831 } 22832 /* mask our internal flags */ 22833 hybrid->hybrid_flags &= TCP_HYBRID_PACING_USER_MASK; 22834 /* The seq will be snd_una + everything in the buffer */ 22835 seq = sft->start_seq; 22836 if ((hybrid->hybrid_flags & TCP_HYBRID_PACING_ENABLE) == 0) { 22837 /* Disabling hybrid pacing */ 22838 if (rack->rc_hybrid_mode) { 22839 rack_set_profile(rack, 0); 22840 rack->rc_tp->tcp_hybrid_stop++; 22841 } 22842 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_TURNED_OFF, __LINE__, 0); 22843 return (0); 22844 } 22845 if (rack->dgp_on == 0) { 22846 /* 22847 * If we have not yet turned DGP on, do so 22848 * now setting pure DGP mode, no buffer level 22849 * response. 22850 */ 22851 if ((err = rack_set_profile(rack, 1)) != 0){ 22852 /* Failed to turn pacing on */ 22853 rack->rc_tp->tcp_hybrid_error++; 22854 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_NO_PACING, __LINE__, 0); 22855 return (err); 22856 } 22857 } 22858 /* 22859 * Now we must switch to hybrid mode as well which also 22860 * means moving to regular pacing. 22861 */ 22862 if (rack->rc_hybrid_mode == 0) { 22863 /* First time */ 22864 if (tcp_can_enable_pacing()) { 22865 rack->r_ctl.pacing_method |= RACK_REG_PACING; 22866 rack->rc_hybrid_mode = 1; 22867 } else { 22868 return (ENOSPC); 22869 } 22870 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) { 22871 /* 22872 * This should be true. 
22873 */ 22874 tcp_dec_dgp_pacing_cnt(); 22875 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 22876 } 22877 } 22878 /* Now set in our flags */ 22879 sft->hybrid_flags = hybrid->hybrid_flags | TCP_HYBRID_PACING_WASSET; 22880 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_CSPR) 22881 sft->cspr = hybrid->cspr; 22882 else 22883 sft->cspr = 0; 22884 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_H_MS) 22885 sft->hint_maxseg = hybrid->hint_maxseg; 22886 else 22887 sft->hint_maxseg = 0; 22888 rack->rc_tp->tcp_hybrid_start++; 22889 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_RULES_SET, __LINE__,0); 22890 return (0); 22891 #else 22892 return (ENOTSUP); 22893 #endif 22894 } 22895 22896 static int 22897 rack_stack_information(struct tcpcb *tp, struct stack_specific_info *si) 22898 { 22899 /* We pulled a SSI info log out what was there */ 22900 si->bytes_transmitted = tp->t_sndbytes; 22901 si->bytes_retransmitted = tp->t_snd_rxt_bytes; 22902 return (0); 22903 } 22904 22905 static int 22906 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 22907 uint32_t optval, uint64_t loptval, struct tcp_hybrid_req *hybrid) 22908 22909 { 22910 struct epoch_tracker et; 22911 struct sockopt sopt; 22912 struct cc_newreno_opts opt; 22913 uint64_t val; 22914 int error = 0; 22915 uint16_t ca, ss; 22916 22917 switch (sopt_name) { 22918 case TCP_RACK_SET_RXT_OPTIONS: 22919 if (optval <= 2) { 22920 rack_init_retransmit_value(rack, optval); 22921 } else { 22922 /* 22923 * You must send in 0, 1 or 2 all else is 22924 * invalid. 22925 */ 22926 error = EINVAL; 22927 } 22928 break; 22929 case TCP_RACK_DSACK_OPT: 22930 RACK_OPTS_INC(tcp_rack_dsack_opt); 22931 if (optval & 0x1) { 22932 rack->rc_rack_tmr_std_based = 1; 22933 } else { 22934 rack->rc_rack_tmr_std_based = 0; 22935 } 22936 if (optval & 0x2) { 22937 rack->rc_rack_use_dsack = 1; 22938 } else { 22939 rack->rc_rack_use_dsack = 0; 22940 } 22941 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); 22942 break; 22943 case TCP_RACK_PACING_DIVISOR: 22944 RACK_OPTS_INC(tcp_rack_pacing_divisor); 22945 if (optval == 0) { 22946 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; 22947 } else { 22948 if (optval < RL_MIN_DIVISOR) 22949 rack->r_ctl.pace_len_divisor = RL_MIN_DIVISOR; 22950 else 22951 rack->r_ctl.pace_len_divisor = optval; 22952 } 22953 break; 22954 case TCP_RACK_HI_BETA: 22955 RACK_OPTS_INC(tcp_rack_hi_beta); 22956 if (optval > 0) { 22957 rack->rack_hibeta = 1; 22958 if ((optval >= 50) && 22959 (optval <= 100)) { 22960 /* 22961 * User wants to set a custom beta. 22962 */ 22963 rack->r_ctl.saved_hibeta = optval; 22964 if (rack->rc_pacing_cc_set) 22965 rack_undo_cc_pacing(rack); 22966 rack->r_ctl.rc_saved_beta = optval; 22967 } 22968 if (rack->rc_pacing_cc_set == 0) 22969 rack_set_cc_pacing(rack); 22970 } else { 22971 rack->rack_hibeta = 0; 22972 if (rack->rc_pacing_cc_set) 22973 rack_undo_cc_pacing(rack); 22974 } 22975 break; 22976 case TCP_RACK_PACING_BETA: 22977 error = EINVAL; 22978 break; 22979 case TCP_RACK_TIMER_SLOP: 22980 RACK_OPTS_INC(tcp_rack_timer_slop); 22981 rack->r_ctl.timer_slop = optval; 22982 if (rack->rc_tp->t_srtt) { 22983 /* 22984 * If we have an SRTT lets update t_rxtcur 22985 * to have the new slop. 22986 */ 22987 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 22988 rack_rto_min, rack_rto_max, 22989 rack->r_ctl.timer_slop); 22990 } 22991 break; 22992 case TCP_RACK_PACING_BETA_ECN: 22993 RACK_OPTS_INC(tcp_rack_beta_ecn); 22994 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 22995 /* This only works for newreno. 
*/ 22996 error = EINVAL; 22997 break; 22998 } 22999 if (rack->rc_pacing_cc_set) { 23000 /* 23001 * Set them into the real CC module 23002 * whats in the rack pcb is the old values 23003 * to be used on restoral/ 23004 */ 23005 sopt.sopt_dir = SOPT_SET; 23006 opt.name = CC_NEWRENO_BETA_ECN; 23007 opt.val = optval; 23008 if (CC_ALGO(tp)->ctl_output != NULL) 23009 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 23010 else 23011 error = ENOENT; 23012 } else { 23013 /* 23014 * Not pacing yet so set it into our local 23015 * rack pcb storage. 23016 */ 23017 rack->r_ctl.rc_saved_beta_ecn = optval; 23018 } 23019 break; 23020 case TCP_DEFER_OPTIONS: 23021 RACK_OPTS_INC(tcp_defer_opt); 23022 if (optval) { 23023 if (rack->gp_ready) { 23024 /* Too late */ 23025 error = EINVAL; 23026 break; 23027 } 23028 rack->defer_options = 1; 23029 } else 23030 rack->defer_options = 0; 23031 break; 23032 case TCP_RACK_MEASURE_CNT: 23033 RACK_OPTS_INC(tcp_rack_measure_cnt); 23034 if (optval && (optval <= 0xff)) { 23035 rack->r_ctl.req_measurements = optval; 23036 } else 23037 error = EINVAL; 23038 break; 23039 case TCP_REC_ABC_VAL: 23040 RACK_OPTS_INC(tcp_rec_abc_val); 23041 if (optval > 0) 23042 rack->r_use_labc_for_rec = 1; 23043 else 23044 rack->r_use_labc_for_rec = 0; 23045 break; 23046 case TCP_RACK_ABC_VAL: 23047 RACK_OPTS_INC(tcp_rack_abc_val); 23048 if ((optval > 0) && (optval < 255)) 23049 rack->rc_labc = optval; 23050 else 23051 error = EINVAL; 23052 break; 23053 case TCP_HDWR_UP_ONLY: 23054 RACK_OPTS_INC(tcp_pacing_up_only); 23055 if (optval) 23056 rack->r_up_only = 1; 23057 else 23058 rack->r_up_only = 0; 23059 break; 23060 case TCP_FILLCW_RATE_CAP: /* URL:fillcw_cap */ 23061 RACK_OPTS_INC(tcp_fillcw_rate_cap); 23062 rack->r_ctl.fillcw_cap = loptval; 23063 break; 23064 case TCP_PACING_RATE_CAP: 23065 RACK_OPTS_INC(tcp_pacing_rate_cap); 23066 if ((rack->dgp_on == 1) && 23067 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { 23068 /* 23069 * If we are doing DGP we need to switch 23070 * to using the pacing limit. 23071 */ 23072 if (tcp_can_enable_pacing() == 0) { 23073 error = ENOSPC; 23074 break; 23075 } 23076 /* 23077 * Now change up the flags and counts to be correct. 23078 */ 23079 rack->r_ctl.pacing_method |= RACK_REG_PACING; 23080 tcp_dec_dgp_pacing_cnt(); 23081 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 23082 } 23083 rack->r_ctl.bw_rate_cap = loptval; 23084 break; 23085 case TCP_HYBRID_PACING: 23086 if (hybrid == NULL) { 23087 error = EINVAL; 23088 break; 23089 } 23090 if (rack->r_ctl.side_chan_dis_mask & HYBRID_DIS_MASK) { 23091 error = EPERM; 23092 break; 23093 } 23094 error = process_hybrid_pacing(rack, hybrid); 23095 break; 23096 case TCP_SIDECHAN_DIS: /* URL:scodm */ 23097 if (optval) 23098 rack->r_ctl.side_chan_dis_mask = optval; 23099 else 23100 rack->r_ctl.side_chan_dis_mask = 0; 23101 break; 23102 case TCP_RACK_PROFILE: 23103 RACK_OPTS_INC(tcp_profile); 23104 error = rack_set_profile(rack, optval); 23105 break; 23106 case TCP_USE_CMP_ACKS: 23107 RACK_OPTS_INC(tcp_use_cmp_acks); 23108 if ((optval == 0) && (tp->t_flags2 & TF2_MBUF_ACKCMP)) { 23109 /* You can't turn it off once its on! 
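 * (Illustrative sequence: turning the option on switches the
 * connection to mbuf queueing and, once established, sets
 * TF2_MBUF_ACKCMP so LRO may hand up compressed ack entries; since
 * such entries may already be queued, a later attempt to turn it
 * off is refused with EINVAL.)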
*/ 23110 error = EINVAL; 23111 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { 23112 rack->r_use_cmp_ack = 1; 23113 rack->r_mbuf_queue = 1; 23114 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23115 } 23116 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 23117 tp->t_flags2 |= TF2_MBUF_ACKCMP; 23118 break; 23119 case TCP_SHARED_CWND_TIME_LIMIT: 23120 RACK_OPTS_INC(tcp_lscwnd); 23121 if (optval) 23122 rack->r_limit_scw = 1; 23123 else 23124 rack->r_limit_scw = 0; 23125 break; 23126 case TCP_RACK_DGP_IN_REC: 23127 error = EINVAL; 23128 break; 23129 case TCP_RACK_PACE_TO_FILL: 23130 RACK_OPTS_INC(tcp_fillcw); 23131 if (optval == 0) 23132 rack->rc_pace_to_cwnd = 0; 23133 else { 23134 rack->rc_pace_to_cwnd = 1; 23135 } 23136 if ((optval >= rack_gp_rtt_maxmul) && 23137 rack_gp_rtt_maxmul && 23138 (optval < 0xf)) { 23139 rack->rc_pace_fill_if_rttin_range = 1; 23140 rack->rtt_limit_mul = optval; 23141 } else { 23142 rack->rc_pace_fill_if_rttin_range = 0; 23143 rack->rtt_limit_mul = 0; 23144 } 23145 break; 23146 case TCP_RACK_NO_PUSH_AT_MAX: 23147 RACK_OPTS_INC(tcp_npush); 23148 if (optval == 0) 23149 rack->r_ctl.rc_no_push_at_mrtt = 0; 23150 else if (optval < 0xff) 23151 rack->r_ctl.rc_no_push_at_mrtt = optval; 23152 else 23153 error = EINVAL; 23154 break; 23155 case TCP_SHARED_CWND_ENABLE: 23156 RACK_OPTS_INC(tcp_rack_scwnd); 23157 if (optval == 0) 23158 rack->rack_enable_scwnd = 0; 23159 else 23160 rack->rack_enable_scwnd = 1; 23161 break; 23162 case TCP_RACK_MBUF_QUEUE: 23163 /* Now do we use the LRO mbuf-queue feature */ 23164 RACK_OPTS_INC(tcp_rack_mbufq); 23165 if (optval || rack->r_use_cmp_ack) 23166 rack->r_mbuf_queue = 1; 23167 else 23168 rack->r_mbuf_queue = 0; 23169 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 23170 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23171 else 23172 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 23173 break; 23174 case TCP_RACK_NONRXT_CFG_RATE: 23175 RACK_OPTS_INC(tcp_rack_cfg_rate); 23176 if (optval == 0) 23177 rack->rack_rec_nonrxt_use_cr = 0; 23178 else 23179 rack->rack_rec_nonrxt_use_cr = 1; 23180 break; 23181 case TCP_NO_PRR: 23182 RACK_OPTS_INC(tcp_rack_noprr); 23183 if (optval == 0) 23184 rack->rack_no_prr = 0; 23185 else if (optval == 1) 23186 rack->rack_no_prr = 1; 23187 else if (optval == 2) 23188 rack->no_prr_addback = 1; 23189 else 23190 error = EINVAL; 23191 break; 23192 case RACK_CSPR_IS_FCC: /* URL:csprisfcc */ 23193 if (optval > 0) 23194 rack->cspr_is_fcc = 1; 23195 else 23196 rack->cspr_is_fcc = 0; 23197 break; 23198 case TCP_TIMELY_DYN_ADJ: 23199 RACK_OPTS_INC(tcp_timely_dyn); 23200 if (optval == 0) 23201 rack->rc_gp_dyn_mul = 0; 23202 else { 23203 rack->rc_gp_dyn_mul = 1; 23204 if (optval >= 100) { 23205 /* 23206 * If the user sets something 100 or more 23207 * its the gp_ca value. 23208 */ 23209 rack->r_ctl.rack_per_of_gp_ca = optval; 23210 } 23211 } 23212 break; 23213 case TCP_RACK_DO_DETECTION: 23214 error = EINVAL; 23215 break; 23216 case TCP_RACK_TLP_USE: 23217 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 23218 error = EINVAL; 23219 break; 23220 } 23221 RACK_OPTS_INC(tcp_tlp_use); 23222 rack->rack_tlp_threshold_use = optval; 23223 break; 23224 case TCP_RACK_TLP_REDUCE: 23225 /* RACK TLP cwnd reduction (bool) */ 23226 RACK_OPTS_INC(tcp_rack_tlp_reduce); 23227 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 23228 break; 23229 /* Pacing related ones */ 23230 case TCP_RACK_PACE_ALWAYS: 23231 /* 23232 * zero is old rack method, 1 is new 23233 * method using a pacing rate. 
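 * (Example: enabling this while pacing is already on returns
 * EALREADY; otherwise the connection must first obtain a pacer slot
 * via tcp_can_enable_pacing(), getting ENOSPC if the global limit is
 * exhausted, before rc_always_pace and RACK_REG_PACING are set
 * below.)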
23234 */ 23235 RACK_OPTS_INC(tcp_rack_pace_always); 23236 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 23237 error = EPERM; 23238 break; 23239 } 23240 if (optval > 0) { 23241 if (rack->rc_always_pace) { 23242 error = EALREADY; 23243 break; 23244 } else if (tcp_can_enable_pacing()) { 23245 rack->r_ctl.pacing_method |= RACK_REG_PACING; 23246 rack->rc_always_pace = 1; 23247 if (rack->rack_hibeta) 23248 rack_set_cc_pacing(rack); 23249 } 23250 else { 23251 error = ENOSPC; 23252 break; 23253 } 23254 } else { 23255 if (rack->rc_always_pace == 1) { 23256 rack_remove_pacing(rack); 23257 } 23258 } 23259 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 23260 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23261 else 23262 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 23263 /* A rate may be set irate or other, if so set seg size */ 23264 rack_update_seg(rack); 23265 break; 23266 case TCP_BBR_RACK_INIT_RATE: 23267 RACK_OPTS_INC(tcp_initial_rate); 23268 val = optval; 23269 /* Change from kbits per second to bytes per second */ 23270 val *= 1000; 23271 val /= 8; 23272 rack->r_ctl.init_rate = val; 23273 if (rack->rc_always_pace) 23274 rack_update_seg(rack); 23275 break; 23276 case TCP_BBR_IWINTSO: 23277 error = EINVAL; 23278 break; 23279 case TCP_RACK_FORCE_MSEG: 23280 RACK_OPTS_INC(tcp_rack_force_max_seg); 23281 if (optval) 23282 rack->rc_force_max_seg = 1; 23283 else 23284 rack->rc_force_max_seg = 0; 23285 break; 23286 case TCP_RACK_PACE_MIN_SEG: 23287 RACK_OPTS_INC(tcp_rack_min_seg); 23288 rack->r_ctl.rc_user_set_min_segs = (0x0000ffff & optval); 23289 rack_set_pace_segments(tp, rack, __LINE__, NULL); 23290 break; 23291 case TCP_RACK_PACE_MAX_SEG: 23292 /* Max segments size in a pace in bytes */ 23293 RACK_OPTS_INC(tcp_rack_max_seg); 23294 if ((rack->dgp_on == 1) && 23295 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { 23296 /* 23297 * If we set a max-seg and are doing DGP then 23298 * we now fall under the pacing limits not the 23299 * DGP ones. 23300 */ 23301 if (tcp_can_enable_pacing() == 0) { 23302 error = ENOSPC; 23303 break; 23304 } 23305 /* 23306 * Now change up the flags and counts to be correct. 23307 */ 23308 rack->r_ctl.pacing_method |= RACK_REG_PACING; 23309 tcp_dec_dgp_pacing_cnt(); 23310 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 23311 } 23312 if (optval <= MAX_USER_SET_SEG) 23313 rack->rc_user_set_max_segs = optval; 23314 else 23315 rack->rc_user_set_max_segs = MAX_USER_SET_SEG; 23316 rack_set_pace_segments(tp, rack, __LINE__, NULL); 23317 break; 23318 case TCP_RACK_PACE_RATE_REC: 23319 /* Set the fixed pacing rate in Bytes per second ca */ 23320 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 23321 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 23322 error = EPERM; 23323 break; 23324 } 23325 if (rack->dgp_on) { 23326 /* 23327 * We are already pacing another 23328 * way. 
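 * (For example, a connection already running DGP, e.g. via
 * TCP_RACK_PROFILE 1, must drop that first, so EBUSY is returned
 * here; with DGP off, a value such as 1250000 bytes/sec (about
 * 10 Mb/s) becomes the recovery rate and also seeds the CA and SS
 * rates below when those are still zero.)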
23329 */ 23330 error = EBUSY; 23331 break; 23332 } 23333 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 23334 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 23335 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 23336 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 23337 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 23338 rack->use_fixed_rate = 1; 23339 if (rack->rack_hibeta) 23340 rack_set_cc_pacing(rack); 23341 rack_log_pacing_delay_calc(rack, 23342 rack->r_ctl.rc_fixed_pacing_rate_ss, 23343 rack->r_ctl.rc_fixed_pacing_rate_ca, 23344 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 23345 __LINE__, NULL,0); 23346 break; 23347 23348 case TCP_RACK_PACE_RATE_SS: 23349 /* Set the fixed pacing rate in Bytes per second ca */ 23350 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 23351 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 23352 error = EPERM; 23353 break; 23354 } 23355 if (rack->dgp_on) { 23356 /* 23357 * We are already pacing another 23358 * way. 23359 */ 23360 error = EBUSY; 23361 break; 23362 } 23363 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 23364 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 23365 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 23366 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 23367 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 23368 rack->use_fixed_rate = 1; 23369 if (rack->rack_hibeta) 23370 rack_set_cc_pacing(rack); 23371 rack_log_pacing_delay_calc(rack, 23372 rack->r_ctl.rc_fixed_pacing_rate_ss, 23373 rack->r_ctl.rc_fixed_pacing_rate_ca, 23374 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 23375 __LINE__, NULL, 0); 23376 break; 23377 23378 case TCP_RACK_PACE_RATE_CA: 23379 /* Set the fixed pacing rate in Bytes per second ca */ 23380 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 23381 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 23382 error = EPERM; 23383 break; 23384 } 23385 if (rack->dgp_on) { 23386 /* 23387 * We are already pacing another 23388 * way. 23389 */ 23390 error = EBUSY; 23391 break; 23392 } 23393 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 23394 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 23395 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 23396 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 23397 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 23398 rack->use_fixed_rate = 1; 23399 if (rack->rack_hibeta) 23400 rack_set_cc_pacing(rack); 23401 rack_log_pacing_delay_calc(rack, 23402 rack->r_ctl.rc_fixed_pacing_rate_ss, 23403 rack->r_ctl.rc_fixed_pacing_rate_ca, 23404 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 23405 __LINE__, NULL, 0); 23406 break; 23407 case TCP_RACK_GP_INCREASE_REC: 23408 RACK_OPTS_INC(tcp_gp_inc_rec); 23409 rack->r_ctl.rack_per_of_gp_rec = optval; 23410 rack_log_pacing_delay_calc(rack, 23411 rack->r_ctl.rack_per_of_gp_ss, 23412 rack->r_ctl.rack_per_of_gp_ca, 23413 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 23414 __LINE__, NULL, 0); 23415 break; 23416 case TCP_RACK_GP_INCREASE_CA: 23417 RACK_OPTS_INC(tcp_gp_inc_ca); 23418 ca = optval; 23419 if (ca < 100) { 23420 /* 23421 * We don't allow any reduction 23422 * over the GP b/w. 23423 */ 23424 error = EINVAL; 23425 break; 23426 } 23427 rack->r_ctl.rack_per_of_gp_ca = ca; 23428 rack_log_pacing_delay_calc(rack, 23429 rack->r_ctl.rack_per_of_gp_ss, 23430 rack->r_ctl.rack_per_of_gp_ca, 23431 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 23432 __LINE__, NULL, 0); 23433 break; 23434 case TCP_RACK_GP_INCREASE_SS: 23435 RACK_OPTS_INC(tcp_gp_inc_ss); 23436 ss = optval; 23437 if (ss < 100) { 23438 /* 23439 * We don't allow any reduction 23440 * over the GP b/w. 
23441 */ 23442 error = EINVAL; 23443 break; 23444 } 23445 rack->r_ctl.rack_per_of_gp_ss = ss; 23446 rack_log_pacing_delay_calc(rack, 23447 rack->r_ctl.rack_per_of_gp_ss, 23448 rack->r_ctl.rack_per_of_gp_ca, 23449 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 23450 __LINE__, NULL, 0); 23451 break; 23452 case TCP_RACK_RR_CONF: 23453 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 23454 if (optval && optval <= 3) 23455 rack->r_rr_config = optval; 23456 else 23457 rack->r_rr_config = 0; 23458 break; 23459 case TCP_PACING_DND: /* URL:dnd */ 23460 if (optval > 0) 23461 rack->rc_pace_dnd = 1; 23462 else 23463 rack->rc_pace_dnd = 0; 23464 break; 23465 case TCP_HDWR_RATE_CAP: 23466 RACK_OPTS_INC(tcp_hdwr_rate_cap); 23467 if (optval) { 23468 if (rack->r_rack_hw_rate_caps == 0) 23469 rack->r_rack_hw_rate_caps = 1; 23470 else 23471 error = EALREADY; 23472 } else { 23473 rack->r_rack_hw_rate_caps = 0; 23474 } 23475 break; 23476 case TCP_DGP_UPPER_BOUNDS: 23477 { 23478 uint8_t val; 23479 val = optval & 0x0000ff; 23480 rack->r_ctl.rack_per_upper_bound_ca = val; 23481 val = (optval >> 16) & 0x0000ff; 23482 rack->r_ctl.rack_per_upper_bound_ss = val; 23483 break; 23484 } 23485 case TCP_SS_EEXIT: /* URL:eexit */ 23486 if (optval > 0) { 23487 rack->r_ctl.gp_rnd_thresh = optval & 0x0ff; 23488 if (optval & 0x10000) { 23489 rack->r_ctl.gate_to_fs = 1; 23490 } else { 23491 rack->r_ctl.gate_to_fs = 0; 23492 } 23493 if (optval & 0x20000) { 23494 rack->r_ctl.use_gp_not_last = 1; 23495 } else { 23496 rack->r_ctl.use_gp_not_last = 0; 23497 } 23498 if (optval & 0xfffc0000) { 23499 uint32_t v; 23500 23501 v = (optval >> 18) & 0x00003fff; 23502 if (v >= 1000) 23503 rack->r_ctl.gp_gain_req = v; 23504 } 23505 } else { 23506 /* We do not do ss early exit at all */ 23507 rack->rc_initial_ss_comp = 1; 23508 rack->r_ctl.gp_rnd_thresh = 0; 23509 } 23510 break; 23511 case TCP_RACK_SPLIT_LIMIT: 23512 RACK_OPTS_INC(tcp_split_limit); 23513 rack->r_ctl.rc_split_limit = optval; 23514 break; 23515 case TCP_BBR_HDWR_PACE: 23516 RACK_OPTS_INC(tcp_hdwr_pacing); 23517 if (optval){ 23518 if (rack->rack_hdrw_pacing == 0) { 23519 rack->rack_hdw_pace_ena = 1; 23520 rack->rack_attempt_hdwr_pace = 0; 23521 } else 23522 error = EALREADY; 23523 } else { 23524 rack->rack_hdw_pace_ena = 0; 23525 #ifdef RATELIMIT 23526 if (rack->r_ctl.crte != NULL) { 23527 rack->rack_hdrw_pacing = 0; 23528 rack->rack_attempt_hdwr_pace = 0; 23529 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 23530 rack->r_ctl.crte = NULL; 23531 } 23532 #endif 23533 } 23534 break; 23535 /* End Pacing related ones */ 23536 case TCP_RACK_PRR_SENDALOT: 23537 /* Allow PRR to send more than one seg */ 23538 RACK_OPTS_INC(tcp_rack_prr_sendalot); 23539 rack->r_ctl.rc_prr_sendalot = optval; 23540 break; 23541 case TCP_RACK_MIN_TO: 23542 /* Minimum time between rack t-o's in ms */ 23543 RACK_OPTS_INC(tcp_rack_min_to); 23544 rack->r_ctl.rc_min_to = optval; 23545 break; 23546 case TCP_RACK_EARLY_SEG: 23547 /* If early recovery max segments */ 23548 RACK_OPTS_INC(tcp_rack_early_seg); 23549 rack->r_ctl.rc_early_recovery_segs = optval; 23550 break; 23551 case TCP_RACK_ENABLE_HYSTART: 23552 { 23553 if (optval) { 23554 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 23555 if (rack_do_hystart > RACK_HYSTART_ON) 23556 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 23557 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 23558 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 23559 } else { 23560 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 23561 } 23562 } 23563 break; 23564 case 
TCP_RACK_REORD_THRESH:
		/* RACK reorder threshold (shift amount) */
		RACK_OPTS_INC(tcp_rack_reord_thresh);
		if ((optval > 0) && (optval < 31))
			rack->r_ctl.rc_reorder_shift = optval;
		else
			error = EINVAL;
		break;
	case TCP_RACK_REORD_FADE:
		/* Does the reordering state fade after a given time in ms? */
		RACK_OPTS_INC(tcp_rack_reord_fade);
		rack->r_ctl.rc_reorder_fade = optval;
		break;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold, i.e. srtt + (srtt/N) */
		RACK_OPTS_INC(tcp_rack_tlp_thresh);
		if (optval)
			rack->r_ctl.rc_tlp_threshold = optval;
		else
			error = EINVAL;
		break;
	case TCP_BBR_USE_RACK_RR:
		RACK_OPTS_INC(tcp_rack_rr);
		if (optval)
			rack->use_rack_rr = 1;
		else
			rack->use_rack_rr = 0;
		break;
	case TCP_RACK_PKT_DELAY:
		/* RACK-added delay in ms, i.e. rack-rtt + reord + N */
		RACK_OPTS_INC(tcp_rack_pkt_delay);
		rack->r_ctl.rc_pkt_delay = optval;
		break;
	case TCP_DELACK:
		RACK_OPTS_INC(tcp_rack_delayed_ack);
		if (optval == 0)
			tp->t_delayed_ack = 0;
		else
			tp->t_delayed_ack = 1;
		if (tp->t_flags & TF_DELACK) {
			tp->t_flags &= ~TF_DELACK;
			tp->t_flags |= TF_ACKNOW;
			NET_EPOCH_ENTER(et);
			rack_output(tp);
			NET_EPOCH_EXIT(et);
		}
		break;

	case TCP_BBR_RACK_RTT_USE:
		RACK_OPTS_INC(tcp_rack_rtt_use);
		if ((optval != USE_RTT_HIGH) &&
		    (optval != USE_RTT_LOW) &&
		    (optval != USE_RTT_AVG))
			error = EINVAL;
		else
			rack->r_ctl.rc_rate_sample_method = optval;
		break;
	case TCP_HONOR_HPTS_MIN:
		RACK_OPTS_INC(tcp_honor_hpts);
		if (optval) {
			rack->r_use_hpts_min = 1;
			/*
			 * Must be between 2 and 80% to be a reduction,
			 * else we keep the default (10%).
			 */
			if ((optval > 1) && (optval <= 80)) {
				rack->r_ctl.max_reduction = optval;
			}
		} else
			rack->r_use_hpts_min = 0;
		break;
	case TCP_REC_IS_DYN:			/* URL:dynrec */
		RACK_OPTS_INC(tcp_dyn_rec);
		if (optval)
			rack->rc_gp_no_rec_chg = 1;
		else
			rack->rc_gp_no_rec_chg = 0;
		break;
	case TCP_NO_TIMELY:
		RACK_OPTS_INC(tcp_notimely);
		if (optval) {
			rack->rc_skip_timely = 1;
			rack->r_ctl.rack_per_of_gp_rec = 90;
			rack->r_ctl.rack_per_of_gp_ca = 100;
			rack->r_ctl.rack_per_of_gp_ss = 250;
		} else {
			rack->rc_skip_timely = 0;
		}
		break;
	case TCP_GP_USE_LTBW:
		if (optval == 0) {
			rack->use_lesser_lt_bw = 0;
			rack->dis_lt_bw = 1;
		} else if (optval == 1) {
			rack->use_lesser_lt_bw = 1;
			rack->dis_lt_bw = 0;
		} else if (optval == 2) {
			rack->use_lesser_lt_bw = 0;
			rack->dis_lt_bw = 0;
		}
		break;
	case TCP_DATA_AFTER_CLOSE:
		RACK_OPTS_INC(tcp_data_after_close);
		if (optval)
			rack->rc_allow_data_af_clo = 1;
		else
			rack->rc_allow_data_af_clo = 0;
		break;
	default:
		break;
	}
	tcp_log_socket_option(tp, sopt_name, optval, error);
	return (error);
}

static void
rack_inherit(struct tcpcb *tp, struct inpcb *parent)
{
	/*
	 * A new connection has been created (tp) and
	 * the parent is the inpcb given.
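	 * (This is reached through the stack's tfb_inherit hook, typically
	 * when the new connection was spawned from a listening socket that
	 * already uses this stack, so socket options configured on the
	 * listener carry over to accepted connections.  As an illustrative
	 * sketch only, a server could do, before listen(2):
	 *
	 *	int one = 1;
	 *	setsockopt(lfd, IPPROTO_TCP, TCP_RACK_PACE_TO_FILL,
	 *	    &one, sizeof(one));
	 *
	 * and each accepted connection would then start with fillcw
	 * enabled, subject to the copies made below.)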
We want to 23685 * apply a read-lock to the parent (we are already 23686 * holding a write lock on the tp) and copy anything 23687 * out of the rack specific data as long as its tfb is 23688 * the same as ours i.e. we are the same stack. Otherwise 23689 * we just return. 23690 */ 23691 struct tcpcb *par; 23692 struct tcp_rack *dest, *src; 23693 int cnt = 0; 23694 23695 par = intotcpcb(parent); 23696 if (par->t_fb != tp->t_fb) { 23697 /* Not the same stack */ 23698 tcp_log_socket_option(tp, 0, 0, 1); 23699 return; 23700 } 23701 /* Ok if we reach here lets setup the two rack pointers */ 23702 dest = (struct tcp_rack *)tp->t_fb_ptr; 23703 src = (struct tcp_rack *)par->t_fb_ptr; 23704 if ((src == NULL) || (dest == NULL)) { 23705 /* Huh? */ 23706 tcp_log_socket_option(tp, 0, 0, 2); 23707 return; 23708 } 23709 /* Now copy out anything we wish to inherit i.e. things in socket-options */ 23710 /* TCP_RACK_PROFILE we can't know but we can set DGP if its on */ 23711 if ((src->dgp_on) && (dest->dgp_on == 0)) { 23712 /* Profile 1 had to be set via sock opt */ 23713 rack_set_dgp(dest); 23714 cnt++; 23715 } 23716 /* TCP_RACK_SET_RXT_OPTIONS */ 23717 if (dest->full_size_rxt != src->full_size_rxt) { 23718 dest->full_size_rxt = src->full_size_rxt; 23719 cnt++; 23720 } 23721 if (dest->shape_rxt_to_pacing_min != src->shape_rxt_to_pacing_min) { 23722 dest->shape_rxt_to_pacing_min = src->shape_rxt_to_pacing_min; 23723 cnt++; 23724 } 23725 /* TCP_RACK_DSACK_OPT */ 23726 if (dest->rc_rack_tmr_std_based != src->rc_rack_tmr_std_based) { 23727 dest->rc_rack_tmr_std_based = src->rc_rack_tmr_std_based; 23728 cnt++; 23729 } 23730 if (dest->rc_rack_use_dsack != src->rc_rack_use_dsack) { 23731 dest->rc_rack_use_dsack = src->rc_rack_use_dsack; 23732 cnt++; 23733 } 23734 /* TCP_RACK_PACING_DIVISOR */ 23735 if (dest->r_ctl.pace_len_divisor != src->r_ctl.pace_len_divisor) { 23736 dest->r_ctl.pace_len_divisor = src->r_ctl.pace_len_divisor; 23737 cnt++; 23738 } 23739 /* TCP_RACK_HI_BETA */ 23740 if (src->rack_hibeta != dest->rack_hibeta) { 23741 cnt++; 23742 if (src->rack_hibeta) { 23743 dest->r_ctl.rc_saved_beta = src->r_ctl.rc_saved_beta; 23744 dest->rack_hibeta = 1; 23745 } else { 23746 dest->rack_hibeta = 0; 23747 } 23748 } 23749 /* TCP_RACK_TIMER_SLOP */ 23750 if (dest->r_ctl.timer_slop != src->r_ctl.timer_slop) { 23751 dest->r_ctl.timer_slop = src->r_ctl.timer_slop; 23752 cnt++; 23753 } 23754 /* TCP_RACK_PACING_BETA_ECN */ 23755 if (dest->r_ctl.rc_saved_beta_ecn != src->r_ctl.rc_saved_beta_ecn) { 23756 dest->r_ctl.rc_saved_beta_ecn = src->r_ctl.rc_saved_beta_ecn; 23757 cnt++; 23758 } 23759 /* We do not do TCP_DEFER_OPTIONS */ 23760 /* TCP_RACK_MEASURE_CNT */ 23761 if (dest->r_ctl.req_measurements != src->r_ctl.req_measurements) { 23762 dest->r_ctl.req_measurements = src->r_ctl.req_measurements; 23763 cnt++; 23764 } 23765 /* TCP_HDWR_UP_ONLY */ 23766 if (dest->r_up_only != src->r_up_only) { 23767 dest->r_up_only = src->r_up_only; 23768 cnt++; 23769 } 23770 /* TCP_FILLCW_RATE_CAP */ 23771 if (dest->r_ctl.fillcw_cap != src->r_ctl.fillcw_cap) { 23772 dest->r_ctl.fillcw_cap = src->r_ctl.fillcw_cap; 23773 cnt++; 23774 } 23775 /* TCP_PACING_RATE_CAP */ 23776 if (dest->r_ctl.bw_rate_cap != src->r_ctl.bw_rate_cap) { 23777 dest->r_ctl.bw_rate_cap = src->r_ctl.bw_rate_cap; 23778 cnt++; 23779 } 23780 /* A listener can't set TCP_HYBRID_PACING */ 23781 /* TCP_SIDECHAN_DIS */ 23782 if (dest->r_ctl.side_chan_dis_mask != src->r_ctl.side_chan_dis_mask) { 23783 dest->r_ctl.side_chan_dis_mask = src->r_ctl.side_chan_dis_mask; 23784 cnt++; 
23785 } 23786 /* TCP_SHARED_CWND_TIME_LIMIT */ 23787 if (dest->r_limit_scw != src->r_limit_scw) { 23788 dest->r_limit_scw = src->r_limit_scw; 23789 cnt++; 23790 } 23791 /* TCP_RACK_PACE_TO_FILL */ 23792 if (dest->rc_pace_to_cwnd != src->rc_pace_to_cwnd) { 23793 dest->rc_pace_to_cwnd = src->rc_pace_to_cwnd; 23794 cnt++; 23795 } 23796 if (dest->rc_pace_fill_if_rttin_range != src->rc_pace_fill_if_rttin_range) { 23797 dest->rc_pace_fill_if_rttin_range = src->rc_pace_fill_if_rttin_range; 23798 cnt++; 23799 } 23800 if (dest->rtt_limit_mul != src->rtt_limit_mul) { 23801 dest->rtt_limit_mul = src->rtt_limit_mul; 23802 cnt++; 23803 } 23804 /* TCP_RACK_NO_PUSH_AT_MAX */ 23805 if (dest->r_ctl.rc_no_push_at_mrtt != src->r_ctl.rc_no_push_at_mrtt) { 23806 dest->r_ctl.rc_no_push_at_mrtt = src->r_ctl.rc_no_push_at_mrtt; 23807 cnt++; 23808 } 23809 /* TCP_SHARED_CWND_ENABLE */ 23810 if (dest->rack_enable_scwnd != src->rack_enable_scwnd) { 23811 dest->rack_enable_scwnd = src->rack_enable_scwnd; 23812 cnt++; 23813 } 23814 /* TCP_USE_CMP_ACKS */ 23815 if (dest->r_use_cmp_ack != src->r_use_cmp_ack) { 23816 dest->r_use_cmp_ack = src->r_use_cmp_ack; 23817 cnt++; 23818 } 23819 23820 if (dest->r_mbuf_queue != src->r_mbuf_queue) { 23821 dest->r_mbuf_queue = src->r_mbuf_queue; 23822 cnt++; 23823 } 23824 /* TCP_RACK_MBUF_QUEUE */ 23825 if (dest->r_mbuf_queue != src->r_mbuf_queue) { 23826 dest->r_mbuf_queue = src->r_mbuf_queue; 23827 cnt++; 23828 } 23829 if (dest->r_mbuf_queue || dest->rc_always_pace || dest->r_use_cmp_ack) { 23830 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23831 } else { 23832 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 23833 } 23834 if (dest->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) { 23835 tp->t_flags2 |= TF2_MBUF_ACKCMP; 23836 } 23837 /* TCP_RACK_NONRXT_CFG_RATE */ 23838 if (dest->rack_rec_nonrxt_use_cr != src->rack_rec_nonrxt_use_cr) { 23839 dest->rack_rec_nonrxt_use_cr = src->rack_rec_nonrxt_use_cr; 23840 cnt++; 23841 } 23842 /* TCP_NO_PRR */ 23843 if (dest->rack_no_prr != src->rack_no_prr) { 23844 dest->rack_no_prr = src->rack_no_prr; 23845 cnt++; 23846 } 23847 if (dest->no_prr_addback != src->no_prr_addback) { 23848 dest->no_prr_addback = src->no_prr_addback; 23849 cnt++; 23850 } 23851 /* RACK_CSPR_IS_FCC */ 23852 if (dest->cspr_is_fcc != src->cspr_is_fcc) { 23853 dest->cspr_is_fcc = src->cspr_is_fcc; 23854 cnt++; 23855 } 23856 /* TCP_TIMELY_DYN_ADJ */ 23857 if (dest->rc_gp_dyn_mul != src->rc_gp_dyn_mul) { 23858 dest->rc_gp_dyn_mul = src->rc_gp_dyn_mul; 23859 cnt++; 23860 } 23861 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { 23862 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; 23863 cnt++; 23864 } 23865 /* TCP_RACK_TLP_USE */ 23866 if (dest->rack_tlp_threshold_use != src->rack_tlp_threshold_use) { 23867 dest->rack_tlp_threshold_use = src->rack_tlp_threshold_use; 23868 cnt++; 23869 } 23870 /* we don't allow inheritence of TCP_RACK_PACE_ALWAYS */ 23871 /* TCP_BBR_RACK_INIT_RATE */ 23872 if (dest->r_ctl.init_rate != src->r_ctl.init_rate) { 23873 dest->r_ctl.init_rate = src->r_ctl.init_rate; 23874 cnt++; 23875 } 23876 /* TCP_RACK_FORCE_MSEG */ 23877 if (dest->rc_force_max_seg != src->rc_force_max_seg) { 23878 dest->rc_force_max_seg = src->rc_force_max_seg; 23879 cnt++; 23880 } 23881 /* TCP_RACK_PACE_MIN_SEG */ 23882 if (dest->r_ctl.rc_user_set_min_segs != src->r_ctl.rc_user_set_min_segs) { 23883 dest->r_ctl.rc_user_set_min_segs = src->r_ctl.rc_user_set_min_segs; 23884 cnt++; 23885 } 23886 /* we don't allow TCP_RACK_PACE_MAX_SEG */ 23887 /* TCP_RACK_PACE_RATE_REC, 
TCP_RACK_PACE_RATE_SS, TCP_RACK_PACE_RATE_CA */ 23888 if (dest->r_ctl.rc_fixed_pacing_rate_ca != src->r_ctl.rc_fixed_pacing_rate_ca) { 23889 dest->r_ctl.rc_fixed_pacing_rate_ca = src->r_ctl.rc_fixed_pacing_rate_ca; 23890 cnt++; 23891 } 23892 if (dest->r_ctl.rc_fixed_pacing_rate_ss != src->r_ctl.rc_fixed_pacing_rate_ss) { 23893 dest->r_ctl.rc_fixed_pacing_rate_ss = src->r_ctl.rc_fixed_pacing_rate_ss; 23894 cnt++; 23895 } 23896 if (dest->r_ctl.rc_fixed_pacing_rate_rec != src->r_ctl.rc_fixed_pacing_rate_rec) { 23897 dest->r_ctl.rc_fixed_pacing_rate_rec = src->r_ctl.rc_fixed_pacing_rate_rec; 23898 cnt++; 23899 } 23900 /* TCP_RACK_GP_INCREASE_REC, TCP_RACK_GP_INCREASE_CA, TCP_RACK_GP_INCREASE_SS */ 23901 if (dest->r_ctl.rack_per_of_gp_rec != src->r_ctl.rack_per_of_gp_rec) { 23902 dest->r_ctl.rack_per_of_gp_rec = src->r_ctl.rack_per_of_gp_rec; 23903 cnt++; 23904 } 23905 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { 23906 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; 23907 cnt++; 23908 } 23909 23910 if (dest->r_ctl.rack_per_of_gp_ss != src->r_ctl.rack_per_of_gp_ss) { 23911 dest->r_ctl.rack_per_of_gp_ss = src->r_ctl.rack_per_of_gp_ss; 23912 cnt++; 23913 } 23914 /* TCP_RACK_RR_CONF */ 23915 if (dest->r_rr_config != src->r_rr_config) { 23916 dest->r_rr_config = src->r_rr_config; 23917 cnt++; 23918 } 23919 /* TCP_PACING_DND */ 23920 if (dest->rc_pace_dnd != src->rc_pace_dnd) { 23921 dest->rc_pace_dnd = src->rc_pace_dnd; 23922 cnt++; 23923 } 23924 /* TCP_HDWR_RATE_CAP */ 23925 if (dest->r_rack_hw_rate_caps != src->r_rack_hw_rate_caps) { 23926 dest->r_rack_hw_rate_caps = src->r_rack_hw_rate_caps; 23927 cnt++; 23928 } 23929 /* TCP_DGP_UPPER_BOUNDS */ 23930 if (dest->r_ctl.rack_per_upper_bound_ca != src->r_ctl.rack_per_upper_bound_ca) { 23931 dest->r_ctl.rack_per_upper_bound_ca = src->r_ctl.rack_per_upper_bound_ca; 23932 cnt++; 23933 } 23934 if (dest->r_ctl.rack_per_upper_bound_ss != src->r_ctl.rack_per_upper_bound_ss) { 23935 dest->r_ctl.rack_per_upper_bound_ss = src->r_ctl.rack_per_upper_bound_ss; 23936 cnt++; 23937 } 23938 /* TCP_SS_EEXIT */ 23939 if (dest->r_ctl.gp_rnd_thresh != src->r_ctl.gp_rnd_thresh) { 23940 dest->r_ctl.gp_rnd_thresh = src->r_ctl.gp_rnd_thresh; 23941 cnt++; 23942 } 23943 if (dest->r_ctl.gate_to_fs != src->r_ctl.gate_to_fs) { 23944 dest->r_ctl.gate_to_fs = src->r_ctl.gate_to_fs; 23945 cnt++; 23946 } 23947 if (dest->r_ctl.use_gp_not_last != src->r_ctl.use_gp_not_last) { 23948 dest->r_ctl.use_gp_not_last = src->r_ctl.use_gp_not_last; 23949 cnt++; 23950 } 23951 if (dest->r_ctl.gp_gain_req != src->r_ctl.gp_gain_req) { 23952 dest->r_ctl.gp_gain_req = src->r_ctl.gp_gain_req; 23953 cnt++; 23954 } 23955 /* TCP_BBR_HDWR_PACE */ 23956 if (dest->rack_hdw_pace_ena != src->rack_hdw_pace_ena) { 23957 dest->rack_hdw_pace_ena = src->rack_hdw_pace_ena; 23958 cnt++; 23959 } 23960 if (dest->rack_attempt_hdwr_pace != src->rack_attempt_hdwr_pace) { 23961 dest->rack_attempt_hdwr_pace = src->rack_attempt_hdwr_pace; 23962 cnt++; 23963 } 23964 /* TCP_RACK_PRR_SENDALOT */ 23965 if (dest->r_ctl.rc_prr_sendalot != src->r_ctl.rc_prr_sendalot) { 23966 dest->r_ctl.rc_prr_sendalot = src->r_ctl.rc_prr_sendalot; 23967 cnt++; 23968 } 23969 /* TCP_RACK_MIN_TO */ 23970 if (dest->r_ctl.rc_min_to != src->r_ctl.rc_min_to) { 23971 dest->r_ctl.rc_min_to = src->r_ctl.rc_min_to; 23972 cnt++; 23973 } 23974 /* TCP_RACK_EARLY_SEG */ 23975 if (dest->r_ctl.rc_early_recovery_segs != src->r_ctl.rc_early_recovery_segs) { 23976 dest->r_ctl.rc_early_recovery_segs = 
	    src->r_ctl.rc_early_recovery_segs;
		cnt++;
	}
	/* TCP_RACK_ENABLE_HYSTART */
	if (par->t_ccv.flags != tp->t_ccv.flags) {
		cnt++;
		if (par->t_ccv.flags & CCF_HYSTART_ALLOWED) {
			tp->t_ccv.flags |= CCF_HYSTART_ALLOWED;
			if (rack_do_hystart > RACK_HYSTART_ON)
				tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND;
			if (rack_do_hystart > RACK_HYSTART_ON_W_SC)
				tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH;
		} else {
			tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH);
		}
	}
	/* TCP_RACK_REORD_THRESH */
	if (dest->r_ctl.rc_reorder_shift != src->r_ctl.rc_reorder_shift) {
		dest->r_ctl.rc_reorder_shift = src->r_ctl.rc_reorder_shift;
		cnt++;
	}
	/* TCP_RACK_REORD_FADE */
	if (dest->r_ctl.rc_reorder_fade != src->r_ctl.rc_reorder_fade) {
		dest->r_ctl.rc_reorder_fade = src->r_ctl.rc_reorder_fade;
		cnt++;
	}
	/* TCP_RACK_TLP_THRESH */
	if (dest->r_ctl.rc_tlp_threshold != src->r_ctl.rc_tlp_threshold) {
		dest->r_ctl.rc_tlp_threshold = src->r_ctl.rc_tlp_threshold;
		cnt++;
	}
	/* TCP_BBR_USE_RACK_RR */
	if (dest->use_rack_rr != src->use_rack_rr) {
		dest->use_rack_rr = src->use_rack_rr;
		cnt++;
	}
	/* TCP_RACK_PKT_DELAY */
	if (dest->r_ctl.rc_pkt_delay != src->r_ctl.rc_pkt_delay) {
		dest->r_ctl.rc_pkt_delay = src->r_ctl.rc_pkt_delay;
		cnt++;
	}
	/* TCP_DELACK will get copied via the main code if applicable */
	/* TCP_BBR_RACK_RTT_USE */
	if (dest->r_ctl.rc_rate_sample_method != src->r_ctl.rc_rate_sample_method) {
		dest->r_ctl.rc_rate_sample_method = src->r_ctl.rc_rate_sample_method;
		cnt++;
	}
	/* TCP_HONOR_HPTS_MIN */
	if (dest->r_use_hpts_min != src->r_use_hpts_min) {
		dest->r_use_hpts_min = src->r_use_hpts_min;
		cnt++;
	}
	if (dest->r_ctl.max_reduction != src->r_ctl.max_reduction) {
		dest->r_ctl.max_reduction = src->r_ctl.max_reduction;
		cnt++;
	}
	/* TCP_REC_IS_DYN */
	if (dest->rc_gp_no_rec_chg != src->rc_gp_no_rec_chg) {
		dest->rc_gp_no_rec_chg = src->rc_gp_no_rec_chg;
		cnt++;
	}
	if (dest->rc_skip_timely != src->rc_skip_timely) {
		dest->rc_skip_timely = src->rc_skip_timely;
		cnt++;
	}
	/* TCP_DATA_AFTER_CLOSE */
	if (dest->rc_allow_data_af_clo != src->rc_allow_data_af_clo) {
		dest->rc_allow_data_af_clo = src->rc_allow_data_af_clo;
		cnt++;
	}
	/* TCP_GP_USE_LTBW */
	if (src->use_lesser_lt_bw != dest->use_lesser_lt_bw) {
		dest->use_lesser_lt_bw = src->use_lesser_lt_bw;
		cnt++;
	}
	if (dest->dis_lt_bw != src->dis_lt_bw) {
		dest->dis_lt_bw = src->dis_lt_bw;
		cnt++;
	}
	tcp_log_socket_option(tp, 0, cnt, 0);
}

static void
rack_apply_deferred_options(struct tcp_rack *rack)
{
	struct deferred_opt_list *dol, *sdol;
	uint32_t s_optval;

	TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) {
		TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
		/* A disadvantage of deferral is that you lose the error return */
		s_optval = (uint32_t)dol->optval;
		(void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval, NULL);
		free(dol, M_TCPDO);
	}
}

static void
rack_hw_tls_change(struct tcpcb *tp, int chg)
{
	/* Update HW TLS state */
	struct tcp_rack *rack;

	rack
= (struct tcp_rack *)tp->t_fb_ptr; 24081 if (chg) 24082 rack->r_ctl.fsb.hw_tls = 1; 24083 else 24084 rack->r_ctl.fsb.hw_tls = 0; 24085 } 24086 24087 static int 24088 rack_pru_options(struct tcpcb *tp, int flags) 24089 { 24090 if (flags & PRUS_OOB) 24091 return (EOPNOTSUPP); 24092 return (0); 24093 } 24094 24095 static bool 24096 rack_wake_check(struct tcpcb *tp) 24097 { 24098 struct tcp_rack *rack; 24099 struct timeval tv; 24100 uint32_t cts; 24101 24102 rack = (struct tcp_rack *)tp->t_fb_ptr; 24103 if (rack->r_ctl.rc_hpts_flags) { 24104 cts = tcp_get_usecs(&tv); 24105 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == PACE_PKT_OUTPUT){ 24106 /* 24107 * Pacing timer is up, check if we are ready. 24108 */ 24109 if (TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) 24110 return (true); 24111 } else if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) != 0) { 24112 /* 24113 * A timer is up, check if we are ready. 24114 */ 24115 if (TSTMP_GEQ(cts, rack->r_ctl.rc_timer_exp)) 24116 return (true); 24117 } 24118 } 24119 return (false); 24120 } 24121 24122 static struct tcp_function_block __tcp_rack = { 24123 .tfb_tcp_block_name = __XSTRING(STACKNAME), 24124 .tfb_tcp_output = rack_output, 24125 .tfb_do_queued_segments = ctf_do_queued_segments, 24126 .tfb_do_segment_nounlock = rack_do_segment_nounlock, 24127 .tfb_tcp_do_segment = rack_do_segment, 24128 .tfb_tcp_ctloutput = rack_ctloutput, 24129 .tfb_tcp_fb_init = rack_init, 24130 .tfb_tcp_fb_fini = rack_fini, 24131 .tfb_tcp_timer_stop_all = rack_stopall, 24132 .tfb_tcp_rexmit_tmr = rack_remxt_tmr, 24133 .tfb_tcp_handoff_ok = rack_handoff_ok, 24134 .tfb_tcp_mtu_chg = rack_mtu_change, 24135 .tfb_pru_options = rack_pru_options, 24136 .tfb_hwtls_change = rack_hw_tls_change, 24137 .tfb_chg_query = rack_chg_query, 24138 .tfb_switch_failed = rack_switch_failed, 24139 .tfb_early_wake_check = rack_wake_check, 24140 .tfb_compute_pipe = rack_compute_pipe, 24141 .tfb_stack_info = rack_stack_information, 24142 .tfb_inherit = rack_inherit, 24143 .tfb_flags = TCP_FUNC_OUTPUT_CANDROP | TCP_FUNC_DEFAULT_OK, 24144 24145 }; 24146 24147 /* 24148 * rack_ctloutput() must drop the inpcb lock before performing copyin on 24149 * socket option arguments. When it re-acquires the lock after the copy, it 24150 * has to revalidate that the connection is still valid for the socket 24151 * option. 24152 */ 24153 static int 24154 rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt) 24155 { 24156 struct inpcb *inp = tptoinpcb(tp); 24157 #ifdef INET 24158 struct ip *ip; 24159 #endif 24160 struct tcp_rack *rack; 24161 struct tcp_hybrid_req hybrid; 24162 uint64_t loptval; 24163 int32_t error = 0, optval; 24164 24165 rack = (struct tcp_rack *)tp->t_fb_ptr; 24166 if (rack == NULL) { 24167 INP_WUNLOCK(inp); 24168 return (EINVAL); 24169 } 24170 #ifdef INET 24171 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 24172 #endif 24173 24174 switch (sopt->sopt_level) { 24175 #ifdef INET6 24176 case IPPROTO_IPV6: 24177 MPASS(inp->inp_vflag & INP_IPV6PROTO); 24178 switch (sopt->sopt_name) { 24179 case IPV6_USE_MIN_MTU: 24180 tcp6_use_min_mtu(tp); 24181 break; 24182 } 24183 INP_WUNLOCK(inp); 24184 return (0); 24185 #endif 24186 #ifdef INET 24187 case IPPROTO_IP: 24188 switch (sopt->sopt_name) { 24189 case IP_TOS: 24190 /* 24191 * The DSCP codepoint has changed, update the fsb. 24192 */ 24193 ip->ip_tos = rack->rc_inp->inp_ip_tos; 24194 break; 24195 case IP_TTL: 24196 /* 24197 * The TTL has changed, update the fsb. 
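			 * (Both this case and IP_TOS above exist because the
			 * fsb caches a prebuilt IP/TCP header for the fast
			 * output path, so header fields changed through the
			 * ordinary IP-level socket options have to be mirrored
			 * into that template.  For example a plain
			 *
			 *	int ttl = 32;
			 *	setsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl));
			 *
			 * from userland lands here once the generic IP code
			 * has updated inp_ip_ttl.)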
24198 */ 24199 ip->ip_ttl = rack->rc_inp->inp_ip_ttl; 24200 break; 24201 } 24202 INP_WUNLOCK(inp); 24203 return (0); 24204 #endif 24205 #ifdef SO_PEERPRIO 24206 case SOL_SOCKET: 24207 switch (sopt->sopt_name) { 24208 case SO_PEERPRIO: /* SC-URL:bs */ 24209 /* Already read in and sanity checked in sosetopt(). */ 24210 if (inp->inp_socket) { 24211 rack->client_bufferlvl = inp->inp_socket->so_peerprio; 24212 } 24213 break; 24214 } 24215 INP_WUNLOCK(inp); 24216 return (0); 24217 #endif 24218 case IPPROTO_TCP: 24219 switch (sopt->sopt_name) { 24220 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */ 24221 /* Pacing related ones */ 24222 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */ 24223 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */ 24224 case TCP_RACK_PACE_MIN_SEG: /* URL:pace_min_seg */ 24225 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */ 24226 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */ 24227 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */ 24228 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss*/ 24229 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */ 24230 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */ 24231 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */ 24232 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */ 24233 case TCP_RACK_RR_CONF: /* URL:rrr_conf */ 24234 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */ 24235 case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */ 24236 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */ 24237 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */ 24238 case TCP_FILLCW_RATE_CAP: /* URL:fillcw_cap */ 24239 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */ 24240 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */ 24241 /* End pacing related */ 24242 case TCP_DELACK: /* URL:delack (in base TCP i.e. tcp_hints along with cc etc ) */ 24243 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */ 24244 case TCP_RACK_MIN_TO: /* URL:min_to */ 24245 case TCP_RACK_EARLY_SEG: /* URL:early_seg */ 24246 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */ 24247 case TCP_RACK_REORD_FADE: /* URL:reord_fade */ 24248 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */ 24249 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */ 24250 case TCP_RACK_TLP_USE: /* URL:tlp_use */ 24251 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */ 24252 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */ 24253 case TCP_NO_PRR: /* URL:noprr */ 24254 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */ 24255 case TCP_DATA_AFTER_CLOSE: /* no URL */ 24256 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */ 24257 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */ 24258 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */ 24259 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */ 24260 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */ 24261 case TCP_RACK_PROFILE: /* URL:profile */ 24262 case TCP_SIDECHAN_DIS: /* URL:scodm */ 24263 case TCP_HYBRID_PACING: /* URL:pacing=hybrid */ 24264 case TCP_USE_CMP_ACKS: /* URL:cmpack */ 24265 case TCP_RACK_ABC_VAL: /* URL:labc */ 24266 case TCP_REC_ABC_VAL: /* URL:reclabc */ 24267 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */ 24268 case TCP_DEFER_OPTIONS: /* URL:defer */ 24269 case TCP_RACK_DSACK_OPT: /* URL:dsack */ 24270 case TCP_RACK_TIMER_SLOP: /* URL:timer_slop */ 24271 case TCP_RACK_ENABLE_HYSTART: /* URL:hystart */ 24272 case TCP_RACK_SET_RXT_OPTIONS: /* URL:rxtsz */ 24273 case TCP_RACK_HI_BETA: /* URL:hibeta */ 24274 case TCP_RACK_SPLIT_LIMIT: /* URL:split */ 24275 case TCP_SS_EEXIT: /* URL:eexit */ 24276 case TCP_DGP_UPPER_BOUNDS: /* URL:upper */ 24277 case TCP_RACK_PACING_DIVISOR: /* URL:divisor */ 24278 
case TCP_PACING_DND: /* URL:dnd */ 24279 case TCP_NO_TIMELY: /* URL:notimely */ 24280 case RACK_CSPR_IS_FCC: /* URL:csprisfcc */ 24281 case TCP_HONOR_HPTS_MIN: /* URL:hptsmin */ 24282 case TCP_REC_IS_DYN: /* URL:dynrec */ 24283 case TCP_GP_USE_LTBW: /* URL:useltbw */ 24284 goto process_opt; 24285 break; 24286 default: 24287 /* Filter off all unknown options to the base stack */ 24288 return (tcp_default_ctloutput(tp, sopt)); 24289 break; 24290 } 24291 default: 24292 INP_WUNLOCK(inp); 24293 return (0); 24294 } 24295 process_opt: 24296 INP_WUNLOCK(inp); 24297 if ((sopt->sopt_name == TCP_PACING_RATE_CAP) || 24298 (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) { 24299 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval)); 24300 /* 24301 * We truncate it down to 32 bits for the socket-option trace this 24302 * means rates > 34Gbps won't show right, but thats probably ok. 24303 */ 24304 optval = (uint32_t)loptval; 24305 } else if (sopt->sopt_name == TCP_HYBRID_PACING) { 24306 error = sooptcopyin(sopt, &hybrid, sizeof(hybrid), sizeof(hybrid)); 24307 } else { 24308 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 24309 /* Save it in 64 bit form too */ 24310 loptval = optval; 24311 } 24312 if (error) 24313 return (error); 24314 INP_WLOCK(inp); 24315 if (tp->t_fb != &__tcp_rack) { 24316 INP_WUNLOCK(inp); 24317 return (ENOPROTOOPT); 24318 } 24319 if (rack->defer_options && (rack->gp_ready == 0) && 24320 (sopt->sopt_name != TCP_DEFER_OPTIONS) && 24321 (sopt->sopt_name != TCP_HYBRID_PACING) && 24322 (sopt->sopt_name != TCP_RACK_SET_RXT_OPTIONS) && 24323 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) && 24324 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) { 24325 /* Options are being deferred */ 24326 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { 24327 INP_WUNLOCK(inp); 24328 return (0); 24329 } else { 24330 /* No memory to defer, fail */ 24331 INP_WUNLOCK(inp); 24332 return (ENOMEM); 24333 } 24334 } 24335 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval, &hybrid); 24336 INP_WUNLOCK(inp); 24337 return (error); 24338 } 24339 24340 static void 24341 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti) 24342 { 24343 24344 INP_WLOCK_ASSERT(tptoinpcb(tp)); 24345 bzero(ti, sizeof(*ti)); 24346 24347 ti->tcpi_state = tp->t_state; 24348 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP)) 24349 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; 24350 if (tp->t_flags & TF_SACK_PERMIT) 24351 ti->tcpi_options |= TCPI_OPT_SACK; 24352 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) { 24353 ti->tcpi_options |= TCPI_OPT_WSCALE; 24354 ti->tcpi_snd_wscale = tp->snd_scale; 24355 ti->tcpi_rcv_wscale = tp->rcv_scale; 24356 } 24357 if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT)) 24358 ti->tcpi_options |= TCPI_OPT_ECN; 24359 if (tp->t_flags & TF_FASTOPEN) 24360 ti->tcpi_options |= TCPI_OPT_TFO; 24361 /* still kept in ticks is t_rcvtime */ 24362 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick; 24363 /* Since we hold everything in precise useconds this is easy */ 24364 ti->tcpi_rtt = tp->t_srtt; 24365 ti->tcpi_rttvar = tp->t_rttvar; 24366 ti->tcpi_rto = tp->t_rxtcur; 24367 ti->tcpi_snd_ssthresh = tp->snd_ssthresh; 24368 ti->tcpi_snd_cwnd = tp->snd_cwnd; 24369 /* 24370 * FreeBSD-specific extension fields for tcp_info. 24371 */ 24372 ti->tcpi_rcv_space = tp->rcv_wnd; 24373 ti->tcpi_rcv_nxt = tp->rcv_nxt; 24374 ti->tcpi_snd_wnd = tp->snd_wnd; 24375 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. 
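	 * (Everything filled in by this function is exported to userland;
	 * a minimal, illustrative way to read it is
	 *
	 *	struct tcp_info ti;
	 *	socklen_t len = sizeof(ti);
	 *	getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len);
	 *
	 * which reaches this function through the TCP_INFO case of
	 * rack_get_sockopt() below.)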
	 */
	ti->tcpi_snd_nxt = tp->snd_nxt;
	ti->tcpi_snd_mss = tp->t_maxseg;
	ti->tcpi_rcv_mss = tp->t_maxseg;
	ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
	ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
	ti->tcpi_snd_zerowin = tp->t_sndzerowin;
	ti->tcpi_total_tlp = tp->t_sndtlppack;
	ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte;
	ti->tcpi_rttmin = tp->t_rttlow;
#ifdef NETFLIX_STATS
	memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo));
#endif
#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE) {
		ti->tcpi_options |= TCPI_OPT_TOE;
		tcp_offload_tcp_info(tp, ti);
	}
#endif
}

static int
rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt)
{
	struct inpcb *inp = tptoinpcb(tp);
	struct tcp_rack *rack;
	int32_t error, optval;
	uint64_t val, loptval;
	struct tcp_info ti;
	/*
	 * Because all our options are either boolean or an int, we can just
	 * pull everything into optval and then unlock and copy. If we ever
	 * add an option that is not an int, then this will have quite an
	 * impact on this routine.
	 */
	error = 0;
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack == NULL) {
		INP_WUNLOCK(inp);
		return (EINVAL);
	}
	switch (sopt->sopt_name) {
	case TCP_INFO:
		/* First get the info filled */
		rack_fill_info(tp, &ti);
		/* Fix up the rtt related fields if needed */
		INP_WUNLOCK(inp);
		error = sooptcopyout(sopt, &ti, sizeof ti);
		return (error);
	/*
	 * Beta is the congestion control value for NewReno that influences how
	 * much of a backoff happens when loss is detected. It is normally set
	 * to 50 for 50%, i.e. the cwnd is reduced to 50% of its previous value
	 * when you exit recovery.
	 */
	case TCP_RACK_PACING_BETA:
		if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0)
			error = EINVAL;
		else if (rack->rc_pacing_cc_set == 0)
			optval = rack->r_ctl.rc_saved_beta;
		else {
			/*
			 * Reach out into the CC data and report back what
			 * I have previously set. Yeah it looks hackish but
			 * we don't want to report the saved values.
			 */
			if (tp->t_ccv.cc_data)
				optval = ((struct newreno *)tp->t_ccv.cc_data)->beta;
			else
				error = EINVAL;
		}
		break;
	/*
	 * Beta_ecn is the congestion control value for NewReno that influences how
	 * much of a backoff happens when an ECN mark is detected. It is normally set
	 * to 80 for 80%, i.e. the cwnd is reduced by 20% of its previous value when
	 * you exit recovery. Note that classic ECN has a beta of 50; it is only
	 * ABE ECN that uses this "lesser" value, but we do too with pacing :)
	 */
	case TCP_RACK_PACING_BETA_ECN:
		if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0)
			error = EINVAL;
		else if (rack->rc_pacing_cc_set == 0)
			optval = rack->r_ctl.rc_saved_beta_ecn;
		else {
			/*
			 * Reach out into the CC data and report back what
			 * I have previously set. Yeah it looks hackish but
			 * we don't want to report the saved values.
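			 * (From userland the effective value can be read back
			 * with a plain getsockopt(); an illustrative sketch:
			 *
			 *	int beta_ecn;
			 *	socklen_t len = sizeof(beta_ecn);
			 *	getsockopt(fd, IPPROTO_TCP, TCP_RACK_PACING_BETA_ECN,
			 *	    &beta_ecn, &len);
			 *
			 * returning either the saved value or, once pacing has
			 * modified the CC module, the live newreno value below.)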
24464 */ 24465 if (tp->t_ccv.cc_data) 24466 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn; 24467 else 24468 error = EINVAL; 24469 } 24470 break; 24471 case TCP_RACK_DSACK_OPT: 24472 optval = 0; 24473 if (rack->rc_rack_tmr_std_based) { 24474 optval |= 1; 24475 } 24476 if (rack->rc_rack_use_dsack) { 24477 optval |= 2; 24478 } 24479 break; 24480 case TCP_RACK_ENABLE_HYSTART: 24481 { 24482 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { 24483 optval = RACK_HYSTART_ON; 24484 if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND) 24485 optval = RACK_HYSTART_ON_W_SC; 24486 if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH) 24487 optval = RACK_HYSTART_ON_W_SC_C; 24488 } else { 24489 optval = RACK_HYSTART_OFF; 24490 } 24491 } 24492 break; 24493 case TCP_RACK_DGP_IN_REC: 24494 error = EINVAL; 24495 break; 24496 case TCP_RACK_HI_BETA: 24497 optval = rack->rack_hibeta; 24498 break; 24499 case TCP_DEFER_OPTIONS: 24500 optval = rack->defer_options; 24501 break; 24502 case TCP_RACK_MEASURE_CNT: 24503 optval = rack->r_ctl.req_measurements; 24504 break; 24505 case TCP_REC_ABC_VAL: 24506 optval = rack->r_use_labc_for_rec; 24507 break; 24508 case TCP_RACK_ABC_VAL: 24509 optval = rack->rc_labc; 24510 break; 24511 case TCP_HDWR_UP_ONLY: 24512 optval= rack->r_up_only; 24513 break; 24514 case TCP_FILLCW_RATE_CAP: 24515 loptval = rack->r_ctl.fillcw_cap; 24516 break; 24517 case TCP_PACING_RATE_CAP: 24518 loptval = rack->r_ctl.bw_rate_cap; 24519 break; 24520 case TCP_RACK_PROFILE: 24521 /* You cannot retrieve a profile, its write only */ 24522 error = EINVAL; 24523 break; 24524 case TCP_SIDECHAN_DIS: 24525 optval = rack->r_ctl.side_chan_dis_mask; 24526 break; 24527 case TCP_HYBRID_PACING: 24528 /* You cannot retrieve hybrid pacing information, its write only */ 24529 error = EINVAL; 24530 break; 24531 case TCP_USE_CMP_ACKS: 24532 optval = rack->r_use_cmp_ack; 24533 break; 24534 case TCP_RACK_PACE_TO_FILL: 24535 optval = rack->rc_pace_to_cwnd; 24536 break; 24537 case TCP_RACK_NO_PUSH_AT_MAX: 24538 optval = rack->r_ctl.rc_no_push_at_mrtt; 24539 break; 24540 case TCP_SHARED_CWND_ENABLE: 24541 optval = rack->rack_enable_scwnd; 24542 break; 24543 case TCP_RACK_NONRXT_CFG_RATE: 24544 optval = rack->rack_rec_nonrxt_use_cr; 24545 break; 24546 case TCP_NO_PRR: 24547 if (rack->rack_no_prr == 1) 24548 optval = 1; 24549 else if (rack->no_prr_addback == 1) 24550 optval = 2; 24551 else 24552 optval = 0; 24553 break; 24554 case TCP_GP_USE_LTBW: 24555 if (rack->dis_lt_bw) { 24556 /* It is not used */ 24557 optval = 0; 24558 } else if (rack->use_lesser_lt_bw) { 24559 /* we use min() */ 24560 optval = 1; 24561 } else { 24562 /* we use max() */ 24563 optval = 2; 24564 } 24565 break; 24566 case TCP_RACK_DO_DETECTION: 24567 error = EINVAL; 24568 break; 24569 case TCP_RACK_MBUF_QUEUE: 24570 /* Now do we use the LRO mbuf-queue feature */ 24571 optval = rack->r_mbuf_queue; 24572 break; 24573 case RACK_CSPR_IS_FCC: 24574 optval = rack->cspr_is_fcc; 24575 break; 24576 case TCP_TIMELY_DYN_ADJ: 24577 optval = rack->rc_gp_dyn_mul; 24578 break; 24579 case TCP_BBR_IWINTSO: 24580 error = EINVAL; 24581 break; 24582 case TCP_RACK_TLP_REDUCE: 24583 /* RACK TLP cwnd reduction (bool) */ 24584 optval = rack->r_ctl.rc_tlp_cwnd_reduce; 24585 break; 24586 case TCP_BBR_RACK_INIT_RATE: 24587 val = rack->r_ctl.init_rate; 24588 /* convert to kbits per sec */ 24589 val *= 8; 24590 val /= 1000; 24591 optval = (uint32_t)val; 24592 break; 24593 case TCP_RACK_FORCE_MSEG: 24594 optval = rack->rc_force_max_seg; 24595 break; 24596 case TCP_RACK_PACE_MIN_SEG: 24597 optval = 
rack->r_ctl.rc_user_set_min_segs; 24598 break; 24599 case TCP_RACK_PACE_MAX_SEG: 24600 /* Max segments in a pace */ 24601 optval = rack->rc_user_set_max_segs; 24602 break; 24603 case TCP_RACK_PACE_ALWAYS: 24604 /* Use the always pace method */ 24605 optval = rack->rc_always_pace; 24606 break; 24607 case TCP_RACK_PRR_SENDALOT: 24608 /* Allow PRR to send more than one seg */ 24609 optval = rack->r_ctl.rc_prr_sendalot; 24610 break; 24611 case TCP_RACK_MIN_TO: 24612 /* Minimum time between rack t-o's in ms */ 24613 optval = rack->r_ctl.rc_min_to; 24614 break; 24615 case TCP_RACK_SPLIT_LIMIT: 24616 optval = rack->r_ctl.rc_split_limit; 24617 break; 24618 case TCP_RACK_EARLY_SEG: 24619 /* If early recovery max segments */ 24620 optval = rack->r_ctl.rc_early_recovery_segs; 24621 break; 24622 case TCP_RACK_REORD_THRESH: 24623 /* RACK reorder threshold (shift amount) */ 24624 optval = rack->r_ctl.rc_reorder_shift; 24625 break; 24626 case TCP_SS_EEXIT: 24627 if (rack->r_ctl.gp_rnd_thresh) { 24628 uint32_t v; 24629 24630 v = rack->r_ctl.gp_gain_req; 24631 v <<= 17; 24632 optval = v | (rack->r_ctl.gp_rnd_thresh & 0xff); 24633 if (rack->r_ctl.gate_to_fs == 1) 24634 optval |= 0x10000; 24635 } else 24636 optval = 0; 24637 break; 24638 case TCP_RACK_REORD_FADE: 24639 /* Does reordering fade after ms time */ 24640 optval = rack->r_ctl.rc_reorder_fade; 24641 break; 24642 case TCP_BBR_USE_RACK_RR: 24643 /* Do we use the rack cheat for rxt */ 24644 optval = rack->use_rack_rr; 24645 break; 24646 case TCP_RACK_RR_CONF: 24647 optval = rack->r_rr_config; 24648 break; 24649 case TCP_HDWR_RATE_CAP: 24650 optval = rack->r_rack_hw_rate_caps; 24651 break; 24652 case TCP_BBR_HDWR_PACE: 24653 optval = rack->rack_hdw_pace_ena; 24654 break; 24655 case TCP_RACK_TLP_THRESH: 24656 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 24657 optval = rack->r_ctl.rc_tlp_threshold; 24658 break; 24659 case TCP_RACK_PKT_DELAY: 24660 /* RACK added ms i.e. 
rack-rtt + reord + N */ 24661 optval = rack->r_ctl.rc_pkt_delay; 24662 break; 24663 case TCP_RACK_TLP_USE: 24664 optval = rack->rack_tlp_threshold_use; 24665 break; 24666 case TCP_PACING_DND: 24667 optval = rack->rc_pace_dnd; 24668 break; 24669 case TCP_RACK_PACE_RATE_CA: 24670 optval = rack->r_ctl.rc_fixed_pacing_rate_ca; 24671 break; 24672 case TCP_RACK_PACE_RATE_SS: 24673 optval = rack->r_ctl.rc_fixed_pacing_rate_ss; 24674 break; 24675 case TCP_RACK_PACE_RATE_REC: 24676 optval = rack->r_ctl.rc_fixed_pacing_rate_rec; 24677 break; 24678 case TCP_DGP_UPPER_BOUNDS: 24679 optval = rack->r_ctl.rack_per_upper_bound_ss; 24680 optval <<= 16; 24681 optval |= rack->r_ctl.rack_per_upper_bound_ca; 24682 break; 24683 case TCP_RACK_GP_INCREASE_SS: 24684 optval = rack->r_ctl.rack_per_of_gp_ca; 24685 break; 24686 case TCP_RACK_GP_INCREASE_CA: 24687 optval = rack->r_ctl.rack_per_of_gp_ss; 24688 break; 24689 case TCP_RACK_PACING_DIVISOR: 24690 optval = rack->r_ctl.pace_len_divisor; 24691 break; 24692 case TCP_BBR_RACK_RTT_USE: 24693 optval = rack->r_ctl.rc_rate_sample_method; 24694 break; 24695 case TCP_DELACK: 24696 optval = tp->t_delayed_ack; 24697 break; 24698 case TCP_DATA_AFTER_CLOSE: 24699 optval = rack->rc_allow_data_af_clo; 24700 break; 24701 case TCP_SHARED_CWND_TIME_LIMIT: 24702 optval = rack->r_limit_scw; 24703 break; 24704 case TCP_HONOR_HPTS_MIN: 24705 if (rack->r_use_hpts_min) 24706 optval = rack->r_ctl.max_reduction; 24707 else 24708 optval = 0; 24709 break; 24710 case TCP_REC_IS_DYN: 24711 optval = rack->rc_gp_no_rec_chg; 24712 break; 24713 case TCP_NO_TIMELY: 24714 optval = rack->rc_skip_timely; 24715 break; 24716 case TCP_RACK_TIMER_SLOP: 24717 optval = rack->r_ctl.timer_slop; 24718 break; 24719 default: 24720 return (tcp_default_ctloutput(tp, sopt)); 24721 break; 24722 } 24723 INP_WUNLOCK(inp); 24724 if (error == 0) { 24725 if ((sopt->sopt_name == TCP_PACING_RATE_CAP) || 24726 (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) 24727 error = sooptcopyout(sopt, &loptval, sizeof loptval); 24728 else 24729 error = sooptcopyout(sopt, &optval, sizeof optval); 24730 } 24731 return (error); 24732 } 24733 24734 static int 24735 rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt) 24736 { 24737 if (sopt->sopt_dir == SOPT_SET) { 24738 return (rack_set_sockopt(tp, sopt)); 24739 } else if (sopt->sopt_dir == SOPT_GET) { 24740 return (rack_get_sockopt(tp, sopt)); 24741 } else { 24742 panic("%s: sopt_dir $%d", __func__, sopt->sopt_dir); 24743 } 24744 } 24745 24746 static const char *rack_stack_names[] = { 24747 __XSTRING(STACKNAME), 24748 #ifdef STACKALIAS 24749 __XSTRING(STACKALIAS), 24750 #endif 24751 }; 24752 24753 static int 24754 rack_ctor(void *mem, int32_t size, void *arg, int32_t how) 24755 { 24756 memset(mem, 0, size); 24757 return (0); 24758 } 24759 24760 static void 24761 rack_dtor(void *mem, int32_t size, void *arg) 24762 { 24763 24764 } 24765 24766 static bool rack_mod_inited = false; 24767 24768 static int 24769 tcp_addrack(module_t mod, int32_t type, void *data) 24770 { 24771 int32_t err = 0; 24772 int num_stacks; 24773 24774 switch (type) { 24775 case MOD_LOAD: 24776 rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map", 24777 sizeof(struct rack_sendmap), 24778 rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0); 24779 24780 rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb", 24781 sizeof(struct tcp_rack), 24782 rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); 24783 24784 sysctl_ctx_init(&rack_sysctl_ctx); 24785 rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 24786 
SYSCTL_STATIC_CHILDREN(_net_inet_tcp), 24787 OID_AUTO, 24788 #ifdef STACKALIAS 24789 __XSTRING(STACKALIAS), 24790 #else 24791 __XSTRING(STACKNAME), 24792 #endif 24793 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 24794 ""); 24795 if (rack_sysctl_root == NULL) { 24796 printf("Failed to add sysctl node\n"); 24797 err = EFAULT; 24798 goto free_uma; 24799 } 24800 rack_init_sysctls(); 24801 num_stacks = nitems(rack_stack_names); 24802 err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK, 24803 rack_stack_names, &num_stacks); 24804 if (err) { 24805 printf("Failed to register %s stack name for " 24806 "%s module\n", rack_stack_names[num_stacks], 24807 __XSTRING(MODNAME)); 24808 sysctl_ctx_free(&rack_sysctl_ctx); 24809 free_uma: 24810 uma_zdestroy(rack_zone); 24811 uma_zdestroy(rack_pcb_zone); 24812 rack_counter_destroy(); 24813 printf("Failed to register rack module -- err:%d\n", err); 24814 return (err); 24815 } 24816 tcp_lro_reg_mbufq(); 24817 rack_mod_inited = true; 24818 break; 24819 case MOD_QUIESCE: 24820 err = deregister_tcp_functions(&__tcp_rack, true, false); 24821 break; 24822 case MOD_UNLOAD: 24823 err = deregister_tcp_functions(&__tcp_rack, false, true); 24824 if (err == EBUSY) 24825 break; 24826 if (rack_mod_inited) { 24827 uma_zdestroy(rack_zone); 24828 uma_zdestroy(rack_pcb_zone); 24829 sysctl_ctx_free(&rack_sysctl_ctx); 24830 rack_counter_destroy(); 24831 rack_mod_inited = false; 24832 } 24833 tcp_lro_dereg_mbufq(); 24834 err = 0; 24835 break; 24836 default: 24837 return (EOPNOTSUPP); 24838 } 24839 return (err); 24840 } 24841 24842 static moduledata_t tcp_rack = { 24843 .name = __XSTRING(MODNAME), 24844 .evhand = tcp_addrack, 24845 .priv = 0 24846 }; 24847 24848 MODULE_VERSION(MODNAME, 1); 24849 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY); 24850 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1); 24851 24852 #endif /* #if !defined(INET) && !defined(INET6) */ 24853
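
/*
 * Usage sketch (illustrative, not normative): once this module is loaded
 * (e.g. "kldload tcp_rack"), the stack registered above can be made the
 * system-wide default with
 *
 *	sysctl net.inet.tcp.functions_default=rack
 *
 * or selected on a single socket from userland, assuming the
 * TCP_FUNCTION_BLK option and struct tcp_function_set from <netinet/tcp.h>:
 *
 *	struct tcp_function_set tfs;
 *
 *	memset(&tfs, 0, sizeof(tfs));
 *	strlcpy(tfs.function_set_name, "rack", sizeof(tfs.function_set_name));
 *	setsockopt(fd, IPPROTO_TCP, TCP_FUNCTION_BLK, &tfs, sizeof(tfs));
 *
 * after which the per-connection options handled in rack_set_sockopt()
 * above apply to that socket.
 */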