1 /*- 2 * Copyright (c) 2016-2020 Netflix, Inc. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 */ 26 27 #include <sys/cdefs.h> 28 #include "opt_inet.h" 29 #include "opt_inet6.h" 30 #include "opt_ipsec.h" 31 #include "opt_ratelimit.h" 32 #include "opt_kern_tls.h" 33 #if defined(INET) || defined(INET6) 34 #include <sys/param.h> 35 #include <sys/arb.h> 36 #include <sys/module.h> 37 #include <sys/kernel.h> 38 #ifdef TCP_HHOOK 39 #include <sys/hhook.h> 40 #endif 41 #include <sys/lock.h> 42 #include <sys/malloc.h> 43 #include <sys/lock.h> 44 #include <sys/mutex.h> 45 #include <sys/mbuf.h> 46 #include <sys/proc.h> /* for proc0 declaration */ 47 #include <sys/socket.h> 48 #include <sys/socketvar.h> 49 #include <sys/sysctl.h> 50 #include <sys/systm.h> 51 #ifdef STATS 52 #include <sys/qmath.h> 53 #include <sys/tree.h> 54 #include <sys/stats.h> /* Must come after qmath.h and tree.h */ 55 #else 56 #include <sys/tree.h> 57 #endif 58 #include <sys/refcount.h> 59 #include <sys/queue.h> 60 #include <sys/tim_filter.h> 61 #include <sys/smp.h> 62 #include <sys/kthread.h> 63 #include <sys/kern_prefetch.h> 64 #include <sys/protosw.h> 65 #ifdef TCP_ACCOUNTING 66 #include <sys/sched.h> 67 #include <machine/cpu.h> 68 #endif 69 #include <vm/uma.h> 70 71 #include <net/route.h> 72 #include <net/route/nhop.h> 73 #include <net/vnet.h> 74 75 #define TCPSTATES /* for logging */ 76 77 #include <netinet/in.h> 78 #include <netinet/in_kdtrace.h> 79 #include <netinet/in_pcb.h> 80 #include <netinet/ip.h> 81 #include <netinet/ip_icmp.h> /* required for icmp_var.h */ 82 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */ 83 #include <netinet/ip_var.h> 84 #include <netinet/ip6.h> 85 #include <netinet6/in6_pcb.h> 86 #include <netinet6/ip6_var.h> 87 #include <netinet/tcp.h> 88 #define TCPOUTFLAGS 89 #include <netinet/tcp_fsm.h> 90 #include <netinet/tcp_seq.h> 91 #include <netinet/tcp_timer.h> 92 #include <netinet/tcp_var.h> 93 #include <netinet/tcp_log_buf.h> 94 #include <netinet/tcp_syncache.h> 95 #include <netinet/tcp_hpts.h> 96 #include <netinet/tcp_ratelimit.h> 97 #include <netinet/tcp_accounting.h> 98 #include <netinet/tcpip.h> 99 #include <netinet/cc/cc.h> 100 #include <netinet/cc/cc_newreno.h> 101 #include <netinet/tcp_fastopen.h> 102 #include <netinet/tcp_lro.h> 103 
#ifdef NETFLIX_SHARED_CWND 104 #include <netinet/tcp_shared_cwnd.h> 105 #endif 106 #ifdef TCP_OFFLOAD 107 #include <netinet/tcp_offload.h> 108 #endif 109 #ifdef INET6 110 #include <netinet6/tcp6_var.h> 111 #endif 112 #include <netinet/tcp_ecn.h> 113 114 #include <netipsec/ipsec_support.h> 115 116 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 117 #include <netipsec/ipsec.h> 118 #include <netipsec/ipsec6.h> 119 #endif /* IPSEC */ 120 121 #include <netinet/udp.h> 122 #include <netinet/udp_var.h> 123 #include <machine/in_cksum.h> 124 125 #ifdef MAC 126 #include <security/mac/mac_framework.h> 127 #endif 128 #include "sack_filter.h" 129 #include "tcp_rack.h" 130 #include "tailq_hash.h" 131 #include "rack_bbr_common.h" 132 133 uma_zone_t rack_zone; 134 uma_zone_t rack_pcb_zone; 135 136 #ifndef TICKS2SBT 137 #define TICKS2SBT(__t) (tick_sbt * ((sbintime_t)(__t))) 138 #endif 139 140 VNET_DECLARE(uint32_t, newreno_beta); 141 VNET_DECLARE(uint32_t, newreno_beta_ecn); 142 #define V_newreno_beta VNET(newreno_beta) 143 #define V_newreno_beta_ecn VNET(newreno_beta_ecn) 144 145 #define M_TCPFSB __CONCAT(M_TCPFSB, STACKNAME) 146 #define M_TCPDO __CONCAT(M_TCPDO, STACKNAME) 147 148 MALLOC_DEFINE(M_TCPFSB, "tcp_fsb_" __XSTRING(STACKNAME), "TCP fast send block"); 149 MALLOC_DEFINE(M_TCPDO, "tcp_do_" __XSTRING(STACKNAME), "TCP deferred options"); 150 MALLOC_DEFINE(M_TCPPCM, "tcp_pcm_" __XSTRING(STACKNAME), "TCP PCM measurement information"); 151 152 struct sysctl_ctx_list rack_sysctl_ctx; 153 struct sysctl_oid *rack_sysctl_root; 154 155 #define CUM_ACKED 1 156 #define SACKED 2 157 158 /* 159 * The RACK module incorporates a number of 160 * TCP ideas that have been put out into the IETF 161 * over the last few years: 162 * - Matt Mathis's Rate Halving which slowly drops 163 * the congestion window so that the ack clock can 164 * be maintained during a recovery. 165 * - Yuchung Cheng's RACK TCP (for which its named) that 166 * will stop us using the number of dup acks and instead 167 * use time as the gage of when we retransmit. 168 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft 169 * of Dukkipati et.al. 170 * RACK depends on SACK, so if an endpoint arrives that 171 * cannot do SACK the state machine below will shuttle the 172 * connection back to using the "default" TCP stack that is 173 * in FreeBSD. 174 * 175 * To implement RACK the original TCP stack was first decomposed 176 * into a functional state machine with individual states 177 * for each of the possible TCP connection states. The do_segment 178 * functions role in life is to mandate the connection supports SACK 179 * initially and then assure that the RACK state matches the conenction 180 * state before calling the states do_segment function. Each 181 * state is simplified due to the fact that the original do_segment 182 * has been decomposed and we *know* what state we are in (no 183 * switches on the state) and all tests for SACK are gone. This 184 * greatly simplifies what each state does. 185 * 186 * TCP output is also over-written with a new version since it 187 * must maintain the new rack scoreboard. 
188 * 189 */ 190 static int32_t rack_tlp_thresh = 1; 191 static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs w-out new data */ 192 static int32_t rack_tlp_use_greater = 1; 193 static int32_t rack_reorder_thresh = 2; 194 static int32_t rack_reorder_fade = 60000000; /* 0 - never fade, def 60,000,000 195 * - 60 seconds */ 196 static uint32_t rack_pcm_every_n_rounds = 100; 197 static uint32_t rack_pcm_blast = 0; 198 static uint32_t rack_pcm_is_enabled = 1; 199 static uint8_t rack_ssthresh_rest_rto_rec = 0; /* Do we restore ssthresh when we have rec -> rto -> rec */ 200 201 static uint32_t rack_gp_gain_req = 1200; /* Amount percent wise required to gain to record a round has "gaining" */ 202 static uint32_t rack_rnd_cnt_req = 0x10005; /* Default number of rounds if we are below rack_gp_gain_req where we exit ss */ 203 204 205 static int32_t rack_rxt_scoreboard_clear_thresh = 2; 206 static int32_t rack_dnd_default = 0; /* For rr_conf = 3, what is the default for dnd */ 207 static int32_t rack_rxt_controls = 0; 208 static int32_t rack_fill_cw_state = 0; 209 static uint8_t rack_req_measurements = 1; 210 /* Attack threshold detections */ 211 static uint32_t rack_highest_sack_thresh_seen = 0; 212 static uint32_t rack_highest_move_thresh_seen = 0; 213 static uint32_t rack_merge_out_sacks_on_attack = 0; 214 static int32_t rack_enable_hw_pacing = 0; /* Due to CCSP keep it off by default */ 215 static int32_t rack_hw_rate_caps = 0; /* 1; */ 216 static int32_t rack_hw_rate_cap_per = 0; /* 0 -- off */ 217 static int32_t rack_hw_rate_min = 0; /* 1500000;*/ 218 static int32_t rack_hw_rate_to_low = 0; /* 1200000; */ 219 static int32_t rack_hw_up_only = 0; 220 static int32_t rack_stats_gets_ms_rtt = 1; 221 static int32_t rack_prr_addbackmax = 2; 222 static int32_t rack_do_hystart = 0; 223 static int32_t rack_apply_rtt_with_reduced_conf = 0; 224 static int32_t rack_hibeta_setting = 0; 225 static int32_t rack_default_pacing_divisor = 250; 226 static uint16_t rack_pacing_min_seg = 0; 227 static int32_t rack_timely_off = 0; 228 229 static uint32_t sad_seg_size_per = 800; /* 80.0 % */ 230 static int32_t rack_pkt_delay = 1000; 231 static int32_t rack_send_a_lot_in_prr = 1; 232 static int32_t rack_min_to = 1000; /* Number of microsecond min timeout */ 233 static int32_t rack_verbose_logging = 0; 234 static int32_t rack_ignore_data_after_close = 1; 235 static int32_t rack_enable_shared_cwnd = 1; 236 static int32_t rack_use_cmp_acks = 1; 237 static int32_t rack_use_fsb = 1; 238 static int32_t rack_use_rfo = 1; 239 static int32_t rack_use_rsm_rfo = 1; 240 static int32_t rack_max_abc_post_recovery = 2; 241 static int32_t rack_client_low_buf = 0; 242 static int32_t rack_dsack_std_based = 0x3; /* bit field bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */ 243 static int32_t rack_bw_multipler = 0; /* Limit on fill cw's jump up to be this x gp_est */ 244 #ifdef TCP_ACCOUNTING 245 static int32_t rack_tcp_accounting = 0; 246 #endif 247 static int32_t rack_limits_scwnd = 1; 248 static int32_t rack_enable_mqueue_for_nonpaced = 0; 249 static int32_t rack_hybrid_allow_set_maxseg = 0; 250 static int32_t rack_disable_prr = 0; 251 static int32_t use_rack_rr = 1; 252 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/ca)? 
*/ 253 static int32_t rack_persist_min = 250000; /* 250usec */ 254 static int32_t rack_persist_max = 2000000; /* 2 Second in usec's */ 255 static int32_t rack_honors_hpts_min_to = 1; /* Do we honor the hpts minimum time out for pacing timers */ 256 static uint32_t rack_max_reduce = 10; /* Percent we can reduce slot by */ 257 static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */ 258 static int32_t rack_limit_time_with_srtt = 0; 259 static int32_t rack_autosndbuf_inc = 20; /* In percentage form */ 260 static int32_t rack_enobuf_hw_boost_mult = 0; /* How many times the hw rate we boost slot using time_between */ 261 static int32_t rack_enobuf_hw_max = 12000; /* 12 ms in usecs */ 262 static int32_t rack_enobuf_hw_min = 10000; /* 10 ms in usecs */ 263 static int32_t rack_hw_rwnd_factor = 2; /* How many max_segs the rwnd must be before we hold off sending */ 264 static int32_t rack_hw_check_queue = 0; /* Do we always pre-check queue depth of a hw queue */ 265 266 /* 267 * Currently regular tcp has a rto_min of 30ms 268 * the backoff goes 12 times so that ends up 269 * being a total of 122.850 seconds before a 270 * connection is killed. 271 */ 272 static uint32_t rack_def_data_window = 20; 273 static uint32_t rack_goal_bdp = 2; 274 static uint32_t rack_min_srtts = 1; 275 static uint32_t rack_min_measure_usec = 0; 276 static int32_t rack_tlp_min = 10000; /* 10ms */ 277 static int32_t rack_rto_min = 30000; /* 30,000 usec same as main freebsd */ 278 static int32_t rack_rto_max = 4000000; /* 4 seconds in usec's */ 279 static const int32_t rack_free_cache = 2; 280 static int32_t rack_hptsi_segments = 40; 281 static int32_t rack_rate_sample_method = USE_RTT_LOW; 282 static int32_t rack_pace_every_seg = 0; 283 static int32_t rack_delayed_ack_time = 40000; /* 40ms in usecs */ 284 static int32_t rack_slot_reduction = 4; 285 static int32_t rack_wma_divisor = 8; /* For WMA calculation */ 286 static int32_t rack_cwnd_block_ends_measure = 0; 287 static int32_t rack_rwnd_block_ends_measure = 0; 288 static int32_t rack_def_profile = 0; 289 290 static int32_t rack_lower_cwnd_at_tlp = 0; 291 static int32_t rack_always_send_oldest = 0; 292 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE; 293 294 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */ 295 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */ 296 static uint16_t rack_per_of_gp_rec = 200; /* 200 % of bw */ 297 298 /* Probertt */ 299 static uint16_t rack_per_of_gp_probertt = 60; /* 60% of bw */ 300 static uint16_t rack_per_of_gp_lowthresh = 40; /* 40% is bottom */ 301 static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */ 302 static uint16_t rack_atexit_prtt_hbp = 130; /* Clamp to 130% on exit prtt if highly buffered path */ 303 static uint16_t rack_atexit_prtt = 130; /* Clamp to 100% on exit prtt if non highly buffered path */ 304 305 static uint32_t rack_max_drain_wait = 2; /* How man gp srtt's before we give up draining */ 306 static uint32_t rack_must_drain = 1; /* How many GP srtt's we *must* wait */ 307 static uint32_t rack_probertt_use_min_rtt_entry = 1; /* Use the min to calculate the goal else gp_srtt */ 308 static uint32_t rack_probertt_use_min_rtt_exit = 0; 309 static uint32_t rack_probe_rtt_sets_cwnd = 0; 310 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */ 311 static uint32_t rack_time_between_probertt = 9600000; /* 9.6 sec in usecs */ 312 static uint32_t rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt 
periods does probe-rtt last top fraction */ 313 static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last bottom fraction */ 314 static uint32_t rack_min_probertt_hold = 40000; /* Equal to delayed ack time */ 315 static uint32_t rack_probertt_filter_life = 10000000; 316 static uint32_t rack_probertt_lower_within = 10; 317 static uint32_t rack_min_rtt_movement = 250000; /* Must move at least 250ms (in microseconds) to count as a lowering */ 318 static int32_t rack_pace_one_seg = 0; /* Shall we pace for less than 1.4Meg 1MSS at a time */ 319 static int32_t rack_probertt_clear_is = 1; 320 static int32_t rack_max_drain_hbp = 1; /* Extra drain times gpsrtt for highly buffered paths */ 321 static int32_t rack_hbp_thresh = 3; /* what is the divisor max_rtt/min_rtt to decided a hbp */ 322 323 /* Part of pacing */ 324 static int32_t rack_max_per_above = 30; /* When we go to increment stop if above 100+this% */ 325 326 /* Timely information: 327 * 328 * Here we have various control parameters on how 329 * timely may change the multiplier. rack_gain_p5_ub 330 * is associated with timely but not directly influencing 331 * the rate decision like the other variables. It controls 332 * the way fill-cw interacts with timely and caps how much 333 * timely can boost the fill-cw b/w. 334 * 335 * The other values are various boost/shrink numbers as well 336 * as potential caps when adjustments are made to the timely 337 * gain (returned by rack_get_output_gain(). Remember too that 338 * the gain returned can be overriden by other factors such as 339 * probeRTT as well as fixed-rate-pacing. 340 */ 341 static int32_t rack_gain_p5_ub = 250; 342 static int32_t rack_gp_per_bw_mul_up = 2; /* 2% */ 343 static int32_t rack_gp_per_bw_mul_down = 4; /* 4% */ 344 static int32_t rack_gp_rtt_maxmul = 3; /* 3 x maxmin */ 345 static int32_t rack_gp_rtt_minmul = 1; /* minrtt + (minrtt/mindiv) is lower rtt */ 346 static int32_t rack_gp_rtt_mindiv = 4; /* minrtt + (minrtt * minmul/mindiv) is lower rtt */ 347 static int32_t rack_gp_decrease_per = 80; /* Beta value of timely decrease (.8) = 80 */ 348 static int32_t rack_gp_increase_per = 2; /* 2% increase in multiplier */ 349 static int32_t rack_per_lower_bound = 50; /* Don't allow to drop below this multiplier */ 350 static int32_t rack_per_upper_bound_ss = 0; /* Don't allow SS to grow above this */ 351 static int32_t rack_per_upper_bound_ca = 0; /* Don't allow CA to grow above this */ 352 static int32_t rack_do_dyn_mul = 0; /* Are the rack gp multipliers dynamic */ 353 static int32_t rack_gp_no_rec_chg = 1; /* Prohibit recovery from reducing it's multiplier */ 354 static int32_t rack_timely_dec_clear = 6; /* Do we clear decrement count at a value (6)? 
*/ 355 static int32_t rack_timely_max_push_rise = 3; /* One round of pushing */ 356 static int32_t rack_timely_max_push_drop = 3; /* Three round of pushing */ 357 static int32_t rack_timely_min_segs = 4; /* 4 segment minimum */ 358 static int32_t rack_timely_no_stopping = 0; 359 static int32_t rack_down_raise_thresh = 100; 360 static int32_t rack_req_segs = 1; 361 static uint64_t rack_bw_rate_cap = 0; 362 static uint64_t rack_fillcw_bw_cap = 3750000; /* Cap fillcw at 30Mbps */ 363 364 365 /* Rack specific counters */ 366 counter_u64_t rack_saw_enobuf; 367 counter_u64_t rack_saw_enobuf_hw; 368 counter_u64_t rack_saw_enetunreach; 369 counter_u64_t rack_persists_sends; 370 counter_u64_t rack_persists_acks; 371 counter_u64_t rack_persists_loss; 372 counter_u64_t rack_persists_lost_ends; 373 counter_u64_t rack_total_bytes; 374 #ifdef INVARIANTS 375 counter_u64_t rack_adjust_map_bw; 376 #endif 377 /* Tail loss probe counters */ 378 counter_u64_t rack_tlp_tot; 379 counter_u64_t rack_tlp_newdata; 380 counter_u64_t rack_tlp_retran; 381 counter_u64_t rack_tlp_retran_bytes; 382 counter_u64_t rack_to_tot; 383 counter_u64_t rack_hot_alloc; 384 counter_u64_t rack_to_alloc; 385 counter_u64_t rack_to_alloc_hard; 386 counter_u64_t rack_to_alloc_emerg; 387 counter_u64_t rack_to_alloc_limited; 388 counter_u64_t rack_alloc_limited_conns; 389 counter_u64_t rack_split_limited; 390 counter_u64_t rack_rxt_clamps_cwnd; 391 counter_u64_t rack_rxt_clamps_cwnd_uniq; 392 393 counter_u64_t rack_multi_single_eq; 394 counter_u64_t rack_proc_non_comp_ack; 395 396 counter_u64_t rack_fto_send; 397 counter_u64_t rack_fto_rsm_send; 398 counter_u64_t rack_nfto_resend; 399 counter_u64_t rack_non_fto_send; 400 counter_u64_t rack_extended_rfo; 401 402 counter_u64_t rack_sack_proc_all; 403 counter_u64_t rack_sack_proc_short; 404 counter_u64_t rack_sack_proc_restart; 405 counter_u64_t rack_sack_attacks_detected; 406 counter_u64_t rack_sack_attacks_reversed; 407 counter_u64_t rack_sack_attacks_suspect; 408 counter_u64_t rack_sack_used_next_merge; 409 counter_u64_t rack_sack_splits; 410 counter_u64_t rack_sack_used_prev_merge; 411 counter_u64_t rack_sack_skipped_acked; 412 counter_u64_t rack_ack_total; 413 counter_u64_t rack_express_sack; 414 counter_u64_t rack_sack_total; 415 counter_u64_t rack_move_none; 416 counter_u64_t rack_move_some; 417 418 counter_u64_t rack_input_idle_reduces; 419 counter_u64_t rack_collapsed_win; 420 counter_u64_t rack_collapsed_win_seen; 421 counter_u64_t rack_collapsed_win_rxt; 422 counter_u64_t rack_collapsed_win_rxt_bytes; 423 counter_u64_t rack_try_scwnd; 424 counter_u64_t rack_hw_pace_init_fail; 425 counter_u64_t rack_hw_pace_lost; 426 427 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE]; 428 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE]; 429 430 431 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2))) 432 433 #define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do { \ 434 (tv) = (value) + slop; \ 435 if ((u_long)(tv) < (u_long)(tvmin)) \ 436 (tv) = (tvmin); \ 437 if ((u_long)(tv) > (u_long)(tvmax)) \ 438 (tv) = (tvmax); \ 439 } while (0) 440 441 static void 442 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line); 443 444 static int 445 rack_process_ack(struct mbuf *m, struct tcphdr *th, 446 struct socket *so, struct tcpcb *tp, struct tcpopt *to, 447 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val, int32_t orig_tlen); 448 static int 449 rack_process_data(struct mbuf *m, struct tcphdr *th, 
450 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 451 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 452 static void 453 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, 454 uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery); 455 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack); 456 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack, 457 uint8_t limit_type); 458 static struct rack_sendmap * 459 rack_check_recovery_mode(struct tcpcb *tp, 460 uint32_t tsused); 461 static uint32_t 462 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack); 463 static void 464 rack_cong_signal(struct tcpcb *tp, 465 uint32_t type, uint32_t ack, int ); 466 static void rack_counter_destroy(void); 467 static int 468 rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt); 469 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how); 470 static void 471 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override); 472 static void 473 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 474 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos); 475 static void rack_dtor(void *mem, int32_t size, void *arg); 476 static void 477 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 478 uint32_t flex1, uint32_t flex2, 479 uint32_t flex3, uint32_t flex4, 480 uint32_t flex5, uint32_t flex6, 481 uint16_t flex7, uint8_t mod); 482 483 static void 484 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 485 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line, 486 struct rack_sendmap *rsm, uint8_t quality); 487 static struct rack_sendmap * 488 rack_find_high_nonack(struct tcp_rack *rack, 489 struct rack_sendmap *rsm); 490 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack); 491 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm); 492 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged); 493 static int rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt); 494 static void 495 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 496 tcp_seq th_ack, int line, uint8_t quality); 497 static void 498 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm); 499 500 static uint32_t 501 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss); 502 static int32_t rack_handoff_ok(struct tcpcb *tp); 503 static int32_t rack_init(struct tcpcb *tp, void **ptr); 504 static void rack_init_sysctls(void); 505 506 static void 507 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, 508 struct tcphdr *th, int entered_rec, int dup_ack_struck, 509 int *dsack_seen, int *sacks_seen); 510 static void 511 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 512 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t ts, 513 struct rack_sendmap *hintrsm, uint32_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls, int segsiz); 514 515 static uint64_t rack_get_gp_est(struct tcp_rack *rack); 516 517 518 static void 519 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack, 520 struct rack_sendmap *rsm, uint32_t cts); 521 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm); 522 static int32_t rack_output(struct tcpcb *tp); 523 524 static uint32_t 525 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, 526 struct sackblk *sack, struct tcpopt 
*to, struct rack_sendmap **prsm, 527 uint32_t cts, uint32_t segsiz); 528 static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq); 529 static void rack_remxt_tmr(struct tcpcb *tp); 530 static int rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt); 531 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack); 532 static int32_t rack_stopall(struct tcpcb *tp); 533 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line); 534 static uint32_t 535 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 536 struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint32_t add_flag, int segsiz); 537 static void 538 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 539 struct rack_sendmap *rsm, uint64_t ts, uint32_t add_flag, int segsiz); 540 static int 541 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 542 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack); 543 static int32_t tcp_addrack(module_t mod, int32_t type, void *data); 544 static int 545 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, 546 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 547 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 548 549 static int 550 rack_do_closing(struct mbuf *m, struct tcphdr *th, 551 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 552 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 553 static int 554 rack_do_established(struct mbuf *m, struct tcphdr *th, 555 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 556 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 557 static int 558 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, 559 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 560 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos); 561 static int 562 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, 563 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 564 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 565 static int 566 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, 567 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 568 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 569 static int 570 rack_do_lastack(struct mbuf *m, struct tcphdr *th, 571 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 572 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 573 static int 574 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, 575 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 576 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 577 static int 578 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, 579 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 580 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 581 static void rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts); 582 struct rack_sendmap * 583 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, 584 uint32_t tsused); 585 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, 586 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, 
uint16_t rtrcnt); 587 static void 588 tcp_rack_partialack(struct tcpcb *tp); 589 static int 590 rack_set_profile(struct tcp_rack *rack, int prof); 591 static void 592 rack_apply_deferred_options(struct tcp_rack *rack); 593 594 int32_t rack_clear_counter=0; 595 596 static uint64_t 597 rack_get_lt_bw(struct tcp_rack *rack) 598 { 599 struct timeval tv; 600 uint64_t tim, bytes; 601 602 tim = rack->r_ctl.lt_bw_time; 603 bytes = rack->r_ctl.lt_bw_bytes; 604 if (rack->lt_bw_up) { 605 /* Include all the current bytes too */ 606 microuptime(&tv); 607 bytes += (rack->rc_tp->snd_una - rack->r_ctl.lt_seq); 608 tim += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark); 609 } 610 if ((bytes != 0) && (tim != 0)) 611 return ((bytes * (uint64_t)1000000) / tim); 612 else 613 return (0); 614 } 615 616 static void 617 rack_swap_beta_values(struct tcp_rack *rack, uint8_t flex8) 618 { 619 struct sockopt sopt; 620 struct cc_newreno_opts opt; 621 struct tcpcb *tp; 622 uint32_t old_beta; 623 uint32_t old_beta_ecn; 624 int error = 0, failed = 0; 625 626 tp = rack->rc_tp; 627 if (tp->t_cc == NULL) { 628 /* Tcb is leaving */ 629 return; 630 } 631 rack->rc_pacing_cc_set = 1; 632 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 633 /* Not new-reno we can't play games with beta! */ 634 failed = 1; 635 goto out; 636 637 } 638 if (CC_ALGO(tp)->ctl_output == NULL) { 639 /* Huh, not using new-reno so no swaps.? */ 640 failed = 2; 641 goto out; 642 } 643 /* Get the current values out */ 644 sopt.sopt_valsize = sizeof(struct cc_newreno_opts); 645 sopt.sopt_dir = SOPT_GET; 646 opt.name = CC_NEWRENO_BETA; 647 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 648 if (error) { 649 failed = 3; 650 goto out; 651 } 652 old_beta = opt.val; 653 opt.name = CC_NEWRENO_BETA_ECN; 654 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 655 if (error) { 656 failed = 4; 657 goto out; 658 } 659 old_beta_ecn = opt.val; 660 661 /* Now lets set in the values we have stored */ 662 sopt.sopt_dir = SOPT_SET; 663 opt.name = CC_NEWRENO_BETA; 664 opt.val = rack->r_ctl.rc_saved_beta; 665 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 666 if (error) { 667 failed = 5; 668 goto out; 669 } 670 opt.name = CC_NEWRENO_BETA_ECN; 671 opt.val = rack->r_ctl.rc_saved_beta_ecn; 672 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 673 if (error) { 674 failed = 6; 675 goto out; 676 } 677 /* Save off the values for restoral */ 678 rack->r_ctl.rc_saved_beta = old_beta; 679 rack->r_ctl.rc_saved_beta_ecn = old_beta_ecn; 680 out: 681 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 682 union tcp_log_stackspecific log; 683 struct timeval tv; 684 struct newreno *ptr; 685 686 ptr = ((struct newreno *)tp->t_ccv.cc_data); 687 memset(&log, 0, sizeof(log)); 688 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 689 log.u_bbr.flex1 = ptr->beta; 690 log.u_bbr.flex2 = ptr->beta_ecn; 691 log.u_bbr.flex3 = ptr->newreno_flags; 692 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta; 693 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta_ecn; 694 log.u_bbr.flex6 = failed; 695 log.u_bbr.flex7 = rack->gp_ready; 696 log.u_bbr.flex7 <<= 1; 697 log.u_bbr.flex7 |= rack->use_fixed_rate; 698 log.u_bbr.flex7 <<= 1; 699 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; 700 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 701 log.u_bbr.flex8 = flex8; 702 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, error, 703 0, &log, false, NULL, NULL, 0, &tv); 704 } 705 } 706 707 static void 708 rack_set_cc_pacing(struct tcp_rack *rack) 709 { 710 if (rack->rc_pacing_cc_set) 711 
return; 712 /* 713 * Use the swap utility placing in 3 for flex8 to id a 714 * set of a new set of values. 715 */ 716 rack->rc_pacing_cc_set = 1; 717 rack_swap_beta_values(rack, 3); 718 } 719 720 static void 721 rack_undo_cc_pacing(struct tcp_rack *rack) 722 { 723 if (rack->rc_pacing_cc_set == 0) 724 return; 725 /* 726 * Use the swap utility placing in 4 for flex8 to id a 727 * restoral of the old values. 728 */ 729 rack->rc_pacing_cc_set = 0; 730 rack_swap_beta_values(rack, 4); 731 } 732 733 static void 734 rack_remove_pacing(struct tcp_rack *rack) 735 { 736 if (rack->rc_pacing_cc_set) 737 rack_undo_cc_pacing(rack); 738 if (rack->r_ctl.pacing_method & RACK_REG_PACING) 739 tcp_decrement_paced_conn(); 740 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) 741 tcp_dec_dgp_pacing_cnt(); 742 rack->rc_always_pace = 0; 743 rack->r_ctl.pacing_method = RACK_PACING_NONE; 744 rack->dgp_on = 0; 745 rack->rc_hybrid_mode = 0; 746 rack->use_fixed_rate = 0; 747 } 748 749 static void 750 rack_log_gpset(struct tcp_rack *rack, uint32_t seq_end, uint32_t ack_end_t, 751 uint32_t send_end_t, int line, uint8_t mode, struct rack_sendmap *rsm) 752 { 753 if (tcp_bblogging_on(rack->rc_tp) && (rack_verbose_logging != 0)) { 754 union tcp_log_stackspecific log; 755 struct timeval tv; 756 757 memset(&log, 0, sizeof(log)); 758 log.u_bbr.flex1 = seq_end; 759 log.u_bbr.flex2 = rack->rc_tp->gput_seq; 760 log.u_bbr.flex3 = ack_end_t; 761 log.u_bbr.flex4 = rack->rc_tp->gput_ts; 762 log.u_bbr.flex5 = send_end_t; 763 log.u_bbr.flex6 = rack->rc_tp->gput_ack; 764 log.u_bbr.flex7 = mode; 765 log.u_bbr.flex8 = 69; 766 log.u_bbr.rttProp = rack->r_ctl.rc_gp_cumack_ts; 767 log.u_bbr.delRate = rack->r_ctl.rc_gp_output_ts; 768 log.u_bbr.pkts_out = line; 769 log.u_bbr.cwnd_gain = rack->app_limited_needs_set; 770 log.u_bbr.pkt_epoch = rack->r_ctl.rc_app_limited_cnt; 771 log.u_bbr.epoch = rack->r_ctl.current_round; 772 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 773 if (rsm != NULL) { 774 log.u_bbr.applimited = rsm->r_start; 775 log.u_bbr.delivered = rsm->r_end; 776 log.u_bbr.epoch = rsm->r_flags; 777 } 778 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 779 TCP_LOG_EVENTP(rack->rc_tp, NULL, 780 &rack->rc_inp->inp_socket->so_rcv, 781 &rack->rc_inp->inp_socket->so_snd, 782 BBR_LOG_HPTSI_CALC, 0, 783 0, &log, false, &tv); 784 } 785 } 786 787 static int 788 sysctl_rack_clear(SYSCTL_HANDLER_ARGS) 789 { 790 uint32_t stat; 791 int32_t error; 792 793 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t)); 794 if (error || req->newptr == NULL) 795 return error; 796 797 error = SYSCTL_IN(req, &stat, sizeof(uint32_t)); 798 if (error) 799 return (error); 800 if (stat == 1) { 801 #ifdef INVARIANTS 802 printf("Clearing RACK counters\n"); 803 #endif 804 counter_u64_zero(rack_tlp_tot); 805 counter_u64_zero(rack_tlp_newdata); 806 counter_u64_zero(rack_tlp_retran); 807 counter_u64_zero(rack_tlp_retran_bytes); 808 counter_u64_zero(rack_to_tot); 809 counter_u64_zero(rack_saw_enobuf); 810 counter_u64_zero(rack_saw_enobuf_hw); 811 counter_u64_zero(rack_saw_enetunreach); 812 counter_u64_zero(rack_persists_sends); 813 counter_u64_zero(rack_total_bytes); 814 counter_u64_zero(rack_persists_acks); 815 counter_u64_zero(rack_persists_loss); 816 counter_u64_zero(rack_persists_lost_ends); 817 #ifdef INVARIANTS 818 counter_u64_zero(rack_adjust_map_bw); 819 #endif 820 counter_u64_zero(rack_to_alloc_hard); 821 counter_u64_zero(rack_to_alloc_emerg); 822 counter_u64_zero(rack_sack_proc_all); 823 counter_u64_zero(rack_fto_send); 824 
counter_u64_zero(rack_fto_rsm_send); 825 counter_u64_zero(rack_extended_rfo); 826 counter_u64_zero(rack_hw_pace_init_fail); 827 counter_u64_zero(rack_hw_pace_lost); 828 counter_u64_zero(rack_non_fto_send); 829 counter_u64_zero(rack_nfto_resend); 830 counter_u64_zero(rack_sack_proc_short); 831 counter_u64_zero(rack_sack_proc_restart); 832 counter_u64_zero(rack_to_alloc); 833 counter_u64_zero(rack_to_alloc_limited); 834 counter_u64_zero(rack_alloc_limited_conns); 835 counter_u64_zero(rack_split_limited); 836 counter_u64_zero(rack_rxt_clamps_cwnd); 837 counter_u64_zero(rack_rxt_clamps_cwnd_uniq); 838 counter_u64_zero(rack_multi_single_eq); 839 counter_u64_zero(rack_proc_non_comp_ack); 840 counter_u64_zero(rack_sack_attacks_detected); 841 counter_u64_zero(rack_sack_attacks_reversed); 842 counter_u64_zero(rack_sack_attacks_suspect); 843 counter_u64_zero(rack_sack_used_next_merge); 844 counter_u64_zero(rack_sack_used_prev_merge); 845 counter_u64_zero(rack_sack_splits); 846 counter_u64_zero(rack_sack_skipped_acked); 847 counter_u64_zero(rack_ack_total); 848 counter_u64_zero(rack_express_sack); 849 counter_u64_zero(rack_sack_total); 850 counter_u64_zero(rack_move_none); 851 counter_u64_zero(rack_move_some); 852 counter_u64_zero(rack_try_scwnd); 853 counter_u64_zero(rack_collapsed_win); 854 counter_u64_zero(rack_collapsed_win_rxt); 855 counter_u64_zero(rack_collapsed_win_seen); 856 counter_u64_zero(rack_collapsed_win_rxt_bytes); 857 } else if (stat == 2) { 858 #ifdef INVARIANTS 859 printf("Clearing RACK option array\n"); 860 #endif 861 COUNTER_ARRAY_ZERO(rack_opts_arry, RACK_OPTS_SIZE); 862 } else if (stat == 3) { 863 printf("Rack has no stats counters to clear (use 1 to clear all stats in sysctl node)\n"); 864 } else if (stat == 4) { 865 #ifdef INVARIANTS 866 printf("Clearing RACK out size array\n"); 867 #endif 868 COUNTER_ARRAY_ZERO(rack_out_size, TCP_MSS_ACCT_SIZE); 869 } 870 rack_clear_counter = 0; 871 return (0); 872 } 873 874 static void 875 rack_init_sysctls(void) 876 { 877 struct sysctl_oid *rack_counters; 878 struct sysctl_oid *rack_attack; 879 struct sysctl_oid *rack_pacing; 880 struct sysctl_oid *rack_timely; 881 struct sysctl_oid *rack_timers; 882 struct sysctl_oid *rack_tlp; 883 struct sysctl_oid *rack_misc; 884 struct sysctl_oid *rack_features; 885 struct sysctl_oid *rack_measure; 886 struct sysctl_oid *rack_probertt; 887 struct sysctl_oid *rack_hw_pacing; 888 889 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 890 SYSCTL_CHILDREN(rack_sysctl_root), 891 OID_AUTO, 892 "sack_attack", 893 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 894 "Rack Sack Attack Counters and Controls"); 895 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 896 SYSCTL_CHILDREN(rack_sysctl_root), 897 OID_AUTO, 898 "stats", 899 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 900 "Rack Counters"); 901 SYSCTL_ADD_S32(&rack_sysctl_ctx, 902 SYSCTL_CHILDREN(rack_sysctl_root), 903 OID_AUTO, "rate_sample_method", CTLFLAG_RW, 904 &rack_rate_sample_method , USE_RTT_LOW, 905 "What method should we use for rate sampling 0=high, 1=low "); 906 /* Probe rtt related controls */ 907 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 908 SYSCTL_CHILDREN(rack_sysctl_root), 909 OID_AUTO, 910 "probertt", 911 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 912 "ProbeRTT related Controls"); 913 SYSCTL_ADD_U16(&rack_sysctl_ctx, 914 SYSCTL_CHILDREN(rack_probertt), 915 OID_AUTO, "exit_per_hpb", CTLFLAG_RW, 916 &rack_atexit_prtt_hbp, 130, 917 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%"); 918 SYSCTL_ADD_U16(&rack_sysctl_ctx, 919 
SYSCTL_CHILDREN(rack_probertt), 920 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW, 921 &rack_atexit_prtt, 130, 922 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%"); 923 SYSCTL_ADD_U16(&rack_sysctl_ctx, 924 SYSCTL_CHILDREN(rack_probertt), 925 OID_AUTO, "gp_per_mul", CTLFLAG_RW, 926 &rack_per_of_gp_probertt, 60, 927 "What percentage of goodput do we pace at in probertt"); 928 SYSCTL_ADD_U16(&rack_sysctl_ctx, 929 SYSCTL_CHILDREN(rack_probertt), 930 OID_AUTO, "gp_per_reduce", CTLFLAG_RW, 931 &rack_per_of_gp_probertt_reduce, 10, 932 "What percentage of goodput do we reduce every gp_srtt"); 933 SYSCTL_ADD_U16(&rack_sysctl_ctx, 934 SYSCTL_CHILDREN(rack_probertt), 935 OID_AUTO, "gp_per_low", CTLFLAG_RW, 936 &rack_per_of_gp_lowthresh, 40, 937 "What percentage of goodput do we allow the multiplier to fall to"); 938 SYSCTL_ADD_U32(&rack_sysctl_ctx, 939 SYSCTL_CHILDREN(rack_probertt), 940 OID_AUTO, "time_between", CTLFLAG_RW, 941 & rack_time_between_probertt, 96000000, 942 "How many useconds between the lowest rtt falling must past before we enter probertt"); 943 SYSCTL_ADD_U32(&rack_sysctl_ctx, 944 SYSCTL_CHILDREN(rack_probertt), 945 OID_AUTO, "safety", CTLFLAG_RW, 946 &rack_probe_rtt_safety_val, 2000000, 947 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)"); 948 SYSCTL_ADD_U32(&rack_sysctl_ctx, 949 SYSCTL_CHILDREN(rack_probertt), 950 OID_AUTO, "sets_cwnd", CTLFLAG_RW, 951 &rack_probe_rtt_sets_cwnd, 0, 952 "Do we set the cwnd too (if always_lower is on)"); 953 SYSCTL_ADD_U32(&rack_sysctl_ctx, 954 SYSCTL_CHILDREN(rack_probertt), 955 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW, 956 &rack_max_drain_wait, 2, 957 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal"); 958 SYSCTL_ADD_U32(&rack_sysctl_ctx, 959 SYSCTL_CHILDREN(rack_probertt), 960 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW, 961 &rack_must_drain, 1, 962 "We must drain this many gp_srtt's waiting for flight to reach goal"); 963 SYSCTL_ADD_U32(&rack_sysctl_ctx, 964 SYSCTL_CHILDREN(rack_probertt), 965 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW, 966 &rack_probertt_use_min_rtt_entry, 1, 967 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); 968 SYSCTL_ADD_U32(&rack_sysctl_ctx, 969 SYSCTL_CHILDREN(rack_probertt), 970 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW, 971 &rack_probertt_use_min_rtt_exit, 0, 972 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); 973 SYSCTL_ADD_U32(&rack_sysctl_ctx, 974 SYSCTL_CHILDREN(rack_probertt), 975 OID_AUTO, "length_div", CTLFLAG_RW, 976 &rack_probertt_gpsrtt_cnt_div, 0, 977 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)"); 978 SYSCTL_ADD_U32(&rack_sysctl_ctx, 979 SYSCTL_CHILDREN(rack_probertt), 980 OID_AUTO, "length_mul", CTLFLAG_RW, 981 &rack_probertt_gpsrtt_cnt_mul, 0, 982 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)"); 983 SYSCTL_ADD_U32(&rack_sysctl_ctx, 984 SYSCTL_CHILDREN(rack_probertt), 985 OID_AUTO, "holdtim_at_target", CTLFLAG_RW, 986 &rack_min_probertt_hold, 200000, 987 "What is the minimum time we hold probertt at target"); 988 SYSCTL_ADD_U32(&rack_sysctl_ctx, 989 SYSCTL_CHILDREN(rack_probertt), 990 OID_AUTO, "filter_life", CTLFLAG_RW, 991 &rack_probertt_filter_life, 10000000, 992 "What is the time for the filters life in useconds"); 993 SYSCTL_ADD_U32(&rack_sysctl_ctx, 994 SYSCTL_CHILDREN(rack_probertt), 995 OID_AUTO, "lower_within", CTLFLAG_RW, 996 
&rack_probertt_lower_within, 10, 997 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); 998 SYSCTL_ADD_U32(&rack_sysctl_ctx, 999 SYSCTL_CHILDREN(rack_probertt), 1000 OID_AUTO, "must_move", CTLFLAG_RW, 1001 &rack_min_rtt_movement, 250, 1002 "How much is the minimum movement in rtt to count as a drop for probertt purposes"); 1003 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1004 SYSCTL_CHILDREN(rack_probertt), 1005 OID_AUTO, "clear_is_cnts", CTLFLAG_RW, 1006 &rack_probertt_clear_is, 1, 1007 "Do we clear I/S counts on exiting probe-rtt"); 1008 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1009 SYSCTL_CHILDREN(rack_probertt), 1010 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW, 1011 &rack_max_drain_hbp, 1, 1012 "How many extra drain gpsrtt's do we get in highly buffered paths"); 1013 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1014 SYSCTL_CHILDREN(rack_probertt), 1015 OID_AUTO, "hbp_threshold", CTLFLAG_RW, 1016 &rack_hbp_thresh, 3, 1017 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); 1018 /* Pacing related sysctls */ 1019 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1020 SYSCTL_CHILDREN(rack_sysctl_root), 1021 OID_AUTO, 1022 "pacing", 1023 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1024 "Pacing related Controls"); 1025 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1026 SYSCTL_CHILDREN(rack_pacing), 1027 OID_AUTO, "pcm_enabled", CTLFLAG_RW, 1028 &rack_pcm_is_enabled, 1, 1029 "Do we by default do PCM measurements?"); 1030 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1031 SYSCTL_CHILDREN(rack_pacing), 1032 OID_AUTO, "pcm_rnds", CTLFLAG_RW, 1033 &rack_pcm_every_n_rounds, 100, 1034 "How many rounds before we need to do a PCM measurement"); 1035 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1036 SYSCTL_CHILDREN(rack_pacing), 1037 OID_AUTO, "pcm_blast", CTLFLAG_RW, 1038 &rack_pcm_blast, 0, 1039 "Blast out the full cwnd/rwnd when doing a PCM measurement"); 1040 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1041 SYSCTL_CHILDREN(rack_pacing), 1042 OID_AUTO, "rnd_gp_gain", CTLFLAG_RW, 1043 &rack_gp_gain_req, 1200, 1044 "How much do we have to increase the GP to record the round 1200 = 120.0"); 1045 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1046 SYSCTL_CHILDREN(rack_pacing), 1047 OID_AUTO, "dgp_out_of_ss_at", CTLFLAG_RW, 1048 &rack_rnd_cnt_req, 0x10005, 1049 "How many rounds less than rnd_gp_gain will drop us out of SS"); 1050 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1051 SYSCTL_CHILDREN(rack_pacing), 1052 OID_AUTO, "no_timely", CTLFLAG_RW, 1053 &rack_timely_off, 0, 1054 "Do we not use timely in DGP?"); 1055 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1056 SYSCTL_CHILDREN(rack_pacing), 1057 OID_AUTO, "fillcw", CTLFLAG_RW, 1058 &rack_fill_cw_state, 0, 1059 "Enable fillcw on new connections (default=0 off)?"); 1060 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1061 SYSCTL_CHILDREN(rack_pacing), 1062 OID_AUTO, "min_burst", CTLFLAG_RW, 1063 &rack_pacing_min_seg, 0, 1064 "What is the min burst size for pacing (0 disables)?"); 1065 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1066 SYSCTL_CHILDREN(rack_pacing), 1067 OID_AUTO, "divisor", CTLFLAG_RW, 1068 &rack_default_pacing_divisor, 250, 1069 "What is the default divisor given to the rl code?"); 1070 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1071 SYSCTL_CHILDREN(rack_pacing), 1072 OID_AUTO, "fillcw_max_mult", CTLFLAG_RW, 1073 &rack_bw_multipler, 0, 1074 "What is the limit multiplier of the current gp_est that fillcw can increase the b/w too, 200 == 200% (0 = off)?"); 1075 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1076 SYSCTL_CHILDREN(rack_pacing), 1077 OID_AUTO, "max_pace_over", CTLFLAG_RW, 1078 &rack_max_per_above, 30, 1079 "What is the maximum allowable percentage that we 
can pace above (so 30 = 130% of our goal)"); 1080 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1081 SYSCTL_CHILDREN(rack_pacing), 1082 OID_AUTO, "allow1mss", CTLFLAG_RW, 1083 &rack_pace_one_seg, 0, 1084 "Do we allow low b/w pacing of 1MSS instead of two (1.2Meg and less)?"); 1085 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1086 SYSCTL_CHILDREN(rack_pacing), 1087 OID_AUTO, "limit_wsrtt", CTLFLAG_RW, 1088 &rack_limit_time_with_srtt, 0, 1089 "Do we limit pacing time based on srtt"); 1090 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1091 SYSCTL_CHILDREN(rack_pacing), 1092 OID_AUTO, "gp_per_ss", CTLFLAG_RW, 1093 &rack_per_of_gp_ss, 250, 1094 "If non zero, what percentage of goodput to pace at in slow start"); 1095 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1096 SYSCTL_CHILDREN(rack_pacing), 1097 OID_AUTO, "gp_per_ca", CTLFLAG_RW, 1098 &rack_per_of_gp_ca, 150, 1099 "If non zero, what percentage of goodput to pace at in congestion avoidance"); 1100 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1101 SYSCTL_CHILDREN(rack_pacing), 1102 OID_AUTO, "gp_per_rec", CTLFLAG_RW, 1103 &rack_per_of_gp_rec, 200, 1104 "If non zero, what percentage of goodput to pace at in recovery"); 1105 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1106 SYSCTL_CHILDREN(rack_pacing), 1107 OID_AUTO, "pace_max_seg", CTLFLAG_RW, 1108 &rack_hptsi_segments, 40, 1109 "What size is the max for TSO segments in pacing and burst mitigation"); 1110 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1111 SYSCTL_CHILDREN(rack_pacing), 1112 OID_AUTO, "burst_reduces", CTLFLAG_RW, 1113 &rack_slot_reduction, 4, 1114 "When doing only burst mitigation what is the reduce divisor"); 1115 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1116 SYSCTL_CHILDREN(rack_sysctl_root), 1117 OID_AUTO, "use_pacing", CTLFLAG_RW, 1118 &rack_pace_every_seg, 0, 1119 "If set we use pacing, if clear we use only the original burst mitigation"); 1120 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1121 SYSCTL_CHILDREN(rack_pacing), 1122 OID_AUTO, "rate_cap", CTLFLAG_RW, 1123 &rack_bw_rate_cap, 0, 1124 "If set we apply this value to the absolute rate cap used by pacing"); 1125 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1126 SYSCTL_CHILDREN(rack_pacing), 1127 OID_AUTO, "fillcw_cap", CTLFLAG_RW, 1128 &rack_fillcw_bw_cap, 3750000, 1129 "Do we have an absolute cap on the amount of b/w fillcw can specify (0 = no)?"); 1130 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1131 SYSCTL_CHILDREN(rack_sysctl_root), 1132 OID_AUTO, "req_measure_cnt", CTLFLAG_RW, 1133 &rack_req_measurements, 1, 1134 "If doing dynamic pacing, how many measurements must be in before we start pacing?"); 1135 /* Hardware pacing */ 1136 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1137 SYSCTL_CHILDREN(rack_sysctl_root), 1138 OID_AUTO, 1139 "hdwr_pacing", 1140 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1141 "Pacing related Controls"); 1142 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1143 SYSCTL_CHILDREN(rack_hw_pacing), 1144 OID_AUTO, "rwnd_factor", CTLFLAG_RW, 1145 &rack_hw_rwnd_factor, 2, 1146 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?"); 1147 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1148 SYSCTL_CHILDREN(rack_hw_pacing), 1149 OID_AUTO, "precheck", CTLFLAG_RW, 1150 &rack_hw_check_queue, 0, 1151 "Do we always precheck the hdwr pacing queue to avoid ENOBUF's?"); 1152 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1153 SYSCTL_CHILDREN(rack_hw_pacing), 1154 OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW, 1155 &rack_enobuf_hw_boost_mult, 0, 1156 "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?"); 1157 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1158 SYSCTL_CHILDREN(rack_hw_pacing), 1159 OID_AUTO, "pace_enobuf_max", 
CTLFLAG_RW, 1160 &rack_enobuf_hw_max, 2, 1161 "What is the max boost the pacing time if we see a ENOBUFS?"); 1162 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1163 SYSCTL_CHILDREN(rack_hw_pacing), 1164 OID_AUTO, "pace_enobuf_min", CTLFLAG_RW, 1165 &rack_enobuf_hw_min, 2, 1166 "What is the min boost the pacing time if we see a ENOBUFS?"); 1167 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1168 SYSCTL_CHILDREN(rack_hw_pacing), 1169 OID_AUTO, "enable", CTLFLAG_RW, 1170 &rack_enable_hw_pacing, 0, 1171 "Should RACK attempt to use hw pacing?"); 1172 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1173 SYSCTL_CHILDREN(rack_hw_pacing), 1174 OID_AUTO, "rate_cap", CTLFLAG_RW, 1175 &rack_hw_rate_caps, 0, 1176 "Does the highest hardware pacing rate cap the rate we will send at??"); 1177 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1178 SYSCTL_CHILDREN(rack_hw_pacing), 1179 OID_AUTO, "uncap_per", CTLFLAG_RW, 1180 &rack_hw_rate_cap_per, 0, 1181 "If you go over b/w by this amount you will be uncapped (0 = never)"); 1182 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1183 SYSCTL_CHILDREN(rack_hw_pacing), 1184 OID_AUTO, "rate_min", CTLFLAG_RW, 1185 &rack_hw_rate_min, 0, 1186 "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?"); 1187 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1188 SYSCTL_CHILDREN(rack_hw_pacing), 1189 OID_AUTO, "rate_to_low", CTLFLAG_RW, 1190 &rack_hw_rate_to_low, 0, 1191 "If we fall below this rate, dis-engage hw pacing?"); 1192 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1193 SYSCTL_CHILDREN(rack_hw_pacing), 1194 OID_AUTO, "up_only", CTLFLAG_RW, 1195 &rack_hw_up_only, 0, 1196 "Do we allow hw pacing to lower the rate selected?"); 1197 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1198 SYSCTL_CHILDREN(rack_sysctl_root), 1199 OID_AUTO, 1200 "timely", 1201 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1202 "Rack Timely RTT Controls"); 1203 /* Timely based GP dynmics */ 1204 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1205 SYSCTL_CHILDREN(rack_timely), 1206 OID_AUTO, "upper", CTLFLAG_RW, 1207 &rack_gp_per_bw_mul_up, 2, 1208 "Rack timely upper range for equal b/w (in percentage)"); 1209 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1210 SYSCTL_CHILDREN(rack_timely), 1211 OID_AUTO, "lower", CTLFLAG_RW, 1212 &rack_gp_per_bw_mul_down, 4, 1213 "Rack timely lower range for equal b/w (in percentage)"); 1214 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1215 SYSCTL_CHILDREN(rack_timely), 1216 OID_AUTO, "rtt_max_mul", CTLFLAG_RW, 1217 &rack_gp_rtt_maxmul, 3, 1218 "Rack timely multiplier of lowest rtt for rtt_max"); 1219 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1220 SYSCTL_CHILDREN(rack_timely), 1221 OID_AUTO, "rtt_min_div", CTLFLAG_RW, 1222 &rack_gp_rtt_mindiv, 4, 1223 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1224 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1225 SYSCTL_CHILDREN(rack_timely), 1226 OID_AUTO, "rtt_min_mul", CTLFLAG_RW, 1227 &rack_gp_rtt_minmul, 1, 1228 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1229 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1230 SYSCTL_CHILDREN(rack_timely), 1231 OID_AUTO, "decrease", CTLFLAG_RW, 1232 &rack_gp_decrease_per, 80, 1233 "Rack timely Beta value 80 = .8 (scaled by 100)"); 1234 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1235 SYSCTL_CHILDREN(rack_timely), 1236 OID_AUTO, "increase", CTLFLAG_RW, 1237 &rack_gp_increase_per, 2, 1238 "Rack timely increase perentage of our GP multiplication factor"); 1239 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1240 SYSCTL_CHILDREN(rack_timely), 1241 OID_AUTO, "lowerbound", CTLFLAG_RW, 1242 &rack_per_lower_bound, 50, 1243 "Rack timely lowest percentage we allow GP multiplier to fall to"); 
1244 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1245 SYSCTL_CHILDREN(rack_timely), 1246 OID_AUTO, "p5_upper", CTLFLAG_RW, 1247 &rack_gain_p5_ub, 250, 1248 "Profile 5 upper bound to timely gain"); 1249 1250 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1251 SYSCTL_CHILDREN(rack_timely), 1252 OID_AUTO, "upperboundss", CTLFLAG_RW, 1253 &rack_per_upper_bound_ss, 0, 1254 "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)"); 1255 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1256 SYSCTL_CHILDREN(rack_timely), 1257 OID_AUTO, "upperboundca", CTLFLAG_RW, 1258 &rack_per_upper_bound_ca, 0, 1259 "Rack timely highest percentage we allow GP multiplier to CA raise to (0 is no upperbound)"); 1260 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1261 SYSCTL_CHILDREN(rack_timely), 1262 OID_AUTO, "dynamicgp", CTLFLAG_RW, 1263 &rack_do_dyn_mul, 0, 1264 "Rack timely do we enable dynmaic timely goodput by default"); 1265 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1266 SYSCTL_CHILDREN(rack_timely), 1267 OID_AUTO, "no_rec_red", CTLFLAG_RW, 1268 &rack_gp_no_rec_chg, 1, 1269 "Rack timely do we prohibit the recovery multiplier from being lowered"); 1270 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1271 SYSCTL_CHILDREN(rack_timely), 1272 OID_AUTO, "red_clear_cnt", CTLFLAG_RW, 1273 &rack_timely_dec_clear, 6, 1274 "Rack timely what threshold do we count to before another boost during b/w decent"); 1275 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1276 SYSCTL_CHILDREN(rack_timely), 1277 OID_AUTO, "max_push_rise", CTLFLAG_RW, 1278 &rack_timely_max_push_rise, 3, 1279 "Rack timely how many times do we push up with b/w increase"); 1280 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1281 SYSCTL_CHILDREN(rack_timely), 1282 OID_AUTO, "max_push_drop", CTLFLAG_RW, 1283 &rack_timely_max_push_drop, 3, 1284 "Rack timely how many times do we push back on b/w decent"); 1285 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1286 SYSCTL_CHILDREN(rack_timely), 1287 OID_AUTO, "min_segs", CTLFLAG_RW, 1288 &rack_timely_min_segs, 4, 1289 "Rack timely when setting the cwnd what is the min num segments"); 1290 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1291 SYSCTL_CHILDREN(rack_timely), 1292 OID_AUTO, "nonstop", CTLFLAG_RW, 1293 &rack_timely_no_stopping, 0, 1294 "Rack timely don't stop increase"); 1295 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1296 SYSCTL_CHILDREN(rack_timely), 1297 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW, 1298 &rack_down_raise_thresh, 100, 1299 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)"); 1300 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1301 SYSCTL_CHILDREN(rack_timely), 1302 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW, 1303 &rack_req_segs, 1, 1304 "Bottom dragging if not these many segments outstanding and room"); 1305 1306 /* TLP and Rack related parameters */ 1307 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1308 SYSCTL_CHILDREN(rack_sysctl_root), 1309 OID_AUTO, 1310 "tlp", 1311 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1312 "TLP and Rack related Controls"); 1313 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1314 SYSCTL_CHILDREN(rack_tlp), 1315 OID_AUTO, "use_rrr", CTLFLAG_RW, 1316 &use_rack_rr, 1, 1317 "Do we use Rack Rapid Recovery"); 1318 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1319 SYSCTL_CHILDREN(rack_tlp), 1320 OID_AUTO, "post_rec_labc", CTLFLAG_RW, 1321 &rack_max_abc_post_recovery, 2, 1322 "Since we do early recovery, do we override the l_abc to a value, if so what?"); 1323 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1324 SYSCTL_CHILDREN(rack_tlp), 1325 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW, 1326 &rack_non_rxt_use_cr, 0, 1327 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk"); 1328 
SYSCTL_ADD_S32(&rack_sysctl_ctx, 1329 SYSCTL_CHILDREN(rack_tlp), 1330 OID_AUTO, "tlpmethod", CTLFLAG_RW, 1331 &rack_tlp_threshold_use, TLP_USE_TWO_ONE, 1332 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); 1333 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1334 SYSCTL_CHILDREN(rack_tlp), 1335 OID_AUTO, "limit", CTLFLAG_RW, 1336 &rack_tlp_limit, 2, 1337 "How many TLP's can be sent without sending new data"); 1338 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1339 SYSCTL_CHILDREN(rack_tlp), 1340 OID_AUTO, "use_greater", CTLFLAG_RW, 1341 &rack_tlp_use_greater, 1, 1342 "Should we use the rack_rtt time if its greater than srtt"); 1343 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1344 SYSCTL_CHILDREN(rack_tlp), 1345 OID_AUTO, "tlpminto", CTLFLAG_RW, 1346 &rack_tlp_min, 10000, 1347 "TLP minimum timeout per the specification (in microseconds)"); 1348 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1349 SYSCTL_CHILDREN(rack_tlp), 1350 OID_AUTO, "send_oldest", CTLFLAG_RW, 1351 &rack_always_send_oldest, 0, 1352 "Should we always send the oldest TLP and RACK-TLP"); 1353 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1354 SYSCTL_CHILDREN(rack_tlp), 1355 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW, 1356 &rack_lower_cwnd_at_tlp, 0, 1357 "When a TLP completes a retran should we enter recovery"); 1358 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1359 SYSCTL_CHILDREN(rack_tlp), 1360 OID_AUTO, "reorder_thresh", CTLFLAG_RW, 1361 &rack_reorder_thresh, 2, 1362 "What factor for rack will be added when seeing reordering (shift right)"); 1363 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1364 SYSCTL_CHILDREN(rack_tlp), 1365 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW, 1366 &rack_tlp_thresh, 1, 1367 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); 1368 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1369 SYSCTL_CHILDREN(rack_tlp), 1370 OID_AUTO, "reorder_fade", CTLFLAG_RW, 1371 &rack_reorder_fade, 60000000, 1372 "Does reorder detection fade, if so how many microseconds (0 means never)"); 1373 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1374 SYSCTL_CHILDREN(rack_tlp), 1375 OID_AUTO, "pktdelay", CTLFLAG_RW, 1376 &rack_pkt_delay, 1000, 1377 "Extra RACK time (in microseconds) besides reordering thresh"); 1378 1379 /* Timer related controls */ 1380 rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1381 SYSCTL_CHILDREN(rack_sysctl_root), 1382 OID_AUTO, 1383 "timers", 1384 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1385 "Timer related controls"); 1386 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1387 SYSCTL_CHILDREN(rack_timers), 1388 OID_AUTO, "reset_ssth_rec_rto", CTLFLAG_RW, 1389 &rack_ssthresh_rest_rto_rec, 0, 1390 "When doing recovery -> rto -> recovery do we reset SSthresh?"); 1391 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1392 SYSCTL_CHILDREN(rack_timers), 1393 OID_AUTO, "scoreboard_thresh", CTLFLAG_RW, 1394 &rack_rxt_scoreboard_clear_thresh, 2, 1395 "How many RTO's are allowed before we clear the scoreboard"); 1396 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1397 SYSCTL_CHILDREN(rack_timers), 1398 OID_AUTO, "honor_hpts_min", CTLFLAG_RW, 1399 &rack_honors_hpts_min_to, 1, 1400 "Do rack pacing timers honor hpts min timeout"); 1401 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1402 SYSCTL_CHILDREN(rack_timers), 1403 OID_AUTO, "hpts_max_reduce", CTLFLAG_RW, 1404 &rack_max_reduce, 10, 1405 "Max percentage we will reduce slot by for pacing when we are behind"); 1406 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1407 SYSCTL_CHILDREN(rack_timers), 1408 OID_AUTO, "persmin", CTLFLAG_RW, 1409 &rack_persist_min, 250000, 1410 "What is the minimum time in microseconds between persists"); 1411 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1412 SYSCTL_CHILDREN(rack_timers), 1413 
OID_AUTO, "persmax", CTLFLAG_RW, 1414 &rack_persist_max, 2000000, 1415 "What is the largest delay in microseconds between persists"); 1416 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1417 SYSCTL_CHILDREN(rack_timers), 1418 OID_AUTO, "delayed_ack", CTLFLAG_RW, 1419 &rack_delayed_ack_time, 40000, 1420 "Delayed ack time (40ms in microseconds)"); 1421 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1422 SYSCTL_CHILDREN(rack_timers), 1423 OID_AUTO, "minrto", CTLFLAG_RW, 1424 &rack_rto_min, 30000, 1425 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP"); 1426 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1427 SYSCTL_CHILDREN(rack_timers), 1428 OID_AUTO, "maxrto", CTLFLAG_RW, 1429 &rack_rto_max, 4000000, 1430 "Maximum RTO in microseconds -- should be at least as large as min_rto"); 1431 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1432 SYSCTL_CHILDREN(rack_timers), 1433 OID_AUTO, "minto", CTLFLAG_RW, 1434 &rack_min_to, 1000, 1435 "Minimum rack timeout in microseconds"); 1436 /* Measure controls */ 1437 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1438 SYSCTL_CHILDREN(rack_sysctl_root), 1439 OID_AUTO, 1440 "measure", 1441 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1442 "Measure related controls"); 1443 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1444 SYSCTL_CHILDREN(rack_measure), 1445 OID_AUTO, "wma_divisor", CTLFLAG_RW, 1446 &rack_wma_divisor, 8, 1447 "When doing b/w calculation what is the divisor for the WMA"); 1448 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1449 SYSCTL_CHILDREN(rack_measure), 1450 OID_AUTO, "end_cwnd", CTLFLAG_RW, 1451 &rack_cwnd_block_ends_measure, 0, 1452 "Does a cwnd just-return end the measurement window (app limited)"); 1453 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1454 SYSCTL_CHILDREN(rack_measure), 1455 OID_AUTO, "end_rwnd", CTLFLAG_RW, 1456 &rack_rwnd_block_ends_measure, 0, 1457 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); 1458 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1459 SYSCTL_CHILDREN(rack_measure), 1460 OID_AUTO, "min_target", CTLFLAG_RW, 1461 &rack_def_data_window, 20, 1462 "What is the minimum target window (in mss) for a GP measurement"); 1463 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1464 SYSCTL_CHILDREN(rack_measure), 1465 OID_AUTO, "goal_bdp", CTLFLAG_RW, 1466 &rack_goal_bdp, 2, 1467 "What is the goal BDP to measure"); 1468 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1469 SYSCTL_CHILDREN(rack_measure), 1470 OID_AUTO, "min_srtts", CTLFLAG_RW, 1471 &rack_min_srtts, 1, 1472 "What is the minimum number of SRTTs a GP measurement must span"); 1473 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1474 SYSCTL_CHILDREN(rack_measure), 1475 OID_AUTO, "min_measure_tim", CTLFLAG_RW, 1476 &rack_min_measure_usec, 0, 1477 "What is the minimum time in microseconds for a measurement (0 means this is off)"); 1478 /* Features */ 1479 rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1480 SYSCTL_CHILDREN(rack_sysctl_root), 1481 OID_AUTO, 1482 "features", 1483 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1484 "Feature controls"); 1485 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1486 SYSCTL_CHILDREN(rack_features), 1487 OID_AUTO, "hybrid_set_maxseg", CTLFLAG_RW, 1488 &rack_hybrid_allow_set_maxseg, 0, 1489 "Should hybrid pacing allow the setmss command"); 1490 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1491 SYSCTL_CHILDREN(rack_features), 1492 OID_AUTO, "cmpack", CTLFLAG_RW, 1493 &rack_use_cmp_acks, 1, 1494 "Should RACK have LRO send compressed acks"); 1495 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1496 SYSCTL_CHILDREN(rack_features), 1497 OID_AUTO, "fsb", CTLFLAG_RW, 1498 &rack_use_fsb, 1, 1499 "Should RACK use the fast send block?"); 1500 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1501 SYSCTL_CHILDREN(rack_features), 1502 OID_AUTO, "rfo",
CTLFLAG_RW, 1503 &rack_use_rfo, 1, 1504 "Should RACK use rack_fast_output()?"); 1505 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1506 SYSCTL_CHILDREN(rack_features), 1507 OID_AUTO, "rsmrfo", CTLFLAG_RW, 1508 &rack_use_rsm_rfo, 1, 1509 "Should RACK use rack_fast_rsm_output()?"); 1510 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1511 SYSCTL_CHILDREN(rack_features), 1512 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW, 1513 &rack_enable_mqueue_for_nonpaced, 0, 1514 "Should RACK use mbuf queuing for non-paced connections"); 1515 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1516 SYSCTL_CHILDREN(rack_features), 1517 OID_AUTO, "hystartplusplus", CTLFLAG_RW, 1518 &rack_do_hystart, 0, 1519 "Should RACK enable HyStart++ on connections?"); 1520 /* Misc rack controls */ 1521 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1522 SYSCTL_CHILDREN(rack_sysctl_root), 1523 OID_AUTO, 1524 "misc", 1525 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1526 "Misc related controls"); 1527 #ifdef TCP_ACCOUNTING 1528 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1529 SYSCTL_CHILDREN(rack_misc), 1530 OID_AUTO, "tcp_acct", CTLFLAG_RW, 1531 &rack_tcp_accounting, 0, 1532 "Should we turn on TCP accounting for all rack sessions?"); 1533 #endif 1534 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1535 SYSCTL_CHILDREN(rack_misc), 1536 OID_AUTO, "dnd", CTLFLAG_RW, 1537 &rack_dnd_default, 0, 1538 "Do not disturb default for rack_rrr = 3"); 1539 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1540 SYSCTL_CHILDREN(rack_misc), 1541 OID_AUTO, "sad_seg_per", CTLFLAG_RW, 1542 &sad_seg_size_per, 800, 1543 "Percentage of segment size needed in a SACK (800 = 80.0)?"); 1544 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1545 SYSCTL_CHILDREN(rack_misc), 1546 OID_AUTO, "rxt_controls", CTLFLAG_RW, 1547 &rack_rxt_controls, 0, 1548 "Retransmit sending size controls (valid values 0, 1, 2 default=1)?"); 1549 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1550 SYSCTL_CHILDREN(rack_misc), 1551 OID_AUTO, "rack_hibeta", CTLFLAG_RW, 1552 &rack_hibeta_setting, 0, 1553 "Do we use a high beta (80 instead of 50)?"); 1554 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1555 SYSCTL_CHILDREN(rack_misc), 1556 OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW, 1557 &rack_apply_rtt_with_reduced_conf, 0, 1558 "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?"); 1559 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1560 SYSCTL_CHILDREN(rack_misc), 1561 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW, 1562 &rack_dsack_std_based, 3, 1563 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?"); 1564 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1565 SYSCTL_CHILDREN(rack_misc), 1566 OID_AUTO, "prr_addback_max", CTLFLAG_RW, 1567 &rack_prr_addbackmax, 2, 1568 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?"); 1569 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1570 SYSCTL_CHILDREN(rack_misc), 1571 OID_AUTO, "stats_gets_ms", CTLFLAG_RW, 1572 &rack_stats_gets_ms_rtt, 1, 1573 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); 1574 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1575 SYSCTL_CHILDREN(rack_misc), 1576 OID_AUTO, "clientlowbuf", CTLFLAG_RW, 1577 &rack_client_low_buf, 0, 1578 "Client low buffer level (below this we are more aggressive in DGP exiting recovery, 0 = off)?"); 1579 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1580 SYSCTL_CHILDREN(rack_misc), 1581 OID_AUTO, "defprofile", CTLFLAG_RW, 1582 &rack_def_profile, 0, 1583 "Should RACK use a default profile (0=no, num == profile num)?"); 1584 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1585 SYSCTL_CHILDREN(rack_misc), 1586 OID_AUTO, "shared_cwnd",
CTLFLAG_RW, 1587 &rack_enable_shared_cwnd, 1, 1588 "Should RACK try to use the shared cwnd on connections where allowed"); 1589 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1590 SYSCTL_CHILDREN(rack_misc), 1591 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW, 1592 &rack_limits_scwnd, 1, 1593 "Should RACK place low end time limits on the shared cwnd feature"); 1594 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1595 SYSCTL_CHILDREN(rack_misc), 1596 OID_AUTO, "no_prr", CTLFLAG_RW, 1597 &rack_disable_prr, 0, 1598 "Should RACK not use prr and only pace (must have pacing on)"); 1599 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1600 SYSCTL_CHILDREN(rack_misc), 1601 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1602 &rack_verbose_logging, 0, 1603 "Should RACK black box logging be verbose"); 1604 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1605 SYSCTL_CHILDREN(rack_misc), 1606 OID_AUTO, "data_after_close", CTLFLAG_RW, 1607 &rack_ignore_data_after_close, 1, 1608 "Do we hold off sending a RST until all pending data is ack'd"); 1609 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1610 SYSCTL_CHILDREN(rack_misc), 1611 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1612 &rack_sack_not_required, 1, 1613 "Do we allow rack to run on connections not supporting SACK"); 1614 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1615 SYSCTL_CHILDREN(rack_misc), 1616 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 1617 &rack_send_a_lot_in_prr, 1, 1618 "Send a lot in prr"); 1619 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1620 SYSCTL_CHILDREN(rack_misc), 1621 OID_AUTO, "autoscale", CTLFLAG_RW, 1622 &rack_autosndbuf_inc, 20, 1623 "What percentage should rack scale up its snd buffer by?"); 1624 1625 1626 /* Sack Attacker detection stuff */ 1627 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1628 SYSCTL_CHILDREN(rack_attack), 1629 OID_AUTO, "merge_out", CTLFLAG_RW, 1630 &rack_merge_out_sacks_on_attack, 0, 1631 "Do we merge the sendmap when we decide we are being attacked?"); 1632 1633 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1634 SYSCTL_CHILDREN(rack_attack), 1635 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 1636 &rack_highest_sack_thresh_seen, 0, 1637 "Highest sack to ack ratio seen"); 1638 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1639 SYSCTL_CHILDREN(rack_attack), 1640 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW, 1641 &rack_highest_move_thresh_seen, 0, 1642 "Highest move to non-move ratio seen"); 1643 rack_ack_total = counter_u64_alloc(M_WAITOK); 1644 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1645 SYSCTL_CHILDREN(rack_attack), 1646 OID_AUTO, "acktotal", CTLFLAG_RD, 1647 &rack_ack_total, 1648 "Total number of ACKs"); 1649 rack_express_sack = counter_u64_alloc(M_WAITOK); 1650 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1651 SYSCTL_CHILDREN(rack_attack), 1652 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1653 &rack_express_sack, 1654 "Total number of express SACKs"); 1655 rack_sack_total = counter_u64_alloc(M_WAITOK); 1656 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1657 SYSCTL_CHILDREN(rack_attack), 1658 OID_AUTO, "sacktotal", CTLFLAG_RD, 1659 &rack_sack_total, 1660 "Total number of SACKs"); 1661 rack_move_none = counter_u64_alloc(M_WAITOK); 1662 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1663 SYSCTL_CHILDREN(rack_attack), 1664 OID_AUTO, "move_none", CTLFLAG_RD, 1665 &rack_move_none, 1666 "Total number of SACK index reuse of positions under threshold"); 1667 rack_move_some = counter_u64_alloc(M_WAITOK); 1668 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1669 SYSCTL_CHILDREN(rack_attack), 1670 OID_AUTO, "move_some", CTLFLAG_RD, 1671 &rack_move_some, 1672 "Total number of SACK index reuse of positions over threshold"); 1673 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 1674
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1675 SYSCTL_CHILDREN(rack_attack), 1676 OID_AUTO, "attacks", CTLFLAG_RD, 1677 &rack_sack_attacks_detected, 1678 "Total number of SACK attackers that had sack disabled"); 1679 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 1680 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1681 SYSCTL_CHILDREN(rack_attack), 1682 OID_AUTO, "reversed", CTLFLAG_RD, 1683 &rack_sack_attacks_reversed, 1684 "Total number of SACK attackers that were later determined false positive"); 1685 rack_sack_attacks_suspect = counter_u64_alloc(M_WAITOK); 1686 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1687 SYSCTL_CHILDREN(rack_attack), 1688 OID_AUTO, "suspect", CTLFLAG_RD, 1689 &rack_sack_attacks_suspect, 1690 "Total number of SACKs that triggered early detection"); 1691 1692 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 1693 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1694 SYSCTL_CHILDREN(rack_attack), 1695 OID_AUTO, "nextmerge", CTLFLAG_RD, 1696 &rack_sack_used_next_merge, 1697 "Total number of times we used the next merge"); 1698 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 1699 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1700 SYSCTL_CHILDREN(rack_attack), 1701 OID_AUTO, "prevmerge", CTLFLAG_RD, 1702 &rack_sack_used_prev_merge, 1703 "Total number of times we used the prev merge"); 1704 /* Counters */ 1705 rack_total_bytes = counter_u64_alloc(M_WAITOK); 1706 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1707 SYSCTL_CHILDREN(rack_counters), 1708 OID_AUTO, "totalbytes", CTLFLAG_RD, 1709 &rack_total_bytes, 1710 "Total number of bytes sent"); 1711 rack_fto_send = counter_u64_alloc(M_WAITOK); 1712 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1713 SYSCTL_CHILDREN(rack_counters), 1714 OID_AUTO, "fto_send", CTLFLAG_RD, 1715 &rack_fto_send, "Total number of rack_fast_output sends"); 1716 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK); 1717 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1718 SYSCTL_CHILDREN(rack_counters), 1719 OID_AUTO, "fto_rsm_send", CTLFLAG_RD, 1720 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends"); 1721 rack_nfto_resend = counter_u64_alloc(M_WAITOK); 1722 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1723 SYSCTL_CHILDREN(rack_counters), 1724 OID_AUTO, "nfto_resend", CTLFLAG_RD, 1725 &rack_nfto_resend, "Total number of rack_output retransmissions"); 1726 rack_non_fto_send = counter_u64_alloc(M_WAITOK); 1727 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1728 SYSCTL_CHILDREN(rack_counters), 1729 OID_AUTO, "nfto_send", CTLFLAG_RD, 1730 &rack_non_fto_send, "Total number of rack_output first sends"); 1731 rack_extended_rfo = counter_u64_alloc(M_WAITOK); 1732 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1733 SYSCTL_CHILDREN(rack_counters), 1734 OID_AUTO, "rfo_extended", CTLFLAG_RD, 1735 &rack_extended_rfo, "Total number of times we extended rfo"); 1736 1737 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK); 1738 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1739 SYSCTL_CHILDREN(rack_counters), 1740 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD, 1741 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing"); 1742 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK); 1743 1744 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1745 SYSCTL_CHILDREN(rack_counters), 1746 OID_AUTO, "hwpace_lost", CTLFLAG_RD, 1747 &rack_hw_pace_lost, "Total number of times we failed to initialize hw pacing"); 1748 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 1749 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1750 SYSCTL_CHILDREN(rack_counters), 1751 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 1752 
&rack_tlp_tot, 1753 "Total number of tail loss probe expirations"); 1754 rack_tlp_newdata = counter_u64_alloc(M_WAITOK); 1755 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1756 SYSCTL_CHILDREN(rack_counters), 1757 OID_AUTO, "tlp_new", CTLFLAG_RD, 1758 &rack_tlp_newdata, 1759 "Total number of tail loss probe sending new data"); 1760 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 1761 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1762 SYSCTL_CHILDREN(rack_counters), 1763 OID_AUTO, "tlp_retran", CTLFLAG_RD, 1764 &rack_tlp_retran, 1765 "Total number of tail loss probe sending retransmitted data"); 1766 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 1767 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1768 SYSCTL_CHILDREN(rack_counters), 1769 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 1770 &rack_tlp_retran_bytes, 1771 "Total bytes of tail loss probe sending retransmitted data"); 1772 rack_to_tot = counter_u64_alloc(M_WAITOK); 1773 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1774 SYSCTL_CHILDREN(rack_counters), 1775 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 1776 &rack_to_tot, 1777 "Total number of times the rack timeout expired"); 1778 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 1779 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1780 SYSCTL_CHILDREN(rack_counters), 1781 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 1782 &rack_saw_enobuf, 1783 "Total number of times a send returned ENOBUFS for non-hdwr paced connections"); 1784 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK); 1785 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1786 SYSCTL_CHILDREN(rack_counters), 1787 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD, 1788 &rack_saw_enobuf_hw, 1789 "Total number of times a send returned ENOBUFS for hdwr paced connections"); 1790 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 1791 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1792 SYSCTL_CHILDREN(rack_counters), 1793 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 1794 &rack_saw_enetunreach, 1795 "Total number of times a send received an ENETUNREACH"); 1796 rack_hot_alloc = counter_u64_alloc(M_WAITOK); 1797 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1798 SYSCTL_CHILDREN(rack_counters), 1799 OID_AUTO, "alloc_hot", CTLFLAG_RD, 1800 &rack_hot_alloc, 1801 "Total allocations from the top of our list"); 1802 rack_to_alloc = counter_u64_alloc(M_WAITOK); 1803 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1804 SYSCTL_CHILDREN(rack_counters), 1805 OID_AUTO, "allocs", CTLFLAG_RD, 1806 &rack_to_alloc, 1807 "Total allocations of tracking structures"); 1808 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 1809 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1810 SYSCTL_CHILDREN(rack_counters), 1811 OID_AUTO, "allochard", CTLFLAG_RD, 1812 &rack_to_alloc_hard, 1813 "Total allocations done with sleeping the hard way"); 1814 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 1815 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1816 SYSCTL_CHILDREN(rack_counters), 1817 OID_AUTO, "allocemerg", CTLFLAG_RD, 1818 &rack_to_alloc_emerg, 1819 "Total allocations done from emergency cache"); 1820 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 1821 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1822 SYSCTL_CHILDREN(rack_counters), 1823 OID_AUTO, "alloc_limited", CTLFLAG_RD, 1824 &rack_to_alloc_limited, 1825 "Total allocations dropped due to limit"); 1826 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 1827 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1828 SYSCTL_CHILDREN(rack_counters), 1829 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 1830 &rack_alloc_limited_conns, 1831 "Connections with allocations dropped due to limit"); 1832
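/* Sendmap split-limit, excessive-rxt cwnd clamp, and persist-probe counters. */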
rack_split_limited = counter_u64_alloc(M_WAITOK); 1833 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1834 SYSCTL_CHILDREN(rack_counters), 1835 OID_AUTO, "split_limited", CTLFLAG_RD, 1836 &rack_split_limited, 1837 "Split allocations dropped due to limit"); 1838 rack_rxt_clamps_cwnd = counter_u64_alloc(M_WAITOK); 1839 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1840 SYSCTL_CHILDREN(rack_counters), 1841 OID_AUTO, "rxt_clamps_cwnd", CTLFLAG_RD, 1842 &rack_rxt_clamps_cwnd, 1843 "Number of times that excessive rxt clamped the cwnd down"); 1844 rack_rxt_clamps_cwnd_uniq = counter_u64_alloc(M_WAITOK); 1845 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1846 SYSCTL_CHILDREN(rack_counters), 1847 OID_AUTO, "rxt_clamps_cwnd_uniq", CTLFLAG_RD, 1848 &rack_rxt_clamps_cwnd_uniq, 1849 "Number of connections where excessive rxt clamped the cwnd down"); 1850 rack_persists_sends = counter_u64_alloc(M_WAITOK); 1851 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1852 SYSCTL_CHILDREN(rack_counters), 1853 OID_AUTO, "persist_sends", CTLFLAG_RD, 1854 &rack_persists_sends, 1855 "Number of times we sent a persist probe"); 1856 rack_persists_acks = counter_u64_alloc(M_WAITOK); 1857 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1858 SYSCTL_CHILDREN(rack_counters), 1859 OID_AUTO, "persist_acks", CTLFLAG_RD, 1860 &rack_persists_acks, 1861 "Number of times a persist probe was acked"); 1862 rack_persists_loss = counter_u64_alloc(M_WAITOK); 1863 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1864 SYSCTL_CHILDREN(rack_counters), 1865 OID_AUTO, "persist_loss", CTLFLAG_RD, 1866 &rack_persists_loss, 1867 "Number of times we detected a lost persist probe (no ack)"); 1868 rack_persists_lost_ends = counter_u64_alloc(M_WAITOK); 1869 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1870 SYSCTL_CHILDREN(rack_counters), 1871 OID_AUTO, "persist_loss_ends", CTLFLAG_RD, 1872 &rack_persists_lost_ends, 1873 "Number of lost persist probes (no ack) where the run ended with a PERSIST abort"); 1874 #ifdef INVARIANTS 1875 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK); 1876 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1877 SYSCTL_CHILDREN(rack_counters), 1878 OID_AUTO, "map_adjust_req", CTLFLAG_RD, 1879 &rack_adjust_map_bw, 1880 "Number of times we hit the case where the sb went up and down on a sendmap entry"); 1881 #endif 1882 rack_multi_single_eq = counter_u64_alloc(M_WAITOK); 1883 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1884 SYSCTL_CHILDREN(rack_counters), 1885 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD, 1886 &rack_multi_single_eq, 1887 "Total number of acks represented by compressed acks"); 1888 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK); 1889 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1890 SYSCTL_CHILDREN(rack_counters), 1891 OID_AUTO, "cmp_ack_not", CTLFLAG_RD, 1892 &rack_proc_non_comp_ack, 1893 "Number of non-compressed acks that we processed"); 1894 1895 1896 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 1897 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1898 SYSCTL_CHILDREN(rack_counters), 1899 OID_AUTO, "sack_long", CTLFLAG_RD, 1900 &rack_sack_proc_all, 1901 "Total times we had to walk whole list for sack processing"); 1902 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 1903 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1904 SYSCTL_CHILDREN(rack_counters), 1905 OID_AUTO, "sack_restart", CTLFLAG_RD, 1906 &rack_sack_proc_restart, 1907 "Total times we had to walk whole list due to a restart"); 1908 rack_sack_proc_short = counter_u64_alloc(M_WAITOK); 1909 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1910 SYSCTL_CHILDREN(rack_counters), 1911 OID_AUTO, "sack_short", CTLFLAG_RD,
1912 &rack_sack_proc_short, 1913 "Total times we took shortcut for sack processing"); 1914 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK); 1915 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1916 SYSCTL_CHILDREN(rack_attack), 1917 OID_AUTO, "skipacked", CTLFLAG_RD, 1918 &rack_sack_skipped_acked, 1919 "Total number of times we skipped previously sacked"); 1920 rack_sack_splits = counter_u64_alloc(M_WAITOK); 1921 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1922 SYSCTL_CHILDREN(rack_attack), 1923 OID_AUTO, "ofsplit", CTLFLAG_RD, 1924 &rack_sack_splits, 1925 "Total number of times we did the old fashion tree split"); 1926 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 1927 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1928 SYSCTL_CHILDREN(rack_counters), 1929 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 1930 &rack_input_idle_reduces, 1931 "Total number of idle reductions on input"); 1932 rack_collapsed_win_seen = counter_u64_alloc(M_WAITOK); 1933 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1934 SYSCTL_CHILDREN(rack_counters), 1935 OID_AUTO, "collapsed_win_seen", CTLFLAG_RD, 1936 &rack_collapsed_win_seen, 1937 "Total number of collapsed window events seen (where our window shrinks)"); 1938 1939 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 1940 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1941 SYSCTL_CHILDREN(rack_counters), 1942 OID_AUTO, "collapsed_win", CTLFLAG_RD, 1943 &rack_collapsed_win, 1944 "Total number of collapsed window events where we mark packets"); 1945 rack_collapsed_win_rxt = counter_u64_alloc(M_WAITOK); 1946 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1947 SYSCTL_CHILDREN(rack_counters), 1948 OID_AUTO, "collapsed_win_rxt", CTLFLAG_RD, 1949 &rack_collapsed_win_rxt, 1950 "Total number of packets that were retransmitted"); 1951 rack_collapsed_win_rxt_bytes = counter_u64_alloc(M_WAITOK); 1952 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1953 SYSCTL_CHILDREN(rack_counters), 1954 OID_AUTO, "collapsed_win_bytes", CTLFLAG_RD, 1955 &rack_collapsed_win_rxt_bytes, 1956 "Total number of bytes that were retransmitted"); 1957 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 1958 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1959 SYSCTL_CHILDREN(rack_counters), 1960 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 1961 &rack_try_scwnd, 1962 "Total number of scwnd attempts"); 1963 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1964 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1965 OID_AUTO, "outsize", CTLFLAG_RD, 1966 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 1967 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 1968 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1969 OID_AUTO, "opts", CTLFLAG_RD, 1970 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 1971 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 1972 SYSCTL_CHILDREN(rack_sysctl_root), 1973 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1974 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 1975 } 1976 1977 static uint32_t 1978 rc_init_window(struct tcp_rack *rack) 1979 { 1980 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 1981 1982 } 1983 1984 static uint64_t 1985 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 1986 { 1987 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 1988 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 1989 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1990 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 1991 else 1992 return (rack->r_ctl.rc_fixed_pacing_rate_ca); 1993 } 1994 1995 static 
void 1996 rack_log_hybrid_bw(struct tcp_rack *rack, uint32_t seq, uint64_t cbw, uint64_t tim, 1997 uint64_t data, uint8_t mod, uint16_t aux, 1998 struct tcp_sendfile_track *cur, int line) 1999 { 2000 #ifdef TCP_REQUEST_TRK 2001 int do_log = 0; 2002 2003 /* 2004 * The rate cap one is noisy and only should come out when normal BB logging 2005 * is enabled, the other logs (not RATE_CAP and NOT CAP_CALC) only come out 2006 * once per chunk and make up the BBpoint that can be turned on by the client. 2007 */ 2008 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) { 2009 /* 2010 * The very noisy two need to only come out when 2011 * we have verbose logging on. 2012 */ 2013 if (rack_verbose_logging != 0) 2014 do_log = tcp_bblogging_on(rack->rc_tp); 2015 else 2016 do_log = 0; 2017 } else if (mod != HYBRID_LOG_BW_MEASURE) { 2018 /* 2019 * All other less noisy logs here except the measure which 2020 * also needs to come out on the point and the log. 2021 */ 2022 do_log = tcp_bblogging_on(rack->rc_tp); 2023 } else { 2024 do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING); 2025 } 2026 2027 if (do_log) { 2028 union tcp_log_stackspecific log; 2029 struct timeval tv; 2030 uint64_t lt_bw; 2031 2032 /* Convert our ms to a microsecond */ 2033 memset(&log, 0, sizeof(log)); 2034 2035 log.u_bbr.cwnd_gain = line; 2036 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2037 log.u_bbr.rttProp = tim; 2038 log.u_bbr.bw_inuse = cbw; 2039 log.u_bbr.delRate = rack_get_gp_est(rack); 2040 lt_bw = rack_get_lt_bw(rack); 2041 log.u_bbr.flex1 = seq; 2042 log.u_bbr.pacing_gain = aux; 2043 /* lt_bw = < flex3 | flex2 > */ 2044 log.u_bbr.flex2 = (uint32_t)(lt_bw & 0x00000000ffffffff); 2045 log.u_bbr.flex3 = (uint32_t)((lt_bw >> 32) & 0x00000000ffffffff); 2046 /* Record the last obtained us rtt in inflight */ 2047 if (cur == NULL) { 2048 /* Make sure we are looking at the right log if an overide comes in */ 2049 cur = rack->r_ctl.rc_last_sft; 2050 } 2051 if (rack->r_ctl.rack_rs.rs_flags != RACK_RTT_EMPTY) 2052 log.u_bbr.inflight = rack->r_ctl.rack_rs.rs_us_rtt; 2053 else { 2054 /* Use the last known rtt i.e. 
the rack-rtt */ 2055 log.u_bbr.inflight = rack->rc_rack_rtt; 2056 } 2057 if (cur != NULL) { 2058 uint64_t off; 2059 2060 log.u_bbr.cur_del_rate = cur->deadline; 2061 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) { 2062 /* start = < lost | pkt_epoch > */ 2063 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); 2064 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2065 log.u_bbr.flex6 = cur->start_seq; 2066 log.u_bbr.pkts_out = cur->end_seq; 2067 } else { 2068 /* start = < lost | pkt_epoch > */ 2069 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); 2070 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2071 /* end = < pkts_out | flex6 > */ 2072 log.u_bbr.flex6 = (uint32_t)(cur->end & 0x00000000ffffffff); 2073 log.u_bbr.pkts_out = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); 2074 } 2075 /* first_send = <lt_epoch | epoch> */ 2076 log.u_bbr.epoch = (uint32_t)(cur->first_send & 0x00000000ffffffff); 2077 log.u_bbr.lt_epoch = (uint32_t)((cur->first_send >> 32) & 0x00000000ffffffff); 2078 /* localtime = <delivered | applimited>*/ 2079 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); 2080 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 2081 #ifdef TCP_REQUEST_TRK 2082 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 2083 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 2084 #endif 2085 log.u_bbr.inhpts = 1; 2086 log.u_bbr.flex4 = (uint32_t)(rack->rc_tp->t_sndbytes - cur->sent_at_fs); 2087 log.u_bbr.flex5 = (uint32_t)(rack->rc_tp->t_snd_rxt_bytes - cur->rxt_at_fs); 2088 log.u_bbr.flex7 = (uint16_t)cur->hybrid_flags; 2089 } else { 2090 log.u_bbr.flex7 = 0xffff; 2091 log.u_bbr.cur_del_rate = 0xffffffffffffffff; 2092 } 2093 /* 2094 * Compose bbr_state to be a bit wise 0000ADHF 2095 * where A is the always_pace flag 2096 * where D is the dgp_on flag 2097 * where H is the hybrid_mode on flag 2098 * where F is the use_fixed_rate flag. 2099 */ 2100 log.u_bbr.bbr_state = rack->rc_always_pace; 2101 log.u_bbr.bbr_state <<= 1; 2102 log.u_bbr.bbr_state |= rack->dgp_on; 2103 log.u_bbr.bbr_state <<= 1; 2104 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 2105 log.u_bbr.bbr_state <<= 1; 2106 log.u_bbr.bbr_state |= rack->use_fixed_rate; 2107 log.u_bbr.flex8 = mod; 2108 tcp_log_event(rack->rc_tp, NULL, 2109 &rack->rc_inp->inp_socket->so_rcv, 2110 &rack->rc_inp->inp_socket->so_snd, 2111 TCP_HYBRID_PACING_LOG, 0, 2112 0, &log, false, NULL, __func__, __LINE__, &tv); 2113 2114 } 2115 #endif 2116 } 2117 2118 #ifdef TCP_REQUEST_TRK 2119 static void 2120 rack_log_hybrid_sends(struct tcp_rack *rack, struct tcp_sendfile_track *cur, int line) 2121 { 2122 if (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) { 2123 union tcp_log_stackspecific log; 2124 struct timeval tv; 2125 uint64_t off; 2126 2127 /* Convert our ms to a microsecond */ 2128 memset(&log, 0, sizeof(log)); 2129 2130 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2131 log.u_bbr.delRate = cur->sent_at_fs; 2132 2133 if ((cur->flags & TCP_TRK_TRACK_FLG_LSND) == 0) { 2134 /* 2135 * We did not get a new Rules Applied to set so 2136 * no overlapping send occured, this means the 2137 * current byte counts are correct. 2138 */ 2139 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 2140 log.u_bbr.rttProp = rack->rc_tp->t_snd_rxt_bytes; 2141 } else { 2142 /* 2143 * Overlapping send case, we switched to a new 2144 * send and did a rules applied. 
2145 */ 2146 log.u_bbr.cur_del_rate = cur->sent_at_ls; 2147 log.u_bbr.rttProp = cur->rxt_at_ls; 2148 } 2149 log.u_bbr.bw_inuse = cur->rxt_at_fs; 2150 log.u_bbr.cwnd_gain = line; 2151 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 2152 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 2153 /* start = < flex1 | flex2 > */ 2154 log.u_bbr.flex2 = (uint32_t)(cur->start & 0x00000000ffffffff); 2155 log.u_bbr.flex1 = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2156 /* end = < flex3 | flex4 > */ 2157 log.u_bbr.flex4 = (uint32_t)(cur->end & 0x00000000ffffffff); 2158 log.u_bbr.flex3 = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); 2159 2160 /* localtime = <delivered | applimited>*/ 2161 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); 2162 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 2163 /* client timestamp = <lt_epoch | epoch>*/ 2164 log.u_bbr.epoch = (uint32_t)(cur->timestamp & 0x00000000ffffffff); 2165 log.u_bbr.lt_epoch = (uint32_t)((cur->timestamp >> 32) & 0x00000000ffffffff); 2166 /* now set all the flags in */ 2167 log.u_bbr.pkts_out = cur->hybrid_flags; 2168 log.u_bbr.lost = cur->playout_ms; 2169 log.u_bbr.flex6 = cur->flags; 2170 /* 2171 * Last send time = <flex5 | pkt_epoch> note we do not distinguish cases 2172 * where a false retransmit occurred so first_send <-> lastsend may 2173 * include a longer time than it actually took if we have a false rxt. 2174 */ 2175 log.u_bbr.pkt_epoch = (uint32_t)(rack->r_ctl.last_tmit_time_acked & 0x00000000ffffffff); 2176 log.u_bbr.flex5 = (uint32_t)((rack->r_ctl.last_tmit_time_acked >> 32) & 0x00000000ffffffff); 2177 /* 2178 * Compose bbr_state to be a bit wise 0000ADHF 2179 * where A is the always_pace flag 2180 * where D is the dgp_on flag 2181 * where H is the hybrid_mode on flag 2182 * where F is the use_fixed_rate flag.
2183 */ 2184 log.u_bbr.bbr_state = rack->rc_always_pace; 2185 log.u_bbr.bbr_state <<= 1; 2186 log.u_bbr.bbr_state |= rack->dgp_on; 2187 log.u_bbr.bbr_state <<= 1; 2188 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 2189 log.u_bbr.bbr_state <<= 1; 2190 log.u_bbr.bbr_state |= rack->use_fixed_rate; 2191 2192 log.u_bbr.flex8 = HYBRID_LOG_SENT_LOST; 2193 tcp_log_event(rack->rc_tp, NULL, 2194 &rack->rc_inp->inp_socket->so_rcv, 2195 &rack->rc_inp->inp_socket->so_snd, 2196 TCP_HYBRID_PACING_LOG, 0, 2197 0, &log, false, NULL, __func__, __LINE__, &tv); 2198 } 2199 } 2200 #endif 2201 2202 static inline uint64_t 2203 rack_compensate_for_linerate(struct tcp_rack *rack, uint64_t bw) 2204 { 2205 uint64_t ret_bw, ether; 2206 uint64_t u_segsiz; 2207 2208 ether = rack->rc_tp->t_maxseg + sizeof(struct tcphdr); 2209 if (rack->r_is_v6){ 2210 #ifdef INET6 2211 ether += sizeof(struct ip6_hdr); 2212 #endif 2213 ether += 14; /* eheader size 6+6+2 */ 2214 } else { 2215 #ifdef INET 2216 ether += sizeof(struct ip); 2217 #endif 2218 ether += 14; /* eheader size 6+6+2 */ 2219 } 2220 u_segsiz = (uint64_t)min(ctf_fixed_maxseg(rack->rc_tp), rack->r_ctl.rc_pace_min_segs); 2221 ret_bw = bw; 2222 ret_bw *= ether; 2223 ret_bw /= u_segsiz; 2224 return (ret_bw); 2225 } 2226 2227 static void 2228 rack_rate_cap_bw(struct tcp_rack *rack, uint64_t *bw, int *capped) 2229 { 2230 #ifdef TCP_REQUEST_TRK 2231 struct timeval tv; 2232 uint64_t timenow, timeleft, lenleft, lengone, calcbw; 2233 #endif 2234 2235 if (rack->r_ctl.bw_rate_cap == 0) 2236 return; 2237 #ifdef TCP_REQUEST_TRK 2238 if (rack->rc_catch_up && rack->rc_hybrid_mode && 2239 (rack->r_ctl.rc_last_sft != NULL)) { 2240 /* 2241 * We have a dynamic cap. The original target 2242 * is in bw_rate_cap, but we need to look at 2243 * how long it is until we hit the deadline. 2244 */ 2245 struct tcp_sendfile_track *ent; 2246 2247 ent = rack->r_ctl.rc_last_sft; 2248 microuptime(&tv); 2249 timenow = tcp_tv_to_lusectick(&tv); 2250 if (timenow >= ent->deadline) { 2251 /* No time left we do DGP only */ 2252 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2253 0, 0, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__); 2254 rack->r_ctl.bw_rate_cap = 0; 2255 return; 2256 } 2257 /* We have the time */ 2258 timeleft = rack->r_ctl.rc_last_sft->deadline - timenow; 2259 if (timeleft < HPTS_MSEC_IN_SEC) { 2260 /* If there is less than a ms left just use DGPs rate */ 2261 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2262 0, timeleft, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__); 2263 rack->r_ctl.bw_rate_cap = 0; 2264 return; 2265 } 2266 /* 2267 * Now lets find the amount of data left to send. 2268 * 2269 * Now ideally we want to use the end_seq to figure out how much more 2270 * but it might not be possible (only if we have the TRACK_FG_COMP on the entry.. 2271 */ 2272 if (ent->flags & TCP_TRK_TRACK_FLG_COMP) { 2273 if (SEQ_GT(ent->end_seq, rack->rc_tp->snd_una)) 2274 lenleft = ent->end_seq - rack->rc_tp->snd_una; 2275 else { 2276 /* TSNH, we should catch it at the send */ 2277 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2278 0, timeleft, 0, HYBRID_LOG_CAPERROR, 0, ent, __LINE__); 2279 rack->r_ctl.bw_rate_cap = 0; 2280 return; 2281 } 2282 } else { 2283 /* 2284 * The hard way, figure out how much is gone and then 2285 * take that away from the total the client asked for 2286 * (thats off by tls overhead if this is tls). 
2287 */ 2288 if (SEQ_GT(rack->rc_tp->snd_una, ent->start_seq)) 2289 lengone = rack->rc_tp->snd_una - ent->start_seq; 2290 else 2291 lengone = 0; 2292 if (lengone < (ent->end - ent->start)) 2293 lenleft = (ent->end - ent->start) - lengone; 2294 else { 2295 /* TSNH, we should catch it at the send */ 2296 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2297 0, timeleft, lengone, HYBRID_LOG_CAPERROR, 0, ent, __LINE__); 2298 rack->r_ctl.bw_rate_cap = 0; 2299 return; 2300 } 2301 } 2302 if (lenleft == 0) { 2303 /* We have it all sent */ 2304 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2305 0, timeleft, lenleft, HYBRID_LOG_ALLSENT, 0, ent, __LINE__); 2306 if (rack->r_ctl.bw_rate_cap) 2307 goto normal_ratecap; 2308 else 2309 return; 2310 } 2311 calcbw = lenleft * HPTS_USEC_IN_SEC; 2312 calcbw /= timeleft; 2313 /* Now we must compensate for IP/TCP overhead */ 2314 calcbw = rack_compensate_for_linerate(rack, calcbw); 2315 /* Update the bit rate cap */ 2316 rack->r_ctl.bw_rate_cap = calcbw; 2317 if ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && 2318 (rack_hybrid_allow_set_maxseg == 1) && 2319 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { 2320 /* Lets set in a smaller mss possibly here to match our rate-cap */ 2321 uint32_t orig_max; 2322 2323 orig_max = rack->r_ctl.rc_pace_max_segs; 2324 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; 2325 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, calcbw, ctf_fixed_maxseg(rack->rc_tp)); 2326 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5); 2327 } 2328 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2329 calcbw, timeleft, lenleft, HYBRID_LOG_CAP_CALC, 0, ent, __LINE__); 2330 if ((calcbw > 0) && (*bw > calcbw)) { 2331 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2332 *bw, ent->deadline, lenleft, HYBRID_LOG_RATE_CAP, 0, ent, __LINE__); 2333 *capped = 1; 2334 *bw = calcbw; 2335 } 2336 return; 2337 } 2338 normal_ratecap: 2339 #endif 2340 if ((rack->r_ctl.bw_rate_cap > 0) && (*bw > rack->r_ctl.bw_rate_cap)) { 2341 #ifdef TCP_REQUEST_TRK 2342 if (rack->rc_hybrid_mode && 2343 rack->rc_catch_up && 2344 (rack->r_ctl.rc_last_sft != NULL) && 2345 (rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && 2346 (rack_hybrid_allow_set_maxseg == 1) && 2347 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { 2348 /* Lets set in a smaller mss possibly here to match our rate-cap */ 2349 uint32_t orig_max; 2350 2351 orig_max = rack->r_ctl.rc_pace_max_segs; 2352 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; 2353 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, rack->r_ctl.bw_rate_cap, ctf_fixed_maxseg(rack->rc_tp)); 2354 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5); 2355 } 2356 #endif 2357 *capped = 1; 2358 *bw = rack->r_ctl.bw_rate_cap; 2359 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2360 *bw, 0, 0, 2361 HYBRID_LOG_RATE_CAP, 1, NULL, __LINE__); 2362 } 2363 } 2364 2365 static uint64_t 2366 rack_get_gp_est(struct tcp_rack *rack) 2367 { 2368 uint64_t bw, lt_bw, ret_bw; 2369 2370 if (rack->rc_gp_filled == 0) { 2371 /* 2372 * We have yet no b/w measurement, 2373 * if we have a user set initial bw 2374 * return it. If we don't have that and 2375 * we have an srtt, use the tcp IW (10) to 2376 * calculate a fictional b/w over the SRTT 2377 * which is more or less a guess. 
Note 2378 * we don't use our IW from rack on purpose 2379 * so if we have like IW=30, we are not 2380 * calculating a "huge" b/w. 2381 */ 2382 uint64_t srtt; 2383 2384 if (rack->dis_lt_bw == 1) 2385 lt_bw = 0; 2386 else 2387 lt_bw = rack_get_lt_bw(rack); 2388 if (lt_bw) { 2389 /* 2390 * No goodput bw but a long-term b/w does exist 2391 * lets use that. 2392 */ 2393 ret_bw = lt_bw; 2394 goto compensate; 2395 } 2396 if (rack->r_ctl.init_rate) 2397 return (rack->r_ctl.init_rate); 2398 2399 /* Ok lets come up with the IW guess, if we have a srtt */ 2400 if (rack->rc_tp->t_srtt == 0) { 2401 /* 2402 * Go with old pacing method 2403 * i.e. burst mitigation only. 2404 */ 2405 return (0); 2406 } 2407 /* Ok lets get the initial TCP win (not racks) */ 2408 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 2409 srtt = (uint64_t)rack->rc_tp->t_srtt; 2410 bw *= (uint64_t)USECS_IN_SECOND; 2411 bw /= srtt; 2412 ret_bw = bw; 2413 goto compensate; 2414 2415 } 2416 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 2417 /* Averaging is done, we can return the value */ 2418 bw = rack->r_ctl.gp_bw; 2419 } else { 2420 /* Still doing initial average must calculate */ 2421 bw = rack->r_ctl.gp_bw / max(rack->r_ctl.num_measurements, 1); 2422 } 2423 if (rack->dis_lt_bw) { 2424 /* We are not using lt-bw */ 2425 ret_bw = bw; 2426 goto compensate; 2427 } 2428 lt_bw = rack_get_lt_bw(rack); 2429 if (lt_bw == 0) { 2430 /* If we don't have one then equate it to the gp_bw */ 2431 lt_bw = rack->r_ctl.gp_bw; 2432 } 2433 if (rack->use_lesser_lt_bw) { 2434 if (lt_bw < bw) 2435 ret_bw = lt_bw; 2436 else 2437 ret_bw = bw; 2438 } else { 2439 if (lt_bw > bw) 2440 ret_bw = lt_bw; 2441 else 2442 ret_bw = bw; 2443 } 2444 /* 2445 * Now lets compensate based on the TCP/IP overhead. Our 2446 * Goodput estimate does not include this so we must pace out 2447 * a bit faster since our pacing calculations do. The pacing 2448 * calculations use the base ETHERNET_SEGMENT_SIZE and the segsiz 2449 * we are using to do this, so we do that here in the opposite 2450 * direction as well. This means that if we are tunneled and the 2451 * segsiz is say 1200 bytes we will get quite a boost, but its 2452 * compensated for in the pacing time the opposite way. 2453 */ 2454 compensate: 2455 ret_bw = rack_compensate_for_linerate(rack, ret_bw); 2456 return(ret_bw); 2457 } 2458 2459 2460 static uint64_t 2461 rack_get_bw(struct tcp_rack *rack) 2462 { 2463 uint64_t bw; 2464 2465 if (rack->use_fixed_rate) { 2466 /* Return the fixed pacing rate */ 2467 return (rack_get_fixed_pacing_bw(rack)); 2468 } 2469 bw = rack_get_gp_est(rack); 2470 return (bw); 2471 } 2472 2473 static uint16_t 2474 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 2475 { 2476 if (rack->use_fixed_rate) { 2477 return (100); 2478 } else if (rack->in_probe_rtt && (rsm == NULL)) 2479 return (rack->r_ctl.rack_per_of_gp_probertt); 2480 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 2481 rack->r_ctl.rack_per_of_gp_rec)) { 2482 if (rsm) { 2483 /* a retransmission always use the recovery rate */ 2484 return (rack->r_ctl.rack_per_of_gp_rec); 2485 } else if (rack->rack_rec_nonrxt_use_cr) { 2486 /* Directed to use the configured rate */ 2487 goto configured_rate; 2488 } else if (rack->rack_no_prr && 2489 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 2490 /* No PRR, lets just use the b/w estimate only */ 2491 return (100); 2492 } else { 2493 /* 2494 * Here we may have a non-retransmit but we 2495 * have no overrides, so just use the recovery 2496 * rate (prr is in effect). 
2497 */ 2498 return (rack->r_ctl.rack_per_of_gp_rec); 2499 } 2500 } 2501 configured_rate: 2502 /* For the configured rate we look at our cwnd vs the ssthresh */ 2503 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 2504 return (rack->r_ctl.rack_per_of_gp_ss); 2505 else 2506 return (rack->r_ctl.rack_per_of_gp_ca); 2507 } 2508 2509 static void 2510 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6) 2511 { 2512 /* 2513 * Types of logs (mod value) 2514 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. 2515 * 2 = a dsack round begins, persist is reset to 16. 2516 * 3 = a dsack round ends 2517 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh 2518 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack 2519 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. 2520 */ 2521 if (tcp_bblogging_on(rack->rc_tp)) { 2522 union tcp_log_stackspecific log; 2523 struct timeval tv; 2524 2525 memset(&log, 0, sizeof(log)); 2526 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; 2527 log.u_bbr.flex1 <<= 1; 2528 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; 2529 log.u_bbr.flex1 <<= 1; 2530 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; 2531 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; 2532 log.u_bbr.flex3 = rack->r_ctl.num_dsack; 2533 log.u_bbr.flex4 = flex4; 2534 log.u_bbr.flex5 = flex5; 2535 log.u_bbr.flex6 = flex6; 2536 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; 2537 log.u_bbr.flex8 = mod; 2538 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2539 log.u_bbr.epoch = rack->r_ctl.current_round; 2540 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2541 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2542 &rack->rc_inp->inp_socket->so_rcv, 2543 &rack->rc_inp->inp_socket->so_snd, 2544 RACK_DSACK_HANDLING, 0, 2545 0, &log, false, &tv); 2546 } 2547 } 2548 2549 static void 2550 rack_log_hdwr_pacing(struct tcp_rack *rack, 2551 uint64_t rate, uint64_t hw_rate, int line, 2552 int error, uint16_t mod) 2553 { 2554 if (tcp_bblogging_on(rack->rc_tp)) { 2555 union tcp_log_stackspecific log; 2556 struct timeval tv; 2557 const struct ifnet *ifp; 2558 uint64_t ifp64; 2559 2560 memset(&log, 0, sizeof(log)); 2561 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 2562 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 2563 if (rack->r_ctl.crte) { 2564 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 2565 } else if (rack->rc_inp->inp_route.ro_nh && 2566 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 2567 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 2568 } else 2569 ifp = NULL; 2570 if (ifp) { 2571 ifp64 = (uintptr_t)ifp; 2572 log.u_bbr.flex3 = ((ifp64 >> 32) & 0x00000000ffffffff); 2573 log.u_bbr.flex4 = (ifp64 & 0x00000000ffffffff); 2574 } 2575 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2576 log.u_bbr.bw_inuse = rate; 2577 log.u_bbr.flex5 = line; 2578 log.u_bbr.flex6 = error; 2579 log.u_bbr.flex7 = mod; 2580 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 2581 log.u_bbr.flex8 = rack->use_fixed_rate; 2582 log.u_bbr.flex8 <<= 1; 2583 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 2584 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 2585 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 2586 if (rack->r_ctl.crte) 2587 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 2588 else 2589 log.u_bbr.cur_del_rate = 0; 2590 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 2591 log.u_bbr.epoch = rack->r_ctl.current_round; 2592 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2593 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2594 
&rack->rc_inp->inp_socket->so_rcv, 2595 &rack->rc_inp->inp_socket->so_snd, 2596 BBR_LOG_HDWR_PACE, 0, 2597 0, &log, false, &tv); 2598 } 2599 } 2600 2601 static uint64_t 2602 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) 2603 { 2604 /* 2605 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 2606 */ 2607 uint64_t bw_est, high_rate; 2608 uint64_t gain; 2609 2610 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2611 bw_est = bw * gain; 2612 bw_est /= (uint64_t)100; 2613 /* Never fall below the minimum (def 64kbps) */ 2614 if (bw_est < RACK_MIN_BW) 2615 bw_est = RACK_MIN_BW; 2616 if (rack->r_rack_hw_rate_caps) { 2617 /* Rate caps are in place */ 2618 if (rack->r_ctl.crte != NULL) { 2619 /* We have a hdwr rate already */ 2620 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2621 if (bw_est >= high_rate) { 2622 /* We are capping bw at the highest rate table entry */ 2623 if (rack_hw_rate_cap_per && 2624 (((high_rate * (100 + rack_hw_rate_cap_per)) / 100) < bw_est)) { 2625 rack->r_rack_hw_rate_caps = 0; 2626 goto done; 2627 } 2628 rack_log_hdwr_pacing(rack, 2629 bw_est, high_rate, __LINE__, 2630 0, 3); 2631 bw_est = high_rate; 2632 if (capped) 2633 *capped = 1; 2634 } 2635 } else if ((rack->rack_hdrw_pacing == 0) && 2636 (rack->rack_hdw_pace_ena) && 2637 (rack->rack_attempt_hdwr_pace == 0) && 2638 (rack->rc_inp->inp_route.ro_nh != NULL) && 2639 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2640 /* 2641 * Special case, we have not yet attempted hardware 2642 * pacing, and yet we may, when we do, find out if we are 2643 * above the highest rate. We need to know the maxbw for the interface 2644 * in question (if it supports ratelimiting). We get back 2645 * a 0, if the interface is not found in the RL lists. 2646 */ 2647 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2648 if (high_rate) { 2649 /* Yep, we have a rate is it above this rate? */ 2650 if (bw_est > high_rate) { 2651 bw_est = high_rate; 2652 if (capped) 2653 *capped = 1; 2654 } 2655 } 2656 } 2657 } 2658 done: 2659 return (bw_est); 2660 } 2661 2662 static void 2663 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2664 { 2665 if (tcp_bblogging_on(rack->rc_tp)) { 2666 union tcp_log_stackspecific log; 2667 struct timeval tv; 2668 2669 if ((mod != 1) && (rack_verbose_logging == 0)) { 2670 /* 2671 * We get 3 values currently for mod 2672 * 1 - We are retransmitting and this tells the reason. 2673 * 2 - We are clearing a dup-ack count. 2674 * 3 - We are incrementing a dup-ack count. 2675 * 2676 * The clear/increment are only logged 2677 * if you have BBverbose on. 
2678 */ 2679 return; 2680 } 2681 memset(&log, 0, sizeof(log)); 2682 log.u_bbr.flex1 = tsused; 2683 log.u_bbr.flex2 = thresh; 2684 log.u_bbr.flex3 = rsm->r_flags; 2685 log.u_bbr.flex4 = rsm->r_dupack; 2686 log.u_bbr.flex5 = rsm->r_start; 2687 log.u_bbr.flex6 = rsm->r_end; 2688 log.u_bbr.flex8 = mod; 2689 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2690 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2691 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2692 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2693 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2694 log.u_bbr.pacing_gain = rack->r_must_retran; 2695 log.u_bbr.epoch = rack->r_ctl.current_round; 2696 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2697 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2698 &rack->rc_inp->inp_socket->so_rcv, 2699 &rack->rc_inp->inp_socket->so_snd, 2700 BBR_LOG_SETTINGS_CHG, 0, 2701 0, &log, false, &tv); 2702 } 2703 } 2704 2705 static void 2706 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2707 { 2708 if (tcp_bblogging_on(rack->rc_tp)) { 2709 union tcp_log_stackspecific log; 2710 struct timeval tv; 2711 2712 memset(&log, 0, sizeof(log)); 2713 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2714 log.u_bbr.flex2 = to; 2715 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2716 log.u_bbr.flex4 = slot; 2717 log.u_bbr.flex5 = rack->rc_tp->t_hpts_slot; 2718 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2719 log.u_bbr.flex7 = rack->rc_in_persist; 2720 log.u_bbr.flex8 = which; 2721 if (rack->rack_no_prr) 2722 log.u_bbr.pkts_out = 0; 2723 else 2724 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2725 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2726 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2727 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2728 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2729 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2730 log.u_bbr.pacing_gain = rack->r_must_retran; 2731 log.u_bbr.cwnd_gain = rack->rack_deferred_inited; 2732 log.u_bbr.pkt_epoch = rack->rc_has_collapsed; 2733 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2734 log.u_bbr.lost = rack_rto_min; 2735 log.u_bbr.epoch = rack->r_ctl.roundends; 2736 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2737 log.u_bbr.bw_inuse <<= 32; 2738 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2739 log.u_bbr.applimited = rack->rc_tp->t_flags2; 2740 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2741 &rack->rc_inp->inp_socket->so_rcv, 2742 &rack->rc_inp->inp_socket->so_snd, 2743 BBR_LOG_TIMERSTAR, 0, 2744 0, &log, false, &tv); 2745 } 2746 } 2747 2748 static void 2749 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2750 { 2751 if (tcp_bblogging_on(rack->rc_tp)) { 2752 union tcp_log_stackspecific log; 2753 struct timeval tv; 2754 2755 memset(&log, 0, sizeof(log)); 2756 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2757 log.u_bbr.flex8 = to_num; 2758 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2759 log.u_bbr.flex2 = rack->rc_rack_rtt; 2760 if (rsm == NULL) 2761 log.u_bbr.flex3 = 0; 2762 else 2763 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2764 if (rack->rack_no_prr) 2765 log.u_bbr.flex5 = 0; 2766 else 2767 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2768 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2769 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2770 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2771 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2772 log.u_bbr.pacing_gain = rack->r_must_retran; 
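/* Pack the round count (upper 32 bits) and the considered-lost count (lower 32 bits) into bw_inuse. */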
2773 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2774 log.u_bbr.bw_inuse <<= 32; 2775 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2776 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2777 &rack->rc_inp->inp_socket->so_rcv, 2778 &rack->rc_inp->inp_socket->so_snd, 2779 BBR_LOG_RTO, 0, 2780 0, &log, false, &tv); 2781 } 2782 } 2783 2784 static void 2785 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, 2786 struct rack_sendmap *prev, 2787 struct rack_sendmap *rsm, 2788 struct rack_sendmap *next, 2789 int flag, uint32_t th_ack, int line) 2790 { 2791 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2792 union tcp_log_stackspecific log; 2793 struct timeval tv; 2794 2795 memset(&log, 0, sizeof(log)); 2796 log.u_bbr.flex8 = flag; 2797 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2798 log.u_bbr.cur_del_rate = (uintptr_t)prev; 2799 log.u_bbr.delRate = (uintptr_t)rsm; 2800 log.u_bbr.rttProp = (uintptr_t)next; 2801 log.u_bbr.flex7 = 0; 2802 if (prev) { 2803 log.u_bbr.flex1 = prev->r_start; 2804 log.u_bbr.flex2 = prev->r_end; 2805 log.u_bbr.flex7 |= 0x4; 2806 } 2807 if (rsm) { 2808 log.u_bbr.flex3 = rsm->r_start; 2809 log.u_bbr.flex4 = rsm->r_end; 2810 log.u_bbr.flex7 |= 0x2; 2811 } 2812 if (next) { 2813 log.u_bbr.flex5 = next->r_start; 2814 log.u_bbr.flex6 = next->r_end; 2815 log.u_bbr.flex7 |= 0x1; 2816 } 2817 log.u_bbr.applimited = line; 2818 log.u_bbr.pkts_out = th_ack; 2819 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2820 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2821 if (rack->rack_no_prr) 2822 log.u_bbr.lost = 0; 2823 else 2824 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2825 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2826 log.u_bbr.bw_inuse <<= 32; 2827 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2828 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2829 &rack->rc_inp->inp_socket->so_rcv, 2830 &rack->rc_inp->inp_socket->so_snd, 2831 TCP_LOG_MAPCHG, 0, 2832 0, &log, false, &tv); 2833 } 2834 } 2835 2836 static void 2837 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2838 struct rack_sendmap *rsm, int conf) 2839 { 2840 if (tcp_bblogging_on(tp)) { 2841 union tcp_log_stackspecific log; 2842 struct timeval tv; 2843 memset(&log, 0, sizeof(log)); 2844 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2845 log.u_bbr.flex1 = t; 2846 log.u_bbr.flex2 = len; 2847 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2848 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2849 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2850 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2851 log.u_bbr.flex7 = conf; 2852 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2853 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2854 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2855 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2856 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2857 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2858 if (rsm) { 2859 log.u_bbr.pkt_epoch = rsm->r_start; 2860 log.u_bbr.lost = rsm->r_end; 2861 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2862 /* We loose any upper of the 24 bits */ 2863 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; 2864 } else { 2865 /* Its a SYN */ 2866 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2867 log.u_bbr.lost = 0; 2868 log.u_bbr.cwnd_gain = 0; 2869 log.u_bbr.pacing_gain = 0; 2870 } 2871 /* Write out general bits of interest rrs here */ 2872 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; 2873 log.u_bbr.use_lt_bw <<= 1; 2874 
log.u_bbr.use_lt_bw |= rack->forced_ack; 2875 log.u_bbr.use_lt_bw <<= 1; 2876 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2877 log.u_bbr.use_lt_bw <<= 1; 2878 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2879 log.u_bbr.use_lt_bw <<= 1; 2880 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2881 log.u_bbr.use_lt_bw <<= 1; 2882 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 2883 log.u_bbr.use_lt_bw <<= 1; 2884 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2885 log.u_bbr.use_lt_bw <<= 1; 2886 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; 2887 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; 2888 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; 2889 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; 2890 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; 2891 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; 2892 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 2893 log.u_bbr.bw_inuse <<= 32; 2894 if (rsm) 2895 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 2896 TCP_LOG_EVENTP(tp, NULL, 2897 &rack->rc_inp->inp_socket->so_rcv, 2898 &rack->rc_inp->inp_socket->so_snd, 2899 BBR_LOG_BBRRTT, 0, 2900 0, &log, false, &tv); 2901 2902 2903 } 2904 } 2905 2906 static void 2907 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 2908 { 2909 /* 2910 * Log the rtt sample we are 2911 * applying to the srtt algorithm in 2912 * useconds. 2913 */ 2914 if (tcp_bblogging_on(rack->rc_tp)) { 2915 union tcp_log_stackspecific log; 2916 struct timeval tv; 2917 2918 /* Convert our ms to a microsecond */ 2919 memset(&log, 0, sizeof(log)); 2920 log.u_bbr.flex1 = rtt; 2921 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2922 log.u_bbr.flex7 = 1; 2923 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2924 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2925 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2926 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2927 log.u_bbr.pacing_gain = rack->r_must_retran; 2928 /* 2929 * We capture in delRate the upper 32 bits as 2930 * the confidence level we had declared, and the 2931 * lower 32 bits as the actual RTT using the arrival 2932 * timestamp. 
2933 */ 2934 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 2935 log.u_bbr.delRate <<= 32; 2936 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 2937 /* Lets capture all the things that make up t_rtxcur */ 2938 log.u_bbr.applimited = rack_rto_min; 2939 log.u_bbr.epoch = rack_rto_max; 2940 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 2941 log.u_bbr.lost = rack_rto_min; 2942 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 2943 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 2944 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 2945 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 2946 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 2947 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2948 &rack->rc_inp->inp_socket->so_rcv, 2949 &rack->rc_inp->inp_socket->so_snd, 2950 TCP_LOG_RTT, 0, 2951 0, &log, false, &tv); 2952 } 2953 } 2954 2955 static void 2956 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 2957 { 2958 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2959 union tcp_log_stackspecific log; 2960 struct timeval tv; 2961 2962 /* Convert our ms to a microsecond */ 2963 memset(&log, 0, sizeof(log)); 2964 log.u_bbr.flex1 = rtt; 2965 log.u_bbr.flex2 = send_time; 2966 log.u_bbr.flex3 = ack_time; 2967 log.u_bbr.flex4 = where; 2968 log.u_bbr.flex7 = 2; 2969 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2970 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2971 log.u_bbr.bw_inuse <<= 32; 2972 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2973 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2974 &rack->rc_inp->inp_socket->so_rcv, 2975 &rack->rc_inp->inp_socket->so_snd, 2976 TCP_LOG_RTT, 0, 2977 0, &log, false, &tv); 2978 } 2979 } 2980 2981 2982 static void 2983 rack_log_rtt_sendmap(struct tcp_rack *rack, uint32_t idx, uint64_t tsv, uint32_t tsecho) 2984 { 2985 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2986 union tcp_log_stackspecific log; 2987 struct timeval tv; 2988 2989 /* Convert our ms to a microsecond */ 2990 memset(&log, 0, sizeof(log)); 2991 log.u_bbr.flex1 = idx; 2992 log.u_bbr.flex2 = rack_ts_to_msec(tsv); 2993 log.u_bbr.flex3 = tsecho; 2994 log.u_bbr.flex7 = 3; 2995 log.u_bbr.rttProp = tsv; 2996 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2997 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2998 log.u_bbr.bw_inuse <<= 32; 2999 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3000 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3001 &rack->rc_inp->inp_socket->so_rcv, 3002 &rack->rc_inp->inp_socket->so_snd, 3003 TCP_LOG_RTT, 0, 3004 0, &log, false, &tv); 3005 } 3006 } 3007 3008 3009 static inline void 3010 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 3011 { 3012 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3013 union tcp_log_stackspecific log; 3014 struct timeval tv; 3015 3016 memset(&log, 0, sizeof(log)); 3017 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3018 log.u_bbr.flex1 = line; 3019 log.u_bbr.flex2 = tick; 3020 log.u_bbr.flex3 = tp->t_maxunacktime; 3021 log.u_bbr.flex4 = tp->t_acktime; 3022 log.u_bbr.flex8 = event; 3023 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3024 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3025 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3026 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3027 log.u_bbr.pacing_gain = rack->r_must_retran; 3028 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3029 log.u_bbr.bw_inuse <<= 32; 3030 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 
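/* As at the other log points, bw_inuse packs the current round count in its upper 32 bits and the considered-lost count in the lower 32 bits. */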
3031 TCP_LOG_EVENTP(tp, NULL, 3032 &rack->rc_inp->inp_socket->so_rcv, 3033 &rack->rc_inp->inp_socket->so_snd, 3034 BBR_LOG_PROGRESS, 0, 3035 0, &log, false, &tv); 3036 } 3037 } 3038 3039 static void 3040 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv, int line) 3041 { 3042 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3043 union tcp_log_stackspecific log; 3044 3045 memset(&log, 0, sizeof(log)); 3046 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3047 log.u_bbr.flex1 = slot; 3048 if (rack->rack_no_prr) 3049 log.u_bbr.flex2 = 0; 3050 else 3051 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 3052 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3053 log.u_bbr.flex6 = line; 3054 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 3055 log.u_bbr.flex8 = rack->rc_in_persist; 3056 log.u_bbr.timeStamp = cts; 3057 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3058 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3059 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3060 log.u_bbr.pacing_gain = rack->r_must_retran; 3061 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3062 &rack->rc_inp->inp_socket->so_rcv, 3063 &rack->rc_inp->inp_socket->so_snd, 3064 BBR_LOG_BBRSND, 0, 3065 0, &log, false, tv); 3066 } 3067 } 3068 3069 static void 3070 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 3071 { 3072 if (tcp_bblogging_on(rack->rc_tp)) { 3073 union tcp_log_stackspecific log; 3074 struct timeval tv; 3075 3076 memset(&log, 0, sizeof(log)); 3077 log.u_bbr.flex1 = did_out; 3078 log.u_bbr.flex2 = nxt_pkt; 3079 log.u_bbr.flex3 = way_out; 3080 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3081 if (rack->rack_no_prr) 3082 log.u_bbr.flex5 = 0; 3083 else 3084 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3085 log.u_bbr.flex6 = nsegs; 3086 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 3087 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 3088 log.u_bbr.flex7 <<= 1; 3089 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 3090 log.u_bbr.flex7 <<= 1; 3091 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 3092 log.u_bbr.flex8 = rack->rc_in_persist; 3093 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3094 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3095 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3096 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 3097 log.u_bbr.use_lt_bw <<= 1; 3098 log.u_bbr.use_lt_bw |= rack->r_might_revert; 3099 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3100 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3101 log.u_bbr.pacing_gain = rack->r_must_retran; 3102 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3103 log.u_bbr.bw_inuse <<= 32; 3104 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3105 log.u_bbr.epoch = rack->rc_inp->inp_socket->so_snd.sb_hiwat; 3106 log.u_bbr.lt_epoch = rack->rc_inp->inp_socket->so_rcv.sb_hiwat; 3107 log.u_bbr.lost = rack->rc_tp->t_srtt; 3108 log.u_bbr.pkt_epoch = rack->rc_tp->rfbuf_cnt; 3109 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3110 &rack->rc_inp->inp_socket->so_rcv, 3111 &rack->rc_inp->inp_socket->so_snd, 3112 BBR_LOG_DOSEG_DONE, 0, 3113 0, &log, false, &tv); 3114 } 3115 } 3116 3117 static void 3118 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 3119 { 3120 if (tcp_bblogging_on(rack->rc_tp)) { 3121 union tcp_log_stackspecific log; 3122 
struct timeval tv; 3123 3124 memset(&log, 0, sizeof(log)); 3125 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 3126 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 3127 log.u_bbr.flex4 = arg1; 3128 log.u_bbr.flex5 = arg2; 3129 log.u_bbr.flex7 = rack->r_ctl.rc_user_set_min_segs; 3130 log.u_bbr.flex6 = arg3; 3131 log.u_bbr.flex8 = frm; 3132 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3133 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3134 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3135 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 3136 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3137 log.u_bbr.pacing_gain = rack->r_must_retran; 3138 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv, 3139 &tptosocket(tp)->so_snd, 3140 TCP_HDWR_PACE_SIZE, 0, 0, &log, false, &tv); 3141 } 3142 } 3143 3144 static void 3145 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, 3146 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 3147 { 3148 if (tcp_bblogging_on(rack->rc_tp)) { 3149 union tcp_log_stackspecific log; 3150 struct timeval tv; 3151 3152 memset(&log, 0, sizeof(log)); 3153 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3154 log.u_bbr.flex1 = slot; 3155 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 3156 log.u_bbr.flex4 = reason; 3157 if (rack->rack_no_prr) 3158 log.u_bbr.flex5 = 0; 3159 else 3160 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3161 log.u_bbr.flex7 = hpts_calling; 3162 log.u_bbr.flex8 = rack->rc_in_persist; 3163 log.u_bbr.lt_epoch = cwnd_to_use; 3164 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3165 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3166 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3167 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3168 log.u_bbr.pacing_gain = rack->r_must_retran; 3169 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 3170 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3171 log.u_bbr.bw_inuse <<= 32; 3172 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3173 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3174 &rack->rc_inp->inp_socket->so_rcv, 3175 &rack->rc_inp->inp_socket->so_snd, 3176 BBR_LOG_JUSTRET, 0, 3177 tlen, &log, false, &tv); 3178 } 3179 } 3180 3181 static void 3182 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 3183 struct timeval *tv, uint32_t flags_on_entry) 3184 { 3185 if (tcp_bblogging_on(rack->rc_tp)) { 3186 union tcp_log_stackspecific log; 3187 3188 memset(&log, 0, sizeof(log)); 3189 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3190 log.u_bbr.flex1 = line; 3191 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 3192 log.u_bbr.flex3 = flags_on_entry; 3193 log.u_bbr.flex4 = us_cts; 3194 if (rack->rack_no_prr) 3195 log.u_bbr.flex5 = 0; 3196 else 3197 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3198 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 3199 log.u_bbr.flex7 = hpts_removed; 3200 log.u_bbr.flex8 = 1; 3201 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 3202 log.u_bbr.timeStamp = us_cts; 3203 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3204 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3205 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3206 log.u_bbr.pacing_gain = rack->r_must_retran; 3207 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3208 log.u_bbr.bw_inuse <<= 32; 3209 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3210 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3211 &rack->rc_inp->inp_socket->so_rcv, 3212 &rack->rc_inp->inp_socket->so_snd, 3213 
BBR_LOG_TIMERCANC, 0, 3214 0, &log, false, tv); 3215 } 3216 } 3217 3218 static void 3219 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 3220 uint32_t flex1, uint32_t flex2, 3221 uint32_t flex3, uint32_t flex4, 3222 uint32_t flex5, uint32_t flex6, 3223 uint16_t flex7, uint8_t mod) 3224 { 3225 if (tcp_bblogging_on(rack->rc_tp)) { 3226 union tcp_log_stackspecific log; 3227 struct timeval tv; 3228 3229 if (mod == 1) { 3230 /* No you can't use 1, its for the real to cancel */ 3231 return; 3232 } 3233 memset(&log, 0, sizeof(log)); 3234 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3235 log.u_bbr.flex1 = flex1; 3236 log.u_bbr.flex2 = flex2; 3237 log.u_bbr.flex3 = flex3; 3238 log.u_bbr.flex4 = flex4; 3239 log.u_bbr.flex5 = flex5; 3240 log.u_bbr.flex6 = flex6; 3241 log.u_bbr.flex7 = flex7; 3242 log.u_bbr.flex8 = mod; 3243 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3244 &rack->rc_inp->inp_socket->so_rcv, 3245 &rack->rc_inp->inp_socket->so_snd, 3246 BBR_LOG_TIMERCANC, 0, 3247 0, &log, false, &tv); 3248 } 3249 } 3250 3251 static void 3252 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 3253 { 3254 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3255 union tcp_log_stackspecific log; 3256 struct timeval tv; 3257 3258 memset(&log, 0, sizeof(log)); 3259 log.u_bbr.flex1 = timers; 3260 log.u_bbr.flex2 = ret; 3261 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 3262 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3263 log.u_bbr.flex5 = cts; 3264 if (rack->rack_no_prr) 3265 log.u_bbr.flex6 = 0; 3266 else 3267 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; 3268 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3269 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3270 log.u_bbr.pacing_gain = rack->r_must_retran; 3271 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3272 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3273 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3274 &rack->rc_inp->inp_socket->so_rcv, 3275 &rack->rc_inp->inp_socket->so_snd, 3276 BBR_LOG_TO_PROCESS, 0, 3277 0, &log, false, &tv); 3278 } 3279 } 3280 3281 static void 3282 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line) 3283 { 3284 if (tcp_bblogging_on(rack->rc_tp)) { 3285 union tcp_log_stackspecific log; 3286 struct timeval tv; 3287 3288 memset(&log, 0, sizeof(log)); 3289 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 3290 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 3291 if (rack->rack_no_prr) 3292 log.u_bbr.flex3 = 0; 3293 else 3294 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 3295 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 3296 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 3297 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 3298 log.u_bbr.flex7 = line; 3299 log.u_bbr.flex8 = frm; 3300 log.u_bbr.pkts_out = orig_cwnd; 3301 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3302 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3303 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 3304 log.u_bbr.use_lt_bw <<= 1; 3305 log.u_bbr.use_lt_bw |= rack->r_might_revert; 3306 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3307 &rack->rc_inp->inp_socket->so_rcv, 3308 &rack->rc_inp->inp_socket->so_snd, 3309 BBR_LOG_BBRUPD, 0, 3310 0, &log, false, &tv); 3311 } 3312 } 3313 3314 static void 3315 rack_counter_destroy(void) 3316 { 3317 counter_u64_free(rack_total_bytes); 3318 counter_u64_free(rack_fto_send); 3319 counter_u64_free(rack_fto_rsm_send); 3320 counter_u64_free(rack_nfto_resend); 3321 counter_u64_free(rack_hw_pace_init_fail); 3322 counter_u64_free(rack_hw_pace_lost); 3323 
counter_u64_free(rack_non_fto_send); 3324 counter_u64_free(rack_extended_rfo); 3325 counter_u64_free(rack_ack_total); 3326 counter_u64_free(rack_express_sack); 3327 counter_u64_free(rack_sack_total); 3328 counter_u64_free(rack_move_none); 3329 counter_u64_free(rack_move_some); 3330 counter_u64_free(rack_sack_attacks_detected); 3331 counter_u64_free(rack_sack_attacks_reversed); 3332 counter_u64_free(rack_sack_attacks_suspect); 3333 counter_u64_free(rack_sack_used_next_merge); 3334 counter_u64_free(rack_sack_used_prev_merge); 3335 counter_u64_free(rack_tlp_tot); 3336 counter_u64_free(rack_tlp_newdata); 3337 counter_u64_free(rack_tlp_retran); 3338 counter_u64_free(rack_tlp_retran_bytes); 3339 counter_u64_free(rack_to_tot); 3340 counter_u64_free(rack_saw_enobuf); 3341 counter_u64_free(rack_saw_enobuf_hw); 3342 counter_u64_free(rack_saw_enetunreach); 3343 counter_u64_free(rack_hot_alloc); 3344 counter_u64_free(rack_to_alloc); 3345 counter_u64_free(rack_to_alloc_hard); 3346 counter_u64_free(rack_to_alloc_emerg); 3347 counter_u64_free(rack_to_alloc_limited); 3348 counter_u64_free(rack_alloc_limited_conns); 3349 counter_u64_free(rack_split_limited); 3350 counter_u64_free(rack_multi_single_eq); 3351 counter_u64_free(rack_rxt_clamps_cwnd); 3352 counter_u64_free(rack_rxt_clamps_cwnd_uniq); 3353 counter_u64_free(rack_proc_non_comp_ack); 3354 counter_u64_free(rack_sack_proc_all); 3355 counter_u64_free(rack_sack_proc_restart); 3356 counter_u64_free(rack_sack_proc_short); 3357 counter_u64_free(rack_sack_skipped_acked); 3358 counter_u64_free(rack_sack_splits); 3359 counter_u64_free(rack_input_idle_reduces); 3360 counter_u64_free(rack_collapsed_win); 3361 counter_u64_free(rack_collapsed_win_rxt); 3362 counter_u64_free(rack_collapsed_win_rxt_bytes); 3363 counter_u64_free(rack_collapsed_win_seen); 3364 counter_u64_free(rack_try_scwnd); 3365 counter_u64_free(rack_persists_sends); 3366 counter_u64_free(rack_persists_acks); 3367 counter_u64_free(rack_persists_loss); 3368 counter_u64_free(rack_persists_lost_ends); 3369 #ifdef INVARIANTS 3370 counter_u64_free(rack_adjust_map_bw); 3371 #endif 3372 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 3373 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 3374 } 3375 3376 static struct rack_sendmap * 3377 rack_alloc(struct tcp_rack *rack) 3378 { 3379 struct rack_sendmap *rsm; 3380 3381 /* 3382 * First get the top of the list it in 3383 * theory is the "hottest" rsm we have, 3384 * possibly just freed by ack processing. 3385 */ 3386 if (rack->rc_free_cnt > rack_free_cache) { 3387 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 3388 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3389 counter_u64_add(rack_hot_alloc, 1); 3390 rack->rc_free_cnt--; 3391 return (rsm); 3392 } 3393 /* 3394 * Once we get under our free cache we probably 3395 * no longer have a "hot" one available. Lets 3396 * get one from UMA. 3397 */ 3398 rsm = uma_zalloc(rack_zone, M_NOWAIT); 3399 if (rsm) { 3400 rack->r_ctl.rc_num_maps_alloced++; 3401 counter_u64_add(rack_to_alloc, 1); 3402 return (rsm); 3403 } 3404 /* 3405 * Dig in to our aux rsm's (the last two) since 3406 * UMA failed to get us one. 
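 * (These are the entries rack_alloc() leaves on the free list once
 * rc_free_cnt reaches rack_free_cache, so they are effectively reserved
 * for this emergency path.)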
3407 */ 3408 if (rack->rc_free_cnt) { 3409 counter_u64_add(rack_to_alloc_emerg, 1); 3410 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 3411 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3412 rack->rc_free_cnt--; 3413 return (rsm); 3414 } 3415 return (NULL); 3416 } 3417 3418 static struct rack_sendmap * 3419 rack_alloc_full_limit(struct tcp_rack *rack) 3420 { 3421 if ((V_tcp_map_entries_limit > 0) && 3422 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 3423 counter_u64_add(rack_to_alloc_limited, 1); 3424 if (!rack->alloc_limit_reported) { 3425 rack->alloc_limit_reported = 1; 3426 counter_u64_add(rack_alloc_limited_conns, 1); 3427 } 3428 return (NULL); 3429 } 3430 return (rack_alloc(rack)); 3431 } 3432 3433 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 3434 static struct rack_sendmap * 3435 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 3436 { 3437 struct rack_sendmap *rsm; 3438 3439 if (limit_type) { 3440 /* currently there is only one limit type */ 3441 if (rack->r_ctl.rc_split_limit > 0 && 3442 rack->r_ctl.rc_num_split_allocs >= rack->r_ctl.rc_split_limit) { 3443 counter_u64_add(rack_split_limited, 1); 3444 if (!rack->alloc_limit_reported) { 3445 rack->alloc_limit_reported = 1; 3446 counter_u64_add(rack_alloc_limited_conns, 1); 3447 } 3448 return (NULL); 3449 } 3450 } 3451 3452 /* allocate and mark in the limit type, if set */ 3453 rsm = rack_alloc(rack); 3454 if (rsm != NULL && limit_type) { 3455 rsm->r_limit_type = limit_type; 3456 rack->r_ctl.rc_num_split_allocs++; 3457 } 3458 return (rsm); 3459 } 3460 3461 static void 3462 rack_free_trim(struct tcp_rack *rack) 3463 { 3464 struct rack_sendmap *rsm; 3465 3466 /* 3467 * Free up all the tail entries until 3468 * we get our list down to the limit. 
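 * We trim from the tail because rack_free() inserts at the head, so the
 * head keeps the most recently freed ("hottest") entries that
 * rack_alloc() prefers to hand back out.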
3469 */ 3470 while (rack->rc_free_cnt > rack_free_cache) { 3471 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); 3472 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3473 rack->rc_free_cnt--; 3474 rack->r_ctl.rc_num_maps_alloced--; 3475 uma_zfree(rack_zone, rsm); 3476 } 3477 } 3478 3479 static void 3480 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 3481 { 3482 if (rsm->r_flags & RACK_APP_LIMITED) { 3483 if (rack->r_ctl.rc_app_limited_cnt > 0) { 3484 rack->r_ctl.rc_app_limited_cnt--; 3485 } 3486 } 3487 if (rsm->r_limit_type) { 3488 /* currently there is only one limit type */ 3489 rack->r_ctl.rc_num_split_allocs--; 3490 } 3491 if (rsm == rack->r_ctl.rc_first_appl) { 3492 rack->r_ctl.cleared_app_ack_seq = rsm->r_end; 3493 rack->r_ctl.cleared_app_ack = 1; 3494 if (rack->r_ctl.rc_app_limited_cnt == 0) 3495 rack->r_ctl.rc_first_appl = NULL; 3496 else 3497 rack->r_ctl.rc_first_appl = tqhash_find(rack->r_ctl.tqh, rsm->r_nseq_appl); 3498 } 3499 if (rsm == rack->r_ctl.rc_resend) 3500 rack->r_ctl.rc_resend = NULL; 3501 if (rsm == rack->r_ctl.rc_end_appl) 3502 rack->r_ctl.rc_end_appl = NULL; 3503 if (rack->r_ctl.rc_tlpsend == rsm) 3504 rack->r_ctl.rc_tlpsend = NULL; 3505 if (rack->r_ctl.rc_sacklast == rsm) 3506 rack->r_ctl.rc_sacklast = NULL; 3507 memset(rsm, 0, sizeof(struct rack_sendmap)); 3508 /* Make sure we are not going to overrun our count limit of 0xff */ 3509 if ((rack->rc_free_cnt + 1) > RACK_FREE_CNT_MAX) { 3510 rack_free_trim(rack); 3511 } 3512 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); 3513 rack->rc_free_cnt++; 3514 } 3515 3516 static uint32_t 3517 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack) 3518 { 3519 uint64_t srtt, bw, len, tim; 3520 uint32_t segsiz, def_len, minl; 3521 3522 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3523 def_len = rack_def_data_window * segsiz; 3524 if (rack->rc_gp_filled == 0) { 3525 /* 3526 * We have no measurement (IW is in flight?) so 3527 * we can only guess using our data_window sysctl 3528 * value (usually 20MSS). 3529 */ 3530 return (def_len); 3531 } 3532 /* 3533 * Now we have a number of factors to consider. 3534 * 3535 * 1) We have a desired BDP which is usually 3536 * at least 2. 3537 * 2) We have a minimum number of rtt's usually 1 SRTT 3538 * but we allow it too to be more. 3539 * 3) We want to make sure a measurement last N useconds (if 3540 * we have set rack_min_measure_usec. 3541 * 3542 * We handle the first concern here by trying to create a data 3543 * window of max(rack_def_data_window, DesiredBDP). The 3544 * second concern we handle in not letting the measurement 3545 * window end normally until at least the required SRTT's 3546 * have gone by which is done further below in 3547 * rack_enough_for_measurement(). Finally the third concern 3548 * we also handle here by calculating how long that time 3549 * would take at the current BW and then return the 3550 * max of our first calculation and that length. Note 3551 * that if rack_min_measure_usec is 0, we don't deal 3552 * with concern 3. Also for both Concern 1 and 3 an 3553 * application limited period could end the measurement 3554 * earlier. 3555 * 3556 * So lets calculate the BDP with the "known" b/w using 3557 * the SRTT has our rtt and then multiply it by the 3558 * goal. 
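 * As an illustrative example (assumed numbers, not from a measurement):
 * with a b/w estimate of 1,500,000 bytes/sec, an SRTT of 40,000 usec and
 * rack_goal_bdp of 2, len = (1500000 * 40000) / 1000000 * 2 = 120,000
 * bytes, which is then rounded up to the next multiple of segsiz below.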
3559 */ 3560 bw = rack_get_bw(rack); 3561 srtt = (uint64_t)tp->t_srtt; 3562 len = bw * srtt; 3563 len /= (uint64_t)HPTS_USEC_IN_SEC; 3564 len *= max(1, rack_goal_bdp); 3565 /* Now we need to round up to the nearest MSS */ 3566 len = roundup(len, segsiz); 3567 if (rack_min_measure_usec) { 3568 /* Now calculate our min length for this b/w */ 3569 tim = rack_min_measure_usec; 3570 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC; 3571 if (minl == 0) 3572 minl = 1; 3573 minl = roundup(minl, segsiz); 3574 if (len < minl) 3575 len = minl; 3576 } 3577 /* 3578 * Now if we have a very small window we want 3579 * to attempt to get the window that is 3580 * as small as possible. This happens on 3581 * low b/w connections and we don't want to 3582 * span huge numbers of rtt's between measurements. 3583 * 3584 * We basically include 2 over our "MIN window" so 3585 * that the measurement can be shortened (possibly) by 3586 * an ack'ed packet. 3587 */ 3588 if (len < def_len) 3589 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz))); 3590 else 3591 return (max((uint32_t)len, def_len)); 3592 3593 } 3594 3595 static int 3596 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality) 3597 { 3598 uint32_t tim, srtts, segsiz; 3599 3600 /* 3601 * Has enough time passed for the GP measurement to be valid? 3602 */ 3603 if (SEQ_LT(th_ack, tp->gput_seq)) { 3604 /* Not enough bytes yet */ 3605 return (0); 3606 } 3607 if ((tp->snd_max == tp->snd_una) || 3608 (th_ack == tp->snd_max)){ 3609 /* 3610 * All is acked; the quality of an all-acked measurement is 3611 * usually low or medium, but we in theory could split 3612 * all acked into two cases, where you got 3613 * a significant amount of your window and 3614 * where you did not. For now we leave it 3615 * but it is something to contemplate in the 3616 * future. The danger here is that delayed ack 3617 * is affecting the last byte (which is a 50:50 chance). 3618 */ 3619 *quality = RACK_QUALITY_ALLACKED; 3620 return (1); 3621 } 3622 if (SEQ_GEQ(th_ack, tp->gput_ack)) { 3623 /* 3624 * We obtained the entire window of data we wanted; 3625 * no matter if we are in recovery or not 3626 * it's ok, since expanding the window does not 3627 * make things fuzzy (or at least not as much). 3628 */ 3629 *quality = RACK_QUALITY_HIGH; 3630 return (1); 3631 } 3632 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3633 if (SEQ_LT(th_ack, tp->gput_ack) && 3634 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 3635 /* Not enough bytes yet */ 3636 return (0); 3637 } 3638 if (rack->r_ctl.rc_first_appl && 3639 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { 3640 /* 3641 * We are up to the app limited send point, so 3642 * we have to measure irrespective of the time. 3643 */ 3644 *quality = RACK_QUALITY_APPLIMITED; 3645 return (1); 3646 } 3647 /* Now what about time? */ 3648 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); 3649 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; 3650 if ((tim >= srtts) && (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) { 3651 /* 3652 * We do not allow a measurement if we are in recovery, 3653 * as that would shrink the goodput window we wanted. 3654 * This is to prevent cloudiness about when the last send 3655 * was actually made.
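 * As a concrete (illustrative) example: with rc_gp_srtt of 30000 usec and
 * rack_min_srtts of 1, at least 30 ms must have passed since gput_ts
 * before time alone can validate the measurement, and even then only
 * outside of recovery.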
3656 */ 3657 *quality = RACK_QUALITY_HIGH; 3658 return (1); 3659 } 3660 /* Nope not even a full SRTT has passed */ 3661 return (0); 3662 } 3663 3664 static void 3665 rack_log_timely(struct tcp_rack *rack, 3666 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd, 3667 uint64_t up_bnd, int line, uint8_t method) 3668 { 3669 if (tcp_bblogging_on(rack->rc_tp)) { 3670 union tcp_log_stackspecific log; 3671 struct timeval tv; 3672 3673 memset(&log, 0, sizeof(log)); 3674 log.u_bbr.flex1 = logged; 3675 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; 3676 log.u_bbr.flex2 <<= 4; 3677 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; 3678 log.u_bbr.flex2 <<= 4; 3679 log.u_bbr.flex2 |= rack->rc_gp_incr; 3680 log.u_bbr.flex2 <<= 4; 3681 log.u_bbr.flex2 |= rack->rc_gp_bwred; 3682 log.u_bbr.flex3 = rack->rc_gp_incr; 3683 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3684 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; 3685 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; 3686 log.u_bbr.flex7 = rack->rc_gp_bwred; 3687 log.u_bbr.flex8 = method; 3688 log.u_bbr.cur_del_rate = cur_bw; 3689 log.u_bbr.delRate = low_bnd; 3690 log.u_bbr.bw_inuse = up_bnd; 3691 log.u_bbr.rttProp = rack_get_bw(rack); 3692 log.u_bbr.pkt_epoch = line; 3693 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3694 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3695 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3696 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3697 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3698 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; 3699 log.u_bbr.cwnd_gain <<= 1; 3700 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; 3701 log.u_bbr.cwnd_gain <<= 1; 3702 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 3703 log.u_bbr.cwnd_gain <<= 1; 3704 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 3705 log.u_bbr.lost = rack->r_ctl.rc_loss_count; 3706 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3707 &rack->rc_inp->inp_socket->so_rcv, 3708 &rack->rc_inp->inp_socket->so_snd, 3709 TCP_TIMELY_WORK, 0, 3710 0, &log, false, &tv); 3711 } 3712 } 3713 3714 static int 3715 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) 3716 { 3717 /* 3718 * Before we increase we need to know if 3719 * the estimate just made was less than 3720 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est) 3721 * 3722 * If we already are pacing at a fast enough 3723 * rate to push us faster there is no sense of 3724 * increasing. 3725 * 3726 * We first caculate our actual pacing rate (ss or ca multiplier 3727 * times our cur_bw). 3728 * 3729 * Then we take the last measured rate and multipy by our 3730 * maximum pacing overage to give us a max allowable rate. 3731 * 3732 * If our act_rate is smaller than our max_allowable rate 3733 * then we should increase. Else we should hold steady. 3734 * 3735 */ 3736 uint64_t act_rate, max_allow_rate; 3737 3738 if (rack_timely_no_stopping) 3739 return (1); 3740 3741 if ((cur_bw == 0) || (last_bw_est == 0)) { 3742 /* 3743 * Initial startup case or 3744 * everything is acked case. 3745 */ 3746 rack_log_timely(rack, mult, cur_bw, 0, 0, 3747 __LINE__, 9); 3748 return (1); 3749 } 3750 if (mult <= 100) { 3751 /* 3752 * We can always pace at or slightly above our rate. 
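 * (mult is expressed in percent, so a value at or below 100 means we are
 * not even trying to pace above the measured rate and raising the
 * multiplier is always allowed.)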
3753 */ 3754 rack_log_timely(rack, mult, cur_bw, 0, 0, 3755 __LINE__, 9); 3756 return (1); 3757 } 3758 act_rate = cur_bw * (uint64_t)mult; 3759 act_rate /= 100; 3760 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100); 3761 max_allow_rate /= 100; 3762 if (act_rate < max_allow_rate) { 3763 /* 3764 * Here the rate we are actually pacing at 3765 * is smaller than 10% above our last measurement. 3766 * This means we are pacing below what we would 3767 * like to try to achieve (plus some wiggle room). 3768 */ 3769 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3770 __LINE__, 9); 3771 return (1); 3772 } else { 3773 /* 3774 * Here we are already pacing at least rack_max_per_above(10%) 3775 * what we are getting back. This indicates most likely 3776 * that we are being limited (cwnd/rwnd/app) and can't 3777 * get any more b/w. There is no sense of trying to 3778 * raise up the pacing rate its not speeding us up 3779 * and we already are pacing faster than we are getting. 3780 */ 3781 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3782 __LINE__, 8); 3783 return (0); 3784 } 3785 } 3786 3787 static void 3788 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack) 3789 { 3790 /* 3791 * When we drag bottom, we want to assure 3792 * that no multiplier is below 1.0, if so 3793 * we want to restore it to at least that. 3794 */ 3795 if (rack->r_ctl.rack_per_of_gp_rec < 100) { 3796 /* This is unlikely we usually do not touch recovery */ 3797 rack->r_ctl.rack_per_of_gp_rec = 100; 3798 } 3799 if (rack->r_ctl.rack_per_of_gp_ca < 100) { 3800 rack->r_ctl.rack_per_of_gp_ca = 100; 3801 } 3802 if (rack->r_ctl.rack_per_of_gp_ss < 100) { 3803 rack->r_ctl.rack_per_of_gp_ss = 100; 3804 } 3805 } 3806 3807 static void 3808 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack) 3809 { 3810 if (rack->r_ctl.rack_per_of_gp_ca > 100) { 3811 rack->r_ctl.rack_per_of_gp_ca = 100; 3812 } 3813 if (rack->r_ctl.rack_per_of_gp_ss > 100) { 3814 rack->r_ctl.rack_per_of_gp_ss = 100; 3815 } 3816 } 3817 3818 static void 3819 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override) 3820 { 3821 int32_t calc, logged, plus; 3822 3823 logged = 0; 3824 3825 if (rack->rc_skip_timely) 3826 return; 3827 if (override) { 3828 /* 3829 * override is passed when we are 3830 * loosing b/w and making one last 3831 * gasp at trying to not loose out 3832 * to a new-reno flow. 3833 */ 3834 goto extra_boost; 3835 } 3836 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */ 3837 if (rack->rc_gp_incr && 3838 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { 3839 /* 3840 * Reset and get 5 strokes more before the boost. Note 3841 * that the count is 0 based so we have to add one. 
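 * Illustrative numbers (defaults may differ): with rack_gp_increase_per
 * of 1 and RACK_TIMELY_CNT_BOOST of 5, the boosted step below is 5
 * percentage points instead of the normal 1.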
3842 */ 3843 extra_boost: 3844 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3845 rack->rc_gp_timely_inc_cnt = 0; 3846 } else 3847 plus = (uint32_t)rack_gp_increase_per; 3848 /* Must be at least 1% increase for true timely increases */ 3849 if ((plus < 1) && 3850 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3851 plus = 1; 3852 if (rack->rc_gp_saw_rec && 3853 (rack->rc_gp_no_rec_chg == 0) && 3854 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3855 rack->r_ctl.rack_per_of_gp_rec)) { 3856 /* We have been in recovery ding it too */ 3857 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 3858 if (calc > 0xffff) 3859 calc = 0xffff; 3860 logged |= 1; 3861 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 3862 if (rack->r_ctl.rack_per_upper_bound_ca && 3863 (rack->rc_dragged_bottom == 0) && 3864 (rack->r_ctl.rack_per_of_gp_rec > rack->r_ctl.rack_per_upper_bound_ca)) 3865 rack->r_ctl.rack_per_of_gp_rec = rack->r_ctl.rack_per_upper_bound_ca; 3866 } 3867 if (rack->rc_gp_saw_ca && 3868 (rack->rc_gp_saw_ss == 0) && 3869 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3870 rack->r_ctl.rack_per_of_gp_ca)) { 3871 /* In CA */ 3872 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 3873 if (calc > 0xffff) 3874 calc = 0xffff; 3875 logged |= 2; 3876 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 3877 if (rack->r_ctl.rack_per_upper_bound_ca && 3878 (rack->rc_dragged_bottom == 0) && 3879 (rack->r_ctl.rack_per_of_gp_ca > rack->r_ctl.rack_per_upper_bound_ca)) 3880 rack->r_ctl.rack_per_of_gp_ca = rack->r_ctl.rack_per_upper_bound_ca; 3881 } 3882 if (rack->rc_gp_saw_ss && 3883 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3884 rack->r_ctl.rack_per_of_gp_ss)) { 3885 /* In SS */ 3886 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 3887 if (calc > 0xffff) 3888 calc = 0xffff; 3889 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 3890 if (rack->r_ctl.rack_per_upper_bound_ss && 3891 (rack->rc_dragged_bottom == 0) && 3892 (rack->r_ctl.rack_per_of_gp_ss > rack->r_ctl.rack_per_upper_bound_ss)) 3893 rack->r_ctl.rack_per_of_gp_ss = rack->r_ctl.rack_per_upper_bound_ss; 3894 logged |= 4; 3895 } 3896 if (logged && 3897 (rack->rc_gp_incr == 0)){ 3898 /* Go into increment mode */ 3899 rack->rc_gp_incr = 1; 3900 rack->rc_gp_timely_inc_cnt = 0; 3901 } 3902 if (rack->rc_gp_incr && 3903 logged && 3904 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 3905 rack->rc_gp_timely_inc_cnt++; 3906 } 3907 rack_log_timely(rack, logged, plus, 0, 0, 3908 __LINE__, 1); 3909 } 3910 3911 static uint32_t 3912 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 3913 { 3914 /*- 3915 * norm_grad = rtt_diff / minrtt; 3916 * new_per = curper * (1 - B * norm_grad) 3917 * 3918 * B = rack_gp_decrease_per (default 80%) 3919 * rtt_dif = input var current rtt-diff 3920 * curper = input var current percentage 3921 * minrtt = from rack filter 3922 * 3923 * In order to do the floating point calculations above we 3924 * do an integer conversion. 
The code looks confusing so let me 3925 * translate it into something that use more variables and 3926 * is clearer for us humans :) 3927 * 3928 * uint64_t norm_grad, inverse, reduce_by, final_result; 3929 * uint32_t perf; 3930 * 3931 * norm_grad = (((uint64_t)rtt_diff * 1000000) / 3932 * (uint64_t)get_filter_small(&rack->r_ctl.rc_gp_min_rtt)); 3933 * inverse = ((uint64_t)rack_gp_decrease * (uint64_t)1000000) * norm_grad; 3934 * inverse /= 1000000; 3935 * reduce_by = (1000000 - inverse); 3936 * final_result = (cur_per * reduce_by) / 1000000; 3937 * perf = (uint32_t)final_result; 3938 */ 3939 uint64_t perf; 3940 3941 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3942 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 3943 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 3944 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 3945 (uint64_t)1000000)) / 3946 (uint64_t)1000000); 3947 if (perf > curper) { 3948 /* TSNH */ 3949 perf = curper - 1; 3950 } 3951 return ((uint32_t)perf); 3952 } 3953 3954 static uint32_t 3955 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 3956 { 3957 /* 3958 * highrttthresh 3959 * result = curper * (1 - (B * ( 1 - ------ )) 3960 * gp_srtt 3961 * 3962 * B = rack_gp_decrease_per (default .8 i.e. 80) 3963 * highrttthresh = filter_min * rack_gp_rtt_maxmul 3964 */ 3965 uint64_t perf; 3966 uint32_t highrttthresh; 3967 3968 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 3969 3970 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3971 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 3972 ((uint64_t)highrttthresh * (uint64_t)1000000) / 3973 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 3974 if (tcp_bblogging_on(rack->rc_tp)) { 3975 uint64_t log1; 3976 3977 log1 = rtt; 3978 log1 <<= 32; 3979 log1 |= highrttthresh; 3980 rack_log_timely(rack, 3981 rack_gp_decrease_per, 3982 (uint64_t)curper, 3983 log1, 3984 perf, 3985 __LINE__, 3986 15); 3987 } 3988 return (perf); 3989 } 3990 3991 static void 3992 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 3993 { 3994 uint64_t logvar, logvar2, logvar3; 3995 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 3996 3997 if (rack->rc_skip_timely) 3998 return; 3999 if (rack->rc_gp_incr) { 4000 /* Turn off increment counting */ 4001 rack->rc_gp_incr = 0; 4002 rack->rc_gp_timely_inc_cnt = 0; 4003 } 4004 ss_red = ca_red = rec_red = 0; 4005 logged = 0; 4006 /* Calculate the reduction value */ 4007 if (rtt_diff < 0) { 4008 rtt_diff *= -1; 4009 } 4010 /* Must be at least 1% reduction */ 4011 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 4012 /* We have been in recovery ding it too */ 4013 if (timely_says == 2) { 4014 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 4015 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 4016 if (alt < new_per) 4017 val = alt; 4018 else 4019 val = new_per; 4020 } else 4021 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 4022 if (rack->r_ctl.rack_per_of_gp_rec > val) { 4023 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 4024 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 4025 } else { 4026 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 4027 rec_red = 0; 4028 } 4029 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 4030 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 4031 logged |= 1; 4032 } 4033 if (rack->rc_gp_saw_ss) { 4034 /* Sent in SS */ 4035 if 
(timely_says == 2) { 4036 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 4037 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 4038 if (alt < new_per) 4039 val = alt; 4040 else 4041 val = new_per; 4042 } else 4043 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 4044 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 4045 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 4046 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 4047 } else { 4048 ss_red = new_per; 4049 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 4050 logvar = new_per; 4051 logvar <<= 32; 4052 logvar |= alt; 4053 logvar2 = (uint32_t)rtt; 4054 logvar2 <<= 32; 4055 logvar2 |= (uint32_t)rtt_diff; 4056 logvar3 = rack_gp_rtt_maxmul; 4057 logvar3 <<= 32; 4058 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4059 rack_log_timely(rack, timely_says, 4060 logvar2, logvar3, 4061 logvar, __LINE__, 10); 4062 } 4063 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 4064 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 4065 logged |= 4; 4066 } else if (rack->rc_gp_saw_ca) { 4067 /* Sent in CA */ 4068 if (timely_says == 2) { 4069 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 4070 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 4071 if (alt < new_per) 4072 val = alt; 4073 else 4074 val = new_per; 4075 } else 4076 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 4077 if (rack->r_ctl.rack_per_of_gp_ca > val) { 4078 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 4079 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val; 4080 } else { 4081 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 4082 ca_red = 0; 4083 logvar = new_per; 4084 logvar <<= 32; 4085 logvar |= alt; 4086 logvar2 = (uint32_t)rtt; 4087 logvar2 <<= 32; 4088 logvar2 |= (uint32_t)rtt_diff; 4089 logvar3 = rack_gp_rtt_maxmul; 4090 logvar3 <<= 32; 4091 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4092 rack_log_timely(rack, timely_says, 4093 logvar2, logvar3, 4094 logvar, __LINE__, 10); 4095 } 4096 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 4097 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 4098 logged |= 2; 4099 } 4100 if (rack->rc_gp_timely_dec_cnt < 0x7) { 4101 rack->rc_gp_timely_dec_cnt++; 4102 if (rack_timely_dec_clear && 4103 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 4104 rack->rc_gp_timely_dec_cnt = 0; 4105 } 4106 logvar = ss_red; 4107 logvar <<= 32; 4108 logvar |= ca_red; 4109 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 4110 __LINE__, 2); 4111 } 4112 4113 static void 4114 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 4115 uint32_t rtt, uint32_t line, uint8_t reas) 4116 { 4117 if (tcp_bblogging_on(rack->rc_tp)) { 4118 union tcp_log_stackspecific log; 4119 struct timeval tv; 4120 4121 memset(&log, 0, sizeof(log)); 4122 log.u_bbr.flex1 = line; 4123 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 4124 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 4125 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 4126 log.u_bbr.flex5 = rtt; 4127 log.u_bbr.flex6 = rack->rc_highly_buffered; 4128 log.u_bbr.flex6 <<= 1; 4129 log.u_bbr.flex6 |= rack->forced_ack; 4130 log.u_bbr.flex6 <<= 1; 4131 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 4132 log.u_bbr.flex6 <<= 1; 4133 log.u_bbr.flex6 |= rack->in_probe_rtt; 4134 log.u_bbr.flex6 <<= 1; 4135 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 4136 
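/* flex6 now carries, from its high bit down: rc_highly_buffered, forced_ack, rc_gp_dyn_mul, in_probe_rtt and measure_saw_probe_rtt. */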
log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; 4137 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 4138 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 4139 log.u_bbr.flex8 = reas; 4140 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4141 log.u_bbr.delRate = rack_get_bw(rack); 4142 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 4143 log.u_bbr.cur_del_rate <<= 32; 4144 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 4145 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 4146 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 4147 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 4148 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 4149 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 4150 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 4151 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 4152 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4153 log.u_bbr.rttProp = us_cts; 4154 log.u_bbr.rttProp <<= 32; 4155 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 4156 TCP_LOG_EVENTP(rack->rc_tp, NULL, 4157 &rack->rc_inp->inp_socket->so_rcv, 4158 &rack->rc_inp->inp_socket->so_snd, 4159 BBR_LOG_RTT_SHRINKS, 0, 4160 0, &log, false, &rack->r_ctl.act_rcv_time); 4161 } 4162 } 4163 4164 static void 4165 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 4166 { 4167 uint64_t bwdp; 4168 4169 bwdp = rack_get_bw(rack); 4170 bwdp *= (uint64_t)rtt; 4171 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 4172 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 4173 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) { 4174 /* 4175 * A window protocol must be able to have 4 packets 4176 * outstanding as the floor in order to function 4177 * (especially considering delayed ack :D). 4178 */ 4179 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 4180 } 4181 } 4182 4183 static void 4184 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 4185 { 4186 /** 4187 * ProbeRTT is a bit different in rack_pacing than in 4188 * BBR. It is like BBR in that it uses the lowering of 4189 * the RTT as a signal that we saw something new and 4190 * counts from there for how long between. But it is 4191 * different in that its quite simple. It does not 4192 * play with the cwnd and wait until we get down 4193 * to N segments outstanding and hold that for 4194 * 200ms. Instead it just sets the pacing reduction 4195 * rate to a set percentage (70 by default) and hold 4196 * that for a number of recent GP Srtt's. 4197 */ 4198 uint32_t segsiz; 4199 4200 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 4201 if (rack->rc_gp_dyn_mul == 0) 4202 return; 4203 4204 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 4205 /* We are idle */ 4206 return; 4207 } 4208 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 4209 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 4210 /* 4211 * Stop the goodput now, the idea here is 4212 * that future measurements with in_probe_rtt 4213 * won't register if they are not greater so 4214 * we want to get what info (if any) is available 4215 * now. 
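 * (Once in_probe_rtt is set the pacing rate is cut to
 * rack_per_of_gp_probertt percent, so a sample completed during
 * probe-rtt would most likely understate the path.)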
4216 */ 4217 rack_do_goodput_measurement(rack->rc_tp, rack, 4218 rack->rc_tp->snd_una, __LINE__, 4219 RACK_QUALITY_PROBERTT); 4220 } 4221 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 4222 rack->r_ctl.rc_time_probertt_entered = us_cts; 4223 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 4224 rack->r_ctl.rc_pace_min_segs); 4225 rack->in_probe_rtt = 1; 4226 rack->measure_saw_probe_rtt = 1; 4227 rack->r_ctl.rc_time_probertt_starts = 0; 4228 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 4229 if (rack_probertt_use_min_rtt_entry) 4230 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 4231 else 4232 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 4233 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4234 __LINE__, RACK_RTTS_ENTERPROBE); 4235 } 4236 4237 static void 4238 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 4239 { 4240 struct rack_sendmap *rsm; 4241 uint32_t segsiz; 4242 4243 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 4244 rack->r_ctl.rc_pace_min_segs); 4245 rack->in_probe_rtt = 0; 4246 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 4247 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 4248 /* 4249 * Stop the goodput now, the idea here is 4250 * that future measurements with in_probe_rtt 4251 * won't register if they are not greater so 4252 * we want to get what info (if any) is available 4253 * now. 4254 */ 4255 rack_do_goodput_measurement(rack->rc_tp, rack, 4256 rack->rc_tp->snd_una, __LINE__, 4257 RACK_QUALITY_PROBERTT); 4258 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 4259 /* 4260 * We don't have enough data to make a measurement. 4261 * So lets just stop and start here after exiting 4262 * probe-rtt. We probably are not interested in 4263 * the results anyway. 4264 */ 4265 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 4266 } 4267 /* 4268 * Measurements through the current snd_max are going 4269 * to be limited by the slower pacing rate. 4270 * 4271 * We need to mark these as app-limited so we 4272 * don't collapse the b/w. 4273 */ 4274 rsm = tqhash_max(rack->r_ctl.tqh); 4275 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 4276 if (rack->r_ctl.rc_app_limited_cnt == 0) 4277 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 4278 else { 4279 /* 4280 * Go out to the end app limited and mark 4281 * this new one as next and move the end_appl up 4282 * to this guy. 4283 */ 4284 if (rack->r_ctl.rc_end_appl) 4285 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 4286 rack->r_ctl.rc_end_appl = rsm; 4287 } 4288 rsm->r_flags |= RACK_APP_LIMITED; 4289 rack->r_ctl.rc_app_limited_cnt++; 4290 } 4291 /* 4292 * Now, we need to examine our pacing rate multipliers. 4293 * If its under 100%, we need to kick it back up to 4294 * 100%. We also don't let it be over our "max" above 4295 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 4296 * Note setting clamp_atexit_prtt to 0 has the effect 4297 * of setting CA/SS to 100% always at exit (which is 4298 * the default behavior). 4299 */ 4300 if (rack_probertt_clear_is) { 4301 rack->rc_gp_incr = 0; 4302 rack->rc_gp_bwred = 0; 4303 rack->rc_gp_timely_inc_cnt = 0; 4304 rack->rc_gp_timely_dec_cnt = 0; 4305 } 4306 /* Do we do any clamping at exit? 
*/ 4307 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 4308 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 4309 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 4310 } 4311 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 4312 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 4313 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 4314 } 4315 /* 4316 * Lets set rtt_diff to 0, so that we will get a "boost" 4317 * after exiting. 4318 */ 4319 rack->r_ctl.rc_rtt_diff = 0; 4320 4321 /* Clear all flags so we start fresh */ 4322 rack->rc_tp->t_bytes_acked = 0; 4323 rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 4324 /* 4325 * If configured to, set the cwnd and ssthresh to 4326 * our targets. 4327 */ 4328 if (rack_probe_rtt_sets_cwnd) { 4329 uint64_t ebdp; 4330 uint32_t setto; 4331 4332 /* Set ssthresh so we get into CA once we hit our target */ 4333 if (rack_probertt_use_min_rtt_exit == 1) { 4334 /* Set to min rtt */ 4335 rack_set_prtt_target(rack, segsiz, 4336 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 4337 } else if (rack_probertt_use_min_rtt_exit == 2) { 4338 /* Set to current gp rtt */ 4339 rack_set_prtt_target(rack, segsiz, 4340 rack->r_ctl.rc_gp_srtt); 4341 } else if (rack_probertt_use_min_rtt_exit == 3) { 4342 /* Set to entry gp rtt */ 4343 rack_set_prtt_target(rack, segsiz, 4344 rack->r_ctl.rc_entry_gp_rtt); 4345 } else { 4346 uint64_t sum; 4347 uint32_t setval; 4348 4349 sum = rack->r_ctl.rc_entry_gp_rtt; 4350 sum *= 10; 4351 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 4352 if (sum >= 20) { 4353 /* 4354 * A highly buffered path needs 4355 * cwnd space for timely to work. 4356 * Lets set things up as if 4357 * we are heading back here again. 4358 */ 4359 setval = rack->r_ctl.rc_entry_gp_rtt; 4360 } else if (sum >= 15) { 4361 /* 4362 * Lets take the smaller of the 4363 * two since we are just somewhat 4364 * buffered. 4365 */ 4366 setval = rack->r_ctl.rc_gp_srtt; 4367 if (setval > rack->r_ctl.rc_entry_gp_rtt) 4368 setval = rack->r_ctl.rc_entry_gp_rtt; 4369 } else { 4370 /* 4371 * Here we are not highly buffered 4372 * and should pick the min we can to 4373 * keep from causing loss. 
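 * (sum above is entry_gp_rtt * 10 / gp_srtt, so sum >= 20 means the RTT
 * at entry was at least twice the current GP srtt, 15-19 means roughly
 * 1.5x to 2x, and anything lower means we are already close to the
 * path's minimum.)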
4374 */ 4375 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4376 } 4377 rack_set_prtt_target(rack, segsiz, 4378 setval); 4379 } 4380 if (rack_probe_rtt_sets_cwnd > 1) { 4381 /* There is a percentage here to boost */ 4382 ebdp = rack->r_ctl.rc_target_probertt_flight; 4383 ebdp *= rack_probe_rtt_sets_cwnd; 4384 ebdp /= 100; 4385 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 4386 } else 4387 setto = rack->r_ctl.rc_target_probertt_flight; 4388 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 4389 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 4390 /* Enforce a min */ 4391 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 4392 } 4393 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 4394 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 4395 } 4396 rack_log_rtt_shrinks(rack, us_cts, 4397 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4398 __LINE__, RACK_RTTS_EXITPROBE); 4399 /* Clear times last so log has all the info */ 4400 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 4401 rack->r_ctl.rc_time_probertt_entered = us_cts; 4402 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 4403 rack->r_ctl.rc_time_of_last_probertt = us_cts; 4404 } 4405 4406 static void 4407 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 4408 { 4409 /* Check in on probe-rtt */ 4410 4411 if (rack->rc_gp_filled == 0) { 4412 /* We do not do p-rtt unless we have gp measurements */ 4413 return; 4414 } 4415 if (rack->in_probe_rtt) { 4416 uint64_t no_overflow; 4417 uint32_t endtime, must_stay; 4418 4419 if (rack->r_ctl.rc_went_idle_time && 4420 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 4421 /* 4422 * We went idle during prtt, just exit now. 4423 */ 4424 rack_exit_probertt(rack, us_cts); 4425 } else if (rack_probe_rtt_safety_val && 4426 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 4427 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 4428 /* 4429 * Probe RTT safety value triggered! 4430 */ 4431 rack_log_rtt_shrinks(rack, us_cts, 4432 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4433 __LINE__, RACK_RTTS_SAFETY); 4434 rack_exit_probertt(rack, us_cts); 4435 } 4436 /* Calculate the max we will wait */ 4437 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 4438 if (rack->rc_highly_buffered) 4439 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 4440 /* Calculate the min we must wait */ 4441 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 4442 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 4443 TSTMP_LT(us_cts, endtime)) { 4444 uint32_t calc; 4445 /* Do we lower more? 
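 * (The reduction below grows with the number of GP srtt's already spent
 * in probe-rtt, and rack_per_of_gp_probertt is never pushed below
 * rack_per_of_gp_lowthresh.)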
*/ 4446 no_exit: 4447 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 4448 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 4449 else 4450 calc = 0; 4451 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 4452 if (calc) { 4453 /* Maybe */ 4454 calc *= rack_per_of_gp_probertt_reduce; 4455 if (calc > rack_per_of_gp_probertt) 4456 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 4457 else 4458 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 4459 /* Limit it too */ 4460 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 4461 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 4462 } 4463 /* We must reach target or the time set */ 4464 return; 4465 } 4466 if (rack->r_ctl.rc_time_probertt_starts == 0) { 4467 if ((TSTMP_LT(us_cts, must_stay) && 4468 rack->rc_highly_buffered) || 4469 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 4470 rack->r_ctl.rc_target_probertt_flight)) { 4471 /* We are not past the must_stay time */ 4472 goto no_exit; 4473 } 4474 rack_log_rtt_shrinks(rack, us_cts, 4475 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4476 __LINE__, RACK_RTTS_REACHTARGET); 4477 rack->r_ctl.rc_time_probertt_starts = us_cts; 4478 if (rack->r_ctl.rc_time_probertt_starts == 0) 4479 rack->r_ctl.rc_time_probertt_starts = 1; 4480 /* Restore back to our rate we want to pace at in prtt */ 4481 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 4482 } 4483 /* 4484 * Setup our end time, some number of gp_srtts plus 200ms. 4485 */ 4486 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 4487 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 4488 if (rack_probertt_gpsrtt_cnt_div) 4489 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 4490 else 4491 endtime = 0; 4492 endtime += rack_min_probertt_hold; 4493 endtime += rack->r_ctl.rc_time_probertt_starts; 4494 if (TSTMP_GEQ(us_cts, endtime)) { 4495 /* yes, exit probertt */ 4496 rack_exit_probertt(rack, us_cts); 4497 } 4498 4499 } else if ((rack->rc_skip_timely == 0) && 4500 (TSTMP_GT(us_cts, rack->r_ctl.rc_lower_rtt_us_cts)) && 4501 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt)) { 4502 /* Go into probertt, its been too long since we went lower */ 4503 rack_enter_probertt(rack, us_cts); 4504 } 4505 } 4506 4507 static void 4508 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 4509 uint32_t rtt, int32_t rtt_diff) 4510 { 4511 uint64_t cur_bw, up_bnd, low_bnd, subfr; 4512 uint32_t losses; 4513 4514 if ((rack->rc_gp_dyn_mul == 0) || 4515 (rack->use_fixed_rate) || 4516 (rack->in_probe_rtt) || 4517 (rack->rc_always_pace == 0)) { 4518 /* No dynamic GP multiplier in play */ 4519 return; 4520 } 4521 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 4522 cur_bw = rack_get_bw(rack); 4523 /* Calculate our up and down range */ 4524 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 4525 up_bnd /= 100; 4526 up_bnd += rack->r_ctl.last_gp_comp_bw; 4527 4528 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 4529 subfr /= 100; 4530 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 4531 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 4532 /* 4533 * This is the case where our RTT is above 4534 * the max target and we have been configured 4535 * to just do timely no bonus up stuff in that case. 4536 * 4537 * There are two configurations, set to 1, and we 4538 * just do timely if we are over our max. 
If its 4539 * set above 1 then we slam the multipliers down 4540 * to 100 and then decrement per timely. 4541 */ 4542 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4543 __LINE__, 3); 4544 if (rack->r_ctl.rc_no_push_at_mrtt > 1) 4545 rack_validate_multipliers_at_or_below_100(rack); 4546 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4547 } else if ((timely_says != 0) && (last_bw_est < low_bnd) && !losses) { 4548 /* 4549 * We are decreasing this is a bit complicated this 4550 * means we are loosing ground. This could be 4551 * because another flow entered and we are competing 4552 * for b/w with it. This will push the RTT up which 4553 * makes timely unusable unless we want to get shoved 4554 * into a corner and just be backed off (the age 4555 * old problem with delay based CC). 4556 * 4557 * On the other hand if it was a route change we 4558 * would like to stay somewhat contained and not 4559 * blow out the buffers. 4560 */ 4561 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4562 __LINE__, 3); 4563 rack->r_ctl.last_gp_comp_bw = cur_bw; 4564 if (rack->rc_gp_bwred == 0) { 4565 /* Go into reduction counting */ 4566 rack->rc_gp_bwred = 1; 4567 rack->rc_gp_timely_dec_cnt = 0; 4568 } 4569 if (rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) { 4570 /* 4571 * Push another time with a faster pacing 4572 * to try to gain back (we include override to 4573 * get a full raise factor). 4574 */ 4575 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || 4576 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || 4577 (timely_says == 0) || 4578 (rack_down_raise_thresh == 0)) { 4579 /* 4580 * Do an override up in b/w if we were 4581 * below the threshold or if the threshold 4582 * is zero we always do the raise. 4583 */ 4584 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1); 4585 } else { 4586 /* Log it stays the same */ 4587 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0, 4588 __LINE__, 11); 4589 } 4590 rack->rc_gp_timely_dec_cnt++; 4591 /* We are not incrementing really no-count */ 4592 rack->rc_gp_incr = 0; 4593 rack->rc_gp_timely_inc_cnt = 0; 4594 } else { 4595 /* 4596 * Lets just use the RTT 4597 * information and give up 4598 * pushing. 4599 */ 4600 goto use_timely; 4601 } 4602 } else if ((timely_says != 2) && 4603 !losses && 4604 (last_bw_est > up_bnd)) { 4605 /* 4606 * We are increasing b/w lets keep going, updating 4607 * our b/w and ignoring any timely input, unless 4608 * of course we are at our max raise (if there is one). 4609 */ 4610 4611 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4612 __LINE__, 3); 4613 rack->r_ctl.last_gp_comp_bw = cur_bw; 4614 if (rack->rc_gp_saw_ss && 4615 rack->r_ctl.rack_per_upper_bound_ss && 4616 (rack->r_ctl.rack_per_of_gp_ss == rack->r_ctl.rack_per_upper_bound_ss)) { 4617 /* 4618 * In cases where we can't go higher 4619 * we should just use timely. 4620 */ 4621 goto use_timely; 4622 } 4623 if (rack->rc_gp_saw_ca && 4624 rack->r_ctl.rack_per_upper_bound_ca && 4625 (rack->r_ctl.rack_per_of_gp_ca == rack->r_ctl.rack_per_upper_bound_ca)) { 4626 /* 4627 * In cases where we can't go higher 4628 * we should just use timely. 
4629 */ 4630 goto use_timely; 4631 } 4632 rack->rc_gp_bwred = 0; 4633 rack->rc_gp_timely_dec_cnt = 0; 4634 /* You get a set number of pushes if timely is trying to reduce */ 4635 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { 4636 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4637 } else { 4638 /* Log it stays the same */ 4639 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0, 4640 __LINE__, 12); 4641 } 4642 return; 4643 } else { 4644 /* 4645 * We are staying between the lower and upper range bounds 4646 * so use timely to decide. 4647 */ 4648 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4649 __LINE__, 3); 4650 use_timely: 4651 if (timely_says) { 4652 rack->rc_gp_incr = 0; 4653 rack->rc_gp_timely_inc_cnt = 0; 4654 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && 4655 !losses && 4656 (last_bw_est < low_bnd)) { 4657 /* We are loosing ground */ 4658 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4659 rack->rc_gp_timely_dec_cnt++; 4660 /* We are not incrementing really no-count */ 4661 rack->rc_gp_incr = 0; 4662 rack->rc_gp_timely_inc_cnt = 0; 4663 } else 4664 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4665 } else { 4666 rack->rc_gp_bwred = 0; 4667 rack->rc_gp_timely_dec_cnt = 0; 4668 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4669 } 4670 } 4671 } 4672 4673 static int32_t 4674 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt) 4675 { 4676 int32_t timely_says; 4677 uint64_t log_mult, log_rtt_a_diff; 4678 4679 log_rtt_a_diff = rtt; 4680 log_rtt_a_diff <<= 32; 4681 log_rtt_a_diff |= (uint32_t)rtt_diff; 4682 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * 4683 rack_gp_rtt_maxmul)) { 4684 /* Reduce the b/w multiplier */ 4685 timely_says = 2; 4686 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 4687 log_mult <<= 32; 4688 log_mult |= prev_rtt; 4689 rack_log_timely(rack, timely_says, log_mult, 4690 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4691 log_rtt_a_diff, __LINE__, 4); 4692 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4693 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4694 max(rack_gp_rtt_mindiv , 1)))) { 4695 /* Increase the b/w multiplier */ 4696 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4697 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4698 max(rack_gp_rtt_mindiv , 1)); 4699 log_mult <<= 32; 4700 log_mult |= prev_rtt; 4701 timely_says = 0; 4702 rack_log_timely(rack, timely_says, log_mult , 4703 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4704 log_rtt_a_diff, __LINE__, 5); 4705 } else { 4706 /* 4707 * Use a gradient to find it the timely gradient 4708 * is: 4709 * grad = rc_rtt_diff / min_rtt; 4710 * 4711 * anything below or equal to 0 will be 4712 * a increase indication. Anything above 4713 * zero is a decrease. Note we take care 4714 * of the actual gradient calculation 4715 * in the reduction (its not needed for 4716 * increase). 
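 *
 * Illustrative example (numbers made up, not from a trace):
 * with a filtered min_rtt of 20000 usecs, an rtt_diff of
 * -1500 (the rtt is shrinking) falls into the increase case
 * below and we return 0; an rtt_diff of +4000 returns 1 and
 * the decrease path later forms the 4000/20000 gradient when
 * it sizes the backoff.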
4717 */ 4718 log_mult = prev_rtt; 4719 if (rtt_diff <= 0) { 4720 /* 4721 * Rttdiff is less than zero, increase the 4722 * b/w multiplier (its 0 or negative) 4723 */ 4724 timely_says = 0; 4725 rack_log_timely(rack, timely_says, log_mult, 4726 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4727 } else { 4728 /* Reduce the b/w multiplier */ 4729 timely_says = 1; 4730 rack_log_timely(rack, timely_says, log_mult, 4731 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4732 } 4733 } 4734 return (timely_says); 4735 } 4736 4737 static __inline int 4738 rack_in_gp_window(struct tcpcb *tp, struct rack_sendmap *rsm) 4739 { 4740 if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && 4741 SEQ_LEQ(rsm->r_end, tp->gput_ack)) { 4742 /** 4743 * This covers the case that the 4744 * resent is completely inside 4745 * the gp range or up to it. 4746 * |----------------| 4747 * |-----| <or> 4748 * |----| 4749 * <or> |---| 4750 */ 4751 return (1); 4752 } else if (SEQ_LT(rsm->r_start, tp->gput_seq) && 4753 SEQ_GT(rsm->r_end, tp->gput_seq)){ 4754 /** 4755 * This covers the case of 4756 * |--------------| 4757 * |-------->| 4758 */ 4759 return (1); 4760 } else if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && 4761 SEQ_LT(rsm->r_start, tp->gput_ack) && 4762 SEQ_GEQ(rsm->r_end, tp->gput_ack)) { 4763 4764 /** 4765 * This covers the case of 4766 * |--------------| 4767 * |-------->| 4768 */ 4769 return (1); 4770 } 4771 return (0); 4772 } 4773 4774 static __inline void 4775 rack_mark_in_gp_win(struct tcpcb *tp, struct rack_sendmap *rsm) 4776 { 4777 4778 if ((tp->t_flags & TF_GPUTINPROG) == 0) 4779 return; 4780 /* 4781 * We have a Goodput measurement in progress. Mark 4782 * the send if its within the window. If its not 4783 * in the window make sure it does not have the mark. 4784 */ 4785 if (rack_in_gp_window(tp, rsm)) 4786 rsm->r_flags |= RACK_IN_GP_WIN; 4787 else 4788 rsm->r_flags &= ~RACK_IN_GP_WIN; 4789 } 4790 4791 static __inline void 4792 rack_clear_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) 4793 { 4794 /* A GP measurement is ending, clear all marks on the send map*/ 4795 struct rack_sendmap *rsm = NULL; 4796 4797 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 4798 if (rsm == NULL) { 4799 rsm = tqhash_min(rack->r_ctl.tqh); 4800 } 4801 /* Nothing left? */ 4802 while ((rsm != NULL) && (SEQ_GEQ(tp->gput_ack, rsm->r_start))){ 4803 rsm->r_flags &= ~RACK_IN_GP_WIN; 4804 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4805 } 4806 } 4807 4808 4809 static __inline void 4810 rack_tend_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) 4811 { 4812 struct rack_sendmap *rsm = NULL; 4813 4814 if (tp->snd_una == tp->snd_max) { 4815 /* Nothing outstanding yet, nothing to do here */ 4816 return; 4817 } 4818 if (SEQ_GT(tp->gput_seq, tp->snd_una)) { 4819 /* 4820 * We are measuring ahead of some outstanding 4821 * data. We need to walk through up until we get 4822 * to gp_seq marking so that no rsm is set incorrectly 4823 * with RACK_IN_GP_WIN. 4824 */ 4825 rsm = tqhash_min(rack->r_ctl.tqh); 4826 while (rsm != NULL) { 4827 rack_mark_in_gp_win(tp, rsm); 4828 if (SEQ_GEQ(rsm->r_end, tp->gput_seq)) 4829 break; 4830 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4831 } 4832 } 4833 if (rsm == NULL) { 4834 /* 4835 * Need to find the GP seq, if rsm is 4836 * set we stopped as we hit it. 
4837 */ 4838 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 4839 if (rsm == NULL) 4840 return; 4841 rack_mark_in_gp_win(tp, rsm); 4842 } 4843 /* 4844 * Now we may need to mark already sent rsm, ahead of 4845 * gput_seq in the window since they may have been sent 4846 * *before* we started our measurment. The rsm, if non-null 4847 * has been marked (note if rsm would have been NULL we would have 4848 * returned in the previous block). So we go to the next, and continue 4849 * until we run out of entries or we exceed the gp_ack value. 4850 */ 4851 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4852 while (rsm) { 4853 rack_mark_in_gp_win(tp, rsm); 4854 if (SEQ_GT(rsm->r_end, tp->gput_ack)) 4855 break; 4856 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4857 } 4858 } 4859 4860 static void 4861 rack_log_gp_calc(struct tcp_rack *rack, uint32_t add_part, uint32_t sub_part, uint32_t srtt, uint64_t meas_bw, uint64_t utim, uint8_t meth, uint32_t line) 4862 { 4863 if (tcp_bblogging_on(rack->rc_tp)) { 4864 union tcp_log_stackspecific log; 4865 struct timeval tv; 4866 4867 memset(&log, 0, sizeof(log)); 4868 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4869 log.u_bbr.flex1 = add_part; 4870 log.u_bbr.flex2 = sub_part; 4871 log.u_bbr.flex3 = rack_wma_divisor; 4872 log.u_bbr.flex4 = srtt; 4873 log.u_bbr.flex7 = (uint16_t)line; 4874 log.u_bbr.flex8 = meth; 4875 log.u_bbr.delRate = rack->r_ctl.gp_bw; 4876 log.u_bbr.cur_del_rate = meas_bw; 4877 log.u_bbr.rttProp = utim; 4878 TCP_LOG_EVENTP(rack->rc_tp, NULL, 4879 &rack->rc_inp->inp_socket->so_rcv, 4880 &rack->rc_inp->inp_socket->so_snd, 4881 BBR_LOG_THRESH_CALC, 0, 4882 0, &log, false, &rack->r_ctl.act_rcv_time); 4883 } 4884 } 4885 4886 static void 4887 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 4888 tcp_seq th_ack, int line, uint8_t quality) 4889 { 4890 uint64_t tim, bytes_ps, stim, utim; 4891 uint32_t segsiz, bytes, reqbytes, us_cts; 4892 int32_t gput, new_rtt_diff, timely_says; 4893 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 4894 int did_add = 0; 4895 4896 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4897 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4898 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 4899 tim = us_cts - tp->gput_ts; 4900 else 4901 tim = 0; 4902 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 4903 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 4904 else 4905 stim = 0; 4906 /* 4907 * Use the larger of the send time or ack time. This prevents us 4908 * from being influenced by ack artifacts to come up with too 4909 * high of measurement. Note that since we are spanning over many more 4910 * bytes in most of our measurements hopefully that is less likely to 4911 * occur. 4912 */ 4913 if (tim > stim) 4914 utim = max(tim, 1); 4915 else 4916 utim = max(stim, 1); 4917 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 4918 rack_log_gpset(rack, th_ack, us_cts, rack->r_ctl.rc_gp_cumack_ts, __LINE__, 3, NULL); 4919 if ((tim == 0) && (stim == 0)) { 4920 /* 4921 * Invalid measurement time, maybe 4922 * all on one ack/one send? 4923 */ 4924 bytes = 0; 4925 bytes_ps = 0; 4926 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4927 0, 0, 0, 10, __LINE__, NULL, quality); 4928 goto skip_measurement; 4929 } 4930 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 4931 /* We never made a us_rtt measurement? 
*/ 4932 bytes = 0; 4933 bytes_ps = 0; 4934 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4935 0, 0, 0, 10, __LINE__, NULL, quality); 4936 goto skip_measurement; 4937 } 4938 /* 4939 * Calculate the maximum possible b/w this connection 4940 * could have. We base our calculation on the lowest 4941 * rtt we have seen during the measurement and the 4942 * largest rwnd the client has given us in that time. This 4943 * forms a BDP that is the maximum that we could ever 4944 * get to the client. Anything larger is not valid. 4945 * 4946 * I originally had code here that rejected measurements 4947 * where the time was less than 1/2 the latest us_rtt. 4948 * But after thinking on that I realized its wrong since 4949 * say you had a 150Mbps or even 1Gbps link, and you 4950 * were a long way away.. example I am in Europe (100ms rtt) 4951 * talking to my 1Gbps link in S.C. Now measuring say 150,000 4952 * bytes my time would be 1.2ms, and yet my rtt would say 4953 * the measurement was invalid the time was < 50ms. The 4954 * same thing is true for 150Mb (8ms of time). 4955 * 4956 * A better way I realized is to look at what the maximum 4957 * the connection could possibly do. This is gated on 4958 * the lowest RTT we have seen and the highest rwnd. 4959 * We should in theory never exceed that, if we are 4960 * then something on the path is storing up packets 4961 * and then feeding them all at once to our endpoint 4962 * messing up our measurement. 4963 */ 4964 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 4965 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 4966 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 4967 if (SEQ_LT(th_ack, tp->gput_seq)) { 4968 /* No measurement can be made */ 4969 bytes = 0; 4970 bytes_ps = 0; 4971 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4972 0, 0, 0, 10, __LINE__, NULL, quality); 4973 goto skip_measurement; 4974 } else 4975 bytes = (th_ack - tp->gput_seq); 4976 bytes_ps = (uint64_t)bytes; 4977 /* 4978 * Don't measure a b/w for pacing unless we have gotten at least 4979 * an initial windows worth of data in this measurement interval. 4980 * 4981 * Small numbers of bytes get badly influenced by delayed ack and 4982 * other artifacts. Note we take the initial window or our 4983 * defined minimum GP (defaulting to 10 which hopefully is the 4984 * IW). 4985 */ 4986 if (rack->rc_gp_filled == 0) { 4987 /* 4988 * The initial estimate is special. We 4989 * have blasted out an IW worth of packets 4990 * without a real valid ack ts results. We 4991 * then setup the app_limited_needs_set flag, 4992 * this should get the first ack in (probably 2 4993 * MSS worth) to be recorded as the timestamp. 4994 * We thus allow a smaller number of bytes i.e. 4995 * IW - 2MSS. 4996 */ 4997 reqbytes -= (2 * segsiz); 4998 /* Also lets fill previous for our first measurement to be neutral */ 4999 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 5000 } 5001 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 5002 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5003 rack->r_ctl.rc_app_limited_cnt, 5004 0, 0, 10, __LINE__, NULL, quality); 5005 goto skip_measurement; 5006 } 5007 /* 5008 * We now need to calculate the Timely like status so 5009 * we can update (possibly) the b/w multipliers. 
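 *
 * The rtt trend below is kept as a simple 1/8 weighted average:
 *   rc_rtt_diff = rc_rtt_diff - (rc_rtt_diff / 8) + (new_rtt_diff / 8)
 * Worked example (made-up numbers): a previous rc_rtt_diff of
 * 800 usecs and a new gp_srtt that came in 400 usecs below the
 * prior one (new_rtt_diff = -400) moves the filter to
 * 800 - 100 - 50 = 650 usecs.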
5010 */ 5011 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 5012 if (rack->rc_gp_filled == 0) { 5013 /* No previous reading */ 5014 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 5015 } else { 5016 if (rack->measure_saw_probe_rtt == 0) { 5017 /* 5018 * We don't want a probertt to be counted 5019 * since it will be negative incorrectly. We 5020 * expect to be reducing the RTT when we 5021 * pace at a slower rate. 5022 */ 5023 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 5024 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 5025 } 5026 } 5027 timely_says = rack_make_timely_judgement(rack, 5028 rack->r_ctl.rc_gp_srtt, 5029 rack->r_ctl.rc_rtt_diff, 5030 rack->r_ctl.rc_prev_gp_srtt 5031 ); 5032 bytes_ps *= HPTS_USEC_IN_SEC; 5033 bytes_ps /= utim; 5034 if (bytes_ps > rack->r_ctl.last_max_bw) { 5035 /* 5036 * Something is on path playing 5037 * since this b/w is not possible based 5038 * on our BDP (highest rwnd and lowest rtt 5039 * we saw in the measurement window). 5040 * 5041 * Another option here would be to 5042 * instead skip the measurement. 5043 */ 5044 rack_log_pacing_delay_calc(rack, bytes, reqbytes, 5045 bytes_ps, rack->r_ctl.last_max_bw, 0, 5046 11, __LINE__, NULL, quality); 5047 bytes_ps = rack->r_ctl.last_max_bw; 5048 } 5049 /* We store gp for b/w in bytes per second */ 5050 if (rack->rc_gp_filled == 0) { 5051 /* Initial measurement */ 5052 if (bytes_ps) { 5053 rack->r_ctl.gp_bw = bytes_ps; 5054 rack->rc_gp_filled = 1; 5055 rack->r_ctl.num_measurements = 1; 5056 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 5057 } else { 5058 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5059 rack->r_ctl.rc_app_limited_cnt, 5060 0, 0, 10, __LINE__, NULL, quality); 5061 } 5062 if (tcp_in_hpts(rack->rc_tp) && 5063 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 5064 /* 5065 * Ok we can't trust the pacer in this case 5066 * where we transition from un-paced to paced. 5067 * Or for that matter when the burst mitigation 5068 * was making a wild guess and got it wrong. 5069 * Stop the pacer and clear up all the aggregate 5070 * delays etc. 5071 */ 5072 tcp_hpts_remove(rack->rc_tp); 5073 rack->r_ctl.rc_hpts_flags = 0; 5074 rack->r_ctl.rc_last_output_to = 0; 5075 } 5076 did_add = 2; 5077 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { 5078 /* Still a small number run an average */ 5079 rack->r_ctl.gp_bw += bytes_ps; 5080 addpart = rack->r_ctl.num_measurements; 5081 rack->r_ctl.num_measurements++; 5082 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 5083 /* We have collected enough to move forward */ 5084 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; 5085 } 5086 rack_set_pace_segments(tp, rack, __LINE__, NULL); 5087 did_add = 3; 5088 } else { 5089 /* 5090 * We want to take 1/wma of the goodput and add in to 7/8th 5091 * of the old value weighted by the srtt. So if your measurement 5092 * period is say 2 SRTT's long you would get 1/4 as the 5093 * value, if it was like 1/2 SRTT then you would get 1/16th. 5094 * 5095 * But we must be careful not to take too much i.e. if the 5096 * srtt is say 20ms and the measurement is taken over 5097 * 400ms our weight would be 400/20 i.e. 20. On the 5098 * other hand if we get a measurement over 1ms with a 5099 * 10ms rtt we only want to take a much smaller portion. 
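 *
 * Worked example for the non-dynamic path below (illustrative
 * numbers only): gp_bw = 8,000,000 B/s and a new measurement
 * of bytes_ps = 10,000,000 B/s spanning utim = 2 * srtt gives
 *   subpart = gp_bw * utim / (srtt * 8)    = 2,000,000
 *   addpart = bytes_ps * utim / (srtt * 8) = 2,500,000
 * so gp_bw becomes 8,000,000 - 2,000,000 + 2,500,000 =
 * 8,500,000 B/s, i.e. the measurement received the 1/4 weight
 * described above for a 2 SRTT long sample.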
5100 */ 5101 uint8_t meth; 5102 5103 if (rack->r_ctl.num_measurements < 0xff) { 5104 rack->r_ctl.num_measurements++; 5105 } 5106 srtt = (uint64_t)tp->t_srtt; 5107 if (srtt == 0) { 5108 /* 5109 * Strange why did t_srtt go back to zero? 5110 */ 5111 if (rack->r_ctl.rc_rack_min_rtt) 5112 srtt = rack->r_ctl.rc_rack_min_rtt; 5113 else 5114 srtt = HPTS_USEC_IN_MSEC; 5115 } 5116 /* 5117 * XXXrrs: Note for reviewers, in playing with 5118 * dynamic pacing I discovered this GP calculation 5119 * as done originally leads to some undesired results. 5120 * Basically you can get longer measurements contributing 5121 * too much to the WMA. Thus I changed it if you are doing 5122 * dynamic adjustments to only do the aportioned adjustment 5123 * if we have a very small (time wise) measurement. Longer 5124 * measurements just get there weight (defaulting to 1/8) 5125 * add to the WMA. We may want to think about changing 5126 * this to always do that for both sides i.e. dynamic 5127 * and non-dynamic... but considering lots of folks 5128 * were playing with this I did not want to change the 5129 * calculation per.se. without your thoughts.. Lawerence? 5130 * Peter?? 5131 */ 5132 if (rack->rc_gp_dyn_mul == 0) { 5133 subpart = rack->r_ctl.gp_bw * utim; 5134 subpart /= (srtt * 8); 5135 if (subpart < (rack->r_ctl.gp_bw / 2)) { 5136 /* 5137 * The b/w update takes no more 5138 * away then 1/2 our running total 5139 * so factor it in. 5140 */ 5141 addpart = bytes_ps * utim; 5142 addpart /= (srtt * 8); 5143 meth = 1; 5144 } else { 5145 /* 5146 * Don't allow a single measurement 5147 * to account for more than 1/2 of the 5148 * WMA. This could happen on a retransmission 5149 * where utim becomes huge compared to 5150 * srtt (multiple retransmissions when using 5151 * the sending rate which factors in all the 5152 * transmissions from the first one). 5153 */ 5154 subpart = rack->r_ctl.gp_bw / 2; 5155 addpart = bytes_ps / 2; 5156 meth = 2; 5157 } 5158 rack_log_gp_calc(rack, addpart, subpart, srtt, bytes_ps, utim, meth, __LINE__); 5159 resid_bw = rack->r_ctl.gp_bw - subpart; 5160 rack->r_ctl.gp_bw = resid_bw + addpart; 5161 did_add = 1; 5162 } else { 5163 if ((utim / srtt) <= 1) { 5164 /* 5165 * The b/w update was over a small period 5166 * of time. The idea here is to prevent a small 5167 * measurement time period from counting 5168 * too much. So we scale it based on the 5169 * time so it attributes less than 1/rack_wma_divisor 5170 * of its measurement. 5171 */ 5172 subpart = rack->r_ctl.gp_bw * utim; 5173 subpart /= (srtt * rack_wma_divisor); 5174 addpart = bytes_ps * utim; 5175 addpart /= (srtt * rack_wma_divisor); 5176 meth = 3; 5177 } else { 5178 /* 5179 * The scaled measurement was long 5180 * enough so lets just add in the 5181 * portion of the measurement i.e. 1/rack_wma_divisor 5182 */ 5183 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; 5184 addpart = bytes_ps / rack_wma_divisor; 5185 meth = 4; 5186 } 5187 if ((rack->measure_saw_probe_rtt == 0) || 5188 (bytes_ps > rack->r_ctl.gp_bw)) { 5189 /* 5190 * For probe-rtt we only add it in 5191 * if its larger, all others we just 5192 * add in. 5193 */ 5194 did_add = 1; 5195 rack_log_gp_calc(rack, addpart, subpart, srtt, bytes_ps, utim, meth, __LINE__); 5196 resid_bw = rack->r_ctl.gp_bw - subpart; 5197 rack->r_ctl.gp_bw = resid_bw + addpart; 5198 } 5199 } 5200 rack_set_pace_segments(tp, rack, __LINE__, NULL); 5201 } 5202 /* 5203 * We only watch the growth of the GP during the initial startup 5204 * or first-slowstart that ensues. 
If we ever needed to watch 5205 * growth of gp outside of that period all we need to do is 5206 * remove the first clause of this if (rc_initial_ss_comp). 5207 */ 5208 if ((rack->rc_initial_ss_comp == 0) && 5209 (rack->r_ctl.num_measurements >= RACK_REQ_AVG)) { 5210 uint64_t gp_est; 5211 5212 gp_est = bytes_ps; 5213 if (tcp_bblogging_on(rack->rc_tp)) { 5214 union tcp_log_stackspecific log; 5215 struct timeval tv; 5216 5217 memset(&log, 0, sizeof(log)); 5218 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5219 log.u_bbr.flex1 = rack->r_ctl.current_round; 5220 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; 5221 log.u_bbr.delRate = gp_est; 5222 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; 5223 log.u_bbr.flex8 = 41; 5224 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5225 0, &log, false, NULL, __func__, __LINE__,&tv); 5226 } 5227 if ((rack->r_ctl.num_measurements == RACK_REQ_AVG) || 5228 (rack->r_ctl.last_gpest == 0)) { 5229 /* 5230 * The round we get our measurement averaging going 5231 * is the base round so it always is the source point 5232 * for when we had our first increment. From there on 5233 * we only record the round that had a rise. 5234 */ 5235 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; 5236 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; 5237 } else if (gp_est >= rack->r_ctl.last_gpest) { 5238 /* 5239 * Test to see if its gone up enough 5240 * to set the round count up to now. Note 5241 * that on the seeding of the 4th measurement we 5242 */ 5243 gp_est *= 1000; 5244 gp_est /= rack->r_ctl.last_gpest; 5245 if ((uint32_t)gp_est > rack->r_ctl.gp_gain_req) { 5246 /* 5247 * We went up enough to record the round. 5248 */ 5249 if (tcp_bblogging_on(rack->rc_tp)) { 5250 union tcp_log_stackspecific log; 5251 struct timeval tv; 5252 5253 memset(&log, 0, sizeof(log)); 5254 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5255 log.u_bbr.flex1 = rack->r_ctl.current_round; 5256 log.u_bbr.flex2 = (uint32_t)gp_est; 5257 log.u_bbr.flex3 = rack->r_ctl.gp_gain_req; 5258 log.u_bbr.delRate = gp_est; 5259 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; 5260 log.u_bbr.flex8 = 42; 5261 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5262 0, &log, false, NULL, __func__, __LINE__,&tv); 5263 } 5264 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; 5265 if (rack->r_ctl.use_gp_not_last == 1) 5266 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; 5267 else 5268 rack->r_ctl.last_gpest = bytes_ps; 5269 } 5270 } 5271 } 5272 if ((rack->gp_ready == 0) && 5273 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 5274 /* We have enough measurements now */ 5275 rack->gp_ready = 1; 5276 if (rack->dgp_on || 5277 rack->rack_hibeta) 5278 rack_set_cc_pacing(rack); 5279 if (rack->defer_options) 5280 rack_apply_deferred_options(rack); 5281 } 5282 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 5283 rack_get_bw(rack), 22, did_add, NULL, quality); 5284 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 5285 5286 if ((rack->measure_saw_probe_rtt == 0) && 5287 rack->rc_gp_rtt_set) { 5288 if (rack->rc_skip_timely == 0) { 5289 rack_update_multiplier(rack, timely_says, bytes_ps, 5290 rack->r_ctl.rc_gp_srtt, 5291 rack->r_ctl.rc_rtt_diff); 5292 } 5293 } 5294 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 5295 rack_get_bw(rack), 3, line, NULL, quality); 5296 rack_log_pacing_delay_calc(rack, 5297 bytes, /* flex2 */ 5298 tim, /* flex1 */ 5299 bytes_ps, /* bw_inuse */ 5300 rack->r_ctl.gp_bw, /* delRate */ 5301 
rack_get_lt_bw(rack), /* rttProp */ 5302 20, line, NULL, 0); 5303 /* reset the gp srtt and setup the new prev */ 5304 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 5305 /* Record the lost count for the next measurement */ 5306 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 5307 skip_measurement: 5308 /* 5309 * We restart our diffs based on the gpsrtt in the 5310 * measurement window. 5311 */ 5312 rack->rc_gp_rtt_set = 0; 5313 rack->rc_gp_saw_rec = 0; 5314 rack->rc_gp_saw_ca = 0; 5315 rack->rc_gp_saw_ss = 0; 5316 rack->rc_dragged_bottom = 0; 5317 if (quality == RACK_QUALITY_HIGH) { 5318 /* 5319 * Gput in the stats world is in kbps where bytes_ps is 5320 * bytes per second so we do ((x * 8)/ 1000). 5321 */ 5322 gput = (int32_t)((bytes_ps << 3) / (uint64_t)1000); 5323 #ifdef STATS 5324 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 5325 gput); 5326 /* 5327 * XXXLAS: This is a temporary hack, and should be 5328 * chained off VOI_TCP_GPUT when stats(9) grows an 5329 * API to deal with chained VOIs. 5330 */ 5331 if (tp->t_stats_gput_prev > 0) 5332 stats_voi_update_abs_s32(tp->t_stats, 5333 VOI_TCP_GPUT_ND, 5334 ((gput - tp->t_stats_gput_prev) * 100) / 5335 tp->t_stats_gput_prev); 5336 #endif 5337 tp->t_stats_gput_prev = gput; 5338 } 5339 tp->t_flags &= ~TF_GPUTINPROG; 5340 /* 5341 * Now are we app limited now and there is space from where we 5342 * were to where we want to go? 5343 * 5344 * We don't do the other case i.e. non-applimited here since 5345 * the next send will trigger us picking up the missing data. 5346 */ 5347 if (rack->r_ctl.rc_first_appl && 5348 TCPS_HAVEESTABLISHED(tp->t_state) && 5349 rack->r_ctl.rc_app_limited_cnt && 5350 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 5351 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > 5352 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 5353 /* 5354 * Yep there is enough outstanding to make a measurement here. 5355 */ 5356 struct rack_sendmap *rsm; 5357 5358 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 5359 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 5360 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 5361 rack->app_limited_needs_set = 0; 5362 tp->gput_seq = th_ack; 5363 if (rack->in_probe_rtt) 5364 rack->measure_saw_probe_rtt = 1; 5365 else if ((rack->measure_saw_probe_rtt) && 5366 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 5367 rack->measure_saw_probe_rtt = 0; 5368 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { 5369 /* There is a full window to gain info from */ 5370 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 5371 } else { 5372 /* We can only measure up to the applimited point */ 5373 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); 5374 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 5375 /* 5376 * We don't have enough to make a measurement. 5377 */ 5378 tp->t_flags &= ~TF_GPUTINPROG; 5379 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 5380 0, 0, 0, 6, __LINE__, NULL, quality); 5381 return; 5382 } 5383 } 5384 if (tp->t_state >= TCPS_FIN_WAIT_1) { 5385 /* 5386 * We will get no more data into the SB 5387 * this means we need to have the data available 5388 * before we start a measurement. 5389 */ 5390 if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) { 5391 /* Nope not enough data. 
*/ 5392 return; 5393 } 5394 } 5395 tp->t_flags |= TF_GPUTINPROG; 5396 /* 5397 * Now we need to find the timestamp of the send at tp->gput_seq 5398 * for the send based measurement. 5399 */ 5400 rack->r_ctl.rc_gp_cumack_ts = 0; 5401 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 5402 if (rsm) { 5403 /* Ok send-based limit is set */ 5404 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 5405 /* 5406 * Move back to include the earlier part 5407 * so our ack time lines up right (this may 5408 * make an overlapping measurement but thats 5409 * ok). 5410 */ 5411 tp->gput_seq = rsm->r_start; 5412 } 5413 if (rsm->r_flags & RACK_ACKED) { 5414 struct rack_sendmap *nrsm; 5415 5416 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 5417 tp->gput_seq = rsm->r_end; 5418 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); 5419 if (nrsm) 5420 rsm = nrsm; 5421 else { 5422 rack->app_limited_needs_set = 1; 5423 } 5424 } else 5425 rack->app_limited_needs_set = 1; 5426 /* We always go from the first send */ 5427 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; 5428 } else { 5429 /* 5430 * If we don't find the rsm due to some 5431 * send-limit set the current time, which 5432 * basically disables the send-limit. 5433 */ 5434 struct timeval tv; 5435 5436 microuptime(&tv); 5437 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 5438 } 5439 rack_tend_gp_marks(tp, rack); 5440 rack_log_pacing_delay_calc(rack, 5441 tp->gput_seq, 5442 tp->gput_ack, 5443 (uintptr_t)rsm, 5444 tp->gput_ts, 5445 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 5446 9, 5447 __LINE__, rsm, quality); 5448 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 5449 } else { 5450 /* 5451 * To make sure proper timestamp merging occurs, we need to clear 5452 * all GP marks if we don't start a measurement. 5453 */ 5454 rack_clear_gp_marks(tp, rack); 5455 } 5456 } 5457 5458 /* 5459 * CC wrapper hook functions 5460 */ 5461 static void 5462 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 5463 uint16_t type, int32_t post_recovery) 5464 { 5465 uint32_t prior_cwnd, acked; 5466 struct tcp_log_buffer *lgb = NULL; 5467 uint8_t labc_to_use, quality; 5468 5469 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5470 tp->t_ccv.nsegs = nsegs; 5471 acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una); 5472 if ((post_recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 5473 uint32_t max; 5474 5475 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 5476 if (tp->t_ccv.bytes_this_ack > max) { 5477 tp->t_ccv.bytes_this_ack = max; 5478 } 5479 } 5480 #ifdef STATS 5481 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 5482 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 5483 #endif 5484 if ((th_ack == tp->snd_max) && rack->lt_bw_up) { 5485 /* 5486 * We will ack all the data, time to end any 5487 * lt_bw_up we have running until something 5488 * new is sent. Note we need to use the actual 5489 * ack_rcv_time which with pacing may be different. 
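 * (Clarifying note, not in the original: the bytes sent since
 * lt_seq and the time elapsed since lt_timemark are folded into
 * lt_bw_bytes/lt_bw_time here, which is roughly the ratio that
 * rack_get_lt_bw() reports, so closing the interval now keeps
 * the coming idle period from diluting the long-term estimate.)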
5490 */ 5491 uint64_t tmark; 5492 5493 rack->r_ctl.lt_bw_bytes += (tp->snd_max - rack->r_ctl.lt_seq); 5494 rack->r_ctl.lt_seq = tp->snd_max; 5495 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 5496 if (tmark >= rack->r_ctl.lt_timemark) { 5497 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 5498 } 5499 rack->r_ctl.lt_timemark = tmark; 5500 rack->lt_bw_up = 0; 5501 } 5502 quality = RACK_QUALITY_NONE; 5503 if ((tp->t_flags & TF_GPUTINPROG) && 5504 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { 5505 /* Measure the Goodput */ 5506 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); 5507 } 5508 /* Which way our we limited, if not cwnd limited no advance in CA */ 5509 if (tp->snd_cwnd <= tp->snd_wnd) 5510 tp->t_ccv.flags |= CCF_CWND_LIMITED; 5511 else 5512 tp->t_ccv.flags &= ~CCF_CWND_LIMITED; 5513 if (tp->snd_cwnd > tp->snd_ssthresh) { 5514 tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack, 5515 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 5516 /* For the setting of a window past use the actual scwnd we are using */ 5517 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 5518 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 5519 tp->t_ccv.flags |= CCF_ABC_SENTAWND; 5520 } 5521 } else { 5522 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 5523 tp->t_bytes_acked = 0; 5524 } 5525 prior_cwnd = tp->snd_cwnd; 5526 if ((post_recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 5527 (rack_client_low_buf && rack->client_bufferlvl && 5528 (rack->client_bufferlvl < rack_client_low_buf))) 5529 labc_to_use = rack->rc_labc; 5530 else 5531 labc_to_use = rack_max_abc_post_recovery; 5532 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5533 union tcp_log_stackspecific log; 5534 struct timeval tv; 5535 5536 memset(&log, 0, sizeof(log)); 5537 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5538 log.u_bbr.flex1 = th_ack; 5539 log.u_bbr.flex2 = tp->t_ccv.flags; 5540 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 5541 log.u_bbr.flex4 = tp->t_ccv.nsegs; 5542 log.u_bbr.flex5 = labc_to_use; 5543 log.u_bbr.flex6 = prior_cwnd; 5544 log.u_bbr.flex7 = V_tcp_do_newsack; 5545 log.u_bbr.flex8 = 1; 5546 lgb = tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5547 0, &log, false, NULL, __func__, __LINE__,&tv); 5548 } 5549 if (CC_ALGO(tp)->ack_received != NULL) { 5550 /* XXXLAS: Find a way to live without this */ 5551 tp->t_ccv.curack = th_ack; 5552 tp->t_ccv.labc = labc_to_use; 5553 tp->t_ccv.flags |= CCF_USE_LOCAL_ABC; 5554 CC_ALGO(tp)->ack_received(&tp->t_ccv, type); 5555 } 5556 if (lgb) { 5557 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; 5558 } 5559 if (rack->r_must_retran) { 5560 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { 5561 /* 5562 * We now are beyond the rxt point so lets disable 5563 * the flag. 5564 */ 5565 rack->r_ctl.rc_out_at_rto = 0; 5566 rack->r_must_retran = 0; 5567 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { 5568 /* 5569 * Only decrement the rc_out_at_rto if the cwnd advances 5570 * at least a whole segment. Otherwise next time the peer 5571 * acks, we won't be able to send this generaly happens 5572 * when we are in Congestion Avoidance. 
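 *
 * Illustration (made-up numbers): with rc_out_at_rto = 3000
 * bytes and this ack covering acked = 1460, the count drops
 * to 1540; a later ack covering 2000 more bytes exceeds what
 * is left and simply zeroes it.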
5573 */ 5574 if (acked <= rack->r_ctl.rc_out_at_rto){ 5575 rack->r_ctl.rc_out_at_rto -= acked; 5576 } else { 5577 rack->r_ctl.rc_out_at_rto = 0; 5578 } 5579 } 5580 } 5581 #ifdef STATS 5582 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); 5583 #endif 5584 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { 5585 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; 5586 } 5587 if ((rack->rc_initial_ss_comp == 0) && 5588 (tp->snd_cwnd >= tp->snd_ssthresh)) { 5589 /* 5590 * The cwnd has grown beyond ssthresh we have 5591 * entered ca and completed our first Slowstart. 5592 */ 5593 rack->rc_initial_ss_comp = 1; 5594 } 5595 } 5596 5597 static void 5598 tcp_rack_partialack(struct tcpcb *tp) 5599 { 5600 struct tcp_rack *rack; 5601 5602 rack = (struct tcp_rack *)tp->t_fb_ptr; 5603 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5604 /* 5605 * If we are doing PRR and have enough 5606 * room to send <or> we are pacing and prr 5607 * is disabled we will want to see if we 5608 * can send data (by setting r_wanted_output to 5609 * true). 5610 */ 5611 if ((rack->r_ctl.rc_prr_sndcnt > 0) || 5612 rack->rack_no_prr) 5613 rack->r_wanted_output = 1; 5614 } 5615 5616 static void 5617 rack_exit_recovery(struct tcpcb *tp, struct tcp_rack *rack, int how) 5618 { 5619 /* 5620 * Now exit recovery. 5621 */ 5622 EXIT_RECOVERY(tp->t_flags); 5623 } 5624 5625 static void 5626 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack) 5627 { 5628 struct tcp_rack *rack; 5629 uint32_t orig_cwnd; 5630 5631 orig_cwnd = tp->snd_cwnd; 5632 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5633 rack = (struct tcp_rack *)tp->t_fb_ptr; 5634 /* only alert CC if we alerted when we entered */ 5635 if (CC_ALGO(tp)->post_recovery != NULL) { 5636 tp->t_ccv.curack = th_ack; 5637 CC_ALGO(tp)->post_recovery(&tp->t_ccv); 5638 if (tp->snd_cwnd < tp->snd_ssthresh) { 5639 /* 5640 * Rack has burst control and pacing 5641 * so lets not set this any lower than 5642 * snd_ssthresh per RFC-6582 (option 2). 5643 */ 5644 tp->snd_cwnd = tp->snd_ssthresh; 5645 } 5646 } 5647 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5648 union tcp_log_stackspecific log; 5649 struct timeval tv; 5650 5651 memset(&log, 0, sizeof(log)); 5652 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5653 log.u_bbr.flex1 = th_ack; 5654 log.u_bbr.flex2 = tp->t_ccv.flags; 5655 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 5656 log.u_bbr.flex4 = tp->t_ccv.nsegs; 5657 log.u_bbr.flex5 = V_tcp_abc_l_var; 5658 log.u_bbr.flex6 = orig_cwnd; 5659 log.u_bbr.flex7 = V_tcp_do_newsack; 5660 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 5661 log.u_bbr.flex8 = 2; 5662 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5663 0, &log, false, NULL, __func__, __LINE__, &tv); 5664 } 5665 if ((rack->rack_no_prr == 0) && 5666 (rack->no_prr_addback == 0) && 5667 (rack->r_ctl.rc_prr_sndcnt > 0)) { 5668 /* 5669 * Suck the next prr cnt back into cwnd, but 5670 * only do that if we are not application limited. 5671 */ 5672 if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) { 5673 /* 5674 * We are allowed to add back to the cwnd the amount we did 5675 * not get out if: 5676 * a) no_prr_addback is off. 5677 * b) we are not app limited 5678 * c) we are doing prr 5679 * <and> 5680 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 
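 *
 * Example (illustrative values): with a 1448 byte maxseg,
 * rack_prr_addbackmax set to 2 and 5000 bytes of unused prr
 * credit, the cwnd gets back min(2 * 1448, 5000) = 2896 bytes;
 * with rack_prr_addbackmax at 0 nothing is added back, per (d).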
5681 */ 5682 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 5683 rack->r_ctl.rc_prr_sndcnt); 5684 } 5685 rack->r_ctl.rc_prr_sndcnt = 0; 5686 rack_log_to_prr(rack, 1, 0, __LINE__); 5687 } 5688 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 5689 tp->snd_recover = tp->snd_una; 5690 if (rack->r_ctl.dsack_persist) { 5691 rack->r_ctl.dsack_persist--; 5692 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 5693 rack->r_ctl.num_dsack = 0; 5694 } 5695 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 5696 } 5697 if (rack->rto_from_rec == 1) { 5698 rack->rto_from_rec = 0; 5699 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) 5700 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; 5701 } 5702 rack_exit_recovery(tp, rack, 1); 5703 } 5704 5705 static void 5706 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line) 5707 { 5708 struct tcp_rack *rack; 5709 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 5710 5711 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5712 #ifdef STATS 5713 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 5714 #endif 5715 if (IN_RECOVERY(tp->t_flags) == 0) { 5716 in_rec_at_entry = 0; 5717 ssthresh_enter = tp->snd_ssthresh; 5718 cwnd_enter = tp->snd_cwnd; 5719 } else 5720 in_rec_at_entry = 1; 5721 rack = (struct tcp_rack *)tp->t_fb_ptr; 5722 switch (type) { 5723 case CC_NDUPACK: 5724 tp->t_flags &= ~TF_WASFRECOVERY; 5725 tp->t_flags &= ~TF_WASCRECOVERY; 5726 if (!IN_FASTRECOVERY(tp->t_flags)) { 5727 /* Check if this is the end of the initial Start-up i.e. initial slow-start */ 5728 if (rack->rc_initial_ss_comp == 0) { 5729 /* Yep it is the end of the initial slowstart */ 5730 rack->rc_initial_ss_comp = 1; 5731 } 5732 rack->r_ctl.rc_prr_delivered = 0; 5733 rack->r_ctl.rc_prr_out = 0; 5734 rack->r_fast_output = 0; 5735 if (rack->rack_no_prr == 0) { 5736 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 5737 rack_log_to_prr(rack, 2, in_rec_at_entry, line); 5738 } 5739 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 5740 tp->snd_recover = tp->snd_max; 5741 if (tp->t_flags2 & TF2_ECN_PERMIT) 5742 tp->t_flags2 |= TF2_ECN_SND_CWR; 5743 } 5744 break; 5745 case CC_ECN: 5746 if (!IN_CONGRECOVERY(tp->t_flags) || 5747 /* 5748 * Allow ECN reaction on ACK to CWR, if 5749 * that data segment was also CE marked. 5750 */ 5751 SEQ_GEQ(ack, tp->snd_recover)) { 5752 EXIT_CONGRECOVERY(tp->t_flags); 5753 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 5754 rack->r_fast_output = 0; 5755 tp->snd_recover = tp->snd_max + 1; 5756 if (tp->t_flags2 & TF2_ECN_PERMIT) 5757 tp->t_flags2 |= TF2_ECN_SND_CWR; 5758 } 5759 break; 5760 case CC_RTO: 5761 tp->t_dupacks = 0; 5762 tp->t_bytes_acked = 0; 5763 rack->r_fast_output = 0; 5764 if (IN_RECOVERY(tp->t_flags)) 5765 rack_exit_recovery(tp, rack, 2); 5766 orig_cwnd = tp->snd_cwnd; 5767 rack_log_to_prr(rack, 16, orig_cwnd, line); 5768 if (CC_ALGO(tp)->cong_signal == NULL) { 5769 /* TSNH */ 5770 tp->snd_ssthresh = max(2, 5771 min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 5772 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 5773 tp->snd_cwnd = ctf_fixed_maxseg(tp); 5774 } 5775 if (tp->t_flags2 & TF2_ECN_PERMIT) 5776 tp->t_flags2 |= TF2_ECN_SND_CWR; 5777 break; 5778 case CC_RTO_ERR: 5779 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 5780 /* RTO was unnecessary, so reset everything. 
*/ 5781 tp->snd_cwnd = tp->snd_cwnd_prev; 5782 tp->snd_ssthresh = tp->snd_ssthresh_prev; 5783 tp->snd_recover = tp->snd_recover_prev; 5784 if (tp->t_flags & TF_WASFRECOVERY) { 5785 ENTER_FASTRECOVERY(tp->t_flags); 5786 tp->t_flags &= ~TF_WASFRECOVERY; 5787 } 5788 if (tp->t_flags & TF_WASCRECOVERY) { 5789 ENTER_CONGRECOVERY(tp->t_flags); 5790 tp->t_flags &= ~TF_WASCRECOVERY; 5791 } 5792 tp->snd_nxt = tp->snd_max; 5793 tp->t_badrxtwin = 0; 5794 break; 5795 } 5796 if ((CC_ALGO(tp)->cong_signal != NULL) && 5797 (type != CC_RTO)){ 5798 tp->t_ccv.curack = ack; 5799 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type); 5800 } 5801 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 5802 rack_log_to_prr(rack, 15, cwnd_enter, line); 5803 rack->r_ctl.dsack_byte_cnt = 0; 5804 rack->r_ctl.retran_during_recovery = 0; 5805 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 5806 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 5807 rack->r_ent_rec_ns = 1; 5808 } 5809 } 5810 5811 static inline void 5812 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 5813 { 5814 uint32_t i_cwnd; 5815 5816 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5817 5818 if (CC_ALGO(tp)->after_idle != NULL) 5819 CC_ALGO(tp)->after_idle(&tp->t_ccv); 5820 5821 if (tp->snd_cwnd == 1) 5822 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 5823 else 5824 i_cwnd = rc_init_window(rack); 5825 5826 /* 5827 * Being idle is no different than the initial window. If the cc 5828 * clamps it down below the initial window raise it to the initial 5829 * window. 5830 */ 5831 if (tp->snd_cwnd < i_cwnd) { 5832 tp->snd_cwnd = i_cwnd; 5833 } 5834 } 5835 5836 /* 5837 * Indicate whether this ack should be delayed. We can delay the ack if 5838 * following conditions are met: 5839 * - There is no delayed ack timer in progress. 5840 * - Our last ack wasn't a 0-sized window. We never want to delay 5841 * the ack that opens up a 0-sized window. 5842 * - LRO wasn't used for this segment. We make sure by checking that the 5843 * segment size is not larger than the MSS. 5844 * - Delayed acks are enabled or this is a half-synchronized T/TCP 5845 * connection. 5846 */ 5847 #define DELAY_ACK(tp, tlen) \ 5848 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 5849 ((tp->t_flags & TF_DELACK) == 0) && \ 5850 (tlen <= tp->t_maxseg) && \ 5851 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 5852 5853 static struct rack_sendmap * 5854 rack_find_lowest_rsm(struct tcp_rack *rack) 5855 { 5856 struct rack_sendmap *rsm; 5857 5858 /* 5859 * Walk the time-order transmitted list looking for an rsm that is 5860 * not acked. This will be the one that was sent the longest time 5861 * ago that is still outstanding. 5862 */ 5863 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 5864 if (rsm->r_flags & RACK_ACKED) { 5865 continue; 5866 } 5867 goto finish; 5868 } 5869 finish: 5870 return (rsm); 5871 } 5872 5873 static struct rack_sendmap * 5874 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm) 5875 { 5876 struct rack_sendmap *prsm; 5877 5878 /* 5879 * Walk the sequence order list backward until we hit and arrive at 5880 * the highest seq not acked. In theory when this is called it 5881 * should be the last segment (which it was not). 
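 * (Descriptive note, not in the original: the walk below starts
 * from rsm itself and moves toward lower sequence numbers,
 * returning the first entry that is neither acked nor a FIN;
 * NULL means everything at or below rsm is acked or a FIN.)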
5882 */
5883 prsm = rsm;
5884
5885 TQHASH_FOREACH_REVERSE_FROM(prsm, rack->r_ctl.tqh) {
5886 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
5887 continue;
5888 }
5889 return (prsm);
5890 }
5891 return (NULL);
5892 }
5893
5894 static uint32_t
5895 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts, int line, int log_allowed)
5896 {
5897 int32_t lro;
5898 uint32_t thresh;
5899
5900 /*
5901 * lro is the flag we use to determine if we have seen reordering.
5902 * If it gets set we have seen reordering. The reorder logic either
5903 * works in one of two ways:
5904 *
5905 * If reorder-fade is configured, then we track the last time we saw
5906 * re-ordering occur. If we reach the point where enough time has
5907 * passed we no longer consider reordering as occurring.
5908 *
5909 * Or if reorder-fade is 0, then once we see reordering we consider
5910 * the connection to always be subject to reordering and just set lro
5911 * to 1.
5912 *
5913 * In the end if lro is non-zero we add the extra time for
5914 * reordering in.
5915 */
5916 if (srtt == 0)
5917 srtt = 1;
5918 if (rack->r_ctl.rc_reorder_ts) {
5919 if (rack->r_ctl.rc_reorder_fade) {
5920 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
5921 lro = cts - rack->r_ctl.rc_reorder_ts;
5922 if (lro == 0) {
5923 /*
5924 * No time has passed since the last
5925 * reorder, mark it as reordering.
5926 */
5927 lro = 1;
5928 }
5929 } else {
5930 /* Negative time? */
5931 lro = 0;
5932 }
5933 if (lro > rack->r_ctl.rc_reorder_fade) {
5934 /* Turn off reordering seen too */
5935 rack->r_ctl.rc_reorder_ts = 0;
5936 lro = 0;
5937 }
5938 } else {
5939 /* Reordering does not fade */
5940 lro = 1;
5941 }
5942 } else {
5943 lro = 0;
5944 }
5945 if (rack->rc_rack_tmr_std_based == 0) {
5946 thresh = srtt + rack->r_ctl.rc_pkt_delay;
5947 } else {
5948 /* Standards based pkt-delay is 1/4 srtt */
5949 thresh = srtt + (srtt >> 2);
5950 }
5951 if (lro && (rack->rc_rack_tmr_std_based == 0)) {
5952 /* It must be set, if not you get 1/4 rtt */
5953 if (rack->r_ctl.rc_reorder_shift)
5954 thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
5955 else
5956 thresh += (srtt >> 2);
5957 }
5958 if (rack->rc_rack_use_dsack &&
5959 lro &&
5960 (rack->r_ctl.num_dsack > 0)) {
5961 /*
5962 * We only increase the reordering window if we
5963 * have seen reordering <and> we have a DSACK count.
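 *
 * Worked example (illustrative numbers, assuming the non
 * standards-based timer): srtt = 40000 usecs, rc_pkt_delay =
 * 1000, reordering seen with rc_reorder_shift = 3, dsack-based
 * expansion enabled and one DSACK on record gives
 *   thresh = 40000 + 1000 + (40000 >> 3) + 1 * (40000 >> 2)
 *          = 56000 usecs
 * which is still below the 2 * srtt ceiling (80000) and is
 * then subject to the rack_rto_max clamp applied below.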
5964 */ 5965 thresh += rack->r_ctl.num_dsack * (srtt >> 2); 5966 if (log_allowed) 5967 rack_log_dsack_event(rack, 4, line, srtt, thresh); 5968 } 5969 /* SRTT * 2 is the ceiling */ 5970 if (thresh > (srtt * 2)) { 5971 thresh = srtt * 2; 5972 } 5973 /* And we don't want it above the RTO max either */ 5974 if (thresh > rack_rto_max) { 5975 thresh = rack_rto_max; 5976 } 5977 if (log_allowed) 5978 rack_log_dsack_event(rack, 6, line, srtt, thresh); 5979 return (thresh); 5980 } 5981 5982 static uint32_t 5983 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 5984 struct rack_sendmap *rsm, uint32_t srtt) 5985 { 5986 struct rack_sendmap *prsm; 5987 uint32_t thresh, len; 5988 int segsiz; 5989 5990 if (srtt == 0) 5991 srtt = 1; 5992 if (rack->r_ctl.rc_tlp_threshold) 5993 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 5994 else 5995 thresh = (srtt * 2); 5996 5997 /* Get the previous sent packet, if any */ 5998 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 5999 len = rsm->r_end - rsm->r_start; 6000 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 6001 /* Exactly like the ID */ 6002 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { 6003 uint32_t alt_thresh; 6004 /* 6005 * Compensate for delayed-ack with the d-ack time. 6006 */ 6007 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6008 if (alt_thresh > thresh) 6009 thresh = alt_thresh; 6010 } 6011 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 6012 /* 2.1 behavior */ 6013 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 6014 if (prsm && (len <= segsiz)) { 6015 /* 6016 * Two packets outstanding, thresh should be (2*srtt) + 6017 * possible inter-packet delay (if any). 6018 */ 6019 uint32_t inter_gap = 0; 6020 int idx, nidx; 6021 6022 idx = rsm->r_rtr_cnt - 1; 6023 nidx = prsm->r_rtr_cnt - 1; 6024 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 6025 /* Yes it was sent later (or at the same time) */ 6026 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 6027 } 6028 thresh += inter_gap; 6029 } else if (len <= segsiz) { 6030 /* 6031 * Possibly compensate for delayed-ack. 6032 */ 6033 uint32_t alt_thresh; 6034 6035 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6036 if (alt_thresh > thresh) 6037 thresh = alt_thresh; 6038 } 6039 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 6040 /* 2.2 behavior */ 6041 if (len <= segsiz) { 6042 uint32_t alt_thresh; 6043 /* 6044 * Compensate for delayed-ack with the d-ack time. 6045 */ 6046 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6047 if (alt_thresh > thresh) 6048 thresh = alt_thresh; 6049 } 6050 } 6051 /* Not above an RTO */ 6052 if (thresh > tp->t_rxtcur) { 6053 thresh = tp->t_rxtcur; 6054 } 6055 /* Not above a RTO max */ 6056 if (thresh > rack_rto_max) { 6057 thresh = rack_rto_max; 6058 } 6059 /* Apply user supplied min TLP */ 6060 if (thresh < rack_tlp_min) { 6061 thresh = rack_tlp_min; 6062 } 6063 return (thresh); 6064 } 6065 6066 static uint32_t 6067 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 6068 { 6069 /* 6070 * We want the rack_rtt which is the 6071 * last rtt we measured. However if that 6072 * does not exist we fallback to the srtt (which 6073 * we probably will never do) and then as a last 6074 * resort we use RACK_INITIAL_RTO if no srtt is 6075 * yet set. 
6076 */ 6077 if (rack->rc_rack_rtt) 6078 return (rack->rc_rack_rtt); 6079 else if (tp->t_srtt == 0) 6080 return (RACK_INITIAL_RTO); 6081 return (tp->t_srtt); 6082 } 6083 6084 static struct rack_sendmap * 6085 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 6086 { 6087 /* 6088 * Check to see that we don't need to fall into recovery. We will 6089 * need to do so if our oldest transmit is past the time we should 6090 * have had an ack. 6091 */ 6092 struct tcp_rack *rack; 6093 struct rack_sendmap *rsm; 6094 int32_t idx; 6095 uint32_t srtt, thresh; 6096 6097 rack = (struct tcp_rack *)tp->t_fb_ptr; 6098 if (tqhash_empty(rack->r_ctl.tqh)) { 6099 return (NULL); 6100 } 6101 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6102 if (rsm == NULL) 6103 return (NULL); 6104 6105 6106 if (rsm->r_flags & RACK_ACKED) { 6107 rsm = rack_find_lowest_rsm(rack); 6108 if (rsm == NULL) 6109 return (NULL); 6110 } 6111 idx = rsm->r_rtr_cnt - 1; 6112 srtt = rack_grab_rtt(tp, rack); 6113 thresh = rack_calc_thresh_rack(rack, srtt, tsused, __LINE__, 1); 6114 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 6115 return (NULL); 6116 } 6117 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 6118 return (NULL); 6119 } 6120 /* Ok if we reach here we are over-due and this guy can be sent */ 6121 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 6122 return (rsm); 6123 } 6124 6125 static uint32_t 6126 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 6127 { 6128 int32_t t; 6129 int32_t tt; 6130 uint32_t ret_val; 6131 6132 t = (tp->t_srtt + (tp->t_rttvar << 2)); 6133 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 6134 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 6135 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 6136 ret_val = (uint32_t)tt; 6137 return (ret_val); 6138 } 6139 6140 static uint32_t 6141 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 6142 { 6143 /* 6144 * Start the FR timer, we do this based on getting the first one in 6145 * the rc_tmap. Note that if its NULL we must stop the timer. in all 6146 * events we need to stop the running timer (if its running) before 6147 * starting the new one. 6148 */ 6149 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 6150 uint32_t srtt_cur; 6151 int32_t idx; 6152 int32_t is_tlp_timer = 0; 6153 struct rack_sendmap *rsm; 6154 6155 if (rack->t_timers_stopped) { 6156 /* All timers have been stopped none are to run */ 6157 return (0); 6158 } 6159 if (rack->rc_in_persist) { 6160 /* We can't start any timer in persists */ 6161 return (rack_get_persists_timer_val(tp, rack)); 6162 } 6163 rack->rc_on_min_to = 0; 6164 if ((tp->t_state < TCPS_ESTABLISHED) || 6165 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 6166 goto activate_rxt; 6167 } 6168 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6169 if ((rsm == NULL) || sup_rack) { 6170 /* Nothing on the send map or no rack */ 6171 activate_rxt: 6172 time_since_sent = 0; 6173 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6174 if (rsm) { 6175 /* 6176 * Should we discount the RTX timer any? 6177 * 6178 * We want to discount it the smallest amount. 6179 * If a timer (Rack/TLP or RXT) has gone off more 6180 * recently thats the discount we want to use (now - timer time). 6181 * If the retransmit of the oldest packet was more recent then 6182 * we want to use that (now - oldest-packet-last_transmit_time). 
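 *
 * Example (made-up numbers): with t_rxtcur = 400000 usecs and
 * the more recent of the two reference points lying 150000
 * usecs in the past, the RXT timer is armed for 250000 usecs;
 * if the discount would consume all of t_rxtcur we fall back
 * to rc_min_to instead.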
6183 * 6184 */ 6185 idx = rsm->r_rtr_cnt - 1; 6186 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 6187 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 6188 else 6189 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 6190 if (TSTMP_GT(cts, tstmp_touse)) 6191 time_since_sent = cts - tstmp_touse; 6192 } 6193 if (SEQ_LT(tp->snd_una, tp->snd_max) || 6194 sbavail(&tptosocket(tp)->so_snd)) { 6195 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 6196 to = tp->t_rxtcur; 6197 if (to > time_since_sent) 6198 to -= time_since_sent; 6199 else 6200 to = rack->r_ctl.rc_min_to; 6201 if (to == 0) 6202 to = 1; 6203 /* Special case for KEEPINIT */ 6204 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6205 (TP_KEEPINIT(tp) != 0) && 6206 rsm) { 6207 /* 6208 * We have to put a ceiling on the rxt timer 6209 * of the keep-init timeout. 6210 */ 6211 uint32_t max_time, red; 6212 6213 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 6214 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 6215 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 6216 if (red < max_time) 6217 max_time -= red; 6218 else 6219 max_time = 1; 6220 } 6221 /* Reduce timeout to the keep value if needed */ 6222 if (max_time < to) 6223 to = max_time; 6224 } 6225 return (to); 6226 } 6227 return (0); 6228 } 6229 if (rsm->r_flags & RACK_ACKED) { 6230 rsm = rack_find_lowest_rsm(rack); 6231 if (rsm == NULL) { 6232 /* No lowest? */ 6233 goto activate_rxt; 6234 } 6235 } 6236 /* Convert from ms to usecs */ 6237 if ((rsm->r_flags & RACK_SACK_PASSED) || 6238 (rsm->r_flags & RACK_RWND_COLLAPSED) || 6239 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 6240 if ((tp->t_flags & TF_SENTFIN) && 6241 ((tp->snd_max - tp->snd_una) == 1) && 6242 (rsm->r_flags & RACK_HAS_FIN)) { 6243 /* 6244 * We don't start a rack timer if all we have is a 6245 * FIN outstanding. 6246 */ 6247 goto activate_rxt; 6248 } 6249 if ((rack->use_rack_rr == 0) && 6250 (IN_FASTRECOVERY(tp->t_flags)) && 6251 (rack->rack_no_prr == 0) && 6252 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 6253 /* 6254 * We are not cheating, in recovery and 6255 * not enough ack's to yet get our next 6256 * retransmission out. 6257 * 6258 * Note that classified attackers do not 6259 * get to use the rack-cheat. 6260 */ 6261 goto activate_tlp; 6262 } 6263 srtt = rack_grab_rtt(tp, rack); 6264 thresh = rack_calc_thresh_rack(rack, srtt, cts, __LINE__, 1); 6265 idx = rsm->r_rtr_cnt - 1; 6266 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 6267 if (SEQ_GEQ(exp, cts)) { 6268 to = exp - cts; 6269 if (to < rack->r_ctl.rc_min_to) { 6270 to = rack->r_ctl.rc_min_to; 6271 if (rack->r_rr_config == 3) 6272 rack->rc_on_min_to = 1; 6273 } 6274 } else { 6275 to = rack->r_ctl.rc_min_to; 6276 if (rack->r_rr_config == 3) 6277 rack->rc_on_min_to = 1; 6278 } 6279 } else { 6280 /* Ok we need to do a TLP not RACK */ 6281 activate_tlp: 6282 if ((rack->rc_tlp_in_progress != 0) && 6283 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 6284 /* 6285 * The previous send was a TLP and we have sent 6286 * N TLP's without sending new data. 6287 */ 6288 goto activate_rxt; 6289 } 6290 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 6291 if (rsm == NULL) { 6292 /* We found no rsm to TLP with. 
*/ 6293 goto activate_rxt; 6294 } 6295 if (rsm->r_flags & RACK_HAS_FIN) { 6296 /* If its a FIN we dont do TLP */ 6297 rsm = NULL; 6298 goto activate_rxt; 6299 } 6300 idx = rsm->r_rtr_cnt - 1; 6301 time_since_sent = 0; 6302 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 6303 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 6304 else 6305 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 6306 if (TSTMP_GT(cts, tstmp_touse)) 6307 time_since_sent = cts - tstmp_touse; 6308 is_tlp_timer = 1; 6309 if (tp->t_srtt) { 6310 if ((rack->rc_srtt_measure_made == 0) && 6311 (tp->t_srtt == 1)) { 6312 /* 6313 * If another stack as run and set srtt to 1, 6314 * then the srtt was 0, so lets use the initial. 6315 */ 6316 srtt = RACK_INITIAL_RTO; 6317 } else { 6318 srtt_cur = tp->t_srtt; 6319 srtt = srtt_cur; 6320 } 6321 } else 6322 srtt = RACK_INITIAL_RTO; 6323 /* 6324 * If the SRTT is not keeping up and the 6325 * rack RTT has spiked we want to use 6326 * the last RTT not the smoothed one. 6327 */ 6328 if (rack_tlp_use_greater && 6329 tp->t_srtt && 6330 (srtt < rack_grab_rtt(tp, rack))) { 6331 srtt = rack_grab_rtt(tp, rack); 6332 } 6333 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 6334 if (thresh > time_since_sent) { 6335 to = thresh - time_since_sent; 6336 } else { 6337 to = rack->r_ctl.rc_min_to; 6338 rack_log_alt_to_to_cancel(rack, 6339 thresh, /* flex1 */ 6340 time_since_sent, /* flex2 */ 6341 tstmp_touse, /* flex3 */ 6342 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 6343 (uint32_t)rsm->r_tim_lastsent[idx], 6344 srtt, 6345 idx, 99); 6346 } 6347 if (to < rack_tlp_min) { 6348 to = rack_tlp_min; 6349 } 6350 if (to > TICKS_2_USEC(tcp_rexmit_max)) { 6351 /* 6352 * If the TLP time works out to larger than the max 6353 * RTO lets not do TLP.. just RTO. 6354 */ 6355 goto activate_rxt; 6356 } 6357 } 6358 if (is_tlp_timer == 0) { 6359 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 6360 } else { 6361 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 6362 } 6363 if (to == 0) 6364 to = 1; 6365 return (to); 6366 } 6367 6368 static void 6369 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, tcp_seq snd_una) 6370 { 6371 if (rack->rc_in_persist == 0) { 6372 if (tp->t_flags & TF_GPUTINPROG) { 6373 /* 6374 * Stop the goodput now, the calling of the 6375 * measurement function clears the flag. 
6376 */ 6377 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, 6378 RACK_QUALITY_PERSIST); 6379 } 6380 #ifdef NETFLIX_SHARED_CWND 6381 if (rack->r_ctl.rc_scw) { 6382 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 6383 rack->rack_scwnd_is_idle = 1; 6384 } 6385 #endif 6386 rack->r_ctl.rc_went_idle_time = cts; 6387 if (rack->r_ctl.rc_went_idle_time == 0) 6388 rack->r_ctl.rc_went_idle_time = 1; 6389 if (rack->lt_bw_up) { 6390 /* Suspend our LT BW measurement */ 6391 uint64_t tmark; 6392 6393 rack->r_ctl.lt_bw_bytes += (snd_una - rack->r_ctl.lt_seq); 6394 rack->r_ctl.lt_seq = snd_una; 6395 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 6396 if (tmark >= rack->r_ctl.lt_timemark) { 6397 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 6398 } 6399 rack->r_ctl.lt_timemark = tmark; 6400 rack->lt_bw_up = 0; 6401 rack->r_persist_lt_bw_off = 1; 6402 } 6403 rack_timer_cancel(tp, rack, cts, __LINE__); 6404 rack->r_ctl.persist_lost_ends = 0; 6405 rack->probe_not_answered = 0; 6406 rack->forced_ack = 0; 6407 tp->t_rxtshift = 0; 6408 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 6409 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 6410 rack->rc_in_persist = 1; 6411 } 6412 } 6413 6414 static void 6415 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6416 { 6417 if (tcp_in_hpts(rack->rc_tp)) { 6418 tcp_hpts_remove(rack->rc_tp); 6419 rack->r_ctl.rc_hpts_flags = 0; 6420 } 6421 #ifdef NETFLIX_SHARED_CWND 6422 if (rack->r_ctl.rc_scw) { 6423 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 6424 rack->rack_scwnd_is_idle = 0; 6425 } 6426 #endif 6427 if (rack->rc_gp_dyn_mul && 6428 (rack->use_fixed_rate == 0) && 6429 (rack->rc_always_pace)) { 6430 /* 6431 * Do we count this as if a probe-rtt just 6432 * finished? 6433 */ 6434 uint32_t time_idle, idle_min; 6435 6436 time_idle = cts - rack->r_ctl.rc_went_idle_time; 6437 idle_min = rack_min_probertt_hold; 6438 if (rack_probertt_gpsrtt_cnt_div) { 6439 uint64_t extra; 6440 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 6441 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 6442 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 6443 idle_min += (uint32_t)extra; 6444 } 6445 if (time_idle >= idle_min) { 6446 /* Yes, we count it as a probe-rtt. 
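 * Worked example with made-up values: rc_gp_srtt = 20000 usecs,
 * rack_probertt_gpsrtt_cnt_mul = 4 and rack_probertt_gpsrtt_cnt_div
 * = 4 give extra = 20000, so idle_min becomes
 * rack_min_probertt_hold + 20000.  Any idle stretch at least that
 * long is credited as a probe-rtt, which is why the probe-rtt
 * timestamps are refreshed (or rack_exit_probertt() is called)
 * just below.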
*/ 6447 uint32_t us_cts; 6448 6449 us_cts = tcp_get_usecs(NULL); 6450 if (rack->in_probe_rtt == 0) { 6451 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 6452 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 6453 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 6454 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 6455 } else { 6456 rack_exit_probertt(rack, us_cts); 6457 } 6458 } 6459 } 6460 if (rack->r_persist_lt_bw_off) { 6461 /* Continue where we left off */ 6462 rack->r_ctl.lt_timemark = tcp_get_u64_usecs(NULL); 6463 rack->lt_bw_up = 1; 6464 rack->r_persist_lt_bw_off = 0; 6465 } 6466 rack->rc_in_persist = 0; 6467 rack->r_ctl.rc_went_idle_time = 0; 6468 tp->t_rxtshift = 0; 6469 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 6470 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 6471 rack->r_ctl.rc_agg_delayed = 0; 6472 rack->r_early = 0; 6473 rack->r_late = 0; 6474 rack->r_ctl.rc_agg_early = 0; 6475 } 6476 6477 static void 6478 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 6479 struct hpts_diag *diag, struct timeval *tv) 6480 { 6481 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 6482 union tcp_log_stackspecific log; 6483 6484 memset(&log, 0, sizeof(log)); 6485 log.u_bbr.flex1 = diag->p_nxt_slot; 6486 log.u_bbr.flex2 = diag->p_cur_slot; 6487 log.u_bbr.flex3 = diag->slot_req; 6488 log.u_bbr.flex4 = diag->inp_hptsslot; 6489 log.u_bbr.flex5 = diag->slot_remaining; 6490 log.u_bbr.flex6 = diag->need_new_to; 6491 log.u_bbr.flex7 = diag->p_hpts_active; 6492 log.u_bbr.flex8 = diag->p_on_min_sleep; 6493 /* Hijack other fields as needed */ 6494 log.u_bbr.epoch = diag->have_slept; 6495 log.u_bbr.lt_epoch = diag->yet_to_sleep; 6496 log.u_bbr.pkts_out = diag->co_ret; 6497 log.u_bbr.applimited = diag->hpts_sleep_time; 6498 log.u_bbr.delivered = diag->p_prev_slot; 6499 log.u_bbr.inflight = diag->p_runningslot; 6500 log.u_bbr.bw_inuse = diag->wheel_slot; 6501 log.u_bbr.rttProp = diag->wheel_cts; 6502 log.u_bbr.timeStamp = cts; 6503 log.u_bbr.delRate = diag->maxslots; 6504 log.u_bbr.cur_del_rate = diag->p_curtick; 6505 log.u_bbr.cur_del_rate <<= 32; 6506 log.u_bbr.cur_del_rate |= diag->p_lasttick; 6507 TCP_LOG_EVENTP(rack->rc_tp, NULL, 6508 &rack->rc_inp->inp_socket->so_rcv, 6509 &rack->rc_inp->inp_socket->so_snd, 6510 BBR_LOG_HPTSDIAG, 0, 6511 0, &log, false, tv); 6512 } 6513 6514 } 6515 6516 static void 6517 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 6518 { 6519 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 6520 union tcp_log_stackspecific log; 6521 struct timeval tv; 6522 6523 memset(&log, 0, sizeof(log)); 6524 log.u_bbr.flex1 = sb->sb_flags; 6525 log.u_bbr.flex2 = len; 6526 log.u_bbr.flex3 = sb->sb_state; 6527 log.u_bbr.flex8 = type; 6528 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 6529 TCP_LOG_EVENTP(rack->rc_tp, NULL, 6530 &rack->rc_inp->inp_socket->so_rcv, 6531 &rack->rc_inp->inp_socket->so_snd, 6532 TCP_LOG_SB_WAKE, 0, 6533 len, &log, false, &tv); 6534 } 6535 } 6536 6537 static void 6538 rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 6539 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 6540 { 6541 struct hpts_diag diag; 6542 struct inpcb *inp = tptoinpcb(tp); 6543 struct timeval tv; 6544 uint32_t delayed_ack = 0; 6545 uint32_t hpts_timeout; 6546 uint32_t entry_slot = slot; 6547 uint8_t stopped; 6548 uint32_t left = 0; 6549 uint32_t us_cts; 6550 6551 if ((tp->t_state == TCPS_CLOSED) || 6552 
(tp->t_state == TCPS_LISTEN)) { 6553 return; 6554 } 6555 if (tcp_in_hpts(tp)) { 6556 /* Already on the pacer */ 6557 return; 6558 } 6559 stopped = rack->rc_tmr_stopped; 6560 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 6561 left = rack->r_ctl.rc_timer_exp - cts; 6562 } 6563 rack->r_ctl.rc_timer_exp = 0; 6564 rack->r_ctl.rc_hpts_flags = 0; 6565 us_cts = tcp_get_usecs(&tv); 6566 /* Now early/late accounting */ 6567 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0); 6568 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { 6569 /* 6570 * We have a early carry over set, 6571 * we can always add more time so we 6572 * can always make this compensation. 6573 * 6574 * Note if ack's are allowed to wake us do not 6575 * penalize the next timer for being awoke 6576 * by an ack aka the rc_agg_early (non-paced mode). 6577 */ 6578 slot += rack->r_ctl.rc_agg_early; 6579 rack->r_early = 0; 6580 rack->r_ctl.rc_agg_early = 0; 6581 } 6582 if ((rack->r_late) && 6583 ((rack->r_use_hpts_min == 0) || (rack->dgp_on == 0))) { 6584 /* 6585 * This is harder, we can 6586 * compensate some but it 6587 * really depends on what 6588 * the current pacing time is. 6589 */ 6590 if (rack->r_ctl.rc_agg_delayed >= slot) { 6591 /* 6592 * We can't compensate for it all. 6593 * And we have to have some time 6594 * on the clock. We always have a min 6595 * 10 slots (10 x 10 i.e. 100 usecs). 6596 */ 6597 if (slot <= HPTS_TICKS_PER_SLOT) { 6598 /* We gain delay */ 6599 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); 6600 slot = HPTS_TICKS_PER_SLOT; 6601 } else { 6602 /* We take off some */ 6603 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); 6604 slot = HPTS_TICKS_PER_SLOT; 6605 } 6606 } else { 6607 slot -= rack->r_ctl.rc_agg_delayed; 6608 rack->r_ctl.rc_agg_delayed = 0; 6609 /* Make sure we have 100 useconds at minimum */ 6610 if (slot < HPTS_TICKS_PER_SLOT) { 6611 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; 6612 slot = HPTS_TICKS_PER_SLOT; 6613 } 6614 if (rack->r_ctl.rc_agg_delayed == 0) 6615 rack->r_late = 0; 6616 } 6617 } else if (rack->r_late) { 6618 /* r_use_hpts_min is on and so is DGP */ 6619 uint32_t max_red; 6620 6621 max_red = (slot * rack->r_ctl.max_reduction) / 100; 6622 if (max_red >= rack->r_ctl.rc_agg_delayed) { 6623 slot -= rack->r_ctl.rc_agg_delayed; 6624 rack->r_ctl.rc_agg_delayed = 0; 6625 } else { 6626 slot -= max_red; 6627 rack->r_ctl.rc_agg_delayed -= max_red; 6628 } 6629 } 6630 if ((rack->r_use_hpts_min == 1) && 6631 (slot > 0) && 6632 (rack->dgp_on == 1)) { 6633 /* 6634 * We are enforcing a min pacing timer 6635 * based on our hpts min timeout. 6636 */ 6637 uint32_t min; 6638 6639 min = get_hpts_min_sleep_time(); 6640 if (min > slot) { 6641 slot = min; 6642 } 6643 } 6644 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 6645 if (tp->t_flags & TF_DELACK) { 6646 delayed_ack = TICKS_2_USEC(tcp_delacktime); 6647 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 6648 } 6649 if (delayed_ack && ((hpts_timeout == 0) || 6650 (delayed_ack < hpts_timeout))) 6651 hpts_timeout = delayed_ack; 6652 else 6653 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 6654 /* 6655 * If no timers are going to run and we will fall off the hptsi 6656 * wheel, we resort to a keep-alive timer if its configured. 
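 * In sketch form (simply mirroring the code below): once the
 * connection is established the wakeup is TICKS_2_USEC(TP_KEEPIDLE),
 * before that TICKS_2_USEC(TP_KEEPINIT), and while in probe-rtt we
 * instead wake at rack_min_probertt_hold so the keep-alive doubles
 * as the probe-rtt exit point.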
6657 */ 6658 if ((hpts_timeout == 0) && 6659 (slot == 0)) { 6660 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 6661 (tp->t_state <= TCPS_CLOSING)) { 6662 /* 6663 * Ok we have no timer (persists, rack, tlp, rxt or 6664 * del-ack), we don't have segments being paced. So 6665 * all that is left is the keepalive timer. 6666 */ 6667 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 6668 /* Get the established keep-alive time */ 6669 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 6670 } else { 6671 /* 6672 * Get the initial setup keep-alive time, 6673 * note that this is probably not going to 6674 * happen, since rack will be running a rxt timer 6675 * if a SYN of some sort is outstanding. It is 6676 * actually handled in rack_timeout_rxt(). 6677 */ 6678 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 6679 } 6680 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 6681 if (rack->in_probe_rtt) { 6682 /* 6683 * We want to instead not wake up a long time from 6684 * now but to wake up about the time we would 6685 * exit probe-rtt and initiate a keep-alive ack. 6686 * This will get us out of probe-rtt and update 6687 * our min-rtt. 6688 */ 6689 hpts_timeout = rack_min_probertt_hold; 6690 } 6691 } 6692 } 6693 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 6694 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 6695 /* 6696 * RACK, TLP, persists and RXT timers all are restartable 6697 * based on actions input .. i.e we received a packet (ack 6698 * or sack) and that changes things (rw, or snd_una etc). 6699 * Thus we can restart them with a new value. For 6700 * keep-alive, delayed_ack we keep track of what was left 6701 * and restart the timer with a smaller value. 6702 */ 6703 if (left < hpts_timeout) 6704 hpts_timeout = left; 6705 } 6706 if (hpts_timeout) { 6707 /* 6708 * Hack alert for now we can't time-out over 2,147,483 6709 * seconds (a bit more than 596 hours), which is probably ok 6710 * :). 6711 */ 6712 if (hpts_timeout > 0x7ffffffe) 6713 hpts_timeout = 0x7ffffffe; 6714 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 6715 } 6716 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); 6717 if ((rack->gp_ready == 0) && 6718 (rack->use_fixed_rate == 0) && 6719 (hpts_timeout < slot) && 6720 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 6721 /* 6722 * We have no good estimate yet for the 6723 * old clunky burst mitigation or the 6724 * real pacing. And the tlp or rxt is smaller 6725 * than the pacing calculation. Lets not 6726 * pace that long since we know the calculation 6727 * so far is not accurate. 6728 */ 6729 slot = hpts_timeout; 6730 } 6731 /** 6732 * Turn off all the flags for queuing by default. The 6733 * flags have important meanings to what happens when 6734 * LRO interacts with the transport. Most likely (by default now) 6735 * mbuf_queueing and ack compression are on. So the transport 6736 * has a couple of flags that control what happens (if those 6737 * are not on then these flags won't have any effect since it 6738 * won't go through the queuing LRO path). 6739 * 6740 * TF2_MBUF_QUEUE_READY - This flags says that I am busy 6741 * pacing output, so don't disturb. But 6742 * it also means LRO can wake me if there 6743 * is a SACK arrival. 6744 * 6745 * TF2_DONT_SACK_QUEUE - This flag is used in conjunction 6746 * with the above flag (QUEUE_READY) and 6747 * when present it says don't even wake me 6748 * if a SACK arrives. 
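 * A rough decision sketch of what is set further down (the
 * r_rr_config == 3 and rc_ack_can_sendout_data special cases are
 * handled there as well):
 *
 *   pacing slot, no rack timer        -> TF2_MBUF_QUEUE_READY
 *   pacing slot, rack timer/recovery  -> TF2_MBUF_QUEUE_READY and
 *                                        TF2_DONT_SACK_QUEUE
 *   timer only, no pacing slot        -> neither flag set
 *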
6749 * 6750 * The idea behind these flags is that if we are pacing we 6751 * set the MBUF_QUEUE_READY and only get woken up if 6752 * a SACK arrives (which could change things) or if 6753 * our pacing timer expires. If, however, we have a rack 6754 * timer running, then we don't even want a sack to wake 6755 * us since the rack timer has to expire before we can send. 6756 * 6757 * Other cases should usually have none of the flags set 6758 * so LRO can call into us. 6759 */ 6760 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE|TF2_MBUF_QUEUE_READY); 6761 if (slot) { 6762 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 6763 rack->r_ctl.rc_last_output_to = us_cts + slot; 6764 /* 6765 * A pacing timer (slot) is being set, in 6766 * such a case we cannot send (we are blocked by 6767 * the timer). So lets tell LRO that it should not 6768 * wake us unless there is a SACK. Note this only 6769 * will be effective if mbuf queueing is on or 6770 * compressed acks are being processed. 6771 */ 6772 tp->t_flags2 |= TF2_MBUF_QUEUE_READY; 6773 /* 6774 * But wait if we have a Rack timer running 6775 * even a SACK should not disturb us (with 6776 * the exception of r_rr_config 3). 6777 */ 6778 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) || 6779 (IN_RECOVERY(tp->t_flags))) { 6780 if (rack->r_rr_config != 3) 6781 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; 6782 else if (rack->rc_pace_dnd) { 6783 /* 6784 * When DND is on, we only let a sack 6785 * interrupt us if we are not in recovery. 6786 * 6787 * If DND is off, then we never hit here 6788 * and let all sacks wake us up. 6789 * 6790 */ 6791 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; 6792 } 6793 } 6794 if (rack->rc_ack_can_sendout_data) { 6795 /* 6796 * Ahh but wait, this is that special case 6797 * where the pacing timer can be disturbed 6798 * backout the changes (used for non-paced 6799 * burst limiting). 6800 */ 6801 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE | 6802 TF2_MBUF_QUEUE_READY); 6803 } 6804 if ((rack->use_rack_rr) && 6805 (rack->r_rr_config < 2) && 6806 ((hpts_timeout) && (hpts_timeout < slot))) { 6807 /* 6808 * Arrange for the hpts to kick back in after the 6809 * t-o if the t-o does not cause a send. 6810 */ 6811 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout), 6812 __LINE__, &diag); 6813 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6814 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 6815 } else { 6816 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(slot), 6817 __LINE__, &diag); 6818 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6819 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 6820 } 6821 } else if (hpts_timeout) { 6822 /* 6823 * With respect to t_flags2(?) here, lets let any new acks wake 6824 * us up here. Since we are not pacing (no pacing timer), output 6825 * can happen so we should let it. If its a Rack timer, then any inbound 6826 * packet probably won't change the sending (we will be blocked) 6827 * but it may change the prr stats so letting it in (the set defaults 6828 * at the start of this block) are good enough. 
6829 */ 6830 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 6831 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout), 6832 __LINE__, &diag); 6833 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6834 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 6835 } else { 6836 /* No timer starting */ 6837 #ifdef INVARIANTS 6838 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 6839 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 6840 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 6841 } 6842 #endif 6843 } 6844 rack->rc_tmr_stopped = 0; 6845 if (slot) 6846 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv, __LINE__); 6847 } 6848 6849 static void 6850 rack_mark_lost(struct tcpcb *tp, 6851 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts) 6852 { 6853 struct rack_sendmap *nrsm; 6854 uint32_t thresh, exp; 6855 6856 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(tp, rack), cts, __LINE__, 0); 6857 nrsm = rsm; 6858 TAILQ_FOREACH_FROM(nrsm, &rack->r_ctl.rc_tmap, r_tnext) { 6859 if ((nrsm->r_flags & RACK_SACK_PASSED) == 0) { 6860 /* Got up to all that were marked sack-passed */ 6861 break; 6862 } 6863 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { 6864 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; 6865 if (TSTMP_LT(exp, cts) || (exp == cts)) { 6866 /* We now consider it lost */ 6867 nrsm->r_flags |= RACK_WAS_LOST; 6868 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; 6869 } else { 6870 /* Past here it won't be lost so stop */ 6871 break; 6872 } 6873 } 6874 } 6875 } 6876 6877 /* 6878 * RACK Timer, here we simply do logging and house keeping. 6879 * the normal rack_output() function will call the 6880 * appropriate thing to check if we need to do a RACK retransmit. 6881 * We return 1, saying don't proceed with rack_output only 6882 * when all timers have been stopped (destroyed PCB?). 6883 */ 6884 static int 6885 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6886 { 6887 /* 6888 * This timer simply provides an internal trigger to send out data. 6889 * The check_recovery_mode call will see if there are needed 6890 * retransmissions, if so we will enter fast-recovery. The output 6891 * call may or may not do the same thing depending on sysctl 6892 * settings. 6893 */ 6894 struct rack_sendmap *rsm; 6895 6896 counter_u64_add(rack_to_tot, 1); 6897 if (rack->r_state && (rack->r_state != tp->t_state)) 6898 rack_set_state(tp, rack); 6899 rack->rc_on_min_to = 0; 6900 rsm = rack_check_recovery_mode(tp, cts); 6901 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 6902 if (rsm) { 6903 /* We need to stroke any lost that are now declared as lost */ 6904 rack_mark_lost(tp, rack, rsm, cts); 6905 rack->r_ctl.rc_resend = rsm; 6906 rack->r_timer_override = 1; 6907 if (rack->use_rack_rr) { 6908 /* 6909 * Don't accumulate extra pacing delay 6910 * we are allowing the rack timer to 6911 * over-ride pacing i.e. rrr takes precedence 6912 * if the pacing interval is longer than the rrr 6913 * time (in other words we get the min pacing 6914 * time versus rrr pacing time). 
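 * Toy example (numbers invented): if the pacer had scheduled the
 * next send 4000 usecs out but the rack timer fires first with
 * use_rack_rr set, clearing PACE_PKT_OUTPUT below means the queued
 * retransmission is not held for the remainder of that 4000 usec
 * pacing slot once rack_output() runs.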
6915 */ 6916 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 6917 } 6918 } 6919 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 6920 if (rsm == NULL) { 6921 /* restart a timer and return 1 */ 6922 rack_start_hpts_timer(rack, tp, cts, 6923 0, 0, 0); 6924 return (1); 6925 } 6926 return (0); 6927 } 6928 6929 6930 6931 static void 6932 rack_adjust_orig_mlen(struct rack_sendmap *rsm) 6933 { 6934 6935 if ((M_TRAILINGROOM(rsm->m) != rsm->orig_t_space)) { 6936 /* 6937 * The trailing space changed, mbufs can grow 6938 * at the tail but they can't shrink from 6939 * it, KASSERT that. Adjust the orig_m_len to 6940 * compensate for this change. 6941 */ 6942 KASSERT((rsm->orig_t_space > M_TRAILINGROOM(rsm->m)), 6943 ("mbuf:%p rsm:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n", 6944 rsm->m, 6945 rsm, 6946 (intmax_t)M_TRAILINGROOM(rsm->m), 6947 rsm->orig_t_space, 6948 rsm->orig_m_len, 6949 rsm->m->m_len)); 6950 rsm->orig_m_len += (rsm->orig_t_space - M_TRAILINGROOM(rsm->m)); 6951 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 6952 } 6953 if (rsm->m->m_len < rsm->orig_m_len) { 6954 /* 6955 * Mbuf shrank, trimmed off the top by an ack, our 6956 * offset changes. 6957 */ 6958 KASSERT((rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)), 6959 ("mbuf:%p len:%u rsm:%p oml:%u soff:%u\n", 6960 rsm->m, rsm->m->m_len, 6961 rsm, rsm->orig_m_len, 6962 rsm->soff)); 6963 if (rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)) 6964 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); 6965 else 6966 rsm->soff = 0; 6967 rsm->orig_m_len = rsm->m->m_len; 6968 #ifdef INVARIANTS 6969 } else if (rsm->m->m_len > rsm->orig_m_len) { 6970 panic("rsm:%p m:%p m_len grew outside of t_space compensation", 6971 rsm, rsm->m); 6972 #endif 6973 } 6974 } 6975 6976 static void 6977 rack_setup_offset_for_rsm(struct tcp_rack *rack, struct rack_sendmap *src_rsm, struct rack_sendmap *rsm) 6978 { 6979 struct mbuf *m; 6980 uint32_t soff; 6981 6982 if (src_rsm->m && 6983 ((src_rsm->orig_m_len != src_rsm->m->m_len) || 6984 (M_TRAILINGROOM(src_rsm->m) != src_rsm->orig_t_space))) { 6985 /* Fix up the orig_m_len and possibly the mbuf offset */ 6986 rack_adjust_orig_mlen(src_rsm); 6987 } 6988 m = src_rsm->m; 6989 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); 6990 while (soff >= m->m_len) { 6991 /* Move out past this mbuf */ 6992 soff -= m->m_len; 6993 m = m->m_next; 6994 KASSERT((m != NULL), 6995 ("rsm:%p nrsm:%p hit at soff:%u null m", 6996 src_rsm, rsm, soff)); 6997 if (m == NULL) { 6998 /* This should *not* happen which is why there is a kassert */ 6999 src_rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 7000 (src_rsm->r_start - rack->rc_tp->snd_una), 7001 &src_rsm->soff); 7002 src_rsm->orig_m_len = src_rsm->m->m_len; 7003 src_rsm->orig_t_space = M_TRAILINGROOM(src_rsm->m); 7004 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 7005 (rsm->r_start - rack->rc_tp->snd_una), 7006 &rsm->soff); 7007 rsm->orig_m_len = rsm->m->m_len; 7008 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7009 return; 7010 } 7011 } 7012 rsm->m = m; 7013 rsm->soff = soff; 7014 rsm->orig_m_len = m->m_len; 7015 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7016 } 7017 7018 static __inline void 7019 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 7020 struct rack_sendmap *rsm, uint32_t start) 7021 { 7022 int idx; 7023 7024 nrsm->r_start = start; 7025 nrsm->r_end = rsm->r_end; 7026 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 7027 nrsm->r_act_rxt_cnt = rsm->r_act_rxt_cnt; 7028 nrsm->r_flags = rsm->r_flags; 7029 nrsm->r_dupack = rsm->r_dupack; 7030 nrsm->r_no_rtt_allowed = 
rsm->r_no_rtt_allowed; 7031 nrsm->r_rtr_bytes = 0; 7032 nrsm->r_fas = rsm->r_fas; 7033 nrsm->r_bas = rsm->r_bas; 7034 tqhash_update_end(rack->r_ctl.tqh, rsm, nrsm->r_start); 7035 nrsm->r_just_ret = rsm->r_just_ret; 7036 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 7037 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 7038 } 7039 /* Now if we have SYN flag we keep it on the left edge */ 7040 if (nrsm->r_flags & RACK_HAS_SYN) 7041 nrsm->r_flags &= ~RACK_HAS_SYN; 7042 /* Now if we have a FIN flag we keep it on the right edge */ 7043 if (rsm->r_flags & RACK_HAS_FIN) 7044 rsm->r_flags &= ~RACK_HAS_FIN; 7045 /* Push bit must go to the right edge as well */ 7046 if (rsm->r_flags & RACK_HAD_PUSH) 7047 rsm->r_flags &= ~RACK_HAD_PUSH; 7048 /* Clone over the state of the hw_tls flag */ 7049 nrsm->r_hw_tls = rsm->r_hw_tls; 7050 /* 7051 * Now we need to find nrsm's new location in the mbuf chain 7052 * we basically calculate a new offset, which is soff + 7053 * how much is left in original rsm. Then we walk out the mbuf 7054 * chain to find the righ position, it may be the same mbuf 7055 * or maybe not. 7056 */ 7057 KASSERT(((rsm->m != NULL) || 7058 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 7059 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 7060 if (rsm->m) 7061 rack_setup_offset_for_rsm(rack, rsm, nrsm); 7062 } 7063 7064 static struct rack_sendmap * 7065 rack_merge_rsm(struct tcp_rack *rack, 7066 struct rack_sendmap *l_rsm, 7067 struct rack_sendmap *r_rsm) 7068 { 7069 /* 7070 * We are merging two ack'd RSM's, 7071 * the l_rsm is on the left (lower seq 7072 * values) and the r_rsm is on the right 7073 * (higher seq value). The simplest way 7074 * to merge these is to move the right 7075 * one into the left. I don't think there 7076 * is any reason we need to try to find 7077 * the oldest (or last oldest retransmitted). 7078 */ 7079 rack_log_map_chg(rack->rc_tp, rack, NULL, 7080 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 7081 tqhash_update_end(rack->r_ctl.tqh, l_rsm, r_rsm->r_end); 7082 if (l_rsm->r_dupack < r_rsm->r_dupack) 7083 l_rsm->r_dupack = r_rsm->r_dupack; 7084 if (r_rsm->r_rtr_bytes) 7085 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 7086 if (r_rsm->r_in_tmap) { 7087 /* This really should not happen */ 7088 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 7089 r_rsm->r_in_tmap = 0; 7090 } 7091 7092 /* Now the flags */ 7093 if (r_rsm->r_flags & RACK_HAS_FIN) 7094 l_rsm->r_flags |= RACK_HAS_FIN; 7095 if (r_rsm->r_flags & RACK_TLP) 7096 l_rsm->r_flags |= RACK_TLP; 7097 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 7098 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 7099 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 7100 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 7101 /* 7102 * If both are app-limited then let the 7103 * free lower the count. If right is app 7104 * limited and left is not, transfer. 7105 */ 7106 l_rsm->r_flags |= RACK_APP_LIMITED; 7107 r_rsm->r_flags &= ~RACK_APP_LIMITED; 7108 if (r_rsm == rack->r_ctl.rc_first_appl) 7109 rack->r_ctl.rc_first_appl = l_rsm; 7110 } 7111 tqhash_remove(rack->r_ctl.tqh, r_rsm, REMOVE_TYPE_MERGE); 7112 /* 7113 * We keep the largest value, which is the newest 7114 * send. We do this in case a segment that is 7115 * joined together and not part of a GP estimate 7116 * later gets expanded into the GP estimate. 7117 * 7118 * We prohibit the merging of unlike kinds i.e. 7119 * all pieces that are in the GP estimate can be 7120 * merged and all pieces that are not in a GP estimate 7121 * can be merged, but not disimilar pieces. 
Combine 7122 * this with taking the highest here and we should 7123 * be ok unless of course the client reneges. Then 7124 * all bets are off. 7125 */ 7126 if(l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] < 7127 r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]) { 7128 l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] = r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]; 7129 } 7130 /* 7131 * When merging two RSM's we also need to consider the ack time and keep 7132 * newest. If the ack gets merged into a measurement then that is the 7133 * one we will want to be using. 7134 */ 7135 if(l_rsm->r_ack_arrival < r_rsm->r_ack_arrival) 7136 l_rsm->r_ack_arrival = r_rsm->r_ack_arrival; 7137 7138 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 7139 /* Transfer the split limit to the map we free */ 7140 r_rsm->r_limit_type = l_rsm->r_limit_type; 7141 l_rsm->r_limit_type = 0; 7142 } 7143 rack_free(rack, r_rsm); 7144 l_rsm->r_flags |= RACK_MERGED; 7145 return (l_rsm); 7146 } 7147 7148 /* 7149 * TLP Timer, here we simply setup what segment we want to 7150 * have the TLP expire on, the normal rack_output() will then 7151 * send it out. 7152 * 7153 * We return 1, saying don't proceed with rack_output only 7154 * when all timers have been stopped (destroyed PCB?). 7155 */ 7156 static int 7157 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp) 7158 { 7159 /* 7160 * Tail Loss Probe. 7161 */ 7162 struct rack_sendmap *rsm = NULL; 7163 int insret __diagused; 7164 struct socket *so = tptosocket(tp); 7165 uint32_t amm; 7166 uint32_t out, avail; 7167 int collapsed_win = 0; 7168 7169 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 7170 /* Its not time yet */ 7171 return (0); 7172 } 7173 if (ctf_progress_timeout_check(tp, true)) { 7174 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7175 return (-ETIMEDOUT); /* tcp_drop() */ 7176 } 7177 /* 7178 * A TLP timer has expired. We have been idle for 2 rtts. So we now 7179 * need to figure out how to force a full MSS segment out. 7180 */ 7181 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); 7182 rack->r_ctl.retran_during_recovery = 0; 7183 rack->r_might_revert = 0; 7184 rack->r_ctl.dsack_byte_cnt = 0; 7185 counter_u64_add(rack_tlp_tot, 1); 7186 if (rack->r_state && (rack->r_state != tp->t_state)) 7187 rack_set_state(tp, rack); 7188 avail = sbavail(&so->so_snd); 7189 out = tp->snd_max - tp->snd_una; 7190 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) { 7191 /* special case, we need a retransmission */ 7192 collapsed_win = 1; 7193 goto need_retran; 7194 } 7195 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { 7196 rack->r_ctl.dsack_persist--; 7197 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 7198 rack->r_ctl.num_dsack = 0; 7199 } 7200 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 7201 } 7202 if ((tp->t_flags & TF_GPUTINPROG) && 7203 (rack->r_ctl.rc_tlp_cnt_out == 1)) { 7204 /* 7205 * If this is the second in a row 7206 * TLP and we are doing a measurement 7207 * its time to abandon the measurement. 7208 * Something is likely broken on 7209 * the clients network and measuring a 7210 * broken network does us no good. 7211 */ 7212 tp->t_flags &= ~TF_GPUTINPROG; 7213 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7214 rack->r_ctl.rc_gp_srtt /*flex1*/, 7215 tp->gput_seq, 7216 0, 0, 18, __LINE__, NULL, 0); 7217 } 7218 /* 7219 * Check our send oldest always settings, and if 7220 * there is an oldest to send jump to the need_retran. 
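 * (rack_always_send_oldest is a stack-wide setting; when it is set
 * and the tmap is non-empty we jump straight to need_retran, where
 * TAILQ_FIRST(rc_tmap), the oldest outstanding send, is chosen as
 * the TLP segment instead of probing with new data.)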
7221 */ 7222 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 7223 goto need_retran; 7224 7225 if (avail > out) { 7226 /* New data is available */ 7227 amm = avail - out; 7228 if (amm > ctf_fixed_maxseg(tp)) { 7229 amm = ctf_fixed_maxseg(tp); 7230 if ((amm + out) > tp->snd_wnd) { 7231 /* We are rwnd limited */ 7232 goto need_retran; 7233 } 7234 } else if (amm < ctf_fixed_maxseg(tp)) { 7235 /* not enough to fill a MTU */ 7236 goto need_retran; 7237 } 7238 if (IN_FASTRECOVERY(tp->t_flags)) { 7239 /* Unlikely */ 7240 if (rack->rack_no_prr == 0) { 7241 if (out + amm <= tp->snd_wnd) { 7242 rack->r_ctl.rc_prr_sndcnt = amm; 7243 rack->r_ctl.rc_tlp_new_data = amm; 7244 rack_log_to_prr(rack, 4, 0, __LINE__); 7245 } 7246 } else 7247 goto need_retran; 7248 } else { 7249 /* Set the send-new override */ 7250 if (out + amm <= tp->snd_wnd) 7251 rack->r_ctl.rc_tlp_new_data = amm; 7252 else 7253 goto need_retran; 7254 } 7255 rack->r_ctl.rc_tlpsend = NULL; 7256 counter_u64_add(rack_tlp_newdata, 1); 7257 goto send; 7258 } 7259 need_retran: 7260 /* 7261 * Ok we need to arrange the last un-acked segment to be re-sent, or 7262 * optionally the first un-acked segment. 7263 */ 7264 if (collapsed_win == 0) { 7265 if (rack_always_send_oldest) 7266 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 7267 else { 7268 rsm = tqhash_max(rack->r_ctl.tqh); 7269 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 7270 rsm = rack_find_high_nonack(rack, rsm); 7271 } 7272 } 7273 if (rsm == NULL) { 7274 #ifdef TCP_BLACKBOX 7275 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 7276 #endif 7277 goto out; 7278 } 7279 } else { 7280 /* 7281 * We had a collapsed window, lets find 7282 * the point before the collapse. 7283 */ 7284 if (SEQ_GT((rack->r_ctl.last_collapse_point - 1), rack->rc_tp->snd_una)) 7285 rsm = tqhash_find(rack->r_ctl.tqh, (rack->r_ctl.last_collapse_point - 1)); 7286 else { 7287 rsm = tqhash_min(rack->r_ctl.tqh); 7288 } 7289 if (rsm == NULL) { 7290 /* Huh */ 7291 goto out; 7292 } 7293 } 7294 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 7295 /* 7296 * We need to split this the last segment in two. 7297 */ 7298 struct rack_sendmap *nrsm; 7299 7300 nrsm = rack_alloc_full_limit(rack); 7301 if (nrsm == NULL) { 7302 /* 7303 * No memory to split, we will just exit and punt 7304 * off to the RXT timer. 7305 */ 7306 goto out; 7307 } 7308 rack_clone_rsm(rack, nrsm, rsm, 7309 (rsm->r_end - ctf_fixed_maxseg(tp))); 7310 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7311 #ifndef INVARIANTS 7312 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 7313 #else 7314 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 7315 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 7316 nrsm, insret, rack, rsm); 7317 } 7318 #endif 7319 if (rsm->r_in_tmap) { 7320 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7321 nrsm->r_in_tmap = 1; 7322 } 7323 rsm = nrsm; 7324 } 7325 rack->r_ctl.rc_tlpsend = rsm; 7326 send: 7327 /* Make sure output path knows we are doing a TLP */ 7328 *doing_tlp = 1; 7329 rack->r_timer_override = 1; 7330 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 7331 return (0); 7332 out: 7333 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 7334 return (0); 7335 } 7336 7337 /* 7338 * Delayed ack Timer, here we simply need to setup the 7339 * ACK_NOW flag and remove the DELACK flag. From there 7340 * the output routine will send the ack out. 7341 * 7342 * We only return 1, saying don't proceed, if all timers 7343 * are stopped (destroyed PCB?). 
7344 */ 7345 static int 7346 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7347 { 7348 7349 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 7350 tp->t_flags &= ~TF_DELACK; 7351 tp->t_flags |= TF_ACKNOW; 7352 KMOD_TCPSTAT_INC(tcps_delack); 7353 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 7354 return (0); 7355 } 7356 7357 static inline int 7358 rack_send_ack_challange(struct tcp_rack *rack) 7359 { 7360 struct tcptemp *t_template; 7361 7362 t_template = tcpip_maketemplate(rack->rc_inp); 7363 if (t_template) { 7364 if (rack->forced_ack == 0) { 7365 rack->forced_ack = 1; 7366 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 7367 } else { 7368 rack->probe_not_answered = 1; 7369 } 7370 tcp_respond(rack->rc_tp, t_template->tt_ipgen, 7371 &t_template->tt_t, (struct mbuf *)NULL, 7372 rack->rc_tp->rcv_nxt, rack->rc_tp->snd_una - 1, 0); 7373 free(t_template, M_TEMP); 7374 /* This does send an ack so kill any D-ack timer */ 7375 if (rack->rc_tp->t_flags & TF_DELACK) 7376 rack->rc_tp->t_flags &= ~TF_DELACK; 7377 return(1); 7378 } else 7379 return (0); 7380 7381 } 7382 7383 /* 7384 * Persists timer, here we simply send the 7385 * same thing as a keepalive will. 7386 * the one byte send. 7387 * 7388 * We only return 1, saying don't proceed, if all timers 7389 * are stopped (destroyed PCB?). 7390 */ 7391 static int 7392 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7393 { 7394 int32_t retval = 1; 7395 7396 if (rack->rc_in_persist == 0) 7397 return (0); 7398 if (ctf_progress_timeout_check(tp, false)) { 7399 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 7400 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7401 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 7402 return (-ETIMEDOUT); /* tcp_drop() */ 7403 } 7404 /* 7405 * Persistence timer into zero window. Force a byte to be output, if 7406 * possible. 7407 */ 7408 KMOD_TCPSTAT_INC(tcps_persisttimeo); 7409 /* 7410 * Hack: if the peer is dead/unreachable, we do not time out if the 7411 * window is closed. After a full backoff, drop the connection if 7412 * the idle time (no responses to probes) reaches the maximum 7413 * backoff that we would use if retransmitting. 7414 */ 7415 if (tp->t_rxtshift >= V_tcp_retries && 7416 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 7417 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 7418 KMOD_TCPSTAT_INC(tcps_persistdrop); 7419 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 7420 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 7421 retval = -ETIMEDOUT; /* tcp_drop() */ 7422 goto out; 7423 } 7424 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 7425 tp->snd_una == tp->snd_max) 7426 rack_exit_persist(tp, rack, cts); 7427 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 7428 /* 7429 * If the user has closed the socket then drop a persisting 7430 * connection after a much reduced timeout. 
7431 */ 7432 if (tp->t_state > TCPS_CLOSE_WAIT && 7433 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 7434 KMOD_TCPSTAT_INC(tcps_persistdrop); 7435 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 7436 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 7437 retval = -ETIMEDOUT; /* tcp_drop() */ 7438 goto out; 7439 } 7440 if (rack_send_ack_challange(rack)) { 7441 /* only set it if we were answered */ 7442 if (rack->probe_not_answered) { 7443 counter_u64_add(rack_persists_loss, 1); 7444 rack->r_ctl.persist_lost_ends++; 7445 } 7446 counter_u64_add(rack_persists_sends, 1); 7447 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 7448 } 7449 if (tp->t_rxtshift < V_tcp_retries) 7450 tp->t_rxtshift++; 7451 out: 7452 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 7453 rack_start_hpts_timer(rack, tp, cts, 7454 0, 0, 0); 7455 return (retval); 7456 } 7457 7458 /* 7459 * If a keepalive goes off, we had no other timers 7460 * happening. We always return 1 here since this 7461 * routine either drops the connection or sends 7462 * out a segment with respond. 7463 */ 7464 static int 7465 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7466 { 7467 struct inpcb *inp = tptoinpcb(tp); 7468 7469 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 7470 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 7471 /* 7472 * Keep-alive timer went off; send something or drop connection if 7473 * idle for too long. 7474 */ 7475 KMOD_TCPSTAT_INC(tcps_keeptimeo); 7476 if (tp->t_state < TCPS_ESTABLISHED) 7477 goto dropit; 7478 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 7479 tp->t_state <= TCPS_CLOSING) { 7480 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 7481 goto dropit; 7482 /* 7483 * Send a packet designed to force a response if the peer is 7484 * up and reachable: either an ACK if the connection is 7485 * still alive, or an RST if the peer has closed the 7486 * connection due to timeout or reboot. Using sequence 7487 * number tp->snd_una-1 causes the transmitted zero-length 7488 * segment to lie outside the receive window; by the 7489 * protocol spec, this requires the correspondent TCP to 7490 * respond. 7491 */ 7492 KMOD_TCPSTAT_INC(tcps_keepprobe); 7493 rack_send_ack_challange(rack); 7494 } 7495 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 7496 return (1); 7497 dropit: 7498 KMOD_TCPSTAT_INC(tcps_keepdrops); 7499 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 7500 return (-ETIMEDOUT); /* tcp_drop() */ 7501 } 7502 7503 /* 7504 * Retransmit helper function, clear up all the ack 7505 * flags and take care of important book keeping. 7506 */ 7507 static void 7508 rack_remxt_tmr(struct tcpcb *tp) 7509 { 7510 /* 7511 * The retransmit timer went off, all sack'd blocks must be 7512 * un-acked. 
7513 */ 7514 struct rack_sendmap *rsm, *trsm = NULL; 7515 struct tcp_rack *rack; 7516 7517 rack = (struct tcp_rack *)tp->t_fb_ptr; 7518 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 7519 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 7520 rack->r_timer_override = 1; 7521 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 7522 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 7523 rack->r_late = 0; 7524 rack->r_early = 0; 7525 rack->r_ctl.rc_agg_delayed = 0; 7526 rack->r_ctl.rc_agg_early = 0; 7527 if (rack->r_state && (rack->r_state != tp->t_state)) 7528 rack_set_state(tp, rack); 7529 if (tp->t_rxtshift <= rack_rxt_scoreboard_clear_thresh) { 7530 /* 7531 * We do not clear the scoreboard until we have had 7532 * more than rack_rxt_scoreboard_clear_thresh time-outs. 7533 */ 7534 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 7535 if (rack->r_ctl.rc_resend != NULL) 7536 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 7537 7538 return; 7539 } 7540 /* 7541 * Ideally we would like to be able to 7542 * mark SACK-PASS on anything not acked here. 7543 * 7544 * However, if we do that we would burst out 7545 * all that data 1ms apart. This would be unwise, 7546 * so for now we will just let the normal rxt timer 7547 * and tlp timer take care of it. 7548 * 7549 * Also we really need to stick them back in sequence 7550 * order. This way we send in the proper order and any 7551 * sacks that come floating in will "re-ack" the data. 7552 * To do this we zap the tmap with an INIT and then 7553 * walk through and place every rsm in the tail queue 7554 * hash table back in its seq ordered place. 7555 */ 7556 TAILQ_INIT(&rack->r_ctl.rc_tmap); 7557 7558 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 7559 rsm->r_dupack = 0; 7560 if (rack_verbose_logging) 7561 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7562 /* We must re-add it back to the tlist */ 7563 if (trsm == NULL) { 7564 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7565 } else { 7566 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 7567 } 7568 rsm->r_in_tmap = 1; 7569 trsm = rsm; 7570 if (rsm->r_flags & RACK_ACKED) 7571 rsm->r_flags |= RACK_WAS_ACKED; 7572 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED | RACK_WAS_LOST); 7573 rsm->r_flags |= RACK_MUST_RXT; 7574 } 7575 /* zero the lost since it's all gone */ 7576 rack->r_ctl.rc_considered_lost = 0; 7577 /* Clear the count (we just un-acked them) */ 7578 rack->r_ctl.rc_sacked = 0; 7579 rack->r_ctl.rc_sacklast = NULL; 7580 /* Clear the tlp rtx mark */ 7581 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); 7582 if (rack->r_ctl.rc_resend != NULL) 7583 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 7584 rack->r_ctl.rc_prr_sndcnt = 0; 7585 rack_log_to_prr(rack, 6, 0, __LINE__); 7586 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); 7587 if (rack->r_ctl.rc_resend != NULL) 7588 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 7589 if (((tp->t_flags & TF_SACK_PERMIT) == 0) && 7590 ((tp->t_flags & TF_SENTFIN) == 0)) { 7591 /* 7592 * For non-sack customers new data 7593 * needs to go out as retransmits until 7594 * we retransmit up to snd_max. 
7595 */ 7596 rack->r_must_retran = 1; 7597 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 7598 rack->r_ctl.rc_sacked); 7599 } 7600 } 7601 7602 static void 7603 rack_convert_rtts(struct tcpcb *tp) 7604 { 7605 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC); 7606 tp->t_rxtcur = RACK_REXMTVAL(tp); 7607 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 7608 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 7609 } 7610 if (tp->t_rxtcur > rack_rto_max) { 7611 tp->t_rxtcur = rack_rto_max; 7612 } 7613 } 7614 7615 static void 7616 rack_cc_conn_init(struct tcpcb *tp) 7617 { 7618 struct tcp_rack *rack; 7619 uint32_t srtt; 7620 7621 rack = (struct tcp_rack *)tp->t_fb_ptr; 7622 srtt = tp->t_srtt; 7623 cc_conn_init(tp); 7624 /* 7625 * Now convert to rack's internal format, 7626 * if required. 7627 */ 7628 if ((srtt == 0) && (tp->t_srtt != 0)) 7629 rack_convert_rtts(tp); 7630 /* 7631 * We want a chance to stay in slowstart as 7632 * we create a connection. TCP spec says that 7633 * initially ssthresh is infinite. For our 7634 * purposes that is the snd_wnd. 7635 */ 7636 if (tp->snd_ssthresh < tp->snd_wnd) { 7637 tp->snd_ssthresh = tp->snd_wnd; 7638 } 7639 /* 7640 * We also want to assure a IW worth of 7641 * data can get inflight. 7642 */ 7643 if (rc_init_window(rack) < tp->snd_cwnd) 7644 tp->snd_cwnd = rc_init_window(rack); 7645 } 7646 7647 /* 7648 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 7649 * we will setup to retransmit the lowest seq number outstanding. 7650 */ 7651 static int 7652 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7653 { 7654 struct inpcb *inp = tptoinpcb(tp); 7655 int32_t rexmt; 7656 int32_t retval = 0; 7657 bool isipv6; 7658 7659 if ((tp->t_flags & TF_GPUTINPROG) && 7660 (tp->t_rxtshift)) { 7661 /* 7662 * We have had a second timeout 7663 * measurements on successive rxt's are not profitable. 7664 * It is unlikely to be of any use (the network is 7665 * broken or the client went away). 7666 */ 7667 tp->t_flags &= ~TF_GPUTINPROG; 7668 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7669 rack->r_ctl.rc_gp_srtt /*flex1*/, 7670 tp->gput_seq, 7671 0, 0, 18, __LINE__, NULL, 0); 7672 } 7673 if (ctf_progress_timeout_check(tp, false)) { 7674 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 7675 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7676 return (-ETIMEDOUT); /* tcp_drop() */ 7677 } 7678 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 7679 rack->r_ctl.retran_during_recovery = 0; 7680 rack->rc_ack_required = 1; 7681 rack->r_ctl.dsack_byte_cnt = 0; 7682 if (IN_RECOVERY(tp->t_flags) && 7683 (rack->rto_from_rec == 0)) { 7684 /* 7685 * Mark that we had a rto while in recovery 7686 * and save the ssthresh so if we go back 7687 * into recovery we will have a chance 7688 * to slowstart back to the level. 7689 */ 7690 rack->rto_from_rec = 1; 7691 rack->r_ctl.rto_ssthresh = tp->snd_ssthresh; 7692 } 7693 if (IN_FASTRECOVERY(tp->t_flags)) 7694 tp->t_flags |= TF_WASFRECOVERY; 7695 else 7696 tp->t_flags &= ~TF_WASFRECOVERY; 7697 if (IN_CONGRECOVERY(tp->t_flags)) 7698 tp->t_flags |= TF_WASCRECOVERY; 7699 else 7700 tp->t_flags &= ~TF_WASCRECOVERY; 7701 if (TCPS_HAVEESTABLISHED(tp->t_state) && 7702 (tp->snd_una == tp->snd_max)) { 7703 /* Nothing outstanding .. 
nothing to do */ 7704 return (0); 7705 } 7706 if (rack->r_ctl.dsack_persist) { 7707 rack->r_ctl.dsack_persist--; 7708 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 7709 rack->r_ctl.num_dsack = 0; 7710 } 7711 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 7712 } 7713 /* 7714 * Rack can only run one timer at a time, so we cannot 7715 * run a KEEPINIT (gating SYN sending) and a retransmit 7716 * timer for the SYN. So if we are in a front state and 7717 * have a KEEPINIT timer we need to check the first transmit 7718 * against now to see if we have exceeded the KEEPINIT time 7719 * (if one is set). 7720 */ 7721 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 7722 (TP_KEEPINIT(tp) != 0)) { 7723 struct rack_sendmap *rsm; 7724 7725 rsm = tqhash_min(rack->r_ctl.tqh); 7726 if (rsm) { 7727 /* Ok we have something outstanding to test keepinit with */ 7728 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 7729 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 7730 /* We have exceeded the KEEPINIT time */ 7731 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 7732 goto drop_it; 7733 } 7734 } 7735 } 7736 /* 7737 * Retransmission timer went off. Message has not been acked within 7738 * retransmit interval. Back off to a longer retransmit interval 7739 * and retransmit one segment. 7740 */ 7741 if ((rack->r_ctl.rc_resend == NULL) || 7742 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 7743 /* 7744 * If the rwnd collapsed on 7745 * the one we are retransmitting 7746 * it does not count against the 7747 * rxt count. 7748 */ 7749 tp->t_rxtshift++; 7750 } 7751 rack_remxt_tmr(tp); 7752 if (tp->t_rxtshift > V_tcp_retries) { 7753 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 7754 drop_it: 7755 tp->t_rxtshift = V_tcp_retries; 7756 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 7757 /* XXXGL: previously t_softerror was casted to uint16_t */ 7758 MPASS(tp->t_softerror >= 0); 7759 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; 7760 goto out; /* tcp_drop() */ 7761 } 7762 if (tp->t_state == TCPS_SYN_SENT) { 7763 /* 7764 * If the SYN was retransmitted, indicate CWND to be limited 7765 * to 1 segment in cc_conn_init(). 7766 */ 7767 tp->snd_cwnd = 1; 7768 } else if (tp->t_rxtshift == 1) { 7769 /* 7770 * first retransmit; record ssthresh and cwnd so they can be 7771 * recovered if this turns out to be a "bad" retransmit. A 7772 * retransmit is considered "bad" if an ACK for this segment 7773 * is received within RTT/2 interval; the assumption here is 7774 * that the ACK was already in flight. See "On Estimating 7775 * End-to-End Network Path Properties" by Allman and Paxson 7776 * for more details. 
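 * Worked example (made-up srtt): with t_srtt = 40000 usecs the
 * window below is t_badrxtwin = ticks + 20 ms worth of ticks; an
 * ACK covering the retransmitted data inside that window suggests
 * the original was not lost, and the *_prev values saved just
 * below allow cwnd/ssthresh/snd_recover to be restored.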
7777 */ 7778 tp->snd_cwnd_prev = tp->snd_cwnd; 7779 tp->snd_ssthresh_prev = tp->snd_ssthresh; 7780 tp->snd_recover_prev = tp->snd_recover; 7781 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 7782 tp->t_flags |= TF_PREVVALID; 7783 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 7784 tp->t_flags &= ~TF_PREVVALID; 7785 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 7786 if ((tp->t_state == TCPS_SYN_SENT) || 7787 (tp->t_state == TCPS_SYN_RECEIVED)) 7788 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 7789 else 7790 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 7791 7792 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 7793 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 7794 /* 7795 * We enter the path for PLMTUD if connection is established or, if 7796 * connection is FIN_WAIT_1 status, reason for the last is that if 7797 * amount of data we send is very small, we could send it in couple 7798 * of packets and process straight to FIN. In that case we won't 7799 * catch ESTABLISHED state. 7800 */ 7801 #ifdef INET6 7802 isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false; 7803 #else 7804 isipv6 = false; 7805 #endif 7806 if (((V_tcp_pmtud_blackhole_detect == 1) || 7807 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 7808 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 7809 ((tp->t_state == TCPS_ESTABLISHED) || 7810 (tp->t_state == TCPS_FIN_WAIT_1))) { 7811 /* 7812 * Idea here is that at each stage of mtu probe (usually, 7813 * 1448 -> 1188 -> 524) should be given 2 chances to recover 7814 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 7815 * should take care of that. 7816 */ 7817 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 7818 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 7819 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 7820 tp->t_rxtshift % 2 == 0)) { 7821 /* 7822 * Enter Path MTU Black-hole Detection mechanism: - 7823 * Disable Path MTU Discovery (IP "DF" bit). - 7824 * Reduce MTU to lower value than what we negotiated 7825 * with peer. 7826 */ 7827 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 7828 /* Record that we may have found a black hole. */ 7829 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 7830 /* Keep track of previous MSS. */ 7831 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 7832 } 7833 7834 /* 7835 * Reduce the MSS to blackhole value or to the 7836 * default in an attempt to retransmit. 7837 */ 7838 #ifdef INET6 7839 if (isipv6 && 7840 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 7841 /* Use the sysctl tuneable blackhole MSS. */ 7842 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 7843 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 7844 } else if (isipv6) { 7845 /* Use the default MSS. */ 7846 tp->t_maxseg = V_tcp_v6mssdflt; 7847 /* 7848 * Disable Path MTU Discovery when we switch 7849 * to minmss. 7850 */ 7851 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 7852 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 7853 } 7854 #endif 7855 #if defined(INET6) && defined(INET) 7856 else 7857 #endif 7858 #ifdef INET 7859 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 7860 /* Use the sysctl tuneable blackhole MSS. */ 7861 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 7862 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 7863 } else { 7864 /* Use the default MSS. */ 7865 tp->t_maxseg = V_tcp_mssdflt; 7866 /* 7867 * Disable Path MTU Discovery when we switch 7868 * to minmss. 
7869 */ 7870 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 7871 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 7872 } 7873 #endif 7874 } else { 7875 /* 7876 * If further retransmissions are still unsuccessful 7877 * with a lowered MTU, maybe this isn't a blackhole 7878 * and we restore the previous MSS and blackhole 7879 * detection flags. The limit '6' is determined by 7880 * giving each probe stage (1448, 1188, 524) 2 7881 * chances to recover. 7882 */ 7883 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 7884 (tp->t_rxtshift >= 6)) { 7885 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 7886 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 7887 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 7888 if (tp->t_maxseg < V_tcp_mssdflt) { 7889 /* 7890 * The MSS is so small we should not 7891 * process incoming SACK's since we are 7892 * subject to attack in such a case. 7893 */ 7894 tp->t_flags2 |= TF2_PROC_SACK_PROHIBIT; 7895 } else { 7896 tp->t_flags2 &= ~TF2_PROC_SACK_PROHIBIT; 7897 } 7898 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 7899 } 7900 } 7901 } 7902 /* 7903 * Disable RFC1323 and SACK if we haven't got any response to 7904 * our third SYN to work-around some broken terminal servers 7905 * (most of which have hopefully been retired) that have bad VJ 7906 * header compression code which trashes TCP segments containing 7907 * unknown-to-them TCP options. 7908 */ 7909 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 7910 (tp->t_rxtshift == 3)) 7911 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 7912 /* 7913 * If we backed off this far, our srtt estimate is probably bogus. 7914 * Clobber it so we'll take the next rtt measurement as our srtt; 7915 * move the current srtt into rttvar to keep the current retransmit 7916 * times until then. 7917 */ 7918 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 7919 #ifdef INET6 7920 if ((inp->inp_vflag & INP_IPV6) != 0) 7921 in6_losing(inp); 7922 else 7923 #endif 7924 in_losing(inp); 7925 tp->t_rttvar += tp->t_srtt; 7926 tp->t_srtt = 0; 7927 } 7928 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 7929 tp->snd_recover = tp->snd_max; 7930 tp->t_flags |= TF_ACKNOW; 7931 tp->t_rtttime = 0; 7932 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__); 7933 out: 7934 return (retval); 7935 } 7936 7937 static int 7938 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp) 7939 { 7940 int32_t ret = 0; 7941 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 7942 7943 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 7944 (tp->t_flags & TF_GPUTINPROG)) { 7945 /* 7946 * We have a goodput in progress 7947 * and we have entered a late state. 7948 * Do we have enough data in the sb 7949 * to handle the GPUT request? 7950 */ 7951 uint32_t bytes; 7952 7953 bytes = tp->gput_ack - tp->gput_seq; 7954 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 7955 bytes += tp->gput_seq - tp->snd_una; 7956 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 7957 /* 7958 * There are not enough bytes in the socket 7959 * buffer that have been sent to cover this 7960 * measurement. Cancel it. 
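 * Example with invented numbers: gput_seq = 2000, gput_ack = 12000
 * and snd_una = 1000 give bytes = 10000 + 1000 = 11000; if
 * sbavail() is only, say, 8000 the measurement can never be
 * satisfied from what remains in the socket buffer, so
 * TF_GPUTINPROG is dropped below.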
7961 */ 7962 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7963 rack->r_ctl.rc_gp_srtt /*flex1*/, 7964 tp->gput_seq, 7965 0, 0, 18, __LINE__, NULL, 0); 7966 tp->t_flags &= ~TF_GPUTINPROG; 7967 } 7968 } 7969 if (timers == 0) { 7970 return (0); 7971 } 7972 if (tp->t_state == TCPS_LISTEN) { 7973 /* no timers on listen sockets */ 7974 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 7975 return (0); 7976 return (1); 7977 } 7978 if ((timers & PACE_TMR_RACK) && 7979 rack->rc_on_min_to) { 7980 /* 7981 * For the rack timer when we 7982 * are on a min-timeout (which means rrr_conf = 3) 7983 * we don't want to check the timer. It may 7984 * be going off for a pace and thats ok we 7985 * want to send the retransmit (if its ready). 7986 * 7987 * If its on a normal rack timer (non-min) then 7988 * we will check if its expired. 7989 */ 7990 goto skip_time_check; 7991 } 7992 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 7993 uint32_t left; 7994 7995 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 7996 ret = -1; 7997 rack_log_to_processing(rack, cts, ret, 0); 7998 return (0); 7999 } 8000 if (hpts_calling == 0) { 8001 /* 8002 * A user send or queued mbuf (sack) has called us? We 8003 * return 0 and let the pacing guards 8004 * deal with it if they should or 8005 * should not cause a send. 8006 */ 8007 ret = -2; 8008 rack_log_to_processing(rack, cts, ret, 0); 8009 return (0); 8010 } 8011 /* 8012 * Ok our timer went off early and we are not paced false 8013 * alarm, go back to sleep. We make sure we don't have 8014 * no-sack wakeup on since we no longer have a PKT_OUTPUT 8015 * flag in place. 8016 */ 8017 rack->rc_tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE; 8018 ret = -3; 8019 left = rack->r_ctl.rc_timer_exp - cts; 8020 tcp_hpts_insert(tp, HPTS_MS_TO_SLOTS(left)); 8021 rack_log_to_processing(rack, cts, ret, left); 8022 return (1); 8023 } 8024 skip_time_check: 8025 rack->rc_tmr_stopped = 0; 8026 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 8027 if (timers & PACE_TMR_DELACK) { 8028 ret = rack_timeout_delack(tp, rack, cts); 8029 } else if (timers & PACE_TMR_RACK) { 8030 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8031 rack->r_fast_output = 0; 8032 ret = rack_timeout_rack(tp, rack, cts); 8033 } else if (timers & PACE_TMR_TLP) { 8034 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8035 rack->r_fast_output = 0; 8036 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); 8037 } else if (timers & PACE_TMR_RXT) { 8038 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8039 rack->r_fast_output = 0; 8040 ret = rack_timeout_rxt(tp, rack, cts); 8041 } else if (timers & PACE_TMR_PERSIT) { 8042 ret = rack_timeout_persist(tp, rack, cts); 8043 } else if (timers & PACE_TMR_KEEP) { 8044 ret = rack_timeout_keepalive(tp, rack, cts); 8045 } 8046 rack_log_to_processing(rack, cts, ret, timers); 8047 return (ret); 8048 } 8049 8050 static void 8051 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 8052 { 8053 struct timeval tv; 8054 uint32_t us_cts, flags_on_entry; 8055 uint8_t hpts_removed = 0; 8056 8057 flags_on_entry = rack->r_ctl.rc_hpts_flags; 8058 us_cts = tcp_get_usecs(&tv); 8059 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 8060 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 8061 ((tp->snd_max - tp->snd_una) == 0))) { 8062 tcp_hpts_remove(rack->rc_tp); 8063 hpts_removed = 1; 8064 /* If we were not delayed cancel out the flag. 
*/ 8065 if ((tp->snd_max - tp->snd_una) == 0) 8066 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 8067 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 8068 } 8069 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 8070 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 8071 if (tcp_in_hpts(rack->rc_tp) && 8072 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 8073 /* 8074 * Canceling timer's when we have no output being 8075 * paced. We also must remove ourselves from the 8076 * hpts. 8077 */ 8078 tcp_hpts_remove(rack->rc_tp); 8079 hpts_removed = 1; 8080 } 8081 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 8082 } 8083 if (hpts_removed == 0) 8084 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 8085 } 8086 8087 static int 8088 rack_stopall(struct tcpcb *tp) 8089 { 8090 struct tcp_rack *rack; 8091 8092 rack = (struct tcp_rack *)tp->t_fb_ptr; 8093 rack->t_timers_stopped = 1; 8094 8095 tcp_hpts_remove(tp); 8096 8097 return (0); 8098 } 8099 8100 static void 8101 rack_stop_all_timers(struct tcpcb *tp, struct tcp_rack *rack) 8102 { 8103 /* 8104 * Assure no timers are running. 8105 */ 8106 if (tcp_timer_active(tp, TT_PERSIST)) { 8107 /* We enter in persists, set the flag appropriately */ 8108 rack->rc_in_persist = 1; 8109 } 8110 if (tcp_in_hpts(rack->rc_tp)) { 8111 tcp_hpts_remove(rack->rc_tp); 8112 } 8113 } 8114 8115 static void 8116 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 8117 struct rack_sendmap *rsm, uint64_t ts, uint32_t add_flag, int segsiz) 8118 { 8119 int32_t idx; 8120 8121 rsm->r_rtr_cnt++; 8122 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 8123 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 8124 rsm->r_flags |= RACK_OVERMAX; 8125 } 8126 rsm->r_act_rxt_cnt++; 8127 /* Peg the count/index */ 8128 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8129 rsm->r_dupack = 0; 8130 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { 8131 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 8132 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 8133 } 8134 if (rsm->r_flags & RACK_WAS_LOST) { 8135 /* 8136 * We retransmitted it putting it back in flight 8137 * remove the lost desgination and reduce the 8138 * bytes considered lost. 8139 */ 8140 rsm->r_flags &= ~RACK_WAS_LOST; 8141 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), 8142 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 8143 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) 8144 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; 8145 else 8146 rack->r_ctl.rc_considered_lost = 0; 8147 } 8148 idx = rsm->r_rtr_cnt - 1; 8149 rsm->r_tim_lastsent[idx] = ts; 8150 /* 8151 * Here we don't add in the len of send, since its already 8152 * in snduna <->snd_max. 
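 *
 * Contrast this with the new-send path in rack_log_output(), where
 * snd_max has not yet been advanced and the freshly sent bytes are
 * therefore added on top of the flight size, roughly:
 *   retransmit: r_fas = ctf_flight_size(tp, rc_sacked);
 *   new send:   r_fas = ctf_flight_size(tp, rc_sacked) + (r_end - r_start);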
8153 */ 8154 rsm->r_fas = ctf_flight_size(rack->rc_tp, 8155 rack->r_ctl.rc_sacked); 8156 if (rsm->r_flags & RACK_ACKED) { 8157 /* Problably MTU discovery messing with us */ 8158 rsm->r_flags &= ~RACK_ACKED; 8159 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8160 } 8161 if (rsm->r_in_tmap) { 8162 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8163 rsm->r_in_tmap = 0; 8164 } 8165 /* Lets make sure it really is in or not the GP window */ 8166 rack_mark_in_gp_win(tp, rsm); 8167 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8168 rsm->r_in_tmap = 1; 8169 rsm->r_bas = (uint8_t)(((rsm->r_end - rsm->r_start) + segsiz - 1) / segsiz); 8170 /* Take off the must retransmit flag, if its on */ 8171 if (rsm->r_flags & RACK_MUST_RXT) { 8172 if (rack->r_must_retran) 8173 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 8174 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 8175 /* 8176 * We have retransmitted all we need. Clear 8177 * any must retransmit flags. 8178 */ 8179 rack->r_must_retran = 0; 8180 rack->r_ctl.rc_out_at_rto = 0; 8181 } 8182 rsm->r_flags &= ~RACK_MUST_RXT; 8183 } 8184 /* Remove any collapsed flag */ 8185 rsm->r_flags &= ~RACK_RWND_COLLAPSED; 8186 if (rsm->r_flags & RACK_SACK_PASSED) { 8187 /* We have retransmitted due to the SACK pass */ 8188 rsm->r_flags &= ~RACK_SACK_PASSED; 8189 rsm->r_flags |= RACK_WAS_SACKPASS; 8190 } 8191 } 8192 8193 static uint32_t 8194 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 8195 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint32_t add_flag, int segsiz) 8196 { 8197 /* 8198 * We (re-)transmitted starting at rsm->r_start for some length 8199 * (possibly less than r_end. 8200 */ 8201 struct rack_sendmap *nrsm; 8202 int insret __diagused; 8203 uint32_t c_end; 8204 int32_t len; 8205 8206 len = *lenp; 8207 c_end = rsm->r_start + len; 8208 if (SEQ_GEQ(c_end, rsm->r_end)) { 8209 /* 8210 * We retransmitted the whole piece or more than the whole 8211 * slopping into the next rsm. 8212 */ 8213 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); 8214 if (c_end == rsm->r_end) { 8215 *lenp = 0; 8216 return (0); 8217 } else { 8218 int32_t act_len; 8219 8220 /* Hangs over the end return whats left */ 8221 act_len = rsm->r_end - rsm->r_start; 8222 *lenp = (len - act_len); 8223 return (rsm->r_end); 8224 } 8225 /* We don't get out of this block. */ 8226 } 8227 /* 8228 * Here we retransmitted less than the whole thing which means we 8229 * have to split this into what was transmitted and what was not. 8230 */ 8231 nrsm = rack_alloc_full_limit(rack); 8232 if (nrsm == NULL) { 8233 /* 8234 * We can't get memory, so lets not proceed. 8235 */ 8236 *lenp = 0; 8237 return (0); 8238 } 8239 /* 8240 * So here we are going to take the original rsm and make it what we 8241 * retransmitted. nrsm will be the tail portion we did not 8242 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 8243 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 8244 * 1, 6 and the new piece will be 6, 11. 
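 *
 * In terms of the call below (illustrative, continuing the numbers
 * above): c_end == 6, so rack_clone_rsm(rack, nrsm, rsm, 6) leaves
 * rsm covering [1, 6) (the part just retransmitted) and nrsm
 * covering [6, 11) (the part not resent); nrsm is then inserted
 * into the tqhash and, when applicable, into the tmap right after
 * rsm.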
8245 */ 8246 rack_clone_rsm(rack, nrsm, rsm, c_end); 8247 nrsm->r_dupack = 0; 8248 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 8249 #ifndef INVARIANTS 8250 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 8251 #else 8252 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 8253 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 8254 nrsm, insret, rack, rsm); 8255 } 8256 #endif 8257 if (rsm->r_in_tmap) { 8258 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8259 nrsm->r_in_tmap = 1; 8260 } 8261 rsm->r_flags &= (~RACK_HAS_FIN); 8262 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); 8263 /* Log a split of rsm into rsm and nrsm */ 8264 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 8265 *lenp = 0; 8266 return (0); 8267 } 8268 8269 static void 8270 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 8271 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts, 8272 struct rack_sendmap *hintrsm, uint32_t add_flag, struct mbuf *s_mb, 8273 uint32_t s_moff, int hw_tls, int segsiz) 8274 { 8275 struct tcp_rack *rack; 8276 struct rack_sendmap *rsm, *nrsm; 8277 int insret __diagused; 8278 8279 register uint32_t snd_max, snd_una; 8280 8281 /* 8282 * Add to the RACK log of packets in flight or retransmitted. If 8283 * there is a TS option we will use the TS echoed, if not we will 8284 * grab a TS. 8285 * 8286 * Retransmissions will increment the count and move the ts to its 8287 * proper place. Note that if options do not include TS's then we 8288 * won't be able to effectively use the ACK for an RTT on a retran. 8289 * 8290 * Notes about r_start and r_end. Lets consider a send starting at 8291 * sequence 1 for 10 bytes. In such an example the r_start would be 8292 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 8293 * This means that r_end is actually the first sequence for the next 8294 * slot (11). 8295 * 8296 */ 8297 /* 8298 * If err is set what do we do XXXrrs? should we not add the thing? 8299 * -- i.e. return if err != 0 or should we pretend we sent it? -- 8300 * i.e. proceed with add ** do this for now. 8301 */ 8302 INP_WLOCK_ASSERT(tptoinpcb(tp)); 8303 if (err) 8304 /* 8305 * We don't log errors -- we could but snd_max does not 8306 * advance in this case either. 8307 */ 8308 return; 8309 8310 if (th_flags & TH_RST) { 8311 /* 8312 * We don't log resets and we return immediately from 8313 * sending 8314 */ 8315 return; 8316 } 8317 rack = (struct tcp_rack *)tp->t_fb_ptr; 8318 snd_una = tp->snd_una; 8319 snd_max = tp->snd_max; 8320 if (th_flags & (TH_SYN | TH_FIN)) { 8321 /* 8322 * The call to rack_log_output is made before bumping 8323 * snd_max. This means we can record one extra byte on a SYN 8324 * or FIN if seq_out is adding more on and a FIN is present 8325 * (and we are not resending). 8326 */ 8327 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 8328 len++; 8329 if (th_flags & TH_FIN) 8330 len++; 8331 } 8332 if (SEQ_LEQ((seq_out + len), snd_una)) { 8333 /* Are sending an old segment to induce an ack (keep-alive)? */ 8334 return; 8335 } 8336 if (SEQ_LT(seq_out, snd_una)) { 8337 /* huh? should we panic? */ 8338 uint32_t end; 8339 8340 end = seq_out + len; 8341 seq_out = snd_una; 8342 if (SEQ_GEQ(end, seq_out)) 8343 len = end - seq_out; 8344 else 8345 len = 0; 8346 } 8347 if (len == 0) { 8348 /* We don't log zero window probes */ 8349 return; 8350 } 8351 if (IN_FASTRECOVERY(tp->t_flags)) { 8352 rack->r_ctl.rc_prr_out += len; 8353 } 8354 /* First question is it a retransmission or new? 
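 * The test is simply seq_out == snd_max: anything starting at
 * snd_max is new data and gets a fresh rack_sendmap entry, while
 * anything below snd_max should already be covered by one or more
 * existing entries, which we locate via the hint or the tqhash and
 * update through rack_update_entry().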
*/ 8355 if (seq_out == snd_max) { 8356 /* Its new */ 8357 rack_chk_req_and_hybrid_on_out(rack, seq_out, len, cts); 8358 again: 8359 rsm = rack_alloc(rack); 8360 if (rsm == NULL) { 8361 /* 8362 * Hmm out of memory and the tcb got destroyed while 8363 * we tried to wait. 8364 */ 8365 return; 8366 } 8367 if (th_flags & TH_FIN) { 8368 rsm->r_flags = RACK_HAS_FIN|add_flag; 8369 } else { 8370 rsm->r_flags = add_flag; 8371 } 8372 if (hw_tls) 8373 rsm->r_hw_tls = 1; 8374 rsm->r_tim_lastsent[0] = cts; 8375 rsm->r_rtr_cnt = 1; 8376 rsm->r_act_rxt_cnt = 0; 8377 rsm->r_rtr_bytes = 0; 8378 if (th_flags & TH_SYN) { 8379 /* The data space is one beyond snd_una */ 8380 rsm->r_flags |= RACK_HAS_SYN; 8381 } 8382 rsm->r_start = seq_out; 8383 rsm->r_end = rsm->r_start + len; 8384 rack_mark_in_gp_win(tp, rsm); 8385 rsm->r_dupack = 0; 8386 /* 8387 * save off the mbuf location that 8388 * sndmbuf_noadv returned (which is 8389 * where we started copying from).. 8390 */ 8391 rsm->m = s_mb; 8392 rsm->soff = s_moff; 8393 /* 8394 * Here we do add in the len of send, since its not yet 8395 * reflected in in snduna <->snd_max 8396 */ 8397 rsm->r_fas = (ctf_flight_size(rack->rc_tp, 8398 rack->r_ctl.rc_sacked) + 8399 (rsm->r_end - rsm->r_start)); 8400 if ((rack->rc_initial_ss_comp == 0) && 8401 (rack->r_ctl.ss_hi_fs < rsm->r_fas)) { 8402 rack->r_ctl.ss_hi_fs = rsm->r_fas; 8403 } 8404 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ 8405 if (rsm->m) { 8406 if (rsm->m->m_len <= rsm->soff) { 8407 /* 8408 * XXXrrs Question, will this happen? 8409 * 8410 * If sbsndptr is set at the correct place 8411 * then s_moff should always be somewhere 8412 * within rsm->m. But if the sbsndptr was 8413 * off then that won't be true. If it occurs 8414 * we need to walkout to the correct location. 8415 */ 8416 struct mbuf *lm; 8417 8418 lm = rsm->m; 8419 while (lm->m_len <= rsm->soff) { 8420 rsm->soff -= lm->m_len; 8421 lm = lm->m_next; 8422 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", 8423 __func__, rack, s_moff, s_mb, rsm->soff)); 8424 } 8425 rsm->m = lm; 8426 } 8427 rsm->orig_m_len = rsm->m->m_len; 8428 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 8429 } else { 8430 rsm->orig_m_len = 0; 8431 rsm->orig_t_space = 0; 8432 } 8433 rsm->r_bas = (uint8_t)((len + segsiz - 1) / segsiz); 8434 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8435 /* Log a new rsm */ 8436 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); 8437 #ifndef INVARIANTS 8438 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 8439 #else 8440 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 8441 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 8442 nrsm, insret, rack, rsm); 8443 } 8444 #endif 8445 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8446 rsm->r_in_tmap = 1; 8447 if (rsm->r_flags & RACK_IS_PCM) { 8448 rack->r_ctl.pcm_i.send_time = cts; 8449 rack->r_ctl.pcm_i.eseq = rsm->r_end; 8450 /* First time through we set the start too */ 8451 if (rack->pcm_in_progress == 0) 8452 rack->r_ctl.pcm_i.sseq = rsm->r_start; 8453 } 8454 /* 8455 * Special case detection, is there just a single 8456 * packet outstanding when we are not in recovery? 8457 * 8458 * If this is true mark it so. 
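 *
 * The r_one_out_nr flag planted on the previous entry here is
 * consumed later by tcp_rack_xmit_timer(): an RTT sample taken from
 * a send that was effectively the lone packet in flight (and whose
 * acked length is under two segments) has its confidence dropped,
 * since such a sample tells us little about buffering on the path.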
8459 */ 8460 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 8461 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 8462 struct rack_sendmap *prsm; 8463 8464 prsm = tqhash_prev(rack->r_ctl.tqh, rsm); 8465 if (prsm) 8466 prsm->r_one_out_nr = 1; 8467 } 8468 return; 8469 } 8470 /* 8471 * If we reach here its a retransmission and we need to find it. 8472 */ 8473 more: 8474 if (hintrsm && (hintrsm->r_start == seq_out)) { 8475 rsm = hintrsm; 8476 hintrsm = NULL; 8477 } else { 8478 /* No hints sorry */ 8479 rsm = NULL; 8480 } 8481 if ((rsm) && (rsm->r_start == seq_out)) { 8482 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); 8483 if (len == 0) { 8484 return; 8485 } else { 8486 goto more; 8487 } 8488 } 8489 /* Ok it was not the last pointer go through it the hard way. */ 8490 refind: 8491 rsm = tqhash_find(rack->r_ctl.tqh, seq_out); 8492 if (rsm) { 8493 if (rsm->r_start == seq_out) { 8494 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); 8495 if (len == 0) { 8496 return; 8497 } else { 8498 goto refind; 8499 } 8500 } 8501 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 8502 /* Transmitted within this piece */ 8503 /* 8504 * Ok we must split off the front and then let the 8505 * update do the rest 8506 */ 8507 nrsm = rack_alloc_full_limit(rack); 8508 if (nrsm == NULL) { 8509 rack_update_rsm(tp, rack, rsm, cts, add_flag, segsiz); 8510 return; 8511 } 8512 /* 8513 * copy rsm to nrsm and then trim the front of rsm 8514 * to not include this part. 8515 */ 8516 rack_clone_rsm(rack, nrsm, rsm, seq_out); 8517 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 8518 #ifndef INVARIANTS 8519 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 8520 #else 8521 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 8522 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 8523 nrsm, insret, rack, rsm); 8524 } 8525 #endif 8526 if (rsm->r_in_tmap) { 8527 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8528 nrsm->r_in_tmap = 1; 8529 } 8530 rsm->r_flags &= (~RACK_HAS_FIN); 8531 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag, segsiz); 8532 if (len == 0) { 8533 return; 8534 } else if (len > 0) 8535 goto refind; 8536 } 8537 } 8538 /* 8539 * Hmm not found in map did they retransmit both old and on into the 8540 * new? 8541 */ 8542 if (seq_out == tp->snd_max) { 8543 goto again; 8544 } else if (SEQ_LT(seq_out, tp->snd_max)) { 8545 #ifdef INVARIANTS 8546 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 8547 seq_out, len, tp->snd_una, tp->snd_max); 8548 printf("Starting Dump of all rack entries\n"); 8549 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 8550 printf("rsm:%p start:%u end:%u\n", 8551 rsm, rsm->r_start, rsm->r_end); 8552 } 8553 printf("Dump complete\n"); 8554 panic("seq_out not found rack:%p tp:%p", 8555 rack, tp); 8556 #endif 8557 } else { 8558 #ifdef INVARIANTS 8559 /* 8560 * Hmm beyond sndmax? (only if we are using the new rtt-pack 8561 * flag) 8562 */ 8563 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 8564 seq_out, len, tp->snd_max, tp); 8565 #endif 8566 } 8567 } 8568 8569 /* 8570 * Record one of the RTT updates from an ack into 8571 * our sample structure. 
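 *
 * Each call folds one measurement into r_ctl.rack_rs: the lowest and
 * highest rtt seen within the ack, a running total and count for the
 * average, and the best (lowest) microsecond rtt together with the
 * confidence we place in it and the retransmit count of the segment
 * that produced it. tcp_rack_xmit_timer_commit() consumes the
 * aggregate afterwards.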
8572 */
8573
8574 static void
8575 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt,
8576 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt)
8577 {
8578 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
8579 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) {
8580 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt;
8581 }
8582 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
8583 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) {
8584 rack->r_ctl.rack_rs.rs_rtt_highest = rtt;
8585 }
8586 if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
8587 if (us_rtt < rack->r_ctl.rc_gp_lowrtt)
8588 rack->r_ctl.rc_gp_lowrtt = us_rtt;
8589 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd)
8590 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
8591 }
8592 if ((confidence == 1) &&
8593 ((rsm == NULL) ||
8594 (rsm->r_just_ret) ||
8595 (rsm->r_one_out_nr &&
8596 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) {
8597 /*
8598 * If the rsm had its just-return flag
8599 * set then we can't trust the rtt
8600 * measurement for buffer determination.
8601 * Note that a confidence of 2 indicates
8602 * SACK'd, which overrides r_just_ret and
8603 * r_one_out_nr. If it was a CUM-ACK and
8604 * we had only two outstanding but get an
8605 * ack for only one, that also lowers our
8606 * confidence.
8607 */
8608 confidence = 0;
8609 }
8610 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
8611 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) {
8612 if (rack->r_ctl.rack_rs.confidence == 0) {
8613 /*
8614 * We take anything with no current confidence
8615 * saved.
8616 */
8617 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
8618 rack->r_ctl.rack_rs.confidence = confidence;
8619 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
8620 } else if (confidence != 0) {
8621 /*
8622 * Once we have a confident number,
8623 * we can update it with a smaller
8624 * value since this confident number
8625 * may include the DSACK time until
8626 * the next segment (the second one) arrived.
8627 */
8628 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
8629 rack->r_ctl.rack_rs.confidence = confidence;
8630 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
8631 }
8632 }
8633 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence);
8634 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID;
8635 rack->r_ctl.rack_rs.rs_rtt_tot += rtt;
8636 rack->r_ctl.rack_rs.rs_rtt_cnt++;
8637 }
8638
8639 /*
8640 * Collect new round-trip time estimate
8641 * and update averages and current timeout.
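 *
 * Which per-ack aggregate is committed depends on
 * rc_rate_sample_method (sketch of the selection below):
 *   USE_RTT_LOW  -> rs_rtt_lowest
 *   USE_RTT_HIGH -> rs_rtt_highest
 *   USE_RTT_AVG  -> rs_rtt_tot / rs_rtt_cnt
 * The chosen value feeds the srtt/rttvar smoothing, while the
 * microsecond sample (rs_us_rtt) drives the goodput srtt and the
 * highly-buffered-path detection.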
8642 */ 8643 static void 8644 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 8645 { 8646 int32_t delta; 8647 int32_t rtt; 8648 8649 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 8650 /* No valid sample */ 8651 return; 8652 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 8653 /* We are to use the lowest RTT seen in a single ack */ 8654 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 8655 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 8656 /* We are to use the highest RTT seen in a single ack */ 8657 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 8658 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 8659 /* We are to use the average RTT seen in a single ack */ 8660 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 8661 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 8662 } else { 8663 #ifdef INVARIANTS 8664 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 8665 #endif 8666 return; 8667 } 8668 if (rtt == 0) 8669 rtt = 1; 8670 if (rack->rc_gp_rtt_set == 0) { 8671 /* 8672 * With no RTT we have to accept 8673 * even one we are not confident of. 8674 */ 8675 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 8676 rack->rc_gp_rtt_set = 1; 8677 } else if (rack->r_ctl.rack_rs.confidence) { 8678 /* update the running gp srtt */ 8679 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 8680 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 8681 } 8682 if (rack->r_ctl.rack_rs.confidence) { 8683 /* 8684 * record the low and high for highly buffered path computation, 8685 * we only do this if we are confident (not a retransmission). 8686 */ 8687 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 8688 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 8689 } 8690 if (rack->rc_highly_buffered == 0) { 8691 /* 8692 * Currently once we declare a path has 8693 * highly buffered there is no going 8694 * back, which may be a problem... 8695 */ 8696 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 8697 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 8698 rack->r_ctl.rc_highest_us_rtt, 8699 rack->r_ctl.rc_lowest_us_rtt, 8700 RACK_RTTS_SEEHBP); 8701 rack->rc_highly_buffered = 1; 8702 } 8703 } 8704 } 8705 if ((rack->r_ctl.rack_rs.confidence) || 8706 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 8707 /* 8708 * If we are highly confident of it <or> it was 8709 * never retransmitted we accept it as the last us_rtt. 8710 */ 8711 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 8712 /* The lowest rtt can be set if its was not retransmited */ 8713 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 8714 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 8715 if (rack->r_ctl.rc_lowest_us_rtt == 0) 8716 rack->r_ctl.rc_lowest_us_rtt = 1; 8717 } 8718 } 8719 rack = (struct tcp_rack *)tp->t_fb_ptr; 8720 if (tp->t_srtt != 0) { 8721 /* 8722 * We keep a simple srtt in microseconds, like our rtt 8723 * measurement. We don't need to do any tricks with shifting 8724 * etc. Instead we just add in 1/8th of the new measurement 8725 * and subtract out 1/8 of the old srtt. We do the same with 8726 * the variance after finding the absolute value of the 8727 * difference between this sample and the current srtt. 
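 *
 * Worked example with made-up numbers: suppose t_srtt = 10000 us,
 * t_rttvar = 3000 us and the new rtt = 12000 us. Then
 * delta = 10000 - 12000 = -2000 (|delta| = 2000), and
 *   t_srtt   becomes 10000 - (10000 >> 3) + (12000 >> 3) = 10250,
 *   t_rttvar becomes  3000 - (3000 >> 3)  + (2000 >> 3)  =  2875.
 * srtt moves 1/8th of the way toward the sample and rttvar 1/8th of
 * the way toward |delta|, i.e. a smoothed mean deviation rather than
 * a true variance.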
8728 */ 8729 delta = tp->t_srtt - rtt; 8730 /* Take off 1/8th of the current sRTT */ 8731 tp->t_srtt -= (tp->t_srtt >> 3); 8732 /* Add in 1/8th of the new RTT just measured */ 8733 tp->t_srtt += (rtt >> 3); 8734 if (tp->t_srtt <= 0) 8735 tp->t_srtt = 1; 8736 /* Now lets make the absolute value of the variance */ 8737 if (delta < 0) 8738 delta = -delta; 8739 /* Subtract out 1/8th */ 8740 tp->t_rttvar -= (tp->t_rttvar >> 3); 8741 /* Add in 1/8th of the new variance we just saw */ 8742 tp->t_rttvar += (delta >> 3); 8743 if (tp->t_rttvar <= 0) 8744 tp->t_rttvar = 1; 8745 } else { 8746 /* 8747 * No rtt measurement yet - use the unsmoothed rtt. Set the 8748 * variance to half the rtt (so our first retransmit happens 8749 * at 3*rtt). 8750 */ 8751 tp->t_srtt = rtt; 8752 tp->t_rttvar = rtt >> 1; 8753 } 8754 rack->rc_srtt_measure_made = 1; 8755 KMOD_TCPSTAT_INC(tcps_rttupdated); 8756 if (tp->t_rttupdated < UCHAR_MAX) 8757 tp->t_rttupdated++; 8758 #ifdef STATS 8759 if (rack_stats_gets_ms_rtt == 0) { 8760 /* Send in the microsecond rtt used for rxt timeout purposes */ 8761 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 8762 } else if (rack_stats_gets_ms_rtt == 1) { 8763 /* Send in the millisecond rtt used for rxt timeout purposes */ 8764 int32_t ms_rtt; 8765 8766 /* Round up */ 8767 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 8768 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 8769 } else if (rack_stats_gets_ms_rtt == 2) { 8770 /* Send in the millisecond rtt has close to the path RTT as we can get */ 8771 int32_t ms_rtt; 8772 8773 /* Round up */ 8774 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 8775 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 8776 } else { 8777 /* Send in the microsecond rtt has close to the path RTT as we can get */ 8778 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 8779 } 8780 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_PATHRTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 8781 #endif 8782 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 8783 /* 8784 * the retransmit should happen at rtt + 4 * rttvar. Because of the 8785 * way we do the smoothing, srtt and rttvar will each average +1/2 8786 * tick of bias. When we compute the retransmit timer, we want 1/2 8787 * tick of rounding and 1 extra tick because of +-1/2 tick 8788 * uncertainty in the firing of the timer. The bias will give us 8789 * exactly the 1.5 tick we need. But, because the bias is 8790 * statistical, we have to test that we don't drop below the minimum 8791 * feasible timer (which is 2 ticks). 8792 */ 8793 tp->t_rxtshift = 0; 8794 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 8795 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 8796 rack_log_rtt_sample(rack, rtt); 8797 tp->t_softerror = 0; 8798 } 8799 8800 8801 static void 8802 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 8803 { 8804 /* 8805 * Apply to filter the inbound us-rtt at us_cts. 
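 *
 * This feeds the time-windowed minimum filter rc_gp_min_rtt. When a
 * sample undercuts the previous minimum by more than
 * rack_min_rtt_movement we record the time in rc_lower_rtt_us_cts,
 * and if the new low shows up close to when we would have entered
 * probe-rtt anyway (suggesting a peer flow is probing), we may enter
 * probe-rtt early ourselves.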
8806 */ 8807 uint32_t old_rtt; 8808 8809 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 8810 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 8811 us_rtt, us_cts); 8812 if (old_rtt > us_rtt) { 8813 /* We just hit a new lower rtt time */ 8814 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 8815 __LINE__, RACK_RTTS_NEWRTT); 8816 /* 8817 * Only count it if its lower than what we saw within our 8818 * calculated range. 8819 */ 8820 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 8821 if (rack_probertt_lower_within && 8822 rack->rc_gp_dyn_mul && 8823 (rack->use_fixed_rate == 0) && 8824 (rack->rc_always_pace)) { 8825 /* 8826 * We are seeing a new lower rtt very close 8827 * to the time that we would have entered probe-rtt. 8828 * This is probably due to the fact that a peer flow 8829 * has entered probe-rtt. Lets go in now too. 8830 */ 8831 uint32_t val; 8832 8833 val = rack_probertt_lower_within * rack_time_between_probertt; 8834 val /= 100; 8835 if ((rack->in_probe_rtt == 0) && 8836 (rack->rc_skip_timely == 0) && 8837 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 8838 rack_enter_probertt(rack, us_cts); 8839 } 8840 } 8841 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 8842 } 8843 } 8844 } 8845 8846 static int 8847 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 8848 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 8849 { 8850 uint32_t us_rtt; 8851 int32_t i, all; 8852 uint32_t t, len_acked; 8853 8854 if ((rsm->r_flags & RACK_ACKED) || 8855 (rsm->r_flags & RACK_WAS_ACKED)) 8856 /* Already done */ 8857 return (0); 8858 if (rsm->r_no_rtt_allowed) { 8859 /* Not allowed */ 8860 return (0); 8861 } 8862 if (ack_type == CUM_ACKED) { 8863 if (SEQ_GT(th_ack, rsm->r_end)) { 8864 len_acked = rsm->r_end - rsm->r_start; 8865 all = 1; 8866 } else { 8867 len_acked = th_ack - rsm->r_start; 8868 all = 0; 8869 } 8870 } else { 8871 len_acked = rsm->r_end - rsm->r_start; 8872 all = 0; 8873 } 8874 if (rsm->r_rtr_cnt == 1) { 8875 8876 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 8877 if ((int)t <= 0) 8878 t = 1; 8879 if (!tp->t_rttlow || tp->t_rttlow > t) 8880 tp->t_rttlow = t; 8881 if (!rack->r_ctl.rc_rack_min_rtt || 8882 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8883 rack->r_ctl.rc_rack_min_rtt = t; 8884 if (rack->r_ctl.rc_rack_min_rtt == 0) { 8885 rack->r_ctl.rc_rack_min_rtt = 1; 8886 } 8887 } 8888 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 8889 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8890 else 8891 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8892 if (us_rtt == 0) 8893 us_rtt = 1; 8894 if (CC_ALGO(tp)->rttsample != NULL) { 8895 /* Kick the RTT to the CC */ 8896 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 8897 } 8898 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 8899 if (ack_type == SACKED) { 8900 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 8901 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 8902 } else { 8903 /* 8904 * We need to setup what our confidence 8905 * is in this ack. 8906 * 8907 * If the rsm was app limited and it is 8908 * less than a mss in length (the end 8909 * of the send) then we have a gap. 
If we 8910 * were app limited but say we were sending 8911 * multiple MSS's then we are more confident 8912 * int it. 8913 * 8914 * When we are not app-limited then we see if 8915 * the rsm is being included in the current 8916 * measurement, we tell this by the app_limited_needs_set 8917 * flag. 8918 * 8919 * Note that being cwnd blocked is not applimited 8920 * as well as the pacing delay between packets which 8921 * are sending only 1 or 2 MSS's also will show up 8922 * in the RTT. We probably need to examine this algorithm 8923 * a bit more and enhance it to account for the delay 8924 * between rsm's. We could do that by saving off the 8925 * pacing delay of each rsm (in an rsm) and then 8926 * factoring that in somehow though for now I am 8927 * not sure how :) 8928 */ 8929 int calc_conf = 0; 8930 8931 if (rsm->r_flags & RACK_APP_LIMITED) { 8932 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 8933 calc_conf = 0; 8934 else 8935 calc_conf = 1; 8936 } else if (rack->app_limited_needs_set == 0) { 8937 calc_conf = 1; 8938 } else { 8939 calc_conf = 0; 8940 } 8941 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 8942 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 8943 calc_conf, rsm, rsm->r_rtr_cnt); 8944 } 8945 if ((rsm->r_flags & RACK_TLP) && 8946 (!IN_FASTRECOVERY(tp->t_flags))) { 8947 /* Segment was a TLP and our retrans matched */ 8948 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 8949 rack_cong_signal(tp, CC_NDUPACK, th_ack, __LINE__); 8950 } 8951 } 8952 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 8953 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 8954 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { 8955 /* New more recent rack_tmit_time */ 8956 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 8957 if (rack->r_ctl.rc_rack_tmit_time == 0) 8958 rack->r_ctl.rc_rack_tmit_time = 1; 8959 rack->rc_rack_rtt = t; 8960 } 8961 return (1); 8962 } 8963 /* 8964 * We clear the soft/rxtshift since we got an ack. 8965 * There is no assurance we will call the commit() function 8966 * so we need to clear these to avoid incorrect handling. 8967 */ 8968 tp->t_rxtshift = 0; 8969 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 8970 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 8971 tp->t_softerror = 0; 8972 if (to && (to->to_flags & TOF_TS) && 8973 (ack_type == CUM_ACKED) && 8974 (to->to_tsecr) && 8975 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 8976 /* 8977 * Now which timestamp does it match? In this block the ACK 8978 * must be coming from a previous transmission. 8979 */ 8980 for (i = 0; i < rsm->r_rtr_cnt; i++) { 8981 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 8982 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 8983 if ((int)t <= 0) 8984 t = 1; 8985 if (CC_ALGO(tp)->rttsample != NULL) { 8986 /* 8987 * Kick the RTT to the CC, here 8988 * we lie a bit in that we know the 8989 * retransmission is correct even though 8990 * we retransmitted. This is because 8991 * we match the timestamps. 8992 */ 8993 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) 8994 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; 8995 else 8996 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; 8997 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 8998 } 8999 if ((i + 1) < rsm->r_rtr_cnt) { 9000 /* 9001 * The peer ack'd from our previous 9002 * transmission. 
We have a spurious 9003 * retransmission and thus we dont 9004 * want to update our rack_rtt. 9005 * 9006 * Hmm should there be a CC revert here? 9007 * 9008 */ 9009 return (0); 9010 } 9011 if (!tp->t_rttlow || tp->t_rttlow > t) 9012 tp->t_rttlow = t; 9013 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9014 rack->r_ctl.rc_rack_min_rtt = t; 9015 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9016 rack->r_ctl.rc_rack_min_rtt = 1; 9017 } 9018 } 9019 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9020 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9021 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { 9022 /* New more recent rack_tmit_time */ 9023 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 9024 if (rack->r_ctl.rc_rack_tmit_time == 0) 9025 rack->r_ctl.rc_rack_tmit_time = 1; 9026 rack->rc_rack_rtt = t; 9027 } 9028 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); 9029 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, 9030 rsm->r_rtr_cnt); 9031 return (1); 9032 } 9033 } 9034 /* If we are logging log out the sendmap */ 9035 if (tcp_bblogging_on(rack->rc_tp)) { 9036 for (i = 0; i < rsm->r_rtr_cnt; i++) { 9037 rack_log_rtt_sendmap(rack, i, rsm->r_tim_lastsent[i], to->to_tsecr); 9038 } 9039 } 9040 goto ts_not_found; 9041 } else { 9042 /* 9043 * Ok its a SACK block that we retransmitted. or a windows 9044 * machine without timestamps. We can tell nothing from the 9045 * time-stamp since its not there or the time the peer last 9046 * received a segment that moved forward its cum-ack point. 9047 */ 9048 ts_not_found: 9049 i = rsm->r_rtr_cnt - 1; 9050 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 9051 if ((int)t <= 0) 9052 t = 1; 9053 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9054 /* 9055 * We retransmitted and the ack came back in less 9056 * than the smallest rtt we have observed. We most 9057 * likely did an improper retransmit as outlined in 9058 * 6.2 Step 2 point 2 in the rack-draft so we 9059 * don't want to update our rack_rtt. We in 9060 * theory (in future) might want to think about reverting our 9061 * cwnd state but we won't for now. 9062 */ 9063 return (0); 9064 } else if (rack->r_ctl.rc_rack_min_rtt) { 9065 /* 9066 * We retransmitted it and the retransmit did the 9067 * job. 9068 */ 9069 if (!rack->r_ctl.rc_rack_min_rtt || 9070 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9071 rack->r_ctl.rc_rack_min_rtt = t; 9072 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9073 rack->r_ctl.rc_rack_min_rtt = 1; 9074 } 9075 } 9076 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9077 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9078 (uint32_t)rsm->r_tim_lastsent[i]))) { 9079 /* New more recent rack_tmit_time */ 9080 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; 9081 if (rack->r_ctl.rc_rack_tmit_time == 0) 9082 rack->r_ctl.rc_rack_tmit_time = 1; 9083 rack->rc_rack_rtt = t; 9084 } 9085 return (1); 9086 } 9087 } 9088 return (0); 9089 } 9090 9091 /* 9092 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 
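 *
 * Sketch of the walk below: starting from the newly sacked rsm we
 * traverse the transmit map backwards. Entries already acked or
 * covered by a collapsed rwnd are skipped; every other entry gets
 * RACK_SACK_PASSED, and entries not yet marked lost also pick up
 * RACK_WAS_LOST (adding to rc_considered_lost) once the sacked
 * segment's last send time plus the rack threshold has expired. We
 * stop at the first entry already marked passed, since everything
 * older was handled on a previous call.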
9093 */ 9094 static void 9095 rack_log_sack_passed(struct tcpcb *tp, 9096 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts) 9097 { 9098 struct rack_sendmap *nrsm; 9099 uint32_t thresh; 9100 9101 /* Get our rxt threshold for lost consideration */ 9102 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(tp, rack), cts, __LINE__, 0); 9103 /* Now start looking at rsm's */ 9104 nrsm = rsm; 9105 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 9106 rack_head, r_tnext) { 9107 if (nrsm == rsm) { 9108 /* Skip original segment he is acked */ 9109 continue; 9110 } 9111 if (nrsm->r_flags & RACK_ACKED) { 9112 /* 9113 * Skip ack'd segments, though we 9114 * should not see these, since tmap 9115 * should not have ack'd segments. 9116 */ 9117 continue; 9118 } 9119 if (nrsm->r_flags & RACK_RWND_COLLAPSED) { 9120 /* 9121 * If the peer dropped the rwnd on 9122 * these then we don't worry about them. 9123 */ 9124 continue; 9125 } 9126 /* Check lost state */ 9127 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { 9128 uint32_t exp; 9129 9130 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; 9131 if (TSTMP_LT(exp, cts) || (exp == cts)) { 9132 /* We consider it lost */ 9133 nrsm->r_flags |= RACK_WAS_LOST; 9134 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; 9135 } 9136 } 9137 if (nrsm->r_flags & RACK_SACK_PASSED) { 9138 /* 9139 * We found one that is already marked 9140 * passed, we have been here before and 9141 * so all others below this are marked. 9142 */ 9143 break; 9144 } 9145 nrsm->r_flags |= RACK_SACK_PASSED; 9146 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 9147 } 9148 } 9149 9150 static void 9151 rack_need_set_test(struct tcpcb *tp, 9152 struct tcp_rack *rack, 9153 struct rack_sendmap *rsm, 9154 tcp_seq th_ack, 9155 int line, 9156 int use_which) 9157 { 9158 struct rack_sendmap *s_rsm; 9159 9160 if ((tp->t_flags & TF_GPUTINPROG) && 9161 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 9162 /* 9163 * We were app limited, and this ack 9164 * butts up or goes beyond the point where we want 9165 * to start our next measurement. We need 9166 * to record the new gput_ts as here and 9167 * possibly update the start sequence. 9168 */ 9169 uint32_t seq, ts; 9170 9171 if (rsm->r_rtr_cnt > 1) { 9172 /* 9173 * This is a retransmit, can we 9174 * really make any assessment at this 9175 * point? We are not really sure of 9176 * the timestamp, is it this or the 9177 * previous transmission? 9178 * 9179 * Lets wait for something better that 9180 * is not retransmitted. 9181 */ 9182 return; 9183 } 9184 seq = tp->gput_seq; 9185 ts = tp->gput_ts; 9186 rack->app_limited_needs_set = 0; 9187 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 9188 /* Do we start at a new end? */ 9189 if ((use_which == RACK_USE_BEG) && 9190 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { 9191 /* 9192 * When we get an ACK that just eats 9193 * up some of the rsm, we set RACK_USE_BEG 9194 * since whats at r_start (i.e. th_ack) 9195 * is left unacked and thats where the 9196 * measurement now starts. 9197 */ 9198 tp->gput_seq = rsm->r_start; 9199 } 9200 if ((use_which == RACK_USE_END) && 9201 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 9202 /* 9203 * We use the end when the cumack 9204 * is moving forward and completely 9205 * deleting the rsm passed so basically 9206 * r_end holds th_ack. 9207 * 9208 * For SACK's we also want to use the end 9209 * since this piece just got sacked and 9210 * we want to target anything after that 9211 * in our measurement. 
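 *
 * For example (illustrative numbers): if the measurement currently
 * starts at gput_seq == 1000 and an rsm covering [1000, 2448) is
 * sacked or cum-acked in full, RACK_USE_END advances gput_seq to
 * 2448 so the goodput window only spans data that is still
 * unacknowledged.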
9212 */ 9213 tp->gput_seq = rsm->r_end; 9214 } 9215 if (use_which == RACK_USE_END_OR_THACK) { 9216 /* 9217 * special case for ack moving forward, 9218 * not a sack, we need to move all the 9219 * way up to where this ack cum-ack moves 9220 * to. 9221 */ 9222 if (SEQ_GT(th_ack, rsm->r_end)) 9223 tp->gput_seq = th_ack; 9224 else 9225 tp->gput_seq = rsm->r_end; 9226 } 9227 if (SEQ_LT(tp->gput_seq, tp->snd_max)) 9228 s_rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 9229 else 9230 s_rsm = NULL; 9231 /* 9232 * Pick up the correct send time if we can the rsm passed in 9233 * may be equal to s_rsm if the RACK_USE_BEG was set. For the other 9234 * two cases (RACK_USE_THACK or RACK_USE_END) most likely we will 9235 * find a different seq i.e. the next send up. 9236 * 9237 * If that has not been sent, s_rsm will be NULL and we must 9238 * arrange it so this function will get called again by setting 9239 * app_limited_needs_set. 9240 */ 9241 if (s_rsm) 9242 rack->r_ctl.rc_gp_output_ts = s_rsm->r_tim_lastsent[0]; 9243 else { 9244 /* If we hit here we have to have *not* sent tp->gput_seq */ 9245 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; 9246 /* Set it up so we will go through here again */ 9247 rack->app_limited_needs_set = 1; 9248 } 9249 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { 9250 /* 9251 * We moved beyond this guy's range, re-calculate 9252 * the new end point. 9253 */ 9254 if (rack->rc_gp_filled == 0) { 9255 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 9256 } else { 9257 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 9258 } 9259 } 9260 /* 9261 * We are moving the goal post, we may be able to clear the 9262 * measure_saw_probe_rtt flag. 9263 */ 9264 if ((rack->in_probe_rtt == 0) && 9265 (rack->measure_saw_probe_rtt) && 9266 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 9267 rack->measure_saw_probe_rtt = 0; 9268 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 9269 seq, tp->gput_seq, 9270 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | 9271 (uint64_t)rack->r_ctl.rc_gp_output_ts), 9272 5, line, NULL, 0); 9273 if (rack->rc_gp_filled && 9274 ((tp->gput_ack - tp->gput_seq) < 9275 max(rc_init_window(rack), (MIN_GP_WIN * 9276 ctf_fixed_maxseg(tp))))) { 9277 uint32_t ideal_amount; 9278 9279 ideal_amount = rack_get_measure_window(tp, rack); 9280 if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) { 9281 /* 9282 * There is no sense of continuing this measurement 9283 * because its too small to gain us anything we 9284 * trust. Skip it and that way we can start a new 9285 * measurement quicker. 9286 */ 9287 tp->t_flags &= ~TF_GPUTINPROG; 9288 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 9289 0, 0, 9290 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | 9291 (uint64_t)rack->r_ctl.rc_gp_output_ts), 9292 6, __LINE__, NULL, 0); 9293 } else { 9294 /* 9295 * Reset the window further out. 
9296 */ 9297 tp->gput_ack = tp->gput_seq + ideal_amount; 9298 } 9299 } 9300 rack_tend_gp_marks(tp, rack); 9301 rack_log_gpset(rack, tp->gput_ack, 0, 0, line, 2, rsm); 9302 } 9303 } 9304 9305 static inline int 9306 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) 9307 { 9308 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { 9309 /* Behind our TLP definition or right at */ 9310 return (0); 9311 } 9312 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { 9313 /* The start is beyond or right at our end of TLP definition */ 9314 return (0); 9315 } 9316 /* It has to be a sub-part of the original TLP recorded */ 9317 return (1); 9318 } 9319 9320 static uint32_t 9321 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 9322 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, 9323 uint32_t segsiz) 9324 { 9325 uint32_t start, end, changed = 0; 9326 struct rack_sendmap stack_map; 9327 struct rack_sendmap *rsm, *nrsm, *prev, *next; 9328 int insret __diagused; 9329 int32_t used_ref = 1; 9330 int can_use_hookery = 0; 9331 9332 start = sack->start; 9333 end = sack->end; 9334 rsm = *prsm; 9335 9336 do_rest_ofb: 9337 if ((rsm == NULL) || 9338 (SEQ_LT(end, rsm->r_start)) || 9339 (SEQ_GEQ(start, rsm->r_end)) || 9340 (SEQ_LT(start, rsm->r_start))) { 9341 /* 9342 * We are not in the right spot, 9343 * find the correct spot in the tree. 9344 */ 9345 used_ref = 0; 9346 rsm = tqhash_find(rack->r_ctl.tqh, start); 9347 } 9348 if (rsm == NULL) { 9349 /* TSNH */ 9350 goto out; 9351 } 9352 /* Ok we have an ACK for some piece of this rsm */ 9353 if (rsm->r_start != start) { 9354 if ((rsm->r_flags & RACK_ACKED) == 0) { 9355 /* 9356 * Before any splitting or hookery is 9357 * done is it a TLP of interest i.e. rxt? 9358 */ 9359 if ((rsm->r_flags & RACK_TLP) && 9360 (rsm->r_rtr_cnt > 1)) { 9361 /* 9362 * We are splitting a rxt TLP, check 9363 * if we need to save off the start/end 9364 */ 9365 if (rack->rc_last_tlp_acked_set && 9366 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9367 /* 9368 * We already turned this on since we are inside 9369 * the previous one was a partially sack now we 9370 * are getting another one (maybe all of it). 9371 * 9372 */ 9373 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9374 /* 9375 * Lets make sure we have all of it though. 9376 */ 9377 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9378 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9379 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9380 rack->r_ctl.last_tlp_acked_end); 9381 } 9382 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9383 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9384 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9385 rack->r_ctl.last_tlp_acked_end); 9386 } 9387 } else { 9388 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9389 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9390 rack->rc_last_tlp_past_cumack = 0; 9391 rack->rc_last_tlp_acked_set = 1; 9392 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9393 } 9394 } 9395 /** 9396 * Need to split this in two pieces the before and after, 9397 * the before remains in the map, the after must be 9398 * added. 
In other words we have: 9399 * rsm |--------------| 9400 * sackblk |-------> 9401 * rsm will become 9402 * rsm |---| 9403 * and nrsm will be the sacked piece 9404 * nrsm |----------| 9405 * 9406 * But before we start down that path lets 9407 * see if the sack spans over on top of 9408 * the next guy and it is already sacked. 9409 * 9410 */ 9411 /* 9412 * Hookery can only be used if the two entries 9413 * are in the same bucket and neither one of 9414 * them staddle the bucket line. 9415 */ 9416 next = tqhash_next(rack->r_ctl.tqh, rsm); 9417 if (next && 9418 (rsm->bindex == next->bindex) && 9419 ((rsm->r_flags & RACK_STRADDLE) == 0) && 9420 ((next->r_flags & RACK_STRADDLE) == 0) && 9421 ((rsm->r_flags & RACK_IS_PCM) == 0) && 9422 ((next->r_flags & RACK_IS_PCM) == 0) && 9423 (rsm->r_flags & RACK_IN_GP_WIN) && 9424 (next->r_flags & RACK_IN_GP_WIN)) 9425 can_use_hookery = 1; 9426 else 9427 can_use_hookery = 0; 9428 if (next && can_use_hookery && 9429 (next->r_flags & RACK_ACKED) && 9430 SEQ_GEQ(end, next->r_start)) { 9431 /** 9432 * So the next one is already acked, and 9433 * we can thus by hookery use our stack_map 9434 * to reflect the piece being sacked and 9435 * then adjust the two tree entries moving 9436 * the start and ends around. So we start like: 9437 * rsm |------------| (not-acked) 9438 * next |-----------| (acked) 9439 * sackblk |--------> 9440 * We want to end like so: 9441 * rsm |------| (not-acked) 9442 * next |-----------------| (acked) 9443 * nrsm |-----| 9444 * Where nrsm is a temporary stack piece we 9445 * use to update all the gizmos. 9446 */ 9447 /* Copy up our fudge block */ 9448 nrsm = &stack_map; 9449 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 9450 /* Now adjust our tree blocks */ 9451 tqhash_update_end(rack->r_ctl.tqh, rsm, start); 9452 next->r_start = start; 9453 rsm->r_flags |= RACK_SHUFFLED; 9454 next->r_flags |= RACK_SHUFFLED; 9455 /* Now we must adjust back where next->m is */ 9456 rack_setup_offset_for_rsm(rack, rsm, next); 9457 /* 9458 * Which timestamp do we keep? It is rather 9459 * important in GP measurements to have the 9460 * accurate end of the send window. 9461 * 9462 * We keep the largest value, which is the newest 9463 * send. We do this in case a segment that is 9464 * joined together and not part of a GP estimate 9465 * later gets expanded into the GP estimate. 9466 * 9467 * We prohibit the merging of unlike kinds i.e. 9468 * all pieces that are in the GP estimate can be 9469 * merged and all pieces that are not in a GP estimate 9470 * can be merged, but not disimilar pieces. Combine 9471 * this with taking the highest here and we should 9472 * be ok unless of course the client reneges. Then 9473 * all bets are off. 9474 */ 9475 if (next->r_tim_lastsent[(next->r_rtr_cnt-1)] < 9476 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) 9477 next->r_tim_lastsent[(next->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]; 9478 /* 9479 * And we must keep the newest ack arrival time. 
9480 */ 9481 if (next->r_ack_arrival < 9482 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) 9483 next->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9484 9485 9486 /* We don't need to adjust rsm, it did not change */ 9487 /* Clear out the dup ack count of the remainder */ 9488 rsm->r_dupack = 0; 9489 rsm->r_just_ret = 0; 9490 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9491 /* Now lets make sure our fudge block is right */ 9492 nrsm->r_start = start; 9493 /* Now lets update all the stats and such */ 9494 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 9495 if (rack->app_limited_needs_set) 9496 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 9497 changed += (nrsm->r_end - nrsm->r_start); 9498 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 9499 if (rsm->r_flags & RACK_WAS_LOST) { 9500 int my_chg; 9501 9502 my_chg = (nrsm->r_end - nrsm->r_start); 9503 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 9504 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 9505 if (my_chg <= rack->r_ctl.rc_considered_lost) 9506 rack->r_ctl.rc_considered_lost -= my_chg; 9507 else 9508 rack->r_ctl.rc_considered_lost = 0; 9509 } 9510 if (nrsm->r_flags & RACK_SACK_PASSED) { 9511 rack->r_ctl.rc_reorder_ts = cts; 9512 if (rack->r_ctl.rc_reorder_ts == 0) 9513 rack->r_ctl.rc_reorder_ts = 1; 9514 } 9515 /* 9516 * Now we want to go up from rsm (the 9517 * one left un-acked) to the next one 9518 * in the tmap. We do this so when 9519 * we walk backwards we include marking 9520 * sack-passed on rsm (The one passed in 9521 * is skipped since it is generally called 9522 * on something sacked before removing it 9523 * from the tmap). 9524 */ 9525 if (rsm->r_in_tmap) { 9526 nrsm = TAILQ_NEXT(rsm, r_tnext); 9527 /* 9528 * Now that we have the next 9529 * one walk backwards from there. 9530 */ 9531 if (nrsm && nrsm->r_in_tmap) 9532 rack_log_sack_passed(tp, rack, nrsm, cts); 9533 } 9534 /* Now are we done? */ 9535 if (SEQ_LT(end, next->r_end) || 9536 (end == next->r_end)) { 9537 /* Done with block */ 9538 goto out; 9539 } 9540 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); 9541 counter_u64_add(rack_sack_used_next_merge, 1); 9542 /* Postion for the next block */ 9543 start = next->r_end; 9544 rsm = tqhash_next(rack->r_ctl.tqh, next); 9545 if (rsm == NULL) 9546 goto out; 9547 } else { 9548 /** 9549 * We can't use any hookery here, so we 9550 * need to split the map. We enter like 9551 * so: 9552 * rsm |--------| 9553 * sackblk |-----> 9554 * We will add the new block nrsm and 9555 * that will be the new portion, and then 9556 * fall through after reseting rsm. So we 9557 * split and look like this: 9558 * rsm |----| 9559 * sackblk |-----> 9560 * nrsm |---| 9561 * We then fall through reseting 9562 * rsm to nrsm, so the next block 9563 * picks it up. 9564 */ 9565 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 9566 if (nrsm == NULL) { 9567 /* 9568 * failed XXXrrs what can we do but loose the sack 9569 * info? 
9570 */ 9571 goto out; 9572 } 9573 counter_u64_add(rack_sack_splits, 1); 9574 rack_clone_rsm(rack, nrsm, rsm, start); 9575 rsm->r_just_ret = 0; 9576 #ifndef INVARIANTS 9577 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 9578 #else 9579 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 9580 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 9581 nrsm, insret, rack, rsm); 9582 } 9583 #endif 9584 if (rsm->r_in_tmap) { 9585 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 9586 nrsm->r_in_tmap = 1; 9587 } 9588 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 9589 rsm->r_flags &= (~RACK_HAS_FIN); 9590 /* Position us to point to the new nrsm that starts the sack blk */ 9591 rsm = nrsm; 9592 } 9593 } else { 9594 /* Already sacked this piece */ 9595 counter_u64_add(rack_sack_skipped_acked, 1); 9596 if (end == rsm->r_end) { 9597 /* Done with block */ 9598 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 9599 goto out; 9600 } else if (SEQ_LT(end, rsm->r_end)) { 9601 /* A partial sack to a already sacked block */ 9602 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 9603 goto out; 9604 } else { 9605 /* 9606 * The end goes beyond this guy 9607 * reposition the start to the 9608 * next block. 9609 */ 9610 start = rsm->r_end; 9611 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 9612 if (rsm == NULL) 9613 goto out; 9614 } 9615 } 9616 } 9617 if (SEQ_GEQ(end, rsm->r_end)) { 9618 /** 9619 * The end of this block is either beyond this guy or right 9620 * at this guy. I.e.: 9621 * rsm --- |-----| 9622 * end |-----| 9623 * <or> 9624 * end |---------| 9625 */ 9626 if ((rsm->r_flags & RACK_ACKED) == 0) { 9627 /* 9628 * Is it a TLP of interest? 9629 */ 9630 if ((rsm->r_flags & RACK_TLP) && 9631 (rsm->r_rtr_cnt > 1)) { 9632 /* 9633 * We are splitting a rxt TLP, check 9634 * if we need to save off the start/end 9635 */ 9636 if (rack->rc_last_tlp_acked_set && 9637 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9638 /* 9639 * We already turned this on since we are inside 9640 * the previous one was a partially sack now we 9641 * are getting another one (maybe all of it). 9642 */ 9643 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9644 /* 9645 * Lets make sure we have all of it though. 
9646 */ 9647 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9648 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9649 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9650 rack->r_ctl.last_tlp_acked_end); 9651 } 9652 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9653 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9654 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9655 rack->r_ctl.last_tlp_acked_end); 9656 } 9657 } else { 9658 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9659 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9660 rack->rc_last_tlp_past_cumack = 0; 9661 rack->rc_last_tlp_acked_set = 1; 9662 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9663 } 9664 } 9665 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 9666 changed += (rsm->r_end - rsm->r_start); 9667 /* You get a count for acking a whole segment or more */ 9668 if (rsm->r_flags & RACK_WAS_LOST) { 9669 int my_chg; 9670 9671 my_chg = (rsm->r_end - rsm->r_start); 9672 rsm->r_flags &= ~RACK_WAS_LOST; 9673 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 9674 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 9675 if (my_chg <= rack->r_ctl.rc_considered_lost) 9676 rack->r_ctl.rc_considered_lost -= my_chg; 9677 else 9678 rack->r_ctl.rc_considered_lost = 0; 9679 } 9680 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 9681 if (rsm->r_in_tmap) /* should be true */ 9682 rack_log_sack_passed(tp, rack, rsm, cts); 9683 /* Is Reordering occuring? */ 9684 if (rsm->r_flags & RACK_SACK_PASSED) { 9685 rsm->r_flags &= ~RACK_SACK_PASSED; 9686 rack->r_ctl.rc_reorder_ts = cts; 9687 if (rack->r_ctl.rc_reorder_ts == 0) 9688 rack->r_ctl.rc_reorder_ts = 1; 9689 } 9690 if (rack->app_limited_needs_set) 9691 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 9692 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9693 rsm->r_flags |= RACK_ACKED; 9694 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); 9695 if (rsm->r_in_tmap) { 9696 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 9697 rsm->r_in_tmap = 0; 9698 } 9699 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); 9700 } else { 9701 counter_u64_add(rack_sack_skipped_acked, 1); 9702 } 9703 if (end == rsm->r_end) { 9704 /* This block only - done, setup for next */ 9705 goto out; 9706 } 9707 /* 9708 * There is more not coverend by this rsm move on 9709 * to the next block in the tail queue hash table. 9710 */ 9711 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); 9712 start = rsm->r_end; 9713 rsm = nrsm; 9714 if (rsm == NULL) 9715 goto out; 9716 goto do_rest_ofb; 9717 } 9718 /** 9719 * The end of this sack block is smaller than 9720 * our rsm i.e.: 9721 * rsm --- |-----| 9722 * end |--| 9723 */ 9724 if ((rsm->r_flags & RACK_ACKED) == 0) { 9725 /* 9726 * Is it a TLP of interest? 9727 */ 9728 if ((rsm->r_flags & RACK_TLP) && 9729 (rsm->r_rtr_cnt > 1)) { 9730 /* 9731 * We are splitting a rxt TLP, check 9732 * if we need to save off the start/end 9733 */ 9734 if (rack->rc_last_tlp_acked_set && 9735 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9736 /* 9737 * We already turned this on since we are inside 9738 * the previous one was a partially sack now we 9739 * are getting another one (maybe all of it). 9740 */ 9741 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9742 /* 9743 * Lets make sure we have all of it though. 
9744 */ 9745 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9746 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9747 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9748 rack->r_ctl.last_tlp_acked_end); 9749 } 9750 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9751 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9752 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9753 rack->r_ctl.last_tlp_acked_end); 9754 } 9755 } else { 9756 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9757 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9758 rack->rc_last_tlp_past_cumack = 0; 9759 rack->rc_last_tlp_acked_set = 1; 9760 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9761 } 9762 } 9763 /* 9764 * Hookery can only be used if the two entries 9765 * are in the same bucket and neither one of 9766 * them staddle the bucket line. 9767 */ 9768 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 9769 if (prev && 9770 (rsm->bindex == prev->bindex) && 9771 ((rsm->r_flags & RACK_STRADDLE) == 0) && 9772 ((prev->r_flags & RACK_STRADDLE) == 0) && 9773 ((rsm->r_flags & RACK_IS_PCM) == 0) && 9774 ((prev->r_flags & RACK_IS_PCM) == 0) && 9775 (rsm->r_flags & RACK_IN_GP_WIN) && 9776 (prev->r_flags & RACK_IN_GP_WIN)) 9777 can_use_hookery = 1; 9778 else 9779 can_use_hookery = 0; 9780 if (prev && can_use_hookery && 9781 (prev->r_flags & RACK_ACKED)) { 9782 /** 9783 * Goal, we want the right remainder of rsm to shrink 9784 * in place and span from (rsm->r_start = end) to rsm->r_end. 9785 * We want to expand prev to go all the way 9786 * to prev->r_end <- end. 9787 * so in the tree we have before: 9788 * prev |--------| (acked) 9789 * rsm |-------| (non-acked) 9790 * sackblk |-| 9791 * We churn it so we end up with 9792 * prev |----------| (acked) 9793 * rsm |-----| (non-acked) 9794 * nrsm |-| (temporary) 9795 * 9796 * Note if either prev/rsm is a TLP we don't 9797 * do this. 9798 */ 9799 nrsm = &stack_map; 9800 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 9801 tqhash_update_end(rack->r_ctl.tqh, prev, end); 9802 rsm->r_start = end; 9803 rsm->r_flags |= RACK_SHUFFLED; 9804 prev->r_flags |= RACK_SHUFFLED; 9805 /* Now adjust nrsm (stack copy) to be 9806 * the one that is the small 9807 * piece that was "sacked". 9808 */ 9809 nrsm->r_end = end; 9810 rsm->r_dupack = 0; 9811 /* 9812 * Which timestamp do we keep? It is rather 9813 * important in GP measurements to have the 9814 * accurate end of the send window. 9815 * 9816 * We keep the largest value, which is the newest 9817 * send. We do this in case a segment that is 9818 * joined together and not part of a GP estimate 9819 * later gets expanded into the GP estimate. 9820 * 9821 * We prohibit the merging of unlike kinds i.e. 9822 * all pieces that are in the GP estimate can be 9823 * merged and all pieces that are not in a GP estimate 9824 * can be merged, but not disimilar pieces. Combine 9825 * this with taking the highest here and we should 9826 * be ok unless of course the client reneges. Then 9827 * all bets are off. 9828 */ 9829 if(prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] < 9830 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) { 9831 prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9832 } 9833 /* 9834 * And we must keep the newest ack arrival time. 
9835 */ 9836 9837 if(prev->r_ack_arrival < 9838 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) 9839 prev->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9840 9841 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9842 /* 9843 * Now that the rsm has had its start moved forward 9844 * lets go ahead and get its new place in the world. 9845 */ 9846 rack_setup_offset_for_rsm(rack, prev, rsm); 9847 /* 9848 * Now nrsm is our new little piece 9849 * that is acked (which was merged 9850 * to prev). Update the rtt and changed 9851 * based on that. Also check for reordering. 9852 */ 9853 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 9854 if (rack->app_limited_needs_set) 9855 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 9856 changed += (nrsm->r_end - nrsm->r_start); 9857 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 9858 if (rsm->r_flags & RACK_WAS_LOST) { 9859 int my_chg; 9860 9861 my_chg = (nrsm->r_end - nrsm->r_start); 9862 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 9863 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 9864 if (my_chg <= rack->r_ctl.rc_considered_lost) 9865 rack->r_ctl.rc_considered_lost -= my_chg; 9866 else 9867 rack->r_ctl.rc_considered_lost = 0; 9868 } 9869 if (nrsm->r_flags & RACK_SACK_PASSED) { 9870 rack->r_ctl.rc_reorder_ts = cts; 9871 if (rack->r_ctl.rc_reorder_ts == 0) 9872 rack->r_ctl.rc_reorder_ts = 1; 9873 } 9874 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); 9875 rsm = prev; 9876 counter_u64_add(rack_sack_used_prev_merge, 1); 9877 } else { 9878 /** 9879 * This is the case where our previous 9880 * block is not acked either, so we must 9881 * split the block in two. 9882 */ 9883 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 9884 if (nrsm == NULL) { 9885 /* failed rrs what can we do but loose the sack info? */ 9886 goto out; 9887 } 9888 if ((rsm->r_flags & RACK_TLP) && 9889 (rsm->r_rtr_cnt > 1)) { 9890 /* 9891 * We are splitting a rxt TLP, check 9892 * if we need to save off the start/end 9893 */ 9894 if (rack->rc_last_tlp_acked_set && 9895 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9896 /* 9897 * We already turned this on since this block is inside 9898 * the previous one was a partially sack now we 9899 * are getting another one (maybe all of it). 9900 */ 9901 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9902 /* 9903 * Lets make sure we have all of it though. 9904 */ 9905 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9906 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9907 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9908 rack->r_ctl.last_tlp_acked_end); 9909 } 9910 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9911 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9912 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9913 rack->r_ctl.last_tlp_acked_end); 9914 } 9915 } else { 9916 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9917 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9918 rack->rc_last_tlp_acked_set = 1; 9919 rack->rc_last_tlp_past_cumack = 0; 9920 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9921 } 9922 } 9923 /** 9924 * In this case nrsm becomes 9925 * nrsm->r_start = end; 9926 * nrsm->r_end = rsm->r_end; 9927 * which is un-acked. 9928 * <and> 9929 * rsm->r_end = nrsm->r_start; 9930 * i.e. the remaining un-acked 9931 * piece is left on the left 9932 * hand side. 
9933 * 9934 * So we start like this 9935 * rsm |----------| (not acked) 9936 * sackblk |---| 9937 * build it so we have 9938 * rsm |---| (acked) 9939 * nrsm |------| (not acked) 9940 */ 9941 counter_u64_add(rack_sack_splits, 1); 9942 rack_clone_rsm(rack, nrsm, rsm, end); 9943 rsm->r_flags &= (~RACK_HAS_FIN); 9944 rsm->r_just_ret = 0; 9945 #ifndef INVARIANTS 9946 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 9947 #else 9948 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 9949 panic("Insert in tailq_hash of %p fails ret:% rack:%p rsm:%p", 9950 nrsm, insret, rack, rsm); 9951 } 9952 #endif 9953 if (rsm->r_in_tmap) { 9954 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 9955 nrsm->r_in_tmap = 1; 9956 } 9957 nrsm->r_dupack = 0; 9958 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 9959 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 9960 changed += (rsm->r_end - rsm->r_start); 9961 if (rsm->r_flags & RACK_WAS_LOST) { 9962 int my_chg; 9963 9964 my_chg = (rsm->r_end - rsm->r_start); 9965 rsm->r_flags &= ~RACK_WAS_LOST; 9966 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 9967 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 9968 if (my_chg <= rack->r_ctl.rc_considered_lost) 9969 rack->r_ctl.rc_considered_lost -= my_chg; 9970 else 9971 rack->r_ctl.rc_considered_lost = 0; 9972 } 9973 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 9974 9975 if (rsm->r_in_tmap) /* should be true */ 9976 rack_log_sack_passed(tp, rack, rsm, cts); 9977 /* Is Reordering occuring? */ 9978 if (rsm->r_flags & RACK_SACK_PASSED) { 9979 rsm->r_flags &= ~RACK_SACK_PASSED; 9980 rack->r_ctl.rc_reorder_ts = cts; 9981 if (rack->r_ctl.rc_reorder_ts == 0) 9982 rack->r_ctl.rc_reorder_ts = 1; 9983 } 9984 if (rack->app_limited_needs_set) 9985 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 9986 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9987 rsm->r_flags |= RACK_ACKED; 9988 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); 9989 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__); 9990 if (rsm->r_in_tmap) { 9991 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 9992 rsm->r_in_tmap = 0; 9993 } 9994 } 9995 } else if (start != end){ 9996 /* 9997 * The block was already acked. 9998 */ 9999 counter_u64_add(rack_sack_skipped_acked, 1); 10000 } 10001 out: 10002 if (rsm && 10003 ((rsm->r_flags & RACK_TLP) == 0) && 10004 (rsm->r_flags & RACK_ACKED)) { 10005 /* 10006 * Now can we merge where we worked 10007 * with either the previous or 10008 * next block? 10009 */ 10010 next = tqhash_next(rack->r_ctl.tqh, rsm); 10011 while (next) { 10012 if (next->r_flags & RACK_TLP) 10013 break; 10014 /* Only allow merges between ones in or out of GP window */ 10015 if ((next->r_flags & RACK_IN_GP_WIN) && 10016 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { 10017 break; 10018 } 10019 if ((rsm->r_flags & RACK_IN_GP_WIN) && 10020 ((next->r_flags & RACK_IN_GP_WIN) == 0)) { 10021 break; 10022 } 10023 if (rsm->bindex != next->bindex) 10024 break; 10025 if (rsm->r_flags & RACK_STRADDLE) 10026 break; 10027 if (rsm->r_flags & RACK_IS_PCM) 10028 break; 10029 if (next->r_flags & RACK_STRADDLE) 10030 break; 10031 if (next->r_flags & RACK_IS_PCM) 10032 break; 10033 if (next->r_flags & RACK_ACKED) { 10034 /* yep this and next can be merged */ 10035 rsm = rack_merge_rsm(rack, rsm, next); 10036 next = tqhash_next(rack->r_ctl.tqh, rsm); 10037 } else 10038 break; 10039 } 10040 /* Now what about the previous? 
*/ 10041 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10042 while (prev) { 10043 if (prev->r_flags & RACK_TLP) 10044 break; 10045 /* Only allow merges between ones in or out of GP window */ 10046 if ((prev->r_flags & RACK_IN_GP_WIN) && 10047 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { 10048 break; 10049 } 10050 if ((rsm->r_flags & RACK_IN_GP_WIN) && 10051 ((prev->r_flags & RACK_IN_GP_WIN) == 0)) { 10052 break; 10053 } 10054 if (rsm->bindex != prev->bindex) 10055 break; 10056 if (rsm->r_flags & RACK_STRADDLE) 10057 break; 10058 if (rsm->r_flags & RACK_IS_PCM) 10059 break; 10060 if (prev->r_flags & RACK_STRADDLE) 10061 break; 10062 if (prev->r_flags & RACK_IS_PCM) 10063 break; 10064 if (prev->r_flags & RACK_ACKED) { 10065 /* yep the previous and this can be merged */ 10066 rsm = rack_merge_rsm(rack, prev, rsm); 10067 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10068 } else 10069 break; 10070 } 10071 } 10072 if (used_ref == 0) { 10073 counter_u64_add(rack_sack_proc_all, 1); 10074 } else { 10075 counter_u64_add(rack_sack_proc_short, 1); 10076 } 10077 /* Save off the next one for quick reference. */ 10078 nrsm = tqhash_find(rack->r_ctl.tqh, end); 10079 *prsm = rack->r_ctl.rc_sacklast = nrsm; 10080 return (changed); 10081 } 10082 10083 static void inline 10084 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 10085 { 10086 struct rack_sendmap *tmap; 10087 10088 tmap = NULL; 10089 while (rsm && (rsm->r_flags & RACK_ACKED)) { 10090 /* Its no longer sacked, mark it so */ 10091 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 10092 #ifdef INVARIANTS 10093 if (rsm->r_in_tmap) { 10094 panic("rack:%p rsm:%p flags:0x%x in tmap?", 10095 rack, rsm, rsm->r_flags); 10096 } 10097 #endif 10098 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 10099 /* Rebuild it into our tmap */ 10100 if (tmap == NULL) { 10101 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10102 tmap = rsm; 10103 } else { 10104 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 10105 tmap = rsm; 10106 } 10107 tmap->r_in_tmap = 1; 10108 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 10109 } 10110 /* 10111 * Now lets possibly clear the sack filter so we start 10112 * recognizing sacks that cover this area. 10113 */ 10114 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 10115 10116 } 10117 10118 10119 static void inline 10120 rack_rsm_sender_update(struct tcp_rack *rack, struct tcpcb *tp, struct rack_sendmap *rsm, uint8_t from) 10121 { 10122 /* 10123 * We look at advancing the end send time for our GP 10124 * measurement tracking only as the cumulative acknowledgment 10125 * moves forward. You might wonder about this, why not 10126 * at every transmission or retransmission within the 10127 * GP window update the rc_gp_cumack_ts? Well its rather 10128 * nuanced but basically the GP window *may* expand (as 10129 * it does below) or worse and harder to track it may shrink. 10130 * 10131 * This last makes it impossible to track at the time of 10132 * the send, since you may set forward your rc_gp_cumack_ts 10133 * when you send, because that send *is* in your currently 10134 * "guessed" window, but then it shrinks. Now which was 10135 * the send time of the last bytes in the window, by the 10136 * time you ask that question that part of the sendmap 10137 * is freed. So you don't know and you will have too 10138 * long of send window. 
Instead by updating the time 10139 * marker only when the cumack advances this assures us 10140 * that we will have only the sends in the window of our 10141 * GP measurement. 10142 * 10143 * Another complication from this is the 10144 * merging of sendmap entries. During SACK processing this 10145 * can happen to conserve the sendmap size. That breaks 10146 * everything down in tracking the send window of the GP 10147 * estimate. So to prevent that and keep it working with 10148 * a tiny bit more limited merging, we only allow like 10149 * types to be merged. I.e. if two sends are in the GP window 10150 * then its ok to merge them together. If two sends are not 10151 * in the GP window its ok to merge them together too. Though 10152 * one send in and one send out cannot be merged. We combine 10153 * this with never allowing the shrinking of the GP window when 10154 * we are in recovery so that we can properly calculate the 10155 * sending times. 10156 * 10157 * This all of course seems complicated, because it is.. :) 10158 * 10159 * The cum-ack is being advanced upon the sendmap. 10160 * If we are not doing a GP estimate don't 10161 * proceed. 10162 */ 10163 uint64_t ts; 10164 10165 if ((tp->t_flags & TF_GPUTINPROG) == 0) 10166 return; 10167 /* 10168 * If this sendmap entry is going 10169 * beyond the measurement window we had picked, 10170 * expand the measurement window by that much. 10171 */ 10172 if (SEQ_GT(rsm->r_end, tp->gput_ack)) { 10173 tp->gput_ack = rsm->r_end; 10174 } 10175 /* 10176 * If we have not setup a ack, then we 10177 * have no idea if the newly acked pieces 10178 * will be "in our seq measurement range". If 10179 * it is when we clear the app_limited_needs_set 10180 * flag the timestamp will be updated. 10181 */ 10182 if (rack->app_limited_needs_set) 10183 return; 10184 /* 10185 * Finally, we grab out the latest timestamp 10186 * that this packet was sent and then see 10187 * if: 10188 * a) The packet touches are newly defined GP range. 10189 * b) The time is greater than (newer) than the 10190 * one we currently have. If so we update 10191 * our sending end time window. 10192 * 10193 * Note we *do not* do this at send time. The reason 10194 * is that if you do you *may* pick up a newer timestamp 10195 * for a range you are not going to measure. We project 10196 * out how far and then sometimes modify that to be 10197 * smaller. If that occurs then you will have a send 10198 * that does not belong to the range included. 10199 */ 10200 if ((ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) <= 10201 rack->r_ctl.rc_gp_cumack_ts) 10202 return; 10203 if (rack_in_gp_window(tp, rsm)) { 10204 rack->r_ctl.rc_gp_cumack_ts = ts; 10205 rack_log_gpset(rack, tp->gput_ack, (uint32_t)ts, rsm->r_end, 10206 __LINE__, from, rsm); 10207 } 10208 } 10209 10210 static void 10211 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to, uint64_t acktime) 10212 { 10213 struct rack_sendmap *rsm; 10214 /* 10215 * The ACK point is advancing to th_ack, we must drop off 10216 * the packets in the rack log and calculate any eligble 10217 * RTT's. 10218 */ 10219 10220 if (sack_filter_blks_used(&rack->r_ctl.rack_sf)) { 10221 /* 10222 * If we have some sack blocks in the filter 10223 * lets prune them out by calling sfb with no blocks. 
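 * (i.e. the call below hands the filter a NULL block list with a
 * count of zero, so nothing new is offered; only th_ack is
 * supplied, which is what lets the blocks it was still holding
 * be pruned.)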
10224 */ 10225 sack_filter_blks(tp, &rack->r_ctl.rack_sf, NULL, 0, th_ack); 10226 } 10227 if (SEQ_GT(th_ack, tp->snd_una)) { 10228 /* Clear any app ack remembered settings */ 10229 rack->r_ctl.cleared_app_ack = 0; 10230 } 10231 rack->r_wanted_output = 1; 10232 if (SEQ_GT(th_ack, tp->snd_una)) 10233 rack->r_ctl.last_cumack_advance = acktime; 10234 10235 /* Tend any TLP that has been marked for 1/2 the seq space (its old) */ 10236 if ((rack->rc_last_tlp_acked_set == 1)&& 10237 (rack->rc_last_tlp_past_cumack == 1) && 10238 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { 10239 /* 10240 * We have reached the point where our last rack 10241 * tlp retransmit sequence is ahead of the cum-ack. 10242 * This can only happen when the cum-ack moves all 10243 * the way around (its been a full 2^^31+1 bytes 10244 * or more since we sent a retransmitted TLP). Lets 10245 * turn off the valid flag since its not really valid. 10246 * 10247 * Note since sack's also turn on this event we have 10248 * a complication, we have to wait to age it out until 10249 * the cum-ack is by the TLP before checking which is 10250 * what the next else clause does. 10251 */ 10252 rack_log_dsack_event(rack, 9, __LINE__, 10253 rack->r_ctl.last_tlp_acked_start, 10254 rack->r_ctl.last_tlp_acked_end); 10255 rack->rc_last_tlp_acked_set = 0; 10256 rack->rc_last_tlp_past_cumack = 0; 10257 } else if ((rack->rc_last_tlp_acked_set == 1) && 10258 (rack->rc_last_tlp_past_cumack == 0) && 10259 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { 10260 /* 10261 * It is safe to start aging TLP's out. 10262 */ 10263 rack->rc_last_tlp_past_cumack = 1; 10264 } 10265 /* We do the same for the tlp send seq as well */ 10266 if ((rack->rc_last_sent_tlp_seq_valid == 1) && 10267 (rack->rc_last_sent_tlp_past_cumack == 1) && 10268 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { 10269 rack_log_dsack_event(rack, 9, __LINE__, 10270 rack->r_ctl.last_sent_tlp_seq, 10271 (rack->r_ctl.last_sent_tlp_seq + 10272 rack->r_ctl.last_sent_tlp_len)); 10273 rack->rc_last_sent_tlp_seq_valid = 0; 10274 rack->rc_last_sent_tlp_past_cumack = 0; 10275 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && 10276 (rack->rc_last_sent_tlp_past_cumack == 0) && 10277 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { 10278 /* 10279 * It is safe to start aging TLP's send. 10280 */ 10281 rack->rc_last_sent_tlp_past_cumack = 1; 10282 } 10283 more: 10284 rsm = tqhash_min(rack->r_ctl.tqh); 10285 if (rsm == NULL) { 10286 if ((th_ack - 1) == tp->iss) { 10287 /* 10288 * For the SYN incoming case we will not 10289 * have called tcp_output for the sending of 10290 * the SYN, so there will be no map. All 10291 * other cases should probably be a panic. 10292 */ 10293 return; 10294 } 10295 if (tp->t_flags & TF_SENTFIN) { 10296 /* if we sent a FIN we often will not have map */ 10297 return; 10298 } 10299 #ifdef INVARIANTS 10300 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u\n", 10301 tp, 10302 tp->t_state, th_ack, rack, 10303 tp->snd_una, tp->snd_max); 10304 #endif 10305 return; 10306 } 10307 if (SEQ_LT(th_ack, rsm->r_start)) { 10308 /* Huh map is missing this */ 10309 #ifdef INVARIANTS 10310 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 10311 rsm->r_start, 10312 th_ack, tp->t_state, rack->r_state); 10313 #endif 10314 return; 10315 } 10316 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); 10317 10318 /* Now was it a retransmitted TLP? 
*/ 10319 if ((rsm->r_flags & RACK_TLP) && 10320 (rsm->r_rtr_cnt > 1)) { 10321 /* 10322 * Yes, this rsm was a TLP and retransmitted, remember that 10323 * since if a DSACK comes back on this we don't want 10324 * to think of it as a reordered segment. This may 10325 * get updated again with possibly even other TLPs 10326 * in flight, but thats ok. Only when we don't send 10327 * a retransmitted TLP for 1/2 the sequences space 10328 * will it get turned off (above). 10329 */ 10330 if (rack->rc_last_tlp_acked_set && 10331 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 10332 /* 10333 * We already turned this on since the end matches, 10334 * the previous one was a partially ack now we 10335 * are getting another one (maybe all of it). 10336 */ 10337 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 10338 /* 10339 * Lets make sure we have all of it though. 10340 */ 10341 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 10342 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10343 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10344 rack->r_ctl.last_tlp_acked_end); 10345 } 10346 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 10347 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10348 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10349 rack->r_ctl.last_tlp_acked_end); 10350 } 10351 } else { 10352 rack->rc_last_tlp_past_cumack = 1; 10353 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10354 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10355 rack->rc_last_tlp_acked_set = 1; 10356 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 10357 } 10358 } 10359 /* Now do we consume the whole thing? */ 10360 rack->r_ctl.last_tmit_time_acked = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 10361 if (SEQ_GEQ(th_ack, rsm->r_end)) { 10362 /* Its all consumed. */ 10363 uint32_t left; 10364 uint8_t newly_acked; 10365 10366 if (rsm->r_flags & RACK_WAS_LOST) { 10367 /* 10368 * This can happen when we marked it as lost 10369 * and yet before retransmitting we get an ack 10370 * which can happen due to reordering. 10371 */ 10372 rsm->r_flags &= ~RACK_WAS_LOST; 10373 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), 10374 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 10375 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) 10376 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; 10377 else 10378 rack->r_ctl.rc_considered_lost = 0; 10379 } 10380 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 10381 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 10382 rsm->r_rtr_bytes = 0; 10383 /* 10384 * Record the time of highest cumack sent if its in our measurement 10385 * window and possibly bump out the end. 10386 */ 10387 rack_rsm_sender_update(rack, tp, rsm, 4); 10388 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); 10389 if (rsm->r_in_tmap) { 10390 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10391 rsm->r_in_tmap = 0; 10392 } 10393 newly_acked = 1; 10394 if (rsm->r_flags & RACK_ACKED) { 10395 /* 10396 * It was acked on the scoreboard -- remove 10397 * it from total 10398 */ 10399 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 10400 newly_acked = 0; 10401 } else if (rsm->r_flags & RACK_SACK_PASSED) { 10402 /* 10403 * There are segments ACKED on the 10404 * scoreboard further up. We are seeing 10405 * reordering. 
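 * Example: A, B and C were sent, C got SACKed (marking B as
 * SACK_PASSED), and now the cum-ack covers B anyway.  B was only
 * reordered, not lost, so below we note the reorder time and, if
 * nothing has been sent since entering recovery, arm
 * r_might_revert.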
10406 */ 10407 rsm->r_flags &= ~RACK_SACK_PASSED; 10408 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10409 rsm->r_flags |= RACK_ACKED; 10410 rack->r_ctl.rc_reorder_ts = cts; 10411 if (rack->r_ctl.rc_reorder_ts == 0) 10412 rack->r_ctl.rc_reorder_ts = 1; 10413 if (rack->r_ent_rec_ns) { 10414 /* 10415 * We have sent no more, and we saw an sack 10416 * then ack arrive. 10417 */ 10418 rack->r_might_revert = 1; 10419 } 10420 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); 10421 } else { 10422 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); 10423 } 10424 if ((rsm->r_flags & RACK_TO_REXT) && 10425 (tp->t_flags & TF_RCVD_TSTMP) && 10426 (to->to_flags & TOF_TS) && 10427 (to->to_tsecr != 0) && 10428 (tp->t_flags & TF_PREVVALID)) { 10429 /* 10430 * We can use the timestamp to see 10431 * if this retransmission was from the 10432 * first transmit. If so we made a mistake. 10433 */ 10434 tp->t_flags &= ~TF_PREVVALID; 10435 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 10436 /* The first transmit is what this ack is for */ 10437 rack_cong_signal(tp, CC_RTO_ERR, th_ack, __LINE__); 10438 } 10439 } 10440 left = th_ack - rsm->r_end; 10441 if (rack->app_limited_needs_set && newly_acked) 10442 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 10443 /* Free back to zone */ 10444 rack_free(rack, rsm); 10445 if (left) { 10446 goto more; 10447 } 10448 /* Check for reneging */ 10449 rsm = tqhash_min(rack->r_ctl.tqh); 10450 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 10451 /* 10452 * The peer has moved snd_una up to 10453 * the edge of this send, i.e. one 10454 * that it had previously acked. The only 10455 * way that can be true if the peer threw 10456 * away data (space issues) that it had 10457 * previously sacked (else it would have 10458 * given us snd_una up to (rsm->r_end). 10459 * We need to undo the acked markings here. 10460 * 10461 * Note we have to look to make sure th_ack is 10462 * our rsm->r_start in case we get an old ack 10463 * where th_ack is behind snd_una. 10464 */ 10465 rack_peer_reneges(rack, rsm, th_ack); 10466 } 10467 return; 10468 } 10469 if (rsm->r_flags & RACK_ACKED) { 10470 /* 10471 * It was acked on the scoreboard -- remove it from 10472 * total for the part being cum-acked. 10473 */ 10474 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 10475 } else { 10476 rack_update_pcm_ack(rack, 1, rsm->r_start, th_ack); 10477 } 10478 /* And what about the lost flag? */ 10479 if (rsm->r_flags & RACK_WAS_LOST) { 10480 /* 10481 * This can happen when we marked it as lost 10482 * and yet before retransmitting we get an ack 10483 * which can happen due to reordering. In this 10484 * case its only a partial ack of the send. 10485 */ 10486 KASSERT((rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)), 10487 ("rsm:%p rack:%p rc_considered_lost goes negative th_ack:%u", rsm, rack, th_ack)); 10488 if (rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)) 10489 rack->r_ctl.rc_considered_lost -= th_ack - rsm->r_start; 10490 else 10491 rack->r_ctl.rc_considered_lost = 0; 10492 } 10493 /* 10494 * Clear the dup ack count for 10495 * the piece that remains. 10496 */ 10497 rsm->r_dupack = 0; 10498 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 10499 if (rsm->r_rtr_bytes) { 10500 /* 10501 * It was retransmitted adjust the 10502 * sack holes for what was acked. 
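 * Worked example (made-up numbers): r_start=1000, th_ack=1600 and
 * r_rtr_bytes=600 gives ack_am=600, which covers everything that
 * was retransmitted, so 600 comes off rc_holes_rxt and
 * r_rtr_bytes drops to zero.  A partial ack smaller than
 * r_rtr_bytes changes nothing here; the remainder is settled when
 * the whole entry is consumed and freed.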
10503 */ 10504 int ack_am; 10505 10506 ack_am = (th_ack - rsm->r_start); 10507 if (ack_am >= rsm->r_rtr_bytes) { 10508 rack->r_ctl.rc_holes_rxt -= ack_am; 10509 rsm->r_rtr_bytes -= ack_am; 10510 } 10511 } 10512 /* 10513 * Update where the piece starts and record 10514 * the time of send of highest cumack sent if 10515 * its in our GP range. 10516 */ 10517 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 10518 /* Now we need to move our offset forward too */ 10519 if (rsm->m && 10520 ((rsm->orig_m_len != rsm->m->m_len) || 10521 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { 10522 /* Fix up the orig_m_len and possibly the mbuf offset */ 10523 rack_adjust_orig_mlen(rsm); 10524 } 10525 rsm->soff += (th_ack - rsm->r_start); 10526 rack_rsm_sender_update(rack, tp, rsm, 5); 10527 /* The trim will move th_ack into r_start for us */ 10528 tqhash_trim(rack->r_ctl.tqh, th_ack); 10529 /* Now do we need to move the mbuf fwd too? */ 10530 { 10531 struct mbuf *m; 10532 uint32_t soff; 10533 10534 m = rsm->m; 10535 soff = rsm->soff; 10536 if (m) { 10537 while (soff >= m->m_len) { 10538 soff -= m->m_len; 10539 KASSERT((m->m_next != NULL), 10540 (" rsm:%p off:%u soff:%u m:%p", 10541 rsm, rsm->soff, soff, m)); 10542 m = m->m_next; 10543 if (m == NULL) { 10544 /* 10545 * This is a fall-back that prevents a panic. In reality 10546 * we should be able to walk the mbuf's and find our place. 10547 * At this point snd_una has not been updated with the sbcut() yet 10548 * but tqhash_trim did update rsm->r_start so the offset calcuation 10549 * should work fine. This is undesirable since we will take cache 10550 * hits to access the socket buffer. And even more puzzling is that 10551 * it happens occasionally. It should not :( 10552 */ 10553 m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 10554 (rsm->r_start - tp->snd_una), 10555 &soff); 10556 break; 10557 } 10558 } 10559 /* 10560 * Now save in our updated values. 10561 */ 10562 rsm->m = m; 10563 rsm->soff = soff; 10564 rsm->orig_m_len = rsm->m->m_len; 10565 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 10566 } 10567 } 10568 if (rack->app_limited_needs_set && 10569 SEQ_GEQ(th_ack, tp->gput_seq)) 10570 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 10571 } 10572 10573 static void 10574 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 10575 { 10576 struct rack_sendmap *rsm; 10577 int sack_pass_fnd = 0; 10578 10579 if (rack->r_might_revert) { 10580 /* 10581 * Ok we have reordering, have not sent anything, we 10582 * might want to revert the congestion state if nothing 10583 * further has SACK_PASSED on it. Lets check. 10584 * 10585 * We also get here when we have DSACKs come in for 10586 * all the data that we FR'd. Note that a rxt or tlp 10587 * timer clears this from happening. 10588 */ 10589 10590 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 10591 if (rsm->r_flags & RACK_SACK_PASSED) { 10592 sack_pass_fnd = 1; 10593 break; 10594 } 10595 } 10596 if (sack_pass_fnd == 0) { 10597 /* 10598 * We went into recovery 10599 * incorrectly due to reordering! 
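 * The block below undoes the damage: ssthresh is restored to the
 * value saved when recovery was (wrongly) entered, snd_recover is
 * pulled back to snd_una and we exit recovery, so a spurious loss
 * signal does not leave the connection permanently slowed.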
10600 */ 10601 int orig_cwnd; 10602 10603 rack->r_ent_rec_ns = 0; 10604 orig_cwnd = tp->snd_cwnd; 10605 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 10606 tp->snd_recover = tp->snd_una; 10607 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 10608 if (IN_RECOVERY(tp->t_flags)) { 10609 rack_exit_recovery(tp, rack, 3); 10610 if ((rack->rto_from_rec == 1) && (rack_ssthresh_rest_rto_rec != 0) ){ 10611 /* 10612 * We were in recovery, had an RTO 10613 * and then re-entered recovery (more sack's arrived) 10614 * and we have properly recorded the old ssthresh from 10615 * the first recovery. We want to be able to slow-start 10616 * back to this level. The ssthresh from the timeout 10617 * and then back into recovery will end up most likely 10618 * to be min(cwnd=1mss, 2mss). Which makes it basically 10619 * so we get no slow-start after our RTO. 10620 */ 10621 rack->rto_from_rec = 0; 10622 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) 10623 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; 10624 } 10625 } 10626 } 10627 rack->r_might_revert = 0; 10628 } 10629 } 10630 10631 10632 static int 10633 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 10634 { 10635 10636 uint32_t am, l_end; 10637 int was_tlp = 0; 10638 10639 if (SEQ_GT(end, start)) 10640 am = end - start; 10641 else 10642 am = 0; 10643 if ((rack->rc_last_tlp_acked_set ) && 10644 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && 10645 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { 10646 /* 10647 * The DSACK is because of a TLP which we don't 10648 * do anything with the reordering window over since 10649 * it was not reordering that caused the DSACK but 10650 * our previous retransmit TLP. 10651 */ 10652 rack_log_dsack_event(rack, 7, __LINE__, start, end); 10653 was_tlp = 1; 10654 goto skip_dsack_round; 10655 } 10656 if (rack->rc_last_sent_tlp_seq_valid) { 10657 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; 10658 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && 10659 (SEQ_LEQ(end, l_end))) { 10660 /* 10661 * This dsack is from the last sent TLP, ignore it 10662 * for reordering purposes. 10663 */ 10664 rack_log_dsack_event(rack, 7, __LINE__, start, end); 10665 was_tlp = 1; 10666 goto skip_dsack_round; 10667 } 10668 } 10669 if (rack->rc_dsack_round_seen == 0) { 10670 rack->rc_dsack_round_seen = 1; 10671 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; 10672 rack->r_ctl.num_dsack++; 10673 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ 10674 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); 10675 } 10676 skip_dsack_round: 10677 /* 10678 * We keep track of how many DSACK blocks we get 10679 * after a recovery incident. 10680 */ 10681 rack->r_ctl.dsack_byte_cnt += am; 10682 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 10683 rack->r_ctl.retran_during_recovery && 10684 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 10685 /* 10686 * False recovery most likely culprit is reordering. If 10687 * nothing else is missing we need to revert. 
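 * Illustrative check: if 3000 bytes were retransmitted during
 * recovery and DSACKs have since reported 3000 or more duplicate
 * bytes, every retransmission was apparently unneeded, so treat
 * the recovery as spurious and let rack_handle_might_revert()
 * undo it.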
10688 */ 10689 rack->r_might_revert = 1; 10690 rack_handle_might_revert(rack->rc_tp, rack); 10691 rack->r_might_revert = 0; 10692 rack->r_ctl.retran_during_recovery = 0; 10693 rack->r_ctl.dsack_byte_cnt = 0; 10694 } 10695 return (was_tlp); 10696 } 10697 10698 static uint32_t 10699 do_rack_compute_pipe(struct tcpcb *tp, struct tcp_rack *rack, uint32_t snd_una) 10700 { 10701 return (((tp->snd_max - snd_una) - 10702 (rack->r_ctl.rc_sacked + rack->r_ctl.rc_considered_lost)) + rack->r_ctl.rc_holes_rxt); 10703 } 10704 10705 static int32_t 10706 rack_compute_pipe(struct tcpcb *tp) 10707 { 10708 return ((int32_t)do_rack_compute_pipe(tp, 10709 (struct tcp_rack *)tp->t_fb_ptr, 10710 tp->snd_una)); 10711 } 10712 10713 static void 10714 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 10715 { 10716 /* Deal with changed and PRR here (in recovery only) */ 10717 uint32_t pipe, snd_una; 10718 10719 rack->r_ctl.rc_prr_delivered += changed; 10720 10721 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 10722 /* 10723 * It is all outstanding, we are application limited 10724 * and thus we don't need more room to send anything. 10725 * Note we use tp->snd_una here and not th_ack because 10726 * the data as yet not been cut from the sb. 10727 */ 10728 rack->r_ctl.rc_prr_sndcnt = 0; 10729 return; 10730 } 10731 /* Compute prr_sndcnt */ 10732 if (SEQ_GT(tp->snd_una, th_ack)) { 10733 snd_una = tp->snd_una; 10734 } else { 10735 snd_una = th_ack; 10736 } 10737 pipe = do_rack_compute_pipe(tp, rack, snd_una); 10738 if (pipe > tp->snd_ssthresh) { 10739 long sndcnt; 10740 10741 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 10742 if (rack->r_ctl.rc_prr_recovery_fs > 0) 10743 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 10744 else { 10745 rack->r_ctl.rc_prr_sndcnt = 0; 10746 rack_log_to_prr(rack, 9, 0, __LINE__); 10747 sndcnt = 0; 10748 } 10749 sndcnt++; 10750 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 10751 sndcnt -= rack->r_ctl.rc_prr_out; 10752 else 10753 sndcnt = 0; 10754 rack->r_ctl.rc_prr_sndcnt = sndcnt; 10755 rack_log_to_prr(rack, 10, 0, __LINE__); 10756 } else { 10757 uint32_t limit; 10758 10759 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 10760 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 10761 else 10762 limit = 0; 10763 if (changed > limit) 10764 limit = changed; 10765 limit += ctf_fixed_maxseg(tp); 10766 if (tp->snd_ssthresh > pipe) { 10767 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 10768 rack_log_to_prr(rack, 11, 0, __LINE__); 10769 } else { 10770 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 10771 rack_log_to_prr(rack, 12, 0, __LINE__); 10772 } 10773 } 10774 } 10775 10776 static void 10777 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck, 10778 int *dsack_seen, int *sacks_seen) 10779 { 10780 uint32_t changed; 10781 struct tcp_rack *rack; 10782 struct rack_sendmap *rsm; 10783 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 10784 register uint32_t th_ack; 10785 int32_t i, j, k, num_sack_blks = 0; 10786 uint32_t cts, acked, ack_point; 10787 int loop_start = 0; 10788 uint32_t tsused; 10789 uint32_t segsiz; 10790 10791 10792 INP_WLOCK_ASSERT(tptoinpcb(tp)); 10793 if (tcp_get_flags(th) & TH_RST) { 10794 /* We don't log resets */ 10795 return; 10796 } 10797 rack = (struct tcp_rack *)tp->t_fb_ptr; 10798 cts = tcp_get_usecs(NULL); 10799 rsm = tqhash_min(rack->r_ctl.tqh); 10800 changed = 0; 10801 th_ack = 
th->th_ack; 10802 segsiz = ctf_fixed_maxseg(rack->rc_tp); 10803 if (BYTES_THIS_ACK(tp, th) >= segsiz) { 10804 /* 10805 * You only get credit for 10806 * MSS and greater (and you get extra 10807 * credit for larger cum-ack moves). 10808 */ 10809 int ac; 10810 10811 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 10812 counter_u64_add(rack_ack_total, ac); 10813 } 10814 if (SEQ_GT(th_ack, tp->snd_una)) { 10815 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 10816 tp->t_acktime = ticks; 10817 } 10818 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 10819 changed = th_ack - rsm->r_start; 10820 if (changed) { 10821 rack_process_to_cumack(tp, rack, th_ack, cts, to, 10822 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); 10823 } 10824 if ((to->to_flags & TOF_SACK) == 0) { 10825 /* We are done nothing left and no sack. */ 10826 rack_handle_might_revert(tp, rack); 10827 /* 10828 * For cases where we struck a dup-ack 10829 * with no SACK, add to the changes so 10830 * PRR will work right. 10831 */ 10832 if (dup_ack_struck && (changed == 0)) { 10833 changed += ctf_fixed_maxseg(rack->rc_tp); 10834 } 10835 goto out; 10836 } 10837 /* Sack block processing */ 10838 if (SEQ_GT(th_ack, tp->snd_una)) 10839 ack_point = th_ack; 10840 else 10841 ack_point = tp->snd_una; 10842 for (i = 0; i < to->to_nsacks; i++) { 10843 bcopy((to->to_sacks + i * TCPOLEN_SACK), 10844 &sack, sizeof(sack)); 10845 sack.start = ntohl(sack.start); 10846 sack.end = ntohl(sack.end); 10847 if (SEQ_GT(sack.end, sack.start) && 10848 SEQ_GT(sack.start, ack_point) && 10849 SEQ_LT(sack.start, tp->snd_max) && 10850 SEQ_GT(sack.end, ack_point) && 10851 SEQ_LEQ(sack.end, tp->snd_max)) { 10852 sack_blocks[num_sack_blks] = sack; 10853 num_sack_blks++; 10854 } else if (SEQ_LEQ(sack.start, th_ack) && 10855 SEQ_LEQ(sack.end, th_ack)) { 10856 int was_tlp; 10857 10858 if (dsack_seen != NULL) 10859 *dsack_seen = 1; 10860 was_tlp = rack_note_dsack(rack, sack.start, sack.end); 10861 /* 10862 * Its a D-SACK block. 10863 */ 10864 tcp_record_dsack(tp, sack.start, sack.end, was_tlp); 10865 } 10866 } 10867 if (rack->rc_dsack_round_seen) { 10868 /* Is the dsack roound over? */ 10869 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { 10870 /* Yes it is */ 10871 rack->rc_dsack_round_seen = 0; 10872 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 10873 } 10874 } 10875 /* 10876 * Sort the SACK blocks so we can update the rack scoreboard with 10877 * just one pass. 10878 */ 10879 num_sack_blks = sack_filter_blks(tp, &rack->r_ctl.rack_sf, sack_blocks, 10880 num_sack_blks, th->th_ack); 10881 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 10882 if (sacks_seen != NULL) 10883 *sacks_seen = num_sack_blks; 10884 if (num_sack_blks == 0) { 10885 /* Nothing to sack, but we need to update counts */ 10886 goto out_with_totals; 10887 } 10888 /* Its a sack of some sort */ 10889 if (num_sack_blks < 2) { 10890 /* Only one, we don't need to sort */ 10891 goto do_sack_work; 10892 } 10893 /* Sort the sacks */ 10894 for (i = 0; i < num_sack_blks; i++) { 10895 for (j = i + 1; j < num_sack_blks; j++) { 10896 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 10897 sack = sack_blocks[i]; 10898 sack_blocks[i] = sack_blocks[j]; 10899 sack_blocks[j] = sack; 10900 } 10901 } 10902 } 10903 /* 10904 * Now are any of the sack block ends the same (yes some 10905 * implementations send these)? 
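 * Example: [100, 300) and [200, 300) share an end; the loop below
 * keeps one block with the smaller start ([100, 300)), slides the
 * rest of the array down over the duplicate and rescans with one
 * fewer block.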
10906 */ 10907 again: 10908 if (num_sack_blks == 0) 10909 goto out_with_totals; 10910 if (num_sack_blks > 1) { 10911 for (i = 0; i < num_sack_blks; i++) { 10912 for (j = i + 1; j < num_sack_blks; j++) { 10913 if (sack_blocks[i].end == sack_blocks[j].end) { 10914 /* 10915 * Ok these two have the same end we 10916 * want the smallest end and then 10917 * throw away the larger and start 10918 * again. 10919 */ 10920 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 10921 /* 10922 * The second block covers 10923 * more area use that 10924 */ 10925 sack_blocks[i].start = sack_blocks[j].start; 10926 } 10927 /* 10928 * Now collapse out the dup-sack and 10929 * lower the count 10930 */ 10931 for (k = (j + 1); k < num_sack_blks; k++) { 10932 sack_blocks[j].start = sack_blocks[k].start; 10933 sack_blocks[j].end = sack_blocks[k].end; 10934 j++; 10935 } 10936 num_sack_blks--; 10937 goto again; 10938 } 10939 } 10940 } 10941 } 10942 do_sack_work: 10943 /* 10944 * First lets look to see if 10945 * we have retransmitted and 10946 * can use the transmit next? 10947 */ 10948 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 10949 if (rsm && 10950 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 10951 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 10952 /* 10953 * We probably did the FR and the next 10954 * SACK in continues as we would expect. 10955 */ 10956 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, segsiz); 10957 if (acked) { 10958 rack->r_wanted_output = 1; 10959 changed += acked; 10960 } 10961 if (num_sack_blks == 1) { 10962 /* 10963 * This is what we would expect from 10964 * a normal implementation to happen 10965 * after we have retransmitted the FR, 10966 * i.e the sack-filter pushes down 10967 * to 1 block and the next to be retransmitted 10968 * is the sequence in the sack block (has more 10969 * are acked). Count this as ACK'd data to boost 10970 * up the chances of recovering any false positives. 10971 */ 10972 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 10973 counter_u64_add(rack_express_sack, 1); 10974 goto out_with_totals; 10975 } else { 10976 /* 10977 * Start the loop through the 10978 * rest of blocks, past the first block. 10979 */ 10980 loop_start = 1; 10981 } 10982 } 10983 counter_u64_add(rack_sack_total, 1); 10984 rsm = rack->r_ctl.rc_sacklast; 10985 for (i = loop_start; i < num_sack_blks; i++) { 10986 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, segsiz); 10987 if (acked) { 10988 rack->r_wanted_output = 1; 10989 changed += acked; 10990 } 10991 } 10992 out_with_totals: 10993 if (num_sack_blks > 1) { 10994 /* 10995 * You get an extra stroke if 10996 * you have more than one sack-blk, this 10997 * could be where we are skipping forward 10998 * and the sack-filter is still working, or 10999 * it could be an attacker constantly 11000 * moving us. 11001 */ 11002 counter_u64_add(rack_move_some, 1); 11003 } 11004 out: 11005 if (changed) { 11006 /* Something changed cancel the rack timer */ 11007 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11008 } 11009 tsused = tcp_get_usecs(NULL); 11010 rsm = tcp_rack_output(tp, rack, tsused); 11011 if ((!IN_FASTRECOVERY(tp->t_flags)) && 11012 rsm && 11013 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { 11014 /* Enter recovery */ 11015 entered_recovery = 1; 11016 rack_cong_signal(tp, CC_NDUPACK, th_ack, __LINE__); 11017 /* 11018 * When we enter recovery we need to assure we send 11019 * one packet. 
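 * (With PRR enabled that is done below by priming rc_prr_sndcnt
 * with one MSS; on later acks while still in recovery,
 * rack_update_prr() meters out further sends as deliveries are
 * reported.)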
11020 */ 11021 if (rack->rack_no_prr == 0) { 11022 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 11023 rack_log_to_prr(rack, 8, 0, __LINE__); 11024 } 11025 rack->r_timer_override = 1; 11026 rack->r_early = 0; 11027 rack->r_ctl.rc_agg_early = 0; 11028 } else if (IN_FASTRECOVERY(tp->t_flags) && 11029 rsm && 11030 (rack->r_rr_config == 3)) { 11031 /* 11032 * Assure we can output and we get no 11033 * remembered pace time except the retransmit. 11034 */ 11035 rack->r_timer_override = 1; 11036 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 11037 rack->r_ctl.rc_resend = rsm; 11038 } 11039 if (IN_FASTRECOVERY(tp->t_flags) && 11040 (rack->rack_no_prr == 0) && 11041 (entered_recovery == 0)) { 11042 rack_update_prr(tp, rack, changed, th_ack); 11043 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 11044 ((tcp_in_hpts(rack->rc_tp) == 0) && 11045 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 11046 /* 11047 * If you are pacing output you don't want 11048 * to override. 11049 */ 11050 rack->r_early = 0; 11051 rack->r_ctl.rc_agg_early = 0; 11052 rack->r_timer_override = 1; 11053 } 11054 } 11055 } 11056 11057 static void 11058 rack_strike_dupack(struct tcp_rack *rack, tcp_seq th_ack) 11059 { 11060 struct rack_sendmap *rsm; 11061 11062 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 11063 while (rsm) { 11064 /* 11065 * We need to skip anything already set 11066 * to be retransmitted. 11067 */ 11068 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 11069 (rsm->r_flags & RACK_MUST_RXT)) { 11070 rsm = TAILQ_NEXT(rsm, r_tnext); 11071 continue; 11072 } 11073 break; 11074 } 11075 if (rsm && (rsm->r_dupack < 0xff)) { 11076 rsm->r_dupack++; 11077 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 11078 struct timeval tv; 11079 uint32_t cts; 11080 /* 11081 * Here we see if we need to retransmit. For 11082 * a SACK type connection if enough time has passed 11083 * we will get a return of the rsm. For a non-sack 11084 * connection we will get the rsm returned if the 11085 * dupack value is 3 or more. 11086 */ 11087 cts = tcp_get_usecs(&tv); 11088 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); 11089 if (rack->r_ctl.rc_resend != NULL) { 11090 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { 11091 rack_cong_signal(rack->rc_tp, CC_NDUPACK, 11092 th_ack, __LINE__); 11093 } 11094 rack->r_wanted_output = 1; 11095 rack->r_timer_override = 1; 11096 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 11097 } 11098 } else { 11099 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 11100 } 11101 } 11102 } 11103 11104 static void 11105 rack_check_bottom_drag(struct tcpcb *tp, 11106 struct tcp_rack *rack, 11107 struct socket *so) 11108 { 11109 /* 11110 * So what is dragging bottom? 11111 * 11112 * Dragging bottom means you were under pacing and had a 11113 * delay in processing inbound acks waiting on our pacing 11114 * timer to expire. While you were waiting all of the acknowledgments 11115 * for the packets you sent have arrived. This means we are pacing 11116 * way underneath the bottleneck to the point where our Goodput 11117 * measurements stop working, since they require more than one 11118 * ack (usually at least 8 packets worth with multiple acks so we can 11119 * gauge the inter-ack times). If that occurs we have a real problem 11120 * since we are stuck in a hole that we can't get out of without 11121 * something speeding us up. 11122 * 11123 * We also check to see if we are widdling down to just one segment 11124 * outstanding. 
If this occurs and we have room to send in our cwnd/rwnd 11125 * then we are adding the delayed ack interval into our measurments and 11126 * we need to speed up slightly. 11127 */ 11128 uint32_t segsiz, minseg; 11129 11130 segsiz = ctf_fixed_maxseg(tp); 11131 minseg = segsiz; 11132 if (tp->snd_max == tp->snd_una) { 11133 /* 11134 * We are doing dynamic pacing and we are way 11135 * under. Basically everything got acked while 11136 * we were still waiting on the pacer to expire. 11137 * 11138 * This means we need to boost the b/w in 11139 * addition to any earlier boosting of 11140 * the multiplier. 11141 */ 11142 uint64_t lt_bw; 11143 11144 tcp_trace_point(rack->rc_tp, TCP_TP_PACED_BOTTOM); 11145 lt_bw = rack_get_lt_bw(rack); 11146 rack->rc_dragged_bottom = 1; 11147 rack_validate_multipliers_at_or_above100(rack); 11148 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && 11149 (rack->dis_lt_bw == 0) && 11150 (rack->use_lesser_lt_bw == 0) && 11151 (lt_bw > 0)) { 11152 /* 11153 * Lets use the long-term b/w we have 11154 * been getting as a base. 11155 */ 11156 if (rack->rc_gp_filled == 0) { 11157 if (lt_bw > ONE_POINT_TWO_MEG) { 11158 /* 11159 * If we have no measurement 11160 * don't let us set in more than 11161 * 1.2Mbps. If we are still too 11162 * low after pacing with this we 11163 * will hopefully have a max b/w 11164 * available to sanity check things. 11165 */ 11166 lt_bw = ONE_POINT_TWO_MEG; 11167 } 11168 rack->r_ctl.rc_rtt_diff = 0; 11169 rack->r_ctl.gp_bw = lt_bw; 11170 rack->rc_gp_filled = 1; 11171 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 11172 rack->r_ctl.num_measurements = RACK_REQ_AVG; 11173 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 11174 } else if (lt_bw > rack->r_ctl.gp_bw) { 11175 rack->r_ctl.rc_rtt_diff = 0; 11176 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 11177 rack->r_ctl.num_measurements = RACK_REQ_AVG; 11178 rack->r_ctl.gp_bw = lt_bw; 11179 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 11180 } else 11181 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11182 if ((rack->gp_ready == 0) && 11183 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 11184 /* We have enough measurements now */ 11185 rack->gp_ready = 1; 11186 if (rack->dgp_on || 11187 rack->rack_hibeta) 11188 rack_set_cc_pacing(rack); 11189 if (rack->defer_options) 11190 rack_apply_deferred_options(rack); 11191 } 11192 } else { 11193 /* 11194 * zero rtt possibly?, settle for just an old increase. 11195 */ 11196 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11197 } 11198 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 11199 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), 11200 minseg)) && 11201 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && 11202 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && 11203 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= 11204 (segsiz * rack_req_segs))) { 11205 /* 11206 * We are doing dynamic GP pacing and 11207 * we have everything except 1MSS or less 11208 * bytes left out. We are still pacing away. 11209 * And there is data that could be sent, This 11210 * means we are inserting delayed ack time in 11211 * our measurements because we are pacing too slow. 
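 * Example: with roughly one MSS left un-acked the peer will often
 * sit on its ACK until the delayed-ack timer fires; that wait is
 * folded into the goodput sample and drags the estimate down, so
 * below we note the drag and nudge the pacing multiplier back up
 * to climb out of it.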
11212 */ 11213 rack_validate_multipliers_at_or_above100(rack); 11214 rack->rc_dragged_bottom = 1; 11215 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11216 } 11217 } 11218 11219 #ifdef TCP_REQUEST_TRK 11220 static void 11221 rack_log_hybrid(struct tcp_rack *rack, uint32_t seq, 11222 struct tcp_sendfile_track *cur, uint8_t mod, int line, int err) 11223 { 11224 int do_log; 11225 11226 do_log = tcp_bblogging_on(rack->rc_tp); 11227 if (do_log == 0) { 11228 if ((do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) )== 0) 11229 return; 11230 /* We only allow the three below with point logging on */ 11231 if ((mod != HYBRID_LOG_RULES_APP) && 11232 (mod != HYBRID_LOG_RULES_SET) && 11233 (mod != HYBRID_LOG_REQ_COMP)) 11234 return; 11235 11236 } 11237 if (do_log) { 11238 union tcp_log_stackspecific log; 11239 struct timeval tv; 11240 11241 /* Convert our ms to a microsecond */ 11242 memset(&log, 0, sizeof(log)); 11243 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 11244 log.u_bbr.flex1 = seq; 11245 log.u_bbr.cwnd_gain = line; 11246 if (cur != NULL) { 11247 uint64_t off; 11248 11249 log.u_bbr.flex2 = cur->start_seq; 11250 log.u_bbr.flex3 = cur->end_seq; 11251 log.u_bbr.flex4 = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 11252 log.u_bbr.flex5 = (uint32_t)(cur->localtime & 0x00000000ffffffff); 11253 log.u_bbr.flex6 = cur->flags; 11254 log.u_bbr.pkts_out = cur->hybrid_flags; 11255 log.u_bbr.rttProp = cur->timestamp; 11256 log.u_bbr.cur_del_rate = cur->cspr; 11257 log.u_bbr.bw_inuse = cur->start; 11258 log.u_bbr.applimited = (uint32_t)(cur->end & 0x00000000ffffffff); 11259 log.u_bbr.delivered = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff) ; 11260 log.u_bbr.epoch = (uint32_t)(cur->deadline & 0x00000000ffffffff); 11261 log.u_bbr.lt_epoch = (uint32_t)((cur->deadline >> 32) & 0x00000000ffffffff) ; 11262 log.u_bbr.inhpts = 1; 11263 #ifdef TCP_REQUEST_TRK 11264 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 11265 log.u_bbr.use_lt_bw = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 11266 #endif 11267 } else { 11268 log.u_bbr.flex2 = err; 11269 } 11270 /* 11271 * Fill in flex7 to be CHD (catchup|hybrid|DGP) 11272 */ 11273 log.u_bbr.flex7 = rack->rc_catch_up; 11274 log.u_bbr.flex7 <<= 1; 11275 log.u_bbr.flex7 |= rack->rc_hybrid_mode; 11276 log.u_bbr.flex7 <<= 1; 11277 log.u_bbr.flex7 |= rack->dgp_on; 11278 /* 11279 * Compose bbr_state to be a bit wise 0000ADHF 11280 * where A is the always_pace flag 11281 * where D is the dgp_on flag 11282 * where H is the hybrid_mode on flag 11283 * where F is the use_fixed_rate flag. 
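 * e.g. always_pace=1, dgp_on=1, hybrid_mode=0, use_fixed_rate=0
 * encodes as 0b1100 (0xc).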
11284 */ 11285 log.u_bbr.bbr_state = rack->rc_always_pace; 11286 log.u_bbr.bbr_state <<= 1; 11287 log.u_bbr.bbr_state |= rack->dgp_on; 11288 log.u_bbr.bbr_state <<= 1; 11289 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 11290 log.u_bbr.bbr_state <<= 1; 11291 log.u_bbr.bbr_state |= rack->use_fixed_rate; 11292 log.u_bbr.flex8 = mod; 11293 log.u_bbr.delRate = rack->r_ctl.bw_rate_cap; 11294 log.u_bbr.bbr_substate = rack->r_ctl.client_suggested_maxseg; 11295 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 11296 log.u_bbr.pkt_epoch = rack->rc_tp->tcp_hybrid_start; 11297 log.u_bbr.lost = rack->rc_tp->tcp_hybrid_error; 11298 log.u_bbr.pacing_gain = (uint16_t)rack->rc_tp->tcp_hybrid_stop; 11299 tcp_log_event(rack->rc_tp, NULL, 11300 &rack->rc_inp->inp_socket->so_rcv, 11301 &rack->rc_inp->inp_socket->so_snd, 11302 TCP_HYBRID_PACING_LOG, 0, 11303 0, &log, false, NULL, __func__, __LINE__, &tv); 11304 } 11305 } 11306 #endif 11307 11308 #ifdef TCP_REQUEST_TRK 11309 static void 11310 rack_set_dgp_hybrid_mode(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts) 11311 { 11312 struct tcp_sendfile_track *rc_cur, *orig_ent; 11313 struct tcpcb *tp; 11314 int err = 0; 11315 11316 orig_ent = rack->r_ctl.rc_last_sft; 11317 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, seq); 11318 if (rc_cur == NULL) { 11319 /* If not in the beginning what about the end piece */ 11320 if (rack->rc_hybrid_mode) 11321 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); 11322 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, (seq + len - 1)); 11323 } else { 11324 err = 12345; 11325 } 11326 /* If we find no parameters we are in straight DGP mode */ 11327 if(rc_cur == NULL) { 11328 /* None found for this seq, just DGP for now */ 11329 if (rack->rc_hybrid_mode) { 11330 rack->r_ctl.client_suggested_maxseg = 0; 11331 rack->rc_catch_up = 0; 11332 if (rack->cspr_is_fcc == 0) 11333 rack->r_ctl.bw_rate_cap = 0; 11334 else 11335 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 11336 } 11337 if (rack->rc_hybrid_mode) { 11338 rack_log_hybrid(rack, (seq + len - 1), NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); 11339 } 11340 if (rack->r_ctl.rc_last_sft) { 11341 rack->r_ctl.rc_last_sft = NULL; 11342 } 11343 return; 11344 } 11345 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_WASSET) == 0) { 11346 /* This entry was never setup for hybrid pacing on/off etc */ 11347 if (rack->rc_hybrid_mode) { 11348 rack->r_ctl.client_suggested_maxseg = 0; 11349 rack->rc_catch_up = 0; 11350 rack->r_ctl.bw_rate_cap = 0; 11351 } 11352 if (rack->r_ctl.rc_last_sft) { 11353 rack->r_ctl.rc_last_sft = NULL; 11354 } 11355 if ((rc_cur->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { 11356 rc_cur->flags |= TCP_TRK_TRACK_FLG_FSND; 11357 rc_cur->first_send = cts; 11358 rc_cur->sent_at_fs = rack->rc_tp->t_sndbytes; 11359 rc_cur->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; 11360 } 11361 return; 11362 } 11363 /* 11364 * Ok if we have a new entry *or* have never 11365 * set up an entry we need to proceed. If 11366 * we have already set it up this entry we 11367 * just continue along with what we already 11368 * setup. 
11369 */ 11370 tp = rack->rc_tp; 11371 if ((rack->r_ctl.rc_last_sft != NULL) && 11372 (rack->r_ctl.rc_last_sft == rc_cur)) { 11373 /* Its already in place */ 11374 if (rack->rc_hybrid_mode) 11375 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_ISSAME, __LINE__, 0); 11376 return; 11377 } 11378 if (rack->rc_hybrid_mode == 0) { 11379 rack->r_ctl.rc_last_sft = rc_cur; 11380 if (orig_ent) { 11381 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; 11382 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; 11383 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; 11384 } 11385 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0); 11386 return; 11387 } 11388 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CSPR) && rc_cur->cspr){ 11389 /* Compensate for all the header overhead's */ 11390 if (rack->cspr_is_fcc == 0) 11391 rack->r_ctl.bw_rate_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); 11392 else 11393 rack->r_ctl.fillcw_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); 11394 } else { 11395 if (rack->rc_hybrid_mode) { 11396 if (rack->cspr_is_fcc == 0) 11397 rack->r_ctl.bw_rate_cap = 0; 11398 else 11399 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 11400 } 11401 } 11402 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_H_MS) 11403 rack->r_ctl.client_suggested_maxseg = rc_cur->hint_maxseg; 11404 else 11405 rack->r_ctl.client_suggested_maxseg = 0; 11406 if (rc_cur->timestamp == rack->r_ctl.last_tm_mark) { 11407 /* 11408 * It is the same timestamp as the previous one 11409 * add the hybrid flag that will indicate we use 11410 * sendtime not arrival time for catch-up mode. 11411 */ 11412 rc_cur->hybrid_flags |= TCP_HYBRID_PACING_SENDTIME; 11413 } 11414 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CU) && 11415 (rc_cur->cspr > 0)) { 11416 uint64_t len; 11417 11418 rack->rc_catch_up = 1; 11419 /* 11420 * Calculate the deadline time, first set the 11421 * time to when the request arrived. 11422 */ 11423 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_SENDTIME) { 11424 /* 11425 * For cases where its a duplicate tm (we received more 11426 * than one request for a tm) we want to use now, the point 11427 * where we are just sending the first bit of the request. 11428 */ 11429 rc_cur->deadline = cts; 11430 } else { 11431 /* 11432 * Here we have a different tm from the last request 11433 * so we want to use arrival time as our base. 11434 */ 11435 rc_cur->deadline = rc_cur->localtime; 11436 } 11437 /* 11438 * Next calculate the length and compensate for 11439 * TLS if need be. 11440 */ 11441 len = rc_cur->end - rc_cur->start; 11442 if (tp->t_inpcb.inp_socket->so_snd.sb_tls_info) { 11443 /* 11444 * This session is doing TLS. Take a swag guess 11445 * at the overhead. 11446 */ 11447 len += tcp_estimate_tls_overhead(tp->t_inpcb.inp_socket, len); 11448 } 11449 /* 11450 * Now considering the size, and the cspr, what is the time that 11451 * would be required at the cspr rate. Here we use the raw 11452 * cspr value since the client only looks at the raw data. We 11453 * do use len which includes TLS overhead, but not the TCP/IP etc. 11454 * That will get made up for in the CU pacing rate set. 11455 */ 11456 len *= HPTS_USEC_IN_SEC; 11457 len /= rc_cur->cspr; 11458 rc_cur->deadline += len; 11459 } else { 11460 rack->rc_catch_up = 0; 11461 rc_cur->deadline = 0; 11462 } 11463 if (rack->r_ctl.client_suggested_maxseg != 0) { 11464 /* 11465 * We need to reset the max pace segs if we have a 11466 * client_suggested_maxseg. 
11467 */ 11468 rack_set_pace_segments(tp, rack, __LINE__, NULL); 11469 } 11470 if (orig_ent) { 11471 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; 11472 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; 11473 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; 11474 } 11475 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0); 11476 /* Remember it for next time and for CU mode */ 11477 rack->r_ctl.rc_last_sft = rc_cur; 11478 rack->r_ctl.last_tm_mark = rc_cur->timestamp; 11479 } 11480 #endif 11481 11482 static void 11483 rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts) 11484 { 11485 #ifdef TCP_REQUEST_TRK 11486 struct tcp_sendfile_track *ent; 11487 11488 ent = rack->r_ctl.rc_last_sft; 11489 if ((ent == NULL) || 11490 (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) || 11491 (SEQ_GEQ(seq, ent->end_seq))) { 11492 /* Time to update the track. */ 11493 rack_set_dgp_hybrid_mode(rack, seq, len, cts); 11494 ent = rack->r_ctl.rc_last_sft; 11495 } 11496 /* Out of all */ 11497 if (ent == NULL) { 11498 return; 11499 } 11500 if (SEQ_LT(ent->end_seq, (seq + len))) { 11501 /* 11502 * This is the case where our end_seq guess 11503 * was wrong. This is usually due to TLS having 11504 * more bytes then our guess. It could also be the 11505 * case that the client sent in two requests closely 11506 * and the SB is full of both so we are sending part 11507 * of each (end|beg). In such a case lets move this 11508 * guys end to match the end of this send. That 11509 * way it will complete when all of it is acked. 11510 */ 11511 ent->end_seq = (seq + len); 11512 if (rack->rc_hybrid_mode) 11513 rack_log_hybrid_bw(rack, seq, len, 0, 0, HYBRID_LOG_EXTEND, 0, ent, __LINE__); 11514 } 11515 /* Now validate we have set the send time of this one */ 11516 if ((ent->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { 11517 ent->flags |= TCP_TRK_TRACK_FLG_FSND; 11518 ent->first_send = cts; 11519 ent->sent_at_fs = rack->rc_tp->t_sndbytes; 11520 ent->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; 11521 } 11522 #endif 11523 } 11524 11525 static void 11526 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount) 11527 { 11528 /* 11529 * The fast output path is enabled and we 11530 * have moved the cumack forward. Lets see if 11531 * we can expand forward the fast path length by 11532 * that amount. What we would ideally like to 11533 * do is increase the number of bytes in the 11534 * fast path block (left_to_send) by the 11535 * acked amount. However we have to gate that 11536 * by two factors: 11537 * 1) The amount outstanding and the rwnd of the peer 11538 * (i.e. we don't want to exceed the rwnd of the peer). 11539 * <and> 11540 * 2) The amount of data left in the socket buffer (i.e. 11541 * we can't send beyond what is in the buffer). 11542 * 11543 * Note that this does not take into account any increase 11544 * in the cwnd. We will only extend the fast path by 11545 * what was acked. 
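 * Illustrative numbers: acked=3000 and left_to_send=6000, with
 * 10000 bytes of un-sent data in the socket buffer but only 8000
 * bytes of peer window beyond what is outstanding.  gating_val is
 * then min(10000, 8000) = 8000, the proposed new total of 9000
 * exceeds it, and left_to_send is left untouched rather than being
 * pushed past what the peer's window allows.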
11546 */ 11547 uint32_t new_total, gating_val; 11548 11549 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; 11550 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), 11551 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); 11552 if (new_total <= gating_val) { 11553 /* We can increase left_to_send by the acked amount */ 11554 counter_u64_add(rack_extended_rfo, 1); 11555 rack->r_ctl.fsb.left_to_send = new_total; 11556 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))), 11557 ("rack:%p left_to_send:%u sbavail:%u out:%u", 11558 rack, rack->r_ctl.fsb.left_to_send, 11559 sbavail(&rack->rc_inp->inp_socket->so_snd), 11560 (tp->snd_max - tp->snd_una))); 11561 11562 } 11563 } 11564 11565 static void 11566 rack_adjust_sendmap_head(struct tcp_rack *rack, struct sockbuf *sb) 11567 { 11568 /* 11569 * Here any sendmap entry that points to the 11570 * beginning mbuf must be adjusted to the correct 11571 * offset. This must be called with: 11572 * 1) The socket buffer locked 11573 * 2) snd_una adjusted to its new position. 11574 * 11575 * Note that (2) implies rack_ack_received has also 11576 * been called and all the sbcut's have been done. 11577 * 11578 * We grab the first mbuf in the socket buffer and 11579 * then go through the front of the sendmap, recalculating 11580 * the stored offset for any sendmap entry that has 11581 * that mbuf. We must use the sb functions to do this 11582 * since its possible an add was done has well as 11583 * the subtraction we may have just completed. This should 11584 * not be a penalty though, since we just referenced the sb 11585 * to go in and trim off the mbufs that we freed (of course 11586 * there will be a penalty for the sendmap references though). 11587 * 11588 * Note also with INVARIANT on, we validate with a KASSERT 11589 * that the first sendmap entry has a soff of 0. 11590 * 11591 */ 11592 struct mbuf *m; 11593 struct rack_sendmap *rsm; 11594 tcp_seq snd_una; 11595 #ifdef INVARIANTS 11596 int first_processed = 0; 11597 #endif 11598 11599 snd_una = rack->rc_tp->snd_una; 11600 SOCKBUF_LOCK_ASSERT(sb); 11601 m = sb->sb_mb; 11602 rsm = tqhash_min(rack->r_ctl.tqh); 11603 if ((rsm == NULL) || (m == NULL)) { 11604 /* Nothing outstanding */ 11605 return; 11606 } 11607 /* The very first RSM's mbuf must point to the head mbuf in the sb */ 11608 KASSERT((rsm->m == m), 11609 ("Rack:%p sb:%p rsm:%p -- first rsm mbuf not aligned to sb", 11610 rack, sb, rsm)); 11611 while (rsm->m && (rsm->m == m)) { 11612 /* one to adjust */ 11613 #ifdef INVARIANTS 11614 struct mbuf *tm; 11615 uint32_t soff; 11616 11617 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); 11618 if ((rsm->orig_m_len != m->m_len) || 11619 (rsm->orig_t_space != M_TRAILINGROOM(m))){ 11620 rack_adjust_orig_mlen(rsm); 11621 } 11622 if (first_processed == 0) { 11623 KASSERT((rsm->soff == 0), 11624 ("Rack:%p rsm:%p -- rsm at head but soff not zero", 11625 rack, rsm)); 11626 first_processed = 1; 11627 } 11628 if ((rsm->soff != soff) || (rsm->m != tm)) { 11629 /* 11630 * This is not a fatal error, we anticipate it 11631 * might happen (the else code), so we count it here 11632 * so that under invariant we can see that it really 11633 * does happen. 
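 * The non-INVARIANTS build (the #else arm) skips this
 * cross-check and simply re-derives the (mbuf, soff) pair for
 * the rsm with sbsndmbuf().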
11634 */ 11635 counter_u64_add(rack_adjust_map_bw, 1); 11636 } 11637 rsm->m = tm; 11638 rsm->soff = soff; 11639 if (tm) { 11640 rsm->orig_m_len = rsm->m->m_len; 11641 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 11642 } else { 11643 rsm->orig_m_len = 0; 11644 rsm->orig_t_space = 0; 11645 } 11646 #else 11647 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); 11648 if (rsm->m) { 11649 rsm->orig_m_len = rsm->m->m_len; 11650 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 11651 } else { 11652 rsm->orig_m_len = 0; 11653 rsm->orig_t_space = 0; 11654 } 11655 #endif 11656 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 11657 if (rsm == NULL) 11658 break; 11659 } 11660 } 11661 11662 #ifdef TCP_REQUEST_TRK 11663 static inline void 11664 rack_req_check_for_comp(struct tcp_rack *rack, tcp_seq th_ack) 11665 { 11666 struct tcp_sendfile_track *ent; 11667 int i; 11668 11669 if ((rack->rc_hybrid_mode == 0) && 11670 (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) == 0)) { 11671 /* 11672 * Just do normal completions hybrid pacing is not on 11673 * and CLDL is off as well. 11674 */ 11675 tcp_req_check_for_comp(rack->rc_tp, th_ack); 11676 return; 11677 } 11678 /* 11679 * Originally I was just going to find the th_ack associated 11680 * with an entry. But then I realized a large strech ack could 11681 * in theory ack two or more requests at once. So instead we 11682 * need to find all entries that are completed by th_ack not 11683 * just a single entry and do our logging. 11684 */ 11685 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); 11686 while (ent != NULL) { 11687 /* 11688 * We may be doing hybrid pacing or CLDL and need more details possibly 11689 * so we do it manually instead of calling 11690 * tcp_req_check_for_comp() 11691 */ 11692 uint64_t laa, tim, data, cbw, ftim; 11693 11694 /* Ok this ack frees it */ 11695 rack_log_hybrid(rack, th_ack, 11696 ent, HYBRID_LOG_REQ_COMP, __LINE__, 0); 11697 rack_log_hybrid_sends(rack, ent, __LINE__); 11698 /* calculate the time based on the ack arrival */ 11699 data = ent->end - ent->start; 11700 laa = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 11701 if (ent->flags & TCP_TRK_TRACK_FLG_FSND) { 11702 if (ent->first_send > ent->localtime) 11703 ftim = ent->first_send; 11704 else 11705 ftim = ent->localtime; 11706 } else { 11707 /* TSNH */ 11708 ftim = ent->localtime; 11709 } 11710 if (laa > ent->localtime) 11711 tim = laa - ftim; 11712 else 11713 tim = 0; 11714 cbw = data * HPTS_USEC_IN_SEC; 11715 if (tim > 0) 11716 cbw /= tim; 11717 else 11718 cbw = 0; 11719 rack_log_hybrid_bw(rack, th_ack, cbw, tim, data, HYBRID_LOG_BW_MEASURE, 0, ent, __LINE__); 11720 /* 11721 * Check to see if we are freeing what we are pointing to send wise 11722 * if so be sure to NULL the pointer so we know we are no longer 11723 * set to anything. 
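 * When hybrid pacing is active we also drop out of catch-up
 * mode, put the bandwidth (or fill-cw) cap back to its default
 * and clear any client suggested segment size this request had
 * installed.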
11724 */ 11725 if (ent == rack->r_ctl.rc_last_sft) { 11726 rack->r_ctl.rc_last_sft = NULL; 11727 if (rack->rc_hybrid_mode) { 11728 rack->rc_catch_up = 0; 11729 if (rack->cspr_is_fcc == 0) 11730 rack->r_ctl.bw_rate_cap = 0; 11731 else 11732 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 11733 rack->r_ctl.client_suggested_maxseg = 0; 11734 } 11735 } 11736 /* Generate the log that the tcp_netflix call would have */ 11737 tcp_req_log_req_info(rack->rc_tp, ent, 11738 i, TCP_TRK_REQ_LOG_FREED, 0, 0); 11739 /* Free it and see if there is another one */ 11740 tcp_req_free_a_slot(rack->rc_tp, ent); 11741 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); 11742 } 11743 } 11744 #endif 11745 11746 11747 /* 11748 * Return value of 1, we do not need to call rack_process_data(). 11749 * return value of 0, rack_process_data can be called. 11750 * For ret_val if its 0 the TCP is locked, if its non-zero 11751 * its unlocked and probably unsafe to touch the TCB. 11752 */ 11753 static int 11754 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 11755 struct tcpcb *tp, struct tcpopt *to, 11756 uint32_t tiwin, int32_t tlen, 11757 int32_t * ofia, int32_t thflags, int32_t *ret_val, int32_t orig_tlen) 11758 { 11759 int32_t ourfinisacked = 0; 11760 int32_t nsegs, acked_amount; 11761 int32_t acked; 11762 struct mbuf *mfree; 11763 struct tcp_rack *rack; 11764 int32_t under_pacing = 0; 11765 int32_t post_recovery = 0; 11766 uint32_t p_cwnd; 11767 11768 INP_WLOCK_ASSERT(tptoinpcb(tp)); 11769 11770 rack = (struct tcp_rack *)tp->t_fb_ptr; 11771 if (SEQ_GEQ(tp->snd_una, tp->iss + (65535 << tp->snd_scale))) { 11772 /* Checking SEG.ACK against ISS is definitely redundant. */ 11773 tp->t_flags2 |= TF2_NO_ISS_CHECK; 11774 } 11775 if (!V_tcp_insecure_ack) { 11776 tcp_seq seq_min; 11777 bool ghost_ack_check; 11778 11779 if (tp->t_flags2 & TF2_NO_ISS_CHECK) { 11780 /* Check for too old ACKs (RFC 5961, Section 5.2). */ 11781 seq_min = tp->snd_una - tp->max_sndwnd; 11782 ghost_ack_check = false; 11783 } else { 11784 if (SEQ_GT(tp->iss + 1, tp->snd_una - tp->max_sndwnd)) { 11785 /* Checking for ghost ACKs is stricter. */ 11786 seq_min = tp->iss + 1; 11787 ghost_ack_check = true; 11788 } else { 11789 /* 11790 * Checking for too old ACKs (RFC 5961, 11791 * Section 5.2) is stricter. 11792 */ 11793 seq_min = tp->snd_una - tp->max_sndwnd; 11794 ghost_ack_check = false; 11795 } 11796 } 11797 if (SEQ_LT(th->th_ack, seq_min)) { 11798 if (ghost_ack_check) 11799 TCPSTAT_INC(tcps_rcvghostack); 11800 else 11801 TCPSTAT_INC(tcps_rcvacktooold); 11802 /* Send challenge ACK. 
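 * ctf_do_dropafterack() discards the segment while still
 * having us answer with an ACK, the RFC 5961 style response
 * to an unacceptable ACK value.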
*/ 11803 ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val); 11804 rack->r_wanted_output = 1; 11805 return (1); 11806 } 11807 } 11808 if (SEQ_GT(th->th_ack, tp->snd_max)) { 11809 ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val); 11810 rack->r_wanted_output = 1; 11811 return (1); 11812 } 11813 if (rack->gp_ready && 11814 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 11815 under_pacing = 1; 11816 } 11817 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 11818 int in_rec, dup_ack_struck = 0; 11819 int dsack_seen = 0, sacks_seen = 0; 11820 11821 in_rec = IN_FASTRECOVERY(tp->t_flags); 11822 if (rack->rc_in_persist) { 11823 tp->t_rxtshift = 0; 11824 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 11825 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 11826 } 11827 11828 if ((th->th_ack == tp->snd_una) && 11829 (tiwin == tp->snd_wnd) && 11830 (orig_tlen == 0) && 11831 ((to->to_flags & TOF_SACK) == 0)) { 11832 rack_strike_dupack(rack, th->th_ack); 11833 dup_ack_struck = 1; 11834 } 11835 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), 11836 dup_ack_struck, &dsack_seen, &sacks_seen); 11837 11838 } 11839 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 11840 /* 11841 * Old ack, behind (or duplicate to) the last one rcv'd 11842 * Note: We mark reordering is occuring if its 11843 * less than and we have not closed our window. 11844 */ 11845 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { 11846 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 11847 if (rack->r_ctl.rc_reorder_ts == 0) 11848 rack->r_ctl.rc_reorder_ts = 1; 11849 } 11850 return (0); 11851 } 11852 /* 11853 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 11854 * something we sent. 11855 */ 11856 if (tp->t_flags & TF_NEEDSYN) { 11857 /* 11858 * T/TCP: Connection was half-synchronized, and our SYN has 11859 * been ACK'd (so connection is now fully synchronized). Go 11860 * to non-starred state, increment snd_una for ACK of SYN, 11861 * and check if we can do window scaling. 11862 */ 11863 tp->t_flags &= ~TF_NEEDSYN; 11864 tp->snd_una++; 11865 /* Do window scaling? */ 11866 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11867 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11868 tp->rcv_scale = tp->request_r_scale; 11869 /* Send window already scaled. */ 11870 } 11871 } 11872 nsegs = max(1, m->m_pkthdr.lro_nsegs); 11873 11874 acked = BYTES_THIS_ACK(tp, th); 11875 if (acked) { 11876 /* 11877 * Any time we move the cum-ack forward clear 11878 * keep-alive tied probe-not-answered. The 11879 * persists clears its own on entry. 11880 */ 11881 rack->probe_not_answered = 0; 11882 } 11883 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 11884 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 11885 /* 11886 * If we just performed our first retransmit, and the ACK arrives 11887 * within our recovery window, then it was a mistake to do the 11888 * retransmit in the first place. Recover our original cwnd and 11889 * ssthresh, and proceed to transmit where we left off. 
11890 */ 11891 if ((tp->t_flags & TF_PREVVALID) && 11892 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 11893 tp->t_flags &= ~TF_PREVVALID; 11894 if (tp->t_rxtshift == 1 && 11895 (int)(ticks - tp->t_badrxtwin) < 0) 11896 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 11897 } 11898 if (acked) { 11899 /* assure we are not backed off */ 11900 tp->t_rxtshift = 0; 11901 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 11902 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 11903 rack->rc_tlp_in_progress = 0; 11904 rack->r_ctl.rc_tlp_cnt_out = 0; 11905 /* 11906 * If it is the RXT timer we want to 11907 * stop it, so we can restart a TLP. 11908 */ 11909 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 11910 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11911 #ifdef TCP_REQUEST_TRK 11912 rack_req_check_for_comp(rack, th->th_ack); 11913 #endif 11914 } 11915 /* 11916 * If we have a timestamp reply, update smoothed round trip time. If 11917 * no timestamp is present but transmit timer is running and timed 11918 * sequence number was acked, update smoothed round trip time. Since 11919 * we now have an rtt measurement, cancel the timer backoff (cf., 11920 * Phil Karn's retransmit alg.). Recompute the initial retransmit 11921 * timer. 11922 * 11923 * Some boxes send broken timestamp replies during the SYN+ACK 11924 * phase, ignore timestamps of 0 or we could calculate a huge RTT 11925 * and blow up the retransmit timer. 11926 */ 11927 /* 11928 * If all outstanding data is acked, stop retransmit timer and 11929 * remember to restart (more output or persist). If there is more 11930 * data to be acked, restart retransmit timer, using current 11931 * (possibly backed-off) value. 11932 */ 11933 if (acked == 0) { 11934 if (ofia) 11935 *ofia = ourfinisacked; 11936 return (0); 11937 } 11938 if (IN_RECOVERY(tp->t_flags)) { 11939 if (SEQ_LT(th->th_ack, tp->snd_recover) && 11940 (SEQ_LT(th->th_ack, tp->snd_max))) { 11941 tcp_rack_partialack(tp); 11942 } else { 11943 rack_post_recovery(tp, th->th_ack); 11944 post_recovery = 1; 11945 /* 11946 * Grab the segsiz, multiply by 2 and add the snd_cwnd 11947 * that is the max the CC should add if we are exiting 11948 * recovery and doing a late add. 11949 */ 11950 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 11951 p_cwnd <<= 1; 11952 p_cwnd += tp->snd_cwnd; 11953 } 11954 } else if ((rack->rto_from_rec == 1) && 11955 SEQ_GEQ(th->th_ack, tp->snd_recover)) { 11956 /* 11957 * We were in recovery, hit a rxt timeout 11958 * and never re-entered recovery. The timeout(s) 11959 * made up all the lost data. In such a case 11960 * we need to clear the rto_from_rec flag. 11961 */ 11962 rack->rto_from_rec = 0; 11963 } 11964 /* 11965 * Let the congestion control algorithm update congestion control 11966 * related information. This typically means increasing the 11967 * congestion window. 
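 * If we exited recovery just above, the growth is additionally
 * clamped to p_cwnd (the cwnd at recovery exit plus twice the
 * smaller of the fixed maxseg and the pacing minimum) so a
 * non-newreno CC such as cubic cannot leap too far ahead in a
 * single step.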
11968 */ 11969 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, post_recovery); 11970 if (post_recovery && 11971 (tp->snd_cwnd > p_cwnd)) { 11972 /* Must be non-newreno (cubic) getting too ahead of itself */ 11973 tp->snd_cwnd = p_cwnd; 11974 } 11975 SOCK_SENDBUF_LOCK(so); 11976 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 11977 tp->snd_wnd -= acked_amount; 11978 mfree = sbcut_locked(&so->so_snd, acked_amount); 11979 if ((sbused(&so->so_snd) == 0) && 11980 (acked > acked_amount) && 11981 (tp->t_state >= TCPS_FIN_WAIT_1) && 11982 (tp->t_flags & TF_SENTFIN)) { 11983 /* 11984 * We must be sure our fin 11985 * was sent and acked (we can be 11986 * in FIN_WAIT_1 without having 11987 * sent the fin). 11988 */ 11989 ourfinisacked = 1; 11990 } 11991 tp->snd_una = th->th_ack; 11992 /* wakeups? */ 11993 if (acked_amount && sbavail(&so->so_snd)) 11994 rack_adjust_sendmap_head(rack, &so->so_snd); 11995 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 11996 /* NB: sowwakeup_locked() does an implicit unlock. */ 11997 sowwakeup_locked(so); 11998 m_freem(mfree); 11999 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 12000 tp->snd_recover = tp->snd_una; 12001 12002 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { 12003 tp->snd_nxt = tp->snd_max; 12004 } 12005 if (under_pacing && 12006 (rack->use_fixed_rate == 0) && 12007 (rack->in_probe_rtt == 0) && 12008 rack->rc_gp_dyn_mul && 12009 rack->rc_always_pace) { 12010 /* Check if we are dragging bottom */ 12011 rack_check_bottom_drag(tp, rack, so); 12012 } 12013 if (tp->snd_una == tp->snd_max) { 12014 /* Nothing left outstanding */ 12015 tp->t_flags &= ~TF_PREVVALID; 12016 if (rack->r_ctl.rc_went_idle_time == 0) 12017 rack->r_ctl.rc_went_idle_time = 1; 12018 rack->r_ctl.retran_during_recovery = 0; 12019 rack->r_ctl.dsack_byte_cnt = 0; 12020 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 12021 if (sbavail(&tptosocket(tp)->so_snd) == 0) 12022 tp->t_acktime = 0; 12023 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12024 rack->rc_suspicious = 0; 12025 /* Set need output so persist might get set */ 12026 rack->r_wanted_output = 1; 12027 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 12028 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 12029 (sbavail(&so->so_snd) == 0) && 12030 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 12031 /* 12032 * The socket was gone and the 12033 * peer sent data (now or in the past), time to 12034 * reset him. 12035 */ 12036 *ret_val = 1; 12037 /* tcp_close will kill the inp pre-log the Reset */ 12038 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 12039 tp = tcp_close(tp); 12040 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 12041 return (1); 12042 } 12043 } 12044 if (ofia) 12045 *ofia = ourfinisacked; 12046 return (0); 12047 } 12048 12049 12050 static void 12051 rack_log_collapse(struct tcp_rack *rack, uint32_t cnt, uint32_t split, uint32_t out, int line, 12052 int dir, uint32_t flags, struct rack_sendmap *rsm) 12053 { 12054 if (tcp_bblogging_on(rack->rc_tp)) { 12055 union tcp_log_stackspecific log; 12056 struct timeval tv; 12057 12058 memset(&log, 0, sizeof(log)); 12059 log.u_bbr.flex1 = cnt; 12060 log.u_bbr.flex2 = split; 12061 log.u_bbr.flex3 = out; 12062 log.u_bbr.flex4 = line; 12063 log.u_bbr.flex5 = rack->r_must_retran; 12064 log.u_bbr.flex6 = flags; 12065 log.u_bbr.flex7 = rack->rc_has_collapsed; 12066 log.u_bbr.flex8 = dir; /* 12067 * 1 is collapsed, 0 is uncollapsed, 12068 * 2 is log of a rsm being marked, 3 is a split. 
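 * A dir of 4 is used when an rsm is flagged
 * RACK_RWND_COLLAPSED while walking forward from the
 * collapse point in rack_un_collapse_window().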
12069 */ 12070 if (rsm == NULL) 12071 log.u_bbr.rttProp = 0; 12072 else 12073 log.u_bbr.rttProp = (uintptr_t)rsm; 12074 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 12075 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 12076 TCP_LOG_EVENTP(rack->rc_tp, NULL, 12077 &rack->rc_inp->inp_socket->so_rcv, 12078 &rack->rc_inp->inp_socket->so_snd, 12079 TCP_RACK_LOG_COLLAPSE, 0, 12080 0, &log, false, &tv); 12081 } 12082 } 12083 12084 static void 12085 rack_collapsed_window(struct tcp_rack *rack, uint32_t out, tcp_seq th_ack, int line) 12086 { 12087 /* 12088 * Here all we do is mark the collapsed point and set the flag. 12089 * This may happen again and again, but there is no 12090 * sense splitting our map until we know where the 12091 * peer finally lands in the collapse. 12092 */ 12093 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); 12094 if ((rack->rc_has_collapsed == 0) || 12095 (rack->r_ctl.last_collapse_point != (th_ack + rack->rc_tp->snd_wnd))) 12096 counter_u64_add(rack_collapsed_win_seen, 1); 12097 rack->r_ctl.last_collapse_point = th_ack + rack->rc_tp->snd_wnd; 12098 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max; 12099 rack->rc_has_collapsed = 1; 12100 rack->r_collapse_point_valid = 1; 12101 rack_log_collapse(rack, 0, th_ack, rack->r_ctl.last_collapse_point, line, 1, 0, NULL); 12102 } 12103 12104 static void 12105 rack_un_collapse_window(struct tcp_rack *rack, int line) 12106 { 12107 struct rack_sendmap *nrsm, *rsm; 12108 int cnt = 0, split = 0; 12109 int insret __diagused; 12110 12111 12112 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); 12113 rack->rc_has_collapsed = 0; 12114 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); 12115 if (rsm == NULL) { 12116 /* Nothing to do maybe the peer ack'ed it all */ 12117 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 12118 return; 12119 } 12120 /* Now do we need to split this one? */ 12121 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) { 12122 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 12123 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm); 12124 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 12125 if (nrsm == NULL) { 12126 /* We can't get a rsm, mark all? 
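 * If the split allocation fails we simply start the marking
 * walk at this rsm, which at worst also flags the bytes that
 * sit in front of the collapse point.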
*/ 12127 nrsm = rsm; 12128 goto no_split; 12129 } 12130 /* Clone it */ 12131 split = 1; 12132 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point); 12133 #ifndef INVARIANTS 12134 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 12135 #else 12136 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 12137 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 12138 nrsm, insret, rack, rsm); 12139 } 12140 #endif 12141 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 12142 rack->r_ctl.last_collapse_point, __LINE__); 12143 if (rsm->r_in_tmap) { 12144 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 12145 nrsm->r_in_tmap = 1; 12146 } 12147 /* 12148 * Set in the new RSM as the 12149 * collapsed starting point 12150 */ 12151 rsm = nrsm; 12152 } 12153 12154 no_split: 12155 TQHASH_FOREACH_FROM(nrsm, rack->r_ctl.tqh, rsm) { 12156 cnt++; 12157 nrsm->r_flags |= RACK_RWND_COLLAPSED; 12158 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm); 12159 cnt++; 12160 } 12161 if (cnt) { 12162 counter_u64_add(rack_collapsed_win, 1); 12163 } 12164 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 12165 } 12166 12167 static void 12168 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 12169 int32_t tlen, int32_t tfo_syn) 12170 { 12171 if (DELAY_ACK(tp, tlen) || tfo_syn) { 12172 rack_timer_cancel(tp, rack, 12173 rack->r_ctl.rc_rcvtime, __LINE__); 12174 tp->t_flags |= TF_DELACK; 12175 } else { 12176 rack->r_wanted_output = 1; 12177 tp->t_flags |= TF_ACKNOW; 12178 } 12179 } 12180 12181 static void 12182 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack) 12183 { 12184 /* 12185 * If fast output is in progress, lets validate that 12186 * the new window did not shrink on us and make it 12187 * so fast output should end. 12188 */ 12189 if (rack->r_fast_output) { 12190 uint32_t out; 12191 12192 /* 12193 * Calculate what we will send if left as is 12194 * and compare that to our send window. 12195 */ 12196 out = ctf_outstanding(tp); 12197 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 12198 /* ok we have an issue */ 12199 if (out >= tp->snd_wnd) { 12200 /* Turn off fast output the window is met or collapsed */ 12201 rack->r_fast_output = 0; 12202 } else { 12203 /* we have some room left */ 12204 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 12205 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 12206 /* If not at least 1 full segment never mind */ 12207 rack->r_fast_output = 0; 12208 } 12209 } 12210 } 12211 } 12212 } 12213 12214 /* 12215 * Return value of 1, the TCB is unlocked and most 12216 * likely gone, return value of 0, the TCP is still 12217 * locked. 12218 */ 12219 static int 12220 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 12221 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 12222 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 12223 { 12224 /* 12225 * Update window information. Don't look at window if no ACK: TAC's 12226 * send garbage on first SYN. 
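 * The window is only taken from a segment that is no older
 * than the last one we used for an update, i.e. when
 * SEG.SEQ > snd_wl1, or SEG.SEQ == snd_wl1 and
 * SEG.ACK > snd_wl2, or both match and the advertised window
 * grew (the checks just below).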
12227 */ 12228 int32_t nsegs; 12229 int32_t tfo_syn; 12230 struct tcp_rack *rack; 12231 12232 INP_WLOCK_ASSERT(tptoinpcb(tp)); 12233 12234 rack = (struct tcp_rack *)tp->t_fb_ptr; 12235 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12236 if ((thflags & TH_ACK) && 12237 (SEQ_LT(tp->snd_wl1, th->th_seq) || 12238 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 12239 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 12240 /* keep track of pure window updates */ 12241 if (tlen == 0 && 12242 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 12243 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 12244 tp->snd_wnd = tiwin; 12245 rack_validate_fo_sendwin_up(tp, rack); 12246 tp->snd_wl1 = th->th_seq; 12247 tp->snd_wl2 = th->th_ack; 12248 if (tp->snd_wnd > tp->max_sndwnd) 12249 tp->max_sndwnd = tp->snd_wnd; 12250 rack->r_wanted_output = 1; 12251 } else if (thflags & TH_ACK) { 12252 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 12253 tp->snd_wnd = tiwin; 12254 rack_validate_fo_sendwin_up(tp, rack); 12255 tp->snd_wl1 = th->th_seq; 12256 tp->snd_wl2 = th->th_ack; 12257 } 12258 } 12259 if (tp->snd_wnd < ctf_outstanding(tp)) 12260 /* The peer collapsed the window */ 12261 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); 12262 else if (rack->rc_has_collapsed) 12263 rack_un_collapse_window(rack, __LINE__); 12264 if ((rack->r_collapse_point_valid) && 12265 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point))) 12266 rack->r_collapse_point_valid = 0; 12267 /* Was persist timer active and now we have window space? */ 12268 if ((rack->rc_in_persist != 0) && 12269 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 12270 rack->r_ctl.rc_pace_min_segs))) { 12271 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 12272 tp->snd_nxt = tp->snd_max; 12273 /* Make sure we output to start the timer */ 12274 rack->r_wanted_output = 1; 12275 } 12276 /* Do we enter persists? */ 12277 if ((rack->rc_in_persist == 0) && 12278 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 12279 TCPS_HAVEESTABLISHED(tp->t_state) && 12280 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 12281 sbavail(&tptosocket(tp)->so_snd) && 12282 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 12283 /* 12284 * Here the rwnd is less than 12285 * the pacing size, we are established, 12286 * nothing is outstanding, and there is 12287 * data to send. Enter persists. 12288 */ 12289 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); 12290 } 12291 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 12292 m_freem(m); 12293 return (0); 12294 } 12295 /* 12296 * don't process the URG bit, ignore them drag 12297 * along the up. 12298 */ 12299 tp->rcv_up = tp->rcv_nxt; 12300 12301 /* 12302 * Process the segment text, merging it into the TCP sequencing 12303 * queue, and arranging for acknowledgment of receipt if necessary. 12304 * This process logically involves adjusting tp->rcv_wnd as data is 12305 * presented to the user (this happens in tcp_usrreq.c, case 12306 * PRU_RCVD). If a FIN has already been received on this connection 12307 * then we just ignore the text. 
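 * The tfo_syn test computed below lets data that arrived on a
 * TFO SYN be processed even though we are still in
 * SYN_RECEIVED.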
12308 */ 12309 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 12310 (tp->t_flags & TF_FASTOPEN)); 12311 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 12312 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 12313 tcp_seq save_start = th->th_seq; 12314 tcp_seq save_rnxt = tp->rcv_nxt; 12315 int save_tlen = tlen; 12316 12317 m_adj(m, drop_hdrlen); /* delayed header drop */ 12318 /* 12319 * Insert segment which includes th into TCP reassembly 12320 * queue with control block tp. Set thflags to whether 12321 * reassembly now includes a segment with FIN. This handles 12322 * the common case inline (segment is the next to be 12323 * received on an established connection, and the queue is 12324 * empty), avoiding linkage into and removal from the queue 12325 * and repetition of various conversions. Set DELACK for 12326 * segments received in order, but ack immediately when 12327 * segments are out of order (so fast retransmit can work). 12328 */ 12329 if (th->th_seq == tp->rcv_nxt && 12330 SEGQ_EMPTY(tp) && 12331 (TCPS_HAVEESTABLISHED(tp->t_state) || 12332 tfo_syn)) { 12333 #ifdef NETFLIX_SB_LIMITS 12334 u_int mcnt, appended; 12335 12336 if (so->so_rcv.sb_shlim) { 12337 mcnt = m_memcnt(m); 12338 appended = 0; 12339 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 12340 CFO_NOSLEEP, NULL) == false) { 12341 counter_u64_add(tcp_sb_shlim_fails, 1); 12342 m_freem(m); 12343 return (0); 12344 } 12345 } 12346 #endif 12347 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); 12348 tp->rcv_nxt += tlen; 12349 if (tlen && 12350 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 12351 (tp->t_fbyte_in == 0)) { 12352 tp->t_fbyte_in = ticks; 12353 if (tp->t_fbyte_in == 0) 12354 tp->t_fbyte_in = 1; 12355 if (tp->t_fbyte_out && tp->t_fbyte_in) 12356 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 12357 } 12358 thflags = tcp_get_flags(th) & TH_FIN; 12359 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 12360 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 12361 SOCK_RECVBUF_LOCK(so); 12362 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 12363 m_freem(m); 12364 } else { 12365 int32_t newsize; 12366 12367 if (tlen > 0) { 12368 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 12369 if (newsize) 12370 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 12371 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 12372 } 12373 #ifdef NETFLIX_SB_LIMITS 12374 appended = 12375 #endif 12376 sbappendstream_locked(&so->so_rcv, m, 0); 12377 } 12378 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 12379 /* NB: sorwakeup_locked() does an implicit unlock. */ 12380 sorwakeup_locked(so); 12381 #ifdef NETFLIX_SB_LIMITS 12382 if (so->so_rcv.sb_shlim && appended != mcnt) 12383 counter_fo_release(so->so_rcv.sb_shlim, 12384 mcnt - appended); 12385 #endif 12386 } else { 12387 /* 12388 * XXX: Due to the header drop above "th" is 12389 * theoretically invalid by now. Fortunately 12390 * m_adj() doesn't actually frees any mbufs when 12391 * trimming from the head. 12392 */ 12393 tcp_seq temp = save_start; 12394 12395 thflags = tcp_reass(tp, th, &temp, &tlen, m); 12396 tp->t_flags |= TF_ACKNOW; 12397 if (tp->t_flags & TF_WAKESOR) { 12398 tp->t_flags &= ~TF_WAKESOR; 12399 /* NB: sorwakeup_locked() does an implicit unlock. */ 12400 sorwakeup_locked(so); 12401 } 12402 } 12403 if ((tp->t_flags & TF_SACK_PERMIT) && 12404 (save_tlen > 0) && 12405 TCPS_HAVEESTABLISHED(tp->t_state)) { 12406 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 12407 /* 12408 * DSACK actually handled in the fastpath 12409 * above. 
12410 */ 12411 tcp_update_sack_list(tp, save_start, 12412 save_start + save_tlen); 12413 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 12414 if ((tp->rcv_numsacks >= 1) && 12415 (tp->sackblks[0].end == save_start)) { 12416 /* 12417 * Partial overlap, recorded at todrop 12418 * above. 12419 */ 12420 tcp_update_sack_list(tp, 12421 tp->sackblks[0].start, 12422 tp->sackblks[0].end); 12423 } else { 12424 tcp_update_dsack_list(tp, save_start, 12425 save_start + save_tlen); 12426 } 12427 } else if (tlen >= save_tlen) { 12428 /* Update of sackblks. */ 12429 tcp_update_dsack_list(tp, save_start, 12430 save_start + save_tlen); 12431 } else if (tlen > 0) { 12432 tcp_update_dsack_list(tp, save_start, 12433 save_start + tlen); 12434 } 12435 } 12436 } else { 12437 m_freem(m); 12438 thflags &= ~TH_FIN; 12439 } 12440 12441 /* 12442 * If FIN is received ACK the FIN and let the user know that the 12443 * connection is closing. 12444 */ 12445 if (thflags & TH_FIN) { 12446 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 12447 /* The socket upcall is handled by socantrcvmore. */ 12448 socantrcvmore(so); 12449 /* 12450 * If connection is half-synchronized (ie NEEDSYN 12451 * flag on) then delay ACK, so it may be piggybacked 12452 * when SYN is sent. Otherwise, since we received a 12453 * FIN then no more input can be expected, send ACK 12454 * now. 12455 */ 12456 if (tp->t_flags & TF_NEEDSYN) { 12457 rack_timer_cancel(tp, rack, 12458 rack->r_ctl.rc_rcvtime, __LINE__); 12459 tp->t_flags |= TF_DELACK; 12460 } else { 12461 tp->t_flags |= TF_ACKNOW; 12462 } 12463 tp->rcv_nxt++; 12464 } 12465 switch (tp->t_state) { 12466 /* 12467 * In SYN_RECEIVED and ESTABLISHED STATES enter the 12468 * CLOSE_WAIT state. 12469 */ 12470 case TCPS_SYN_RECEIVED: 12471 tp->t_starttime = ticks; 12472 /* FALLTHROUGH */ 12473 case TCPS_ESTABLISHED: 12474 rack_timer_cancel(tp, rack, 12475 rack->r_ctl.rc_rcvtime, __LINE__); 12476 tcp_state_change(tp, TCPS_CLOSE_WAIT); 12477 break; 12478 12479 /* 12480 * If still in FIN_WAIT_1 STATE FIN has not been 12481 * acked so enter the CLOSING state. 12482 */ 12483 case TCPS_FIN_WAIT_1: 12484 rack_timer_cancel(tp, rack, 12485 rack->r_ctl.rc_rcvtime, __LINE__); 12486 tcp_state_change(tp, TCPS_CLOSING); 12487 break; 12488 12489 /* 12490 * In FIN_WAIT_2 state enter the TIME_WAIT state, 12491 * starting the time-wait timer, turning off the 12492 * other standard timers. 12493 */ 12494 case TCPS_FIN_WAIT_2: 12495 rack_timer_cancel(tp, rack, 12496 rack->r_ctl.rc_rcvtime, __LINE__); 12497 tcp_twstart(tp); 12498 return (1); 12499 } 12500 } 12501 /* 12502 * Return any desired output. 12503 */ 12504 if ((tp->t_flags & TF_ACKNOW) || 12505 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 12506 rack->r_wanted_output = 1; 12507 } 12508 return (0); 12509 } 12510 12511 /* 12512 * Here nothing is really faster, its just that we 12513 * have broken out the fast-data path also just like 12514 * the fast-ack. 12515 */ 12516 static int 12517 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 12518 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12519 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 12520 { 12521 int32_t nsegs; 12522 int32_t newsize = 0; /* automatic sockbuf scaling */ 12523 struct tcp_rack *rack; 12524 #ifdef NETFLIX_SB_LIMITS 12525 u_int mcnt, appended; 12526 #endif 12527 12528 /* 12529 * If last ACK falls within this segment's sequence numbers, record 12530 * the timestamp. 
NOTE that the test is modified according to the 12531 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 12532 */ 12533 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 12534 return (0); 12535 } 12536 if (tiwin && tiwin != tp->snd_wnd) { 12537 return (0); 12538 } 12539 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 12540 return (0); 12541 } 12542 if (__predict_false((to->to_flags & TOF_TS) && 12543 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 12544 return (0); 12545 } 12546 if (__predict_false((th->th_ack != tp->snd_una))) { 12547 return (0); 12548 } 12549 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 12550 return (0); 12551 } 12552 if ((to->to_flags & TOF_TS) != 0 && 12553 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 12554 tp->ts_recent_age = tcp_ts_getticks(); 12555 tp->ts_recent = to->to_tsval; 12556 } 12557 rack = (struct tcp_rack *)tp->t_fb_ptr; 12558 /* 12559 * This is a pure, in-sequence data packet with nothing on the 12560 * reassembly queue and we have enough buffer space to take it. 12561 */ 12562 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12563 12564 #ifdef NETFLIX_SB_LIMITS 12565 if (so->so_rcv.sb_shlim) { 12566 mcnt = m_memcnt(m); 12567 appended = 0; 12568 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 12569 CFO_NOSLEEP, NULL) == false) { 12570 counter_u64_add(tcp_sb_shlim_fails, 1); 12571 m_freem(m); 12572 return (1); 12573 } 12574 } 12575 #endif 12576 /* Clean receiver SACK report if present */ 12577 if (tp->rcv_numsacks) 12578 tcp_clean_sackreport(tp); 12579 KMOD_TCPSTAT_INC(tcps_preddat); 12580 tp->rcv_nxt += tlen; 12581 if (tlen && 12582 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 12583 (tp->t_fbyte_in == 0)) { 12584 tp->t_fbyte_in = ticks; 12585 if (tp->t_fbyte_in == 0) 12586 tp->t_fbyte_in = 1; 12587 if (tp->t_fbyte_out && tp->t_fbyte_in) 12588 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 12589 } 12590 /* 12591 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 12592 */ 12593 tp->snd_wl1 = th->th_seq; 12594 /* 12595 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 12596 */ 12597 tp->rcv_up = tp->rcv_nxt; 12598 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 12599 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 12600 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 12601 12602 /* Add data to socket buffer. */ 12603 SOCK_RECVBUF_LOCK(so); 12604 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 12605 m_freem(m); 12606 } else { 12607 /* 12608 * Set new socket buffer size. Give up when limit is 12609 * reached. 12610 */ 12611 if (newsize) 12612 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 12613 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 12614 m_adj(m, drop_hdrlen); /* delayed header drop */ 12615 #ifdef NETFLIX_SB_LIMITS 12616 appended = 12617 #endif 12618 sbappendstream_locked(&so->so_rcv, m, 0); 12619 ctf_calc_rwin(so, tp); 12620 } 12621 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 12622 /* NB: sorwakeup_locked() does an implicit unlock. */ 12623 sorwakeup_locked(so); 12624 #ifdef NETFLIX_SB_LIMITS 12625 if (so->so_rcv.sb_shlim && mcnt != appended) 12626 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 12627 #endif 12628 rack_handle_delayed_ack(tp, rack, tlen, 0); 12629 if (tp->snd_una == tp->snd_max) 12630 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 12631 return (1); 12632 } 12633 12634 /* 12635 * This subfunction is used to try to highly optimize the 12636 * fast path. We again allow window updates that are 12637 * in sequence to remain in the fast-path. We also add 12638 * in the __predict's to attempt to help the compiler. 
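 * To stay on this path the ack must be strictly new but not
 * beyond snd_max, the offered window must be non-zero, no
 * SYN/FIN may be pending, the timestamp must not be old, and
 * we must be out of recovery with no SACKed data on the
 * scoreboard.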
12639 * Note that if we return a 0, then we can *not* process 12640 * it and the caller should push the packet into the 12641 * slow-path. 12642 */ 12643 static int 12644 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 12645 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12646 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 12647 { 12648 int32_t acked; 12649 int32_t nsegs; 12650 int32_t under_pacing = 0; 12651 struct tcp_rack *rack; 12652 12653 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 12654 /* Old ack, behind (or duplicate to) the last one rcv'd */ 12655 return (0); 12656 } 12657 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 12658 /* Above what we have sent? */ 12659 return (0); 12660 } 12661 if (__predict_false(tiwin == 0)) { 12662 /* zero window */ 12663 return (0); 12664 } 12665 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 12666 /* We need a SYN or a FIN, unlikely.. */ 12667 return (0); 12668 } 12669 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 12670 /* Timestamp is behind .. old ack with seq wrap? */ 12671 return (0); 12672 } 12673 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 12674 /* Still recovering */ 12675 return (0); 12676 } 12677 rack = (struct tcp_rack *)tp->t_fb_ptr; 12678 if (rack->r_ctl.rc_sacked) { 12679 /* We have sack holes on our scoreboard */ 12680 return (0); 12681 } 12682 /* Ok if we reach here, we can process a fast-ack */ 12683 if (rack->gp_ready && 12684 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 12685 under_pacing = 1; 12686 } 12687 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12688 rack_log_ack(tp, to, th, 0, 0, NULL, NULL); 12689 /* Did the window get updated? */ 12690 if (tiwin != tp->snd_wnd) { 12691 tp->snd_wnd = tiwin; 12692 rack_validate_fo_sendwin_up(tp, rack); 12693 tp->snd_wl1 = th->th_seq; 12694 if (tp->snd_wnd > tp->max_sndwnd) 12695 tp->max_sndwnd = tp->snd_wnd; 12696 } 12697 /* Do we exit persists? */ 12698 if ((rack->rc_in_persist != 0) && 12699 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 12700 rack->r_ctl.rc_pace_min_segs))) { 12701 rack_exit_persist(tp, rack, cts); 12702 } 12703 /* Do we enter persists? */ 12704 if ((rack->rc_in_persist == 0) && 12705 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 12706 TCPS_HAVEESTABLISHED(tp->t_state) && 12707 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 12708 sbavail(&tptosocket(tp)->so_snd) && 12709 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 12710 /* 12711 * Here the rwnd is less than 12712 * the pacing size, we are established, 12713 * nothing is outstanding, and there is 12714 * data to send. Enter persists. 12715 */ 12716 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, th->th_ack); 12717 } 12718 /* 12719 * If last ACK falls within this segment's sequence numbers, record 12720 * the timestamp. NOTE that the test is modified according to the 12721 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 12722 */ 12723 if ((to->to_flags & TOF_TS) != 0 && 12724 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 12725 tp->ts_recent_age = tcp_ts_getticks(); 12726 tp->ts_recent = to->to_tsval; 12727 } 12728 /* 12729 * This is a pure ack for outstanding data. 12730 */ 12731 KMOD_TCPSTAT_INC(tcps_predack); 12732 12733 /* 12734 * "bad retransmit" recovery. 
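 * If our one and only retransmission is acked inside the
 * t_badrxtwin window it was almost certainly spurious, so
 * rack_cong_signal(CC_RTO_ERR) restores the cwnd and ssthresh
 * we had before the RTO.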
12735 */ 12736 if ((tp->t_flags & TF_PREVVALID) && 12737 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 12738 tp->t_flags &= ~TF_PREVVALID; 12739 if (tp->t_rxtshift == 1 && 12740 (int)(ticks - tp->t_badrxtwin) < 0) 12741 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 12742 } 12743 /* 12744 * Recalculate the transmit timer / rtt. 12745 * 12746 * Some boxes send broken timestamp replies during the SYN+ACK 12747 * phase, ignore timestamps of 0 or we could calculate a huge RTT 12748 * and blow up the retransmit timer. 12749 */ 12750 acked = BYTES_THIS_ACK(tp, th); 12751 12752 #ifdef TCP_HHOOK 12753 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 12754 hhook_run_tcp_est_in(tp, th, to); 12755 #endif 12756 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 12757 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 12758 if (acked) { 12759 struct mbuf *mfree; 12760 12761 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 12762 SOCK_SENDBUF_LOCK(so); 12763 mfree = sbcut_locked(&so->so_snd, acked); 12764 tp->snd_una = th->th_ack; 12765 /* Note we want to hold the sb lock through the sendmap adjust */ 12766 rack_adjust_sendmap_head(rack, &so->so_snd); 12767 /* Wake up the socket if we have room to write more */ 12768 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 12769 sowwakeup_locked(so); 12770 m_freem(mfree); 12771 tp->t_rxtshift = 0; 12772 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 12773 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 12774 rack->rc_tlp_in_progress = 0; 12775 rack->r_ctl.rc_tlp_cnt_out = 0; 12776 /* 12777 * If it is the RXT timer we want to 12778 * stop it, so we can restart a TLP. 12779 */ 12780 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 12781 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12782 12783 #ifdef TCP_REQUEST_TRK 12784 rack_req_check_for_comp(rack, th->th_ack); 12785 #endif 12786 } 12787 /* 12788 * Let the congestion control algorithm update congestion control 12789 * related information. This typically means increasing the 12790 * congestion window. 12791 */ 12792 if (tp->snd_wnd < ctf_outstanding(tp)) { 12793 /* The peer collapsed the window */ 12794 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); 12795 } else if (rack->rc_has_collapsed) 12796 rack_un_collapse_window(rack, __LINE__); 12797 if ((rack->r_collapse_point_valid) && 12798 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point))) 12799 rack->r_collapse_point_valid = 0; 12800 /* 12801 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 12802 */ 12803 tp->snd_wl2 = th->th_ack; 12804 tp->t_dupacks = 0; 12805 m_freem(m); 12806 /* ND6_HINT(tp); *//* Some progress has been made. */ 12807 12808 /* 12809 * If all outstanding data are acked, stop retransmit timer, 12810 * otherwise restart timer using current (possibly backed-off) 12811 * value. If process is waiting for space, wakeup/selwakeup/signal. 12812 * If data are ready to send, let tcp_output decide between more 12813 * output or persist. 
12814 */ 12815 if (under_pacing && 12816 (rack->use_fixed_rate == 0) && 12817 (rack->in_probe_rtt == 0) && 12818 rack->rc_gp_dyn_mul && 12819 rack->rc_always_pace) { 12820 /* Check if we are dragging bottom */ 12821 rack_check_bottom_drag(tp, rack, so); 12822 } 12823 if (tp->snd_una == tp->snd_max) { 12824 tp->t_flags &= ~TF_PREVVALID; 12825 rack->r_ctl.retran_during_recovery = 0; 12826 rack->rc_suspicious = 0; 12827 rack->r_ctl.dsack_byte_cnt = 0; 12828 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 12829 if (rack->r_ctl.rc_went_idle_time == 0) 12830 rack->r_ctl.rc_went_idle_time = 1; 12831 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 12832 if (sbavail(&tptosocket(tp)->so_snd) == 0) 12833 tp->t_acktime = 0; 12834 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12835 } 12836 if (acked && rack->r_fast_output) 12837 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 12838 if (sbavail(&so->so_snd)) { 12839 rack->r_wanted_output = 1; 12840 } 12841 return (1); 12842 } 12843 12844 /* 12845 * Return value of 1, the TCB is unlocked and most 12846 * likely gone, return value of 0, the TCP is still 12847 * locked. 12848 */ 12849 static int 12850 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 12851 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12852 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12853 { 12854 int32_t ret_val = 0; 12855 int32_t orig_tlen = tlen; 12856 int32_t todrop; 12857 int32_t ourfinisacked = 0; 12858 struct tcp_rack *rack; 12859 12860 INP_WLOCK_ASSERT(tptoinpcb(tp)); 12861 12862 ctf_calc_rwin(so, tp); 12863 /* 12864 * If the state is SYN_SENT: if seg contains an ACK, but not for our 12865 * SYN, drop the input. if seg contains a RST, then drop the 12866 * connection. if seg does not contain SYN, then drop it. Otherwise 12867 * this is an acceptable SYN segment initialize tp->rcv_nxt and 12868 * tp->irs if seg contains ack then advance tp->snd_una if seg 12869 * contains an ECE and ECN support is enabled, the stream is ECN 12870 * capable. if SYN has been acked change to ESTABLISHED else 12871 * SYN_RCVD state arrange for segment to be acked (eventually) 12872 * continue processing rest of data/controls. 12873 */ 12874 if ((thflags & TH_ACK) && 12875 (SEQ_LEQ(th->th_ack, tp->iss) || 12876 SEQ_GT(th->th_ack, tp->snd_max))) { 12877 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 12878 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12879 return (1); 12880 } 12881 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 12882 TCP_PROBE5(connect__refused, NULL, tp, 12883 mtod(m, const char *), tp, th); 12884 tp = tcp_drop(tp, ECONNREFUSED); 12885 ctf_do_drop(m, tp); 12886 return (1); 12887 } 12888 if (thflags & TH_RST) { 12889 ctf_do_drop(m, tp); 12890 return (1); 12891 } 12892 if (!(thflags & TH_SYN)) { 12893 ctf_do_drop(m, tp); 12894 return (1); 12895 } 12896 tp->irs = th->th_seq; 12897 tcp_rcvseqinit(tp); 12898 rack = (struct tcp_rack *)tp->t_fb_ptr; 12899 if (thflags & TH_ACK) { 12900 int tfo_partial = 0; 12901 12902 KMOD_TCPSTAT_INC(tcps_connects); 12903 soisconnected(so); 12904 #ifdef MAC 12905 mac_socketpeer_set_from_mbuf(m, so); 12906 #endif 12907 /* Do window scaling on this connection? 
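 * Scaling only takes effect when both sides sent the window
 * scale option, i.e. TF_RCVD_SCALE and TF_REQ_SCALE are both
 * set (RFC 7323).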
*/ 12908 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 12909 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 12910 tp->rcv_scale = tp->request_r_scale; 12911 } 12912 tp->rcv_adv += min(tp->rcv_wnd, 12913 TCP_MAXWIN << tp->rcv_scale); 12914 /* 12915 * If not all the data that was sent in the TFO SYN 12916 * has been acked, resend the remainder right away. 12917 */ 12918 if ((tp->t_flags & TF_FASTOPEN) && 12919 (tp->snd_una != tp->snd_max)) { 12920 /* Was it a partial ack? */ 12921 if (SEQ_LT(th->th_ack, tp->snd_max)) 12922 tfo_partial = 1; 12923 } 12924 /* 12925 * If there's data, delay ACK; if there's also a FIN ACKNOW 12926 * will be turned on later. 12927 */ 12928 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 12929 rack_timer_cancel(tp, rack, 12930 rack->r_ctl.rc_rcvtime, __LINE__); 12931 tp->t_flags |= TF_DELACK; 12932 } else { 12933 rack->r_wanted_output = 1; 12934 tp->t_flags |= TF_ACKNOW; 12935 } 12936 12937 tcp_ecn_input_syn_sent(tp, thflags, iptos); 12938 12939 if (SEQ_GT(th->th_ack, tp->snd_una)) { 12940 /* 12941 * We advance snd_una for the 12942 * fast open case. If th_ack is 12943 * acknowledging data beyond 12944 * snd_una we can't just call 12945 * ack-processing since the 12946 * data stream in our send-map 12947 * will start at snd_una + 1 (one 12948 * beyond the SYN). If its just 12949 * equal we don't need to do that 12950 * and there is no send_map. 12951 */ 12952 tp->snd_una++; 12953 if (tfo_partial && (SEQ_GT(tp->snd_max, tp->snd_una))) { 12954 /* 12955 * We sent a SYN with data, and thus have a 12956 * sendmap entry with a SYN set. Lets find it 12957 * and take off the send bit and the byte and 12958 * set it up to be what we send (send it next). 12959 */ 12960 struct rack_sendmap *rsm; 12961 12962 rsm = tqhash_min(rack->r_ctl.tqh); 12963 if (rsm) { 12964 if (rsm->r_flags & RACK_HAS_SYN) { 12965 rsm->r_flags &= ~RACK_HAS_SYN; 12966 rsm->r_start++; 12967 } 12968 rack->r_ctl.rc_resend = rsm; 12969 } 12970 } 12971 } 12972 /* 12973 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 12974 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 12975 */ 12976 tp->t_starttime = ticks; 12977 if (tp->t_flags & TF_NEEDFIN) { 12978 tcp_state_change(tp, TCPS_FIN_WAIT_1); 12979 tp->t_flags &= ~TF_NEEDFIN; 12980 thflags &= ~TH_SYN; 12981 } else { 12982 tcp_state_change(tp, TCPS_ESTABLISHED); 12983 TCP_PROBE5(connect__established, NULL, tp, 12984 mtod(m, const char *), tp, th); 12985 rack_cc_conn_init(tp); 12986 } 12987 } else { 12988 /* 12989 * Received initial SYN in SYN-SENT[*] state => simultaneous 12990 * open. If segment contains CC option and there is a 12991 * cached CC, apply TAO test. If it succeeds, connection is * 12992 * half-synchronized. Otherwise, do 3-way handshake: 12993 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 12994 * there was no CC option, clear cached CC value. 12995 */ 12996 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); 12997 tcp_state_change(tp, TCPS_SYN_RECEIVED); 12998 } 12999 /* 13000 * Advance th->th_seq to correspond to first data byte. If data, 13001 * trim to stay within window, dropping FIN if necessary. 13002 */ 13003 th->th_seq++; 13004 if (tlen > tp->rcv_wnd) { 13005 todrop = tlen - tp->rcv_wnd; 13006 m_adj(m, -todrop); 13007 tlen = tp->rcv_wnd; 13008 thflags &= ~TH_FIN; 13009 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 13010 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 13011 } 13012 tp->snd_wl1 = th->th_seq - 1; 13013 tp->rcv_up = th->th_seq; 13014 /* 13015 * Client side of transaction: already sent SYN and data. 
If the 13016 * remote host used T/TCP to validate the SYN, our data will be 13017 * ACK'd; if so, enter normal data segment processing in the middle 13018 * of step 5, ack processing. Otherwise, goto step 6. 13019 */ 13020 if (thflags & TH_ACK) { 13021 /* For syn-sent we need to possibly update the rtt */ 13022 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 13023 uint32_t t, mcts; 13024 13025 mcts = tcp_ts_getticks(); 13026 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 13027 if (!tp->t_rttlow || tp->t_rttlow > t) 13028 tp->t_rttlow = t; 13029 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 13030 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 13031 tcp_rack_xmit_timer_commit(rack, tp); 13032 } 13033 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) 13034 return (ret_val); 13035 /* We may have changed to FIN_WAIT_1 above */ 13036 if (tp->t_state == TCPS_FIN_WAIT_1) { 13037 /* 13038 * In FIN_WAIT_1 STATE in addition to the processing 13039 * for the ESTABLISHED state if our FIN is now 13040 * acknowledged then enter FIN_WAIT_2. 13041 */ 13042 if (ourfinisacked) { 13043 /* 13044 * If we can't receive any more data, then 13045 * closing user can proceed. Starting the 13046 * timer is contrary to the specification, 13047 * but if we don't get a FIN we'll hang 13048 * forever. 13049 * 13050 * XXXjl: we should release the tp also, and 13051 * use a compressed state. 13052 */ 13053 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13054 soisdisconnected(so); 13055 tcp_timer_activate(tp, TT_2MSL, 13056 (tcp_fast_finwait2_recycle ? 13057 tcp_finwait2_timeout : 13058 TP_MAXIDLE(tp))); 13059 } 13060 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13061 } 13062 } 13063 } 13064 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13065 tiwin, thflags, nxt_pkt)); 13066 } 13067 13068 /* 13069 * Return value of 1, the TCB is unlocked and most 13070 * likely gone, return value of 0, the TCP is still 13071 * locked. 13072 */ 13073 static int 13074 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 13075 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13076 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13077 { 13078 struct tcp_rack *rack; 13079 int32_t orig_tlen = tlen; 13080 int32_t ret_val = 0; 13081 int32_t ourfinisacked = 0; 13082 13083 rack = (struct tcp_rack *)tp->t_fb_ptr; 13084 ctf_calc_rwin(so, tp); 13085 if ((thflags & TH_RST) || 13086 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13087 return (ctf_process_rst(m, th, so, tp)); 13088 if ((thflags & TH_ACK) && 13089 (SEQ_LEQ(th->th_ack, tp->snd_una) || 13090 SEQ_GT(th->th_ack, tp->snd_max))) { 13091 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13092 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13093 return (1); 13094 } 13095 if (tp->t_flags & TF_FASTOPEN) { 13096 /* 13097 * When a TFO connection is in SYN_RECEIVED, the 13098 * only valid packets are the initial SYN, a 13099 * retransmit/copy of the initial SYN (possibly with 13100 * a subset of the original data), a valid ACK, a 13101 * FIN, or a RST. 
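 * Below, a segment carrying both SYN and ACK is answered with
 * a reset; a bare SYN is dropped while one of our
 * retransmit/TLP/rack timers is pending; and anything without
 * ACK, FIN or RST is simply dropped.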
13102 */ 13103 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 13104 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13105 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13106 return (1); 13107 } else if (thflags & TH_SYN) { 13108 /* non-initial SYN is ignored */ 13109 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 13110 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 13111 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 13112 ctf_do_drop(m, NULL); 13113 return (0); 13114 } 13115 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 13116 ctf_do_drop(m, NULL); 13117 return (0); 13118 } 13119 } 13120 13121 /* 13122 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13123 * it's less than ts_recent, drop it. 13124 */ 13125 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13126 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13127 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13128 return (ret_val); 13129 } 13130 /* 13131 * In the SYN-RECEIVED state, validate that the packet belongs to 13132 * this connection before trimming the data to fit the receive 13133 * window. Check the sequence number versus IRS since we know the 13134 * sequence numbers haven't wrapped. This is a partial fix for the 13135 * "LAND" DoS attack. 13136 */ 13137 if (SEQ_LT(th->th_seq, tp->irs)) { 13138 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13139 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13140 return (1); 13141 } 13142 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13143 return (ret_val); 13144 } 13145 /* 13146 * If last ACK falls within this segment's sequence numbers, record 13147 * its timestamp. NOTE: 1) That the test incorporates suggestions 13148 * from the latest proposal of the tcplw@cray.com list (Braden 13149 * 1993/04/26). 2) That updating only on newer timestamps interferes 13150 * with our earlier PAWS tests, so this check should be solely 13151 * predicated on the sequence space of this segment. 3) That we 13152 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13153 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13154 * SEG.Len, This modified check allows us to overcome RFC1323's 13155 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13156 * p.869. In such cases, we can still calculate the RTT correctly 13157 * when RCV.NXT == Last.ACK.Sent. 13158 */ 13159 if ((to->to_flags & TOF_TS) != 0 && 13160 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13161 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13162 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13163 tp->ts_recent_age = tcp_ts_getticks(); 13164 tp->ts_recent = to->to_tsval; 13165 } 13166 tp->snd_wnd = tiwin; 13167 rack_validate_fo_sendwin_up(tp, rack); 13168 /* 13169 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13170 * is on (half-synchronized state), then queue data for later 13171 * processing; else drop segment and return. 13172 */ 13173 if ((thflags & TH_ACK) == 0) { 13174 if (tp->t_flags & TF_FASTOPEN) { 13175 rack_cc_conn_init(tp); 13176 } 13177 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13178 tiwin, thflags, nxt_pkt)); 13179 } 13180 KMOD_TCPSTAT_INC(tcps_connects); 13181 if (tp->t_flags & TF_SONOTCONN) { 13182 tp->t_flags &= ~TF_SONOTCONN; 13183 soisconnected(so); 13184 } 13185 /* Do window scaling? 
*/ 13186 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 13187 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 13188 tp->rcv_scale = tp->request_r_scale; 13189 } 13190 /* 13191 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 13192 * FIN-WAIT-1 13193 */ 13194 tp->t_starttime = ticks; 13195 if ((tp->t_flags & TF_FASTOPEN) && tp->t_tfo_pending) { 13196 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 13197 tp->t_tfo_pending = NULL; 13198 } 13199 if (tp->t_flags & TF_NEEDFIN) { 13200 tcp_state_change(tp, TCPS_FIN_WAIT_1); 13201 tp->t_flags &= ~TF_NEEDFIN; 13202 } else { 13203 tcp_state_change(tp, TCPS_ESTABLISHED); 13204 TCP_PROBE5(accept__established, NULL, tp, 13205 mtod(m, const char *), tp, th); 13206 /* 13207 * TFO connections call cc_conn_init() during SYN 13208 * processing. Calling it again here for such connections 13209 * is not harmless as it would undo the snd_cwnd reduction 13210 * that occurs when a TFO SYN|ACK is retransmitted. 13211 */ 13212 if (!(tp->t_flags & TF_FASTOPEN)) 13213 rack_cc_conn_init(tp); 13214 } 13215 /* 13216 * Account for the ACK of our SYN prior to 13217 * regular ACK processing below, except for 13218 * simultaneous SYN, which is handled later. 13219 */ 13220 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 13221 tp->snd_una++; 13222 /* 13223 * If segment contains data or ACK, will call tcp_reass() later; if 13224 * not, do so now to pass queued data to user. 13225 */ 13226 if (tlen == 0 && (thflags & TH_FIN) == 0) { 13227 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 13228 (struct mbuf *)0); 13229 if (tp->t_flags & TF_WAKESOR) { 13230 tp->t_flags &= ~TF_WAKESOR; 13231 /* NB: sorwakeup_locked() does an implicit unlock. */ 13232 sorwakeup_locked(so); 13233 } 13234 } 13235 tp->snd_wl1 = th->th_seq - 1; 13236 /* For syn-recv we need to possibly update the rtt */ 13237 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 13238 uint32_t t, mcts; 13239 13240 mcts = tcp_ts_getticks(); 13241 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 13242 if (!tp->t_rttlow || tp->t_rttlow > t) 13243 tp->t_rttlow = t; 13244 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); 13245 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 13246 tcp_rack_xmit_timer_commit(rack, tp); 13247 } 13248 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13249 return (ret_val); 13250 } 13251 if (tp->t_state == TCPS_FIN_WAIT_1) { 13252 /* We could have went to FIN_WAIT_1 (or EST) above */ 13253 /* 13254 * In FIN_WAIT_1 STATE in addition to the processing for the 13255 * ESTABLISHED state if our FIN is now acknowledged then 13256 * enter FIN_WAIT_2. 13257 */ 13258 if (ourfinisacked) { 13259 /* 13260 * If we can't receive any more data, then closing 13261 * user can proceed. Starting the timer is contrary 13262 * to the specification, but if we don't get a FIN 13263 * we'll hang forever. 13264 * 13265 * XXXjl: we should release the tp also, and use a 13266 * compressed state. 13267 */ 13268 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13269 soisdisconnected(so); 13270 tcp_timer_activate(tp, TT_2MSL, 13271 (tcp_fast_finwait2_recycle ? 
13272 tcp_finwait2_timeout : 13273 TP_MAXIDLE(tp))); 13274 } 13275 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13276 } 13277 } 13278 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13279 tiwin, thflags, nxt_pkt)); 13280 } 13281 13282 /* 13283 * Return value of 1, the TCB is unlocked and most 13284 * likely gone, return value of 0, the TCP is still 13285 * locked. 13286 */ 13287 static int 13288 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 13289 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13290 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13291 { 13292 int32_t ret_val = 0; 13293 int32_t orig_tlen = tlen; 13294 struct tcp_rack *rack; 13295 13296 /* 13297 * Header prediction: check for the two common cases of a 13298 * uni-directional data xfer. If the packet has no control flags, 13299 * is in-sequence, the window didn't change and we're not 13300 * retransmitting, it's a candidate. If the length is zero and the 13301 * ack moved forward, we're the sender side of the xfer. Just free 13302 * the data acked & wake any higher level process that was blocked 13303 * waiting for space. If the length is non-zero and the ack didn't 13304 * move, we're the receiver side. If we're getting packets in-order 13305 * (the reassembly queue is empty), add the data to the socket 13306 * buffer and note that we need a delayed ack. Make sure that the 13307 * hidden state-flags are also off. Since we check for 13308 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 13309 */ 13310 rack = (struct tcp_rack *)tp->t_fb_ptr; 13311 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 13312 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) && 13313 __predict_true(SEGQ_EMPTY(tp)) && 13314 __predict_true(th->th_seq == tp->rcv_nxt)) { 13315 if (tlen == 0) { 13316 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 13317 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 13318 return (0); 13319 } 13320 } else { 13321 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 13322 tiwin, nxt_pkt, iptos)) { 13323 return (0); 13324 } 13325 } 13326 } 13327 ctf_calc_rwin(so, tp); 13328 13329 if ((thflags & TH_RST) || 13330 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13331 return (ctf_process_rst(m, th, so, tp)); 13332 13333 /* 13334 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13335 * synchronized state. 13336 */ 13337 if (thflags & TH_SYN) { 13338 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13339 return (ret_val); 13340 } 13341 /* 13342 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13343 * it's less than ts_recent, drop it. 13344 */ 13345 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13346 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13347 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13348 return (ret_val); 13349 } 13350 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13351 return (ret_val); 13352 } 13353 /* 13354 * If last ACK falls within this segment's sequence numbers, record 13355 * its timestamp. NOTE: 1) That the test incorporates suggestions 13356 * from the latest proposal of the tcplw@cray.com list (Braden 13357 * 1993/04/26). 2) That updating only on newer timestamps interferes 13358 * with our earlier PAWS tests, so this check should be solely 13359 * predicated on the sequence space of this segment.
3) That we 13360 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13361 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13362 * SEG.Len, This modified check allows us to overcome RFC1323's 13363 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13364 * p.869. In such cases, we can still calculate the RTT correctly 13365 * when RCV.NXT == Last.ACK.Sent. 13366 */ 13367 if ((to->to_flags & TOF_TS) != 0 && 13368 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13369 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13370 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13371 tp->ts_recent_age = tcp_ts_getticks(); 13372 tp->ts_recent = to->to_tsval; 13373 } 13374 /* 13375 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13376 * is on (half-synchronized state), then queue data for later 13377 * processing; else drop segment and return. 13378 */ 13379 if ((thflags & TH_ACK) == 0) { 13380 if (tp->t_flags & TF_NEEDSYN) { 13381 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13382 tiwin, thflags, nxt_pkt)); 13383 13384 } else if (tp->t_flags & TF_ACKNOW) { 13385 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13386 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13387 return (ret_val); 13388 } else { 13389 ctf_do_drop(m, NULL); 13390 return (0); 13391 } 13392 } 13393 /* 13394 * Ack processing. 13395 */ 13396 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val, orig_tlen)) { 13397 return (ret_val); 13398 } 13399 if (sbavail(&so->so_snd)) { 13400 if (ctf_progress_timeout_check(tp, true)) { 13401 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 13402 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13403 return (1); 13404 } 13405 } 13406 /* State changes only happen in rack_process_data() */ 13407 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13408 tiwin, thflags, nxt_pkt)); 13409 } 13410 13411 /* 13412 * Return value of 1, the TCB is unlocked and most 13413 * likely gone, return value of 0, the TCP is still 13414 * locked. 13415 */ 13416 static int 13417 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 13418 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13419 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13420 { 13421 int32_t ret_val = 0; 13422 int32_t orig_tlen = tlen; 13423 13424 ctf_calc_rwin(so, tp); 13425 if ((thflags & TH_RST) || 13426 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13427 return (ctf_process_rst(m, th, so, tp)); 13428 /* 13429 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13430 * synchronized state. 13431 */ 13432 if (thflags & TH_SYN) { 13433 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13434 return (ret_val); 13435 } 13436 /* 13437 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13438 * it's less than ts_recent, drop it. 13439 */ 13440 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13441 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13442 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13443 return (ret_val); 13444 } 13445 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13446 return (ret_val); 13447 } 13448 /* 13449 * If last ACK falls within this segment's sequence numbers, record 13450 * its timestamp. NOTE: 1) That the test incorporates suggestions 13451 * from the latest proposal of the tcplw@cray.com list (Braden 13452 * 1993/04/26). 
2) That updating only on newer timestamps interferes 13453 * with our earlier PAWS tests, so this check should be solely 13454 * predicated on the sequence space of this segment. 3) That we 13455 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13456 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13457 * SEG.Len, This modified check allows us to overcome RFC1323's 13458 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13459 * p.869. In such cases, we can still calculate the RTT correctly 13460 * when RCV.NXT == Last.ACK.Sent. 13461 */ 13462 if ((to->to_flags & TOF_TS) != 0 && 13463 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13464 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13465 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13466 tp->ts_recent_age = tcp_ts_getticks(); 13467 tp->ts_recent = to->to_tsval; 13468 } 13469 /* 13470 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13471 * is on (half-synchronized state), then queue data for later 13472 * processing; else drop segment and return. 13473 */ 13474 if ((thflags & TH_ACK) == 0) { 13475 if (tp->t_flags & TF_NEEDSYN) { 13476 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13477 tiwin, thflags, nxt_pkt)); 13478 13479 } else if (tp->t_flags & TF_ACKNOW) { 13480 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13481 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13482 return (ret_val); 13483 } else { 13484 ctf_do_drop(m, NULL); 13485 return (0); 13486 } 13487 } 13488 /* 13489 * Ack processing. 13490 */ 13491 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val, orig_tlen)) { 13492 return (ret_val); 13493 } 13494 if (sbavail(&so->so_snd)) { 13495 if (ctf_progress_timeout_check(tp, true)) { 13496 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13497 tp, tick, PROGRESS_DROP, __LINE__); 13498 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13499 return (1); 13500 } 13501 } 13502 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13503 tiwin, thflags, nxt_pkt)); 13504 } 13505 13506 static int 13507 rack_check_data_after_close(struct mbuf *m, 13508 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 13509 { 13510 struct tcp_rack *rack; 13511 13512 rack = (struct tcp_rack *)tp->t_fb_ptr; 13513 if (rack->rc_allow_data_af_clo == 0) { 13514 close_now: 13515 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 13516 /* tcp_close will kill the inp pre-log the Reset */ 13517 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 13518 tp = tcp_close(tp); 13519 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 13520 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 13521 return (1); 13522 } 13523 if (sbavail(&so->so_snd) == 0) 13524 goto close_now; 13525 /* Ok we allow data that is ignored and a followup reset */ 13526 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 13527 tp->rcv_nxt = th->th_seq + *tlen; 13528 tp->t_flags2 |= TF2_DROP_AF_DATA; 13529 rack->r_wanted_output = 1; 13530 *tlen = 0; 13531 return (0); 13532 } 13533 13534 /* 13535 * Return value of 1, the TCB is unlocked and most 13536 * likely gone, return value of 0, the TCP is still 13537 * locked. 
13538 */ 13539 static int 13540 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 13541 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13542 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13543 { 13544 int32_t ret_val = 0; 13545 int32_t orig_tlen = tlen; 13546 int32_t ourfinisacked = 0; 13547 13548 ctf_calc_rwin(so, tp); 13549 13550 if ((thflags & TH_RST) || 13551 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13552 return (ctf_process_rst(m, th, so, tp)); 13553 /* 13554 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13555 * synchronized state. 13556 */ 13557 if (thflags & TH_SYN) { 13558 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13559 return (ret_val); 13560 } 13561 /* 13562 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13563 * it's less than ts_recent, drop it. 13564 */ 13565 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13566 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13567 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13568 return (ret_val); 13569 } 13570 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13571 return (ret_val); 13572 } 13573 /* 13574 * If new data are received on a connection after the user processes 13575 * are gone, then RST the other end. 13576 */ 13577 if ((tp->t_flags & TF_CLOSED) && tlen && 13578 rack_check_data_after_close(m, tp, &tlen, th, so)) 13579 return (1); 13580 /* 13581 * If last ACK falls within this segment's sequence numbers, record 13582 * its timestamp. NOTE: 1) That the test incorporates suggestions 13583 * from the latest proposal of the tcplw@cray.com list (Braden 13584 * 1993/04/26). 2) That updating only on newer timestamps interferes 13585 * with our earlier PAWS tests, so this check should be solely 13586 * predicated on the sequence space of this segment. 3) That we 13587 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13588 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13589 * SEG.Len, This modified check allows us to overcome RFC1323's 13590 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13591 * p.869. In such cases, we can still calculate the RTT correctly 13592 * when RCV.NXT == Last.ACK.Sent. 13593 */ 13594 if ((to->to_flags & TOF_TS) != 0 && 13595 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13596 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13597 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13598 tp->ts_recent_age = tcp_ts_getticks(); 13599 tp->ts_recent = to->to_tsval; 13600 } 13601 /* 13602 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13603 * is on (half-synchronized state), then queue data for later 13604 * processing; else drop segment and return. 13605 */ 13606 if ((thflags & TH_ACK) == 0) { 13607 if (tp->t_flags & TF_NEEDSYN) { 13608 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13609 tiwin, thflags, nxt_pkt)); 13610 } else if (tp->t_flags & TF_ACKNOW) { 13611 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13612 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13613 return (ret_val); 13614 } else { 13615 ctf_do_drop(m, NULL); 13616 return (0); 13617 } 13618 } 13619 /* 13620 * Ack processing. 13621 */ 13622 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13623 return (ret_val); 13624 } 13625 if (ourfinisacked) { 13626 /* 13627 * If we can't receive any more data, then closing user can 13628 * proceed. 
Starting the timer is contrary to the 13629 * specification, but if we don't get a FIN we'll hang 13630 * forever. 13631 * 13632 * XXXjl: we should release the tp also, and use a 13633 * compressed state. 13634 */ 13635 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13636 soisdisconnected(so); 13637 tcp_timer_activate(tp, TT_2MSL, 13638 (tcp_fast_finwait2_recycle ? 13639 tcp_finwait2_timeout : 13640 TP_MAXIDLE(tp))); 13641 } 13642 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13643 } 13644 if (sbavail(&so->so_snd)) { 13645 if (ctf_progress_timeout_check(tp, true)) { 13646 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13647 tp, tick, PROGRESS_DROP, __LINE__); 13648 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13649 return (1); 13650 } 13651 } 13652 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13653 tiwin, thflags, nxt_pkt)); 13654 } 13655 13656 /* 13657 * Return value of 1, the TCB is unlocked and most 13658 * likely gone, return value of 0, the TCP is still 13659 * locked. 13660 */ 13661 static int 13662 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 13663 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13664 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13665 { 13666 int32_t ret_val = 0; 13667 int32_t orig_tlen = tlen; 13668 int32_t ourfinisacked = 0; 13669 13670 ctf_calc_rwin(so, tp); 13671 13672 if ((thflags & TH_RST) || 13673 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13674 return (ctf_process_rst(m, th, so, tp)); 13675 /* 13676 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13677 * synchronized state. 13678 */ 13679 if (thflags & TH_SYN) { 13680 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13681 return (ret_val); 13682 } 13683 /* 13684 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13685 * it's less than ts_recent, drop it. 13686 */ 13687 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13688 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13689 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13690 return (ret_val); 13691 } 13692 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13693 return (ret_val); 13694 } 13695 /* 13696 * If last ACK falls within this segment's sequence numbers, record 13697 * its timestamp. NOTE: 1) That the test incorporates suggestions 13698 * from the latest proposal of the tcplw@cray.com list (Braden 13699 * 1993/04/26). 2) That updating only on newer timestamps interferes 13700 * with our earlier PAWS tests, so this check should be solely 13701 * predicated on the sequence space of this segment. 3) That we 13702 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13703 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13704 * SEG.Len, This modified check allows us to overcome RFC1323's 13705 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13706 * p.869. In such cases, we can still calculate the RTT correctly 13707 * when RCV.NXT == Last.ACK.Sent. 13708 */ 13709 if ((to->to_flags & TOF_TS) != 0 && 13710 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13711 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13712 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13713 tp->ts_recent_age = tcp_ts_getticks(); 13714 tp->ts_recent = to->to_tsval; 13715 } 13716 /* 13717 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13718 * is on (half-synchronized state), then queue data for later 13719 * processing; else drop segment and return. 
13720 */ 13721 if ((thflags & TH_ACK) == 0) { 13722 if (tp->t_flags & TF_NEEDSYN) { 13723 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13724 tiwin, thflags, nxt_pkt)); 13725 } else if (tp->t_flags & TF_ACKNOW) { 13726 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13727 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13728 return (ret_val); 13729 } else { 13730 ctf_do_drop(m, NULL); 13731 return (0); 13732 } 13733 } 13734 /* 13735 * Ack processing. 13736 */ 13737 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13738 return (ret_val); 13739 } 13740 if (ourfinisacked) { 13741 tcp_twstart(tp); 13742 m_freem(m); 13743 return (1); 13744 } 13745 if (sbavail(&so->so_snd)) { 13746 if (ctf_progress_timeout_check(tp, true)) { 13747 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13748 tp, tick, PROGRESS_DROP, __LINE__); 13749 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13750 return (1); 13751 } 13752 } 13753 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13754 tiwin, thflags, nxt_pkt)); 13755 } 13756 13757 /* 13758 * Return value of 1, the TCB is unlocked and most 13759 * likely gone, return value of 0, the TCP is still 13760 * locked. 13761 */ 13762 static int 13763 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 13764 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13765 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13766 { 13767 int32_t ret_val = 0; 13768 int32_t orig_tlen; 13769 int32_t ourfinisacked = 0; 13770 13771 ctf_calc_rwin(so, tp); 13772 13773 if ((thflags & TH_RST) || 13774 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13775 return (ctf_process_rst(m, th, so, tp)); 13776 /* 13777 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13778 * synchronized state. 13779 */ 13780 if (thflags & TH_SYN) { 13781 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13782 return (ret_val); 13783 } 13784 /* 13785 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13786 * it's less than ts_recent, drop it. 13787 */ 13788 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13789 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13790 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13791 return (ret_val); 13792 } 13793 orig_tlen = tlen; 13794 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13795 return (ret_val); 13796 } 13797 /* 13798 * If last ACK falls within this segment's sequence numbers, record 13799 * its timestamp. NOTE: 1) That the test incorporates suggestions 13800 * from the latest proposal of the tcplw@cray.com list (Braden 13801 * 1993/04/26). 2) That updating only on newer timestamps interferes 13802 * with our earlier PAWS tests, so this check should be solely 13803 * predicated on the sequence space of this segment. 3) That we 13804 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13805 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13806 * SEG.Len, This modified check allows us to overcome RFC1323's 13807 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13808 * p.869. In such cases, we can still calculate the RTT correctly 13809 * when RCV.NXT == Last.ACK.Sent. 
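 * For example, a bare ACK that sits exactly at the left edge
 * (SEG.SEQ == Last.ACK.Sent, SEG.Len == 0, no SYN/FIN) satisfies
 * both comparisons with equality and therefore still refreshes
 * ts_recent.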
13810 */ 13811 if ((to->to_flags & TOF_TS) != 0 && 13812 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13813 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13814 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13815 tp->ts_recent_age = tcp_ts_getticks(); 13816 tp->ts_recent = to->to_tsval; 13817 } 13818 /* 13819 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13820 * is on (half-synchronized state), then queue data for later 13821 * processing; else drop segment and return. 13822 */ 13823 if ((thflags & TH_ACK) == 0) { 13824 if (tp->t_flags & TF_NEEDSYN) { 13825 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13826 tiwin, thflags, nxt_pkt)); 13827 } else if (tp->t_flags & TF_ACKNOW) { 13828 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13829 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13830 return (ret_val); 13831 } else { 13832 ctf_do_drop(m, NULL); 13833 return (0); 13834 } 13835 } 13836 /* 13837 * case TCPS_LAST_ACK: Ack processing. 13838 */ 13839 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13840 return (ret_val); 13841 } 13842 if (ourfinisacked) { 13843 tp = tcp_close(tp); 13844 ctf_do_drop(m, tp); 13845 return (1); 13846 } 13847 if (sbavail(&so->so_snd)) { 13848 if (ctf_progress_timeout_check(tp, true)) { 13849 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13850 tp, tick, PROGRESS_DROP, __LINE__); 13851 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13852 return (1); 13853 } 13854 } 13855 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13856 tiwin, thflags, nxt_pkt)); 13857 } 13858 13859 /* 13860 * Return value of 1, the TCB is unlocked and most 13861 * likely gone, return value of 0, the TCP is still 13862 * locked. 13863 */ 13864 static int 13865 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 13866 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13867 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13868 { 13869 int32_t ret_val = 0; 13870 int32_t orig_tlen = tlen; 13871 int32_t ourfinisacked = 0; 13872 13873 ctf_calc_rwin(so, tp); 13874 13875 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 13876 if ((thflags & TH_RST) || 13877 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13878 return (ctf_process_rst(m, th, so, tp)); 13879 /* 13880 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13881 * synchronized state. 13882 */ 13883 if (thflags & TH_SYN) { 13884 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13885 return (ret_val); 13886 } 13887 /* 13888 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13889 * it's less than ts_recent, drop it. 13890 */ 13891 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13892 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13893 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13894 return (ret_val); 13895 } 13896 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13897 return (ret_val); 13898 } 13899 /* 13900 * If new data are received on a connection after the user processes 13901 * are gone, then RST the other end. 13902 */ 13903 if ((tp->t_flags & TF_CLOSED) && tlen && 13904 rack_check_data_after_close(m, tp, &tlen, th, so)) 13905 return (1); 13906 /* 13907 * If last ACK falls within this segment's sequence numbers, record 13908 * its timestamp. 
NOTE: 1) That the test incorporates suggestions 13909 * from the latest proposal of the tcplw@cray.com list (Braden 13910 * 1993/04/26). 2) That updating only on newer timestamps interferes 13911 * with our earlier PAWS tests, so this check should be solely 13912 * predicated on the sequence space of this segment. 3) That we 13913 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13914 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13915 * SEG.Len, This modified check allows us to overcome RFC1323's 13916 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13917 * p.869. In such cases, we can still calculate the RTT correctly 13918 * when RCV.NXT == Last.ACK.Sent. 13919 */ 13920 if ((to->to_flags & TOF_TS) != 0 && 13921 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13922 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13923 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13924 tp->ts_recent_age = tcp_ts_getticks(); 13925 tp->ts_recent = to->to_tsval; 13926 } 13927 /* 13928 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13929 * is on (half-synchronized state), then queue data for later 13930 * processing; else drop segment and return. 13931 */ 13932 if ((thflags & TH_ACK) == 0) { 13933 if (tp->t_flags & TF_NEEDSYN) { 13934 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13935 tiwin, thflags, nxt_pkt)); 13936 } else if (tp->t_flags & TF_ACKNOW) { 13937 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13938 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13939 return (ret_val); 13940 } else { 13941 ctf_do_drop(m, NULL); 13942 return (0); 13943 } 13944 } 13945 /* 13946 * Ack processing. 13947 */ 13948 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13949 return (ret_val); 13950 } 13951 if (sbavail(&so->so_snd)) { 13952 if (ctf_progress_timeout_check(tp, true)) { 13953 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13954 tp, tick, PROGRESS_DROP, __LINE__); 13955 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13956 return (1); 13957 } 13958 } 13959 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13960 tiwin, thflags, nxt_pkt)); 13961 } 13962 13963 static void inline 13964 rack_clear_rate_sample(struct tcp_rack *rack) 13965 { 13966 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 13967 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 13968 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 13969 } 13970 13971 static void 13972 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 13973 { 13974 uint64_t bw_est, rate_wanted; 13975 int chged = 0; 13976 uint32_t user_max, orig_min, orig_max; 13977 13978 #ifdef TCP_REQUEST_TRK 13979 if (rack->rc_hybrid_mode && 13980 (rack->r_ctl.rc_pace_max_segs != 0) && 13981 (rack_hybrid_allow_set_maxseg == 1) && 13982 (rack->r_ctl.rc_last_sft != NULL)) { 13983 rack->r_ctl.rc_last_sft->hybrid_flags &= ~TCP_HYBRID_PACING_SETMSS; 13984 return; 13985 } 13986 #endif 13987 orig_min = rack->r_ctl.rc_pace_min_segs; 13988 orig_max = rack->r_ctl.rc_pace_max_segs; 13989 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 13990 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 13991 chged = 1; 13992 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 13993 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 13994 if (user_max != rack->r_ctl.rc_pace_max_segs) 13995 chged = 1; 13996 } 13997 if (rack->rc_force_max_seg) { 13998 rack->r_ctl.rc_pace_max_segs = user_max; 
13999 } else if (rack->use_fixed_rate) { 14000 bw_est = rack_get_bw(rack); 14001 if ((rack->r_ctl.crte == NULL) || 14002 (bw_est != rack->r_ctl.crte->rate)) { 14003 rack->r_ctl.rc_pace_max_segs = user_max; 14004 } else { 14005 /* We are pacing right at the hardware rate */ 14006 uint32_t segsiz, pace_one; 14007 14008 if (rack_pace_one_seg || 14009 (rack->r_ctl.rc_user_set_min_segs == 1)) 14010 pace_one = 1; 14011 else 14012 pace_one = 0; 14013 segsiz = min(ctf_fixed_maxseg(tp), 14014 rack->r_ctl.rc_pace_min_segs); 14015 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor( 14016 tp, bw_est, segsiz, pace_one, 14017 rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); 14018 } 14019 } else if (rack->rc_always_pace) { 14020 if (rack->r_ctl.gp_bw || 14021 rack->r_ctl.init_rate) { 14022 /* We have a rate of some sort set */ 14023 uint32_t orig; 14024 14025 bw_est = rack_get_bw(rack); 14026 orig = rack->r_ctl.rc_pace_max_segs; 14027 if (fill_override) 14028 rate_wanted = *fill_override; 14029 else 14030 rate_wanted = rack_get_gp_est(rack); 14031 if (rate_wanted) { 14032 /* We have something */ 14033 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 14034 rate_wanted, 14035 ctf_fixed_maxseg(rack->rc_tp)); 14036 } else 14037 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 14038 if (orig != rack->r_ctl.rc_pace_max_segs) 14039 chged = 1; 14040 } else if ((rack->r_ctl.gp_bw == 0) && 14041 (rack->r_ctl.rc_pace_max_segs == 0)) { 14042 /* 14043 * If we have nothing limit us to bursting 14044 * out IW sized pieces. 14045 */ 14046 chged = 1; 14047 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 14048 } 14049 } 14050 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 14051 chged = 1; 14052 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 14053 } 14054 if (chged) 14055 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 14056 } 14057 14058 14059 static void 14060 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack, int32_t flags) 14061 { 14062 #ifdef INET6 14063 struct ip6_hdr *ip6 = NULL; 14064 #endif 14065 #ifdef INET 14066 struct ip *ip = NULL; 14067 #endif 14068 struct udphdr *udp = NULL; 14069 14070 /* Ok lets fill in the fast block, it can only be used with no IP options! 
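 * It caches a prebuilt IP (or IPv6) header, an optional UDP
 * tunneling header and a TCP header template, filled in by
 * tcpip_fillheaders() below, so the fast output path can copy a
 * ready-made header instead of rebuilding it on every send.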
*/ 14071 #ifdef INET6 14072 if (rack->r_is_v6) { 14073 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 14074 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 14075 if (tp->t_port) { 14076 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 14077 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 14078 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 14079 udp->uh_dport = tp->t_port; 14080 rack->r_ctl.fsb.udp = udp; 14081 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 14082 } else 14083 { 14084 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 14085 rack->r_ctl.fsb.udp = NULL; 14086 } 14087 tcpip_fillheaders(rack->rc_inp, 14088 tp->t_port, 14089 ip6, rack->r_ctl.fsb.th); 14090 rack->r_ctl.fsb.hoplimit = in6_selecthlim(rack->rc_inp, NULL); 14091 } else 14092 #endif /* INET6 */ 14093 #ifdef INET 14094 { 14095 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 14096 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 14097 if (tp->t_port) { 14098 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 14099 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 14100 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 14101 udp->uh_dport = tp->t_port; 14102 rack->r_ctl.fsb.udp = udp; 14103 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 14104 } else 14105 { 14106 rack->r_ctl.fsb.udp = NULL; 14107 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 14108 } 14109 tcpip_fillheaders(rack->rc_inp, 14110 tp->t_port, 14111 ip, rack->r_ctl.fsb.th); 14112 rack->r_ctl.fsb.hoplimit = tptoinpcb(tp)->inp_ip_ttl; 14113 } 14114 #endif 14115 rack->r_ctl.fsb.recwin = lmin(lmax(sbspace(&tptosocket(tp)->so_rcv), 0), 14116 (long)TCP_MAXWIN << tp->rcv_scale); 14117 rack->r_fsb_inited = 1; 14118 } 14119 14120 static int 14121 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 14122 { 14123 /* 14124 * Allocate the larger of spaces V6 if available else just 14125 * V4 and include udphdr (overbook) 14126 */ 14127 #ifdef INET6 14128 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 14129 #else 14130 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 14131 #endif 14132 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 14133 M_TCPFSB, M_NOWAIT|M_ZERO); 14134 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 14135 return (ENOMEM); 14136 } 14137 rack->r_fsb_inited = 0; 14138 return (0); 14139 } 14140 14141 static void 14142 rack_log_hystart_event(struct tcp_rack *rack, uint32_t high_seq, uint8_t mod) 14143 { 14144 /* 14145 * Types of logs (mod value) 14146 * 20 - Initial round setup 14147 * 21 - Rack declares a new round. 
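 * In the resulting BB log record flex1 carries the current round,
 * flex2 the round-end sequence, flex3 the high_seq argument,
 * flex4 snd_max and flex8 the mod value.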
14148 */ 14149 struct tcpcb *tp; 14150 14151 tp = rack->rc_tp; 14152 if (tcp_bblogging_on(tp)) { 14153 union tcp_log_stackspecific log; 14154 struct timeval tv; 14155 14156 memset(&log, 0, sizeof(log)); 14157 log.u_bbr.flex1 = rack->r_ctl.current_round; 14158 log.u_bbr.flex2 = rack->r_ctl.roundends; 14159 log.u_bbr.flex3 = high_seq; 14160 log.u_bbr.flex4 = tp->snd_max; 14161 log.u_bbr.flex8 = mod; 14162 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14163 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 14164 log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes; 14165 TCP_LOG_EVENTP(tp, NULL, 14166 &tptosocket(tp)->so_rcv, 14167 &tptosocket(tp)->so_snd, 14168 TCP_HYSTART, 0, 14169 0, &log, false, &tv); 14170 } 14171 } 14172 14173 static void 14174 rack_deferred_init(struct tcpcb *tp, struct tcp_rack *rack) 14175 { 14176 rack->rack_deferred_inited = 1; 14177 rack->r_ctl.roundends = tp->snd_max; 14178 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 14179 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 14180 } 14181 14182 static void 14183 rack_init_retransmit_value(struct tcp_rack *rack, int ctl) 14184 { 14185 /* Retransmit bit controls. 14186 * 14187 * The setting of these values control one of 14188 * three settings you can have and dictate 14189 * how rack does retransmissions. Note this 14190 * is in *any* mode i.e. pacing on or off DGP 14191 * fixed rate pacing, or just bursting rack. 14192 * 14193 * 1 - Use full sized retransmits i.e. limit 14194 * the size to whatever the pace_max_segments 14195 * size is. 14196 * 14197 * 2 - Use pacer min granularity as a guide to 14198 * the size combined with the current calculated 14199 * goodput b/w measurement. So for example if 14200 * the goodput is measured at 20Mbps we would 14201 * calculate 8125 (pacer minimum 250usec in 14202 * that b/w) and then round it up to the next 14203 * MSS i.e. for 1448 mss 6 MSS or 8688 bytes. 14204 * 14205 * 0 - The rack default 1 MSS (anything not 0/1/2 14206 * fall here too if we are setting via rack_init()). 
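 * Internally setting 1 maps to full_size_rxt, setting 2 maps to
 * shape_rxt_to_pacing_min, and setting 0 (the default) clears
 * both flags, as the code below shows.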
14207 * 14208 */ 14209 if (ctl == 1) { 14210 rack->full_size_rxt = 1; 14211 rack->shape_rxt_to_pacing_min = 0; 14212 } else if (ctl == 2) { 14213 rack->full_size_rxt = 0; 14214 rack->shape_rxt_to_pacing_min = 1; 14215 } else { 14216 rack->full_size_rxt = 0; 14217 rack->shape_rxt_to_pacing_min = 0; 14218 } 14219 } 14220 14221 static void 14222 rack_log_chg_info(struct tcpcb *tp, struct tcp_rack *rack, uint8_t mod, 14223 uint32_t flex1, 14224 uint32_t flex2, 14225 uint32_t flex3) 14226 { 14227 if (tcp_bblogging_on(rack->rc_tp)) { 14228 union tcp_log_stackspecific log; 14229 struct timeval tv; 14230 14231 memset(&log, 0, sizeof(log)); 14232 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14233 log.u_bbr.flex8 = mod; 14234 log.u_bbr.flex1 = flex1; 14235 log.u_bbr.flex2 = flex2; 14236 log.u_bbr.flex3 = flex3; 14237 tcp_log_event(tp, NULL, NULL, NULL, TCP_CHG_QUERY, 0, 14238 0, &log, false, NULL, __func__, __LINE__, &tv); 14239 } 14240 } 14241 14242 static int 14243 rack_chg_query(struct tcpcb *tp, struct tcp_query_resp *reqr) 14244 { 14245 struct tcp_rack *rack; 14246 struct rack_sendmap *rsm; 14247 int i; 14248 14249 14250 rack = (struct tcp_rack *)tp->t_fb_ptr; 14251 switch (reqr->req) { 14252 case TCP_QUERY_SENDMAP: 14253 if ((reqr->req_param == tp->snd_max) || 14254 (tp->snd_max == tp->snd_una)){ 14255 /* Unlikely */ 14256 return (0); 14257 } 14258 rsm = tqhash_find(rack->r_ctl.tqh, reqr->req_param); 14259 if (rsm == NULL) { 14260 /* Can't find that seq -- unlikely */ 14261 return (0); 14262 } 14263 reqr->sendmap_start = rsm->r_start; 14264 reqr->sendmap_end = rsm->r_end; 14265 reqr->sendmap_send_cnt = rsm->r_rtr_cnt; 14266 reqr->sendmap_fas = rsm->r_fas; 14267 if (reqr->sendmap_send_cnt > SNDMAP_NRTX) 14268 reqr->sendmap_send_cnt = SNDMAP_NRTX; 14269 for(i=0; i<reqr->sendmap_send_cnt; i++) 14270 reqr->sendmap_time[i] = rsm->r_tim_lastsent[i]; 14271 reqr->sendmap_ack_arrival = rsm->r_ack_arrival; 14272 reqr->sendmap_flags = rsm->r_flags & SNDMAP_MASK; 14273 reqr->sendmap_r_rtr_bytes = rsm->r_rtr_bytes; 14274 reqr->sendmap_dupacks = rsm->r_dupack; 14275 rack_log_chg_info(tp, rack, 1, 14276 rsm->r_start, 14277 rsm->r_end, 14278 rsm->r_flags); 14279 return(1); 14280 break; 14281 case TCP_QUERY_TIMERS_UP: 14282 if (rack->r_ctl.rc_hpts_flags == 0) { 14283 /* no timers up */ 14284 return (0); 14285 } 14286 reqr->timer_hpts_flags = rack->r_ctl.rc_hpts_flags; 14287 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14288 reqr->timer_pacing_to = rack->r_ctl.rc_last_output_to; 14289 } 14290 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 14291 reqr->timer_timer_exp = rack->r_ctl.rc_timer_exp; 14292 } 14293 rack_log_chg_info(tp, rack, 2, 14294 rack->r_ctl.rc_hpts_flags, 14295 rack->r_ctl.rc_last_output_to, 14296 rack->r_ctl.rc_timer_exp); 14297 return (1); 14298 break; 14299 case TCP_QUERY_RACK_TIMES: 14300 /* Reordering items */ 14301 reqr->rack_num_dsacks = rack->r_ctl.num_dsack; 14302 reqr->rack_reorder_ts = rack->r_ctl.rc_reorder_ts; 14303 /* Timerstamps and timers */ 14304 reqr->rack_rxt_last_time = rack->r_ctl.rc_tlp_rxt_last_time; 14305 reqr->rack_min_rtt = rack->r_ctl.rc_rack_min_rtt; 14306 reqr->rack_rtt = rack->rc_rack_rtt; 14307 reqr->rack_tmit_time = rack->r_ctl.rc_rack_tmit_time; 14308 reqr->rack_srtt_measured = rack->rc_srtt_measure_made; 14309 /* PRR data */ 14310 reqr->rack_sacked = rack->r_ctl.rc_sacked; 14311 reqr->rack_holes_rxt = rack->r_ctl.rc_holes_rxt; 14312 reqr->rack_prr_delivered = rack->r_ctl.rc_prr_delivered; 14313 reqr->rack_prr_recovery_fs = rack->r_ctl.rc_prr_recovery_fs; 
14314 reqr->rack_prr_sndcnt = rack->r_ctl.rc_prr_sndcnt; 14315 reqr->rack_prr_out = rack->r_ctl.rc_prr_out; 14316 /* TLP and persists info */ 14317 reqr->rack_tlp_out = rack->rc_tlp_in_progress; 14318 reqr->rack_tlp_cnt_out = rack->r_ctl.rc_tlp_cnt_out; 14319 if (rack->rc_in_persist) { 14320 reqr->rack_time_went_idle = rack->r_ctl.rc_went_idle_time; 14321 reqr->rack_in_persist = 1; 14322 } else { 14323 reqr->rack_time_went_idle = 0; 14324 reqr->rack_in_persist = 0; 14325 } 14326 if (rack->r_wanted_output) 14327 reqr->rack_wanted_output = 1; 14328 else 14329 reqr->rack_wanted_output = 0; 14330 return (1); 14331 break; 14332 default: 14333 return (-EINVAL); 14334 } 14335 } 14336 14337 static void 14338 rack_switch_failed(struct tcpcb *tp) 14339 { 14340 /* 14341 * This method gets called if a stack switch was 14342 * attempted and it failed. We are left 14343 * but our hpts timers were stopped and we 14344 * need to validate time units and t_flags2. 14345 */ 14346 struct tcp_rack *rack; 14347 struct timeval tv; 14348 uint32_t cts; 14349 uint32_t toval; 14350 struct hpts_diag diag; 14351 14352 rack = (struct tcp_rack *)tp->t_fb_ptr; 14353 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC); 14354 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 14355 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 14356 else 14357 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 14358 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 14359 tp->t_flags2 |= TF2_MBUF_ACKCMP; 14360 if (tp->t_in_hpts > IHPTS_NONE) { 14361 /* Strange */ 14362 return; 14363 } 14364 cts = tcp_get_usecs(&tv); 14365 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14366 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 14367 toval = rack->r_ctl.rc_last_output_to - cts; 14368 } else { 14369 /* one slot please */ 14370 toval = HPTS_TICKS_PER_SLOT; 14371 } 14372 } else if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 14373 if (TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 14374 toval = rack->r_ctl.rc_timer_exp - cts; 14375 } else { 14376 /* one slot please */ 14377 toval = HPTS_TICKS_PER_SLOT; 14378 } 14379 } else 14380 toval = HPTS_TICKS_PER_SLOT; 14381 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(toval), 14382 __LINE__, &diag); 14383 rack_log_hpts_diag(rack, cts, &diag, &tv); 14384 } 14385 14386 static int 14387 rack_init_outstanding(struct tcpcb *tp, struct tcp_rack *rack, uint32_t us_cts, void *ptr) 14388 { 14389 struct rack_sendmap *rsm, *ersm; 14390 int insret __diagused; 14391 /* 14392 * When initing outstanding, we must be quite careful 14393 * to not refer to tp->t_fb_ptr. This has the old rack 14394 * pointer in it, not the "new" one (when we are doing 14395 * a stack switch). 
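 * Two cases follow: if the previous stack exposes no chg_query
 * handler we synthesize a single sendmap entry covering all of
 * the outstanding data; otherwise we walk the old stack's
 * sendmap via TCP_QUERY_SENDMAP and rebuild each entry from the
 * query responses.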
14396 */ 14397 14398 14399 if (tp->t_fb->tfb_chg_query == NULL) { 14400 /* Create a send map for the current outstanding data */ 14401 14402 rsm = rack_alloc(rack); 14403 if (rsm == NULL) { 14404 uma_zfree(rack_pcb_zone, ptr); 14405 return (ENOMEM); 14406 } 14407 rsm->r_no_rtt_allowed = 1; 14408 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 14409 rsm->r_rtr_cnt = 1; 14410 rsm->r_rtr_bytes = 0; 14411 if (tp->t_flags & TF_SENTFIN) 14412 rsm->r_flags |= RACK_HAS_FIN; 14413 rsm->r_end = tp->snd_max; 14414 if (tp->snd_una == tp->iss) { 14415 /* The data space is one beyond snd_una */ 14416 rsm->r_flags |= RACK_HAS_SYN; 14417 rsm->r_start = tp->iss; 14418 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); 14419 } else 14420 rsm->r_start = tp->snd_una; 14421 rsm->r_dupack = 0; 14422 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 14423 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 14424 if (rsm->m) { 14425 rsm->orig_m_len = rsm->m->m_len; 14426 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 14427 } else { 14428 rsm->orig_m_len = 0; 14429 rsm->orig_t_space = 0; 14430 } 14431 } else { 14432 /* 14433 * This can happen if we have a stand-alone FIN or 14434 * SYN. 14435 */ 14436 rsm->m = NULL; 14437 rsm->orig_m_len = 0; 14438 rsm->orig_t_space = 0; 14439 rsm->soff = 0; 14440 } 14441 #ifdef INVARIANTS 14442 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 14443 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p", 14444 insret, rack, rsm); 14445 } 14446 #else 14447 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 14448 #endif 14449 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 14450 rsm->r_in_tmap = 1; 14451 } else { 14452 /* We have a query mechanism, lets use it */ 14453 struct tcp_query_resp qr; 14454 int i; 14455 tcp_seq at; 14456 14457 at = tp->snd_una; 14458 while (at != tp->snd_max) { 14459 memset(&qr, 0, sizeof(qr)); 14460 qr.req = TCP_QUERY_SENDMAP; 14461 qr.req_param = at; 14462 if ((*tp->t_fb->tfb_chg_query)(tp, &qr) == 0) 14463 break; 14464 /* Move forward */ 14465 at = qr.sendmap_end; 14466 /* Now lets build the entry for this one */ 14467 rsm = rack_alloc(rack); 14468 if (rsm == NULL) { 14469 uma_zfree(rack_pcb_zone, ptr); 14470 return (ENOMEM); 14471 } 14472 memset(rsm, 0, sizeof(struct rack_sendmap)); 14473 /* Now configure the rsm and insert it */ 14474 rsm->r_dupack = qr.sendmap_dupacks; 14475 rsm->r_start = qr.sendmap_start; 14476 rsm->r_end = qr.sendmap_end; 14477 if (qr.sendmap_fas) 14478 rsm->r_fas = qr.sendmap_end; 14479 else 14480 rsm->r_fas = rsm->r_start - tp->snd_una; 14481 /* 14482 * We have carefully aligned the bits 14483 * so that all we have to do is copy over 14484 * the bits with the mask. 
14485 */ 14486 rsm->r_flags = qr.sendmap_flags & SNDMAP_MASK; 14487 rsm->r_rtr_bytes = qr.sendmap_r_rtr_bytes; 14488 rsm->r_rtr_cnt = qr.sendmap_send_cnt; 14489 rsm->r_ack_arrival = qr.sendmap_ack_arrival; 14490 for (i=0 ; i<rsm->r_rtr_cnt; i++) 14491 rsm->r_tim_lastsent[i] = qr.sendmap_time[i]; 14492 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 14493 (rsm->r_start - tp->snd_una), &rsm->soff); 14494 if (rsm->m) { 14495 rsm->orig_m_len = rsm->m->m_len; 14496 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 14497 } else { 14498 rsm->orig_m_len = 0; 14499 rsm->orig_t_space = 0; 14500 } 14501 #ifdef INVARIANTS 14502 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 14503 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p", 14504 insret, rack, rsm); 14505 } 14506 #else 14507 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 14508 #endif 14509 if ((rsm->r_flags & RACK_ACKED) == 0) { 14510 TAILQ_FOREACH(ersm, &rack->r_ctl.rc_tmap, r_tnext) { 14511 if (ersm->r_tim_lastsent[(ersm->r_rtr_cnt-1)] > 14512 rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) { 14513 /* 14514 * If the existing ersm was sent at 14515 * a later time than the new one, then 14516 * the new one should appear ahead of this 14517 * ersm. 14518 */ 14519 rsm->r_in_tmap = 1; 14520 TAILQ_INSERT_BEFORE(ersm, rsm, r_tnext); 14521 break; 14522 } 14523 } 14524 if (rsm->r_in_tmap == 0) { 14525 /* 14526 * Not found so shove it on the tail. 14527 */ 14528 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 14529 rsm->r_in_tmap = 1; 14530 } 14531 } else { 14532 if ((rack->r_ctl.rc_sacklast == NULL) || 14533 (SEQ_GT(rsm->r_end, rack->r_ctl.rc_sacklast->r_end))) { 14534 rack->r_ctl.rc_sacklast = rsm; 14535 } 14536 } 14537 rack_log_chg_info(tp, rack, 3, 14538 rsm->r_start, 14539 rsm->r_end, 14540 rsm->r_flags); 14541 } 14542 } 14543 return (0); 14544 } 14545 14546 14547 static int32_t 14548 rack_init(struct tcpcb *tp, void **ptr) 14549 { 14550 struct inpcb *inp = tptoinpcb(tp); 14551 struct tcp_rack *rack = NULL; 14552 uint32_t iwin, snt, us_cts; 14553 size_t sz; 14554 int err, no_query; 14555 14556 tcp_hpts_init(tp); 14557 14558 /* 14559 * First are we the initial or are we a switched stack? 14560 * If we are initing via tcp_newtcppcb the ptr passed 14561 * will be tp->t_fb_ptr. If its a stack switch that 14562 * has a previous stack we can query it will be a local 14563 * var that will in the end be set into t_fb_ptr. 14564 */ 14565 if (ptr == &tp->t_fb_ptr) 14566 no_query = 1; 14567 else 14568 no_query = 0; 14569 *ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 14570 if (*ptr == NULL) { 14571 /* 14572 * We need to allocate memory but cant. The INP and INP_INFO 14573 * locks and they are recursive (happens during setup. 
So a 14574 * scheme to drop the locks fails :( 14575 * 14576 */ 14577 return(ENOMEM); 14578 } 14579 memset(*ptr, 0, sizeof(struct tcp_rack)); 14580 rack = (struct tcp_rack *)*ptr; 14581 rack->r_ctl.tqh = malloc(sizeof(struct tailq_hash), M_TCPFSB, M_NOWAIT); 14582 if (rack->r_ctl.tqh == NULL) { 14583 uma_zfree(rack_pcb_zone, rack); 14584 return(ENOMEM); 14585 } 14586 tqhash_init(rack->r_ctl.tqh); 14587 TAILQ_INIT(&rack->r_ctl.rc_free); 14588 TAILQ_INIT(&rack->r_ctl.rc_tmap); 14589 rack->rc_tp = tp; 14590 rack->rc_inp = inp; 14591 /* Set the flag */ 14592 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0; 14593 /* Probably not needed but lets be sure */ 14594 rack_clear_rate_sample(rack); 14595 /* 14596 * Save off the default values, socket options will poke 14597 * at these if pacing is not on or we have not yet 14598 * reached where pacing is on (gp_ready/fixed enabled). 14599 * When they get set into the CC module (when gp_ready 14600 * is enabled or we enable fixed) then we will set these 14601 * values into the CC and place in here the old values 14602 * so we have a restoral. Then we will set the flag 14603 * rc_pacing_cc_set. That way whenever we turn off pacing 14604 * or switch off this stack, we will know to go restore 14605 * the saved values. 14606 * 14607 * We specifically put into the beta the ecn value for pacing. 14608 */ 14609 rack->rc_new_rnd_needed = 1; 14610 rack->r_ctl.rc_split_limit = V_tcp_map_split_limit; 14611 /* We want abe like behavior as well */ 14612 14613 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 14614 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 14615 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 14616 if (rack_fill_cw_state) 14617 rack->rc_pace_to_cwnd = 1; 14618 if (rack_pacing_min_seg) 14619 rack->r_ctl.rc_user_set_min_segs = rack_pacing_min_seg; 14620 if (use_rack_rr) 14621 rack->use_rack_rr = 1; 14622 if (rack_dnd_default) { 14623 rack->rc_pace_dnd = 1; 14624 } 14625 if (V_tcp_delack_enabled) 14626 tp->t_delayed_ack = 1; 14627 else 14628 tp->t_delayed_ack = 0; 14629 #ifdef TCP_ACCOUNTING 14630 if (rack_tcp_accounting) { 14631 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 14632 } 14633 #endif 14634 rack->r_ctl.pcm_i.cnt_alloc = RACK_DEFAULT_PCM_ARRAY; 14635 sz = (sizeof(struct rack_pcm_stats) * rack->r_ctl.pcm_i.cnt_alloc); 14636 rack->r_ctl.pcm_s = malloc(sz,M_TCPPCM, M_NOWAIT); 14637 if (rack->r_ctl.pcm_s == NULL) { 14638 rack->r_ctl.pcm_i.cnt_alloc = 0; 14639 } 14640 #ifdef NETFLIX_STATS 14641 rack->r_ctl.side_chan_dis_mask = tcp_sidechannel_disable_mask; 14642 #endif 14643 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss; 14644 rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_per_upper_bound_ca; 14645 if (rack_enable_shared_cwnd) 14646 rack->rack_enable_scwnd = 1; 14647 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; 14648 rack->rc_user_set_max_segs = rack_hptsi_segments; 14649 rack->r_ctl.max_reduction = rack_max_reduce; 14650 rack->rc_force_max_seg = 0; 14651 TAILQ_INIT(&rack->r_ctl.opt_list); 14652 rack->r_ctl.rc_saved_beta = V_newreno_beta_ecn; 14653 rack->r_ctl.rc_saved_beta_ecn = V_newreno_beta_ecn; 14654 if (rack_hibeta_setting) { 14655 rack->rack_hibeta = 1; 14656 if ((rack_hibeta_setting >= 50) && 14657 (rack_hibeta_setting <= 100)) { 14658 rack->r_ctl.rc_saved_beta = rack_hibeta_setting; 14659 rack->r_ctl.saved_hibeta = rack_hibeta_setting; 14660 } 14661 } else { 14662 rack->r_ctl.saved_hibeta = 50; 14663 } 14664 /* 14665 * We initialize to all ones so we never match 0 14666 * just in case the 
client sends in 0, it hopefully 14667 * will never have all 1's in ms :-) 14668 */ 14669 rack->r_ctl.last_tm_mark = 0xffffffffffffffff; 14670 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 14671 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 14672 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 14673 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 14674 rack->r_ctl.rc_highest_us_rtt = 0; 14675 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 14676 rack->pcm_enabled = rack_pcm_is_enabled; 14677 if (rack_fillcw_bw_cap) 14678 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 14679 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 14680 if (rack_use_cmp_acks) 14681 rack->r_use_cmp_ack = 1; 14682 if (rack_disable_prr) 14683 rack->rack_no_prr = 1; 14684 if (rack_gp_no_rec_chg) 14685 rack->rc_gp_no_rec_chg = 1; 14686 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 14687 rack->r_ctl.pacing_method |= RACK_REG_PACING; 14688 rack->rc_always_pace = 1; 14689 if (rack->rack_hibeta) 14690 rack_set_cc_pacing(rack); 14691 } else 14692 rack->rc_always_pace = 0; 14693 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 14694 rack->r_mbuf_queue = 1; 14695 else 14696 rack->r_mbuf_queue = 0; 14697 rack_set_pace_segments(tp, rack, __LINE__, NULL); 14698 if (rack_limits_scwnd) 14699 rack->r_limit_scw = 1; 14700 else 14701 rack->r_limit_scw = 0; 14702 rack_init_retransmit_value(rack, rack_rxt_controls); 14703 rack->rc_labc = V_tcp_abc_l_var; 14704 if (rack_honors_hpts_min_to) 14705 rack->r_use_hpts_min = 1; 14706 if (tp->snd_una != 0) { 14707 rack->rc_sendvars_notset = 0; 14708 /* 14709 * Make sure any TCP timers are not running. 14710 */ 14711 tcp_timer_stop(tp); 14712 } else { 14713 /* 14714 * Server side, we are called from the 14715 * syn-cache. This means none of the 14716 * snd_una/max are set yet so we have 14717 * to defer this until the first send. 
14718 */ 14719 rack->rc_sendvars_notset = 1; 14720 } 14721 14722 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; 14723 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 14724 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 14725 rack->r_ctl.rc_min_to = rack_min_to; 14726 microuptime(&rack->r_ctl.act_rcv_time); 14727 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 14728 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; 14729 if (rack_hw_up_only) 14730 rack->r_up_only = 1; 14731 if (rack_do_dyn_mul) { 14732 /* When dynamic adjustment is on CA needs to start at 100% */ 14733 rack->rc_gp_dyn_mul = 1; 14734 if (rack_do_dyn_mul >= 100) 14735 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 14736 } else 14737 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 14738 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 14739 if (rack_timely_off) { 14740 rack->rc_skip_timely = 1; 14741 } 14742 if (rack->rc_skip_timely) { 14743 rack->r_ctl.rack_per_of_gp_rec = 90; 14744 rack->r_ctl.rack_per_of_gp_ca = 100; 14745 rack->r_ctl.rack_per_of_gp_ss = 250; 14746 } 14747 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 14748 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 14749 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 14750 14751 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 14752 rack_probertt_filter_life); 14753 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 14754 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 14755 rack->r_ctl.rc_time_of_last_probertt = us_cts; 14756 rack->r_ctl.rc_went_idle_time = us_cts; 14757 rack->r_ctl.rc_time_probertt_starts = 0; 14758 14759 rack->r_ctl.gp_rnd_thresh = rack_rnd_cnt_req & 0xff; 14760 if (rack_rnd_cnt_req & 0x10000) 14761 rack->r_ctl.gate_to_fs = 1; 14762 rack->r_ctl.gp_gain_req = rack_gp_gain_req; 14763 if ((rack_rnd_cnt_req & 0x100) > 0) { 14764 14765 } 14766 if (rack_dsack_std_based & 0x1) { 14767 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 14768 rack->rc_rack_tmr_std_based = 1; 14769 } 14770 if (rack_dsack_std_based & 0x2) { 14771 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 14772 rack->rc_rack_use_dsack = 1; 14773 } 14774 /* We require at least one measurement, even if the sysctl is 0 */ 14775 if (rack_req_measurements) 14776 rack->r_ctl.req_measurements = rack_req_measurements; 14777 else 14778 rack->r_ctl.req_measurements = 1; 14779 if (rack_enable_hw_pacing) 14780 rack->rack_hdw_pace_ena = 1; 14781 if (rack_hw_rate_caps) 14782 rack->r_rack_hw_rate_caps = 1; 14783 if (rack_non_rxt_use_cr) 14784 rack->rack_rec_nonrxt_use_cr = 1; 14785 /* Lets setup the fsb block */ 14786 err = rack_init_fsb(tp, rack); 14787 if (err) { 14788 uma_zfree(rack_pcb_zone, *ptr); 14789 *ptr = NULL; 14790 return (err); 14791 } 14792 if (rack_do_hystart) { 14793 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 14794 if (rack_do_hystart > 1) 14795 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 14796 if (rack_do_hystart > 2) 14797 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 14798 } 14799 /* Log what we will do with queries */ 14800 rack_log_chg_info(tp, rack, 7, 14801 no_query, 0, 0); 14802 if (rack_def_profile) 14803 rack_set_profile(rack, rack_def_profile); 14804 /* Cancel the GP measurement in progress */ 14805 tp->t_flags &= ~TF_GPUTINPROG; 14806 if ((tp->t_state != TCPS_CLOSED) && 14807 (tp->t_state != TCPS_TIME_WAIT)) { 14808 /* 14809 * We are already open, we may 14810 * 
need to adjust a few things. 14811 */ 14812 if (SEQ_GT(tp->snd_max, tp->iss)) 14813 snt = tp->snd_max - tp->iss; 14814 else 14815 snt = 0; 14816 iwin = rc_init_window(rack); 14817 if ((snt < iwin) && 14818 (no_query == 1)) { 14819 /* We are not past the initial window 14820 * on the first init (i.e. a stack switch 14821 * has not yet occured) so we need to make 14822 * sure cwnd and ssthresh is correct. 14823 */ 14824 if (tp->snd_cwnd < iwin) 14825 tp->snd_cwnd = iwin; 14826 /* 14827 * If we are within the initial window 14828 * we want ssthresh to be unlimited. Setting 14829 * it to the rwnd (which the default stack does 14830 * and older racks) is not really a good idea 14831 * since we want to be in SS and grow both the 14832 * cwnd and the rwnd (via dynamic rwnd growth). If 14833 * we set it to the rwnd then as the peer grows its 14834 * rwnd we will be stuck in CA and never hit SS. 14835 * 14836 * Its far better to raise it up high (this takes the 14837 * risk that there as been a loss already, probably 14838 * we should have an indicator in all stacks of loss 14839 * but we don't), but considering the normal use this 14840 * is a risk worth taking. The consequences of not 14841 * hitting SS are far worse than going one more time 14842 * into it early on (before we have sent even a IW). 14843 * It is highly unlikely that we will have had a loss 14844 * before getting the IW out. 14845 */ 14846 tp->snd_ssthresh = 0xffffffff; 14847 } 14848 /* 14849 * Any init based on sequence numbers 14850 * should be done in the deferred init path 14851 * since we can be CLOSED and not have them 14852 * inited when rack_init() is called. We 14853 * are not closed so lets call it. 14854 */ 14855 rack_deferred_init(tp, rack); 14856 } 14857 if ((tp->t_state != TCPS_CLOSED) && 14858 (tp->t_state != TCPS_TIME_WAIT) && 14859 (no_query == 0) && 14860 (tp->snd_una != tp->snd_max)) { 14861 err = rack_init_outstanding(tp, rack, us_cts, *ptr); 14862 if (err) { 14863 *ptr = NULL; 14864 return(err); 14865 } 14866 } 14867 rack_stop_all_timers(tp, rack); 14868 /* Setup all the t_flags2 */ 14869 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 14870 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 14871 else 14872 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 14873 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 14874 tp->t_flags2 |= TF2_MBUF_ACKCMP; 14875 /* 14876 * Timers in Rack are kept in microseconds so lets 14877 * convert any initial incoming variables 14878 * from ticks into usecs. Note that we 14879 * also change the values of t_srtt and t_rttvar, if 14880 * they are non-zero. They are kept with a 5 14881 * bit decimal so we have to carefully convert 14882 * these to get the full precision. 
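 * Illustratively: a stored value of v ticks scaled by 2^5 becomes
 * roughly (v * usecs-per-tick) / 2^5 microseconds; rack_convert_rtts()
 * below performs the exact conversion.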
14883 */ 14884 rack_convert_rtts(tp); 14885 rack_log_hystart_event(rack, rack->r_ctl.roundends, 20); 14886 if ((tptoinpcb(tp)->inp_flags & INP_DROPPED) == 0) { 14887 /* We do not start any timers on DROPPED connections */ 14888 if (tp->t_fb->tfb_chg_query == NULL) { 14889 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 14890 } else { 14891 struct tcp_query_resp qr; 14892 int ret; 14893 14894 memset(&qr, 0, sizeof(qr)); 14895 14896 /* Get the misc time stamps and such for rack */ 14897 qr.req = TCP_QUERY_RACK_TIMES; 14898 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); 14899 if (ret == 1) { 14900 rack->r_ctl.rc_reorder_ts = qr.rack_reorder_ts; 14901 rack->r_ctl.num_dsack = qr.rack_num_dsacks; 14902 rack->r_ctl.rc_tlp_rxt_last_time = qr.rack_rxt_last_time; 14903 rack->r_ctl.rc_rack_min_rtt = qr.rack_min_rtt; 14904 rack->rc_rack_rtt = qr.rack_rtt; 14905 rack->r_ctl.rc_rack_tmit_time = qr.rack_tmit_time; 14906 rack->r_ctl.rc_sacked = qr.rack_sacked; 14907 rack->r_ctl.rc_holes_rxt = qr.rack_holes_rxt; 14908 rack->r_ctl.rc_prr_delivered = qr.rack_prr_delivered; 14909 rack->r_ctl.rc_prr_recovery_fs = qr.rack_prr_recovery_fs; 14910 rack->r_ctl.rc_prr_sndcnt = qr.rack_prr_sndcnt; 14911 rack->r_ctl.rc_prr_out = qr.rack_prr_out; 14912 if (qr.rack_tlp_out) { 14913 rack->rc_tlp_in_progress = 1; 14914 rack->r_ctl.rc_tlp_cnt_out = qr.rack_tlp_cnt_out; 14915 } else { 14916 rack->rc_tlp_in_progress = 0; 14917 rack->r_ctl.rc_tlp_cnt_out = 0; 14918 } 14919 if (qr.rack_srtt_measured) 14920 rack->rc_srtt_measure_made = 1; 14921 if (qr.rack_in_persist == 1) { 14922 rack->r_ctl.rc_went_idle_time = qr.rack_time_went_idle; 14923 #ifdef NETFLIX_SHARED_CWND 14924 if (rack->r_ctl.rc_scw) { 14925 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 14926 rack->rack_scwnd_is_idle = 1; 14927 } 14928 #endif 14929 rack->r_ctl.persist_lost_ends = 0; 14930 rack->probe_not_answered = 0; 14931 rack->forced_ack = 0; 14932 tp->t_rxtshift = 0; 14933 rack->rc_in_persist = 1; 14934 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 14935 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 14936 } 14937 if (qr.rack_wanted_output) 14938 rack->r_wanted_output = 1; 14939 rack_log_chg_info(tp, rack, 6, 14940 qr.rack_min_rtt, 14941 qr.rack_rtt, 14942 qr.rack_reorder_ts); 14943 } 14944 /* Get the old stack timers */ 14945 qr.req_param = 0; 14946 qr.req = TCP_QUERY_TIMERS_UP; 14947 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); 14948 if (ret) { 14949 /* 14950 * non-zero return means we have a timer('s) 14951 * to start. Zero means no timer (no keepalive 14952 * I suppose). 
14953 */ 14954 uint32_t tov = 0; 14955 14956 rack->r_ctl.rc_hpts_flags = qr.timer_hpts_flags; 14957 if (qr.timer_hpts_flags & PACE_PKT_OUTPUT) { 14958 rack->r_ctl.rc_last_output_to = qr.timer_pacing_to; 14959 if (TSTMP_GT(qr.timer_pacing_to, us_cts)) 14960 tov = qr.timer_pacing_to - us_cts; 14961 else 14962 tov = HPTS_TICKS_PER_SLOT; 14963 } 14964 if (qr.timer_hpts_flags & PACE_TMR_MASK) { 14965 rack->r_ctl.rc_timer_exp = qr.timer_timer_exp; 14966 if (tov == 0) { 14967 if (TSTMP_GT(qr.timer_timer_exp, us_cts)) 14968 tov = qr.timer_timer_exp - us_cts; 14969 else 14970 tov = HPTS_TICKS_PER_SLOT; 14971 } 14972 } 14973 rack_log_chg_info(tp, rack, 4, 14974 rack->r_ctl.rc_hpts_flags, 14975 rack->r_ctl.rc_last_output_to, 14976 rack->r_ctl.rc_timer_exp); 14977 if (tov) { 14978 struct hpts_diag diag; 14979 14980 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(tov), 14981 __LINE__, &diag); 14982 rack_log_hpts_diag(rack, us_cts, &diag, &rack->r_ctl.act_rcv_time); 14983 } 14984 } 14985 } 14986 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, 14987 __LINE__, RACK_RTTS_INIT); 14988 } 14989 return (0); 14990 } 14991 14992 static int 14993 rack_handoff_ok(struct tcpcb *tp) 14994 { 14995 if ((tp->t_state == TCPS_CLOSED) || 14996 (tp->t_state == TCPS_LISTEN)) { 14997 /* Sure no problem though it may not stick */ 14998 return (0); 14999 } 15000 if ((tp->t_state == TCPS_SYN_SENT) || 15001 (tp->t_state == TCPS_SYN_RECEIVED)) { 15002 /* 15003 * We really don't know if you support sack, 15004 * you have to get to ESTAB or beyond to tell. 15005 */ 15006 return (EAGAIN); 15007 } 15008 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { 15009 /* 15010 * Rack will only send a FIN after all data is acknowledged. 15011 * So in this case we have more data outstanding. We can't 15012 * switch stacks until either all data and only the FIN 15013 * is left (in which case rack_init() now knows how 15014 * to deal with that) <or> all is acknowledged and we 15015 * are only left with incoming data, though why you 15016 * would want to switch to rack after all data is acknowledged 15017 * I have no idea (rrs)! 15018 */ 15019 return (EAGAIN); 15020 } 15021 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 15022 return (0); 15023 } 15024 /* 15025 * If we reach here we don't do SACK on this connection so we can 15026 * never do rack. 
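 * (Summary of the return values: 0 for CLOSED/LISTEN and for
 * SACK-capable connections, or when rack_sack_not_required is set;
 * EAGAIN while the handshake is still in progress or while more than
 * just a FIN is outstanding; EINVAL otherwise.)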
15027 */ 15028 return (EINVAL); 15029 } 15030 15031 static void 15032 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 15033 { 15034 15035 if (tp->t_fb_ptr) { 15036 uint32_t cnt_free = 0; 15037 struct tcp_rack *rack; 15038 struct rack_sendmap *rsm; 15039 15040 tcp_handle_orphaned_packets(tp); 15041 tp->t_flags &= ~TF_FORCEDATA; 15042 rack = (struct tcp_rack *)tp->t_fb_ptr; 15043 rack_log_pacing_delay_calc(rack, 15044 0, 15045 0, 15046 0, 15047 rack_get_gp_est(rack), /* delRate */ 15048 rack_get_lt_bw(rack), /* rttProp */ 15049 20, __LINE__, NULL, 0); 15050 #ifdef NETFLIX_SHARED_CWND 15051 if (rack->r_ctl.rc_scw) { 15052 uint32_t limit; 15053 15054 if (rack->r_limit_scw) 15055 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 15056 else 15057 limit = 0; 15058 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 15059 rack->r_ctl.rc_scw_index, 15060 limit); 15061 rack->r_ctl.rc_scw = NULL; 15062 } 15063 #endif 15064 if (rack->r_ctl.fsb.tcp_ip_hdr) { 15065 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); 15066 rack->r_ctl.fsb.tcp_ip_hdr = NULL; 15067 rack->r_ctl.fsb.th = NULL; 15068 } 15069 if (rack->rc_always_pace == 1) { 15070 rack_remove_pacing(rack); 15071 } 15072 /* Clean up any options if they were not applied */ 15073 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 15074 struct deferred_opt_list *dol; 15075 15076 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 15077 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 15078 free(dol, M_TCPDO); 15079 } 15080 /* rack does not use force data but other stacks may clear it */ 15081 if (rack->r_ctl.crte != NULL) { 15082 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 15083 rack->rack_hdrw_pacing = 0; 15084 rack->r_ctl.crte = NULL; 15085 } 15086 #ifdef TCP_BLACKBOX 15087 tcp_log_flowend(tp); 15088 #endif 15089 /* 15090 * Lets take a different approach to purging just 15091 * get each one and free it like a cum-ack would and 15092 * not use a foreach loop. 
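 * Each rsm is popped via tqhash_min()/tqhash_remove() and returned
 * to rack_zone, so the hash stays consistent at every step; the
 * rc_free list is drained the same way right after.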
15093 */ 15094 rsm = tqhash_min(rack->r_ctl.tqh); 15095 while (rsm) { 15096 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); 15097 rack->r_ctl.rc_num_maps_alloced--; 15098 uma_zfree(rack_zone, rsm); 15099 rsm = tqhash_min(rack->r_ctl.tqh); 15100 } 15101 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15102 while (rsm) { 15103 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 15104 rack->r_ctl.rc_num_maps_alloced--; 15105 rack->rc_free_cnt--; 15106 cnt_free++; 15107 uma_zfree(rack_zone, rsm); 15108 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15109 } 15110 if (rack->r_ctl.pcm_s != NULL) { 15111 free(rack->r_ctl.pcm_s, M_TCPPCM); 15112 rack->r_ctl.pcm_s = NULL; 15113 rack->r_ctl.pcm_i.cnt_alloc = 0; 15114 rack->r_ctl.pcm_i.cnt = 0; 15115 } 15116 if ((rack->r_ctl.rc_num_maps_alloced > 0) && 15117 (tcp_bblogging_on(tp))) { 15118 union tcp_log_stackspecific log; 15119 struct timeval tv; 15120 15121 memset(&log, 0, sizeof(log)); 15122 log.u_bbr.flex8 = 10; 15123 log.u_bbr.flex1 = rack->r_ctl.rc_num_maps_alloced; 15124 log.u_bbr.flex2 = rack->rc_free_cnt; 15125 log.u_bbr.flex3 = cnt_free; 15126 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15127 rsm = tqhash_min(rack->r_ctl.tqh); 15128 log.u_bbr.delRate = (uintptr_t)rsm; 15129 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15130 log.u_bbr.cur_del_rate = (uintptr_t)rsm; 15131 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15132 log.u_bbr.pkt_epoch = __LINE__; 15133 (void)tcp_log_event(tp, NULL, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 15134 0, &log, false, NULL, NULL, 0, &tv); 15135 } 15136 KASSERT((rack->r_ctl.rc_num_maps_alloced == 0), 15137 ("rack:%p num_aloc:%u after freeing all?", 15138 rack, 15139 rack->r_ctl.rc_num_maps_alloced)); 15140 rack->rc_free_cnt = 0; 15141 free(rack->r_ctl.tqh, M_TCPFSB); 15142 rack->r_ctl.tqh = NULL; 15143 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 15144 tp->t_fb_ptr = NULL; 15145 } 15146 /* Make sure snd_nxt is correctly set */ 15147 tp->snd_nxt = tp->snd_max; 15148 } 15149 15150 static void 15151 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 15152 { 15153 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 15154 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0; 15155 } 15156 switch (tp->t_state) { 15157 case TCPS_SYN_SENT: 15158 rack->r_state = TCPS_SYN_SENT; 15159 rack->r_substate = rack_do_syn_sent; 15160 break; 15161 case TCPS_SYN_RECEIVED: 15162 rack->r_state = TCPS_SYN_RECEIVED; 15163 rack->r_substate = rack_do_syn_recv; 15164 break; 15165 case TCPS_ESTABLISHED: 15166 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15167 rack->r_state = TCPS_ESTABLISHED; 15168 rack->r_substate = rack_do_established; 15169 break; 15170 case TCPS_CLOSE_WAIT: 15171 rack->r_state = TCPS_CLOSE_WAIT; 15172 rack->r_substate = rack_do_close_wait; 15173 break; 15174 case TCPS_FIN_WAIT_1: 15175 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15176 rack->r_state = TCPS_FIN_WAIT_1; 15177 rack->r_substate = rack_do_fin_wait_1; 15178 break; 15179 case TCPS_CLOSING: 15180 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15181 rack->r_state = TCPS_CLOSING; 15182 rack->r_substate = rack_do_closing; 15183 break; 15184 case TCPS_LAST_ACK: 15185 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15186 rack->r_state = TCPS_LAST_ACK; 15187 rack->r_substate = rack_do_lastack; 15188 break; 15189 case TCPS_FIN_WAIT_2: 15190 rack->r_state = TCPS_FIN_WAIT_2; 15191 rack->r_substate = rack_do_fin_wait_2; 15192 break; 15193 case TCPS_LISTEN: 15194 case TCPS_CLOSED: 15195 case TCPS_TIME_WAIT: 15196 default: 
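		/* No RACK substate to install for listen/closed/time-wait. */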
15197 break; 15198 }; 15199 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 15200 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 15201 15202 } 15203 15204 static void 15205 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 15206 { 15207 /* 15208 * We received an ack, and then did not 15209 * call send or were bounced out due to the 15210 * hpts was running. Now a timer is up as well, is 15211 * it the right timer? 15212 */ 15213 struct rack_sendmap *rsm; 15214 int tmr_up; 15215 15216 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 15217 if (tcp_in_hpts(rack->rc_tp) == 0) { 15218 /* 15219 * Ok we probably need some timer up, but no 15220 * matter what the mask we are not in hpts. We 15221 * may have received an old ack and thus did nothing. 15222 */ 15223 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 15224 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 15225 return; 15226 } 15227 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 15228 return; 15229 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 15230 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 15231 (tmr_up == PACE_TMR_RXT)) { 15232 /* Should be an RXT */ 15233 return; 15234 } 15235 if (rsm == NULL) { 15236 /* Nothing outstanding? */ 15237 if (tp->t_flags & TF_DELACK) { 15238 if (tmr_up == PACE_TMR_DELACK) 15239 /* We are supposed to have delayed ack up and we do */ 15240 return; 15241 } else if (((V_tcp_always_keepalive || 15242 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 15243 (tp->t_state <= TCPS_CLOSING)) && 15244 (tmr_up == PACE_TMR_KEEP) && 15245 (tp->snd_max == tp->snd_una)) { 15246 /* We should have keep alive up and we do */ 15247 return; 15248 } 15249 } 15250 if (SEQ_GT(tp->snd_max, tp->snd_una) && 15251 ((tmr_up == PACE_TMR_TLP) || 15252 (tmr_up == PACE_TMR_RACK) || 15253 (tmr_up == PACE_TMR_RXT))) { 15254 /* 15255 * Either a Rack, TLP or RXT is fine if we 15256 * have outstanding data. 15257 */ 15258 return; 15259 } else if (tmr_up == PACE_TMR_DELACK) { 15260 /* 15261 * If the delayed ack was going to go off 15262 * before the rtx/tlp/rack timer were going to 15263 * expire, then that would be the timer in control. 15264 * Note we don't check the time here trusting the 15265 * code is correct. 15266 */ 15267 return; 15268 } 15269 /* 15270 * Ok the timer originally started is not what we want now. 15271 * We will force the hpts to be stopped if any, and restart 15272 * with the slot set to what was in the saved slot. 
15273 */ 15274 if (tcp_in_hpts(rack->rc_tp)) { 15275 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 15276 uint32_t us_cts; 15277 15278 us_cts = tcp_get_usecs(NULL); 15279 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 15280 rack->r_early = 1; 15281 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 15282 } 15283 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 15284 } 15285 tcp_hpts_remove(rack->rc_tp); 15286 } 15287 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 15288 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 15289 } 15290 15291 15292 static void 15293 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts) 15294 { 15295 if ((SEQ_LT(tp->snd_wl1, seq) || 15296 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || 15297 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { 15298 /* keep track of pure window updates */ 15299 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) 15300 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 15301 tp->snd_wnd = tiwin; 15302 rack_validate_fo_sendwin_up(tp, rack); 15303 tp->snd_wl1 = seq; 15304 tp->snd_wl2 = ack; 15305 if (tp->snd_wnd > tp->max_sndwnd) 15306 tp->max_sndwnd = tp->snd_wnd; 15307 rack->r_wanted_output = 1; 15308 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { 15309 tp->snd_wnd = tiwin; 15310 rack_validate_fo_sendwin_up(tp, rack); 15311 tp->snd_wl1 = seq; 15312 tp->snd_wl2 = ack; 15313 } else { 15314 /* Not a valid win update */ 15315 return; 15316 } 15317 if (tp->snd_wnd > tp->max_sndwnd) 15318 tp->max_sndwnd = tp->snd_wnd; 15319 /* Do we exit persists? */ 15320 if ((rack->rc_in_persist != 0) && 15321 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 15322 rack->r_ctl.rc_pace_min_segs))) { 15323 rack_exit_persist(tp, rack, cts); 15324 } 15325 /* Do we enter persists? */ 15326 if ((rack->rc_in_persist == 0) && 15327 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 15328 TCPS_HAVEESTABLISHED(tp->t_state) && 15329 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 15330 sbavail(&tptosocket(tp)->so_snd) && 15331 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 15332 /* 15333 * Here the rwnd is less than 15334 * the pacing size, we are established, 15335 * nothing is outstanding, and there is 15336 * data to send. Enter persists. 
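 * Roughly: we enter persists when snd_wnd drops below
 * min(rc_high_rwnd/2, rc_pace_min_segs) with nothing left outstanding
 * (or a collapsed window) and more data queued than the window allows;
 * we exit again (above) once snd_wnd is back at or above that same
 * threshold.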
15337 */ 15338 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, ack); 15339 } 15340 } 15341 15342 static void 15343 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq) 15344 { 15345 15346 if (tcp_bblogging_on(rack->rc_tp)) { 15347 struct inpcb *inp = tptoinpcb(tp); 15348 union tcp_log_stackspecific log; 15349 struct timeval ltv; 15350 char tcp_hdr_buf[60]; 15351 struct tcphdr *th; 15352 struct timespec ts; 15353 uint32_t orig_snd_una; 15354 uint8_t xx = 0; 15355 15356 #ifdef TCP_REQUEST_TRK 15357 struct tcp_sendfile_track *tcp_req; 15358 15359 if (SEQ_GT(ae->ack, tp->snd_una)) { 15360 tcp_req = tcp_req_find_req_for_seq(tp, (ae->ack-1)); 15361 } else { 15362 tcp_req = tcp_req_find_req_for_seq(tp, ae->ack); 15363 } 15364 #endif 15365 memset(&log, 0, sizeof(log)); 15366 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 15367 if (rack->rack_no_prr == 0) 15368 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 15369 else 15370 log.u_bbr.flex1 = 0; 15371 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 15372 log.u_bbr.use_lt_bw <<= 1; 15373 log.u_bbr.use_lt_bw |= rack->r_might_revert; 15374 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 15375 log.u_bbr.bbr_state = rack->rc_free_cnt; 15376 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 15377 log.u_bbr.pkts_out = tp->t_maxseg; 15378 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 15379 log.u_bbr.flex7 = 1; 15380 log.u_bbr.lost = ae->flags; 15381 log.u_bbr.cwnd_gain = ackval; 15382 log.u_bbr.pacing_gain = 0x2; 15383 if (ae->flags & TSTMP_HDWR) { 15384 /* Record the hardware timestamp if present */ 15385 log.u_bbr.flex3 = M_TSTMP; 15386 ts.tv_sec = ae->timestamp / 1000000000; 15387 ts.tv_nsec = ae->timestamp % 1000000000; 15388 ltv.tv_sec = ts.tv_sec; 15389 ltv.tv_usec = ts.tv_nsec / 1000; 15390 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 15391 } else if (ae->flags & TSTMP_LRO) { 15392 /* Record the LRO the arrival timestamp */ 15393 log.u_bbr.flex3 = M_TSTMP_LRO; 15394 ts.tv_sec = ae->timestamp / 1000000000; 15395 ts.tv_nsec = ae->timestamp % 1000000000; 15396 ltv.tv_sec = ts.tv_sec; 15397 ltv.tv_usec = ts.tv_nsec / 1000; 15398 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 15399 } 15400 log.u_bbr.timeStamp = tcp_get_usecs(<v); 15401 /* Log the rcv time */ 15402 log.u_bbr.delRate = ae->timestamp; 15403 #ifdef TCP_REQUEST_TRK 15404 log.u_bbr.applimited = tp->t_tcpreq_closed; 15405 log.u_bbr.applimited <<= 8; 15406 log.u_bbr.applimited |= tp->t_tcpreq_open; 15407 log.u_bbr.applimited <<= 8; 15408 log.u_bbr.applimited |= tp->t_tcpreq_req; 15409 if (tcp_req) { 15410 /* Copy out any client req info */ 15411 /* seconds */ 15412 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); 15413 /* useconds */ 15414 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); 15415 log.u_bbr.rttProp = tcp_req->timestamp; 15416 log.u_bbr.cur_del_rate = tcp_req->start; 15417 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { 15418 log.u_bbr.flex8 |= 1; 15419 } else { 15420 log.u_bbr.flex8 |= 2; 15421 log.u_bbr.bw_inuse = tcp_req->end; 15422 } 15423 log.u_bbr.flex6 = tcp_req->start_seq; 15424 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { 15425 log.u_bbr.flex8 |= 4; 15426 log.u_bbr.epoch = tcp_req->end_seq; 15427 } 15428 } 15429 #endif 15430 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf)); 15431 th = (struct tcphdr *)tcp_hdr_buf; 15432 th->th_seq = ae->seq; 15433 th->th_ack = ae->ack; 15434 th->th_win = ae->win; 15435 /* Now fill in the ports */ 15436 th->th_sport = inp->inp_fport; 15437 
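		/*
		 * The header is rebuilt from the peer's point of view: the
		 * peer's (foreign) port is the source and our local port is
		 * the destination of the ack being logged.
		 */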
th->th_dport = inp->inp_lport; 15438 tcp_set_flags(th, ae->flags); 15439 /* Now do we have a timestamp option? */ 15440 if (ae->flags & HAS_TSTMP) { 15441 u_char *cp; 15442 uint32_t val; 15443 15444 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); 15445 cp = (u_char *)(th + 1); 15446 *cp = TCPOPT_NOP; 15447 cp++; 15448 *cp = TCPOPT_NOP; 15449 cp++; 15450 *cp = TCPOPT_TIMESTAMP; 15451 cp++; 15452 *cp = TCPOLEN_TIMESTAMP; 15453 cp++; 15454 val = htonl(ae->ts_value); 15455 bcopy((char *)&val, 15456 (char *)cp, sizeof(uint32_t)); 15457 val = htonl(ae->ts_echo); 15458 bcopy((char *)&val, 15459 (char *)(cp + 4), sizeof(uint32_t)); 15460 } else 15461 th->th_off = (sizeof(struct tcphdr) >> 2); 15462 15463 /* 15464 * For sane logging we need to play a little trick. 15465 * If the ack were fully processed we would have moved 15466 * snd_una to high_seq, but since compressed acks are 15467 * processed in two phases, at this point (logging) snd_una 15468 * won't be advanced. So we would see multiple acks showing 15469 * the advancement. We can prevent that by "pretending" that 15470 * snd_una was advanced and then un-advancing it so that the 15471 * logging code has the right value for tlb_snd_una. 15472 */ 15473 if (tp->snd_una != high_seq) { 15474 orig_snd_una = tp->snd_una; 15475 tp->snd_una = high_seq; 15476 xx = 1; 15477 } else 15478 xx = 0; 15479 TCP_LOG_EVENTP(tp, th, 15480 &tptosocket(tp)->so_rcv, 15481 &tptosocket(tp)->so_snd, TCP_LOG_IN, 0, 15482 0, &log, true, <v); 15483 if (xx) { 15484 tp->snd_una = orig_snd_una; 15485 } 15486 } 15487 15488 } 15489 15490 static void 15491 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts) 15492 { 15493 uint32_t us_rtt; 15494 /* 15495 * A persist or keep-alive was forced out, update our 15496 * min rtt time. Note now worry about lost responses. 15497 * When a subsequent keep-alive or persist times out 15498 * and forced_ack is still on, then the last probe 15499 * was not responded to. In such cases we have a 15500 * sysctl that controls the behavior. Either we apply 15501 * the rtt but with reduced confidence (0). Or we just 15502 * plain don't apply the rtt estimate. Having data flow 15503 * will clear the probe_not_answered flag i.e. cum-ack 15504 * move forward <or> exiting and reentering persists. 15505 */ 15506 15507 rack->forced_ack = 0; 15508 rack->rc_tp->t_rxtshift = 0; 15509 if ((rack->rc_in_persist && 15510 (tiwin == rack->rc_tp->snd_wnd)) || 15511 (rack->rc_in_persist == 0)) { 15512 /* 15513 * In persists only apply the RTT update if this is 15514 * a response to our window probe. And that 15515 * means the rwnd sent must match the current 15516 * snd_wnd. If it does not, then we got a 15517 * window update ack instead. For keepalive 15518 * we allow the answer no matter what the window. 15519 * 15520 * Note that if the probe_not_answered is set then 15521 * the forced_ack_ts is the oldest one i.e. the first 15522 * probe sent that might have been lost. This assures 15523 * us that if we do calculate an RTT it is longer not 15524 * some short thing. 
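 * Roughly: us_rtt = us_cts - forced_ack_ts (floored at 1 usec). It is
 * applied with normal confidence when the probe was answered directly,
 * and only with confidence 0, or not at all, when probe_not_answered
 * is set (per rack_apply_rtt_with_reduced_conf).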
15525 */ 15526 if (rack->rc_in_persist) 15527 counter_u64_add(rack_persists_acks, 1); 15528 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 15529 if (us_rtt == 0) 15530 us_rtt = 1; 15531 if (rack->probe_not_answered == 0) { 15532 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 15533 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); 15534 } else { 15535 /* We have a retransmitted probe here too */ 15536 if (rack_apply_rtt_with_reduced_conf) { 15537 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 15538 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1); 15539 } 15540 } 15541 } 15542 } 15543 15544 static void 15545 rack_new_round_starts(struct tcpcb *tp, struct tcp_rack *rack, uint32_t high_seq) 15546 { 15547 /* 15548 * The next send has occurred mark the end of the round 15549 * as when that data gets acknowledged. We can 15550 * also do common things we might need to do when 15551 * a round begins. 15552 */ 15553 rack->r_ctl.roundends = tp->snd_max; 15554 rack->rc_new_rnd_needed = 0; 15555 rack_log_hystart_event(rack, tp->snd_max, 4); 15556 } 15557 15558 15559 static void 15560 rack_log_pcm(struct tcp_rack *rack, uint8_t mod, uint32_t flex1, uint32_t flex2, 15561 uint32_t flex3) 15562 { 15563 if (tcp_bblogging_on(rack->rc_tp)) { 15564 union tcp_log_stackspecific log; 15565 struct timeval tv; 15566 15567 (void)tcp_get_usecs(&tv); 15568 memset(&log, 0, sizeof(log)); 15569 log.u_bbr.timeStamp = tcp_tv_to_usectick(&tv); 15570 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15571 log.u_bbr.flex8 = mod; 15572 log.u_bbr.flex1 = flex1; 15573 log.u_bbr.flex2 = flex2; 15574 log.u_bbr.flex3 = flex3; 15575 log.u_bbr.flex4 = rack_pcm_every_n_rounds; 15576 log.u_bbr.flex5 = rack->r_ctl.pcm_idle_rounds; 15577 log.u_bbr.bbr_substate = rack->pcm_needed; 15578 log.u_bbr.bbr_substate <<= 1; 15579 log.u_bbr.bbr_substate |= rack->pcm_in_progress; 15580 log.u_bbr.bbr_substate <<= 1; 15581 log.u_bbr.bbr_substate |= rack->pcm_enabled; /* bits are NIE for Needed, Inprogress, Enabled */ 15582 (void)tcp_log_event(rack->rc_tp, NULL, NULL, NULL, TCP_PCM_MEASURE, ERRNO_UNK, 15583 0, &log, false, NULL, NULL, 0, &tv); 15584 } 15585 } 15586 15587 static void 15588 rack_new_round_setup(struct tcpcb *tp, struct tcp_rack *rack, uint32_t high_seq) 15589 { 15590 /* 15591 * The round (current_round) has ended. We now 15592 * setup for the next round by incrementing the 15593 * round numnber and doing any round specific 15594 * things. 15595 */ 15596 rack_log_hystart_event(rack, high_seq, 21); 15597 rack->r_ctl.current_round++; 15598 /* New round (current_round) begins at next send */ 15599 rack->rc_new_rnd_needed = 1; 15600 if ((rack->pcm_enabled == 1) && 15601 (rack->pcm_needed == 0) && 15602 (rack->pcm_in_progress == 0)) { 15603 /* 15604 * If we have enabled PCM, then we need to 15605 * check if the round has adanced to the state 15606 * where one is required. 
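 * Roughly: a measurement becomes needed once
 * (current_round - last_pcm_round) + pcm_idle_rounds reaches
 * rack_pcm_every_n_rounds.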
15607 */ 15608 int rnds; 15609 15610 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; 15611 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { 15612 rack->pcm_needed = 1; 15613 rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_round ); 15614 } else if (rack_verbose_logging) { 15615 rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_round ); 15616 } 15617 } 15618 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { 15619 /* We have hystart enabled send the round info in */ 15620 if (CC_ALGO(tp)->newround != NULL) { 15621 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); 15622 } 15623 } 15624 /* 15625 * For DGP an initial startup check. We want to validate 15626 * that we are not just pushing on slow-start and just 15627 * not gaining.. i.e. filling buffers without getting any 15628 * boost in b/w during the inital slow-start. 15629 */ 15630 if (rack->dgp_on && 15631 (rack->rc_initial_ss_comp == 0) && 15632 (tp->snd_cwnd < tp->snd_ssthresh) && 15633 (rack->r_ctl.num_measurements >= RACK_REQ_AVG) && 15634 (rack->r_ctl.gp_rnd_thresh > 0) && 15635 ((rack->r_ctl.current_round - rack->r_ctl.last_rnd_of_gp_rise) >= rack->r_ctl.gp_rnd_thresh)) { 15636 15637 /* 15638 * We are in the initial SS and we have hd rack_rnd_cnt_req rounds(def:5) where 15639 * we have not gained the required amount in the gp_est (120.0% aka 1200). Lets 15640 * exit SS. 15641 * 15642 * Pick up the flight size now as we enter slowstart (not the 15643 * cwnd which may be inflated). 15644 */ 15645 rack->rc_initial_ss_comp = 1; 15646 15647 if (tcp_bblogging_on(rack->rc_tp)) { 15648 union tcp_log_stackspecific log; 15649 struct timeval tv; 15650 15651 memset(&log, 0, sizeof(log)); 15652 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15653 log.u_bbr.flex1 = rack->r_ctl.current_round; 15654 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; 15655 log.u_bbr.flex3 = rack->r_ctl.gp_rnd_thresh; 15656 log.u_bbr.flex4 = rack->r_ctl.gate_to_fs; 15657 log.u_bbr.flex5 = rack->r_ctl.ss_hi_fs; 15658 log.u_bbr.flex8 = 40; 15659 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 15660 0, &log, false, NULL, __func__, __LINE__,&tv); 15661 } 15662 if ((rack->r_ctl.gate_to_fs == 1) && 15663 (tp->snd_cwnd > rack->r_ctl.ss_hi_fs)) { 15664 tp->snd_cwnd = rack->r_ctl.ss_hi_fs; 15665 } 15666 tp->snd_ssthresh = tp->snd_cwnd - 1; 15667 /* Turn off any fast output running */ 15668 rack->r_fast_output = 0; 15669 } 15670 } 15671 15672 static int 15673 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) 15674 { 15675 /* 15676 * Handle a "special" compressed ack mbuf. Each incoming 15677 * ack has only four possible dispositions: 15678 * 15679 * A) It moves the cum-ack forward 15680 * B) It is behind the cum-ack. 15681 * C) It is a window-update ack. 15682 * D) It is a dup-ack. 15683 * 15684 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES 15685 * in the incoming mbuf. We also need to still pay attention 15686 * to nxt_pkt since there may be another packet after this 15687 * one. 
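 * The classification below is: BEHIND when the ack is older than the
 * running cum-ack point (high_seq), CUMACK when it advances it, DUPACK
 * when it matches it with an unchanged window while not in persist,
 * and RWND (a window update) otherwise.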
15688 */ 15689 #ifdef TCP_ACCOUNTING 15690 uint64_t ts_val; 15691 uint64_t rdstc; 15692 #endif 15693 int segsiz; 15694 struct timespec ts; 15695 struct tcp_rack *rack; 15696 struct tcp_ackent *ae; 15697 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack; 15698 int cnt, i, did_out, ourfinisacked = 0; 15699 struct tcpopt to_holder, *to = NULL; 15700 #ifdef TCP_ACCOUNTING 15701 int win_up_req = 0; 15702 #endif 15703 int nsegs = 0; 15704 int under_pacing = 0; 15705 int post_recovery = 0; 15706 #ifdef TCP_ACCOUNTING 15707 sched_pin(); 15708 #endif 15709 rack = (struct tcp_rack *)tp->t_fb_ptr; 15710 if (rack->gp_ready && 15711 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) 15712 under_pacing = 1; 15713 15714 if (rack->r_state != tp->t_state) 15715 rack_set_state(tp, rack); 15716 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 15717 (tp->t_flags & TF_GPUTINPROG)) { 15718 /* 15719 * We have a goodput in progress 15720 * and we have entered a late state. 15721 * Do we have enough data in the sb 15722 * to handle the GPUT request? 15723 */ 15724 uint32_t bytes; 15725 15726 bytes = tp->gput_ack - tp->gput_seq; 15727 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 15728 bytes += tp->gput_seq - tp->snd_una; 15729 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 15730 /* 15731 * There are not enough bytes in the socket 15732 * buffer that have been sent to cover this 15733 * measurement. Cancel it. 15734 */ 15735 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 15736 rack->r_ctl.rc_gp_srtt /*flex1*/, 15737 tp->gput_seq, 15738 0, 0, 18, __LINE__, NULL, 0); 15739 tp->t_flags &= ~TF_GPUTINPROG; 15740 } 15741 } 15742 to = &to_holder; 15743 to->to_flags = 0; 15744 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 15745 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 15746 cnt = m->m_len / sizeof(struct tcp_ackent); 15747 counter_u64_add(rack_multi_single_eq, cnt); 15748 high_seq = tp->snd_una; 15749 the_win = tp->snd_wnd; 15750 win_seq = tp->snd_wl1; 15751 win_upd_ack = tp->snd_wl2; 15752 cts = tcp_tv_to_usectick(tv); 15753 ms_cts = tcp_tv_to_mssectick(tv); 15754 rack->r_ctl.rc_rcvtime = cts; 15755 segsiz = ctf_fixed_maxseg(tp); 15756 if ((rack->rc_gp_dyn_mul) && 15757 (rack->use_fixed_rate == 0) && 15758 (rack->rc_always_pace)) { 15759 /* Check in on probertt */ 15760 rack_check_probe_rtt(rack, cts); 15761 } 15762 for (i = 0; i < cnt; i++) { 15763 #ifdef TCP_ACCOUNTING 15764 ts_val = get_cyclecount(); 15765 #endif 15766 rack_clear_rate_sample(rack); 15767 ae = ((mtod(m, struct tcp_ackent *)) + i); 15768 if (ae->flags & TH_FIN) 15769 rack_log_pacing_delay_calc(rack, 15770 0, 15771 0, 15772 0, 15773 rack_get_gp_est(rack), /* delRate */ 15774 rack_get_lt_bw(rack), /* rttProp */ 15775 20, __LINE__, NULL, 0); 15776 /* Setup the window */ 15777 tiwin = ae->win << tp->snd_scale; 15778 if (tiwin > rack->r_ctl.rc_high_rwnd) 15779 rack->r_ctl.rc_high_rwnd = tiwin; 15780 /* figure out the type of ack */ 15781 if (SEQ_LT(ae->ack, high_seq)) { 15782 /* Case B*/ 15783 ae->ack_val_set = ACK_BEHIND; 15784 } else if (SEQ_GT(ae->ack, high_seq)) { 15785 /* Case A */ 15786 ae->ack_val_set = ACK_CUMACK; 15787 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ 15788 /* Case D */ 15789 ae->ack_val_set = ACK_DUPACK; 15790 } else { 15791 /* Case C */ 15792 ae->ack_val_set = ACK_RWND; 15793 } 15794 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__); 15795 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 15796 /* Validate timestamp */ 15797 if 
(ae->flags & HAS_TSTMP) { 15798 /* Setup for a timestamp */ 15799 to->to_flags = TOF_TS; 15800 ae->ts_echo -= tp->ts_offset; 15801 to->to_tsecr = ae->ts_echo; 15802 to->to_tsval = ae->ts_value; 15803 /* 15804 * If echoed timestamp is later than the current time, fall back to 15805 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 15806 * were used when this connection was established. 15807 */ 15808 if (TSTMP_GT(ae->ts_echo, ms_cts)) 15809 to->to_tsecr = 0; 15810 if (tp->ts_recent && 15811 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 15812 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 15813 #ifdef TCP_ACCOUNTING 15814 rdstc = get_cyclecount(); 15815 if (rdstc > ts_val) { 15816 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15817 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 15818 } 15819 } 15820 #endif 15821 continue; 15822 } 15823 } 15824 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 15825 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 15826 tp->ts_recent_age = tcp_ts_getticks(); 15827 tp->ts_recent = ae->ts_value; 15828 } 15829 } else { 15830 /* Setup for a no options */ 15831 to->to_flags = 0; 15832 } 15833 /* Update the rcv time and perform idle reduction possibly */ 15834 if (tp->t_idle_reduce && 15835 (tp->snd_max == tp->snd_una) && 15836 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 15837 counter_u64_add(rack_input_idle_reduces, 1); 15838 rack_cc_after_idle(rack, tp); 15839 } 15840 tp->t_rcvtime = ticks; 15841 /* Now what about ECN of a chain of pure ACKs? */ 15842 if (tcp_ecn_input_segment(tp, ae->flags, 0, 15843 tcp_packets_this_ack(tp, ae->ack), 15844 ae->codepoint)) 15845 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__); 15846 #ifdef TCP_ACCOUNTING 15847 /* Count for the specific type of ack in */ 15848 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15849 tp->tcp_cnt_counters[ae->ack_val_set]++; 15850 } 15851 #endif 15852 /* 15853 * Note how we could move up these in the determination 15854 * above, but we don't so that way the timestamp checks (and ECN) 15855 * is done first before we do any processing on the ACK. 15856 * The non-compressed path through the code has this 15857 * weakness (noted by @jtl) that it actually does some 15858 * processing before verifying the timestamp information. 15859 * We don't take that path here which is why we set 15860 * the ack_val_set first, do the timestamp and ecn 15861 * processing, and then look at what we have setup. 
15862 */ 15863 if (ae->ack_val_set == ACK_BEHIND) { 15864 /* 15865 * Case B flag reordering, if window is not closed 15866 * or it could be a keep-alive or persists 15867 */ 15868 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { 15869 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 15870 if (rack->r_ctl.rc_reorder_ts == 0) 15871 rack->r_ctl.rc_reorder_ts = 1; 15872 } 15873 } else if (ae->ack_val_set == ACK_DUPACK) { 15874 /* Case D */ 15875 rack_strike_dupack(rack, ae->ack); 15876 } else if (ae->ack_val_set == ACK_RWND) { 15877 /* Case C */ 15878 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 15879 ts.tv_sec = ae->timestamp / 1000000000; 15880 ts.tv_nsec = ae->timestamp % 1000000000; 15881 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 15882 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 15883 } else { 15884 rack->r_ctl.act_rcv_time = *tv; 15885 } 15886 if (rack->forced_ack) { 15887 rack_handle_probe_response(rack, tiwin, 15888 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 15889 } 15890 #ifdef TCP_ACCOUNTING 15891 win_up_req = 1; 15892 #endif 15893 win_upd_ack = ae->ack; 15894 win_seq = ae->seq; 15895 the_win = tiwin; 15896 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts); 15897 } else { 15898 /* Case A */ 15899 if (SEQ_GT(ae->ack, tp->snd_max)) { 15900 /* 15901 * We just send an ack since the incoming 15902 * ack is beyond the largest seq we sent. 15903 */ 15904 if ((tp->t_flags & TF_ACKNOW) == 0) { 15905 ctf_ack_war_checks(tp); 15906 if (tp->t_flags && TF_ACKNOW) 15907 rack->r_wanted_output = 1; 15908 } 15909 } else { 15910 nsegs++; 15911 /* If the window changed setup to update */ 15912 if (tiwin != tp->snd_wnd) { 15913 win_upd_ack = ae->ack; 15914 win_seq = ae->seq; 15915 the_win = tiwin; 15916 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts); 15917 } 15918 #ifdef TCP_ACCOUNTING 15919 /* Account for the acks */ 15920 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15921 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); 15922 } 15923 #endif 15924 high_seq = ae->ack; 15925 /* Setup our act_rcv_time */ 15926 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 15927 ts.tv_sec = ae->timestamp / 1000000000; 15928 ts.tv_nsec = ae->timestamp % 1000000000; 15929 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 15930 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 15931 } else { 15932 rack->r_ctl.act_rcv_time = *tv; 15933 } 15934 rack_process_to_cumack(tp, rack, ae->ack, cts, to, 15935 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); 15936 #ifdef TCP_REQUEST_TRK 15937 rack_req_check_for_comp(rack, high_seq); 15938 #endif 15939 if (rack->rc_dsack_round_seen) { 15940 /* Is the dsack round over? 
*/ 15941 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { 15942 /* Yes it is */ 15943 rack->rc_dsack_round_seen = 0; 15944 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 15945 } 15946 } 15947 } 15948 } 15949 /* And lets be sure to commit the rtt measurements for this ack */ 15950 tcp_rack_xmit_timer_commit(rack, tp); 15951 #ifdef TCP_ACCOUNTING 15952 rdstc = get_cyclecount(); 15953 if (rdstc > ts_val) { 15954 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15955 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 15956 if (ae->ack_val_set == ACK_CUMACK) 15957 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); 15958 } 15959 } 15960 #endif 15961 } 15962 #ifdef TCP_ACCOUNTING 15963 ts_val = get_cyclecount(); 15964 #endif 15965 /* Tend to any collapsed window */ 15966 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) { 15967 /* The peer collapsed the window */ 15968 rack_collapsed_window(rack, (tp->snd_max - high_seq), high_seq, __LINE__); 15969 } else if (rack->rc_has_collapsed) 15970 rack_un_collapse_window(rack, __LINE__); 15971 if ((rack->r_collapse_point_valid) && 15972 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point))) 15973 rack->r_collapse_point_valid = 0; 15974 acked_amount = acked = (high_seq - tp->snd_una); 15975 if (acked) { 15976 /* 15977 * The draft (v3) calls for us to use SEQ_GEQ, but that 15978 * causes issues when we are just going app limited. Lets 15979 * instead use SEQ_GT <or> where its equal but more data 15980 * is outstanding. 15981 * 15982 * Also make sure we are on the last ack of a series. We 15983 * have to have all the ack's processed in queue to know 15984 * if there is something left outstanding. 15985 * 15986 */ 15987 if (SEQ_GEQ(high_seq, rack->r_ctl.roundends) && 15988 (rack->rc_new_rnd_needed == 0) && 15989 (nxt_pkt == 0)) { 15990 /* 15991 * We have crossed into a new round with 15992 * this th_ack value. 15993 */ 15994 rack_new_round_setup(tp, rack, high_seq); 15995 } 15996 /* 15997 * Clear the probe not answered flag 15998 * since cum-ack moved forward. 15999 */ 16000 rack->probe_not_answered = 0; 16001 if (tp->t_flags & TF_NEEDSYN) { 16002 /* 16003 * T/TCP: Connection was half-synchronized, and our SYN has 16004 * been ACK'd (so connection is now fully synchronized). Go 16005 * to non-starred state, increment snd_una for ACK of SYN, 16006 * and check if we can do window scaling. 16007 */ 16008 tp->t_flags &= ~TF_NEEDSYN; 16009 tp->snd_una++; 16010 acked_amount = acked = (high_seq - tp->snd_una); 16011 } 16012 if (acked > sbavail(&so->so_snd)) 16013 acked_amount = sbavail(&so->so_snd); 16014 if (IN_FASTRECOVERY(tp->t_flags) && 16015 (rack->rack_no_prr == 0)) 16016 rack_update_prr(tp, rack, acked_amount, high_seq); 16017 if (IN_RECOVERY(tp->t_flags)) { 16018 if (SEQ_LT(high_seq, tp->snd_recover) && 16019 (SEQ_LT(high_seq, tp->snd_max))) { 16020 tcp_rack_partialack(tp); 16021 } else { 16022 rack_post_recovery(tp, high_seq); 16023 post_recovery = 1; 16024 } 16025 } else if ((rack->rto_from_rec == 1) && 16026 SEQ_GEQ(high_seq, tp->snd_recover)) { 16027 /* 16028 * We were in recovery, hit a rxt timeout 16029 * and never re-entered recovery. The timeout(s) 16030 * made up all the lost data. In such a case 16031 * we need to clear the rto_from_rec flag. 
16032 */ 16033 rack->rto_from_rec = 0; 16034 } 16035 /* Handle the rack-log-ack part (sendmap) */ 16036 if ((sbused(&so->so_snd) == 0) && 16037 (acked > acked_amount) && 16038 (tp->t_state >= TCPS_FIN_WAIT_1) && 16039 (tp->t_flags & TF_SENTFIN)) { 16040 /* 16041 * We must be sure our fin 16042 * was sent and acked (we can be 16043 * in FIN_WAIT_1 without having 16044 * sent the fin). 16045 */ 16046 ourfinisacked = 1; 16047 /* 16048 * Lets make sure snd_una is updated 16049 * since most likely acked_amount = 0 (it 16050 * should be). 16051 */ 16052 tp->snd_una = high_seq; 16053 } 16054 /* Did we make a RTO error? */ 16055 if ((tp->t_flags & TF_PREVVALID) && 16056 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 16057 tp->t_flags &= ~TF_PREVVALID; 16058 if (tp->t_rxtshift == 1 && 16059 (int)(ticks - tp->t_badrxtwin) < 0) 16060 rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__); 16061 } 16062 /* Handle the data in the socket buffer */ 16063 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 16064 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 16065 if (acked_amount > 0) { 16066 uint32_t p_cwnd; 16067 struct mbuf *mfree; 16068 16069 if (post_recovery) { 16070 /* 16071 * Grab the segsiz, multiply by 2 and add the snd_cwnd 16072 * that is the max the CC should add if we are exiting 16073 * recovery and doing a late add. 16074 */ 16075 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16076 p_cwnd <<= 1; 16077 p_cwnd += tp->snd_cwnd; 16078 } 16079 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, post_recovery); 16080 if (post_recovery && (tp->snd_cwnd > p_cwnd)) { 16081 /* Must be non-newreno (cubic) getting too ahead of itself */ 16082 tp->snd_cwnd = p_cwnd; 16083 } 16084 SOCK_SENDBUF_LOCK(so); 16085 mfree = sbcut_locked(&so->so_snd, acked_amount); 16086 tp->snd_una = high_seq; 16087 /* Note we want to hold the sb lock through the sendmap adjust */ 16088 rack_adjust_sendmap_head(rack, &so->so_snd); 16089 /* Wake up the socket if we have room to write more */ 16090 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 16091 sowwakeup_locked(so); 16092 m_freem(mfree); 16093 } 16094 /* update progress */ 16095 tp->t_acktime = ticks; 16096 rack_log_progress_event(rack, tp, tp->t_acktime, 16097 PROGRESS_UPDATE, __LINE__); 16098 /* Clear out shifts and such */ 16099 tp->t_rxtshift = 0; 16100 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 16101 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 16102 rack->rc_tlp_in_progress = 0; 16103 rack->r_ctl.rc_tlp_cnt_out = 0; 16104 /* Send recover and snd_nxt must be dragged along */ 16105 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 16106 tp->snd_recover = tp->snd_una; 16107 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 16108 tp->snd_nxt = tp->snd_max; 16109 /* 16110 * If the RXT timer is running we want to 16111 * stop it, so we can restart a TLP (or new RXT). 
16112 */ 16113 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 16114 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16115 tp->snd_wl2 = high_seq; 16116 tp->t_dupacks = 0; 16117 if (under_pacing && 16118 (rack->use_fixed_rate == 0) && 16119 (rack->in_probe_rtt == 0) && 16120 rack->rc_gp_dyn_mul && 16121 rack->rc_always_pace) { 16122 /* Check if we are dragging bottom */ 16123 rack_check_bottom_drag(tp, rack, so); 16124 } 16125 if (tp->snd_una == tp->snd_max) { 16126 tp->t_flags &= ~TF_PREVVALID; 16127 rack->r_ctl.retran_during_recovery = 0; 16128 rack->rc_suspicious = 0; 16129 rack->r_ctl.dsack_byte_cnt = 0; 16130 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 16131 if (rack->r_ctl.rc_went_idle_time == 0) 16132 rack->r_ctl.rc_went_idle_time = 1; 16133 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 16134 if (sbavail(&tptosocket(tp)->so_snd) == 0) 16135 tp->t_acktime = 0; 16136 /* Set so we might enter persists... */ 16137 rack->r_wanted_output = 1; 16138 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16139 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 16140 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 16141 (sbavail(&so->so_snd) == 0) && 16142 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 16143 /* 16144 * The socket was gone and the 16145 * peer sent data (not now in the past), time to 16146 * reset him. 16147 */ 16148 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16149 /* tcp_close will kill the inp pre-log the Reset */ 16150 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 16151 #ifdef TCP_ACCOUNTING 16152 rdstc = get_cyclecount(); 16153 if (rdstc > ts_val) { 16154 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16155 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16156 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16157 } 16158 } 16159 #endif 16160 m_freem(m); 16161 tp = tcp_close(tp); 16162 if (tp == NULL) { 16163 #ifdef TCP_ACCOUNTING 16164 sched_unpin(); 16165 #endif 16166 return (1); 16167 } 16168 /* 16169 * We would normally do drop-with-reset which would 16170 * send back a reset. We can't since we don't have 16171 * all the needed bits. Instead lets arrange for 16172 * a call to tcp_output(). That way since we 16173 * are in the closed state we will generate a reset. 16174 * 16175 * Note if tcp_accounting is on we don't unpin since 16176 * we do that after the goto label. 16177 */ 16178 goto send_out_a_rst; 16179 } 16180 if ((sbused(&so->so_snd) == 0) && 16181 (tp->t_state >= TCPS_FIN_WAIT_1) && 16182 (tp->t_flags & TF_SENTFIN)) { 16183 /* 16184 * If we can't receive any more data, then closing user can 16185 * proceed. Starting the timer is contrary to the 16186 * specification, but if we don't get a FIN we'll hang 16187 * forever. 16188 * 16189 */ 16190 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 16191 soisdisconnected(so); 16192 tcp_timer_activate(tp, TT_2MSL, 16193 (tcp_fast_finwait2_recycle ? 16194 tcp_finwait2_timeout : 16195 TP_MAXIDLE(tp))); 16196 } 16197 if (ourfinisacked == 0) { 16198 /* 16199 * We don't change to fin-wait-2 if we have our fin acked 16200 * which means we are probably in TCPS_CLOSING. 
16201 */ 16202 tcp_state_change(tp, TCPS_FIN_WAIT_2); 16203 } 16204 } 16205 } 16206 /* Wake up the socket if we have room to write more */ 16207 if (sbavail(&so->so_snd)) { 16208 rack->r_wanted_output = 1; 16209 if (ctf_progress_timeout_check(tp, true)) { 16210 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 16211 tp, tick, PROGRESS_DROP, __LINE__); 16212 /* 16213 * We cheat here and don't send a RST, we should send one 16214 * when the pacer drops the connection. 16215 */ 16216 #ifdef TCP_ACCOUNTING 16217 rdstc = get_cyclecount(); 16218 if (rdstc > ts_val) { 16219 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16220 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16221 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16222 } 16223 } 16224 sched_unpin(); 16225 #endif 16226 (void)tcp_drop(tp, ETIMEDOUT); 16227 m_freem(m); 16228 return (1); 16229 } 16230 } 16231 if (ourfinisacked) { 16232 switch(tp->t_state) { 16233 case TCPS_CLOSING: 16234 #ifdef TCP_ACCOUNTING 16235 rdstc = get_cyclecount(); 16236 if (rdstc > ts_val) { 16237 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16238 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16239 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16240 } 16241 } 16242 sched_unpin(); 16243 #endif 16244 tcp_twstart(tp); 16245 m_freem(m); 16246 return (1); 16247 break; 16248 case TCPS_LAST_ACK: 16249 #ifdef TCP_ACCOUNTING 16250 rdstc = get_cyclecount(); 16251 if (rdstc > ts_val) { 16252 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16253 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16254 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16255 } 16256 } 16257 sched_unpin(); 16258 #endif 16259 tp = tcp_close(tp); 16260 ctf_do_drop(m, tp); 16261 return (1); 16262 break; 16263 case TCPS_FIN_WAIT_1: 16264 #ifdef TCP_ACCOUNTING 16265 rdstc = get_cyclecount(); 16266 if (rdstc > ts_val) { 16267 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16268 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16269 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16270 } 16271 } 16272 #endif 16273 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 16274 soisdisconnected(so); 16275 tcp_timer_activate(tp, TT_2MSL, 16276 (tcp_fast_finwait2_recycle ? 16277 tcp_finwait2_timeout : 16278 TP_MAXIDLE(tp))); 16279 } 16280 tcp_state_change(tp, TCPS_FIN_WAIT_2); 16281 break; 16282 default: 16283 break; 16284 } 16285 } 16286 if (rack->r_fast_output) { 16287 /* 16288 * We re doing fast output.. can we expand that? 
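 * (rack_gain_for_fastoutput() is handed the bytes just acked so the
 * fast-send path can account for the newly opened room.)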
16289 */ 16290 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 16291 } 16292 #ifdef TCP_ACCOUNTING 16293 rdstc = get_cyclecount(); 16294 if (rdstc > ts_val) { 16295 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16296 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16297 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16298 } 16299 } 16300 16301 } else if (win_up_req) { 16302 rdstc = get_cyclecount(); 16303 if (rdstc > ts_val) { 16304 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16305 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 16306 } 16307 } 16308 #endif 16309 } 16310 /* Now is there a next packet, if so we are done */ 16311 m_freem(m); 16312 did_out = 0; 16313 if (nxt_pkt) { 16314 #ifdef TCP_ACCOUNTING 16315 sched_unpin(); 16316 #endif 16317 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 16318 return (0); 16319 } 16320 rack_handle_might_revert(tp, rack); 16321 ctf_calc_rwin(so, tp); 16322 if ((rack->r_wanted_output != 0) || 16323 (rack->r_fast_output != 0) || 16324 (tp->t_flags & TF_ACKNOW )) { 16325 send_out_a_rst: 16326 if (tcp_output(tp) < 0) { 16327 #ifdef TCP_ACCOUNTING 16328 sched_unpin(); 16329 #endif 16330 return (1); 16331 } 16332 did_out = 1; 16333 } 16334 if (tp->t_flags2 & TF2_HPTS_CALLS) 16335 tp->t_flags2 &= ~TF2_HPTS_CALLS; 16336 rack_free_trim(rack); 16337 #ifdef TCP_ACCOUNTING 16338 sched_unpin(); 16339 #endif 16340 rack_timer_audit(tp, rack, &so->so_snd); 16341 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 16342 return (0); 16343 } 16344 16345 #define TCP_LRO_TS_OPTION \ 16346 ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \ 16347 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP) 16348 16349 static int 16350 rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 16351 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, int32_t nxt_pkt, 16352 struct timeval *tv) 16353 { 16354 struct inpcb *inp = tptoinpcb(tp); 16355 struct socket *so = tptosocket(tp); 16356 #ifdef TCP_ACCOUNTING 16357 uint64_t ts_val; 16358 #endif 16359 int32_t thflags, retval, did_out = 0; 16360 int32_t way_out = 0; 16361 /* 16362 * cts - is the current time from tv (caller gets ts) in microseconds. 16363 * ms_cts - is the current time from tv in milliseconds. 16364 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. 16365 */ 16366 uint32_t cts, us_cts, ms_cts; 16367 uint32_t tiwin; 16368 struct timespec ts; 16369 struct tcpopt to; 16370 struct tcp_rack *rack; 16371 struct rack_sendmap *rsm; 16372 int32_t prev_state = 0; 16373 int no_output = 0; 16374 int slot_remaining = 0; 16375 #ifdef TCP_ACCOUNTING 16376 int ack_val_set = 0xf; 16377 #endif 16378 int nsegs; 16379 16380 NET_EPOCH_ASSERT(); 16381 INP_WLOCK_ASSERT(inp); 16382 16383 /* 16384 * tv passed from common code is from either M_TSTMP_LRO or 16385 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 16386 */ 16387 rack = (struct tcp_rack *)tp->t_fb_ptr; 16388 if (rack->rack_deferred_inited == 0) { 16389 /* 16390 * If we are the connecting socket we will 16391 * hit rack_init() when no sequence numbers 16392 * are setup. This makes it so we must defer 16393 * some initialization. Call that now. 16394 */ 16395 rack_deferred_init(tp, rack); 16396 } 16397 /* 16398 * Check to see if we need to skip any output plans. This 16399 * can happen in the non-LRO path where we are pacing and 16400 * must process the ack coming in but need to defer sending 16401 * anything becase a pacing timer is running. 
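 * Roughly: output is deferred only when we are always-pacing, the ack
 * itself cannot send data, the pacer deadline (rc_last_output_to) is
 * still in the future, the remaining slot is at least the pacer's
 * minimum granularity, and either TF2_DONT_SACK_QUEUE is set or the
 * segment carries no options beyond a bare timestamp.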
16402 */ 16403 us_cts = tcp_tv_to_usectick(tv); 16404 if (m->m_flags & M_ACKCMP) { 16405 /* 16406 * All compressed ack's are ack's by definition so 16407 * remove any ack required flag and then do the processing. 16408 */ 16409 rack->rc_ack_required = 0; 16410 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 16411 } 16412 thflags = tcp_get_flags(th); 16413 if ((rack->rc_always_pace == 1) && 16414 (rack->rc_ack_can_sendout_data == 0) && 16415 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16416 (TSTMP_LT(us_cts, rack->r_ctl.rc_last_output_to))) { 16417 /* 16418 * Ok conditions are right for queuing the packets 16419 * but we do have to check the flags in the inp, it 16420 * could be, if a sack is present, we want to be awoken and 16421 * so should process the packets. 16422 */ 16423 slot_remaining = rack->r_ctl.rc_last_output_to - us_cts; 16424 if (rack->rc_tp->t_flags2 & TF2_DONT_SACK_QUEUE) { 16425 no_output = 1; 16426 } else { 16427 /* 16428 * If there is no options, or just a 16429 * timestamp option, we will want to queue 16430 * the packets. This is the same that LRO does 16431 * and will need to change with accurate ECN. 16432 */ 16433 uint32_t *ts_ptr; 16434 int optlen; 16435 16436 optlen = (th->th_off << 2) - sizeof(struct tcphdr); 16437 ts_ptr = (uint32_t *)(th + 1); 16438 if ((optlen == 0) || 16439 ((optlen == TCPOLEN_TSTAMP_APPA) && 16440 (*ts_ptr == TCP_LRO_TS_OPTION))) 16441 no_output = 1; 16442 } 16443 if ((no_output == 1) && (slot_remaining < tcp_min_hptsi_time)) { 16444 /* 16445 * It is unrealistic to think we can pace in less than 16446 * the minimum granularity of the pacer (def:250usec). So 16447 * if we have less than that time remaining we should go 16448 * ahead and allow output to be "early". We will attempt to 16449 * make up for it in any pacing time we try to apply on 16450 * the outbound packet. 16451 */ 16452 no_output = 0; 16453 } 16454 } 16455 /* 16456 * If there is a RST or FIN lets dump out the bw 16457 * with a FIN the connection may go on but we 16458 * may not. 16459 */ 16460 if ((thflags & TH_FIN) || (thflags & TH_RST)) 16461 rack_log_pacing_delay_calc(rack, 16462 rack->r_ctl.gp_bw, 16463 0, 16464 0, 16465 rack_get_gp_est(rack), /* delRate */ 16466 rack_get_lt_bw(rack), /* rttProp */ 16467 20, __LINE__, NULL, 0); 16468 if (m->m_flags & M_ACKCMP) { 16469 panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp); 16470 } 16471 cts = tcp_tv_to_usectick(tv); 16472 ms_cts = tcp_tv_to_mssectick(tv); 16473 nsegs = m->m_pkthdr.lro_nsegs; 16474 counter_u64_add(rack_proc_non_comp_ack, 1); 16475 #ifdef TCP_ACCOUNTING 16476 sched_pin(); 16477 if (thflags & TH_ACK) 16478 ts_val = get_cyclecount(); 16479 #endif 16480 if ((m->m_flags & M_TSTMP) || 16481 (m->m_flags & M_TSTMP_LRO)) { 16482 mbuf_tstmp2timespec(m, &ts); 16483 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 16484 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 16485 } else 16486 rack->r_ctl.act_rcv_time = *tv; 16487 kern_prefetch(rack, &prev_state); 16488 prev_state = 0; 16489 /* 16490 * Unscale the window into a 32-bit value. For the SYN_SENT state 16491 * the scale is zero. 16492 */ 16493 tiwin = th->th_win << tp->snd_scale; 16494 #ifdef TCP_ACCOUNTING 16495 if (thflags & TH_ACK) { 16496 /* 16497 * We have a tradeoff here. We can either do what we are 16498 * doing i.e. pinning to this CPU and then doing the accounting 16499 * <or> we could do a critical enter, setup the rdtsc and cpu 16500 * as in below, and then validate we are on the same CPU on 16501 * exit. 
I have chosen to not do the critical enter since
16502 * that often will cost you a context switch, and instead lock
16503 * us (line above this if) to the same CPU with sched_pin(). This
16504 * means we may be context switched out for a higher priority
16505 * interrupt but we won't be moved to another CPU.
16506 *
16507 * If this occurs (which it won't very often since we most likely
16508 * are running this code in interrupt context and only a higher
16509 * priority will bump us ... clock?) we will falsely add
16510 * the interrupt processing time on top of the ack processing
16511 * time. This is ok since it is a rare event.
16512 */
16513 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin,
16514 ctf_fixed_maxseg(tp));
16515 }
16516 #endif
16517 /*
16518 * Parse options on any incoming segment.
16519 */
16520 memset(&to, 0, sizeof(to));
16521 tcp_dooptions(&to, (u_char *)(th + 1),
16522 (th->th_off << 2) - sizeof(struct tcphdr),
16523 (thflags & TH_SYN) ? TO_SYN : 0);
16524 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
16525 __func__));
16526 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
16527 __func__));
16528 if (tp->t_flags2 & TF2_PROC_SACK_PROHIBIT) {
16529 /*
16530 * We don't look at SACKs from the
16531 * peer because the MSS is too small, which
16532 * can subject us to an attack.
16533 */
16534 to.to_flags &= ~TOF_SACK;
16535 }
16536 if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
16537 (tp->t_flags & TF_GPUTINPROG)) {
16538 /*
16539 * We have a goodput measurement in progress
16540 * and we have entered a late state.
16541 * Do we have enough data in the sb
16542 * to handle the GPUT request?
16543 */
16544 uint32_t bytes;
16545
16546 bytes = tp->gput_ack - tp->gput_seq;
16547 if (SEQ_GT(tp->gput_seq, tp->snd_una))
16548 bytes += tp->gput_seq - tp->snd_una;
16549 if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
16550 /*
16551 * There are not enough bytes in the socket
16552 * buffer that have been sent to cover this
16553 * measurement. Cancel it.
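 *
 * Illustration with made-up numbers: gput_seq = 1000,
 * gput_ack = 51000 and snd_una = 2000 give bytes = 50000; if
 * sbavail() on the send buffer is only 30000 the measurement can
 * never be satisfied, so TF_GPUTINPROG is cleared below.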
16554 */ 16555 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 16556 rack->r_ctl.rc_gp_srtt /*flex1*/, 16557 tp->gput_seq, 16558 0, 0, 18, __LINE__, NULL, 0); 16559 tp->t_flags &= ~TF_GPUTINPROG; 16560 } 16561 } 16562 if (tcp_bblogging_on(rack->rc_tp)) { 16563 union tcp_log_stackspecific log; 16564 struct timeval ltv; 16565 #ifdef TCP_REQUEST_TRK 16566 struct tcp_sendfile_track *tcp_req; 16567 16568 if (SEQ_GT(th->th_ack, tp->snd_una)) { 16569 tcp_req = tcp_req_find_req_for_seq(tp, (th->th_ack-1)); 16570 } else { 16571 tcp_req = tcp_req_find_req_for_seq(tp, th->th_ack); 16572 } 16573 #endif 16574 memset(&log, 0, sizeof(log)); 16575 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 16576 if (rack->rack_no_prr == 0) 16577 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16578 else 16579 log.u_bbr.flex1 = 0; 16580 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 16581 log.u_bbr.use_lt_bw <<= 1; 16582 log.u_bbr.use_lt_bw |= rack->r_might_revert; 16583 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 16584 log.u_bbr.bbr_state = rack->rc_free_cnt; 16585 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16586 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 16587 log.u_bbr.flex3 = m->m_flags; 16588 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 16589 log.u_bbr.lost = thflags; 16590 log.u_bbr.pacing_gain = 0x1; 16591 #ifdef TCP_ACCOUNTING 16592 log.u_bbr.cwnd_gain = ack_val_set; 16593 #endif 16594 log.u_bbr.flex7 = 2; 16595 if (m->m_flags & M_TSTMP) { 16596 /* Record the hardware timestamp if present */ 16597 mbuf_tstmp2timespec(m, &ts); 16598 ltv.tv_sec = ts.tv_sec; 16599 ltv.tv_usec = ts.tv_nsec / 1000; 16600 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 16601 } else if (m->m_flags & M_TSTMP_LRO) { 16602 /* Record the LRO the arrival timestamp */ 16603 mbuf_tstmp2timespec(m, &ts); 16604 ltv.tv_sec = ts.tv_sec; 16605 ltv.tv_usec = ts.tv_nsec / 1000; 16606 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 16607 } 16608 log.u_bbr.timeStamp = tcp_get_usecs(<v); 16609 /* Log the rcv time */ 16610 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp; 16611 #ifdef TCP_REQUEST_TRK 16612 log.u_bbr.applimited = tp->t_tcpreq_closed; 16613 log.u_bbr.applimited <<= 8; 16614 log.u_bbr.applimited |= tp->t_tcpreq_open; 16615 log.u_bbr.applimited <<= 8; 16616 log.u_bbr.applimited |= tp->t_tcpreq_req; 16617 if (tcp_req) { 16618 /* Copy out any client req info */ 16619 /* seconds */ 16620 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); 16621 /* useconds */ 16622 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); 16623 log.u_bbr.rttProp = tcp_req->timestamp; 16624 log.u_bbr.cur_del_rate = tcp_req->start; 16625 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { 16626 log.u_bbr.flex8 |= 1; 16627 } else { 16628 log.u_bbr.flex8 |= 2; 16629 log.u_bbr.bw_inuse = tcp_req->end; 16630 } 16631 log.u_bbr.flex6 = tcp_req->start_seq; 16632 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { 16633 log.u_bbr.flex8 |= 4; 16634 log.u_bbr.epoch = tcp_req->end_seq; 16635 } 16636 } 16637 #endif 16638 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, 16639 tlen, &log, true, <v); 16640 } 16641 /* Remove ack required flag if set, we have one */ 16642 if (thflags & TH_ACK) 16643 rack->rc_ack_required = 0; 16644 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__); 16645 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) { 16646 way_out = 4; 16647 retval = 0; 16648 m_freem(m); 16649 goto done_with_input; 16650 } 16651 /* 16652 * If a segment with the ACK-bit set arrives in the SYN-SENT 
state 16653 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9. 16654 */ 16655 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 16656 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 16657 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 16658 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 16659 #ifdef TCP_ACCOUNTING 16660 sched_unpin(); 16661 #endif 16662 return (1); 16663 } 16664 /* 16665 * If timestamps were negotiated during SYN/ACK and a 16666 * segment without a timestamp is received, silently drop 16667 * the segment, unless it is a RST segment or missing timestamps are 16668 * tolerated. 16669 * See section 3.2 of RFC 7323. 16670 */ 16671 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 16672 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 16673 way_out = 5; 16674 retval = 0; 16675 m_freem(m); 16676 goto done_with_input; 16677 } 16678 /* 16679 * Segment received on connection. Reset idle time and keep-alive 16680 * timer. XXX: This should be done after segment validation to 16681 * ignore broken/spoofed segs. 16682 */ 16683 if (tp->t_idle_reduce && 16684 (tp->snd_max == tp->snd_una) && 16685 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 16686 counter_u64_add(rack_input_idle_reduces, 1); 16687 rack_cc_after_idle(rack, tp); 16688 } 16689 tp->t_rcvtime = ticks; 16690 #ifdef STATS 16691 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 16692 #endif 16693 if (tiwin > rack->r_ctl.rc_high_rwnd) 16694 rack->r_ctl.rc_high_rwnd = tiwin; 16695 /* 16696 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 16697 * this to occur after we've validated the segment. 16698 */ 16699 if (tcp_ecn_input_segment(tp, thflags, tlen, 16700 tcp_packets_this_ack(tp, th->th_ack), 16701 iptos)) 16702 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__); 16703 16704 /* 16705 * If echoed timestamp is later than the current time, fall back to 16706 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 16707 * were used when this connection was established. 16708 */ 16709 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 16710 to.to_tsecr -= tp->ts_offset; 16711 if (TSTMP_GT(to.to_tsecr, ms_cts)) 16712 to.to_tsecr = 0; 16713 } 16714 if ((rack->r_rcvpath_rtt_up == 1) && 16715 (to.to_flags & TOF_TS) && 16716 (TSTMP_GEQ(to.to_tsecr, rack->r_ctl.last_rcv_tstmp_for_rtt))) { 16717 uint32_t rtt = 0; 16718 16719 /* 16720 * We are receiving only and thus not sending 16721 * data to do an RTT. We set a flag when we first 16722 * sent this TS to the peer. We now have it back 16723 * and have an RTT to share. We log it as a conf 16724 * 4, we are not so sure about it.. since we 16725 * may have lost an ack. 16726 */ 16727 if (TSTMP_GT(cts, rack->r_ctl.last_time_of_arm_rcv)) 16728 rtt = (cts - rack->r_ctl.last_time_of_arm_rcv); 16729 rack->r_rcvpath_rtt_up = 0; 16730 /* Submit and commit the timer */ 16731 if (rtt > 0) { 16732 tcp_rack_xmit_timer(rack, rtt, 0, rtt, 4, NULL, 1); 16733 tcp_rack_xmit_timer_commit(rack, tp); 16734 } 16735 } 16736 /* 16737 * If its the first time in we need to take care of options and 16738 * verify we can do SACK for rack! 16739 */ 16740 if (rack->r_state == 0) { 16741 /* Should be init'd by rack_init() */ 16742 KASSERT(rack->rc_inp != NULL, 16743 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 16744 if (rack->rc_inp == NULL) { 16745 rack->rc_inp = inp; 16746 } 16747 16748 /* 16749 * Process options only when we get SYN/ACK back. 
The SYN 16750 * case for incoming connections is handled in tcp_syncache. 16751 * According to RFC1323 the window field in a SYN (i.e., a 16752 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 16753 * this is traditional behavior, may need to be cleaned up. 16754 */ 16755 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 16756 /* Handle parallel SYN for ECN */ 16757 tcp_ecn_input_parallel_syn(tp, thflags, iptos); 16758 if ((to.to_flags & TOF_SCALE) && 16759 (tp->t_flags & TF_REQ_SCALE)) { 16760 tp->t_flags |= TF_RCVD_SCALE; 16761 tp->snd_scale = to.to_wscale; 16762 } else 16763 tp->t_flags &= ~TF_REQ_SCALE; 16764 /* 16765 * Initial send window. It will be updated with the 16766 * next incoming segment to the scaled value. 16767 */ 16768 tp->snd_wnd = th->th_win; 16769 rack_validate_fo_sendwin_up(tp, rack); 16770 if ((to.to_flags & TOF_TS) && 16771 (tp->t_flags & TF_REQ_TSTMP)) { 16772 tp->t_flags |= TF_RCVD_TSTMP; 16773 tp->ts_recent = to.to_tsval; 16774 tp->ts_recent_age = cts; 16775 } else 16776 tp->t_flags &= ~TF_REQ_TSTMP; 16777 if (to.to_flags & TOF_MSS) { 16778 tcp_mss(tp, to.to_mss); 16779 } 16780 if ((tp->t_flags & TF_SACK_PERMIT) && 16781 (to.to_flags & TOF_SACKPERM) == 0) 16782 tp->t_flags &= ~TF_SACK_PERMIT; 16783 if (tp->t_flags & TF_FASTOPEN) { 16784 if (to.to_flags & TOF_FASTOPEN) { 16785 uint16_t mss; 16786 16787 if (to.to_flags & TOF_MSS) 16788 mss = to.to_mss; 16789 else 16790 if ((inp->inp_vflag & INP_IPV6) != 0) 16791 mss = TCP6_MSS; 16792 else 16793 mss = TCP_MSS; 16794 tcp_fastopen_update_cache(tp, mss, 16795 to.to_tfo_len, to.to_tfo_cookie); 16796 } else 16797 tcp_fastopen_disable_path(tp); 16798 } 16799 } 16800 /* 16801 * At this point we are at the initial call. Here we decide 16802 * if we are doing RACK or not. We do this by seeing if 16803 * TF_SACK_PERMIT is set and the sack-not-required is clear. 16804 * The code now does do dup-ack counting so if you don't 16805 * switch back you won't get rack & TLP, but you will still 16806 * get this stack. 16807 */ 16808 16809 if ((rack_sack_not_required == 0) && 16810 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 16811 tcp_switch_back_to_default(tp); 16812 (*tp->t_fb->tfb_tcp_do_segment)(tp, m, th, drop_hdrlen, 16813 tlen, iptos); 16814 #ifdef TCP_ACCOUNTING 16815 sched_unpin(); 16816 #endif 16817 return (1); 16818 } 16819 tcp_set_hpts(tp); 16820 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 16821 } 16822 if (thflags & TH_FIN) 16823 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 16824 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 16825 if ((rack->rc_gp_dyn_mul) && 16826 (rack->use_fixed_rate == 0) && 16827 (rack->rc_always_pace)) { 16828 /* Check in on probertt */ 16829 rack_check_probe_rtt(rack, cts); 16830 } 16831 rack_clear_rate_sample(rack); 16832 if ((rack->forced_ack) && 16833 ((tcp_get_flags(th) & TH_RST) == 0)) { 16834 rack_handle_probe_response(rack, tiwin, us_cts); 16835 } 16836 /* 16837 * This is the one exception case where we set the rack state 16838 * always. All other times (timers etc) we must have a rack-state 16839 * set (so we assure we have done the checks above for SACK). 
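 *
 * From here we record the arrival time in rc_rcvtime, resync
 * r_state with tp->t_state if they differ, prefetch the lowest
 * outstanding rsm when the ack advances snd_una, trace an
 * in-window RST, and then hand the segment to the per-state
 * handler through rack->r_substate.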
16840 */ 16841 rack->r_ctl.rc_rcvtime = cts; 16842 if (rack->r_state != tp->t_state) 16843 rack_set_state(tp, rack); 16844 if (SEQ_GT(th->th_ack, tp->snd_una) && 16845 (rsm = tqhash_min(rack->r_ctl.tqh)) != NULL) 16846 kern_prefetch(rsm, &prev_state); 16847 prev_state = rack->r_state; 16848 if ((thflags & TH_RST) && 16849 ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 16850 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || 16851 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq))) { 16852 /* The connection will be killed by a reset check the tracepoint */ 16853 tcp_trace_point(rack->rc_tp, TCP_TP_RESET_RCV); 16854 } 16855 retval = (*rack->r_substate) (m, th, so, 16856 tp, &to, drop_hdrlen, 16857 tlen, tiwin, thflags, nxt_pkt, iptos); 16858 if (retval == 0) { 16859 /* 16860 * If retval is 1 the tcb is unlocked and most likely the tp 16861 * is gone. 16862 */ 16863 INP_WLOCK_ASSERT(inp); 16864 if ((rack->rc_gp_dyn_mul) && 16865 (rack->rc_always_pace) && 16866 (rack->use_fixed_rate == 0) && 16867 rack->in_probe_rtt && 16868 (rack->r_ctl.rc_time_probertt_starts == 0)) { 16869 /* 16870 * If we are going for target, lets recheck before 16871 * we output. 16872 */ 16873 rack_check_probe_rtt(rack, cts); 16874 } 16875 if (rack->set_pacing_done_a_iw == 0) { 16876 /* How much has been acked? */ 16877 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 16878 /* We have enough to set in the pacing segment size */ 16879 rack->set_pacing_done_a_iw = 1; 16880 rack_set_pace_segments(tp, rack, __LINE__, NULL); 16881 } 16882 } 16883 tcp_rack_xmit_timer_commit(rack, tp); 16884 #ifdef TCP_ACCOUNTING 16885 /* 16886 * If we set the ack_val_se to what ack processing we are doing 16887 * we also want to track how many cycles we burned. Note 16888 * the bits after tcp_output we let be "free". This is because 16889 * we are also tracking the tcp_output times as well. Note the 16890 * use of 0xf here since we only have 11 counter (0 - 0xa) and 16891 * 0xf cannot be returned and is what we initialize it too to 16892 * indicate we are not doing the tabulations. 16893 */ 16894 if (ack_val_set != 0xf) { 16895 uint64_t crtsc; 16896 16897 crtsc = get_cyclecount(); 16898 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16899 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 16900 } 16901 } 16902 #endif 16903 if ((nxt_pkt == 0) && (no_output == 0)) { 16904 if ((rack->r_wanted_output != 0) || 16905 (tp->t_flags & TF_ACKNOW) || 16906 (rack->r_fast_output != 0)) { 16907 16908 do_output_now: 16909 if (tcp_output(tp) < 0) { 16910 #ifdef TCP_ACCOUNTING 16911 sched_unpin(); 16912 #endif 16913 return (1); 16914 } 16915 did_out = 1; 16916 } 16917 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16918 rack_free_trim(rack); 16919 } else if ((nxt_pkt == 0) && (tp->t_flags & TF_ACKNOW)) { 16920 goto do_output_now; 16921 } else if ((no_output == 1) && 16922 (nxt_pkt == 0) && 16923 (tcp_in_hpts(rack->rc_tp) == 0)) { 16924 /* 16925 * We are not in hpts and we had a pacing timer up. Use 16926 * the remaining time (slot_remaining) to restart the timer. 
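 *
 * slot_remaining was computed above as
 * (rc_last_output_to - us_cts) at the point we decided to defer
 * output, so it should never be zero here; the KASSERT below
 * documents that assumption.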
16927 */ 16928 KASSERT ((slot_remaining != 0), ("slot remaining is zero for rack:%p tp:%p", rack, tp)); 16929 rack_start_hpts_timer(rack, tp, cts, slot_remaining, 0, 0); 16930 rack_free_trim(rack); 16931 } 16932 /* Clear the flag, it may have been cleared by output but we may not have */ 16933 if ((nxt_pkt == 0) && (tp->t_flags2 & TF2_HPTS_CALLS)) 16934 tp->t_flags2 &= ~TF2_HPTS_CALLS; 16935 /* 16936 * The draft (v3) calls for us to use SEQ_GEQ, but that 16937 * causes issues when we are just going app limited. Lets 16938 * instead use SEQ_GT <or> where its equal but more data 16939 * is outstanding. 16940 * 16941 * Also make sure we are on the last ack of a series. We 16942 * have to have all the ack's processed in queue to know 16943 * if there is something left outstanding. 16944 */ 16945 if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends) && 16946 (rack->rc_new_rnd_needed == 0) && 16947 (nxt_pkt == 0)) { 16948 /* 16949 * We have crossed into a new round with 16950 * the new snd_unae. 16951 */ 16952 rack_new_round_setup(tp, rack, tp->snd_una); 16953 } 16954 if ((nxt_pkt == 0) && 16955 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 16956 (SEQ_GT(tp->snd_max, tp->snd_una) || 16957 (tp->t_flags & TF_DELACK) || 16958 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 16959 (tp->t_state <= TCPS_CLOSING)))) { 16960 /* We could not send (probably in the hpts but stopped the timer earlier)? */ 16961 if ((tp->snd_max == tp->snd_una) && 16962 ((tp->t_flags & TF_DELACK) == 0) && 16963 (tcp_in_hpts(rack->rc_tp)) && 16964 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 16965 /* keep alive not needed if we are hptsi output yet */ 16966 ; 16967 } else { 16968 int late = 0; 16969 if (tcp_in_hpts(tp)) { 16970 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 16971 us_cts = tcp_get_usecs(NULL); 16972 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 16973 rack->r_early = 1; 16974 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 16975 } else 16976 late = 1; 16977 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 16978 } 16979 tcp_hpts_remove(tp); 16980 } 16981 if (late && (did_out == 0)) { 16982 /* 16983 * We are late in the sending 16984 * and we did not call the output 16985 * (this probably should not happen). 16986 */ 16987 goto do_output_now; 16988 } 16989 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 16990 } 16991 way_out = 1; 16992 } else if (nxt_pkt == 0) { 16993 /* Do we have the correct timer running? */ 16994 rack_timer_audit(tp, rack, &so->so_snd); 16995 way_out = 2; 16996 } 16997 done_with_input: 16998 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); 16999 if (did_out) 17000 rack->r_wanted_output = 0; 17001 } 17002 17003 #ifdef TCP_ACCOUNTING 17004 sched_unpin(); 17005 #endif 17006 return (retval); 17007 } 17008 17009 static void 17010 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 17011 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 17012 { 17013 struct timeval tv; 17014 17015 /* First lets see if we have old packets */ 17016 if (!STAILQ_EMPTY(&tp->t_inqueue)) { 17017 if (ctf_do_queued_segments(tp, 1)) { 17018 m_freem(m); 17019 return; 17020 } 17021 } 17022 if (m->m_flags & M_TSTMP_LRO) { 17023 mbuf_tstmp2timeval(m, &tv); 17024 } else { 17025 /* Should not be should we kassert instead? 
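 * (This is the non-LRO path: the mbuf carries no M_TSTMP_LRO
 * timestamp, so we simply fall back to the current time from
 * tcp_get_usecs().)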
*/ 17026 tcp_get_usecs(&tv); 17027 } 17028 if (rack_do_segment_nounlock(tp, m, th, drop_hdrlen, tlen, iptos, 0, 17029 &tv) == 0) { 17030 INP_WUNLOCK(tptoinpcb(tp)); 17031 } 17032 } 17033 17034 struct rack_sendmap * 17035 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 17036 { 17037 struct rack_sendmap *rsm = NULL; 17038 int32_t idx; 17039 uint32_t srtt = 0, thresh = 0, ts_low = 0; 17040 17041 /* Return the next guy to be re-transmitted */ 17042 if (tqhash_empty(rack->r_ctl.tqh)) { 17043 return (NULL); 17044 } 17045 if (tp->t_flags & TF_SENTFIN) { 17046 /* retran the end FIN? */ 17047 return (NULL); 17048 } 17049 /* ok lets look at this one */ 17050 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 17051 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) { 17052 return (rsm); 17053 } 17054 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 17055 goto check_it; 17056 } 17057 rsm = rack_find_lowest_rsm(rack); 17058 if (rsm == NULL) { 17059 return (NULL); 17060 } 17061 check_it: 17062 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && 17063 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 17064 /* 17065 * No sack so we automatically do the 3 strikes and 17066 * retransmit (no rack timer would be started). 17067 */ 17068 return (rsm); 17069 } 17070 if (rsm->r_flags & RACK_ACKED) { 17071 return (NULL); 17072 } 17073 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 17074 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 17075 /* Its not yet ready */ 17076 return (NULL); 17077 } 17078 srtt = rack_grab_rtt(tp, rack); 17079 idx = rsm->r_rtr_cnt - 1; 17080 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 17081 thresh = rack_calc_thresh_rack(rack, srtt, tsused, __LINE__, 1); 17082 if ((tsused == ts_low) || 17083 (TSTMP_LT(tsused, ts_low))) { 17084 /* No time since sending */ 17085 return (NULL); 17086 } 17087 if ((tsused - ts_low) < thresh) { 17088 /* It has not been long enough yet */ 17089 return (NULL); 17090 } 17091 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 17092 ((rsm->r_flags & RACK_SACK_PASSED))) { 17093 /* 17094 * We have passed the dup-ack threshold <or> 17095 * a SACK has indicated this is missing. 17096 * Note that if you are a declared attacker 17097 * it is only the dup-ack threshold that 17098 * will cause retransmits. 17099 */ 17100 /* log retransmit reason */ 17101 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 17102 rack->r_fast_output = 0; 17103 return (rsm); 17104 } 17105 return (NULL); 17106 } 17107 17108 static void 17109 rack_log_pacing_delay_calc (struct tcp_rack *rack, uint32_t len, uint32_t slot, 17110 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 17111 int line, struct rack_sendmap *rsm, uint8_t quality) 17112 { 17113 if (tcp_bblogging_on(rack->rc_tp)) { 17114 union tcp_log_stackspecific log; 17115 struct timeval tv; 17116 17117 if (rack_verbose_logging == 0) { 17118 /* 17119 * We are not verbose screen out all but 17120 * ones we always want. 
17121 */ 17122 if ((method != 2) && 17123 (method != 3) && 17124 (method != 7) && 17125 (method != 89) && 17126 (method != 14) && 17127 (method != 20)) { 17128 return; 17129 } 17130 } 17131 memset(&log, 0, sizeof(log)); 17132 log.u_bbr.flex1 = slot; 17133 log.u_bbr.flex2 = len; 17134 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 17135 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 17136 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 17137 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 17138 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 17139 log.u_bbr.use_lt_bw <<= 1; 17140 log.u_bbr.use_lt_bw |= rack->r_late; 17141 log.u_bbr.use_lt_bw <<= 1; 17142 log.u_bbr.use_lt_bw |= rack->r_early; 17143 log.u_bbr.use_lt_bw <<= 1; 17144 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 17145 log.u_bbr.use_lt_bw <<= 1; 17146 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 17147 log.u_bbr.use_lt_bw <<= 1; 17148 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 17149 log.u_bbr.use_lt_bw <<= 1; 17150 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 17151 log.u_bbr.use_lt_bw <<= 1; 17152 log.u_bbr.use_lt_bw |= rack->gp_ready; 17153 log.u_bbr.pkt_epoch = line; 17154 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 17155 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 17156 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 17157 log.u_bbr.bw_inuse = bw_est; 17158 log.u_bbr.delRate = bw; 17159 if (rack->r_ctl.gp_bw == 0) 17160 log.u_bbr.cur_del_rate = 0; 17161 else 17162 log.u_bbr.cur_del_rate = rack_get_bw(rack); 17163 log.u_bbr.rttProp = len_time; 17164 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 17165 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 17166 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 17167 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 17168 /* We are in slow start */ 17169 log.u_bbr.flex7 = 1; 17170 } else { 17171 /* we are on congestion avoidance */ 17172 log.u_bbr.flex7 = 0; 17173 } 17174 log.u_bbr.flex8 = method; 17175 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 17176 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 17177 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 17178 log.u_bbr.cwnd_gain <<= 1; 17179 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 17180 log.u_bbr.cwnd_gain <<= 1; 17181 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 17182 log.u_bbr.cwnd_gain <<= 1; 17183 log.u_bbr.cwnd_gain |= rack->use_fixed_rate; 17184 log.u_bbr.cwnd_gain <<= 1; 17185 log.u_bbr.cwnd_gain |= rack->rc_always_pace; 17186 log.u_bbr.cwnd_gain <<= 1; 17187 log.u_bbr.cwnd_gain |= rack->gp_ready; 17188 log.u_bbr.bbr_substate = quality; 17189 log.u_bbr.bbr_state = rack->dgp_on; 17190 log.u_bbr.bbr_state <<= 1; 17191 log.u_bbr.bbr_state |= rack->rc_pace_to_cwnd; 17192 log.u_bbr.bbr_state <<= 2; 17193 TCP_LOG_EVENTP(rack->rc_tp, NULL, 17194 &rack->rc_inp->inp_socket->so_rcv, 17195 &rack->rc_inp->inp_socket->so_snd, 17196 BBR_LOG_HPTSI_CALC, 0, 17197 0, &log, false, &tv); 17198 } 17199 } 17200 17201 static uint32_t 17202 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 17203 { 17204 uint32_t new_tso, user_max, pace_one; 17205 17206 user_max = rack->rc_user_set_max_segs * mss; 17207 if (rack->rc_force_max_seg) { 17208 return (user_max); 17209 } 17210 if (rack->use_fixed_rate && 17211 ((rack->r_ctl.crte == NULL) || 17212 (bw != rack->r_ctl.crte->rate))) { 17213 /* Use the user mss since we are not exactly matched */ 17214 return (user_max); 17215 } 17216 if (rack_pace_one_seg || 17217 (rack->r_ctl.rc_user_set_min_segs == 1)) 17218 
pace_one = 1; 17219 else 17220 pace_one = 0; 17221 17222 new_tso = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, bw, mss, 17223 pace_one, rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); 17224 if (new_tso > user_max) 17225 new_tso = user_max; 17226 if (rack->rc_hybrid_mode && rack->r_ctl.client_suggested_maxseg) { 17227 if (((uint32_t)rack->r_ctl.client_suggested_maxseg * mss) > new_tso) 17228 new_tso = (uint32_t)rack->r_ctl.client_suggested_maxseg * mss; 17229 } 17230 if (rack->r_ctl.rc_user_set_min_segs && 17231 ((rack->r_ctl.rc_user_set_min_segs * mss) > new_tso)) 17232 new_tso = rack->r_ctl.rc_user_set_min_segs * mss; 17233 return (new_tso); 17234 } 17235 17236 static uint64_t 17237 rack_arrive_at_discounted_rate(struct tcp_rack *rack, uint64_t window_input, uint32_t *rate_set, uint32_t *gain_b) 17238 { 17239 uint64_t reduced_win; 17240 uint32_t gain; 17241 17242 if (window_input < rc_init_window(rack)) { 17243 /* 17244 * The cwnd is collapsed to 17245 * nearly zero, maybe because of a time-out? 17246 * Lets drop back to the lt-bw. 17247 */ 17248 reduced_win = rack_get_lt_bw(rack); 17249 /* Set the flag so the caller knows its a rate and not a reduced window */ 17250 *rate_set = 1; 17251 gain = 100; 17252 } else if (IN_RECOVERY(rack->rc_tp->t_flags)) { 17253 /* 17254 * If we are in recover our cwnd needs to be less for 17255 * our pacing consideration. 17256 */ 17257 if (rack->rack_hibeta == 0) { 17258 reduced_win = window_input / 2; 17259 gain = 50; 17260 } else { 17261 reduced_win = window_input * rack->r_ctl.saved_hibeta; 17262 reduced_win /= 100; 17263 gain = rack->r_ctl.saved_hibeta; 17264 } 17265 } else { 17266 /* 17267 * Apply Timely factor to increase/decrease the 17268 * amount we are pacing at. 17269 */ 17270 gain = rack_get_output_gain(rack, NULL); 17271 if (gain > rack_gain_p5_ub) { 17272 gain = rack_gain_p5_ub; 17273 } 17274 reduced_win = window_input * gain; 17275 reduced_win /= 100; 17276 } 17277 if (gain_b != NULL) 17278 *gain_b = gain; 17279 /* 17280 * What is being returned here is a trimmed down 17281 * window values in all cases where rate_set is left 17282 * at 0. In one case we actually return the rate (lt_bw). 17283 * the "reduced_win" is returned as a slimmed down cwnd that 17284 * is then calculated by the caller into a rate when rate_set 17285 * is 0. 17286 */ 17287 return (reduced_win); 17288 } 17289 17290 static int32_t 17291 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced) 17292 { 17293 uint64_t lentim, fill_bw; 17294 17295 rack->r_via_fill_cw = 0; 17296 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) 17297 return (slot); 17298 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) 17299 return (slot); 17300 if (rack->r_ctl.rc_last_us_rtt == 0) 17301 return (slot); 17302 if (rack->rc_pace_fill_if_rttin_range && 17303 (rack->r_ctl.rc_last_us_rtt >= 17304 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { 17305 /* The rtt is huge, N * smallest, lets not fill */ 17306 return (slot); 17307 } 17308 if (rack->r_ctl.fillcw_cap && *rate_wanted >= rack->r_ctl.fillcw_cap) 17309 return (slot); 17310 /* 17311 * first lets calculate the b/w based on the last us-rtt 17312 * and the the smallest send window. 
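 *
 * Rough illustration (made-up numbers): a usable window of
 * 64000 bytes and an rc_last_us_rtt of 10000 usecs give
 * 64000 * HPTS_USEC_IN_SEC / 10000, i.e. about 6.4 MB/s,
 * ignoring the optional discount, the rwnd clip and the
 * fillcw_cap handled below.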
17313 */ 17314 fill_bw = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); 17315 if (rack->rc_fillcw_apply_discount) { 17316 uint32_t rate_set = 0; 17317 17318 fill_bw = rack_arrive_at_discounted_rate(rack, fill_bw, &rate_set, NULL); 17319 if (rate_set) { 17320 goto at_lt_bw; 17321 } 17322 } 17323 /* Take the rwnd if its smaller */ 17324 if (fill_bw > rack->rc_tp->snd_wnd) 17325 fill_bw = rack->rc_tp->snd_wnd; 17326 /* Now lets make it into a b/w */ 17327 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 17328 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 17329 /* Adjust to any cap */ 17330 if (rack->r_ctl.fillcw_cap && fill_bw >= rack->r_ctl.fillcw_cap) 17331 fill_bw = rack->r_ctl.fillcw_cap; 17332 17333 at_lt_bw: 17334 if (rack_bw_multipler > 0) { 17335 /* 17336 * We want to limit fill-cw to the some multiplier 17337 * of the max(lt_bw, gp_est). The normal default 17338 * is 0 for off, so a sysctl has enabled it. 17339 */ 17340 uint64_t lt_bw, gp, rate; 17341 17342 gp = rack_get_gp_est(rack); 17343 lt_bw = rack_get_lt_bw(rack); 17344 if (lt_bw > gp) 17345 rate = lt_bw; 17346 else 17347 rate = gp; 17348 rate *= rack_bw_multipler; 17349 rate /= 100; 17350 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 17351 union tcp_log_stackspecific log; 17352 struct timeval tv; 17353 17354 memset(&log, 0, sizeof(log)); 17355 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 17356 log.u_bbr.flex1 = rack_bw_multipler; 17357 log.u_bbr.flex2 = len; 17358 log.u_bbr.cur_del_rate = gp; 17359 log.u_bbr.delRate = lt_bw; 17360 log.u_bbr.bw_inuse = rate; 17361 log.u_bbr.rttProp = fill_bw; 17362 log.u_bbr.flex8 = 44; 17363 tcp_log_event(rack->rc_tp, NULL, NULL, NULL, 17364 BBR_LOG_CWND, 0, 17365 0, &log, false, NULL, 17366 __func__, __LINE__, &tv); 17367 } 17368 if (fill_bw > rate) 17369 fill_bw = rate; 17370 } 17371 /* We are below the min b/w */ 17372 if (non_paced) 17373 *rate_wanted = fill_bw; 17374 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 17375 return (slot); 17376 rack->r_via_fill_cw = 1; 17377 if (rack->r_rack_hw_rate_caps && 17378 (rack->r_ctl.crte != NULL)) { 17379 uint64_t high_rate; 17380 17381 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 17382 if (fill_bw > high_rate) { 17383 /* We are capping bw at the highest rate table entry */ 17384 if (*rate_wanted > high_rate) { 17385 /* The original rate was also capped */ 17386 rack->r_via_fill_cw = 0; 17387 } 17388 rack_log_hdwr_pacing(rack, 17389 fill_bw, high_rate, __LINE__, 17390 0, 3); 17391 fill_bw = high_rate; 17392 if (capped) 17393 *capped = 1; 17394 } 17395 } else if ((rack->r_ctl.crte == NULL) && 17396 (rack->rack_hdrw_pacing == 0) && 17397 (rack->rack_hdw_pace_ena) && 17398 rack->r_rack_hw_rate_caps && 17399 (rack->rack_attempt_hdwr_pace == 0) && 17400 (rack->rc_inp->inp_route.ro_nh != NULL) && 17401 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 17402 /* 17403 * Ok we may have a first attempt that is greater than our top rate 17404 * lets check. 
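 *
 * tcp_hw_highest_rate_ifp() below reports the fastest hardware
 * pacing rate the egress interface offers; when fill_bw exceeds
 * it we clamp fill_bw to that rate and mark the result capped.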
17405 */ 17406 uint64_t high_rate; 17407 17408 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 17409 if (high_rate) { 17410 if (fill_bw > high_rate) { 17411 fill_bw = high_rate; 17412 if (capped) 17413 *capped = 1; 17414 } 17415 } 17416 } 17417 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) { 17418 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 17419 fill_bw, 0, 0, HYBRID_LOG_RATE_CAP, 2, NULL, __LINE__); 17420 fill_bw = rack->r_ctl.bw_rate_cap; 17421 } 17422 /* 17423 * Ok fill_bw holds our mythical b/w to fill the cwnd 17424 * in an rtt (unless it was capped), what does that 17425 * time wise equate too? 17426 */ 17427 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 17428 lentim /= fill_bw; 17429 *rate_wanted = fill_bw; 17430 if (non_paced || (lentim < slot)) { 17431 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, 17432 0, lentim, 12, __LINE__, NULL, 0); 17433 return ((int32_t)lentim); 17434 } else 17435 return (slot); 17436 } 17437 17438 static int32_t 17439 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz, int line) 17440 { 17441 uint64_t srtt; 17442 int32_t slot = 0; 17443 int can_start_hw_pacing = 1; 17444 int err; 17445 int pace_one; 17446 17447 if (rack_pace_one_seg || 17448 (rack->r_ctl.rc_user_set_min_segs == 1)) 17449 pace_one = 1; 17450 else 17451 pace_one = 0; 17452 if (rack->rc_always_pace == 0) { 17453 /* 17454 * We use the most optimistic possible cwnd/srtt for 17455 * sending calculations. This will make our 17456 * calculation anticipate getting more through 17457 * quicker then possible. But thats ok we don't want 17458 * the peer to have a gap in data sending. 17459 */ 17460 uint64_t cwnd, tr_perms = 0; 17461 int32_t reduce; 17462 17463 old_method: 17464 /* 17465 * We keep no precise pacing with the old method 17466 * instead we use the pacer to mitigate bursts. 17467 */ 17468 if (rack->r_ctl.rc_rack_min_rtt) 17469 srtt = rack->r_ctl.rc_rack_min_rtt; 17470 else 17471 srtt = max(tp->t_srtt, 1); 17472 if (rack->r_ctl.rc_rack_largest_cwnd) 17473 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 17474 else 17475 cwnd = rack->r_ctl.cwnd_to_use; 17476 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 17477 tr_perms = (cwnd * 1000) / srtt; 17478 if (tr_perms == 0) { 17479 tr_perms = ctf_fixed_maxseg(tp); 17480 } 17481 /* 17482 * Calculate how long this will take to drain, if 17483 * the calculation comes out to zero, thats ok we 17484 * will use send_a_lot to possibly spin around for 17485 * more increasing tot_len_this_send to the point 17486 * that its going to require a pace, or we hit the 17487 * cwnd. Which in that case we are just waiting for 17488 * a ACK. 17489 */ 17490 slot = len / tr_perms; 17491 /* Now do we reduce the time so we don't run dry? 
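 *
 * Worked example with made-up numbers: cwnd = 100000 bytes and
 * srtt = 20000 usecs give tr_perms = (100000 * 1000) / 20000 =
 * 5000 bytes per msec; len = 50000 then yields a 10 msec slot,
 * and with a reduction divisor of 4 we shave off 2 msec and pace
 * for 8 msec (8000 usecs after the HPTS conversion below).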
*/ 17492 if (slot && rack_slot_reduction) { 17493 reduce = (slot / rack_slot_reduction); 17494 if (reduce < slot) { 17495 slot -= reduce; 17496 } else 17497 slot = 0; 17498 } else 17499 reduce = 0; 17500 slot *= HPTS_USEC_IN_MSEC; 17501 if (rack->rc_pace_to_cwnd) { 17502 uint64_t rate_wanted = 0; 17503 17504 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); 17505 rack->rc_ack_can_sendout_data = 1; 17506 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); 17507 } else 17508 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); 17509 /*******************************************************/ 17510 /* RRS: We insert non-paced call to stats here for len */ 17511 /*******************************************************/ 17512 } else { 17513 uint64_t bw_est, res, lentim, rate_wanted; 17514 uint32_t segs, oh; 17515 int capped = 0; 17516 int prev_fill; 17517 17518 if ((rack->r_rr_config == 1) && rsm) { 17519 return (rack->r_ctl.rc_min_to); 17520 } 17521 if (rack->use_fixed_rate) { 17522 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 17523 } else if ((rack->r_ctl.init_rate == 0) && 17524 (rack->r_ctl.gp_bw == 0)) { 17525 /* no way to yet do an estimate */ 17526 bw_est = rate_wanted = 0; 17527 } else if (rack->dgp_on) { 17528 bw_est = rack_get_bw(rack); 17529 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 17530 } else { 17531 uint32_t gain, rate_set = 0; 17532 17533 rate_wanted = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); 17534 rate_wanted = rack_arrive_at_discounted_rate(rack, rate_wanted, &rate_set, &gain); 17535 if (rate_set == 0) { 17536 if (rate_wanted > rack->rc_tp->snd_wnd) 17537 rate_wanted = rack->rc_tp->snd_wnd; 17538 /* Now lets make it into a b/w */ 17539 rate_wanted *= (uint64_t)HPTS_USEC_IN_SEC; 17540 rate_wanted /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 17541 } 17542 bw_est = rate_wanted; 17543 rack_log_pacing_delay_calc(rack, rack->rc_tp->snd_cwnd, 17544 rack->r_ctl.cwnd_to_use, 17545 rate_wanted, bw_est, 17546 rack->r_ctl.rc_last_us_rtt, 17547 88, __LINE__, NULL, gain); 17548 } 17549 if (((bw_est == 0) || (rate_wanted == 0) || (rack->gp_ready == 0)) && 17550 (rack->use_fixed_rate == 0)) { 17551 /* 17552 * No way yet to make a b/w estimate or 17553 * our raise is set incorrectly. 17554 */ 17555 goto old_method; 17556 } 17557 rack_rate_cap_bw(rack, &rate_wanted, &capped); 17558 /* We need to account for all the overheads */ 17559 segs = (len + segsiz - 1) / segsiz; 17560 /* 17561 * We need the diff between 1514 bytes (e-mtu with e-hdr) 17562 * and how much data we put in each packet. Yes this 17563 * means we may be off if we are larger than 1500 bytes 17564 * or smaller. But this just makes us more conservative. 17565 */ 17566 17567 oh = (tp->t_maxseg - segsiz) + sizeof(struct tcphdr); 17568 if (rack->r_is_v6) { 17569 #ifdef INET6 17570 oh += sizeof(struct ip6_hdr); 17571 #endif 17572 } else { 17573 #ifdef INET 17574 oh += sizeof(struct ip); 17575 #endif 17576 } 17577 /* We add a fixed 14 for the ethernet header */ 17578 oh += 14; 17579 segs *= oh; 17580 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 17581 res = lentim / rate_wanted; 17582 slot = (uint32_t)res; 17583 if (rack_hw_rate_min && 17584 (rate_wanted < rack_hw_rate_min)) { 17585 can_start_hw_pacing = 0; 17586 if (rack->r_ctl.crte) { 17587 /* 17588 * Ok we need to release it, we 17589 * have fallen too low. 
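 *
 * Releasing the crte here also clears rack_hdrw_pacing and
 * rack_attempt_hdwr_pace, so hardware pacing may be attempted
 * again later should the wanted rate climb back above
 * rack_hw_rate_min.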
17590 */ 17591 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17592 rack->r_ctl.crte = NULL; 17593 rack->rack_attempt_hdwr_pace = 0; 17594 rack->rack_hdrw_pacing = 0; 17595 } 17596 } 17597 if (rack->r_ctl.crte && 17598 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { 17599 /* 17600 * We want more than the hardware can give us, 17601 * don't start any hw pacing. 17602 */ 17603 can_start_hw_pacing = 0; 17604 if (rack->r_rack_hw_rate_caps == 0) { 17605 /* 17606 * Ok we need to release it, we 17607 * want more than the card can give us and 17608 * no rate cap is in place. Set it up so 17609 * when we want less we can retry. 17610 */ 17611 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17612 rack->r_ctl.crte = NULL; 17613 rack->rack_attempt_hdwr_pace = 0; 17614 rack->rack_hdrw_pacing = 0; 17615 } 17616 } 17617 if ((rack->r_ctl.crte != NULL) && (rack->rc_inp->inp_snd_tag == NULL)) { 17618 /* 17619 * We lost our rate somehow, this can happen 17620 * if the interface changed underneath us. 17621 */ 17622 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17623 rack->r_ctl.crte = NULL; 17624 /* Lets re-allow attempting to setup pacing */ 17625 rack->rack_hdrw_pacing = 0; 17626 rack->rack_attempt_hdwr_pace = 0; 17627 rack_log_hdwr_pacing(rack, 17628 rate_wanted, bw_est, __LINE__, 17629 0, 6); 17630 } 17631 prev_fill = rack->r_via_fill_cw; 17632 if ((rack->rc_pace_to_cwnd) && 17633 (capped == 0) && 17634 (rack->dgp_on == 1) && 17635 (rack->use_fixed_rate == 0) && 17636 (rack->in_probe_rtt == 0) && 17637 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 17638 /* 17639 * We want to pace at our rate *or* faster to 17640 * fill the cwnd to the max if its not full. 17641 */ 17642 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); 17643 /* Re-check to make sure we are not exceeding our max b/w */ 17644 if ((rack->r_ctl.crte != NULL) && 17645 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { 17646 /* 17647 * We want more than the hardware can give us, 17648 * don't start any hw pacing. 17649 */ 17650 can_start_hw_pacing = 0; 17651 if (rack->r_rack_hw_rate_caps == 0) { 17652 /* 17653 * Ok we need to release it, we 17654 * want more than the card can give us and 17655 * no rate cap is in place. Set it up so 17656 * when we want less we can retry. 17657 */ 17658 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17659 rack->r_ctl.crte = NULL; 17660 rack->rack_attempt_hdwr_pace = 0; 17661 rack->rack_hdrw_pacing = 0; 17662 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 17663 } 17664 } 17665 } 17666 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 17667 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 17668 if ((rack->rack_hdw_pace_ena) && 17669 (can_start_hw_pacing > 0) && 17670 (rack->rack_hdrw_pacing == 0) && 17671 (rack->rack_attempt_hdwr_pace == 0)) { 17672 /* 17673 * Lets attempt to turn on hardware pacing 17674 * if we can. 
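 *
 * The call below asks the NIC for a rate of at least rate_wanted
 * (RS_PACING_GEQ); on success hardware pacing is marked active
 * and rc_pace_max_segs is recomputed for the granted rate,
 * otherwise rack_hw_pace_init_fail is bumped.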
17675 */ 17676 rack->rack_attempt_hdwr_pace = 1; 17677 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 17678 rack->rc_inp->inp_route.ro_nh->nh_ifp, 17679 rate_wanted, 17680 RS_PACING_GEQ, 17681 &err, &rack->r_ctl.crte_prev_rate); 17682 if (rack->r_ctl.crte) { 17683 rack->rack_hdrw_pacing = 1; 17684 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, segsiz, 17685 pace_one, rack->r_ctl.crte, 17686 NULL, rack->r_ctl.pace_len_divisor); 17687 rack_log_hdwr_pacing(rack, 17688 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 17689 err, 0); 17690 rack->r_ctl.last_hw_bw_req = rate_wanted; 17691 } else { 17692 counter_u64_add(rack_hw_pace_init_fail, 1); 17693 } 17694 } else if (rack->rack_hdrw_pacing && 17695 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 17696 /* Do we need to adjust our rate? */ 17697 const struct tcp_hwrate_limit_table *nrte; 17698 17699 if (rack->r_up_only && 17700 (rate_wanted < rack->r_ctl.crte->rate)) { 17701 /** 17702 * We have four possible states here 17703 * having to do with the previous time 17704 * and this time. 17705 * previous | this-time 17706 * A) 0 | 0 -- fill_cw not in the picture 17707 * B) 1 | 0 -- we were doing a fill-cw but now are not 17708 * C) 1 | 1 -- all rates from fill_cw 17709 * D) 0 | 1 -- we were doing non-fill and now we are filling 17710 * 17711 * For case A, C and D we don't allow a drop. But for 17712 * case B where we now our on our steady rate we do 17713 * allow a drop. 17714 * 17715 */ 17716 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 17717 goto done_w_hdwr; 17718 } 17719 if ((rate_wanted > rack->r_ctl.crte->rate) || 17720 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 17721 if (rack_hw_rate_to_low && 17722 (bw_est < rack_hw_rate_to_low)) { 17723 /* 17724 * The pacing rate is too low for hardware, but 17725 * do allow hardware pacing to be restarted. 17726 */ 17727 rack_log_hdwr_pacing(rack, 17728 bw_est, rack->r_ctl.crte->rate, __LINE__, 17729 0, 5); 17730 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17731 rack->r_ctl.crte = NULL; 17732 rack->rack_attempt_hdwr_pace = 0; 17733 rack->rack_hdrw_pacing = 0; 17734 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 17735 goto done_w_hdwr; 17736 } 17737 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 17738 rack->rc_tp, 17739 rack->rc_inp->inp_route.ro_nh->nh_ifp, 17740 rate_wanted, 17741 RS_PACING_GEQ, 17742 &err, &rack->r_ctl.crte_prev_rate); 17743 if (nrte == NULL) { 17744 /* 17745 * Lost the rate, lets drop hardware pacing 17746 * period. 
17747 */ 17748 rack->rack_hdrw_pacing = 0; 17749 rack->r_ctl.crte = NULL; 17750 rack_log_hdwr_pacing(rack, 17751 rate_wanted, 0, __LINE__, 17752 err, 1); 17753 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 17754 counter_u64_add(rack_hw_pace_lost, 1); 17755 } else if (nrte != rack->r_ctl.crte) { 17756 rack->r_ctl.crte = nrte; 17757 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, 17758 segsiz, pace_one, rack->r_ctl.crte, 17759 NULL, rack->r_ctl.pace_len_divisor); 17760 rack_log_hdwr_pacing(rack, 17761 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 17762 err, 2); 17763 rack->r_ctl.last_hw_bw_req = rate_wanted; 17764 } 17765 } else { 17766 /* We just need to adjust the segment size */ 17767 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 17768 rack_log_hdwr_pacing(rack, 17769 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 17770 0, 4); 17771 rack->r_ctl.last_hw_bw_req = rate_wanted; 17772 } 17773 } 17774 } 17775 done_w_hdwr: 17776 if (rack_limit_time_with_srtt && 17777 (rack->use_fixed_rate == 0) && 17778 (rack->rack_hdrw_pacing == 0)) { 17779 /* 17780 * Sanity check, we do not allow the pacing delay 17781 * to be longer than the SRTT of the path. If it is 17782 * a slow path, then adding a packet should increase 17783 * the RTT and compensate for this i.e. the srtt will 17784 * be greater so the allowed pacing time will be greater. 17785 * 17786 * Note this restriction is not for where a peak rate 17787 * is set, we are doing fixed pacing or hardware pacing. 17788 */ 17789 if (rack->rc_tp->t_srtt) 17790 srtt = rack->rc_tp->t_srtt; 17791 else 17792 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 17793 if (srtt < (uint64_t)slot) { 17794 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); 17795 slot = srtt; 17796 } 17797 } 17798 /*******************************************************************/ 17799 /* RRS: We insert paced call to stats here for len and rate_wanted */ 17800 /*******************************************************************/ 17801 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); 17802 } 17803 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 17804 /* 17805 * If this rate is seeing enobufs when it 17806 * goes to send then either the nic is out 17807 * of gas or we are mis-estimating the time 17808 * somehow and not letting the queue empty 17809 * completely. Lets add to the pacing time. 17810 */ 17811 int hw_boost_delay; 17812 17813 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 17814 if (hw_boost_delay > rack_enobuf_hw_max) 17815 hw_boost_delay = rack_enobuf_hw_max; 17816 else if (hw_boost_delay < rack_enobuf_hw_min) 17817 hw_boost_delay = rack_enobuf_hw_min; 17818 slot += hw_boost_delay; 17819 } 17820 return (slot); 17821 } 17822 17823 static void 17824 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 17825 tcp_seq startseq, uint32_t sb_offset) 17826 { 17827 struct rack_sendmap *my_rsm = NULL; 17828 17829 if (tp->t_state < TCPS_ESTABLISHED) { 17830 /* 17831 * We don't start any measurements if we are 17832 * not at least established. 17833 */ 17834 return; 17835 } 17836 if (tp->t_state >= TCPS_FIN_WAIT_1) { 17837 /* 17838 * We will get no more data into the SB 17839 * this means we need to have the data available 17840 * before we start a measurement. 
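 *
 * The check below requires at least max(rc_init_window(rack),
 * MIN_GP_WIN * maxseg) bytes queued in the send buffer; e.g.
 * with 1448-byte segments and a ten segment initial window
 * (made-up numbers) that is roughly 14.5 KB of data.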
17841 */ 17842 17843 if (sbavail(&tptosocket(tp)->so_snd) < 17844 max(rc_init_window(rack), 17845 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { 17846 /* Nope not enough data */ 17847 return; 17848 } 17849 } 17850 tp->t_flags |= TF_GPUTINPROG; 17851 rack->r_ctl.rc_gp_cumack_ts = 0; 17852 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 17853 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 17854 tp->gput_seq = startseq; 17855 rack->app_limited_needs_set = 0; 17856 if (rack->in_probe_rtt) 17857 rack->measure_saw_probe_rtt = 1; 17858 else if ((rack->measure_saw_probe_rtt) && 17859 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 17860 rack->measure_saw_probe_rtt = 0; 17861 if (rack->rc_gp_filled) 17862 tp->gput_ts = rack->r_ctl.last_cumack_advance; 17863 else { 17864 /* Special case initial measurement */ 17865 struct timeval tv; 17866 17867 tp->gput_ts = tcp_get_usecs(&tv); 17868 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 17869 } 17870 /* 17871 * We take a guess out into the future, 17872 * if we have no measurement and no 17873 * initial rate, we measure the first 17874 * initial-windows worth of data to 17875 * speed up getting some GP measurement and 17876 * thus start pacing. 17877 */ 17878 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 17879 rack->app_limited_needs_set = 1; 17880 tp->gput_ack = startseq + max(rc_init_window(rack), 17881 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 17882 rack_log_pacing_delay_calc(rack, 17883 tp->gput_seq, 17884 tp->gput_ack, 17885 0, 17886 tp->gput_ts, 17887 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 17888 9, 17889 __LINE__, NULL, 0); 17890 rack_tend_gp_marks(tp, rack); 17891 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 17892 return; 17893 } 17894 if (sb_offset) { 17895 /* 17896 * We are out somewhere in the sb 17897 * can we use the already outstanding data? 17898 */ 17899 17900 if (rack->r_ctl.rc_app_limited_cnt == 0) { 17901 /* 17902 * Yes first one is good and in this case 17903 * the tp->gput_ts is correctly set based on 17904 * the last ack that arrived (no need to 17905 * set things up when an ack comes in). 17906 */ 17907 my_rsm = tqhash_min(rack->r_ctl.tqh); 17908 if ((my_rsm == NULL) || 17909 (my_rsm->r_rtr_cnt != 1)) { 17910 /* retransmission? */ 17911 goto use_latest; 17912 } 17913 } else { 17914 if (rack->r_ctl.rc_first_appl == NULL) { 17915 /* 17916 * If rc_first_appl is NULL 17917 * then the cnt should be 0. 17918 * This is probably an error, maybe 17919 * a KASSERT would be approprate. 17920 */ 17921 goto use_latest; 17922 } 17923 /* 17924 * If we have a marker pointer to the last one that is 17925 * app limited we can use that, but we need to set 17926 * things up so that when it gets ack'ed we record 17927 * the ack time (if its not already acked). 17928 */ 17929 rack->app_limited_needs_set = 1; 17930 /* 17931 * We want to get to the rsm that is either 17932 * next with space i.e. over 1 MSS or the one 17933 * after that (after the app-limited). 
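 *
 * In code terms: take the rsm after rc_first_appl; if it holds no
 * more than one MSS step to the one after it, otherwise start the
 * measurement one MSS into it (gput_seq = r_start + maxseg), i.e.
 * after the first MSS of it has been acked.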
17934 */ 17935 my_rsm = tqhash_next(rack->r_ctl.tqh, rack->r_ctl.rc_first_appl); 17936 if (my_rsm) { 17937 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 17938 /* Have to use the next one */ 17939 my_rsm = tqhash_next(rack->r_ctl.tqh, my_rsm); 17940 else { 17941 /* Use after the first MSS of it is acked */ 17942 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 17943 goto start_set; 17944 } 17945 } 17946 if ((my_rsm == NULL) || 17947 (my_rsm->r_rtr_cnt != 1)) { 17948 /* 17949 * Either its a retransmit or 17950 * the last is the app-limited one. 17951 */ 17952 goto use_latest; 17953 } 17954 } 17955 tp->gput_seq = my_rsm->r_start; 17956 start_set: 17957 if (my_rsm->r_flags & RACK_ACKED) { 17958 /* 17959 * This one has been acked use the arrival ack time 17960 */ 17961 struct rack_sendmap *nrsm; 17962 17963 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 17964 rack->app_limited_needs_set = 0; 17965 /* 17966 * Ok in this path we need to use the r_end now 17967 * since this guy is the starting ack. 17968 */ 17969 tp->gput_seq = my_rsm->r_end; 17970 /* 17971 * We also need to adjust up the sendtime 17972 * to the send of the next data after my_rsm. 17973 */ 17974 nrsm = tqhash_next(rack->r_ctl.tqh, my_rsm); 17975 if (nrsm != NULL) 17976 my_rsm = nrsm; 17977 else { 17978 /* 17979 * The next as not been sent, thats the 17980 * case for using the latest. 17981 */ 17982 goto use_latest; 17983 } 17984 } 17985 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; 17986 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 17987 rack->r_ctl.rc_gp_cumack_ts = 0; 17988 if ((rack->r_ctl.cleared_app_ack == 1) && 17989 (SEQ_GEQ(rack->r_ctl.cleared_app_ack, tp->gput_seq))) { 17990 /* 17991 * We just cleared an application limited period 17992 * so the next seq out needs to skip the first 17993 * ack. 17994 */ 17995 rack->app_limited_needs_set = 1; 17996 rack->r_ctl.cleared_app_ack = 0; 17997 } 17998 rack_log_pacing_delay_calc(rack, 17999 tp->gput_seq, 18000 tp->gput_ack, 18001 (uintptr_t)my_rsm, 18002 tp->gput_ts, 18003 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 18004 9, 18005 __LINE__, my_rsm, 0); 18006 /* Now lets make sure all are marked as they should be */ 18007 rack_tend_gp_marks(tp, rack); 18008 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 18009 return; 18010 } 18011 18012 use_latest: 18013 /* 18014 * We don't know how long we may have been 18015 * idle or if this is the first-send. Lets 18016 * setup the flag so we will trim off 18017 * the first ack'd data so we get a true 18018 * measurement. 18019 */ 18020 rack->app_limited_needs_set = 1; 18021 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 18022 rack->r_ctl.rc_gp_cumack_ts = 0; 18023 /* Find this guy so we can pull the send time */ 18024 my_rsm = tqhash_find(rack->r_ctl.tqh, startseq); 18025 if (my_rsm) { 18026 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; 18027 if (my_rsm->r_flags & RACK_ACKED) { 18028 /* 18029 * Unlikely since its probably what was 18030 * just transmitted (but I am paranoid). 18031 */ 18032 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 18033 rack->app_limited_needs_set = 0; 18034 } 18035 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 18036 /* This also is unlikely */ 18037 tp->gput_seq = my_rsm->r_start; 18038 } 18039 } else { 18040 /* 18041 * TSNH unless we have some send-map limit, 18042 * and even at that it should not be hitting 18043 * that limit (we should have stopped sending). 
18044 */ 18045 struct timeval tv; 18046 18047 microuptime(&tv); 18048 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 18049 } 18050 rack_tend_gp_marks(tp, rack); 18051 rack_log_pacing_delay_calc(rack, 18052 tp->gput_seq, 18053 tp->gput_ack, 18054 (uintptr_t)my_rsm, 18055 tp->gput_ts, 18056 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 18057 9, __LINE__, NULL, 0); 18058 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 18059 } 18060 18061 static inline uint32_t 18062 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 18063 uint32_t avail, int32_t sb_offset) 18064 { 18065 uint32_t len; 18066 uint32_t sendwin; 18067 18068 if (tp->snd_wnd > cwnd_to_use) 18069 sendwin = cwnd_to_use; 18070 else 18071 sendwin = tp->snd_wnd; 18072 if (ctf_outstanding(tp) >= tp->snd_wnd) { 18073 /* We never want to go over our peers rcv-window */ 18074 len = 0; 18075 } else { 18076 uint32_t flight; 18077 18078 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 18079 if (flight >= sendwin) { 18080 /* 18081 * We have in flight what we are allowed by cwnd (if 18082 * it was rwnd blocking it would have hit above out 18083 * >= tp->snd_wnd). 18084 */ 18085 return (0); 18086 } 18087 len = sendwin - flight; 18088 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 18089 /* We would send too much (beyond the rwnd) */ 18090 len = tp->snd_wnd - ctf_outstanding(tp); 18091 } 18092 if ((len + sb_offset) > avail) { 18093 /* 18094 * We don't have that much in the SB, how much is 18095 * there? 18096 */ 18097 len = avail - sb_offset; 18098 } 18099 } 18100 return (len); 18101 } 18102 18103 static void 18104 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 18105 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 18106 int rsm_is_null, int optlen, int line, uint16_t mode) 18107 { 18108 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 18109 union tcp_log_stackspecific log; 18110 struct timeval tv; 18111 18112 memset(&log, 0, sizeof(log)); 18113 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18114 log.u_bbr.flex1 = error; 18115 log.u_bbr.flex2 = flags; 18116 log.u_bbr.flex3 = rsm_is_null; 18117 log.u_bbr.flex4 = ipoptlen; 18118 log.u_bbr.flex5 = tp->rcv_numsacks; 18119 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18120 log.u_bbr.flex7 = optlen; 18121 log.u_bbr.flex8 = rack->r_fsb_inited; 18122 log.u_bbr.applimited = rack->r_fast_output; 18123 log.u_bbr.bw_inuse = rack_get_bw(rack); 18124 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 18125 log.u_bbr.cwnd_gain = mode; 18126 log.u_bbr.pkts_out = orig_len; 18127 log.u_bbr.lt_epoch = len; 18128 log.u_bbr.delivered = line; 18129 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 18130 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18131 tcp_log_event(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 18132 len, &log, false, NULL, __func__, __LINE__, &tv); 18133 } 18134 } 18135 18136 18137 static struct mbuf * 18138 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 18139 struct rack_fast_send_blk *fsb, 18140 int32_t seglimit, int32_t segsize, int hw_tls) 18141 { 18142 #ifdef KERN_TLS 18143 struct ktls_session *tls, *ntls; 18144 #ifdef INVARIANTS 18145 struct mbuf *start; 18146 #endif 18147 #endif 18148 struct mbuf *m, *n, **np, *smb; 18149 struct mbuf *top; 18150 int32_t off, soff; 18151 int32_t len = *plen; 18152 int32_t fragsize; 18153 int32_t len_cp = 0; 18154 uint32_t mlen, frags; 18155 
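	/*
	 * This walks the mbuf chain starting at (the_m, the_off),
	 * cloning up to *plen bytes while honoring the TSO segment
	 * limit/size and never mixing TLS sessions; on return the fsb
	 * (when given) records where the copy stopped so the
	 * fast-output path can resume from that point.
	 */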
18156 soff = off = the_off; 18157 smb = m = the_m; 18158 np = ⊤ 18159 top = NULL; 18160 #ifdef KERN_TLS 18161 if (hw_tls && (m->m_flags & M_EXTPG)) 18162 tls = m->m_epg_tls; 18163 else 18164 tls = NULL; 18165 #ifdef INVARIANTS 18166 start = m; 18167 #endif 18168 #endif 18169 while (len > 0) { 18170 if (m == NULL) { 18171 *plen = len_cp; 18172 break; 18173 } 18174 #ifdef KERN_TLS 18175 if (hw_tls) { 18176 if (m->m_flags & M_EXTPG) 18177 ntls = m->m_epg_tls; 18178 else 18179 ntls = NULL; 18180 18181 /* 18182 * Avoid mixing TLS records with handshake 18183 * data or TLS records from different 18184 * sessions. 18185 */ 18186 if (tls != ntls) { 18187 MPASS(m != start); 18188 *plen = len_cp; 18189 break; 18190 } 18191 } 18192 #endif 18193 mlen = min(len, m->m_len - off); 18194 if (seglimit) { 18195 /* 18196 * For M_EXTPG mbufs, add 3 segments 18197 * + 1 in case we are crossing page boundaries 18198 * + 2 in case the TLS hdr/trailer are used 18199 * It is cheaper to just add the segments 18200 * than it is to take the cache miss to look 18201 * at the mbuf ext_pgs state in detail. 18202 */ 18203 if (m->m_flags & M_EXTPG) { 18204 fragsize = min(segsize, PAGE_SIZE); 18205 frags = 3; 18206 } else { 18207 fragsize = segsize; 18208 frags = 0; 18209 } 18210 18211 /* Break if we really can't fit anymore. */ 18212 if ((frags + 1) >= seglimit) { 18213 *plen = len_cp; 18214 break; 18215 } 18216 18217 /* 18218 * Reduce size if you can't copy the whole 18219 * mbuf. If we can't copy the whole mbuf, also 18220 * adjust len so the loop will end after this 18221 * mbuf. 18222 */ 18223 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 18224 mlen = (seglimit - frags - 1) * fragsize; 18225 len = mlen; 18226 *plen = len_cp + len; 18227 } 18228 frags += howmany(mlen, fragsize); 18229 if (frags == 0) 18230 frags++; 18231 seglimit -= frags; 18232 KASSERT(seglimit > 0, 18233 ("%s: seglimit went too low", __func__)); 18234 } 18235 n = m_get(M_NOWAIT, m->m_type); 18236 *np = n; 18237 if (n == NULL) 18238 goto nospace; 18239 n->m_len = mlen; 18240 soff += mlen; 18241 len_cp += n->m_len; 18242 if (m->m_flags & (M_EXT | M_EXTPG)) { 18243 n->m_data = m->m_data + off; 18244 mb_dupcl(n, m); 18245 } else { 18246 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 18247 (u_int)n->m_len); 18248 } 18249 len -= n->m_len; 18250 off = 0; 18251 m = m->m_next; 18252 np = &n->m_next; 18253 if (len || (soff == smb->m_len)) { 18254 /* 18255 * We have more so we move forward or 18256 * we have consumed the entire mbuf and 18257 * len has fell to 0. 18258 */ 18259 soff = 0; 18260 smb = m; 18261 } 18262 18263 } 18264 if (fsb != NULL) { 18265 fsb->m = smb; 18266 fsb->off = soff; 18267 if (smb) { 18268 /* 18269 * Save off the size of the mbuf. We do 18270 * this so that we can recognize when it 18271 * has been trimmed by sbcut() as acks 18272 * come in. 18273 */ 18274 fsb->o_m_len = smb->m_len; 18275 fsb->o_t_len = M_TRAILINGROOM(smb); 18276 } else { 18277 /* 18278 * This is the case where the next mbuf went to NULL. This 18279 * means with this copy we have sent everything in the sb. 18280 * In theory we could clear the fast_output flag, but lets 18281 * not since its possible that we could get more added 18282 * and acks that call the extend function which would let 18283 * us send more. 
18284 */ 18285 fsb->o_m_len = 0; 18286 fsb->o_t_len = 0; 18287 } 18288 } 18289 return (top); 18290 nospace: 18291 if (top) 18292 m_freem(top); 18293 return (NULL); 18294 18295 } 18296 18297 /* 18298 * This is a copy of m_copym(), taking the TSO segment size/limit 18299 * constraints into account, and advancing the sndptr as it goes. 18300 */ 18301 static struct mbuf * 18302 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen, 18303 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff) 18304 { 18305 struct mbuf *m, *n; 18306 int32_t soff; 18307 18308 m = rack->r_ctl.fsb.m; 18309 if (M_TRAILINGROOM(m) != rack->r_ctl.fsb.o_t_len) { 18310 /* 18311 * The trailing space changed, mbufs can grow 18312 * at the tail but they can't shrink from 18313 * it, KASSERT that. Adjust the orig_m_len to 18314 * compensate for this change. 18315 */ 18316 KASSERT((rack->r_ctl.fsb.o_t_len > M_TRAILINGROOM(m)), 18317 ("mbuf:%p rack:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n", 18318 m, 18319 rack, 18320 (intmax_t)M_TRAILINGROOM(m), 18321 rack->r_ctl.fsb.o_t_len, 18322 rack->r_ctl.fsb.o_m_len, 18323 m->m_len)); 18324 rack->r_ctl.fsb.o_m_len += (rack->r_ctl.fsb.o_t_len - M_TRAILINGROOM(m)); 18325 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(m); 18326 } 18327 if (m->m_len < rack->r_ctl.fsb.o_m_len) { 18328 /* 18329 * Mbuf shrank, trimmed off the top by an ack, our 18330 * offset changes. 18331 */ 18332 KASSERT((rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len)), 18333 ("mbuf:%p len:%u rack:%p oml:%u soff:%u\n", 18334 m, m->m_len, 18335 rack, rack->r_ctl.fsb.o_m_len, 18336 rack->r_ctl.fsb.off)); 18337 18338 if (rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len- m->m_len)) 18339 rack->r_ctl.fsb.off -= (rack->r_ctl.fsb.o_m_len - m->m_len); 18340 else 18341 rack->r_ctl.fsb.off = 0; 18342 rack->r_ctl.fsb.o_m_len = m->m_len; 18343 #ifdef INVARIANTS 18344 } else if (m->m_len > rack->r_ctl.fsb.o_m_len) { 18345 panic("rack:%p m:%p m_len grew outside of t_space compensation", 18346 rack, m); 18347 #endif 18348 } 18349 soff = rack->r_ctl.fsb.off; 18350 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff)); 18351 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen)); 18352 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?", 18353 __FUNCTION__, 18354 rack, *plen, m, m->m_len)); 18355 /* Save off the right location before we copy and advance */ 18356 *s_soff = soff; 18357 *s_mb = rack->r_ctl.fsb.m; 18358 n = rack_fo_base_copym(m, soff, plen, 18359 &rack->r_ctl.fsb, 18360 seglimit, segsize, rack->r_ctl.fsb.hw_tls); 18361 return (n); 18362 } 18363 18364 /* Log the buffer level */ 18365 static void 18366 rack_log_queue_level(struct tcpcb *tp, struct tcp_rack *rack, 18367 int len, struct timeval *tv, 18368 uint32_t cts) 18369 { 18370 uint32_t p_rate = 0, p_queue = 0, err = 0; 18371 union tcp_log_stackspecific log; 18372 18373 #ifdef RATELIMIT 18374 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); 18375 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); 18376 #endif 18377 memset(&log, 0, sizeof(log)); 18378 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18379 log.u_bbr.flex1 = p_rate; 18380 log.u_bbr.flex2 = p_queue; 18381 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; 18382 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; 18383 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; 18384 log.u_bbr.flex7 = 99; 18385 log.u_bbr.flex8 = 0; 18386 log.u_bbr.pkts_out = err; 18387 log.u_bbr.delRate = rack->r_ctl.crte->rate; 18388 log.u_bbr.timeStamp = cts; 18389 
log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
18390 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0,
18391 len, &log, false, NULL, __func__, __LINE__, tv);
18392
18393 }
18394
18395 static uint32_t
18396 rack_check_queue_level(struct tcp_rack *rack, struct tcpcb *tp,
18397 struct timeval *tv, uint32_t cts, int len, uint32_t segsiz)
18398 {
18399 uint64_t lentime = 0;
18400 #ifdef RATELIMIT
18401 uint32_t p_rate = 0, p_queue = 0, err;
18402 union tcp_log_stackspecific log;
18403 uint64_t bw;
18404
18405 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue);
18406 /* Failed or queue is zero */
18407 if (err || (p_queue == 0)) {
18408 lentime = 0;
18409 goto out;
18410 }
18411 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate);
18412 if (err) {
18413 lentime = 0;
18414 goto out;
18415 }
18416 /*
18417 * If we reach here we have some bytes in
18418 * the queue. The number returned is a value
18419 * between 0 and 0xffff where ffff is full
18420 * and 0 is empty. So how best to make this into
18421 * something usable?
18422 *
18423 * The "safer" way is to take the b/w obtained
18424 * from the query (which should be our b/w rate)
18425 * and pretend that a full send (our rc_pace_max_segs)
18426 * is outstanding, counting each of its MSS-sized
18427 * segments as a full ethernet frame on the wire,
18428 * and compute how long that would take to drain.
18429 */
18430 bw = p_rate / 8;
18431 if (bw) {
18432 lentime = (rack->r_ctl.rc_pace_max_segs / segsiz);
18433 lentime *= ETHERNET_SEGMENT_SIZE;
18434 lentime *= (uint64_t)HPTS_USEC_IN_SEC;
18435 lentime /= bw;
18436 } else {
18437 /* TSNH -- KASSERT? */
18438 lentime = 0;
18439 }
18440 out:
18441 if (tcp_bblogging_on(tp)) {
18442 memset(&log, 0, sizeof(log));
18443 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
18444 log.u_bbr.flex1 = p_rate;
18445 log.u_bbr.flex2 = p_queue;
18446 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using;
18447 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs;
18448 log.u_bbr.flex6 = rack->r_ctl.crte->time_between;
18449 log.u_bbr.flex7 = 99;
18450 log.u_bbr.flex8 = 0;
18451 log.u_bbr.pkts_out = err;
18452 log.u_bbr.delRate = rack->r_ctl.crte->rate;
18453 log.u_bbr.cur_del_rate = lentime;
18454 log.u_bbr.timeStamp = cts;
18455 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
18456 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0,
18457 len, &log, false, NULL, __func__, __LINE__,tv);
18458 }
18459 #endif
18460 return ((uint32_t)lentime);
18461 }
18462
18463 static int
18464 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm,
18465 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp)
18466 {
18467 /*
18468 * Enter the fast retransmit path. We are given that a sched_pin is
18469 * in place (if accounting is compiled in) and the cycle count taken
18470 * at the entry is in ts_val. The concept here is that the rsm
18471 * now holds the mbuf offsets and such, so we can directly transmit
18472 * without a lot of overhead; the len field is already set for
18473 * us to prohibit us from sending too much (usually it's 1 MSS).
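 * If anything below fails we just goto failed and return -1; the
 * caller is then expected to fall back to the regular (slower)
 * rack_output() path for this retransmission.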
18474 */ 18475 struct ip *ip = NULL; 18476 struct udphdr *udp = NULL; 18477 struct tcphdr *th = NULL; 18478 struct mbuf *m = NULL; 18479 struct inpcb *inp; 18480 uint8_t *cpto; 18481 struct tcp_log_buffer *lgb; 18482 #ifdef TCP_ACCOUNTING 18483 uint64_t crtsc; 18484 int cnt_thru = 1; 18485 #endif 18486 struct tcpopt to; 18487 u_char opt[TCP_MAXOLEN]; 18488 uint32_t hdrlen, optlen; 18489 int32_t slot, segsiz, max_val, tso = 0, error = 0, ulen = 0; 18490 uint16_t flags; 18491 uint32_t if_hw_tsomaxsegcount = 0, startseq; 18492 uint32_t if_hw_tsomaxsegsize; 18493 int32_t ip_sendflag = IP_NO_SND_TAG_RL; 18494 18495 #ifdef INET6 18496 struct ip6_hdr *ip6 = NULL; 18497 18498 if (rack->r_is_v6) { 18499 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 18500 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 18501 } else 18502 #endif /* INET6 */ 18503 { 18504 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 18505 hdrlen = sizeof(struct tcpiphdr); 18506 } 18507 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 18508 goto failed; 18509 } 18510 if (doing_tlp) { 18511 /* Its a TLP add the flag, it may already be there but be sure */ 18512 rsm->r_flags |= RACK_TLP; 18513 } else { 18514 /* If it was a TLP it is not not on this retransmit */ 18515 rsm->r_flags &= ~RACK_TLP; 18516 } 18517 startseq = rsm->r_start; 18518 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 18519 inp = rack->rc_inp; 18520 to.to_flags = 0; 18521 flags = tcp_outflags[tp->t_state]; 18522 if (flags & (TH_SYN|TH_RST)) { 18523 goto failed; 18524 } 18525 if (rsm->r_flags & RACK_HAS_FIN) { 18526 /* We can't send a FIN here */ 18527 goto failed; 18528 } 18529 if (flags & TH_FIN) { 18530 /* We never send a FIN */ 18531 flags &= ~TH_FIN; 18532 } 18533 if (tp->t_flags & TF_RCVD_TSTMP) { 18534 to.to_tsval = ms_cts + tp->ts_offset; 18535 to.to_tsecr = tp->ts_recent; 18536 to.to_flags = TOF_TS; 18537 } 18538 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18539 /* TCP-MD5 (RFC2385). 
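 * Only the option space is reserved here (via tcp_addoptions()
 * below); the digest itself is computed just before transmit by
 * TCPMD5_OUTPUT().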
*/ 18540 if (tp->t_flags & TF_SIGNATURE) 18541 to.to_flags |= TOF_SIGNATURE; 18542 #endif 18543 optlen = tcp_addoptions(&to, opt); 18544 hdrlen += optlen; 18545 udp = rack->r_ctl.fsb.udp; 18546 if (udp) 18547 hdrlen += sizeof(struct udphdr); 18548 if (rack->r_ctl.rc_pace_max_segs) 18549 max_val = rack->r_ctl.rc_pace_max_segs; 18550 else if (rack->rc_user_set_max_segs) 18551 max_val = rack->rc_user_set_max_segs * segsiz; 18552 else 18553 max_val = len; 18554 if ((tp->t_flags & TF_TSO) && 18555 V_tcp_do_tso && 18556 (len > segsiz) && 18557 (tp->t_port == 0)) 18558 tso = 1; 18559 #ifdef INET6 18560 if (MHLEN < hdrlen + max_linkhdr) 18561 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 18562 else 18563 #endif 18564 m = m_gethdr(M_NOWAIT, MT_DATA); 18565 if (m == NULL) 18566 goto failed; 18567 m->m_data += max_linkhdr; 18568 m->m_len = hdrlen; 18569 th = rack->r_ctl.fsb.th; 18570 /* Establish the len to send */ 18571 if (len > max_val) 18572 len = max_val; 18573 if ((tso) && (len + optlen > segsiz)) { 18574 uint32_t if_hw_tsomax; 18575 int32_t max_len; 18576 18577 /* extract TSO information */ 18578 if_hw_tsomax = tp->t_tsomax; 18579 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 18580 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 18581 /* 18582 * Check if we should limit by maximum payload 18583 * length: 18584 */ 18585 if (if_hw_tsomax != 0) { 18586 /* compute maximum TSO length */ 18587 max_len = (if_hw_tsomax - hdrlen - 18588 max_linkhdr); 18589 if (max_len <= 0) { 18590 goto failed; 18591 } else if (len > max_len) { 18592 len = max_len; 18593 } 18594 } 18595 if (len <= segsiz) { 18596 /* 18597 * In case there are too many small fragments don't 18598 * use TSO: 18599 */ 18600 tso = 0; 18601 } 18602 } else { 18603 tso = 0; 18604 } 18605 if ((tso == 0) && (len > segsiz)) 18606 len = segsiz; 18607 (void)tcp_get_usecs(tv); 18608 if ((len == 0) || 18609 (len <= MHLEN - hdrlen - max_linkhdr)) { 18610 goto failed; 18611 } 18612 th->th_seq = htonl(rsm->r_start); 18613 th->th_ack = htonl(tp->rcv_nxt); 18614 /* 18615 * The PUSH bit should only be applied 18616 * if the full retransmission is made. If 18617 * we are sending less than this is the 18618 * left hand edge and should not have 18619 * the PUSH bit. 
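 * (RACK_HAD_PUSH was recorded on the rsm when the data was first
 * sent, so PUSH is only re-applied when that whole block is being
 * resent.)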
18620 */ 18621 if ((rsm->r_flags & RACK_HAD_PUSH) && 18622 (len == (rsm->r_end - rsm->r_start))) 18623 flags |= TH_PUSH; 18624 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 18625 if (th->th_win == 0) { 18626 tp->t_sndzerowin++; 18627 tp->t_flags |= TF_RXWIN0SENT; 18628 } else 18629 tp->t_flags &= ~TF_RXWIN0SENT; 18630 if (rsm->r_flags & RACK_TLP) { 18631 /* 18632 * TLP should not count in retran count, but 18633 * in its own bin 18634 */ 18635 counter_u64_add(rack_tlp_retran, 1); 18636 counter_u64_add(rack_tlp_retran_bytes, len); 18637 } else { 18638 tp->t_sndrexmitpack++; 18639 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 18640 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 18641 } 18642 #ifdef STATS 18643 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 18644 len); 18645 #endif 18646 if (rsm->m == NULL) 18647 goto failed; 18648 if (rsm->m && 18649 ((rsm->orig_m_len != rsm->m->m_len) || 18650 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { 18651 /* Fix up the orig_m_len and possibly the mbuf offset */ 18652 rack_adjust_orig_mlen(rsm); 18653 } 18654 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 18655 if (len <= segsiz) { 18656 /* 18657 * Must have ran out of mbufs for the copy 18658 * shorten it to no longer need tso. Lets 18659 * not put on sendalot since we are low on 18660 * mbufs. 18661 */ 18662 tso = 0; 18663 } 18664 if ((m->m_next == NULL) || (len <= 0)){ 18665 goto failed; 18666 } 18667 if (udp) { 18668 if (rack->r_is_v6) 18669 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18670 else 18671 ulen = hdrlen + len - sizeof(struct ip); 18672 udp->uh_ulen = htons(ulen); 18673 } 18674 m->m_pkthdr.rcvif = (struct ifnet *)0; 18675 if (TCPS_HAVERCVDSYN(tp->t_state) && 18676 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 18677 int ect = tcp_ecn_output_established(tp, &flags, len, true); 18678 if ((tp->t_state == TCPS_SYN_RECEIVED) && 18679 (tp->t_flags2 & TF2_ECN_SND_ECE)) 18680 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 18681 #ifdef INET6 18682 if (rack->r_is_v6) { 18683 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 18684 ip6->ip6_flow |= htonl(ect << 20); 18685 } 18686 else 18687 #endif 18688 { 18689 ip->ip_tos &= ~IPTOS_ECN_MASK; 18690 ip->ip_tos |= ect; 18691 } 18692 } 18693 if (rack->r_ctl.crte != NULL) { 18694 /* See if we can send via the hw queue */ 18695 slot = rack_check_queue_level(rack, tp, tv, cts, len, segsiz); 18696 /* If there is nothing in queue (no pacing time) we can send via the hw queue */ 18697 if (slot == 0) 18698 ip_sendflag = 0; 18699 } 18700 tcp_set_flags(th, flags); 18701 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 18702 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18703 if (to.to_flags & TOF_SIGNATURE) { 18704 /* 18705 * Calculate MD5 signature and put it into the place 18706 * determined before. 18707 * NOTE: since TCP options buffer doesn't point into 18708 * mbuf's data, calculate offset and use it. 18709 */ 18710 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 18711 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 18712 /* 18713 * Do not send segment if the calculation of MD5 18714 * digest has failed. 
18715 */ 18716 goto failed; 18717 } 18718 } 18719 #endif 18720 #ifdef INET6 18721 if (rack->r_is_v6) { 18722 if (tp->t_port) { 18723 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 18724 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18725 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 18726 th->th_sum = htons(0); 18727 UDPSTAT_INC(udps_opackets); 18728 } else { 18729 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 18730 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18731 th->th_sum = in6_cksum_pseudo(ip6, 18732 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 18733 0); 18734 } 18735 } 18736 #endif 18737 #if defined(INET6) && defined(INET) 18738 else 18739 #endif 18740 #ifdef INET 18741 { 18742 if (tp->t_port) { 18743 m->m_pkthdr.csum_flags = CSUM_UDP; 18744 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18745 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 18746 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 18747 th->th_sum = htons(0); 18748 UDPSTAT_INC(udps_opackets); 18749 } else { 18750 m->m_pkthdr.csum_flags = CSUM_TCP; 18751 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18752 th->th_sum = in_pseudo(ip->ip_src.s_addr, 18753 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 18754 IPPROTO_TCP + len + optlen)); 18755 } 18756 /* IP version must be set here for ipv4/ipv6 checking later */ 18757 KASSERT(ip->ip_v == IPVERSION, 18758 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 18759 } 18760 #endif 18761 if (tso) { 18762 /* 18763 * Here we use segsiz since we have no added options besides 18764 * any standard timestamp options (no DSACKs or SACKS are sent 18765 * via either fast-path). 18766 */ 18767 KASSERT(len > segsiz, 18768 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 18769 m->m_pkthdr.csum_flags |= CSUM_TSO; 18770 m->m_pkthdr.tso_segsz = segsiz; 18771 } 18772 #ifdef INET6 18773 if (rack->r_is_v6) { 18774 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 18775 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 18776 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 18777 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18778 else 18779 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18780 } 18781 #endif 18782 #if defined(INET) && defined(INET6) 18783 else 18784 #endif 18785 #ifdef INET 18786 { 18787 ip->ip_len = htons(m->m_pkthdr.len); 18788 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 18789 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 18790 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18791 if (tp->t_port == 0 || len < V_tcp_minmss) { 18792 ip->ip_off |= htons(IP_DF); 18793 } 18794 } else { 18795 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18796 } 18797 } 18798 #endif 18799 if (doing_tlp == 0) { 18800 /* Set we retransmitted */ 18801 rack->rc_gp_saw_rec = 1; 18802 } else { 18803 /* Its a TLP set ca or ss */ 18804 if (tp->snd_cwnd > tp->snd_ssthresh) { 18805 /* Set we sent in CA */ 18806 rack->rc_gp_saw_ca = 1; 18807 } else { 18808 /* Set we sent in SS */ 18809 rack->rc_gp_saw_ss = 1; 18810 } 18811 } 18812 /* Time to copy in our header */ 18813 cpto = mtod(m, uint8_t *); 18814 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 18815 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 18816 if (optlen) { 18817 bcopy(opt, th + 1, optlen); 18818 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 18819 } else { 18820 th->th_off = sizeof(struct tcphdr) >> 2; 18821 } 18822 if (tcp_bblogging_on(rack->rc_tp)) { 18823 union tcp_log_stackspecific log; 18824 18825 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 18826 
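/*
 * This retransmission is happening because the peer collapsed
 * its window; log it and bump the collapsed-window counters so
 * the cost of the collapse is visible in the stats.
 */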
rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 18827 counter_u64_add(rack_collapsed_win_rxt, 1); 18828 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 18829 } 18830 memset(&log, 0, sizeof(log)); 18831 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18832 if (rack->rack_no_prr) 18833 log.u_bbr.flex1 = 0; 18834 else 18835 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 18836 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 18837 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 18838 log.u_bbr.flex4 = max_val; 18839 /* Save off the early/late values */ 18840 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18841 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 18842 log.u_bbr.bw_inuse = rack_get_bw(rack); 18843 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 18844 if (doing_tlp == 0) 18845 log.u_bbr.flex8 = 1; 18846 else 18847 log.u_bbr.flex8 = 2; 18848 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 18849 log.u_bbr.flex7 = 55; 18850 log.u_bbr.pkts_out = tp->t_maxseg; 18851 log.u_bbr.timeStamp = cts; 18852 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18853 if (rsm->r_rtr_cnt > 0) { 18854 /* 18855 * When we have a retransmit we want to log the 18856 * burst at send and flight at send from before. 18857 */ 18858 log.u_bbr.flex5 = rsm->r_fas; 18859 log.u_bbr.bbr_substate = rsm->r_bas; 18860 } else { 18861 /* 18862 * This is currently unlikely until we do the 18863 * packet pair probes but I will add it for completeness. 18864 */ 18865 log.u_bbr.flex5 = log.u_bbr.inflight; 18866 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 18867 } 18868 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 18869 log.u_bbr.delivered = 0; 18870 log.u_bbr.rttProp = (uintptr_t)rsm; 18871 log.u_bbr.delRate = rsm->r_flags; 18872 log.u_bbr.delRate <<= 31; 18873 log.u_bbr.delRate |= rack->r_must_retran; 18874 log.u_bbr.delRate <<= 1; 18875 log.u_bbr.delRate |= 1; 18876 log.u_bbr.pkt_epoch = __LINE__; 18877 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 18878 len, &log, false, NULL, __func__, __LINE__, tv); 18879 } else 18880 lgb = NULL; 18881 if ((rack->r_ctl.crte != NULL) && 18882 tcp_bblogging_on(tp)) { 18883 rack_log_queue_level(tp, rack, len, tv, cts); 18884 } 18885 #ifdef INET6 18886 if (rack->r_is_v6) { 18887 error = ip6_output(m, inp->in6p_outputopts, 18888 &inp->inp_route6, 18889 ip_sendflag, NULL, NULL, inp); 18890 } 18891 else 18892 #endif 18893 #ifdef INET 18894 { 18895 error = ip_output(m, NULL, 18896 &inp->inp_route, 18897 ip_sendflag, 0, inp); 18898 } 18899 #endif 18900 m = NULL; 18901 if (lgb) { 18902 lgb->tlb_errno = error; 18903 lgb = NULL; 18904 } 18905 /* Move snd_nxt to snd_max so we don't have false retransmissions */ 18906 tp->snd_nxt = tp->snd_max; 18907 if (error) { 18908 goto failed; 18909 } else if (rack->rc_hw_nobuf && (ip_sendflag != IP_NO_SND_TAG_RL)) { 18910 rack->rc_hw_nobuf = 0; 18911 rack->r_ctl.rc_agg_delayed = 0; 18912 rack->r_early = 0; 18913 rack->r_late = 0; 18914 rack->r_ctl.rc_agg_early = 0; 18915 } 18916 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 18917 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls, segsiz); 18918 if (doing_tlp) { 18919 rack->rc_tlp_in_progress = 1; 18920 rack->r_ctl.rc_tlp_cnt_out++; 18921 } 18922 if (error == 0) { 18923 counter_u64_add(rack_total_bytes, len); 18924 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 18925 if (doing_tlp) { 18926 rack->rc_last_sent_tlp_past_cumack = 0; 18927 
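/*
 * Record which bytes this TLP covered so later ACK processing
 * can recognize the probe (e.g. when a DSACK for it arrives)
 * and account for it correctly instead of treating it as loss.
 */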
rack->rc_last_sent_tlp_seq_valid = 1; 18928 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 18929 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 18930 } 18931 if (rack->r_ctl.rc_prr_sndcnt >= len) 18932 rack->r_ctl.rc_prr_sndcnt -= len; 18933 else 18934 rack->r_ctl.rc_prr_sndcnt = 0; 18935 } 18936 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 18937 rack->forced_ack = 0; /* If we send something zap the FA flag */ 18938 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 18939 rack->r_ctl.retran_during_recovery += len; 18940 { 18941 int idx; 18942 18943 idx = (len / segsiz) + 3; 18944 if (idx >= TCP_MSS_ACCT_ATIMER) 18945 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 18946 else 18947 counter_u64_add(rack_out_size[idx], 1); 18948 } 18949 if (tp->t_rtttime == 0) { 18950 tp->t_rtttime = ticks; 18951 tp->t_rtseq = startseq; 18952 KMOD_TCPSTAT_INC(tcps_segstimed); 18953 } 18954 counter_u64_add(rack_fto_rsm_send, 1); 18955 if (error && (error == ENOBUFS)) { 18956 if (rack->r_ctl.crte != NULL) { 18957 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); 18958 if (tcp_bblogging_on(rack->rc_tp)) 18959 rack_log_queue_level(tp, rack, len, tv, cts); 18960 } else 18961 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); 18962 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 18963 if (rack->rc_enobuf < 0x7f) 18964 rack->rc_enobuf++; 18965 if (slot < (10 * HPTS_USEC_IN_MSEC)) 18966 slot = 10 * HPTS_USEC_IN_MSEC; 18967 if (rack->r_ctl.crte != NULL) { 18968 counter_u64_add(rack_saw_enobuf_hw, 1); 18969 tcp_rl_log_enobuf(rack->r_ctl.crte); 18970 } 18971 counter_u64_add(rack_saw_enobuf, 1); 18972 } else { 18973 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz, __LINE__); 18974 } 18975 rack_start_hpts_timer(rack, tp, cts, slot, len, 0); 18976 #ifdef TCP_ACCOUNTING 18977 crtsc = get_cyclecount(); 18978 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18979 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 18980 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 18981 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); 18982 } 18983 sched_unpin(); 18984 #endif 18985 return (0); 18986 failed: 18987 if (m) 18988 m_free(m); 18989 return (-1); 18990 } 18991 18992 static void 18993 rack_sndbuf_autoscale(struct tcp_rack *rack) 18994 { 18995 /* 18996 * Automatic sizing of send socket buffer. Often the send buffer 18997 * size is not optimally adjusted to the actual network conditions 18998 * at hand (delay bandwidth product). Setting the buffer size too 18999 * small limits throughput on links with high bandwidth and high 19000 * delay (eg. trans-continental/oceanic links). Setting the 19001 * buffer size too big consumes too much real kernel memory, 19002 * especially with many connections on busy servers. 19003 * 19004 * The criteria to step up the send buffer one notch are: 19005 * 1. receive window of remote host is larger than send buffer 19006 * (with a fudge factor of 5/4th); 19007 * 2. send buffer is filled to 7/8th with data (so we actually 19008 * have data to make use of it); 19009 * 3. send buffer fill has not hit maximal automatic size; 19010 * 4. our send window (slow start and cogestion controlled) is 19011 * larger than sent but unacknowledged data in send buffer. 19012 * 19013 * Note that the rack version moves things much faster since 19014 * we want to avoid hitting cache lines in the rack_fast_output() 19015 * path so this is called much less often and thus moves 19016 * the SB forward by a percentage. 
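 * As a purely illustrative example: with rack_autosndbuf_inc at 20
 * (percent) and sb_hiwat at 64k, scaleup starts at 12.8k, has the
 * current 64k high-water mark added to it, and the buffer is grown
 * to roughly 76.8k (subject to the V_tcp_autosndbuf_max cap).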
19017 */ 19018 struct socket *so; 19019 struct tcpcb *tp; 19020 uint32_t sendwin, scaleup; 19021 19022 tp = rack->rc_tp; 19023 so = rack->rc_inp->inp_socket; 19024 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); 19025 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { 19026 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && 19027 sbused(&so->so_snd) >= 19028 (so->so_snd.sb_hiwat / 8 * 7) && 19029 sbused(&so->so_snd) < V_tcp_autosndbuf_max && 19030 sendwin >= (sbused(&so->so_snd) - 19031 (tp->snd_max - tp->snd_una))) { 19032 if (rack_autosndbuf_inc) 19033 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; 19034 else 19035 scaleup = V_tcp_autosndbuf_inc; 19036 if (scaleup < V_tcp_autosndbuf_inc) 19037 scaleup = V_tcp_autosndbuf_inc; 19038 scaleup += so->so_snd.sb_hiwat; 19039 if (scaleup > V_tcp_autosndbuf_max) 19040 scaleup = V_tcp_autosndbuf_max; 19041 if (!sbreserve_locked(so, SO_SND, scaleup, curthread)) 19042 so->so_snd.sb_flags &= ~SB_AUTOSIZE; 19043 } 19044 } 19045 } 19046 19047 static int 19048 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val, 19049 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long *tot_len, int *send_err, int line) 19050 { 19051 /* 19052 * Enter to do fast output. We are given that the sched_pin is 19053 * in place (if accounting is compiled in) and the cycle count taken 19054 * at entry is in place in ts_val. The idea here is that 19055 * we know how many more bytes needs to be sent (presumably either 19056 * during pacing or to fill the cwnd and that was greater than 19057 * the max-burst). We have how much to send and all the info we 19058 * need to just send. 19059 */ 19060 #ifdef INET 19061 struct ip *ip = NULL; 19062 #endif 19063 struct udphdr *udp = NULL; 19064 struct tcphdr *th = NULL; 19065 struct mbuf *m, *s_mb; 19066 struct inpcb *inp; 19067 uint8_t *cpto; 19068 struct tcp_log_buffer *lgb; 19069 #ifdef TCP_ACCOUNTING 19070 uint64_t crtsc; 19071 #endif 19072 struct tcpopt to; 19073 u_char opt[TCP_MAXOLEN]; 19074 uint32_t hdrlen, optlen; 19075 #ifdef TCP_ACCOUNTING 19076 int cnt_thru = 1; 19077 #endif 19078 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0; 19079 uint16_t flags; 19080 uint32_t s_soff; 19081 uint32_t if_hw_tsomaxsegcount = 0, startseq; 19082 uint32_t if_hw_tsomaxsegsize; 19083 uint32_t add_flag = RACK_SENT_FP; 19084 #ifdef INET6 19085 struct ip6_hdr *ip6 = NULL; 19086 19087 if (rack->r_is_v6) { 19088 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 19089 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 19090 } else 19091 #endif /* INET6 */ 19092 { 19093 #ifdef INET 19094 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 19095 hdrlen = sizeof(struct tcpiphdr); 19096 #endif 19097 } 19098 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 19099 m = NULL; 19100 goto failed; 19101 } 19102 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 19103 startseq = tp->snd_max; 19104 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 19105 inp = rack->rc_inp; 19106 len = rack->r_ctl.fsb.left_to_send; 19107 to.to_flags = 0; 19108 flags = rack->r_ctl.fsb.tcp_flags; 19109 if (tp->t_flags & TF_RCVD_TSTMP) { 19110 to.to_tsval = ms_cts + tp->ts_offset; 19111 to.to_tsecr = tp->ts_recent; 19112 to.to_flags = TOF_TS; 19113 } 19114 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 19115 /* TCP-MD5 (RFC2385). 
*/ 19116 if (tp->t_flags & TF_SIGNATURE) 19117 to.to_flags |= TOF_SIGNATURE; 19118 #endif 19119 optlen = tcp_addoptions(&to, opt); 19120 hdrlen += optlen; 19121 udp = rack->r_ctl.fsb.udp; 19122 if (udp) 19123 hdrlen += sizeof(struct udphdr); 19124 if (rack->r_ctl.rc_pace_max_segs) 19125 max_val = rack->r_ctl.rc_pace_max_segs; 19126 else if (rack->rc_user_set_max_segs) 19127 max_val = rack->rc_user_set_max_segs * segsiz; 19128 else 19129 max_val = len; 19130 if ((tp->t_flags & TF_TSO) && 19131 V_tcp_do_tso && 19132 (len > segsiz) && 19133 (tp->t_port == 0)) 19134 tso = 1; 19135 again: 19136 #ifdef INET6 19137 if (MHLEN < hdrlen + max_linkhdr) 19138 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 19139 else 19140 #endif 19141 m = m_gethdr(M_NOWAIT, MT_DATA); 19142 if (m == NULL) 19143 goto failed; 19144 m->m_data += max_linkhdr; 19145 m->m_len = hdrlen; 19146 th = rack->r_ctl.fsb.th; 19147 /* Establish the len to send */ 19148 if (len > max_val) 19149 len = max_val; 19150 if ((tso) && (len + optlen > segsiz)) { 19151 uint32_t if_hw_tsomax; 19152 int32_t max_len; 19153 19154 /* extract TSO information */ 19155 if_hw_tsomax = tp->t_tsomax; 19156 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 19157 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 19158 /* 19159 * Check if we should limit by maximum payload 19160 * length: 19161 */ 19162 if (if_hw_tsomax != 0) { 19163 /* compute maximum TSO length */ 19164 max_len = (if_hw_tsomax - hdrlen - 19165 max_linkhdr); 19166 if (max_len <= 0) { 19167 goto failed; 19168 } else if (len > max_len) { 19169 len = max_len; 19170 } 19171 } 19172 if (len <= segsiz) { 19173 /* 19174 * In case there are too many small fragments don't 19175 * use TSO: 19176 */ 19177 tso = 0; 19178 } 19179 } else { 19180 tso = 0; 19181 } 19182 if ((tso == 0) && (len > segsiz)) 19183 len = segsiz; 19184 (void)tcp_get_usecs(tv); 19185 if ((len == 0) || 19186 (len <= MHLEN - hdrlen - max_linkhdr)) { 19187 goto failed; 19188 } 19189 sb_offset = tp->snd_max - tp->snd_una; 19190 th->th_seq = htonl(tp->snd_max); 19191 th->th_ack = htonl(tp->rcv_nxt); 19192 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 19193 if (th->th_win == 0) { 19194 tp->t_sndzerowin++; 19195 tp->t_flags |= TF_RXWIN0SENT; 19196 } else 19197 tp->t_flags &= ~TF_RXWIN0SENT; 19198 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 19199 KMOD_TCPSTAT_INC(tcps_sndpack); 19200 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 19201 #ifdef STATS 19202 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 19203 len); 19204 #endif 19205 if (rack->r_ctl.fsb.m == NULL) 19206 goto failed; 19207 19208 /* s_mb and s_soff are saved for rack_log_output */ 19209 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 19210 &s_mb, &s_soff); 19211 if (len <= segsiz) { 19212 /* 19213 * Must have ran out of mbufs for the copy 19214 * shorten it to no longer need tso. Lets 19215 * not put on sendalot since we are low on 19216 * mbufs. 
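 * A shorter, non-TSO send still makes forward progress; the pacing
 * timer armed at the end of this function should bring us back here
 * once buffers free up.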
19217 */ 19218 tso = 0; 19219 } 19220 if (rack->r_ctl.fsb.rfo_apply_push && 19221 (len == rack->r_ctl.fsb.left_to_send)) { 19222 flags |= TH_PUSH; 19223 add_flag |= RACK_HAD_PUSH; 19224 } 19225 if ((m->m_next == NULL) || (len <= 0)){ 19226 goto failed; 19227 } 19228 if (udp) { 19229 if (rack->r_is_v6) 19230 ulen = hdrlen + len - sizeof(struct ip6_hdr); 19231 else 19232 ulen = hdrlen + len - sizeof(struct ip); 19233 udp->uh_ulen = htons(ulen); 19234 } 19235 m->m_pkthdr.rcvif = (struct ifnet *)0; 19236 if (TCPS_HAVERCVDSYN(tp->t_state) && 19237 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 19238 int ect = tcp_ecn_output_established(tp, &flags, len, false); 19239 if ((tp->t_state == TCPS_SYN_RECEIVED) && 19240 (tp->t_flags2 & TF2_ECN_SND_ECE)) 19241 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 19242 #ifdef INET6 19243 if (rack->r_is_v6) { 19244 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 19245 ip6->ip6_flow |= htonl(ect << 20); 19246 } 19247 else 19248 #endif 19249 { 19250 #ifdef INET 19251 ip->ip_tos &= ~IPTOS_ECN_MASK; 19252 ip->ip_tos |= ect; 19253 #endif 19254 } 19255 } 19256 tcp_set_flags(th, flags); 19257 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 19258 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 19259 if (to.to_flags & TOF_SIGNATURE) { 19260 /* 19261 * Calculate MD5 signature and put it into the place 19262 * determined before. 19263 * NOTE: since TCP options buffer doesn't point into 19264 * mbuf's data, calculate offset and use it. 19265 */ 19266 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 19267 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 19268 /* 19269 * Do not send segment if the calculation of MD5 19270 * digest has failed. 19271 */ 19272 goto failed; 19273 } 19274 } 19275 #endif 19276 #ifdef INET6 19277 if (rack->r_is_v6) { 19278 if (tp->t_port) { 19279 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 19280 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 19281 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 19282 th->th_sum = htons(0); 19283 UDPSTAT_INC(udps_opackets); 19284 } else { 19285 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 19286 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 19287 th->th_sum = in6_cksum_pseudo(ip6, 19288 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 19289 0); 19290 } 19291 } 19292 #endif 19293 #if defined(INET6) && defined(INET) 19294 else 19295 #endif 19296 #ifdef INET 19297 { 19298 if (tp->t_port) { 19299 m->m_pkthdr.csum_flags = CSUM_UDP; 19300 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 19301 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 19302 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 19303 th->th_sum = htons(0); 19304 UDPSTAT_INC(udps_opackets); 19305 } else { 19306 m->m_pkthdr.csum_flags = CSUM_TCP; 19307 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 19308 th->th_sum = in_pseudo(ip->ip_src.s_addr, 19309 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 19310 IPPROTO_TCP + len + optlen)); 19311 } 19312 /* IP version must be set here for ipv4/ipv6 checking later */ 19313 KASSERT(ip->ip_v == IPVERSION, 19314 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 19315 } 19316 #endif 19317 if (tso) { 19318 /* 19319 * Here we use segsiz since we have no added options besides 19320 * any standard timestamp options (no DSACKs or SACKS are sent 19321 * via either fast-path). 
19322 */ 19323 KASSERT(len > segsiz, 19324 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 19325 m->m_pkthdr.csum_flags |= CSUM_TSO; 19326 m->m_pkthdr.tso_segsz = segsiz; 19327 } 19328 #ifdef INET6 19329 if (rack->r_is_v6) { 19330 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 19331 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 19332 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 19333 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 19334 else 19335 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 19336 } 19337 #endif 19338 #if defined(INET) && defined(INET6) 19339 else 19340 #endif 19341 #ifdef INET 19342 { 19343 ip->ip_len = htons(m->m_pkthdr.len); 19344 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 19345 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 19346 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 19347 if (tp->t_port == 0 || len < V_tcp_minmss) { 19348 ip->ip_off |= htons(IP_DF); 19349 } 19350 } else { 19351 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 19352 } 19353 } 19354 #endif 19355 if (tp->snd_cwnd > tp->snd_ssthresh) { 19356 /* Set we sent in CA */ 19357 rack->rc_gp_saw_ca = 1; 19358 } else { 19359 /* Set we sent in SS */ 19360 rack->rc_gp_saw_ss = 1; 19361 } 19362 /* Time to copy in our header */ 19363 cpto = mtod(m, uint8_t *); 19364 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 19365 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 19366 if (optlen) { 19367 bcopy(opt, th + 1, optlen); 19368 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 19369 } else { 19370 th->th_off = sizeof(struct tcphdr) >> 2; 19371 } 19372 if ((rack->r_ctl.crte != NULL) && 19373 tcp_bblogging_on(tp)) { 19374 rack_log_queue_level(tp, rack, len, tv, cts); 19375 } 19376 if (tcp_bblogging_on(rack->rc_tp)) { 19377 union tcp_log_stackspecific log; 19378 19379 memset(&log, 0, sizeof(log)); 19380 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 19381 if (rack->rack_no_prr) 19382 log.u_bbr.flex1 = 0; 19383 else 19384 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 19385 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 19386 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 19387 log.u_bbr.flex4 = max_val; 19388 /* Save off the early/late values */ 19389 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 19390 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 19391 log.u_bbr.bw_inuse = rack_get_bw(rack); 19392 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 19393 log.u_bbr.flex8 = 0; 19394 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 19395 log.u_bbr.flex7 = 44; 19396 log.u_bbr.pkts_out = tp->t_maxseg; 19397 log.u_bbr.timeStamp = cts; 19398 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 19399 log.u_bbr.flex5 = log.u_bbr.inflight; 19400 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 19401 log.u_bbr.delivered = rack->r_ctl.fsb.left_to_send; 19402 log.u_bbr.rttProp = 0; 19403 log.u_bbr.delRate = rack->r_must_retran; 19404 log.u_bbr.delRate <<= 1; 19405 log.u_bbr.pkt_epoch = line; 19406 /* For fast output no retrans so just inflight and how many mss we send */ 19407 log.u_bbr.flex5 = log.u_bbr.inflight; 19408 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 19409 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 19410 len, &log, false, NULL, __func__, __LINE__, tv); 19411 } else 19412 lgb = NULL; 19413 #ifdef INET6 19414 if (rack->r_is_v6) { 19415 error = ip6_output(m, inp->in6p_outputopts, 19416 &inp->inp_route6, 19417 0, NULL, NULL, inp); 19418 } 19419 #endif 19420 #if defined(INET) && defined(INET6) 19421 else 19422 #endif 
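/*
 * The preprocessor blocks above and below form an if/else chain
 * only when both INET and INET6 are compiled in; in a single-
 * protocol kernel just the matching ip6_output()/ip_output() call
 * remains.
 */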
19423 #ifdef INET 19424 { 19425 error = ip_output(m, NULL, 19426 &inp->inp_route, 19427 0, 0, inp); 19428 } 19429 #endif 19430 if (lgb) { 19431 lgb->tlb_errno = error; 19432 lgb = NULL; 19433 } 19434 if (error) { 19435 *send_err = error; 19436 m = NULL; 19437 goto failed; 19438 } else if (rack->rc_hw_nobuf) { 19439 rack->rc_hw_nobuf = 0; 19440 rack->r_ctl.rc_agg_delayed = 0; 19441 rack->r_early = 0; 19442 rack->r_late = 0; 19443 rack->r_ctl.rc_agg_early = 0; 19444 } 19445 if ((error == 0) && (rack->lt_bw_up == 0)) { 19446 /* Unlikely */ 19447 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(tv); 19448 rack->r_ctl.lt_seq = tp->snd_una; 19449 rack->lt_bw_up = 1; 19450 } else if ((error == 0) && 19451 (((tp->snd_max + len) - rack->r_ctl.lt_seq) > 0x7fffffff)) { 19452 /* 19453 * Need to record what we have since we are 19454 * approaching seq wrap. 19455 */ 19456 struct timeval tv; 19457 uint64_t tmark; 19458 19459 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); 19460 rack->r_ctl.lt_seq = tp->snd_una; 19461 tmark = tcp_get_u64_usecs(&tv); 19462 if (tmark > rack->r_ctl.lt_timemark) { 19463 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 19464 rack->r_ctl.lt_timemark = tmark; 19465 } 19466 } 19467 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 19468 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls, segsiz); 19469 if (tp->snd_una == tp->snd_max) { 19470 rack->r_ctl.rc_tlp_rxt_last_time = cts; 19471 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 19472 tp->t_acktime = ticks; 19473 } 19474 counter_u64_add(rack_total_bytes, len); 19475 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 19476 19477 rack->forced_ack = 0; /* If we send something zap the FA flag */ 19478 *tot_len += len; 19479 if ((tp->t_flags & TF_GPUTINPROG) == 0) 19480 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 19481 tp->snd_max += len; 19482 tp->snd_nxt = tp->snd_max; 19483 if (rack->rc_new_rnd_needed) { 19484 rack_new_round_starts(tp, rack, tp->snd_max); 19485 } 19486 { 19487 int idx; 19488 19489 idx = (len / segsiz) + 3; 19490 if (idx >= TCP_MSS_ACCT_ATIMER) 19491 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 19492 else 19493 counter_u64_add(rack_out_size[idx], 1); 19494 } 19495 if (len <= rack->r_ctl.fsb.left_to_send) 19496 rack->r_ctl.fsb.left_to_send -= len; 19497 else 19498 rack->r_ctl.fsb.left_to_send = 0; 19499 if (rack->r_ctl.fsb.left_to_send < segsiz) { 19500 rack->r_fast_output = 0; 19501 rack->r_ctl.fsb.left_to_send = 0; 19502 /* At the end of fast_output scale up the sb */ 19503 SOCK_SENDBUF_LOCK(rack->rc_inp->inp_socket); 19504 rack_sndbuf_autoscale(rack); 19505 SOCK_SENDBUF_UNLOCK(rack->rc_inp->inp_socket); 19506 } 19507 if (tp->t_rtttime == 0) { 19508 tp->t_rtttime = ticks; 19509 tp->t_rtseq = startseq; 19510 KMOD_TCPSTAT_INC(tcps_segstimed); 19511 } 19512 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 19513 (max_val > len) && 19514 (*tot_len < rack->r_ctl.rc_pace_max_segs) && 19515 (tso == 0)) { 19516 max_val -= len; 19517 len = segsiz; 19518 th = rack->r_ctl.fsb.th; 19519 #ifdef TCP_ACCOUNTING 19520 cnt_thru++; 19521 #endif 19522 goto again; 19523 } 19524 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 19525 counter_u64_add(rack_fto_send, 1); 19526 slot = rack_get_pacing_delay(rack, tp, *tot_len, NULL, segsiz, __LINE__); 19527 rack_start_hpts_timer(rack, tp, cts, slot, *tot_len, 0); 19528 #ifdef TCP_ACCOUNTING 19529 crtsc = get_cyclecount(); 19530 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19531 
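/*
 * Charge the cycles spent in this call to the connection;
 * cnt_thru counts how many times the again: loop above built
 * and sent a segment during this invocation.
 */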
tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 19532 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 19533 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((*tot_len + segsiz - 1) / segsiz); 19534 } 19535 sched_unpin(); 19536 #endif 19537 return (0); 19538 failed: 19539 if (m) 19540 m_free(m); 19541 rack->r_fast_output = 0; 19542 return (-1); 19543 } 19544 19545 static inline void 19546 rack_setup_fast_output(struct tcpcb *tp, struct tcp_rack *rack, 19547 struct sockbuf *sb, 19548 int len, int orig_len, int segsiz, uint32_t pace_max_seg, 19549 bool hw_tls, 19550 uint16_t flags) 19551 { 19552 rack->r_fast_output = 1; 19553 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19554 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19555 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); 19556 rack->r_ctl.fsb.tcp_flags = flags; 19557 rack->r_ctl.fsb.left_to_send = orig_len - len; 19558 if (rack->r_ctl.fsb.left_to_send < pace_max_seg) { 19559 /* Less than a full sized pace, lets not */ 19560 rack->r_fast_output = 0; 19561 return; 19562 } else { 19563 /* Round down to the nearest pace_max_seg */ 19564 rack->r_ctl.fsb.left_to_send = rounddown(rack->r_ctl.fsb.left_to_send, pace_max_seg); 19565 } 19566 if (hw_tls) 19567 rack->r_ctl.fsb.hw_tls = 1; 19568 else 19569 rack->r_ctl.fsb.hw_tls = 0; 19570 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19571 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19572 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19573 (tp->snd_max - tp->snd_una))); 19574 if (rack->r_ctl.fsb.left_to_send < segsiz) 19575 rack->r_fast_output = 0; 19576 else { 19577 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19578 rack->r_ctl.fsb.rfo_apply_push = 1; 19579 else 19580 rack->r_ctl.fsb.rfo_apply_push = 0; 19581 } 19582 } 19583 19584 static uint32_t 19585 rack_get_hpts_pacing_min_for_bw(struct tcp_rack *rack, int32_t segsiz) 19586 { 19587 uint64_t min_time; 19588 uint32_t maxlen; 19589 19590 min_time = (uint64_t)get_hpts_min_sleep_time(); 19591 maxlen = (uint32_t)((rack->r_ctl.gp_bw * min_time) / (uint64_t)HPTS_USEC_IN_SEC); 19592 maxlen = roundup(maxlen, segsiz); 19593 return (maxlen); 19594 } 19595 19596 static struct rack_sendmap * 19597 rack_check_collapsed(struct tcp_rack *rack, uint32_t cts) 19598 { 19599 struct rack_sendmap *rsm = NULL; 19600 int thresh; 19601 19602 restart: 19603 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); 19604 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) { 19605 /* Nothing, strange turn off validity */ 19606 rack->r_collapse_point_valid = 0; 19607 return (NULL); 19608 } 19609 /* Can we send it yet? */ 19610 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) { 19611 /* 19612 * Receiver window has not grown enough for 19613 * the segment to be put on the wire. 19614 */ 19615 return (NULL); 19616 } 19617 if (rsm->r_flags & RACK_ACKED) { 19618 /* 19619 * It has been sacked, lets move to the 19620 * next one if possible. 19621 */ 19622 rack->r_ctl.last_collapse_point = rsm->r_end; 19623 /* Are we done? */ 19624 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 19625 rack->r_ctl.high_collapse_point)) { 19626 rack->r_collapse_point_valid = 0; 19627 return (NULL); 19628 } 19629 goto restart; 19630 } 19631 /* Now has it been long enough ? 
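 * That is, has the RACK reordering/retransmit threshold (derived
 * from the current RTT estimate) expired since this segment was
 * last (re)sent? Only then do we hand it back for retransmission
 * into the re-opened window.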
*/ 19632 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts, __LINE__, 1); 19633 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) { 19634 rack_log_collapse(rack, rsm->r_start, 19635 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 19636 thresh, __LINE__, 6, rsm->r_flags, rsm); 19637 return (rsm); 19638 } 19639 /* Not enough time */ 19640 rack_log_collapse(rack, rsm->r_start, 19641 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 19642 thresh, __LINE__, 7, rsm->r_flags, rsm); 19643 return (NULL); 19644 } 19645 19646 static inline void 19647 rack_validate_sizes(struct tcp_rack *rack, int32_t *len, int32_t segsiz, uint32_t pace_max_seg) 19648 { 19649 if ((rack->full_size_rxt == 0) && 19650 (rack->shape_rxt_to_pacing_min == 0) && 19651 (*len >= segsiz)) { 19652 *len = segsiz; 19653 } else if (rack->shape_rxt_to_pacing_min && 19654 rack->gp_ready) { 19655 /* We use pacing min as shaping len req */ 19656 uint32_t maxlen; 19657 19658 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 19659 if (*len > maxlen) 19660 *len = maxlen; 19661 } else { 19662 /* 19663 * The else is full_size_rxt is on so send it all 19664 * note we do need to check this for exceeding 19665 * our max segment size due to the fact that 19666 * we do sometimes merge chunks together i.e. 19667 * we cannot just assume that we will never have 19668 * a chunk greater than pace_max_seg 19669 */ 19670 if (*len > pace_max_seg) 19671 *len = pace_max_seg; 19672 } 19673 } 19674 19675 static int 19676 rack_output(struct tcpcb *tp) 19677 { 19678 struct socket *so; 19679 uint32_t recwin; 19680 uint32_t sb_offset, s_moff = 0; 19681 int32_t len, error = 0; 19682 uint16_t flags; 19683 struct mbuf *m, *s_mb = NULL; 19684 struct mbuf *mb; 19685 uint32_t if_hw_tsomaxsegcount = 0; 19686 uint32_t if_hw_tsomaxsegsize; 19687 int32_t segsiz, minseg; 19688 long tot_len_this_send = 0; 19689 #ifdef INET 19690 struct ip *ip = NULL; 19691 #endif 19692 struct udphdr *udp = NULL; 19693 struct tcp_rack *rack; 19694 struct tcphdr *th; 19695 uint8_t pass = 0; 19696 uint8_t mark = 0; 19697 uint8_t check_done = 0; 19698 uint8_t wanted_cookie = 0; 19699 u_char opt[TCP_MAXOLEN]; 19700 unsigned ipoptlen, optlen, hdrlen, ulen=0; 19701 uint32_t rack_seq; 19702 19703 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 19704 unsigned ipsec_optlen = 0; 19705 19706 #endif 19707 int32_t idle, sendalot; 19708 uint32_t tot_idle; 19709 int32_t sub_from_prr = 0; 19710 volatile int32_t sack_rxmit; 19711 struct rack_sendmap *rsm = NULL; 19712 int32_t tso, mtu; 19713 struct tcpopt to; 19714 int32_t slot = 0; 19715 int32_t sup_rack = 0; 19716 uint32_t cts, ms_cts, delayed, early; 19717 uint32_t add_flag = RACK_SENT_SP; 19718 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */ 19719 uint8_t doing_tlp = 0; 19720 uint32_t cwnd_to_use, pace_max_seg; 19721 int32_t do_a_prefetch = 0; 19722 int32_t prefetch_rsm = 0; 19723 int32_t orig_len = 0; 19724 struct timeval tv; 19725 int32_t prefetch_so_done = 0; 19726 struct tcp_log_buffer *lgb; 19727 struct inpcb *inp = tptoinpcb(tp); 19728 struct sockbuf *sb; 19729 uint64_t ts_val = 0; 19730 #ifdef TCP_ACCOUNTING 19731 uint64_t crtsc; 19732 #endif 19733 #ifdef INET6 19734 struct ip6_hdr *ip6 = NULL; 19735 int32_t isipv6; 19736 #endif 19737 bool hpts_calling, hw_tls = false; 19738 19739 NET_EPOCH_ASSERT(); 19740 INP_WLOCK_ASSERT(inp); 19741 19742 /* setup and take the cache hits here */ 19743 rack = (struct tcp_rack *)tp->t_fb_ptr; 19744 #ifdef 
TCP_ACCOUNTING 19745 sched_pin(); 19746 ts_val = get_cyclecount(); 19747 #endif 19748 hpts_calling = !!(tp->t_flags2 & TF2_HPTS_CALLS); 19749 tp->t_flags2 &= ~TF2_HPTS_CALLS; 19750 #ifdef TCP_OFFLOAD 19751 if (tp->t_flags & TF_TOE) { 19752 #ifdef TCP_ACCOUNTING 19753 sched_unpin(); 19754 #endif 19755 return (tcp_offload_output(tp)); 19756 } 19757 #endif 19758 if (rack->rack_deferred_inited == 0) { 19759 /* 19760 * If we are the connecting socket we will 19761 * hit rack_init() when no sequence numbers 19762 * are setup. This makes it so we must defer 19763 * some initialization. Call that now. 19764 */ 19765 rack_deferred_init(tp, rack); 19766 } 19767 /* 19768 * For TFO connections in SYN_RECEIVED, only allow the initial 19769 * SYN|ACK and those sent by the retransmit timer. 19770 */ 19771 if ((tp->t_flags & TF_FASTOPEN) && 19772 (tp->t_state == TCPS_SYN_RECEIVED) && 19773 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 19774 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 19775 #ifdef TCP_ACCOUNTING 19776 sched_unpin(); 19777 #endif 19778 return (0); 19779 } 19780 #ifdef INET6 19781 if (rack->r_state) { 19782 /* Use the cache line loaded if possible */ 19783 isipv6 = rack->r_is_v6; 19784 } else { 19785 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 19786 } 19787 #endif 19788 early = 0; 19789 cts = tcp_get_usecs(&tv); 19790 ms_cts = tcp_tv_to_mssectick(&tv); 19791 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 19792 tcp_in_hpts(rack->rc_tp)) { 19793 /* 19794 * We are on the hpts for some timer but not hptsi output. 19795 * Remove from the hpts unconditionally. 19796 */ 19797 rack_timer_cancel(tp, rack, cts, __LINE__); 19798 } 19799 /* Are we pacing and late? */ 19800 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 19801 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 19802 /* We are delayed */ 19803 delayed = cts - rack->r_ctl.rc_last_output_to; 19804 } else { 19805 delayed = 0; 19806 } 19807 /* Do the timers, which may override the pacer */ 19808 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 19809 int retval; 19810 19811 retval = rack_process_timers(tp, rack, cts, hpts_calling, 19812 &doing_tlp); 19813 if (retval != 0) { 19814 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 19815 #ifdef TCP_ACCOUNTING 19816 sched_unpin(); 19817 #endif 19818 /* 19819 * If timers want tcp_drop(), then pass error out, 19820 * otherwise suppress it. 19821 */ 19822 return (retval < 0 ? retval : 0); 19823 } 19824 } 19825 if (rack->rc_in_persist) { 19826 if (tcp_in_hpts(rack->rc_tp) == 0) { 19827 /* Timer is not running */ 19828 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 19829 } 19830 #ifdef TCP_ACCOUNTING 19831 sched_unpin(); 19832 #endif 19833 return (0); 19834 } 19835 if ((rack->rc_ack_required == 1) && 19836 (rack->r_timer_override == 0)){ 19837 /* A timeout occurred and no ack has arrived */ 19838 if (tcp_in_hpts(rack->rc_tp) == 0) { 19839 /* Timer is not running */ 19840 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 19841 } 19842 #ifdef TCP_ACCOUNTING 19843 sched_unpin(); 19844 #endif 19845 return (0); 19846 } 19847 if ((rack->r_timer_override) || 19848 (rack->rc_ack_can_sendout_data) || 19849 (delayed) || 19850 (tp->t_state < TCPS_ESTABLISHED)) { 19851 rack->rc_ack_can_sendout_data = 0; 19852 if (tcp_in_hpts(rack->rc_tp)) 19853 tcp_hpts_remove(rack->rc_tp); 19854 } else if (tcp_in_hpts(rack->rc_tp)) { 19855 /* 19856 * On the hpts you can't pass even if ACKNOW is on, we will 19857 * when the hpts fires. 
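 * The blocked attempt is still charged to SND_BLOCKED below when
 * TCP_ACCOUNTING is compiled in, and shows up in the
 * TCP_MSS_ACCT_INPACE counter.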
19858 */ 19859 #ifdef TCP_ACCOUNTING 19860 crtsc = get_cyclecount(); 19861 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19862 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 19863 tp->tcp_cnt_counters[SND_BLOCKED]++; 19864 } 19865 sched_unpin(); 19866 #endif 19867 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 19868 return (0); 19869 } 19870 /* Finish out both pacing early and late accounting */ 19871 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 19872 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 19873 early = rack->r_ctl.rc_last_output_to - cts; 19874 } else 19875 early = 0; 19876 if (delayed && (rack->rc_always_pace == 1)) { 19877 rack->r_ctl.rc_agg_delayed += delayed; 19878 rack->r_late = 1; 19879 } else if (early && (rack->rc_always_pace == 1)) { 19880 rack->r_ctl.rc_agg_early += early; 19881 rack->r_early = 1; 19882 } else if (rack->rc_always_pace == 0) { 19883 /* Non-paced we are not late */ 19884 rack->r_ctl.rc_agg_delayed = rack->r_ctl.rc_agg_early = 0; 19885 rack->r_early = rack->r_late = 0; 19886 } 19887 /* Now that early/late accounting is done turn off the flag */ 19888 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 19889 rack->r_wanted_output = 0; 19890 rack->r_timer_override = 0; 19891 if ((tp->t_state != rack->r_state) && 19892 TCPS_HAVEESTABLISHED(tp->t_state)) { 19893 rack_set_state(tp, rack); 19894 } 19895 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 19896 minseg = segsiz; 19897 if (rack->r_ctl.rc_pace_max_segs == 0) 19898 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 19899 else 19900 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 19901 if ((rack->r_fast_output) && 19902 (doing_tlp == 0) && 19903 (tp->rcv_numsacks == 0)) { 19904 int ret; 19905 19906 error = 0; 19907 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, &tot_len_this_send, &error, __LINE__); 19908 if (ret > 0) 19909 return(ret); 19910 else if (error) { 19911 inp = rack->rc_inp; 19912 so = inp->inp_socket; 19913 sb = &so->so_snd; 19914 goto nomore; 19915 } else { 19916 /* Return == 0, if there is more we can send tot_len wise fall through and send */ 19917 if (tot_len_this_send >= pace_max_seg) 19918 return (ret); 19919 #ifdef TCP_ACCOUNTING 19920 /* We need to re-pin since fast_output un-pined */ 19921 sched_pin(); 19922 ts_val = get_cyclecount(); 19923 #endif 19924 /* Fall back out so we can send any more that may bring us to pace_max_seg */ 19925 } 19926 } 19927 inp = rack->rc_inp; 19928 /* 19929 * For TFO connections in SYN_SENT or SYN_RECEIVED, 19930 * only allow the initial SYN or SYN|ACK and those sent 19931 * by the retransmit timer. 19932 */ 19933 if ((tp->t_flags & TF_FASTOPEN) && 19934 ((tp->t_state == TCPS_SYN_RECEIVED) || 19935 (tp->t_state == TCPS_SYN_SENT)) && 19936 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 19937 (tp->t_rxtshift == 0)) { /* not a retransmit */ 19938 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 19939 #ifdef TCP_ACCOUNTING 19940 sched_unpin(); 19941 #endif 19942 return (0); 19943 } 19944 /* 19945 * Determine length of data that should be transmitted, and flags 19946 * that will be used. If there is some data or critical controls 19947 * (SYN, RST) to send, then transmit; otherwise, investigate 19948 * further. 
19949 */ 19950 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 19951 if (tp->t_idle_reduce) { 19952 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 19953 rack_cc_after_idle(rack, tp); 19954 } 19955 tp->t_flags &= ~TF_LASTIDLE; 19956 if (idle) { 19957 if (tp->t_flags & TF_MORETOCOME) { 19958 tp->t_flags |= TF_LASTIDLE; 19959 idle = 0; 19960 } 19961 } 19962 if ((tp->snd_una == tp->snd_max) && 19963 rack->r_ctl.rc_went_idle_time && 19964 (cts > rack->r_ctl.rc_went_idle_time)) { 19965 tot_idle = (cts - rack->r_ctl.rc_went_idle_time); 19966 if (tot_idle > rack_min_probertt_hold) { 19967 /* Count as a probe rtt */ 19968 if (rack->in_probe_rtt == 0) { 19969 rack->r_ctl.rc_lower_rtt_us_cts = cts; 19970 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 19971 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 19972 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 19973 } else { 19974 rack_exit_probertt(rack, cts); 19975 } 19976 } 19977 } else 19978 tot_idle = 0; 19979 if (rack_use_fsb && 19980 (rack->r_ctl.fsb.tcp_ip_hdr) && 19981 (rack->r_fsb_inited == 0) && 19982 (rack->r_state != TCPS_CLOSED)) 19983 rack_init_fsb_block(tp, rack, tcp_outflags[tp->t_state]); 19984 if (rack->rc_sendvars_notset == 1) { 19985 rack->rc_sendvars_notset = 0; 19986 /* 19987 * Make sure any TCP timers (keep-alive) is not running. 19988 */ 19989 tcp_timer_stop(tp); 19990 } 19991 if ((rack->rack_no_prr == 1) && 19992 (rack->rc_always_pace == 0)) { 19993 /* 19994 * Sanity check before sending, if we have 19995 * no-pacing enabled and prr is turned off that 19996 * is a logistics error. Correct this by turnning 19997 * prr back on. A user *must* set some form of 19998 * pacing in order to turn PRR off. We do this 19999 * in the output path so that we can avoid socket 20000 * option ordering issues that would occur if we 20001 * tried to do it while setting rack_no_prr on. 20002 */ 20003 rack->rack_no_prr = 0; 20004 } 20005 if ((rack->pcm_enabled == 1) && 20006 (rack->pcm_needed == 0) && 20007 (tot_idle > 0)) { 20008 /* 20009 * We have been idle some micro seconds. We need 20010 * to factor this in to see if a PCM is needed. 20011 */ 20012 uint32_t rtts_idle, rnds; 20013 20014 if (tp->t_srtt) 20015 rtts_idle = tot_idle / tp->t_srtt; 20016 else 20017 rtts_idle = 0; 20018 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; 20019 rack->r_ctl.pcm_idle_rounds += rtts_idle; 20020 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { 20021 rack->pcm_needed = 1; 20022 rack_log_pcm(rack, 8, rack->r_ctl.last_pcm_round, rtts_idle, rack->r_ctl.current_round ); 20023 } 20024 } 20025 again: 20026 sendalot = 0; 20027 cts = tcp_get_usecs(&tv); 20028 ms_cts = tcp_tv_to_mssectick(&tv); 20029 tso = 0; 20030 mtu = 0; 20031 if (TCPS_HAVEESTABLISHED(tp->t_state) && 20032 (rack->r_ctl.pcm_max_seg == 0)) { 20033 /* 20034 * We set in our first send so we know that the ctf_fixed_maxseg 20035 * has been fully set. If we do it in rack_init() we most likely 20036 * see 512 bytes so we end up at 5120, not desirable. 20037 */ 20038 rack->r_ctl.pcm_max_seg = rc_init_window(rack); 20039 if (rack->r_ctl.pcm_max_seg < (ctf_fixed_maxseg(tp) * 10)) { 20040 /* 20041 * Assure our initial PCM probe is at least 10 MSS. 
20042 */ 20043 rack->r_ctl.pcm_max_seg = ctf_fixed_maxseg(tp) * 10; 20044 } 20045 } 20046 if ((rack->r_ctl.pcm_max_seg != 0) && (rack->pcm_needed == 1)) { 20047 uint32_t rw_avail, cwa; 20048 20049 if (tp->snd_wnd > ctf_outstanding(tp)) 20050 rw_avail = tp->snd_wnd - ctf_outstanding(tp); 20051 else 20052 rw_avail = 0; 20053 if (tp->snd_cwnd > ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked)) 20054 cwa = tp->snd_cwnd -ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 20055 else 20056 cwa = 0; 20057 if ((cwa >= rack->r_ctl.pcm_max_seg) && 20058 (rw_avail > rack->r_ctl.pcm_max_seg)) { 20059 /* Raise up the max seg for this trip through */ 20060 pace_max_seg = rack->r_ctl.pcm_max_seg; 20061 /* Disable any fast output */ 20062 rack->r_fast_output = 0; 20063 } 20064 if (rack_verbose_logging) { 20065 rack_log_pcm(rack, 4, 20066 cwa, rack->r_ctl.pcm_max_seg, rw_avail); 20067 } 20068 } 20069 sb_offset = tp->snd_max - tp->snd_una; 20070 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 20071 flags = tcp_outflags[tp->t_state]; 20072 while (rack->rc_free_cnt < rack_free_cache) { 20073 rsm = rack_alloc(rack); 20074 if (rsm == NULL) { 20075 if (hpts_calling) 20076 /* Retry in a ms */ 20077 slot = (1 * HPTS_USEC_IN_MSEC); 20078 so = inp->inp_socket; 20079 sb = &so->so_snd; 20080 goto just_return_nolock; 20081 } 20082 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 20083 rack->rc_free_cnt++; 20084 rsm = NULL; 20085 } 20086 sack_rxmit = 0; 20087 len = 0; 20088 rsm = NULL; 20089 if (flags & TH_RST) { 20090 SOCK_SENDBUF_LOCK(inp->inp_socket); 20091 so = inp->inp_socket; 20092 sb = &so->so_snd; 20093 goto send; 20094 } 20095 if (rack->r_ctl.rc_resend) { 20096 /* Retransmit timer */ 20097 rsm = rack->r_ctl.rc_resend; 20098 rack->r_ctl.rc_resend = NULL; 20099 len = rsm->r_end - rsm->r_start; 20100 sack_rxmit = 1; 20101 sendalot = 0; 20102 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 20103 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 20104 __func__, __LINE__, 20105 rsm->r_start, tp->snd_una, tp, rack, rsm)); 20106 sb_offset = rsm->r_start - tp->snd_una; 20107 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 20108 } else if (rack->r_collapse_point_valid && 20109 ((rsm = rack_check_collapsed(rack, cts)) != NULL)) { 20110 /* 20111 * If an RSM is returned then enough time has passed 20112 * for us to retransmit it. Move up the collapse point, 20113 * since this rsm has its chance to retransmit now. 20114 */ 20115 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_RXT); 20116 rack->r_ctl.last_collapse_point = rsm->r_end; 20117 /* Are we done? 
*/ 20118 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 20119 rack->r_ctl.high_collapse_point)) 20120 rack->r_collapse_point_valid = 0; 20121 sack_rxmit = 1; 20122 /* We are not doing a TLP */ 20123 doing_tlp = 0; 20124 len = rsm->r_end - rsm->r_start; 20125 sb_offset = rsm->r_start - tp->snd_una; 20126 sendalot = 0; 20127 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 20128 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) { 20129 /* We have a retransmit that takes precedence */ 20130 if ((!IN_FASTRECOVERY(tp->t_flags)) && 20131 ((rsm->r_flags & RACK_MUST_RXT) == 0) && 20132 ((tp->t_flags & TF_WASFRECOVERY) == 0)) { 20133 /* Enter recovery if not induced by a time-out */ 20134 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 20135 } 20136 #ifdef INVARIANTS 20137 if (SEQ_LT(rsm->r_start, tp->snd_una)) { 20138 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n", 20139 tp, rack, rsm, rsm->r_start, tp->snd_una); 20140 } 20141 #endif 20142 len = rsm->r_end - rsm->r_start; 20143 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 20144 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 20145 __func__, __LINE__, 20146 rsm->r_start, tp->snd_una, tp, rack, rsm)); 20147 sb_offset = rsm->r_start - tp->snd_una; 20148 sendalot = 0; 20149 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 20150 if (len > 0) { 20151 sack_rxmit = 1; 20152 KMOD_TCPSTAT_INC(tcps_sack_rexmits); 20153 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes, 20154 min(len, segsiz)); 20155 } 20156 } else if (rack->r_ctl.rc_tlpsend) { 20157 /* Tail loss probe */ 20158 long cwin; 20159 long tlen; 20160 20161 /* 20162 * Check if we can do a TLP with a RACK'd packet 20163 * this can happen if we are not doing the rack 20164 * cheat and we skipped to a TLP and it 20165 * went off. 20166 */ 20167 rsm = rack->r_ctl.rc_tlpsend; 20168 /* We are doing a TLP make sure the flag is preent */ 20169 rsm->r_flags |= RACK_TLP; 20170 rack->r_ctl.rc_tlpsend = NULL; 20171 sack_rxmit = 1; 20172 tlen = rsm->r_end - rsm->r_start; 20173 if (tlen > segsiz) 20174 tlen = segsiz; 20175 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 20176 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 20177 __func__, __LINE__, 20178 rsm->r_start, tp->snd_una, tp, rack, rsm)); 20179 sb_offset = rsm->r_start - tp->snd_una; 20180 cwin = min(tp->snd_wnd, tlen); 20181 len = cwin; 20182 } 20183 if (rack->r_must_retran && 20184 (doing_tlp == 0) && 20185 (SEQ_GT(tp->snd_max, tp->snd_una)) && 20186 (rsm == NULL)) { 20187 /* 20188 * There are two different ways that we 20189 * can get into this block: 20190 * a) This is a non-sack connection, we had a time-out 20191 * and thus r_must_retran was set and everything 20192 * left outstanding as been marked for retransmit. 20193 * b) The MTU of the path shrank, so that everything 20194 * was marked to be retransmitted with the smaller 20195 * mtu and r_must_retran was set. 20196 * 20197 * This means that we expect the sendmap (outstanding) 20198 * to all be marked must. We can use the tmap to 20199 * look at them. 20200 * 20201 */ 20202 int sendwin, flight; 20203 20204 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 20205 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto); 20206 if (flight >= sendwin) { 20207 /* 20208 * We can't send yet. 20209 */ 20210 so = inp->inp_socket; 20211 sb = &so->so_snd; 20212 goto just_return_nolock; 20213 } 20214 /* 20215 * This is the case a/b mentioned above. All 20216 * outstanding/not-acked should be marked. 20217 * We can use the tmap to find them. 
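 * The tmap is kept in transmit order, so its head is the oldest
 * outstanding entry; if even that one lacks RACK_MUST_RXT the
 * must-retransmit state is stale and is cleared just below.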
20218 */ 20219 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 20220 if (rsm == NULL) { 20221 /* TSNH */ 20222 rack->r_must_retran = 0; 20223 rack->r_ctl.rc_out_at_rto = 0; 20224 so = inp->inp_socket; 20225 sb = &so->so_snd; 20226 goto just_return_nolock; 20227 } 20228 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { 20229 /* 20230 * The first one does not have the flag, did we collapse 20231 * further up in our list? 20232 */ 20233 rack->r_must_retran = 0; 20234 rack->r_ctl.rc_out_at_rto = 0; 20235 rsm = NULL; 20236 sack_rxmit = 0; 20237 } else { 20238 sack_rxmit = 1; 20239 len = rsm->r_end - rsm->r_start; 20240 sb_offset = rsm->r_start - tp->snd_una; 20241 sendalot = 0; 20242 if ((rack->full_size_rxt == 0) && 20243 (rack->shape_rxt_to_pacing_min == 0) && 20244 (len >= segsiz)) 20245 len = segsiz; 20246 else if (rack->shape_rxt_to_pacing_min && 20247 rack->gp_ready) { 20248 /* We use pacing min as shaping len req */ 20249 uint32_t maxlen; 20250 20251 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 20252 if (len > maxlen) 20253 len = maxlen; 20254 } 20255 /* 20256 * Delay removing the flag RACK_MUST_RXT so 20257 * that the fastpath for retransmit will 20258 * work with this rsm. 20259 */ 20260 } 20261 } 20262 /* 20263 * Enforce a connection sendmap count limit if set 20264 * as long as we are not retransmiting. 20265 */ 20266 if ((rsm == NULL) && 20267 (V_tcp_map_entries_limit > 0) && 20268 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 20269 counter_u64_add(rack_to_alloc_limited, 1); 20270 if (!rack->alloc_limit_reported) { 20271 rack->alloc_limit_reported = 1; 20272 counter_u64_add(rack_alloc_limited_conns, 1); 20273 } 20274 so = inp->inp_socket; 20275 sb = &so->so_snd; 20276 goto just_return_nolock; 20277 } 20278 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 20279 /* we are retransmitting the fin */ 20280 len--; 20281 if (len) { 20282 /* 20283 * When retransmitting data do *not* include the 20284 * FIN. This could happen from a TLP probe. 
20285 */ 20286 flags &= ~TH_FIN; 20287 } 20288 } 20289 if (rsm && rack->r_fsb_inited && 20290 rack_use_rsm_rfo && 20291 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 20292 int ret; 20293 20294 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); 20295 if (ret == 0) 20296 return (0); 20297 } 20298 so = inp->inp_socket; 20299 sb = &so->so_snd; 20300 if (do_a_prefetch == 0) { 20301 kern_prefetch(sb, &do_a_prefetch); 20302 do_a_prefetch = 1; 20303 } 20304 #ifdef NETFLIX_SHARED_CWND 20305 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 20306 rack->rack_enable_scwnd) { 20307 /* We are doing cwnd sharing */ 20308 if (rack->gp_ready && 20309 (rack->rack_attempted_scwnd == 0) && 20310 (rack->r_ctl.rc_scw == NULL) && 20311 tp->t_lib) { 20312 /* The pcbid is in, lets make an attempt */ 20313 counter_u64_add(rack_try_scwnd, 1); 20314 rack->rack_attempted_scwnd = 1; 20315 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 20316 &rack->r_ctl.rc_scw_index, 20317 segsiz); 20318 } 20319 if (rack->r_ctl.rc_scw && 20320 (rack->rack_scwnd_is_idle == 1) && 20321 sbavail(&so->so_snd)) { 20322 /* we are no longer out of data */ 20323 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 20324 rack->rack_scwnd_is_idle = 0; 20325 } 20326 if (rack->r_ctl.rc_scw) { 20327 /* First lets update and get the cwnd */ 20328 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 20329 rack->r_ctl.rc_scw_index, 20330 tp->snd_cwnd, tp->snd_wnd, segsiz); 20331 } 20332 } 20333 #endif 20334 /* 20335 * Get standard flags, and add SYN or FIN if requested by 'hidden' 20336 * state flags. 20337 */ 20338 if (tp->t_flags & TF_NEEDFIN) 20339 flags |= TH_FIN; 20340 if (tp->t_flags & TF_NEEDSYN) 20341 flags |= TH_SYN; 20342 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 20343 void *end_rsm; 20344 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 20345 if (end_rsm) 20346 kern_prefetch(end_rsm, &prefetch_rsm); 20347 prefetch_rsm = 1; 20348 } 20349 SOCK_SENDBUF_LOCK(so); 20350 if ((sack_rxmit == 0) && 20351 (TCPS_HAVEESTABLISHED(tp->t_state) || 20352 (tp->t_flags & TF_FASTOPEN))) { 20353 /* 20354 * We are not retransmitting (sack_rxmit is 0) so we 20355 * are sending new data. This is always based on snd_max. 20356 * Now in theory snd_max may be equal to snd_una, if so 20357 * then nothing is outstanding and the offset would be 0. 20358 */ 20359 uint32_t avail; 20360 20361 avail = sbavail(sb); 20362 if (SEQ_GT(tp->snd_max, tp->snd_una) && avail) 20363 sb_offset = tp->snd_max - tp->snd_una; 20364 else 20365 sb_offset = 0; 20366 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { 20367 if (rack->r_ctl.rc_tlp_new_data) { 20368 /* TLP is forcing out new data */ 20369 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 20370 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 20371 } 20372 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { 20373 if (tp->snd_wnd > sb_offset) 20374 len = tp->snd_wnd - sb_offset; 20375 else 20376 len = 0; 20377 } else { 20378 len = rack->r_ctl.rc_tlp_new_data; 20379 } 20380 rack->r_ctl.rc_tlp_new_data = 0; 20381 } else { 20382 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); 20383 } 20384 if ((rack->r_ctl.crte == NULL) && 20385 IN_FASTRECOVERY(tp->t_flags) && 20386 (rack->full_size_rxt == 0) && 20387 (rack->shape_rxt_to_pacing_min == 0) && 20388 (len > segsiz)) { 20389 /* 20390 * For prr=off, we need to send only 1 MSS 20391 * at a time. 
We do this because another sack could 20392 * be arriving that causes us to send retransmits and 20393 * we don't want to be on a long pace due to a larger send 20394 * that keeps us from sending out the retransmit. 20395 */ 20396 len = segsiz; 20397 } else if (rack->shape_rxt_to_pacing_min && 20398 rack->gp_ready) { 20399 /* We use pacing min as shaping len req */ 20400 uint32_t maxlen; 20401 20402 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 20403 if (len > maxlen) 20404 len = maxlen; 20405 }/* The else is full_size_rxt is on so send it all */ 20406 } else { 20407 uint32_t outstanding; 20408 /* 20409 * We are inside of a Fast recovery episode, this 20410 * is caused by a SACK or 3 dup acks. At this point 20411 * we have sent all the retransmissions and we rely 20412 * on PRR to dictate what we will send in the form of 20413 * new data. 20414 */ 20415 20416 outstanding = tp->snd_max - tp->snd_una; 20417 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 20418 if (tp->snd_wnd > outstanding) { 20419 len = tp->snd_wnd - outstanding; 20420 /* Check to see if we have the data */ 20421 if ((sb_offset + len) > avail) { 20422 /* It does not all fit */ 20423 if (avail > sb_offset) 20424 len = avail - sb_offset; 20425 else 20426 len = 0; 20427 } 20428 } else { 20429 len = 0; 20430 } 20431 } else if (avail > sb_offset) { 20432 len = avail - sb_offset; 20433 } else { 20434 len = 0; 20435 } 20436 if (len > 0) { 20437 if (len > rack->r_ctl.rc_prr_sndcnt) { 20438 len = rack->r_ctl.rc_prr_sndcnt; 20439 } 20440 if (len > 0) { 20441 sub_from_prr = 1; 20442 } 20443 } 20444 if (len > segsiz) { 20445 /* 20446 * We should never send more than a MSS when 20447 * retransmitting or sending new data in prr 20448 * mode unless the override flag is on. Most 20449 * likely the PRR algorithm is not going to 20450 * let us send a lot as well :-) 20451 */ 20452 if (rack->r_ctl.rc_prr_sendalot == 0) { 20453 len = segsiz; 20454 } 20455 } else if (len < segsiz) { 20456 /* 20457 * Do we send any? The idea here is if the 20458 * send empty's the socket buffer we want to 20459 * do it. However if not then lets just wait 20460 * for our prr_sndcnt to get bigger. 20461 */ 20462 long leftinsb; 20463 20464 leftinsb = sbavail(sb) - sb_offset; 20465 if (leftinsb > len) { 20466 /* This send does not empty the sb */ 20467 len = 0; 20468 } 20469 } 20470 } 20471 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 20472 /* 20473 * If you have not established 20474 * and are not doing FAST OPEN 20475 * no data please. 20476 */ 20477 if ((sack_rxmit == 0) && 20478 !(tp->t_flags & TF_FASTOPEN)) { 20479 len = 0; 20480 sb_offset = 0; 20481 } 20482 } 20483 if (prefetch_so_done == 0) { 20484 kern_prefetch(so, &prefetch_so_done); 20485 prefetch_so_done = 1; 20486 } 20487 orig_len = len; 20488 /* 20489 * Lop off SYN bit if it has already been sent. However, if this is 20490 * SYN-SENT state and if segment contains data and if we don't know 20491 * that foreign host supports TAO, suppress sending segment. 20492 */ 20493 if ((flags & TH_SYN) && 20494 SEQ_GT(tp->snd_max, tp->snd_una) && 20495 ((sack_rxmit == 0) && 20496 (tp->t_rxtshift == 0))) { 20497 /* 20498 * When sending additional segments following a TFO SYN|ACK, 20499 * do not include the SYN bit. 20500 */ 20501 if ((tp->t_flags & TF_FASTOPEN) && 20502 (tp->t_state == TCPS_SYN_RECEIVED)) 20503 flags &= ~TH_SYN; 20504 } 20505 /* 20506 * Be careful not to send data and/or FIN on SYN segments. 
This 20507 * measure is needed to prevent interoperability problems with not 20508 * fully conformant TCP implementations. 20509 */ 20510 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 20511 len = 0; 20512 flags &= ~TH_FIN; 20513 } 20514 /* 20515 * On TFO sockets, ensure no data is sent in the following cases: 20516 * 20517 * - When retransmitting SYN|ACK on a passively-created socket 20518 * 20519 * - When retransmitting SYN on an actively created socket 20520 * 20521 * - When sending a zero-length cookie (cookie request) on an 20522 * actively created socket 20523 * 20524 * - When the socket is in the CLOSED state (RST is being sent) 20525 */ 20526 if ((tp->t_flags & TF_FASTOPEN) && 20527 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 20528 ((tp->t_state == TCPS_SYN_SENT) && 20529 (tp->t_tfo_client_cookie_len == 0)) || 20530 (flags & TH_RST))) { 20531 sack_rxmit = 0; 20532 len = 0; 20533 } 20534 /* Without fast-open there should never be data sent on a SYN */ 20535 if ((flags & TH_SYN) && !(tp->t_flags & TF_FASTOPEN)) { 20536 len = 0; 20537 } 20538 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 20539 /* We only send 1 MSS if we have a DSACK block */ 20540 add_flag |= RACK_SENT_W_DSACK; 20541 len = segsiz; 20542 } 20543 if (len <= 0) { 20544 /* 20545 * We have nothing to send, or the window shrank, or 20546 * is closed, do we need to go into persists? 20547 */ 20548 len = 0; 20549 if ((tp->snd_wnd == 0) && 20550 (TCPS_HAVEESTABLISHED(tp->t_state)) && 20551 (tp->snd_una == tp->snd_max) && 20552 (sb_offset < (int)sbavail(sb))) { 20553 rack_enter_persist(tp, rack, cts, tp->snd_una); 20554 } 20555 } else if ((rsm == NULL) && 20556 (doing_tlp == 0) && 20557 (len < pace_max_seg)) { 20558 /* 20559 * We are not sending a maximum sized segment for 20560 * some reason. Should we not send anything (think 20561 * sws or persists)? 20562 */ 20563 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 20564 (TCPS_HAVEESTABLISHED(tp->t_state)) && 20565 (len < minseg) && 20566 (len < (int)(sbavail(sb) - sb_offset))) { 20567 /* 20568 * Here the rwnd is less than 20569 * the minimum pacing size, this is not a retransmit, 20570 * we are established and 20571 * the send is not the last in the socket buffer 20572 * we send nothing, and we may enter persists 20573 * if nothing is outstanding. 20574 */ 20575 len = 0; 20576 if (tp->snd_max == tp->snd_una) { 20577 /* 20578 * Nothing out we can 20579 * go into persists. 20580 */ 20581 rack_enter_persist(tp, rack, cts, tp->snd_una); 20582 } 20583 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 20584 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 20585 (len < (int)(sbavail(sb) - sb_offset)) && 20586 (len < minseg)) { 20587 /* 20588 * Here we are not retransmitting, and 20589 * the cwnd is not so small that we could 20590 * not send at least a min size (rxt timer 20591 * not having gone off), We have 2 segments or 20592 * more already in flight, its not the tail end 20593 * of the socket buffer and the cwnd is blocking 20594 * us from sending out a minimum pacing segment size. 20595 * Lets not send anything. 20596 */ 20597 len = 0; 20598 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 20599 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 20600 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 20601 (len < (int)(sbavail(sb) - sb_offset)) && 20602 (TCPS_HAVEESTABLISHED(tp->t_state))) { 20603 /* 20604 * Here we have a send window but we have 20605 * filled it up and we can't send another pacing segment. 
20606 * We also have in flight more than 2 segments 20607 * and we are not completing the sb i.e. we allow 20608 * the last bytes of the sb to go out even if 20609 * its not a full pacing segment. 20610 */ 20611 len = 0; 20612 } else if ((rack->r_ctl.crte != NULL) && 20613 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && 20614 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) && 20615 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && 20616 (len < (int)(sbavail(sb) - sb_offset))) { 20617 /* 20618 * Here we are doing hardware pacing, this is not a TLP, 20619 * we are not sending a pace max segment size, there is rwnd 20620 * room to send at least N pace_max_seg, the cwnd is greater 20621 * than or equal to a full pacing segments plus 4 mss and we have 2 or 20622 * more segments in flight and its not the tail of the socket buffer. 20623 * 20624 * We don't want to send instead we need to get more ack's in to 20625 * allow us to send a full pacing segment. Normally, if we are pacing 20626 * about the right speed, we should have finished our pacing 20627 * send as most of the acks have come back if we are at the 20628 * right rate. This is a bit fuzzy since return path delay 20629 * can delay the acks, which is why we want to make sure we 20630 * have cwnd space to have a bit more than a max pace segments in flight. 20631 * 20632 * If we have not gotten our acks back we are pacing at too high a 20633 * rate delaying will not hurt and will bring our GP estimate down by 20634 * injecting the delay. If we don't do this we will send 20635 * 2 MSS out in response to the acks being clocked in which 20636 * defeats the point of hw-pacing (i.e. to help us get 20637 * larger TSO's out). 20638 */ 20639 len = 0; 20640 } 20641 20642 } 20643 /* len will be >= 0 after this point. */ 20644 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 20645 rack_sndbuf_autoscale(rack); 20646 /* 20647 * Decide if we can use TCP Segmentation Offloading (if supported by 20648 * hardware). 20649 * 20650 * TSO may only be used if we are in a pure bulk sending state. The 20651 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 20652 * options prevent using TSO. With TSO the TCP header is the same 20653 * (except for the sequence number) for all generated packets. This 20654 * makes it impossible to transmit any options which vary per 20655 * generated segment or packet. 20656 * 20657 * IPv4 handling has a clear separation of ip options and ip header 20658 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 20659 * the right thing below to provide length of just ip options and thus 20660 * checking for ipoptlen is enough to decide if ip options are present. 20661 */ 20662 ipoptlen = 0; 20663 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 20664 /* 20665 * Pre-calculate here as we save another lookup into the darknesses 20666 * of IPsec that way and can actually decide if TSO is ok. 
20667 */ 20668 #ifdef INET6 20669 if (isipv6 && IPSEC_ENABLED(ipv6)) 20670 ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp); 20671 #ifdef INET 20672 else 20673 #endif 20674 #endif /* INET6 */ 20675 #ifdef INET 20676 if (IPSEC_ENABLED(ipv4)) 20677 ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp); 20678 #endif /* INET */ 20679 #endif 20680 20681 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 20682 ipoptlen += ipsec_optlen; 20683 #endif 20684 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && 20685 (tp->t_port == 0) && 20686 ((tp->t_flags & TF_SIGNATURE) == 0) && 20687 sack_rxmit == 0 && 20688 ipoptlen == 0) 20689 tso = 1; 20690 { 20691 uint32_t outstanding __unused; 20692 20693 outstanding = tp->snd_max - tp->snd_una; 20694 if (tp->t_flags & TF_SENTFIN) { 20695 /* 20696 * If we sent a fin, snd_max is 1 higher than 20697 * snd_una 20698 */ 20699 outstanding--; 20700 } 20701 if (sack_rxmit) { 20702 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 20703 flags &= ~TH_FIN; 20704 } 20705 } 20706 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), 20707 (long)TCP_MAXWIN << tp->rcv_scale); 20708 20709 /* 20710 * Sender silly window avoidance. We transmit under the following 20711 * conditions when len is non-zero: 20712 * 20713 * - We have a full segment (or more with TSO) - This is the last 20714 * buffer in a write()/send() and we are either idle or running 20715 * NODELAY - we've timed out (e.g. persist timer) - we have more 20716 * then 1/2 the maximum send window's worth of data (receiver may be 20717 * limited the window size) - we need to retransmit 20718 */ 20719 if (len) { 20720 if (len >= segsiz) { 20721 goto send; 20722 } 20723 /* 20724 * NOTE! on localhost connections an 'ack' from the remote 20725 * end may occur synchronously with the output and cause us 20726 * to flush a buffer queued with moretocome. XXX 20727 * 20728 */ 20729 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 20730 (idle || (tp->t_flags & TF_NODELAY)) && 20731 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 20732 (tp->t_flags & TF_NOPUSH) == 0) { 20733 pass = 2; 20734 goto send; 20735 } 20736 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 20737 pass = 22; 20738 goto send; 20739 } 20740 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 20741 pass = 4; 20742 goto send; 20743 } 20744 if (sack_rxmit) { 20745 pass = 6; 20746 goto send; 20747 } 20748 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && 20749 (ctf_outstanding(tp) < (segsiz * 2))) { 20750 /* 20751 * We have less than two MSS outstanding (delayed ack) 20752 * and our rwnd will not let us send a full sized 20753 * MSS. Lets go ahead and let this small segment 20754 * out because we want to try to have at least two 20755 * packets inflight to not be caught by delayed ack. 20756 */ 20757 pass = 12; 20758 goto send; 20759 } 20760 } 20761 /* 20762 * Sending of standalone window updates. 20763 * 20764 * Window updates are important when we close our window due to a 20765 * full socket buffer and are opening it again after the application 20766 * reads data from it. Once the window has opened again and the 20767 * remote end starts to send again the ACK clock takes over and 20768 * provides the most current window information. 20769 * 20770 * We must avoid the silly window syndrome whereas every read from 20771 * the receive buffer, no matter how small, causes a window update 20772 * to be sent. 
We also should avoid sending a flurry of window 20773 * updates when the socket buffer had queued a lot of data and the 20774 * application is doing small reads. 20775 * 20776 * Prevent a flurry of pointless window updates by only sending an 20777 * update when we can increase the advertized window by more than 20778 * 1/4th of the socket buffer capacity. When the buffer is getting 20779 * full or is very small be more aggressive and send an update 20780 * whenever we can increase by two mss sized segments. In all other 20781 * situations the ACK's to new incoming data will carry further 20782 * window increases. 20783 * 20784 * Don't send an independent window update if a delayed ACK is 20785 * pending (it will get piggy-backed on it) or the remote side 20786 * already has done a half-close and won't send more data. Skip 20787 * this if the connection is in T/TCP half-open state. 20788 */ 20789 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 20790 !(tp->t_flags & TF_DELACK) && 20791 !TCPS_HAVERCVDFIN(tp->t_state)) { 20792 /* 20793 * "adv" is the amount we could increase the window, taking 20794 * into account that we are limited by TCP_MAXWIN << 20795 * tp->rcv_scale. 20796 */ 20797 int32_t adv; 20798 int oldwin; 20799 20800 adv = recwin; 20801 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 20802 oldwin = (tp->rcv_adv - tp->rcv_nxt); 20803 if (adv > oldwin) 20804 adv -= oldwin; 20805 else { 20806 /* We can't increase the window */ 20807 adv = 0; 20808 } 20809 } else 20810 oldwin = 0; 20811 20812 /* 20813 * If the new window size ends up being the same as or less 20814 * than the old size when it is scaled, then don't force 20815 * a window update. 20816 */ 20817 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 20818 goto dontupdate; 20819 20820 if (adv >= (int32_t)(2 * segsiz) && 20821 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 20822 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 20823 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 20824 pass = 7; 20825 goto send; 20826 } 20827 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 20828 pass = 23; 20829 goto send; 20830 } 20831 } 20832 dontupdate: 20833 20834 /* 20835 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 20836 * is also a catch-all for the retransmit timer timeout case. 20837 */ 20838 if (tp->t_flags & TF_ACKNOW) { 20839 pass = 8; 20840 goto send; 20841 } 20842 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 20843 pass = 9; 20844 goto send; 20845 } 20846 /* 20847 * If our state indicates that FIN should be sent and we have not 20848 * yet done so, then we need to send. 20849 */ 20850 if ((flags & TH_FIN) && 20851 (tp->snd_max == tp->snd_una)) { 20852 pass = 11; 20853 goto send; 20854 } 20855 /* 20856 * No reason to send a segment, just return. 20857 */ 20858 just_return: 20859 SOCK_SENDBUF_UNLOCK(so); 20860 just_return_nolock: 20861 { 20862 int app_limited = CTF_JR_SENT_DATA; 20863 20864 if ((tp->t_flags & TF_FASTOPEN) == 0 && 20865 (flags & TH_FIN) && 20866 (len == 0) && 20867 (sbused(sb) == (tp->snd_max - tp->snd_una)) && 20868 ((tp->snd_max - tp->snd_una) <= segsiz)) { 20869 /* 20870 * Ok less than or right at a MSS is 20871 * outstanding. The original FreeBSD stack would 20872 * have sent a FIN, which can speed things up for 20873 * a transactional application doing a MSG_WAITALL. 20874 * To speed things up since we do *not* send a FIN 20875 * if data is outstanding, we send a "challenge ack". 
20876 * The idea behind that is instead of having to have 20877 * the peer wait for the delayed-ack timer to run off 20878 * we send an ack that makes the peer send us an ack. 20879 */ 20880 rack_send_ack_challange(rack); 20881 } 20882 if (tot_len_this_send > 0) { 20883 rack->r_ctl.fsb.recwin = recwin; 20884 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz, __LINE__); 20885 if ((error == 0) && 20886 rack_use_rfo && 20887 ((flags & (TH_SYN|TH_FIN)) == 0) && 20888 (ipoptlen == 0) && 20889 rack->r_fsb_inited && 20890 TCPS_HAVEESTABLISHED(tp->t_state) && 20891 ((IN_RECOVERY(tp->t_flags)) == 0) && 20892 (doing_tlp == 0) && 20893 (rack->r_must_retran == 0) && 20894 ((tp->t_flags & TF_NEEDFIN) == 0) && 20895 (len > 0) && (orig_len > 0) && 20896 (orig_len > len) && 20897 ((orig_len - len) >= segsiz) && 20898 ((optlen == 0) || 20899 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 20900 /* We can send at least one more MSS using our fsb */ 20901 rack_setup_fast_output(tp, rack, sb, len, orig_len, 20902 segsiz, pace_max_seg, hw_tls, flags); 20903 } else 20904 rack->r_fast_output = 0; 20905 rack_log_fsb(rack, tp, so, flags, 20906 ipoptlen, orig_len, len, 0, 20907 1, optlen, __LINE__, 1); 20908 /* Assure when we leave that snd_nxt will point to top */ 20909 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 20910 tp->snd_nxt = tp->snd_max; 20911 } else { 20912 int end_window = 0; 20913 uint32_t seq = tp->gput_ack; 20914 20915 rsm = tqhash_max(rack->r_ctl.tqh); 20916 if (rsm) { 20917 /* 20918 * Mark the last sent that we just-returned (hinting 20919 * that delayed ack may play a role in any rtt measurement). 20920 */ 20921 rsm->r_just_ret = 1; 20922 } 20923 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 20924 rack->r_ctl.rc_agg_delayed = 0; 20925 rack->r_early = 0; 20926 rack->r_late = 0; 20927 rack->r_ctl.rc_agg_early = 0; 20928 if ((ctf_outstanding(tp) + 20929 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), 20930 minseg)) >= tp->snd_wnd) { 20931 /* We are limited by the rwnd */ 20932 app_limited = CTF_JR_RWND_LIMITED; 20933 if (IN_FASTRECOVERY(tp->t_flags)) 20934 rack->r_ctl.rc_prr_sndcnt = 0; 20935 } else if (ctf_outstanding(tp) >= sbavail(sb)) { 20936 /* We are limited by whats available -- app limited */ 20937 app_limited = CTF_JR_APP_LIMITED; 20938 if (IN_FASTRECOVERY(tp->t_flags)) 20939 rack->r_ctl.rc_prr_sndcnt = 0; 20940 } else if ((idle == 0) && 20941 ((tp->t_flags & TF_NODELAY) == 0) && 20942 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 20943 (len < segsiz)) { 20944 /* 20945 * No delay is not on and the 20946 * user is sending less than 1MSS. This 20947 * brings out SWS avoidance so we 20948 * don't send. Another app-limited case. 20949 */ 20950 app_limited = CTF_JR_APP_LIMITED; 20951 } else if (tp->t_flags & TF_NOPUSH) { 20952 /* 20953 * The user has requested no push of 20954 * the last segment and we are 20955 * at the last segment. Another app 20956 * limited case. 20957 */ 20958 app_limited = CTF_JR_APP_LIMITED; 20959 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) { 20960 /* Its the cwnd */ 20961 app_limited = CTF_JR_CWND_LIMITED; 20962 } else if (IN_FASTRECOVERY(tp->t_flags) && 20963 (rack->rack_no_prr == 0) && 20964 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { 20965 app_limited = CTF_JR_PRR; 20966 } else { 20967 /* Now why here are we not sending? 
*/ 20968 #ifdef NOW 20969 #ifdef INVARIANTS 20970 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use); 20971 #endif 20972 #endif 20973 app_limited = CTF_JR_ASSESSING; 20974 } 20975 /* 20976 * App limited in some fashion, for our pacing GP 20977 * measurements we don't want any gap (even cwnd). 20978 * Close down the measurement window. 20979 */ 20980 if (rack_cwnd_block_ends_measure && 20981 ((app_limited == CTF_JR_CWND_LIMITED) || 20982 (app_limited == CTF_JR_PRR))) { 20983 /* 20984 * The reason we are not sending is 20985 * the cwnd (or prr). We have been configured 20986 * to end the measurement window in 20987 * this case. 20988 */ 20989 end_window = 1; 20990 } else if (rack_rwnd_block_ends_measure && 20991 (app_limited == CTF_JR_RWND_LIMITED)) { 20992 /* 20993 * We are rwnd limited and have been 20994 * configured to end the measurement 20995 * window in this case. 20996 */ 20997 end_window = 1; 20998 } else if (app_limited == CTF_JR_APP_LIMITED) { 20999 /* 21000 * A true application limited period, we have 21001 * ran out of data. 21002 */ 21003 end_window = 1; 21004 } else if (app_limited == CTF_JR_ASSESSING) { 21005 /* 21006 * In the assessing case we hit the end of 21007 * the if/else and had no known reason 21008 * This will panic us under invariants.. 21009 * 21010 * If we get this out in logs we need to 21011 * investagate which reason we missed. 21012 */ 21013 end_window = 1; 21014 } 21015 if (end_window) { 21016 uint8_t log = 0; 21017 21018 /* Adjust the Gput measurement */ 21019 if ((tp->t_flags & TF_GPUTINPROG) && 21020 SEQ_GT(tp->gput_ack, tp->snd_max)) { 21021 tp->gput_ack = tp->snd_max; 21022 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 21023 /* 21024 * There is not enough to measure. 21025 */ 21026 tp->t_flags &= ~TF_GPUTINPROG; 21027 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 21028 rack->r_ctl.rc_gp_srtt /*flex1*/, 21029 tp->gput_seq, 21030 0, 0, 18, __LINE__, NULL, 0); 21031 } else 21032 log = 1; 21033 } 21034 /* Mark the last packet has app limited */ 21035 rsm = tqhash_max(rack->r_ctl.tqh); 21036 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 21037 if (rack->r_ctl.rc_app_limited_cnt == 0) 21038 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 21039 else { 21040 /* 21041 * Go out to the end app limited and mark 21042 * this new one as next and move the end_appl up 21043 * to this guy. 
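 * In other words, chain the previous rc_end_appl to this rsm via
 * r_nseq_appl and make this rsm the new tail of the app-limited list.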
21044 */ 21045 if (rack->r_ctl.rc_end_appl) 21046 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 21047 rack->r_ctl.rc_end_appl = rsm; 21048 } 21049 rsm->r_flags |= RACK_APP_LIMITED; 21050 rack->r_ctl.rc_app_limited_cnt++; 21051 } 21052 if (log) 21053 rack_log_pacing_delay_calc(rack, 21054 rack->r_ctl.rc_app_limited_cnt, seq, 21055 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); 21056 } 21057 } 21058 /* Check if we need to go into persists or not */ 21059 if ((tp->snd_max == tp->snd_una) && 21060 TCPS_HAVEESTABLISHED(tp->t_state) && 21061 sbavail(sb) && 21062 (sbavail(sb) > tp->snd_wnd) && 21063 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 21064 /* Yes lets make sure to move to persist before timer-start */ 21065 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); 21066 } 21067 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 21068 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use); 21069 } 21070 #ifdef NETFLIX_SHARED_CWND 21071 if ((sbavail(sb) == 0) && 21072 rack->r_ctl.rc_scw) { 21073 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 21074 rack->rack_scwnd_is_idle = 1; 21075 } 21076 #endif 21077 #ifdef TCP_ACCOUNTING 21078 if (tot_len_this_send > 0) { 21079 crtsc = get_cyclecount(); 21080 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21081 tp->tcp_cnt_counters[SND_OUT_DATA]++; 21082 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 21083 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 21084 } 21085 } else { 21086 crtsc = get_cyclecount(); 21087 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21088 tp->tcp_cnt_counters[SND_LIMITED]++; 21089 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 21090 } 21091 } 21092 sched_unpin(); 21093 #endif 21094 return (0); 21095 21096 send: 21097 if ((rack->r_ctl.crte != NULL) && 21098 (rsm == NULL) && 21099 ((rack->rc_hw_nobuf == 1) || 21100 (rack_hw_check_queue && (check_done == 0)))) { 21101 /* 21102 * We only want to do this once with the hw_check_queue, 21103 * for the enobuf case we would only do it once if 21104 * we come around to again, the flag will be clear. 21105 */ 21106 check_done = 1; 21107 slot = rack_check_queue_level(rack, tp, &tv, cts, len, segsiz); 21108 if (slot) { 21109 rack->r_ctl.rc_agg_delayed = 0; 21110 rack->r_ctl.rc_agg_early = 0; 21111 rack->r_early = 0; 21112 rack->r_late = 0; 21113 SOCK_SENDBUF_UNLOCK(so); 21114 goto skip_all_send; 21115 } 21116 } 21117 if (rsm || sack_rxmit) 21118 counter_u64_add(rack_nfto_resend, 1); 21119 else 21120 counter_u64_add(rack_non_fto_send, 1); 21121 if ((flags & TH_FIN) && 21122 sbavail(sb)) { 21123 /* 21124 * We do not transmit a FIN 21125 * with data outstanding. We 21126 * need to make it so all data 21127 * is acked first. 21128 */ 21129 flags &= ~TH_FIN; 21130 if (TCPS_HAVEESTABLISHED(tp->t_state) && 21131 (sbused(sb) == (tp->snd_max - tp->snd_una)) && 21132 ((tp->snd_max - tp->snd_una) <= segsiz)) { 21133 /* 21134 * Ok less than or right at a MSS is 21135 * outstanding. The original FreeBSD stack would 21136 * have sent a FIN, which can speed things up for 21137 * a transactional application doing a MSG_WAITALL. 21138 * To speed things up since we do *not* send a FIN 21139 * if data is outstanding, we send a "challenge ack". 21140 * The idea behind that is instead of having to have 21141 * the peer wait for the delayed-ack timer to run off 21142 * we send an ack that makes the peer send us an ack. 
21143 */ 21144 rack_send_ack_challange(rack); 21145 } 21146 } 21147 /* Enforce stack imposed max seg size if we have one */ 21148 if (pace_max_seg && 21149 (len > pace_max_seg)) { 21150 mark = 1; 21151 len = pace_max_seg; 21152 } 21153 if ((rsm == NULL) && 21154 (rack->pcm_in_progress == 0) && 21155 (rack->r_ctl.pcm_max_seg > 0) && 21156 (len >= rack->r_ctl.pcm_max_seg)) { 21157 /* It is large enough for a measurement */ 21158 add_flag |= RACK_IS_PCM; 21159 rack_log_pcm(rack, 5, len, rack->r_ctl.pcm_max_seg, add_flag); 21160 } else if (rack_verbose_logging) { 21161 rack_log_pcm(rack, 6, len, rack->r_ctl.pcm_max_seg, add_flag); 21162 } 21163 21164 SOCKBUF_LOCK_ASSERT(sb); 21165 if (len > 0) { 21166 if (len >= segsiz) 21167 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 21168 else 21169 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 21170 } 21171 /* 21172 * Before ESTABLISHED, force sending of initial options unless TCP 21173 * set not to do any options. NOTE: we assume that the IP/TCP header 21174 * plus TCP options always fit in a single mbuf, leaving room for a 21175 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 21176 * + optlen <= MCLBYTES 21177 */ 21178 optlen = 0; 21179 #ifdef INET6 21180 if (isipv6) 21181 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 21182 else 21183 #endif 21184 hdrlen = sizeof(struct tcpiphdr); 21185 21186 /* 21187 * Ok what seq are we sending from. If we have 21188 * no rsm to use, then we look at various bits, 21189 * if we are putting out a SYN it will be ISS. 21190 * If we are retransmitting a FIN it will 21191 * be snd_max-1 else its snd_max. 21192 */ 21193 if (rsm == NULL) { 21194 if (flags & TH_SYN) 21195 rack_seq = tp->iss; 21196 else if ((flags & TH_FIN) && 21197 (tp->t_flags & TF_SENTFIN)) 21198 rack_seq = tp->snd_max - 1; 21199 else 21200 rack_seq = tp->snd_max; 21201 } else { 21202 rack_seq = rsm->r_start; 21203 } 21204 /* 21205 * Compute options for segment. We only have to care about SYN and 21206 * established connection segments. Options for SYN-ACK segments 21207 * are handled in TCP syncache. 21208 */ 21209 to.to_flags = 0; 21210 if ((tp->t_flags & TF_NOOPT) == 0) { 21211 /* Maximum segment size. */ 21212 if (flags & TH_SYN) { 21213 to.to_mss = tcp_mssopt(&inp->inp_inc); 21214 if (tp->t_port) 21215 to.to_mss -= V_tcp_udp_tunneling_overhead; 21216 to.to_flags |= TOF_MSS; 21217 21218 /* 21219 * On SYN or SYN|ACK transmits on TFO connections, 21220 * only include the TFO option if it is not a 21221 * retransmit, as the presence of the TFO option may 21222 * have caused the original SYN or SYN|ACK to have 21223 * been dropped by a middlebox. 21224 */ 21225 if ((tp->t_flags & TF_FASTOPEN) && 21226 (tp->t_rxtshift == 0)) { 21227 if (tp->t_state == TCPS_SYN_RECEIVED) { 21228 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 21229 to.to_tfo_cookie = 21230 (u_int8_t *)&tp->t_tfo_cookie.server; 21231 to.to_flags |= TOF_FASTOPEN; 21232 wanted_cookie = 1; 21233 } else if (tp->t_state == TCPS_SYN_SENT) { 21234 to.to_tfo_len = 21235 tp->t_tfo_client_cookie_len; 21236 to.to_tfo_cookie = 21237 tp->t_tfo_cookie.client; 21238 to.to_flags |= TOF_FASTOPEN; 21239 wanted_cookie = 1; 21240 /* 21241 * If we wind up having more data to 21242 * send with the SYN than can fit in 21243 * one segment, don't send any more 21244 * until the SYN|ACK comes back from 21245 * the other end. 21246 */ 21247 sendalot = 0; 21248 } 21249 } 21250 } 21251 /* Window scaling. 
*/ 21252 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 21253 to.to_wscale = tp->request_r_scale; 21254 to.to_flags |= TOF_SCALE; 21255 } 21256 /* Timestamps. */ 21257 if ((tp->t_flags & TF_RCVD_TSTMP) || 21258 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 21259 uint32_t ts_to_use; 21260 21261 if ((rack->r_rcvpath_rtt_up == 1) && 21262 (ms_cts == rack->r_ctl.last_rcv_tstmp_for_rtt)) { 21263 /* 21264 * When we are doing a rcv_rtt probe all 21265 * other timestamps use the next msec. This 21266 * is safe since our previous ack is in the 21267 * air and we will just have a few more 21268 * on the next ms. This assures that only 21269 * the one ack has the ms_cts that was on 21270 * our ack-probe. 21271 */ 21272 ts_to_use = ms_cts + 1; 21273 } else { 21274 ts_to_use = ms_cts; 21275 } 21276 to.to_tsval = ts_to_use + tp->ts_offset; 21277 to.to_tsecr = tp->ts_recent; 21278 to.to_flags |= TOF_TS; 21279 if ((len == 0) && 21280 (TCPS_HAVEESTABLISHED(tp->t_state)) && 21281 ((ms_cts - rack->r_ctl.last_rcv_tstmp_for_rtt) > RCV_PATH_RTT_MS) && 21282 (tp->snd_una == tp->snd_max) && 21283 (flags & TH_ACK) && 21284 (sbavail(sb) == 0) && 21285 (rack->r_ctl.current_round != 0) && 21286 ((flags & (TH_SYN|TH_FIN)) == 0) && 21287 (rack->r_rcvpath_rtt_up == 0)) { 21288 rack->r_ctl.last_rcv_tstmp_for_rtt = ms_cts; 21289 rack->r_ctl.last_time_of_arm_rcv = cts; 21290 rack->r_rcvpath_rtt_up = 1; 21291 /* Subtract 1 from seq to force a response */ 21292 rack_seq--; 21293 } 21294 } 21295 /* Set receive buffer autosizing timestamp. */ 21296 if (tp->rfbuf_ts == 0 && 21297 (so->so_rcv.sb_flags & SB_AUTOSIZE)) { 21298 tp->rfbuf_ts = ms_cts; 21299 } 21300 /* Selective ACK's. */ 21301 if (tp->t_flags & TF_SACK_PERMIT) { 21302 if (flags & TH_SYN) 21303 to.to_flags |= TOF_SACKPERM; 21304 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 21305 tp->rcv_numsacks > 0) { 21306 to.to_flags |= TOF_SACK; 21307 to.to_nsacks = tp->rcv_numsacks; 21308 to.to_sacks = (u_char *)tp->sackblks; 21309 } 21310 } 21311 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 21312 /* TCP-MD5 (RFC2385). */ 21313 if (tp->t_flags & TF_SIGNATURE) 21314 to.to_flags |= TOF_SIGNATURE; 21315 #endif 21316 21317 /* Processing the options. */ 21318 hdrlen += optlen = tcp_addoptions(&to, opt); 21319 /* 21320 * If we wanted a TFO option to be added, but it was unable 21321 * to fit, ensure no data is sent. 21322 */ 21323 if ((tp->t_flags & TF_FASTOPEN) && wanted_cookie && 21324 !(to.to_flags & TOF_FASTOPEN)) 21325 len = 0; 21326 } 21327 if (tp->t_port) { 21328 if (V_tcp_udp_tunneling_port == 0) { 21329 /* The port was removed?? */ 21330 SOCK_SENDBUF_UNLOCK(so); 21331 #ifdef TCP_ACCOUNTING 21332 crtsc = get_cyclecount(); 21333 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21334 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 21335 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 21336 } 21337 sched_unpin(); 21338 #endif 21339 return (EHOSTUNREACH); 21340 } 21341 hdrlen += sizeof(struct udphdr); 21342 } 21343 #ifdef INET6 21344 if (isipv6) 21345 ipoptlen = ip6_optlen(inp); 21346 else 21347 #endif 21348 if (inp->inp_options) 21349 ipoptlen = inp->inp_options->m_len - 21350 offsetof(struct ipoption, ipopt_list); 21351 else 21352 ipoptlen = 0; 21353 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 21354 ipoptlen += ipsec_optlen; 21355 #endif 21356 21357 /* 21358 * Adjust data length if insertion of options will bump the packet 21359 * length beyond the t_maxseg length. Clear the FIN bit because we 21360 * cut off the tail of the segment. 
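 * In the non-TSO branch this works out to len = t_maxseg - optlen -
 * ipoptlen; e.g. a 1460 byte t_maxseg carrying a 12 byte timestamp
 * option and no IP options leaves 1448 bytes of payload.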
21361 */ 21362 if (len + optlen + ipoptlen > tp->t_maxseg) { 21363 if (tso) { 21364 uint32_t if_hw_tsomax; 21365 uint32_t moff; 21366 int32_t max_len; 21367 21368 /* extract TSO information */ 21369 if_hw_tsomax = tp->t_tsomax; 21370 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 21371 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 21372 KASSERT(ipoptlen == 0, 21373 ("%s: TSO can't do IP options", __func__)); 21374 21375 /* 21376 * Check if we should limit by maximum payload 21377 * length: 21378 */ 21379 if (if_hw_tsomax != 0) { 21380 /* compute maximum TSO length */ 21381 max_len = (if_hw_tsomax - hdrlen - 21382 max_linkhdr); 21383 if (max_len <= 0) { 21384 len = 0; 21385 } else if (len > max_len) { 21386 if (doing_tlp == 0) 21387 sendalot = 1; 21388 len = max_len; 21389 mark = 2; 21390 } 21391 } 21392 /* 21393 * Prevent the last segment from being fractional 21394 * unless the send sockbuf can be emptied: 21395 */ 21396 max_len = (tp->t_maxseg - optlen); 21397 if ((sb_offset + len) < sbavail(sb)) { 21398 moff = len % (u_int)max_len; 21399 if (moff != 0) { 21400 mark = 3; 21401 len -= moff; 21402 } 21403 } 21404 /* 21405 * In case there are too many small fragments don't 21406 * use TSO: 21407 */ 21408 if (len <= max_len) { 21409 mark = 4; 21410 tso = 0; 21411 } 21412 /* 21413 * Send the FIN in a separate segment after the bulk 21414 * sending is done. We don't trust the TSO 21415 * implementations to clear the FIN flag on all but 21416 * the last segment. 21417 */ 21418 if (tp->t_flags & TF_NEEDFIN) { 21419 sendalot = 4; 21420 } 21421 } else { 21422 mark = 5; 21423 if (optlen + ipoptlen >= tp->t_maxseg) { 21424 /* 21425 * Since we don't have enough space to put 21426 * the IP header chain and the TCP header in 21427 * one packet as required by RFC 7112, don't 21428 * send it. Also ensure that at least one 21429 * byte of the payload can be put into the 21430 * TCP segment. 21431 */ 21432 SOCK_SENDBUF_UNLOCK(so); 21433 error = EMSGSIZE; 21434 sack_rxmit = 0; 21435 goto out; 21436 } 21437 len = tp->t_maxseg - optlen - ipoptlen; 21438 sendalot = 5; 21439 } 21440 } else { 21441 tso = 0; 21442 mark = 6; 21443 } 21444 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 21445 ("%s: len > IP_MAXPACKET", __func__)); 21446 #ifdef DIAGNOSTIC 21447 #ifdef INET6 21448 if (max_linkhdr + hdrlen > MCLBYTES) 21449 #else 21450 if (max_linkhdr + hdrlen > MHLEN) 21451 #endif 21452 panic("tcphdr too big"); 21453 #endif 21454 21455 /* 21456 * This KASSERT is here to catch edge cases at a well defined place. 21457 * Before, those had triggered (random) panic conditions further 21458 * down. 21459 */ 21460 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 21461 if ((len == 0) && 21462 (flags & TH_FIN) && 21463 (sbused(sb))) { 21464 /* 21465 * We have outstanding data, don't send a fin by itself!. 21466 * 21467 * Check to see if we need to send a challenge ack. 21468 */ 21469 if ((sbused(sb) == (tp->snd_max - tp->snd_una)) && 21470 ((tp->snd_max - tp->snd_una) <= segsiz)) { 21471 /* 21472 * Ok less than or right at a MSS is 21473 * outstanding. The original FreeBSD stack would 21474 * have sent a FIN, which can speed things up for 21475 * a transactional application doing a MSG_WAITALL. 21476 * To speed things up since we do *not* send a FIN 21477 * if data is outstanding, we send a "challenge ack". 21478 * The idea behind that is instead of having to have 21479 * the peer wait for the delayed-ack timer to run off 21480 * we send an ack that makes the peer send us an ack. 
21481 */ 21482 rack_send_ack_challange(rack); 21483 } 21484 goto just_return; 21485 } 21486 /* 21487 * Grab a header mbuf, attaching a copy of data to be transmitted, 21488 * and initialize the header from the template for sends on this 21489 * connection. 21490 */ 21491 hw_tls = tp->t_nic_ktls_xmit != 0; 21492 if (len) { 21493 uint32_t max_val; 21494 uint32_t moff; 21495 21496 if (pace_max_seg) 21497 max_val = pace_max_seg; 21498 else 21499 max_val = len; 21500 /* 21501 * We allow a limit on sending with hptsi. 21502 */ 21503 if (len > max_val) { 21504 mark = 7; 21505 len = max_val; 21506 } 21507 #ifdef INET6 21508 if (MHLEN < hdrlen + max_linkhdr) 21509 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 21510 else 21511 #endif 21512 m = m_gethdr(M_NOWAIT, MT_DATA); 21513 21514 if (m == NULL) { 21515 SOCK_SENDBUF_UNLOCK(so); 21516 error = ENOBUFS; 21517 sack_rxmit = 0; 21518 goto out; 21519 } 21520 m->m_data += max_linkhdr; 21521 m->m_len = hdrlen; 21522 21523 /* 21524 * Start the m_copy functions from the closest mbuf to the 21525 * sb_offset in the socket buffer chain. 21526 */ 21527 mb = sbsndptr_noadv(sb, sb_offset, &moff); 21528 s_mb = mb; 21529 s_moff = moff; 21530 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 21531 m_copydata(mb, moff, (int)len, 21532 mtod(m, caddr_t)+hdrlen); 21533 /* 21534 * If we are not retransmitting advance the 21535 * sndptr to help remember the next place in 21536 * the sb. 21537 */ 21538 if (rsm == NULL) 21539 sbsndptr_adv(sb, mb, len); 21540 m->m_len += len; 21541 } else { 21542 struct sockbuf *msb; 21543 21544 /* 21545 * If we are not retransmitting pass in msb so 21546 * the socket buffer can be advanced. Otherwise 21547 * set it to NULL if its a retransmission since 21548 * we don't want to change the sb remembered 21549 * location. 21550 */ 21551 if (rsm == NULL) 21552 msb = sb; 21553 else 21554 msb = NULL; 21555 m->m_next = tcp_m_copym( 21556 mb, moff, &len, 21557 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 21558 ((rsm == NULL) ? hw_tls : 0) 21559 #ifdef NETFLIX_COPY_ARGS 21560 , &s_mb, &s_moff 21561 #endif 21562 ); 21563 if (len <= (tp->t_maxseg - optlen)) { 21564 /* 21565 * Must have ran out of mbufs for the copy 21566 * shorten it to no longer need tso. Lets 21567 * not put on sendalot since we are low on 21568 * mbufs. 21569 */ 21570 tso = 0; 21571 } 21572 if (m->m_next == NULL) { 21573 SOCK_SENDBUF_UNLOCK(so); 21574 (void)m_free(m); 21575 error = ENOBUFS; 21576 sack_rxmit = 0; 21577 goto out; 21578 } 21579 } 21580 if (sack_rxmit) { 21581 if (rsm && (rsm->r_flags & RACK_TLP)) { 21582 /* 21583 * TLP should not count in retran count, but 21584 * in its own bin 21585 */ 21586 counter_u64_add(rack_tlp_retran, 1); 21587 counter_u64_add(rack_tlp_retran_bytes, len); 21588 } else { 21589 tp->t_sndrexmitpack++; 21590 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 21591 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 21592 } 21593 #ifdef STATS 21594 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 21595 len); 21596 #endif 21597 } else { 21598 KMOD_TCPSTAT_INC(tcps_sndpack); 21599 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 21600 #ifdef STATS 21601 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 21602 len); 21603 #endif 21604 } 21605 /* 21606 * If we're sending everything we've got, set PUSH. (This 21607 * will keep happy those implementations which only give 21608 * data to the user when a buffer fills or a PUSH comes in.) 
21609 */ 21610 if (sb_offset + len == sbused(sb) && 21611 sbused(sb) && 21612 !(flags & TH_SYN)) { 21613 flags |= TH_PUSH; 21614 add_flag |= RACK_HAD_PUSH; 21615 } 21616 SOCK_SENDBUF_UNLOCK(so); 21617 } else { 21618 SOCK_SENDBUF_UNLOCK(so); 21619 if (tp->t_flags & TF_ACKNOW) 21620 KMOD_TCPSTAT_INC(tcps_sndacks); 21621 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 21622 KMOD_TCPSTAT_INC(tcps_sndctrl); 21623 else 21624 KMOD_TCPSTAT_INC(tcps_sndwinup); 21625 21626 m = m_gethdr(M_NOWAIT, MT_DATA); 21627 if (m == NULL) { 21628 error = ENOBUFS; 21629 sack_rxmit = 0; 21630 goto out; 21631 } 21632 #ifdef INET6 21633 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 21634 MHLEN >= hdrlen) { 21635 M_ALIGN(m, hdrlen); 21636 } else 21637 #endif 21638 m->m_data += max_linkhdr; 21639 m->m_len = hdrlen; 21640 } 21641 SOCK_SENDBUF_UNLOCK_ASSERT(so); 21642 m->m_pkthdr.rcvif = (struct ifnet *)0; 21643 #ifdef MAC 21644 mac_inpcb_create_mbuf(inp, m); 21645 #endif 21646 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 21647 #ifdef INET6 21648 if (isipv6) 21649 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 21650 else 21651 #endif /* INET6 */ 21652 #ifdef INET 21653 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 21654 #endif 21655 th = rack->r_ctl.fsb.th; 21656 udp = rack->r_ctl.fsb.udp; 21657 if (udp) { 21658 #ifdef INET6 21659 if (isipv6) 21660 ulen = hdrlen + len - sizeof(struct ip6_hdr); 21661 else 21662 #endif /* INET6 */ 21663 ulen = hdrlen + len - sizeof(struct ip); 21664 udp->uh_ulen = htons(ulen); 21665 } 21666 } else { 21667 #ifdef INET6 21668 if (isipv6) { 21669 ip6 = mtod(m, struct ip6_hdr *); 21670 if (tp->t_port) { 21671 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 21672 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 21673 udp->uh_dport = tp->t_port; 21674 ulen = hdrlen + len - sizeof(struct ip6_hdr); 21675 udp->uh_ulen = htons(ulen); 21676 th = (struct tcphdr *)(udp + 1); 21677 } else 21678 th = (struct tcphdr *)(ip6 + 1); 21679 tcpip_fillheaders(inp, tp->t_port, ip6, th); 21680 } else 21681 #endif /* INET6 */ 21682 { 21683 #ifdef INET 21684 ip = mtod(m, struct ip *); 21685 if (tp->t_port) { 21686 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 21687 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 21688 udp->uh_dport = tp->t_port; 21689 ulen = hdrlen + len - sizeof(struct ip); 21690 udp->uh_ulen = htons(ulen); 21691 th = (struct tcphdr *)(udp + 1); 21692 } else 21693 th = (struct tcphdr *)(ip + 1); 21694 tcpip_fillheaders(inp, tp->t_port, ip, th); 21695 #endif 21696 } 21697 } 21698 /* 21699 * If we are starting a connection, send ECN setup SYN packet. If we 21700 * are on a retransmit, we may resend those bits a number of times 21701 * as per RFC 3168. 
21702 */ 21703 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { 21704 flags |= tcp_ecn_output_syn_sent(tp); 21705 } 21706 /* Also handle parallel SYN for ECN */ 21707 if (TCPS_HAVERCVDSYN(tp->t_state) && 21708 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 21709 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit); 21710 if ((tp->t_state == TCPS_SYN_RECEIVED) && 21711 (tp->t_flags2 & TF2_ECN_SND_ECE)) 21712 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 21713 #ifdef INET6 21714 if (isipv6) { 21715 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 21716 ip6->ip6_flow |= htonl(ect << 20); 21717 } 21718 else 21719 #endif 21720 { 21721 #ifdef INET 21722 ip->ip_tos &= ~IPTOS_ECN_MASK; 21723 ip->ip_tos |= ect; 21724 #endif 21725 } 21726 } 21727 th->th_seq = htonl(rack_seq); 21728 th->th_ack = htonl(tp->rcv_nxt); 21729 tcp_set_flags(th, flags); 21730 /* 21731 * Calculate receive window. Don't shrink window, but avoid silly 21732 * window syndrome. 21733 * If a RST segment is sent, advertise a window of zero. 21734 */ 21735 if (flags & TH_RST) { 21736 recwin = 0; 21737 } else { 21738 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 21739 recwin < (long)segsiz) { 21740 recwin = 0; 21741 } 21742 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 21743 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 21744 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 21745 } 21746 21747 /* 21748 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 21749 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 21750 * handled in syncache. 21751 */ 21752 if (flags & TH_SYN) 21753 th->th_win = htons((u_short) 21754 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 21755 else { 21756 /* Avoid shrinking window with window scaling. */ 21757 recwin = roundup2(recwin, 1 << tp->rcv_scale); 21758 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 21759 } 21760 /* 21761 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 21762 * window. This may cause the remote transmitter to stall. This 21763 * flag tells soreceive() to disable delayed acknowledgements when 21764 * draining the buffer. This can occur if the receiver is 21765 * attempting to read more data than can be buffered prior to 21766 * transmitting on the connection. 21767 */ 21768 if (th->th_win == 0) { 21769 tp->t_sndzerowin++; 21770 tp->t_flags |= TF_RXWIN0SENT; 21771 } else 21772 tp->t_flags &= ~TF_RXWIN0SENT; 21773 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 21774 /* Now are we using fsb?, if so copy the template data to the mbuf */ 21775 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 21776 uint8_t *cpto; 21777 21778 cpto = mtod(m, uint8_t *); 21779 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 21780 /* 21781 * We have just copied in: 21782 * IP/IP6 21783 * <optional udphdr> 21784 * tcphdr (no options) 21785 * 21786 * We need to grab the correct pointers into the mbuf 21787 * for both the tcp header, and possibly the udp header (if tunneling). 21788 * We do this by using the offset in the copy buffer and adding it 21789 * to the mbuf base pointer (cpto). 
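 * For example, with plain IPv4 and no tunneling th lands at cpto + sizeof(struct ip); with UDP tunneling the udp header sits between the IP and TCP headers, so th is a further sizeof(struct udphdr) bytes in.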
21790 */ 21791 #ifdef INET6 21792 if (isipv6) 21793 ip6 = mtod(m, struct ip6_hdr *); 21794 else 21795 #endif /* INET6 */ 21796 #ifdef INET 21797 ip = mtod(m, struct ip *); 21798 #endif 21799 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 21800 /* If we have a udp header lets set it into the mbuf as well */ 21801 if (udp) 21802 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); 21803 } 21804 if (optlen) { 21805 bcopy(opt, th + 1, optlen); 21806 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 21807 } 21808 /* 21809 * Put TCP length in extended header, and then checksum extended 21810 * header and data. 21811 */ 21812 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 21813 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 21814 if (to.to_flags & TOF_SIGNATURE) { 21815 /* 21816 * Calculate MD5 signature and put it into the place 21817 * determined before. 21818 * NOTE: since TCP options buffer doesn't point into 21819 * mbuf's data, calculate offset and use it. 21820 */ 21821 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 21822 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 21823 /* 21824 * Do not send segment if the calculation of MD5 21825 * digest has failed. 21826 */ 21827 goto out; 21828 } 21829 } 21830 #endif 21831 #ifdef INET6 21832 if (isipv6) { 21833 /* 21834 * ip6_plen is not need to be filled now, and will be filled 21835 * in ip6_output. 21836 */ 21837 if (tp->t_port) { 21838 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 21839 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 21840 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 21841 th->th_sum = htons(0); 21842 UDPSTAT_INC(udps_opackets); 21843 } else { 21844 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 21845 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 21846 th->th_sum = in6_cksum_pseudo(ip6, 21847 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 21848 0); 21849 } 21850 } 21851 #endif 21852 #if defined(INET6) && defined(INET) 21853 else 21854 #endif 21855 #ifdef INET 21856 { 21857 if (tp->t_port) { 21858 m->m_pkthdr.csum_flags = CSUM_UDP; 21859 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 21860 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 21861 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 21862 th->th_sum = htons(0); 21863 UDPSTAT_INC(udps_opackets); 21864 } else { 21865 m->m_pkthdr.csum_flags = CSUM_TCP; 21866 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 21867 th->th_sum = in_pseudo(ip->ip_src.s_addr, 21868 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 21869 IPPROTO_TCP + len + optlen)); 21870 } 21871 /* IP version must be set here for ipv4/ipv6 checking later */ 21872 KASSERT(ip->ip_v == IPVERSION, 21873 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 21874 } 21875 #endif 21876 /* 21877 * Enable TSO and specify the size of the segments. The TCP pseudo 21878 * header checksum is always provided. XXX: Fixme: This is currently 21879 * not the case for IPv6. 21880 */ 21881 if (tso) { 21882 /* 21883 * Here we must use t_maxseg and the optlen since 21884 * the optlen may include SACK's (or DSACK). 
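 * For example, with a 1460-byte t_maxseg and 12 bytes of timestamp options, tso_segsz below becomes 1448, so every segment the TSO engine carves out leaves room for the options replicated into each packet.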
21885 */ 21886 KASSERT(len > tp->t_maxseg - optlen, 21887 ("%s: len <= tso_segsz", __func__)); 21888 m->m_pkthdr.csum_flags |= CSUM_TSO; 21889 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 21890 } 21891 KASSERT(len + hdrlen == m_length(m, NULL), 21892 ("%s: mbuf chain different than expected: %d + %u != %u", 21893 __func__, len, hdrlen, m_length(m, NULL))); 21894 21895 #ifdef TCP_HHOOK 21896 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 21897 hhook_run_tcp_est_out(tp, th, &to, len, tso); 21898 #endif 21899 if ((rack->r_ctl.crte != NULL) && 21900 (rack->rc_hw_nobuf == 0) && 21901 tcp_bblogging_on(tp)) { 21902 rack_log_queue_level(tp, rack, len, &tv, cts); 21903 } 21904 /* We're getting ready to send; log now. */ 21905 if (tcp_bblogging_on(rack->rc_tp)) { 21906 union tcp_log_stackspecific log; 21907 21908 memset(&log, 0, sizeof(log)); 21909 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 21910 if (rack->rack_no_prr) 21911 log.u_bbr.flex1 = 0; 21912 else 21913 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 21914 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 21915 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 21916 log.u_bbr.flex4 = orig_len; 21917 /* Save off the early/late values */ 21918 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 21919 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 21920 log.u_bbr.bw_inuse = rack_get_bw(rack); 21921 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 21922 log.u_bbr.flex8 = 0; 21923 if (rsm) { 21924 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 21925 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 21926 counter_u64_add(rack_collapsed_win_rxt, 1); 21927 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 21928 } 21929 if (doing_tlp) 21930 log.u_bbr.flex8 = 2; 21931 else 21932 log.u_bbr.flex8 = 1; 21933 } else { 21934 if (doing_tlp) 21935 log.u_bbr.flex8 = 3; 21936 } 21937 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 21938 log.u_bbr.flex7 = mark; 21939 log.u_bbr.flex7 <<= 8; 21940 log.u_bbr.flex7 |= pass; 21941 log.u_bbr.pkts_out = tp->t_maxseg; 21942 log.u_bbr.timeStamp = cts; 21943 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 21944 if (rsm && (rsm->r_rtr_cnt > 0)) { 21945 /* 21946 * When we have a retransmit we want to log the 21947 * burst at send and flight at send from before. 21948 */ 21949 log.u_bbr.flex5 = rsm->r_fas; 21950 log.u_bbr.bbr_substate = rsm->r_bas; 21951 } else { 21952 /* 21953 * New transmits we log in flex5 the inflight again as 21954 * well as the number of segments in our send in the 21955 * substate field. 21956 */ 21957 log.u_bbr.flex5 = log.u_bbr.inflight; 21958 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 21959 } 21960 log.u_bbr.lt_epoch = cwnd_to_use; 21961 log.u_bbr.delivered = sendalot; 21962 log.u_bbr.rttProp = (uintptr_t)rsm; 21963 log.u_bbr.pkt_epoch = __LINE__; 21964 if (rsm) { 21965 log.u_bbr.delRate = rsm->r_flags; 21966 log.u_bbr.delRate <<= 31; 21967 log.u_bbr.delRate |= rack->r_must_retran; 21968 log.u_bbr.delRate <<= 1; 21969 log.u_bbr.delRate |= (sack_rxmit & 0x00000001); 21970 } else { 21971 log.u_bbr.delRate = rack->r_must_retran; 21972 log.u_bbr.delRate <<= 1; 21973 log.u_bbr.delRate |= (sack_rxmit & 0x00000001); 21974 } 21975 lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 21976 len, &log, false, NULL, __func__, __LINE__, &tv); 21977 } else 21978 lgb = NULL; 21979 21980 /* 21981 * Fill in IP length and desired time to live and send to IP level. 
21982 * There should be a better way to handle ttl and tos; we could keep 21983 * them in the template, but need a way to checksum without them. 21984 */ 21985 /* 21986 * m->m_pkthdr.len should have been set before cksum calculation, 21987 * because in6_cksum() needs it. 21988 */ 21989 #ifdef INET6 21990 if (isipv6) { 21991 /* 21992 * We separately set hoplimit for every segment, since the 21993 * user might want to change the value via setsockopt. Also, 21994 * desired default hop limit might be changed via Neighbor 21995 * Discovery. 21996 */ 21997 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); 21998 21999 /* 22000 * Set the packet size here for the benefit of DTrace 22001 * probes. ip6_output() will set it properly; it's supposed 22002 * to include the option header lengths as well. 22003 */ 22004 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 22005 22006 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 22007 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 22008 else 22009 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 22010 22011 if (tp->t_state == TCPS_SYN_SENT) 22012 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 22013 22014 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 22015 /* TODO: IPv6 IP6TOS_ECT bit on */ 22016 error = ip6_output(m, 22017 inp->in6p_outputopts, 22018 &inp->inp_route6, 22019 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 22020 NULL, NULL, inp); 22021 22022 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) 22023 mtu = inp->inp_route6.ro_nh->nh_mtu; 22024 } 22025 #endif /* INET6 */ 22026 #if defined(INET) && defined(INET6) 22027 else 22028 #endif 22029 #ifdef INET 22030 { 22031 ip->ip_len = htons(m->m_pkthdr.len); 22032 #ifdef INET6 22033 if (inp->inp_vflag & INP_IPV6PROTO) 22034 ip->ip_ttl = in6_selecthlim(inp, NULL); 22035 #endif /* INET6 */ 22036 rack->r_ctl.fsb.hoplimit = ip->ip_ttl; 22037 /* 22038 * If we do path MTU discovery, then we set DF on every 22039 * packet. This might not be the best thing to do according 22040 * to RFC3390 Section 2. However, the tcp hostcache mitigates 22041 * the problem so it affects only the first tcp connection 22042 * with a host. 22043 * 22044 * NB: Don't set DF on small MTU/MSS to have a safe 22045 * fallback. 22046 */ 22047 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 22048 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 22049 if (tp->t_port == 0 || len < V_tcp_minmss) { 22050 ip->ip_off |= htons(IP_DF); 22051 } 22052 } else { 22053 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 22054 } 22055 22056 if (tp->t_state == TCPS_SYN_SENT) 22057 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 22058 22059 TCP_PROBE5(send, NULL, tp, ip, tp, th); 22060 22061 error = ip_output(m, 22062 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 22063 inp->inp_options, 22064 #else 22065 NULL, 22066 #endif 22067 &inp->inp_route, 22068 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0, 22069 inp); 22070 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) 22071 mtu = inp->inp_route.ro_nh->nh_mtu; 22072 } 22073 #endif /* INET */ 22074 if (lgb) { 22075 lgb->tlb_errno = error; 22076 lgb = NULL; 22077 } 22078 22079 out: 22080 /* 22081 * In transmit state, time the transmission and arrange for the 22082 * retransmit. In persist state, just set snd_max.
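 * On a successful send the block below also folds this transmission into the long-term bandwidth sample (lt_bw), the PCM bookkeeping and the PRR send count; on failure we fall through to the per-errno handling at the nomore label.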
22083 */ 22084 if ((rsm == NULL) && doing_tlp) 22085 add_flag |= RACK_TLP; 22086 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 22087 rack_to_usec_ts(&tv), 22088 rsm, add_flag, s_mb, s_moff, hw_tls, segsiz); 22089 if (error == 0) { 22090 if (add_flag & RACK_IS_PCM) { 22091 /* We just launched a PCM */ 22092 /* rrs here log */ 22093 rack->pcm_in_progress = 1; 22094 rack->pcm_needed = 0; 22095 rack_log_pcm(rack, 7, len, rack->r_ctl.pcm_max_seg, add_flag); 22096 } 22097 if (rsm == NULL) { 22098 if (rack->lt_bw_up == 0) { 22099 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv); 22100 rack->r_ctl.lt_seq = tp->snd_una; 22101 rack->lt_bw_up = 1; 22102 } else if (((rack_seq + len) - rack->r_ctl.lt_seq) > 0x7fffffff) { 22103 /* 22104 * Need to record what we have since we are 22105 * approaching seq wrap. 22106 */ 22107 uint64_t tmark; 22108 22109 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); 22110 rack->r_ctl.lt_seq = tp->snd_una; 22111 tmark = tcp_get_u64_usecs(&tv); 22112 if (tmark > rack->r_ctl.lt_timemark) { 22113 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 22114 rack->r_ctl.lt_timemark = tmark; 22115 } 22116 } 22117 } 22118 rack->forced_ack = 0; /* If we send something zap the FA flag */ 22119 counter_u64_add(rack_total_bytes, len); 22120 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 22121 if (rsm && doing_tlp) { 22122 rack->rc_last_sent_tlp_past_cumack = 0; 22123 rack->rc_last_sent_tlp_seq_valid = 1; 22124 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 22125 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 22126 } 22127 if (rack->rc_hw_nobuf) { 22128 rack->rc_hw_nobuf = 0; 22129 rack->r_ctl.rc_agg_delayed = 0; 22130 rack->r_early = 0; 22131 rack->r_late = 0; 22132 rack->r_ctl.rc_agg_early = 0; 22133 } 22134 if (rsm && (doing_tlp == 0)) { 22135 /* Set we retransmitted */ 22136 rack->rc_gp_saw_rec = 1; 22137 } else { 22138 if (cwnd_to_use > tp->snd_ssthresh) { 22139 /* Set we sent in CA */ 22140 rack->rc_gp_saw_ca = 1; 22141 } else { 22142 /* Set we sent in SS */ 22143 rack->rc_gp_saw_ss = 1; 22144 } 22145 } 22146 if (TCPS_HAVEESTABLISHED(tp->t_state) && 22147 (tp->t_flags & TF_SACK_PERMIT) && 22148 tp->rcv_numsacks > 0) 22149 tcp_clean_dsack_blocks(tp); 22150 tot_len_this_send += len; 22151 if (len == 0) { 22152 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 22153 } else { 22154 int idx; 22155 22156 idx = (len / segsiz) + 3; 22157 if (idx >= TCP_MSS_ACCT_ATIMER) 22158 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 22159 else 22160 counter_u64_add(rack_out_size[idx], 1); 22161 } 22162 } 22163 if ((rack->rack_no_prr == 0) && 22164 sub_from_prr && 22165 (error == 0)) { 22166 if (rack->r_ctl.rc_prr_sndcnt >= len) 22167 rack->r_ctl.rc_prr_sndcnt -= len; 22168 else 22169 rack->r_ctl.rc_prr_sndcnt = 0; 22170 } 22171 sub_from_prr = 0; 22172 if (rsm != NULL) { 22173 if (doing_tlp) 22174 /* Make sure the TLP is added */ 22175 rsm->r_flags |= RACK_TLP; 22176 else 22177 /* If its a resend without TLP then it must not have the flag */ 22178 rsm->r_flags &= ~RACK_TLP; 22179 } 22180 if ((error == 0) && 22181 (len > 0) && 22182 (tp->snd_una == tp->snd_max)) 22183 rack->r_ctl.rc_tlp_rxt_last_time = cts; 22184 22185 { 22186 /* 22187 * This block is not associated with the above error == 0 test. 22188 * It is used to advance snd_max if we have a new transmit. 
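 * Note that on an error we still enter the block but bail out to nomore almost immediately, so snd_max is only advanced for transmissions that were actually accepted by the IP layer.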
22189 */ 22190 tcp_seq startseq = tp->snd_max; 22191 22192 22193 if (rsm && (doing_tlp == 0)) 22194 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 22195 if (error) 22196 /* We don't log or do anything with errors */ 22197 goto nomore; 22198 if (doing_tlp == 0) { 22199 if (rsm == NULL) { 22200 /* 22201 * Not a retransmission of some 22202 * sort, new data is going out so 22203 * clear our TLP count and flag. 22204 */ 22205 rack->rc_tlp_in_progress = 0; 22206 rack->r_ctl.rc_tlp_cnt_out = 0; 22207 } 22208 } else { 22209 /* 22210 * We have just sent a TLP, mark that it is true 22211 * and make sure our in progress is set so we 22212 * continue to check the count. 22213 */ 22214 rack->rc_tlp_in_progress = 1; 22215 rack->r_ctl.rc_tlp_cnt_out++; 22216 } 22217 /* 22218 * If we are retransmitting we are done, snd_max 22219 * does not get updated. 22220 */ 22221 if (sack_rxmit) 22222 goto nomore; 22223 if ((tp->snd_una == tp->snd_max) && (len > 0)) { 22224 /* 22225 * Update the time we just added data since 22226 * nothing was outstanding. 22227 */ 22228 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 22229 tp->t_acktime = ticks; 22230 } 22231 /* 22232 * Now for special SYN/FIN handling. 22233 */ 22234 if (flags & (TH_SYN | TH_FIN)) { 22235 if ((flags & TH_SYN) && 22236 ((tp->t_flags & TF_SENTSYN) == 0)) { 22237 tp->snd_max++; 22238 tp->t_flags |= TF_SENTSYN; 22239 } 22240 if ((flags & TH_FIN) && 22241 ((tp->t_flags & TF_SENTFIN) == 0)) { 22242 tp->snd_max++; 22243 tp->t_flags |= TF_SENTFIN; 22244 } 22245 } 22246 tp->snd_max += len; 22247 if (rack->rc_new_rnd_needed) { 22248 rack_new_round_starts(tp, rack, tp->snd_max); 22249 } 22250 /* 22251 * Time this transmission if not a retransmission and 22252 * not currently timing anything. 22253 * This is only relevant in case of switching back to 22254 * the base stack. 22255 */ 22256 if (tp->t_rtttime == 0) { 22257 tp->t_rtttime = ticks; 22258 tp->t_rtseq = startseq; 22259 KMOD_TCPSTAT_INC(tcps_segstimed); 22260 } 22261 if (len && 22262 ((tp->t_flags & TF_GPUTINPROG) == 0)) 22263 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 22264 /* 22265 * If we are doing FO we need to update the mbuf position and subtract 22266 * this happens when the peer sends us duplicate information and 22267 * we thus want to send a DSACK. 22268 * 22269 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 22270 * turned off? If not then we are going to echo multiple DSACK blocks 22271 * out (with the TSO), which we should not be doing. 22272 */ 22273 if (rack->r_fast_output && len) { 22274 if (rack->r_ctl.fsb.left_to_send > len) 22275 rack->r_ctl.fsb.left_to_send -= len; 22276 else 22277 rack->r_ctl.fsb.left_to_send = 0; 22278 if (rack->r_ctl.fsb.left_to_send < segsiz) 22279 rack->r_fast_output = 0; 22280 if (rack->r_fast_output) { 22281 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 22282 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 22283 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); 22284 } 22285 } 22286 if (rack_pcm_blast == 0) { 22287 if ((orig_len > len) && 22288 (add_flag & RACK_IS_PCM) && 22289 (len < pace_max_seg) && 22290 ((pace_max_seg - len) > segsiz)) { 22291 /* 22292 * We are doing a PCM measurement and we did 22293 * not get enough data in the TSO to meet the 22294 * burst requirement. 
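 * We therefore shrink the outstanding request by what was just sent, re-take the send-buffer lock and jump back to the send label to emit the remainder of the burst.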
22295 */ 22296 uint32_t n_len; 22297 22298 n_len = (orig_len - len); 22299 orig_len -= len; 22300 pace_max_seg -= len; 22301 len = n_len; 22302 sb_offset = tp->snd_max - tp->snd_una; 22303 /* Re-lock for the next spin */ 22304 SOCK_SENDBUF_LOCK(so); 22305 goto send; 22306 } 22307 } else { 22308 if ((orig_len > len) && 22309 (add_flag & RACK_IS_PCM) && 22310 ((orig_len - len) > segsiz)) { 22311 /* 22312 * We are doing a PCM measurement and we did 22313 * not get enough data in the TSO to meet the 22314 * burst requirement. 22315 */ 22316 uint32_t n_len; 22317 22318 n_len = (orig_len - len); 22319 orig_len -= len; 22320 len = n_len; 22321 sb_offset = tp->snd_max - tp->snd_una; 22322 /* Re-lock for the next spin */ 22323 SOCK_SENDBUF_LOCK(so); 22324 goto send; 22325 } 22326 } 22327 } 22328 nomore: 22329 if (error) { 22330 rack->r_ctl.rc_agg_delayed = 0; 22331 rack->r_early = 0; 22332 rack->r_late = 0; 22333 rack->r_ctl.rc_agg_early = 0; 22334 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 22335 /* 22336 * Failures do not advance the seq counter above. For the 22337 * case of ENOBUFS we will fall out and retry shortly with 22338 * the hpts. Everything else will just have to retransmit 22339 * with the timer. 22340 * 22341 * In any case, we do not want to loop around for another 22342 * send without a good reason. 22343 */ 22344 sendalot = 0; 22345 switch (error) { 22346 case EPERM: 22347 case EACCES: 22348 tp->t_softerror = error; 22349 #ifdef TCP_ACCOUNTING 22350 crtsc = get_cyclecount(); 22351 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22352 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22353 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22354 } 22355 sched_unpin(); 22356 #endif 22357 return (error); 22358 case ENOBUFS: 22359 /* 22360 * Pace us right away to retry in a short 22361 * time 22362 */ 22363 if (rack->r_ctl.crte != NULL) { 22364 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); 22365 if (tcp_bblogging_on(rack->rc_tp)) 22366 rack_log_queue_level(tp, rack, len, &tv, cts); 22367 } else 22368 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); 22369 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 22370 if (rack->rc_enobuf < 0x7f) 22371 rack->rc_enobuf++; 22372 if (slot < (10 * HPTS_USEC_IN_MSEC)) 22373 slot = 10 * HPTS_USEC_IN_MSEC; 22374 if (rack->r_ctl.crte != NULL) { 22375 counter_u64_add(rack_saw_enobuf_hw, 1); 22376 tcp_rl_log_enobuf(rack->r_ctl.crte); 22377 } 22378 counter_u64_add(rack_saw_enobuf, 1); 22379 goto enobufs; 22380 case EMSGSIZE: 22381 /* 22382 * For some reason the interface we used initially 22383 * to send segments changed to another or lowered 22384 * its MTU. If TSO was active we either got an 22385 * interface without TSO capabilities or TSO was 22386 * turned off. If we obtained mtu from ip_output() 22387 * then update it and try again.
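 * If no smaller MSS resulted (or no MTU was learned) we simply re-arm the pacing timer for 10ms and return the error.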
22388 */ 22389 if (tso) 22390 tp->t_flags &= ~TF_TSO; 22391 if (mtu != 0) { 22392 int saved_mtu; 22393 22394 saved_mtu = tp->t_maxseg; 22395 tcp_mss_update(tp, -1, mtu, NULL, NULL); 22396 if (saved_mtu > tp->t_maxseg) { 22397 goto again; 22398 } 22399 } 22400 slot = 10 * HPTS_USEC_IN_MSEC; 22401 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 22402 #ifdef TCP_ACCOUNTING 22403 crtsc = get_cyclecount(); 22404 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22405 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22406 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22407 } 22408 sched_unpin(); 22409 #endif 22410 return (error); 22411 case ENETUNREACH: 22412 counter_u64_add(rack_saw_enetunreach, 1); 22413 /* FALLTHROUGH */ 22414 case EHOSTDOWN: 22415 case EHOSTUNREACH: 22416 case ENETDOWN: 22417 if (TCPS_HAVERCVDSYN(tp->t_state)) { 22418 tp->t_softerror = error; 22419 error = 0; 22420 } 22421 /* FALLTHROUGH */ 22422 default: 22423 slot = 10 * HPTS_USEC_IN_MSEC; 22424 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 22425 #ifdef TCP_ACCOUNTING 22426 crtsc = get_cyclecount(); 22427 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22428 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22429 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22430 } 22431 sched_unpin(); 22432 #endif 22433 return (error); 22434 } 22435 } else { 22436 rack->rc_enobuf = 0; 22437 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 22438 rack->r_ctl.retran_during_recovery += len; 22439 } 22440 KMOD_TCPSTAT_INC(tcps_sndtotal); 22441 22442 /* 22443 * Data sent (as far as we can tell). If this advertises a larger 22444 * window than any other segment, then remember the size of the 22445 * advertised window. Any pending ACK has now been sent. 22446 */ 22447 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 22448 tp->rcv_adv = tp->rcv_nxt + recwin; 22449 22450 tp->last_ack_sent = tp->rcv_nxt; 22451 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 22452 enobufs: 22453 if (sendalot) { 22454 /* Do we need to turn off sendalot? */ 22455 if (pace_max_seg && 22456 (tot_len_this_send >= pace_max_seg)) { 22457 /* We hit our max. */ 22458 sendalot = 0; 22459 } 22460 } 22461 if ((error == 0) && (flags & TH_FIN)) 22462 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 22463 if (flags & TH_RST) { 22464 /* 22465 * We don't send again after sending a RST. 22466 */ 22467 slot = 0; 22468 sendalot = 0; 22469 if (error == 0) 22470 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 22471 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) { 22472 /* 22473 * Get our pacing rate, if an error 22474 * occurred in sending (ENOBUF) we would 22475 * hit the else if with slot preset. Other 22476 * errors return. 22477 */ 22478 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz, __LINE__); 22479 } 22480 /* We have sent clear the flag */ 22481 rack->r_ent_rec_ns = 0; 22482 if (rack->r_must_retran) { 22483 if (rsm) { 22484 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 22485 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 22486 /* 22487 * We have retransmitted all. 22488 */ 22489 rack->r_must_retran = 0; 22490 rack->r_ctl.rc_out_at_rto = 0; 22491 } 22492 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 22493 /* 22494 * Sending new data will also kill 22495 * the loop. 
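 * (once snd_max passes the point recorded at the RTO the forced retransmission phase is over, so r_must_retran and the outstanding-at-RTO count are cleared.)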
22496 */ 22497 rack->r_must_retran = 0; 22498 rack->r_ctl.rc_out_at_rto = 0; 22499 } 22500 } 22501 rack->r_ctl.fsb.recwin = recwin; 22502 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 22503 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 22504 /* 22505 * We hit an RTO and now have past snd_max at the RTO 22506 * clear all the WAS flags. 22507 */ 22508 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 22509 } 22510 if (slot) { 22511 /* set the rack tcb into the slot N */ 22512 if ((error == 0) && 22513 rack_use_rfo && 22514 ((flags & (TH_SYN|TH_FIN)) == 0) && 22515 (rsm == NULL) && 22516 (ipoptlen == 0) && 22517 (doing_tlp == 0) && 22518 rack->r_fsb_inited && 22519 TCPS_HAVEESTABLISHED(tp->t_state) && 22520 ((IN_RECOVERY(tp->t_flags)) == 0) && 22521 (rack->r_must_retran == 0) && 22522 ((tp->t_flags & TF_NEEDFIN) == 0) && 22523 (len > 0) && (orig_len > 0) && 22524 (orig_len > len) && 22525 ((orig_len - len) >= segsiz) && 22526 ((optlen == 0) || 22527 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 22528 /* We can send at least one more MSS using our fsb */ 22529 rack_setup_fast_output(tp, rack, sb, len, orig_len, 22530 segsiz, pace_max_seg, hw_tls, flags); 22531 } else 22532 rack->r_fast_output = 0; 22533 rack_log_fsb(rack, tp, so, flags, 22534 ipoptlen, orig_len, len, error, 22535 (rsm == NULL), optlen, __LINE__, 2); 22536 } else if (sendalot) { 22537 int ret; 22538 22539 sack_rxmit = 0; 22540 if ((error == 0) && 22541 rack_use_rfo && 22542 ((flags & (TH_SYN|TH_FIN)) == 0) && 22543 (rsm == NULL) && 22544 (doing_tlp == 0) && 22545 (ipoptlen == 0) && 22546 (rack->r_must_retran == 0) && 22547 rack->r_fsb_inited && 22548 TCPS_HAVEESTABLISHED(tp->t_state) && 22549 ((IN_RECOVERY(tp->t_flags)) == 0) && 22550 ((tp->t_flags & TF_NEEDFIN) == 0) && 22551 (len > 0) && (orig_len > 0) && 22552 (orig_len > len) && 22553 ((orig_len - len) >= segsiz) && 22554 ((optlen == 0) || 22555 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 22556 /* we can use fast_output for more */ 22557 rack_setup_fast_output(tp, rack, sb, len, orig_len, 22558 segsiz, pace_max_seg, hw_tls, flags); 22559 if (rack->r_fast_output) { 22560 error = 0; 22561 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, &tot_len_this_send, &error, __LINE__); 22562 if (ret >= 0) 22563 return (ret); 22564 else if (error) 22565 goto nomore; 22566 22567 } 22568 } 22569 goto again; 22570 } 22571 skip_all_send: 22572 /* Assure when we leave that snd_nxt will point to top */ 22573 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 22574 tp->snd_nxt = tp->snd_max; 22575 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 22576 #ifdef TCP_ACCOUNTING 22577 crtsc = get_cyclecount() - ts_val; 22578 if (tot_len_this_send) { 22579 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22580 tp->tcp_cnt_counters[SND_OUT_DATA]++; 22581 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 22582 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 22583 } 22584 } else { 22585 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22586 tp->tcp_cnt_counters[SND_OUT_ACK]++; 22587 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 22588 } 22589 } 22590 sched_unpin(); 22591 #endif 22592 if (error == ENOBUFS) 22593 error = 0; 22594 return (error); 22595 } 22596 22597 static void 22598 rack_update_seg(struct tcp_rack *rack) 22599 { 22600 uint32_t orig_val; 22601 22602 orig_val = rack->r_ctl.rc_pace_max_segs; 22603 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 22604 if (orig_val != rack->r_ctl.rc_pace_max_segs) 22605 
rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); 22606 } 22607 22608 static void 22609 rack_mtu_change(struct tcpcb *tp) 22610 { 22611 /* 22612 * The MSS may have changed 22613 */ 22614 struct tcp_rack *rack; 22615 struct rack_sendmap *rsm; 22616 22617 rack = (struct tcp_rack *)tp->t_fb_ptr; 22618 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 22619 /* 22620 * The MTU has changed we need to resend everything 22621 * since all we have sent is lost. We first fix 22622 * up the mtu though. 22623 */ 22624 rack_set_pace_segments(tp, rack, __LINE__, NULL); 22625 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 22626 rack_remxt_tmr(tp); 22627 rack->r_fast_output = 0; 22628 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 22629 rack->r_ctl.rc_sacked); 22630 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 22631 rack->r_must_retran = 1; 22632 /* Mark all inflight to needing to be rxt'd */ 22633 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 22634 rsm->r_flags |= (RACK_MUST_RXT|RACK_PMTU_CHG); 22635 } 22636 } 22637 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 22638 /* We don't use snd_nxt to retransmit */ 22639 tp->snd_nxt = tp->snd_max; 22640 } 22641 22642 static int 22643 rack_set_dgp(struct tcp_rack *rack) 22644 { 22645 if (rack->dgp_on == 1) 22646 return(0); 22647 if ((rack->use_fixed_rate == 1) && 22648 (rack->rc_always_pace == 1)) { 22649 /* 22650 * We are already pacing another 22651 * way. 22652 */ 22653 return (EBUSY); 22654 } 22655 if (rack->rc_always_pace == 1) { 22656 rack_remove_pacing(rack); 22657 } 22658 if (tcp_incr_dgp_pacing_cnt() == 0) 22659 return (ENOSPC); 22660 rack->r_ctl.pacing_method |= RACK_DGP_PACING; 22661 rack->rc_fillcw_apply_discount = 0; 22662 rack->dgp_on = 1; 22663 rack->rc_always_pace = 1; 22664 rack->rc_pace_dnd = 1; 22665 rack->use_fixed_rate = 0; 22666 if (rack->gp_ready) 22667 rack_set_cc_pacing(rack); 22668 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 22669 rack->rack_attempt_hdwr_pace = 0; 22670 /* rxt settings */ 22671 rack->full_size_rxt = 1; 22672 rack->shape_rxt_to_pacing_min = 0; 22673 /* cmpack=1 */ 22674 rack->r_use_cmp_ack = 1; 22675 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 22676 rack->r_use_cmp_ack) 22677 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 22678 /* scwnd=1 */ 22679 rack->rack_enable_scwnd = 1; 22680 /* dynamic=100 */ 22681 rack->rc_gp_dyn_mul = 1; 22682 /* gp_inc_ca */ 22683 rack->r_ctl.rack_per_of_gp_ca = 100; 22684 /* rrr_conf=3 */ 22685 rack->r_rr_config = 3; 22686 /* npush=2 */ 22687 rack->r_ctl.rc_no_push_at_mrtt = 2; 22688 /* fillcw=1 */ 22689 rack->rc_pace_to_cwnd = 1; 22690 rack->rc_pace_fill_if_rttin_range = 0; 22691 rack->rtt_limit_mul = 0; 22692 /* noprr=1 */ 22693 rack->rack_no_prr = 1; 22694 /* lscwnd=1 */ 22695 rack->r_limit_scw = 1; 22696 /* gp_inc_rec */ 22697 rack->r_ctl.rack_per_of_gp_rec = 90; 22698 return (0); 22699 } 22700 22701 static int 22702 rack_set_profile(struct tcp_rack *rack, int prof) 22703 { 22704 int err = EINVAL; 22705 if (prof == 1) { 22706 /* 22707 * Profile 1 is "standard" DGP. It ignores 22708 * client buffer level. 22709 */ 22710 err = rack_set_dgp(rack); 22711 if (err) 22712 return (err); 22713 } else if (prof == 6) { 22714 err = rack_set_dgp(rack); 22715 if (err) 22716 return (err); 22717 /* 22718 * Profile 6 tweaks DGP so that it will apply to 22719 * fill-cw the same settings that profile5 does 22720 * to replace DGP. It gets then the max(dgp-rate, fillcw(discounted). 
22721 */ 22722 rack->rc_fillcw_apply_discount = 1; 22723 } else if (prof == 0) { 22724 /* This changes things back to the default settings */ 22725 if (rack->rc_always_pace == 1) { 22726 rack_remove_pacing(rack); 22727 } else { 22728 /* Make sure any stray flags are off */ 22729 rack->dgp_on = 0; 22730 rack->rc_hybrid_mode = 0; 22731 rack->use_fixed_rate = 0; 22732 } 22733 err = 0; 22734 if (rack_fill_cw_state) 22735 rack->rc_pace_to_cwnd = 1; 22736 else 22737 rack->rc_pace_to_cwnd = 0; 22738 22739 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 22740 rack->r_ctl.pacing_method |= RACK_REG_PACING; 22741 rack->rc_always_pace = 1; 22742 if (rack->rack_hibeta) 22743 rack_set_cc_pacing(rack); 22744 } else 22745 rack->rc_always_pace = 0; 22746 if (rack_dsack_std_based & 0x1) { 22747 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 22748 rack->rc_rack_tmr_std_based = 1; 22749 } 22750 if (rack_dsack_std_based & 0x2) { 22751 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 22752 rack->rc_rack_use_dsack = 1; 22753 } 22754 if (rack_use_cmp_acks) 22755 rack->r_use_cmp_ack = 1; 22756 else 22757 rack->r_use_cmp_ack = 0; 22758 if (rack_disable_prr) 22759 rack->rack_no_prr = 1; 22760 else 22761 rack->rack_no_prr = 0; 22762 if (rack_gp_no_rec_chg) 22763 rack->rc_gp_no_rec_chg = 1; 22764 else 22765 rack->rc_gp_no_rec_chg = 0; 22766 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 22767 rack->r_mbuf_queue = 1; 22768 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 22769 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 22770 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 22771 } else { 22772 rack->r_mbuf_queue = 0; 22773 rack->rc_tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 22774 } 22775 if (rack_enable_shared_cwnd) 22776 rack->rack_enable_scwnd = 1; 22777 else 22778 rack->rack_enable_scwnd = 0; 22779 if (rack_do_dyn_mul) { 22780 /* When dynamic adjustment is on CA needs to start at 100% */ 22781 rack->rc_gp_dyn_mul = 1; 22782 if (rack_do_dyn_mul >= 100) 22783 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 22784 } else { 22785 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 22786 rack->rc_gp_dyn_mul = 0; 22787 } 22788 rack->r_rr_config = 0; 22789 rack->r_ctl.rc_no_push_at_mrtt = 0; 22790 rack->rc_pace_fill_if_rttin_range = 0; 22791 rack->rtt_limit_mul = 0; 22792 22793 if (rack_enable_hw_pacing) 22794 rack->rack_hdw_pace_ena = 1; 22795 else 22796 rack->rack_hdw_pace_ena = 0; 22797 if (rack_disable_prr) 22798 rack->rack_no_prr = 1; 22799 else 22800 rack->rack_no_prr = 0; 22801 if (rack_limits_scwnd) 22802 rack->r_limit_scw = 1; 22803 else 22804 rack->r_limit_scw = 0; 22805 rack_init_retransmit_value(rack, rack_rxt_controls); 22806 err = 0; 22807 } 22808 return (err); 22809 } 22810 22811 static int 22812 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 22813 { 22814 struct deferred_opt_list *dol; 22815 22816 dol = malloc(sizeof(struct deferred_opt_list), 22817 M_TCPDO, M_NOWAIT|M_ZERO); 22818 if (dol == NULL) { 22819 /* 22820 * No space yikes -- fail out.. 
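 * Returning 0 tells the caller that the option could not be queued for deferred processing.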
22821 */ 22822 return (0); 22823 } 22824 dol->optname = sopt_name; 22825 dol->optval = loptval; 22826 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 22827 return (1); 22828 } 22829 22830 static int 22831 process_hybrid_pacing(struct tcp_rack *rack, struct tcp_hybrid_req *hybrid) 22832 { 22833 #ifdef TCP_REQUEST_TRK 22834 struct tcp_sendfile_track *sft; 22835 struct timeval tv; 22836 tcp_seq seq; 22837 int err; 22838 22839 microuptime(&tv); 22840 22841 /* Make sure no fixed rate is on */ 22842 rack->use_fixed_rate = 0; 22843 rack->r_ctl.rc_fixed_pacing_rate_rec = 0; 22844 rack->r_ctl.rc_fixed_pacing_rate_ca = 0; 22845 rack->r_ctl.rc_fixed_pacing_rate_ss = 0; 22846 /* Now allocate or find our entry that will have these settings */ 22847 sft = tcp_req_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusectick(&tv), 0); 22848 if (sft == NULL) { 22849 rack->rc_tp->tcp_hybrid_error++; 22850 /* no space, where would it have gone? */ 22851 seq = rack->rc_tp->snd_una + rack->rc_tp->t_inpcb.inp_socket->so_snd.sb_ccc; 22852 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_ROOM, __LINE__, 0); 22853 return (ENOSPC); 22854 } 22855 /* mask our internal flags */ 22856 hybrid->hybrid_flags &= TCP_HYBRID_PACING_USER_MASK; 22857 /* The seq will be snd_una + everything in the buffer */ 22858 seq = sft->start_seq; 22859 if ((hybrid->hybrid_flags & TCP_HYBRID_PACING_ENABLE) == 0) { 22860 /* Disabling hybrid pacing */ 22861 if (rack->rc_hybrid_mode) { 22862 rack_set_profile(rack, 0); 22863 rack->rc_tp->tcp_hybrid_stop++; 22864 } 22865 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_TURNED_OFF, __LINE__, 0); 22866 return (0); 22867 } 22868 if (rack->dgp_on == 0) { 22869 /* 22870 * If we have not yet turned DGP on, do so 22871 * now setting pure DGP mode, no buffer level 22872 * response. 22873 */ 22874 if ((err = rack_set_profile(rack, 1)) != 0){ 22875 /* Failed to turn pacing on */ 22876 rack->rc_tp->tcp_hybrid_error++; 22877 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_NO_PACING, __LINE__, 0); 22878 return (err); 22879 } 22880 } 22881 /* 22882 * Now we must switch to hybrid mode as well which also 22883 * means moving to regular pacing. 22884 */ 22885 if (rack->rc_hybrid_mode == 0) { 22886 /* First time */ 22887 if (tcp_can_enable_pacing()) { 22888 rack->r_ctl.pacing_method |= RACK_REG_PACING; 22889 rack->rc_hybrid_mode = 1; 22890 } else { 22891 return (ENOSPC); 22892 } 22893 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) { 22894 /* 22895 * This should be true. 
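 * The DGP reference taken in rack_set_dgp() is handed back here so this connection is accounted as a regular paced one.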
22896 */ 22897 tcp_dec_dgp_pacing_cnt(); 22898 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 22899 } 22900 } 22901 /* Now set in our flags */ 22902 sft->hybrid_flags = hybrid->hybrid_flags | TCP_HYBRID_PACING_WASSET; 22903 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_CSPR) 22904 sft->cspr = hybrid->cspr; 22905 else 22906 sft->cspr = 0; 22907 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_H_MS) 22908 sft->hint_maxseg = hybrid->hint_maxseg; 22909 else 22910 sft->hint_maxseg = 0; 22911 rack->rc_tp->tcp_hybrid_start++; 22912 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_RULES_SET, __LINE__,0); 22913 return (0); 22914 #else 22915 return (ENOTSUP); 22916 #endif 22917 } 22918 22919 static int 22920 rack_stack_information(struct tcpcb *tp, struct stack_specific_info *si) 22921 { 22922 /* We pulled a SSI info log out what was there */ 22923 si->bytes_transmitted = tp->t_sndbytes; 22924 si->bytes_retransmitted = tp->t_snd_rxt_bytes; 22925 return (0); 22926 } 22927 22928 static int 22929 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 22930 uint32_t optval, uint64_t loptval, struct tcp_hybrid_req *hybrid) 22931 22932 { 22933 struct epoch_tracker et; 22934 struct sockopt sopt; 22935 struct cc_newreno_opts opt; 22936 uint64_t val; 22937 int error = 0; 22938 uint16_t ca, ss; 22939 22940 switch (sopt_name) { 22941 case TCP_RACK_SET_RXT_OPTIONS: 22942 if (optval <= 2) { 22943 rack_init_retransmit_value(rack, optval); 22944 } else { 22945 /* 22946 * You must send in 0, 1 or 2 all else is 22947 * invalid. 22948 */ 22949 error = EINVAL; 22950 } 22951 break; 22952 case TCP_RACK_DSACK_OPT: 22953 RACK_OPTS_INC(tcp_rack_dsack_opt); 22954 if (optval & 0x1) { 22955 rack->rc_rack_tmr_std_based = 1; 22956 } else { 22957 rack->rc_rack_tmr_std_based = 0; 22958 } 22959 if (optval & 0x2) { 22960 rack->rc_rack_use_dsack = 1; 22961 } else { 22962 rack->rc_rack_use_dsack = 0; 22963 } 22964 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); 22965 break; 22966 case TCP_RACK_PACING_DIVISOR: 22967 RACK_OPTS_INC(tcp_rack_pacing_divisor); 22968 if (optval == 0) { 22969 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; 22970 } else { 22971 if (optval < RL_MIN_DIVISOR) 22972 rack->r_ctl.pace_len_divisor = RL_MIN_DIVISOR; 22973 else 22974 rack->r_ctl.pace_len_divisor = optval; 22975 } 22976 break; 22977 case TCP_RACK_HI_BETA: 22978 RACK_OPTS_INC(tcp_rack_hi_beta); 22979 if (optval > 0) { 22980 rack->rack_hibeta = 1; 22981 if ((optval >= 50) && 22982 (optval <= 100)) { 22983 /* 22984 * User wants to set a custom beta. 22985 */ 22986 rack->r_ctl.saved_hibeta = optval; 22987 if (rack->rc_pacing_cc_set) 22988 rack_undo_cc_pacing(rack); 22989 rack->r_ctl.rc_saved_beta = optval; 22990 } 22991 if (rack->rc_pacing_cc_set == 0) 22992 rack_set_cc_pacing(rack); 22993 } else { 22994 rack->rack_hibeta = 0; 22995 if (rack->rc_pacing_cc_set) 22996 rack_undo_cc_pacing(rack); 22997 } 22998 break; 22999 case TCP_RACK_PACING_BETA: 23000 error = EINVAL; 23001 break; 23002 case TCP_RACK_TIMER_SLOP: 23003 RACK_OPTS_INC(tcp_rack_timer_slop); 23004 rack->r_ctl.timer_slop = optval; 23005 if (rack->rc_tp->t_srtt) { 23006 /* 23007 * If we have an SRTT lets update t_rxtcur 23008 * to have the new slop. 23009 */ 23010 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 23011 rack_rto_min, rack_rto_max, 23012 rack->r_ctl.timer_slop); 23013 } 23014 break; 23015 case TCP_RACK_PACING_BETA_ECN: 23016 RACK_OPTS_INC(tcp_rack_beta_ecn); 23017 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 23018 /* This only works for newreno. 
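 * (the beta/beta_ecn knobs are newreno-specific, so the option is rejected when a different congestion control module is attached.)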
*/ 23019 error = EINVAL; 23020 break; 23021 } 23022 if (rack->rc_pacing_cc_set) { 23023 /* 23024 * Set them into the real CC module 23025 * whats in the rack pcb is the old values 23026 * to be used on restoral/ 23027 */ 23028 sopt.sopt_dir = SOPT_SET; 23029 opt.name = CC_NEWRENO_BETA_ECN; 23030 opt.val = optval; 23031 if (CC_ALGO(tp)->ctl_output != NULL) 23032 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 23033 else 23034 error = ENOENT; 23035 } else { 23036 /* 23037 * Not pacing yet so set it into our local 23038 * rack pcb storage. 23039 */ 23040 rack->r_ctl.rc_saved_beta_ecn = optval; 23041 } 23042 break; 23043 case TCP_DEFER_OPTIONS: 23044 RACK_OPTS_INC(tcp_defer_opt); 23045 if (optval) { 23046 if (rack->gp_ready) { 23047 /* Too late */ 23048 error = EINVAL; 23049 break; 23050 } 23051 rack->defer_options = 1; 23052 } else 23053 rack->defer_options = 0; 23054 break; 23055 case TCP_RACK_MEASURE_CNT: 23056 RACK_OPTS_INC(tcp_rack_measure_cnt); 23057 if (optval && (optval <= 0xff)) { 23058 rack->r_ctl.req_measurements = optval; 23059 } else 23060 error = EINVAL; 23061 break; 23062 case TCP_REC_ABC_VAL: 23063 RACK_OPTS_INC(tcp_rec_abc_val); 23064 if (optval > 0) 23065 rack->r_use_labc_for_rec = 1; 23066 else 23067 rack->r_use_labc_for_rec = 0; 23068 break; 23069 case TCP_RACK_ABC_VAL: 23070 RACK_OPTS_INC(tcp_rack_abc_val); 23071 if ((optval > 0) && (optval < 255)) 23072 rack->rc_labc = optval; 23073 else 23074 error = EINVAL; 23075 break; 23076 case TCP_HDWR_UP_ONLY: 23077 RACK_OPTS_INC(tcp_pacing_up_only); 23078 if (optval) 23079 rack->r_up_only = 1; 23080 else 23081 rack->r_up_only = 0; 23082 break; 23083 case TCP_FILLCW_RATE_CAP: /* URL:fillcw_cap */ 23084 RACK_OPTS_INC(tcp_fillcw_rate_cap); 23085 rack->r_ctl.fillcw_cap = loptval; 23086 break; 23087 case TCP_PACING_RATE_CAP: 23088 RACK_OPTS_INC(tcp_pacing_rate_cap); 23089 if ((rack->dgp_on == 1) && 23090 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { 23091 /* 23092 * If we are doing DGP we need to switch 23093 * to using the pacing limit. 23094 */ 23095 if (tcp_can_enable_pacing() == 0) { 23096 error = ENOSPC; 23097 break; 23098 } 23099 /* 23100 * Now change up the flags and counts to be correct. 23101 */ 23102 rack->r_ctl.pacing_method |= RACK_REG_PACING; 23103 tcp_dec_dgp_pacing_cnt(); 23104 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 23105 } 23106 rack->r_ctl.bw_rate_cap = loptval; 23107 break; 23108 case TCP_HYBRID_PACING: 23109 if (hybrid == NULL) { 23110 error = EINVAL; 23111 break; 23112 } 23113 if (rack->r_ctl.side_chan_dis_mask & HYBRID_DIS_MASK) { 23114 error = EPERM; 23115 break; 23116 } 23117 error = process_hybrid_pacing(rack, hybrid); 23118 break; 23119 case TCP_SIDECHAN_DIS: /* URL:scodm */ 23120 if (optval) 23121 rack->r_ctl.side_chan_dis_mask = optval; 23122 else 23123 rack->r_ctl.side_chan_dis_mask = 0; 23124 break; 23125 case TCP_RACK_PROFILE: 23126 RACK_OPTS_INC(tcp_profile); 23127 error = rack_set_profile(rack, optval); 23128 break; 23129 case TCP_USE_CMP_ACKS: 23130 RACK_OPTS_INC(tcp_use_cmp_acks); 23131 if ((optval == 0) && (tp->t_flags2 & TF2_MBUF_ACKCMP)) { 23132 /* You can't turn it off once its on! 
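 * presumably because the receive path may already be queueing compressed-ack mbufs that only this stack can consume, so the feature stays on for the life of the connection.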
*/ 23133 error = EINVAL; 23134 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { 23135 rack->r_use_cmp_ack = 1; 23136 rack->r_mbuf_queue = 1; 23137 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23138 } 23139 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 23140 tp->t_flags2 |= TF2_MBUF_ACKCMP; 23141 break; 23142 case TCP_SHARED_CWND_TIME_LIMIT: 23143 RACK_OPTS_INC(tcp_lscwnd); 23144 if (optval) 23145 rack->r_limit_scw = 1; 23146 else 23147 rack->r_limit_scw = 0; 23148 break; 23149 case TCP_RACK_DGP_IN_REC: 23150 error = EINVAL; 23151 break; 23152 case TCP_RACK_PACE_TO_FILL: 23153 RACK_OPTS_INC(tcp_fillcw); 23154 if (optval == 0) 23155 rack->rc_pace_to_cwnd = 0; 23156 else { 23157 rack->rc_pace_to_cwnd = 1; 23158 } 23159 if ((optval >= rack_gp_rtt_maxmul) && 23160 rack_gp_rtt_maxmul && 23161 (optval < 0xf)) { 23162 rack->rc_pace_fill_if_rttin_range = 1; 23163 rack->rtt_limit_mul = optval; 23164 } else { 23165 rack->rc_pace_fill_if_rttin_range = 0; 23166 rack->rtt_limit_mul = 0; 23167 } 23168 break; 23169 case TCP_RACK_NO_PUSH_AT_MAX: 23170 RACK_OPTS_INC(tcp_npush); 23171 if (optval == 0) 23172 rack->r_ctl.rc_no_push_at_mrtt = 0; 23173 else if (optval < 0xff) 23174 rack->r_ctl.rc_no_push_at_mrtt = optval; 23175 else 23176 error = EINVAL; 23177 break; 23178 case TCP_SHARED_CWND_ENABLE: 23179 RACK_OPTS_INC(tcp_rack_scwnd); 23180 if (optval == 0) 23181 rack->rack_enable_scwnd = 0; 23182 else 23183 rack->rack_enable_scwnd = 1; 23184 break; 23185 case TCP_RACK_MBUF_QUEUE: 23186 /* Now do we use the LRO mbuf-queue feature */ 23187 RACK_OPTS_INC(tcp_rack_mbufq); 23188 if (optval || rack->r_use_cmp_ack) 23189 rack->r_mbuf_queue = 1; 23190 else 23191 rack->r_mbuf_queue = 0; 23192 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 23193 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23194 else 23195 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 23196 break; 23197 case TCP_RACK_NONRXT_CFG_RATE: 23198 RACK_OPTS_INC(tcp_rack_cfg_rate); 23199 if (optval == 0) 23200 rack->rack_rec_nonrxt_use_cr = 0; 23201 else 23202 rack->rack_rec_nonrxt_use_cr = 1; 23203 break; 23204 case TCP_NO_PRR: 23205 RACK_OPTS_INC(tcp_rack_noprr); 23206 if (optval == 0) 23207 rack->rack_no_prr = 0; 23208 else if (optval == 1) 23209 rack->rack_no_prr = 1; 23210 else if (optval == 2) 23211 rack->no_prr_addback = 1; 23212 else 23213 error = EINVAL; 23214 break; 23215 case RACK_CSPR_IS_FCC: /* URL:csprisfcc */ 23216 if (optval > 0) 23217 rack->cspr_is_fcc = 1; 23218 else 23219 rack->cspr_is_fcc = 0; 23220 break; 23221 case TCP_TIMELY_DYN_ADJ: 23222 RACK_OPTS_INC(tcp_timely_dyn); 23223 if (optval == 0) 23224 rack->rc_gp_dyn_mul = 0; 23225 else { 23226 rack->rc_gp_dyn_mul = 1; 23227 if (optval >= 100) { 23228 /* 23229 * If the user sets something 100 or more 23230 * its the gp_ca value. 23231 */ 23232 rack->r_ctl.rack_per_of_gp_ca = optval; 23233 } 23234 } 23235 break; 23236 case TCP_RACK_DO_DETECTION: 23237 error = EINVAL; 23238 break; 23239 case TCP_RACK_TLP_USE: 23240 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 23241 error = EINVAL; 23242 break; 23243 } 23244 RACK_OPTS_INC(tcp_tlp_use); 23245 rack->rack_tlp_threshold_use = optval; 23246 break; 23247 case TCP_RACK_TLP_REDUCE: 23248 /* RACK TLP cwnd reduction (bool) */ 23249 RACK_OPTS_INC(tcp_rack_tlp_reduce); 23250 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 23251 break; 23252 /* Pacing related ones */ 23253 case TCP_RACK_PACE_ALWAYS: 23254 /* 23255 * zero is old rack method, 1 is new 23256 * method using a pacing rate. 
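 * Turning it on takes a pacing reference via tcp_can_enable_pacing(); turning it off releases the pacing state through rack_remove_pacing().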
23257 */ 23258 RACK_OPTS_INC(tcp_rack_pace_always); 23259 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 23260 error = EPERM; 23261 break; 23262 } 23263 if (optval > 0) { 23264 if (rack->rc_always_pace) { 23265 error = EALREADY; 23266 break; 23267 } else if (tcp_can_enable_pacing()) { 23268 rack->r_ctl.pacing_method |= RACK_REG_PACING; 23269 rack->rc_always_pace = 1; 23270 if (rack->rack_hibeta) 23271 rack_set_cc_pacing(rack); 23272 } 23273 else { 23274 error = ENOSPC; 23275 break; 23276 } 23277 } else { 23278 if (rack->rc_always_pace == 1) { 23279 rack_remove_pacing(rack); 23280 } 23281 } 23282 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 23283 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23284 else 23285 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 23286 /* A rate may be set irate or other, if so set seg size */ 23287 rack_update_seg(rack); 23288 break; 23289 case TCP_BBR_RACK_INIT_RATE: 23290 RACK_OPTS_INC(tcp_initial_rate); 23291 val = optval; 23292 /* Change from kbits per second to bytes per second */ 23293 val *= 1000; 23294 val /= 8; 23295 rack->r_ctl.init_rate = val; 23296 if (rack->rc_always_pace) 23297 rack_update_seg(rack); 23298 break; 23299 case TCP_BBR_IWINTSO: 23300 error = EINVAL; 23301 break; 23302 case TCP_RACK_FORCE_MSEG: 23303 RACK_OPTS_INC(tcp_rack_force_max_seg); 23304 if (optval) 23305 rack->rc_force_max_seg = 1; 23306 else 23307 rack->rc_force_max_seg = 0; 23308 break; 23309 case TCP_RACK_PACE_MIN_SEG: 23310 RACK_OPTS_INC(tcp_rack_min_seg); 23311 rack->r_ctl.rc_user_set_min_segs = (0x0000ffff & optval); 23312 rack_set_pace_segments(tp, rack, __LINE__, NULL); 23313 break; 23314 case TCP_RACK_PACE_MAX_SEG: 23315 /* Max segments size in a pace in bytes */ 23316 RACK_OPTS_INC(tcp_rack_max_seg); 23317 if ((rack->dgp_on == 1) && 23318 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { 23319 /* 23320 * If we set a max-seg and are doing DGP then 23321 * we now fall under the pacing limits not the 23322 * DGP ones. 23323 */ 23324 if (tcp_can_enable_pacing() == 0) { 23325 error = ENOSPC; 23326 break; 23327 } 23328 /* 23329 * Now change up the flags and counts to be correct. 23330 */ 23331 rack->r_ctl.pacing_method |= RACK_REG_PACING; 23332 tcp_dec_dgp_pacing_cnt(); 23333 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 23334 } 23335 if (optval <= MAX_USER_SET_SEG) 23336 rack->rc_user_set_max_segs = optval; 23337 else 23338 rack->rc_user_set_max_segs = MAX_USER_SET_SEG; 23339 rack_set_pace_segments(tp, rack, __LINE__, NULL); 23340 break; 23341 case TCP_RACK_PACE_RATE_REC: 23342 /* Set the fixed pacing rate in Bytes per second ca */ 23343 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 23344 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 23345 error = EPERM; 23346 break; 23347 } 23348 if (rack->dgp_on) { 23349 /* 23350 * We are already pacing another 23351 * way. 
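 * (fixed-rate pacing and DGP are mutually exclusive, hence the EBUSY below; DGP must be disabled before a fixed rate can be set.)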
23352 */ 23353 error = EBUSY; 23354 break; 23355 } 23356 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 23357 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 23358 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 23359 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 23360 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 23361 rack->use_fixed_rate = 1; 23362 if (rack->rack_hibeta) 23363 rack_set_cc_pacing(rack); 23364 rack_log_pacing_delay_calc(rack, 23365 rack->r_ctl.rc_fixed_pacing_rate_ss, 23366 rack->r_ctl.rc_fixed_pacing_rate_ca, 23367 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 23368 __LINE__, NULL,0); 23369 break; 23370 23371 case TCP_RACK_PACE_RATE_SS: 23372 /* Set the fixed pacing rate in Bytes per second ca */ 23373 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 23374 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 23375 error = EPERM; 23376 break; 23377 } 23378 if (rack->dgp_on) { 23379 /* 23380 * We are already pacing another 23381 * way. 23382 */ 23383 error = EBUSY; 23384 break; 23385 } 23386 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 23387 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 23388 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 23389 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 23390 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 23391 rack->use_fixed_rate = 1; 23392 if (rack->rack_hibeta) 23393 rack_set_cc_pacing(rack); 23394 rack_log_pacing_delay_calc(rack, 23395 rack->r_ctl.rc_fixed_pacing_rate_ss, 23396 rack->r_ctl.rc_fixed_pacing_rate_ca, 23397 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 23398 __LINE__, NULL, 0); 23399 break; 23400 23401 case TCP_RACK_PACE_RATE_CA: 23402 /* Set the fixed pacing rate in Bytes per second ca */ 23403 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 23404 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 23405 error = EPERM; 23406 break; 23407 } 23408 if (rack->dgp_on) { 23409 /* 23410 * We are already pacing another 23411 * way. 23412 */ 23413 error = EBUSY; 23414 break; 23415 } 23416 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 23417 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 23418 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 23419 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 23420 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 23421 rack->use_fixed_rate = 1; 23422 if (rack->rack_hibeta) 23423 rack_set_cc_pacing(rack); 23424 rack_log_pacing_delay_calc(rack, 23425 rack->r_ctl.rc_fixed_pacing_rate_ss, 23426 rack->r_ctl.rc_fixed_pacing_rate_ca, 23427 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 23428 __LINE__, NULL, 0); 23429 break; 23430 case TCP_RACK_GP_INCREASE_REC: 23431 RACK_OPTS_INC(tcp_gp_inc_rec); 23432 rack->r_ctl.rack_per_of_gp_rec = optval; 23433 rack_log_pacing_delay_calc(rack, 23434 rack->r_ctl.rack_per_of_gp_ss, 23435 rack->r_ctl.rack_per_of_gp_ca, 23436 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 23437 __LINE__, NULL, 0); 23438 break; 23439 case TCP_RACK_GP_INCREASE_CA: 23440 RACK_OPTS_INC(tcp_gp_inc_ca); 23441 ca = optval; 23442 if (ca < 100) { 23443 /* 23444 * We don't allow any reduction 23445 * over the GP b/w. 23446 */ 23447 error = EINVAL; 23448 break; 23449 } 23450 rack->r_ctl.rack_per_of_gp_ca = ca; 23451 rack_log_pacing_delay_calc(rack, 23452 rack->r_ctl.rack_per_of_gp_ss, 23453 rack->r_ctl.rack_per_of_gp_ca, 23454 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 23455 __LINE__, NULL, 0); 23456 break; 23457 case TCP_RACK_GP_INCREASE_SS: 23458 RACK_OPTS_INC(tcp_gp_inc_ss); 23459 ss = optval; 23460 if (ss < 100) { 23461 /* 23462 * We don't allow any reduction 23463 * over the GP b/w. 
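 * (these values are percentages of the measured goodput bandwidth; 100 paces at exactly the measured rate, so anything below 100 would be a reduction.)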
23464 */ 23465 error = EINVAL; 23466 break; 23467 } 23468 rack->r_ctl.rack_per_of_gp_ss = ss; 23469 rack_log_pacing_delay_calc(rack, 23470 rack->r_ctl.rack_per_of_gp_ss, 23471 rack->r_ctl.rack_per_of_gp_ca, 23472 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 23473 __LINE__, NULL, 0); 23474 break; 23475 case TCP_RACK_RR_CONF: 23476 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 23477 if (optval && optval <= 3) 23478 rack->r_rr_config = optval; 23479 else 23480 rack->r_rr_config = 0; 23481 break; 23482 case TCP_PACING_DND: /* URL:dnd */ 23483 if (optval > 0) 23484 rack->rc_pace_dnd = 1; 23485 else 23486 rack->rc_pace_dnd = 0; 23487 break; 23488 case TCP_HDWR_RATE_CAP: 23489 RACK_OPTS_INC(tcp_hdwr_rate_cap); 23490 if (optval) { 23491 if (rack->r_rack_hw_rate_caps == 0) 23492 rack->r_rack_hw_rate_caps = 1; 23493 else 23494 error = EALREADY; 23495 } else { 23496 rack->r_rack_hw_rate_caps = 0; 23497 } 23498 break; 23499 case TCP_DGP_UPPER_BOUNDS: 23500 { 23501 uint8_t val; 23502 val = optval & 0x0000ff; 23503 rack->r_ctl.rack_per_upper_bound_ca = val; 23504 val = (optval >> 16) & 0x0000ff; 23505 rack->r_ctl.rack_per_upper_bound_ss = val; 23506 break; 23507 } 23508 case TCP_SS_EEXIT: /* URL:eexit */ 23509 if (optval > 0) { 23510 rack->r_ctl.gp_rnd_thresh = optval & 0x0ff; 23511 if (optval & 0x10000) { 23512 rack->r_ctl.gate_to_fs = 1; 23513 } else { 23514 rack->r_ctl.gate_to_fs = 0; 23515 } 23516 if (optval & 0x20000) { 23517 rack->r_ctl.use_gp_not_last = 1; 23518 } else { 23519 rack->r_ctl.use_gp_not_last = 0; 23520 } 23521 if (optval & 0xfffc0000) { 23522 uint32_t v; 23523 23524 v = (optval >> 18) & 0x00003fff; 23525 if (v >= 1000) 23526 rack->r_ctl.gp_gain_req = v; 23527 } 23528 } else { 23529 /* We do not do ss early exit at all */ 23530 rack->rc_initial_ss_comp = 1; 23531 rack->r_ctl.gp_rnd_thresh = 0; 23532 } 23533 break; 23534 case TCP_RACK_SPLIT_LIMIT: 23535 RACK_OPTS_INC(tcp_split_limit); 23536 rack->r_ctl.rc_split_limit = optval; 23537 break; 23538 case TCP_BBR_HDWR_PACE: 23539 RACK_OPTS_INC(tcp_hdwr_pacing); 23540 if (optval){ 23541 if (rack->rack_hdrw_pacing == 0) { 23542 rack->rack_hdw_pace_ena = 1; 23543 rack->rack_attempt_hdwr_pace = 0; 23544 } else 23545 error = EALREADY; 23546 } else { 23547 rack->rack_hdw_pace_ena = 0; 23548 #ifdef RATELIMIT 23549 if (rack->r_ctl.crte != NULL) { 23550 rack->rack_hdrw_pacing = 0; 23551 rack->rack_attempt_hdwr_pace = 0; 23552 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 23553 rack->r_ctl.crte = NULL; 23554 } 23555 #endif 23556 } 23557 break; 23558 /* End Pacing related ones */ 23559 case TCP_RACK_PRR_SENDALOT: 23560 /* Allow PRR to send more than one seg */ 23561 RACK_OPTS_INC(tcp_rack_prr_sendalot); 23562 rack->r_ctl.rc_prr_sendalot = optval; 23563 break; 23564 case TCP_RACK_MIN_TO: 23565 /* Minimum time between rack t-o's in ms */ 23566 RACK_OPTS_INC(tcp_rack_min_to); 23567 rack->r_ctl.rc_min_to = optval; 23568 break; 23569 case TCP_RACK_EARLY_SEG: 23570 /* If early recovery max segments */ 23571 RACK_OPTS_INC(tcp_rack_early_seg); 23572 rack->r_ctl.rc_early_recovery_segs = optval; 23573 break; 23574 case TCP_RACK_ENABLE_HYSTART: 23575 { 23576 if (optval) { 23577 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 23578 if (rack_do_hystart > RACK_HYSTART_ON) 23579 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 23580 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 23581 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 23582 } else { 23583 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 23584 } 23585 } 23586 break; 23587 case 
TCP_RACK_REORD_THRESH: 23588 /* RACK reorder threshold (shift amount) */ 23589 RACK_OPTS_INC(tcp_rack_reord_thresh); 23590 if ((optval > 0) && (optval < 31)) 23591 rack->r_ctl.rc_reorder_shift = optval; 23592 else 23593 error = EINVAL; 23594 break; 23595 case TCP_RACK_REORD_FADE: 23596 /* Does reordering fade after ms time */ 23597 RACK_OPTS_INC(tcp_rack_reord_fade); 23598 rack->r_ctl.rc_reorder_fade = optval; 23599 break; 23600 case TCP_RACK_TLP_THRESH: 23601 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 23602 RACK_OPTS_INC(tcp_rack_tlp_thresh); 23603 if (optval) 23604 rack->r_ctl.rc_tlp_threshold = optval; 23605 else 23606 error = EINVAL; 23607 break; 23608 case TCP_BBR_USE_RACK_RR: 23609 RACK_OPTS_INC(tcp_rack_rr); 23610 if (optval) 23611 rack->use_rack_rr = 1; 23612 else 23613 rack->use_rack_rr = 0; 23614 break; 23615 case TCP_RACK_PKT_DELAY: 23616 /* RACK added ms i.e. rack-rtt + reord + N */ 23617 RACK_OPTS_INC(tcp_rack_pkt_delay); 23618 rack->r_ctl.rc_pkt_delay = optval; 23619 break; 23620 case TCP_DELACK: 23621 RACK_OPTS_INC(tcp_rack_delayed_ack); 23622 if (optval == 0) 23623 tp->t_delayed_ack = 0; 23624 else 23625 tp->t_delayed_ack = 1; 23626 if (tp->t_flags & TF_DELACK) { 23627 tp->t_flags &= ~TF_DELACK; 23628 tp->t_flags |= TF_ACKNOW; 23629 NET_EPOCH_ENTER(et); 23630 rack_output(tp); 23631 NET_EPOCH_EXIT(et); 23632 } 23633 break; 23634 23635 case TCP_BBR_RACK_RTT_USE: 23636 RACK_OPTS_INC(tcp_rack_rtt_use); 23637 if ((optval != USE_RTT_HIGH) && 23638 (optval != USE_RTT_LOW) && 23639 (optval != USE_RTT_AVG)) 23640 error = EINVAL; 23641 else 23642 rack->r_ctl.rc_rate_sample_method = optval; 23643 break; 23644 case TCP_HONOR_HPTS_MIN: 23645 RACK_OPTS_INC(tcp_honor_hpts); 23646 if (optval) { 23647 rack->r_use_hpts_min = 1; 23648 /* 23649 * Must be between 2 - 80% to be a reduction else 23650 * we keep the default (10%). 23651 */ 23652 if ((optval > 1) && (optval <= 80)) { 23653 rack->r_ctl.max_reduction = optval; 23654 } 23655 } else 23656 rack->r_use_hpts_min = 0; 23657 break; 23658 case TCP_REC_IS_DYN: /* URL:dynrec */ 23659 RACK_OPTS_INC(tcp_dyn_rec); 23660 if (optval) 23661 rack->rc_gp_no_rec_chg = 1; 23662 else 23663 rack->rc_gp_no_rec_chg = 0; 23664 break; 23665 case TCP_NO_TIMELY: 23666 RACK_OPTS_INC(tcp_notimely); 23667 if (optval) { 23668 rack->rc_skip_timely = 1; 23669 rack->r_ctl.rack_per_of_gp_rec = 90; 23670 rack->r_ctl.rack_per_of_gp_ca = 100; 23671 rack->r_ctl.rack_per_of_gp_ss = 250; 23672 } else { 23673 rack->rc_skip_timely = 0; 23674 } 23675 break; 23676 case TCP_GP_USE_LTBW: 23677 if (optval == 0) { 23678 rack->use_lesser_lt_bw = 0; 23679 rack->dis_lt_bw = 1; 23680 } else if (optval == 1) { 23681 rack->use_lesser_lt_bw = 1; 23682 rack->dis_lt_bw = 0; 23683 } else if (optval == 2) { 23684 rack->use_lesser_lt_bw = 0; 23685 rack->dis_lt_bw = 0; 23686 } 23687 break; 23688 case TCP_DATA_AFTER_CLOSE: 23689 RACK_OPTS_INC(tcp_data_after_close); 23690 if (optval) 23691 rack->rc_allow_data_af_clo = 1; 23692 else 23693 rack->rc_allow_data_af_clo = 0; 23694 break; 23695 default: 23696 break; 23697 } 23698 tcp_log_socket_option(tp, sopt_name, optval, error); 23699 return (error); 23700 } 23701 23702 static void 23703 rack_inherit(struct tcpcb *tp, struct inpcb *parent) 23704 { 23705 /* 23706 * A new connection has been created (tp) and 23707 * the parent is the inpcb given. 
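* (This is the tfb_inherit hook, so a connection created from a listening socket can pick up the RACK options configured on that listener.)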
We want to 23708 * apply a read-lock to the parent (we are already 23709 * holding a write lock on the tp) and copy anything 23710 * out of the rack specific data as long as its tfb is 23711 * the same as ours i.e. we are the same stack. Otherwise 23712 * we just return. 23713 */ 23714 struct tcpcb *par; 23715 struct tcp_rack *dest, *src; 23716 int cnt = 0; 23717 23718 par = intotcpcb(parent); 23719 if (par->t_fb != tp->t_fb) { 23720 /* Not the same stack */ 23721 tcp_log_socket_option(tp, 0, 0, 1); 23722 return; 23723 } 23724 /* Ok if we reach here lets setup the two rack pointers */ 23725 dest = (struct tcp_rack *)tp->t_fb_ptr; 23726 src = (struct tcp_rack *)par->t_fb_ptr; 23727 if ((src == NULL) || (dest == NULL)) { 23728 /* Huh? */ 23729 tcp_log_socket_option(tp, 0, 0, 2); 23730 return; 23731 } 23732 /* Now copy out anything we wish to inherit i.e. things in socket-options */ 23733 /* TCP_RACK_PROFILE we can't know but we can set DGP if its on */ 23734 if ((src->dgp_on) && (dest->dgp_on == 0)) { 23735 /* Profile 1 had to be set via sock opt */ 23736 rack_set_dgp(dest); 23737 cnt++; 23738 } 23739 /* TCP_RACK_SET_RXT_OPTIONS */ 23740 if (dest->full_size_rxt != src->full_size_rxt) { 23741 dest->full_size_rxt = src->full_size_rxt; 23742 cnt++; 23743 } 23744 if (dest->shape_rxt_to_pacing_min != src->shape_rxt_to_pacing_min) { 23745 dest->shape_rxt_to_pacing_min = src->shape_rxt_to_pacing_min; 23746 cnt++; 23747 } 23748 /* TCP_RACK_DSACK_OPT */ 23749 if (dest->rc_rack_tmr_std_based != src->rc_rack_tmr_std_based) { 23750 dest->rc_rack_tmr_std_based = src->rc_rack_tmr_std_based; 23751 cnt++; 23752 } 23753 if (dest->rc_rack_use_dsack != src->rc_rack_use_dsack) { 23754 dest->rc_rack_use_dsack = src->rc_rack_use_dsack; 23755 cnt++; 23756 } 23757 /* TCP_RACK_PACING_DIVISOR */ 23758 if (dest->r_ctl.pace_len_divisor != src->r_ctl.pace_len_divisor) { 23759 dest->r_ctl.pace_len_divisor = src->r_ctl.pace_len_divisor; 23760 cnt++; 23761 } 23762 /* TCP_RACK_HI_BETA */ 23763 if (src->rack_hibeta != dest->rack_hibeta) { 23764 cnt++; 23765 if (src->rack_hibeta) { 23766 dest->r_ctl.rc_saved_beta = src->r_ctl.rc_saved_beta; 23767 dest->rack_hibeta = 1; 23768 } else { 23769 dest->rack_hibeta = 0; 23770 } 23771 } 23772 /* TCP_RACK_TIMER_SLOP */ 23773 if (dest->r_ctl.timer_slop != src->r_ctl.timer_slop) { 23774 dest->r_ctl.timer_slop = src->r_ctl.timer_slop; 23775 cnt++; 23776 } 23777 /* TCP_RACK_PACING_BETA_ECN */ 23778 if (dest->r_ctl.rc_saved_beta_ecn != src->r_ctl.rc_saved_beta_ecn) { 23779 dest->r_ctl.rc_saved_beta_ecn = src->r_ctl.rc_saved_beta_ecn; 23780 cnt++; 23781 } 23782 /* We do not do TCP_DEFER_OPTIONS */ 23783 /* TCP_RACK_MEASURE_CNT */ 23784 if (dest->r_ctl.req_measurements != src->r_ctl.req_measurements) { 23785 dest->r_ctl.req_measurements = src->r_ctl.req_measurements; 23786 cnt++; 23787 } 23788 /* TCP_HDWR_UP_ONLY */ 23789 if (dest->r_up_only != src->r_up_only) { 23790 dest->r_up_only = src->r_up_only; 23791 cnt++; 23792 } 23793 /* TCP_FILLCW_RATE_CAP */ 23794 if (dest->r_ctl.fillcw_cap != src->r_ctl.fillcw_cap) { 23795 dest->r_ctl.fillcw_cap = src->r_ctl.fillcw_cap; 23796 cnt++; 23797 } 23798 /* TCP_PACING_RATE_CAP */ 23799 if (dest->r_ctl.bw_rate_cap != src->r_ctl.bw_rate_cap) { 23800 dest->r_ctl.bw_rate_cap = src->r_ctl.bw_rate_cap; 23801 cnt++; 23802 } 23803 /* A listener can't set TCP_HYBRID_PACING */ 23804 /* TCP_SIDECHAN_DIS */ 23805 if (dest->r_ctl.side_chan_dis_mask != src->r_ctl.side_chan_dis_mask) { 23806 dest->r_ctl.side_chan_dis_mask = src->r_ctl.side_chan_dis_mask; 23807 cnt++; 
23808 } 23809 /* TCP_SHARED_CWND_TIME_LIMIT */ 23810 if (dest->r_limit_scw != src->r_limit_scw) { 23811 dest->r_limit_scw = src->r_limit_scw; 23812 cnt++; 23813 } 23814 /* TCP_RACK_PACE_TO_FILL */ 23815 if (dest->rc_pace_to_cwnd != src->rc_pace_to_cwnd) { 23816 dest->rc_pace_to_cwnd = src->rc_pace_to_cwnd; 23817 cnt++; 23818 } 23819 if (dest->rc_pace_fill_if_rttin_range != src->rc_pace_fill_if_rttin_range) { 23820 dest->rc_pace_fill_if_rttin_range = src->rc_pace_fill_if_rttin_range; 23821 cnt++; 23822 } 23823 if (dest->rtt_limit_mul != src->rtt_limit_mul) { 23824 dest->rtt_limit_mul = src->rtt_limit_mul; 23825 cnt++; 23826 } 23827 /* TCP_RACK_NO_PUSH_AT_MAX */ 23828 if (dest->r_ctl.rc_no_push_at_mrtt != src->r_ctl.rc_no_push_at_mrtt) { 23829 dest->r_ctl.rc_no_push_at_mrtt = src->r_ctl.rc_no_push_at_mrtt; 23830 cnt++; 23831 } 23832 /* TCP_SHARED_CWND_ENABLE */ 23833 if (dest->rack_enable_scwnd != src->rack_enable_scwnd) { 23834 dest->rack_enable_scwnd = src->rack_enable_scwnd; 23835 cnt++; 23836 } 23837 /* TCP_USE_CMP_ACKS */ 23838 if (dest->r_use_cmp_ack != src->r_use_cmp_ack) { 23839 dest->r_use_cmp_ack = src->r_use_cmp_ack; 23840 cnt++; 23841 } 23842 23847 /* TCP_RACK_MBUF_QUEUE */ 23848 if (dest->r_mbuf_queue != src->r_mbuf_queue) { 23849 dest->r_mbuf_queue = src->r_mbuf_queue; 23850 cnt++; 23851 } 23852 if (dest->r_mbuf_queue || dest->rc_always_pace || dest->r_use_cmp_ack) { 23853 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23854 } else { 23855 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 23856 } 23857 if (dest->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) { 23858 tp->t_flags2 |= TF2_MBUF_ACKCMP; 23859 } 23860 /* TCP_RACK_NONRXT_CFG_RATE */ 23861 if (dest->rack_rec_nonrxt_use_cr != src->rack_rec_nonrxt_use_cr) { 23862 dest->rack_rec_nonrxt_use_cr = src->rack_rec_nonrxt_use_cr; 23863 cnt++; 23864 } 23865 /* TCP_NO_PRR */ 23866 if (dest->rack_no_prr != src->rack_no_prr) { 23867 dest->rack_no_prr = src->rack_no_prr; 23868 cnt++; 23869 } 23870 if (dest->no_prr_addback != src->no_prr_addback) { 23871 dest->no_prr_addback = src->no_prr_addback; 23872 cnt++; 23873 } 23874 /* RACK_CSPR_IS_FCC */ 23875 if (dest->cspr_is_fcc != src->cspr_is_fcc) { 23876 dest->cspr_is_fcc = src->cspr_is_fcc; 23877 cnt++; 23878 } 23879 /* TCP_TIMELY_DYN_ADJ */ 23880 if (dest->rc_gp_dyn_mul != src->rc_gp_dyn_mul) { 23881 dest->rc_gp_dyn_mul = src->rc_gp_dyn_mul; 23882 cnt++; 23883 } 23884 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { 23885 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; 23886 cnt++; 23887 } 23888 /* TCP_RACK_TLP_USE */ 23889 if (dest->rack_tlp_threshold_use != src->rack_tlp_threshold_use) { 23890 dest->rack_tlp_threshold_use = src->rack_tlp_threshold_use; 23891 cnt++; 23892 } 23893 /* we don't allow inheritance of TCP_RACK_PACE_ALWAYS */ 23894 /* TCP_BBR_RACK_INIT_RATE */ 23895 if (dest->r_ctl.init_rate != src->r_ctl.init_rate) { 23896 dest->r_ctl.init_rate = src->r_ctl.init_rate; 23897 cnt++; 23898 } 23899 /* TCP_RACK_FORCE_MSEG */ 23900 if (dest->rc_force_max_seg != src->rc_force_max_seg) { 23901 dest->rc_force_max_seg = src->rc_force_max_seg; 23902 cnt++; 23903 } 23904 /* TCP_RACK_PACE_MIN_SEG */ 23905 if (dest->r_ctl.rc_user_set_min_segs != src->r_ctl.rc_user_set_min_segs) { 23906 dest->r_ctl.rc_user_set_min_segs = src->r_ctl.rc_user_set_min_segs; 23907 cnt++; 23908 } 23909 /* we don't allow TCP_RACK_PACE_MAX_SEG */ 23910 /* TCP_RACK_PACE_RATE_REC,
TCP_RACK_PACE_RATE_SS, TCP_RACK_PACE_RATE_CA */ 23911 if (dest->r_ctl.rc_fixed_pacing_rate_ca != src->r_ctl.rc_fixed_pacing_rate_ca) { 23912 dest->r_ctl.rc_fixed_pacing_rate_ca = src->r_ctl.rc_fixed_pacing_rate_ca; 23913 cnt++; 23914 } 23915 if (dest->r_ctl.rc_fixed_pacing_rate_ss != src->r_ctl.rc_fixed_pacing_rate_ss) { 23916 dest->r_ctl.rc_fixed_pacing_rate_ss = src->r_ctl.rc_fixed_pacing_rate_ss; 23917 cnt++; 23918 } 23919 if (dest->r_ctl.rc_fixed_pacing_rate_rec != src->r_ctl.rc_fixed_pacing_rate_rec) { 23920 dest->r_ctl.rc_fixed_pacing_rate_rec = src->r_ctl.rc_fixed_pacing_rate_rec; 23921 cnt++; 23922 } 23923 /* TCP_RACK_GP_INCREASE_REC, TCP_RACK_GP_INCREASE_CA, TCP_RACK_GP_INCREASE_SS */ 23924 if (dest->r_ctl.rack_per_of_gp_rec != src->r_ctl.rack_per_of_gp_rec) { 23925 dest->r_ctl.rack_per_of_gp_rec = src->r_ctl.rack_per_of_gp_rec; 23926 cnt++; 23927 } 23928 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { 23929 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; 23930 cnt++; 23931 } 23932 23933 if (dest->r_ctl.rack_per_of_gp_ss != src->r_ctl.rack_per_of_gp_ss) { 23934 dest->r_ctl.rack_per_of_gp_ss = src->r_ctl.rack_per_of_gp_ss; 23935 cnt++; 23936 } 23937 /* TCP_RACK_RR_CONF */ 23938 if (dest->r_rr_config != src->r_rr_config) { 23939 dest->r_rr_config = src->r_rr_config; 23940 cnt++; 23941 } 23942 /* TCP_PACING_DND */ 23943 if (dest->rc_pace_dnd != src->rc_pace_dnd) { 23944 dest->rc_pace_dnd = src->rc_pace_dnd; 23945 cnt++; 23946 } 23947 /* TCP_HDWR_RATE_CAP */ 23948 if (dest->r_rack_hw_rate_caps != src->r_rack_hw_rate_caps) { 23949 dest->r_rack_hw_rate_caps = src->r_rack_hw_rate_caps; 23950 cnt++; 23951 } 23952 /* TCP_DGP_UPPER_BOUNDS */ 23953 if (dest->r_ctl.rack_per_upper_bound_ca != src->r_ctl.rack_per_upper_bound_ca) { 23954 dest->r_ctl.rack_per_upper_bound_ca = src->r_ctl.rack_per_upper_bound_ca; 23955 cnt++; 23956 } 23957 if (dest->r_ctl.rack_per_upper_bound_ss != src->r_ctl.rack_per_upper_bound_ss) { 23958 dest->r_ctl.rack_per_upper_bound_ss = src->r_ctl.rack_per_upper_bound_ss; 23959 cnt++; 23960 } 23961 /* TCP_SS_EEXIT */ 23962 if (dest->r_ctl.gp_rnd_thresh != src->r_ctl.gp_rnd_thresh) { 23963 dest->r_ctl.gp_rnd_thresh = src->r_ctl.gp_rnd_thresh; 23964 cnt++; 23965 } 23966 if (dest->r_ctl.gate_to_fs != src->r_ctl.gate_to_fs) { 23967 dest->r_ctl.gate_to_fs = src->r_ctl.gate_to_fs; 23968 cnt++; 23969 } 23970 if (dest->r_ctl.use_gp_not_last != src->r_ctl.use_gp_not_last) { 23971 dest->r_ctl.use_gp_not_last = src->r_ctl.use_gp_not_last; 23972 cnt++; 23973 } 23974 if (dest->r_ctl.gp_gain_req != src->r_ctl.gp_gain_req) { 23975 dest->r_ctl.gp_gain_req = src->r_ctl.gp_gain_req; 23976 cnt++; 23977 } 23978 /* TCP_BBR_HDWR_PACE */ 23979 if (dest->rack_hdw_pace_ena != src->rack_hdw_pace_ena) { 23980 dest->rack_hdw_pace_ena = src->rack_hdw_pace_ena; 23981 cnt++; 23982 } 23983 if (dest->rack_attempt_hdwr_pace != src->rack_attempt_hdwr_pace) { 23984 dest->rack_attempt_hdwr_pace = src->rack_attempt_hdwr_pace; 23985 cnt++; 23986 } 23987 /* TCP_RACK_PRR_SENDALOT */ 23988 if (dest->r_ctl.rc_prr_sendalot != src->r_ctl.rc_prr_sendalot) { 23989 dest->r_ctl.rc_prr_sendalot = src->r_ctl.rc_prr_sendalot; 23990 cnt++; 23991 } 23992 /* TCP_RACK_MIN_TO */ 23993 if (dest->r_ctl.rc_min_to != src->r_ctl.rc_min_to) { 23994 dest->r_ctl.rc_min_to = src->r_ctl.rc_min_to; 23995 cnt++; 23996 } 23997 /* TCP_RACK_EARLY_SEG */ 23998 if (dest->r_ctl.rc_early_recovery_segs != src->r_ctl.rc_early_recovery_segs) { 23999 dest->r_ctl.rc_early_recovery_segs = 
src->r_ctl.rc_early_recovery_segs; 24000 cnt++; 24001 } 24002 /* TCP_RACK_ENABLE_HYSTART */ 24003 if (par->t_ccv.flags != tp->t_ccv.flags) { 24004 cnt++; 24005 if (par->t_ccv.flags & CCF_HYSTART_ALLOWED) { 24006 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 24007 if (rack_do_hystart > RACK_HYSTART_ON) 24008 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 24009 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 24010 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 24011 } else { 24012 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 24013 } 24014 } 24015 /* TCP_RACK_REORD_THRESH */ 24016 if (dest->r_ctl.rc_reorder_shift != src->r_ctl.rc_reorder_shift) { 24017 dest->r_ctl.rc_reorder_shift = src->r_ctl.rc_reorder_shift; 24018 cnt++; 24019 } 24020 /* TCP_RACK_REORD_FADE */ 24021 if (dest->r_ctl.rc_reorder_fade != src->r_ctl.rc_reorder_fade) { 24022 dest->r_ctl.rc_reorder_fade = src->r_ctl.rc_reorder_fade; 24023 cnt++; 24024 } 24025 /* TCP_RACK_TLP_THRESH */ 24026 if (dest->r_ctl.rc_tlp_threshold != src->r_ctl.rc_tlp_threshold) { 24027 dest->r_ctl.rc_tlp_threshold = src->r_ctl.rc_tlp_threshold; 24028 cnt++; 24029 } 24030 /* TCP_BBR_USE_RACK_RR */ 24031 if (dest->use_rack_rr != src->use_rack_rr) { 24032 dest->use_rack_rr = src->use_rack_rr; 24033 cnt++; 24034 } 24035 /* TCP_RACK_PKT_DELAY */ 24036 if (dest->r_ctl.rc_pkt_delay != src->r_ctl.rc_pkt_delay) { 24037 dest->r_ctl.rc_pkt_delay = src->r_ctl.rc_pkt_delay; 24038 cnt++; 24039 } 24040 /* TCP_DELACK will get copied via the main code if applicable */ 24041 /* TCP_BBR_RACK_RTT_USE */ 24042 if (dest->r_ctl.rc_rate_sample_method != src->r_ctl.rc_rate_sample_method) { 24043 dest->r_ctl.rc_rate_sample_method = src->r_ctl.rc_rate_sample_method; 24044 cnt++; 24045 } 24046 /* TCP_HONOR_HPTS_MIN */ 24047 if (dest->r_use_hpts_min != src->r_use_hpts_min) { 24048 dest->r_use_hpts_min = src->r_use_hpts_min; 24049 cnt++; 24050 } 24051 if (dest->r_ctl.max_reduction != src->r_ctl.max_reduction) { 24052 dest->r_ctl.max_reduction = src->r_ctl.max_reduction; 24053 cnt++; 24054 } 24055 /* TCP_REC_IS_DYN */ 24056 if (dest->rc_gp_no_rec_chg != src->rc_gp_no_rec_chg) { 24057 dest->rc_gp_no_rec_chg = src->rc_gp_no_rec_chg; 24058 cnt++; 24059 } 24060 if (dest->rc_skip_timely != src->rc_skip_timely) { 24061 dest->rc_skip_timely = src->rc_skip_timely; 24062 cnt++; 24063 } 24064 /* TCP_DATA_AFTER_CLOSE */ 24065 if (dest->rc_allow_data_af_clo != src->rc_allow_data_af_clo) { 24066 dest->rc_allow_data_af_clo = src->rc_allow_data_af_clo; 24067 cnt++; 24068 } 24069 /* TCP_GP_USE_LTBW */ 24070 if (src->use_lesser_lt_bw != dest->use_lesser_lt_bw) { 24071 dest->use_lesser_lt_bw = src->use_lesser_lt_bw; 24072 cnt++; 24073 } 24074 if (dest->dis_lt_bw != src->dis_lt_bw) { 24075 dest->dis_lt_bw = src->dis_lt_bw; 24076 cnt++; 24077 } 24078 tcp_log_socket_option(tp, 0, cnt, 0); 24079 } 24080 24081 24082 static void 24083 rack_apply_deferred_options(struct tcp_rack *rack) 24084 { 24085 struct deferred_opt_list *dol, *sdol; 24086 uint32_t s_optval; 24087 24088 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) { 24089 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 24090 /* A disadvantage of deferral is that you lose the error return */ 24091 s_optval = (uint32_t)dol->optval; 24092 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval, NULL); 24093 free(dol, M_TCPDO); 24094 } 24095 } 24096 24097 static void 24098 rack_hw_tls_change(struct tcpcb *tp, int chg) 24099 { 24100 /* Update HW TLS state */ 24101 struct tcp_rack *rack; 24102 24103 rack
= (struct tcp_rack *)tp->t_fb_ptr; 24104 if (chg) 24105 rack->r_ctl.fsb.hw_tls = 1; 24106 else 24107 rack->r_ctl.fsb.hw_tls = 0; 24108 } 24109 24110 static int 24111 rack_pru_options(struct tcpcb *tp, int flags) 24112 { 24113 if (flags & PRUS_OOB) 24114 return (EOPNOTSUPP); 24115 return (0); 24116 } 24117 24118 static bool 24119 rack_wake_check(struct tcpcb *tp) 24120 { 24121 struct tcp_rack *rack; 24122 struct timeval tv; 24123 uint32_t cts; 24124 24125 rack = (struct tcp_rack *)tp->t_fb_ptr; 24126 if (rack->r_ctl.rc_hpts_flags) { 24127 cts = tcp_get_usecs(&tv); 24128 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == PACE_PKT_OUTPUT){ 24129 /* 24130 * Pacing timer is up, check if we are ready. 24131 */ 24132 if (TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) 24133 return (true); 24134 } else if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) != 0) { 24135 /* 24136 * A timer is up, check if we are ready. 24137 */ 24138 if (TSTMP_GEQ(cts, rack->r_ctl.rc_timer_exp)) 24139 return (true); 24140 } 24141 } 24142 return (false); 24143 } 24144 24145 static struct tcp_function_block __tcp_rack = { 24146 .tfb_tcp_block_name = __XSTRING(STACKNAME), 24147 .tfb_tcp_output = rack_output, 24148 .tfb_do_queued_segments = ctf_do_queued_segments, 24149 .tfb_do_segment_nounlock = rack_do_segment_nounlock, 24150 .tfb_tcp_do_segment = rack_do_segment, 24151 .tfb_tcp_ctloutput = rack_ctloutput, 24152 .tfb_tcp_fb_init = rack_init, 24153 .tfb_tcp_fb_fini = rack_fini, 24154 .tfb_tcp_timer_stop_all = rack_stopall, 24155 .tfb_tcp_rexmit_tmr = rack_remxt_tmr, 24156 .tfb_tcp_handoff_ok = rack_handoff_ok, 24157 .tfb_tcp_mtu_chg = rack_mtu_change, 24158 .tfb_pru_options = rack_pru_options, 24159 .tfb_hwtls_change = rack_hw_tls_change, 24160 .tfb_chg_query = rack_chg_query, 24161 .tfb_switch_failed = rack_switch_failed, 24162 .tfb_early_wake_check = rack_wake_check, 24163 .tfb_compute_pipe = rack_compute_pipe, 24164 .tfb_stack_info = rack_stack_information, 24165 .tfb_inherit = rack_inherit, 24166 .tfb_flags = TCP_FUNC_OUTPUT_CANDROP | TCP_FUNC_DEFAULT_OK, 24167 24168 }; 24169 24170 /* 24171 * rack_ctloutput() must drop the inpcb lock before performing copyin on 24172 * socket option arguments. When it re-acquires the lock after the copy, it 24173 * has to revalidate that the connection is still valid for the socket 24174 * option. 24175 */ 24176 static int 24177 rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt) 24178 { 24179 struct inpcb *inp = tptoinpcb(tp); 24180 #ifdef INET 24181 struct ip *ip; 24182 #endif 24183 struct tcp_rack *rack; 24184 struct tcp_hybrid_req hybrid; 24185 uint64_t loptval; 24186 int32_t error = 0, optval; 24187 24188 rack = (struct tcp_rack *)tp->t_fb_ptr; 24189 if (rack == NULL) { 24190 INP_WUNLOCK(inp); 24191 return (EINVAL); 24192 } 24193 #ifdef INET 24194 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 24195 #endif 24196 24197 switch (sopt->sopt_level) { 24198 #ifdef INET6 24199 case IPPROTO_IPV6: 24200 MPASS(inp->inp_vflag & INP_IPV6PROTO); 24201 switch (sopt->sopt_name) { 24202 case IPV6_USE_MIN_MTU: 24203 tcp6_use_min_mtu(tp); 24204 break; 24205 } 24206 INP_WUNLOCK(inp); 24207 return (0); 24208 #endif 24209 #ifdef INET 24210 case IPPROTO_IP: 24211 switch (sopt->sopt_name) { 24212 case IP_TOS: 24213 /* 24214 * The DSCP codepoint has changed, update the fsb. 24215 */ 24216 ip->ip_tos = rack->rc_inp->inp_ip_tos; 24217 break; 24218 case IP_TTL: 24219 /* 24220 * The TTL has changed, update the fsb. 
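* (The fsb caches a prebuilt IP/TCP header template for the fast send path, so header fields changed through socket options have to be refreshed here.)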
24221 */ 24222 ip->ip_ttl = rack->rc_inp->inp_ip_ttl; 24223 break; 24224 } 24225 INP_WUNLOCK(inp); 24226 return (0); 24227 #endif 24228 #ifdef SO_PEERPRIO 24229 case SOL_SOCKET: 24230 switch (sopt->sopt_name) { 24231 case SO_PEERPRIO: /* SC-URL:bs */ 24232 /* Already read in and sanity checked in sosetopt(). */ 24233 if (inp->inp_socket) { 24234 rack->client_bufferlvl = inp->inp_socket->so_peerprio; 24235 } 24236 break; 24237 } 24238 INP_WUNLOCK(inp); 24239 return (0); 24240 #endif 24241 case IPPROTO_TCP: 24242 switch (sopt->sopt_name) { 24243 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */ 24244 /* Pacing related ones */ 24245 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */ 24246 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */ 24247 case TCP_RACK_PACE_MIN_SEG: /* URL:pace_min_seg */ 24248 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */ 24249 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */ 24250 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */ 24251 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss*/ 24252 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */ 24253 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */ 24254 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */ 24255 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */ 24256 case TCP_RACK_RR_CONF: /* URL:rrr_conf */ 24257 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */ 24258 case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */ 24259 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */ 24260 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */ 24261 case TCP_FILLCW_RATE_CAP: /* URL:fillcw_cap */ 24262 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */ 24263 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */ 24264 /* End pacing related */ 24265 case TCP_DELACK: /* URL:delack (in base TCP i.e. tcp_hints along with cc etc ) */ 24266 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */ 24267 case TCP_RACK_MIN_TO: /* URL:min_to */ 24268 case TCP_RACK_EARLY_SEG: /* URL:early_seg */ 24269 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */ 24270 case TCP_RACK_REORD_FADE: /* URL:reord_fade */ 24271 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */ 24272 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */ 24273 case TCP_RACK_TLP_USE: /* URL:tlp_use */ 24274 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */ 24275 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */ 24276 case TCP_NO_PRR: /* URL:noprr */ 24277 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */ 24278 case TCP_DATA_AFTER_CLOSE: /* no URL */ 24279 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */ 24280 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */ 24281 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */ 24282 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */ 24283 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */ 24284 case TCP_RACK_PROFILE: /* URL:profile */ 24285 case TCP_SIDECHAN_DIS: /* URL:scodm */ 24286 case TCP_HYBRID_PACING: /* URL:pacing=hybrid */ 24287 case TCP_USE_CMP_ACKS: /* URL:cmpack */ 24288 case TCP_RACK_ABC_VAL: /* URL:labc */ 24289 case TCP_REC_ABC_VAL: /* URL:reclabc */ 24290 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */ 24291 case TCP_DEFER_OPTIONS: /* URL:defer */ 24292 case TCP_RACK_DSACK_OPT: /* URL:dsack */ 24293 case TCP_RACK_TIMER_SLOP: /* URL:timer_slop */ 24294 case TCP_RACK_ENABLE_HYSTART: /* URL:hystart */ 24295 case TCP_RACK_SET_RXT_OPTIONS: /* URL:rxtsz */ 24296 case TCP_RACK_HI_BETA: /* URL:hibeta */ 24297 case TCP_RACK_SPLIT_LIMIT: /* URL:split */ 24298 case TCP_SS_EEXIT: /* URL:eexit */ 24299 case TCP_DGP_UPPER_BOUNDS: /* URL:upper */ 24300 case TCP_RACK_PACING_DIVISOR: /* URL:divisor */ 24301 
case TCP_PACING_DND: /* URL:dnd */ 24302 case TCP_NO_TIMELY: /* URL:notimely */ 24303 case RACK_CSPR_IS_FCC: /* URL:csprisfcc */ 24304 case TCP_HONOR_HPTS_MIN: /* URL:hptsmin */ 24305 case TCP_REC_IS_DYN: /* URL:dynrec */ 24306 case TCP_GP_USE_LTBW: /* URL:useltbw */ 24307 goto process_opt; 24308 break; 24309 default: 24310 /* Filter off all unknown options to the base stack */ 24311 return (tcp_default_ctloutput(tp, sopt)); 24312 break; 24313 } 24314 default: 24315 INP_WUNLOCK(inp); 24316 return (0); 24317 } 24318 process_opt: 24319 INP_WUNLOCK(inp); 24320 if ((sopt->sopt_name == TCP_PACING_RATE_CAP) || 24321 (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) { 24322 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval)); 24323 /* 24324 * We truncate it down to 32 bits for the socket-option trace; this 24325 * means rates > 34Gbps won't show right, but that's probably ok. 24326 */ 24327 optval = (uint32_t)loptval; 24328 } else if (sopt->sopt_name == TCP_HYBRID_PACING) { 24329 error = sooptcopyin(sopt, &hybrid, sizeof(hybrid), sizeof(hybrid)); 24330 } else { 24331 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 24332 /* Save it in 64 bit form too */ 24333 loptval = optval; 24334 } 24335 if (error) 24336 return (error); 24337 INP_WLOCK(inp); 24338 if (tp->t_fb != &__tcp_rack) { 24339 INP_WUNLOCK(inp); 24340 return (ENOPROTOOPT); 24341 } 24342 if (rack->defer_options && (rack->gp_ready == 0) && 24343 (sopt->sopt_name != TCP_DEFER_OPTIONS) && 24344 (sopt->sopt_name != TCP_HYBRID_PACING) && 24345 (sopt->sopt_name != TCP_RACK_SET_RXT_OPTIONS) && 24346 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) && 24347 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) { 24348 /* Options are being deferred */ 24349 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { 24350 INP_WUNLOCK(inp); 24351 return (0); 24352 } else { 24353 /* No memory to defer, fail */ 24354 INP_WUNLOCK(inp); 24355 return (ENOMEM); 24356 } 24357 } 24358 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval, &hybrid); 24359 INP_WUNLOCK(inp); 24360 return (error); 24361 } 24362 24363 static void 24364 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti) 24365 { 24366 24367 INP_WLOCK_ASSERT(tptoinpcb(tp)); 24368 bzero(ti, sizeof(*ti)); 24369 24370 ti->tcpi_state = tp->t_state; 24371 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP)) 24372 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; 24373 if (tp->t_flags & TF_SACK_PERMIT) 24374 ti->tcpi_options |= TCPI_OPT_SACK; 24375 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) { 24376 ti->tcpi_options |= TCPI_OPT_WSCALE; 24377 ti->tcpi_snd_wscale = tp->snd_scale; 24378 ti->tcpi_rcv_wscale = tp->rcv_scale; 24379 } 24380 if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT)) 24381 ti->tcpi_options |= TCPI_OPT_ECN; 24382 if (tp->t_flags & TF_FASTOPEN) 24383 ti->tcpi_options |= TCPI_OPT_TFO; 24384 /* t_rcvtime is still kept in ticks */ 24385 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick; 24386 /* Since we hold everything in precise useconds this is easy */ 24387 ti->tcpi_rtt = tp->t_srtt; 24388 ti->tcpi_rttvar = tp->t_rttvar; 24389 ti->tcpi_rto = tp->t_rxtcur; 24390 ti->tcpi_snd_ssthresh = tp->snd_ssthresh; 24391 ti->tcpi_snd_cwnd = tp->snd_cwnd; 24392 /* 24393 * FreeBSD-specific extension fields for tcp_info. 24394 */ 24395 ti->tcpi_rcv_space = tp->rcv_wnd; 24396 ti->tcpi_rcv_nxt = tp->rcv_nxt; 24397 ti->tcpi_snd_wnd = tp->snd_wnd; 24398 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. */ 24399 ti->tcpi_snd_nxt = tp->snd_nxt; 24400 ti->tcpi_snd_mss = tp->t_maxseg; 24401 ti->tcpi_rcv_mss = tp->t_maxseg; 24402 ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack; 24403 ti->tcpi_rcv_ooopack = tp->t_rcvoopack; 24404 ti->tcpi_snd_zerowin = tp->t_sndzerowin; 24405 ti->tcpi_total_tlp = tp->t_sndtlppack; 24406 ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte; 24407 ti->tcpi_rttmin = tp->t_rttlow; 24408 #ifdef NETFLIX_STATS 24409 memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo)); 24410 #endif 24411 #ifdef TCP_OFFLOAD 24412 if (tp->t_flags & TF_TOE) { 24413 ti->tcpi_options |= TCPI_OPT_TOE; 24414 tcp_offload_tcp_info(tp, ti); 24415 } 24416 #endif 24417 } 24418 24419 static int 24420 rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt) 24421 { 24422 struct inpcb *inp = tptoinpcb(tp); 24423 struct tcp_rack *rack; 24424 int32_t error, optval; 24425 uint64_t val, loptval; 24426 struct tcp_info ti; 24427 /* 24428 * Because all our options are either boolean or an int, we can just 24429 * pull everything into optval and then unlock and copy. If we ever 24430 * add an option that is not an int, then this will have quite an 24431 * impact on this routine. 24432 */ 24433 error = 0; 24434 rack = (struct tcp_rack *)tp->t_fb_ptr; 24435 if (rack == NULL) { 24436 INP_WUNLOCK(inp); 24437 return (EINVAL); 24438 } 24439 switch (sopt->sopt_name) { 24440 case TCP_INFO: 24441 /* First get the info filled */ 24442 rack_fill_info(tp, &ti); 24443 /* Fix up the rtt related fields if needed */ 24444 INP_WUNLOCK(inp); 24445 error = sooptcopyout(sopt, &ti, sizeof ti); 24446 return (error); 24447 /* 24448 * Beta is the congestion control value for NewReno that influences how 24449 * much of a backoff happens when loss is detected. It is normally set 24450 * to 50 for 50% i.e. the cwnd is reduced to 50% of its previous value 24451 * when you exit recovery. 24452 */ 24453 case TCP_RACK_PACING_BETA: 24454 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) 24455 error = EINVAL; 24456 else if (rack->rc_pacing_cc_set == 0) 24457 optval = rack->r_ctl.rc_saved_beta; 24458 else { 24459 /* 24460 * Reach out into the CC data and report back what 24461 * I have previously set. Yeah it looks hackish but 24462 * we don't want to report the saved values. 24463 */ 24464 if (tp->t_ccv.cc_data) 24465 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta; 24466 else 24467 error = EINVAL; 24468 } 24469 break; 24470 /* 24471 * Beta_ecn is the congestion control value for NewReno that influences how 24472 * much of a backoff happens when an ECN mark is detected. It is normally set 24473 * to 80 for 80% i.e. the cwnd is reduced by 20% of its previous value when 24474 * you exit recovery. Note that classic ECN has a beta of 50; it is only 24475 * ABE ECN that uses this "less" value, but we do too with pacing :) 24476 */ 24477 case TCP_RACK_PACING_BETA_ECN: 24478 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) 24479 error = EINVAL; 24480 else if (rack->rc_pacing_cc_set == 0) 24481 optval = rack->r_ctl.rc_saved_beta_ecn; 24482 else { 24483 /* 24484 * Reach out into the CC data and report back what 24485 * I have previously set. Yeah it looks hackish but 24486 * we don't want to report the saved values.
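* (rc_pacing_cc_set indicates that rack_set_cc_pacing() has already pushed our beta values into the CC module, so the live value is read from cc_data rather than from the rc_saved_* copy.)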
24487 */ 24488 if (tp->t_ccv.cc_data) 24489 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn; 24490 else 24491 error = EINVAL; 24492 } 24493 break; 24494 case TCP_RACK_DSACK_OPT: 24495 optval = 0; 24496 if (rack->rc_rack_tmr_std_based) { 24497 optval |= 1; 24498 } 24499 if (rack->rc_rack_use_dsack) { 24500 optval |= 2; 24501 } 24502 break; 24503 case TCP_RACK_ENABLE_HYSTART: 24504 { 24505 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { 24506 optval = RACK_HYSTART_ON; 24507 if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND) 24508 optval = RACK_HYSTART_ON_W_SC; 24509 if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH) 24510 optval = RACK_HYSTART_ON_W_SC_C; 24511 } else { 24512 optval = RACK_HYSTART_OFF; 24513 } 24514 } 24515 break; 24516 case TCP_RACK_DGP_IN_REC: 24517 error = EINVAL; 24518 break; 24519 case TCP_RACK_HI_BETA: 24520 optval = rack->rack_hibeta; 24521 break; 24522 case TCP_DEFER_OPTIONS: 24523 optval = rack->defer_options; 24524 break; 24525 case TCP_RACK_MEASURE_CNT: 24526 optval = rack->r_ctl.req_measurements; 24527 break; 24528 case TCP_REC_ABC_VAL: 24529 optval = rack->r_use_labc_for_rec; 24530 break; 24531 case TCP_RACK_ABC_VAL: 24532 optval = rack->rc_labc; 24533 break; 24534 case TCP_HDWR_UP_ONLY: 24535 optval = rack->r_up_only; 24536 break; 24537 case TCP_FILLCW_RATE_CAP: 24538 loptval = rack->r_ctl.fillcw_cap; 24539 break; 24540 case TCP_PACING_RATE_CAP: 24541 loptval = rack->r_ctl.bw_rate_cap; 24542 break; 24543 case TCP_RACK_PROFILE: 24544 /* You cannot retrieve a profile, it's write-only */ 24545 error = EINVAL; 24546 break; 24547 case TCP_SIDECHAN_DIS: 24548 optval = rack->r_ctl.side_chan_dis_mask; 24549 break; 24550 case TCP_HYBRID_PACING: 24551 /* You cannot retrieve hybrid pacing information, it's write-only */ 24552 error = EINVAL; 24553 break; 24554 case TCP_USE_CMP_ACKS: 24555 optval = rack->r_use_cmp_ack; 24556 break; 24557 case TCP_RACK_PACE_TO_FILL: 24558 optval = rack->rc_pace_to_cwnd; 24559 break; 24560 case TCP_RACK_NO_PUSH_AT_MAX: 24561 optval = rack->r_ctl.rc_no_push_at_mrtt; 24562 break; 24563 case TCP_SHARED_CWND_ENABLE: 24564 optval = rack->rack_enable_scwnd; 24565 break; 24566 case TCP_RACK_NONRXT_CFG_RATE: 24567 optval = rack->rack_rec_nonrxt_use_cr; 24568 break; 24569 case TCP_NO_PRR: 24570 if (rack->rack_no_prr == 1) 24571 optval = 1; 24572 else if (rack->no_prr_addback == 1) 24573 optval = 2; 24574 else 24575 optval = 0; 24576 break; 24577 case TCP_GP_USE_LTBW: 24578 if (rack->dis_lt_bw) { 24579 /* It is not used */ 24580 optval = 0; 24581 } else if (rack->use_lesser_lt_bw) { 24582 /* we use min() */ 24583 optval = 1; 24584 } else { 24585 /* we use max() */ 24586 optval = 2; 24587 } 24588 break; 24589 case TCP_RACK_DO_DETECTION: 24590 error = EINVAL; 24591 break; 24592 case TCP_RACK_MBUF_QUEUE: 24593 /* Do we use the LRO mbuf-queue feature? */ 24594 optval = rack->r_mbuf_queue; 24595 break; 24596 case RACK_CSPR_IS_FCC: 24597 optval = rack->cspr_is_fcc; 24598 break; 24599 case TCP_TIMELY_DYN_ADJ: 24600 optval = rack->rc_gp_dyn_mul; 24601 break; 24602 case TCP_BBR_IWINTSO: 24603 error = EINVAL; 24604 break; 24605 case TCP_RACK_TLP_REDUCE: 24606 /* RACK TLP cwnd reduction (bool) */ 24607 optval = rack->r_ctl.rc_tlp_cwnd_reduce; 24608 break; 24609 case TCP_BBR_RACK_INIT_RATE: 24610 val = rack->r_ctl.init_rate; 24611 /* convert to kbits per sec */ 24612 val *= 8; 24613 val /= 1000; 24614 optval = (uint32_t)val; 24615 break; 24616 case TCP_RACK_FORCE_MSEG: 24617 optval = rack->rc_force_max_seg; 24618 break; 24619 case TCP_RACK_PACE_MIN_SEG: 24620 optval =
rack->r_ctl.rc_user_set_min_segs; 24621 break; 24622 case TCP_RACK_PACE_MAX_SEG: 24623 /* Max segments in a pace */ 24624 optval = rack->rc_user_set_max_segs; 24625 break; 24626 case TCP_RACK_PACE_ALWAYS: 24627 /* Use the always pace method */ 24628 optval = rack->rc_always_pace; 24629 break; 24630 case TCP_RACK_PRR_SENDALOT: 24631 /* Allow PRR to send more than one seg */ 24632 optval = rack->r_ctl.rc_prr_sendalot; 24633 break; 24634 case TCP_RACK_MIN_TO: 24635 /* Minimum time between rack t-o's in ms */ 24636 optval = rack->r_ctl.rc_min_to; 24637 break; 24638 case TCP_RACK_SPLIT_LIMIT: 24639 optval = rack->r_ctl.rc_split_limit; 24640 break; 24641 case TCP_RACK_EARLY_SEG: 24642 /* If early recovery max segments */ 24643 optval = rack->r_ctl.rc_early_recovery_segs; 24644 break; 24645 case TCP_RACK_REORD_THRESH: 24646 /* RACK reorder threshold (shift amount) */ 24647 optval = rack->r_ctl.rc_reorder_shift; 24648 break; 24649 case TCP_SS_EEXIT: 24650 if (rack->r_ctl.gp_rnd_thresh) { 24651 uint32_t v; 24652 24653 v = rack->r_ctl.gp_gain_req; 24654 v <<= 18; 24655 optval = v | (rack->r_ctl.gp_rnd_thresh & 0xff); 24656 if (rack->r_ctl.gate_to_fs == 1) 24657 optval |= 0x10000; 24658 } else 24659 optval = 0; 24660 break; 24661 case TCP_RACK_REORD_FADE: 24662 /* Does reordering fade after ms time */ 24663 optval = rack->r_ctl.rc_reorder_fade; 24664 break; 24665 case TCP_BBR_USE_RACK_RR: 24666 /* Do we use the rack cheat for rxt */ 24667 optval = rack->use_rack_rr; 24668 break; 24669 case TCP_RACK_RR_CONF: 24670 optval = rack->r_rr_config; 24671 break; 24672 case TCP_HDWR_RATE_CAP: 24673 optval = rack->r_rack_hw_rate_caps; 24674 break; 24675 case TCP_BBR_HDWR_PACE: 24676 optval = rack->rack_hdw_pace_ena; 24677 break; 24678 case TCP_RACK_TLP_THRESH: 24679 /* RACK TLP threshold i.e. srtt+(srtt/N) */ 24680 optval = rack->r_ctl.rc_tlp_threshold; 24681 break; 24682 case TCP_RACK_PKT_DELAY: 24683 /* RACK added ms i.e.
rack-rtt + reord + N */ 24684 optval = rack->r_ctl.rc_pkt_delay; 24685 break; 24686 case TCP_RACK_TLP_USE: 24687 optval = rack->rack_tlp_threshold_use; 24688 break; 24689 case TCP_PACING_DND: 24690 optval = rack->rc_pace_dnd; 24691 break; 24692 case TCP_RACK_PACE_RATE_CA: 24693 optval = rack->r_ctl.rc_fixed_pacing_rate_ca; 24694 break; 24695 case TCP_RACK_PACE_RATE_SS: 24696 optval = rack->r_ctl.rc_fixed_pacing_rate_ss; 24697 break; 24698 case TCP_RACK_PACE_RATE_REC: 24699 optval = rack->r_ctl.rc_fixed_pacing_rate_rec; 24700 break; 24701 case TCP_DGP_UPPER_BOUNDS: 24702 optval = rack->r_ctl.rack_per_upper_bound_ss; 24703 optval <<= 16; 24704 optval |= rack->r_ctl.rack_per_upper_bound_ca; 24705 break; 24706 case TCP_RACK_GP_INCREASE_SS: 24707 optval = rack->r_ctl.rack_per_of_gp_ss; 24708 break; 24709 case TCP_RACK_GP_INCREASE_CA: 24710 optval = rack->r_ctl.rack_per_of_gp_ca; 24711 break; 24712 case TCP_RACK_PACING_DIVISOR: 24713 optval = rack->r_ctl.pace_len_divisor; 24714 break; 24715 case TCP_BBR_RACK_RTT_USE: 24716 optval = rack->r_ctl.rc_rate_sample_method; 24717 break; 24718 case TCP_DELACK: 24719 optval = tp->t_delayed_ack; 24720 break; 24721 case TCP_DATA_AFTER_CLOSE: 24722 optval = rack->rc_allow_data_af_clo; 24723 break; 24724 case TCP_SHARED_CWND_TIME_LIMIT: 24725 optval = rack->r_limit_scw; 24726 break; 24727 case TCP_HONOR_HPTS_MIN: 24728 if (rack->r_use_hpts_min) 24729 optval = rack->r_ctl.max_reduction; 24730 else 24731 optval = 0; 24732 break; 24733 case TCP_REC_IS_DYN: 24734 optval = rack->rc_gp_no_rec_chg; 24735 break; 24736 case TCP_NO_TIMELY: 24737 optval = rack->rc_skip_timely; 24738 break; 24739 case TCP_RACK_TIMER_SLOP: 24740 optval = rack->r_ctl.timer_slop; 24741 break; 24742 default: 24743 return (tcp_default_ctloutput(tp, sopt)); 24744 break; 24745 } 24746 INP_WUNLOCK(inp); 24747 if (error == 0) { 24748 if ((sopt->sopt_name == TCP_PACING_RATE_CAP) || 24749 (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) 24750 error = sooptcopyout(sopt, &loptval, sizeof loptval); 24751 else 24752 error = sooptcopyout(sopt, &optval, sizeof optval); 24753 } 24754 return (error); 24755 } 24756 24757 static int 24758 rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt) 24759 { 24760 if (sopt->sopt_dir == SOPT_SET) { 24761 return (rack_set_sockopt(tp, sopt)); 24762 } else if (sopt->sopt_dir == SOPT_GET) { 24763 return (rack_get_sockopt(tp, sopt)); 24764 } else { 24765 panic("%s: sopt_dir %d", __func__, sopt->sopt_dir); 24766 } 24767 } 24768 24769 static const char *rack_stack_names[] = { 24770 __XSTRING(STACKNAME), 24771 #ifdef STACKALIAS 24772 __XSTRING(STACKALIAS), 24773 #endif 24774 }; 24775 24776 static int 24777 rack_ctor(void *mem, int32_t size, void *arg, int32_t how) 24778 { 24779 memset(mem, 0, size); 24780 return (0); 24781 } 24782 24783 static void 24784 rack_dtor(void *mem, int32_t size, void *arg) 24785 { 24786 24787 } 24788 24789 static bool rack_mod_inited = false; 24790 24791 static int 24792 tcp_addrack(module_t mod, int32_t type, void *data) 24793 { 24794 int32_t err = 0; 24795 int num_stacks; 24796 24797 switch (type) { 24798 case MOD_LOAD: 24799 rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map", 24800 sizeof(struct rack_sendmap), 24801 rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0); 24802 24803 rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb", 24804 sizeof(struct tcp_rack), 24805 rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); 24806 24807 sysctl_ctx_init(&rack_sysctl_ctx); 24808 rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 24809
SYSCTL_STATIC_CHILDREN(_net_inet_tcp), 24810 OID_AUTO, 24811 #ifdef STACKALIAS 24812 __XSTRING(STACKALIAS), 24813 #else 24814 __XSTRING(STACKNAME), 24815 #endif 24816 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 24817 ""); 24818 if (rack_sysctl_root == NULL) { 24819 printf("Failed to add sysctl node\n"); 24820 err = EFAULT; 24821 goto free_uma; 24822 } 24823 rack_init_sysctls(); 24824 num_stacks = nitems(rack_stack_names); 24825 err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK, 24826 rack_stack_names, &num_stacks); 24827 if (err) { 24828 printf("Failed to register %s stack name for " 24829 "%s module\n", rack_stack_names[num_stacks], 24830 __XSTRING(MODNAME)); 24831 sysctl_ctx_free(&rack_sysctl_ctx); 24832 free_uma: 24833 uma_zdestroy(rack_zone); 24834 uma_zdestroy(rack_pcb_zone); 24835 rack_counter_destroy(); 24836 printf("Failed to register rack module -- err:%d\n", err); 24837 return (err); 24838 } 24839 tcp_lro_reg_mbufq(); 24840 rack_mod_inited = true; 24841 break; 24842 case MOD_QUIESCE: 24843 err = deregister_tcp_functions(&__tcp_rack, true, false); 24844 break; 24845 case MOD_UNLOAD: 24846 err = deregister_tcp_functions(&__tcp_rack, false, true); 24847 if (err == EBUSY) 24848 break; 24849 if (rack_mod_inited) { 24850 uma_zdestroy(rack_zone); 24851 uma_zdestroy(rack_pcb_zone); 24852 sysctl_ctx_free(&rack_sysctl_ctx); 24853 rack_counter_destroy(); 24854 rack_mod_inited = false; 24855 } 24856 tcp_lro_dereg_mbufq(); 24857 err = 0; 24858 break; 24859 default: 24860 return (EOPNOTSUPP); 24861 } 24862 return (err); 24863 } 24864 24865 static moduledata_t tcp_rack = { 24866 .name = __XSTRING(MODNAME), 24867 .evhand = tcp_addrack, 24868 .priv = 0 24869 }; 24870 24871 MODULE_VERSION(MODNAME, 1); 24872 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY); 24873 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1); 24874 24875 #endif /* #if !defined(INET) && !defined(INET6) */ 24876
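/*
 * Usage sketch (illustrative only, not compiled as part of this file):
 * an application would select this stack and then tune it through the
 * IPPROTO_TCP socket options handled by rack_set_sockopt() above. The
 * stack name "rack" assumes the default STACKNAME; the names actually
 * registered are listed by the net.inet.tcp.functions_available sysctl.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <string.h>
 *
 *	static int
 *	use_rack_stack(int fd)
 *	{
 *		struct tcp_function_set fs;
 *		int on = 1;
 *
 *		memset(&fs, 0, sizeof(fs));
 *		strlcpy(fs.function_set_name, "rack",
 *		    sizeof(fs.function_set_name));
 *		// Switch the connection to the RACK stack ...
 *		if (setsockopt(fd, IPPROTO_TCP, TCP_FUNCTION_BLK,
 *		    &fs, sizeof(fs)) == -1)
 *			return (-1);
 *		// ... then set a RACK-specific option; booleans copy in as an int.
 *		return (setsockopt(fd, IPPROTO_TCP, TCP_RACK_PACE_ALWAYS,
 *		    &on, sizeof(on)));
 *	}
 */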