/*-
 * Copyright (c) 2016-2020 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_ratelimit.h"
#include "opt_kern_tls.h"
#if defined(INET) || defined(INET6)
#include <sys/param.h>
#include <sys/arb.h>
#include <sys/module.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#ifdef STATS
#include <sys/qmath.h>
#include <sys/tree.h>
#include <sys/stats.h>		/* Must come after qmath.h and tree.h */
#else
#include <sys/tree.h>
#endif
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/tim_filter.h>
#include <sys/smp.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>
#include <sys/protosw.h>
#ifdef TCP_ACCOUNTING
#include <sys/sched.h>
#include <machine/cpu.h>
#endif
#include <vm/uma.h>

#include <net/route.h>
#include <net/route/nhop.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_syncache.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_ratelimit.h>
#include <netinet/tcp_accounting.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_newreno.h>
#include <netinet/tcp_fastopen.h>
#include <netinet/tcp_lro.h>
#ifdef NETFLIX_SHARED_CWND
#include <netinet/tcp_shared_cwnd.h>
#endif
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcp_ecn.h>

#include <netipsec/ipsec_support.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif				/* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
#include "sack_filter.h"
#include "tcp_rack.h"
#include "tailq_hash.h"
#include "rack_bbr_common.h"

uma_zone_t rack_zone;
uma_zone_t rack_pcb_zone;

#ifndef TICKS2SBT
#define	TICKS2SBT(__t)	(tick_sbt * ((sbintime_t)(__t)))
#endif

VNET_DECLARE(uint32_t, newreno_beta);
VNET_DECLARE(uint32_t, newreno_beta_ecn);
#define V_newreno_beta VNET(newreno_beta)
#define V_newreno_beta_ecn VNET(newreno_beta_ecn)

#define	M_TCPFSB	__CONCAT(M_TCPFSB, STACKNAME)
#define	M_TCPDO		__CONCAT(M_TCPDO, STACKNAME)

MALLOC_DEFINE(M_TCPFSB, "tcp_fsb_" __XSTRING(STACKNAME), "TCP fast send block");
MALLOC_DEFINE(M_TCPDO, "tcp_do_" __XSTRING(STACKNAME), "TCP deferred options");
MALLOC_DEFINE(M_TCPPCM, "tcp_pcm_" __XSTRING(STACKNAME), "TCP PCM measurement information");

struct sysctl_ctx_list rack_sysctl_ctx;
struct sysctl_oid *rack_sysctl_root;

#define CUM_ACKED 1
#define SACKED 2

/*
 * The RACK module incorporates a number of
 * TCP ideas that have been put out into the IETF
 * over the last few years:
 * - Matt Mathis's Rate Halving which slowly drops
 *   the congestion window so that the ack clock can
 *   be maintained during a recovery.
 * - Yuchung Cheng's RACK TCP (for which it is named) that
 *   will stop us using the number of dup acks and instead
 *   use time as the gauge of when we retransmit.
 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
 *   of Dukkipati et al.
 * RACK depends on SACK, so if an endpoint arrives that
 * cannot do SACK the state machine below will shuttle the
 * connection back to using the "default" TCP stack that is
 * in FreeBSD.
 *
 * To implement RACK the original TCP stack was first decomposed
 * into a functional state machine with individual states
 * for each of the possible TCP connection states. The do_segment
 * function's role in life is to mandate the connection supports SACK
 * initially and then assure that the RACK state matches the connection
 * state before calling the state's do_segment function. Each
 * state is simplified due to the fact that the original do_segment
 * has been decomposed and we *know* what state we are in (no
 * switches on the state) and all tests for SACK are gone. This
 * greatly simplifies what each state does.
 *
 * TCP output is also over-written with a new version since it
 * must maintain the new rack scoreboard.
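 *
 * As a rough illustration of the time-based idea (a sketch, not code
 * taken from this stack; the names used below are only meant to be
 * suggestive), the per-segment loss test RACK applies is of the form:
 *
 *	// "now", the send time of the tracked segment, the current
 *	// rack rtt estimate and a reordering window, all in usecs.
 *	deadline = send_time + rack_rtt + reorder_window;
 *	if (TSTMP_GT(now, deadline))
 *		// segment is considered lost and is queued for retransmit
 *
 * whereas the classic algorithm would wait for tcprexmtthresh (3)
 * duplicate ACKs regardless of how much time has passed.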
 *
 */
static int32_t rack_tlp_thresh = 1;
static int32_t rack_tlp_limit = 2;	/* No more than 2 TLPs w-out new data */
static int32_t rack_tlp_use_greater = 1;
static int32_t rack_reorder_thresh = 2;
static int32_t rack_reorder_fade = 60000000;	/* 0 - never fade, def 60,000,000
						 * - 60 seconds */
static uint32_t rack_pcm_every_n_rounds = 100;
static uint32_t rack_pcm_blast = 0;
static uint32_t rack_pcm_is_enabled = 1;
static uint8_t rack_ssthresh_rest_rto_rec = 0; /* Do we restore ssthresh when we have rec -> rto -> rec */

static uint32_t rack_gp_gain_req = 1200;	/* Amount percent wise required to gain to record a round as "gaining" */
static uint32_t rack_rnd_cnt_req = 0x10005;	/* Default number of rounds if we are below rack_gp_gain_req where we exit ss */


static int32_t rack_rxt_scoreboard_clear_thresh = 2;
static int32_t rack_dnd_default = 0;		/* For rr_conf = 3, what is the default for dnd */
static int32_t rack_rxt_controls = 0;
static int32_t rack_fill_cw_state = 0;
static uint8_t rack_req_measurements = 1;
/* Attack threshold detections */
static uint32_t rack_highest_sack_thresh_seen = 0;
static uint32_t rack_highest_move_thresh_seen = 0;
static uint32_t rack_merge_out_sacks_on_attack = 0;
static int32_t rack_enable_hw_pacing = 0; /* Due to CCSP keep it off by default */
static int32_t rack_hw_rate_caps = 0; /* 1; */
static int32_t rack_hw_rate_cap_per = 0;	/* 0 -- off */
static int32_t rack_hw_rate_min = 0; /* 1500000;*/
static int32_t rack_hw_rate_to_low = 0; /* 1200000; */
static int32_t rack_hw_up_only = 0;
static int32_t rack_stats_gets_ms_rtt = 1;
static int32_t rack_prr_addbackmax = 2;
static int32_t rack_do_hystart = 0;
static int32_t rack_apply_rtt_with_reduced_conf = 0;
static int32_t rack_hibeta_setting = 0;
static int32_t rack_default_pacing_divisor = 250;
static uint16_t rack_pacing_min_seg = 0;
static int32_t rack_timely_off = 0;

static uint32_t sad_seg_size_per = 800;	/* 80.0 % */
static int32_t rack_pkt_delay = 1000;
static int32_t rack_send_a_lot_in_prr = 1;
static int32_t rack_min_to = 1000;	/* Number of microsecond min timeout */
static int32_t rack_verbose_logging = 0;
static int32_t rack_ignore_data_after_close = 1;
static int32_t rack_enable_shared_cwnd = 1;
static int32_t rack_use_cmp_acks = 1;
static int32_t rack_use_fsb = 1;
static int32_t rack_use_rfo = 1;
static int32_t rack_use_rsm_rfo = 1;
static int32_t rack_max_abc_post_recovery = 2;
static int32_t rack_client_low_buf = 0;
static int32_t rack_dsack_std_based = 0x3;	/* bit field bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */
static int32_t rack_bw_multipler = 0;	/* Limit on fill cw's jump up to be this x gp_est */
#ifdef TCP_ACCOUNTING
static int32_t rack_tcp_accounting = 0;
#endif
static int32_t rack_limits_scwnd = 1;
static int32_t rack_enable_mqueue_for_nonpaced = 0;
static int32_t rack_hybrid_allow_set_maxseg = 0;
static int32_t rack_disable_prr = 0;
static int32_t use_rack_rr = 1;
static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/ca)? */
static int32_t rack_persist_min = 250000;	/* 250 ms in usec's */
static int32_t rack_persist_max = 2000000;	/* 2 Second in usec's */
static int32_t rack_honors_hpts_min_to = 1;	/* Do we honor the hpts minimum time out for pacing timers */
static uint32_t rack_max_reduce = 10;		/* Percent we can reduce slot by */
static int32_t rack_sack_not_required = 1;	/* set to one to allow non-sack to use rack */
static int32_t rack_limit_time_with_srtt = 0;
static int32_t rack_autosndbuf_inc = 20;	/* In percentage form */
static int32_t rack_enobuf_hw_boost_mult = 0;	/* How many times the hw rate we boost slot using time_between */
static int32_t rack_enobuf_hw_max = 12000;	/* 12 ms in usecs */
static int32_t rack_enobuf_hw_min = 10000;	/* 10 ms in usecs */
static int32_t rack_hw_rwnd_factor = 2;		/* How many max_segs the rwnd must be before we hold off sending */
static int32_t rack_hw_check_queue = 0;		/* Do we always pre-check queue depth of a hw queue */

/*
 * Currently regular tcp has a rto_min of 30ms
 * the backoff goes 12 times so that ends up
 * being a total of 122.850 seconds before a
 * connection is killed.
 */
static uint32_t rack_def_data_window = 20;
static uint32_t rack_goal_bdp = 2;
static uint32_t rack_min_srtts = 1;
static uint32_t rack_min_measure_usec = 0;
static int32_t rack_tlp_min = 10000;	/* 10ms */
static int32_t rack_rto_min = 30000;	/* 30,000 usec same as main freebsd */
static int32_t rack_rto_max = 4000000;	/* 4 seconds in usec's */
static const int32_t rack_free_cache = 2;
static int32_t rack_hptsi_segments = 40;
static int32_t rack_rate_sample_method = USE_RTT_LOW;
static int32_t rack_pace_every_seg = 0;
static int32_t rack_delayed_ack_time = 40000;	/* 40ms in usecs */
static int32_t rack_slot_reduction = 4;
static int32_t rack_wma_divisor = 8;		/* For WMA calculation */
static int32_t rack_cwnd_block_ends_measure = 0;
static int32_t rack_rwnd_block_ends_measure = 0;
static int32_t rack_def_profile = 0;

static int32_t rack_lower_cwnd_at_tlp = 0;
static int32_t rack_always_send_oldest = 0;
static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;

static uint16_t rack_per_of_gp_ss = 250;	/* 250 % slow-start */
static uint16_t rack_per_of_gp_ca = 200;	/* 200 % congestion-avoidance */
static uint16_t rack_per_of_gp_rec = 200;	/* 200 % of bw */

/* Probertt */
static uint16_t rack_per_of_gp_probertt = 60;	/* 60% of bw */
static uint16_t rack_per_of_gp_lowthresh = 40;	/* 40% is bottom */
static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */
static uint16_t rack_atexit_prtt_hbp = 130;	/* Clamp to 130% on exit prtt if highly buffered path */
static uint16_t rack_atexit_prtt = 130;	/* Clamp to 100% on exit prtt if non highly buffered path */

static uint32_t rack_max_drain_wait = 2;	/* How many gp srtt's before we give up draining */
static uint32_t rack_must_drain = 1;		/* How many GP srtt's we *must* wait */
static uint32_t rack_probertt_use_min_rtt_entry = 1;	/* Use the min to calculate the goal else gp_srtt */
static uint32_t rack_probertt_use_min_rtt_exit = 0;
static uint32_t rack_probe_rtt_sets_cwnd = 0;
static uint32_t rack_probe_rtt_safety_val = 2000000;	/* No more than 2 sec in probe-rtt */
static uint32_t rack_time_between_probertt = 9600000;	/* 9.6 sec in usecs */
static uint32_t rack_probertt_gpsrtt_cnt_mul = 0;	/* How many srtt periods does probe-rtt last top fraction */
static uint32_t rack_probertt_gpsrtt_cnt_div = 0;	/* How many srtt periods does probe-rtt last bottom fraction */
static uint32_t rack_min_probertt_hold = 40000;	/* Equal to delayed ack time */
static uint32_t rack_probertt_filter_life = 10000000;
static uint32_t rack_probertt_lower_within = 10;
static uint32_t rack_min_rtt_movement = 250000;	/* Must move at least 250ms (in microseconds) to count as a lowering */
static int32_t rack_pace_one_seg = 0;		/* Shall we pace for less than 1.4Meg 1MSS at a time */
static int32_t rack_probertt_clear_is = 1;
static int32_t rack_max_drain_hbp = 1;		/* Extra drain times gpsrtt for highly buffered paths */
static int32_t rack_hbp_thresh = 3;		/* what is the divisor max_rtt/min_rtt to decide a hbp */

/* Part of pacing */
static int32_t rack_max_per_above = 30;		/* When we go to increment stop if above 100+this% */

/* Timely information:
 *
 * Here we have various control parameters on how
 * timely may change the multiplier. rack_gain_p5_ub
 * is associated with timely but not directly influencing
 * the rate decision like the other variables. It controls
 * the way fill-cw interacts with timely and caps how much
 * timely can boost the fill-cw b/w.
 *
 * The other values are various boost/shrink numbers as well
 * as potential caps when adjustments are made to the timely
 * gain (returned by rack_get_output_gain()). Remember too that
 * the gain returned can be overridden by other factors such as
 * probeRTT as well as fixed-rate-pacing.
 */
static int32_t rack_gain_p5_ub = 250;
static int32_t rack_gp_per_bw_mul_up = 2;	/* 2% */
static int32_t rack_gp_per_bw_mul_down = 4;	/* 4% */
static int32_t rack_gp_rtt_maxmul = 3;		/* 3 x maxmin */
static int32_t rack_gp_rtt_minmul = 1;		/* minrtt + (minrtt/mindiv) is lower rtt */
static int32_t rack_gp_rtt_mindiv = 4;		/* minrtt + (minrtt * minmul/mindiv) is lower rtt */
static int32_t rack_gp_decrease_per = 80;	/* Beta value of timely decrease (.8) = 80 */
static int32_t rack_gp_increase_per = 2;	/* 2% increase in multiplier */
static int32_t rack_per_lower_bound = 50;	/* Don't allow to drop below this multiplier */
static int32_t rack_per_upper_bound_ss = 0;	/* Don't allow SS to grow above this */
static int32_t rack_per_upper_bound_ca = 0;	/* Don't allow CA to grow above this */
static int32_t rack_do_dyn_mul = 0;		/* Are the rack gp multipliers dynamic */
static int32_t rack_gp_no_rec_chg = 1;		/* Prohibit recovery from reducing its multiplier */
static int32_t rack_timely_dec_clear = 6;	/* Do we clear decrement count at a value (6)?
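 *
 * A small worked example of how these percentages are meant to be read
 * (illustrative only; the authoritative math lives in the rate-setting
 * code, not in this comment): with a goodput estimate of 10 Mbit/s and
 * the default slow-start multiplier rack_per_of_gp_ss = 250, the paced
 * rate target is roughly 10 Mbit/s * 250 / 100 = 25 Mbit/s.  Timely then
 * nudges that multiplier up or down (see rack_gp_increase_per and
 * rack_gp_decrease_per above) but never below rack_per_lower_bound
 * (50, i.e. half of the measured goodput).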
*/ 354 static int32_t rack_timely_max_push_rise = 3; /* One round of pushing */ 355 static int32_t rack_timely_max_push_drop = 3; /* Three round of pushing */ 356 static int32_t rack_timely_min_segs = 4; /* 4 segment minimum */ 357 static int32_t rack_timely_no_stopping = 0; 358 static int32_t rack_down_raise_thresh = 100; 359 static int32_t rack_req_segs = 1; 360 static uint64_t rack_bw_rate_cap = 0; 361 static uint64_t rack_fillcw_bw_cap = 3750000; /* Cap fillcw at 30Mbps */ 362 363 364 /* Rack specific counters */ 365 counter_u64_t rack_saw_enobuf; 366 counter_u64_t rack_saw_enobuf_hw; 367 counter_u64_t rack_saw_enetunreach; 368 counter_u64_t rack_persists_sends; 369 counter_u64_t rack_persists_acks; 370 counter_u64_t rack_persists_loss; 371 counter_u64_t rack_persists_lost_ends; 372 counter_u64_t rack_total_bytes; 373 #ifdef INVARIANTS 374 counter_u64_t rack_adjust_map_bw; 375 #endif 376 /* Tail loss probe counters */ 377 counter_u64_t rack_tlp_tot; 378 counter_u64_t rack_tlp_newdata; 379 counter_u64_t rack_tlp_retran; 380 counter_u64_t rack_tlp_retran_bytes; 381 counter_u64_t rack_to_tot; 382 counter_u64_t rack_hot_alloc; 383 counter_u64_t rack_to_alloc; 384 counter_u64_t rack_to_alloc_hard; 385 counter_u64_t rack_to_alloc_emerg; 386 counter_u64_t rack_to_alloc_limited; 387 counter_u64_t rack_alloc_limited_conns; 388 counter_u64_t rack_split_limited; 389 counter_u64_t rack_rxt_clamps_cwnd; 390 counter_u64_t rack_rxt_clamps_cwnd_uniq; 391 392 counter_u64_t rack_multi_single_eq; 393 counter_u64_t rack_proc_non_comp_ack; 394 395 counter_u64_t rack_fto_send; 396 counter_u64_t rack_fto_rsm_send; 397 counter_u64_t rack_nfto_resend; 398 counter_u64_t rack_non_fto_send; 399 counter_u64_t rack_extended_rfo; 400 401 counter_u64_t rack_sack_proc_all; 402 counter_u64_t rack_sack_proc_short; 403 counter_u64_t rack_sack_proc_restart; 404 counter_u64_t rack_sack_attacks_detected; 405 counter_u64_t rack_sack_attacks_reversed; 406 counter_u64_t rack_sack_attacks_suspect; 407 counter_u64_t rack_sack_used_next_merge; 408 counter_u64_t rack_sack_splits; 409 counter_u64_t rack_sack_used_prev_merge; 410 counter_u64_t rack_sack_skipped_acked; 411 counter_u64_t rack_ack_total; 412 counter_u64_t rack_express_sack; 413 counter_u64_t rack_sack_total; 414 counter_u64_t rack_move_none; 415 counter_u64_t rack_move_some; 416 417 counter_u64_t rack_input_idle_reduces; 418 counter_u64_t rack_collapsed_win; 419 counter_u64_t rack_collapsed_win_seen; 420 counter_u64_t rack_collapsed_win_rxt; 421 counter_u64_t rack_collapsed_win_rxt_bytes; 422 counter_u64_t rack_try_scwnd; 423 counter_u64_t rack_hw_pace_init_fail; 424 counter_u64_t rack_hw_pace_lost; 425 426 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE]; 427 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE]; 428 429 430 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2))) 431 432 #define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do { \ 433 (tv) = (value) + slop; \ 434 if ((u_long)(tv) < (u_long)(tvmin)) \ 435 (tv) = (tvmin); \ 436 if ((u_long)(tv) > (u_long)(tvmax)) \ 437 (tv) = (tvmax); \ 438 } while (0) 439 440 static void 441 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line); 442 443 static int 444 rack_process_ack(struct mbuf *m, struct tcphdr *th, 445 struct socket *so, struct tcpcb *tp, struct tcpopt *to, 446 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val, int32_t orig_tlen); 447 static int 448 rack_process_data(struct mbuf *m, struct tcphdr *th, 
449 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 450 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 451 static void 452 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, 453 uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery); 454 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack); 455 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack, 456 uint8_t limit_type); 457 static struct rack_sendmap * 458 rack_check_recovery_mode(struct tcpcb *tp, 459 uint32_t tsused); 460 static uint32_t 461 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack); 462 static void 463 rack_cong_signal(struct tcpcb *tp, 464 uint32_t type, uint32_t ack, int ); 465 static void rack_counter_destroy(void); 466 static int 467 rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt); 468 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how); 469 static void 470 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override); 471 static void 472 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 473 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos); 474 static void rack_dtor(void *mem, int32_t size, void *arg); 475 static void 476 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 477 uint32_t flex1, uint32_t flex2, 478 uint32_t flex3, uint32_t flex4, 479 uint32_t flex5, uint32_t flex6, 480 uint16_t flex7, uint8_t mod); 481 482 static void 483 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 484 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line, 485 struct rack_sendmap *rsm, uint8_t quality); 486 static struct rack_sendmap * 487 rack_find_high_nonack(struct tcp_rack *rack, 488 struct rack_sendmap *rsm); 489 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack); 490 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm); 491 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged); 492 static int rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt); 493 static void 494 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 495 tcp_seq th_ack, int line, uint8_t quality); 496 static void 497 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm); 498 499 static uint32_t 500 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss); 501 static int32_t rack_handoff_ok(struct tcpcb *tp); 502 static int32_t rack_init(struct tcpcb *tp, void **ptr); 503 static void rack_init_sysctls(void); 504 505 static void 506 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, 507 struct tcphdr *th, int entered_rec, int dup_ack_struck, 508 int *dsack_seen, int *sacks_seen); 509 static void 510 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 511 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t ts, 512 struct rack_sendmap *hintrsm, uint32_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls, int segsiz); 513 514 static uint64_t rack_get_gp_est(struct tcp_rack *rack); 515 516 517 static void 518 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack, 519 struct rack_sendmap *rsm, uint32_t cts); 520 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm); 521 static int32_t rack_output(struct tcpcb *tp); 522 523 static uint32_t 524 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, 525 struct sackblk *sack, struct tcpopt 
*to, struct rack_sendmap **prsm, 526 uint32_t cts, uint32_t segsiz); 527 static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq); 528 static void rack_remxt_tmr(struct tcpcb *tp); 529 static int rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt); 530 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack); 531 static int32_t rack_stopall(struct tcpcb *tp); 532 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line); 533 static uint32_t 534 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 535 struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint32_t add_flag, int segsiz); 536 static void 537 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 538 struct rack_sendmap *rsm, uint64_t ts, uint32_t add_flag, int segsiz); 539 static int 540 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 541 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack); 542 static int32_t tcp_addrack(module_t mod, int32_t type, void *data); 543 static int 544 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, 545 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 546 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 547 548 static int 549 rack_do_closing(struct mbuf *m, struct tcphdr *th, 550 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 551 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 552 static int 553 rack_do_established(struct mbuf *m, struct tcphdr *th, 554 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 555 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 556 static int 557 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, 558 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 559 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos); 560 static int 561 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, 562 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 563 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 564 static int 565 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, 566 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 567 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 568 static int 569 rack_do_lastack(struct mbuf *m, struct tcphdr *th, 570 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 571 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 572 static int 573 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, 574 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 575 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 576 static int 577 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, 578 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 579 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 580 static void rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts); 581 struct rack_sendmap * 582 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, 583 uint32_t tsused); 584 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, 585 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, 
     uint16_t rtrcnt);
static void
tcp_rack_partialack(struct tcpcb *tp);
static int
rack_set_profile(struct tcp_rack *rack, int prof);
static void
rack_apply_deferred_options(struct tcp_rack *rack);

int32_t rack_clear_counter=0;

/*
 * Return the long-term (lifetime) b/w estimate in bytes per second:
 * the bytes accounted to the lt_bw measurement, scaled by 1,000,000 and
 * divided by the accumulated measurement time in microseconds.  If a
 * measurement interval is currently open (lt_bw_up), the bytes and time
 * of that interval are included as well.
 */
static uint64_t
rack_get_lt_bw(struct tcp_rack *rack)
{
	struct timeval tv;
	uint64_t tim, bytes;

	tim = rack->r_ctl.lt_bw_time;
	bytes = rack->r_ctl.lt_bw_bytes;
	if (rack->lt_bw_up) {
		/* Include all the current bytes too */
		microuptime(&tv);
		bytes += (rack->rc_tp->snd_una - rack->r_ctl.lt_seq);
		tim += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark);
	}
	if ((bytes != 0) && (tim != 0))
		return ((bytes * (uint64_t)1000000) / tim);
	else
		return (0);
}

static void
rack_swap_beta_values(struct tcp_rack *rack, uint8_t flex8)
{
	struct sockopt sopt;
	struct cc_newreno_opts opt;
	struct tcpcb *tp;
	uint32_t old_beta;
	uint32_t old_beta_ecn;
	int error = 0, failed = 0;

	tp = rack->rc_tp;
	if (tp->t_cc == NULL) {
		/* Tcb is leaving */
		return;
	}
	rack->rc_pacing_cc_set = 1;
	if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) {
		/* Not new-reno we can't play games with beta! */
		failed = 1;
		goto out;

	}
	if (CC_ALGO(tp)->ctl_output == NULL) {
		/* Huh, not using new-reno so no swaps? */
		failed = 2;
		goto out;
	}
	/* Get the current values out */
	sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
	sopt.sopt_dir = SOPT_GET;
	opt.name = CC_NEWRENO_BETA;
	error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
	if (error) {
		failed = 3;
		goto out;
	}
	old_beta = opt.val;
	opt.name = CC_NEWRENO_BETA_ECN;
	error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
	if (error) {
		failed = 4;
		goto out;
	}
	old_beta_ecn = opt.val;

	/* Now let's set in the values we have stored */
	sopt.sopt_dir = SOPT_SET;
	opt.name = CC_NEWRENO_BETA;
	opt.val = rack->r_ctl.rc_saved_beta;
	error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
	if (error) {
		failed = 5;
		goto out;
	}
	opt.name = CC_NEWRENO_BETA_ECN;
	opt.val = rack->r_ctl.rc_saved_beta_ecn;
	error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
	if (error) {
		failed = 6;
		goto out;
	}
	/* Save off the values for restoral */
	rack->r_ctl.rc_saved_beta = old_beta;
	rack->r_ctl.rc_saved_beta_ecn = old_beta_ecn;
out:
	if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
		union tcp_log_stackspecific log;
		struct timeval tv;
		struct newreno *ptr;

		ptr = ((struct newreno *)tp->t_ccv.cc_data);
		memset(&log, 0, sizeof(log));
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.flex1 = ptr->beta;
		log.u_bbr.flex2 = ptr->beta_ecn;
		log.u_bbr.flex3 = ptr->newreno_flags;
		log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta;
		log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta_ecn;
		log.u_bbr.flex6 = failed;
		log.u_bbr.flex7 = rack->gp_ready;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->use_fixed_rate;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
		log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.flex8 = flex8;
		tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, error,
			       0, &log, false, NULL, NULL, 0, &tv);
	}
}

static void
rack_set_cc_pacing(struct tcp_rack *rack)
{
	if (rack->rc_pacing_cc_set)
return; 711 /* 712 * Use the swap utility placing in 3 for flex8 to id a 713 * set of a new set of values. 714 */ 715 rack->rc_pacing_cc_set = 1; 716 rack_swap_beta_values(rack, 3); 717 } 718 719 static void 720 rack_undo_cc_pacing(struct tcp_rack *rack) 721 { 722 if (rack->rc_pacing_cc_set == 0) 723 return; 724 /* 725 * Use the swap utility placing in 4 for flex8 to id a 726 * restoral of the old values. 727 */ 728 rack->rc_pacing_cc_set = 0; 729 rack_swap_beta_values(rack, 4); 730 } 731 732 static void 733 rack_remove_pacing(struct tcp_rack *rack) 734 { 735 if (rack->rc_pacing_cc_set) 736 rack_undo_cc_pacing(rack); 737 if (rack->r_ctl.pacing_method & RACK_REG_PACING) 738 tcp_decrement_paced_conn(); 739 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) 740 tcp_dec_dgp_pacing_cnt(); 741 rack->rc_always_pace = 0; 742 rack->r_ctl.pacing_method = RACK_PACING_NONE; 743 rack->dgp_on = 0; 744 rack->rc_hybrid_mode = 0; 745 rack->use_fixed_rate = 0; 746 } 747 748 static void 749 rack_log_gpset(struct tcp_rack *rack, uint32_t seq_end, uint32_t ack_end_t, 750 uint32_t send_end_t, int line, uint8_t mode, struct rack_sendmap *rsm) 751 { 752 if (tcp_bblogging_on(rack->rc_tp) && (rack_verbose_logging != 0)) { 753 union tcp_log_stackspecific log; 754 struct timeval tv; 755 756 memset(&log, 0, sizeof(log)); 757 log.u_bbr.flex1 = seq_end; 758 log.u_bbr.flex2 = rack->rc_tp->gput_seq; 759 log.u_bbr.flex3 = ack_end_t; 760 log.u_bbr.flex4 = rack->rc_tp->gput_ts; 761 log.u_bbr.flex5 = send_end_t; 762 log.u_bbr.flex6 = rack->rc_tp->gput_ack; 763 log.u_bbr.flex7 = mode; 764 log.u_bbr.flex8 = 69; 765 log.u_bbr.rttProp = rack->r_ctl.rc_gp_cumack_ts; 766 log.u_bbr.delRate = rack->r_ctl.rc_gp_output_ts; 767 log.u_bbr.pkts_out = line; 768 log.u_bbr.cwnd_gain = rack->app_limited_needs_set; 769 log.u_bbr.pkt_epoch = rack->r_ctl.rc_app_limited_cnt; 770 log.u_bbr.epoch = rack->r_ctl.current_round; 771 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 772 if (rsm != NULL) { 773 log.u_bbr.applimited = rsm->r_start; 774 log.u_bbr.delivered = rsm->r_end; 775 log.u_bbr.epoch = rsm->r_flags; 776 } 777 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 778 TCP_LOG_EVENTP(rack->rc_tp, NULL, 779 &rack->rc_inp->inp_socket->so_rcv, 780 &rack->rc_inp->inp_socket->so_snd, 781 BBR_LOG_HPTSI_CALC, 0, 782 0, &log, false, &tv); 783 } 784 } 785 786 static int 787 sysctl_rack_clear(SYSCTL_HANDLER_ARGS) 788 { 789 uint32_t stat; 790 int32_t error; 791 792 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t)); 793 if (error || req->newptr == NULL) 794 return error; 795 796 error = SYSCTL_IN(req, &stat, sizeof(uint32_t)); 797 if (error) 798 return (error); 799 if (stat == 1) { 800 #ifdef INVARIANTS 801 printf("Clearing RACK counters\n"); 802 #endif 803 counter_u64_zero(rack_tlp_tot); 804 counter_u64_zero(rack_tlp_newdata); 805 counter_u64_zero(rack_tlp_retran); 806 counter_u64_zero(rack_tlp_retran_bytes); 807 counter_u64_zero(rack_to_tot); 808 counter_u64_zero(rack_saw_enobuf); 809 counter_u64_zero(rack_saw_enobuf_hw); 810 counter_u64_zero(rack_saw_enetunreach); 811 counter_u64_zero(rack_persists_sends); 812 counter_u64_zero(rack_total_bytes); 813 counter_u64_zero(rack_persists_acks); 814 counter_u64_zero(rack_persists_loss); 815 counter_u64_zero(rack_persists_lost_ends); 816 #ifdef INVARIANTS 817 counter_u64_zero(rack_adjust_map_bw); 818 #endif 819 counter_u64_zero(rack_to_alloc_hard); 820 counter_u64_zero(rack_to_alloc_emerg); 821 counter_u64_zero(rack_sack_proc_all); 822 counter_u64_zero(rack_fto_send); 823 
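		/*
		 * Illustrative usage note (not from the original source): this
		 * handler is normally attached as a proc node under the stack's
		 * sysctl tree, so an administrator would clear the counters in
		 * this block with something like
		 *
		 *   # sysctl net.inet.tcp.rack.stats.clear=1
		 *
		 * where the exact path depends on how the node is registered.
		 * Writing 2 clears only rack_opts_arry and 4 clears only
		 * rack_out_size, as handled further down.
		 */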
counter_u64_zero(rack_fto_rsm_send); 824 counter_u64_zero(rack_extended_rfo); 825 counter_u64_zero(rack_hw_pace_init_fail); 826 counter_u64_zero(rack_hw_pace_lost); 827 counter_u64_zero(rack_non_fto_send); 828 counter_u64_zero(rack_nfto_resend); 829 counter_u64_zero(rack_sack_proc_short); 830 counter_u64_zero(rack_sack_proc_restart); 831 counter_u64_zero(rack_to_alloc); 832 counter_u64_zero(rack_to_alloc_limited); 833 counter_u64_zero(rack_alloc_limited_conns); 834 counter_u64_zero(rack_split_limited); 835 counter_u64_zero(rack_rxt_clamps_cwnd); 836 counter_u64_zero(rack_rxt_clamps_cwnd_uniq); 837 counter_u64_zero(rack_multi_single_eq); 838 counter_u64_zero(rack_proc_non_comp_ack); 839 counter_u64_zero(rack_sack_attacks_detected); 840 counter_u64_zero(rack_sack_attacks_reversed); 841 counter_u64_zero(rack_sack_attacks_suspect); 842 counter_u64_zero(rack_sack_used_next_merge); 843 counter_u64_zero(rack_sack_used_prev_merge); 844 counter_u64_zero(rack_sack_splits); 845 counter_u64_zero(rack_sack_skipped_acked); 846 counter_u64_zero(rack_ack_total); 847 counter_u64_zero(rack_express_sack); 848 counter_u64_zero(rack_sack_total); 849 counter_u64_zero(rack_move_none); 850 counter_u64_zero(rack_move_some); 851 counter_u64_zero(rack_try_scwnd); 852 counter_u64_zero(rack_collapsed_win); 853 counter_u64_zero(rack_collapsed_win_rxt); 854 counter_u64_zero(rack_collapsed_win_seen); 855 counter_u64_zero(rack_collapsed_win_rxt_bytes); 856 } else if (stat == 2) { 857 #ifdef INVARIANTS 858 printf("Clearing RACK option array\n"); 859 #endif 860 COUNTER_ARRAY_ZERO(rack_opts_arry, RACK_OPTS_SIZE); 861 } else if (stat == 3) { 862 printf("Rack has no stats counters to clear (use 1 to clear all stats in sysctl node)\n"); 863 } else if (stat == 4) { 864 #ifdef INVARIANTS 865 printf("Clearing RACK out size array\n"); 866 #endif 867 COUNTER_ARRAY_ZERO(rack_out_size, TCP_MSS_ACCT_SIZE); 868 } 869 rack_clear_counter = 0; 870 return (0); 871 } 872 873 static void 874 rack_init_sysctls(void) 875 { 876 struct sysctl_oid *rack_counters; 877 struct sysctl_oid *rack_attack; 878 struct sysctl_oid *rack_pacing; 879 struct sysctl_oid *rack_timely; 880 struct sysctl_oid *rack_timers; 881 struct sysctl_oid *rack_tlp; 882 struct sysctl_oid *rack_misc; 883 struct sysctl_oid *rack_features; 884 struct sysctl_oid *rack_measure; 885 struct sysctl_oid *rack_probertt; 886 struct sysctl_oid *rack_hw_pacing; 887 888 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 889 SYSCTL_CHILDREN(rack_sysctl_root), 890 OID_AUTO, 891 "sack_attack", 892 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 893 "Rack Sack Attack Counters and Controls"); 894 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 895 SYSCTL_CHILDREN(rack_sysctl_root), 896 OID_AUTO, 897 "stats", 898 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 899 "Rack Counters"); 900 SYSCTL_ADD_S32(&rack_sysctl_ctx, 901 SYSCTL_CHILDREN(rack_sysctl_root), 902 OID_AUTO, "rate_sample_method", CTLFLAG_RW, 903 &rack_rate_sample_method , USE_RTT_LOW, 904 "What method should we use for rate sampling 0=high, 1=low "); 905 /* Probe rtt related controls */ 906 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 907 SYSCTL_CHILDREN(rack_sysctl_root), 908 OID_AUTO, 909 "probertt", 910 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 911 "ProbeRTT related Controls"); 912 SYSCTL_ADD_U16(&rack_sysctl_ctx, 913 SYSCTL_CHILDREN(rack_probertt), 914 OID_AUTO, "exit_per_hpb", CTLFLAG_RW, 915 &rack_atexit_prtt_hbp, 130, 916 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%"); 917 SYSCTL_ADD_U16(&rack_sysctl_ctx, 918 
SYSCTL_CHILDREN(rack_probertt), 919 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW, 920 &rack_atexit_prtt, 130, 921 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%"); 922 SYSCTL_ADD_U16(&rack_sysctl_ctx, 923 SYSCTL_CHILDREN(rack_probertt), 924 OID_AUTO, "gp_per_mul", CTLFLAG_RW, 925 &rack_per_of_gp_probertt, 60, 926 "What percentage of goodput do we pace at in probertt"); 927 SYSCTL_ADD_U16(&rack_sysctl_ctx, 928 SYSCTL_CHILDREN(rack_probertt), 929 OID_AUTO, "gp_per_reduce", CTLFLAG_RW, 930 &rack_per_of_gp_probertt_reduce, 10, 931 "What percentage of goodput do we reduce every gp_srtt"); 932 SYSCTL_ADD_U16(&rack_sysctl_ctx, 933 SYSCTL_CHILDREN(rack_probertt), 934 OID_AUTO, "gp_per_low", CTLFLAG_RW, 935 &rack_per_of_gp_lowthresh, 40, 936 "What percentage of goodput do we allow the multiplier to fall to"); 937 SYSCTL_ADD_U32(&rack_sysctl_ctx, 938 SYSCTL_CHILDREN(rack_probertt), 939 OID_AUTO, "time_between", CTLFLAG_RW, 940 &rack_time_between_probertt, 96000000, 941 "How many useconds between the lowest rtt falling must past before we enter probertt"); 942 SYSCTL_ADD_U32(&rack_sysctl_ctx, 943 SYSCTL_CHILDREN(rack_probertt), 944 OID_AUTO, "safety", CTLFLAG_RW, 945 &rack_probe_rtt_safety_val, 2000000, 946 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)"); 947 SYSCTL_ADD_U32(&rack_sysctl_ctx, 948 SYSCTL_CHILDREN(rack_probertt), 949 OID_AUTO, "sets_cwnd", CTLFLAG_RW, 950 &rack_probe_rtt_sets_cwnd, 0, 951 "Do we set the cwnd too (if always_lower is on)"); 952 SYSCTL_ADD_U32(&rack_sysctl_ctx, 953 SYSCTL_CHILDREN(rack_probertt), 954 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW, 955 &rack_max_drain_wait, 2, 956 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal"); 957 SYSCTL_ADD_U32(&rack_sysctl_ctx, 958 SYSCTL_CHILDREN(rack_probertt), 959 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW, 960 &rack_must_drain, 1, 961 "We must drain this many gp_srtt's waiting for flight to reach goal"); 962 SYSCTL_ADD_U32(&rack_sysctl_ctx, 963 SYSCTL_CHILDREN(rack_probertt), 964 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW, 965 &rack_probertt_use_min_rtt_entry, 1, 966 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); 967 SYSCTL_ADD_U32(&rack_sysctl_ctx, 968 SYSCTL_CHILDREN(rack_probertt), 969 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW, 970 &rack_probertt_use_min_rtt_exit, 0, 971 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); 972 SYSCTL_ADD_U32(&rack_sysctl_ctx, 973 SYSCTL_CHILDREN(rack_probertt), 974 OID_AUTO, "length_div", CTLFLAG_RW, 975 &rack_probertt_gpsrtt_cnt_div, 0, 976 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)"); 977 SYSCTL_ADD_U32(&rack_sysctl_ctx, 978 SYSCTL_CHILDREN(rack_probertt), 979 OID_AUTO, "length_mul", CTLFLAG_RW, 980 &rack_probertt_gpsrtt_cnt_mul, 0, 981 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)"); 982 SYSCTL_ADD_U32(&rack_sysctl_ctx, 983 SYSCTL_CHILDREN(rack_probertt), 984 OID_AUTO, "holdtim_at_target", CTLFLAG_RW, 985 &rack_min_probertt_hold, 200000, 986 "What is the minimum time we hold probertt at target"); 987 SYSCTL_ADD_U32(&rack_sysctl_ctx, 988 SYSCTL_CHILDREN(rack_probertt), 989 OID_AUTO, "filter_life", CTLFLAG_RW, 990 &rack_probertt_filter_life, 10000000, 991 "What is the time for the filters life in useconds"); 992 SYSCTL_ADD_U32(&rack_sysctl_ctx, 993 SYSCTL_CHILDREN(rack_probertt), 994 OID_AUTO, "lower_within", CTLFLAG_RW, 995 
&rack_probertt_lower_within, 10, 996 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); 997 SYSCTL_ADD_U32(&rack_sysctl_ctx, 998 SYSCTL_CHILDREN(rack_probertt), 999 OID_AUTO, "must_move", CTLFLAG_RW, 1000 &rack_min_rtt_movement, 250, 1001 "How much is the minimum movement in rtt to count as a drop for probertt purposes"); 1002 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1003 SYSCTL_CHILDREN(rack_probertt), 1004 OID_AUTO, "clear_is_cnts", CTLFLAG_RW, 1005 &rack_probertt_clear_is, 1, 1006 "Do we clear I/S counts on exiting probe-rtt"); 1007 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1008 SYSCTL_CHILDREN(rack_probertt), 1009 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW, 1010 &rack_max_drain_hbp, 1, 1011 "How many extra drain gpsrtt's do we get in highly buffered paths"); 1012 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1013 SYSCTL_CHILDREN(rack_probertt), 1014 OID_AUTO, "hbp_threshold", CTLFLAG_RW, 1015 &rack_hbp_thresh, 3, 1016 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); 1017 /* Pacing related sysctls */ 1018 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1019 SYSCTL_CHILDREN(rack_sysctl_root), 1020 OID_AUTO, 1021 "pacing", 1022 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1023 "Pacing related Controls"); 1024 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1025 SYSCTL_CHILDREN(rack_pacing), 1026 OID_AUTO, "pcm_enabled", CTLFLAG_RW, 1027 &rack_pcm_is_enabled, 1, 1028 "Do we by default do PCM measurements?"); 1029 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1030 SYSCTL_CHILDREN(rack_pacing), 1031 OID_AUTO, "pcm_rnds", CTLFLAG_RW, 1032 &rack_pcm_every_n_rounds, 100, 1033 "How many rounds before we need to do a PCM measurement"); 1034 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1035 SYSCTL_CHILDREN(rack_pacing), 1036 OID_AUTO, "pcm_blast", CTLFLAG_RW, 1037 &rack_pcm_blast, 0, 1038 "Blast out the full cwnd/rwnd when doing a PCM measurement"); 1039 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1040 SYSCTL_CHILDREN(rack_pacing), 1041 OID_AUTO, "rnd_gp_gain", CTLFLAG_RW, 1042 &rack_gp_gain_req, 1200, 1043 "How much do we have to increase the GP to record the round 1200 = 120.0"); 1044 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1045 SYSCTL_CHILDREN(rack_pacing), 1046 OID_AUTO, "dgp_out_of_ss_at", CTLFLAG_RW, 1047 &rack_rnd_cnt_req, 0x10005, 1048 "How many rounds less than rnd_gp_gain will drop us out of SS"); 1049 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1050 SYSCTL_CHILDREN(rack_pacing), 1051 OID_AUTO, "no_timely", CTLFLAG_RW, 1052 &rack_timely_off, 0, 1053 "Do we not use timely in DGP?"); 1054 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1055 SYSCTL_CHILDREN(rack_pacing), 1056 OID_AUTO, "fillcw", CTLFLAG_RW, 1057 &rack_fill_cw_state, 0, 1058 "Enable fillcw on new connections (default=0 off)?"); 1059 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1060 SYSCTL_CHILDREN(rack_pacing), 1061 OID_AUTO, "min_burst", CTLFLAG_RW, 1062 &rack_pacing_min_seg, 0, 1063 "What is the min burst size for pacing (0 disables)?"); 1064 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1065 SYSCTL_CHILDREN(rack_pacing), 1066 OID_AUTO, "divisor", CTLFLAG_RW, 1067 &rack_default_pacing_divisor, 250, 1068 "What is the default divisor given to the rl code?"); 1069 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1070 SYSCTL_CHILDREN(rack_pacing), 1071 OID_AUTO, "fillcw_max_mult", CTLFLAG_RW, 1072 &rack_bw_multipler, 0, 1073 "What is the limit multiplier of the current gp_est that fillcw can increase the b/w too, 200 == 200% (0 = off)?"); 1074 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1075 SYSCTL_CHILDREN(rack_pacing), 1076 OID_AUTO, "max_pace_over", CTLFLAG_RW, 1077 &rack_max_per_above, 30, 1078 "What is the maximum allowable percentage that we can 
pace above (so 30 = 130% of our goal)"); 1079 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1080 SYSCTL_CHILDREN(rack_pacing), 1081 OID_AUTO, "allow1mss", CTLFLAG_RW, 1082 &rack_pace_one_seg, 0, 1083 "Do we allow low b/w pacing of 1MSS instead of two (1.2Meg and less)?"); 1084 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1085 SYSCTL_CHILDREN(rack_pacing), 1086 OID_AUTO, "limit_wsrtt", CTLFLAG_RW, 1087 &rack_limit_time_with_srtt, 0, 1088 "Do we limit pacing time based on srtt"); 1089 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1090 SYSCTL_CHILDREN(rack_pacing), 1091 OID_AUTO, "gp_per_ss", CTLFLAG_RW, 1092 &rack_per_of_gp_ss, 250, 1093 "If non zero, what percentage of goodput to pace at in slow start"); 1094 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1095 SYSCTL_CHILDREN(rack_pacing), 1096 OID_AUTO, "gp_per_ca", CTLFLAG_RW, 1097 &rack_per_of_gp_ca, 150, 1098 "If non zero, what percentage of goodput to pace at in congestion avoidance"); 1099 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1100 SYSCTL_CHILDREN(rack_pacing), 1101 OID_AUTO, "gp_per_rec", CTLFLAG_RW, 1102 &rack_per_of_gp_rec, 200, 1103 "If non zero, what percentage of goodput to pace at in recovery"); 1104 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1105 SYSCTL_CHILDREN(rack_pacing), 1106 OID_AUTO, "pace_max_seg", CTLFLAG_RW, 1107 &rack_hptsi_segments, 40, 1108 "What size is the max for TSO segments in pacing and burst mitigation"); 1109 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1110 SYSCTL_CHILDREN(rack_pacing), 1111 OID_AUTO, "burst_reduces", CTLFLAG_RW, 1112 &rack_slot_reduction, 4, 1113 "When doing only burst mitigation what is the reduce divisor"); 1114 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1115 SYSCTL_CHILDREN(rack_sysctl_root), 1116 OID_AUTO, "use_pacing", CTLFLAG_RW, 1117 &rack_pace_every_seg, 0, 1118 "If set we use pacing, if clear we use only the original burst mitigation"); 1119 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1120 SYSCTL_CHILDREN(rack_pacing), 1121 OID_AUTO, "rate_cap", CTLFLAG_RW, 1122 &rack_bw_rate_cap, 0, 1123 "If set we apply this value to the absolute rate cap used by pacing"); 1124 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1125 SYSCTL_CHILDREN(rack_pacing), 1126 OID_AUTO, "fillcw_cap", CTLFLAG_RW, 1127 &rack_fillcw_bw_cap, 3750000, 1128 "Do we have an absolute cap on the amount of b/w fillcw can specify (0 = no)?"); 1129 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1130 SYSCTL_CHILDREN(rack_sysctl_root), 1131 OID_AUTO, "req_measure_cnt", CTLFLAG_RW, 1132 &rack_req_measurements, 1, 1133 "If doing dynamic pacing, how many measurements must be in before we start pacing?"); 1134 /* Hardware pacing */ 1135 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1136 SYSCTL_CHILDREN(rack_sysctl_root), 1137 OID_AUTO, 1138 "hdwr_pacing", 1139 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1140 "Pacing related Controls"); 1141 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1142 SYSCTL_CHILDREN(rack_hw_pacing), 1143 OID_AUTO, "rwnd_factor", CTLFLAG_RW, 1144 &rack_hw_rwnd_factor, 2, 1145 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?"); 1146 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1147 SYSCTL_CHILDREN(rack_hw_pacing), 1148 OID_AUTO, "precheck", CTLFLAG_RW, 1149 &rack_hw_check_queue, 0, 1150 "Do we always precheck the hdwr pacing queue to avoid ENOBUF's?"); 1151 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1152 SYSCTL_CHILDREN(rack_hw_pacing), 1153 OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW, 1154 &rack_enobuf_hw_boost_mult, 0, 1155 "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?"); 1156 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1157 SYSCTL_CHILDREN(rack_hw_pacing), 1158 OID_AUTO, "pace_enobuf_max", 
CTLFLAG_RW, 1159 &rack_enobuf_hw_max, 2, 1160 "What is the max boost the pacing time if we see a ENOBUFS?"); 1161 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1162 SYSCTL_CHILDREN(rack_hw_pacing), 1163 OID_AUTO, "pace_enobuf_min", CTLFLAG_RW, 1164 &rack_enobuf_hw_min, 2, 1165 "What is the min boost the pacing time if we see a ENOBUFS?"); 1166 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1167 SYSCTL_CHILDREN(rack_hw_pacing), 1168 OID_AUTO, "enable", CTLFLAG_RW, 1169 &rack_enable_hw_pacing, 0, 1170 "Should RACK attempt to use hw pacing?"); 1171 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1172 SYSCTL_CHILDREN(rack_hw_pacing), 1173 OID_AUTO, "rate_cap", CTLFLAG_RW, 1174 &rack_hw_rate_caps, 0, 1175 "Does the highest hardware pacing rate cap the rate we will send at??"); 1176 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1177 SYSCTL_CHILDREN(rack_hw_pacing), 1178 OID_AUTO, "uncap_per", CTLFLAG_RW, 1179 &rack_hw_rate_cap_per, 0, 1180 "If you go over b/w by this amount you will be uncapped (0 = never)"); 1181 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1182 SYSCTL_CHILDREN(rack_hw_pacing), 1183 OID_AUTO, "rate_min", CTLFLAG_RW, 1184 &rack_hw_rate_min, 0, 1185 "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?"); 1186 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1187 SYSCTL_CHILDREN(rack_hw_pacing), 1188 OID_AUTO, "rate_to_low", CTLFLAG_RW, 1189 &rack_hw_rate_to_low, 0, 1190 "If we fall below this rate, dis-engage hw pacing?"); 1191 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1192 SYSCTL_CHILDREN(rack_hw_pacing), 1193 OID_AUTO, "up_only", CTLFLAG_RW, 1194 &rack_hw_up_only, 0, 1195 "Do we allow hw pacing to lower the rate selected?"); 1196 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1197 SYSCTL_CHILDREN(rack_sysctl_root), 1198 OID_AUTO, 1199 "timely", 1200 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1201 "Rack Timely RTT Controls"); 1202 /* Timely based GP dynmics */ 1203 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1204 SYSCTL_CHILDREN(rack_timely), 1205 OID_AUTO, "upper", CTLFLAG_RW, 1206 &rack_gp_per_bw_mul_up, 2, 1207 "Rack timely upper range for equal b/w (in percentage)"); 1208 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1209 SYSCTL_CHILDREN(rack_timely), 1210 OID_AUTO, "lower", CTLFLAG_RW, 1211 &rack_gp_per_bw_mul_down, 4, 1212 "Rack timely lower range for equal b/w (in percentage)"); 1213 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1214 SYSCTL_CHILDREN(rack_timely), 1215 OID_AUTO, "rtt_max_mul", CTLFLAG_RW, 1216 &rack_gp_rtt_maxmul, 3, 1217 "Rack timely multiplier of lowest rtt for rtt_max"); 1218 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1219 SYSCTL_CHILDREN(rack_timely), 1220 OID_AUTO, "rtt_min_div", CTLFLAG_RW, 1221 &rack_gp_rtt_mindiv, 4, 1222 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1223 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1224 SYSCTL_CHILDREN(rack_timely), 1225 OID_AUTO, "rtt_min_mul", CTLFLAG_RW, 1226 &rack_gp_rtt_minmul, 1, 1227 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1228 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1229 SYSCTL_CHILDREN(rack_timely), 1230 OID_AUTO, "decrease", CTLFLAG_RW, 1231 &rack_gp_decrease_per, 80, 1232 "Rack timely Beta value 80 = .8 (scaled by 100)"); 1233 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1234 SYSCTL_CHILDREN(rack_timely), 1235 OID_AUTO, "increase", CTLFLAG_RW, 1236 &rack_gp_increase_per, 2, 1237 "Rack timely increase perentage of our GP multiplication factor"); 1238 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1239 SYSCTL_CHILDREN(rack_timely), 1240 OID_AUTO, "lowerbound", CTLFLAG_RW, 1241 &rack_per_lower_bound, 50, 1242 "Rack timely lowest percentage we allow GP multiplier to fall to"); 
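	/*
	 * Illustrative example (an assumption about deployment, not taken
	 * from this file): with the stack's root registered as "rack" under
	 * net.inet.tcp, as in a stock FreeBSD build, the nodes in this
	 * "timely" subtree show up as e.g.
	 *
	 *   # sysctl net.inet.tcp.rack.timely.dynamicgp=1
	 *   # sysctl net.inet.tcp.rack.timely.lowerbound
	 *
	 * and each one maps directly onto one of the rack_* tunables
	 * declared at the top of this file.
	 */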
1243 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1244 SYSCTL_CHILDREN(rack_timely), 1245 OID_AUTO, "p5_upper", CTLFLAG_RW, 1246 &rack_gain_p5_ub, 250, 1247 "Profile 5 upper bound to timely gain"); 1248 1249 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1250 SYSCTL_CHILDREN(rack_timely), 1251 OID_AUTO, "upperboundss", CTLFLAG_RW, 1252 &rack_per_upper_bound_ss, 0, 1253 "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)"); 1254 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1255 SYSCTL_CHILDREN(rack_timely), 1256 OID_AUTO, "upperboundca", CTLFLAG_RW, 1257 &rack_per_upper_bound_ca, 0, 1258 "Rack timely highest percentage we allow GP multiplier to CA raise to (0 is no upperbound)"); 1259 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1260 SYSCTL_CHILDREN(rack_timely), 1261 OID_AUTO, "dynamicgp", CTLFLAG_RW, 1262 &rack_do_dyn_mul, 0, 1263 "Rack timely do we enable dynmaic timely goodput by default"); 1264 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1265 SYSCTL_CHILDREN(rack_timely), 1266 OID_AUTO, "no_rec_red", CTLFLAG_RW, 1267 &rack_gp_no_rec_chg, 1, 1268 "Rack timely do we prohibit the recovery multiplier from being lowered"); 1269 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1270 SYSCTL_CHILDREN(rack_timely), 1271 OID_AUTO, "red_clear_cnt", CTLFLAG_RW, 1272 &rack_timely_dec_clear, 6, 1273 "Rack timely what threshold do we count to before another boost during b/w decent"); 1274 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1275 SYSCTL_CHILDREN(rack_timely), 1276 OID_AUTO, "max_push_rise", CTLFLAG_RW, 1277 &rack_timely_max_push_rise, 3, 1278 "Rack timely how many times do we push up with b/w increase"); 1279 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1280 SYSCTL_CHILDREN(rack_timely), 1281 OID_AUTO, "max_push_drop", CTLFLAG_RW, 1282 &rack_timely_max_push_drop, 3, 1283 "Rack timely how many times do we push back on b/w decent"); 1284 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1285 SYSCTL_CHILDREN(rack_timely), 1286 OID_AUTO, "min_segs", CTLFLAG_RW, 1287 &rack_timely_min_segs, 4, 1288 "Rack timely when setting the cwnd what is the min num segments"); 1289 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1290 SYSCTL_CHILDREN(rack_timely), 1291 OID_AUTO, "nonstop", CTLFLAG_RW, 1292 &rack_timely_no_stopping, 0, 1293 "Rack timely don't stop increase"); 1294 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1295 SYSCTL_CHILDREN(rack_timely), 1296 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW, 1297 &rack_down_raise_thresh, 100, 1298 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)"); 1299 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1300 SYSCTL_CHILDREN(rack_timely), 1301 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW, 1302 &rack_req_segs, 1, 1303 "Bottom dragging if not these many segments outstanding and room"); 1304 1305 /* TLP and Rack related parameters */ 1306 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1307 SYSCTL_CHILDREN(rack_sysctl_root), 1308 OID_AUTO, 1309 "tlp", 1310 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1311 "TLP and Rack related Controls"); 1312 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1313 SYSCTL_CHILDREN(rack_tlp), 1314 OID_AUTO, "use_rrr", CTLFLAG_RW, 1315 &use_rack_rr, 1, 1316 "Do we use Rack Rapid Recovery"); 1317 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1318 SYSCTL_CHILDREN(rack_tlp), 1319 OID_AUTO, "post_rec_labc", CTLFLAG_RW, 1320 &rack_max_abc_post_recovery, 2, 1321 "Since we do early recovery, do we override the l_abc to a value, if so what?"); 1322 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1323 SYSCTL_CHILDREN(rack_tlp), 1324 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW, 1325 &rack_non_rxt_use_cr, 0, 1326 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk"); 1327 
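	/*
	 * A hedged worked example of how the tlp/rack tunables in this
	 * subtree combine (a sketch of the intent only; the precise formula
	 * lives in the timer-threshold code): with an srtt of 40000 usec,
	 * the default reorder_thresh of 2 adds roughly srtt >> 2 = 10000
	 * usec to the rack timeout once reordering has been observed, and
	 * pktdelay (1000 usec by default) is added as fixed slop on top of
	 * that.
	 */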
SYSCTL_ADD_S32(&rack_sysctl_ctx, 1328 SYSCTL_CHILDREN(rack_tlp), 1329 OID_AUTO, "tlpmethod", CTLFLAG_RW, 1330 &rack_tlp_threshold_use, TLP_USE_TWO_ONE, 1331 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); 1332 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1333 SYSCTL_CHILDREN(rack_tlp), 1334 OID_AUTO, "limit", CTLFLAG_RW, 1335 &rack_tlp_limit, 2, 1336 "How many TLP's can be sent without sending new data"); 1337 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1338 SYSCTL_CHILDREN(rack_tlp), 1339 OID_AUTO, "use_greater", CTLFLAG_RW, 1340 &rack_tlp_use_greater, 1, 1341 "Should we use the rack_rtt time if its greater than srtt"); 1342 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1343 SYSCTL_CHILDREN(rack_tlp), 1344 OID_AUTO, "tlpminto", CTLFLAG_RW, 1345 &rack_tlp_min, 10000, 1346 "TLP minimum timeout per the specification (in microseconds)"); 1347 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1348 SYSCTL_CHILDREN(rack_tlp), 1349 OID_AUTO, "send_oldest", CTLFLAG_RW, 1350 &rack_always_send_oldest, 0, 1351 "Should we always send the oldest TLP and RACK-TLP"); 1352 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1353 SYSCTL_CHILDREN(rack_tlp), 1354 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW, 1355 &rack_lower_cwnd_at_tlp, 0, 1356 "When a TLP completes a retran should we enter recovery"); 1357 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1358 SYSCTL_CHILDREN(rack_tlp), 1359 OID_AUTO, "reorder_thresh", CTLFLAG_RW, 1360 &rack_reorder_thresh, 2, 1361 "What factor for rack will be added when seeing reordering (shift right)"); 1362 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1363 SYSCTL_CHILDREN(rack_tlp), 1364 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW, 1365 &rack_tlp_thresh, 1, 1366 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); 1367 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1368 SYSCTL_CHILDREN(rack_tlp), 1369 OID_AUTO, "reorder_fade", CTLFLAG_RW, 1370 &rack_reorder_fade, 60000000, 1371 "Does reorder detection fade, if so how many microseconds (0 means never)"); 1372 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1373 SYSCTL_CHILDREN(rack_tlp), 1374 OID_AUTO, "pktdelay", CTLFLAG_RW, 1375 &rack_pkt_delay, 1000, 1376 "Extra RACK time (in microseconds) besides reordering thresh"); 1377 1378 /* Timer related controls */ 1379 rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1380 SYSCTL_CHILDREN(rack_sysctl_root), 1381 OID_AUTO, 1382 "timers", 1383 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1384 "Timer related controls"); 1385 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1386 SYSCTL_CHILDREN(rack_timers), 1387 OID_AUTO, "reset_ssth_rec_rto", CTLFLAG_RW, 1388 &rack_ssthresh_rest_rto_rec, 0, 1389 "When doing recovery -> rto -> recovery do we reset SSthresh?"); 1390 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1391 SYSCTL_CHILDREN(rack_timers), 1392 OID_AUTO, "scoreboard_thresh", CTLFLAG_RW, 1393 &rack_rxt_scoreboard_clear_thresh, 2, 1394 "How many RTO's are allowed before we clear the scoreboard"); 1395 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1396 SYSCTL_CHILDREN(rack_timers), 1397 OID_AUTO, "honor_hpts_min", CTLFLAG_RW, 1398 &rack_honors_hpts_min_to, 1, 1399 "Do rack pacing timers honor hpts min timeout"); 1400 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1401 SYSCTL_CHILDREN(rack_timers), 1402 OID_AUTO, "hpts_max_reduce", CTLFLAG_RW, 1403 &rack_max_reduce, 10, 1404 "Max percentage we will reduce slot by for pacing when we are behind"); 1405 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1406 SYSCTL_CHILDREN(rack_timers), 1407 OID_AUTO, "persmin", CTLFLAG_RW, 1408 &rack_persist_min, 250000, 1409 "What is the minimum time in microseconds between persists"); 1410 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1411 SYSCTL_CHILDREN(rack_timers), 1412 
OID_AUTO, "persmax", CTLFLAG_RW, 1413 &rack_persist_max, 2000000, 1414 "What is the largest delay in microseconds between persists"); 1415 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1416 SYSCTL_CHILDREN(rack_timers), 1417 OID_AUTO, "delayed_ack", CTLFLAG_RW, 1418 &rack_delayed_ack_time, 40000, 1419 "Delayed ack time (40ms in microseconds)"); 1420 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1421 SYSCTL_CHILDREN(rack_timers), 1422 OID_AUTO, "minrto", CTLFLAG_RW, 1423 &rack_rto_min, 30000, 1424 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP"); 1425 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1426 SYSCTL_CHILDREN(rack_timers), 1427 OID_AUTO, "maxrto", CTLFLAG_RW, 1428 &rack_rto_max, 4000000, 1429 "Maximum RTO in microseconds -- should be at least as large as min_rto"); 1430 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1431 SYSCTL_CHILDREN(rack_timers), 1432 OID_AUTO, "minto", CTLFLAG_RW, 1433 &rack_min_to, 1000, 1434 "Minimum rack timeout in microseconds"); 1435 /* Measure controls */ 1436 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1437 SYSCTL_CHILDREN(rack_sysctl_root), 1438 OID_AUTO, 1439 "measure", 1440 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1441 "Measure related controls"); 1442 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1443 SYSCTL_CHILDREN(rack_measure), 1444 OID_AUTO, "wma_divisor", CTLFLAG_RW, 1445 &rack_wma_divisor, 8, 1446 "When doing b/w calculation what is the divisor for the WMA"); 1447 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1448 SYSCTL_CHILDREN(rack_measure), 1449 OID_AUTO, "end_cwnd", CTLFLAG_RW, 1450 &rack_cwnd_block_ends_measure, 0, 1451 "Does a cwnd just-return end the measurement window (app limited)"); 1452 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1453 SYSCTL_CHILDREN(rack_measure), 1454 OID_AUTO, "end_rwnd", CTLFLAG_RW, 1455 &rack_rwnd_block_ends_measure, 0, 1456 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); 1457 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1458 SYSCTL_CHILDREN(rack_measure), 1459 OID_AUTO, "min_target", CTLFLAG_RW, 1460 &rack_def_data_window, 20, 1461 "What is the minimum target window (in mss) for a GP measurements"); 1462 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1463 SYSCTL_CHILDREN(rack_measure), 1464 OID_AUTO, "goal_bdp", CTLFLAG_RW, 1465 &rack_goal_bdp, 2, 1466 "What is the goal BDP to measure"); 1467 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1468 SYSCTL_CHILDREN(rack_measure), 1469 OID_AUTO, "min_srtts", CTLFLAG_RW, 1470 &rack_min_srtts, 1, 1471 "What is the goal BDP to measure"); 1472 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1473 SYSCTL_CHILDREN(rack_measure), 1474 OID_AUTO, "min_measure_tim", CTLFLAG_RW, 1475 &rack_min_measure_usec, 0, 1476 "What is the Minimum time time for a measurement if 0, this is off"); 1477 /* Features */ 1478 rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1479 SYSCTL_CHILDREN(rack_sysctl_root), 1480 OID_AUTO, 1481 "features", 1482 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1483 "Feature controls"); 1484 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1485 SYSCTL_CHILDREN(rack_features), 1486 OID_AUTO, "hybrid_set_maxseg", CTLFLAG_RW, 1487 &rack_hybrid_allow_set_maxseg, 0, 1488 "Should hybrid pacing allow the setmss command"); 1489 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1490 SYSCTL_CHILDREN(rack_features), 1491 OID_AUTO, "cmpack", CTLFLAG_RW, 1492 &rack_use_cmp_acks, 1, 1493 "Should RACK have LRO send compressed acks"); 1494 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1495 SYSCTL_CHILDREN(rack_features), 1496 OID_AUTO, "fsb", CTLFLAG_RW, 1497 &rack_use_fsb, 1, 1498 "Should RACK use the fast send block?"); 1499 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1500 SYSCTL_CHILDREN(rack_features), 1501 OID_AUTO, "rfo", 
CTLFLAG_RW, 1502 &rack_use_rfo, 1, 1503 "Should RACK use rack_fast_output()?"); 1504 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1505 SYSCTL_CHILDREN(rack_features), 1506 OID_AUTO, "rsmrfo", CTLFLAG_RW, 1507 &rack_use_rsm_rfo, 1, 1508 "Should RACK use rack_fast_rsm_output()?"); 1509 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1510 SYSCTL_CHILDREN(rack_features), 1511 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW, 1512 &rack_enable_mqueue_for_nonpaced, 0, 1513 "Should RACK use mbuf queuing for non-paced connections"); 1514 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1515 SYSCTL_CHILDREN(rack_features), 1516 OID_AUTO, "hystartplusplus", CTLFLAG_RW, 1517 &rack_do_hystart, 0, 1518 "Should RACK enable HyStart++ on connections?"); 1519 /* Misc rack controls */ 1520 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1521 SYSCTL_CHILDREN(rack_sysctl_root), 1522 OID_AUTO, 1523 "misc", 1524 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1525 "Misc related controls"); 1526 #ifdef TCP_ACCOUNTING 1527 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1528 SYSCTL_CHILDREN(rack_misc), 1529 OID_AUTO, "tcp_acct", CTLFLAG_RW, 1530 &rack_tcp_accounting, 0, 1531 "Should we turn on TCP accounting for all rack sessions?"); 1532 #endif 1533 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1534 SYSCTL_CHILDREN(rack_misc), 1535 OID_AUTO, "dnd", CTLFLAG_RW, 1536 &rack_dnd_default, 0, 1537 "Do not disturb default for rack_rrr = 3"); 1538 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1539 SYSCTL_CHILDREN(rack_misc), 1540 OID_AUTO, "sad_seg_per", CTLFLAG_RW, 1541 &sad_seg_size_per, 800, 1542 "Percentage of segment size needed in a sack 800 = 80.0?"); 1543 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1544 SYSCTL_CHILDREN(rack_misc), 1545 OID_AUTO, "rxt_controls", CTLFLAG_RW, 1546 &rack_rxt_controls, 0, 1547 "Retransmit sending size controls (valid values 0, 1, 2 default=1)?"); 1548 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1549 SYSCTL_CHILDREN(rack_misc), 1550 OID_AUTO, "rack_hibeta", CTLFLAG_RW, 1551 &rack_hibeta_setting, 0, 1552 "Do we ue a high beta (80 instead of 50)?"); 1553 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1554 SYSCTL_CHILDREN(rack_misc), 1555 OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW, 1556 &rack_apply_rtt_with_reduced_conf, 0, 1557 "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?"); 1558 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1559 SYSCTL_CHILDREN(rack_misc), 1560 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW, 1561 &rack_dsack_std_based, 3, 1562 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?"); 1563 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1564 SYSCTL_CHILDREN(rack_misc), 1565 OID_AUTO, "prr_addback_max", CTLFLAG_RW, 1566 &rack_prr_addbackmax, 2, 1567 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?"); 1568 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1569 SYSCTL_CHILDREN(rack_misc), 1570 OID_AUTO, "stats_gets_ms", CTLFLAG_RW, 1571 &rack_stats_gets_ms_rtt, 1, 1572 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); 1573 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1574 SYSCTL_CHILDREN(rack_misc), 1575 OID_AUTO, "clientlowbuf", CTLFLAG_RW, 1576 &rack_client_low_buf, 0, 1577 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?"); 1578 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1579 SYSCTL_CHILDREN(rack_misc), 1580 OID_AUTO, "defprofile", CTLFLAG_RW, 1581 &rack_def_profile, 0, 1582 "Should RACK use a default profile (0=no, num == profile num)?"); 1583 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1584 SYSCTL_CHILDREN(rack_misc), 1585 OID_AUTO, "shared_cwnd", 
CTLFLAG_RW, 1586 &rack_enable_shared_cwnd, 1, 1587 "Should RACK try to use the shared cwnd on connections where allowed"); 1588 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1589 SYSCTL_CHILDREN(rack_misc), 1590 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW, 1591 &rack_limits_scwnd, 1, 1592 "Should RACK place low end time limits on the shared cwnd feature"); 1593 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1594 SYSCTL_CHILDREN(rack_misc), 1595 OID_AUTO, "no_prr", CTLFLAG_RW, 1596 &rack_disable_prr, 0, 1597 "Should RACK not use prr and only pace (must have pacing on)"); 1598 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1599 SYSCTL_CHILDREN(rack_misc), 1600 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1601 &rack_verbose_logging, 0, 1602 "Should RACK black box logging be verbose"); 1603 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1604 SYSCTL_CHILDREN(rack_misc), 1605 OID_AUTO, "data_after_close", CTLFLAG_RW, 1606 &rack_ignore_data_after_close, 1, 1607 "Do we hold off sending a RST until all pending data is ack'd"); 1608 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1609 SYSCTL_CHILDREN(rack_misc), 1610 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1611 &rack_sack_not_required, 1, 1612 "Do we allow rack to run on connections not supporting SACK"); 1613 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1614 SYSCTL_CHILDREN(rack_misc), 1615 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 1616 &rack_send_a_lot_in_prr, 1, 1617 "Send a lot in prr"); 1618 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1619 SYSCTL_CHILDREN(rack_misc), 1620 OID_AUTO, "autoscale", CTLFLAG_RW, 1621 &rack_autosndbuf_inc, 20, 1622 "What percentage should rack scale up its snd buffer by?"); 1623 1624 1625 /* Sack Attacker detection stuff */ 1626 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1627 SYSCTL_CHILDREN(rack_attack), 1628 OID_AUTO, "merge_out", CTLFLAG_RW, 1629 &rack_merge_out_sacks_on_attack, 0, 1630 "Do we merge the sendmap when we decide we are being attacked?"); 1631 1632 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1633 SYSCTL_CHILDREN(rack_attack), 1634 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 1635 &rack_highest_sack_thresh_seen, 0, 1636 "Highest sack to ack ratio seen"); 1637 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1638 SYSCTL_CHILDREN(rack_attack), 1639 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW, 1640 &rack_highest_move_thresh_seen, 0, 1641 "Highest move to non-move ratio seen"); 1642 rack_ack_total = counter_u64_alloc(M_WAITOK); 1643 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1644 SYSCTL_CHILDREN(rack_attack), 1645 OID_AUTO, "acktotal", CTLFLAG_RD, 1646 &rack_ack_total, 1647 "Total number of Ack's"); 1648 rack_express_sack = counter_u64_alloc(M_WAITOK); 1649 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1650 SYSCTL_CHILDREN(rack_attack), 1651 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1652 &rack_express_sack, 1653 "Total expresss number of Sack's"); 1654 rack_sack_total = counter_u64_alloc(M_WAITOK); 1655 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1656 SYSCTL_CHILDREN(rack_attack), 1657 OID_AUTO, "sacktotal", CTLFLAG_RD, 1658 &rack_sack_total, 1659 "Total number of SACKs"); 1660 rack_move_none = counter_u64_alloc(M_WAITOK); 1661 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1662 SYSCTL_CHILDREN(rack_attack), 1663 OID_AUTO, "move_none", CTLFLAG_RD, 1664 &rack_move_none, 1665 "Total number of SACK index reuse of positions under threshold"); 1666 rack_move_some = counter_u64_alloc(M_WAITOK); 1667 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1668 SYSCTL_CHILDREN(rack_attack), 1669 OID_AUTO, "move_some", CTLFLAG_RD, 1670 &rack_move_some, 1671 "Total number of SACK index reuse of positions over threshold"); 1672 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 1673 
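	/*
	 * A minimal sketch of the counter(9) pattern used for all of the
	 * read-only statistics below (the matching free calls are assumed to
	 * live in the module's teardown path, as usual for this file):
	 *
	 *	counter_u64_t c = counter_u64_alloc(M_WAITOK);	allocate per-CPU counter
	 *	counter_u64_add(c, 1);				bump it from the data path
	 *	uint64_t v = counter_u64_fetch(c);		sum across all CPUs
	 *	counter_u64_free(c);				release at module unload
	 *
	 * SYSCTL_ADD_COUNTER_U64() then exports the counter read-only under
	 * the node passed as its second argument.
	 */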
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1674 SYSCTL_CHILDREN(rack_attack), 1675 OID_AUTO, "attacks", CTLFLAG_RD, 1676 &rack_sack_attacks_detected, 1677 "Total number of SACK attackers that had sack disabled"); 1678 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 1679 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1680 SYSCTL_CHILDREN(rack_attack), 1681 OID_AUTO, "reversed", CTLFLAG_RD, 1682 &rack_sack_attacks_reversed, 1683 "Total number of SACK attackers that were later determined false positive"); 1684 rack_sack_attacks_suspect = counter_u64_alloc(M_WAITOK); 1685 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1686 SYSCTL_CHILDREN(rack_attack), 1687 OID_AUTO, "suspect", CTLFLAG_RD, 1688 &rack_sack_attacks_suspect, 1689 "Total number of SACKs that triggered early detection"); 1690 1691 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 1692 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1693 SYSCTL_CHILDREN(rack_attack), 1694 OID_AUTO, "nextmerge", CTLFLAG_RD, 1695 &rack_sack_used_next_merge, 1696 "Total number of times we used the next merge"); 1697 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 1698 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1699 SYSCTL_CHILDREN(rack_attack), 1700 OID_AUTO, "prevmerge", CTLFLAG_RD, 1701 &rack_sack_used_prev_merge, 1702 "Total number of times we used the prev merge"); 1703 /* Counters */ 1704 rack_total_bytes = counter_u64_alloc(M_WAITOK); 1705 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1706 SYSCTL_CHILDREN(rack_counters), 1707 OID_AUTO, "totalbytes", CTLFLAG_RD, 1708 &rack_total_bytes, 1709 "Total number of bytes sent"); 1710 rack_fto_send = counter_u64_alloc(M_WAITOK); 1711 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1712 SYSCTL_CHILDREN(rack_counters), 1713 OID_AUTO, "fto_send", CTLFLAG_RD, 1714 &rack_fto_send, "Total number of rack_fast_output sends"); 1715 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK); 1716 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1717 SYSCTL_CHILDREN(rack_counters), 1718 OID_AUTO, "fto_rsm_send", CTLFLAG_RD, 1719 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends"); 1720 rack_nfto_resend = counter_u64_alloc(M_WAITOK); 1721 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1722 SYSCTL_CHILDREN(rack_counters), 1723 OID_AUTO, "nfto_resend", CTLFLAG_RD, 1724 &rack_nfto_resend, "Total number of rack_output retransmissions"); 1725 rack_non_fto_send = counter_u64_alloc(M_WAITOK); 1726 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1727 SYSCTL_CHILDREN(rack_counters), 1728 OID_AUTO, "nfto_send", CTLFLAG_RD, 1729 &rack_non_fto_send, "Total number of rack_output first sends"); 1730 rack_extended_rfo = counter_u64_alloc(M_WAITOK); 1731 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1732 SYSCTL_CHILDREN(rack_counters), 1733 OID_AUTO, "rfo_extended", CTLFLAG_RD, 1734 &rack_extended_rfo, "Total number of times we extended rfo"); 1735 1736 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK); 1737 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1738 SYSCTL_CHILDREN(rack_counters), 1739 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD, 1740 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing"); 1741 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK); 1742 1743 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1744 SYSCTL_CHILDREN(rack_counters), 1745 OID_AUTO, "hwpace_lost", CTLFLAG_RD, 1746 &rack_hw_pace_lost, "Total number of times we failed to initialize hw pacing"); 1747 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 1748 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1749 SYSCTL_CHILDREN(rack_counters), 1750 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 1751 
&rack_tlp_tot, 1752 "Total number of tail loss probe expirations"); 1753 rack_tlp_newdata = counter_u64_alloc(M_WAITOK); 1754 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1755 SYSCTL_CHILDREN(rack_counters), 1756 OID_AUTO, "tlp_new", CTLFLAG_RD, 1757 &rack_tlp_newdata, 1758 "Total number of tail loss probe sending new data"); 1759 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 1760 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1761 SYSCTL_CHILDREN(rack_counters), 1762 OID_AUTO, "tlp_retran", CTLFLAG_RD, 1763 &rack_tlp_retran, 1764 "Total number of tail loss probe sending retransmitted data"); 1765 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 1766 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1767 SYSCTL_CHILDREN(rack_counters), 1768 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 1769 &rack_tlp_retran_bytes, 1770 "Total bytes of tail loss probe sending retransmitted data"); 1771 rack_to_tot = counter_u64_alloc(M_WAITOK); 1772 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1773 SYSCTL_CHILDREN(rack_counters), 1774 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 1775 &rack_to_tot, 1776 "Total number of times the rack timeout expired"); 1777 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 1778 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1779 SYSCTL_CHILDREN(rack_counters), 1780 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 1781 &rack_saw_enobuf, 1782 "Total number of times a send returned ENOBUFS for non-hdwr paced connections"); 1783 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK); 1784 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1785 SYSCTL_CHILDREN(rack_counters), 1786 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD, 1787 &rack_saw_enobuf_hw, 1788 "Total number of times a send returned ENOBUFS for hdwr paced connections"); 1789 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 1790 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1791 SYSCTL_CHILDREN(rack_counters), 1792 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 1793 &rack_saw_enetunreach, 1794 "Total number of times a send received an ENETUNREACH error"); 1795 rack_hot_alloc = counter_u64_alloc(M_WAITOK); 1796 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1797 SYSCTL_CHILDREN(rack_counters), 1798 OID_AUTO, "alloc_hot", CTLFLAG_RD, 1799 &rack_hot_alloc, 1800 "Total allocations from the top of our list"); 1801 rack_to_alloc = counter_u64_alloc(M_WAITOK); 1802 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1803 SYSCTL_CHILDREN(rack_counters), 1804 OID_AUTO, "allocs", CTLFLAG_RD, 1805 &rack_to_alloc, 1806 "Total allocations of tracking structures"); 1807 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 1808 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1809 SYSCTL_CHILDREN(rack_counters), 1810 OID_AUTO, "allochard", CTLFLAG_RD, 1811 &rack_to_alloc_hard, 1812 "Total allocations done with sleeping the hard way"); 1813 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 1814 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1815 SYSCTL_CHILDREN(rack_counters), 1816 OID_AUTO, "allocemerg", CTLFLAG_RD, 1817 &rack_to_alloc_emerg, 1818 "Total allocations done from emergency cache"); 1819 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 1820 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1821 SYSCTL_CHILDREN(rack_counters), 1822 OID_AUTO, "alloc_limited", CTLFLAG_RD, 1823 &rack_to_alloc_limited, 1824 "Total allocations dropped due to limit"); 1825 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 1826 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1827 SYSCTL_CHILDREN(rack_counters), 1828 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 1829 &rack_alloc_limited_conns, 1830 "Connections with allocations dropped due to limit"); 1831
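	/*
	 * Illustrative userland view (hypothetical paths, assuming the rack
	 * stack's sysctl root is net.inet.tcp.rack and the counters node
	 * created earlier in this function is named "stats"): the CTLFLAG_RD
	 * counters registered here can be read with sysctl(8), and the
	 * "clear" proc registered at the end of this function resets them:
	 *
	 *	sysctl net.inet.tcp.rack.stats.allocs
	 *	sysctl net.inet.tcp.rack.clear=1
	 */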
rack_split_limited = counter_u64_alloc(M_WAITOK); 1832 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1833 SYSCTL_CHILDREN(rack_counters), 1834 OID_AUTO, "split_limited", CTLFLAG_RD, 1835 &rack_split_limited, 1836 "Split allocations dropped due to limit"); 1837 rack_rxt_clamps_cwnd = counter_u64_alloc(M_WAITOK); 1838 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1839 SYSCTL_CHILDREN(rack_counters), 1840 OID_AUTO, "rxt_clamps_cwnd", CTLFLAG_RD, 1841 &rack_rxt_clamps_cwnd, 1842 "Number of times that excessive rxt clamped the cwnd down"); 1843 rack_rxt_clamps_cwnd_uniq = counter_u64_alloc(M_WAITOK); 1844 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1845 SYSCTL_CHILDREN(rack_counters), 1846 OID_AUTO, "rxt_clamps_cwnd_uniq", CTLFLAG_RD, 1847 &rack_rxt_clamps_cwnd_uniq, 1848 "Number of connections that have had excessive rxt clamped the cwnd down"); 1849 rack_persists_sends = counter_u64_alloc(M_WAITOK); 1850 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1851 SYSCTL_CHILDREN(rack_counters), 1852 OID_AUTO, "persist_sends", CTLFLAG_RD, 1853 &rack_persists_sends, 1854 "Number of times we sent a persist probe"); 1855 rack_persists_acks = counter_u64_alloc(M_WAITOK); 1856 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1857 SYSCTL_CHILDREN(rack_counters), 1858 OID_AUTO, "persist_acks", CTLFLAG_RD, 1859 &rack_persists_acks, 1860 "Number of times a persist probe was acked"); 1861 rack_persists_loss = counter_u64_alloc(M_WAITOK); 1862 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1863 SYSCTL_CHILDREN(rack_counters), 1864 OID_AUTO, "persist_loss", CTLFLAG_RD, 1865 &rack_persists_loss, 1866 "Number of times we detected a lost persist probe (no ack)"); 1867 rack_persists_lost_ends = counter_u64_alloc(M_WAITOK); 1868 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1869 SYSCTL_CHILDREN(rack_counters), 1870 OID_AUTO, "persist_loss_ends", CTLFLAG_RD, 1871 &rack_persists_lost_ends, 1872 "Number of lost persist probe (no ack) that the run ended with a PERSIST abort"); 1873 #ifdef INVARIANTS 1874 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK); 1875 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1876 SYSCTL_CHILDREN(rack_counters), 1877 OID_AUTO, "map_adjust_req", CTLFLAG_RD, 1878 &rack_adjust_map_bw, 1879 "Number of times we hit the case where the sb went up and down on a sendmap entry"); 1880 #endif 1881 rack_multi_single_eq = counter_u64_alloc(M_WAITOK); 1882 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1883 SYSCTL_CHILDREN(rack_counters), 1884 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD, 1885 &rack_multi_single_eq, 1886 "Number of compressed acks total represented"); 1887 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK); 1888 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1889 SYSCTL_CHILDREN(rack_counters), 1890 OID_AUTO, "cmp_ack_not", CTLFLAG_RD, 1891 &rack_proc_non_comp_ack, 1892 "Number of non compresseds acks that we processed"); 1893 1894 1895 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 1896 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1897 SYSCTL_CHILDREN(rack_counters), 1898 OID_AUTO, "sack_long", CTLFLAG_RD, 1899 &rack_sack_proc_all, 1900 "Total times we had to walk whole list for sack processing"); 1901 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 1902 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1903 SYSCTL_CHILDREN(rack_counters), 1904 OID_AUTO, "sack_restart", CTLFLAG_RD, 1905 &rack_sack_proc_restart, 1906 "Total times we had to walk whole list due to a restart"); 1907 rack_sack_proc_short = counter_u64_alloc(M_WAITOK); 1908 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1909 SYSCTL_CHILDREN(rack_counters), 1910 OID_AUTO, "sack_short", CTLFLAG_RD, 
1911 &rack_sack_proc_short, 1912 "Total times we took shortcut for sack processing"); 1913 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK); 1914 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1915 SYSCTL_CHILDREN(rack_attack), 1916 OID_AUTO, "skipacked", CTLFLAG_RD, 1917 &rack_sack_skipped_acked, 1918 "Total number of times we skipped previously sacked"); 1919 rack_sack_splits = counter_u64_alloc(M_WAITOK); 1920 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1921 SYSCTL_CHILDREN(rack_attack), 1922 OID_AUTO, "ofsplit", CTLFLAG_RD, 1923 &rack_sack_splits, 1924 "Total number of times we did the old fashion tree split"); 1925 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 1926 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1927 SYSCTL_CHILDREN(rack_counters), 1928 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 1929 &rack_input_idle_reduces, 1930 "Total number of idle reductions on input"); 1931 rack_collapsed_win_seen = counter_u64_alloc(M_WAITOK); 1932 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1933 SYSCTL_CHILDREN(rack_counters), 1934 OID_AUTO, "collapsed_win_seen", CTLFLAG_RD, 1935 &rack_collapsed_win_seen, 1936 "Total number of collapsed window events seen (where our window shrinks)"); 1937 1938 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 1939 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1940 SYSCTL_CHILDREN(rack_counters), 1941 OID_AUTO, "collapsed_win", CTLFLAG_RD, 1942 &rack_collapsed_win, 1943 "Total number of collapsed window events where we mark packets"); 1944 rack_collapsed_win_rxt = counter_u64_alloc(M_WAITOK); 1945 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1946 SYSCTL_CHILDREN(rack_counters), 1947 OID_AUTO, "collapsed_win_rxt", CTLFLAG_RD, 1948 &rack_collapsed_win_rxt, 1949 "Total number of packets that were retransmitted"); 1950 rack_collapsed_win_rxt_bytes = counter_u64_alloc(M_WAITOK); 1951 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1952 SYSCTL_CHILDREN(rack_counters), 1953 OID_AUTO, "collapsed_win_bytes", CTLFLAG_RD, 1954 &rack_collapsed_win_rxt_bytes, 1955 "Total number of bytes that were retransmitted"); 1956 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 1957 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1958 SYSCTL_CHILDREN(rack_counters), 1959 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 1960 &rack_try_scwnd, 1961 "Total number of scwnd attempts"); 1962 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1963 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1964 OID_AUTO, "outsize", CTLFLAG_RD, 1965 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 1966 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 1967 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1968 OID_AUTO, "opts", CTLFLAG_RD, 1969 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 1970 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 1971 SYSCTL_CHILDREN(rack_sysctl_root), 1972 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1973 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 1974 } 1975 1976 static uint32_t 1977 rc_init_window(struct tcp_rack *rack) 1978 { 1979 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 1980 1981 } 1982 1983 static uint64_t 1984 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 1985 { 1986 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 1987 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 1988 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1989 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 1990 else 1991 return (rack->r_ctl.rc_fixed_pacing_rate_ca); 1992 } 1993 1994 static 
void 1995 rack_log_hybrid_bw(struct tcp_rack *rack, uint32_t seq, uint64_t cbw, uint64_t tim, 1996 uint64_t data, uint8_t mod, uint16_t aux, 1997 struct tcp_sendfile_track *cur, int line) 1998 { 1999 #ifdef TCP_REQUEST_TRK 2000 int do_log = 0; 2001 2002 /* 2003 * The rate cap one is noisy and only should come out when normal BB logging 2004 * is enabled, the other logs (not RATE_CAP and NOT CAP_CALC) only come out 2005 * once per chunk and make up the BBpoint that can be turned on by the client. 2006 */ 2007 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) { 2008 /* 2009 * The very noisy two need to only come out when 2010 * we have verbose logging on. 2011 */ 2012 if (rack_verbose_logging != 0) 2013 do_log = tcp_bblogging_on(rack->rc_tp); 2014 else 2015 do_log = 0; 2016 } else if (mod != HYBRID_LOG_BW_MEASURE) { 2017 /* 2018 * All other less noisy logs here except the measure which 2019 * also needs to come out on the point and the log. 2020 */ 2021 do_log = tcp_bblogging_on(rack->rc_tp); 2022 } else { 2023 do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING); 2024 } 2025 2026 if (do_log) { 2027 union tcp_log_stackspecific log; 2028 struct timeval tv; 2029 uint64_t lt_bw; 2030 2031 /* Convert our ms to a microsecond */ 2032 memset(&log, 0, sizeof(log)); 2033 2034 log.u_bbr.cwnd_gain = line; 2035 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2036 log.u_bbr.rttProp = tim; 2037 log.u_bbr.bw_inuse = cbw; 2038 log.u_bbr.delRate = rack_get_gp_est(rack); 2039 lt_bw = rack_get_lt_bw(rack); 2040 log.u_bbr.flex1 = seq; 2041 log.u_bbr.pacing_gain = aux; 2042 /* lt_bw = < flex3 | flex2 > */ 2043 log.u_bbr.flex2 = (uint32_t)(lt_bw & 0x00000000ffffffff); 2044 log.u_bbr.flex3 = (uint32_t)((lt_bw >> 32) & 0x00000000ffffffff); 2045 /* Record the last obtained us rtt in inflight */ 2046 if (cur == NULL) { 2047 /* Make sure we are looking at the right log if an overide comes in */ 2048 cur = rack->r_ctl.rc_last_sft; 2049 } 2050 if (rack->r_ctl.rack_rs.rs_flags != RACK_RTT_EMPTY) 2051 log.u_bbr.inflight = rack->r_ctl.rack_rs.rs_us_rtt; 2052 else { 2053 /* Use the last known rtt i.e. 
the rack-rtt */ 2054 log.u_bbr.inflight = rack->rc_rack_rtt; 2055 } 2056 if (cur != NULL) { 2057 uint64_t off; 2058 2059 log.u_bbr.cur_del_rate = cur->deadline; 2060 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) { 2061 /* start = < lost | pkt_epoch > */ 2062 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); 2063 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2064 log.u_bbr.flex6 = cur->start_seq; 2065 log.u_bbr.pkts_out = cur->end_seq; 2066 } else { 2067 /* start = < lost | pkt_epoch > */ 2068 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); 2069 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2070 /* end = < pkts_out | flex6 > */ 2071 log.u_bbr.flex6 = (uint32_t)(cur->end & 0x00000000ffffffff); 2072 log.u_bbr.pkts_out = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); 2073 } 2074 /* first_send = <lt_epoch | epoch> */ 2075 log.u_bbr.epoch = (uint32_t)(cur->first_send & 0x00000000ffffffff); 2076 log.u_bbr.lt_epoch = (uint32_t)((cur->first_send >> 32) & 0x00000000ffffffff); 2077 /* localtime = <delivered | applimited>*/ 2078 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); 2079 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 2080 #ifdef TCP_REQUEST_TRK 2081 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 2082 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 2083 #endif 2084 log.u_bbr.inhpts = 1; 2085 log.u_bbr.flex4 = (uint32_t)(rack->rc_tp->t_sndbytes - cur->sent_at_fs); 2086 log.u_bbr.flex5 = (uint32_t)(rack->rc_tp->t_snd_rxt_bytes - cur->rxt_at_fs); 2087 log.u_bbr.flex7 = (uint16_t)cur->hybrid_flags; 2088 } else { 2089 log.u_bbr.flex7 = 0xffff; 2090 log.u_bbr.cur_del_rate = 0xffffffffffffffff; 2091 } 2092 /* 2093 * Compose bbr_state to be a bit wise 0000ADHF 2094 * where A is the always_pace flag 2095 * where D is the dgp_on flag 2096 * where H is the hybrid_mode on flag 2097 * where F is the use_fixed_rate flag. 2098 */ 2099 log.u_bbr.bbr_state = rack->rc_always_pace; 2100 log.u_bbr.bbr_state <<= 1; 2101 log.u_bbr.bbr_state |= rack->dgp_on; 2102 log.u_bbr.bbr_state <<= 1; 2103 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 2104 log.u_bbr.bbr_state <<= 1; 2105 log.u_bbr.bbr_state |= rack->use_fixed_rate; 2106 log.u_bbr.flex8 = mod; 2107 tcp_log_event(rack->rc_tp, NULL, 2108 &rack->rc_inp->inp_socket->so_rcv, 2109 &rack->rc_inp->inp_socket->so_snd, 2110 TCP_HYBRID_PACING_LOG, 0, 2111 0, &log, false, NULL, __func__, __LINE__, &tv); 2112 2113 } 2114 #endif 2115 } 2116 2117 #ifdef TCP_REQUEST_TRK 2118 static void 2119 rack_log_hybrid_sends(struct tcp_rack *rack, struct tcp_sendfile_track *cur, int line) 2120 { 2121 if (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) { 2122 union tcp_log_stackspecific log; 2123 struct timeval tv; 2124 uint64_t off; 2125 2126 /* Convert our ms to a microsecond */ 2127 memset(&log, 0, sizeof(log)); 2128 2129 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2130 log.u_bbr.delRate = cur->sent_at_fs; 2131 2132 if ((cur->flags & TCP_TRK_TRACK_FLG_LSND) == 0) { 2133 /* 2134 * We did not get a new Rules Applied to set so 2135 * no overlapping send occured, this means the 2136 * current byte counts are correct. 2137 */ 2138 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 2139 log.u_bbr.rttProp = rack->rc_tp->t_snd_rxt_bytes; 2140 } else { 2141 /* 2142 * Overlapping send case, we switched to a new 2143 * send and did a rules applied. 
2144 */ 2145 log.u_bbr.cur_del_rate = cur->sent_at_ls; 2146 log.u_bbr.rttProp = cur->rxt_at_ls; 2147 } 2148 log.u_bbr.bw_inuse = cur->rxt_at_fs; 2149 log.u_bbr.cwnd_gain = line; 2150 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 2151 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 2152 /* start = < flex1 | flex2 > */ 2153 log.u_bbr.flex2 = (uint32_t)(cur->start & 0x00000000ffffffff); 2154 log.u_bbr.flex1 = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2155 /* end = < flex3 | flex4 > */ 2156 log.u_bbr.flex4 = (uint32_t)(cur->end & 0x00000000ffffffff); 2157 log.u_bbr.flex3 = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); 2158 2159 /* localtime = <delivered | applimited>*/ 2160 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); 2161 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 2162 /* client timestamp = <lt_epoch | epoch>*/ 2163 log.u_bbr.epoch = (uint32_t)(cur->timestamp & 0x00000000ffffffff); 2164 log.u_bbr.lt_epoch = (uint32_t)((cur->timestamp >> 32) & 0x00000000ffffffff); 2165 /* now set all the flags in */ 2166 log.u_bbr.pkts_out = cur->hybrid_flags; 2167 log.u_bbr.lost = cur->playout_ms; 2168 log.u_bbr.flex6 = cur->flags; 2169 /* 2170 * Last send time = <flex5 | pkt_epoch> note we do not distinguish cases 2171 * where a false retransmit occurred so first_send <-> lastsend may 2172 * include a longer time than it actually took if we have a false rxt. 2173 */ 2174 log.u_bbr.pkt_epoch = (uint32_t)(rack->r_ctl.last_tmit_time_acked & 0x00000000ffffffff); 2175 log.u_bbr.flex5 = (uint32_t)((rack->r_ctl.last_tmit_time_acked >> 32) & 0x00000000ffffffff); 2176 /* 2177 * Compose bbr_state to be a bit wise 0000ADHF 2178 * where A is the always_pace flag 2179 * where D is the dgp_on flag 2180 * where H is the hybrid_mode on flag 2181 * where F is the use_fixed_rate flag.
2182 */ 2183 log.u_bbr.bbr_state = rack->rc_always_pace; 2184 log.u_bbr.bbr_state <<= 1; 2185 log.u_bbr.bbr_state |= rack->dgp_on; 2186 log.u_bbr.bbr_state <<= 1; 2187 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 2188 log.u_bbr.bbr_state <<= 1; 2189 log.u_bbr.bbr_state |= rack->use_fixed_rate; 2190 2191 log.u_bbr.flex8 = HYBRID_LOG_SENT_LOST; 2192 tcp_log_event(rack->rc_tp, NULL, 2193 &rack->rc_inp->inp_socket->so_rcv, 2194 &rack->rc_inp->inp_socket->so_snd, 2195 TCP_HYBRID_PACING_LOG, 0, 2196 0, &log, false, NULL, __func__, __LINE__, &tv); 2197 } 2198 } 2199 #endif 2200 2201 static inline uint64_t 2202 rack_compensate_for_linerate(struct tcp_rack *rack, uint64_t bw) 2203 { 2204 uint64_t ret_bw, ether; 2205 uint64_t u_segsiz; 2206 2207 ether = rack->rc_tp->t_maxseg + sizeof(struct tcphdr); 2208 if (rack->r_is_v6){ 2209 #ifdef INET6 2210 ether += sizeof(struct ip6_hdr); 2211 #endif 2212 ether += 14; /* eheader size 6+6+2 */ 2213 } else { 2214 #ifdef INET 2215 ether += sizeof(struct ip); 2216 #endif 2217 ether += 14; /* eheader size 6+6+2 */ 2218 } 2219 u_segsiz = (uint64_t)min(ctf_fixed_maxseg(rack->rc_tp), rack->r_ctl.rc_pace_min_segs); 2220 ret_bw = bw; 2221 ret_bw *= ether; 2222 ret_bw /= u_segsiz; 2223 return (ret_bw); 2224 } 2225 2226 static void 2227 rack_rate_cap_bw(struct tcp_rack *rack, uint64_t *bw, int *capped) 2228 { 2229 #ifdef TCP_REQUEST_TRK 2230 struct timeval tv; 2231 uint64_t timenow, timeleft, lenleft, lengone, calcbw; 2232 #endif 2233 2234 if (rack->r_ctl.bw_rate_cap == 0) 2235 return; 2236 #ifdef TCP_REQUEST_TRK 2237 if (rack->rc_catch_up && rack->rc_hybrid_mode && 2238 (rack->r_ctl.rc_last_sft != NULL)) { 2239 /* 2240 * We have a dynamic cap. The original target 2241 * is in bw_rate_cap, but we need to look at 2242 * how long it is until we hit the deadline. 2243 */ 2244 struct tcp_sendfile_track *ent; 2245 2246 ent = rack->r_ctl.rc_last_sft; 2247 microuptime(&tv); 2248 timenow = tcp_tv_to_lusectick(&tv); 2249 if (timenow >= ent->deadline) { 2250 /* No time left we do DGP only */ 2251 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2252 0, 0, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__); 2253 rack->r_ctl.bw_rate_cap = 0; 2254 return; 2255 } 2256 /* We have the time */ 2257 timeleft = rack->r_ctl.rc_last_sft->deadline - timenow; 2258 if (timeleft < HPTS_MSEC_IN_SEC) { 2259 /* If there is less than a ms left just use DGPs rate */ 2260 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2261 0, timeleft, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__); 2262 rack->r_ctl.bw_rate_cap = 0; 2263 return; 2264 } 2265 /* 2266 * Now lets find the amount of data left to send. 2267 * 2268 * Now ideally we want to use the end_seq to figure out how much more 2269 * but it might not be possible (only if we have the TRACK_FG_COMP on the entry.. 2270 */ 2271 if (ent->flags & TCP_TRK_TRACK_FLG_COMP) { 2272 if (SEQ_GT(ent->end_seq, rack->rc_tp->snd_una)) 2273 lenleft = ent->end_seq - rack->rc_tp->snd_una; 2274 else { 2275 /* TSNH, we should catch it at the send */ 2276 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2277 0, timeleft, 0, HYBRID_LOG_CAPERROR, 0, ent, __LINE__); 2278 rack->r_ctl.bw_rate_cap = 0; 2279 return; 2280 } 2281 } else { 2282 /* 2283 * The hard way, figure out how much is gone and then 2284 * take that away from the total the client asked for 2285 * (thats off by tls overhead if this is tls). 
2286 */ 2287 if (SEQ_GT(rack->rc_tp->snd_una, ent->start_seq)) 2288 lengone = rack->rc_tp->snd_una - ent->start_seq; 2289 else 2290 lengone = 0; 2291 if (lengone < (ent->end - ent->start)) 2292 lenleft = (ent->end - ent->start) - lengone; 2293 else { 2294 /* TSNH, we should catch it at the send */ 2295 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2296 0, timeleft, lengone, HYBRID_LOG_CAPERROR, 0, ent, __LINE__); 2297 rack->r_ctl.bw_rate_cap = 0; 2298 return; 2299 } 2300 } 2301 if (lenleft == 0) { 2302 /* We have it all sent */ 2303 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2304 0, timeleft, lenleft, HYBRID_LOG_ALLSENT, 0, ent, __LINE__); 2305 if (rack->r_ctl.bw_rate_cap) 2306 goto normal_ratecap; 2307 else 2308 return; 2309 } 2310 calcbw = lenleft * HPTS_USEC_IN_SEC; 2311 calcbw /= timeleft; 2312 /* Now we must compensate for IP/TCP overhead */ 2313 calcbw = rack_compensate_for_linerate(rack, calcbw); 2314 /* Update the bit rate cap */ 2315 rack->r_ctl.bw_rate_cap = calcbw; 2316 if ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && 2317 (rack_hybrid_allow_set_maxseg == 1) && 2318 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { 2319 /* Lets set in a smaller mss possibly here to match our rate-cap */ 2320 uint32_t orig_max; 2321 2322 orig_max = rack->r_ctl.rc_pace_max_segs; 2323 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; 2324 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, calcbw, ctf_fixed_maxseg(rack->rc_tp)); 2325 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5); 2326 } 2327 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2328 calcbw, timeleft, lenleft, HYBRID_LOG_CAP_CALC, 0, ent, __LINE__); 2329 if ((calcbw > 0) && (*bw > calcbw)) { 2330 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2331 *bw, ent->deadline, lenleft, HYBRID_LOG_RATE_CAP, 0, ent, __LINE__); 2332 *capped = 1; 2333 *bw = calcbw; 2334 } 2335 return; 2336 } 2337 normal_ratecap: 2338 #endif 2339 if ((rack->r_ctl.bw_rate_cap > 0) && (*bw > rack->r_ctl.bw_rate_cap)) { 2340 #ifdef TCP_REQUEST_TRK 2341 if (rack->rc_hybrid_mode && 2342 rack->rc_catch_up && 2343 (rack->r_ctl.rc_last_sft != NULL) && 2344 (rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && 2345 (rack_hybrid_allow_set_maxseg == 1) && 2346 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { 2347 /* Lets set in a smaller mss possibly here to match our rate-cap */ 2348 uint32_t orig_max; 2349 2350 orig_max = rack->r_ctl.rc_pace_max_segs; 2351 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; 2352 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, rack->r_ctl.bw_rate_cap, ctf_fixed_maxseg(rack->rc_tp)); 2353 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5); 2354 } 2355 #endif 2356 *capped = 1; 2357 *bw = rack->r_ctl.bw_rate_cap; 2358 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2359 *bw, 0, 0, 2360 HYBRID_LOG_RATE_CAP, 1, NULL, __LINE__); 2361 } 2362 } 2363 2364 static uint64_t 2365 rack_get_gp_est(struct tcp_rack *rack) 2366 { 2367 uint64_t bw, lt_bw, ret_bw; 2368 2369 if (rack->rc_gp_filled == 0) { 2370 /* 2371 * We have yet no b/w measurement, 2372 * if we have a user set initial bw 2373 * return it. If we don't have that and 2374 * we have an srtt, use the tcp IW (10) to 2375 * calculate a fictional b/w over the SRTT 2376 * which is more or less a guess. 
Note 2377 * we don't use our IW from rack on purpose 2378 * so if we have like IW=30, we are not 2379 * calculating a "huge" b/w. 2380 */ 2381 uint64_t srtt; 2382 2383 if (rack->dis_lt_bw == 1) 2384 lt_bw = 0; 2385 else 2386 lt_bw = rack_get_lt_bw(rack); 2387 if (lt_bw) { 2388 /* 2389 * No goodput bw but a long-term b/w does exist 2390 * lets use that. 2391 */ 2392 ret_bw = lt_bw; 2393 goto compensate; 2394 } 2395 if (rack->r_ctl.init_rate) 2396 return (rack->r_ctl.init_rate); 2397 2398 /* Ok lets come up with the IW guess, if we have a srtt */ 2399 if (rack->rc_tp->t_srtt == 0) { 2400 /* 2401 * Go with old pacing method 2402 * i.e. burst mitigation only. 2403 */ 2404 return (0); 2405 } 2406 /* Ok lets get the initial TCP win (not racks) */ 2407 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 2408 srtt = (uint64_t)rack->rc_tp->t_srtt; 2409 bw *= (uint64_t)USECS_IN_SECOND; 2410 bw /= srtt; 2411 ret_bw = bw; 2412 goto compensate; 2413 2414 } 2415 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 2416 /* Averaging is done, we can return the value */ 2417 bw = rack->r_ctl.gp_bw; 2418 } else { 2419 /* Still doing initial average must calculate */ 2420 bw = rack->r_ctl.gp_bw / max(rack->r_ctl.num_measurements, 1); 2421 } 2422 if (rack->dis_lt_bw) { 2423 /* We are not using lt-bw */ 2424 ret_bw = bw; 2425 goto compensate; 2426 } 2427 lt_bw = rack_get_lt_bw(rack); 2428 if (lt_bw == 0) { 2429 /* If we don't have one then equate it to the gp_bw */ 2430 lt_bw = rack->r_ctl.gp_bw; 2431 } 2432 if (rack->use_lesser_lt_bw) { 2433 if (lt_bw < bw) 2434 ret_bw = lt_bw; 2435 else 2436 ret_bw = bw; 2437 } else { 2438 if (lt_bw > bw) 2439 ret_bw = lt_bw; 2440 else 2441 ret_bw = bw; 2442 } 2443 /* 2444 * Now lets compensate based on the TCP/IP overhead. Our 2445 * Goodput estimate does not include this so we must pace out 2446 * a bit faster since our pacing calculations do. The pacing 2447 * calculations use the base ETHERNET_SEGMENT_SIZE and the segsiz 2448 * we are using to do this, so we do that here in the opposite 2449 * direction as well. This means that if we are tunneled and the 2450 * segsiz is say 1200 bytes we will get quite a boost, but its 2451 * compensated for in the pacing time the opposite way. 2452 */ 2453 compensate: 2454 ret_bw = rack_compensate_for_linerate(rack, ret_bw); 2455 return(ret_bw); 2456 } 2457 2458 2459 static uint64_t 2460 rack_get_bw(struct tcp_rack *rack) 2461 { 2462 uint64_t bw; 2463 2464 if (rack->use_fixed_rate) { 2465 /* Return the fixed pacing rate */ 2466 return (rack_get_fixed_pacing_bw(rack)); 2467 } 2468 bw = rack_get_gp_est(rack); 2469 return (bw); 2470 } 2471 2472 static uint16_t 2473 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 2474 { 2475 if (rack->use_fixed_rate) { 2476 return (100); 2477 } else if (rack->in_probe_rtt && (rsm == NULL)) 2478 return (rack->r_ctl.rack_per_of_gp_probertt); 2479 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 2480 rack->r_ctl.rack_per_of_gp_rec)) { 2481 if (rsm) { 2482 /* a retransmission always use the recovery rate */ 2483 return (rack->r_ctl.rack_per_of_gp_rec); 2484 } else if (rack->rack_rec_nonrxt_use_cr) { 2485 /* Directed to use the configured rate */ 2486 goto configured_rate; 2487 } else if (rack->rack_no_prr && 2488 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 2489 /* No PRR, lets just use the b/w estimate only */ 2490 return (100); 2491 } else { 2492 /* 2493 * Here we may have a non-retransmit but we 2494 * have no overrides, so just use the recovery 2495 * rate (prr is in effect). 
2496 */ 2497 return (rack->r_ctl.rack_per_of_gp_rec); 2498 } 2499 } 2500 configured_rate: 2501 /* For the configured rate we look at our cwnd vs the ssthresh */ 2502 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 2503 return (rack->r_ctl.rack_per_of_gp_ss); 2504 else 2505 return (rack->r_ctl.rack_per_of_gp_ca); 2506 } 2507 2508 static void 2509 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6) 2510 { 2511 /* 2512 * Types of logs (mod value) 2513 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. 2514 * 2 = a dsack round begins, persist is reset to 16. 2515 * 3 = a dsack round ends 2516 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh 2517 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack 2518 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. 2519 */ 2520 if (tcp_bblogging_on(rack->rc_tp)) { 2521 union tcp_log_stackspecific log; 2522 struct timeval tv; 2523 2524 memset(&log, 0, sizeof(log)); 2525 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; 2526 log.u_bbr.flex1 <<= 1; 2527 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; 2528 log.u_bbr.flex1 <<= 1; 2529 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; 2530 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; 2531 log.u_bbr.flex3 = rack->r_ctl.num_dsack; 2532 log.u_bbr.flex4 = flex4; 2533 log.u_bbr.flex5 = flex5; 2534 log.u_bbr.flex6 = flex6; 2535 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; 2536 log.u_bbr.flex8 = mod; 2537 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2538 log.u_bbr.epoch = rack->r_ctl.current_round; 2539 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2540 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2541 &rack->rc_inp->inp_socket->so_rcv, 2542 &rack->rc_inp->inp_socket->so_snd, 2543 RACK_DSACK_HANDLING, 0, 2544 0, &log, false, &tv); 2545 } 2546 } 2547 2548 static void 2549 rack_log_hdwr_pacing(struct tcp_rack *rack, 2550 uint64_t rate, uint64_t hw_rate, int line, 2551 int error, uint16_t mod) 2552 { 2553 if (tcp_bblogging_on(rack->rc_tp)) { 2554 union tcp_log_stackspecific log; 2555 struct timeval tv; 2556 const struct ifnet *ifp; 2557 uint64_t ifp64; 2558 2559 memset(&log, 0, sizeof(log)); 2560 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 2561 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 2562 if (rack->r_ctl.crte) { 2563 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 2564 } else if (rack->rc_inp->inp_route.ro_nh && 2565 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 2566 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 2567 } else 2568 ifp = NULL; 2569 if (ifp) { 2570 ifp64 = (uintptr_t)ifp; 2571 log.u_bbr.flex3 = ((ifp64 >> 32) & 0x00000000ffffffff); 2572 log.u_bbr.flex4 = (ifp64 & 0x00000000ffffffff); 2573 } 2574 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2575 log.u_bbr.bw_inuse = rate; 2576 log.u_bbr.flex5 = line; 2577 log.u_bbr.flex6 = error; 2578 log.u_bbr.flex7 = mod; 2579 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 2580 log.u_bbr.flex8 = rack->use_fixed_rate; 2581 log.u_bbr.flex8 <<= 1; 2582 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 2583 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 2584 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 2585 if (rack->r_ctl.crte) 2586 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 2587 else 2588 log.u_bbr.cur_del_rate = 0; 2589 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 2590 log.u_bbr.epoch = rack->r_ctl.current_round; 2591 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2592 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2593 
&rack->rc_inp->inp_socket->so_rcv, 2594 &rack->rc_inp->inp_socket->so_snd, 2595 BBR_LOG_HDWR_PACE, 0, 2596 0, &log, false, &tv); 2597 } 2598 } 2599 2600 static uint64_t 2601 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) 2602 { 2603 /* 2604 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 2605 */ 2606 uint64_t bw_est, high_rate; 2607 uint64_t gain; 2608 2609 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2610 bw_est = bw * gain; 2611 bw_est /= (uint64_t)100; 2612 /* Never fall below the minimum (def 64kbps) */ 2613 if (bw_est < RACK_MIN_BW) 2614 bw_est = RACK_MIN_BW; 2615 if (rack->r_rack_hw_rate_caps) { 2616 /* Rate caps are in place */ 2617 if (rack->r_ctl.crte != NULL) { 2618 /* We have a hdwr rate already */ 2619 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2620 if (bw_est >= high_rate) { 2621 /* We are capping bw at the highest rate table entry */ 2622 if (rack_hw_rate_cap_per && 2623 (((high_rate * (100 + rack_hw_rate_cap_per)) / 100) < bw_est)) { 2624 rack->r_rack_hw_rate_caps = 0; 2625 goto done; 2626 } 2627 rack_log_hdwr_pacing(rack, 2628 bw_est, high_rate, __LINE__, 2629 0, 3); 2630 bw_est = high_rate; 2631 if (capped) 2632 *capped = 1; 2633 } 2634 } else if ((rack->rack_hdrw_pacing == 0) && 2635 (rack->rack_hdw_pace_ena) && 2636 (rack->rack_attempt_hdwr_pace == 0) && 2637 (rack->rc_inp->inp_route.ro_nh != NULL) && 2638 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2639 /* 2640 * Special case, we have not yet attempted hardware 2641 * pacing, and yet we may, when we do, find out if we are 2642 * above the highest rate. We need to know the maxbw for the interface 2643 * in question (if it supports ratelimiting). We get back 2644 * a 0, if the interface is not found in the RL lists. 2645 */ 2646 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2647 if (high_rate) { 2648 /* Yep, we have a rate is it above this rate? */ 2649 if (bw_est > high_rate) { 2650 bw_est = high_rate; 2651 if (capped) 2652 *capped = 1; 2653 } 2654 } 2655 } 2656 } 2657 done: 2658 return (bw_est); 2659 } 2660 2661 static void 2662 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2663 { 2664 if (tcp_bblogging_on(rack->rc_tp)) { 2665 union tcp_log_stackspecific log; 2666 struct timeval tv; 2667 2668 if ((mod != 1) && (rack_verbose_logging == 0)) { 2669 /* 2670 * We get 3 values currently for mod 2671 * 1 - We are retransmitting and this tells the reason. 2672 * 2 - We are clearing a dup-ack count. 2673 * 3 - We are incrementing a dup-ack count. 2674 * 2675 * The clear/increment are only logged 2676 * if you have BBverbose on. 
2677 */ 2678 return; 2679 } 2680 memset(&log, 0, sizeof(log)); 2681 log.u_bbr.flex1 = tsused; 2682 log.u_bbr.flex2 = thresh; 2683 log.u_bbr.flex3 = rsm->r_flags; 2684 log.u_bbr.flex4 = rsm->r_dupack; 2685 log.u_bbr.flex5 = rsm->r_start; 2686 log.u_bbr.flex6 = rsm->r_end; 2687 log.u_bbr.flex8 = mod; 2688 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2689 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2690 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2691 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2692 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2693 log.u_bbr.pacing_gain = rack->r_must_retran; 2694 log.u_bbr.epoch = rack->r_ctl.current_round; 2695 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; 2696 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2697 &rack->rc_inp->inp_socket->so_rcv, 2698 &rack->rc_inp->inp_socket->so_snd, 2699 BBR_LOG_SETTINGS_CHG, 0, 2700 0, &log, false, &tv); 2701 } 2702 } 2703 2704 static void 2705 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2706 { 2707 if (tcp_bblogging_on(rack->rc_tp)) { 2708 union tcp_log_stackspecific log; 2709 struct timeval tv; 2710 2711 memset(&log, 0, sizeof(log)); 2712 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2713 log.u_bbr.flex2 = to; 2714 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2715 log.u_bbr.flex4 = slot; 2716 log.u_bbr.flex5 = rack->rc_tp->t_hpts_slot; 2717 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2718 log.u_bbr.flex7 = rack->rc_in_persist; 2719 log.u_bbr.flex8 = which; 2720 if (rack->rack_no_prr) 2721 log.u_bbr.pkts_out = 0; 2722 else 2723 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2724 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2725 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2726 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2727 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2728 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2729 log.u_bbr.pacing_gain = rack->r_must_retran; 2730 log.u_bbr.cwnd_gain = rack->rack_deferred_inited; 2731 log.u_bbr.pkt_epoch = rack->rc_has_collapsed; 2732 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2733 log.u_bbr.lost = rack_rto_min; 2734 log.u_bbr.epoch = rack->r_ctl.roundends; 2735 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2736 log.u_bbr.bw_inuse <<= 32; 2737 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2738 log.u_bbr.applimited = rack->rc_tp->t_flags2; 2739 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2740 &rack->rc_inp->inp_socket->so_rcv, 2741 &rack->rc_inp->inp_socket->so_snd, 2742 BBR_LOG_TIMERSTAR, 0, 2743 0, &log, false, &tv); 2744 } 2745 } 2746 2747 static void 2748 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2749 { 2750 if (tcp_bblogging_on(rack->rc_tp)) { 2751 union tcp_log_stackspecific log; 2752 struct timeval tv; 2753 2754 memset(&log, 0, sizeof(log)); 2755 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2756 log.u_bbr.flex8 = to_num; 2757 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2758 log.u_bbr.flex2 = rack->rc_rack_rtt; 2759 if (rsm == NULL) 2760 log.u_bbr.flex3 = 0; 2761 else 2762 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2763 if (rack->rack_no_prr) 2764 log.u_bbr.flex5 = 0; 2765 else 2766 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2767 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2768 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2769 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2770 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2771 log.u_bbr.pacing_gain = rack->r_must_retran; 
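		/*
		 * The statements below use the packing convention common to
		 * these log routines: two 32-bit values folded into one
		 * 64-bit log field, high word first.  A sketch of the
		 * encode/decode (the decode side is assumed to live in the
		 * userland black-box log readers, not in this file):
		 *
		 *	bw_inuse = ((uint64_t)current_round << 32) | considered_lost;
		 *	round    = (uint32_t)(bw_inuse >> 32);
		 *	lost     = (uint32_t)(bw_inuse & 0xffffffff);
		 */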
2772 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2773 log.u_bbr.bw_inuse <<= 32; 2774 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2775 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2776 &rack->rc_inp->inp_socket->so_rcv, 2777 &rack->rc_inp->inp_socket->so_snd, 2778 BBR_LOG_RTO, 0, 2779 0, &log, false, &tv); 2780 } 2781 } 2782 2783 static void 2784 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, 2785 struct rack_sendmap *prev, 2786 struct rack_sendmap *rsm, 2787 struct rack_sendmap *next, 2788 int flag, uint32_t th_ack, int line) 2789 { 2790 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2791 union tcp_log_stackspecific log; 2792 struct timeval tv; 2793 2794 memset(&log, 0, sizeof(log)); 2795 log.u_bbr.flex8 = flag; 2796 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2797 log.u_bbr.cur_del_rate = (uintptr_t)prev; 2798 log.u_bbr.delRate = (uintptr_t)rsm; 2799 log.u_bbr.rttProp = (uintptr_t)next; 2800 log.u_bbr.flex7 = 0; 2801 if (prev) { 2802 log.u_bbr.flex1 = prev->r_start; 2803 log.u_bbr.flex2 = prev->r_end; 2804 log.u_bbr.flex7 |= 0x4; 2805 } 2806 if (rsm) { 2807 log.u_bbr.flex3 = rsm->r_start; 2808 log.u_bbr.flex4 = rsm->r_end; 2809 log.u_bbr.flex7 |= 0x2; 2810 } 2811 if (next) { 2812 log.u_bbr.flex5 = next->r_start; 2813 log.u_bbr.flex6 = next->r_end; 2814 log.u_bbr.flex7 |= 0x1; 2815 } 2816 log.u_bbr.applimited = line; 2817 log.u_bbr.pkts_out = th_ack; 2818 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2819 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2820 if (rack->rack_no_prr) 2821 log.u_bbr.lost = 0; 2822 else 2823 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2824 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2825 log.u_bbr.bw_inuse <<= 32; 2826 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2827 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2828 &rack->rc_inp->inp_socket->so_rcv, 2829 &rack->rc_inp->inp_socket->so_snd, 2830 TCP_LOG_MAPCHG, 0, 2831 0, &log, false, &tv); 2832 } 2833 } 2834 2835 static void 2836 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2837 struct rack_sendmap *rsm, int conf) 2838 { 2839 if (tcp_bblogging_on(tp)) { 2840 union tcp_log_stackspecific log; 2841 struct timeval tv; 2842 memset(&log, 0, sizeof(log)); 2843 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2844 log.u_bbr.flex1 = t; 2845 log.u_bbr.flex2 = len; 2846 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2847 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2848 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2849 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2850 log.u_bbr.flex7 = conf; 2851 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2852 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2853 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2854 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2855 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2856 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2857 if (rsm) { 2858 log.u_bbr.pkt_epoch = rsm->r_start; 2859 log.u_bbr.lost = rsm->r_end; 2860 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2861 /* We loose any upper of the 24 bits */ 2862 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; 2863 } else { 2864 /* Its a SYN */ 2865 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2866 log.u_bbr.lost = 0; 2867 log.u_bbr.cwnd_gain = 0; 2868 log.u_bbr.pacing_gain = 0; 2869 } 2870 /* Write out general bits of interest rrs here */ 2871 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; 2872 log.u_bbr.use_lt_bw <<= 1; 2873 
log.u_bbr.use_lt_bw |= rack->forced_ack; 2874 log.u_bbr.use_lt_bw <<= 1; 2875 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2876 log.u_bbr.use_lt_bw <<= 1; 2877 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2878 log.u_bbr.use_lt_bw <<= 1; 2879 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2880 log.u_bbr.use_lt_bw <<= 1; 2881 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 2882 log.u_bbr.use_lt_bw <<= 1; 2883 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2884 log.u_bbr.use_lt_bw <<= 1; 2885 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; 2886 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; 2887 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; 2888 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; 2889 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; 2890 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; 2891 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 2892 log.u_bbr.bw_inuse <<= 32; 2893 if (rsm) 2894 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 2895 TCP_LOG_EVENTP(tp, NULL, 2896 &rack->rc_inp->inp_socket->so_rcv, 2897 &rack->rc_inp->inp_socket->so_snd, 2898 BBR_LOG_BBRRTT, 0, 2899 0, &log, false, &tv); 2900 2901 2902 } 2903 } 2904 2905 static void 2906 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 2907 { 2908 /* 2909 * Log the rtt sample we are 2910 * applying to the srtt algorithm in 2911 * useconds. 2912 */ 2913 if (tcp_bblogging_on(rack->rc_tp)) { 2914 union tcp_log_stackspecific log; 2915 struct timeval tv; 2916 2917 /* Convert our ms to a microsecond */ 2918 memset(&log, 0, sizeof(log)); 2919 log.u_bbr.flex1 = rtt; 2920 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2921 log.u_bbr.flex7 = 1; 2922 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2923 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2924 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2925 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2926 log.u_bbr.pacing_gain = rack->r_must_retran; 2927 /* 2928 * We capture in delRate the upper 32 bits as 2929 * the confidence level we had declared, and the 2930 * lower 32 bits as the actual RTT using the arrival 2931 * timestamp. 
2932 */ 2933 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 2934 log.u_bbr.delRate <<= 32; 2935 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 2936 /* Lets capture all the things that make up t_rtxcur */ 2937 log.u_bbr.applimited = rack_rto_min; 2938 log.u_bbr.epoch = rack_rto_max; 2939 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 2940 log.u_bbr.lost = rack_rto_min; 2941 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 2942 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 2943 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 2944 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 2945 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 2946 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2947 &rack->rc_inp->inp_socket->so_rcv, 2948 &rack->rc_inp->inp_socket->so_snd, 2949 TCP_LOG_RTT, 0, 2950 0, &log, false, &tv); 2951 } 2952 } 2953 2954 static void 2955 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 2956 { 2957 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2958 union tcp_log_stackspecific log; 2959 struct timeval tv; 2960 2961 /* Convert our ms to a microsecond */ 2962 memset(&log, 0, sizeof(log)); 2963 log.u_bbr.flex1 = rtt; 2964 log.u_bbr.flex2 = send_time; 2965 log.u_bbr.flex3 = ack_time; 2966 log.u_bbr.flex4 = where; 2967 log.u_bbr.flex7 = 2; 2968 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2969 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2970 log.u_bbr.bw_inuse <<= 32; 2971 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2972 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2973 &rack->rc_inp->inp_socket->so_rcv, 2974 &rack->rc_inp->inp_socket->so_snd, 2975 TCP_LOG_RTT, 0, 2976 0, &log, false, &tv); 2977 } 2978 } 2979 2980 2981 static void 2982 rack_log_rtt_sendmap(struct tcp_rack *rack, uint32_t idx, uint64_t tsv, uint32_t tsecho) 2983 { 2984 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2985 union tcp_log_stackspecific log; 2986 struct timeval tv; 2987 2988 /* Convert our ms to a microsecond */ 2989 memset(&log, 0, sizeof(log)); 2990 log.u_bbr.flex1 = idx; 2991 log.u_bbr.flex2 = rack_ts_to_msec(tsv); 2992 log.u_bbr.flex3 = tsecho; 2993 log.u_bbr.flex7 = 3; 2994 log.u_bbr.rttProp = tsv; 2995 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2996 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 2997 log.u_bbr.bw_inuse <<= 32; 2998 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 2999 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3000 &rack->rc_inp->inp_socket->so_rcv, 3001 &rack->rc_inp->inp_socket->so_snd, 3002 TCP_LOG_RTT, 0, 3003 0, &log, false, &tv); 3004 } 3005 } 3006 3007 3008 static inline void 3009 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 3010 { 3011 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3012 union tcp_log_stackspecific log; 3013 struct timeval tv; 3014 3015 memset(&log, 0, sizeof(log)); 3016 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3017 log.u_bbr.flex1 = line; 3018 log.u_bbr.flex2 = tick; 3019 log.u_bbr.flex3 = tp->t_maxunacktime; 3020 log.u_bbr.flex4 = tp->t_acktime; 3021 log.u_bbr.flex8 = event; 3022 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3023 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3024 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3025 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3026 log.u_bbr.pacing_gain = rack->r_must_retran; 3027 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3028 log.u_bbr.bw_inuse <<= 32; 3029 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 
3030 TCP_LOG_EVENTP(tp, NULL, 3031 &rack->rc_inp->inp_socket->so_rcv, 3032 &rack->rc_inp->inp_socket->so_snd, 3033 BBR_LOG_PROGRESS, 0, 3034 0, &log, false, &tv); 3035 } 3036 } 3037 3038 static void 3039 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv, int line) 3040 { 3041 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3042 union tcp_log_stackspecific log; 3043 3044 memset(&log, 0, sizeof(log)); 3045 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3046 log.u_bbr.flex1 = slot; 3047 if (rack->rack_no_prr) 3048 log.u_bbr.flex2 = 0; 3049 else 3050 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 3051 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3052 log.u_bbr.flex6 = line; 3053 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 3054 log.u_bbr.flex8 = rack->rc_in_persist; 3055 log.u_bbr.timeStamp = cts; 3056 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3057 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3058 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3059 log.u_bbr.pacing_gain = rack->r_must_retran; 3060 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3061 &rack->rc_inp->inp_socket->so_rcv, 3062 &rack->rc_inp->inp_socket->so_snd, 3063 BBR_LOG_BBRSND, 0, 3064 0, &log, false, tv); 3065 } 3066 } 3067 3068 static void 3069 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 3070 { 3071 if (tcp_bblogging_on(rack->rc_tp)) { 3072 union tcp_log_stackspecific log; 3073 struct timeval tv; 3074 3075 memset(&log, 0, sizeof(log)); 3076 log.u_bbr.flex1 = did_out; 3077 log.u_bbr.flex2 = nxt_pkt; 3078 log.u_bbr.flex3 = way_out; 3079 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3080 if (rack->rack_no_prr) 3081 log.u_bbr.flex5 = 0; 3082 else 3083 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3084 log.u_bbr.flex6 = nsegs; 3085 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 3086 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 3087 log.u_bbr.flex7 <<= 1; 3088 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 3089 log.u_bbr.flex7 <<= 1; 3090 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 3091 log.u_bbr.flex8 = rack->rc_in_persist; 3092 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3093 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3094 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3095 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 3096 log.u_bbr.use_lt_bw <<= 1; 3097 log.u_bbr.use_lt_bw |= rack->r_might_revert; 3098 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3099 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3100 log.u_bbr.pacing_gain = rack->r_must_retran; 3101 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3102 log.u_bbr.bw_inuse <<= 32; 3103 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3104 log.u_bbr.epoch = rack->rc_inp->inp_socket->so_snd.sb_hiwat; 3105 log.u_bbr.lt_epoch = rack->rc_inp->inp_socket->so_rcv.sb_hiwat; 3106 log.u_bbr.lost = rack->rc_tp->t_srtt; 3107 log.u_bbr.pkt_epoch = rack->rc_tp->rfbuf_cnt; 3108 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3109 &rack->rc_inp->inp_socket->so_rcv, 3110 &rack->rc_inp->inp_socket->so_snd, 3111 BBR_LOG_DOSEG_DONE, 0, 3112 0, &log, false, &tv); 3113 } 3114 } 3115 3116 static void 3117 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 3118 { 3119 if (tcp_bblogging_on(rack->rc_tp)) { 3120 union tcp_log_stackspecific log; 3121 
struct timeval tv; 3122 3123 memset(&log, 0, sizeof(log)); 3124 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 3125 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 3126 log.u_bbr.flex4 = arg1; 3127 log.u_bbr.flex5 = arg2; 3128 log.u_bbr.flex7 = rack->r_ctl.rc_user_set_min_segs; 3129 log.u_bbr.flex6 = arg3; 3130 log.u_bbr.flex8 = frm; 3131 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3132 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3133 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3134 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 3135 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3136 log.u_bbr.pacing_gain = rack->r_must_retran; 3137 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv, 3138 &tptosocket(tp)->so_snd, 3139 TCP_HDWR_PACE_SIZE, 0, 0, &log, false, &tv); 3140 } 3141 } 3142 3143 static void 3144 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, 3145 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 3146 { 3147 if (tcp_bblogging_on(rack->rc_tp)) { 3148 union tcp_log_stackspecific log; 3149 struct timeval tv; 3150 3151 memset(&log, 0, sizeof(log)); 3152 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3153 log.u_bbr.flex1 = slot; 3154 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 3155 log.u_bbr.flex4 = reason; 3156 if (rack->rack_no_prr) 3157 log.u_bbr.flex5 = 0; 3158 else 3159 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3160 log.u_bbr.flex7 = hpts_calling; 3161 log.u_bbr.flex8 = rack->rc_in_persist; 3162 log.u_bbr.lt_epoch = cwnd_to_use; 3163 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3164 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3165 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3166 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3167 log.u_bbr.pacing_gain = rack->r_must_retran; 3168 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 3169 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3170 log.u_bbr.bw_inuse <<= 32; 3171 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3172 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3173 &rack->rc_inp->inp_socket->so_rcv, 3174 &rack->rc_inp->inp_socket->so_snd, 3175 BBR_LOG_JUSTRET, 0, 3176 tlen, &log, false, &tv); 3177 } 3178 } 3179 3180 static void 3181 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 3182 struct timeval *tv, uint32_t flags_on_entry) 3183 { 3184 if (tcp_bblogging_on(rack->rc_tp)) { 3185 union tcp_log_stackspecific log; 3186 3187 memset(&log, 0, sizeof(log)); 3188 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3189 log.u_bbr.flex1 = line; 3190 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 3191 log.u_bbr.flex3 = flags_on_entry; 3192 log.u_bbr.flex4 = us_cts; 3193 if (rack->rack_no_prr) 3194 log.u_bbr.flex5 = 0; 3195 else 3196 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3197 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 3198 log.u_bbr.flex7 = hpts_removed; 3199 log.u_bbr.flex8 = 1; 3200 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 3201 log.u_bbr.timeStamp = us_cts; 3202 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3203 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3204 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3205 log.u_bbr.pacing_gain = rack->r_must_retran; 3206 log.u_bbr.bw_inuse = rack->r_ctl.current_round; 3207 log.u_bbr.bw_inuse <<= 32; 3208 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; 3209 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3210 &rack->rc_inp->inp_socket->so_rcv, 3211 &rack->rc_inp->inp_socket->so_snd, 3212 
BBR_LOG_TIMERCANC, 0, 3213 0, &log, false, tv); 3214 } 3215 } 3216 3217 static void 3218 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 3219 uint32_t flex1, uint32_t flex2, 3220 uint32_t flex3, uint32_t flex4, 3221 uint32_t flex5, uint32_t flex6, 3222 uint16_t flex7, uint8_t mod) 3223 { 3224 if (tcp_bblogging_on(rack->rc_tp)) { 3225 union tcp_log_stackspecific log; 3226 struct timeval tv; 3227 3228 if (mod == 1) { 3229 /* No you can't use 1, its for the real to cancel */ 3230 return; 3231 } 3232 memset(&log, 0, sizeof(log)); 3233 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3234 log.u_bbr.flex1 = flex1; 3235 log.u_bbr.flex2 = flex2; 3236 log.u_bbr.flex3 = flex3; 3237 log.u_bbr.flex4 = flex4; 3238 log.u_bbr.flex5 = flex5; 3239 log.u_bbr.flex6 = flex6; 3240 log.u_bbr.flex7 = flex7; 3241 log.u_bbr.flex8 = mod; 3242 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3243 &rack->rc_inp->inp_socket->so_rcv, 3244 &rack->rc_inp->inp_socket->so_snd, 3245 BBR_LOG_TIMERCANC, 0, 3246 0, &log, false, &tv); 3247 } 3248 } 3249 3250 static void 3251 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 3252 { 3253 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3254 union tcp_log_stackspecific log; 3255 struct timeval tv; 3256 3257 memset(&log, 0, sizeof(log)); 3258 log.u_bbr.flex1 = timers; 3259 log.u_bbr.flex2 = ret; 3260 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 3261 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3262 log.u_bbr.flex5 = cts; 3263 if (rack->rack_no_prr) 3264 log.u_bbr.flex6 = 0; 3265 else 3266 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; 3267 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3268 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3269 log.u_bbr.pacing_gain = rack->r_must_retran; 3270 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3271 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3272 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3273 &rack->rc_inp->inp_socket->so_rcv, 3274 &rack->rc_inp->inp_socket->so_snd, 3275 BBR_LOG_TO_PROCESS, 0, 3276 0, &log, false, &tv); 3277 } 3278 } 3279 3280 static void 3281 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line) 3282 { 3283 if (tcp_bblogging_on(rack->rc_tp)) { 3284 union tcp_log_stackspecific log; 3285 struct timeval tv; 3286 3287 memset(&log, 0, sizeof(log)); 3288 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 3289 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 3290 if (rack->rack_no_prr) 3291 log.u_bbr.flex3 = 0; 3292 else 3293 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 3294 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 3295 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 3296 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 3297 log.u_bbr.flex7 = line; 3298 log.u_bbr.flex8 = frm; 3299 log.u_bbr.pkts_out = orig_cwnd; 3300 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3301 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3302 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 3303 log.u_bbr.use_lt_bw <<= 1; 3304 log.u_bbr.use_lt_bw |= rack->r_might_revert; 3305 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3306 &rack->rc_inp->inp_socket->so_rcv, 3307 &rack->rc_inp->inp_socket->so_snd, 3308 BBR_LOG_BBRUPD, 0, 3309 0, &log, false, &tv); 3310 } 3311 } 3312 3313 static void 3314 rack_counter_destroy(void) 3315 { 3316 counter_u64_free(rack_total_bytes); 3317 counter_u64_free(rack_fto_send); 3318 counter_u64_free(rack_fto_rsm_send); 3319 counter_u64_free(rack_nfto_resend); 3320 counter_u64_free(rack_hw_pace_init_fail); 3321 counter_u64_free(rack_hw_pace_lost); 3322 
counter_u64_free(rack_non_fto_send); 3323 counter_u64_free(rack_extended_rfo); 3324 counter_u64_free(rack_ack_total); 3325 counter_u64_free(rack_express_sack); 3326 counter_u64_free(rack_sack_total); 3327 counter_u64_free(rack_move_none); 3328 counter_u64_free(rack_move_some); 3329 counter_u64_free(rack_sack_attacks_detected); 3330 counter_u64_free(rack_sack_attacks_reversed); 3331 counter_u64_free(rack_sack_attacks_suspect); 3332 counter_u64_free(rack_sack_used_next_merge); 3333 counter_u64_free(rack_sack_used_prev_merge); 3334 counter_u64_free(rack_tlp_tot); 3335 counter_u64_free(rack_tlp_newdata); 3336 counter_u64_free(rack_tlp_retran); 3337 counter_u64_free(rack_tlp_retran_bytes); 3338 counter_u64_free(rack_to_tot); 3339 counter_u64_free(rack_saw_enobuf); 3340 counter_u64_free(rack_saw_enobuf_hw); 3341 counter_u64_free(rack_saw_enetunreach); 3342 counter_u64_free(rack_hot_alloc); 3343 counter_u64_free(rack_to_alloc); 3344 counter_u64_free(rack_to_alloc_hard); 3345 counter_u64_free(rack_to_alloc_emerg); 3346 counter_u64_free(rack_to_alloc_limited); 3347 counter_u64_free(rack_alloc_limited_conns); 3348 counter_u64_free(rack_split_limited); 3349 counter_u64_free(rack_multi_single_eq); 3350 counter_u64_free(rack_rxt_clamps_cwnd); 3351 counter_u64_free(rack_rxt_clamps_cwnd_uniq); 3352 counter_u64_free(rack_proc_non_comp_ack); 3353 counter_u64_free(rack_sack_proc_all); 3354 counter_u64_free(rack_sack_proc_restart); 3355 counter_u64_free(rack_sack_proc_short); 3356 counter_u64_free(rack_sack_skipped_acked); 3357 counter_u64_free(rack_sack_splits); 3358 counter_u64_free(rack_input_idle_reduces); 3359 counter_u64_free(rack_collapsed_win); 3360 counter_u64_free(rack_collapsed_win_rxt); 3361 counter_u64_free(rack_collapsed_win_rxt_bytes); 3362 counter_u64_free(rack_collapsed_win_seen); 3363 counter_u64_free(rack_try_scwnd); 3364 counter_u64_free(rack_persists_sends); 3365 counter_u64_free(rack_persists_acks); 3366 counter_u64_free(rack_persists_loss); 3367 counter_u64_free(rack_persists_lost_ends); 3368 #ifdef INVARIANTS 3369 counter_u64_free(rack_adjust_map_bw); 3370 #endif 3371 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 3372 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 3373 } 3374 3375 static struct rack_sendmap * 3376 rack_alloc(struct tcp_rack *rack) 3377 { 3378 struct rack_sendmap *rsm; 3379 3380 /* 3381 * First get the top of the list it in 3382 * theory is the "hottest" rsm we have, 3383 * possibly just freed by ack processing. 3384 */ 3385 if (rack->rc_free_cnt > rack_free_cache) { 3386 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 3387 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3388 counter_u64_add(rack_hot_alloc, 1); 3389 rack->rc_free_cnt--; 3390 return (rsm); 3391 } 3392 /* 3393 * Once we get under our free cache we probably 3394 * no longer have a "hot" one available. Lets 3395 * get one from UMA. 3396 */ 3397 rsm = uma_zalloc(rack_zone, M_NOWAIT); 3398 if (rsm) { 3399 rack->r_ctl.rc_num_maps_alloced++; 3400 counter_u64_add(rack_to_alloc, 1); 3401 return (rsm); 3402 } 3403 /* 3404 * Dig in to our aux rsm's (the last two) since 3405 * UMA failed to get us one. 
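	 * (Allocation order, summarized: take the head of rc_free while
	 *  more than rack_free_cache entries are cached -- the hot path
	 *  above; otherwise ask the UMA zone for a fresh entry; and only
	 *  when UMA fails do we consume the reserved tail of the free
	 *  list here, counting it as an emergency allocation.)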
3406 */ 3407 if (rack->rc_free_cnt) { 3408 counter_u64_add(rack_to_alloc_emerg, 1); 3409 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 3410 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3411 rack->rc_free_cnt--; 3412 return (rsm); 3413 } 3414 return (NULL); 3415 } 3416 3417 static struct rack_sendmap * 3418 rack_alloc_full_limit(struct tcp_rack *rack) 3419 { 3420 if ((V_tcp_map_entries_limit > 0) && 3421 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 3422 counter_u64_add(rack_to_alloc_limited, 1); 3423 if (!rack->alloc_limit_reported) { 3424 rack->alloc_limit_reported = 1; 3425 counter_u64_add(rack_alloc_limited_conns, 1); 3426 } 3427 return (NULL); 3428 } 3429 return (rack_alloc(rack)); 3430 } 3431 3432 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 3433 static struct rack_sendmap * 3434 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 3435 { 3436 struct rack_sendmap *rsm; 3437 3438 if (limit_type) { 3439 /* currently there is only one limit type */ 3440 if (rack->r_ctl.rc_split_limit > 0 && 3441 rack->r_ctl.rc_num_split_allocs >= rack->r_ctl.rc_split_limit) { 3442 counter_u64_add(rack_split_limited, 1); 3443 if (!rack->alloc_limit_reported) { 3444 rack->alloc_limit_reported = 1; 3445 counter_u64_add(rack_alloc_limited_conns, 1); 3446 } 3447 return (NULL); 3448 } 3449 } 3450 3451 /* allocate and mark in the limit type, if set */ 3452 rsm = rack_alloc(rack); 3453 if (rsm != NULL && limit_type) { 3454 rsm->r_limit_type = limit_type; 3455 rack->r_ctl.rc_num_split_allocs++; 3456 } 3457 return (rsm); 3458 } 3459 3460 static void 3461 rack_free_trim(struct tcp_rack *rack) 3462 { 3463 struct rack_sendmap *rsm; 3464 3465 /* 3466 * Free up all the tail entries until 3467 * we get our list down to the limit. 
3468 */ 3469 while (rack->rc_free_cnt > rack_free_cache) { 3470 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); 3471 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3472 rack->rc_free_cnt--; 3473 rack->r_ctl.rc_num_maps_alloced--; 3474 uma_zfree(rack_zone, rsm); 3475 } 3476 } 3477 3478 static void 3479 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 3480 { 3481 if (rsm->r_flags & RACK_APP_LIMITED) { 3482 KASSERT((rack->r_ctl.rc_app_limited_cnt > 0), 3483 ("app_cnt %u, rsm %p", rack->r_ctl.rc_app_limited_cnt, rsm)); 3484 rack->r_ctl.rc_app_limited_cnt--; 3485 } 3486 if (rsm->r_limit_type) { 3487 /* currently there is only one limit type */ 3488 rack->r_ctl.rc_num_split_allocs--; 3489 } 3490 if (rsm == rack->r_ctl.rc_first_appl) { 3491 rack->r_ctl.cleared_app_ack_seq = rsm->r_end; 3492 rack->r_ctl.cleared_app_ack = 1; 3493 if (rack->r_ctl.rc_app_limited_cnt == 0) 3494 rack->r_ctl.rc_first_appl = NULL; 3495 else 3496 rack->r_ctl.rc_first_appl = tqhash_find(rack->r_ctl.tqh, rsm->r_nseq_appl); 3497 } 3498 if (rsm == rack->r_ctl.rc_resend) 3499 rack->r_ctl.rc_resend = NULL; 3500 if (rsm == rack->r_ctl.rc_end_appl) 3501 rack->r_ctl.rc_end_appl = NULL; 3502 if (rack->r_ctl.rc_tlpsend == rsm) 3503 rack->r_ctl.rc_tlpsend = NULL; 3504 if (rack->r_ctl.rc_sacklast == rsm) 3505 rack->r_ctl.rc_sacklast = NULL; 3506 memset(rsm, 0, sizeof(struct rack_sendmap)); 3507 /* Make sure we are not going to overrun our count limit of 0xff */ 3508 if ((rack->rc_free_cnt + 1) > RACK_FREE_CNT_MAX) { 3509 rack_free_trim(rack); 3510 } 3511 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); 3512 rack->rc_free_cnt++; 3513 } 3514 3515 static uint32_t 3516 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack) 3517 { 3518 uint64_t srtt, bw, len, tim; 3519 uint32_t segsiz, def_len, minl; 3520 3521 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3522 def_len = rack_def_data_window * segsiz; 3523 if (rack->rc_gp_filled == 0) { 3524 /* 3525 * We have no measurement (IW is in flight?) so 3526 * we can only guess using our data_window sysctl 3527 * value (usually 20MSS). 3528 */ 3529 return (def_len); 3530 } 3531 /* 3532 * Now we have a number of factors to consider. 3533 * 3534 * 1) We have a desired BDP which is usually 3535 * at least 2. 3536 * 2) We have a minimum number of rtt's usually 1 SRTT 3537 * but we allow it too to be more. 3538 * 3) We want to make sure a measurement last N useconds (if 3539 * we have set rack_min_measure_usec. 3540 * 3541 * We handle the first concern here by trying to create a data 3542 * window of max(rack_def_data_window, DesiredBDP). The 3543 * second concern we handle in not letting the measurement 3544 * window end normally until at least the required SRTT's 3545 * have gone by which is done further below in 3546 * rack_enough_for_measurement(). Finally the third concern 3547 * we also handle here by calculating how long that time 3548 * would take at the current BW and then return the 3549 * max of our first calculation and that length. Note 3550 * that if rack_min_measure_usec is 0, we don't deal 3551 * with concern 3. Also for both Concern 1 and 3 an 3552 * application limited period could end the measurement 3553 * earlier. 3554 * 3555 * So lets calculate the BDP with the "known" b/w using 3556 * the SRTT as our rtt and then multiply it by the goal. 
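	 * A worked example with made-up numbers: at bw = 12,500,000
	 * bytes/sec (100 Mbps) and srtt = 40,000 usec,
	 *   len = (bw * srtt) / HPTS_USEC_IN_SEC = 500,000 bytes,
	 * and with rack_goal_bdp at 2 that becomes 1,000,000 bytes,
	 * rounded up to a segment multiple.  If rack_min_measure_usec
	 * were set to 200,000 usec, the time floor would be
	 *   minl = (200,000 * bw) / HPTS_USEC_IN_SEC = 2,500,000 bytes,
	 * and the larger of the two values wins.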
3557 */ 3558 bw = rack_get_bw(rack); 3559 srtt = (uint64_t)tp->t_srtt; 3560 len = bw * srtt; 3561 len /= (uint64_t)HPTS_USEC_IN_SEC; 3562 len *= max(1, rack_goal_bdp); 3563 /* Now we need to round up to the nearest MSS */ 3564 len = roundup(len, segsiz); 3565 if (rack_min_measure_usec) { 3566 /* Now calculate our min length for this b/w */ 3567 tim = rack_min_measure_usec; 3568 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC; 3569 if (minl == 0) 3570 minl = 1; 3571 minl = roundup(minl, segsiz); 3572 if (len < minl) 3573 len = minl; 3574 } 3575 /* 3576 * Now if we have a very small window we want 3577 * to attempt to get the window that is 3578 * as small as possible. This happens on 3579 * low b/w connections and we don't want to 3580 * span huge numbers of rtt's between measurements. 3581 * 3582 * We basically include 2 over our "MIN window" so 3583 * that the measurement can be shortened (possibly) by 3584 * an ack'ed packet. 3585 */ 3586 if (len < def_len) 3587 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz))); 3588 else 3589 return (max((uint32_t)len, def_len)); 3590 3591 } 3592 3593 static int 3594 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality) 3595 { 3596 uint32_t tim, srtts, segsiz; 3597 3598 /* 3599 * Has enough time passed for the GP measurement to be valid? 3600 */ 3601 if (SEQ_LT(th_ack, tp->gput_seq)) { 3602 /* Not enough bytes yet */ 3603 return (0); 3604 } 3605 if ((tp->snd_max == tp->snd_una) || 3606 (th_ack == tp->snd_max)){ 3607 /* 3608 * All is acked quality of all acked is 3609 * usually low or medium, but we in theory could split 3610 * all acked into two cases, where you got 3611 * a signifigant amount of your window and 3612 * where you did not. For now we leave it 3613 * but it is something to contemplate in the 3614 * future. The danger here is that delayed ack 3615 * is effecting the last byte (which is a 50:50 chance). 3616 */ 3617 *quality = RACK_QUALITY_ALLACKED; 3618 return (1); 3619 } 3620 if (SEQ_GEQ(th_ack, tp->gput_ack)) { 3621 /* 3622 * We obtained our entire window of data we wanted 3623 * no matter if we are in recovery or not then 3624 * its ok since expanding the window does not 3625 * make things fuzzy (or at least not as much). 3626 */ 3627 *quality = RACK_QUALITY_HIGH; 3628 return (1); 3629 } 3630 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3631 if (SEQ_LT(th_ack, tp->gput_ack) && 3632 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 3633 /* Not enough bytes yet */ 3634 return (0); 3635 } 3636 if (rack->r_ctl.rc_first_appl && 3637 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { 3638 /* 3639 * We are up to the app limited send point 3640 * we have to measure irrespective of the time.. 3641 */ 3642 *quality = RACK_QUALITY_APPLIMITED; 3643 return (1); 3644 } 3645 /* Now what about time? */ 3646 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); 3647 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; 3648 if ((tim >= srtts) && (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) { 3649 /* 3650 * We do not allow a measurement if we are in recovery 3651 * that would shrink the goodput window we wanted. 3652 * This is to prevent cloudyness of when the last send 3653 * was actually made. 
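	 * (Return-path summary: RACK_QUALITY_ALLACKED when everything
	 *  outstanding is acked, RACK_QUALITY_HIGH when the full goodput
	 *  window is acked or at least rack_min_srtts worth of gp_srtt
	 *  have elapsed outside of recovery, RACK_QUALITY_APPLIMITED once
	 *  the ack reaches the app-limited point, and 0 when none of
	 *  those hold yet.)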
3654 */ 3655 *quality = RACK_QUALITY_HIGH; 3656 return (1); 3657 } 3658 /* Nope not even a full SRTT has passed */ 3659 return (0); 3660 } 3661 3662 static void 3663 rack_log_timely(struct tcp_rack *rack, 3664 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd, 3665 uint64_t up_bnd, int line, uint8_t method) 3666 { 3667 if (tcp_bblogging_on(rack->rc_tp)) { 3668 union tcp_log_stackspecific log; 3669 struct timeval tv; 3670 3671 memset(&log, 0, sizeof(log)); 3672 log.u_bbr.flex1 = logged; 3673 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; 3674 log.u_bbr.flex2 <<= 4; 3675 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; 3676 log.u_bbr.flex2 <<= 4; 3677 log.u_bbr.flex2 |= rack->rc_gp_incr; 3678 log.u_bbr.flex2 <<= 4; 3679 log.u_bbr.flex2 |= rack->rc_gp_bwred; 3680 log.u_bbr.flex3 = rack->rc_gp_incr; 3681 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3682 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; 3683 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; 3684 log.u_bbr.flex7 = rack->rc_gp_bwred; 3685 log.u_bbr.flex8 = method; 3686 log.u_bbr.cur_del_rate = cur_bw; 3687 log.u_bbr.delRate = low_bnd; 3688 log.u_bbr.bw_inuse = up_bnd; 3689 log.u_bbr.rttProp = rack_get_bw(rack); 3690 log.u_bbr.pkt_epoch = line; 3691 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3692 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3693 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3694 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3695 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3696 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; 3697 log.u_bbr.cwnd_gain <<= 1; 3698 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; 3699 log.u_bbr.cwnd_gain <<= 1; 3700 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 3701 log.u_bbr.cwnd_gain <<= 1; 3702 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 3703 log.u_bbr.lost = rack->r_ctl.rc_loss_count; 3704 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3705 &rack->rc_inp->inp_socket->so_rcv, 3706 &rack->rc_inp->inp_socket->so_snd, 3707 TCP_TIMELY_WORK, 0, 3708 0, &log, false, &tv); 3709 } 3710 } 3711 3712 static int 3713 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) 3714 { 3715 /* 3716 * Before we increase we need to know if 3717 * the estimate just made was less than 3718 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est) 3719 * 3720 * If we already are pacing at a fast enough 3721 * rate to push us faster there is no sense of 3722 * increasing. 3723 * 3724 * We first caculate our actual pacing rate (ss or ca multiplier 3725 * times our cur_bw). 3726 * 3727 * Then we take the last measured rate and multipy by our 3728 * maximum pacing overage to give us a max allowable rate. 3729 * 3730 * If our act_rate is smaller than our max_allowable rate 3731 * then we should increase. Else we should hold steady. 3732 * 3733 */ 3734 uint64_t act_rate, max_allow_rate; 3735 3736 if (rack_timely_no_stopping) 3737 return (1); 3738 3739 if ((cur_bw == 0) || (last_bw_est == 0)) { 3740 /* 3741 * Initial startup case or 3742 * everything is acked case. 3743 */ 3744 rack_log_timely(rack, mult, cur_bw, 0, 0, 3745 __LINE__, 9); 3746 return (1); 3747 } 3748 if (mult <= 100) { 3749 /* 3750 * We can always pace at or slightly above our rate. 
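	 * For the general case below, a made-up example: cur_bw =
	 * 1,250,000 bytes/sec with mult = 120 gives act_rate = 1,500,000;
	 * with last_bw_est = 1,400,000 and rack_max_per_above at its
	 * noted 10, max_allow_rate = 1,540,000, so act_rate is still
	 * under the allowance and we permit a further raise.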
3751 */ 3752 rack_log_timely(rack, mult, cur_bw, 0, 0, 3753 __LINE__, 9); 3754 return (1); 3755 } 3756 act_rate = cur_bw * (uint64_t)mult; 3757 act_rate /= 100; 3758 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100); 3759 max_allow_rate /= 100; 3760 if (act_rate < max_allow_rate) { 3761 /* 3762 * Here the rate we are actually pacing at 3763 * is smaller than 10% above our last measurement. 3764 * This means we are pacing below what we would 3765 * like to try to achieve (plus some wiggle room). 3766 */ 3767 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3768 __LINE__, 9); 3769 return (1); 3770 } else { 3771 /* 3772 * Here we are already pacing at least rack_max_per_above(10%) 3773 * what we are getting back. This indicates most likely 3774 * that we are being limited (cwnd/rwnd/app) and can't 3775 * get any more b/w. There is no sense of trying to 3776 * raise up the pacing rate its not speeding us up 3777 * and we already are pacing faster than we are getting. 3778 */ 3779 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3780 __LINE__, 8); 3781 return (0); 3782 } 3783 } 3784 3785 static void 3786 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack) 3787 { 3788 /* 3789 * When we drag bottom, we want to assure 3790 * that no multiplier is below 1.0, if so 3791 * we want to restore it to at least that. 3792 */ 3793 if (rack->r_ctl.rack_per_of_gp_rec < 100) { 3794 /* This is unlikely we usually do not touch recovery */ 3795 rack->r_ctl.rack_per_of_gp_rec = 100; 3796 } 3797 if (rack->r_ctl.rack_per_of_gp_ca < 100) { 3798 rack->r_ctl.rack_per_of_gp_ca = 100; 3799 } 3800 if (rack->r_ctl.rack_per_of_gp_ss < 100) { 3801 rack->r_ctl.rack_per_of_gp_ss = 100; 3802 } 3803 } 3804 3805 static void 3806 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack) 3807 { 3808 if (rack->r_ctl.rack_per_of_gp_ca > 100) { 3809 rack->r_ctl.rack_per_of_gp_ca = 100; 3810 } 3811 if (rack->r_ctl.rack_per_of_gp_ss > 100) { 3812 rack->r_ctl.rack_per_of_gp_ss = 100; 3813 } 3814 } 3815 3816 static void 3817 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override) 3818 { 3819 int32_t calc, logged, plus; 3820 3821 logged = 0; 3822 3823 if (rack->rc_skip_timely) 3824 return; 3825 if (override) { 3826 /* 3827 * override is passed when we are 3828 * loosing b/w and making one last 3829 * gasp at trying to not loose out 3830 * to a new-reno flow. 3831 */ 3832 goto extra_boost; 3833 } 3834 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */ 3835 if (rack->rc_gp_incr && 3836 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { 3837 /* 3838 * Reset and get 5 strokes more before the boost. Note 3839 * that the count is 0 based so we have to add one. 
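	 * (Example with a hypothetical rack_gp_increase_per of 2: the
	 *  normal step adds 2 percentage points to the multiplier, while
	 *  this boost path adds 2 * RACK_TIMELY_CNT_BOOST points -- 10 if
	 *  that count is the 5 mentioned above -- in one shot and then
	 *  restarts the increase count.)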
3840 */ 3841 extra_boost: 3842 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3843 rack->rc_gp_timely_inc_cnt = 0; 3844 } else 3845 plus = (uint32_t)rack_gp_increase_per; 3846 /* Must be at least 1% increase for true timely increases */ 3847 if ((plus < 1) && 3848 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3849 plus = 1; 3850 if (rack->rc_gp_saw_rec && 3851 (rack->rc_gp_no_rec_chg == 0) && 3852 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3853 rack->r_ctl.rack_per_of_gp_rec)) { 3854 /* We have been in recovery ding it too */ 3855 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 3856 if (calc > 0xffff) 3857 calc = 0xffff; 3858 logged |= 1; 3859 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 3860 if (rack->r_ctl.rack_per_upper_bound_ca && 3861 (rack->rc_dragged_bottom == 0) && 3862 (rack->r_ctl.rack_per_of_gp_rec > rack->r_ctl.rack_per_upper_bound_ca)) 3863 rack->r_ctl.rack_per_of_gp_rec = rack->r_ctl.rack_per_upper_bound_ca; 3864 } 3865 if (rack->rc_gp_saw_ca && 3866 (rack->rc_gp_saw_ss == 0) && 3867 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3868 rack->r_ctl.rack_per_of_gp_ca)) { 3869 /* In CA */ 3870 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 3871 if (calc > 0xffff) 3872 calc = 0xffff; 3873 logged |= 2; 3874 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 3875 if (rack->r_ctl.rack_per_upper_bound_ca && 3876 (rack->rc_dragged_bottom == 0) && 3877 (rack->r_ctl.rack_per_of_gp_ca > rack->r_ctl.rack_per_upper_bound_ca)) 3878 rack->r_ctl.rack_per_of_gp_ca = rack->r_ctl.rack_per_upper_bound_ca; 3879 } 3880 if (rack->rc_gp_saw_ss && 3881 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3882 rack->r_ctl.rack_per_of_gp_ss)) { 3883 /* In SS */ 3884 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 3885 if (calc > 0xffff) 3886 calc = 0xffff; 3887 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 3888 if (rack->r_ctl.rack_per_upper_bound_ss && 3889 (rack->rc_dragged_bottom == 0) && 3890 (rack->r_ctl.rack_per_of_gp_ss > rack->r_ctl.rack_per_upper_bound_ss)) 3891 rack->r_ctl.rack_per_of_gp_ss = rack->r_ctl.rack_per_upper_bound_ss; 3892 logged |= 4; 3893 } 3894 if (logged && 3895 (rack->rc_gp_incr == 0)){ 3896 /* Go into increment mode */ 3897 rack->rc_gp_incr = 1; 3898 rack->rc_gp_timely_inc_cnt = 0; 3899 } 3900 if (rack->rc_gp_incr && 3901 logged && 3902 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 3903 rack->rc_gp_timely_inc_cnt++; 3904 } 3905 rack_log_timely(rack, logged, plus, 0, 0, 3906 __LINE__, 1); 3907 } 3908 3909 static uint32_t 3910 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 3911 { 3912 /*- 3913 * norm_grad = rtt_diff / minrtt; 3914 * new_per = curper * (1 - B * norm_grad) 3915 * 3916 * B = rack_gp_decrease_per (default 80%) 3917 * rtt_dif = input var current rtt-diff 3918 * curper = input var current percentage 3919 * minrtt = from rack filter 3920 * 3921 * In order to do the floating point calculations above we 3922 * do an integer conversion. 
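	 * A quick sanity check with made-up numbers: curper = 200,
	 * rtt_diff = 5000 usec, minrtt = 25000 usec and B = 80 give
	 *   norm_grad = 0.2, reduce_by = 1 - (0.8 * 0.2) = 0.84,
	 *   new_per   = 200 * 0.84 = 168,
	 * and the fixed-point expression below yields the same 168.
	 *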
The code looks confusing so let me 3923 * translate it into something that use more variables and 3924 * is clearer for us humans :) 3925 * 3926 * uint64_t norm_grad, inverse, reduce_by, final_result; 3927 * uint32_t perf; 3928 * 3929 * norm_grad = (((uint64_t)rtt_diff * 1000000) / 3930 * (uint64_t)get_filter_small(&rack->r_ctl.rc_gp_min_rtt)); 3931 * inverse = ((uint64_t)rack_gp_decrease * (uint64_t)1000000) * norm_grad; 3932 * inverse /= 1000000; 3933 * reduce_by = (1000000 - inverse); 3934 * final_result = (cur_per * reduce_by) / 1000000; 3935 * perf = (uint32_t)final_result; 3936 */ 3937 uint64_t perf; 3938 3939 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3940 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 3941 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 3942 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 3943 (uint64_t)1000000)) / 3944 (uint64_t)1000000); 3945 if (perf > curper) { 3946 /* TSNH */ 3947 perf = curper - 1; 3948 } 3949 return ((uint32_t)perf); 3950 } 3951 3952 static uint32_t 3953 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 3954 { 3955 /* 3956 * highrttthresh 3957 * result = curper * (1 - (B * ( 1 - ------ )) 3958 * gp_srtt 3959 * 3960 * B = rack_gp_decrease_per (default .8 i.e. 80) 3961 * highrttthresh = filter_min * rack_gp_rtt_maxmul 3962 */ 3963 uint64_t perf; 3964 uint32_t highrttthresh; 3965 3966 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 3967 3968 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3969 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 3970 ((uint64_t)highrttthresh * (uint64_t)1000000) / 3971 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 3972 if (tcp_bblogging_on(rack->rc_tp)) { 3973 uint64_t log1; 3974 3975 log1 = rtt; 3976 log1 <<= 32; 3977 log1 |= highrttthresh; 3978 rack_log_timely(rack, 3979 rack_gp_decrease_per, 3980 (uint64_t)curper, 3981 log1, 3982 perf, 3983 __LINE__, 3984 15); 3985 } 3986 return (perf); 3987 } 3988 3989 static void 3990 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 3991 { 3992 uint64_t logvar, logvar2, logvar3; 3993 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 3994 3995 if (rack->rc_skip_timely) 3996 return; 3997 if (rack->rc_gp_incr) { 3998 /* Turn off increment counting */ 3999 rack->rc_gp_incr = 0; 4000 rack->rc_gp_timely_inc_cnt = 0; 4001 } 4002 ss_red = ca_red = rec_red = 0; 4003 logged = 0; 4004 /* Calculate the reduction value */ 4005 if (rtt_diff < 0) { 4006 rtt_diff *= -1; 4007 } 4008 /* Must be at least 1% reduction */ 4009 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 4010 /* We have been in recovery ding it too */ 4011 if (timely_says == 2) { 4012 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 4013 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 4014 if (alt < new_per) 4015 val = alt; 4016 else 4017 val = new_per; 4018 } else 4019 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 4020 if (rack->r_ctl.rack_per_of_gp_rec > val) { 4021 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 4022 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 4023 } else { 4024 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 4025 rec_red = 0; 4026 } 4027 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 4028 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 4029 logged |= 1; 4030 } 4031 if (rack->rc_gp_saw_ss) { 4032 /* Sent in SS */ 4033 if 
(timely_says == 2) { 4034 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 4035 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 4036 if (alt < new_per) 4037 val = alt; 4038 else 4039 val = new_per; 4040 } else 4041 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 4042 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 4043 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 4044 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 4045 } else { 4046 ss_red = new_per; 4047 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 4048 logvar = new_per; 4049 logvar <<= 32; 4050 logvar |= alt; 4051 logvar2 = (uint32_t)rtt; 4052 logvar2 <<= 32; 4053 logvar2 |= (uint32_t)rtt_diff; 4054 logvar3 = rack_gp_rtt_maxmul; 4055 logvar3 <<= 32; 4056 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4057 rack_log_timely(rack, timely_says, 4058 logvar2, logvar3, 4059 logvar, __LINE__, 10); 4060 } 4061 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 4062 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 4063 logged |= 4; 4064 } else if (rack->rc_gp_saw_ca) { 4065 /* Sent in CA */ 4066 if (timely_says == 2) { 4067 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 4068 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 4069 if (alt < new_per) 4070 val = alt; 4071 else 4072 val = new_per; 4073 } else 4074 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 4075 if (rack->r_ctl.rack_per_of_gp_ca > val) { 4076 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 4077 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val; 4078 } else { 4079 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 4080 ca_red = 0; 4081 logvar = new_per; 4082 logvar <<= 32; 4083 logvar |= alt; 4084 logvar2 = (uint32_t)rtt; 4085 logvar2 <<= 32; 4086 logvar2 |= (uint32_t)rtt_diff; 4087 logvar3 = rack_gp_rtt_maxmul; 4088 logvar3 <<= 32; 4089 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4090 rack_log_timely(rack, timely_says, 4091 logvar2, logvar3, 4092 logvar, __LINE__, 10); 4093 } 4094 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 4095 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 4096 logged |= 2; 4097 } 4098 if (rack->rc_gp_timely_dec_cnt < 0x7) { 4099 rack->rc_gp_timely_dec_cnt++; 4100 if (rack_timely_dec_clear && 4101 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 4102 rack->rc_gp_timely_dec_cnt = 0; 4103 } 4104 logvar = ss_red; 4105 logvar <<= 32; 4106 logvar |= ca_red; 4107 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 4108 __LINE__, 2); 4109 } 4110 4111 static void 4112 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 4113 uint32_t rtt, uint32_t line, uint8_t reas) 4114 { 4115 if (tcp_bblogging_on(rack->rc_tp)) { 4116 union tcp_log_stackspecific log; 4117 struct timeval tv; 4118 4119 memset(&log, 0, sizeof(log)); 4120 log.u_bbr.flex1 = line; 4121 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 4122 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 4123 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 4124 log.u_bbr.flex5 = rtt; 4125 log.u_bbr.flex6 = rack->rc_highly_buffered; 4126 log.u_bbr.flex6 <<= 1; 4127 log.u_bbr.flex6 |= rack->forced_ack; 4128 log.u_bbr.flex6 <<= 1; 4129 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 4130 log.u_bbr.flex6 <<= 1; 4131 log.u_bbr.flex6 |= rack->in_probe_rtt; 4132 log.u_bbr.flex6 <<= 1; 4133 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 4134 
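		/*
		 * The shifts above leave flex6 holding five flags, most
		 * significant first: bit 4 rc_highly_buffered, bit 3
		 * forced_ack, bit 2 rc_gp_dyn_mul, bit 1 in_probe_rtt and
		 * bit 0 measure_saw_probe_rtt.
		 */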
log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; 4135 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 4136 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 4137 log.u_bbr.flex8 = reas; 4138 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4139 log.u_bbr.delRate = rack_get_bw(rack); 4140 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 4141 log.u_bbr.cur_del_rate <<= 32; 4142 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 4143 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 4144 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 4145 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 4146 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 4147 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 4148 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 4149 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 4150 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4151 log.u_bbr.rttProp = us_cts; 4152 log.u_bbr.rttProp <<= 32; 4153 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 4154 TCP_LOG_EVENTP(rack->rc_tp, NULL, 4155 &rack->rc_inp->inp_socket->so_rcv, 4156 &rack->rc_inp->inp_socket->so_snd, 4157 BBR_LOG_RTT_SHRINKS, 0, 4158 0, &log, false, &rack->r_ctl.act_rcv_time); 4159 } 4160 } 4161 4162 static void 4163 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 4164 { 4165 uint64_t bwdp; 4166 4167 bwdp = rack_get_bw(rack); 4168 bwdp *= (uint64_t)rtt; 4169 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 4170 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 4171 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) { 4172 /* 4173 * A window protocol must be able to have 4 packets 4174 * outstanding as the floor in order to function 4175 * (especially considering delayed ack :D). 4176 */ 4177 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 4178 } 4179 } 4180 4181 static void 4182 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 4183 { 4184 /** 4185 * ProbeRTT is a bit different in rack_pacing than in 4186 * BBR. It is like BBR in that it uses the lowering of 4187 * the RTT as a signal that we saw something new and 4188 * counts from there for how long between. But it is 4189 * different in that its quite simple. It does not 4190 * play with the cwnd and wait until we get down 4191 * to N segments outstanding and hold that for 4192 * 200ms. Instead it just sets the pacing reduction 4193 * rate to a set percentage (70 by default) and hold 4194 * that for a number of recent GP Srtt's. 4195 */ 4196 uint32_t segsiz; 4197 4198 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 4199 if (rack->rc_gp_dyn_mul == 0) 4200 return; 4201 4202 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 4203 /* We are idle */ 4204 return; 4205 } 4206 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 4207 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 4208 /* 4209 * Stop the goodput now, the idea here is 4210 * that future measurements with in_probe_rtt 4211 * won't register if they are not greater so 4212 * we want to get what info (if any) is available 4213 * now. 
4214 */ 4215 rack_do_goodput_measurement(rack->rc_tp, rack, 4216 rack->rc_tp->snd_una, __LINE__, 4217 RACK_QUALITY_PROBERTT); 4218 } 4219 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 4220 rack->r_ctl.rc_time_probertt_entered = us_cts; 4221 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 4222 rack->r_ctl.rc_pace_min_segs); 4223 rack->in_probe_rtt = 1; 4224 rack->measure_saw_probe_rtt = 1; 4225 rack->r_ctl.rc_time_probertt_starts = 0; 4226 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 4227 if (rack_probertt_use_min_rtt_entry) 4228 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 4229 else 4230 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 4231 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4232 __LINE__, RACK_RTTS_ENTERPROBE); 4233 } 4234 4235 static void 4236 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 4237 { 4238 struct rack_sendmap *rsm; 4239 uint32_t segsiz; 4240 4241 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 4242 rack->r_ctl.rc_pace_min_segs); 4243 rack->in_probe_rtt = 0; 4244 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 4245 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 4246 /* 4247 * Stop the goodput now, the idea here is 4248 * that future measurements with in_probe_rtt 4249 * won't register if they are not greater so 4250 * we want to get what info (if any) is available 4251 * now. 4252 */ 4253 rack_do_goodput_measurement(rack->rc_tp, rack, 4254 rack->rc_tp->snd_una, __LINE__, 4255 RACK_QUALITY_PROBERTT); 4256 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 4257 /* 4258 * We don't have enough data to make a measurement. 4259 * So lets just stop and start here after exiting 4260 * probe-rtt. We probably are not interested in 4261 * the results anyway. 4262 */ 4263 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 4264 } 4265 /* 4266 * Measurements through the current snd_max are going 4267 * to be limited by the slower pacing rate. 4268 * 4269 * We need to mark these as app-limited so we 4270 * don't collapse the b/w. 4271 */ 4272 rsm = tqhash_max(rack->r_ctl.tqh); 4273 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 4274 if (rack->r_ctl.rc_app_limited_cnt == 0) 4275 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 4276 else { 4277 /* 4278 * Go out to the end app limited and mark 4279 * this new one as next and move the end_appl up 4280 * to this guy. 4281 */ 4282 if (rack->r_ctl.rc_end_appl) 4283 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 4284 rack->r_ctl.rc_end_appl = rsm; 4285 } 4286 rsm->r_flags |= RACK_APP_LIMITED; 4287 rack->r_ctl.rc_app_limited_cnt++; 4288 } 4289 /* 4290 * Now, we need to examine our pacing rate multipliers. 4291 * If its under 100%, we need to kick it back up to 4292 * 100%. We also don't let it be over our "max" above 4293 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 4294 * Note setting clamp_atexit_prtt to 0 has the effect 4295 * of setting CA/SS to 100% always at exit (which is 4296 * the default behavior). 4297 */ 4298 if (rack_probertt_clear_is) { 4299 rack->rc_gp_incr = 0; 4300 rack->rc_gp_bwred = 0; 4301 rack->rc_gp_timely_inc_cnt = 0; 4302 rack->rc_gp_timely_dec_cnt = 0; 4303 } 4304 /* Do we do any clamping at exit? 
*/ 4305 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 4306 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 4307 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 4308 } 4309 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 4310 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 4311 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 4312 } 4313 /* 4314 * Lets set rtt_diff to 0, so that we will get a "boost" 4315 * after exiting. 4316 */ 4317 rack->r_ctl.rc_rtt_diff = 0; 4318 4319 /* Clear all flags so we start fresh */ 4320 rack->rc_tp->t_bytes_acked = 0; 4321 rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 4322 /* 4323 * If configured to, set the cwnd and ssthresh to 4324 * our targets. 4325 */ 4326 if (rack_probe_rtt_sets_cwnd) { 4327 uint64_t ebdp; 4328 uint32_t setto; 4329 4330 /* Set ssthresh so we get into CA once we hit our target */ 4331 if (rack_probertt_use_min_rtt_exit == 1) { 4332 /* Set to min rtt */ 4333 rack_set_prtt_target(rack, segsiz, 4334 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 4335 } else if (rack_probertt_use_min_rtt_exit == 2) { 4336 /* Set to current gp rtt */ 4337 rack_set_prtt_target(rack, segsiz, 4338 rack->r_ctl.rc_gp_srtt); 4339 } else if (rack_probertt_use_min_rtt_exit == 3) { 4340 /* Set to entry gp rtt */ 4341 rack_set_prtt_target(rack, segsiz, 4342 rack->r_ctl.rc_entry_gp_rtt); 4343 } else { 4344 uint64_t sum; 4345 uint32_t setval; 4346 4347 sum = rack->r_ctl.rc_entry_gp_rtt; 4348 sum *= 10; 4349 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 4350 if (sum >= 20) { 4351 /* 4352 * A highly buffered path needs 4353 * cwnd space for timely to work. 4354 * Lets set things up as if 4355 * we are heading back here again. 4356 */ 4357 setval = rack->r_ctl.rc_entry_gp_rtt; 4358 } else if (sum >= 15) { 4359 /* 4360 * Lets take the smaller of the 4361 * two since we are just somewhat 4362 * buffered. 4363 */ 4364 setval = rack->r_ctl.rc_gp_srtt; 4365 if (setval > rack->r_ctl.rc_entry_gp_rtt) 4366 setval = rack->r_ctl.rc_entry_gp_rtt; 4367 } else { 4368 /* 4369 * Here we are not highly buffered 4370 * and should pick the min we can to 4371 * keep from causing loss. 
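	 * (Scale of the ratio test, with made-up RTTs: an entry GP rtt
	 *  of 60 ms against a current gp_srtt of 20 ms gives sum = 30,
	 *  i.e. the highly-buffered branch; 34 ms vs 20 ms gives
	 *  sum = 17 and we take the smaller of the two rtts; 22 ms vs
	 *  20 ms gives sum = 11 and we fall through to the filtered
	 *  minimum here.)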
4372 */ 4373 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4374 } 4375 rack_set_prtt_target(rack, segsiz, 4376 setval); 4377 } 4378 if (rack_probe_rtt_sets_cwnd > 1) { 4379 /* There is a percentage here to boost */ 4380 ebdp = rack->r_ctl.rc_target_probertt_flight; 4381 ebdp *= rack_probe_rtt_sets_cwnd; 4382 ebdp /= 100; 4383 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 4384 } else 4385 setto = rack->r_ctl.rc_target_probertt_flight; 4386 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 4387 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 4388 /* Enforce a min */ 4389 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 4390 } 4391 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 4392 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 4393 } 4394 rack_log_rtt_shrinks(rack, us_cts, 4395 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4396 __LINE__, RACK_RTTS_EXITPROBE); 4397 /* Clear times last so log has all the info */ 4398 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 4399 rack->r_ctl.rc_time_probertt_entered = us_cts; 4400 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 4401 rack->r_ctl.rc_time_of_last_probertt = us_cts; 4402 } 4403 4404 static void 4405 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 4406 { 4407 /* Check in on probe-rtt */ 4408 4409 if (rack->rc_gp_filled == 0) { 4410 /* We do not do p-rtt unless we have gp measurements */ 4411 return; 4412 } 4413 if (rack->in_probe_rtt) { 4414 uint64_t no_overflow; 4415 uint32_t endtime, must_stay; 4416 4417 if (rack->r_ctl.rc_went_idle_time && 4418 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 4419 /* 4420 * We went idle during prtt, just exit now. 4421 */ 4422 rack_exit_probertt(rack, us_cts); 4423 } else if (rack_probe_rtt_safety_val && 4424 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 4425 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 4426 /* 4427 * Probe RTT safety value triggered! 4428 */ 4429 rack_log_rtt_shrinks(rack, us_cts, 4430 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4431 __LINE__, RACK_RTTS_SAFETY); 4432 rack_exit_probertt(rack, us_cts); 4433 } 4434 /* Calculate the max we will wait */ 4435 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 4436 if (rack->rc_highly_buffered) 4437 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 4438 /* Calculate the min we must wait */ 4439 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 4440 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 4441 TSTMP_LT(us_cts, endtime)) { 4442 uint32_t calc; 4443 /* Do we lower more? 
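	 * Each full gp_srtt we have spent in probe-rtt trims another
	 * rack_per_of_gp_probertt_reduce percentage points off the
	 * probe-rtt pacing multiplier, never going below
	 * rack_per_of_gp_lowthresh; the longer the flight refuses to
	 * drain, the lower we drive the pacing rate.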
*/ 4444 no_exit: 4445 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 4446 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 4447 else 4448 calc = 0; 4449 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 4450 if (calc) { 4451 /* Maybe */ 4452 calc *= rack_per_of_gp_probertt_reduce; 4453 if (calc > rack_per_of_gp_probertt) 4454 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 4455 else 4456 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 4457 /* Limit it too */ 4458 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 4459 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 4460 } 4461 /* We must reach target or the time set */ 4462 return; 4463 } 4464 if (rack->r_ctl.rc_time_probertt_starts == 0) { 4465 if ((TSTMP_LT(us_cts, must_stay) && 4466 rack->rc_highly_buffered) || 4467 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 4468 rack->r_ctl.rc_target_probertt_flight)) { 4469 /* We are not past the must_stay time */ 4470 goto no_exit; 4471 } 4472 rack_log_rtt_shrinks(rack, us_cts, 4473 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4474 __LINE__, RACK_RTTS_REACHTARGET); 4475 rack->r_ctl.rc_time_probertt_starts = us_cts; 4476 if (rack->r_ctl.rc_time_probertt_starts == 0) 4477 rack->r_ctl.rc_time_probertt_starts = 1; 4478 /* Restore back to our rate we want to pace at in prtt */ 4479 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 4480 } 4481 /* 4482 * Setup our end time, some number of gp_srtts plus 200ms. 4483 */ 4484 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 4485 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 4486 if (rack_probertt_gpsrtt_cnt_div) 4487 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 4488 else 4489 endtime = 0; 4490 endtime += rack_min_probertt_hold; 4491 endtime += rack->r_ctl.rc_time_probertt_starts; 4492 if (TSTMP_GEQ(us_cts, endtime)) { 4493 /* yes, exit probertt */ 4494 rack_exit_probertt(rack, us_cts); 4495 } 4496 4497 } else if ((rack->rc_skip_timely == 0) && 4498 (TSTMP_GT(us_cts, rack->r_ctl.rc_lower_rtt_us_cts)) && 4499 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt)) { 4500 /* Go into probertt, its been too long since we went lower */ 4501 rack_enter_probertt(rack, us_cts); 4502 } 4503 } 4504 4505 static void 4506 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 4507 uint32_t rtt, int32_t rtt_diff) 4508 { 4509 uint64_t cur_bw, up_bnd, low_bnd, subfr; 4510 uint32_t losses; 4511 4512 if ((rack->rc_gp_dyn_mul == 0) || 4513 (rack->use_fixed_rate) || 4514 (rack->in_probe_rtt) || 4515 (rack->rc_always_pace == 0)) { 4516 /* No dynamic GP multiplier in play */ 4517 return; 4518 } 4519 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 4520 cur_bw = rack_get_bw(rack); 4521 /* Calculate our up and down range */ 4522 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 4523 up_bnd /= 100; 4524 up_bnd += rack->r_ctl.last_gp_comp_bw; 4525 4526 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 4527 subfr /= 100; 4528 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 4529 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 4530 /* 4531 * This is the case where our RTT is above 4532 * the max target and we have been configured 4533 * to just do timely no bonus up stuff in that case. 4534 * 4535 * There are two configurations, set to 1, and we 4536 * just do timely if we are over our max. 
If its 4537 * set above 1 then we slam the multipliers down 4538 * to 100 and then decrement per timely. 4539 */ 4540 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4541 __LINE__, 3); 4542 if (rack->r_ctl.rc_no_push_at_mrtt > 1) 4543 rack_validate_multipliers_at_or_below_100(rack); 4544 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4545 } else if ((timely_says != 0) && (last_bw_est < low_bnd) && !losses) { 4546 /* 4547 * We are decreasing this is a bit complicated this 4548 * means we are loosing ground. This could be 4549 * because another flow entered and we are competing 4550 * for b/w with it. This will push the RTT up which 4551 * makes timely unusable unless we want to get shoved 4552 * into a corner and just be backed off (the age 4553 * old problem with delay based CC). 4554 * 4555 * On the other hand if it was a route change we 4556 * would like to stay somewhat contained and not 4557 * blow out the buffers. 4558 */ 4559 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4560 __LINE__, 3); 4561 rack->r_ctl.last_gp_comp_bw = cur_bw; 4562 if (rack->rc_gp_bwred == 0) { 4563 /* Go into reduction counting */ 4564 rack->rc_gp_bwred = 1; 4565 rack->rc_gp_timely_dec_cnt = 0; 4566 } 4567 if (rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) { 4568 /* 4569 * Push another time with a faster pacing 4570 * to try to gain back (we include override to 4571 * get a full raise factor). 4572 */ 4573 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || 4574 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || 4575 (timely_says == 0) || 4576 (rack_down_raise_thresh == 0)) { 4577 /* 4578 * Do an override up in b/w if we were 4579 * below the threshold or if the threshold 4580 * is zero we always do the raise. 4581 */ 4582 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1); 4583 } else { 4584 /* Log it stays the same */ 4585 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0, 4586 __LINE__, 11); 4587 } 4588 rack->rc_gp_timely_dec_cnt++; 4589 /* We are not incrementing really no-count */ 4590 rack->rc_gp_incr = 0; 4591 rack->rc_gp_timely_inc_cnt = 0; 4592 } else { 4593 /* 4594 * Lets just use the RTT 4595 * information and give up 4596 * pushing. 4597 */ 4598 goto use_timely; 4599 } 4600 } else if ((timely_says != 2) && 4601 !losses && 4602 (last_bw_est > up_bnd)) { 4603 /* 4604 * We are increasing b/w lets keep going, updating 4605 * our b/w and ignoring any timely input, unless 4606 * of course we are at our max raise (if there is one). 4607 */ 4608 4609 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4610 __LINE__, 3); 4611 rack->r_ctl.last_gp_comp_bw = cur_bw; 4612 if (rack->rc_gp_saw_ss && 4613 rack->r_ctl.rack_per_upper_bound_ss && 4614 (rack->r_ctl.rack_per_of_gp_ss == rack->r_ctl.rack_per_upper_bound_ss)) { 4615 /* 4616 * In cases where we can't go higher 4617 * we should just use timely. 4618 */ 4619 goto use_timely; 4620 } 4621 if (rack->rc_gp_saw_ca && 4622 rack->r_ctl.rack_per_upper_bound_ca && 4623 (rack->r_ctl.rack_per_of_gp_ca == rack->r_ctl.rack_per_upper_bound_ca)) { 4624 /* 4625 * In cases where we can't go higher 4626 * we should just use timely. 
4627 */ 4628 goto use_timely; 4629 } 4630 rack->rc_gp_bwred = 0; 4631 rack->rc_gp_timely_dec_cnt = 0; 4632 /* You get a set number of pushes if timely is trying to reduce */ 4633 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { 4634 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4635 } else { 4636 /* Log it stays the same */ 4637 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0, 4638 __LINE__, 12); 4639 } 4640 return; 4641 } else { 4642 /* 4643 * We are staying between the lower and upper range bounds 4644 * so use timely to decide. 4645 */ 4646 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4647 __LINE__, 3); 4648 use_timely: 4649 if (timely_says) { 4650 rack->rc_gp_incr = 0; 4651 rack->rc_gp_timely_inc_cnt = 0; 4652 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && 4653 !losses && 4654 (last_bw_est < low_bnd)) { 4655 /* We are loosing ground */ 4656 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4657 rack->rc_gp_timely_dec_cnt++; 4658 /* We are not incrementing really no-count */ 4659 rack->rc_gp_incr = 0; 4660 rack->rc_gp_timely_inc_cnt = 0; 4661 } else 4662 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4663 } else { 4664 rack->rc_gp_bwred = 0; 4665 rack->rc_gp_timely_dec_cnt = 0; 4666 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4667 } 4668 } 4669 } 4670 4671 static int32_t 4672 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt) 4673 { 4674 int32_t timely_says; 4675 uint64_t log_mult, log_rtt_a_diff; 4676 4677 log_rtt_a_diff = rtt; 4678 log_rtt_a_diff <<= 32; 4679 log_rtt_a_diff |= (uint32_t)rtt_diff; 4680 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * 4681 rack_gp_rtt_maxmul)) { 4682 /* Reduce the b/w multiplier */ 4683 timely_says = 2; 4684 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 4685 log_mult <<= 32; 4686 log_mult |= prev_rtt; 4687 rack_log_timely(rack, timely_says, log_mult, 4688 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4689 log_rtt_a_diff, __LINE__, 4); 4690 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4691 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4692 max(rack_gp_rtt_mindiv , 1)))) { 4693 /* Increase the b/w multiplier */ 4694 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4695 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4696 max(rack_gp_rtt_mindiv , 1)); 4697 log_mult <<= 32; 4698 log_mult |= prev_rtt; 4699 timely_says = 0; 4700 rack_log_timely(rack, timely_says, log_mult , 4701 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4702 log_rtt_a_diff, __LINE__, 5); 4703 } else { 4704 /* 4705 * Use a gradient to find it the timely gradient 4706 * is: 4707 * grad = rc_rtt_diff / min_rtt; 4708 * 4709 * anything below or equal to 0 will be 4710 * a increase indication. Anything above 4711 * zero is a decrease. Note we take care 4712 * of the actual gradient calculation 4713 * in the reduction (its not needed for 4714 * increase). 
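 *
 * Worked example (illustrative numbers only): with a filtered
 * gp_min_rtt of 20000 usecs and rack_gp_rtt_maxmul assumed to be
 * 3, an rtt at or above 60000 usecs already returned 2 (hard
 * decrease) above. Below that ceiling the sign of rc_rtt_diff
 * decides here:
 *     rc_rtt_diff = -800  -> timely_says = 0 (increase)
 *     rc_rtt_diff = +1500 -> timely_says = 1 (decrease)
 * The magnitude of a positive gradient is only applied later, in
 * rack_decrease_bw_mul().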
4715 */ 4716 log_mult = prev_rtt; 4717 if (rtt_diff <= 0) { 4718 /* 4719 * Rttdiff is less than zero, increase the 4720 * b/w multiplier (its 0 or negative) 4721 */ 4722 timely_says = 0; 4723 rack_log_timely(rack, timely_says, log_mult, 4724 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4725 } else { 4726 /* Reduce the b/w multiplier */ 4727 timely_says = 1; 4728 rack_log_timely(rack, timely_says, log_mult, 4729 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4730 } 4731 } 4732 return (timely_says); 4733 } 4734 4735 static __inline int 4736 rack_in_gp_window(struct tcpcb *tp, struct rack_sendmap *rsm) 4737 { 4738 if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && 4739 SEQ_LEQ(rsm->r_end, tp->gput_ack)) { 4740 /** 4741 * This covers the case that the 4742 * resent is completely inside 4743 * the gp range or up to it. 4744 * |----------------| 4745 * |-----| <or> 4746 * |----| 4747 * <or> |---| 4748 */ 4749 return (1); 4750 } else if (SEQ_LT(rsm->r_start, tp->gput_seq) && 4751 SEQ_GT(rsm->r_end, tp->gput_seq)){ 4752 /** 4753 * This covers the case of 4754 * |--------------| 4755 * |-------->| 4756 */ 4757 return (1); 4758 } else if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && 4759 SEQ_LT(rsm->r_start, tp->gput_ack) && 4760 SEQ_GEQ(rsm->r_end, tp->gput_ack)) { 4761 4762 /** 4763 * This covers the case of 4764 * |--------------| 4765 * |-------->| 4766 */ 4767 return (1); 4768 } 4769 return (0); 4770 } 4771 4772 static __inline void 4773 rack_mark_in_gp_win(struct tcpcb *tp, struct rack_sendmap *rsm) 4774 { 4775 4776 if ((tp->t_flags & TF_GPUTINPROG) == 0) 4777 return; 4778 /* 4779 * We have a Goodput measurement in progress. Mark 4780 * the send if its within the window. If its not 4781 * in the window make sure it does not have the mark. 4782 */ 4783 if (rack_in_gp_window(tp, rsm)) 4784 rsm->r_flags |= RACK_IN_GP_WIN; 4785 else 4786 rsm->r_flags &= ~RACK_IN_GP_WIN; 4787 } 4788 4789 static __inline void 4790 rack_clear_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) 4791 { 4792 /* A GP measurement is ending, clear all marks on the send map*/ 4793 struct rack_sendmap *rsm = NULL; 4794 4795 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 4796 if (rsm == NULL) { 4797 rsm = tqhash_min(rack->r_ctl.tqh); 4798 } 4799 /* Nothing left? */ 4800 while ((rsm != NULL) && (SEQ_GEQ(tp->gput_ack, rsm->r_start))){ 4801 rsm->r_flags &= ~RACK_IN_GP_WIN; 4802 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4803 } 4804 } 4805 4806 4807 static __inline void 4808 rack_tend_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) 4809 { 4810 struct rack_sendmap *rsm = NULL; 4811 4812 if (tp->snd_una == tp->snd_max) { 4813 /* Nothing outstanding yet, nothing to do here */ 4814 return; 4815 } 4816 if (SEQ_GT(tp->gput_seq, tp->snd_una)) { 4817 /* 4818 * We are measuring ahead of some outstanding 4819 * data. We need to walk through up until we get 4820 * to gp_seq marking so that no rsm is set incorrectly 4821 * with RACK_IN_GP_WIN. 4822 */ 4823 rsm = tqhash_min(rack->r_ctl.tqh); 4824 while (rsm != NULL) { 4825 rack_mark_in_gp_win(tp, rsm); 4826 if (SEQ_GEQ(rsm->r_end, tp->gput_seq)) 4827 break; 4828 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4829 } 4830 } 4831 if (rsm == NULL) { 4832 /* 4833 * Need to find the GP seq, if rsm is 4834 * set we stopped as we hit it. 
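 *
 * Illustrative layout of the sequence space (not to scale):
 *
 *   snd_una      gput_seq                     gput_ack     snd_max
 *     |---rsm1---|----rsm2----|----rsm3----|-----rsm4-----|
 *
 * Here rsm2 and rsm3, plus rsm4 which straddles gput_ack, get
 * RACK_IN_GP_WIN set by rack_mark_in_gp_win(); rsm1 lies entirely
 * before the measurement window so the flag is cleared on it.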
4835 */ 4836 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 4837 if (rsm == NULL) 4838 return; 4839 rack_mark_in_gp_win(tp, rsm); 4840 } 4841 /* 4842 * Now we may need to mark already sent rsm, ahead of 4843 * gput_seq in the window since they may have been sent 4844 * *before* we started our measurment. The rsm, if non-null 4845 * has been marked (note if rsm would have been NULL we would have 4846 * returned in the previous block). So we go to the next, and continue 4847 * until we run out of entries or we exceed the gp_ack value. 4848 */ 4849 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4850 while (rsm) { 4851 rack_mark_in_gp_win(tp, rsm); 4852 if (SEQ_GT(rsm->r_end, tp->gput_ack)) 4853 break; 4854 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4855 } 4856 } 4857 4858 static void 4859 rack_log_gp_calc(struct tcp_rack *rack, uint32_t add_part, uint32_t sub_part, uint32_t srtt, uint64_t meas_bw, uint64_t utim, uint8_t meth, uint32_t line) 4860 { 4861 if (tcp_bblogging_on(rack->rc_tp)) { 4862 union tcp_log_stackspecific log; 4863 struct timeval tv; 4864 4865 memset(&log, 0, sizeof(log)); 4866 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4867 log.u_bbr.flex1 = add_part; 4868 log.u_bbr.flex2 = sub_part; 4869 log.u_bbr.flex3 = rack_wma_divisor; 4870 log.u_bbr.flex4 = srtt; 4871 log.u_bbr.flex7 = (uint16_t)line; 4872 log.u_bbr.flex8 = meth; 4873 log.u_bbr.delRate = rack->r_ctl.gp_bw; 4874 log.u_bbr.cur_del_rate = meas_bw; 4875 log.u_bbr.rttProp = utim; 4876 TCP_LOG_EVENTP(rack->rc_tp, NULL, 4877 &rack->rc_inp->inp_socket->so_rcv, 4878 &rack->rc_inp->inp_socket->so_snd, 4879 BBR_LOG_THRESH_CALC, 0, 4880 0, &log, false, &rack->r_ctl.act_rcv_time); 4881 } 4882 } 4883 4884 static void 4885 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 4886 tcp_seq th_ack, int line, uint8_t quality) 4887 { 4888 uint64_t tim, bytes_ps, stim, utim; 4889 uint32_t segsiz, bytes, reqbytes, us_cts; 4890 int32_t gput, new_rtt_diff, timely_says; 4891 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 4892 int did_add = 0; 4893 4894 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4895 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4896 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 4897 tim = us_cts - tp->gput_ts; 4898 else 4899 tim = 0; 4900 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 4901 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 4902 else 4903 stim = 0; 4904 /* 4905 * Use the larger of the send time or ack time. This prevents us 4906 * from being influenced by ack artifacts to come up with too 4907 * high of measurement. Note that since we are spanning over many more 4908 * bytes in most of our measurements hopefully that is less likely to 4909 * occur. 4910 */ 4911 if (tim > stim) 4912 utim = max(tim, 1); 4913 else 4914 utim = max(stim, 1); 4915 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 4916 rack_log_gpset(rack, th_ack, us_cts, rack->r_ctl.rc_gp_cumack_ts, __LINE__, 3, NULL); 4917 if ((tim == 0) && (stim == 0)) { 4918 /* 4919 * Invalid measurement time, maybe 4920 * all on one ack/one send? 4921 */ 4922 bytes = 0; 4923 bytes_ps = 0; 4924 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4925 0, 0, 0, 10, __LINE__, NULL, quality); 4926 goto skip_measurement; 4927 } 4928 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 4929 /* We never made a us_rtt measurement? 
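 * (0xffffffff is the sentinel rc_gp_lowrtt is re-seeded with when
 * a measurement window is armed further down in this function; if
 * it is still there, no RTT sample arrived during the window, so
 * there is no BDP bound to sanity-check the sample against and we
 * log it and skip the measurement.)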
*/ 4930 bytes = 0; 4931 bytes_ps = 0; 4932 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4933 0, 0, 0, 10, __LINE__, NULL, quality); 4934 goto skip_measurement; 4935 } 4936 /* 4937 * Calculate the maximum possible b/w this connection 4938 * could have. We base our calculation on the lowest 4939 * rtt we have seen during the measurement and the 4940 * largest rwnd the client has given us in that time. This 4941 * forms a BDP that is the maximum that we could ever 4942 * get to the client. Anything larger is not valid. 4943 * 4944 * I originally had code here that rejected measurements 4945 * where the time was less than 1/2 the latest us_rtt. 4946 * But after thinking on that I realized its wrong since 4947 * say you had a 150Mbps or even 1Gbps link, and you 4948 * were a long way away.. example I am in Europe (100ms rtt) 4949 * talking to my 1Gbps link in S.C. Now measuring say 150,000 4950 * bytes my time would be 1.2ms, and yet my rtt would say 4951 * the measurement was invalid the time was < 50ms. The 4952 * same thing is true for 150Mb (8ms of time). 4953 * 4954 * A better way I realized is to look at what the maximum 4955 * the connection could possibly do. This is gated on 4956 * the lowest RTT we have seen and the highest rwnd. 4957 * We should in theory never exceed that, if we are 4958 * then something on the path is storing up packets 4959 * and then feeding them all at once to our endpoint 4960 * messing up our measurement. 4961 */ 4962 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 4963 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 4964 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 4965 if (SEQ_LT(th_ack, tp->gput_seq)) { 4966 /* No measurement can be made */ 4967 bytes = 0; 4968 bytes_ps = 0; 4969 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4970 0, 0, 0, 10, __LINE__, NULL, quality); 4971 goto skip_measurement; 4972 } else 4973 bytes = (th_ack - tp->gput_seq); 4974 bytes_ps = (uint64_t)bytes; 4975 /* 4976 * Don't measure a b/w for pacing unless we have gotten at least 4977 * an initial windows worth of data in this measurement interval. 4978 * 4979 * Small numbers of bytes get badly influenced by delayed ack and 4980 * other artifacts. Note we take the initial window or our 4981 * defined minimum GP (defaulting to 10 which hopefully is the 4982 * IW). 4983 */ 4984 if (rack->rc_gp_filled == 0) { 4985 /* 4986 * The initial estimate is special. We 4987 * have blasted out an IW worth of packets 4988 * without a real valid ack ts results. We 4989 * then setup the app_limited_needs_set flag, 4990 * this should get the first ack in (probably 2 4991 * MSS worth) to be recorded as the timestamp. 4992 * We thus allow a smaller number of bytes i.e. 4993 * IW - 2MSS. 4994 */ 4995 reqbytes -= (2 * segsiz); 4996 /* Also lets fill previous for our first measurement to be neutral */ 4997 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4998 } 4999 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 5000 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5001 rack->r_ctl.rc_app_limited_cnt, 5002 0, 0, 10, __LINE__, NULL, quality); 5003 goto skip_measurement; 5004 } 5005 /* 5006 * We now need to calculate the Timely like status so 5007 * we can update (possibly) the b/w multipliers. 
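 *
 * The rtt_diff feeding that judgement is kept as a simple 1/8
 * EWMA just below:
 *     rc_rtt_diff = rc_rtt_diff - rc_rtt_diff / 8 + new_rtt_diff / 8
 * Worked example (numbers made up): an existing rc_rtt_diff of
 * 800 usecs and a new sample of -200 usecs gives
 *     800 - 100 + (-25) = 675 usecs
 * so one shorter-RTT interval only pulls the gradient down
 * gradually, and intervals that saw a probe-rtt are skipped.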
5008 */ 5009 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 5010 if (rack->rc_gp_filled == 0) { 5011 /* No previous reading */ 5012 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 5013 } else { 5014 if (rack->measure_saw_probe_rtt == 0) { 5015 /* 5016 * We don't want a probertt to be counted 5017 * since it will be negative incorrectly. We 5018 * expect to be reducing the RTT when we 5019 * pace at a slower rate. 5020 */ 5021 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 5022 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 5023 } 5024 } 5025 timely_says = rack_make_timely_judgement(rack, 5026 rack->r_ctl.rc_gp_srtt, 5027 rack->r_ctl.rc_rtt_diff, 5028 rack->r_ctl.rc_prev_gp_srtt 5029 ); 5030 bytes_ps *= HPTS_USEC_IN_SEC; 5031 bytes_ps /= utim; 5032 if (bytes_ps > rack->r_ctl.last_max_bw) { 5033 /* 5034 * Something is on path playing 5035 * since this b/w is not possible based 5036 * on our BDP (highest rwnd and lowest rtt 5037 * we saw in the measurement window). 5038 * 5039 * Another option here would be to 5040 * instead skip the measurement. 5041 */ 5042 rack_log_pacing_delay_calc(rack, bytes, reqbytes, 5043 bytes_ps, rack->r_ctl.last_max_bw, 0, 5044 11, __LINE__, NULL, quality); 5045 bytes_ps = rack->r_ctl.last_max_bw; 5046 } 5047 /* We store gp for b/w in bytes per second */ 5048 if (rack->rc_gp_filled == 0) { 5049 /* Initial measurement */ 5050 if (bytes_ps) { 5051 rack->r_ctl.gp_bw = bytes_ps; 5052 rack->rc_gp_filled = 1; 5053 rack->r_ctl.num_measurements = 1; 5054 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 5055 } else { 5056 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5057 rack->r_ctl.rc_app_limited_cnt, 5058 0, 0, 10, __LINE__, NULL, quality); 5059 } 5060 if (tcp_in_hpts(rack->rc_tp) && 5061 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 5062 /* 5063 * Ok we can't trust the pacer in this case 5064 * where we transition from un-paced to paced. 5065 * Or for that matter when the burst mitigation 5066 * was making a wild guess and got it wrong. 5067 * Stop the pacer and clear up all the aggregate 5068 * delays etc. 5069 */ 5070 tcp_hpts_remove(rack->rc_tp); 5071 rack->r_ctl.rc_hpts_flags = 0; 5072 rack->r_ctl.rc_last_output_to = 0; 5073 } 5074 did_add = 2; 5075 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { 5076 /* Still a small number run an average */ 5077 rack->r_ctl.gp_bw += bytes_ps; 5078 addpart = rack->r_ctl.num_measurements; 5079 rack->r_ctl.num_measurements++; 5080 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 5081 /* We have collected enough to move forward */ 5082 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; 5083 } 5084 rack_set_pace_segments(tp, rack, __LINE__, NULL); 5085 did_add = 3; 5086 } else { 5087 /* 5088 * We want to take 1/wma of the goodput and add in to 7/8th 5089 * of the old value weighted by the srtt. So if your measurement 5090 * period is say 2 SRTT's long you would get 1/4 as the 5091 * value, if it was like 1/2 SRTT then you would get 1/16th. 5092 * 5093 * But we must be careful not to take too much i.e. if the 5094 * srtt is say 20ms and the measurement is taken over 5095 * 400ms our weight would be 400/20 i.e. 20. On the 5096 * other hand if we get a measurement over 1ms with a 5097 * 10ms rtt we only want to take a much smaller portion. 
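 *
 * Worked example of the two paths below (illustrative numbers,
 * rack_wma_divisor assumed at its default of 8):
 *   - srtt = 20000 usecs, utim = 10000 usecs (half an srtt):
 *         subpart = gp_bw    * 10000 / (20000 * 8) = gp_bw    / 16
 *         addpart = bytes_ps * 10000 / (20000 * 8) = bytes_ps / 16
 *     i.e. a short sample replaces only 1/16th of the average.
 *   - srtt = 20000 usecs, utim = 400000 usecs (20 srtts): the
 *     time-scaled weight would be 400000 / (20000 * 8) = 2.5x the
 *     whole average, so the contribution is capped instead (at
 *     1/2 in the static path, at 1/rack_wma_divisor in the
 *     dynamic path).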
5098 */ 5099 uint8_t meth; 5100 5101 if (rack->r_ctl.num_measurements < 0xff) { 5102 rack->r_ctl.num_measurements++; 5103 } 5104 srtt = (uint64_t)tp->t_srtt; 5105 if (srtt == 0) { 5106 /* 5107 * Strange why did t_srtt go back to zero? 5108 */ 5109 if (rack->r_ctl.rc_rack_min_rtt) 5110 srtt = rack->r_ctl.rc_rack_min_rtt; 5111 else 5112 srtt = HPTS_USEC_IN_MSEC; 5113 } 5114 /* 5115 * XXXrrs: Note for reviewers, in playing with 5116 * dynamic pacing I discovered this GP calculation 5117 * as done originally leads to some undesired results. 5118 * Basically you can get longer measurements contributing 5119 * too much to the WMA. Thus I changed it if you are doing 5120 * dynamic adjustments to only do the aportioned adjustment 5121 * if we have a very small (time wise) measurement. Longer 5122 * measurements just get there weight (defaulting to 1/8) 5123 * add to the WMA. We may want to think about changing 5124 * this to always do that for both sides i.e. dynamic 5125 * and non-dynamic... but considering lots of folks 5126 * were playing with this I did not want to change the 5127 * calculation per.se. without your thoughts.. Lawerence? 5128 * Peter?? 5129 */ 5130 if (rack->rc_gp_dyn_mul == 0) { 5131 subpart = rack->r_ctl.gp_bw * utim; 5132 subpart /= (srtt * 8); 5133 if (subpart < (rack->r_ctl.gp_bw / 2)) { 5134 /* 5135 * The b/w update takes no more 5136 * away then 1/2 our running total 5137 * so factor it in. 5138 */ 5139 addpart = bytes_ps * utim; 5140 addpart /= (srtt * 8); 5141 meth = 1; 5142 } else { 5143 /* 5144 * Don't allow a single measurement 5145 * to account for more than 1/2 of the 5146 * WMA. This could happen on a retransmission 5147 * where utim becomes huge compared to 5148 * srtt (multiple retransmissions when using 5149 * the sending rate which factors in all the 5150 * transmissions from the first one). 5151 */ 5152 subpart = rack->r_ctl.gp_bw / 2; 5153 addpart = bytes_ps / 2; 5154 meth = 2; 5155 } 5156 rack_log_gp_calc(rack, addpart, subpart, srtt, bytes_ps, utim, meth, __LINE__); 5157 resid_bw = rack->r_ctl.gp_bw - subpart; 5158 rack->r_ctl.gp_bw = resid_bw + addpart; 5159 did_add = 1; 5160 } else { 5161 if ((utim / srtt) <= 1) { 5162 /* 5163 * The b/w update was over a small period 5164 * of time. The idea here is to prevent a small 5165 * measurement time period from counting 5166 * too much. So we scale it based on the 5167 * time so it attributes less than 1/rack_wma_divisor 5168 * of its measurement. 5169 */ 5170 subpart = rack->r_ctl.gp_bw * utim; 5171 subpart /= (srtt * rack_wma_divisor); 5172 addpart = bytes_ps * utim; 5173 addpart /= (srtt * rack_wma_divisor); 5174 meth = 3; 5175 } else { 5176 /* 5177 * The scaled measurement was long 5178 * enough so lets just add in the 5179 * portion of the measurement i.e. 1/rack_wma_divisor 5180 */ 5181 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; 5182 addpart = bytes_ps / rack_wma_divisor; 5183 meth = 4; 5184 } 5185 if ((rack->measure_saw_probe_rtt == 0) || 5186 (bytes_ps > rack->r_ctl.gp_bw)) { 5187 /* 5188 * For probe-rtt we only add it in 5189 * if its larger, all others we just 5190 * add in. 5191 */ 5192 did_add = 1; 5193 rack_log_gp_calc(rack, addpart, subpart, srtt, bytes_ps, utim, meth, __LINE__); 5194 resid_bw = rack->r_ctl.gp_bw - subpart; 5195 rack->r_ctl.gp_bw = resid_bw + addpart; 5196 } 5197 } 5198 rack_set_pace_segments(tp, rack, __LINE__, NULL); 5199 } 5200 /* 5201 * We only watch the growth of the GP during the initial startup 5202 * or first-slowstart that ensues. 
If we ever needed to watch 5203 * growth of gp outside of that period all we need to do is 5204 * remove the first clause of this if (rc_initial_ss_comp). 5205 */ 5206 if ((rack->rc_initial_ss_comp == 0) && 5207 (rack->r_ctl.num_measurements >= RACK_REQ_AVG)) { 5208 uint64_t gp_est; 5209 5210 gp_est = bytes_ps; 5211 if (tcp_bblogging_on(rack->rc_tp)) { 5212 union tcp_log_stackspecific log; 5213 struct timeval tv; 5214 5215 memset(&log, 0, sizeof(log)); 5216 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5217 log.u_bbr.flex1 = rack->r_ctl.current_round; 5218 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; 5219 log.u_bbr.delRate = gp_est; 5220 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; 5221 log.u_bbr.flex8 = 41; 5222 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5223 0, &log, false, NULL, __func__, __LINE__,&tv); 5224 } 5225 if ((rack->r_ctl.num_measurements == RACK_REQ_AVG) || 5226 (rack->r_ctl.last_gpest == 0)) { 5227 /* 5228 * The round we get our measurement averaging going 5229 * is the base round so it always is the source point 5230 * for when we had our first increment. From there on 5231 * we only record the round that had a rise. 5232 */ 5233 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; 5234 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; 5235 } else if (gp_est >= rack->r_ctl.last_gpest) { 5236 /* 5237 * Test to see if its gone up enough 5238 * to set the round count up to now. Note 5239 * that on the seeding of the 4th measurement we 5240 */ 5241 gp_est *= 1000; 5242 gp_est /= rack->r_ctl.last_gpest; 5243 if ((uint32_t)gp_est > rack->r_ctl.gp_gain_req) { 5244 /* 5245 * We went up enough to record the round. 5246 */ 5247 if (tcp_bblogging_on(rack->rc_tp)) { 5248 union tcp_log_stackspecific log; 5249 struct timeval tv; 5250 5251 memset(&log, 0, sizeof(log)); 5252 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5253 log.u_bbr.flex1 = rack->r_ctl.current_round; 5254 log.u_bbr.flex2 = (uint32_t)gp_est; 5255 log.u_bbr.flex3 = rack->r_ctl.gp_gain_req; 5256 log.u_bbr.delRate = gp_est; 5257 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; 5258 log.u_bbr.flex8 = 42; 5259 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5260 0, &log, false, NULL, __func__, __LINE__,&tv); 5261 } 5262 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; 5263 if (rack->r_ctl.use_gp_not_last == 1) 5264 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; 5265 else 5266 rack->r_ctl.last_gpest = bytes_ps; 5267 } 5268 } 5269 } 5270 if ((rack->gp_ready == 0) && 5271 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 5272 /* We have enough measurements now */ 5273 rack->gp_ready = 1; 5274 if (rack->dgp_on || 5275 rack->rack_hibeta) 5276 rack_set_cc_pacing(rack); 5277 if (rack->defer_options) 5278 rack_apply_deferred_options(rack); 5279 } 5280 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 5281 rack_get_bw(rack), 22, did_add, NULL, quality); 5282 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 5283 5284 if ((rack->measure_saw_probe_rtt == 0) && 5285 rack->rc_gp_rtt_set) { 5286 if (rack->rc_skip_timely == 0) { 5287 rack_update_multiplier(rack, timely_says, bytes_ps, 5288 rack->r_ctl.rc_gp_srtt, 5289 rack->r_ctl.rc_rtt_diff); 5290 } 5291 } 5292 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 5293 rack_get_bw(rack), 3, line, NULL, quality); 5294 rack_log_pacing_delay_calc(rack, 5295 bytes, /* flex2 */ 5296 tim, /* flex1 */ 5297 bytes_ps, /* bw_inuse */ 5298 rack->r_ctl.gp_bw, /* delRate */ 5299 
rack_get_lt_bw(rack), /* rttProp */ 5300 20, line, NULL, 0); 5301 /* reset the gp srtt and setup the new prev */ 5302 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 5303 /* Record the lost count for the next measurement */ 5304 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 5305 skip_measurement: 5306 /* 5307 * We restart our diffs based on the gpsrtt in the 5308 * measurement window. 5309 */ 5310 rack->rc_gp_rtt_set = 0; 5311 rack->rc_gp_saw_rec = 0; 5312 rack->rc_gp_saw_ca = 0; 5313 rack->rc_gp_saw_ss = 0; 5314 rack->rc_dragged_bottom = 0; 5315 if (quality == RACK_QUALITY_HIGH) { 5316 /* 5317 * Gput in the stats world is in kbps where bytes_ps is 5318 * bytes per second so we do ((x * 8)/ 1000). 5319 */ 5320 gput = (int32_t)((bytes_ps << 3) / (uint64_t)1000); 5321 #ifdef STATS 5322 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 5323 gput); 5324 /* 5325 * XXXLAS: This is a temporary hack, and should be 5326 * chained off VOI_TCP_GPUT when stats(9) grows an 5327 * API to deal with chained VOIs. 5328 */ 5329 if (tp->t_stats_gput_prev > 0) 5330 stats_voi_update_abs_s32(tp->t_stats, 5331 VOI_TCP_GPUT_ND, 5332 ((gput - tp->t_stats_gput_prev) * 100) / 5333 tp->t_stats_gput_prev); 5334 #endif 5335 tp->t_stats_gput_prev = gput; 5336 } 5337 tp->t_flags &= ~TF_GPUTINPROG; 5338 /* 5339 * Now are we app limited now and there is space from where we 5340 * were to where we want to go? 5341 * 5342 * We don't do the other case i.e. non-applimited here since 5343 * the next send will trigger us picking up the missing data. 5344 */ 5345 if (rack->r_ctl.rc_first_appl && 5346 TCPS_HAVEESTABLISHED(tp->t_state) && 5347 rack->r_ctl.rc_app_limited_cnt && 5348 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 5349 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > 5350 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 5351 /* 5352 * Yep there is enough outstanding to make a measurement here. 5353 */ 5354 struct rack_sendmap *rsm; 5355 5356 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 5357 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 5358 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 5359 rack->app_limited_needs_set = 0; 5360 tp->gput_seq = th_ack; 5361 if (rack->in_probe_rtt) 5362 rack->measure_saw_probe_rtt = 1; 5363 else if ((rack->measure_saw_probe_rtt) && 5364 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 5365 rack->measure_saw_probe_rtt = 0; 5366 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { 5367 /* There is a full window to gain info from */ 5368 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 5369 } else { 5370 /* We can only measure up to the applimited point */ 5371 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); 5372 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 5373 /* 5374 * We don't have enough to make a measurement. 5375 */ 5376 tp->t_flags &= ~TF_GPUTINPROG; 5377 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 5378 0, 0, 0, 6, __LINE__, NULL, quality); 5379 return; 5380 } 5381 } 5382 if (tp->t_state >= TCPS_FIN_WAIT_1) { 5383 /* 5384 * We will get no more data into the SB 5385 * this means we need to have the data available 5386 * before we start a measurement. 5387 */ 5388 if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) { 5389 /* Nope not enough data. 
*/ 5390 return; 5391 } 5392 } 5393 tp->t_flags |= TF_GPUTINPROG; 5394 /* 5395 * Now we need to find the timestamp of the send at tp->gput_seq 5396 * for the send based measurement. 5397 */ 5398 rack->r_ctl.rc_gp_cumack_ts = 0; 5399 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 5400 if (rsm) { 5401 /* Ok send-based limit is set */ 5402 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 5403 /* 5404 * Move back to include the earlier part 5405 * so our ack time lines up right (this may 5406 * make an overlapping measurement but thats 5407 * ok). 5408 */ 5409 tp->gput_seq = rsm->r_start; 5410 } 5411 if (rsm->r_flags & RACK_ACKED) { 5412 struct rack_sendmap *nrsm; 5413 5414 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 5415 tp->gput_seq = rsm->r_end; 5416 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); 5417 if (nrsm) 5418 rsm = nrsm; 5419 else { 5420 rack->app_limited_needs_set = 1; 5421 } 5422 } else 5423 rack->app_limited_needs_set = 1; 5424 /* We always go from the first send */ 5425 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; 5426 } else { 5427 /* 5428 * If we don't find the rsm due to some 5429 * send-limit set the current time, which 5430 * basically disables the send-limit. 5431 */ 5432 struct timeval tv; 5433 5434 microuptime(&tv); 5435 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 5436 } 5437 rack_tend_gp_marks(tp, rack); 5438 rack_log_pacing_delay_calc(rack, 5439 tp->gput_seq, 5440 tp->gput_ack, 5441 (uintptr_t)rsm, 5442 tp->gput_ts, 5443 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 5444 9, 5445 __LINE__, rsm, quality); 5446 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 5447 } else { 5448 /* 5449 * To make sure proper timestamp merging occurs, we need to clear 5450 * all GP marks if we don't start a measurement. 5451 */ 5452 rack_clear_gp_marks(tp, rack); 5453 } 5454 } 5455 5456 /* 5457 * CC wrapper hook functions 5458 */ 5459 static void 5460 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 5461 uint16_t type, int32_t post_recovery) 5462 { 5463 uint32_t prior_cwnd, acked; 5464 struct tcp_log_buffer *lgb = NULL; 5465 uint8_t labc_to_use, quality; 5466 5467 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5468 tp->t_ccv.nsegs = nsegs; 5469 acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una); 5470 if ((post_recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 5471 uint32_t max; 5472 5473 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 5474 if (tp->t_ccv.bytes_this_ack > max) { 5475 tp->t_ccv.bytes_this_ack = max; 5476 } 5477 } 5478 #ifdef STATS 5479 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 5480 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 5481 #endif 5482 if ((th_ack == tp->snd_max) && rack->lt_bw_up) { 5483 /* 5484 * We will ack all the data, time to end any 5485 * lt_bw_up we have running until something 5486 * new is sent. Note we need to use the actual 5487 * ack_rcv_time which with pacing may be different. 
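 *
 * The lt (long-term) accounting below just accumulates bytes and
 * active time, so the rate later derived from it is roughly
 * lt_bw_bytes / lt_bw_time. Sketch with made-up numbers: closing
 * this interval with another 150,000 bytes over 120,000 usecs of
 * accounted time, on top of a running 1,500,000 bytes over 1.2
 * seconds, still works out to about 1.25 MB/s.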
5488 */ 5489 uint64_t tmark; 5490 5491 rack->r_ctl.lt_bw_bytes += (tp->snd_max - rack->r_ctl.lt_seq); 5492 rack->r_ctl.lt_seq = tp->snd_max; 5493 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 5494 if (tmark >= rack->r_ctl.lt_timemark) { 5495 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 5496 } 5497 rack->r_ctl.lt_timemark = tmark; 5498 rack->lt_bw_up = 0; 5499 } 5500 quality = RACK_QUALITY_NONE; 5501 if ((tp->t_flags & TF_GPUTINPROG) && 5502 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { 5503 /* Measure the Goodput */ 5504 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); 5505 } 5506 /* Which way our we limited, if not cwnd limited no advance in CA */ 5507 if (tp->snd_cwnd <= tp->snd_wnd) 5508 tp->t_ccv.flags |= CCF_CWND_LIMITED; 5509 else 5510 tp->t_ccv.flags &= ~CCF_CWND_LIMITED; 5511 if (tp->snd_cwnd > tp->snd_ssthresh) { 5512 tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack, 5513 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 5514 /* For the setting of a window past use the actual scwnd we are using */ 5515 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 5516 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 5517 tp->t_ccv.flags |= CCF_ABC_SENTAWND; 5518 } 5519 } else { 5520 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 5521 tp->t_bytes_acked = 0; 5522 } 5523 prior_cwnd = tp->snd_cwnd; 5524 if ((post_recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 5525 (rack_client_low_buf && rack->client_bufferlvl && 5526 (rack->client_bufferlvl < rack_client_low_buf))) 5527 labc_to_use = rack->rc_labc; 5528 else 5529 labc_to_use = rack_max_abc_post_recovery; 5530 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5531 union tcp_log_stackspecific log; 5532 struct timeval tv; 5533 5534 memset(&log, 0, sizeof(log)); 5535 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5536 log.u_bbr.flex1 = th_ack; 5537 log.u_bbr.flex2 = tp->t_ccv.flags; 5538 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 5539 log.u_bbr.flex4 = tp->t_ccv.nsegs; 5540 log.u_bbr.flex5 = labc_to_use; 5541 log.u_bbr.flex6 = prior_cwnd; 5542 log.u_bbr.flex7 = V_tcp_do_newsack; 5543 log.u_bbr.flex8 = 1; 5544 lgb = tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5545 0, &log, false, NULL, __func__, __LINE__,&tv); 5546 } 5547 if (CC_ALGO(tp)->ack_received != NULL) { 5548 /* XXXLAS: Find a way to live without this */ 5549 tp->t_ccv.curack = th_ack; 5550 tp->t_ccv.labc = labc_to_use; 5551 tp->t_ccv.flags |= CCF_USE_LOCAL_ABC; 5552 CC_ALGO(tp)->ack_received(&tp->t_ccv, type); 5553 } 5554 if (lgb) { 5555 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; 5556 } 5557 if (rack->r_must_retran) { 5558 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { 5559 /* 5560 * We now are beyond the rxt point so lets disable 5561 * the flag. 5562 */ 5563 rack->r_ctl.rc_out_at_rto = 0; 5564 rack->r_must_retran = 0; 5565 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { 5566 /* 5567 * Only decrement the rc_out_at_rto if the cwnd advances 5568 * at least a whole segment. Otherwise next time the peer 5569 * acks, we won't be able to send this generaly happens 5570 * when we are in Congestion Avoidance. 
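 *
 * Sketch (values invented): with rc_out_at_rto covering 20,000
 * bytes and this ack covering acked = 2,896 bytes, a cwnd that
 * grew by at least one full segment lets us take rc_out_at_rto
 * down to 17,104; and once th_ack reaches rc_snd_max_at_rto the
 * branch above clears the r_must_retran state outright.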
5571 */ 5572 if (acked <= rack->r_ctl.rc_out_at_rto){ 5573 rack->r_ctl.rc_out_at_rto -= acked; 5574 } else { 5575 rack->r_ctl.rc_out_at_rto = 0; 5576 } 5577 } 5578 } 5579 #ifdef STATS 5580 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); 5581 #endif 5582 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { 5583 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; 5584 } 5585 if ((rack->rc_initial_ss_comp == 0) && 5586 (tp->snd_cwnd >= tp->snd_ssthresh)) { 5587 /* 5588 * The cwnd has grown beyond ssthresh we have 5589 * entered ca and completed our first Slowstart. 5590 */ 5591 rack->rc_initial_ss_comp = 1; 5592 } 5593 } 5594 5595 static void 5596 tcp_rack_partialack(struct tcpcb *tp) 5597 { 5598 struct tcp_rack *rack; 5599 5600 rack = (struct tcp_rack *)tp->t_fb_ptr; 5601 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5602 /* 5603 * If we are doing PRR and have enough 5604 * room to send <or> we are pacing and prr 5605 * is disabled we will want to see if we 5606 * can send data (by setting r_wanted_output to 5607 * true). 5608 */ 5609 if ((rack->r_ctl.rc_prr_sndcnt > 0) || 5610 rack->rack_no_prr) 5611 rack->r_wanted_output = 1; 5612 } 5613 5614 static void 5615 rack_exit_recovery(struct tcpcb *tp, struct tcp_rack *rack, int how) 5616 { 5617 /* 5618 * Now exit recovery. 5619 */ 5620 EXIT_RECOVERY(tp->t_flags); 5621 } 5622 5623 static void 5624 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack) 5625 { 5626 struct tcp_rack *rack; 5627 uint32_t orig_cwnd; 5628 5629 orig_cwnd = tp->snd_cwnd; 5630 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5631 rack = (struct tcp_rack *)tp->t_fb_ptr; 5632 /* only alert CC if we alerted when we entered */ 5633 if (CC_ALGO(tp)->post_recovery != NULL) { 5634 tp->t_ccv.curack = th_ack; 5635 CC_ALGO(tp)->post_recovery(&tp->t_ccv); 5636 if (tp->snd_cwnd < tp->snd_ssthresh) { 5637 /* 5638 * Rack has burst control and pacing 5639 * so lets not set this any lower than 5640 * snd_ssthresh per RFC-6582 (option 2). 5641 */ 5642 tp->snd_cwnd = tp->snd_ssthresh; 5643 } 5644 } 5645 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5646 union tcp_log_stackspecific log; 5647 struct timeval tv; 5648 5649 memset(&log, 0, sizeof(log)); 5650 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5651 log.u_bbr.flex1 = th_ack; 5652 log.u_bbr.flex2 = tp->t_ccv.flags; 5653 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 5654 log.u_bbr.flex4 = tp->t_ccv.nsegs; 5655 log.u_bbr.flex5 = V_tcp_abc_l_var; 5656 log.u_bbr.flex6 = orig_cwnd; 5657 log.u_bbr.flex7 = V_tcp_do_newsack; 5658 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 5659 log.u_bbr.flex8 = 2; 5660 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5661 0, &log, false, NULL, __func__, __LINE__, &tv); 5662 } 5663 if ((rack->rack_no_prr == 0) && 5664 (rack->no_prr_addback == 0) && 5665 (rack->r_ctl.rc_prr_sndcnt > 0)) { 5666 /* 5667 * Suck the next prr cnt back into cwnd, but 5668 * only do that if we are not application limited. 5669 */ 5670 if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) { 5671 /* 5672 * We are allowed to add back to the cwnd the amount we did 5673 * not get out if: 5674 * a) no_prr_addback is off. 5675 * b) we are not app limited 5676 * c) we are doing prr 5677 * <and> 5678 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 
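 *
 * Example with assumed values: maxseg = 1448, rack_prr_addbackmax
 * set to 2 and rc_prr_sndcnt = 5000 bytes still pending; the
 * addition below is min(2 * 1448, 5000) = 2896 bytes of cwnd,
 * after which rc_prr_sndcnt is cleared.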
5679 */ 5680 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 5681 rack->r_ctl.rc_prr_sndcnt); 5682 } 5683 rack->r_ctl.rc_prr_sndcnt = 0; 5684 rack_log_to_prr(rack, 1, 0, __LINE__); 5685 } 5686 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 5687 tp->snd_recover = tp->snd_una; 5688 if (rack->r_ctl.dsack_persist) { 5689 rack->r_ctl.dsack_persist--; 5690 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 5691 rack->r_ctl.num_dsack = 0; 5692 } 5693 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 5694 } 5695 if (rack->rto_from_rec == 1) { 5696 rack->rto_from_rec = 0; 5697 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) 5698 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; 5699 } 5700 rack_exit_recovery(tp, rack, 1); 5701 } 5702 5703 static void 5704 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line) 5705 { 5706 struct tcp_rack *rack; 5707 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 5708 5709 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5710 #ifdef STATS 5711 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 5712 #endif 5713 if (IN_RECOVERY(tp->t_flags) == 0) { 5714 in_rec_at_entry = 0; 5715 ssthresh_enter = tp->snd_ssthresh; 5716 cwnd_enter = tp->snd_cwnd; 5717 } else 5718 in_rec_at_entry = 1; 5719 rack = (struct tcp_rack *)tp->t_fb_ptr; 5720 switch (type) { 5721 case CC_NDUPACK: 5722 tp->t_flags &= ~TF_WASFRECOVERY; 5723 tp->t_flags &= ~TF_WASCRECOVERY; 5724 if (!IN_FASTRECOVERY(tp->t_flags)) { 5725 /* Check if this is the end of the initial Start-up i.e. initial slow-start */ 5726 if (rack->rc_initial_ss_comp == 0) { 5727 /* Yep it is the end of the initial slowstart */ 5728 rack->rc_initial_ss_comp = 1; 5729 } 5730 rack->r_ctl.rc_prr_delivered = 0; 5731 rack->r_ctl.rc_prr_out = 0; 5732 rack->r_fast_output = 0; 5733 if (rack->rack_no_prr == 0) { 5734 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 5735 rack_log_to_prr(rack, 2, in_rec_at_entry, line); 5736 } 5737 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 5738 tp->snd_recover = tp->snd_max; 5739 if (tp->t_flags2 & TF2_ECN_PERMIT) 5740 tp->t_flags2 |= TF2_ECN_SND_CWR; 5741 } 5742 break; 5743 case CC_ECN: 5744 if (!IN_CONGRECOVERY(tp->t_flags) || 5745 /* 5746 * Allow ECN reaction on ACK to CWR, if 5747 * that data segment was also CE marked. 5748 */ 5749 SEQ_GEQ(ack, tp->snd_recover)) { 5750 EXIT_CONGRECOVERY(tp->t_flags); 5751 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 5752 rack->r_fast_output = 0; 5753 tp->snd_recover = tp->snd_max + 1; 5754 if (tp->t_flags2 & TF2_ECN_PERMIT) 5755 tp->t_flags2 |= TF2_ECN_SND_CWR; 5756 } 5757 break; 5758 case CC_RTO: 5759 tp->t_dupacks = 0; 5760 tp->t_bytes_acked = 0; 5761 rack->r_fast_output = 0; 5762 if (IN_RECOVERY(tp->t_flags)) 5763 rack_exit_recovery(tp, rack, 2); 5764 orig_cwnd = tp->snd_cwnd; 5765 rack_log_to_prr(rack, 16, orig_cwnd, line); 5766 if (CC_ALGO(tp)->cong_signal == NULL) { 5767 /* TSNH */ 5768 tp->snd_ssthresh = max(2, 5769 min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 5770 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 5771 tp->snd_cwnd = ctf_fixed_maxseg(tp); 5772 } 5773 if (tp->t_flags2 & TF2_ECN_PERMIT) 5774 tp->t_flags2 |= TF2_ECN_SND_CWR; 5775 break; 5776 case CC_RTO_ERR: 5777 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 5778 /* RTO was unnecessary, so reset everything. 
*/ 5779 tp->snd_cwnd = tp->snd_cwnd_prev; 5780 tp->snd_ssthresh = tp->snd_ssthresh_prev; 5781 tp->snd_recover = tp->snd_recover_prev; 5782 if (tp->t_flags & TF_WASFRECOVERY) { 5783 ENTER_FASTRECOVERY(tp->t_flags); 5784 tp->t_flags &= ~TF_WASFRECOVERY; 5785 } 5786 if (tp->t_flags & TF_WASCRECOVERY) { 5787 ENTER_CONGRECOVERY(tp->t_flags); 5788 tp->t_flags &= ~TF_WASCRECOVERY; 5789 } 5790 tp->snd_nxt = tp->snd_max; 5791 tp->t_badrxtwin = 0; 5792 break; 5793 } 5794 if ((CC_ALGO(tp)->cong_signal != NULL) && 5795 (type != CC_RTO)){ 5796 tp->t_ccv.curack = ack; 5797 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type); 5798 } 5799 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 5800 rack_log_to_prr(rack, 15, cwnd_enter, line); 5801 rack->r_ctl.dsack_byte_cnt = 0; 5802 rack->r_ctl.retran_during_recovery = 0; 5803 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 5804 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 5805 rack->r_ent_rec_ns = 1; 5806 } 5807 } 5808 5809 static inline void 5810 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 5811 { 5812 uint32_t i_cwnd; 5813 5814 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5815 5816 if (CC_ALGO(tp)->after_idle != NULL) 5817 CC_ALGO(tp)->after_idle(&tp->t_ccv); 5818 5819 if (tp->snd_cwnd == 1) 5820 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 5821 else 5822 i_cwnd = rc_init_window(rack); 5823 5824 /* 5825 * Being idle is no different than the initial window. If the cc 5826 * clamps it down below the initial window raise it to the initial 5827 * window. 5828 */ 5829 if (tp->snd_cwnd < i_cwnd) { 5830 tp->snd_cwnd = i_cwnd; 5831 } 5832 } 5833 5834 /* 5835 * Indicate whether this ack should be delayed. We can delay the ack if 5836 * following conditions are met: 5837 * - There is no delayed ack timer in progress. 5838 * - Our last ack wasn't a 0-sized window. We never want to delay 5839 * the ack that opens up a 0-sized window. 5840 * - LRO wasn't used for this segment. We make sure by checking that the 5841 * segment size is not larger than the MSS. 5842 * - Delayed acks are enabled or this is a half-synchronized T/TCP 5843 * connection. 5844 */ 5845 #define DELAY_ACK(tp, tlen) \ 5846 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 5847 ((tp->t_flags & TF_DELACK) == 0) && \ 5848 (tlen <= tp->t_maxseg) && \ 5849 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 5850 5851 static struct rack_sendmap * 5852 rack_find_lowest_rsm(struct tcp_rack *rack) 5853 { 5854 struct rack_sendmap *rsm; 5855 5856 /* 5857 * Walk the time-order transmitted list looking for an rsm that is 5858 * not acked. This will be the one that was sent the longest time 5859 * ago that is still outstanding. 5860 */ 5861 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 5862 if (rsm->r_flags & RACK_ACKED) { 5863 continue; 5864 } 5865 goto finish; 5866 } 5867 finish: 5868 return (rsm); 5869 } 5870 5871 static struct rack_sendmap * 5872 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm) 5873 { 5874 struct rack_sendmap *prsm; 5875 5876 /* 5877 * Walk the sequence order list backward until we hit and arrive at 5878 * the highest seq not acked. In theory when this is called it 5879 * should be the last segment (which it was not). 
5880 	 */
5881 	prsm = rsm;
5882 
5883 	TQHASH_FOREACH_REVERSE_FROM(prsm, rack->r_ctl.tqh) {
5884 		if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
5885 			continue;
5886 		}
5887 		return (prsm);
5888 	}
5889 	return (NULL);
5890 }
5891 
5892 static uint32_t
5893 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts, int line, int log_allowed)
5894 {
5895 	int32_t lro;
5896 	uint32_t thresh;
5897 
5898 	/*
5899 	 * lro is the flag we use to track whether we have seen reordering.
5900 	 * Once it gets set we have seen reordering. The reorder logic
5901 	 * works in one of two ways:
5902 	 *
5903 	 * If reorder-fade is configured, then we track the last time we saw
5904 	 * re-ordering occur. If we reach the point where enough time has
5905 	 * passed we no longer consider reordering as occurring.
5906 	 *
5907 	 * Or if reorder-fade is 0, then once we see reordering we consider
5908 	 * the connection to always be subject to reordering and just set lro
5909 	 * to 1.
5910 	 *
5911 	 * In the end if lro is non-zero we add the extra time for
5912 	 * reordering in.
5913 	 */
5914 	if (srtt == 0)
5915 		srtt = 1;
5916 	if (rack->r_ctl.rc_reorder_ts) {
5917 		if (rack->r_ctl.rc_reorder_fade) {
5918 			if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
5919 				lro = cts - rack->r_ctl.rc_reorder_ts;
5920 				if (lro == 0) {
5921 					/*
5922 					 * No time has passed since the last
5923 					 * reorder, mark it as reordering.
5924 					 */
5925 					lro = 1;
5926 				}
5927 			} else {
5928 				/* Negative time? */
5929 				lro = 0;
5930 			}
5931 			if (lro > rack->r_ctl.rc_reorder_fade) {
5932 				/* Turn off reordering seen too */
5933 				rack->r_ctl.rc_reorder_ts = 0;
5934 				lro = 0;
5935 			}
5936 		} else {
5937 			/* Reordering does not fade */
5938 			lro = 1;
5939 		}
5940 	} else {
5941 		lro = 0;
5942 	}
5943 	if (rack->rc_rack_tmr_std_based == 0) {
5944 		thresh = srtt + rack->r_ctl.rc_pkt_delay;
5945 	} else {
5946 		/* Standards based pkt-delay is 1/4 srtt */
5947 		thresh = srtt + (srtt >> 2);
5948 	}
5949 	if (lro && (rack->rc_rack_tmr_std_based == 0)) {
5950 		/* The reorder shift must be set, if not you get 1/4 rtt */
5951 		if (rack->r_ctl.rc_reorder_shift)
5952 			thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
5953 		else
5954 			thresh += (srtt >> 2);
5955 	}
5956 	if (rack->rc_rack_use_dsack &&
5957 	    lro &&
5958 	    (rack->r_ctl.num_dsack > 0)) {
5959 		/*
5960 		 * We only increase the reordering window if we
5961 		 * have seen reordering <and> we have a DSACK count.
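 *
 * Worked example (illustrative values): srtt = 40000 usecs, the
 * standards-based timer off, reordering currently seen, a reorder
 * shift of 2 assumed and one recent DSACK with rc_rack_use_dsack
 * on:
 *     thresh = 40000 + rc_pkt_delay            (base)
 *            + (40000 >> 2) = +10000           (reorder pad)
 *            + 1 * (40000 >> 2) = +10000       (DSACK widening)
 * and the result is then clamped below to 2 * srtt and to
 * rack_rto_max.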
5962 */ 5963 thresh += rack->r_ctl.num_dsack * (srtt >> 2); 5964 if (log_allowed) 5965 rack_log_dsack_event(rack, 4, line, srtt, thresh); 5966 } 5967 /* SRTT * 2 is the ceiling */ 5968 if (thresh > (srtt * 2)) { 5969 thresh = srtt * 2; 5970 } 5971 /* And we don't want it above the RTO max either */ 5972 if (thresh > rack_rto_max) { 5973 thresh = rack_rto_max; 5974 } 5975 if (log_allowed) 5976 rack_log_dsack_event(rack, 6, line, srtt, thresh); 5977 return (thresh); 5978 } 5979 5980 static uint32_t 5981 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 5982 struct rack_sendmap *rsm, uint32_t srtt) 5983 { 5984 struct rack_sendmap *prsm; 5985 uint32_t thresh, len; 5986 int segsiz; 5987 5988 if (srtt == 0) 5989 srtt = 1; 5990 if (rack->r_ctl.rc_tlp_threshold) 5991 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 5992 else 5993 thresh = (srtt * 2); 5994 5995 /* Get the previous sent packet, if any */ 5996 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 5997 len = rsm->r_end - rsm->r_start; 5998 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 5999 /* Exactly like the ID */ 6000 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { 6001 uint32_t alt_thresh; 6002 /* 6003 * Compensate for delayed-ack with the d-ack time. 6004 */ 6005 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6006 if (alt_thresh > thresh) 6007 thresh = alt_thresh; 6008 } 6009 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 6010 /* 2.1 behavior */ 6011 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 6012 if (prsm && (len <= segsiz)) { 6013 /* 6014 * Two packets outstanding, thresh should be (2*srtt) + 6015 * possible inter-packet delay (if any). 6016 */ 6017 uint32_t inter_gap = 0; 6018 int idx, nidx; 6019 6020 idx = rsm->r_rtr_cnt - 1; 6021 nidx = prsm->r_rtr_cnt - 1; 6022 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 6023 /* Yes it was sent later (or at the same time) */ 6024 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 6025 } 6026 thresh += inter_gap; 6027 } else if (len <= segsiz) { 6028 /* 6029 * Possibly compensate for delayed-ack. 6030 */ 6031 uint32_t alt_thresh; 6032 6033 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6034 if (alt_thresh > thresh) 6035 thresh = alt_thresh; 6036 } 6037 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 6038 /* 2.2 behavior */ 6039 if (len <= segsiz) { 6040 uint32_t alt_thresh; 6041 /* 6042 * Compensate for delayed-ack with the d-ack time. 6043 */ 6044 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6045 if (alt_thresh > thresh) 6046 thresh = alt_thresh; 6047 } 6048 } 6049 /* Not above an RTO */ 6050 if (thresh > tp->t_rxtcur) { 6051 thresh = tp->t_rxtcur; 6052 } 6053 /* Not above a RTO max */ 6054 if (thresh > rack_rto_max) { 6055 thresh = rack_rto_max; 6056 } 6057 /* Apply user supplied min TLP */ 6058 if (thresh < rack_tlp_min) { 6059 thresh = rack_tlp_min; 6060 } 6061 return (thresh); 6062 } 6063 6064 static uint32_t 6065 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 6066 { 6067 /* 6068 * We want the rack_rtt which is the 6069 * last rtt we measured. However if that 6070 * does not exist we fallback to the srtt (which 6071 * we probably will never do) and then as a last 6072 * resort we use RACK_INITIAL_RTO if no srtt is 6073 * yet set. 
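 * So the precedence is: the last measured rack RTT, then the
 * smoothed t_srtt, then the RACK_INITIAL_RTO constant. That value
 * is what rack_check_recovery_mode() and rack_timer_start() below
 * feed into rack_calc_thresh_rack(), and it can also replace the
 * TLP srtt when rack_tlp_use_greater is set.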
6074 */ 6075 if (rack->rc_rack_rtt) 6076 return (rack->rc_rack_rtt); 6077 else if (tp->t_srtt == 0) 6078 return (RACK_INITIAL_RTO); 6079 return (tp->t_srtt); 6080 } 6081 6082 static struct rack_sendmap * 6083 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 6084 { 6085 /* 6086 * Check to see that we don't need to fall into recovery. We will 6087 * need to do so if our oldest transmit is past the time we should 6088 * have had an ack. 6089 */ 6090 struct tcp_rack *rack; 6091 struct rack_sendmap *rsm; 6092 int32_t idx; 6093 uint32_t srtt, thresh; 6094 6095 rack = (struct tcp_rack *)tp->t_fb_ptr; 6096 if (tqhash_empty(rack->r_ctl.tqh)) { 6097 return (NULL); 6098 } 6099 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6100 if (rsm == NULL) 6101 return (NULL); 6102 6103 6104 if (rsm->r_flags & RACK_ACKED) { 6105 rsm = rack_find_lowest_rsm(rack); 6106 if (rsm == NULL) 6107 return (NULL); 6108 } 6109 idx = rsm->r_rtr_cnt - 1; 6110 srtt = rack_grab_rtt(tp, rack); 6111 thresh = rack_calc_thresh_rack(rack, srtt, tsused, __LINE__, 1); 6112 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 6113 return (NULL); 6114 } 6115 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 6116 return (NULL); 6117 } 6118 /* Ok if we reach here we are over-due and this guy can be sent */ 6119 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 6120 return (rsm); 6121 } 6122 6123 static uint32_t 6124 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 6125 { 6126 int32_t t; 6127 int32_t tt; 6128 uint32_t ret_val; 6129 6130 t = (tp->t_srtt + (tp->t_rttvar << 2)); 6131 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 6132 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 6133 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 6134 ret_val = (uint32_t)tt; 6135 return (ret_val); 6136 } 6137 6138 static uint32_t 6139 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 6140 { 6141 /* 6142 * Start the FR timer, we do this based on getting the first one in 6143 * the rc_tmap. Note that if its NULL we must stop the timer. in all 6144 * events we need to stop the running timer (if its running) before 6145 * starting the new one. 6146 */ 6147 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 6148 uint32_t srtt_cur; 6149 int32_t idx; 6150 int32_t is_tlp_timer = 0; 6151 struct rack_sendmap *rsm; 6152 6153 if (rack->t_timers_stopped) { 6154 /* All timers have been stopped none are to run */ 6155 return (0); 6156 } 6157 if (rack->rc_in_persist) { 6158 /* We can't start any timer in persists */ 6159 return (rack_get_persists_timer_val(tp, rack)); 6160 } 6161 rack->rc_on_min_to = 0; 6162 if ((tp->t_state < TCPS_ESTABLISHED) || 6163 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 6164 goto activate_rxt; 6165 } 6166 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6167 if ((rsm == NULL) || sup_rack) { 6168 /* Nothing on the send map or no rack */ 6169 activate_rxt: 6170 time_since_sent = 0; 6171 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6172 if (rsm) { 6173 /* 6174 * Should we discount the RTX timer any? 6175 * 6176 * We want to discount it the smallest amount. 6177 * If a timer (Rack/TLP or RXT) has gone off more 6178 * recently thats the discount we want to use (now - timer time). 6179 * If the retransmit of the oldest packet was more recent then 6180 * we want to use that (now - oldest-packet-last_transmit_time). 
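 *
 * Sketch with assumed numbers: t_rxtcur = 400000 usecs and the
 * more recent of (last RACK/TLP/RXT timer event, the oldest rsm's
 * last transmit) was 150000 usecs ago; the retransmit timer below
 * is armed for 400000 - 150000 = 250000 usecs. If the elapsed
 * time already exceeds t_rxtcur we fall back to rc_min_to.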
6181 * 6182 */ 6183 idx = rsm->r_rtr_cnt - 1; 6184 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 6185 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 6186 else 6187 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 6188 if (TSTMP_GT(cts, tstmp_touse)) 6189 time_since_sent = cts - tstmp_touse; 6190 } 6191 if (SEQ_LT(tp->snd_una, tp->snd_max) || 6192 sbavail(&tptosocket(tp)->so_snd)) { 6193 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 6194 to = tp->t_rxtcur; 6195 if (to > time_since_sent) 6196 to -= time_since_sent; 6197 else 6198 to = rack->r_ctl.rc_min_to; 6199 if (to == 0) 6200 to = 1; 6201 /* Special case for KEEPINIT */ 6202 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6203 (TP_KEEPINIT(tp) != 0) && 6204 rsm) { 6205 /* 6206 * We have to put a ceiling on the rxt timer 6207 * of the keep-init timeout. 6208 */ 6209 uint32_t max_time, red; 6210 6211 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 6212 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 6213 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 6214 if (red < max_time) 6215 max_time -= red; 6216 else 6217 max_time = 1; 6218 } 6219 /* Reduce timeout to the keep value if needed */ 6220 if (max_time < to) 6221 to = max_time; 6222 } 6223 return (to); 6224 } 6225 return (0); 6226 } 6227 if (rsm->r_flags & RACK_ACKED) { 6228 rsm = rack_find_lowest_rsm(rack); 6229 if (rsm == NULL) { 6230 /* No lowest? */ 6231 goto activate_rxt; 6232 } 6233 } 6234 /* Convert from ms to usecs */ 6235 if ((rsm->r_flags & RACK_SACK_PASSED) || 6236 (rsm->r_flags & RACK_RWND_COLLAPSED) || 6237 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 6238 if ((tp->t_flags & TF_SENTFIN) && 6239 ((tp->snd_max - tp->snd_una) == 1) && 6240 (rsm->r_flags & RACK_HAS_FIN)) { 6241 /* 6242 * We don't start a rack timer if all we have is a 6243 * FIN outstanding. 6244 */ 6245 goto activate_rxt; 6246 } 6247 if ((rack->use_rack_rr == 0) && 6248 (IN_FASTRECOVERY(tp->t_flags)) && 6249 (rack->rack_no_prr == 0) && 6250 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 6251 /* 6252 * We are not cheating, in recovery and 6253 * not enough ack's to yet get our next 6254 * retransmission out. 6255 * 6256 * Note that classified attackers do not 6257 * get to use the rack-cheat. 6258 */ 6259 goto activate_tlp; 6260 } 6261 srtt = rack_grab_rtt(tp, rack); 6262 thresh = rack_calc_thresh_rack(rack, srtt, cts, __LINE__, 1); 6263 idx = rsm->r_rtr_cnt - 1; 6264 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 6265 if (SEQ_GEQ(exp, cts)) { 6266 to = exp - cts; 6267 if (to < rack->r_ctl.rc_min_to) { 6268 to = rack->r_ctl.rc_min_to; 6269 if (rack->r_rr_config == 3) 6270 rack->rc_on_min_to = 1; 6271 } 6272 } else { 6273 to = rack->r_ctl.rc_min_to; 6274 if (rack->r_rr_config == 3) 6275 rack->rc_on_min_to = 1; 6276 } 6277 } else { 6278 /* Ok we need to do a TLP not RACK */ 6279 activate_tlp: 6280 if ((rack->rc_tlp_in_progress != 0) && 6281 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 6282 /* 6283 * The previous send was a TLP and we have sent 6284 * N TLP's without sending new data. 6285 */ 6286 goto activate_rxt; 6287 } 6288 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 6289 if (rsm == NULL) { 6290 /* We found no rsm to TLP with. 
*/ 6291 goto activate_rxt; 6292 } 6293 if (rsm->r_flags & RACK_HAS_FIN) { 6294 /* If its a FIN we dont do TLP */ 6295 rsm = NULL; 6296 goto activate_rxt; 6297 } 6298 idx = rsm->r_rtr_cnt - 1; 6299 time_since_sent = 0; 6300 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 6301 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 6302 else 6303 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 6304 if (TSTMP_GT(cts, tstmp_touse)) 6305 time_since_sent = cts - tstmp_touse; 6306 is_tlp_timer = 1; 6307 if (tp->t_srtt) { 6308 if ((rack->rc_srtt_measure_made == 0) && 6309 (tp->t_srtt == 1)) { 6310 /* 6311 * If another stack as run and set srtt to 1, 6312 * then the srtt was 0, so lets use the initial. 6313 */ 6314 srtt = RACK_INITIAL_RTO; 6315 } else { 6316 srtt_cur = tp->t_srtt; 6317 srtt = srtt_cur; 6318 } 6319 } else 6320 srtt = RACK_INITIAL_RTO; 6321 /* 6322 * If the SRTT is not keeping up and the 6323 * rack RTT has spiked we want to use 6324 * the last RTT not the smoothed one. 6325 */ 6326 if (rack_tlp_use_greater && 6327 tp->t_srtt && 6328 (srtt < rack_grab_rtt(tp, rack))) { 6329 srtt = rack_grab_rtt(tp, rack); 6330 } 6331 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 6332 if (thresh > time_since_sent) { 6333 to = thresh - time_since_sent; 6334 } else { 6335 to = rack->r_ctl.rc_min_to; 6336 rack_log_alt_to_to_cancel(rack, 6337 thresh, /* flex1 */ 6338 time_since_sent, /* flex2 */ 6339 tstmp_touse, /* flex3 */ 6340 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 6341 (uint32_t)rsm->r_tim_lastsent[idx], 6342 srtt, 6343 idx, 99); 6344 } 6345 if (to < rack_tlp_min) { 6346 to = rack_tlp_min; 6347 } 6348 if (to > TICKS_2_USEC(tcp_rexmit_max)) { 6349 /* 6350 * If the TLP time works out to larger than the max 6351 * RTO lets not do TLP.. just RTO. 6352 */ 6353 goto activate_rxt; 6354 } 6355 } 6356 if (is_tlp_timer == 0) { 6357 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 6358 } else { 6359 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 6360 } 6361 if (to == 0) 6362 to = 1; 6363 return (to); 6364 } 6365 6366 static void 6367 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, tcp_seq snd_una) 6368 { 6369 if (rack->rc_in_persist == 0) { 6370 if (tp->t_flags & TF_GPUTINPROG) { 6371 /* 6372 * Stop the goodput now, the calling of the 6373 * measurement function clears the flag. 
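 * (The call below passes RACK_QUALITY_PERSIST, so the sample is
 * tagged as having been cut short by entering persist rather than
 * being a normal full-window measurement, and TF_GPUTINPROG is
 * cleared inside rack_do_goodput_measurement().)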
6374 */ 6375 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, 6376 RACK_QUALITY_PERSIST); 6377 } 6378 #ifdef NETFLIX_SHARED_CWND 6379 if (rack->r_ctl.rc_scw) { 6380 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 6381 rack->rack_scwnd_is_idle = 1; 6382 } 6383 #endif 6384 rack->r_ctl.rc_went_idle_time = cts; 6385 if (rack->r_ctl.rc_went_idle_time == 0) 6386 rack->r_ctl.rc_went_idle_time = 1; 6387 if (rack->lt_bw_up) { 6388 /* Suspend our LT BW measurement */ 6389 uint64_t tmark; 6390 6391 rack->r_ctl.lt_bw_bytes += (snd_una - rack->r_ctl.lt_seq); 6392 rack->r_ctl.lt_seq = snd_una; 6393 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 6394 if (tmark >= rack->r_ctl.lt_timemark) { 6395 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 6396 } 6397 rack->r_ctl.lt_timemark = tmark; 6398 rack->lt_bw_up = 0; 6399 rack->r_persist_lt_bw_off = 1; 6400 } 6401 rack_timer_cancel(tp, rack, cts, __LINE__); 6402 rack->r_ctl.persist_lost_ends = 0; 6403 rack->probe_not_answered = 0; 6404 rack->forced_ack = 0; 6405 tp->t_rxtshift = 0; 6406 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 6407 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 6408 rack->rc_in_persist = 1; 6409 } 6410 } 6411 6412 static void 6413 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6414 { 6415 if (tcp_in_hpts(rack->rc_tp)) { 6416 tcp_hpts_remove(rack->rc_tp); 6417 rack->r_ctl.rc_hpts_flags = 0; 6418 } 6419 #ifdef NETFLIX_SHARED_CWND 6420 if (rack->r_ctl.rc_scw) { 6421 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 6422 rack->rack_scwnd_is_idle = 0; 6423 } 6424 #endif 6425 if (rack->rc_gp_dyn_mul && 6426 (rack->use_fixed_rate == 0) && 6427 (rack->rc_always_pace)) { 6428 /* 6429 * Do we count this as if a probe-rtt just 6430 * finished? 6431 */ 6432 uint32_t time_idle, idle_min; 6433 6434 time_idle = cts - rack->r_ctl.rc_went_idle_time; 6435 idle_min = rack_min_probertt_hold; 6436 if (rack_probertt_gpsrtt_cnt_div) { 6437 uint64_t extra; 6438 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 6439 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 6440 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 6441 idle_min += (uint32_t)extra; 6442 } 6443 if (time_idle >= idle_min) { 6444 /* Yes, we count it as a probe-rtt. 
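 * Example with purely illustrative values: if rack_min_probertt_hold
 * were 200000us, rc_gp_srtt 40000us and the cnt_mul/cnt_div pair 4/4,
 * then idle_min = 200000 + (40000 * 4) / 4 = 240000us, and any idle
 * span of at least that length is treated as having served as a
 * probe-rtt.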
*/ 6445 uint32_t us_cts; 6446 6447 us_cts = tcp_get_usecs(NULL); 6448 if (rack->in_probe_rtt == 0) { 6449 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 6450 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 6451 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 6452 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 6453 } else { 6454 rack_exit_probertt(rack, us_cts); 6455 } 6456 } 6457 } 6458 if (rack->r_persist_lt_bw_off) { 6459 /* Continue where we left off */ 6460 rack->r_ctl.lt_timemark = tcp_get_u64_usecs(NULL); 6461 rack->lt_bw_up = 1; 6462 rack->r_persist_lt_bw_off = 0; 6463 } 6464 rack->rc_in_persist = 0; 6465 rack->r_ctl.rc_went_idle_time = 0; 6466 tp->t_rxtshift = 0; 6467 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 6468 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 6469 rack->r_ctl.rc_agg_delayed = 0; 6470 rack->r_early = 0; 6471 rack->r_late = 0; 6472 rack->r_ctl.rc_agg_early = 0; 6473 } 6474 6475 static void 6476 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 6477 struct hpts_diag *diag, struct timeval *tv) 6478 { 6479 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 6480 union tcp_log_stackspecific log; 6481 6482 memset(&log, 0, sizeof(log)); 6483 log.u_bbr.flex1 = diag->p_nxt_slot; 6484 log.u_bbr.flex2 = diag->p_cur_slot; 6485 log.u_bbr.flex3 = diag->slot_req; 6486 log.u_bbr.flex4 = diag->inp_hptsslot; 6487 log.u_bbr.flex5 = diag->slot_remaining; 6488 log.u_bbr.flex6 = diag->need_new_to; 6489 log.u_bbr.flex7 = diag->p_hpts_active; 6490 log.u_bbr.flex8 = diag->p_on_min_sleep; 6491 /* Hijack other fields as needed */ 6492 log.u_bbr.epoch = diag->have_slept; 6493 log.u_bbr.lt_epoch = diag->yet_to_sleep; 6494 log.u_bbr.pkts_out = diag->co_ret; 6495 log.u_bbr.applimited = diag->hpts_sleep_time; 6496 log.u_bbr.delivered = diag->p_prev_slot; 6497 log.u_bbr.inflight = diag->p_runningslot; 6498 log.u_bbr.bw_inuse = diag->wheel_slot; 6499 log.u_bbr.rttProp = diag->wheel_cts; 6500 log.u_bbr.timeStamp = cts; 6501 log.u_bbr.delRate = diag->maxslots; 6502 log.u_bbr.cur_del_rate = diag->p_curtick; 6503 log.u_bbr.cur_del_rate <<= 32; 6504 log.u_bbr.cur_del_rate |= diag->p_lasttick; 6505 TCP_LOG_EVENTP(rack->rc_tp, NULL, 6506 &rack->rc_inp->inp_socket->so_rcv, 6507 &rack->rc_inp->inp_socket->so_snd, 6508 BBR_LOG_HPTSDIAG, 0, 6509 0, &log, false, tv); 6510 } 6511 6512 } 6513 6514 static void 6515 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 6516 { 6517 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 6518 union tcp_log_stackspecific log; 6519 struct timeval tv; 6520 6521 memset(&log, 0, sizeof(log)); 6522 log.u_bbr.flex1 = sb->sb_flags; 6523 log.u_bbr.flex2 = len; 6524 log.u_bbr.flex3 = sb->sb_state; 6525 log.u_bbr.flex8 = type; 6526 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 6527 TCP_LOG_EVENTP(rack->rc_tp, NULL, 6528 &rack->rc_inp->inp_socket->so_rcv, 6529 &rack->rc_inp->inp_socket->so_snd, 6530 TCP_LOG_SB_WAKE, 0, 6531 len, &log, false, &tv); 6532 } 6533 } 6534 6535 static void 6536 rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 6537 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 6538 { 6539 struct hpts_diag diag; 6540 struct inpcb *inp = tptoinpcb(tp); 6541 struct timeval tv; 6542 uint32_t delayed_ack = 0; 6543 uint32_t hpts_timeout; 6544 uint32_t entry_slot = slot; 6545 uint8_t stopped; 6546 uint32_t left = 0; 6547 uint32_t us_cts; 6548 6549 if ((tp->t_state == TCPS_CLOSED) || 6550 
(tp->t_state == TCPS_LISTEN)) { 6551 return; 6552 } 6553 if (tcp_in_hpts(tp)) { 6554 /* Already on the pacer */ 6555 return; 6556 } 6557 stopped = rack->rc_tmr_stopped; 6558 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 6559 left = rack->r_ctl.rc_timer_exp - cts; 6560 } 6561 rack->r_ctl.rc_timer_exp = 0; 6562 rack->r_ctl.rc_hpts_flags = 0; 6563 us_cts = tcp_get_usecs(&tv); 6564 /* Now early/late accounting */ 6565 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0); 6566 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { 6567 /* 6568 * We have a early carry over set, 6569 * we can always add more time so we 6570 * can always make this compensation. 6571 * 6572 * Note if ack's are allowed to wake us do not 6573 * penalize the next timer for being awoke 6574 * by an ack aka the rc_agg_early (non-paced mode). 6575 */ 6576 slot += rack->r_ctl.rc_agg_early; 6577 rack->r_early = 0; 6578 rack->r_ctl.rc_agg_early = 0; 6579 } 6580 if ((rack->r_late) && 6581 ((rack->r_use_hpts_min == 0) || (rack->dgp_on == 0))) { 6582 /* 6583 * This is harder, we can 6584 * compensate some but it 6585 * really depends on what 6586 * the current pacing time is. 6587 */ 6588 if (rack->r_ctl.rc_agg_delayed >= slot) { 6589 /* 6590 * We can't compensate for it all. 6591 * And we have to have some time 6592 * on the clock. We always have a min 6593 * 10 slots (10 x 10 i.e. 100 usecs). 6594 */ 6595 if (slot <= HPTS_TICKS_PER_SLOT) { 6596 /* We gain delay */ 6597 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); 6598 slot = HPTS_TICKS_PER_SLOT; 6599 } else { 6600 /* We take off some */ 6601 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); 6602 slot = HPTS_TICKS_PER_SLOT; 6603 } 6604 } else { 6605 slot -= rack->r_ctl.rc_agg_delayed; 6606 rack->r_ctl.rc_agg_delayed = 0; 6607 /* Make sure we have 100 useconds at minimum */ 6608 if (slot < HPTS_TICKS_PER_SLOT) { 6609 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; 6610 slot = HPTS_TICKS_PER_SLOT; 6611 } 6612 if (rack->r_ctl.rc_agg_delayed == 0) 6613 rack->r_late = 0; 6614 } 6615 } else if (rack->r_late) { 6616 /* r_use_hpts_min is on and so is DGP */ 6617 uint32_t max_red; 6618 6619 max_red = (slot * rack->r_ctl.max_reduction) / 100; 6620 if (max_red >= rack->r_ctl.rc_agg_delayed) { 6621 slot -= rack->r_ctl.rc_agg_delayed; 6622 rack->r_ctl.rc_agg_delayed = 0; 6623 } else { 6624 slot -= max_red; 6625 rack->r_ctl.rc_agg_delayed -= max_red; 6626 } 6627 } 6628 if ((rack->r_use_hpts_min == 1) && 6629 (slot > 0) && 6630 (rack->dgp_on == 1)) { 6631 /* 6632 * We are enforcing a min pacing timer 6633 * based on our hpts min timeout. 6634 */ 6635 uint32_t min; 6636 6637 min = get_hpts_min_sleep_time(); 6638 if (min > slot) { 6639 slot = min; 6640 } 6641 } 6642 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 6643 if (tp->t_flags & TF_DELACK) { 6644 delayed_ack = TICKS_2_USEC(tcp_delacktime); 6645 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 6646 } 6647 if (delayed_ack && ((hpts_timeout == 0) || 6648 (delayed_ack < hpts_timeout))) 6649 hpts_timeout = delayed_ack; 6650 else 6651 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 6652 /* 6653 * If no timers are going to run and we will fall off the hptsi 6654 * wheel, we resort to a keep-alive timer if its configured. 
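 * That is, when both hpts_timeout and slot work out to zero below and
 * keep-alive is enabled (SO_KEEPALIVE or V_tcp_always_keepalive), the
 * keep-alive interval becomes the fallback: TP_KEEPIDLE() once the
 * connection is established, TP_KEEPINIT() during setup.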
6655 */ 6656 if ((hpts_timeout == 0) && 6657 (slot == 0)) { 6658 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 6659 (tp->t_state <= TCPS_CLOSING)) { 6660 /* 6661 * Ok we have no timer (persists, rack, tlp, rxt or 6662 * del-ack), we don't have segments being paced. So 6663 * all that is left is the keepalive timer. 6664 */ 6665 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 6666 /* Get the established keep-alive time */ 6667 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 6668 } else { 6669 /* 6670 * Get the initial setup keep-alive time, 6671 * note that this is probably not going to 6672 * happen, since rack will be running a rxt timer 6673 * if a SYN of some sort is outstanding. It is 6674 * actually handled in rack_timeout_rxt(). 6675 */ 6676 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 6677 } 6678 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 6679 if (rack->in_probe_rtt) { 6680 /* 6681 * We want to instead not wake up a long time from 6682 * now but to wake up about the time we would 6683 * exit probe-rtt and initiate a keep-alive ack. 6684 * This will get us out of probe-rtt and update 6685 * our min-rtt. 6686 */ 6687 hpts_timeout = rack_min_probertt_hold; 6688 } 6689 } 6690 } 6691 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 6692 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 6693 /* 6694 * RACK, TLP, persists and RXT timers all are restartable 6695 * based on actions input .. i.e we received a packet (ack 6696 * or sack) and that changes things (rw, or snd_una etc). 6697 * Thus we can restart them with a new value. For 6698 * keep-alive, delayed_ack we keep track of what was left 6699 * and restart the timer with a smaller value. 6700 */ 6701 if (left < hpts_timeout) 6702 hpts_timeout = left; 6703 } 6704 if (hpts_timeout) { 6705 /* 6706 * Hack alert for now we can't time-out over 2,147,483 6707 * seconds (a bit more than 596 hours), which is probably ok 6708 * :). 6709 */ 6710 if (hpts_timeout > 0x7ffffffe) 6711 hpts_timeout = 0x7ffffffe; 6712 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 6713 } 6714 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); 6715 if ((rack->gp_ready == 0) && 6716 (rack->use_fixed_rate == 0) && 6717 (hpts_timeout < slot) && 6718 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 6719 /* 6720 * We have no good estimate yet for the 6721 * old clunky burst mitigation or the 6722 * real pacing. And the tlp or rxt is smaller 6723 * than the pacing calculation. Lets not 6724 * pace that long since we know the calculation 6725 * so far is not accurate. 6726 */ 6727 slot = hpts_timeout; 6728 } 6729 /** 6730 * Turn off all the flags for queuing by default. The 6731 * flags have important meanings to what happens when 6732 * LRO interacts with the transport. Most likely (by default now) 6733 * mbuf_queueing and ack compression are on. So the transport 6734 * has a couple of flags that control what happens (if those 6735 * are not on then these flags won't have any effect since it 6736 * won't go through the queuing LRO path). 6737 * 6738 * TF2_MBUF_QUEUE_READY - This flags says that I am busy 6739 * pacing output, so don't disturb. But 6740 * it also means LRO can wake me if there 6741 * is a SACK arrival. 6742 * 6743 * TF2_DONT_SACK_QUEUE - This flag is used in conjunction 6744 * with the above flag (QUEUE_READY) and 6745 * when present it says don't even wake me 6746 * if a SACK arrives. 
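 *                        Summary:
 *                          (neither flag)              LRO may call into us freely
 *                          MBUF_QUEUE_READY            only a SACK or our timer wakes us
 *                          MBUF_QUEUE_READY |
 *                          DONT_SACK_QUEUE             only our timer wakes us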
6747 * 6748 * The idea behind these flags is that if we are pacing we 6749 * set the MBUF_QUEUE_READY and only get woken up if 6750 * a SACK arrives (which could change things) or if 6751 * our pacing timer expires. If, however, we have a rack 6752 * timer running, then we don't even want a sack to wake 6753 * us since the rack timer has to expire before we can send. 6754 * 6755 * Other cases should usually have none of the flags set 6756 * so LRO can call into us. 6757 */ 6758 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE|TF2_MBUF_QUEUE_READY); 6759 if (slot) { 6760 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 6761 rack->r_ctl.rc_last_output_to = us_cts + slot; 6762 /* 6763 * A pacing timer (slot) is being set, in 6764 * such a case we cannot send (we are blocked by 6765 * the timer). So lets tell LRO that it should not 6766 * wake us unless there is a SACK. Note this only 6767 * will be effective if mbuf queueing is on or 6768 * compressed acks are being processed. 6769 */ 6770 tp->t_flags2 |= TF2_MBUF_QUEUE_READY; 6771 /* 6772 * But wait if we have a Rack timer running 6773 * even a SACK should not disturb us (with 6774 * the exception of r_rr_config 3). 6775 */ 6776 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) || 6777 (IN_RECOVERY(tp->t_flags))) { 6778 if (rack->r_rr_config != 3) 6779 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; 6780 else if (rack->rc_pace_dnd) { 6781 /* 6782 * When DND is on, we only let a sack 6783 * interrupt us if we are not in recovery. 6784 * 6785 * If DND is off, then we never hit here 6786 * and let all sacks wake us up. 6787 * 6788 */ 6789 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; 6790 } 6791 } 6792 if (rack->rc_ack_can_sendout_data) { 6793 /* 6794 * Ahh but wait, this is that special case 6795 * where the pacing timer can be disturbed 6796 * backout the changes (used for non-paced 6797 * burst limiting). 6798 */ 6799 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE | 6800 TF2_MBUF_QUEUE_READY); 6801 } 6802 if ((rack->use_rack_rr) && 6803 (rack->r_rr_config < 2) && 6804 ((hpts_timeout) && (hpts_timeout < slot))) { 6805 /* 6806 * Arrange for the hpts to kick back in after the 6807 * t-o if the t-o does not cause a send. 6808 */ 6809 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout), 6810 __LINE__, &diag); 6811 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6812 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 6813 } else { 6814 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(slot), 6815 __LINE__, &diag); 6816 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6817 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 6818 } 6819 } else if (hpts_timeout) { 6820 /* 6821 * With respect to t_flags2(?) here, lets let any new acks wake 6822 * us up here. Since we are not pacing (no pacing timer), output 6823 * can happen so we should let it. If its a Rack timer, then any inbound 6824 * packet probably won't change the sending (we will be blocked) 6825 * but it may change the prr stats so letting it in (the set defaults 6826 * at the start of this block) are good enough. 
6827 */ 6828 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 6829 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout), 6830 __LINE__, &diag); 6831 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6832 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 6833 } else { 6834 /* No timer starting */ 6835 #ifdef INVARIANTS 6836 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 6837 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 6838 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 6839 } 6840 #endif 6841 } 6842 rack->rc_tmr_stopped = 0; 6843 if (slot) 6844 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv, __LINE__); 6845 } 6846 6847 static void 6848 rack_mark_lost(struct tcpcb *tp, 6849 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts) 6850 { 6851 struct rack_sendmap *nrsm; 6852 uint32_t thresh, exp; 6853 6854 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(tp, rack), cts, __LINE__, 0); 6855 nrsm = rsm; 6856 TAILQ_FOREACH_FROM(nrsm, &rack->r_ctl.rc_tmap, r_tnext) { 6857 if ((nrsm->r_flags & RACK_SACK_PASSED) == 0) { 6858 /* Got up to all that were marked sack-passed */ 6859 break; 6860 } 6861 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { 6862 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; 6863 if (TSTMP_LT(exp, cts) || (exp == cts)) { 6864 /* We now consider it lost */ 6865 nrsm->r_flags |= RACK_WAS_LOST; 6866 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; 6867 } else { 6868 /* Past here it won't be lost so stop */ 6869 break; 6870 } 6871 } 6872 } 6873 } 6874 6875 /* 6876 * RACK Timer, here we simply do logging and house keeping. 6877 * the normal rack_output() function will call the 6878 * appropriate thing to check if we need to do a RACK retransmit. 6879 * We return 1, saying don't proceed with rack_output only 6880 * when all timers have been stopped (destroyed PCB?). 6881 */ 6882 static int 6883 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6884 { 6885 /* 6886 * This timer simply provides an internal trigger to send out data. 6887 * The check_recovery_mode call will see if there are needed 6888 * retransmissions, if so we will enter fast-recovery. The output 6889 * call may or may not do the same thing depending on sysctl 6890 * settings. 6891 */ 6892 struct rack_sendmap *rsm; 6893 6894 counter_u64_add(rack_to_tot, 1); 6895 if (rack->r_state && (rack->r_state != tp->t_state)) 6896 rack_set_state(tp, rack); 6897 rack->rc_on_min_to = 0; 6898 rsm = rack_check_recovery_mode(tp, cts); 6899 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 6900 if (rsm) { 6901 /* We need to stroke any lost that are now declared as lost */ 6902 rack_mark_lost(tp, rack, rsm, cts); 6903 rack->r_ctl.rc_resend = rsm; 6904 rack->r_timer_override = 1; 6905 if (rack->use_rack_rr) { 6906 /* 6907 * Don't accumulate extra pacing delay 6908 * we are allowing the rack timer to 6909 * over-ride pacing i.e. rrr takes precedence 6910 * if the pacing interval is longer than the rrr 6911 * time (in other words we get the min pacing 6912 * time versus rrr pacing time). 
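 * Concretely, clearing PACE_PKT_OUTPUT below removes the pacing gate so
 * rack_output() can run at the rack timer expiry even if a longer
 * pacing slot was still pending.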
6913 */ 6914 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 6915 } 6916 } 6917 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 6918 if (rsm == NULL) { 6919 /* restart a timer and return 1 */ 6920 rack_start_hpts_timer(rack, tp, cts, 6921 0, 0, 0); 6922 return (1); 6923 } 6924 return (0); 6925 } 6926 6927 6928 6929 static void 6930 rack_adjust_orig_mlen(struct rack_sendmap *rsm) 6931 { 6932 6933 if ((M_TRAILINGROOM(rsm->m) != rsm->orig_t_space)) { 6934 /* 6935 * The trailing space changed, mbufs can grow 6936 * at the tail but they can't shrink from 6937 * it, KASSERT that. Adjust the orig_m_len to 6938 * compensate for this change. 6939 */ 6940 KASSERT((rsm->orig_t_space > M_TRAILINGROOM(rsm->m)), 6941 ("mbuf:%p rsm:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n", 6942 rsm->m, 6943 rsm, 6944 (intmax_t)M_TRAILINGROOM(rsm->m), 6945 rsm->orig_t_space, 6946 rsm->orig_m_len, 6947 rsm->m->m_len)); 6948 rsm->orig_m_len += (rsm->orig_t_space - M_TRAILINGROOM(rsm->m)); 6949 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 6950 } 6951 if (rsm->m->m_len < rsm->orig_m_len) { 6952 /* 6953 * Mbuf shrank, trimmed off the top by an ack, our 6954 * offset changes. 6955 */ 6956 KASSERT((rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)), 6957 ("mbuf:%p len:%u rsm:%p oml:%u soff:%u\n", 6958 rsm->m, rsm->m->m_len, 6959 rsm, rsm->orig_m_len, 6960 rsm->soff)); 6961 if (rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)) 6962 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); 6963 else 6964 rsm->soff = 0; 6965 rsm->orig_m_len = rsm->m->m_len; 6966 #ifdef INVARIANTS 6967 } else if (rsm->m->m_len > rsm->orig_m_len) { 6968 panic("rsm:%p m:%p m_len grew outside of t_space compensation", 6969 rsm, rsm->m); 6970 #endif 6971 } 6972 } 6973 6974 static void 6975 rack_setup_offset_for_rsm(struct tcp_rack *rack, struct rack_sendmap *src_rsm, struct rack_sendmap *rsm) 6976 { 6977 struct mbuf *m; 6978 uint32_t soff; 6979 6980 if (src_rsm->m && 6981 ((src_rsm->orig_m_len != src_rsm->m->m_len) || 6982 (M_TRAILINGROOM(src_rsm->m) != src_rsm->orig_t_space))) { 6983 /* Fix up the orig_m_len and possibly the mbuf offset */ 6984 rack_adjust_orig_mlen(src_rsm); 6985 } 6986 m = src_rsm->m; 6987 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); 6988 while (soff >= m->m_len) { 6989 /* Move out past this mbuf */ 6990 soff -= m->m_len; 6991 m = m->m_next; 6992 KASSERT((m != NULL), 6993 ("rsm:%p nrsm:%p hit at soff:%u null m", 6994 src_rsm, rsm, soff)); 6995 if (m == NULL) { 6996 /* This should *not* happen which is why there is a kassert */ 6997 src_rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 6998 (src_rsm->r_start - rack->rc_tp->snd_una), 6999 &src_rsm->soff); 7000 src_rsm->orig_m_len = src_rsm->m->m_len; 7001 src_rsm->orig_t_space = M_TRAILINGROOM(src_rsm->m); 7002 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 7003 (rsm->r_start - rack->rc_tp->snd_una), 7004 &rsm->soff); 7005 rsm->orig_m_len = rsm->m->m_len; 7006 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7007 return; 7008 } 7009 } 7010 rsm->m = m; 7011 rsm->soff = soff; 7012 rsm->orig_m_len = m->m_len; 7013 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7014 } 7015 7016 static __inline void 7017 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 7018 struct rack_sendmap *rsm, uint32_t start) 7019 { 7020 int idx; 7021 7022 nrsm->r_start = start; 7023 nrsm->r_end = rsm->r_end; 7024 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 7025 nrsm->r_act_rxt_cnt = rsm->r_act_rxt_cnt; 7026 nrsm->r_flags = rsm->r_flags; 7027 nrsm->r_dupack = rsm->r_dupack; 7028 nrsm->r_no_rtt_allowed = 
rsm->r_no_rtt_allowed; 7029 nrsm->r_rtr_bytes = 0; 7030 nrsm->r_fas = rsm->r_fas; 7031 nrsm->r_bas = rsm->r_bas; 7032 tqhash_update_end(rack->r_ctl.tqh, rsm, nrsm->r_start); 7033 nrsm->r_just_ret = rsm->r_just_ret; 7034 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 7035 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 7036 } 7037 /* Now if we have SYN flag we keep it on the left edge */ 7038 if (nrsm->r_flags & RACK_HAS_SYN) 7039 nrsm->r_flags &= ~RACK_HAS_SYN; 7040 /* Now if we have a FIN flag we keep it on the right edge */ 7041 if (rsm->r_flags & RACK_HAS_FIN) 7042 rsm->r_flags &= ~RACK_HAS_FIN; 7043 /* Push bit must go to the right edge as well */ 7044 if (rsm->r_flags & RACK_HAD_PUSH) 7045 rsm->r_flags &= ~RACK_HAD_PUSH; 7046 /* Update the count if app limited */ 7047 if (nrsm->r_flags & RACK_APP_LIMITED) 7048 rack->r_ctl.rc_app_limited_cnt++; 7049 /* Clone over the state of the hw_tls flag */ 7050 nrsm->r_hw_tls = rsm->r_hw_tls; 7051 /* 7052 * Now we need to find nrsm's new location in the mbuf chain 7053 * we basically calculate a new offset, which is soff + 7054 * how much is left in original rsm. Then we walk out the mbuf 7055 * chain to find the righ position, it may be the same mbuf 7056 * or maybe not. 7057 */ 7058 KASSERT(((rsm->m != NULL) || 7059 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 7060 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 7061 if (rsm->m) 7062 rack_setup_offset_for_rsm(rack, rsm, nrsm); 7063 } 7064 7065 static struct rack_sendmap * 7066 rack_merge_rsm(struct tcp_rack *rack, 7067 struct rack_sendmap *l_rsm, 7068 struct rack_sendmap *r_rsm) 7069 { 7070 /* 7071 * We are merging two ack'd RSM's, 7072 * the l_rsm is on the left (lower seq 7073 * values) and the r_rsm is on the right 7074 * (higher seq value). The simplest way 7075 * to merge these is to move the right 7076 * one into the left. I don't think there 7077 * is any reason we need to try to find 7078 * the oldest (or last oldest retransmitted). 7079 */ 7080 rack_log_map_chg(rack->rc_tp, rack, NULL, 7081 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 7082 tqhash_update_end(rack->r_ctl.tqh, l_rsm, r_rsm->r_end); 7083 if (l_rsm->r_dupack < r_rsm->r_dupack) 7084 l_rsm->r_dupack = r_rsm->r_dupack; 7085 if (r_rsm->r_rtr_bytes) 7086 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 7087 if (r_rsm->r_in_tmap) { 7088 /* This really should not happen */ 7089 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 7090 r_rsm->r_in_tmap = 0; 7091 } 7092 7093 /* Now the flags */ 7094 if (r_rsm->r_flags & RACK_HAS_FIN) 7095 l_rsm->r_flags |= RACK_HAS_FIN; 7096 if (r_rsm->r_flags & RACK_TLP) 7097 l_rsm->r_flags |= RACK_TLP; 7098 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 7099 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 7100 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 7101 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 7102 /* 7103 * If both are app-limited then let the 7104 * free lower the count. If right is app 7105 * limited and left is not, transfer. 7106 */ 7107 l_rsm->r_flags |= RACK_APP_LIMITED; 7108 r_rsm->r_flags &= ~RACK_APP_LIMITED; 7109 if (r_rsm == rack->r_ctl.rc_first_appl) 7110 rack->r_ctl.rc_first_appl = l_rsm; 7111 } 7112 tqhash_remove(rack->r_ctl.tqh, r_rsm, REMOVE_TYPE_MERGE); 7113 /* 7114 * We keep the largest value, which is the newest 7115 * send. We do this in case a segment that is 7116 * joined together and not part of a GP estimate 7117 * later gets expanded into the GP estimate. 7118 * 7119 * We prohibit the merging of unlike kinds i.e. 
7120 * all pieces that are in the GP estimate can be 7121 * merged and all pieces that are not in a GP estimate 7122 * can be merged, but not disimilar pieces. Combine 7123 * this with taking the highest here and we should 7124 * be ok unless of course the client reneges. Then 7125 * all bets are off. 7126 */ 7127 if(l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] < 7128 r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]) { 7129 l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] = r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]; 7130 } 7131 /* 7132 * When merging two RSM's we also need to consider the ack time and keep 7133 * newest. If the ack gets merged into a measurement then that is the 7134 * one we will want to be using. 7135 */ 7136 if(l_rsm->r_ack_arrival < r_rsm->r_ack_arrival) 7137 l_rsm->r_ack_arrival = r_rsm->r_ack_arrival; 7138 7139 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 7140 /* Transfer the split limit to the map we free */ 7141 r_rsm->r_limit_type = l_rsm->r_limit_type; 7142 l_rsm->r_limit_type = 0; 7143 } 7144 rack_free(rack, r_rsm); 7145 l_rsm->r_flags |= RACK_MERGED; 7146 return (l_rsm); 7147 } 7148 7149 /* 7150 * TLP Timer, here we simply setup what segment we want to 7151 * have the TLP expire on, the normal rack_output() will then 7152 * send it out. 7153 * 7154 * We return 1, saying don't proceed with rack_output only 7155 * when all timers have been stopped (destroyed PCB?). 7156 */ 7157 static int 7158 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp) 7159 { 7160 /* 7161 * Tail Loss Probe. 7162 */ 7163 struct rack_sendmap *rsm = NULL; 7164 int insret __diagused; 7165 struct socket *so = tptosocket(tp); 7166 uint32_t amm; 7167 uint32_t out, avail; 7168 int collapsed_win = 0; 7169 7170 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 7171 /* Its not time yet */ 7172 return (0); 7173 } 7174 if (ctf_progress_timeout_check(tp, true)) { 7175 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7176 return (-ETIMEDOUT); /* tcp_drop() */ 7177 } 7178 /* 7179 * A TLP timer has expired. We have been idle for 2 rtts. So we now 7180 * need to figure out how to force a full MSS segment out. 7181 */ 7182 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); 7183 rack->r_ctl.retran_during_recovery = 0; 7184 rack->r_might_revert = 0; 7185 rack->r_ctl.dsack_byte_cnt = 0; 7186 counter_u64_add(rack_tlp_tot, 1); 7187 if (rack->r_state && (rack->r_state != tp->t_state)) 7188 rack_set_state(tp, rack); 7189 avail = sbavail(&so->so_snd); 7190 out = tp->snd_max - tp->snd_una; 7191 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) { 7192 /* special case, we need a retransmission */ 7193 collapsed_win = 1; 7194 goto need_retran; 7195 } 7196 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { 7197 rack->r_ctl.dsack_persist--; 7198 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 7199 rack->r_ctl.num_dsack = 0; 7200 } 7201 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 7202 } 7203 if ((tp->t_flags & TF_GPUTINPROG) && 7204 (rack->r_ctl.rc_tlp_cnt_out == 1)) { 7205 /* 7206 * If this is the second in a row 7207 * TLP and we are doing a measurement 7208 * its time to abandon the measurement. 7209 * Something is likely broken on 7210 * the clients network and measuring a 7211 * broken network does us no good. 
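 * (rack_timeout_rxt() applies the same policy on a second RXT: a
 * repeated timer-driven event while a measurement is in flight
 * invalidates it.)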
7212 */ 7213 tp->t_flags &= ~TF_GPUTINPROG; 7214 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7215 rack->r_ctl.rc_gp_srtt /*flex1*/, 7216 tp->gput_seq, 7217 0, 0, 18, __LINE__, NULL, 0); 7218 } 7219 /* 7220 * Check our send oldest always settings, and if 7221 * there is an oldest to send jump to the need_retran. 7222 */ 7223 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 7224 goto need_retran; 7225 7226 if (avail > out) { 7227 /* New data is available */ 7228 amm = avail - out; 7229 if (amm > ctf_fixed_maxseg(tp)) { 7230 amm = ctf_fixed_maxseg(tp); 7231 if ((amm + out) > tp->snd_wnd) { 7232 /* We are rwnd limited */ 7233 goto need_retran; 7234 } 7235 } else if (amm < ctf_fixed_maxseg(tp)) { 7236 /* not enough to fill a MTU */ 7237 goto need_retran; 7238 } 7239 if (IN_FASTRECOVERY(tp->t_flags)) { 7240 /* Unlikely */ 7241 if (rack->rack_no_prr == 0) { 7242 if (out + amm <= tp->snd_wnd) { 7243 rack->r_ctl.rc_prr_sndcnt = amm; 7244 rack->r_ctl.rc_tlp_new_data = amm; 7245 rack_log_to_prr(rack, 4, 0, __LINE__); 7246 } 7247 } else 7248 goto need_retran; 7249 } else { 7250 /* Set the send-new override */ 7251 if (out + amm <= tp->snd_wnd) 7252 rack->r_ctl.rc_tlp_new_data = amm; 7253 else 7254 goto need_retran; 7255 } 7256 rack->r_ctl.rc_tlpsend = NULL; 7257 counter_u64_add(rack_tlp_newdata, 1); 7258 goto send; 7259 } 7260 need_retran: 7261 /* 7262 * Ok we need to arrange the last un-acked segment to be re-sent, or 7263 * optionally the first un-acked segment. 7264 */ 7265 if (collapsed_win == 0) { 7266 if (rack_always_send_oldest) 7267 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 7268 else { 7269 rsm = tqhash_max(rack->r_ctl.tqh); 7270 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 7271 rsm = rack_find_high_nonack(rack, rsm); 7272 } 7273 } 7274 if (rsm == NULL) { 7275 #ifdef TCP_BLACKBOX 7276 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 7277 #endif 7278 goto out; 7279 } 7280 } else { 7281 /* 7282 * We had a collapsed window, lets find 7283 * the point before the collapse. 7284 */ 7285 if (SEQ_GT((rack->r_ctl.last_collapse_point - 1), rack->rc_tp->snd_una)) 7286 rsm = tqhash_find(rack->r_ctl.tqh, (rack->r_ctl.last_collapse_point - 1)); 7287 else { 7288 rsm = tqhash_min(rack->r_ctl.tqh); 7289 } 7290 if (rsm == NULL) { 7291 /* Huh */ 7292 goto out; 7293 } 7294 } 7295 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 7296 /* 7297 * We need to split this the last segment in two. 7298 */ 7299 struct rack_sendmap *nrsm; 7300 7301 nrsm = rack_alloc_full_limit(rack); 7302 if (nrsm == NULL) { 7303 /* 7304 * No memory to split, we will just exit and punt 7305 * off to the RXT timer. 
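 * Without a spare rsm we cannot carve a one-MSS probe off the tail of
 * the larger block, so no TLP is armed this round and the eventual RXT
 * timer covers the loss instead.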
7306 */ 7307 goto out; 7308 } 7309 rack_clone_rsm(rack, nrsm, rsm, 7310 (rsm->r_end - ctf_fixed_maxseg(tp))); 7311 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7312 #ifndef INVARIANTS 7313 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 7314 #else 7315 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 7316 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 7317 nrsm, insret, rack, rsm); 7318 } 7319 #endif 7320 if (rsm->r_in_tmap) { 7321 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7322 nrsm->r_in_tmap = 1; 7323 } 7324 rsm = nrsm; 7325 } 7326 rack->r_ctl.rc_tlpsend = rsm; 7327 send: 7328 /* Make sure output path knows we are doing a TLP */ 7329 *doing_tlp = 1; 7330 rack->r_timer_override = 1; 7331 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 7332 return (0); 7333 out: 7334 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 7335 return (0); 7336 } 7337 7338 /* 7339 * Delayed ack Timer, here we simply need to setup the 7340 * ACK_NOW flag and remove the DELACK flag. From there 7341 * the output routine will send the ack out. 7342 * 7343 * We only return 1, saying don't proceed, if all timers 7344 * are stopped (destroyed PCB?). 7345 */ 7346 static int 7347 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7348 { 7349 7350 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 7351 tp->t_flags &= ~TF_DELACK; 7352 tp->t_flags |= TF_ACKNOW; 7353 KMOD_TCPSTAT_INC(tcps_delack); 7354 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 7355 return (0); 7356 } 7357 7358 static inline int 7359 rack_send_ack_challange(struct tcp_rack *rack) 7360 { 7361 struct tcptemp *t_template; 7362 7363 t_template = tcpip_maketemplate(rack->rc_inp); 7364 if (t_template) { 7365 if (rack->forced_ack == 0) { 7366 rack->forced_ack = 1; 7367 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 7368 } else { 7369 rack->probe_not_answered = 1; 7370 } 7371 tcp_respond(rack->rc_tp, t_template->tt_ipgen, 7372 &t_template->tt_t, (struct mbuf *)NULL, 7373 rack->rc_tp->rcv_nxt, rack->rc_tp->snd_una - 1, 0); 7374 free(t_template, M_TEMP); 7375 /* This does send an ack so kill any D-ack timer */ 7376 if (rack->rc_tp->t_flags & TF_DELACK) 7377 rack->rc_tp->t_flags &= ~TF_DELACK; 7378 return(1); 7379 } else 7380 return (0); 7381 7382 } 7383 7384 /* 7385 * Persists timer, here we simply send the 7386 * same thing as a keepalive will. 7387 * the one byte send. 7388 * 7389 * We only return 1, saying don't proceed, if all timers 7390 * are stopped (destroyed PCB?). 7391 */ 7392 static int 7393 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7394 { 7395 int32_t retval = 1; 7396 7397 if (rack->rc_in_persist == 0) 7398 return (0); 7399 if (ctf_progress_timeout_check(tp, false)) { 7400 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 7401 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7402 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 7403 return (-ETIMEDOUT); /* tcp_drop() */ 7404 } 7405 /* 7406 * Persistence timer into zero window. Force a byte to be output, if 7407 * possible. 7408 */ 7409 KMOD_TCPSTAT_INC(tcps_persisttimeo); 7410 /* 7411 * Hack: if the peer is dead/unreachable, we do not time out if the 7412 * window is closed. After a full backoff, drop the connection if 7413 * the idle time (no responses to probes) reaches the maximum 7414 * backoff that we would use if retransmitting. 
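 * Concretely, the test below requires t_rxtshift to have reached
 * V_tcp_retries and the quiet time (no response to our probes) to have
 * exceeded either tcp_maxpersistidle or RACK_REXMTVAL(tp) multiplied by
 * tcp_totbackoff, i.e. roughly the time a full retransmit backoff
 * series would have taken.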
7415 */ 7416 if (tp->t_rxtshift >= V_tcp_retries && 7417 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 7418 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 7419 KMOD_TCPSTAT_INC(tcps_persistdrop); 7420 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 7421 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 7422 retval = -ETIMEDOUT; /* tcp_drop() */ 7423 goto out; 7424 } 7425 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 7426 tp->snd_una == tp->snd_max) 7427 rack_exit_persist(tp, rack, cts); 7428 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 7429 /* 7430 * If the user has closed the socket then drop a persisting 7431 * connection after a much reduced timeout. 7432 */ 7433 if (tp->t_state > TCPS_CLOSE_WAIT && 7434 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 7435 KMOD_TCPSTAT_INC(tcps_persistdrop); 7436 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 7437 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 7438 retval = -ETIMEDOUT; /* tcp_drop() */ 7439 goto out; 7440 } 7441 if (rack_send_ack_challange(rack)) { 7442 /* only set it if we were answered */ 7443 if (rack->probe_not_answered) { 7444 counter_u64_add(rack_persists_loss, 1); 7445 rack->r_ctl.persist_lost_ends++; 7446 } 7447 counter_u64_add(rack_persists_sends, 1); 7448 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 7449 } 7450 if (tp->t_rxtshift < V_tcp_retries) 7451 tp->t_rxtshift++; 7452 out: 7453 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 7454 rack_start_hpts_timer(rack, tp, cts, 7455 0, 0, 0); 7456 return (retval); 7457 } 7458 7459 /* 7460 * If a keepalive goes off, we had no other timers 7461 * happening. We always return 1 here since this 7462 * routine either drops the connection or sends 7463 * out a segment with respond. 7464 */ 7465 static int 7466 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7467 { 7468 struct inpcb *inp = tptoinpcb(tp); 7469 7470 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 7471 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 7472 /* 7473 * Keep-alive timer went off; send something or drop connection if 7474 * idle for too long. 7475 */ 7476 KMOD_TCPSTAT_INC(tcps_keeptimeo); 7477 if (tp->t_state < TCPS_ESTABLISHED) 7478 goto dropit; 7479 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 7480 tp->t_state <= TCPS_CLOSING) { 7481 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 7482 goto dropit; 7483 /* 7484 * Send a packet designed to force a response if the peer is 7485 * up and reachable: either an ACK if the connection is 7486 * still alive, or an RST if the peer has closed the 7487 * connection due to timeout or reboot. Using sequence 7488 * number tp->snd_una-1 causes the transmitted zero-length 7489 * segment to lie outside the receive window; by the 7490 * protocol spec, this requires the correspondent TCP to 7491 * respond. 7492 */ 7493 KMOD_TCPSTAT_INC(tcps_keepprobe); 7494 rack_send_ack_challange(rack); 7495 } 7496 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 7497 return (1); 7498 dropit: 7499 KMOD_TCPSTAT_INC(tcps_keepdrops); 7500 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 7501 return (-ETIMEDOUT); /* tcp_drop() */ 7502 } 7503 7504 /* 7505 * Retransmit helper function, clear up all the ack 7506 * flags and take care of important book keeping. 7507 */ 7508 static void 7509 rack_remxt_tmr(struct tcpcb *tp) 7510 { 7511 /* 7512 * The retransmit timer went off, all sack'd blocks must be 7513 * un-acked. 
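 * (Unless we are still at or below rack_rxt_scoreboard_clear_thresh
 * timeouts, in which case the scoreboard is left intact and only the
 * head of the tmap is queued for resend -- see the early return below.)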
7514 */ 7515 struct rack_sendmap *rsm, *trsm = NULL; 7516 struct tcp_rack *rack; 7517 7518 rack = (struct tcp_rack *)tp->t_fb_ptr; 7519 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 7520 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 7521 rack->r_timer_override = 1; 7522 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 7523 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 7524 rack->r_late = 0; 7525 rack->r_early = 0; 7526 rack->r_ctl.rc_agg_delayed = 0; 7527 rack->r_ctl.rc_agg_early = 0; 7528 if (rack->r_state && (rack->r_state != tp->t_state)) 7529 rack_set_state(tp, rack); 7530 if (tp->t_rxtshift <= rack_rxt_scoreboard_clear_thresh) { 7531 /* 7532 * We do not clear the scoreboard until we have had 7533 * more than rack_rxt_scoreboard_clear_thresh time-outs. 7534 */ 7535 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 7536 if (rack->r_ctl.rc_resend != NULL) 7537 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 7538 7539 return; 7540 } 7541 /* 7542 * Ideally we would like to be able to 7543 * mark SACK-PASS on anything not acked here. 7544 * 7545 * However, if we do that we would burst out 7546 * all that data 1ms apart. This would be unwise, 7547 * so for now we will just let the normal rxt timer 7548 * and tlp timer take care of it. 7549 * 7550 * Also we really need to stick them back in sequence 7551 * order. This way we send in the proper order and any 7552 * sacks that come floating in will "re-ack" the data. 7553 * To do this we zap the tmap with an INIT and then 7554 * walk through and place every rsm in the tail queue 7555 * hash table back in its seq ordered place. 7556 */ 7557 TAILQ_INIT(&rack->r_ctl.rc_tmap); 7558 7559 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 7560 rsm->r_dupack = 0; 7561 if (rack_verbose_logging) 7562 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7563 /* We must re-add it back to the tlist */ 7564 if (trsm == NULL) { 7565 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7566 } else { 7567 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 7568 } 7569 rsm->r_in_tmap = 1; 7570 trsm = rsm; 7571 if (rsm->r_flags & RACK_ACKED) 7572 rsm->r_flags |= RACK_WAS_ACKED; 7573 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED | RACK_WAS_LOST); 7574 rsm->r_flags |= RACK_MUST_RXT; 7575 } 7576 /* zero the lost since it's all gone */ 7577 rack->r_ctl.rc_considered_lost = 0; 7578 /* Clear the count (we just un-acked them) */ 7579 rack->r_ctl.rc_sacked = 0; 7580 rack->r_ctl.rc_sacklast = NULL; 7581 /* Clear the tlp rtx mark */ 7582 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); 7583 if (rack->r_ctl.rc_resend != NULL) 7584 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 7585 rack->r_ctl.rc_prr_sndcnt = 0; 7586 rack_log_to_prr(rack, 6, 0, __LINE__); 7587 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); 7588 if (rack->r_ctl.rc_resend != NULL) 7589 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 7590 if (((tp->t_flags & TF_SACK_PERMIT) == 0) && 7591 ((tp->t_flags & TF_SENTFIN) == 0)) { 7592 /* 7593 * For non-sack customers new data 7594 * needs to go out as retransmits until 7595 * we retransmit up to snd_max. 
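 * r_must_retran together with rc_out_at_rto (set below to the current
 * flight) enforces that: the flag stays up until (re)transmissions have
 * covered the snd_max recorded at RTO time, at which point
 * rack_update_rsm() clears it.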
7596 */ 7597 rack->r_must_retran = 1; 7598 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 7599 rack->r_ctl.rc_sacked); 7600 } 7601 } 7602 7603 static void 7604 rack_convert_rtts(struct tcpcb *tp) 7605 { 7606 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC); 7607 tp->t_rxtcur = RACK_REXMTVAL(tp); 7608 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 7609 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 7610 } 7611 if (tp->t_rxtcur > rack_rto_max) { 7612 tp->t_rxtcur = rack_rto_max; 7613 } 7614 } 7615 7616 static void 7617 rack_cc_conn_init(struct tcpcb *tp) 7618 { 7619 struct tcp_rack *rack; 7620 uint32_t srtt; 7621 7622 rack = (struct tcp_rack *)tp->t_fb_ptr; 7623 srtt = tp->t_srtt; 7624 cc_conn_init(tp); 7625 /* 7626 * Now convert to rack's internal format, 7627 * if required. 7628 */ 7629 if ((srtt == 0) && (tp->t_srtt != 0)) 7630 rack_convert_rtts(tp); 7631 /* 7632 * We want a chance to stay in slowstart as 7633 * we create a connection. TCP spec says that 7634 * initially ssthresh is infinite. For our 7635 * purposes that is the snd_wnd. 7636 */ 7637 if (tp->snd_ssthresh < tp->snd_wnd) { 7638 tp->snd_ssthresh = tp->snd_wnd; 7639 } 7640 /* 7641 * We also want to assure a IW worth of 7642 * data can get inflight. 7643 */ 7644 if (rc_init_window(rack) < tp->snd_cwnd) 7645 tp->snd_cwnd = rc_init_window(rack); 7646 } 7647 7648 /* 7649 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 7650 * we will setup to retransmit the lowest seq number outstanding. 7651 */ 7652 static int 7653 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7654 { 7655 struct inpcb *inp = tptoinpcb(tp); 7656 int32_t rexmt; 7657 int32_t retval = 0; 7658 bool isipv6; 7659 7660 if ((tp->t_flags & TF_GPUTINPROG) && 7661 (tp->t_rxtshift)) { 7662 /* 7663 * We have had a second timeout 7664 * measurements on successive rxt's are not profitable. 7665 * It is unlikely to be of any use (the network is 7666 * broken or the client went away). 7667 */ 7668 tp->t_flags &= ~TF_GPUTINPROG; 7669 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7670 rack->r_ctl.rc_gp_srtt /*flex1*/, 7671 tp->gput_seq, 7672 0, 0, 18, __LINE__, NULL, 0); 7673 } 7674 if (ctf_progress_timeout_check(tp, false)) { 7675 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 7676 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7677 return (-ETIMEDOUT); /* tcp_drop() */ 7678 } 7679 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 7680 rack->r_ctl.retran_during_recovery = 0; 7681 rack->rc_ack_required = 1; 7682 rack->r_ctl.dsack_byte_cnt = 0; 7683 if (IN_RECOVERY(tp->t_flags) && 7684 (rack->rto_from_rec == 0)) { 7685 /* 7686 * Mark that we had a rto while in recovery 7687 * and save the ssthresh so if we go back 7688 * into recovery we will have a chance 7689 * to slowstart back to the level. 7690 */ 7691 rack->rto_from_rec = 1; 7692 rack->r_ctl.rto_ssthresh = tp->snd_ssthresh; 7693 } 7694 if (IN_FASTRECOVERY(tp->t_flags)) 7695 tp->t_flags |= TF_WASFRECOVERY; 7696 else 7697 tp->t_flags &= ~TF_WASFRECOVERY; 7698 if (IN_CONGRECOVERY(tp->t_flags)) 7699 tp->t_flags |= TF_WASCRECOVERY; 7700 else 7701 tp->t_flags &= ~TF_WASCRECOVERY; 7702 if (TCPS_HAVEESTABLISHED(tp->t_state) && 7703 (tp->snd_una == tp->snd_max)) { 7704 /* Nothing outstanding .. 
nothing to do */ 7705 return (0); 7706 } 7707 if (rack->r_ctl.dsack_persist) { 7708 rack->r_ctl.dsack_persist--; 7709 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 7710 rack->r_ctl.num_dsack = 0; 7711 } 7712 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 7713 } 7714 /* 7715 * Rack can only run one timer at a time, so we cannot 7716 * run a KEEPINIT (gating SYN sending) and a retransmit 7717 * timer for the SYN. So if we are in a front state and 7718 * have a KEEPINIT timer we need to check the first transmit 7719 * against now to see if we have exceeded the KEEPINIT time 7720 * (if one is set). 7721 */ 7722 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 7723 (TP_KEEPINIT(tp) != 0)) { 7724 struct rack_sendmap *rsm; 7725 7726 rsm = tqhash_min(rack->r_ctl.tqh); 7727 if (rsm) { 7728 /* Ok we have something outstanding to test keepinit with */ 7729 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 7730 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 7731 /* We have exceeded the KEEPINIT time */ 7732 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 7733 goto drop_it; 7734 } 7735 } 7736 } 7737 /* 7738 * Retransmission timer went off. Message has not been acked within 7739 * retransmit interval. Back off to a longer retransmit interval 7740 * and retransmit one segment. 7741 */ 7742 if ((rack->r_ctl.rc_resend == NULL) || 7743 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 7744 /* 7745 * If the rwnd collapsed on 7746 * the one we are retransmitting 7747 * it does not count against the 7748 * rxt count. 7749 */ 7750 tp->t_rxtshift++; 7751 } 7752 rack_remxt_tmr(tp); 7753 if (tp->t_rxtshift > V_tcp_retries) { 7754 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 7755 drop_it: 7756 tp->t_rxtshift = V_tcp_retries; 7757 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 7758 /* XXXGL: previously t_softerror was casted to uint16_t */ 7759 MPASS(tp->t_softerror >= 0); 7760 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; 7761 goto out; /* tcp_drop() */ 7762 } 7763 if (tp->t_state == TCPS_SYN_SENT) { 7764 /* 7765 * If the SYN was retransmitted, indicate CWND to be limited 7766 * to 1 segment in cc_conn_init(). 7767 */ 7768 tp->snd_cwnd = 1; 7769 } else if (tp->t_rxtshift == 1) { 7770 /* 7771 * first retransmit; record ssthresh and cwnd so they can be 7772 * recovered if this turns out to be a "bad" retransmit. A 7773 * retransmit is considered "bad" if an ACK for this segment 7774 * is received within RTT/2 interval; the assumption here is 7775 * that the ACK was already in flight. See "On Estimating 7776 * End-to-End Network Path Properties" by Allman and Paxson 7777 * for more details. 
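 * The snapshot taken below (cwnd, ssthresh, snd_recover, plus a
 * t_badrxtwin of now + srtt/2) is what a later "bad retransmit"
 * detection restores from if the ACK arrives inside that window.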
7778 */ 7779 tp->snd_cwnd_prev = tp->snd_cwnd; 7780 tp->snd_ssthresh_prev = tp->snd_ssthresh; 7781 tp->snd_recover_prev = tp->snd_recover; 7782 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 7783 tp->t_flags |= TF_PREVVALID; 7784 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 7785 tp->t_flags &= ~TF_PREVVALID; 7786 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 7787 if ((tp->t_state == TCPS_SYN_SENT) || 7788 (tp->t_state == TCPS_SYN_RECEIVED)) 7789 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 7790 else 7791 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 7792 7793 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 7794 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 7795 /* 7796 * We enter the path for PLMTUD if connection is established or, if 7797 * connection is FIN_WAIT_1 status, reason for the last is that if 7798 * amount of data we send is very small, we could send it in couple 7799 * of packets and process straight to FIN. In that case we won't 7800 * catch ESTABLISHED state. 7801 */ 7802 #ifdef INET6 7803 isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false; 7804 #else 7805 isipv6 = false; 7806 #endif 7807 if (((V_tcp_pmtud_blackhole_detect == 1) || 7808 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 7809 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 7810 ((tp->t_state == TCPS_ESTABLISHED) || 7811 (tp->t_state == TCPS_FIN_WAIT_1))) { 7812 /* 7813 * Idea here is that at each stage of mtu probe (usually, 7814 * 1448 -> 1188 -> 524) should be given 2 chances to recover 7815 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 7816 * should take care of that. 7817 */ 7818 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 7819 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 7820 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 7821 tp->t_rxtshift % 2 == 0)) { 7822 /* 7823 * Enter Path MTU Black-hole Detection mechanism: - 7824 * Disable Path MTU Discovery (IP "DF" bit). - 7825 * Reduce MTU to lower value than what we negotiated 7826 * with peer. 7827 */ 7828 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 7829 /* Record that we may have found a black hole. */ 7830 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 7831 /* Keep track of previous MSS. */ 7832 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 7833 } 7834 7835 /* 7836 * Reduce the MSS to blackhole value or to the 7837 * default in an attempt to retransmit. 7838 */ 7839 #ifdef INET6 7840 if (isipv6 && 7841 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 7842 /* Use the sysctl tuneable blackhole MSS. */ 7843 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 7844 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 7845 } else if (isipv6) { 7846 /* Use the default MSS. */ 7847 tp->t_maxseg = V_tcp_v6mssdflt; 7848 /* 7849 * Disable Path MTU Discovery when we switch 7850 * to minmss. 7851 */ 7852 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 7853 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 7854 } 7855 #endif 7856 #if defined(INET6) && defined(INET) 7857 else 7858 #endif 7859 #ifdef INET 7860 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 7861 /* Use the sysctl tuneable blackhole MSS. */ 7862 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 7863 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 7864 } else { 7865 /* Use the default MSS. */ 7866 tp->t_maxseg = V_tcp_mssdflt; 7867 /* 7868 * Disable Path MTU Discovery when we switch 7869 * to minmss. 
7870 */ 7871 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 7872 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 7873 } 7874 #endif 7875 } else { 7876 /* 7877 * If further retransmissions are still unsuccessful 7878 * with a lowered MTU, maybe this isn't a blackhole 7879 * and we restore the previous MSS and blackhole 7880 * detection flags. The limit '6' is determined by 7881 * giving each probe stage (1448, 1188, 524) 2 7882 * chances to recover. 7883 */ 7884 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 7885 (tp->t_rxtshift >= 6)) { 7886 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 7887 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 7888 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 7889 if (tp->t_maxseg < V_tcp_mssdflt) { 7890 /* 7891 * The MSS is so small we should not 7892 * process incoming SACK's since we are 7893 * subject to attack in such a case. 7894 */ 7895 tp->t_flags2 |= TF2_PROC_SACK_PROHIBIT; 7896 } else { 7897 tp->t_flags2 &= ~TF2_PROC_SACK_PROHIBIT; 7898 } 7899 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 7900 } 7901 } 7902 } 7903 /* 7904 * Disable RFC1323 and SACK if we haven't got any response to 7905 * our third SYN to work-around some broken terminal servers 7906 * (most of which have hopefully been retired) that have bad VJ 7907 * header compression code which trashes TCP segments containing 7908 * unknown-to-them TCP options. 7909 */ 7910 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 7911 (tp->t_rxtshift == 3)) 7912 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 7913 /* 7914 * If we backed off this far, our srtt estimate is probably bogus. 7915 * Clobber it so we'll take the next rtt measurement as our srtt; 7916 * move the current srtt into rttvar to keep the current retransmit 7917 * times until then. 7918 */ 7919 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 7920 #ifdef INET6 7921 if ((inp->inp_vflag & INP_IPV6) != 0) 7922 in6_losing(inp); 7923 else 7924 #endif 7925 in_losing(inp); 7926 tp->t_rttvar += tp->t_srtt; 7927 tp->t_srtt = 0; 7928 } 7929 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 7930 tp->snd_recover = tp->snd_max; 7931 tp->t_flags |= TF_ACKNOW; 7932 tp->t_rtttime = 0; 7933 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__); 7934 out: 7935 return (retval); 7936 } 7937 7938 static int 7939 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp) 7940 { 7941 int32_t ret = 0; 7942 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 7943 7944 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 7945 (tp->t_flags & TF_GPUTINPROG)) { 7946 /* 7947 * We have a goodput in progress 7948 * and we have entered a late state. 7949 * Do we have enough data in the sb 7950 * to handle the GPUT request? 7951 */ 7952 uint32_t bytes; 7953 7954 bytes = tp->gput_ack - tp->gput_seq; 7955 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 7956 bytes += tp->gput_seq - tp->snd_una; 7957 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 7958 /* 7959 * There are not enough bytes in the socket 7960 * buffer that have been sent to cover this 7961 * measurement. Cancel it. 
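 * Cancelling here just means logging the aborted attempt and clearing
 * TF_GPUTINPROG; no goodput sample is recorded from it.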
7962 */ 7963 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7964 rack->r_ctl.rc_gp_srtt /*flex1*/, 7965 tp->gput_seq, 7966 0, 0, 18, __LINE__, NULL, 0); 7967 tp->t_flags &= ~TF_GPUTINPROG; 7968 } 7969 } 7970 if (timers == 0) { 7971 return (0); 7972 } 7973 if (tp->t_state == TCPS_LISTEN) { 7974 /* no timers on listen sockets */ 7975 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 7976 return (0); 7977 return (1); 7978 } 7979 if ((timers & PACE_TMR_RACK) && 7980 rack->rc_on_min_to) { 7981 /* 7982 * For the rack timer when we 7983 * are on a min-timeout (which means rrr_conf = 3) 7984 * we don't want to check the timer. It may 7985 * be going off for a pace and thats ok we 7986 * want to send the retransmit (if its ready). 7987 * 7988 * If its on a normal rack timer (non-min) then 7989 * we will check if its expired. 7990 */ 7991 goto skip_time_check; 7992 } 7993 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 7994 uint32_t left; 7995 7996 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 7997 ret = -1; 7998 rack_log_to_processing(rack, cts, ret, 0); 7999 return (0); 8000 } 8001 if (hpts_calling == 0) { 8002 /* 8003 * A user send or queued mbuf (sack) has called us? We 8004 * return 0 and let the pacing guards 8005 * deal with it if they should or 8006 * should not cause a send. 8007 */ 8008 ret = -2; 8009 rack_log_to_processing(rack, cts, ret, 0); 8010 return (0); 8011 } 8012 /* 8013 * Ok our timer went off early and we are not paced false 8014 * alarm, go back to sleep. We make sure we don't have 8015 * no-sack wakeup on since we no longer have a PKT_OUTPUT 8016 * flag in place. 8017 */ 8018 rack->rc_tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE; 8019 ret = -3; 8020 left = rack->r_ctl.rc_timer_exp - cts; 8021 tcp_hpts_insert(tp, HPTS_MS_TO_SLOTS(left)); 8022 rack_log_to_processing(rack, cts, ret, left); 8023 return (1); 8024 } 8025 skip_time_check: 8026 rack->rc_tmr_stopped = 0; 8027 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 8028 if (timers & PACE_TMR_DELACK) { 8029 ret = rack_timeout_delack(tp, rack, cts); 8030 } else if (timers & PACE_TMR_RACK) { 8031 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8032 rack->r_fast_output = 0; 8033 ret = rack_timeout_rack(tp, rack, cts); 8034 } else if (timers & PACE_TMR_TLP) { 8035 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8036 rack->r_fast_output = 0; 8037 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); 8038 } else if (timers & PACE_TMR_RXT) { 8039 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8040 rack->r_fast_output = 0; 8041 ret = rack_timeout_rxt(tp, rack, cts); 8042 } else if (timers & PACE_TMR_PERSIT) { 8043 ret = rack_timeout_persist(tp, rack, cts); 8044 } else if (timers & PACE_TMR_KEEP) { 8045 ret = rack_timeout_keepalive(tp, rack, cts); 8046 } 8047 rack_log_to_processing(rack, cts, ret, timers); 8048 return (ret); 8049 } 8050 8051 static void 8052 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 8053 { 8054 struct timeval tv; 8055 uint32_t us_cts, flags_on_entry; 8056 uint8_t hpts_removed = 0; 8057 8058 flags_on_entry = rack->r_ctl.rc_hpts_flags; 8059 us_cts = tcp_get_usecs(&tv); 8060 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 8061 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 8062 ((tp->snd_max - tp->snd_una) == 0))) { 8063 tcp_hpts_remove(rack->rc_tp); 8064 hpts_removed = 1; 8065 /* If we were not delayed cancel out the flag. 
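 * i.e. when nothing is outstanding (snd_max == snd_una) there can be no
 * pending paced transmission, so PACE_PKT_OUTPUT is dropped along with
 * the hpts entry.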
*/ 8066 if ((tp->snd_max - tp->snd_una) == 0) 8067 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 8068 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 8069 } 8070 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 8071 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 8072 if (tcp_in_hpts(rack->rc_tp) && 8073 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 8074 /* 8075 * Canceling timer's when we have no output being 8076 * paced. We also must remove ourselves from the 8077 * hpts. 8078 */ 8079 tcp_hpts_remove(rack->rc_tp); 8080 hpts_removed = 1; 8081 } 8082 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 8083 } 8084 if (hpts_removed == 0) 8085 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 8086 } 8087 8088 static int 8089 rack_stopall(struct tcpcb *tp) 8090 { 8091 struct tcp_rack *rack; 8092 8093 rack = (struct tcp_rack *)tp->t_fb_ptr; 8094 rack->t_timers_stopped = 1; 8095 8096 tcp_hpts_remove(tp); 8097 8098 return (0); 8099 } 8100 8101 static void 8102 rack_stop_all_timers(struct tcpcb *tp, struct tcp_rack *rack) 8103 { 8104 /* 8105 * Assure no timers are running. 8106 */ 8107 if (tcp_timer_active(tp, TT_PERSIST)) { 8108 /* We enter in persists, set the flag appropriately */ 8109 rack->rc_in_persist = 1; 8110 } 8111 if (tcp_in_hpts(rack->rc_tp)) { 8112 tcp_hpts_remove(rack->rc_tp); 8113 } 8114 } 8115 8116 static void 8117 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 8118 struct rack_sendmap *rsm, uint64_t ts, uint32_t add_flag, int segsiz) 8119 { 8120 int32_t idx; 8121 8122 rsm->r_rtr_cnt++; 8123 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 8124 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 8125 rsm->r_flags |= RACK_OVERMAX; 8126 } 8127 rsm->r_act_rxt_cnt++; 8128 /* Peg the count/index */ 8129 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8130 rsm->r_dupack = 0; 8131 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { 8132 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 8133 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 8134 } 8135 if (rsm->r_flags & RACK_WAS_LOST) { 8136 /* 8137 * We retransmitted it putting it back in flight 8138 * remove the lost desgination and reduce the 8139 * bytes considered lost. 8140 */ 8141 rsm->r_flags &= ~RACK_WAS_LOST; 8142 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), 8143 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 8144 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) 8145 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; 8146 else 8147 rack->r_ctl.rc_considered_lost = 0; 8148 } 8149 idx = rsm->r_rtr_cnt - 1; 8150 rsm->r_tim_lastsent[idx] = ts; 8151 /* 8152 * Here we don't add in the len of send, since its already 8153 * in snduna <->snd_max. 
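	 * (r_fas records the flight size at send time.  On a retransmission
	 * the bytes being re-sent are already counted inside snd_una <->
	 * snd_max, so adding the length again here would double count them;
	 * contrast this with the new-data path in rack_log_output() below,
	 * which does add the length in.)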
8154 */ 8155 rsm->r_fas = ctf_flight_size(rack->rc_tp, 8156 rack->r_ctl.rc_sacked); 8157 if (rsm->r_flags & RACK_ACKED) { 8158 /* Problably MTU discovery messing with us */ 8159 rsm->r_flags &= ~RACK_ACKED; 8160 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8161 } 8162 if (rsm->r_in_tmap) { 8163 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8164 rsm->r_in_tmap = 0; 8165 } 8166 /* Lets make sure it really is in or not the GP window */ 8167 rack_mark_in_gp_win(tp, rsm); 8168 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8169 rsm->r_in_tmap = 1; 8170 rsm->r_bas = (uint8_t)(((rsm->r_end - rsm->r_start) + segsiz - 1) / segsiz); 8171 /* Take off the must retransmit flag, if its on */ 8172 if (rsm->r_flags & RACK_MUST_RXT) { 8173 if (rack->r_must_retran) 8174 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 8175 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 8176 /* 8177 * We have retransmitted all we need. Clear 8178 * any must retransmit flags. 8179 */ 8180 rack->r_must_retran = 0; 8181 rack->r_ctl.rc_out_at_rto = 0; 8182 } 8183 rsm->r_flags &= ~RACK_MUST_RXT; 8184 } 8185 /* Remove any collapsed flag */ 8186 rsm->r_flags &= ~RACK_RWND_COLLAPSED; 8187 if (rsm->r_flags & RACK_SACK_PASSED) { 8188 /* We have retransmitted due to the SACK pass */ 8189 rsm->r_flags &= ~RACK_SACK_PASSED; 8190 rsm->r_flags |= RACK_WAS_SACKPASS; 8191 } 8192 } 8193 8194 static uint32_t 8195 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 8196 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint32_t add_flag, int segsiz) 8197 { 8198 /* 8199 * We (re-)transmitted starting at rsm->r_start for some length 8200 * (possibly less than r_end. 8201 */ 8202 struct rack_sendmap *nrsm; 8203 int insret __diagused; 8204 uint32_t c_end; 8205 int32_t len; 8206 8207 len = *lenp; 8208 c_end = rsm->r_start + len; 8209 if (SEQ_GEQ(c_end, rsm->r_end)) { 8210 /* 8211 * We retransmitted the whole piece or more than the whole 8212 * slopping into the next rsm. 8213 */ 8214 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); 8215 if (c_end == rsm->r_end) { 8216 *lenp = 0; 8217 return (0); 8218 } else { 8219 int32_t act_len; 8220 8221 /* Hangs over the end return whats left */ 8222 act_len = rsm->r_end - rsm->r_start; 8223 *lenp = (len - act_len); 8224 return (rsm->r_end); 8225 } 8226 /* We don't get out of this block. */ 8227 } 8228 /* 8229 * Here we retransmitted less than the whole thing which means we 8230 * have to split this into what was transmitted and what was not. 8231 */ 8232 nrsm = rack_alloc_full_limit(rack); 8233 if (nrsm == NULL) { 8234 /* 8235 * We can't get memory, so lets not proceed. 8236 */ 8237 *lenp = 0; 8238 return (0); 8239 } 8240 /* 8241 * So here we are going to take the original rsm and make it what we 8242 * retransmitted. nrsm will be the tail portion we did not 8243 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 8244 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 8245 * 1, 6 and the new piece will be 6, 11. 
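	 * Continuing that example: after rack_clone_rsm(rack, nrsm, rsm,
	 * c_end) with c_end = 6, rsm covers [1, 6) and gets the retransmit
	 * accounting applied by rack_update_rsm() below, while nrsm covers
	 * [6, 11) and keeps the original, un-retransmitted send information.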
8246 */ 8247 rack_clone_rsm(rack, nrsm, rsm, c_end); 8248 nrsm->r_dupack = 0; 8249 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 8250 #ifndef INVARIANTS 8251 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 8252 #else 8253 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 8254 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 8255 nrsm, insret, rack, rsm); 8256 } 8257 #endif 8258 if (rsm->r_in_tmap) { 8259 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8260 nrsm->r_in_tmap = 1; 8261 } 8262 rsm->r_flags &= (~RACK_HAS_FIN); 8263 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); 8264 /* Log a split of rsm into rsm and nrsm */ 8265 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 8266 *lenp = 0; 8267 return (0); 8268 } 8269 8270 static void 8271 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 8272 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts, 8273 struct rack_sendmap *hintrsm, uint32_t add_flag, struct mbuf *s_mb, 8274 uint32_t s_moff, int hw_tls, int segsiz) 8275 { 8276 struct tcp_rack *rack; 8277 struct rack_sendmap *rsm, *nrsm; 8278 int insret __diagused; 8279 8280 register uint32_t snd_max, snd_una; 8281 8282 /* 8283 * Add to the RACK log of packets in flight or retransmitted. If 8284 * there is a TS option we will use the TS echoed, if not we will 8285 * grab a TS. 8286 * 8287 * Retransmissions will increment the count and move the ts to its 8288 * proper place. Note that if options do not include TS's then we 8289 * won't be able to effectively use the ACK for an RTT on a retran. 8290 * 8291 * Notes about r_start and r_end. Lets consider a send starting at 8292 * sequence 1 for 10 bytes. In such an example the r_start would be 8293 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 8294 * This means that r_end is actually the first sequence for the next 8295 * slot (11). 8296 * 8297 */ 8298 /* 8299 * If err is set what do we do XXXrrs? should we not add the thing? 8300 * -- i.e. return if err != 0 or should we pretend we sent it? -- 8301 * i.e. proceed with add ** do this for now. 8302 */ 8303 INP_WLOCK_ASSERT(tptoinpcb(tp)); 8304 if (err) 8305 /* 8306 * We don't log errors -- we could but snd_max does not 8307 * advance in this case either. 8308 */ 8309 return; 8310 8311 if (th_flags & TH_RST) { 8312 /* 8313 * We don't log resets and we return immediately from 8314 * sending 8315 */ 8316 return; 8317 } 8318 rack = (struct tcp_rack *)tp->t_fb_ptr; 8319 snd_una = tp->snd_una; 8320 snd_max = tp->snd_max; 8321 if (th_flags & (TH_SYN | TH_FIN)) { 8322 /* 8323 * The call to rack_log_output is made before bumping 8324 * snd_max. This means we can record one extra byte on a SYN 8325 * or FIN if seq_out is adding more on and a FIN is present 8326 * (and we are not resending). 8327 */ 8328 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 8329 len++; 8330 if (th_flags & TH_FIN) 8331 len++; 8332 } 8333 if (SEQ_LEQ((seq_out + len), snd_una)) { 8334 /* Are sending an old segment to induce an ack (keep-alive)? */ 8335 return; 8336 } 8337 if (SEQ_LT(seq_out, snd_una)) { 8338 /* huh? should we panic? */ 8339 uint32_t end; 8340 8341 end = seq_out + len; 8342 seq_out = snd_una; 8343 if (SEQ_GEQ(end, seq_out)) 8344 len = end - seq_out; 8345 else 8346 len = 0; 8347 } 8348 if (len == 0) { 8349 /* We don't log zero window probes */ 8350 return; 8351 } 8352 if (IN_FASTRECOVERY(tp->t_flags)) { 8353 rack->r_ctl.rc_prr_out += len; 8354 } 8355 /* First question is it a retransmission or new? 
 */
	if (seq_out == snd_max) {
		/* It's new */
		rack_chk_req_and_hybrid_on_out(rack, seq_out, len, cts);
again:
		rsm = rack_alloc(rack);
		if (rsm == NULL) {
			/*
			 * Hmm out of memory and the tcb got destroyed while
			 * we tried to wait.
			 */
			return;
		}
		if (th_flags & TH_FIN) {
			rsm->r_flags = RACK_HAS_FIN|add_flag;
		} else {
			rsm->r_flags = add_flag;
		}
		if (hw_tls)
			rsm->r_hw_tls = 1;
		rsm->r_tim_lastsent[0] = cts;
		rsm->r_rtr_cnt = 1;
		rsm->r_act_rxt_cnt = 0;
		rsm->r_rtr_bytes = 0;
		if (th_flags & TH_SYN) {
			/* The data space is one beyond snd_una */
			rsm->r_flags |= RACK_HAS_SYN;
		}
		rsm->r_start = seq_out;
		rsm->r_end = rsm->r_start + len;
		rack_mark_in_gp_win(tp, rsm);
		rsm->r_dupack = 0;
		/*
		 * Save off the mbuf location that
		 * sndmbuf_noadv returned (which is
		 * where we started copying from).
		 */
		rsm->m = s_mb;
		rsm->soff = s_moff;
		/*
		 * Here we do add in the len of the send, since it's not yet
		 * reflected in snd_una <-> snd_max.
		 */
		rsm->r_fas = (ctf_flight_size(rack->rc_tp,
					      rack->r_ctl.rc_sacked) +
			      (rsm->r_end - rsm->r_start));
		if ((rack->rc_initial_ss_comp == 0) &&
		    (rack->r_ctl.ss_hi_fs < rsm->r_fas)) {
			rack->r_ctl.ss_hi_fs = rsm->r_fas;
		}
		/* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */
		if (rsm->m) {
			if (rsm->m->m_len <= rsm->soff) {
				/*
				 * XXXrrs Question, will this happen?
				 *
				 * If sbsndptr is set at the correct place
				 * then s_moff should always be somewhere
				 * within rsm->m. But if the sbsndptr was
				 * off then that won't be true. If it occurs
				 * we need to walk out to the correct location.
				 */
				struct mbuf *lm;

				lm = rsm->m;
				while (lm->m_len <= rsm->soff) {
					rsm->soff -= lm->m_len;
					lm = lm->m_next;
					KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u",
							     __func__, rack, s_moff, s_mb, rsm->soff));
				}
				rsm->m = lm;
			}
			rsm->orig_m_len = rsm->m->m_len;
			rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
		} else {
			rsm->orig_m_len = 0;
			rsm->orig_t_space = 0;
		}
		rsm->r_bas = (uint8_t)((len + segsiz - 1) / segsiz);
		rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
		/* Log a new rsm */
		rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__);
#ifndef INVARIANTS
		(void)tqhash_insert(rack->r_ctl.tqh, rsm);
#else
		if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) {
			panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
			      rsm, insret, rack, rsm);
		}
#endif
		TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
		rsm->r_in_tmap = 1;
		if (rsm->r_flags & RACK_IS_PCM) {
			rack->r_ctl.pcm_i.send_time = cts;
			rack->r_ctl.pcm_i.eseq = rsm->r_end;
			/* First time through we set the start too */
			if (rack->pcm_in_progress == 0)
				rack->r_ctl.pcm_i.sseq = rsm->r_start;
		}
		/*
		 * Special case detection, is there just a single
		 * packet outstanding when we are not in recovery?
		 *
		 * If this is true mark it so.
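		 * (r_one_out_nr is consumed later in tcp_rack_xmit_timer():
		 *  an RTT sample taken when only a single small segment was
		 *  outstanding tells us little about buffering along the
		 *  path, so such samples are treated as low confidence.)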
8460 */ 8461 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 8462 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 8463 struct rack_sendmap *prsm; 8464 8465 prsm = tqhash_prev(rack->r_ctl.tqh, rsm); 8466 if (prsm) 8467 prsm->r_one_out_nr = 1; 8468 } 8469 return; 8470 } 8471 /* 8472 * If we reach here its a retransmission and we need to find it. 8473 */ 8474 more: 8475 if (hintrsm && (hintrsm->r_start == seq_out)) { 8476 rsm = hintrsm; 8477 hintrsm = NULL; 8478 } else { 8479 /* No hints sorry */ 8480 rsm = NULL; 8481 } 8482 if ((rsm) && (rsm->r_start == seq_out)) { 8483 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); 8484 if (len == 0) { 8485 return; 8486 } else { 8487 goto more; 8488 } 8489 } 8490 /* Ok it was not the last pointer go through it the hard way. */ 8491 refind: 8492 rsm = tqhash_find(rack->r_ctl.tqh, seq_out); 8493 if (rsm) { 8494 if (rsm->r_start == seq_out) { 8495 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); 8496 if (len == 0) { 8497 return; 8498 } else { 8499 goto refind; 8500 } 8501 } 8502 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 8503 /* Transmitted within this piece */ 8504 /* 8505 * Ok we must split off the front and then let the 8506 * update do the rest 8507 */ 8508 nrsm = rack_alloc_full_limit(rack); 8509 if (nrsm == NULL) { 8510 rack_update_rsm(tp, rack, rsm, cts, add_flag, segsiz); 8511 return; 8512 } 8513 /* 8514 * copy rsm to nrsm and then trim the front of rsm 8515 * to not include this part. 8516 */ 8517 rack_clone_rsm(rack, nrsm, rsm, seq_out); 8518 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 8519 #ifndef INVARIANTS 8520 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 8521 #else 8522 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 8523 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 8524 nrsm, insret, rack, rsm); 8525 } 8526 #endif 8527 if (rsm->r_in_tmap) { 8528 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8529 nrsm->r_in_tmap = 1; 8530 } 8531 rsm->r_flags &= (~RACK_HAS_FIN); 8532 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag, segsiz); 8533 if (len == 0) { 8534 return; 8535 } else if (len > 0) 8536 goto refind; 8537 } 8538 } 8539 /* 8540 * Hmm not found in map did they retransmit both old and on into the 8541 * new? 8542 */ 8543 if (seq_out == tp->snd_max) { 8544 goto again; 8545 } else if (SEQ_LT(seq_out, tp->snd_max)) { 8546 #ifdef INVARIANTS 8547 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 8548 seq_out, len, tp->snd_una, tp->snd_max); 8549 printf("Starting Dump of all rack entries\n"); 8550 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 8551 printf("rsm:%p start:%u end:%u\n", 8552 rsm, rsm->r_start, rsm->r_end); 8553 } 8554 printf("Dump complete\n"); 8555 panic("seq_out not found rack:%p tp:%p", 8556 rack, tp); 8557 #endif 8558 } else { 8559 #ifdef INVARIANTS 8560 /* 8561 * Hmm beyond sndmax? (only if we are using the new rtt-pack 8562 * flag) 8563 */ 8564 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 8565 seq_out, len, tp->snd_max, tp); 8566 #endif 8567 } 8568 } 8569 8570 /* 8571 * Record one of the RTT updates from an ack into 8572 * our sample structure. 
8573 */ 8574 8575 static void 8576 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt, 8577 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt) 8578 { 8579 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 8580 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { 8581 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; 8582 } 8583 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 8584 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { 8585 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; 8586 } 8587 if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 8588 if (us_rtt < rack->r_ctl.rc_gp_lowrtt) 8589 rack->r_ctl.rc_gp_lowrtt = us_rtt; 8590 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd) 8591 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 8592 } 8593 if ((confidence == 1) && 8594 ((rsm == NULL) || 8595 (rsm->r_just_ret) || 8596 (rsm->r_one_out_nr && 8597 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) { 8598 /* 8599 * If the rsm had a just return 8600 * hit it then we can't trust the 8601 * rtt measurement for buffer deterimination 8602 * Note that a confidence of 2, indicates 8603 * SACK'd which overrides the r_just_ret or 8604 * the r_one_out_nr. If it was a CUM-ACK and 8605 * we had only two outstanding, but get an 8606 * ack for only 1. Then that also lowers our 8607 * confidence. 8608 */ 8609 confidence = 0; 8610 } 8611 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 8612 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) { 8613 if (rack->r_ctl.rack_rs.confidence == 0) { 8614 /* 8615 * We take anything with no current confidence 8616 * saved. 8617 */ 8618 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 8619 rack->r_ctl.rack_rs.confidence = confidence; 8620 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 8621 } else if (confidence != 0) { 8622 /* 8623 * Once we have a confident number, 8624 * we can update it with a smaller 8625 * value since this confident number 8626 * may include the DSACK time until 8627 * the next segment (the second one) arrived. 8628 */ 8629 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 8630 rack->r_ctl.rack_rs.confidence = confidence; 8631 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 8632 } 8633 } 8634 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence); 8635 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; 8636 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; 8637 rack->r_ctl.rack_rs.rs_rtt_cnt++; 8638 } 8639 8640 /* 8641 * Collect new round-trip time estimate 8642 * and update averages and current timeout. 
8643 */ 8644 static void 8645 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 8646 { 8647 int32_t delta; 8648 int32_t rtt; 8649 8650 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 8651 /* No valid sample */ 8652 return; 8653 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 8654 /* We are to use the lowest RTT seen in a single ack */ 8655 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 8656 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 8657 /* We are to use the highest RTT seen in a single ack */ 8658 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 8659 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 8660 /* We are to use the average RTT seen in a single ack */ 8661 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 8662 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 8663 } else { 8664 #ifdef INVARIANTS 8665 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 8666 #endif 8667 return; 8668 } 8669 if (rtt == 0) 8670 rtt = 1; 8671 if (rack->rc_gp_rtt_set == 0) { 8672 /* 8673 * With no RTT we have to accept 8674 * even one we are not confident of. 8675 */ 8676 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 8677 rack->rc_gp_rtt_set = 1; 8678 } else if (rack->r_ctl.rack_rs.confidence) { 8679 /* update the running gp srtt */ 8680 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 8681 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 8682 } 8683 if (rack->r_ctl.rack_rs.confidence) { 8684 /* 8685 * record the low and high for highly buffered path computation, 8686 * we only do this if we are confident (not a retransmission). 8687 */ 8688 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 8689 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 8690 } 8691 if (rack->rc_highly_buffered == 0) { 8692 /* 8693 * Currently once we declare a path has 8694 * highly buffered there is no going 8695 * back, which may be a problem... 8696 */ 8697 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 8698 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 8699 rack->r_ctl.rc_highest_us_rtt, 8700 rack->r_ctl.rc_lowest_us_rtt, 8701 RACK_RTTS_SEEHBP); 8702 rack->rc_highly_buffered = 1; 8703 } 8704 } 8705 } 8706 if ((rack->r_ctl.rack_rs.confidence) || 8707 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 8708 /* 8709 * If we are highly confident of it <or> it was 8710 * never retransmitted we accept it as the last us_rtt. 8711 */ 8712 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 8713 /* The lowest rtt can be set if its was not retransmited */ 8714 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 8715 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 8716 if (rack->r_ctl.rc_lowest_us_rtt == 0) 8717 rack->r_ctl.rc_lowest_us_rtt = 1; 8718 } 8719 } 8720 rack = (struct tcp_rack *)tp->t_fb_ptr; 8721 if (tp->t_srtt != 0) { 8722 /* 8723 * We keep a simple srtt in microseconds, like our rtt 8724 * measurement. We don't need to do any tricks with shifting 8725 * etc. Instead we just add in 1/8th of the new measurement 8726 * and subtract out 1/8 of the old srtt. We do the same with 8727 * the variance after finding the absolute value of the 8728 * difference between this sample and the current srtt. 
8729 */ 8730 delta = tp->t_srtt - rtt; 8731 /* Take off 1/8th of the current sRTT */ 8732 tp->t_srtt -= (tp->t_srtt >> 3); 8733 /* Add in 1/8th of the new RTT just measured */ 8734 tp->t_srtt += (rtt >> 3); 8735 if (tp->t_srtt <= 0) 8736 tp->t_srtt = 1; 8737 /* Now lets make the absolute value of the variance */ 8738 if (delta < 0) 8739 delta = -delta; 8740 /* Subtract out 1/8th */ 8741 tp->t_rttvar -= (tp->t_rttvar >> 3); 8742 /* Add in 1/8th of the new variance we just saw */ 8743 tp->t_rttvar += (delta >> 3); 8744 if (tp->t_rttvar <= 0) 8745 tp->t_rttvar = 1; 8746 } else { 8747 /* 8748 * No rtt measurement yet - use the unsmoothed rtt. Set the 8749 * variance to half the rtt (so our first retransmit happens 8750 * at 3*rtt). 8751 */ 8752 tp->t_srtt = rtt; 8753 tp->t_rttvar = rtt >> 1; 8754 } 8755 rack->rc_srtt_measure_made = 1; 8756 KMOD_TCPSTAT_INC(tcps_rttupdated); 8757 if (tp->t_rttupdated < UCHAR_MAX) 8758 tp->t_rttupdated++; 8759 #ifdef STATS 8760 if (rack_stats_gets_ms_rtt == 0) { 8761 /* Send in the microsecond rtt used for rxt timeout purposes */ 8762 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 8763 } else if (rack_stats_gets_ms_rtt == 1) { 8764 /* Send in the millisecond rtt used for rxt timeout purposes */ 8765 int32_t ms_rtt; 8766 8767 /* Round up */ 8768 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 8769 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 8770 } else if (rack_stats_gets_ms_rtt == 2) { 8771 /* Send in the millisecond rtt has close to the path RTT as we can get */ 8772 int32_t ms_rtt; 8773 8774 /* Round up */ 8775 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 8776 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 8777 } else { 8778 /* Send in the microsecond rtt has close to the path RTT as we can get */ 8779 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 8780 } 8781 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_PATHRTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 8782 #endif 8783 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 8784 /* 8785 * the retransmit should happen at rtt + 4 * rttvar. Because of the 8786 * way we do the smoothing, srtt and rttvar will each average +1/2 8787 * tick of bias. When we compute the retransmit timer, we want 1/2 8788 * tick of rounding and 1 extra tick because of +-1/2 tick 8789 * uncertainty in the firing of the timer. The bias will give us 8790 * exactly the 1.5 tick we need. But, because the bias is 8791 * statistical, we have to test that we don't drop below the minimum 8792 * feasible timer (which is 2 ticks). 8793 */ 8794 tp->t_rxtshift = 0; 8795 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 8796 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 8797 rack_log_rtt_sample(rack, rtt); 8798 tp->t_softerror = 0; 8799 } 8800 8801 8802 static void 8803 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 8804 { 8805 /* 8806 * Apply to filter the inbound us-rtt at us_cts. 
8807 */ 8808 uint32_t old_rtt; 8809 8810 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 8811 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 8812 us_rtt, us_cts); 8813 if (old_rtt > us_rtt) { 8814 /* We just hit a new lower rtt time */ 8815 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 8816 __LINE__, RACK_RTTS_NEWRTT); 8817 /* 8818 * Only count it if its lower than what we saw within our 8819 * calculated range. 8820 */ 8821 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 8822 if (rack_probertt_lower_within && 8823 rack->rc_gp_dyn_mul && 8824 (rack->use_fixed_rate == 0) && 8825 (rack->rc_always_pace)) { 8826 /* 8827 * We are seeing a new lower rtt very close 8828 * to the time that we would have entered probe-rtt. 8829 * This is probably due to the fact that a peer flow 8830 * has entered probe-rtt. Lets go in now too. 8831 */ 8832 uint32_t val; 8833 8834 val = rack_probertt_lower_within * rack_time_between_probertt; 8835 val /= 100; 8836 if ((rack->in_probe_rtt == 0) && 8837 (rack->rc_skip_timely == 0) && 8838 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 8839 rack_enter_probertt(rack, us_cts); 8840 } 8841 } 8842 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 8843 } 8844 } 8845 } 8846 8847 static int 8848 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 8849 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 8850 { 8851 uint32_t us_rtt; 8852 int32_t i, all; 8853 uint32_t t, len_acked; 8854 8855 if ((rsm->r_flags & RACK_ACKED) || 8856 (rsm->r_flags & RACK_WAS_ACKED)) 8857 /* Already done */ 8858 return (0); 8859 if (rsm->r_no_rtt_allowed) { 8860 /* Not allowed */ 8861 return (0); 8862 } 8863 if (ack_type == CUM_ACKED) { 8864 if (SEQ_GT(th_ack, rsm->r_end)) { 8865 len_acked = rsm->r_end - rsm->r_start; 8866 all = 1; 8867 } else { 8868 len_acked = th_ack - rsm->r_start; 8869 all = 0; 8870 } 8871 } else { 8872 len_acked = rsm->r_end - rsm->r_start; 8873 all = 0; 8874 } 8875 if (rsm->r_rtr_cnt == 1) { 8876 8877 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 8878 if ((int)t <= 0) 8879 t = 1; 8880 if (!tp->t_rttlow || tp->t_rttlow > t) 8881 tp->t_rttlow = t; 8882 if (!rack->r_ctl.rc_rack_min_rtt || 8883 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8884 rack->r_ctl.rc_rack_min_rtt = t; 8885 if (rack->r_ctl.rc_rack_min_rtt == 0) { 8886 rack->r_ctl.rc_rack_min_rtt = 1; 8887 } 8888 } 8889 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 8890 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8891 else 8892 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8893 if (us_rtt == 0) 8894 us_rtt = 1; 8895 if (CC_ALGO(tp)->rttsample != NULL) { 8896 /* Kick the RTT to the CC */ 8897 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 8898 } 8899 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 8900 if (ack_type == SACKED) { 8901 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 8902 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 8903 } else { 8904 /* 8905 * We need to setup what our confidence 8906 * is in this ack. 8907 * 8908 * If the rsm was app limited and it is 8909 * less than a mss in length (the end 8910 * of the send) then we have a gap. 
If we 8911 * were app limited but say we were sending 8912 * multiple MSS's then we are more confident 8913 * int it. 8914 * 8915 * When we are not app-limited then we see if 8916 * the rsm is being included in the current 8917 * measurement, we tell this by the app_limited_needs_set 8918 * flag. 8919 * 8920 * Note that being cwnd blocked is not applimited 8921 * as well as the pacing delay between packets which 8922 * are sending only 1 or 2 MSS's also will show up 8923 * in the RTT. We probably need to examine this algorithm 8924 * a bit more and enhance it to account for the delay 8925 * between rsm's. We could do that by saving off the 8926 * pacing delay of each rsm (in an rsm) and then 8927 * factoring that in somehow though for now I am 8928 * not sure how :) 8929 */ 8930 int calc_conf = 0; 8931 8932 if (rsm->r_flags & RACK_APP_LIMITED) { 8933 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 8934 calc_conf = 0; 8935 else 8936 calc_conf = 1; 8937 } else if (rack->app_limited_needs_set == 0) { 8938 calc_conf = 1; 8939 } else { 8940 calc_conf = 0; 8941 } 8942 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 8943 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 8944 calc_conf, rsm, rsm->r_rtr_cnt); 8945 } 8946 if ((rsm->r_flags & RACK_TLP) && 8947 (!IN_FASTRECOVERY(tp->t_flags))) { 8948 /* Segment was a TLP and our retrans matched */ 8949 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 8950 rack_cong_signal(tp, CC_NDUPACK, th_ack, __LINE__); 8951 } 8952 } 8953 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 8954 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 8955 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { 8956 /* New more recent rack_tmit_time */ 8957 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 8958 if (rack->r_ctl.rc_rack_tmit_time == 0) 8959 rack->r_ctl.rc_rack_tmit_time = 1; 8960 rack->rc_rack_rtt = t; 8961 } 8962 return (1); 8963 } 8964 /* 8965 * We clear the soft/rxtshift since we got an ack. 8966 * There is no assurance we will call the commit() function 8967 * so we need to clear these to avoid incorrect handling. 8968 */ 8969 tp->t_rxtshift = 0; 8970 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 8971 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 8972 tp->t_softerror = 0; 8973 if (to && (to->to_flags & TOF_TS) && 8974 (ack_type == CUM_ACKED) && 8975 (to->to_tsecr) && 8976 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 8977 /* 8978 * Now which timestamp does it match? In this block the ACK 8979 * must be coming from a previous transmission. 8980 */ 8981 for (i = 0; i < rsm->r_rtr_cnt; i++) { 8982 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 8983 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 8984 if ((int)t <= 0) 8985 t = 1; 8986 if (CC_ALGO(tp)->rttsample != NULL) { 8987 /* 8988 * Kick the RTT to the CC, here 8989 * we lie a bit in that we know the 8990 * retransmission is correct even though 8991 * we retransmitted. This is because 8992 * we match the timestamps. 8993 */ 8994 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) 8995 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; 8996 else 8997 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; 8998 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 8999 } 9000 if ((i + 1) < rsm->r_rtr_cnt) { 9001 /* 9002 * The peer ack'd from our previous 9003 * transmission. 
We have a spurious 9004 * retransmission and thus we dont 9005 * want to update our rack_rtt. 9006 * 9007 * Hmm should there be a CC revert here? 9008 * 9009 */ 9010 return (0); 9011 } 9012 if (!tp->t_rttlow || tp->t_rttlow > t) 9013 tp->t_rttlow = t; 9014 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9015 rack->r_ctl.rc_rack_min_rtt = t; 9016 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9017 rack->r_ctl.rc_rack_min_rtt = 1; 9018 } 9019 } 9020 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9021 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9022 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { 9023 /* New more recent rack_tmit_time */ 9024 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 9025 if (rack->r_ctl.rc_rack_tmit_time == 0) 9026 rack->r_ctl.rc_rack_tmit_time = 1; 9027 rack->rc_rack_rtt = t; 9028 } 9029 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); 9030 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, 9031 rsm->r_rtr_cnt); 9032 return (1); 9033 } 9034 } 9035 /* If we are logging log out the sendmap */ 9036 if (tcp_bblogging_on(rack->rc_tp)) { 9037 for (i = 0; i < rsm->r_rtr_cnt; i++) { 9038 rack_log_rtt_sendmap(rack, i, rsm->r_tim_lastsent[i], to->to_tsecr); 9039 } 9040 } 9041 goto ts_not_found; 9042 } else { 9043 /* 9044 * Ok its a SACK block that we retransmitted. or a windows 9045 * machine without timestamps. We can tell nothing from the 9046 * time-stamp since its not there or the time the peer last 9047 * received a segment that moved forward its cum-ack point. 9048 */ 9049 ts_not_found: 9050 i = rsm->r_rtr_cnt - 1; 9051 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 9052 if ((int)t <= 0) 9053 t = 1; 9054 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9055 /* 9056 * We retransmitted and the ack came back in less 9057 * than the smallest rtt we have observed. We most 9058 * likely did an improper retransmit as outlined in 9059 * 6.2 Step 2 point 2 in the rack-draft so we 9060 * don't want to update our rack_rtt. We in 9061 * theory (in future) might want to think about reverting our 9062 * cwnd state but we won't for now. 9063 */ 9064 return (0); 9065 } else if (rack->r_ctl.rc_rack_min_rtt) { 9066 /* 9067 * We retransmitted it and the retransmit did the 9068 * job. 9069 */ 9070 if (!rack->r_ctl.rc_rack_min_rtt || 9071 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9072 rack->r_ctl.rc_rack_min_rtt = t; 9073 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9074 rack->r_ctl.rc_rack_min_rtt = 1; 9075 } 9076 } 9077 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9078 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9079 (uint32_t)rsm->r_tim_lastsent[i]))) { 9080 /* New more recent rack_tmit_time */ 9081 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; 9082 if (rack->r_ctl.rc_rack_tmit_time == 0) 9083 rack->r_ctl.rc_rack_tmit_time = 1; 9084 rack->rc_rack_rtt = t; 9085 } 9086 return (1); 9087 } 9088 } 9089 return (0); 9090 } 9091 9092 /* 9093 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 
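 * We walk backwards through the time-of-send map (rc_tmap) starting just
 * before rsm: every earlier-sent entry that is still unacked gets
 * RACK_SACK_PASSED and, when the RACK threshold measured from rsm's last
 * send time has already expired, is additionally counted as lost
 * (RACK_WAS_LOST).  We stop at the first entry already marked, since
 * everything before it was handled on a prior pass.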
9094 */ 9095 static void 9096 rack_log_sack_passed(struct tcpcb *tp, 9097 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts) 9098 { 9099 struct rack_sendmap *nrsm; 9100 uint32_t thresh; 9101 9102 /* Get our rxt threshold for lost consideration */ 9103 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(tp, rack), cts, __LINE__, 0); 9104 /* Now start looking at rsm's */ 9105 nrsm = rsm; 9106 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 9107 rack_head, r_tnext) { 9108 if (nrsm == rsm) { 9109 /* Skip original segment he is acked */ 9110 continue; 9111 } 9112 if (nrsm->r_flags & RACK_ACKED) { 9113 /* 9114 * Skip ack'd segments, though we 9115 * should not see these, since tmap 9116 * should not have ack'd segments. 9117 */ 9118 continue; 9119 } 9120 if (nrsm->r_flags & RACK_RWND_COLLAPSED) { 9121 /* 9122 * If the peer dropped the rwnd on 9123 * these then we don't worry about them. 9124 */ 9125 continue; 9126 } 9127 /* Check lost state */ 9128 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { 9129 uint32_t exp; 9130 9131 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; 9132 if (TSTMP_LT(exp, cts) || (exp == cts)) { 9133 /* We consider it lost */ 9134 nrsm->r_flags |= RACK_WAS_LOST; 9135 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; 9136 } 9137 } 9138 if (nrsm->r_flags & RACK_SACK_PASSED) { 9139 /* 9140 * We found one that is already marked 9141 * passed, we have been here before and 9142 * so all others below this are marked. 9143 */ 9144 break; 9145 } 9146 nrsm->r_flags |= RACK_SACK_PASSED; 9147 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 9148 } 9149 } 9150 9151 static void 9152 rack_need_set_test(struct tcpcb *tp, 9153 struct tcp_rack *rack, 9154 struct rack_sendmap *rsm, 9155 tcp_seq th_ack, 9156 int line, 9157 int use_which) 9158 { 9159 struct rack_sendmap *s_rsm; 9160 9161 if ((tp->t_flags & TF_GPUTINPROG) && 9162 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 9163 /* 9164 * We were app limited, and this ack 9165 * butts up or goes beyond the point where we want 9166 * to start our next measurement. We need 9167 * to record the new gput_ts as here and 9168 * possibly update the start sequence. 9169 */ 9170 uint32_t seq, ts; 9171 9172 if (rsm->r_rtr_cnt > 1) { 9173 /* 9174 * This is a retransmit, can we 9175 * really make any assessment at this 9176 * point? We are not really sure of 9177 * the timestamp, is it this or the 9178 * previous transmission? 9179 * 9180 * Lets wait for something better that 9181 * is not retransmitted. 9182 */ 9183 return; 9184 } 9185 seq = tp->gput_seq; 9186 ts = tp->gput_ts; 9187 rack->app_limited_needs_set = 0; 9188 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 9189 /* Do we start at a new end? */ 9190 if ((use_which == RACK_USE_BEG) && 9191 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { 9192 /* 9193 * When we get an ACK that just eats 9194 * up some of the rsm, we set RACK_USE_BEG 9195 * since whats at r_start (i.e. th_ack) 9196 * is left unacked and thats where the 9197 * measurement now starts. 9198 */ 9199 tp->gput_seq = rsm->r_start; 9200 } 9201 if ((use_which == RACK_USE_END) && 9202 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 9203 /* 9204 * We use the end when the cumack 9205 * is moving forward and completely 9206 * deleting the rsm passed so basically 9207 * r_end holds th_ack. 9208 * 9209 * For SACK's we also want to use the end 9210 * since this piece just got sacked and 9211 * we want to target anything after that 9212 * in our measurement. 
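			 * For example, if this rsm covered [1000, 2000) and
			 * the whole range was just sacked, the next
			 * measurement should begin at 2000, i.e.
			 * gput_seq = rsm->r_end.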
9213 */ 9214 tp->gput_seq = rsm->r_end; 9215 } 9216 if (use_which == RACK_USE_END_OR_THACK) { 9217 /* 9218 * special case for ack moving forward, 9219 * not a sack, we need to move all the 9220 * way up to where this ack cum-ack moves 9221 * to. 9222 */ 9223 if (SEQ_GT(th_ack, rsm->r_end)) 9224 tp->gput_seq = th_ack; 9225 else 9226 tp->gput_seq = rsm->r_end; 9227 } 9228 if (SEQ_LT(tp->gput_seq, tp->snd_max)) 9229 s_rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 9230 else 9231 s_rsm = NULL; 9232 /* 9233 * Pick up the correct send time if we can the rsm passed in 9234 * may be equal to s_rsm if the RACK_USE_BEG was set. For the other 9235 * two cases (RACK_USE_THACK or RACK_USE_END) most likely we will 9236 * find a different seq i.e. the next send up. 9237 * 9238 * If that has not been sent, s_rsm will be NULL and we must 9239 * arrange it so this function will get called again by setting 9240 * app_limited_needs_set. 9241 */ 9242 if (s_rsm) 9243 rack->r_ctl.rc_gp_output_ts = s_rsm->r_tim_lastsent[0]; 9244 else { 9245 /* If we hit here we have to have *not* sent tp->gput_seq */ 9246 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; 9247 /* Set it up so we will go through here again */ 9248 rack->app_limited_needs_set = 1; 9249 } 9250 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { 9251 /* 9252 * We moved beyond this guy's range, re-calculate 9253 * the new end point. 9254 */ 9255 if (rack->rc_gp_filled == 0) { 9256 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 9257 } else { 9258 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 9259 } 9260 } 9261 /* 9262 * We are moving the goal post, we may be able to clear the 9263 * measure_saw_probe_rtt flag. 9264 */ 9265 if ((rack->in_probe_rtt == 0) && 9266 (rack->measure_saw_probe_rtt) && 9267 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 9268 rack->measure_saw_probe_rtt = 0; 9269 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 9270 seq, tp->gput_seq, 9271 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | 9272 (uint64_t)rack->r_ctl.rc_gp_output_ts), 9273 5, line, NULL, 0); 9274 if (rack->rc_gp_filled && 9275 ((tp->gput_ack - tp->gput_seq) < 9276 max(rc_init_window(rack), (MIN_GP_WIN * 9277 ctf_fixed_maxseg(tp))))) { 9278 uint32_t ideal_amount; 9279 9280 ideal_amount = rack_get_measure_window(tp, rack); 9281 if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) { 9282 /* 9283 * There is no sense of continuing this measurement 9284 * because its too small to gain us anything we 9285 * trust. Skip it and that way we can start a new 9286 * measurement quicker. 9287 */ 9288 tp->t_flags &= ~TF_GPUTINPROG; 9289 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 9290 0, 0, 9291 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | 9292 (uint64_t)rack->r_ctl.rc_gp_output_ts), 9293 6, __LINE__, NULL, 0); 9294 } else { 9295 /* 9296 * Reset the window further out. 
9297 */ 9298 tp->gput_ack = tp->gput_seq + ideal_amount; 9299 } 9300 } 9301 rack_tend_gp_marks(tp, rack); 9302 rack_log_gpset(rack, tp->gput_ack, 0, 0, line, 2, rsm); 9303 } 9304 } 9305 9306 static inline int 9307 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) 9308 { 9309 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { 9310 /* Behind our TLP definition or right at */ 9311 return (0); 9312 } 9313 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { 9314 /* The start is beyond or right at our end of TLP definition */ 9315 return (0); 9316 } 9317 /* It has to be a sub-part of the original TLP recorded */ 9318 return (1); 9319 } 9320 9321 static uint32_t 9322 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 9323 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, 9324 uint32_t segsiz) 9325 { 9326 uint32_t start, end, changed = 0; 9327 struct rack_sendmap stack_map; 9328 struct rack_sendmap *rsm, *nrsm, *prev, *next; 9329 int insret __diagused; 9330 int32_t used_ref = 1; 9331 int can_use_hookery = 0; 9332 9333 start = sack->start; 9334 end = sack->end; 9335 rsm = *prsm; 9336 9337 do_rest_ofb: 9338 if ((rsm == NULL) || 9339 (SEQ_LT(end, rsm->r_start)) || 9340 (SEQ_GEQ(start, rsm->r_end)) || 9341 (SEQ_LT(start, rsm->r_start))) { 9342 /* 9343 * We are not in the right spot, 9344 * find the correct spot in the tree. 9345 */ 9346 used_ref = 0; 9347 rsm = tqhash_find(rack->r_ctl.tqh, start); 9348 } 9349 if (rsm == NULL) { 9350 /* TSNH */ 9351 goto out; 9352 } 9353 /* Ok we have an ACK for some piece of this rsm */ 9354 if (rsm->r_start != start) { 9355 if ((rsm->r_flags & RACK_ACKED) == 0) { 9356 /* 9357 * Before any splitting or hookery is 9358 * done is it a TLP of interest i.e. rxt? 9359 */ 9360 if ((rsm->r_flags & RACK_TLP) && 9361 (rsm->r_rtr_cnt > 1)) { 9362 /* 9363 * We are splitting a rxt TLP, check 9364 * if we need to save off the start/end 9365 */ 9366 if (rack->rc_last_tlp_acked_set && 9367 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9368 /* 9369 * We already turned this on since we are inside 9370 * the previous one was a partially sack now we 9371 * are getting another one (maybe all of it). 9372 * 9373 */ 9374 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9375 /* 9376 * Lets make sure we have all of it though. 9377 */ 9378 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9379 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9380 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9381 rack->r_ctl.last_tlp_acked_end); 9382 } 9383 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9384 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9385 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9386 rack->r_ctl.last_tlp_acked_end); 9387 } 9388 } else { 9389 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9390 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9391 rack->rc_last_tlp_past_cumack = 0; 9392 rack->rc_last_tlp_acked_set = 1; 9393 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9394 } 9395 } 9396 /** 9397 * Need to split this in two pieces the before and after, 9398 * the before remains in the map, the after must be 9399 * added. 
In other words we have: 9400 * rsm |--------------| 9401 * sackblk |-------> 9402 * rsm will become 9403 * rsm |---| 9404 * and nrsm will be the sacked piece 9405 * nrsm |----------| 9406 * 9407 * But before we start down that path lets 9408 * see if the sack spans over on top of 9409 * the next guy and it is already sacked. 9410 * 9411 */ 9412 /* 9413 * Hookery can only be used if the two entries 9414 * are in the same bucket and neither one of 9415 * them staddle the bucket line. 9416 */ 9417 next = tqhash_next(rack->r_ctl.tqh, rsm); 9418 if (next && 9419 (rsm->bindex == next->bindex) && 9420 ((rsm->r_flags & RACK_STRADDLE) == 0) && 9421 ((next->r_flags & RACK_STRADDLE) == 0) && 9422 ((rsm->r_flags & RACK_IS_PCM) == 0) && 9423 ((next->r_flags & RACK_IS_PCM) == 0) && 9424 (rsm->r_flags & RACK_IN_GP_WIN) && 9425 (next->r_flags & RACK_IN_GP_WIN)) 9426 can_use_hookery = 1; 9427 else 9428 can_use_hookery = 0; 9429 if (next && can_use_hookery && 9430 (next->r_flags & RACK_ACKED) && 9431 SEQ_GEQ(end, next->r_start)) { 9432 /** 9433 * So the next one is already acked, and 9434 * we can thus by hookery use our stack_map 9435 * to reflect the piece being sacked and 9436 * then adjust the two tree entries moving 9437 * the start and ends around. So we start like: 9438 * rsm |------------| (not-acked) 9439 * next |-----------| (acked) 9440 * sackblk |--------> 9441 * We want to end like so: 9442 * rsm |------| (not-acked) 9443 * next |-----------------| (acked) 9444 * nrsm |-----| 9445 * Where nrsm is a temporary stack piece we 9446 * use to update all the gizmos. 9447 */ 9448 /* Copy up our fudge block */ 9449 nrsm = &stack_map; 9450 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 9451 /* Now adjust our tree blocks */ 9452 tqhash_update_end(rack->r_ctl.tqh, rsm, start); 9453 next->r_start = start; 9454 rsm->r_flags |= RACK_SHUFFLED; 9455 next->r_flags |= RACK_SHUFFLED; 9456 /* Now we must adjust back where next->m is */ 9457 rack_setup_offset_for_rsm(rack, rsm, next); 9458 /* 9459 * Which timestamp do we keep? It is rather 9460 * important in GP measurements to have the 9461 * accurate end of the send window. 9462 * 9463 * We keep the largest value, which is the newest 9464 * send. We do this in case a segment that is 9465 * joined together and not part of a GP estimate 9466 * later gets expanded into the GP estimate. 9467 * 9468 * We prohibit the merging of unlike kinds i.e. 9469 * all pieces that are in the GP estimate can be 9470 * merged and all pieces that are not in a GP estimate 9471 * can be merged, but not disimilar pieces. Combine 9472 * this with taking the highest here and we should 9473 * be ok unless of course the client reneges. Then 9474 * all bets are off. 9475 */ 9476 if (next->r_tim_lastsent[(next->r_rtr_cnt-1)] < 9477 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) 9478 next->r_tim_lastsent[(next->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]; 9479 /* 9480 * And we must keep the newest ack arrival time. 
9481 */ 9482 if (next->r_ack_arrival < 9483 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) 9484 next->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9485 9486 9487 /* We don't need to adjust rsm, it did not change */ 9488 /* Clear out the dup ack count of the remainder */ 9489 rsm->r_dupack = 0; 9490 rsm->r_just_ret = 0; 9491 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9492 /* Now lets make sure our fudge block is right */ 9493 nrsm->r_start = start; 9494 /* Now lets update all the stats and such */ 9495 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 9496 if (rack->app_limited_needs_set) 9497 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 9498 changed += (nrsm->r_end - nrsm->r_start); 9499 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 9500 if (rsm->r_flags & RACK_WAS_LOST) { 9501 int my_chg; 9502 9503 my_chg = (nrsm->r_end - nrsm->r_start); 9504 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 9505 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 9506 if (my_chg <= rack->r_ctl.rc_considered_lost) 9507 rack->r_ctl.rc_considered_lost -= my_chg; 9508 else 9509 rack->r_ctl.rc_considered_lost = 0; 9510 } 9511 if (nrsm->r_flags & RACK_SACK_PASSED) { 9512 rack->r_ctl.rc_reorder_ts = cts; 9513 if (rack->r_ctl.rc_reorder_ts == 0) 9514 rack->r_ctl.rc_reorder_ts = 1; 9515 } 9516 /* 9517 * Now we want to go up from rsm (the 9518 * one left un-acked) to the next one 9519 * in the tmap. We do this so when 9520 * we walk backwards we include marking 9521 * sack-passed on rsm (The one passed in 9522 * is skipped since it is generally called 9523 * on something sacked before removing it 9524 * from the tmap). 9525 */ 9526 if (rsm->r_in_tmap) { 9527 nrsm = TAILQ_NEXT(rsm, r_tnext); 9528 /* 9529 * Now that we have the next 9530 * one walk backwards from there. 9531 */ 9532 if (nrsm && nrsm->r_in_tmap) 9533 rack_log_sack_passed(tp, rack, nrsm, cts); 9534 } 9535 /* Now are we done? */ 9536 if (SEQ_LT(end, next->r_end) || 9537 (end == next->r_end)) { 9538 /* Done with block */ 9539 goto out; 9540 } 9541 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); 9542 counter_u64_add(rack_sack_used_next_merge, 1); 9543 /* Postion for the next block */ 9544 start = next->r_end; 9545 rsm = tqhash_next(rack->r_ctl.tqh, next); 9546 if (rsm == NULL) 9547 goto out; 9548 } else { 9549 /** 9550 * We can't use any hookery here, so we 9551 * need to split the map. We enter like 9552 * so: 9553 * rsm |--------| 9554 * sackblk |-----> 9555 * We will add the new block nrsm and 9556 * that will be the new portion, and then 9557 * fall through after reseting rsm. So we 9558 * split and look like this: 9559 * rsm |----| 9560 * sackblk |-----> 9561 * nrsm |---| 9562 * We then fall through reseting 9563 * rsm to nrsm, so the next block 9564 * picks it up. 9565 */ 9566 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 9567 if (nrsm == NULL) { 9568 /* 9569 * failed XXXrrs what can we do but loose the sack 9570 * info? 
9571 */ 9572 goto out; 9573 } 9574 counter_u64_add(rack_sack_splits, 1); 9575 rack_clone_rsm(rack, nrsm, rsm, start); 9576 rsm->r_just_ret = 0; 9577 #ifndef INVARIANTS 9578 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 9579 #else 9580 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 9581 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 9582 nrsm, insret, rack, rsm); 9583 } 9584 #endif 9585 if (rsm->r_in_tmap) { 9586 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 9587 nrsm->r_in_tmap = 1; 9588 } 9589 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 9590 rsm->r_flags &= (~RACK_HAS_FIN); 9591 /* Position us to point to the new nrsm that starts the sack blk */ 9592 rsm = nrsm; 9593 } 9594 } else { 9595 /* Already sacked this piece */ 9596 counter_u64_add(rack_sack_skipped_acked, 1); 9597 if (end == rsm->r_end) { 9598 /* Done with block */ 9599 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 9600 goto out; 9601 } else if (SEQ_LT(end, rsm->r_end)) { 9602 /* A partial sack to a already sacked block */ 9603 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 9604 goto out; 9605 } else { 9606 /* 9607 * The end goes beyond this guy 9608 * reposition the start to the 9609 * next block. 9610 */ 9611 start = rsm->r_end; 9612 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 9613 if (rsm == NULL) 9614 goto out; 9615 } 9616 } 9617 } 9618 if (SEQ_GEQ(end, rsm->r_end)) { 9619 /** 9620 * The end of this block is either beyond this guy or right 9621 * at this guy. I.e.: 9622 * rsm --- |-----| 9623 * end |-----| 9624 * <or> 9625 * end |---------| 9626 */ 9627 if ((rsm->r_flags & RACK_ACKED) == 0) { 9628 /* 9629 * Is it a TLP of interest? 9630 */ 9631 if ((rsm->r_flags & RACK_TLP) && 9632 (rsm->r_rtr_cnt > 1)) { 9633 /* 9634 * We are splitting a rxt TLP, check 9635 * if we need to save off the start/end 9636 */ 9637 if (rack->rc_last_tlp_acked_set && 9638 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9639 /* 9640 * We already turned this on since we are inside 9641 * the previous one was a partially sack now we 9642 * are getting another one (maybe all of it). 9643 */ 9644 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9645 /* 9646 * Lets make sure we have all of it though. 
9647 */ 9648 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9649 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9650 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9651 rack->r_ctl.last_tlp_acked_end); 9652 } 9653 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9654 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9655 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9656 rack->r_ctl.last_tlp_acked_end); 9657 } 9658 } else { 9659 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9660 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9661 rack->rc_last_tlp_past_cumack = 0; 9662 rack->rc_last_tlp_acked_set = 1; 9663 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9664 } 9665 } 9666 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 9667 changed += (rsm->r_end - rsm->r_start); 9668 /* You get a count for acking a whole segment or more */ 9669 if (rsm->r_flags & RACK_WAS_LOST) { 9670 int my_chg; 9671 9672 my_chg = (rsm->r_end - rsm->r_start); 9673 rsm->r_flags &= ~RACK_WAS_LOST; 9674 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 9675 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 9676 if (my_chg <= rack->r_ctl.rc_considered_lost) 9677 rack->r_ctl.rc_considered_lost -= my_chg; 9678 else 9679 rack->r_ctl.rc_considered_lost = 0; 9680 } 9681 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 9682 if (rsm->r_in_tmap) /* should be true */ 9683 rack_log_sack_passed(tp, rack, rsm, cts); 9684 /* Is Reordering occuring? */ 9685 if (rsm->r_flags & RACK_SACK_PASSED) { 9686 rsm->r_flags &= ~RACK_SACK_PASSED; 9687 rack->r_ctl.rc_reorder_ts = cts; 9688 if (rack->r_ctl.rc_reorder_ts == 0) 9689 rack->r_ctl.rc_reorder_ts = 1; 9690 } 9691 if (rack->app_limited_needs_set) 9692 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 9693 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9694 rsm->r_flags |= RACK_ACKED; 9695 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); 9696 if (rsm->r_in_tmap) { 9697 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 9698 rsm->r_in_tmap = 0; 9699 } 9700 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); 9701 } else { 9702 counter_u64_add(rack_sack_skipped_acked, 1); 9703 } 9704 if (end == rsm->r_end) { 9705 /* This block only - done, setup for next */ 9706 goto out; 9707 } 9708 /* 9709 * There is more not coverend by this rsm move on 9710 * to the next block in the tail queue hash table. 9711 */ 9712 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); 9713 start = rsm->r_end; 9714 rsm = nrsm; 9715 if (rsm == NULL) 9716 goto out; 9717 goto do_rest_ofb; 9718 } 9719 /** 9720 * The end of this sack block is smaller than 9721 * our rsm i.e.: 9722 * rsm --- |-----| 9723 * end |--| 9724 */ 9725 if ((rsm->r_flags & RACK_ACKED) == 0) { 9726 /* 9727 * Is it a TLP of interest? 9728 */ 9729 if ((rsm->r_flags & RACK_TLP) && 9730 (rsm->r_rtr_cnt > 1)) { 9731 /* 9732 * We are splitting a rxt TLP, check 9733 * if we need to save off the start/end 9734 */ 9735 if (rack->rc_last_tlp_acked_set && 9736 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9737 /* 9738 * We already turned this on since we are inside 9739 * the previous one was a partially sack now we 9740 * are getting another one (maybe all of it). 9741 */ 9742 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9743 /* 9744 * Lets make sure we have all of it though. 
9745 */ 9746 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9747 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9748 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9749 rack->r_ctl.last_tlp_acked_end); 9750 } 9751 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9752 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9753 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9754 rack->r_ctl.last_tlp_acked_end); 9755 } 9756 } else { 9757 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9758 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9759 rack->rc_last_tlp_past_cumack = 0; 9760 rack->rc_last_tlp_acked_set = 1; 9761 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9762 } 9763 } 9764 /* 9765 * Hookery can only be used if the two entries 9766 * are in the same bucket and neither one of 9767 * them staddle the bucket line. 9768 */ 9769 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 9770 if (prev && 9771 (rsm->bindex == prev->bindex) && 9772 ((rsm->r_flags & RACK_STRADDLE) == 0) && 9773 ((prev->r_flags & RACK_STRADDLE) == 0) && 9774 ((rsm->r_flags & RACK_IS_PCM) == 0) && 9775 ((prev->r_flags & RACK_IS_PCM) == 0) && 9776 (rsm->r_flags & RACK_IN_GP_WIN) && 9777 (prev->r_flags & RACK_IN_GP_WIN)) 9778 can_use_hookery = 1; 9779 else 9780 can_use_hookery = 0; 9781 if (prev && can_use_hookery && 9782 (prev->r_flags & RACK_ACKED)) { 9783 /** 9784 * Goal, we want the right remainder of rsm to shrink 9785 * in place and span from (rsm->r_start = end) to rsm->r_end. 9786 * We want to expand prev to go all the way 9787 * to prev->r_end <- end. 9788 * so in the tree we have before: 9789 * prev |--------| (acked) 9790 * rsm |-------| (non-acked) 9791 * sackblk |-| 9792 * We churn it so we end up with 9793 * prev |----------| (acked) 9794 * rsm |-----| (non-acked) 9795 * nrsm |-| (temporary) 9796 * 9797 * Note if either prev/rsm is a TLP we don't 9798 * do this. 9799 */ 9800 nrsm = &stack_map; 9801 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 9802 tqhash_update_end(rack->r_ctl.tqh, prev, end); 9803 rsm->r_start = end; 9804 rsm->r_flags |= RACK_SHUFFLED; 9805 prev->r_flags |= RACK_SHUFFLED; 9806 /* Now adjust nrsm (stack copy) to be 9807 * the one that is the small 9808 * piece that was "sacked". 9809 */ 9810 nrsm->r_end = end; 9811 rsm->r_dupack = 0; 9812 /* 9813 * Which timestamp do we keep? It is rather 9814 * important in GP measurements to have the 9815 * accurate end of the send window. 9816 * 9817 * We keep the largest value, which is the newest 9818 * send. We do this in case a segment that is 9819 * joined together and not part of a GP estimate 9820 * later gets expanded into the GP estimate. 9821 * 9822 * We prohibit the merging of unlike kinds i.e. 9823 * all pieces that are in the GP estimate can be 9824 * merged and all pieces that are not in a GP estimate 9825 * can be merged, but not disimilar pieces. Combine 9826 * this with taking the highest here and we should 9827 * be ok unless of course the client reneges. Then 9828 * all bets are off. 9829 */ 9830 if(prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] < 9831 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) { 9832 prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9833 } 9834 /* 9835 * And we must keep the newest ack arrival time. 
9836 */ 9837 9838 if(prev->r_ack_arrival < 9839 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) 9840 prev->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9841 9842 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9843 /* 9844 * Now that the rsm has had its start moved forward 9845 * lets go ahead and get its new place in the world. 9846 */ 9847 rack_setup_offset_for_rsm(rack, prev, rsm); 9848 /* 9849 * Now nrsm is our new little piece 9850 * that is acked (which was merged 9851 * to prev). Update the rtt and changed 9852 * based on that. Also check for reordering. 9853 */ 9854 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 9855 if (rack->app_limited_needs_set) 9856 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 9857 changed += (nrsm->r_end - nrsm->r_start); 9858 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 9859 if (rsm->r_flags & RACK_WAS_LOST) { 9860 int my_chg; 9861 9862 my_chg = (nrsm->r_end - nrsm->r_start); 9863 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), 9864 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 9865 if (my_chg <= rack->r_ctl.rc_considered_lost) 9866 rack->r_ctl.rc_considered_lost -= my_chg; 9867 else 9868 rack->r_ctl.rc_considered_lost = 0; 9869 } 9870 if (nrsm->r_flags & RACK_SACK_PASSED) { 9871 rack->r_ctl.rc_reorder_ts = cts; 9872 if (rack->r_ctl.rc_reorder_ts == 0) 9873 rack->r_ctl.rc_reorder_ts = 1; 9874 } 9875 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); 9876 rsm = prev; 9877 counter_u64_add(rack_sack_used_prev_merge, 1); 9878 } else { 9879 /** 9880 * This is the case where our previous 9881 * block is not acked either, so we must 9882 * split the block in two. 9883 */ 9884 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 9885 if (nrsm == NULL) { 9886 /* failed rrs what can we do but loose the sack info? */ 9887 goto out; 9888 } 9889 if ((rsm->r_flags & RACK_TLP) && 9890 (rsm->r_rtr_cnt > 1)) { 9891 /* 9892 * We are splitting a rxt TLP, check 9893 * if we need to save off the start/end 9894 */ 9895 if (rack->rc_last_tlp_acked_set && 9896 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9897 /* 9898 * We already turned this on since this block is inside 9899 * the previous one was a partially sack now we 9900 * are getting another one (maybe all of it). 9901 */ 9902 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9903 /* 9904 * Lets make sure we have all of it though. 9905 */ 9906 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9907 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9908 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9909 rack->r_ctl.last_tlp_acked_end); 9910 } 9911 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9912 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9913 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9914 rack->r_ctl.last_tlp_acked_end); 9915 } 9916 } else { 9917 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9918 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9919 rack->rc_last_tlp_acked_set = 1; 9920 rack->rc_last_tlp_past_cumack = 0; 9921 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9922 } 9923 } 9924 /** 9925 * In this case nrsm becomes 9926 * nrsm->r_start = end; 9927 * nrsm->r_end = rsm->r_end; 9928 * which is un-acked. 9929 * <and> 9930 * rsm->r_end = nrsm->r_start; 9931 * i.e. the remaining un-acked 9932 * piece is left on the left 9933 * hand side. 
9934 *
9935 * So we start like this
9936 * rsm |----------| (not acked)
9937 * sackblk |---|
9938 * build it so we have
9939 * rsm |---| (acked)
9940 * nrsm |------| (not acked)
9941 */
9942 counter_u64_add(rack_sack_splits, 1);
9943 rack_clone_rsm(rack, nrsm, rsm, end);
9944 rsm->r_flags &= (~RACK_HAS_FIN);
9945 rsm->r_just_ret = 0;
9946 #ifndef INVARIANTS
9947 (void)tqhash_insert(rack->r_ctl.tqh, nrsm);
9948 #else
9949 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
9950 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
9951 nrsm, insret, rack, rsm);
9952 }
9953 #endif
9954 if (rsm->r_in_tmap) {
9955 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
9956 nrsm->r_in_tmap = 1;
9957 }
9958 nrsm->r_dupack = 0;
9959 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2);
9960 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
9961 changed += (rsm->r_end - rsm->r_start);
9962 if (rsm->r_flags & RACK_WAS_LOST) {
9963 int my_chg;
9964
9965 my_chg = (rsm->r_end - rsm->r_start);
9966 rsm->r_flags &= ~RACK_WAS_LOST;
9967 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg),
9968 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
9969 if (my_chg <= rack->r_ctl.rc_considered_lost)
9970 rack->r_ctl.rc_considered_lost -= my_chg;
9971 else
9972 rack->r_ctl.rc_considered_lost = 0;
9973 }
9974 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
9975
9976 if (rsm->r_in_tmap) /* should be true */
9977 rack_log_sack_passed(tp, rack, rsm, cts);
9978 /* Is reordering occurring? */
9979 if (rsm->r_flags & RACK_SACK_PASSED) {
9980 rsm->r_flags &= ~RACK_SACK_PASSED;
9981 rack->r_ctl.rc_reorder_ts = cts;
9982 if (rack->r_ctl.rc_reorder_ts == 0)
9983 rack->r_ctl.rc_reorder_ts = 1;
9984 }
9985 if (rack->app_limited_needs_set)
9986 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
9987 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
9988 rsm->r_flags |= RACK_ACKED;
9989 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end);
9990 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__);
9991 if (rsm->r_in_tmap) {
9992 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
9993 rsm->r_in_tmap = 0;
9994 }
9995 }
9996 } else if (start != end) {
9997 /*
9998 * The block was already acked.
9999 */
10000 counter_u64_add(rack_sack_skipped_acked, 1);
10001 }
10002 out:
10003 if (rsm &&
10004 ((rsm->r_flags & RACK_TLP) == 0) &&
10005 (rsm->r_flags & RACK_ACKED)) {
10006 /*
10007 * Now can we merge where we worked
10008 * with either the previous or
10009 * next block?
10010 */
10011 next = tqhash_next(rack->r_ctl.tqh, rsm);
10012 while (next) {
10013 if (next->r_flags & RACK_TLP)
10014 break;
10015 /* Only allow merges between ones in or out of GP window */
10016 if ((next->r_flags & RACK_IN_GP_WIN) &&
10017 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) {
10018 break;
10019 }
10020 if ((rsm->r_flags & RACK_IN_GP_WIN) &&
10021 ((next->r_flags & RACK_IN_GP_WIN) == 0)) {
10022 break;
10023 }
10024 if (rsm->bindex != next->bindex)
10025 break;
10026 if (rsm->r_flags & RACK_STRADDLE)
10027 break;
10028 if (rsm->r_flags & RACK_IS_PCM)
10029 break;
10030 if (next->r_flags & RACK_STRADDLE)
10031 break;
10032 if (next->r_flags & RACK_IS_PCM)
10033 break;
10034 if (next->r_flags & RACK_ACKED) {
10035 /* yep this and next can be merged */
10036 rsm = rack_merge_rsm(rack, rsm, next);
10037 next = tqhash_next(rack->r_ctl.tqh, rsm);
10038 } else
10039 break;
10040 }
10041 /* Now what about the previous?
*/ 10042 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10043 while (prev) { 10044 if (prev->r_flags & RACK_TLP) 10045 break; 10046 /* Only allow merges between ones in or out of GP window */ 10047 if ((prev->r_flags & RACK_IN_GP_WIN) && 10048 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { 10049 break; 10050 } 10051 if ((rsm->r_flags & RACK_IN_GP_WIN) && 10052 ((prev->r_flags & RACK_IN_GP_WIN) == 0)) { 10053 break; 10054 } 10055 if (rsm->bindex != prev->bindex) 10056 break; 10057 if (rsm->r_flags & RACK_STRADDLE) 10058 break; 10059 if (rsm->r_flags & RACK_IS_PCM) 10060 break; 10061 if (prev->r_flags & RACK_STRADDLE) 10062 break; 10063 if (prev->r_flags & RACK_IS_PCM) 10064 break; 10065 if (prev->r_flags & RACK_ACKED) { 10066 /* yep the previous and this can be merged */ 10067 rsm = rack_merge_rsm(rack, prev, rsm); 10068 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10069 } else 10070 break; 10071 } 10072 } 10073 if (used_ref == 0) { 10074 counter_u64_add(rack_sack_proc_all, 1); 10075 } else { 10076 counter_u64_add(rack_sack_proc_short, 1); 10077 } 10078 /* Save off the next one for quick reference. */ 10079 nrsm = tqhash_find(rack->r_ctl.tqh, end); 10080 *prsm = rack->r_ctl.rc_sacklast = nrsm; 10081 return (changed); 10082 } 10083 10084 static void inline 10085 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 10086 { 10087 struct rack_sendmap *tmap; 10088 10089 tmap = NULL; 10090 while (rsm && (rsm->r_flags & RACK_ACKED)) { 10091 /* Its no longer sacked, mark it so */ 10092 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 10093 #ifdef INVARIANTS 10094 if (rsm->r_in_tmap) { 10095 panic("rack:%p rsm:%p flags:0x%x in tmap?", 10096 rack, rsm, rsm->r_flags); 10097 } 10098 #endif 10099 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 10100 /* Rebuild it into our tmap */ 10101 if (tmap == NULL) { 10102 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10103 tmap = rsm; 10104 } else { 10105 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 10106 tmap = rsm; 10107 } 10108 tmap->r_in_tmap = 1; 10109 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 10110 } 10111 /* 10112 * Now lets possibly clear the sack filter so we start 10113 * recognizing sacks that cover this area. 10114 */ 10115 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 10116 10117 } 10118 10119 10120 static void inline 10121 rack_rsm_sender_update(struct tcp_rack *rack, struct tcpcb *tp, struct rack_sendmap *rsm, uint8_t from) 10122 { 10123 /* 10124 * We look at advancing the end send time for our GP 10125 * measurement tracking only as the cumulative acknowledgment 10126 * moves forward. You might wonder about this, why not 10127 * at every transmission or retransmission within the 10128 * GP window update the rc_gp_cumack_ts? Well its rather 10129 * nuanced but basically the GP window *may* expand (as 10130 * it does below) or worse and harder to track it may shrink. 10131 * 10132 * This last makes it impossible to track at the time of 10133 * the send, since you may set forward your rc_gp_cumack_ts 10134 * when you send, because that send *is* in your currently 10135 * "guessed" window, but then it shrinks. Now which was 10136 * the send time of the last bytes in the window, by the 10137 * time you ask that question that part of the sendmap 10138 * is freed. So you don't know and you will have too 10139 * long of send window. 
Instead by updating the time 10140 * marker only when the cumack advances this assures us 10141 * that we will have only the sends in the window of our 10142 * GP measurement. 10143 * 10144 * Another complication from this is the 10145 * merging of sendmap entries. During SACK processing this 10146 * can happen to conserve the sendmap size. That breaks 10147 * everything down in tracking the send window of the GP 10148 * estimate. So to prevent that and keep it working with 10149 * a tiny bit more limited merging, we only allow like 10150 * types to be merged. I.e. if two sends are in the GP window 10151 * then its ok to merge them together. If two sends are not 10152 * in the GP window its ok to merge them together too. Though 10153 * one send in and one send out cannot be merged. We combine 10154 * this with never allowing the shrinking of the GP window when 10155 * we are in recovery so that we can properly calculate the 10156 * sending times. 10157 * 10158 * This all of course seems complicated, because it is.. :) 10159 * 10160 * The cum-ack is being advanced upon the sendmap. 10161 * If we are not doing a GP estimate don't 10162 * proceed. 10163 */ 10164 uint64_t ts; 10165 10166 if ((tp->t_flags & TF_GPUTINPROG) == 0) 10167 return; 10168 /* 10169 * If this sendmap entry is going 10170 * beyond the measurement window we had picked, 10171 * expand the measurement window by that much. 10172 */ 10173 if (SEQ_GT(rsm->r_end, tp->gput_ack)) { 10174 tp->gput_ack = rsm->r_end; 10175 } 10176 /* 10177 * If we have not setup a ack, then we 10178 * have no idea if the newly acked pieces 10179 * will be "in our seq measurement range". If 10180 * it is when we clear the app_limited_needs_set 10181 * flag the timestamp will be updated. 10182 */ 10183 if (rack->app_limited_needs_set) 10184 return; 10185 /* 10186 * Finally, we grab out the latest timestamp 10187 * that this packet was sent and then see 10188 * if: 10189 * a) The packet touches are newly defined GP range. 10190 * b) The time is greater than (newer) than the 10191 * one we currently have. If so we update 10192 * our sending end time window. 10193 * 10194 * Note we *do not* do this at send time. The reason 10195 * is that if you do you *may* pick up a newer timestamp 10196 * for a range you are not going to measure. We project 10197 * out how far and then sometimes modify that to be 10198 * smaller. If that occurs then you will have a send 10199 * that does not belong to the range included. 10200 */ 10201 if ((ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) <= 10202 rack->r_ctl.rc_gp_cumack_ts) 10203 return; 10204 if (rack_in_gp_window(tp, rsm)) { 10205 rack->r_ctl.rc_gp_cumack_ts = ts; 10206 rack_log_gpset(rack, tp->gput_ack, (uint32_t)ts, rsm->r_end, 10207 __LINE__, from, rsm); 10208 } 10209 } 10210 10211 static void 10212 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to, uint64_t acktime) 10213 { 10214 struct rack_sendmap *rsm; 10215 /* 10216 * The ACK point is advancing to th_ack, we must drop off 10217 * the packets in the rack log and calculate any eligble 10218 * RTT's. 10219 */ 10220 10221 if (sack_filter_blks_used(&rack->r_ctl.rack_sf)) { 10222 /* 10223 * If we have some sack blocks in the filter 10224 * lets prune them out by calling sfb with no blocks. 
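 * Passing a NULL block list with a count of zero below is the
 * prune-only form of the call: it simply hands the filter the new
 * cumulative ack so any remembered blocks now at or below th_ack
 * can be dropped.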
10225 */ 10226 sack_filter_blks(tp, &rack->r_ctl.rack_sf, NULL, 0, th_ack); 10227 } 10228 if (SEQ_GT(th_ack, tp->snd_una)) { 10229 /* Clear any app ack remembered settings */ 10230 rack->r_ctl.cleared_app_ack = 0; 10231 } 10232 rack->r_wanted_output = 1; 10233 if (SEQ_GT(th_ack, tp->snd_una)) 10234 rack->r_ctl.last_cumack_advance = acktime; 10235 10236 /* Tend any TLP that has been marked for 1/2 the seq space (its old) */ 10237 if ((rack->rc_last_tlp_acked_set == 1)&& 10238 (rack->rc_last_tlp_past_cumack == 1) && 10239 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { 10240 /* 10241 * We have reached the point where our last rack 10242 * tlp retransmit sequence is ahead of the cum-ack. 10243 * This can only happen when the cum-ack moves all 10244 * the way around (its been a full 2^^31+1 bytes 10245 * or more since we sent a retransmitted TLP). Lets 10246 * turn off the valid flag since its not really valid. 10247 * 10248 * Note since sack's also turn on this event we have 10249 * a complication, we have to wait to age it out until 10250 * the cum-ack is by the TLP before checking which is 10251 * what the next else clause does. 10252 */ 10253 rack_log_dsack_event(rack, 9, __LINE__, 10254 rack->r_ctl.last_tlp_acked_start, 10255 rack->r_ctl.last_tlp_acked_end); 10256 rack->rc_last_tlp_acked_set = 0; 10257 rack->rc_last_tlp_past_cumack = 0; 10258 } else if ((rack->rc_last_tlp_acked_set == 1) && 10259 (rack->rc_last_tlp_past_cumack == 0) && 10260 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { 10261 /* 10262 * It is safe to start aging TLP's out. 10263 */ 10264 rack->rc_last_tlp_past_cumack = 1; 10265 } 10266 /* We do the same for the tlp send seq as well */ 10267 if ((rack->rc_last_sent_tlp_seq_valid == 1) && 10268 (rack->rc_last_sent_tlp_past_cumack == 1) && 10269 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { 10270 rack_log_dsack_event(rack, 9, __LINE__, 10271 rack->r_ctl.last_sent_tlp_seq, 10272 (rack->r_ctl.last_sent_tlp_seq + 10273 rack->r_ctl.last_sent_tlp_len)); 10274 rack->rc_last_sent_tlp_seq_valid = 0; 10275 rack->rc_last_sent_tlp_past_cumack = 0; 10276 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && 10277 (rack->rc_last_sent_tlp_past_cumack == 0) && 10278 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { 10279 /* 10280 * It is safe to start aging TLP's send. 10281 */ 10282 rack->rc_last_sent_tlp_past_cumack = 1; 10283 } 10284 more: 10285 rsm = tqhash_min(rack->r_ctl.tqh); 10286 if (rsm == NULL) { 10287 if ((th_ack - 1) == tp->iss) { 10288 /* 10289 * For the SYN incoming case we will not 10290 * have called tcp_output for the sending of 10291 * the SYN, so there will be no map. All 10292 * other cases should probably be a panic. 10293 */ 10294 return; 10295 } 10296 if (tp->t_flags & TF_SENTFIN) { 10297 /* if we sent a FIN we often will not have map */ 10298 return; 10299 } 10300 #ifdef INVARIANTS 10301 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u\n", 10302 tp, 10303 tp->t_state, th_ack, rack, 10304 tp->snd_una, tp->snd_max); 10305 #endif 10306 return; 10307 } 10308 if (SEQ_LT(th_ack, rsm->r_start)) { 10309 /* Huh map is missing this */ 10310 #ifdef INVARIANTS 10311 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 10312 rsm->r_start, 10313 th_ack, tp->t_state, rack->r_state); 10314 #endif 10315 return; 10316 } 10317 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); 10318 10319 /* Now was it a retransmitted TLP? 
*/ 10320 if ((rsm->r_flags & RACK_TLP) && 10321 (rsm->r_rtr_cnt > 1)) { 10322 /* 10323 * Yes, this rsm was a TLP and retransmitted, remember that 10324 * since if a DSACK comes back on this we don't want 10325 * to think of it as a reordered segment. This may 10326 * get updated again with possibly even other TLPs 10327 * in flight, but thats ok. Only when we don't send 10328 * a retransmitted TLP for 1/2 the sequences space 10329 * will it get turned off (above). 10330 */ 10331 if (rack->rc_last_tlp_acked_set && 10332 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 10333 /* 10334 * We already turned this on since the end matches, 10335 * the previous one was a partially ack now we 10336 * are getting another one (maybe all of it). 10337 */ 10338 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 10339 /* 10340 * Lets make sure we have all of it though. 10341 */ 10342 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 10343 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10344 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10345 rack->r_ctl.last_tlp_acked_end); 10346 } 10347 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 10348 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10349 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10350 rack->r_ctl.last_tlp_acked_end); 10351 } 10352 } else { 10353 rack->rc_last_tlp_past_cumack = 1; 10354 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10355 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10356 rack->rc_last_tlp_acked_set = 1; 10357 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 10358 } 10359 } 10360 /* Now do we consume the whole thing? */ 10361 rack->r_ctl.last_tmit_time_acked = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 10362 if (SEQ_GEQ(th_ack, rsm->r_end)) { 10363 /* Its all consumed. */ 10364 uint32_t left; 10365 uint8_t newly_acked; 10366 10367 if (rsm->r_flags & RACK_WAS_LOST) { 10368 /* 10369 * This can happen when we marked it as lost 10370 * and yet before retransmitting we get an ack 10371 * which can happen due to reordering. 10372 */ 10373 rsm->r_flags &= ~RACK_WAS_LOST; 10374 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), 10375 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack)); 10376 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) 10377 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; 10378 else 10379 rack->r_ctl.rc_considered_lost = 0; 10380 } 10381 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 10382 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 10383 rsm->r_rtr_bytes = 0; 10384 /* 10385 * Record the time of highest cumack sent if its in our measurement 10386 * window and possibly bump out the end. 10387 */ 10388 rack_rsm_sender_update(rack, tp, rsm, 4); 10389 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); 10390 if (rsm->r_in_tmap) { 10391 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10392 rsm->r_in_tmap = 0; 10393 } 10394 newly_acked = 1; 10395 if (rsm->r_flags & RACK_ACKED) { 10396 /* 10397 * It was acked on the scoreboard -- remove 10398 * it from total 10399 */ 10400 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 10401 newly_acked = 0; 10402 } else if (rsm->r_flags & RACK_SACK_PASSED) { 10403 /* 10404 * There are segments ACKED on the 10405 * scoreboard further up. We are seeing 10406 * reordering. 
10407 */ 10408 rsm->r_flags &= ~RACK_SACK_PASSED; 10409 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10410 rsm->r_flags |= RACK_ACKED; 10411 rack->r_ctl.rc_reorder_ts = cts; 10412 if (rack->r_ctl.rc_reorder_ts == 0) 10413 rack->r_ctl.rc_reorder_ts = 1; 10414 if (rack->r_ent_rec_ns) { 10415 /* 10416 * We have sent no more, and we saw an sack 10417 * then ack arrive. 10418 */ 10419 rack->r_might_revert = 1; 10420 } 10421 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); 10422 } else { 10423 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); 10424 } 10425 if ((rsm->r_flags & RACK_TO_REXT) && 10426 (tp->t_flags & TF_RCVD_TSTMP) && 10427 (to->to_flags & TOF_TS) && 10428 (to->to_tsecr != 0) && 10429 (tp->t_flags & TF_PREVVALID)) { 10430 /* 10431 * We can use the timestamp to see 10432 * if this retransmission was from the 10433 * first transmit. If so we made a mistake. 10434 */ 10435 tp->t_flags &= ~TF_PREVVALID; 10436 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 10437 /* The first transmit is what this ack is for */ 10438 rack_cong_signal(tp, CC_RTO_ERR, th_ack, __LINE__); 10439 } 10440 } 10441 left = th_ack - rsm->r_end; 10442 if (rack->app_limited_needs_set && newly_acked) 10443 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 10444 /* Free back to zone */ 10445 rack_free(rack, rsm); 10446 if (left) { 10447 goto more; 10448 } 10449 /* Check for reneging */ 10450 rsm = tqhash_min(rack->r_ctl.tqh); 10451 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 10452 /* 10453 * The peer has moved snd_una up to 10454 * the edge of this send, i.e. one 10455 * that it had previously acked. The only 10456 * way that can be true if the peer threw 10457 * away data (space issues) that it had 10458 * previously sacked (else it would have 10459 * given us snd_una up to (rsm->r_end). 10460 * We need to undo the acked markings here. 10461 * 10462 * Note we have to look to make sure th_ack is 10463 * our rsm->r_start in case we get an old ack 10464 * where th_ack is behind snd_una. 10465 */ 10466 rack_peer_reneges(rack, rsm, th_ack); 10467 } 10468 return; 10469 } 10470 if (rsm->r_flags & RACK_ACKED) { 10471 /* 10472 * It was acked on the scoreboard -- remove it from 10473 * total for the part being cum-acked. 10474 */ 10475 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 10476 } else { 10477 rack_update_pcm_ack(rack, 1, rsm->r_start, th_ack); 10478 } 10479 /* And what about the lost flag? */ 10480 if (rsm->r_flags & RACK_WAS_LOST) { 10481 /* 10482 * This can happen when we marked it as lost 10483 * and yet before retransmitting we get an ack 10484 * which can happen due to reordering. In this 10485 * case its only a partial ack of the send. 10486 */ 10487 KASSERT((rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)), 10488 ("rsm:%p rack:%p rc_considered_lost goes negative th_ack:%u", rsm, rack, th_ack)); 10489 if (rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)) 10490 rack->r_ctl.rc_considered_lost -= th_ack - rsm->r_start; 10491 else 10492 rack->r_ctl.rc_considered_lost = 0; 10493 } 10494 /* 10495 * Clear the dup ack count for 10496 * the piece that remains. 10497 */ 10498 rsm->r_dupack = 0; 10499 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 10500 if (rsm->r_rtr_bytes) { 10501 /* 10502 * It was retransmitted adjust the 10503 * sack holes for what was acked. 
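 * ack_am below is how many bytes of this entry the new cumulative
 * ack covers; only when that covers at least all of the entry's
 * retransmitted bytes do we shrink the retransmit accounting
 * (rc_holes_rxt and r_rtr_bytes).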
10504 */ 10505 int ack_am; 10506 10507 ack_am = (th_ack - rsm->r_start); 10508 if (ack_am >= rsm->r_rtr_bytes) { 10509 rack->r_ctl.rc_holes_rxt -= ack_am; 10510 rsm->r_rtr_bytes -= ack_am; 10511 } 10512 } 10513 /* 10514 * Update where the piece starts and record 10515 * the time of send of highest cumack sent if 10516 * its in our GP range. 10517 */ 10518 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 10519 /* Now we need to move our offset forward too */ 10520 if (rsm->m && 10521 ((rsm->orig_m_len != rsm->m->m_len) || 10522 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { 10523 /* Fix up the orig_m_len and possibly the mbuf offset */ 10524 rack_adjust_orig_mlen(rsm); 10525 } 10526 rsm->soff += (th_ack - rsm->r_start); 10527 rack_rsm_sender_update(rack, tp, rsm, 5); 10528 /* The trim will move th_ack into r_start for us */ 10529 tqhash_trim(rack->r_ctl.tqh, th_ack); 10530 /* Now do we need to move the mbuf fwd too? */ 10531 { 10532 struct mbuf *m; 10533 uint32_t soff; 10534 10535 m = rsm->m; 10536 soff = rsm->soff; 10537 if (m) { 10538 while (soff >= m->m_len) { 10539 soff -= m->m_len; 10540 KASSERT((m->m_next != NULL), 10541 (" rsm:%p off:%u soff:%u m:%p", 10542 rsm, rsm->soff, soff, m)); 10543 m = m->m_next; 10544 if (m == NULL) { 10545 /* 10546 * This is a fall-back that prevents a panic. In reality 10547 * we should be able to walk the mbuf's and find our place. 10548 * At this point snd_una has not been updated with the sbcut() yet 10549 * but tqhash_trim did update rsm->r_start so the offset calcuation 10550 * should work fine. This is undesirable since we will take cache 10551 * hits to access the socket buffer. And even more puzzling is that 10552 * it happens occasionally. It should not :( 10553 */ 10554 m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 10555 (rsm->r_start - tp->snd_una), 10556 &soff); 10557 break; 10558 } 10559 } 10560 /* 10561 * Now save in our updated values. 10562 */ 10563 rsm->m = m; 10564 rsm->soff = soff; 10565 rsm->orig_m_len = rsm->m->m_len; 10566 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 10567 } 10568 } 10569 if (rack->app_limited_needs_set && 10570 SEQ_GEQ(th_ack, tp->gput_seq)) 10571 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 10572 } 10573 10574 static void 10575 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 10576 { 10577 struct rack_sendmap *rsm; 10578 int sack_pass_fnd = 0; 10579 10580 if (rack->r_might_revert) { 10581 /* 10582 * Ok we have reordering, have not sent anything, we 10583 * might want to revert the congestion state if nothing 10584 * further has SACK_PASSED on it. Lets check. 10585 * 10586 * We also get here when we have DSACKs come in for 10587 * all the data that we FR'd. Note that a rxt or tlp 10588 * timer clears this from happening. 10589 */ 10590 10591 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 10592 if (rsm->r_flags & RACK_SACK_PASSED) { 10593 sack_pass_fnd = 1; 10594 break; 10595 } 10596 } 10597 if (sack_pass_fnd == 0) { 10598 /* 10599 * We went into recovery 10600 * incorrectly due to reordering! 
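 * Undo it: restore snd_ssthresh from the value saved when we
 * entered recovery (rc_ssthresh_at_erec), pull snd_recover back
 * to snd_una and, if the connection is still marked in recovery,
 * exit it.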
10601 */ 10602 int orig_cwnd; 10603 10604 rack->r_ent_rec_ns = 0; 10605 orig_cwnd = tp->snd_cwnd; 10606 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 10607 tp->snd_recover = tp->snd_una; 10608 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 10609 if (IN_RECOVERY(tp->t_flags)) { 10610 rack_exit_recovery(tp, rack, 3); 10611 if ((rack->rto_from_rec == 1) && (rack_ssthresh_rest_rto_rec != 0) ){ 10612 /* 10613 * We were in recovery, had an RTO 10614 * and then re-entered recovery (more sack's arrived) 10615 * and we have properly recorded the old ssthresh from 10616 * the first recovery. We want to be able to slow-start 10617 * back to this level. The ssthresh from the timeout 10618 * and then back into recovery will end up most likely 10619 * to be min(cwnd=1mss, 2mss). Which makes it basically 10620 * so we get no slow-start after our RTO. 10621 */ 10622 rack->rto_from_rec = 0; 10623 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) 10624 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; 10625 } 10626 } 10627 } 10628 rack->r_might_revert = 0; 10629 } 10630 } 10631 10632 10633 static int 10634 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 10635 { 10636 10637 uint32_t am, l_end; 10638 int was_tlp = 0; 10639 10640 if (SEQ_GT(end, start)) 10641 am = end - start; 10642 else 10643 am = 0; 10644 if ((rack->rc_last_tlp_acked_set ) && 10645 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && 10646 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { 10647 /* 10648 * The DSACK is because of a TLP which we don't 10649 * do anything with the reordering window over since 10650 * it was not reordering that caused the DSACK but 10651 * our previous retransmit TLP. 10652 */ 10653 rack_log_dsack_event(rack, 7, __LINE__, start, end); 10654 was_tlp = 1; 10655 goto skip_dsack_round; 10656 } 10657 if (rack->rc_last_sent_tlp_seq_valid) { 10658 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; 10659 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && 10660 (SEQ_LEQ(end, l_end))) { 10661 /* 10662 * This dsack is from the last sent TLP, ignore it 10663 * for reordering purposes. 10664 */ 10665 rack_log_dsack_event(rack, 7, __LINE__, start, end); 10666 was_tlp = 1; 10667 goto skip_dsack_round; 10668 } 10669 } 10670 if (rack->rc_dsack_round_seen == 0) { 10671 rack->rc_dsack_round_seen = 1; 10672 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; 10673 rack->r_ctl.num_dsack++; 10674 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ 10675 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); 10676 } 10677 skip_dsack_round: 10678 /* 10679 * We keep track of how many DSACK blocks we get 10680 * after a recovery incident. 10681 */ 10682 rack->r_ctl.dsack_byte_cnt += am; 10683 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 10684 rack->r_ctl.retran_during_recovery && 10685 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 10686 /* 10687 * False recovery most likely culprit is reordering. If 10688 * nothing else is missing we need to revert. 
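 * We force the revert check here: r_might_revert is set so that
 * rack_handle_might_revert() will verify nothing is still marked
 * SACK_PASSED and, if so, restore the pre-recovery state; the
 * recovery and DSACK tracking counters are then reset.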
10689 */ 10690 rack->r_might_revert = 1; 10691 rack_handle_might_revert(rack->rc_tp, rack); 10692 rack->r_might_revert = 0; 10693 rack->r_ctl.retran_during_recovery = 0; 10694 rack->r_ctl.dsack_byte_cnt = 0; 10695 } 10696 return (was_tlp); 10697 } 10698 10699 static uint32_t 10700 do_rack_compute_pipe(struct tcpcb *tp, struct tcp_rack *rack, uint32_t snd_una) 10701 { 10702 return (((tp->snd_max - snd_una) - 10703 (rack->r_ctl.rc_sacked + rack->r_ctl.rc_considered_lost)) + rack->r_ctl.rc_holes_rxt); 10704 } 10705 10706 static int32_t 10707 rack_compute_pipe(struct tcpcb *tp) 10708 { 10709 return ((int32_t)do_rack_compute_pipe(tp, 10710 (struct tcp_rack *)tp->t_fb_ptr, 10711 tp->snd_una)); 10712 } 10713 10714 static void 10715 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 10716 { 10717 /* Deal with changed and PRR here (in recovery only) */ 10718 uint32_t pipe, snd_una; 10719 10720 rack->r_ctl.rc_prr_delivered += changed; 10721 10722 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 10723 /* 10724 * It is all outstanding, we are application limited 10725 * and thus we don't need more room to send anything. 10726 * Note we use tp->snd_una here and not th_ack because 10727 * the data as yet not been cut from the sb. 10728 */ 10729 rack->r_ctl.rc_prr_sndcnt = 0; 10730 return; 10731 } 10732 /* Compute prr_sndcnt */ 10733 if (SEQ_GT(tp->snd_una, th_ack)) { 10734 snd_una = tp->snd_una; 10735 } else { 10736 snd_una = th_ack; 10737 } 10738 pipe = do_rack_compute_pipe(tp, rack, snd_una); 10739 if (pipe > tp->snd_ssthresh) { 10740 long sndcnt; 10741 10742 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 10743 if (rack->r_ctl.rc_prr_recovery_fs > 0) 10744 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 10745 else { 10746 rack->r_ctl.rc_prr_sndcnt = 0; 10747 rack_log_to_prr(rack, 9, 0, __LINE__); 10748 sndcnt = 0; 10749 } 10750 sndcnt++; 10751 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 10752 sndcnt -= rack->r_ctl.rc_prr_out; 10753 else 10754 sndcnt = 0; 10755 rack->r_ctl.rc_prr_sndcnt = sndcnt; 10756 rack_log_to_prr(rack, 10, 0, __LINE__); 10757 } else { 10758 uint32_t limit; 10759 10760 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 10761 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 10762 else 10763 limit = 0; 10764 if (changed > limit) 10765 limit = changed; 10766 limit += ctf_fixed_maxseg(tp); 10767 if (tp->snd_ssthresh > pipe) { 10768 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 10769 rack_log_to_prr(rack, 11, 0, __LINE__); 10770 } else { 10771 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 10772 rack_log_to_prr(rack, 12, 0, __LINE__); 10773 } 10774 } 10775 } 10776 10777 static void 10778 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck, 10779 int *dsack_seen, int *sacks_seen) 10780 { 10781 uint32_t changed; 10782 struct tcp_rack *rack; 10783 struct rack_sendmap *rsm; 10784 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 10785 register uint32_t th_ack; 10786 int32_t i, j, k, num_sack_blks = 0; 10787 uint32_t cts, acked, ack_point; 10788 int loop_start = 0; 10789 uint32_t tsused; 10790 uint32_t segsiz; 10791 10792 10793 INP_WLOCK_ASSERT(tptoinpcb(tp)); 10794 if (tcp_get_flags(th) & TH_RST) { 10795 /* We don't log resets */ 10796 return; 10797 } 10798 rack = (struct tcp_rack *)tp->t_fb_ptr; 10799 cts = tcp_get_usecs(NULL); 10800 rsm = tqhash_min(rack->r_ctl.tqh); 10801 changed = 0; 10802 th_ack = 
th->th_ack; 10803 segsiz = ctf_fixed_maxseg(rack->rc_tp); 10804 if (BYTES_THIS_ACK(tp, th) >= segsiz) { 10805 /* 10806 * You only get credit for 10807 * MSS and greater (and you get extra 10808 * credit for larger cum-ack moves). 10809 */ 10810 int ac; 10811 10812 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 10813 counter_u64_add(rack_ack_total, ac); 10814 } 10815 if (SEQ_GT(th_ack, tp->snd_una)) { 10816 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 10817 tp->t_acktime = ticks; 10818 } 10819 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 10820 changed = th_ack - rsm->r_start; 10821 if (changed) { 10822 rack_process_to_cumack(tp, rack, th_ack, cts, to, 10823 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); 10824 } 10825 if ((to->to_flags & TOF_SACK) == 0) { 10826 /* We are done nothing left and no sack. */ 10827 rack_handle_might_revert(tp, rack); 10828 /* 10829 * For cases where we struck a dup-ack 10830 * with no SACK, add to the changes so 10831 * PRR will work right. 10832 */ 10833 if (dup_ack_struck && (changed == 0)) { 10834 changed += ctf_fixed_maxseg(rack->rc_tp); 10835 } 10836 goto out; 10837 } 10838 /* Sack block processing */ 10839 if (SEQ_GT(th_ack, tp->snd_una)) 10840 ack_point = th_ack; 10841 else 10842 ack_point = tp->snd_una; 10843 for (i = 0; i < to->to_nsacks; i++) { 10844 bcopy((to->to_sacks + i * TCPOLEN_SACK), 10845 &sack, sizeof(sack)); 10846 sack.start = ntohl(sack.start); 10847 sack.end = ntohl(sack.end); 10848 if (SEQ_GT(sack.end, sack.start) && 10849 SEQ_GT(sack.start, ack_point) && 10850 SEQ_LT(sack.start, tp->snd_max) && 10851 SEQ_GT(sack.end, ack_point) && 10852 SEQ_LEQ(sack.end, tp->snd_max)) { 10853 sack_blocks[num_sack_blks] = sack; 10854 num_sack_blks++; 10855 } else if (SEQ_LEQ(sack.start, th_ack) && 10856 SEQ_LEQ(sack.end, th_ack)) { 10857 int was_tlp; 10858 10859 if (dsack_seen != NULL) 10860 *dsack_seen = 1; 10861 was_tlp = rack_note_dsack(rack, sack.start, sack.end); 10862 /* 10863 * Its a D-SACK block. 10864 */ 10865 tcp_record_dsack(tp, sack.start, sack.end, was_tlp); 10866 } 10867 } 10868 if (rack->rc_dsack_round_seen) { 10869 /* Is the dsack roound over? */ 10870 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { 10871 /* Yes it is */ 10872 rack->rc_dsack_round_seen = 0; 10873 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 10874 } 10875 } 10876 /* 10877 * Sort the SACK blocks so we can update the rack scoreboard with 10878 * just one pass. 10879 */ 10880 num_sack_blks = sack_filter_blks(tp, &rack->r_ctl.rack_sf, sack_blocks, 10881 num_sack_blks, th->th_ack); 10882 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 10883 if (sacks_seen != NULL) 10884 *sacks_seen = num_sack_blks; 10885 if (num_sack_blks == 0) { 10886 /* Nothing to sack, but we need to update counts */ 10887 goto out_with_totals; 10888 } 10889 /* Its a sack of some sort */ 10890 if (num_sack_blks < 2) { 10891 /* Only one, we don't need to sort */ 10892 goto do_sack_work; 10893 } 10894 /* Sort the sacks */ 10895 for (i = 0; i < num_sack_blks; i++) { 10896 for (j = i + 1; j < num_sack_blks; j++) { 10897 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 10898 sack = sack_blocks[i]; 10899 sack_blocks[i] = sack_blocks[j]; 10900 sack_blocks[j] = sack; 10901 } 10902 } 10903 } 10904 /* 10905 * Now are any of the sack block ends the same (yes some 10906 * implementations send these)? 
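 * If two blocks share an end, keep whichever start covers more
 * (the smaller start), slide the remaining blocks down over the
 * duplicate, drop the count and rescan from the top.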
10907 */ 10908 again: 10909 if (num_sack_blks == 0) 10910 goto out_with_totals; 10911 if (num_sack_blks > 1) { 10912 for (i = 0; i < num_sack_blks; i++) { 10913 for (j = i + 1; j < num_sack_blks; j++) { 10914 if (sack_blocks[i].end == sack_blocks[j].end) { 10915 /* 10916 * Ok these two have the same end we 10917 * want the smallest end and then 10918 * throw away the larger and start 10919 * again. 10920 */ 10921 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 10922 /* 10923 * The second block covers 10924 * more area use that 10925 */ 10926 sack_blocks[i].start = sack_blocks[j].start; 10927 } 10928 /* 10929 * Now collapse out the dup-sack and 10930 * lower the count 10931 */ 10932 for (k = (j + 1); k < num_sack_blks; k++) { 10933 sack_blocks[j].start = sack_blocks[k].start; 10934 sack_blocks[j].end = sack_blocks[k].end; 10935 j++; 10936 } 10937 num_sack_blks--; 10938 goto again; 10939 } 10940 } 10941 } 10942 } 10943 do_sack_work: 10944 /* 10945 * First lets look to see if 10946 * we have retransmitted and 10947 * can use the transmit next? 10948 */ 10949 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 10950 if (rsm && 10951 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 10952 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 10953 /* 10954 * We probably did the FR and the next 10955 * SACK in continues as we would expect. 10956 */ 10957 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, segsiz); 10958 if (acked) { 10959 rack->r_wanted_output = 1; 10960 changed += acked; 10961 } 10962 if (num_sack_blks == 1) { 10963 /* 10964 * This is what we would expect from 10965 * a normal implementation to happen 10966 * after we have retransmitted the FR, 10967 * i.e the sack-filter pushes down 10968 * to 1 block and the next to be retransmitted 10969 * is the sequence in the sack block (has more 10970 * are acked). Count this as ACK'd data to boost 10971 * up the chances of recovering any false positives. 10972 */ 10973 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 10974 counter_u64_add(rack_express_sack, 1); 10975 goto out_with_totals; 10976 } else { 10977 /* 10978 * Start the loop through the 10979 * rest of blocks, past the first block. 10980 */ 10981 loop_start = 1; 10982 } 10983 } 10984 counter_u64_add(rack_sack_total, 1); 10985 rsm = rack->r_ctl.rc_sacklast; 10986 for (i = loop_start; i < num_sack_blks; i++) { 10987 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, segsiz); 10988 if (acked) { 10989 rack->r_wanted_output = 1; 10990 changed += acked; 10991 } 10992 } 10993 out_with_totals: 10994 if (num_sack_blks > 1) { 10995 /* 10996 * You get an extra stroke if 10997 * you have more than one sack-blk, this 10998 * could be where we are skipping forward 10999 * and the sack-filter is still working, or 11000 * it could be an attacker constantly 11001 * moving us. 11002 */ 11003 counter_u64_add(rack_move_some, 1); 11004 } 11005 out: 11006 if (changed) { 11007 /* Something changed cancel the rack timer */ 11008 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11009 } 11010 tsused = tcp_get_usecs(NULL); 11011 rsm = tcp_rack_output(tp, rack, tsused); 11012 if ((!IN_FASTRECOVERY(tp->t_flags)) && 11013 rsm && 11014 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { 11015 /* Enter recovery */ 11016 entered_recovery = 1; 11017 rack_cong_signal(tp, CC_NDUPACK, th_ack, __LINE__); 11018 /* 11019 * When we enter recovery we need to assure we send 11020 * one packet. 
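 * With PRR in use that is done by seeding rc_prr_sndcnt with one
 * full segment of send credit, so the output path is not starved
 * right at recovery entry.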
11021 */ 11022 if (rack->rack_no_prr == 0) { 11023 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 11024 rack_log_to_prr(rack, 8, 0, __LINE__); 11025 } 11026 rack->r_timer_override = 1; 11027 rack->r_early = 0; 11028 rack->r_ctl.rc_agg_early = 0; 11029 } else if (IN_FASTRECOVERY(tp->t_flags) && 11030 rsm && 11031 (rack->r_rr_config == 3)) { 11032 /* 11033 * Assure we can output and we get no 11034 * remembered pace time except the retransmit. 11035 */ 11036 rack->r_timer_override = 1; 11037 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 11038 rack->r_ctl.rc_resend = rsm; 11039 } 11040 if (IN_FASTRECOVERY(tp->t_flags) && 11041 (rack->rack_no_prr == 0) && 11042 (entered_recovery == 0)) { 11043 rack_update_prr(tp, rack, changed, th_ack); 11044 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 11045 ((tcp_in_hpts(rack->rc_tp) == 0) && 11046 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 11047 /* 11048 * If you are pacing output you don't want 11049 * to override. 11050 */ 11051 rack->r_early = 0; 11052 rack->r_ctl.rc_agg_early = 0; 11053 rack->r_timer_override = 1; 11054 } 11055 } 11056 } 11057 11058 static void 11059 rack_strike_dupack(struct tcp_rack *rack, tcp_seq th_ack) 11060 { 11061 struct rack_sendmap *rsm; 11062 11063 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 11064 while (rsm) { 11065 /* 11066 * We need to skip anything already set 11067 * to be retransmitted. 11068 */ 11069 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 11070 (rsm->r_flags & RACK_MUST_RXT)) { 11071 rsm = TAILQ_NEXT(rsm, r_tnext); 11072 continue; 11073 } 11074 break; 11075 } 11076 if (rsm && (rsm->r_dupack < 0xff)) { 11077 rsm->r_dupack++; 11078 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 11079 struct timeval tv; 11080 uint32_t cts; 11081 /* 11082 * Here we see if we need to retransmit. For 11083 * a SACK type connection if enough time has passed 11084 * we will get a return of the rsm. For a non-sack 11085 * connection we will get the rsm returned if the 11086 * dupack value is 3 or more. 11087 */ 11088 cts = tcp_get_usecs(&tv); 11089 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); 11090 if (rack->r_ctl.rc_resend != NULL) { 11091 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { 11092 rack_cong_signal(rack->rc_tp, CC_NDUPACK, 11093 th_ack, __LINE__); 11094 } 11095 rack->r_wanted_output = 1; 11096 rack->r_timer_override = 1; 11097 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 11098 } 11099 } else { 11100 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 11101 } 11102 } 11103 } 11104 11105 static void 11106 rack_check_bottom_drag(struct tcpcb *tp, 11107 struct tcp_rack *rack, 11108 struct socket *so) 11109 { 11110 /* 11111 * So what is dragging bottom? 11112 * 11113 * Dragging bottom means you were under pacing and had a 11114 * delay in processing inbound acks waiting on our pacing 11115 * timer to expire. While you were waiting all of the acknowledgments 11116 * for the packets you sent have arrived. This means we are pacing 11117 * way underneath the bottleneck to the point where our Goodput 11118 * measurements stop working, since they require more than one 11119 * ack (usually at least 8 packets worth with multiple acks so we can 11120 * gauge the inter-ack times). If that occurs we have a real problem 11121 * since we are stuck in a hole that we can't get out of without 11122 * something speeding us up. 11123 * 11124 * We also check to see if we are widdling down to just one segment 11125 * outstanding. 
If this occurs and we have room to send in our cwnd/rwnd 11126 * then we are adding the delayed ack interval into our measurments and 11127 * we need to speed up slightly. 11128 */ 11129 uint32_t segsiz, minseg; 11130 11131 segsiz = ctf_fixed_maxseg(tp); 11132 minseg = segsiz; 11133 if (tp->snd_max == tp->snd_una) { 11134 /* 11135 * We are doing dynamic pacing and we are way 11136 * under. Basically everything got acked while 11137 * we were still waiting on the pacer to expire. 11138 * 11139 * This means we need to boost the b/w in 11140 * addition to any earlier boosting of 11141 * the multiplier. 11142 */ 11143 uint64_t lt_bw; 11144 11145 tcp_trace_point(rack->rc_tp, TCP_TP_PACED_BOTTOM); 11146 lt_bw = rack_get_lt_bw(rack); 11147 rack->rc_dragged_bottom = 1; 11148 rack_validate_multipliers_at_or_above100(rack); 11149 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && 11150 (rack->dis_lt_bw == 0) && 11151 (rack->use_lesser_lt_bw == 0) && 11152 (lt_bw > 0)) { 11153 /* 11154 * Lets use the long-term b/w we have 11155 * been getting as a base. 11156 */ 11157 if (rack->rc_gp_filled == 0) { 11158 if (lt_bw > ONE_POINT_TWO_MEG) { 11159 /* 11160 * If we have no measurement 11161 * don't let us set in more than 11162 * 1.2Mbps. If we are still too 11163 * low after pacing with this we 11164 * will hopefully have a max b/w 11165 * available to sanity check things. 11166 */ 11167 lt_bw = ONE_POINT_TWO_MEG; 11168 } 11169 rack->r_ctl.rc_rtt_diff = 0; 11170 rack->r_ctl.gp_bw = lt_bw; 11171 rack->rc_gp_filled = 1; 11172 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 11173 rack->r_ctl.num_measurements = RACK_REQ_AVG; 11174 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 11175 } else if (lt_bw > rack->r_ctl.gp_bw) { 11176 rack->r_ctl.rc_rtt_diff = 0; 11177 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 11178 rack->r_ctl.num_measurements = RACK_REQ_AVG; 11179 rack->r_ctl.gp_bw = lt_bw; 11180 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 11181 } else 11182 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11183 if ((rack->gp_ready == 0) && 11184 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 11185 /* We have enough measurements now */ 11186 rack->gp_ready = 1; 11187 if (rack->dgp_on || 11188 rack->rack_hibeta) 11189 rack_set_cc_pacing(rack); 11190 if (rack->defer_options) 11191 rack_apply_deferred_options(rack); 11192 } 11193 } else { 11194 /* 11195 * zero rtt possibly?, settle for just an old increase. 11196 */ 11197 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11198 } 11199 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 11200 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), 11201 minseg)) && 11202 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && 11203 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && 11204 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= 11205 (segsiz * rack_req_segs))) { 11206 /* 11207 * We are doing dynamic GP pacing and 11208 * we have everything except 1MSS or less 11209 * bytes left out. We are still pacing away. 11210 * And there is data that could be sent, This 11211 * means we are inserting delayed ack time in 11212 * our measurements because we are pacing too slow. 
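 * So mark that we dragged bottom and, as in the cases above, give
 * the pacing rate a small boost via rack_increase_bw_mul() so we
 * stop folding delayed-ack time into the estimate.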
11213 */ 11214 rack_validate_multipliers_at_or_above100(rack); 11215 rack->rc_dragged_bottom = 1; 11216 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11217 } 11218 } 11219 11220 #ifdef TCP_REQUEST_TRK 11221 static void 11222 rack_log_hybrid(struct tcp_rack *rack, uint32_t seq, 11223 struct tcp_sendfile_track *cur, uint8_t mod, int line, int err) 11224 { 11225 int do_log; 11226 11227 do_log = tcp_bblogging_on(rack->rc_tp); 11228 if (do_log == 0) { 11229 if ((do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) )== 0) 11230 return; 11231 /* We only allow the three below with point logging on */ 11232 if ((mod != HYBRID_LOG_RULES_APP) && 11233 (mod != HYBRID_LOG_RULES_SET) && 11234 (mod != HYBRID_LOG_REQ_COMP)) 11235 return; 11236 11237 } 11238 if (do_log) { 11239 union tcp_log_stackspecific log; 11240 struct timeval tv; 11241 11242 /* Convert our ms to a microsecond */ 11243 memset(&log, 0, sizeof(log)); 11244 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 11245 log.u_bbr.flex1 = seq; 11246 log.u_bbr.cwnd_gain = line; 11247 if (cur != NULL) { 11248 uint64_t off; 11249 11250 log.u_bbr.flex2 = cur->start_seq; 11251 log.u_bbr.flex3 = cur->end_seq; 11252 log.u_bbr.flex4 = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 11253 log.u_bbr.flex5 = (uint32_t)(cur->localtime & 0x00000000ffffffff); 11254 log.u_bbr.flex6 = cur->flags; 11255 log.u_bbr.pkts_out = cur->hybrid_flags; 11256 log.u_bbr.rttProp = cur->timestamp; 11257 log.u_bbr.cur_del_rate = cur->cspr; 11258 log.u_bbr.bw_inuse = cur->start; 11259 log.u_bbr.applimited = (uint32_t)(cur->end & 0x00000000ffffffff); 11260 log.u_bbr.delivered = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff) ; 11261 log.u_bbr.epoch = (uint32_t)(cur->deadline & 0x00000000ffffffff); 11262 log.u_bbr.lt_epoch = (uint32_t)((cur->deadline >> 32) & 0x00000000ffffffff) ; 11263 log.u_bbr.inhpts = 1; 11264 #ifdef TCP_REQUEST_TRK 11265 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 11266 log.u_bbr.use_lt_bw = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 11267 #endif 11268 } else { 11269 log.u_bbr.flex2 = err; 11270 } 11271 /* 11272 * Fill in flex7 to be CHD (catchup|hybrid|DGP) 11273 */ 11274 log.u_bbr.flex7 = rack->rc_catch_up; 11275 log.u_bbr.flex7 <<= 1; 11276 log.u_bbr.flex7 |= rack->rc_hybrid_mode; 11277 log.u_bbr.flex7 <<= 1; 11278 log.u_bbr.flex7 |= rack->dgp_on; 11279 /* 11280 * Compose bbr_state to be a bit wise 0000ADHF 11281 * where A is the always_pace flag 11282 * where D is the dgp_on flag 11283 * where H is the hybrid_mode on flag 11284 * where F is the use_fixed_rate flag. 
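 * For example, always_pace and dgp_on set with hybrid mode and
 * fixed rate clear yields bbr_state = 0b1100 (12).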
11285 */ 11286 log.u_bbr.bbr_state = rack->rc_always_pace; 11287 log.u_bbr.bbr_state <<= 1; 11288 log.u_bbr.bbr_state |= rack->dgp_on; 11289 log.u_bbr.bbr_state <<= 1; 11290 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 11291 log.u_bbr.bbr_state <<= 1; 11292 log.u_bbr.bbr_state |= rack->use_fixed_rate; 11293 log.u_bbr.flex8 = mod; 11294 log.u_bbr.delRate = rack->r_ctl.bw_rate_cap; 11295 log.u_bbr.bbr_substate = rack->r_ctl.client_suggested_maxseg; 11296 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 11297 log.u_bbr.pkt_epoch = rack->rc_tp->tcp_hybrid_start; 11298 log.u_bbr.lost = rack->rc_tp->tcp_hybrid_error; 11299 log.u_bbr.pacing_gain = (uint16_t)rack->rc_tp->tcp_hybrid_stop; 11300 tcp_log_event(rack->rc_tp, NULL, 11301 &rack->rc_inp->inp_socket->so_rcv, 11302 &rack->rc_inp->inp_socket->so_snd, 11303 TCP_HYBRID_PACING_LOG, 0, 11304 0, &log, false, NULL, __func__, __LINE__, &tv); 11305 } 11306 } 11307 #endif 11308 11309 #ifdef TCP_REQUEST_TRK 11310 static void 11311 rack_set_dgp_hybrid_mode(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts) 11312 { 11313 struct tcp_sendfile_track *rc_cur, *orig_ent; 11314 struct tcpcb *tp; 11315 int err = 0; 11316 11317 orig_ent = rack->r_ctl.rc_last_sft; 11318 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, seq); 11319 if (rc_cur == NULL) { 11320 /* If not in the beginning what about the end piece */ 11321 if (rack->rc_hybrid_mode) 11322 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); 11323 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, (seq + len - 1)); 11324 } else { 11325 err = 12345; 11326 } 11327 /* If we find no parameters we are in straight DGP mode */ 11328 if(rc_cur == NULL) { 11329 /* None found for this seq, just DGP for now */ 11330 if (rack->rc_hybrid_mode) { 11331 rack->r_ctl.client_suggested_maxseg = 0; 11332 rack->rc_catch_up = 0; 11333 if (rack->cspr_is_fcc == 0) 11334 rack->r_ctl.bw_rate_cap = 0; 11335 else 11336 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 11337 } 11338 if (rack->rc_hybrid_mode) { 11339 rack_log_hybrid(rack, (seq + len - 1), NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); 11340 } 11341 if (rack->r_ctl.rc_last_sft) { 11342 rack->r_ctl.rc_last_sft = NULL; 11343 } 11344 return; 11345 } 11346 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_WASSET) == 0) { 11347 /* This entry was never setup for hybrid pacing on/off etc */ 11348 if (rack->rc_hybrid_mode) { 11349 rack->r_ctl.client_suggested_maxseg = 0; 11350 rack->rc_catch_up = 0; 11351 rack->r_ctl.bw_rate_cap = 0; 11352 } 11353 if (rack->r_ctl.rc_last_sft) { 11354 rack->r_ctl.rc_last_sft = NULL; 11355 } 11356 if ((rc_cur->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { 11357 rc_cur->flags |= TCP_TRK_TRACK_FLG_FSND; 11358 rc_cur->first_send = cts; 11359 rc_cur->sent_at_fs = rack->rc_tp->t_sndbytes; 11360 rc_cur->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; 11361 } 11362 return; 11363 } 11364 /* 11365 * Ok if we have a new entry *or* have never 11366 * set up an entry we need to proceed. If 11367 * we have already set it up this entry we 11368 * just continue along with what we already 11369 * setup. 
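 * rc_last_sft caches the sendfile-track entry currently driving
 * the pacing decisions; if it already points at rc_cur there is
 * nothing to re-derive and we just log and return.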
11370 */ 11371 tp = rack->rc_tp; 11372 if ((rack->r_ctl.rc_last_sft != NULL) && 11373 (rack->r_ctl.rc_last_sft == rc_cur)) { 11374 /* Its already in place */ 11375 if (rack->rc_hybrid_mode) 11376 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_ISSAME, __LINE__, 0); 11377 return; 11378 } 11379 if (rack->rc_hybrid_mode == 0) { 11380 rack->r_ctl.rc_last_sft = rc_cur; 11381 if (orig_ent) { 11382 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; 11383 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; 11384 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; 11385 } 11386 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0); 11387 return; 11388 } 11389 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CSPR) && rc_cur->cspr){ 11390 /* Compensate for all the header overhead's */ 11391 if (rack->cspr_is_fcc == 0) 11392 rack->r_ctl.bw_rate_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); 11393 else 11394 rack->r_ctl.fillcw_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); 11395 } else { 11396 if (rack->rc_hybrid_mode) { 11397 if (rack->cspr_is_fcc == 0) 11398 rack->r_ctl.bw_rate_cap = 0; 11399 else 11400 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 11401 } 11402 } 11403 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_H_MS) 11404 rack->r_ctl.client_suggested_maxseg = rc_cur->hint_maxseg; 11405 else 11406 rack->r_ctl.client_suggested_maxseg = 0; 11407 if (rc_cur->timestamp == rack->r_ctl.last_tm_mark) { 11408 /* 11409 * It is the same timestamp as the previous one 11410 * add the hybrid flag that will indicate we use 11411 * sendtime not arrival time for catch-up mode. 11412 */ 11413 rc_cur->hybrid_flags |= TCP_HYBRID_PACING_SENDTIME; 11414 } 11415 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CU) && 11416 (rc_cur->cspr > 0)) { 11417 uint64_t len; 11418 11419 rack->rc_catch_up = 1; 11420 /* 11421 * Calculate the deadline time, first set the 11422 * time to when the request arrived. 11423 */ 11424 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_SENDTIME) { 11425 /* 11426 * For cases where its a duplicate tm (we received more 11427 * than one request for a tm) we want to use now, the point 11428 * where we are just sending the first bit of the request. 11429 */ 11430 rc_cur->deadline = cts; 11431 } else { 11432 /* 11433 * Here we have a different tm from the last request 11434 * so we want to use arrival time as our base. 11435 */ 11436 rc_cur->deadline = rc_cur->localtime; 11437 } 11438 /* 11439 * Next calculate the length and compensate for 11440 * TLS if need be. 11441 */ 11442 len = rc_cur->end - rc_cur->start; 11443 if (tp->t_inpcb.inp_socket->so_snd.sb_tls_info) { 11444 /* 11445 * This session is doing TLS. Take a swag guess 11446 * at the overhead. 11447 */ 11448 len += tcp_estimate_tls_overhead(tp->t_inpcb.inp_socket, len); 11449 } 11450 /* 11451 * Now considering the size, and the cspr, what is the time that 11452 * would be required at the cspr rate. Here we use the raw 11453 * cspr value since the client only looks at the raw data. We 11454 * do use len which includes TLS overhead, but not the TCP/IP etc. 11455 * That will get made up for in the CU pacing rate set. 11456 */ 11457 len *= HPTS_USEC_IN_SEC; 11458 len /= rc_cur->cspr; 11459 rc_cur->deadline += len; 11460 } else { 11461 rack->rc_catch_up = 0; 11462 rc_cur->deadline = 0; 11463 } 11464 if (rack->r_ctl.client_suggested_maxseg != 0) { 11465 /* 11466 * We need to reset the max pace segs if we have a 11467 * client_suggested_maxseg. 
11468 */ 11469 rack_set_pace_segments(tp, rack, __LINE__, NULL); 11470 } 11471 if (orig_ent) { 11472 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; 11473 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; 11474 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; 11475 } 11476 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0); 11477 /* Remember it for next time and for CU mode */ 11478 rack->r_ctl.rc_last_sft = rc_cur; 11479 rack->r_ctl.last_tm_mark = rc_cur->timestamp; 11480 } 11481 #endif 11482 11483 static void 11484 rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts) 11485 { 11486 #ifdef TCP_REQUEST_TRK 11487 struct tcp_sendfile_track *ent; 11488 11489 ent = rack->r_ctl.rc_last_sft; 11490 if ((ent == NULL) || 11491 (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) || 11492 (SEQ_GEQ(seq, ent->end_seq))) { 11493 /* Time to update the track. */ 11494 rack_set_dgp_hybrid_mode(rack, seq, len, cts); 11495 ent = rack->r_ctl.rc_last_sft; 11496 } 11497 /* Out of all */ 11498 if (ent == NULL) { 11499 return; 11500 } 11501 if (SEQ_LT(ent->end_seq, (seq + len))) { 11502 /* 11503 * This is the case where our end_seq guess 11504 * was wrong. This is usually due to TLS having 11505 * more bytes then our guess. It could also be the 11506 * case that the client sent in two requests closely 11507 * and the SB is full of both so we are sending part 11508 * of each (end|beg). In such a case lets move this 11509 * guys end to match the end of this send. That 11510 * way it will complete when all of it is acked. 11511 */ 11512 ent->end_seq = (seq + len); 11513 if (rack->rc_hybrid_mode) 11514 rack_log_hybrid_bw(rack, seq, len, 0, 0, HYBRID_LOG_EXTEND, 0, ent, __LINE__); 11515 } 11516 /* Now validate we have set the send time of this one */ 11517 if ((ent->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { 11518 ent->flags |= TCP_TRK_TRACK_FLG_FSND; 11519 ent->first_send = cts; 11520 ent->sent_at_fs = rack->rc_tp->t_sndbytes; 11521 ent->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; 11522 } 11523 #endif 11524 } 11525 11526 static void 11527 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount) 11528 { 11529 /* 11530 * The fast output path is enabled and we 11531 * have moved the cumack forward. Lets see if 11532 * we can expand forward the fast path length by 11533 * that amount. What we would ideally like to 11534 * do is increase the number of bytes in the 11535 * fast path block (left_to_send) by the 11536 * acked amount. However we have to gate that 11537 * by two factors: 11538 * 1) The amount outstanding and the rwnd of the peer 11539 * (i.e. we don't want to exceed the rwnd of the peer). 11540 * <and> 11541 * 2) The amount of data left in the socket buffer (i.e. 11542 * we can't send beyond what is in the buffer). 11543 * 11544 * Note that this does not take into account any increase 11545 * in the cwnd. We will only extend the fast path by 11546 * what was acked. 
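 * Concretely, gating_val below is the smaller of the not-yet-sent
 * data sitting in the socket buffer and the unused portion of the
 * peer's window (both measured beyond what is already outstanding);
 * left_to_send only grows when acked_amount plus the current value
 * still fits under that bound, otherwise it is left untouched.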
11547 */ 11548 uint32_t new_total, gating_val; 11549 11550 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; 11551 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), 11552 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); 11553 if (new_total <= gating_val) { 11554 /* We can increase left_to_send by the acked amount */ 11555 counter_u64_add(rack_extended_rfo, 1); 11556 rack->r_ctl.fsb.left_to_send = new_total; 11557 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))), 11558 ("rack:%p left_to_send:%u sbavail:%u out:%u", 11559 rack, rack->r_ctl.fsb.left_to_send, 11560 sbavail(&rack->rc_inp->inp_socket->so_snd), 11561 (tp->snd_max - tp->snd_una))); 11562 11563 } 11564 } 11565 11566 static void 11567 rack_adjust_sendmap_head(struct tcp_rack *rack, struct sockbuf *sb) 11568 { 11569 /* 11570 * Here any sendmap entry that points to the 11571 * beginning mbuf must be adjusted to the correct 11572 * offset. This must be called with: 11573 * 1) The socket buffer locked 11574 * 2) snd_una adjusted to its new position. 11575 * 11576 * Note that (2) implies rack_ack_received has also 11577 * been called and all the sbcut's have been done. 11578 * 11579 * We grab the first mbuf in the socket buffer and 11580 * then go through the front of the sendmap, recalculating 11581 * the stored offset for any sendmap entry that has 11582 * that mbuf. We must use the sb functions to do this 11583 * since its possible an add was done has well as 11584 * the subtraction we may have just completed. This should 11585 * not be a penalty though, since we just referenced the sb 11586 * to go in and trim off the mbufs that we freed (of course 11587 * there will be a penalty for the sendmap references though). 11588 * 11589 * Note also with INVARIANT on, we validate with a KASSERT 11590 * that the first sendmap entry has a soff of 0. 11591 * 11592 */ 11593 struct mbuf *m; 11594 struct rack_sendmap *rsm; 11595 tcp_seq snd_una; 11596 #ifdef INVARIANTS 11597 int first_processed = 0; 11598 #endif 11599 11600 snd_una = rack->rc_tp->snd_una; 11601 SOCKBUF_LOCK_ASSERT(sb); 11602 m = sb->sb_mb; 11603 rsm = tqhash_min(rack->r_ctl.tqh); 11604 if ((rsm == NULL) || (m == NULL)) { 11605 /* Nothing outstanding */ 11606 return; 11607 } 11608 /* The very first RSM's mbuf must point to the head mbuf in the sb */ 11609 KASSERT((rsm->m == m), 11610 ("Rack:%p sb:%p rsm:%p -- first rsm mbuf not aligned to sb", 11611 rack, sb, rsm)); 11612 while (rsm->m && (rsm->m == m)) { 11613 /* one to adjust */ 11614 #ifdef INVARIANTS 11615 struct mbuf *tm; 11616 uint32_t soff; 11617 11618 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); 11619 if ((rsm->orig_m_len != m->m_len) || 11620 (rsm->orig_t_space != M_TRAILINGROOM(m))){ 11621 rack_adjust_orig_mlen(rsm); 11622 } 11623 if (first_processed == 0) { 11624 KASSERT((rsm->soff == 0), 11625 ("Rack:%p rsm:%p -- rsm at head but soff not zero", 11626 rack, rsm)); 11627 first_processed = 1; 11628 } 11629 if ((rsm->soff != soff) || (rsm->m != tm)) { 11630 /* 11631 * This is not a fatal error, we anticipate it 11632 * might happen (the else code), so we count it here 11633 * so that under invariant we can see that it really 11634 * does happen. 
11635 */ 11636 counter_u64_add(rack_adjust_map_bw, 1); 11637 } 11638 rsm->m = tm; 11639 rsm->soff = soff; 11640 if (tm) { 11641 rsm->orig_m_len = rsm->m->m_len; 11642 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 11643 } else { 11644 rsm->orig_m_len = 0; 11645 rsm->orig_t_space = 0; 11646 } 11647 #else 11648 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); 11649 if (rsm->m) { 11650 rsm->orig_m_len = rsm->m->m_len; 11651 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 11652 } else { 11653 rsm->orig_m_len = 0; 11654 rsm->orig_t_space = 0; 11655 } 11656 #endif 11657 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 11658 if (rsm == NULL) 11659 break; 11660 } 11661 } 11662 11663 #ifdef TCP_REQUEST_TRK 11664 static inline void 11665 rack_req_check_for_comp(struct tcp_rack *rack, tcp_seq th_ack) 11666 { 11667 struct tcp_sendfile_track *ent; 11668 int i; 11669 11670 if ((rack->rc_hybrid_mode == 0) && 11671 (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) == 0)) { 11672 /* 11673 * Just do normal completions hybrid pacing is not on 11674 * and CLDL is off as well. 11675 */ 11676 tcp_req_check_for_comp(rack->rc_tp, th_ack); 11677 return; 11678 } 11679 /* 11680 * Originally I was just going to find the th_ack associated 11681 * with an entry. But then I realized a large strech ack could 11682 * in theory ack two or more requests at once. So instead we 11683 * need to find all entries that are completed by th_ack not 11684 * just a single entry and do our logging. 11685 */ 11686 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); 11687 while (ent != NULL) { 11688 /* 11689 * We may be doing hybrid pacing or CLDL and need more details possibly 11690 * so we do it manually instead of calling 11691 * tcp_req_check_for_comp() 11692 */ 11693 uint64_t laa, tim, data, cbw, ftim; 11694 11695 /* Ok this ack frees it */ 11696 rack_log_hybrid(rack, th_ack, 11697 ent, HYBRID_LOG_REQ_COMP, __LINE__, 0); 11698 rack_log_hybrid_sends(rack, ent, __LINE__); 11699 /* calculate the time based on the ack arrival */ 11700 data = ent->end - ent->start; 11701 laa = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 11702 if (ent->flags & TCP_TRK_TRACK_FLG_FSND) { 11703 if (ent->first_send > ent->localtime) 11704 ftim = ent->first_send; 11705 else 11706 ftim = ent->localtime; 11707 } else { 11708 /* TSNH */ 11709 ftim = ent->localtime; 11710 } 11711 if (laa > ent->localtime) 11712 tim = laa - ftim; 11713 else 11714 tim = 0; 11715 cbw = data * HPTS_USEC_IN_SEC; 11716 if (tim > 0) 11717 cbw /= tim; 11718 else 11719 cbw = 0; 11720 rack_log_hybrid_bw(rack, th_ack, cbw, tim, data, HYBRID_LOG_BW_MEASURE, 0, ent, __LINE__); 11721 /* 11722 * Check to see if we are freeing what we are pointing to send wise 11723 * if so be sure to NULL the pointer so we know we are no longer 11724 * set to anything. 
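 *
 * When it is, the code below also drops out of catch-up mode,
 * resets the hybrid bandwidth/fill-cw cap to its default and
 * zeros the client suggested segment size, so no stale pacing
 * state outlives the request.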
11725 */ 11726 if (ent == rack->r_ctl.rc_last_sft) { 11727 rack->r_ctl.rc_last_sft = NULL; 11728 if (rack->rc_hybrid_mode) { 11729 rack->rc_catch_up = 0; 11730 if (rack->cspr_is_fcc == 0) 11731 rack->r_ctl.bw_rate_cap = 0; 11732 else 11733 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 11734 rack->r_ctl.client_suggested_maxseg = 0; 11735 } 11736 } 11737 /* Generate the log that the tcp_netflix call would have */ 11738 tcp_req_log_req_info(rack->rc_tp, ent, 11739 i, TCP_TRK_REQ_LOG_FREED, 0, 0); 11740 /* Free it and see if there is another one */ 11741 tcp_req_free_a_slot(rack->rc_tp, ent); 11742 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); 11743 } 11744 } 11745 #endif 11746 11747 11748 /* 11749 * Return value of 1, we do not need to call rack_process_data(). 11750 * return value of 0, rack_process_data can be called. 11751 * For ret_val if its 0 the TCP is locked, if its non-zero 11752 * its unlocked and probably unsafe to touch the TCB. 11753 */ 11754 static int 11755 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 11756 struct tcpcb *tp, struct tcpopt *to, 11757 uint32_t tiwin, int32_t tlen, 11758 int32_t * ofia, int32_t thflags, int32_t *ret_val, int32_t orig_tlen) 11759 { 11760 int32_t ourfinisacked = 0; 11761 int32_t nsegs, acked_amount; 11762 int32_t acked; 11763 struct mbuf *mfree; 11764 struct tcp_rack *rack; 11765 int32_t under_pacing = 0; 11766 int32_t post_recovery = 0; 11767 uint32_t p_cwnd; 11768 11769 INP_WLOCK_ASSERT(tptoinpcb(tp)); 11770 11771 rack = (struct tcp_rack *)tp->t_fb_ptr; 11772 if (SEQ_GEQ(tp->snd_una, tp->iss + (65535 << tp->snd_scale))) { 11773 /* Checking SEG.ACK against ISS is definitely redundant. */ 11774 tp->t_flags2 |= TF2_NO_ISS_CHECK; 11775 } 11776 if (!V_tcp_insecure_ack) { 11777 tcp_seq seq_min; 11778 bool ghost_ack_check; 11779 11780 if (tp->t_flags2 & TF2_NO_ISS_CHECK) { 11781 /* Check for too old ACKs (RFC 5961, Section 5.2). */ 11782 seq_min = tp->snd_una - tp->max_sndwnd; 11783 ghost_ack_check = false; 11784 } else { 11785 if (SEQ_GT(tp->iss + 1, tp->snd_una - tp->max_sndwnd)) { 11786 /* Checking for ghost ACKs is stricter. */ 11787 seq_min = tp->iss + 1; 11788 ghost_ack_check = true; 11789 } else { 11790 /* 11791 * Checking for too old ACKs (RFC 5961, 11792 * Section 5.2) is stricter. 11793 */ 11794 seq_min = tp->snd_una - tp->max_sndwnd; 11795 ghost_ack_check = false; 11796 } 11797 } 11798 if (SEQ_LT(th->th_ack, seq_min)) { 11799 if (ghost_ack_check) 11800 TCPSTAT_INC(tcps_rcvghostack); 11801 else 11802 TCPSTAT_INC(tcps_rcvacktooold); 11803 /* Send challenge ACK. 
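 * (ctf_do_dropafterack() frees the segment and sets TF_ACKNOW,
 * so the challenge ACK goes out on the next output pass.)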
*/ 11804 ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val); 11805 rack->r_wanted_output = 1; 11806 return (1); 11807 } 11808 } 11809 if (SEQ_GT(th->th_ack, tp->snd_max)) { 11810 ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val); 11811 rack->r_wanted_output = 1; 11812 return (1); 11813 } 11814 if (rack->gp_ready && 11815 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 11816 under_pacing = 1; 11817 } 11818 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 11819 int in_rec, dup_ack_struck = 0; 11820 int dsack_seen = 0, sacks_seen = 0; 11821 11822 in_rec = IN_FASTRECOVERY(tp->t_flags); 11823 if (rack->rc_in_persist) { 11824 tp->t_rxtshift = 0; 11825 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 11826 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 11827 } 11828 11829 if ((th->th_ack == tp->snd_una) && 11830 (tiwin == tp->snd_wnd) && 11831 (orig_tlen == 0) && 11832 ((to->to_flags & TOF_SACK) == 0)) { 11833 rack_strike_dupack(rack, th->th_ack); 11834 dup_ack_struck = 1; 11835 } 11836 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), 11837 dup_ack_struck, &dsack_seen, &sacks_seen); 11838 11839 } 11840 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 11841 /* 11842 * Old ack, behind (or duplicate to) the last one rcv'd. 11843 * Note: We mark reordering as occurring if it's 11844 * less than and we have not closed our window. 11845 */ 11846 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { 11847 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 11848 if (rack->r_ctl.rc_reorder_ts == 0) 11849 rack->r_ctl.rc_reorder_ts = 1; 11850 } 11851 return (0); 11852 } 11853 /* 11854 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 11855 * something we sent. 11856 */ 11857 if (tp->t_flags & TF_NEEDSYN) { 11858 /* 11859 * T/TCP: Connection was half-synchronized, and our SYN has 11860 * been ACK'd (so connection is now fully synchronized). Go 11861 * to non-starred state, increment snd_una for ACK of SYN, 11862 * and check if we can do window scaling. 11863 */ 11864 tp->t_flags &= ~TF_NEEDSYN; 11865 tp->snd_una++; 11866 /* Do window scaling? */ 11867 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11868 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11869 tp->rcv_scale = tp->request_r_scale; 11870 /* Send window already scaled. */ 11871 } 11872 } 11873 nsegs = max(1, m->m_pkthdr.lro_nsegs); 11874 11875 acked = BYTES_THIS_ACK(tp, th); 11876 if (acked) { 11877 /* 11878 * Any time we move the cum-ack forward, clear the 11879 * keep-alive tied probe-not-answered flag. The 11880 * persist code clears its own on entry. 11881 */ 11882 rack->probe_not_answered = 0; 11883 } 11884 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 11885 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 11886 /* 11887 * If we just performed our first retransmit, and the ACK arrives 11888 * within our recovery window, then it was a mistake to do the 11889 * retransmit in the first place. Recover our original cwnd and 11890 * ssthresh, and proceed to transmit where we left off.
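 *
 * The check below treats the retransmit as spurious when this was
 * the first backoff (t_rxtshift == 1) and the ACK arrived before
 * the t_badrxtwin window expired; rack_cong_signal(CC_RTO_ERR, ...)
 * then restores the prior cwnd/ssthresh values.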
11891 */ 11892 if ((tp->t_flags & TF_PREVVALID) && 11893 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 11894 tp->t_flags &= ~TF_PREVVALID; 11895 if (tp->t_rxtshift == 1 && 11896 (int)(ticks - tp->t_badrxtwin) < 0) 11897 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 11898 } 11899 if (acked) { 11900 /* assure we are not backed off */ 11901 tp->t_rxtshift = 0; 11902 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 11903 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 11904 rack->rc_tlp_in_progress = 0; 11905 rack->r_ctl.rc_tlp_cnt_out = 0; 11906 /* 11907 * If it is the RXT timer we want to 11908 * stop it, so we can restart a TLP. 11909 */ 11910 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 11911 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11912 #ifdef TCP_REQUEST_TRK 11913 rack_req_check_for_comp(rack, th->th_ack); 11914 #endif 11915 } 11916 /* 11917 * If we have a timestamp reply, update smoothed round trip time. If 11918 * no timestamp is present but transmit timer is running and timed 11919 * sequence number was acked, update smoothed round trip time. Since 11920 * we now have an rtt measurement, cancel the timer backoff (cf., 11921 * Phil Karn's retransmit alg.). Recompute the initial retransmit 11922 * timer. 11923 * 11924 * Some boxes send broken timestamp replies during the SYN+ACK 11925 * phase, ignore timestamps of 0 or we could calculate a huge RTT 11926 * and blow up the retransmit timer. 11927 */ 11928 /* 11929 * If all outstanding data is acked, stop retransmit timer and 11930 * remember to restart (more output or persist). If there is more 11931 * data to be acked, restart retransmit timer, using current 11932 * (possibly backed-off) value. 11933 */ 11934 if (acked == 0) { 11935 if (ofia) 11936 *ofia = ourfinisacked; 11937 return (0); 11938 } 11939 if (IN_RECOVERY(tp->t_flags)) { 11940 if (SEQ_LT(th->th_ack, tp->snd_recover) && 11941 (SEQ_LT(th->th_ack, tp->snd_max))) { 11942 tcp_rack_partialack(tp); 11943 } else { 11944 rack_post_recovery(tp, th->th_ack); 11945 post_recovery = 1; 11946 /* 11947 * Grab the segsiz, multiply by 2 and add the snd_cwnd 11948 * that is the max the CC should add if we are exiting 11949 * recovery and doing a late add. 11950 */ 11951 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 11952 p_cwnd <<= 1; 11953 p_cwnd += tp->snd_cwnd; 11954 } 11955 } else if ((rack->rto_from_rec == 1) && 11956 SEQ_GEQ(th->th_ack, tp->snd_recover)) { 11957 /* 11958 * We were in recovery, hit a rxt timeout 11959 * and never re-entered recovery. The timeout(s) 11960 * made up all the lost data. In such a case 11961 * we need to clear the rto_from_rec flag. 11962 */ 11963 rack->rto_from_rec = 0; 11964 } 11965 /* 11966 * Let the congestion control algorithm update congestion control 11967 * related information. This typically means increasing the 11968 * congestion window. 
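 *
 * When we have just exited recovery (post_recovery set above),
 * the growth is additionally clamped right after this call:
 * snd_cwnd is limited to p_cwnd, the post-recovery cwnd plus
 * two minimum-sized segments, computed above.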
11969 */ 11970 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, post_recovery); 11971 if (post_recovery && 11972 (tp->snd_cwnd > p_cwnd)) { 11973 /* Must be non-newreno (cubic) getting too ahead of itself */ 11974 tp->snd_cwnd = p_cwnd; 11975 } 11976 SOCK_SENDBUF_LOCK(so); 11977 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 11978 tp->snd_wnd -= acked_amount; 11979 mfree = sbcut_locked(&so->so_snd, acked_amount); 11980 if ((sbused(&so->so_snd) == 0) && 11981 (acked > acked_amount) && 11982 (tp->t_state >= TCPS_FIN_WAIT_1) && 11983 (tp->t_flags & TF_SENTFIN)) { 11984 /* 11985 * We must be sure our fin 11986 * was sent and acked (we can be 11987 * in FIN_WAIT_1 without having 11988 * sent the fin). 11989 */ 11990 ourfinisacked = 1; 11991 } 11992 tp->snd_una = th->th_ack; 11993 /* wakeups? */ 11994 if (acked_amount && sbavail(&so->so_snd)) 11995 rack_adjust_sendmap_head(rack, &so->so_snd); 11996 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 11997 /* NB: sowwakeup_locked() does an implicit unlock. */ 11998 sowwakeup_locked(so); 11999 m_freem(mfree); 12000 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 12001 tp->snd_recover = tp->snd_una; 12002 12003 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { 12004 tp->snd_nxt = tp->snd_max; 12005 } 12006 if (under_pacing && 12007 (rack->use_fixed_rate == 0) && 12008 (rack->in_probe_rtt == 0) && 12009 rack->rc_gp_dyn_mul && 12010 rack->rc_always_pace) { 12011 /* Check if we are dragging bottom */ 12012 rack_check_bottom_drag(tp, rack, so); 12013 } 12014 if (tp->snd_una == tp->snd_max) { 12015 /* Nothing left outstanding */ 12016 tp->t_flags &= ~TF_PREVVALID; 12017 if (rack->r_ctl.rc_went_idle_time == 0) 12018 rack->r_ctl.rc_went_idle_time = 1; 12019 rack->r_ctl.retran_during_recovery = 0; 12020 rack->r_ctl.dsack_byte_cnt = 0; 12021 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 12022 if (sbavail(&tptosocket(tp)->so_snd) == 0) 12023 tp->t_acktime = 0; 12024 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12025 rack->rc_suspicious = 0; 12026 /* Set need output so persist might get set */ 12027 rack->r_wanted_output = 1; 12028 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 12029 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 12030 (sbavail(&so->so_snd) == 0) && 12031 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 12032 /* 12033 * The socket was gone and the 12034 * peer sent data (now or in the past), time to 12035 * reset him. 12036 */ 12037 *ret_val = 1; 12038 /* tcp_close will kill the inp pre-log the Reset */ 12039 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 12040 tp = tcp_close(tp); 12041 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 12042 return (1); 12043 } 12044 } 12045 if (ofia) 12046 *ofia = ourfinisacked; 12047 return (0); 12048 } 12049 12050 12051 static void 12052 rack_log_collapse(struct tcp_rack *rack, uint32_t cnt, uint32_t split, uint32_t out, int line, 12053 int dir, uint32_t flags, struct rack_sendmap *rsm) 12054 { 12055 if (tcp_bblogging_on(rack->rc_tp)) { 12056 union tcp_log_stackspecific log; 12057 struct timeval tv; 12058 12059 memset(&log, 0, sizeof(log)); 12060 log.u_bbr.flex1 = cnt; 12061 log.u_bbr.flex2 = split; 12062 log.u_bbr.flex3 = out; 12063 log.u_bbr.flex4 = line; 12064 log.u_bbr.flex5 = rack->r_must_retran; 12065 log.u_bbr.flex6 = flags; 12066 log.u_bbr.flex7 = rack->rc_has_collapsed; 12067 log.u_bbr.flex8 = dir; /* 12068 * 1 is collapsed, 0 is uncollapsed, 12069 * 2 is log of a rsm being marked, 3 is a split. 
12070 */ 12071 if (rsm == NULL) 12072 log.u_bbr.rttProp = 0; 12073 else 12074 log.u_bbr.rttProp = (uintptr_t)rsm; 12075 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 12076 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 12077 TCP_LOG_EVENTP(rack->rc_tp, NULL, 12078 &rack->rc_inp->inp_socket->so_rcv, 12079 &rack->rc_inp->inp_socket->so_snd, 12080 TCP_RACK_LOG_COLLAPSE, 0, 12081 0, &log, false, &tv); 12082 } 12083 } 12084 12085 static void 12086 rack_collapsed_window(struct tcp_rack *rack, uint32_t out, tcp_seq th_ack, int line) 12087 { 12088 /* 12089 * Here all we do is mark the collapsed point and set the flag. 12090 * This may happen again and again, but there is no 12091 * sense splitting our map until we know where the 12092 * peer finally lands in the collapse. 12093 */ 12094 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); 12095 if ((rack->rc_has_collapsed == 0) || 12096 (rack->r_ctl.last_collapse_point != (th_ack + rack->rc_tp->snd_wnd))) 12097 counter_u64_add(rack_collapsed_win_seen, 1); 12098 rack->r_ctl.last_collapse_point = th_ack + rack->rc_tp->snd_wnd; 12099 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max; 12100 rack->rc_has_collapsed = 1; 12101 rack->r_collapse_point_valid = 1; 12102 rack_log_collapse(rack, 0, th_ack, rack->r_ctl.last_collapse_point, line, 1, 0, NULL); 12103 } 12104 12105 static void 12106 rack_un_collapse_window(struct tcp_rack *rack, int line) 12107 { 12108 struct rack_sendmap *nrsm, *rsm; 12109 int cnt = 0, split = 0; 12110 int insret __diagused; 12111 12112 12113 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); 12114 rack->rc_has_collapsed = 0; 12115 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); 12116 if (rsm == NULL) { 12117 /* Nothing to do maybe the peer ack'ed it all */ 12118 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 12119 return; 12120 } 12121 /* Now do we need to split this one? */ 12122 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) { 12123 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 12124 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm); 12125 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 12126 if (nrsm == NULL) { 12127 /* We can't get a rsm, mark all? 
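 * (If the allocation fails we skip the split and simply mark
 * from the start of this rsm onward.)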
*/ 12128 nrsm = rsm; 12129 goto no_split; 12130 } 12131 /* Clone it */ 12132 split = 1; 12133 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point); 12134 #ifndef INVARIANTS 12135 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 12136 #else 12137 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 12138 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 12139 nrsm, insret, rack, rsm); 12140 } 12141 #endif 12142 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 12143 rack->r_ctl.last_collapse_point, __LINE__); 12144 if (rsm->r_in_tmap) { 12145 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 12146 nrsm->r_in_tmap = 1; 12147 } 12148 /* 12149 * Set in the new RSM as the 12150 * collapsed starting point 12151 */ 12152 rsm = nrsm; 12153 } 12154 12155 no_split: 12156 TQHASH_FOREACH_FROM(nrsm, rack->r_ctl.tqh, rsm) { 12157 cnt++; 12158 nrsm->r_flags |= RACK_RWND_COLLAPSED; 12159 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm); 12160 cnt++; 12161 } 12162 if (cnt) { 12163 counter_u64_add(rack_collapsed_win, 1); 12164 } 12165 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 12166 } 12167 12168 static void 12169 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 12170 int32_t tlen, int32_t tfo_syn) 12171 { 12172 if (DELAY_ACK(tp, tlen) || tfo_syn) { 12173 rack_timer_cancel(tp, rack, 12174 rack->r_ctl.rc_rcvtime, __LINE__); 12175 tp->t_flags |= TF_DELACK; 12176 } else { 12177 rack->r_wanted_output = 1; 12178 tp->t_flags |= TF_ACKNOW; 12179 } 12180 } 12181 12182 static void 12183 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack) 12184 { 12185 /* 12186 * If fast output is in progress, lets validate that 12187 * the new window did not shrink on us and make it 12188 * so fast output should end. 12189 */ 12190 if (rack->r_fast_output) { 12191 uint32_t out; 12192 12193 /* 12194 * Calculate what we will send if left as is 12195 * and compare that to our send window. 12196 */ 12197 out = ctf_outstanding(tp); 12198 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 12199 /* ok we have an issue */ 12200 if (out >= tp->snd_wnd) { 12201 /* Turn off fast output the window is met or collapsed */ 12202 rack->r_fast_output = 0; 12203 } else { 12204 /* we have some room left */ 12205 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 12206 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 12207 /* If not at least 1 full segment never mind */ 12208 rack->r_fast_output = 0; 12209 } 12210 } 12211 } 12212 } 12213 } 12214 12215 /* 12216 * Return value of 1, the TCB is unlocked and most 12217 * likely gone, return value of 0, the TCP is still 12218 * locked. 12219 */ 12220 static int 12221 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 12222 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 12223 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 12224 { 12225 /* 12226 * Update window information. Don't look at window if no ACK: TAC's 12227 * send garbage on first SYN. 
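 *
 * The window is taken from the segment only when it is strictly
 * newer: a larger sequence than the last update (snd_wl1), the
 * same sequence with a larger ACK (snd_wl2), or the same sequence
 * and ACK with a bigger advertised window; snd_wl1/snd_wl2 record
 * the seq/ack that last updated the window.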
12228 */ 12229 int32_t nsegs; 12230 int32_t tfo_syn; 12231 struct tcp_rack *rack; 12232 12233 INP_WLOCK_ASSERT(tptoinpcb(tp)); 12234 12235 rack = (struct tcp_rack *)tp->t_fb_ptr; 12236 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12237 if ((thflags & TH_ACK) && 12238 (SEQ_LT(tp->snd_wl1, th->th_seq) || 12239 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 12240 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 12241 /* keep track of pure window updates */ 12242 if (tlen == 0 && 12243 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 12244 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 12245 tp->snd_wnd = tiwin; 12246 rack_validate_fo_sendwin_up(tp, rack); 12247 tp->snd_wl1 = th->th_seq; 12248 tp->snd_wl2 = th->th_ack; 12249 if (tp->snd_wnd > tp->max_sndwnd) 12250 tp->max_sndwnd = tp->snd_wnd; 12251 rack->r_wanted_output = 1; 12252 } else if (thflags & TH_ACK) { 12253 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 12254 tp->snd_wnd = tiwin; 12255 rack_validate_fo_sendwin_up(tp, rack); 12256 tp->snd_wl1 = th->th_seq; 12257 tp->snd_wl2 = th->th_ack; 12258 } 12259 } 12260 if (tp->snd_wnd < ctf_outstanding(tp)) 12261 /* The peer collapsed the window */ 12262 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); 12263 else if (rack->rc_has_collapsed) 12264 rack_un_collapse_window(rack, __LINE__); 12265 if ((rack->r_collapse_point_valid) && 12266 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point))) 12267 rack->r_collapse_point_valid = 0; 12268 /* Was persist timer active and now we have window space? */ 12269 if ((rack->rc_in_persist != 0) && 12270 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 12271 rack->r_ctl.rc_pace_min_segs))) { 12272 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 12273 tp->snd_nxt = tp->snd_max; 12274 /* Make sure we output to start the timer */ 12275 rack->r_wanted_output = 1; 12276 } 12277 /* Do we enter persists? */ 12278 if ((rack->rc_in_persist == 0) && 12279 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 12280 TCPS_HAVEESTABLISHED(tp->t_state) && 12281 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 12282 sbavail(&tptosocket(tp)->so_snd) && 12283 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 12284 /* 12285 * Here the rwnd is less than 12286 * the pacing size, we are established, 12287 * nothing is outstanding, and there is 12288 * data to send. Enter persists. 12289 */ 12290 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); 12291 } 12292 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 12293 m_freem(m); 12294 return (0); 12295 } 12296 /* 12297 * don't process the URG bit, ignore them drag 12298 * along the up. 12299 */ 12300 tp->rcv_up = tp->rcv_nxt; 12301 12302 /* 12303 * Process the segment text, merging it into the TCP sequencing 12304 * queue, and arranging for acknowledgment of receipt if necessary. 12305 * This process logically involves adjusting tp->rcv_wnd as data is 12306 * presented to the user (this happens in tcp_usrreq.c, case 12307 * PRU_RCVD). If a FIN has already been received on this connection 12308 * then we just ignore the text. 
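 *
 * Two paths follow: the in-order case (segment starts at rcv_nxt
 * and the reassembly queue is empty) appends straight to the
 * receive buffer, everything else goes through tcp_reass() and
 * forces an immediate ACK so fast retransmit can work.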
12309 */ 12310 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 12311 (tp->t_flags & TF_FASTOPEN)); 12312 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 12313 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 12314 tcp_seq save_start = th->th_seq; 12315 tcp_seq save_rnxt = tp->rcv_nxt; 12316 int save_tlen = tlen; 12317 12318 m_adj(m, drop_hdrlen); /* delayed header drop */ 12319 /* 12320 * Insert segment which includes th into TCP reassembly 12321 * queue with control block tp. Set thflags to whether 12322 * reassembly now includes a segment with FIN. This handles 12323 * the common case inline (segment is the next to be 12324 * received on an established connection, and the queue is 12325 * empty), avoiding linkage into and removal from the queue 12326 * and repetition of various conversions. Set DELACK for 12327 * segments received in order, but ack immediately when 12328 * segments are out of order (so fast retransmit can work). 12329 */ 12330 if (th->th_seq == tp->rcv_nxt && 12331 SEGQ_EMPTY(tp) && 12332 (TCPS_HAVEESTABLISHED(tp->t_state) || 12333 tfo_syn)) { 12334 #ifdef NETFLIX_SB_LIMITS 12335 u_int mcnt, appended; 12336 12337 if (so->so_rcv.sb_shlim) { 12338 mcnt = m_memcnt(m); 12339 appended = 0; 12340 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 12341 CFO_NOSLEEP, NULL) == false) { 12342 counter_u64_add(tcp_sb_shlim_fails, 1); 12343 m_freem(m); 12344 return (0); 12345 } 12346 } 12347 #endif 12348 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); 12349 tp->rcv_nxt += tlen; 12350 if (tlen && 12351 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 12352 (tp->t_fbyte_in == 0)) { 12353 tp->t_fbyte_in = ticks; 12354 if (tp->t_fbyte_in == 0) 12355 tp->t_fbyte_in = 1; 12356 if (tp->t_fbyte_out && tp->t_fbyte_in) 12357 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 12358 } 12359 thflags = tcp_get_flags(th) & TH_FIN; 12360 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 12361 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 12362 SOCK_RECVBUF_LOCK(so); 12363 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 12364 m_freem(m); 12365 } else { 12366 int32_t newsize; 12367 12368 if (tlen > 0) { 12369 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 12370 if (newsize) 12371 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 12372 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 12373 } 12374 #ifdef NETFLIX_SB_LIMITS 12375 appended = 12376 #endif 12377 sbappendstream_locked(&so->so_rcv, m, 0); 12378 } 12379 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 12380 /* NB: sorwakeup_locked() does an implicit unlock. */ 12381 sorwakeup_locked(so); 12382 #ifdef NETFLIX_SB_LIMITS 12383 if (so->so_rcv.sb_shlim && appended != mcnt) 12384 counter_fo_release(so->so_rcv.sb_shlim, 12385 mcnt - appended); 12386 #endif 12387 } else { 12388 /* 12389 * XXX: Due to the header drop above "th" is 12390 * theoretically invalid by now. Fortunately 12391 * m_adj() doesn't actually frees any mbufs when 12392 * trimming from the head. 12393 */ 12394 tcp_seq temp = save_start; 12395 12396 thflags = tcp_reass(tp, th, &temp, &tlen, m); 12397 tp->t_flags |= TF_ACKNOW; 12398 if (tp->t_flags & TF_WAKESOR) { 12399 tp->t_flags &= ~TF_WAKESOR; 12400 /* NB: sorwakeup_locked() does an implicit unlock. */ 12401 sorwakeup_locked(so); 12402 } 12403 } 12404 if ((tp->t_flags & TF_SACK_PERMIT) && 12405 (save_tlen > 0) && 12406 TCPS_HAVEESTABLISHED(tp->t_state)) { 12407 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 12408 /* 12409 * DSACK actually handled in the fastpath 12410 * above. 
12411 */ 12412 tcp_update_sack_list(tp, save_start, 12413 save_start + save_tlen); 12414 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 12415 if ((tp->rcv_numsacks >= 1) && 12416 (tp->sackblks[0].end == save_start)) { 12417 /* 12418 * Partial overlap, recorded at todrop 12419 * above. 12420 */ 12421 tcp_update_sack_list(tp, 12422 tp->sackblks[0].start, 12423 tp->sackblks[0].end); 12424 } else { 12425 tcp_update_dsack_list(tp, save_start, 12426 save_start + save_tlen); 12427 } 12428 } else if (tlen >= save_tlen) { 12429 /* Update of sackblks. */ 12430 tcp_update_dsack_list(tp, save_start, 12431 save_start + save_tlen); 12432 } else if (tlen > 0) { 12433 tcp_update_dsack_list(tp, save_start, 12434 save_start + tlen); 12435 } 12436 } 12437 } else { 12438 m_freem(m); 12439 thflags &= ~TH_FIN; 12440 } 12441 12442 /* 12443 * If FIN is received ACK the FIN and let the user know that the 12444 * connection is closing. 12445 */ 12446 if (thflags & TH_FIN) { 12447 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 12448 /* The socket upcall is handled by socantrcvmore. */ 12449 socantrcvmore(so); 12450 /* 12451 * If connection is half-synchronized (ie NEEDSYN 12452 * flag on) then delay ACK, so it may be piggybacked 12453 * when SYN is sent. Otherwise, since we received a 12454 * FIN then no more input can be expected, send ACK 12455 * now. 12456 */ 12457 if (tp->t_flags & TF_NEEDSYN) { 12458 rack_timer_cancel(tp, rack, 12459 rack->r_ctl.rc_rcvtime, __LINE__); 12460 tp->t_flags |= TF_DELACK; 12461 } else { 12462 tp->t_flags |= TF_ACKNOW; 12463 } 12464 tp->rcv_nxt++; 12465 } 12466 switch (tp->t_state) { 12467 /* 12468 * In SYN_RECEIVED and ESTABLISHED STATES enter the 12469 * CLOSE_WAIT state. 12470 */ 12471 case TCPS_SYN_RECEIVED: 12472 tp->t_starttime = ticks; 12473 /* FALLTHROUGH */ 12474 case TCPS_ESTABLISHED: 12475 rack_timer_cancel(tp, rack, 12476 rack->r_ctl.rc_rcvtime, __LINE__); 12477 tcp_state_change(tp, TCPS_CLOSE_WAIT); 12478 break; 12479 12480 /* 12481 * If still in FIN_WAIT_1 STATE FIN has not been 12482 * acked so enter the CLOSING state. 12483 */ 12484 case TCPS_FIN_WAIT_1: 12485 rack_timer_cancel(tp, rack, 12486 rack->r_ctl.rc_rcvtime, __LINE__); 12487 tcp_state_change(tp, TCPS_CLOSING); 12488 break; 12489 12490 /* 12491 * In FIN_WAIT_2 state enter the TIME_WAIT state, 12492 * starting the time-wait timer, turning off the 12493 * other standard timers. 12494 */ 12495 case TCPS_FIN_WAIT_2: 12496 rack_timer_cancel(tp, rack, 12497 rack->r_ctl.rc_rcvtime, __LINE__); 12498 tcp_twstart(tp); 12499 return (1); 12500 } 12501 } 12502 /* 12503 * Return any desired output. 12504 */ 12505 if ((tp->t_flags & TF_ACKNOW) || 12506 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 12507 rack->r_wanted_output = 1; 12508 } 12509 return (0); 12510 } 12511 12512 /* 12513 * Here nothing is really faster, its just that we 12514 * have broken out the fast-data path also just like 12515 * the fast-ack. 12516 */ 12517 static int 12518 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 12519 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12520 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 12521 { 12522 int32_t nsegs; 12523 int32_t newsize = 0; /* automatic sockbuf scaling */ 12524 struct tcp_rack *rack; 12525 #ifdef NETFLIX_SB_LIMITS 12526 u_int mcnt, appended; 12527 #endif 12528 12529 /* 12530 * If last ACK falls within this segment's sequence numbers, record 12531 * the timestamp. 
NOTE that the test is modified according to the 12532 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 12533 */ 12534 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 12535 return (0); 12536 } 12537 if (tiwin && tiwin != tp->snd_wnd) { 12538 return (0); 12539 } 12540 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 12541 return (0); 12542 } 12543 if (__predict_false((to->to_flags & TOF_TS) && 12544 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 12545 return (0); 12546 } 12547 if (__predict_false((th->th_ack != tp->snd_una))) { 12548 return (0); 12549 } 12550 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 12551 return (0); 12552 } 12553 if ((to->to_flags & TOF_TS) != 0 && 12554 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 12555 tp->ts_recent_age = tcp_ts_getticks(); 12556 tp->ts_recent = to->to_tsval; 12557 } 12558 rack = (struct tcp_rack *)tp->t_fb_ptr; 12559 /* 12560 * This is a pure, in-sequence data packet with nothing on the 12561 * reassembly queue and we have enough buffer space to take it. 12562 */ 12563 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12564 12565 #ifdef NETFLIX_SB_LIMITS 12566 if (so->so_rcv.sb_shlim) { 12567 mcnt = m_memcnt(m); 12568 appended = 0; 12569 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 12570 CFO_NOSLEEP, NULL) == false) { 12571 counter_u64_add(tcp_sb_shlim_fails, 1); 12572 m_freem(m); 12573 return (1); 12574 } 12575 } 12576 #endif 12577 /* Clean receiver SACK report if present */ 12578 if (tp->rcv_numsacks) 12579 tcp_clean_sackreport(tp); 12580 KMOD_TCPSTAT_INC(tcps_preddat); 12581 tp->rcv_nxt += tlen; 12582 if (tlen && 12583 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 12584 (tp->t_fbyte_in == 0)) { 12585 tp->t_fbyte_in = ticks; 12586 if (tp->t_fbyte_in == 0) 12587 tp->t_fbyte_in = 1; 12588 if (tp->t_fbyte_out && tp->t_fbyte_in) 12589 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 12590 } 12591 /* 12592 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 12593 */ 12594 tp->snd_wl1 = th->th_seq; 12595 /* 12596 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 12597 */ 12598 tp->rcv_up = tp->rcv_nxt; 12599 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 12600 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 12601 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 12602 12603 /* Add data to socket buffer. */ 12604 SOCK_RECVBUF_LOCK(so); 12605 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 12606 m_freem(m); 12607 } else { 12608 /* 12609 * Set new socket buffer size. Give up when limit is 12610 * reached. 12611 */ 12612 if (newsize) 12613 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 12614 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 12615 m_adj(m, drop_hdrlen); /* delayed header drop */ 12616 #ifdef NETFLIX_SB_LIMITS 12617 appended = 12618 #endif 12619 sbappendstream_locked(&so->so_rcv, m, 0); 12620 ctf_calc_rwin(so, tp); 12621 } 12622 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 12623 /* NB: sorwakeup_locked() does an implicit unlock. */ 12624 sorwakeup_locked(so); 12625 #ifdef NETFLIX_SB_LIMITS 12626 if (so->so_rcv.sb_shlim && mcnt != appended) 12627 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 12628 #endif 12629 rack_handle_delayed_ack(tp, rack, tlen, 0); 12630 if (tp->snd_una == tp->snd_max) 12631 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 12632 return (1); 12633 } 12634 12635 /* 12636 * This subfunction is used to try to highly optimize the 12637 * fast path. We again allow window updates that are 12638 * in sequence to remain in the fast-path. We also add 12639 * in the __predict's to attempt to help the compiler. 
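 * To stay on the fast path the ack must be pure and in-window:
 * not old, not beyond snd_max, with a non-zero window, no
 * SYN/FIN pending, no PAWS failure, and the connection must not
 * be in recovery or holding SACKed data on its scoreboard.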
12640 * Note that if we return a 0, then we can *not* process 12641 * it and the caller should push the packet into the 12642 * slow-path. 12643 */ 12644 static int 12645 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 12646 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12647 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 12648 { 12649 int32_t acked; 12650 int32_t nsegs; 12651 int32_t under_pacing = 0; 12652 struct tcp_rack *rack; 12653 12654 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 12655 /* Old ack, behind (or duplicate to) the last one rcv'd */ 12656 return (0); 12657 } 12658 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 12659 /* Above what we have sent? */ 12660 return (0); 12661 } 12662 if (__predict_false(tiwin == 0)) { 12663 /* zero window */ 12664 return (0); 12665 } 12666 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 12667 /* We need a SYN or a FIN, unlikely.. */ 12668 return (0); 12669 } 12670 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 12671 /* Timestamp is behind .. old ack with seq wrap? */ 12672 return (0); 12673 } 12674 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 12675 /* Still recovering */ 12676 return (0); 12677 } 12678 rack = (struct tcp_rack *)tp->t_fb_ptr; 12679 if (rack->r_ctl.rc_sacked) { 12680 /* We have sack holes on our scoreboard */ 12681 return (0); 12682 } 12683 /* Ok if we reach here, we can process a fast-ack */ 12684 if (rack->gp_ready && 12685 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 12686 under_pacing = 1; 12687 } 12688 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12689 rack_log_ack(tp, to, th, 0, 0, NULL, NULL); 12690 /* Did the window get updated? */ 12691 if (tiwin != tp->snd_wnd) { 12692 tp->snd_wnd = tiwin; 12693 rack_validate_fo_sendwin_up(tp, rack); 12694 tp->snd_wl1 = th->th_seq; 12695 if (tp->snd_wnd > tp->max_sndwnd) 12696 tp->max_sndwnd = tp->snd_wnd; 12697 } 12698 /* Do we exit persists? */ 12699 if ((rack->rc_in_persist != 0) && 12700 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 12701 rack->r_ctl.rc_pace_min_segs))) { 12702 rack_exit_persist(tp, rack, cts); 12703 } 12704 /* Do we enter persists? */ 12705 if ((rack->rc_in_persist == 0) && 12706 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 12707 TCPS_HAVEESTABLISHED(tp->t_state) && 12708 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 12709 sbavail(&tptosocket(tp)->so_snd) && 12710 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 12711 /* 12712 * Here the rwnd is less than 12713 * the pacing size, we are established, 12714 * nothing is outstanding, and there is 12715 * data to send. Enter persists. 12716 */ 12717 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, th->th_ack); 12718 } 12719 /* 12720 * If last ACK falls within this segment's sequence numbers, record 12721 * the timestamp. NOTE that the test is modified according to the 12722 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 12723 */ 12724 if ((to->to_flags & TOF_TS) != 0 && 12725 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 12726 tp->ts_recent_age = tcp_ts_getticks(); 12727 tp->ts_recent = to->to_tsval; 12728 } 12729 /* 12730 * This is a pure ack for outstanding data. 12731 */ 12732 KMOD_TCPSTAT_INC(tcps_predack); 12733 12734 /* 12735 * "bad retransmit" recovery. 
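 * (This is the same spurious-RTO detection performed in
 * rack_process_ack() above.)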
12736 */ 12737 if ((tp->t_flags & TF_PREVVALID) && 12738 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 12739 tp->t_flags &= ~TF_PREVVALID; 12740 if (tp->t_rxtshift == 1 && 12741 (int)(ticks - tp->t_badrxtwin) < 0) 12742 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 12743 } 12744 /* 12745 * Recalculate the transmit timer / rtt. 12746 * 12747 * Some boxes send broken timestamp replies during the SYN+ACK 12748 * phase, ignore timestamps of 0 or we could calculate a huge RTT 12749 * and blow up the retransmit timer. 12750 */ 12751 acked = BYTES_THIS_ACK(tp, th); 12752 12753 #ifdef TCP_HHOOK 12754 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 12755 hhook_run_tcp_est_in(tp, th, to); 12756 #endif 12757 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 12758 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 12759 if (acked) { 12760 struct mbuf *mfree; 12761 12762 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 12763 SOCK_SENDBUF_LOCK(so); 12764 mfree = sbcut_locked(&so->so_snd, acked); 12765 tp->snd_una = th->th_ack; 12766 /* Note we want to hold the sb lock through the sendmap adjust */ 12767 rack_adjust_sendmap_head(rack, &so->so_snd); 12768 /* Wake up the socket if we have room to write more */ 12769 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 12770 sowwakeup_locked(so); 12771 m_freem(mfree); 12772 tp->t_rxtshift = 0; 12773 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 12774 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 12775 rack->rc_tlp_in_progress = 0; 12776 rack->r_ctl.rc_tlp_cnt_out = 0; 12777 /* 12778 * If it is the RXT timer we want to 12779 * stop it, so we can restart a TLP. 12780 */ 12781 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 12782 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12783 12784 #ifdef TCP_REQUEST_TRK 12785 rack_req_check_for_comp(rack, th->th_ack); 12786 #endif 12787 } 12788 /* 12789 * Let the congestion control algorithm update congestion control 12790 * related information. This typically means increasing the 12791 * congestion window. 12792 */ 12793 if (tp->snd_wnd < ctf_outstanding(tp)) { 12794 /* The peer collapsed the window */ 12795 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); 12796 } else if (rack->rc_has_collapsed) 12797 rack_un_collapse_window(rack, __LINE__); 12798 if ((rack->r_collapse_point_valid) && 12799 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point))) 12800 rack->r_collapse_point_valid = 0; 12801 /* 12802 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 12803 */ 12804 tp->snd_wl2 = th->th_ack; 12805 tp->t_dupacks = 0; 12806 m_freem(m); 12807 /* ND6_HINT(tp); *//* Some progress has been made. */ 12808 12809 /* 12810 * If all outstanding data are acked, stop retransmit timer, 12811 * otherwise restart timer using current (possibly backed-off) 12812 * value. If process is waiting for space, wakeup/selwakeup/signal. 12813 * If data are ready to send, let tcp_output decide between more 12814 * output or persist. 
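 *
 * Below, once snd_una catches up to snd_max the connection is
 * idle: recovery bookkeeping is cleared, the idle timestamp is
 * recorded and any pending rack timer is cancelled.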
12815 */ 12816 if (under_pacing && 12817 (rack->use_fixed_rate == 0) && 12818 (rack->in_probe_rtt == 0) && 12819 rack->rc_gp_dyn_mul && 12820 rack->rc_always_pace) { 12821 /* Check if we are dragging bottom */ 12822 rack_check_bottom_drag(tp, rack, so); 12823 } 12824 if (tp->snd_una == tp->snd_max) { 12825 tp->t_flags &= ~TF_PREVVALID; 12826 rack->r_ctl.retran_during_recovery = 0; 12827 rack->rc_suspicious = 0; 12828 rack->r_ctl.dsack_byte_cnt = 0; 12829 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 12830 if (rack->r_ctl.rc_went_idle_time == 0) 12831 rack->r_ctl.rc_went_idle_time = 1; 12832 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 12833 if (sbavail(&tptosocket(tp)->so_snd) == 0) 12834 tp->t_acktime = 0; 12835 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12836 } 12837 if (acked && rack->r_fast_output) 12838 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 12839 if (sbavail(&so->so_snd)) { 12840 rack->r_wanted_output = 1; 12841 } 12842 return (1); 12843 } 12844 12845 /* 12846 * Return value of 1, the TCB is unlocked and most 12847 * likely gone, return value of 0, the TCP is still 12848 * locked. 12849 */ 12850 static int 12851 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 12852 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12853 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12854 { 12855 int32_t ret_val = 0; 12856 int32_t orig_tlen = tlen; 12857 int32_t todrop; 12858 int32_t ourfinisacked = 0; 12859 struct tcp_rack *rack; 12860 12861 INP_WLOCK_ASSERT(tptoinpcb(tp)); 12862 12863 ctf_calc_rwin(so, tp); 12864 /* 12865 * If the state is SYN_SENT: if seg contains an ACK, but not for our 12866 * SYN, drop the input. if seg contains a RST, then drop the 12867 * connection. if seg does not contain SYN, then drop it. Otherwise 12868 * this is an acceptable SYN segment initialize tp->rcv_nxt and 12869 * tp->irs if seg contains ack then advance tp->snd_una if seg 12870 * contains an ECE and ECN support is enabled, the stream is ECN 12871 * capable. if SYN has been acked change to ESTABLISHED else 12872 * SYN_RCVD state arrange for segment to be acked (eventually) 12873 * continue processing rest of data/controls. 12874 */ 12875 if ((thflags & TH_ACK) && 12876 (SEQ_LEQ(th->th_ack, tp->iss) || 12877 SEQ_GT(th->th_ack, tp->snd_max))) { 12878 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 12879 ctf_do_dropwithreset(m, tp, th, BANDLIM_TCP_RST, tlen); 12880 return (1); 12881 } 12882 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 12883 TCP_PROBE5(connect__refused, NULL, tp, 12884 mtod(m, const char *), tp, th); 12885 tp = tcp_drop(tp, ECONNREFUSED); 12886 ctf_do_drop(m, tp); 12887 return (1); 12888 } 12889 if (thflags & TH_RST) { 12890 ctf_do_drop(m, tp); 12891 return (1); 12892 } 12893 if (!(thflags & TH_SYN)) { 12894 ctf_do_drop(m, tp); 12895 return (1); 12896 } 12897 tp->irs = th->th_seq; 12898 tcp_rcvseqinit(tp); 12899 rack = (struct tcp_rack *)tp->t_fb_ptr; 12900 if (thflags & TH_ACK) { 12901 int tfo_partial = 0; 12902 12903 KMOD_TCPSTAT_INC(tcps_connects); 12904 soisconnected(so); 12905 #ifdef MAC 12906 mac_socketpeer_set_from_mbuf(m, so); 12907 #endif 12908 /* Do window scaling on this connection? 
*/ 12909 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 12910 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 12911 tp->rcv_scale = tp->request_r_scale; 12912 } 12913 tp->rcv_adv += min(tp->rcv_wnd, 12914 TCP_MAXWIN << tp->rcv_scale); 12915 /* 12916 * If not all the data that was sent in the TFO SYN 12917 * has been acked, resend the remainder right away. 12918 */ 12919 if ((tp->t_flags & TF_FASTOPEN) && 12920 (tp->snd_una != tp->snd_max)) { 12921 /* Was it a partial ack? */ 12922 if (SEQ_LT(th->th_ack, tp->snd_max)) 12923 tfo_partial = 1; 12924 } 12925 /* 12926 * If there's data, delay ACK; if there's also a FIN ACKNOW 12927 * will be turned on later. 12928 */ 12929 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 12930 rack_timer_cancel(tp, rack, 12931 rack->r_ctl.rc_rcvtime, __LINE__); 12932 tp->t_flags |= TF_DELACK; 12933 } else { 12934 rack->r_wanted_output = 1; 12935 tp->t_flags |= TF_ACKNOW; 12936 } 12937 12938 tcp_ecn_input_syn_sent(tp, thflags, iptos); 12939 12940 if (SEQ_GT(th->th_ack, tp->snd_una)) { 12941 /* 12942 * We advance snd_una for the 12943 * fast open case. If th_ack is 12944 * acknowledging data beyond 12945 * snd_una we can't just call 12946 * ack-processing since the 12947 * data stream in our send-map 12948 * will start at snd_una + 1 (one 12949 * beyond the SYN). If its just 12950 * equal we don't need to do that 12951 * and there is no send_map. 12952 */ 12953 tp->snd_una++; 12954 if (tfo_partial && (SEQ_GT(tp->snd_max, tp->snd_una))) { 12955 /* 12956 * We sent a SYN with data, and thus have a 12957 * sendmap entry with a SYN set. Lets find it 12958 * and take off the send bit and the byte and 12959 * set it up to be what we send (send it next). 12960 */ 12961 struct rack_sendmap *rsm; 12962 12963 rsm = tqhash_min(rack->r_ctl.tqh); 12964 if (rsm) { 12965 if (rsm->r_flags & RACK_HAS_SYN) { 12966 rsm->r_flags &= ~RACK_HAS_SYN; 12967 rsm->r_start++; 12968 } 12969 rack->r_ctl.rc_resend = rsm; 12970 } 12971 } 12972 } 12973 /* 12974 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 12975 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 12976 */ 12977 tp->t_starttime = ticks; 12978 if (tp->t_flags & TF_NEEDFIN) { 12979 tcp_state_change(tp, TCPS_FIN_WAIT_1); 12980 tp->t_flags &= ~TF_NEEDFIN; 12981 thflags &= ~TH_SYN; 12982 } else { 12983 tcp_state_change(tp, TCPS_ESTABLISHED); 12984 TCP_PROBE5(connect__established, NULL, tp, 12985 mtod(m, const char *), tp, th); 12986 rack_cc_conn_init(tp); 12987 } 12988 } else { 12989 /* 12990 * Received initial SYN in SYN-SENT[*] state => simultaneous 12991 * open. If segment contains CC option and there is a 12992 * cached CC, apply TAO test. If it succeeds, connection is * 12993 * half-synchronized. Otherwise, do 3-way handshake: 12994 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 12995 * there was no CC option, clear cached CC value. 12996 */ 12997 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); 12998 tcp_state_change(tp, TCPS_SYN_RECEIVED); 12999 } 13000 /* 13001 * Advance th->th_seq to correspond to first data byte. If data, 13002 * trim to stay within window, dropping FIN if necessary. 13003 */ 13004 th->th_seq++; 13005 if (tlen > tp->rcv_wnd) { 13006 todrop = tlen - tp->rcv_wnd; 13007 m_adj(m, -todrop); 13008 tlen = tp->rcv_wnd; 13009 thflags &= ~TH_FIN; 13010 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 13011 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 13012 } 13013 tp->snd_wl1 = th->th_seq - 1; 13014 tp->rcv_up = th->th_seq; 13015 /* 13016 * Client side of transaction: already sent SYN and data. 
If the 13017 * remote host used T/TCP to validate the SYN, our data will be 13018 * ACK'd; if so, enter normal data segment processing in the middle 13019 * of step 5, ack processing. Otherwise, goto step 6. 13020 */ 13021 if (thflags & TH_ACK) { 13022 /* For syn-sent we need to possibly update the rtt */ 13023 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 13024 uint32_t t, mcts; 13025 13026 mcts = tcp_ts_getticks(); 13027 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 13028 if (!tp->t_rttlow || tp->t_rttlow > t) 13029 tp->t_rttlow = t; 13030 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 13031 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 13032 tcp_rack_xmit_timer_commit(rack, tp); 13033 } 13034 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) 13035 return (ret_val); 13036 /* We may have changed to FIN_WAIT_1 above */ 13037 if (tp->t_state == TCPS_FIN_WAIT_1) { 13038 /* 13039 * In FIN_WAIT_1 STATE in addition to the processing 13040 * for the ESTABLISHED state if our FIN is now 13041 * acknowledged then enter FIN_WAIT_2. 13042 */ 13043 if (ourfinisacked) { 13044 /* 13045 * If we can't receive any more data, then 13046 * closing user can proceed. Starting the 13047 * timer is contrary to the specification, 13048 * but if we don't get a FIN we'll hang 13049 * forever. 13050 * 13051 * XXXjl: we should release the tp also, and 13052 * use a compressed state. 13053 */ 13054 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13055 soisdisconnected(so); 13056 tcp_timer_activate(tp, TT_2MSL, 13057 (tcp_fast_finwait2_recycle ? 13058 tcp_finwait2_timeout : 13059 TP_MAXIDLE(tp))); 13060 } 13061 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13062 } 13063 } 13064 } 13065 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13066 tiwin, thflags, nxt_pkt)); 13067 } 13068 13069 /* 13070 * Return value of 1, the TCB is unlocked and most 13071 * likely gone, return value of 0, the TCP is still 13072 * locked. 13073 */ 13074 static int 13075 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 13076 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13077 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13078 { 13079 struct tcp_rack *rack; 13080 int32_t orig_tlen = tlen; 13081 int32_t ret_val = 0; 13082 int32_t ourfinisacked = 0; 13083 13084 rack = (struct tcp_rack *)tp->t_fb_ptr; 13085 ctf_calc_rwin(so, tp); 13086 if ((thflags & TH_RST) || 13087 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13088 return (ctf_process_rst(m, th, so, tp)); 13089 if ((thflags & TH_ACK) && 13090 (SEQ_LEQ(th->th_ack, tp->snd_una) || 13091 SEQ_GT(th->th_ack, tp->snd_max))) { 13092 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13093 ctf_do_dropwithreset(m, tp, th, BANDLIM_TCP_RST, tlen); 13094 return (1); 13095 } 13096 if (tp->t_flags & TF_FASTOPEN) { 13097 /* 13098 * When a TFO connection is in SYN_RECEIVED, the 13099 * only valid packets are the initial SYN, a 13100 * retransmit/copy of the initial SYN (possibly with 13101 * a subset of the original data), a valid ACK, a 13102 * FIN, or a RST. 
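 *
 * Anything else is handled below: a segment with both SYN and ACK
 * set draws a reset, a bare SYN retransmit is dropped while a
 * retransmit/TLP/rack timer is pending, and a segment with none
 * of ACK, FIN or RST set is simply dropped.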
13103 */ 13104 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 13105 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13106 ctf_do_dropwithreset(m, tp, th, BANDLIM_TCP_RST, tlen); 13107 return (1); 13108 } else if (thflags & TH_SYN) { 13109 /* non-initial SYN is ignored */ 13110 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 13111 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 13112 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 13113 ctf_do_drop(m, NULL); 13114 return (0); 13115 } 13116 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 13117 ctf_do_drop(m, NULL); 13118 return (0); 13119 } 13120 } 13121 13122 /* 13123 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13124 * it's less than ts_recent, drop it. 13125 */ 13126 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13127 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13128 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13129 return (ret_val); 13130 } 13131 /* 13132 * In the SYN-RECEIVED state, validate that the packet belongs to 13133 * this connection before trimming the data to fit the receive 13134 * window. Check the sequence number versus IRS since we know the 13135 * sequence numbers haven't wrapped. This is a partial fix for the 13136 * "LAND" DoS attack. 13137 */ 13138 if (SEQ_LT(th->th_seq, tp->irs)) { 13139 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13140 ctf_do_dropwithreset(m, tp, th, BANDLIM_TCP_RST, tlen); 13141 return (1); 13142 } 13143 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13144 return (ret_val); 13145 } 13146 /* 13147 * If last ACK falls within this segment's sequence numbers, record 13148 * its timestamp. NOTE: 1) That the test incorporates suggestions 13149 * from the latest proposal of the tcplw@cray.com list (Braden 13150 * 1993/04/26). 2) That updating only on newer timestamps interferes 13151 * with our earlier PAWS tests, so this check should be solely 13152 * predicated on the sequence space of this segment. 3) That we 13153 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13154 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13155 * SEG.Len, This modified check allows us to overcome RFC1323's 13156 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13157 * p.869. In such cases, we can still calculate the RTT correctly 13158 * when RCV.NXT == Last.ACK.Sent. 13159 */ 13160 if ((to->to_flags & TOF_TS) != 0 && 13161 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13162 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13163 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13164 tp->ts_recent_age = tcp_ts_getticks(); 13165 tp->ts_recent = to->to_tsval; 13166 } 13167 tp->snd_wnd = tiwin; 13168 rack_validate_fo_sendwin_up(tp, rack); 13169 /* 13170 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13171 * is on (half-synchronized state), then queue data for later 13172 * processing; else drop segment and return. 13173 */ 13174 if ((thflags & TH_ACK) == 0) { 13175 if (tp->t_flags & TF_FASTOPEN) { 13176 rack_cc_conn_init(tp); 13177 } 13178 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13179 tiwin, thflags, nxt_pkt)); 13180 } 13181 KMOD_TCPSTAT_INC(tcps_connects); 13182 if (tp->t_flags & TF_SONOTCONN) { 13183 tp->t_flags &= ~TF_SONOTCONN; 13184 soisconnected(so); 13185 } 13186 /* Do window scaling? 
*/ 13187 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 13188 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 13189 tp->rcv_scale = tp->request_r_scale; 13190 } 13191 /* 13192 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 13193 * FIN-WAIT-1 13194 */ 13195 tp->t_starttime = ticks; 13196 if ((tp->t_flags & TF_FASTOPEN) && tp->t_tfo_pending) { 13197 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 13198 tp->t_tfo_pending = NULL; 13199 } 13200 if (tp->t_flags & TF_NEEDFIN) { 13201 tcp_state_change(tp, TCPS_FIN_WAIT_1); 13202 tp->t_flags &= ~TF_NEEDFIN; 13203 } else { 13204 tcp_state_change(tp, TCPS_ESTABLISHED); 13205 TCP_PROBE5(accept__established, NULL, tp, 13206 mtod(m, const char *), tp, th); 13207 /* 13208 * TFO connections call cc_conn_init() during SYN 13209 * processing. Calling it again here for such connections 13210 * is not harmless as it would undo the snd_cwnd reduction 13211 * that occurs when a TFO SYN|ACK is retransmitted. 13212 */ 13213 if (!(tp->t_flags & TF_FASTOPEN)) 13214 rack_cc_conn_init(tp); 13215 } 13216 /* 13217 * Account for the ACK of our SYN prior to 13218 * regular ACK processing below, except for 13219 * simultaneous SYN, which is handled later. 13220 */ 13221 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 13222 tp->snd_una++; 13223 /* 13224 * If segment contains data or ACK, will call tcp_reass() later; if 13225 * not, do so now to pass queued data to user. 13226 */ 13227 if (tlen == 0 && (thflags & TH_FIN) == 0) { 13228 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 13229 (struct mbuf *)0); 13230 if (tp->t_flags & TF_WAKESOR) { 13231 tp->t_flags &= ~TF_WAKESOR; 13232 /* NB: sorwakeup_locked() does an implicit unlock. */ 13233 sorwakeup_locked(so); 13234 } 13235 } 13236 tp->snd_wl1 = th->th_seq - 1; 13237 /* For syn-recv we need to possibly update the rtt */ 13238 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 13239 uint32_t t, mcts; 13240 13241 mcts = tcp_ts_getticks(); 13242 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 13243 if (!tp->t_rttlow || tp->t_rttlow > t) 13244 tp->t_rttlow = t; 13245 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); 13246 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 13247 tcp_rack_xmit_timer_commit(rack, tp); 13248 } 13249 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13250 return (ret_val); 13251 } 13252 if (tp->t_state == TCPS_FIN_WAIT_1) { 13253 /* We could have went to FIN_WAIT_1 (or EST) above */ 13254 /* 13255 * In FIN_WAIT_1 STATE in addition to the processing for the 13256 * ESTABLISHED state if our FIN is now acknowledged then 13257 * enter FIN_WAIT_2. 13258 */ 13259 if (ourfinisacked) { 13260 /* 13261 * If we can't receive any more data, then closing 13262 * user can proceed. Starting the timer is contrary 13263 * to the specification, but if we don't get a FIN 13264 * we'll hang forever. 13265 * 13266 * XXXjl: we should release the tp also, and use a 13267 * compressed state. 13268 */ 13269 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13270 soisdisconnected(so); 13271 tcp_timer_activate(tp, TT_2MSL, 13272 (tcp_fast_finwait2_recycle ? 
13273 tcp_finwait2_timeout : 13274 TP_MAXIDLE(tp))); 13275 } 13276 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13277 } 13278 } 13279 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13280 tiwin, thflags, nxt_pkt)); 13281 } 13282 13283 /* 13284 * Return value of 1, the TCB is unlocked and most 13285 * likely gone, return value of 0, the TCP is still 13286 * locked. 13287 */ 13288 static int 13289 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 13290 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13291 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13292 { 13293 int32_t ret_val = 0; 13294 int32_t orig_tlen = tlen; 13295 struct tcp_rack *rack; 13296 13297 /* 13298 * Header prediction: check for the two common cases of a 13299 * uni-directional data xfer. If the packet has no control flags, 13300 * is in-sequence, the window didn't change and we're not 13301 * retransmitting, it's a candidate. If the length is zero and the 13302 * ack moved forward, we're the sender side of the xfer. Just free 13303 * the data acked & wake any higher level process that was blocked 13304 * waiting for space. If the length is non-zero and the ack didn't 13305 * move, we're the receiver side. If we're getting packets in-order 13306 * (the reassembly queue is empty), add the data toc The socket 13307 * buffer and note that we need a delayed ack. Make sure that the 13308 * hidden state-flags are also off. Since we check for 13309 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 13310 */ 13311 rack = (struct tcp_rack *)tp->t_fb_ptr; 13312 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 13313 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) && 13314 __predict_true(SEGQ_EMPTY(tp)) && 13315 __predict_true(th->th_seq == tp->rcv_nxt)) { 13316 if (tlen == 0) { 13317 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 13318 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 13319 return (0); 13320 } 13321 } else { 13322 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 13323 tiwin, nxt_pkt, iptos)) { 13324 return (0); 13325 } 13326 } 13327 } 13328 ctf_calc_rwin(so, tp); 13329 13330 if ((thflags & TH_RST) || 13331 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13332 return (ctf_process_rst(m, th, so, tp)); 13333 13334 /* 13335 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13336 * synchronized state. 13337 */ 13338 if (thflags & TH_SYN) { 13339 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13340 return (ret_val); 13341 } 13342 /* 13343 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13344 * it's less than ts_recent, drop it. 13345 */ 13346 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13347 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13348 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13349 return (ret_val); 13350 } 13351 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13352 return (ret_val); 13353 } 13354 /* 13355 * If last ACK falls within this segment's sequence numbers, record 13356 * its timestamp. NOTE: 1) That the test incorporates suggestions 13357 * from the latest proposal of the tcplw@cray.com list (Braden 13358 * 1993/04/26). 2) That updating only on newer timestamps interferes 13359 * with our earlier PAWS tests, so this check should be solely 13360 * predicated on the sequence space of this segment. 
3) That we 13361 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13362 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13363 * SEG.Len, This modified check allows us to overcome RFC1323's 13364 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13365 * p.869. In such cases, we can still calculate the RTT correctly 13366 * when RCV.NXT == Last.ACK.Sent. 13367 */ 13368 if ((to->to_flags & TOF_TS) != 0 && 13369 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13370 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13371 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13372 tp->ts_recent_age = tcp_ts_getticks(); 13373 tp->ts_recent = to->to_tsval; 13374 } 13375 /* 13376 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13377 * is on (half-synchronized state), then queue data for later 13378 * processing; else drop segment and return. 13379 */ 13380 if ((thflags & TH_ACK) == 0) { 13381 if (tp->t_flags & TF_NEEDSYN) { 13382 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13383 tiwin, thflags, nxt_pkt)); 13384 13385 } else if (tp->t_flags & TF_ACKNOW) { 13386 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13387 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13388 return (ret_val); 13389 } else { 13390 ctf_do_drop(m, NULL); 13391 return (0); 13392 } 13393 } 13394 /* 13395 * Ack processing. 13396 */ 13397 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val, orig_tlen)) { 13398 return (ret_val); 13399 } 13400 if (sbavail(&so->so_snd)) { 13401 if (ctf_progress_timeout_check(tp, true)) { 13402 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 13403 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_TCP_RST, tlen); 13404 return (1); 13405 } 13406 } 13407 /* State changes only happen in rack_process_data() */ 13408 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13409 tiwin, thflags, nxt_pkt)); 13410 } 13411 13412 /* 13413 * Return value of 1, the TCB is unlocked and most 13414 * likely gone, return value of 0, the TCP is still 13415 * locked. 13416 */ 13417 static int 13418 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 13419 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13420 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13421 { 13422 int32_t ret_val = 0; 13423 int32_t orig_tlen = tlen; 13424 13425 ctf_calc_rwin(so, tp); 13426 if ((thflags & TH_RST) || 13427 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13428 return (ctf_process_rst(m, th, so, tp)); 13429 /* 13430 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13431 * synchronized state. 13432 */ 13433 if (thflags & TH_SYN) { 13434 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13435 return (ret_val); 13436 } 13437 /* 13438 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13439 * it's less than ts_recent, drop it. 13440 */ 13441 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13442 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13443 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13444 return (ret_val); 13445 } 13446 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13447 return (ret_val); 13448 } 13449 /* 13450 * If last ACK falls within this segment's sequence numbers, record 13451 * its timestamp. NOTE: 1) That the test incorporates suggestions 13452 * from the latest proposal of the tcplw@cray.com list (Braden 13453 * 1993/04/26). 
2) That updating only on newer timestamps interferes 13454 * with our earlier PAWS tests, so this check should be solely 13455 * predicated on the sequence space of this segment. 3) That we 13456 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13457 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13458 * SEG.Len, This modified check allows us to overcome RFC1323's 13459 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13460 * p.869. In such cases, we can still calculate the RTT correctly 13461 * when RCV.NXT == Last.ACK.Sent. 13462 */ 13463 if ((to->to_flags & TOF_TS) != 0 && 13464 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13465 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13466 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13467 tp->ts_recent_age = tcp_ts_getticks(); 13468 tp->ts_recent = to->to_tsval; 13469 } 13470 /* 13471 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13472 * is on (half-synchronized state), then queue data for later 13473 * processing; else drop segment and return. 13474 */ 13475 if ((thflags & TH_ACK) == 0) { 13476 if (tp->t_flags & TF_NEEDSYN) { 13477 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13478 tiwin, thflags, nxt_pkt)); 13479 13480 } else if (tp->t_flags & TF_ACKNOW) { 13481 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13482 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13483 return (ret_val); 13484 } else { 13485 ctf_do_drop(m, NULL); 13486 return (0); 13487 } 13488 } 13489 /* 13490 * Ack processing. 13491 */ 13492 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val, orig_tlen)) { 13493 return (ret_val); 13494 } 13495 if (sbavail(&so->so_snd)) { 13496 if (ctf_progress_timeout_check(tp, true)) { 13497 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13498 tp, tick, PROGRESS_DROP, __LINE__); 13499 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_TCP_RST, tlen); 13500 return (1); 13501 } 13502 } 13503 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13504 tiwin, thflags, nxt_pkt)); 13505 } 13506 13507 static int 13508 rack_check_data_after_close(struct mbuf *m, 13509 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 13510 { 13511 struct tcp_rack *rack; 13512 13513 rack = (struct tcp_rack *)tp->t_fb_ptr; 13514 if (rack->rc_allow_data_af_clo == 0) { 13515 close_now: 13516 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 13517 /* tcp_close will kill the inp pre-log the Reset */ 13518 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 13519 tp = tcp_close(tp); 13520 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 13521 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 13522 return (1); 13523 } 13524 if (sbavail(&so->so_snd) == 0) 13525 goto close_now; 13526 /* Ok we allow data that is ignored and a followup reset */ 13527 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 13528 tp->rcv_nxt = th->th_seq + *tlen; 13529 tp->t_flags2 |= TF2_DROP_AF_DATA; 13530 rack->r_wanted_output = 1; 13531 *tlen = 0; 13532 return (0); 13533 } 13534 13535 /* 13536 * Return value of 1, the TCB is unlocked and most 13537 * likely gone, return value of 0, the TCP is still 13538 * locked. 
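 * A return of 1 typically means a path such as tcp_close() or
 * ctf_do_dropwithreset_conn() has already released the lock, so the
 * caller must not touch tp afterwards.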
13539 */ 13540 static int 13541 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 13542 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13543 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13544 { 13545 int32_t ret_val = 0; 13546 int32_t orig_tlen = tlen; 13547 int32_t ourfinisacked = 0; 13548 13549 ctf_calc_rwin(so, tp); 13550 13551 if ((thflags & TH_RST) || 13552 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13553 return (ctf_process_rst(m, th, so, tp)); 13554 /* 13555 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13556 * synchronized state. 13557 */ 13558 if (thflags & TH_SYN) { 13559 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13560 return (ret_val); 13561 } 13562 /* 13563 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13564 * it's less than ts_recent, drop it. 13565 */ 13566 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13567 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13568 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13569 return (ret_val); 13570 } 13571 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13572 return (ret_val); 13573 } 13574 /* 13575 * If new data are received on a connection after the user processes 13576 * are gone, then RST the other end. 13577 */ 13578 if ((tp->t_flags & TF_CLOSED) && tlen && 13579 rack_check_data_after_close(m, tp, &tlen, th, so)) 13580 return (1); 13581 /* 13582 * If last ACK falls within this segment's sequence numbers, record 13583 * its timestamp. NOTE: 1) That the test incorporates suggestions 13584 * from the latest proposal of the tcplw@cray.com list (Braden 13585 * 1993/04/26). 2) That updating only on newer timestamps interferes 13586 * with our earlier PAWS tests, so this check should be solely 13587 * predicated on the sequence space of this segment. 3) That we 13588 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13589 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13590 * SEG.Len, This modified check allows us to overcome RFC1323's 13591 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13592 * p.869. In such cases, we can still calculate the RTT correctly 13593 * when RCV.NXT == Last.ACK.Sent. 13594 */ 13595 if ((to->to_flags & TOF_TS) != 0 && 13596 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13597 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13598 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13599 tp->ts_recent_age = tcp_ts_getticks(); 13600 tp->ts_recent = to->to_tsval; 13601 } 13602 /* 13603 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13604 * is on (half-synchronized state), then queue data for later 13605 * processing; else drop segment and return. 13606 */ 13607 if ((thflags & TH_ACK) == 0) { 13608 if (tp->t_flags & TF_NEEDSYN) { 13609 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13610 tiwin, thflags, nxt_pkt)); 13611 } else if (tp->t_flags & TF_ACKNOW) { 13612 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13613 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13614 return (ret_val); 13615 } else { 13616 ctf_do_drop(m, NULL); 13617 return (0); 13618 } 13619 } 13620 /* 13621 * Ack processing. 13622 */ 13623 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13624 return (ret_val); 13625 } 13626 if (ourfinisacked) { 13627 /* 13628 * If we can't receive any more data, then closing user can 13629 * proceed. 
Starting the timer is contrary to the 13630 * specification, but if we don't get a FIN we'll hang 13631 * forever. 13632 * 13633 * XXXjl: we should release the tp also, and use a 13634 * compressed state. 13635 */ 13636 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13637 soisdisconnected(so); 13638 tcp_timer_activate(tp, TT_2MSL, 13639 (tcp_fast_finwait2_recycle ? 13640 tcp_finwait2_timeout : 13641 TP_MAXIDLE(tp))); 13642 } 13643 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13644 } 13645 if (sbavail(&so->so_snd)) { 13646 if (ctf_progress_timeout_check(tp, true)) { 13647 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13648 tp, tick, PROGRESS_DROP, __LINE__); 13649 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_TCP_RST, tlen); 13650 return (1); 13651 } 13652 } 13653 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13654 tiwin, thflags, nxt_pkt)); 13655 } 13656 13657 /* 13658 * Return value of 1, the TCB is unlocked and most 13659 * likely gone, return value of 0, the TCP is still 13660 * locked. 13661 */ 13662 static int 13663 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 13664 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13665 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13666 { 13667 int32_t ret_val = 0; 13668 int32_t orig_tlen = tlen; 13669 int32_t ourfinisacked = 0; 13670 13671 ctf_calc_rwin(so, tp); 13672 13673 if ((thflags & TH_RST) || 13674 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13675 return (ctf_process_rst(m, th, so, tp)); 13676 /* 13677 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13678 * synchronized state. 13679 */ 13680 if (thflags & TH_SYN) { 13681 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13682 return (ret_val); 13683 } 13684 /* 13685 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13686 * it's less than ts_recent, drop it. 13687 */ 13688 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13689 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13690 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13691 return (ret_val); 13692 } 13693 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13694 return (ret_val); 13695 } 13696 /* 13697 * If last ACK falls within this segment's sequence numbers, record 13698 * its timestamp. NOTE: 1) That the test incorporates suggestions 13699 * from the latest proposal of the tcplw@cray.com list (Braden 13700 * 1993/04/26). 2) That updating only on newer timestamps interferes 13701 * with our earlier PAWS tests, so this check should be solely 13702 * predicated on the sequence space of this segment. 3) That we 13703 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13704 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13705 * SEG.Len, This modified check allows us to overcome RFC1323's 13706 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13707 * p.869. In such cases, we can still calculate the RTT correctly 13708 * when RCV.NXT == Last.ACK.Sent. 13709 */ 13710 if ((to->to_flags & TOF_TS) != 0 && 13711 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13712 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13713 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13714 tp->ts_recent_age = tcp_ts_getticks(); 13715 tp->ts_recent = to->to_tsval; 13716 } 13717 /* 13718 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13719 * is on (half-synchronized state), then queue data for later 13720 * processing; else drop segment and return. 
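 * (The "SENDSYN flag" referred to above is TF_NEEDSYN in the code
 * below.)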
13721 */ 13722 if ((thflags & TH_ACK) == 0) { 13723 if (tp->t_flags & TF_NEEDSYN) { 13724 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13725 tiwin, thflags, nxt_pkt)); 13726 } else if (tp->t_flags & TF_ACKNOW) { 13727 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13728 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13729 return (ret_val); 13730 } else { 13731 ctf_do_drop(m, NULL); 13732 return (0); 13733 } 13734 } 13735 /* 13736 * Ack processing. 13737 */ 13738 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13739 return (ret_val); 13740 } 13741 if (ourfinisacked) { 13742 tcp_twstart(tp); 13743 m_freem(m); 13744 return (1); 13745 } 13746 if (sbavail(&so->so_snd)) { 13747 if (ctf_progress_timeout_check(tp, true)) { 13748 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13749 tp, tick, PROGRESS_DROP, __LINE__); 13750 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_TCP_RST, tlen); 13751 return (1); 13752 } 13753 } 13754 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13755 tiwin, thflags, nxt_pkt)); 13756 } 13757 13758 /* 13759 * Return value of 1, the TCB is unlocked and most 13760 * likely gone, return value of 0, the TCP is still 13761 * locked. 13762 */ 13763 static int 13764 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 13765 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13766 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13767 { 13768 int32_t ret_val = 0; 13769 int32_t orig_tlen; 13770 int32_t ourfinisacked = 0; 13771 13772 ctf_calc_rwin(so, tp); 13773 13774 if ((thflags & TH_RST) || 13775 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13776 return (ctf_process_rst(m, th, so, tp)); 13777 /* 13778 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13779 * synchronized state. 13780 */ 13781 if (thflags & TH_SYN) { 13782 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13783 return (ret_val); 13784 } 13785 /* 13786 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13787 * it's less than ts_recent, drop it. 13788 */ 13789 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13790 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13791 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13792 return (ret_val); 13793 } 13794 orig_tlen = tlen; 13795 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13796 return (ret_val); 13797 } 13798 /* 13799 * If last ACK falls within this segment's sequence numbers, record 13800 * its timestamp. NOTE: 1) That the test incorporates suggestions 13801 * from the latest proposal of the tcplw@cray.com list (Braden 13802 * 1993/04/26). 2) That updating only on newer timestamps interferes 13803 * with our earlier PAWS tests, so this check should be solely 13804 * predicated on the sequence space of this segment. 3) That we 13805 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13806 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13807 * SEG.Len, This modified check allows us to overcome RFC1323's 13808 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13809 * p.869. In such cases, we can still calculate the RTT correctly 13810 * when RCV.NXT == Last.ACK.Sent. 
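 * The '(thflags & (TH_SYN | TH_FIN)) != 0' term in the check below
 * accounts for the one unit of sequence space a SYN or FIN consumes,
 * so a segment carrying only a FIN still satisfies the boundary test.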
13811 */ 13812 if ((to->to_flags & TOF_TS) != 0 && 13813 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13814 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13815 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13816 tp->ts_recent_age = tcp_ts_getticks(); 13817 tp->ts_recent = to->to_tsval; 13818 } 13819 /* 13820 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13821 * is on (half-synchronized state), then queue data for later 13822 * processing; else drop segment and return. 13823 */ 13824 if ((thflags & TH_ACK) == 0) { 13825 if (tp->t_flags & TF_NEEDSYN) { 13826 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13827 tiwin, thflags, nxt_pkt)); 13828 } else if (tp->t_flags & TF_ACKNOW) { 13829 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13830 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13831 return (ret_val); 13832 } else { 13833 ctf_do_drop(m, NULL); 13834 return (0); 13835 } 13836 } 13837 /* 13838 * case TCPS_LAST_ACK: Ack processing. 13839 */ 13840 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13841 return (ret_val); 13842 } 13843 if (ourfinisacked) { 13844 tp = tcp_close(tp); 13845 ctf_do_drop(m, tp); 13846 return (1); 13847 } 13848 if (sbavail(&so->so_snd)) { 13849 if (ctf_progress_timeout_check(tp, true)) { 13850 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13851 tp, tick, PROGRESS_DROP, __LINE__); 13852 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_TCP_RST, tlen); 13853 return (1); 13854 } 13855 } 13856 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13857 tiwin, thflags, nxt_pkt)); 13858 } 13859 13860 /* 13861 * Return value of 1, the TCB is unlocked and most 13862 * likely gone, return value of 0, the TCP is still 13863 * locked. 13864 */ 13865 static int 13866 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 13867 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13868 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13869 { 13870 int32_t ret_val = 0; 13871 int32_t orig_tlen = tlen; 13872 int32_t ourfinisacked = 0; 13873 13874 ctf_calc_rwin(so, tp); 13875 13876 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 13877 if ((thflags & TH_RST) || 13878 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13879 return (ctf_process_rst(m, th, so, tp)); 13880 /* 13881 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13882 * synchronized state. 13883 */ 13884 if (thflags & TH_SYN) { 13885 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13886 return (ret_val); 13887 } 13888 /* 13889 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13890 * it's less than ts_recent, drop it. 13891 */ 13892 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13893 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13894 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13895 return (ret_val); 13896 } 13897 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 13898 return (ret_val); 13899 } 13900 /* 13901 * If new data are received on a connection after the user processes 13902 * are gone, then RST the other end. 13903 */ 13904 if ((tp->t_flags & TF_CLOSED) && tlen && 13905 rack_check_data_after_close(m, tp, &tlen, th, so)) 13906 return (1); 13907 /* 13908 * If last ACK falls within this segment's sequence numbers, record 13909 * its timestamp. NOTE: 1) That the test incorporates suggestions 13910 * from the latest proposal of the tcplw@cray.com list (Braden 13911 * 1993/04/26). 
2) That updating only on newer timestamps interferes 13912 * with our earlier PAWS tests, so this check should be solely 13913 * predicated on the sequence space of this segment. 3) That we 13914 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13915 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13916 * SEG.Len, This modified check allows us to overcome RFC1323's 13917 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13918 * p.869. In such cases, we can still calculate the RTT correctly 13919 * when RCV.NXT == Last.ACK.Sent. 13920 */ 13921 if ((to->to_flags & TOF_TS) != 0 && 13922 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13923 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13924 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13925 tp->ts_recent_age = tcp_ts_getticks(); 13926 tp->ts_recent = to->to_tsval; 13927 } 13928 /* 13929 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13930 * is on (half-synchronized state), then queue data for later 13931 * processing; else drop segment and return. 13932 */ 13933 if ((thflags & TH_ACK) == 0) { 13934 if (tp->t_flags & TF_NEEDSYN) { 13935 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13936 tiwin, thflags, nxt_pkt)); 13937 } else if (tp->t_flags & TF_ACKNOW) { 13938 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13939 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13940 return (ret_val); 13941 } else { 13942 ctf_do_drop(m, NULL); 13943 return (0); 13944 } 13945 } 13946 /* 13947 * Ack processing. 13948 */ 13949 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val, orig_tlen)) { 13950 return (ret_val); 13951 } 13952 if (sbavail(&so->so_snd)) { 13953 if (ctf_progress_timeout_check(tp, true)) { 13954 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13955 tp, tick, PROGRESS_DROP, __LINE__); 13956 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_TCP_RST, tlen); 13957 return (1); 13958 } 13959 } 13960 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13961 tiwin, thflags, nxt_pkt)); 13962 } 13963 13964 static void inline 13965 rack_clear_rate_sample(struct tcp_rack *rack) 13966 { 13967 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 13968 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 13969 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 13970 } 13971 13972 static void 13973 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 13974 { 13975 uint64_t bw_est, rate_wanted; 13976 int chged = 0; 13977 uint32_t user_max, orig_min, orig_max; 13978 13979 #ifdef TCP_REQUEST_TRK 13980 if (rack->rc_hybrid_mode && 13981 (rack->r_ctl.rc_pace_max_segs != 0) && 13982 (rack_hybrid_allow_set_maxseg == 1) && 13983 (rack->r_ctl.rc_last_sft != NULL)) { 13984 rack->r_ctl.rc_last_sft->hybrid_flags &= ~TCP_HYBRID_PACING_SETMSS; 13985 return; 13986 } 13987 #endif 13988 orig_min = rack->r_ctl.rc_pace_min_segs; 13989 orig_max = rack->r_ctl.rc_pace_max_segs; 13990 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 13991 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 13992 chged = 1; 13993 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 13994 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 13995 if (user_max != rack->r_ctl.rc_pace_max_segs) 13996 chged = 1; 13997 } 13998 if (rack->rc_force_max_seg) { 13999 rack->r_ctl.rc_pace_max_segs = user_max; 14000 } else if (rack->use_fixed_rate) { 14001 bw_est = rack_get_bw(rack); 14002 if ((rack->r_ctl.crte == NULL) || 14003 (bw_est != 
rack->r_ctl.crte->rate)) { 14004 rack->r_ctl.rc_pace_max_segs = user_max; 14005 } else { 14006 /* We are pacing right at the hardware rate */ 14007 uint32_t segsiz, pace_one; 14008 14009 if (rack_pace_one_seg || 14010 (rack->r_ctl.rc_user_set_min_segs == 1)) 14011 pace_one = 1; 14012 else 14013 pace_one = 0; 14014 segsiz = min(ctf_fixed_maxseg(tp), 14015 rack->r_ctl.rc_pace_min_segs); 14016 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor( 14017 tp, bw_est, segsiz, pace_one, 14018 rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); 14019 } 14020 } else if (rack->rc_always_pace) { 14021 if (rack->r_ctl.gp_bw || 14022 rack->r_ctl.init_rate) { 14023 /* We have a rate of some sort set */ 14024 uint32_t orig; 14025 14026 bw_est = rack_get_bw(rack); 14027 orig = rack->r_ctl.rc_pace_max_segs; 14028 if (fill_override) 14029 rate_wanted = *fill_override; 14030 else 14031 rate_wanted = rack_get_gp_est(rack); 14032 if (rate_wanted) { 14033 /* We have something */ 14034 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 14035 rate_wanted, 14036 ctf_fixed_maxseg(rack->rc_tp)); 14037 } else 14038 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 14039 if (orig != rack->r_ctl.rc_pace_max_segs) 14040 chged = 1; 14041 } else if ((rack->r_ctl.gp_bw == 0) && 14042 (rack->r_ctl.rc_pace_max_segs == 0)) { 14043 /* 14044 * If we have nothing limit us to bursting 14045 * out IW sized pieces. 14046 */ 14047 chged = 1; 14048 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 14049 } 14050 } 14051 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 14052 chged = 1; 14053 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 14054 } 14055 if (chged) 14056 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 14057 } 14058 14059 14060 static void 14061 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack, int32_t flags) 14062 { 14063 #ifdef INET6 14064 struct ip6_hdr *ip6 = NULL; 14065 #endif 14066 #ifdef INET 14067 struct ip *ip = NULL; 14068 #endif 14069 struct udphdr *udp = NULL; 14070 14071 /* Ok lets fill in the fast block, it can only be used with no IP options! 
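 * The fast send block caches a pre-built IP/IPv6 (plus optional UDP
 * tunneling) and TCP header template, filled in via tcpip_fillheaders(),
 * so the fast output path can copy a ready-made header rather than
 * rebuild it for every packet.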
*/ 14072 #ifdef INET6 14073 if (rack->r_is_v6) { 14074 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 14075 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 14076 if (tp->t_port) { 14077 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 14078 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 14079 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 14080 udp->uh_dport = tp->t_port; 14081 rack->r_ctl.fsb.udp = udp; 14082 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 14083 } else 14084 { 14085 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 14086 rack->r_ctl.fsb.udp = NULL; 14087 } 14088 tcpip_fillheaders(rack->rc_inp, 14089 tp->t_port, 14090 ip6, rack->r_ctl.fsb.th); 14091 rack->r_ctl.fsb.hoplimit = in6_selecthlim(rack->rc_inp, NULL); 14092 } else 14093 #endif /* INET6 */ 14094 #ifdef INET 14095 { 14096 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 14097 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 14098 if (tp->t_port) { 14099 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 14100 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 14101 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 14102 udp->uh_dport = tp->t_port; 14103 rack->r_ctl.fsb.udp = udp; 14104 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 14105 } else 14106 { 14107 rack->r_ctl.fsb.udp = NULL; 14108 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 14109 } 14110 tcpip_fillheaders(rack->rc_inp, 14111 tp->t_port, 14112 ip, rack->r_ctl.fsb.th); 14113 rack->r_ctl.fsb.hoplimit = tptoinpcb(tp)->inp_ip_ttl; 14114 } 14115 #endif 14116 rack->r_ctl.fsb.recwin = lmin(lmax(sbspace(&tptosocket(tp)->so_rcv), 0), 14117 (long)TCP_MAXWIN << tp->rcv_scale); 14118 rack->r_fsb_inited = 1; 14119 } 14120 14121 static int 14122 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 14123 { 14124 /* 14125 * Allocate the larger of spaces V6 if available else just 14126 * V4 and include udphdr (overbook) 14127 */ 14128 #ifdef INET6 14129 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 14130 #else 14131 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 14132 #endif 14133 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 14134 M_TCPFSB, M_NOWAIT|M_ZERO); 14135 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 14136 return (ENOMEM); 14137 } 14138 rack->r_fsb_inited = 0; 14139 return (0); 14140 } 14141 14142 static void 14143 rack_log_hystart_event(struct tcp_rack *rack, uint32_t high_seq, uint8_t mod) 14144 { 14145 /* 14146 * Types of logs (mod value) 14147 * 20 - Initial round setup 14148 * 21 - Rack declares a new round. 
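 * flex1 carries the current round, flex2 where the round ends,
 * flex3 the high sequence passed in and flex4 snd_max.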
14149 */ 14150 struct tcpcb *tp; 14151 14152 tp = rack->rc_tp; 14153 if (tcp_bblogging_on(tp)) { 14154 union tcp_log_stackspecific log; 14155 struct timeval tv; 14156 14157 memset(&log, 0, sizeof(log)); 14158 log.u_bbr.flex1 = rack->r_ctl.current_round; 14159 log.u_bbr.flex2 = rack->r_ctl.roundends; 14160 log.u_bbr.flex3 = high_seq; 14161 log.u_bbr.flex4 = tp->snd_max; 14162 log.u_bbr.flex8 = mod; 14163 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14164 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 14165 log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes; 14166 TCP_LOG_EVENTP(tp, NULL, 14167 &tptosocket(tp)->so_rcv, 14168 &tptosocket(tp)->so_snd, 14169 TCP_HYSTART, 0, 14170 0, &log, false, &tv); 14171 } 14172 } 14173 14174 static void 14175 rack_deferred_init(struct tcpcb *tp, struct tcp_rack *rack) 14176 { 14177 rack->rack_deferred_inited = 1; 14178 rack->r_ctl.roundends = tp->snd_max; 14179 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 14180 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 14181 } 14182 14183 static void 14184 rack_init_retransmit_value(struct tcp_rack *rack, int ctl) 14185 { 14186 /* Retransmit bit controls. 14187 * 14188 * The setting of these values control one of 14189 * three settings you can have and dictate 14190 * how rack does retransmissions. Note this 14191 * is in *any* mode i.e. pacing on or off DGP 14192 * fixed rate pacing, or just bursting rack. 14193 * 14194 * 1 - Use full sized retransmits i.e. limit 14195 * the size to whatever the pace_max_segments 14196 * size is. 14197 * 14198 * 2 - Use pacer min granularity as a guide to 14199 * the size combined with the current calculated 14200 * goodput b/w measurement. So for example if 14201 * the goodput is measured at 20Mbps we would 14202 * calculate 8125 (pacer minimum 250usec in 14203 * that b/w) and then round it up to the next 14204 * MSS i.e. for 1448 mss 6 MSS or 8688 bytes. 14205 * 14206 * 0 - The rack default 1 MSS (anything not 0/1/2 14207 * fall here too if we are setting via rack_init()). 
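 * In the code below, ctl == 1 sets full_size_rxt, ctl == 2 sets
 * shape_rxt_to_pacing_min, and any other value clears both flags.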
14208 * 14209 */ 14210 if (ctl == 1) { 14211 rack->full_size_rxt = 1; 14212 rack->shape_rxt_to_pacing_min = 0; 14213 } else if (ctl == 2) { 14214 rack->full_size_rxt = 0; 14215 rack->shape_rxt_to_pacing_min = 1; 14216 } else { 14217 rack->full_size_rxt = 0; 14218 rack->shape_rxt_to_pacing_min = 0; 14219 } 14220 } 14221 14222 static void 14223 rack_log_chg_info(struct tcpcb *tp, struct tcp_rack *rack, uint8_t mod, 14224 uint32_t flex1, 14225 uint32_t flex2, 14226 uint32_t flex3) 14227 { 14228 if (tcp_bblogging_on(rack->rc_tp)) { 14229 union tcp_log_stackspecific log; 14230 struct timeval tv; 14231 14232 memset(&log, 0, sizeof(log)); 14233 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14234 log.u_bbr.flex8 = mod; 14235 log.u_bbr.flex1 = flex1; 14236 log.u_bbr.flex2 = flex2; 14237 log.u_bbr.flex3 = flex3; 14238 tcp_log_event(tp, NULL, NULL, NULL, TCP_CHG_QUERY, 0, 14239 0, &log, false, NULL, __func__, __LINE__, &tv); 14240 } 14241 } 14242 14243 static int 14244 rack_chg_query(struct tcpcb *tp, struct tcp_query_resp *reqr) 14245 { 14246 struct tcp_rack *rack; 14247 struct rack_sendmap *rsm; 14248 int i; 14249 14250 14251 rack = (struct tcp_rack *)tp->t_fb_ptr; 14252 switch (reqr->req) { 14253 case TCP_QUERY_SENDMAP: 14254 if ((reqr->req_param == tp->snd_max) || 14255 (tp->snd_max == tp->snd_una)){ 14256 /* Unlikely */ 14257 return (0); 14258 } 14259 rsm = tqhash_find(rack->r_ctl.tqh, reqr->req_param); 14260 if (rsm == NULL) { 14261 /* Can't find that seq -- unlikely */ 14262 return (0); 14263 } 14264 reqr->sendmap_start = rsm->r_start; 14265 reqr->sendmap_end = rsm->r_end; 14266 reqr->sendmap_send_cnt = rsm->r_rtr_cnt; 14267 reqr->sendmap_fas = rsm->r_fas; 14268 if (reqr->sendmap_send_cnt > SNDMAP_NRTX) 14269 reqr->sendmap_send_cnt = SNDMAP_NRTX; 14270 for(i=0; i<reqr->sendmap_send_cnt; i++) 14271 reqr->sendmap_time[i] = rsm->r_tim_lastsent[i]; 14272 reqr->sendmap_ack_arrival = rsm->r_ack_arrival; 14273 reqr->sendmap_flags = rsm->r_flags & SNDMAP_MASK; 14274 reqr->sendmap_r_rtr_bytes = rsm->r_rtr_bytes; 14275 reqr->sendmap_dupacks = rsm->r_dupack; 14276 rack_log_chg_info(tp, rack, 1, 14277 rsm->r_start, 14278 rsm->r_end, 14279 rsm->r_flags); 14280 return(1); 14281 break; 14282 case TCP_QUERY_TIMERS_UP: 14283 if (rack->r_ctl.rc_hpts_flags == 0) { 14284 /* no timers up */ 14285 return (0); 14286 } 14287 reqr->timer_hpts_flags = rack->r_ctl.rc_hpts_flags; 14288 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14289 reqr->timer_pacing_to = rack->r_ctl.rc_last_output_to; 14290 } 14291 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 14292 reqr->timer_timer_exp = rack->r_ctl.rc_timer_exp; 14293 } 14294 rack_log_chg_info(tp, rack, 2, 14295 rack->r_ctl.rc_hpts_flags, 14296 rack->r_ctl.rc_last_output_to, 14297 rack->r_ctl.rc_timer_exp); 14298 return (1); 14299 break; 14300 case TCP_QUERY_RACK_TIMES: 14301 /* Reordering items */ 14302 reqr->rack_num_dsacks = rack->r_ctl.num_dsack; 14303 reqr->rack_reorder_ts = rack->r_ctl.rc_reorder_ts; 14304 /* Timerstamps and timers */ 14305 reqr->rack_rxt_last_time = rack->r_ctl.rc_tlp_rxt_last_time; 14306 reqr->rack_min_rtt = rack->r_ctl.rc_rack_min_rtt; 14307 reqr->rack_rtt = rack->rc_rack_rtt; 14308 reqr->rack_tmit_time = rack->r_ctl.rc_rack_tmit_time; 14309 reqr->rack_srtt_measured = rack->rc_srtt_measure_made; 14310 /* PRR data */ 14311 reqr->rack_sacked = rack->r_ctl.rc_sacked; 14312 reqr->rack_holes_rxt = rack->r_ctl.rc_holes_rxt; 14313 reqr->rack_prr_delivered = rack->r_ctl.rc_prr_delivered; 14314 reqr->rack_prr_recovery_fs = rack->r_ctl.rc_prr_recovery_fs; 
14315 reqr->rack_prr_sndcnt = rack->r_ctl.rc_prr_sndcnt; 14316 reqr->rack_prr_out = rack->r_ctl.rc_prr_out; 14317 /* TLP and persists info */ 14318 reqr->rack_tlp_out = rack->rc_tlp_in_progress; 14319 reqr->rack_tlp_cnt_out = rack->r_ctl.rc_tlp_cnt_out; 14320 if (rack->rc_in_persist) { 14321 reqr->rack_time_went_idle = rack->r_ctl.rc_went_idle_time; 14322 reqr->rack_in_persist = 1; 14323 } else { 14324 reqr->rack_time_went_idle = 0; 14325 reqr->rack_in_persist = 0; 14326 } 14327 if (rack->r_wanted_output) 14328 reqr->rack_wanted_output = 1; 14329 else 14330 reqr->rack_wanted_output = 0; 14331 return (1); 14332 break; 14333 default: 14334 return (-EINVAL); 14335 } 14336 } 14337 14338 static void 14339 rack_switch_failed(struct tcpcb *tp) 14340 { 14341 /* 14342 * This method gets called if a stack switch was 14343 * attempted and it failed. We are left 14344 * but our hpts timers were stopped and we 14345 * need to validate time units and t_flags2. 14346 */ 14347 struct tcp_rack *rack; 14348 struct timeval tv; 14349 uint32_t cts; 14350 uint32_t toval; 14351 struct hpts_diag diag; 14352 14353 rack = (struct tcp_rack *)tp->t_fb_ptr; 14354 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC); 14355 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 14356 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 14357 else 14358 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 14359 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 14360 tp->t_flags2 |= TF2_MBUF_ACKCMP; 14361 if (tp->t_in_hpts > IHPTS_NONE) { 14362 /* Strange */ 14363 return; 14364 } 14365 cts = tcp_get_usecs(&tv); 14366 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14367 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 14368 toval = rack->r_ctl.rc_last_output_to - cts; 14369 } else { 14370 /* one slot please */ 14371 toval = HPTS_TICKS_PER_SLOT; 14372 } 14373 } else if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 14374 if (TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 14375 toval = rack->r_ctl.rc_timer_exp - cts; 14376 } else { 14377 /* one slot please */ 14378 toval = HPTS_TICKS_PER_SLOT; 14379 } 14380 } else 14381 toval = HPTS_TICKS_PER_SLOT; 14382 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(toval), 14383 __LINE__, &diag); 14384 rack_log_hpts_diag(rack, cts, &diag, &tv); 14385 } 14386 14387 static int 14388 rack_init_outstanding(struct tcpcb *tp, struct tcp_rack *rack, uint32_t us_cts, void *ptr) 14389 { 14390 struct rack_sendmap *rsm, *ersm; 14391 int insret __diagused; 14392 /* 14393 * When initing outstanding, we must be quite careful 14394 * to not refer to tp->t_fb_ptr. This has the old rack 14395 * pointer in it, not the "new" one (when we are doing 14396 * a stack switch). 
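 * If the old stack exports no tfb_chg_query method we synthesize a
 * single sendmap entry covering snd_una through snd_max; otherwise we
 * walk TCP_QUERY_SENDMAP responses and rebuild each entry, including
 * its transmit times and flags.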
14397 */ 14398 14399 14400 if (tp->t_fb->tfb_chg_query == NULL) { 14401 /* Create a send map for the current outstanding data */ 14402 14403 rsm = rack_alloc(rack); 14404 if (rsm == NULL) { 14405 uma_zfree(rack_pcb_zone, ptr); 14406 return (ENOMEM); 14407 } 14408 rsm->r_no_rtt_allowed = 1; 14409 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 14410 rsm->r_rtr_cnt = 1; 14411 rsm->r_rtr_bytes = 0; 14412 if (tp->t_flags & TF_SENTFIN) 14413 rsm->r_flags |= RACK_HAS_FIN; 14414 rsm->r_end = tp->snd_max; 14415 if (tp->snd_una == tp->iss) { 14416 /* The data space is one beyond snd_una */ 14417 rsm->r_flags |= RACK_HAS_SYN; 14418 rsm->r_start = tp->iss; 14419 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); 14420 } else 14421 rsm->r_start = tp->snd_una; 14422 rsm->r_dupack = 0; 14423 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 14424 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 14425 if (rsm->m) { 14426 rsm->orig_m_len = rsm->m->m_len; 14427 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 14428 } else { 14429 rsm->orig_m_len = 0; 14430 rsm->orig_t_space = 0; 14431 } 14432 } else { 14433 /* 14434 * This can happen if we have a stand-alone FIN or 14435 * SYN. 14436 */ 14437 rsm->m = NULL; 14438 rsm->orig_m_len = 0; 14439 rsm->orig_t_space = 0; 14440 rsm->soff = 0; 14441 } 14442 #ifdef INVARIANTS 14443 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 14444 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p", 14445 insret, rack, rsm); 14446 } 14447 #else 14448 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 14449 #endif 14450 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 14451 rsm->r_in_tmap = 1; 14452 } else { 14453 /* We have a query mechanism, lets use it */ 14454 struct tcp_query_resp qr; 14455 int i; 14456 tcp_seq at; 14457 14458 at = tp->snd_una; 14459 while (at != tp->snd_max) { 14460 memset(&qr, 0, sizeof(qr)); 14461 qr.req = TCP_QUERY_SENDMAP; 14462 qr.req_param = at; 14463 if ((*tp->t_fb->tfb_chg_query)(tp, &qr) == 0) 14464 break; 14465 /* Move forward */ 14466 at = qr.sendmap_end; 14467 /* Now lets build the entry for this one */ 14468 rsm = rack_alloc(rack); 14469 if (rsm == NULL) { 14470 uma_zfree(rack_pcb_zone, ptr); 14471 return (ENOMEM); 14472 } 14473 memset(rsm, 0, sizeof(struct rack_sendmap)); 14474 /* Now configure the rsm and insert it */ 14475 rsm->r_dupack = qr.sendmap_dupacks; 14476 rsm->r_start = qr.sendmap_start; 14477 rsm->r_end = qr.sendmap_end; 14478 if (qr.sendmap_fas) 14479 rsm->r_fas = qr.sendmap_end; 14480 else 14481 rsm->r_fas = rsm->r_start - tp->snd_una; 14482 /* 14483 * We have carefully aligned the bits 14484 * so that all we have to do is copy over 14485 * the bits with the mask. 
14486 */ 14487 rsm->r_flags = qr.sendmap_flags & SNDMAP_MASK; 14488 rsm->r_rtr_bytes = qr.sendmap_r_rtr_bytes; 14489 rsm->r_rtr_cnt = qr.sendmap_send_cnt; 14490 rsm->r_ack_arrival = qr.sendmap_ack_arrival; 14491 for (i=0 ; i<rsm->r_rtr_cnt; i++) 14492 rsm->r_tim_lastsent[i] = qr.sendmap_time[i]; 14493 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 14494 (rsm->r_start - tp->snd_una), &rsm->soff); 14495 if (rsm->m) { 14496 rsm->orig_m_len = rsm->m->m_len; 14497 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 14498 } else { 14499 rsm->orig_m_len = 0; 14500 rsm->orig_t_space = 0; 14501 } 14502 #ifdef INVARIANTS 14503 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 14504 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p", 14505 insret, rack, rsm); 14506 } 14507 #else 14508 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 14509 #endif 14510 if ((rsm->r_flags & RACK_ACKED) == 0) { 14511 TAILQ_FOREACH(ersm, &rack->r_ctl.rc_tmap, r_tnext) { 14512 if (ersm->r_tim_lastsent[(ersm->r_rtr_cnt-1)] > 14513 rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) { 14514 /* 14515 * If the existing ersm was sent at 14516 * a later time than the new one, then 14517 * the new one should appear ahead of this 14518 * ersm. 14519 */ 14520 rsm->r_in_tmap = 1; 14521 TAILQ_INSERT_BEFORE(ersm, rsm, r_tnext); 14522 break; 14523 } 14524 } 14525 if (rsm->r_in_tmap == 0) { 14526 /* 14527 * Not found so shove it on the tail. 14528 */ 14529 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 14530 rsm->r_in_tmap = 1; 14531 } 14532 } else { 14533 if ((rack->r_ctl.rc_sacklast == NULL) || 14534 (SEQ_GT(rsm->r_end, rack->r_ctl.rc_sacklast->r_end))) { 14535 rack->r_ctl.rc_sacklast = rsm; 14536 } 14537 } 14538 rack_log_chg_info(tp, rack, 3, 14539 rsm->r_start, 14540 rsm->r_end, 14541 rsm->r_flags); 14542 } 14543 } 14544 return (0); 14545 } 14546 14547 14548 static int32_t 14549 rack_init(struct tcpcb *tp, void **ptr) 14550 { 14551 struct inpcb *inp = tptoinpcb(tp); 14552 struct tcp_rack *rack = NULL; 14553 uint32_t iwin, snt, us_cts; 14554 size_t sz; 14555 int err, no_query; 14556 14557 tcp_hpts_init(tp); 14558 14559 /* 14560 * First are we the initial or are we a switched stack? 14561 * If we are initing via tcp_newtcppcb the ptr passed 14562 * will be tp->t_fb_ptr. If its a stack switch that 14563 * has a previous stack we can query it will be a local 14564 * var that will in the end be set into t_fb_ptr. 14565 */ 14566 if (ptr == &tp->t_fb_ptr) 14567 no_query = 1; 14568 else 14569 no_query = 0; 14570 *ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 14571 if (*ptr == NULL) { 14572 /* 14573 * We need to allocate memory but cant. The INP and INP_INFO 14574 * locks and they are recursive (happens during setup. 
So a 14575 * scheme to drop the locks fails :( 14576 * 14577 */ 14578 return(ENOMEM); 14579 } 14580 memset(*ptr, 0, sizeof(struct tcp_rack)); 14581 rack = (struct tcp_rack *)*ptr; 14582 rack->r_ctl.tqh = malloc(sizeof(struct tailq_hash), M_TCPFSB, M_NOWAIT); 14583 if (rack->r_ctl.tqh == NULL) { 14584 uma_zfree(rack_pcb_zone, rack); 14585 return(ENOMEM); 14586 } 14587 tqhash_init(rack->r_ctl.tqh); 14588 TAILQ_INIT(&rack->r_ctl.rc_free); 14589 TAILQ_INIT(&rack->r_ctl.rc_tmap); 14590 rack->rc_tp = tp; 14591 rack->rc_inp = inp; 14592 /* Set the flag */ 14593 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0; 14594 /* Probably not needed but lets be sure */ 14595 rack_clear_rate_sample(rack); 14596 /* 14597 * Save off the default values, socket options will poke 14598 * at these if pacing is not on or we have not yet 14599 * reached where pacing is on (gp_ready/fixed enabled). 14600 * When they get set into the CC module (when gp_ready 14601 * is enabled or we enable fixed) then we will set these 14602 * values into the CC and place in here the old values 14603 * so we have a restoral. Then we will set the flag 14604 * rc_pacing_cc_set. That way whenever we turn off pacing 14605 * or switch off this stack, we will know to go restore 14606 * the saved values. 14607 * 14608 * We specifically put into the beta the ecn value for pacing. 14609 */ 14610 rack->rc_new_rnd_needed = 1; 14611 rack->r_ctl.rc_split_limit = V_tcp_map_split_limit; 14612 /* We want abe like behavior as well */ 14613 14614 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 14615 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 14616 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 14617 if (rack_fill_cw_state) 14618 rack->rc_pace_to_cwnd = 1; 14619 if (rack_pacing_min_seg) 14620 rack->r_ctl.rc_user_set_min_segs = rack_pacing_min_seg; 14621 if (use_rack_rr) 14622 rack->use_rack_rr = 1; 14623 if (rack_dnd_default) { 14624 rack->rc_pace_dnd = 1; 14625 } 14626 if (V_tcp_delack_enabled) 14627 tp->t_delayed_ack = 1; 14628 else 14629 tp->t_delayed_ack = 0; 14630 #ifdef TCP_ACCOUNTING 14631 if (rack_tcp_accounting) { 14632 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 14633 } 14634 #endif 14635 rack->r_ctl.pcm_i.cnt_alloc = RACK_DEFAULT_PCM_ARRAY; 14636 sz = (sizeof(struct rack_pcm_stats) * rack->r_ctl.pcm_i.cnt_alloc); 14637 rack->r_ctl.pcm_s = malloc(sz,M_TCPPCM, M_NOWAIT); 14638 if (rack->r_ctl.pcm_s == NULL) { 14639 rack->r_ctl.pcm_i.cnt_alloc = 0; 14640 } 14641 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss; 14642 rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_per_upper_bound_ca; 14643 if (rack_enable_shared_cwnd) 14644 rack->rack_enable_scwnd = 1; 14645 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; 14646 rack->rc_user_set_max_segs = rack_hptsi_segments; 14647 rack->r_ctl.max_reduction = rack_max_reduce; 14648 rack->rc_force_max_seg = 0; 14649 TAILQ_INIT(&rack->r_ctl.opt_list); 14650 rack->r_ctl.rc_saved_beta = V_newreno_beta_ecn; 14651 rack->r_ctl.rc_saved_beta_ecn = V_newreno_beta_ecn; 14652 if (rack_hibeta_setting) { 14653 rack->rack_hibeta = 1; 14654 if ((rack_hibeta_setting >= 50) && 14655 (rack_hibeta_setting <= 100)) { 14656 rack->r_ctl.rc_saved_beta = rack_hibeta_setting; 14657 rack->r_ctl.saved_hibeta = rack_hibeta_setting; 14658 } 14659 } else { 14660 rack->r_ctl.saved_hibeta = 50; 14661 } 14662 /* 14663 * We initialize to all ones so we never match 0 14664 * just in case the client sends in 0, it hopefully 14665 * will never have all 1's in ms :-) 14666 */ 14667 
rack->r_ctl.last_tm_mark = 0xffffffffffffffff; 14668 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 14669 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 14670 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 14671 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 14672 rack->r_ctl.rc_highest_us_rtt = 0; 14673 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 14674 rack->pcm_enabled = rack_pcm_is_enabled; 14675 if (rack_fillcw_bw_cap) 14676 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; 14677 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 14678 if (rack_use_cmp_acks) 14679 rack->r_use_cmp_ack = 1; 14680 if (rack_disable_prr) 14681 rack->rack_no_prr = 1; 14682 if (rack_gp_no_rec_chg) 14683 rack->rc_gp_no_rec_chg = 1; 14684 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 14685 rack->r_ctl.pacing_method |= RACK_REG_PACING; 14686 rack->rc_always_pace = 1; 14687 if (rack->rack_hibeta) 14688 rack_set_cc_pacing(rack); 14689 } else 14690 rack->rc_always_pace = 0; 14691 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 14692 rack->r_mbuf_queue = 1; 14693 else 14694 rack->r_mbuf_queue = 0; 14695 rack_set_pace_segments(tp, rack, __LINE__, NULL); 14696 if (rack_limits_scwnd) 14697 rack->r_limit_scw = 1; 14698 else 14699 rack->r_limit_scw = 0; 14700 rack_init_retransmit_value(rack, rack_rxt_controls); 14701 rack->rc_labc = V_tcp_abc_l_var; 14702 if (rack_honors_hpts_min_to) 14703 rack->r_use_hpts_min = 1; 14704 if (tp->snd_una != 0) { 14705 rack->rc_sendvars_notset = 0; 14706 /* 14707 * Make sure any TCP timers are not running. 14708 */ 14709 tcp_timer_stop(tp); 14710 } else { 14711 /* 14712 * Server side, we are called from the 14713 * syn-cache. This means none of the 14714 * snd_una/max are set yet so we have 14715 * to defer this until the first send. 
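 * (rack_deferred_init() is what eventually fills in roundends,
 * rc_high_rwnd and cwnd_to_use once valid send variables exist.)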
14716 */ 14717 rack->rc_sendvars_notset = 1; 14718 } 14719 14720 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; 14721 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 14722 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 14723 rack->r_ctl.rc_min_to = rack_min_to; 14724 microuptime(&rack->r_ctl.act_rcv_time); 14725 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 14726 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; 14727 if (rack_hw_up_only) 14728 rack->r_up_only = 1; 14729 if (rack_do_dyn_mul) { 14730 /* When dynamic adjustment is on CA needs to start at 100% */ 14731 rack->rc_gp_dyn_mul = 1; 14732 if (rack_do_dyn_mul >= 100) 14733 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 14734 } else 14735 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 14736 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 14737 if (rack_timely_off) { 14738 rack->rc_skip_timely = 1; 14739 } 14740 if (rack->rc_skip_timely) { 14741 rack->r_ctl.rack_per_of_gp_rec = 90; 14742 rack->r_ctl.rack_per_of_gp_ca = 100; 14743 rack->r_ctl.rack_per_of_gp_ss = 250; 14744 } 14745 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 14746 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 14747 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 14748 14749 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 14750 rack_probertt_filter_life); 14751 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 14752 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 14753 rack->r_ctl.rc_time_of_last_probertt = us_cts; 14754 rack->r_ctl.rc_went_idle_time = us_cts; 14755 rack->r_ctl.rc_time_probertt_starts = 0; 14756 14757 rack->r_ctl.gp_rnd_thresh = rack_rnd_cnt_req & 0xff; 14758 if (rack_rnd_cnt_req & 0x10000) 14759 rack->r_ctl.gate_to_fs = 1; 14760 rack->r_ctl.gp_gain_req = rack_gp_gain_req; 14761 if ((rack_rnd_cnt_req & 0x100) > 0) { 14762 14763 } 14764 if (rack_dsack_std_based & 0x1) { 14765 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 14766 rack->rc_rack_tmr_std_based = 1; 14767 } 14768 if (rack_dsack_std_based & 0x2) { 14769 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 14770 rack->rc_rack_use_dsack = 1; 14771 } 14772 /* We require at least one measurement, even if the sysctl is 0 */ 14773 if (rack_req_measurements) 14774 rack->r_ctl.req_measurements = rack_req_measurements; 14775 else 14776 rack->r_ctl.req_measurements = 1; 14777 if (rack_enable_hw_pacing) 14778 rack->rack_hdw_pace_ena = 1; 14779 if (rack_hw_rate_caps) 14780 rack->r_rack_hw_rate_caps = 1; 14781 if (rack_non_rxt_use_cr) 14782 rack->rack_rec_nonrxt_use_cr = 1; 14783 /* Lets setup the fsb block */ 14784 err = rack_init_fsb(tp, rack); 14785 if (err) { 14786 uma_zfree(rack_pcb_zone, *ptr); 14787 *ptr = NULL; 14788 return (err); 14789 } 14790 if (rack_do_hystart) { 14791 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 14792 if (rack_do_hystart > 1) 14793 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 14794 if (rack_do_hystart > 2) 14795 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 14796 } 14797 /* Log what we will do with queries */ 14798 rack_log_chg_info(tp, rack, 7, 14799 no_query, 0, 0); 14800 if (rack_def_profile) 14801 rack_set_profile(rack, rack_def_profile); 14802 /* Cancel the GP measurement in progress */ 14803 tp->t_flags &= ~TF_GPUTINPROG; 14804 if ((tp->t_state != TCPS_CLOSED) && 14805 (tp->t_state != TCPS_TIME_WAIT)) { 14806 /* 14807 * We are already open, we may 14808 * 
need to adjust a few things. 14809 */ 14810 if (SEQ_GT(tp->snd_max, tp->iss)) 14811 snt = tp->snd_max - tp->iss; 14812 else 14813 snt = 0; 14814 iwin = rc_init_window(rack); 14815 if ((snt < iwin) && 14816 (no_query == 1)) { 14817 /* We are not past the initial window 14818 * on the first init (i.e. a stack switch 14819 * has not yet occured) so we need to make 14820 * sure cwnd and ssthresh is correct. 14821 */ 14822 if (tp->snd_cwnd < iwin) 14823 tp->snd_cwnd = iwin; 14824 /* 14825 * If we are within the initial window 14826 * we want ssthresh to be unlimited. Setting 14827 * it to the rwnd (which the default stack does 14828 * and older racks) is not really a good idea 14829 * since we want to be in SS and grow both the 14830 * cwnd and the rwnd (via dynamic rwnd growth). If 14831 * we set it to the rwnd then as the peer grows its 14832 * rwnd we will be stuck in CA and never hit SS. 14833 * 14834 * Its far better to raise it up high (this takes the 14835 * risk that there as been a loss already, probably 14836 * we should have an indicator in all stacks of loss 14837 * but we don't), but considering the normal use this 14838 * is a risk worth taking. The consequences of not 14839 * hitting SS are far worse than going one more time 14840 * into it early on (before we have sent even a IW). 14841 * It is highly unlikely that we will have had a loss 14842 * before getting the IW out. 14843 */ 14844 tp->snd_ssthresh = 0xffffffff; 14845 } 14846 /* 14847 * Any init based on sequence numbers 14848 * should be done in the deferred init path 14849 * since we can be CLOSED and not have them 14850 * inited when rack_init() is called. We 14851 * are not closed so lets call it. 14852 */ 14853 rack_deferred_init(tp, rack); 14854 } 14855 if ((tp->t_state != TCPS_CLOSED) && 14856 (tp->t_state != TCPS_TIME_WAIT) && 14857 (no_query == 0) && 14858 (tp->snd_una != tp->snd_max)) { 14859 err = rack_init_outstanding(tp, rack, us_cts, *ptr); 14860 if (err) { 14861 *ptr = NULL; 14862 return(err); 14863 } 14864 } 14865 rack_stop_all_timers(tp, rack); 14866 /* Setup all the t_flags2 */ 14867 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 14868 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 14869 else 14870 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 14871 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 14872 tp->t_flags2 |= TF2_MBUF_ACKCMP; 14873 /* 14874 * Timers in Rack are kept in microseconds so lets 14875 * convert any initial incoming variables 14876 * from ticks into usecs. Note that we 14877 * also change the values of t_srtt and t_rttvar, if 14878 * they are non-zero. They are kept with a 5 14879 * bit decimal so we have to carefully convert 14880 * these to get the full precision. 
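 * For example, assuming hz = 1000, a t_srtt of 320 is 10 ticks
 * (320 >> 5), i.e. 10 ms, which ends up as 10000 usec after the
 * conversion.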
14881 */ 14882 rack_convert_rtts(tp); 14883 rack_log_hystart_event(rack, rack->r_ctl.roundends, 20); 14884 if ((tptoinpcb(tp)->inp_flags & INP_DROPPED) == 0) { 14885 /* We do not start any timers on DROPPED connections */ 14886 if (tp->t_fb->tfb_chg_query == NULL) { 14887 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 14888 } else { 14889 struct tcp_query_resp qr; 14890 int ret; 14891 14892 memset(&qr, 0, sizeof(qr)); 14893 14894 /* Get the misc time stamps and such for rack */ 14895 qr.req = TCP_QUERY_RACK_TIMES; 14896 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); 14897 if (ret == 1) { 14898 rack->r_ctl.rc_reorder_ts = qr.rack_reorder_ts; 14899 rack->r_ctl.num_dsack = qr.rack_num_dsacks; 14900 rack->r_ctl.rc_tlp_rxt_last_time = qr.rack_rxt_last_time; 14901 rack->r_ctl.rc_rack_min_rtt = qr.rack_min_rtt; 14902 rack->rc_rack_rtt = qr.rack_rtt; 14903 rack->r_ctl.rc_rack_tmit_time = qr.rack_tmit_time; 14904 rack->r_ctl.rc_sacked = qr.rack_sacked; 14905 rack->r_ctl.rc_holes_rxt = qr.rack_holes_rxt; 14906 rack->r_ctl.rc_prr_delivered = qr.rack_prr_delivered; 14907 rack->r_ctl.rc_prr_recovery_fs = qr.rack_prr_recovery_fs; 14908 rack->r_ctl.rc_prr_sndcnt = qr.rack_prr_sndcnt; 14909 rack->r_ctl.rc_prr_out = qr.rack_prr_out; 14910 if (qr.rack_tlp_out) { 14911 rack->rc_tlp_in_progress = 1; 14912 rack->r_ctl.rc_tlp_cnt_out = qr.rack_tlp_cnt_out; 14913 } else { 14914 rack->rc_tlp_in_progress = 0; 14915 rack->r_ctl.rc_tlp_cnt_out = 0; 14916 } 14917 if (qr.rack_srtt_measured) 14918 rack->rc_srtt_measure_made = 1; 14919 if (qr.rack_in_persist == 1) { 14920 rack->r_ctl.rc_went_idle_time = qr.rack_time_went_idle; 14921 #ifdef NETFLIX_SHARED_CWND 14922 if (rack->r_ctl.rc_scw) { 14923 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 14924 rack->rack_scwnd_is_idle = 1; 14925 } 14926 #endif 14927 rack->r_ctl.persist_lost_ends = 0; 14928 rack->probe_not_answered = 0; 14929 rack->forced_ack = 0; 14930 tp->t_rxtshift = 0; 14931 rack->rc_in_persist = 1; 14932 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 14933 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 14934 } 14935 if (qr.rack_wanted_output) 14936 rack->r_wanted_output = 1; 14937 rack_log_chg_info(tp, rack, 6, 14938 qr.rack_min_rtt, 14939 qr.rack_rtt, 14940 qr.rack_reorder_ts); 14941 } 14942 /* Get the old stack timers */ 14943 qr.req_param = 0; 14944 qr.req = TCP_QUERY_TIMERS_UP; 14945 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); 14946 if (ret) { 14947 /* 14948 * non-zero return means we have a timer('s) 14949 * to start. Zero means no timer (no keepalive 14950 * I suppose). 
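 *
 * Two kinds of deadline can come back from the old stack: a pacing
 * deadline (PACE_PKT_OUTPUT) and the protocol timers (PACE_TMR_MASK).
 * For whichever is armed, the code below works out the remaining time
 * roughly as
 *
 *	tov = deadline - us_cts;	(or one HPTS slot if the deadline
 *					 has already passed)
 *
 * and re-inserts the connection into hpts for that many usecs, logging
 * the hpts diagnostics so the handoff can be inspected later.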
14951 */ 14952 uint32_t tov = 0; 14953 14954 rack->r_ctl.rc_hpts_flags = qr.timer_hpts_flags; 14955 if (qr.timer_hpts_flags & PACE_PKT_OUTPUT) { 14956 rack->r_ctl.rc_last_output_to = qr.timer_pacing_to; 14957 if (TSTMP_GT(qr.timer_pacing_to, us_cts)) 14958 tov = qr.timer_pacing_to - us_cts; 14959 else 14960 tov = HPTS_TICKS_PER_SLOT; 14961 } 14962 if (qr.timer_hpts_flags & PACE_TMR_MASK) { 14963 rack->r_ctl.rc_timer_exp = qr.timer_timer_exp; 14964 if (tov == 0) { 14965 if (TSTMP_GT(qr.timer_timer_exp, us_cts)) 14966 tov = qr.timer_timer_exp - us_cts; 14967 else 14968 tov = HPTS_TICKS_PER_SLOT; 14969 } 14970 } 14971 rack_log_chg_info(tp, rack, 4, 14972 rack->r_ctl.rc_hpts_flags, 14973 rack->r_ctl.rc_last_output_to, 14974 rack->r_ctl.rc_timer_exp); 14975 if (tov) { 14976 struct hpts_diag diag; 14977 14978 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(tov), 14979 __LINE__, &diag); 14980 rack_log_hpts_diag(rack, us_cts, &diag, &rack->r_ctl.act_rcv_time); 14981 } 14982 } 14983 } 14984 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, 14985 __LINE__, RACK_RTTS_INIT); 14986 } 14987 return (0); 14988 } 14989 14990 static int 14991 rack_handoff_ok(struct tcpcb *tp) 14992 { 14993 if ((tp->t_state == TCPS_CLOSED) || 14994 (tp->t_state == TCPS_LISTEN)) { 14995 /* Sure no problem though it may not stick */ 14996 return (0); 14997 } 14998 if ((tp->t_state == TCPS_SYN_SENT) || 14999 (tp->t_state == TCPS_SYN_RECEIVED)) { 15000 /* 15001 * We really don't know if you support sack, 15002 * you have to get to ESTAB or beyond to tell. 15003 */ 15004 return (EAGAIN); 15005 } 15006 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { 15007 /* 15008 * Rack will only send a FIN after all data is acknowledged. 15009 * So in this case we have more data outstanding. We can't 15010 * switch stacks until either all data and only the FIN 15011 * is left (in which case rack_init() now knows how 15012 * to deal with that) <or> all is acknowledged and we 15013 * are only left with incoming data, though why you 15014 * would want to switch to rack after all data is acknowledged 15015 * I have no idea (rrs)! 15016 */ 15017 return (EAGAIN); 15018 } 15019 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 15020 return (0); 15021 } 15022 /* 15023 * If we reach here we don't do SACK on this connection so we can 15024 * never do rack. 
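 *
 * To summarize the return contract of this function as implemented
 * above:
 *
 *	0	- a switch to rack is fine right now
 *	EAGAIN	- cannot tell yet (SYN states, or a FIN sent with more
 *		  than just the FIN outstanding); the caller may retry
 *	EINVAL	- never, SACK was not negotiated and is not being waived
 *		  by rack_sack_not_required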
15025 */ 15026 return (EINVAL); 15027 } 15028 15029 static void 15030 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 15031 { 15032 15033 if (tp->t_fb_ptr) { 15034 uint32_t cnt_free = 0; 15035 struct tcp_rack *rack; 15036 struct rack_sendmap *rsm; 15037 15038 tcp_handle_orphaned_packets(tp); 15039 tp->t_flags &= ~TF_FORCEDATA; 15040 rack = (struct tcp_rack *)tp->t_fb_ptr; 15041 rack_log_pacing_delay_calc(rack, 15042 0, 15043 0, 15044 0, 15045 rack_get_gp_est(rack), /* delRate */ 15046 rack_get_lt_bw(rack), /* rttProp */ 15047 20, __LINE__, NULL, 0); 15048 #ifdef NETFLIX_SHARED_CWND 15049 if (rack->r_ctl.rc_scw) { 15050 uint32_t limit; 15051 15052 if (rack->r_limit_scw) 15053 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 15054 else 15055 limit = 0; 15056 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 15057 rack->r_ctl.rc_scw_index, 15058 limit); 15059 rack->r_ctl.rc_scw = NULL; 15060 } 15061 #endif 15062 if (rack->r_ctl.fsb.tcp_ip_hdr) { 15063 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); 15064 rack->r_ctl.fsb.tcp_ip_hdr = NULL; 15065 rack->r_ctl.fsb.th = NULL; 15066 } 15067 if (rack->rc_always_pace == 1) { 15068 rack_remove_pacing(rack); 15069 } 15070 /* Clean up any options if they were not applied */ 15071 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 15072 struct deferred_opt_list *dol; 15073 15074 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 15075 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 15076 free(dol, M_TCPDO); 15077 } 15078 /* rack does not use force data but other stacks may clear it */ 15079 if (rack->r_ctl.crte != NULL) { 15080 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 15081 rack->rack_hdrw_pacing = 0; 15082 rack->r_ctl.crte = NULL; 15083 } 15084 #ifdef TCP_BLACKBOX 15085 tcp_log_flowend(tp); 15086 #endif 15087 /* 15088 * Lets take a different approach to purging just 15089 * get each one and free it like a cum-ack would and 15090 * not use a foreach loop. 
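 *
 * In other words the loops below boil down to this pattern (sketch,
 * omitting the allocation accounting):
 *
 *	while ((rsm = tqhash_min(rack->r_ctl.tqh)) != NULL) {
 *		tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK);
 *		uma_zfree(rack_zone, rsm);
 *	}
 *
 * which never iterates a structure whose nodes are being freed out from
 * under it.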
15091 */ 15092 rsm = tqhash_min(rack->r_ctl.tqh); 15093 while (rsm) { 15094 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); 15095 rack->r_ctl.rc_num_maps_alloced--; 15096 uma_zfree(rack_zone, rsm); 15097 rsm = tqhash_min(rack->r_ctl.tqh); 15098 } 15099 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15100 while (rsm) { 15101 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 15102 rack->r_ctl.rc_num_maps_alloced--; 15103 rack->rc_free_cnt--; 15104 cnt_free++; 15105 uma_zfree(rack_zone, rsm); 15106 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15107 } 15108 if (rack->r_ctl.pcm_s != NULL) { 15109 free(rack->r_ctl.pcm_s, M_TCPPCM); 15110 rack->r_ctl.pcm_s = NULL; 15111 rack->r_ctl.pcm_i.cnt_alloc = 0; 15112 rack->r_ctl.pcm_i.cnt = 0; 15113 } 15114 if ((rack->r_ctl.rc_num_maps_alloced > 0) && 15115 (tcp_bblogging_on(tp))) { 15116 union tcp_log_stackspecific log; 15117 struct timeval tv; 15118 15119 memset(&log, 0, sizeof(log)); 15120 log.u_bbr.flex8 = 10; 15121 log.u_bbr.flex1 = rack->r_ctl.rc_num_maps_alloced; 15122 log.u_bbr.flex2 = rack->rc_free_cnt; 15123 log.u_bbr.flex3 = cnt_free; 15124 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15125 rsm = tqhash_min(rack->r_ctl.tqh); 15126 log.u_bbr.delRate = (uintptr_t)rsm; 15127 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15128 log.u_bbr.cur_del_rate = (uintptr_t)rsm; 15129 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15130 log.u_bbr.pkt_epoch = __LINE__; 15131 (void)tcp_log_event(tp, NULL, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 15132 0, &log, false, NULL, NULL, 0, &tv); 15133 } 15134 KASSERT((rack->r_ctl.rc_num_maps_alloced == 0), 15135 ("rack:%p num_aloc:%u after freeing all?", 15136 rack, 15137 rack->r_ctl.rc_num_maps_alloced)); 15138 rack->rc_free_cnt = 0; 15139 free(rack->r_ctl.tqh, M_TCPFSB); 15140 rack->r_ctl.tqh = NULL; 15141 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 15142 tp->t_fb_ptr = NULL; 15143 } 15144 /* Make sure snd_nxt is correctly set */ 15145 tp->snd_nxt = tp->snd_max; 15146 } 15147 15148 static void 15149 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 15150 { 15151 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 15152 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0; 15153 } 15154 switch (tp->t_state) { 15155 case TCPS_SYN_SENT: 15156 rack->r_state = TCPS_SYN_SENT; 15157 rack->r_substate = rack_do_syn_sent; 15158 break; 15159 case TCPS_SYN_RECEIVED: 15160 rack->r_state = TCPS_SYN_RECEIVED; 15161 rack->r_substate = rack_do_syn_recv; 15162 break; 15163 case TCPS_ESTABLISHED: 15164 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15165 rack->r_state = TCPS_ESTABLISHED; 15166 rack->r_substate = rack_do_established; 15167 break; 15168 case TCPS_CLOSE_WAIT: 15169 rack->r_state = TCPS_CLOSE_WAIT; 15170 rack->r_substate = rack_do_close_wait; 15171 break; 15172 case TCPS_FIN_WAIT_1: 15173 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15174 rack->r_state = TCPS_FIN_WAIT_1; 15175 rack->r_substate = rack_do_fin_wait_1; 15176 break; 15177 case TCPS_CLOSING: 15178 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15179 rack->r_state = TCPS_CLOSING; 15180 rack->r_substate = rack_do_closing; 15181 break; 15182 case TCPS_LAST_ACK: 15183 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15184 rack->r_state = TCPS_LAST_ACK; 15185 rack->r_substate = rack_do_lastack; 15186 break; 15187 case TCPS_FIN_WAIT_2: 15188 rack->r_state = TCPS_FIN_WAIT_2; 15189 rack->r_substate = rack_do_fin_wait_2; 15190 break; 15191 case TCPS_LISTEN: 15192 case TCPS_CLOSED: 15193 case TCPS_TIME_WAIT: 15194 default: 
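		/*
		 * Listen, closed and time-wait have no rack substate
		 * handler; r_state/r_substate are intentionally left
		 * untouched here.
		 */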
15195 break; 15196 }; 15197 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 15198 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 15199 15200 } 15201 15202 static void 15203 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 15204 { 15205 /* 15206 * We received an ack, and then did not 15207 * call send or were bounced out due to the 15208 * hpts was running. Now a timer is up as well, is 15209 * it the right timer? 15210 */ 15211 struct rack_sendmap *rsm; 15212 int tmr_up; 15213 15214 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 15215 if (tcp_in_hpts(rack->rc_tp) == 0) { 15216 /* 15217 * Ok we probably need some timer up, but no 15218 * matter what the mask we are not in hpts. We 15219 * may have received an old ack and thus did nothing. 15220 */ 15221 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 15222 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 15223 return; 15224 } 15225 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 15226 return; 15227 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 15228 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 15229 (tmr_up == PACE_TMR_RXT)) { 15230 /* Should be an RXT */ 15231 return; 15232 } 15233 if (rsm == NULL) { 15234 /* Nothing outstanding? */ 15235 if (tp->t_flags & TF_DELACK) { 15236 if (tmr_up == PACE_TMR_DELACK) 15237 /* We are supposed to have delayed ack up and we do */ 15238 return; 15239 } else if (((V_tcp_always_keepalive || 15240 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 15241 (tp->t_state <= TCPS_CLOSING)) && 15242 (tmr_up == PACE_TMR_KEEP) && 15243 (tp->snd_max == tp->snd_una)) { 15244 /* We should have keep alive up and we do */ 15245 return; 15246 } 15247 } 15248 if (SEQ_GT(tp->snd_max, tp->snd_una) && 15249 ((tmr_up == PACE_TMR_TLP) || 15250 (tmr_up == PACE_TMR_RACK) || 15251 (tmr_up == PACE_TMR_RXT))) { 15252 /* 15253 * Either a Rack, TLP or RXT is fine if we 15254 * have outstanding data. 15255 */ 15256 return; 15257 } else if (tmr_up == PACE_TMR_DELACK) { 15258 /* 15259 * If the delayed ack was going to go off 15260 * before the rtx/tlp/rack timer were going to 15261 * expire, then that would be the timer in control. 15262 * Note we don't check the time here trusting the 15263 * code is correct. 15264 */ 15265 return; 15266 } 15267 /* 15268 * Ok the timer originally started is not what we want now. 15269 * We will force the hpts to be stopped if any, and restart 15270 * with the slot set to what was in the saved slot. 
15271 */ 15272 if (tcp_in_hpts(rack->rc_tp)) { 15273 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 15274 uint32_t us_cts; 15275 15276 us_cts = tcp_get_usecs(NULL); 15277 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 15278 rack->r_early = 1; 15279 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 15280 } 15281 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 15282 } 15283 tcp_hpts_remove(rack->rc_tp); 15284 } 15285 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 15286 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 15287 } 15288 15289 15290 static void 15291 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts) 15292 { 15293 if ((SEQ_LT(tp->snd_wl1, seq) || 15294 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || 15295 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { 15296 /* keep track of pure window updates */ 15297 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) 15298 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 15299 tp->snd_wnd = tiwin; 15300 rack_validate_fo_sendwin_up(tp, rack); 15301 tp->snd_wl1 = seq; 15302 tp->snd_wl2 = ack; 15303 if (tp->snd_wnd > tp->max_sndwnd) 15304 tp->max_sndwnd = tp->snd_wnd; 15305 rack->r_wanted_output = 1; 15306 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { 15307 tp->snd_wnd = tiwin; 15308 rack_validate_fo_sendwin_up(tp, rack); 15309 tp->snd_wl1 = seq; 15310 tp->snd_wl2 = ack; 15311 } else { 15312 /* Not a valid win update */ 15313 return; 15314 } 15315 if (tp->snd_wnd > tp->max_sndwnd) 15316 tp->max_sndwnd = tp->snd_wnd; 15317 /* Do we exit persists? */ 15318 if ((rack->rc_in_persist != 0) && 15319 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 15320 rack->r_ctl.rc_pace_min_segs))) { 15321 rack_exit_persist(tp, rack, cts); 15322 } 15323 /* Do we enter persists? */ 15324 if ((rack->rc_in_persist == 0) && 15325 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 15326 TCPS_HAVEESTABLISHED(tp->t_state) && 15327 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 15328 sbavail(&tptosocket(tp)->so_snd) && 15329 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 15330 /* 15331 * Here the rwnd is less than 15332 * the pacing size, we are established, 15333 * nothing is outstanding, and there is 15334 * data to send. Enter persists. 
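 *
 * As a purely illustrative example: with rc_high_rwnd at 64kB and
 * rc_pace_min_segs at 1448 bytes, the threshold above is
 * min(32768, 1448) = 1448, so once the peer offers less than one pacing
 * segment of window (and everything sent is acked, or the window has
 * collapsed) we park the connection in persists instead of trying to
 * pace into a window we cannot fill.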
15335 */ 15336 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, ack); 15337 } 15338 } 15339 15340 static void 15341 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq) 15342 { 15343 15344 if (tcp_bblogging_on(rack->rc_tp)) { 15345 struct inpcb *inp = tptoinpcb(tp); 15346 union tcp_log_stackspecific log; 15347 struct timeval ltv; 15348 char tcp_hdr_buf[60]; 15349 struct tcphdr *th; 15350 struct timespec ts; 15351 uint32_t orig_snd_una; 15352 uint8_t xx = 0; 15353 15354 #ifdef TCP_REQUEST_TRK 15355 struct tcp_sendfile_track *tcp_req; 15356 15357 if (SEQ_GT(ae->ack, tp->snd_una)) { 15358 tcp_req = tcp_req_find_req_for_seq(tp, (ae->ack-1)); 15359 } else { 15360 tcp_req = tcp_req_find_req_for_seq(tp, ae->ack); 15361 } 15362 #endif 15363 memset(&log, 0, sizeof(log)); 15364 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 15365 if (rack->rack_no_prr == 0) 15366 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 15367 else 15368 log.u_bbr.flex1 = 0; 15369 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 15370 log.u_bbr.use_lt_bw <<= 1; 15371 log.u_bbr.use_lt_bw |= rack->r_might_revert; 15372 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 15373 log.u_bbr.bbr_state = rack->rc_free_cnt; 15374 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 15375 log.u_bbr.pkts_out = tp->t_maxseg; 15376 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 15377 log.u_bbr.flex7 = 1; 15378 log.u_bbr.lost = ae->flags; 15379 log.u_bbr.cwnd_gain = ackval; 15380 log.u_bbr.pacing_gain = 0x2; 15381 if (ae->flags & TSTMP_HDWR) { 15382 /* Record the hardware timestamp if present */ 15383 log.u_bbr.flex3 = M_TSTMP; 15384 ts.tv_sec = ae->timestamp / 1000000000; 15385 ts.tv_nsec = ae->timestamp % 1000000000; 15386 ltv.tv_sec = ts.tv_sec; 15387 ltv.tv_usec = ts.tv_nsec / 1000; 15388 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 15389 } else if (ae->flags & TSTMP_LRO) { 15390 /* Record the LRO the arrival timestamp */ 15391 log.u_bbr.flex3 = M_TSTMP_LRO; 15392 ts.tv_sec = ae->timestamp / 1000000000; 15393 ts.tv_nsec = ae->timestamp % 1000000000; 15394 ltv.tv_sec = ts.tv_sec; 15395 ltv.tv_usec = ts.tv_nsec / 1000; 15396 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 15397 } 15398 log.u_bbr.timeStamp = tcp_get_usecs(<v); 15399 /* Log the rcv time */ 15400 log.u_bbr.delRate = ae->timestamp; 15401 #ifdef TCP_REQUEST_TRK 15402 log.u_bbr.applimited = tp->t_tcpreq_closed; 15403 log.u_bbr.applimited <<= 8; 15404 log.u_bbr.applimited |= tp->t_tcpreq_open; 15405 log.u_bbr.applimited <<= 8; 15406 log.u_bbr.applimited |= tp->t_tcpreq_req; 15407 if (tcp_req) { 15408 /* Copy out any client req info */ 15409 /* seconds */ 15410 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); 15411 /* useconds */ 15412 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); 15413 log.u_bbr.rttProp = tcp_req->timestamp; 15414 log.u_bbr.cur_del_rate = tcp_req->start; 15415 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { 15416 log.u_bbr.flex8 |= 1; 15417 } else { 15418 log.u_bbr.flex8 |= 2; 15419 log.u_bbr.bw_inuse = tcp_req->end; 15420 } 15421 log.u_bbr.flex6 = tcp_req->start_seq; 15422 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { 15423 log.u_bbr.flex8 |= 4; 15424 log.u_bbr.epoch = tcp_req->end_seq; 15425 } 15426 } 15427 #endif 15428 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf)); 15429 th = (struct tcphdr *)tcp_hdr_buf; 15430 th->th_seq = ae->seq; 15431 th->th_ack = ae->ack; 15432 th->th_win = ae->win; 15433 /* Now fill in the ports */ 15434 th->th_sport = inp->inp_fport; 15435 
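			/*
			 * Note the direction here: this header describes an
			 * inbound segment, so the peer's (foreign) port is
			 * the source port and our local port is the
			 * destination.
			 */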
th->th_dport = inp->inp_lport; 15436 tcp_set_flags(th, ae->flags); 15437 /* Now do we have a timestamp option? */ 15438 if (ae->flags & HAS_TSTMP) { 15439 u_char *cp; 15440 uint32_t val; 15441 15442 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); 15443 cp = (u_char *)(th + 1); 15444 *cp = TCPOPT_NOP; 15445 cp++; 15446 *cp = TCPOPT_NOP; 15447 cp++; 15448 *cp = TCPOPT_TIMESTAMP; 15449 cp++; 15450 *cp = TCPOLEN_TIMESTAMP; 15451 cp++; 15452 val = htonl(ae->ts_value); 15453 bcopy((char *)&val, 15454 (char *)cp, sizeof(uint32_t)); 15455 val = htonl(ae->ts_echo); 15456 bcopy((char *)&val, 15457 (char *)(cp + 4), sizeof(uint32_t)); 15458 } else 15459 th->th_off = (sizeof(struct tcphdr) >> 2); 15460 15461 /* 15462 * For sane logging we need to play a little trick. 15463 * If the ack were fully processed we would have moved 15464 * snd_una to high_seq, but since compressed acks are 15465 * processed in two phases, at this point (logging) snd_una 15466 * won't be advanced. So we would see multiple acks showing 15467 * the advancement. We can prevent that by "pretending" that 15468 * snd_una was advanced and then un-advancing it so that the 15469 * logging code has the right value for tlb_snd_una. 15470 */ 15471 if (tp->snd_una != high_seq) { 15472 orig_snd_una = tp->snd_una; 15473 tp->snd_una = high_seq; 15474 xx = 1; 15475 } else 15476 xx = 0; 15477 TCP_LOG_EVENTP(tp, th, 15478 &tptosocket(tp)->so_rcv, 15479 &tptosocket(tp)->so_snd, TCP_LOG_IN, 0, 15480 0, &log, true, <v); 15481 if (xx) { 15482 tp->snd_una = orig_snd_una; 15483 } 15484 } 15485 15486 } 15487 15488 static void 15489 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts) 15490 { 15491 uint32_t us_rtt; 15492 /* 15493 * A persist or keep-alive was forced out, update our 15494 * min rtt time. Note now worry about lost responses. 15495 * When a subsequent keep-alive or persist times out 15496 * and forced_ack is still on, then the last probe 15497 * was not responded to. In such cases we have a 15498 * sysctl that controls the behavior. Either we apply 15499 * the rtt but with reduced confidence (0). Or we just 15500 * plain don't apply the rtt estimate. Having data flow 15501 * will clear the probe_not_answered flag i.e. cum-ack 15502 * move forward <or> exiting and reentering persists. 15503 */ 15504 15505 rack->forced_ack = 0; 15506 rack->rc_tp->t_rxtshift = 0; 15507 if ((rack->rc_in_persist && 15508 (tiwin == rack->rc_tp->snd_wnd)) || 15509 (rack->rc_in_persist == 0)) { 15510 /* 15511 * In persists only apply the RTT update if this is 15512 * a response to our window probe. And that 15513 * means the rwnd sent must match the current 15514 * snd_wnd. If it does not, then we got a 15515 * window update ack instead. For keepalive 15516 * we allow the answer no matter what the window. 15517 * 15518 * Note that if the probe_not_answered is set then 15519 * the forced_ack_ts is the oldest one i.e. the first 15520 * probe sent that might have been lost. This assures 15521 * us that if we do calculate an RTT it is longer not 15522 * some short thing. 
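 *
 * Sketch of what follows: the sample is simply
 *
 *	us_rtt = us_cts - r_ctl.forced_ack_ts;	(clamped to at least 1)
 *
 * and it is handed to the RTT machinery with confidence 3 when the
 * probe was answered on the first try, or with confidence 0 (and only
 * if rack_apply_rtt_with_reduced_conf allows it) when an earlier probe
 * may have been lost.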
15523 */ 15524 if (rack->rc_in_persist) 15525 counter_u64_add(rack_persists_acks, 1); 15526 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 15527 if (us_rtt == 0) 15528 us_rtt = 1; 15529 if (rack->probe_not_answered == 0) { 15530 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 15531 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); 15532 } else { 15533 /* We have a retransmitted probe here too */ 15534 if (rack_apply_rtt_with_reduced_conf) { 15535 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 15536 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1); 15537 } 15538 } 15539 } 15540 } 15541 15542 static void 15543 rack_new_round_starts(struct tcpcb *tp, struct tcp_rack *rack, uint32_t high_seq) 15544 { 15545 /* 15546 * The next send has occurred; mark the end of the round 15547 * as when that data gets acknowledged. We can 15548 * also do common things we might need to do when 15549 * a round begins. 15550 */ 15551 rack->r_ctl.roundends = tp->snd_max; 15552 rack->rc_new_rnd_needed = 0; 15553 rack_log_hystart_event(rack, tp->snd_max, 4); 15554 } 15555 15556 15557 static void 15558 rack_log_pcm(struct tcp_rack *rack, uint8_t mod, uint32_t flex1, uint32_t flex2, 15559 uint32_t flex3) 15560 { 15561 if (tcp_bblogging_on(rack->rc_tp)) { 15562 union tcp_log_stackspecific log; 15563 struct timeval tv; 15564 15565 (void)tcp_get_usecs(&tv); 15566 memset(&log, 0, sizeof(log)); 15567 log.u_bbr.timeStamp = tcp_tv_to_usectick(&tv); 15568 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15569 log.u_bbr.flex8 = mod; 15570 log.u_bbr.flex1 = flex1; 15571 log.u_bbr.flex2 = flex2; 15572 log.u_bbr.flex3 = flex3; 15573 log.u_bbr.flex4 = rack_pcm_every_n_rounds; 15574 log.u_bbr.flex5 = rack->r_ctl.pcm_idle_rounds; 15575 log.u_bbr.bbr_substate = rack->pcm_needed; 15576 log.u_bbr.bbr_substate <<= 1; 15577 log.u_bbr.bbr_substate |= rack->pcm_in_progress; 15578 log.u_bbr.bbr_substate <<= 1; 15579 log.u_bbr.bbr_substate |= rack->pcm_enabled; /* bits are NIE for Needed, Inprogress, Enabled */ 15580 (void)tcp_log_event(rack->rc_tp, NULL, NULL, NULL, TCP_PCM_MEASURE, ERRNO_UNK, 15581 0, &log, false, NULL, NULL, 0, &tv); 15582 } 15583 } 15584 15585 static void 15586 rack_new_round_setup(struct tcpcb *tp, struct tcp_rack *rack, uint32_t high_seq) 15587 { 15588 /* 15589 * The round (current_round) has ended. We now 15590 * set up for the next round by incrementing the 15591 * round number and doing any round specific 15592 * things. 15593 */ 15594 rack_log_hystart_event(rack, high_seq, 21); 15595 rack->r_ctl.current_round++; 15596 /* New round (current_round) begins at next send */ 15597 rack->rc_new_rnd_needed = 1; 15598 if ((rack->pcm_enabled == 1) && 15599 (rack->pcm_needed == 0) && 15600 (rack->pcm_in_progress == 0)) { 15601 /* 15602 * If we have enabled PCM, then we need to 15603 * check if the round has advanced to the state 15604 * where one is required.
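 *
 * That check is just arithmetic on the round counters (sketch):
 *
 *	rnds = current_round - last_pcm_round;
 *	if (rnds + pcm_idle_rounds >= rack_pcm_every_n_rounds)
 *		pcm_needed = 1;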
15605 */ 15606 int rnds; 15607 15608 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; 15609 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { 15610 rack->pcm_needed = 1; 15611 rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_round ); 15612 } else if (rack_verbose_logging) { 15613 rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_round ); 15614 } 15615 } 15616 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { 15617 /* We have hystart enabled; send the round info in */ 15618 if (CC_ALGO(tp)->newround != NULL) { 15619 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); 15620 } 15621 } 15622 /* 15623 * For DGP an initial startup check. We want to validate 15624 * that we are not just pushing on slow-start and just 15625 * not gaining, i.e. filling buffers without getting any 15626 * boost in b/w during the initial slow-start. 15627 */ 15628 if (rack->dgp_on && 15629 (rack->rc_initial_ss_comp == 0) && 15630 (tp->snd_cwnd < tp->snd_ssthresh) && 15631 (rack->r_ctl.num_measurements >= RACK_REQ_AVG) && 15632 (rack->r_ctl.gp_rnd_thresh > 0) && 15633 ((rack->r_ctl.current_round - rack->r_ctl.last_rnd_of_gp_rise) >= rack->r_ctl.gp_rnd_thresh)) { 15634 15635 /* 15636 * We are in the initial SS and we have had rack_rnd_cnt_req rounds (def:5) where 15637 * we have not gained the required amount in the gp_est (120.0% aka 1200). Let's 15638 * exit SS. 15639 * 15640 * Pick up the flight size now as we enter slowstart (not the 15641 * cwnd which may be inflated). 15642 */ 15643 rack->rc_initial_ss_comp = 1; 15644 15645 if (tcp_bblogging_on(rack->rc_tp)) { 15646 union tcp_log_stackspecific log; 15647 struct timeval tv; 15648 15649 memset(&log, 0, sizeof(log)); 15650 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15651 log.u_bbr.flex1 = rack->r_ctl.current_round; 15652 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; 15653 log.u_bbr.flex3 = rack->r_ctl.gp_rnd_thresh; 15654 log.u_bbr.flex4 = rack->r_ctl.gate_to_fs; 15655 log.u_bbr.flex5 = rack->r_ctl.ss_hi_fs; 15656 log.u_bbr.flex8 = 40; 15657 (void)tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 15658 0, &log, false, NULL, __func__, __LINE__,&tv); 15659 } 15660 if ((rack->r_ctl.gate_to_fs == 1) && 15661 (tp->snd_cwnd > rack->r_ctl.ss_hi_fs)) { 15662 tp->snd_cwnd = rack->r_ctl.ss_hi_fs; 15663 } 15664 tp->snd_ssthresh = tp->snd_cwnd - 1; 15665 /* Turn off any fast output running */ 15666 rack->r_fast_output = 0; 15667 } 15668 } 15669 15670 static int 15671 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) 15672 { 15673 /* 15674 * Handle a "special" compressed ack mbuf. Each incoming 15675 * ack has only four possible dispositions: 15676 * 15677 * A) It moves the cum-ack forward. 15678 * B) It is behind the cum-ack. 15679 * C) It is a window-update ack. 15680 * D) It is a dup-ack. 15681 * 15682 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES acks 15683 * in the incoming mbuf. We also need to still pay attention 15684 * to nxt_pkt since there may be another packet after this 15685 * one.
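 *
 * In the loop below these four cases map onto ae->ack_val_set as
 * A) ACK_CUMACK, B) ACK_BEHIND, C) ACK_RWND and D) ACK_DUPACK, and the
 * per-ack cycle counts kept under TCP_ACCOUNTING are bucketed by that
 * same value.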
15686 */ 15687 #ifdef TCP_ACCOUNTING 15688 uint64_t ts_val; 15689 uint64_t rdstc; 15690 #endif 15691 int segsiz; 15692 struct timespec ts; 15693 struct tcp_rack *rack; 15694 struct tcp_ackent *ae; 15695 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack; 15696 int cnt, i, did_out, ourfinisacked = 0; 15697 struct tcpopt to_holder, *to = NULL; 15698 #ifdef TCP_ACCOUNTING 15699 int win_up_req = 0; 15700 #endif 15701 int nsegs = 0; 15702 int under_pacing = 0; 15703 int post_recovery = 0; 15704 #ifdef TCP_ACCOUNTING 15705 sched_pin(); 15706 #endif 15707 rack = (struct tcp_rack *)tp->t_fb_ptr; 15708 if (rack->gp_ready && 15709 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) 15710 under_pacing = 1; 15711 15712 if (rack->r_state != tp->t_state) 15713 rack_set_state(tp, rack); 15714 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 15715 (tp->t_flags & TF_GPUTINPROG)) { 15716 /* 15717 * We have a goodput in progress 15718 * and we have entered a late state. 15719 * Do we have enough data in the sb 15720 * to handle the GPUT request? 15721 */ 15722 uint32_t bytes; 15723 15724 bytes = tp->gput_ack - tp->gput_seq; 15725 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 15726 bytes += tp->gput_seq - tp->snd_una; 15727 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 15728 /* 15729 * There are not enough bytes in the socket 15730 * buffer that have been sent to cover this 15731 * measurement. Cancel it. 15732 */ 15733 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 15734 rack->r_ctl.rc_gp_srtt /*flex1*/, 15735 tp->gput_seq, 15736 0, 0, 18, __LINE__, NULL, 0); 15737 tp->t_flags &= ~TF_GPUTINPROG; 15738 } 15739 } 15740 to = &to_holder; 15741 to->to_flags = 0; 15742 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 15743 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 15744 cnt = m->m_len / sizeof(struct tcp_ackent); 15745 counter_u64_add(rack_multi_single_eq, cnt); 15746 high_seq = tp->snd_una; 15747 the_win = tp->snd_wnd; 15748 win_seq = tp->snd_wl1; 15749 win_upd_ack = tp->snd_wl2; 15750 cts = tcp_tv_to_usectick(tv); 15751 ms_cts = tcp_tv_to_mssectick(tv); 15752 rack->r_ctl.rc_rcvtime = cts; 15753 segsiz = ctf_fixed_maxseg(tp); 15754 if ((rack->rc_gp_dyn_mul) && 15755 (rack->use_fixed_rate == 0) && 15756 (rack->rc_always_pace)) { 15757 /* Check in on probertt */ 15758 rack_check_probe_rtt(rack, cts); 15759 } 15760 for (i = 0; i < cnt; i++) { 15761 #ifdef TCP_ACCOUNTING 15762 ts_val = get_cyclecount(); 15763 #endif 15764 rack_clear_rate_sample(rack); 15765 ae = ((mtod(m, struct tcp_ackent *)) + i); 15766 if (ae->flags & TH_FIN) 15767 rack_log_pacing_delay_calc(rack, 15768 0, 15769 0, 15770 0, 15771 rack_get_gp_est(rack), /* delRate */ 15772 rack_get_lt_bw(rack), /* rttProp */ 15773 20, __LINE__, NULL, 0); 15774 /* Setup the window */ 15775 tiwin = ae->win << tp->snd_scale; 15776 if (tiwin > rack->r_ctl.rc_high_rwnd) 15777 rack->r_ctl.rc_high_rwnd = tiwin; 15778 /* figure out the type of ack */ 15779 if (SEQ_LT(ae->ack, high_seq)) { 15780 /* Case B*/ 15781 ae->ack_val_set = ACK_BEHIND; 15782 } else if (SEQ_GT(ae->ack, high_seq)) { 15783 /* Case A */ 15784 ae->ack_val_set = ACK_CUMACK; 15785 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ 15786 /* Case D */ 15787 ae->ack_val_set = ACK_DUPACK; 15788 } else { 15789 /* Case C */ 15790 ae->ack_val_set = ACK_RWND; 15791 } 15792 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__); 15793 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 15794 /* Validate timestamp */ 15795 if 
(ae->flags & HAS_TSTMP) { 15796 /* Setup for a timestamp */ 15797 to->to_flags = TOF_TS; 15798 ae->ts_echo -= tp->ts_offset; 15799 to->to_tsecr = ae->ts_echo; 15800 to->to_tsval = ae->ts_value; 15801 /* 15802 * If echoed timestamp is later than the current time, fall back to 15803 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 15804 * were used when this connection was established. 15805 */ 15806 if (TSTMP_GT(ae->ts_echo, ms_cts)) 15807 to->to_tsecr = 0; 15808 if (tp->ts_recent && 15809 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 15810 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 15811 #ifdef TCP_ACCOUNTING 15812 rdstc = get_cyclecount(); 15813 if (rdstc > ts_val) { 15814 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15815 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 15816 } 15817 } 15818 #endif 15819 continue; 15820 } 15821 } 15822 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 15823 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 15824 tp->ts_recent_age = tcp_ts_getticks(); 15825 tp->ts_recent = ae->ts_value; 15826 } 15827 } else { 15828 /* Setup for a no options */ 15829 to->to_flags = 0; 15830 } 15831 /* Update the rcv time and perform idle reduction possibly */ 15832 if (tp->t_idle_reduce && 15833 (tp->snd_max == tp->snd_una) && 15834 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 15835 counter_u64_add(rack_input_idle_reduces, 1); 15836 rack_cc_after_idle(rack, tp); 15837 } 15838 tp->t_rcvtime = ticks; 15839 /* Now what about ECN of a chain of pure ACKs? */ 15840 if (tcp_ecn_input_segment(tp, ae->flags, 0, 15841 tcp_packets_this_ack(tp, ae->ack), 15842 ae->codepoint)) 15843 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__); 15844 #ifdef TCP_ACCOUNTING 15845 /* Count for the specific type of ack in */ 15846 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15847 tp->tcp_cnt_counters[ae->ack_val_set]++; 15848 } 15849 #endif 15850 /* 15851 * Note how we could move up these in the determination 15852 * above, but we don't so that way the timestamp checks (and ECN) 15853 * is done first before we do any processing on the ACK. 15854 * The non-compressed path through the code has this 15855 * weakness (noted by @jtl) that it actually does some 15856 * processing before verifying the timestamp information. 15857 * We don't take that path here which is why we set 15858 * the ack_val_set first, do the timestamp and ecn 15859 * processing, and then look at what we have setup. 
15860 */ 15861 if (ae->ack_val_set == ACK_BEHIND) { 15862 /* 15863 * Case B: flag reordering, but only if our receive window is not closed; 15864 * otherwise this could just be a keep-alive or persists probe. 15865 */ 15866 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { 15867 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 15868 if (rack->r_ctl.rc_reorder_ts == 0) 15869 rack->r_ctl.rc_reorder_ts = 1; 15870 } 15871 } else if (ae->ack_val_set == ACK_DUPACK) { 15872 /* Case D */ 15873 rack_strike_dupack(rack, ae->ack); 15874 } else if (ae->ack_val_set == ACK_RWND) { 15875 /* Case C */ 15876 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 15877 ts.tv_sec = ae->timestamp / 1000000000; 15878 ts.tv_nsec = ae->timestamp % 1000000000; 15879 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 15880 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 15881 } else { 15882 rack->r_ctl.act_rcv_time = *tv; 15883 } 15884 if (rack->forced_ack) { 15885 rack_handle_probe_response(rack, tiwin, 15886 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 15887 } 15888 #ifdef TCP_ACCOUNTING 15889 win_up_req = 1; 15890 #endif 15891 win_upd_ack = ae->ack; 15892 win_seq = ae->seq; 15893 the_win = tiwin; 15894 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts); 15895 } else { 15896 /* Case A */ 15897 if (SEQ_GT(ae->ack, tp->snd_max)) { 15898 /* 15899 * We just send an ack since the incoming 15900 * ack is beyond the largest seq we sent. 15901 */ 15902 if ((tp->t_flags & TF_ACKNOW) == 0) { 15903 ctf_ack_war_checks(tp); 15904 if (tp->t_flags & TF_ACKNOW) 15905 rack->r_wanted_output = 1; 15906 } 15907 } else { 15908 nsegs++; 15909 /* If the window changed setup to update */ 15910 if (tiwin != tp->snd_wnd) { 15911 win_upd_ack = ae->ack; 15912 win_seq = ae->seq; 15913 the_win = tiwin; 15914 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts); 15915 } 15916 #ifdef TCP_ACCOUNTING 15917 /* Account for the acks */ 15918 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15919 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); 15920 } 15921 #endif 15922 high_seq = ae->ack; 15923 /* Setup our act_rcv_time */ 15924 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 15925 ts.tv_sec = ae->timestamp / 1000000000; 15926 ts.tv_nsec = ae->timestamp % 1000000000; 15927 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 15928 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 15929 } else { 15930 rack->r_ctl.act_rcv_time = *tv; 15931 } 15932 rack_process_to_cumack(tp, rack, ae->ack, cts, to, 15933 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); 15934 #ifdef TCP_REQUEST_TRK 15935 rack_req_check_for_comp(rack, high_seq); 15936 #endif 15937 if (rack->rc_dsack_round_seen) { 15938 /* Is the dsack round over?
*/ 15939 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { 15940 /* Yes it is */ 15941 rack->rc_dsack_round_seen = 0; 15942 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 15943 } 15944 } 15945 } 15946 } 15947 /* And lets be sure to commit the rtt measurements for this ack */ 15948 tcp_rack_xmit_timer_commit(rack, tp); 15949 #ifdef TCP_ACCOUNTING 15950 rdstc = get_cyclecount(); 15951 if (rdstc > ts_val) { 15952 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15953 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 15954 if (ae->ack_val_set == ACK_CUMACK) 15955 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); 15956 } 15957 } 15958 #endif 15959 } 15960 #ifdef TCP_ACCOUNTING 15961 ts_val = get_cyclecount(); 15962 #endif 15963 /* Tend to any collapsed window */ 15964 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) { 15965 /* The peer collapsed the window */ 15966 rack_collapsed_window(rack, (tp->snd_max - high_seq), high_seq, __LINE__); 15967 } else if (rack->rc_has_collapsed) 15968 rack_un_collapse_window(rack, __LINE__); 15969 if ((rack->r_collapse_point_valid) && 15970 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point))) 15971 rack->r_collapse_point_valid = 0; 15972 acked_amount = acked = (high_seq - tp->snd_una); 15973 if (acked) { 15974 /* 15975 * The draft (v3) calls for us to use SEQ_GEQ, but that 15976 * causes issues when we are just going app limited. Lets 15977 * instead use SEQ_GT <or> where its equal but more data 15978 * is outstanding. 15979 * 15980 * Also make sure we are on the last ack of a series. We 15981 * have to have all the ack's processed in queue to know 15982 * if there is something left outstanding. 15983 * 15984 */ 15985 if (SEQ_GEQ(high_seq, rack->r_ctl.roundends) && 15986 (rack->rc_new_rnd_needed == 0) && 15987 (nxt_pkt == 0)) { 15988 /* 15989 * We have crossed into a new round with 15990 * this th_ack value. 15991 */ 15992 rack_new_round_setup(tp, rack, high_seq); 15993 } 15994 /* 15995 * Clear the probe not answered flag 15996 * since cum-ack moved forward. 15997 */ 15998 rack->probe_not_answered = 0; 15999 if (tp->t_flags & TF_NEEDSYN) { 16000 /* 16001 * T/TCP: Connection was half-synchronized, and our SYN has 16002 * been ACK'd (so connection is now fully synchronized). Go 16003 * to non-starred state, increment snd_una for ACK of SYN, 16004 * and check if we can do window scaling. 16005 */ 16006 tp->t_flags &= ~TF_NEEDSYN; 16007 tp->snd_una++; 16008 acked_amount = acked = (high_seq - tp->snd_una); 16009 } 16010 if (acked > sbavail(&so->so_snd)) 16011 acked_amount = sbavail(&so->so_snd); 16012 if (IN_FASTRECOVERY(tp->t_flags) && 16013 (rack->rack_no_prr == 0)) 16014 rack_update_prr(tp, rack, acked_amount, high_seq); 16015 if (IN_RECOVERY(tp->t_flags)) { 16016 if (SEQ_LT(high_seq, tp->snd_recover) && 16017 (SEQ_LT(high_seq, tp->snd_max))) { 16018 tcp_rack_partialack(tp); 16019 } else { 16020 rack_post_recovery(tp, high_seq); 16021 post_recovery = 1; 16022 } 16023 } else if ((rack->rto_from_rec == 1) && 16024 SEQ_GEQ(high_seq, tp->snd_recover)) { 16025 /* 16026 * We were in recovery, hit a rxt timeout 16027 * and never re-entered recovery. The timeout(s) 16028 * made up all the lost data. In such a case 16029 * we need to clear the rto_from_rec flag. 
16030 */ 16031 rack->rto_from_rec = 0; 16032 } 16033 /* Handle the rack-log-ack part (sendmap) */ 16034 if ((sbused(&so->so_snd) == 0) && 16035 (acked > acked_amount) && 16036 (tp->t_state >= TCPS_FIN_WAIT_1) && 16037 (tp->t_flags & TF_SENTFIN)) { 16038 /* 16039 * We must be sure our fin 16040 * was sent and acked (we can be 16041 * in FIN_WAIT_1 without having 16042 * sent the fin). 16043 */ 16044 ourfinisacked = 1; 16045 /* 16046 * Lets make sure snd_una is updated 16047 * since most likely acked_amount = 0 (it 16048 * should be). 16049 */ 16050 tp->snd_una = high_seq; 16051 } 16052 /* Did we make a RTO error? */ 16053 if ((tp->t_flags & TF_PREVVALID) && 16054 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 16055 tp->t_flags &= ~TF_PREVVALID; 16056 if (tp->t_rxtshift == 1 && 16057 (int)(ticks - tp->t_badrxtwin) < 0) 16058 rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__); 16059 } 16060 /* Handle the data in the socket buffer */ 16061 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 16062 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 16063 if (acked_amount > 0) { 16064 uint32_t p_cwnd; 16065 struct mbuf *mfree; 16066 16067 if (post_recovery) { 16068 /* 16069 * Grab the segsiz, multiply by 2 and add the snd_cwnd 16070 * that is the max the CC should add if we are exiting 16071 * recovery and doing a late add. 16072 */ 16073 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16074 p_cwnd <<= 1; 16075 p_cwnd += tp->snd_cwnd; 16076 } 16077 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, post_recovery); 16078 if (post_recovery && (tp->snd_cwnd > p_cwnd)) { 16079 /* Must be non-newreno (cubic) getting too ahead of itself */ 16080 tp->snd_cwnd = p_cwnd; 16081 } 16082 SOCK_SENDBUF_LOCK(so); 16083 mfree = sbcut_locked(&so->so_snd, acked_amount); 16084 tp->snd_una = high_seq; 16085 /* Note we want to hold the sb lock through the sendmap adjust */ 16086 rack_adjust_sendmap_head(rack, &so->so_snd); 16087 /* Wake up the socket if we have room to write more */ 16088 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 16089 sowwakeup_locked(so); 16090 m_freem(mfree); 16091 } 16092 /* update progress */ 16093 tp->t_acktime = ticks; 16094 rack_log_progress_event(rack, tp, tp->t_acktime, 16095 PROGRESS_UPDATE, __LINE__); 16096 /* Clear out shifts and such */ 16097 tp->t_rxtshift = 0; 16098 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 16099 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 16100 rack->rc_tlp_in_progress = 0; 16101 rack->r_ctl.rc_tlp_cnt_out = 0; 16102 /* Send recover and snd_nxt must be dragged along */ 16103 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 16104 tp->snd_recover = tp->snd_una; 16105 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 16106 tp->snd_nxt = tp->snd_max; 16107 /* 16108 * If the RXT timer is running we want to 16109 * stop it, so we can restart a TLP (or new RXT). 
16110 */ 16111 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 16112 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16113 tp->snd_wl2 = high_seq; 16114 tp->t_dupacks = 0; 16115 if (under_pacing && 16116 (rack->use_fixed_rate == 0) && 16117 (rack->in_probe_rtt == 0) && 16118 rack->rc_gp_dyn_mul && 16119 rack->rc_always_pace) { 16120 /* Check if we are dragging bottom */ 16121 rack_check_bottom_drag(tp, rack, so); 16122 } 16123 if (tp->snd_una == tp->snd_max) { 16124 tp->t_flags &= ~TF_PREVVALID; 16125 rack->r_ctl.retran_during_recovery = 0; 16126 rack->rc_suspicious = 0; 16127 rack->r_ctl.dsack_byte_cnt = 0; 16128 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 16129 if (rack->r_ctl.rc_went_idle_time == 0) 16130 rack->r_ctl.rc_went_idle_time = 1; 16131 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 16132 if (sbavail(&tptosocket(tp)->so_snd) == 0) 16133 tp->t_acktime = 0; 16134 /* Set so we might enter persists... */ 16135 rack->r_wanted_output = 1; 16136 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16137 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 16138 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 16139 (sbavail(&so->so_snd) == 0) && 16140 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 16141 /* 16142 * The socket was gone and the 16143 * peer sent data (not now in the past), time to 16144 * reset him. 16145 */ 16146 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16147 /* tcp_close will kill the inp pre-log the Reset */ 16148 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 16149 #ifdef TCP_ACCOUNTING 16150 rdstc = get_cyclecount(); 16151 if (rdstc > ts_val) { 16152 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16153 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16154 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16155 } 16156 } 16157 #endif 16158 m_freem(m); 16159 tp = tcp_close(tp); 16160 if (tp == NULL) { 16161 #ifdef TCP_ACCOUNTING 16162 sched_unpin(); 16163 #endif 16164 return (1); 16165 } 16166 /* 16167 * We would normally do drop-with-reset which would 16168 * send back a reset. We can't since we don't have 16169 * all the needed bits. Instead lets arrange for 16170 * a call to tcp_output(). That way since we 16171 * are in the closed state we will generate a reset. 16172 * 16173 * Note if tcp_accounting is on we don't unpin since 16174 * we do that after the goto label. 16175 */ 16176 goto send_out_a_rst; 16177 } 16178 if ((sbused(&so->so_snd) == 0) && 16179 (tp->t_state >= TCPS_FIN_WAIT_1) && 16180 (tp->t_flags & TF_SENTFIN)) { 16181 /* 16182 * If we can't receive any more data, then closing user can 16183 * proceed. Starting the timer is contrary to the 16184 * specification, but if we don't get a FIN we'll hang 16185 * forever. 16186 * 16187 */ 16188 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 16189 soisdisconnected(so); 16190 tcp_timer_activate(tp, TT_2MSL, 16191 (tcp_fast_finwait2_recycle ? 16192 tcp_finwait2_timeout : 16193 TP_MAXIDLE(tp))); 16194 } 16195 if (ourfinisacked == 0) { 16196 /* 16197 * We don't change to fin-wait-2 if we have our fin acked 16198 * which means we are probably in TCPS_CLOSING. 
16199 */ 16200 tcp_state_change(tp, TCPS_FIN_WAIT_2); 16201 } 16202 } 16203 } 16204 /* Wake up the socket if we have room to write more */ 16205 if (sbavail(&so->so_snd)) { 16206 rack->r_wanted_output = 1; 16207 if (ctf_progress_timeout_check(tp, true)) { 16208 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 16209 tp, tick, PROGRESS_DROP, __LINE__); 16210 /* 16211 * We cheat here and don't send a RST, we should send one 16212 * when the pacer drops the connection. 16213 */ 16214 #ifdef TCP_ACCOUNTING 16215 rdstc = get_cyclecount(); 16216 if (rdstc > ts_val) { 16217 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16218 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16219 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16220 } 16221 } 16222 sched_unpin(); 16223 #endif 16224 (void)tcp_drop(tp, ETIMEDOUT); 16225 m_freem(m); 16226 return (1); 16227 } 16228 } 16229 if (ourfinisacked) { 16230 switch(tp->t_state) { 16231 case TCPS_CLOSING: 16232 #ifdef TCP_ACCOUNTING 16233 rdstc = get_cyclecount(); 16234 if (rdstc > ts_val) { 16235 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16236 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16237 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16238 } 16239 } 16240 sched_unpin(); 16241 #endif 16242 tcp_twstart(tp); 16243 m_freem(m); 16244 return (1); 16245 break; 16246 case TCPS_LAST_ACK: 16247 #ifdef TCP_ACCOUNTING 16248 rdstc = get_cyclecount(); 16249 if (rdstc > ts_val) { 16250 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16251 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16252 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16253 } 16254 } 16255 sched_unpin(); 16256 #endif 16257 tp = tcp_close(tp); 16258 ctf_do_drop(m, tp); 16259 return (1); 16260 break; 16261 case TCPS_FIN_WAIT_1: 16262 #ifdef TCP_ACCOUNTING 16263 rdstc = get_cyclecount(); 16264 if (rdstc > ts_val) { 16265 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16266 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16267 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16268 } 16269 } 16270 #endif 16271 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 16272 soisdisconnected(so); 16273 tcp_timer_activate(tp, TT_2MSL, 16274 (tcp_fast_finwait2_recycle ? 16275 tcp_finwait2_timeout : 16276 TP_MAXIDLE(tp))); 16277 } 16278 tcp_state_change(tp, TCPS_FIN_WAIT_2); 16279 break; 16280 default: 16281 break; 16282 } 16283 } 16284 if (rack->r_fast_output) { 16285 /* 16286 * We re doing fast output.. can we expand that? 
16287 */ 16288 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 16289 } 16290 #ifdef TCP_ACCOUNTING 16291 rdstc = get_cyclecount(); 16292 if (rdstc > ts_val) { 16293 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16294 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16295 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16296 } 16297 } 16298 16299 } else if (win_up_req) { 16300 rdstc = get_cyclecount(); 16301 if (rdstc > ts_val) { 16302 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16303 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 16304 } 16305 } 16306 #endif 16307 } 16308 /* Now is there a next packet, if so we are done */ 16309 m_freem(m); 16310 did_out = 0; 16311 if (nxt_pkt) { 16312 #ifdef TCP_ACCOUNTING 16313 sched_unpin(); 16314 #endif 16315 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 16316 return (0); 16317 } 16318 rack_handle_might_revert(tp, rack); 16319 ctf_calc_rwin(so, tp); 16320 if ((rack->r_wanted_output != 0) || 16321 (rack->r_fast_output != 0) || 16322 (tp->t_flags & TF_ACKNOW )) { 16323 send_out_a_rst: 16324 if (tcp_output(tp) < 0) { 16325 #ifdef TCP_ACCOUNTING 16326 sched_unpin(); 16327 #endif 16328 return (1); 16329 } 16330 did_out = 1; 16331 } 16332 if (tp->t_flags2 & TF2_HPTS_CALLS) 16333 tp->t_flags2 &= ~TF2_HPTS_CALLS; 16334 rack_free_trim(rack); 16335 #ifdef TCP_ACCOUNTING 16336 sched_unpin(); 16337 #endif 16338 rack_timer_audit(tp, rack, &so->so_snd); 16339 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 16340 return (0); 16341 } 16342 16343 #define TCP_LRO_TS_OPTION \ 16344 ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \ 16345 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP) 16346 16347 static int 16348 rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 16349 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, int32_t nxt_pkt, 16350 struct timeval *tv) 16351 { 16352 struct inpcb *inp = tptoinpcb(tp); 16353 struct socket *so = tptosocket(tp); 16354 #ifdef TCP_ACCOUNTING 16355 uint64_t ts_val; 16356 #endif 16357 int32_t thflags, retval, did_out = 0; 16358 int32_t way_out = 0; 16359 /* 16360 * cts - is the current time from tv (caller gets ts) in microseconds. 16361 * ms_cts - is the current time from tv in milliseconds. 16362 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. 16363 */ 16364 uint32_t cts, us_cts, ms_cts; 16365 uint32_t tiwin; 16366 struct timespec ts; 16367 struct tcpopt to; 16368 struct tcp_rack *rack; 16369 struct rack_sendmap *rsm; 16370 int32_t prev_state = 0; 16371 int no_output = 0; 16372 int slot_remaining = 0; 16373 #ifdef TCP_ACCOUNTING 16374 int ack_val_set = 0xf; 16375 #endif 16376 int nsegs; 16377 16378 NET_EPOCH_ASSERT(); 16379 INP_WLOCK_ASSERT(inp); 16380 16381 /* 16382 * tv passed from common code is from either M_TSTMP_LRO or 16383 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 16384 */ 16385 rack = (struct tcp_rack *)tp->t_fb_ptr; 16386 if (rack->rack_deferred_inited == 0) { 16387 /* 16388 * If we are the connecting socket we will 16389 * hit rack_init() when no sequence numbers 16390 * are setup. This makes it so we must defer 16391 * some initialization. Call that now. 16392 */ 16393 rack_deferred_init(tp, rack); 16394 } 16395 /* 16396 * Check to see if we need to skip any output plans. This 16397 * can happen in the non-LRO path where we are pacing and 16398 * must process the ack coming in but need to defer sending 16399 * anything becase a pacing timer is running. 
16400 */ 16401 us_cts = tcp_tv_to_usectick(tv); 16402 if (m->m_flags & M_ACKCMP) { 16403 /* 16404 * All compressed ack's are ack's by definition so 16405 * remove any ack required flag and then do the processing. 16406 */ 16407 rack->rc_ack_required = 0; 16408 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 16409 } 16410 thflags = tcp_get_flags(th); 16411 if ((rack->rc_always_pace == 1) && 16412 (rack->rc_ack_can_sendout_data == 0) && 16413 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16414 (TSTMP_LT(us_cts, rack->r_ctl.rc_last_output_to))) { 16415 /* 16416 * Ok conditions are right for queuing the packets 16417 * but we do have to check the flags in the inp, it 16418 * could be, if a sack is present, we want to be awoken and 16419 * so should process the packets. 16420 */ 16421 slot_remaining = rack->r_ctl.rc_last_output_to - us_cts; 16422 if (rack->rc_tp->t_flags2 & TF2_DONT_SACK_QUEUE) { 16423 no_output = 1; 16424 } else { 16425 /* 16426 * If there is no options, or just a 16427 * timestamp option, we will want to queue 16428 * the packets. This is the same that LRO does 16429 * and will need to change with accurate ECN. 16430 */ 16431 uint32_t *ts_ptr; 16432 int optlen; 16433 16434 optlen = (th->th_off << 2) - sizeof(struct tcphdr); 16435 ts_ptr = (uint32_t *)(th + 1); 16436 if ((optlen == 0) || 16437 ((optlen == TCPOLEN_TSTAMP_APPA) && 16438 (*ts_ptr == TCP_LRO_TS_OPTION))) 16439 no_output = 1; 16440 } 16441 if ((no_output == 1) && (slot_remaining < tcp_min_hptsi_time)) { 16442 /* 16443 * It is unrealistic to think we can pace in less than 16444 * the minimum granularity of the pacer (def:250usec). So 16445 * if we have less than that time remaining we should go 16446 * ahead and allow output to be "early". We will attempt to 16447 * make up for it in any pacing time we try to apply on 16448 * the outbound packet. 16449 */ 16450 no_output = 0; 16451 } 16452 } 16453 /* 16454 * If there is a RST or FIN lets dump out the bw 16455 * with a FIN the connection may go on but we 16456 * may not. 16457 */ 16458 if ((thflags & TH_FIN) || (thflags & TH_RST)) 16459 rack_log_pacing_delay_calc(rack, 16460 rack->r_ctl.gp_bw, 16461 0, 16462 0, 16463 rack_get_gp_est(rack), /* delRate */ 16464 rack_get_lt_bw(rack), /* rttProp */ 16465 20, __LINE__, NULL, 0); 16466 if (m->m_flags & M_ACKCMP) { 16467 panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp); 16468 } 16469 cts = tcp_tv_to_usectick(tv); 16470 ms_cts = tcp_tv_to_mssectick(tv); 16471 nsegs = m->m_pkthdr.lro_nsegs; 16472 counter_u64_add(rack_proc_non_comp_ack, 1); 16473 #ifdef TCP_ACCOUNTING 16474 sched_pin(); 16475 if (thflags & TH_ACK) 16476 ts_val = get_cyclecount(); 16477 #endif 16478 if ((m->m_flags & M_TSTMP) || 16479 (m->m_flags & M_TSTMP_LRO)) { 16480 mbuf_tstmp2timespec(m, &ts); 16481 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 16482 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 16483 } else 16484 rack->r_ctl.act_rcv_time = *tv; 16485 kern_prefetch(rack, &prev_state); 16486 prev_state = 0; 16487 /* 16488 * Unscale the window into a 32-bit value. For the SYN_SENT state 16489 * the scale is zero. 16490 */ 16491 tiwin = th->th_win << tp->snd_scale; 16492 #ifdef TCP_ACCOUNTING 16493 if (thflags & TH_ACK) { 16494 /* 16495 * We have a tradeoff here. We can either do what we are 16496 * doing i.e. pinning to this CPU and then doing the accounting 16497 * <or> we could do a critical enter, setup the rdtsc and cpu 16498 * as in below, and then validate we are on the same CPU on 16499 * exit. 
I have choosen to not do the critical enter since 16500 * that often will gain you a context switch, and instead lock 16501 * us (line above this if) to the same CPU with sched_pin(). This 16502 * means we may be context switched out for a higher priority 16503 * interupt but we won't be moved to another CPU. 16504 * 16505 * If this occurs (which it won't very often since we most likely 16506 * are running this code in interupt context and only a higher 16507 * priority will bump us ... clock?) we will falsely add in 16508 * to the time the interupt processing time plus the ack processing 16509 * time. This is ok since its a rare event. 16510 */ 16511 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin, 16512 ctf_fixed_maxseg(tp)); 16513 } 16514 #endif 16515 /* 16516 * Parse options on any incoming segment. 16517 */ 16518 memset(&to, 0, sizeof(to)); 16519 tcp_dooptions(&to, (u_char *)(th + 1), 16520 (th->th_off << 2) - sizeof(struct tcphdr), 16521 (thflags & TH_SYN) ? TO_SYN : 0); 16522 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 16523 __func__)); 16524 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 16525 __func__)); 16526 if (tp->t_flags2 & TF2_PROC_SACK_PROHIBIT) { 16527 /* 16528 * We don't look at sack's from the 16529 * peer because the MSS is too small which 16530 * can subject us to an attack. 16531 */ 16532 to.to_flags &= ~TOF_SACK; 16533 } 16534 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 16535 (tp->t_flags & TF_GPUTINPROG)) { 16536 /* 16537 * We have a goodput in progress 16538 * and we have entered a late state. 16539 * Do we have enough data in the sb 16540 * to handle the GPUT request? 16541 */ 16542 uint32_t bytes; 16543 16544 bytes = tp->gput_ack - tp->gput_seq; 16545 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 16546 bytes += tp->gput_seq - tp->snd_una; 16547 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 16548 /* 16549 * There are not enough bytes in the socket 16550 * buffer that have been sent to cover this 16551 * measurement. Cancel it. 
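 *
 * i.e. the measurement still needs (sketch)
 *
 *	bytes = (gput_ack - gput_seq)
 *	      + (gput_seq - snd_una)	(second term only if gput_seq
 *					 is still ahead of snd_una)
 *
 * bytes of sent data sitting in the send buffer; once sbavail() is
 * below that the goodput sample can never complete, so abandon it.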
16552 */ 16553 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 16554 rack->r_ctl.rc_gp_srtt /*flex1*/, 16555 tp->gput_seq, 16556 0, 0, 18, __LINE__, NULL, 0); 16557 tp->t_flags &= ~TF_GPUTINPROG; 16558 } 16559 } 16560 if (tcp_bblogging_on(rack->rc_tp)) { 16561 union tcp_log_stackspecific log; 16562 struct timeval ltv; 16563 #ifdef TCP_REQUEST_TRK 16564 struct tcp_sendfile_track *tcp_req; 16565 16566 if (SEQ_GT(th->th_ack, tp->snd_una)) { 16567 tcp_req = tcp_req_find_req_for_seq(tp, (th->th_ack-1)); 16568 } else { 16569 tcp_req = tcp_req_find_req_for_seq(tp, th->th_ack); 16570 } 16571 #endif 16572 memset(&log, 0, sizeof(log)); 16573 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 16574 if (rack->rack_no_prr == 0) 16575 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16576 else 16577 log.u_bbr.flex1 = 0; 16578 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 16579 log.u_bbr.use_lt_bw <<= 1; 16580 log.u_bbr.use_lt_bw |= rack->r_might_revert; 16581 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 16582 log.u_bbr.bbr_state = rack->rc_free_cnt; 16583 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16584 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 16585 log.u_bbr.flex3 = m->m_flags; 16586 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 16587 log.u_bbr.lost = thflags; 16588 log.u_bbr.pacing_gain = 0x1; 16589 #ifdef TCP_ACCOUNTING 16590 log.u_bbr.cwnd_gain = ack_val_set; 16591 #endif 16592 log.u_bbr.flex7 = 2; 16593 if (m->m_flags & M_TSTMP) { 16594 /* Record the hardware timestamp if present */ 16595 mbuf_tstmp2timespec(m, &ts); 16596 ltv.tv_sec = ts.tv_sec; 16597 ltv.tv_usec = ts.tv_nsec / 1000; 16598 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 16599 } else if (m->m_flags & M_TSTMP_LRO) { 16600 /* Record the LRO the arrival timestamp */ 16601 mbuf_tstmp2timespec(m, &ts); 16602 ltv.tv_sec = ts.tv_sec; 16603 ltv.tv_usec = ts.tv_nsec / 1000; 16604 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 16605 } 16606 log.u_bbr.timeStamp = tcp_get_usecs(<v); 16607 /* Log the rcv time */ 16608 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp; 16609 #ifdef TCP_REQUEST_TRK 16610 log.u_bbr.applimited = tp->t_tcpreq_closed; 16611 log.u_bbr.applimited <<= 8; 16612 log.u_bbr.applimited |= tp->t_tcpreq_open; 16613 log.u_bbr.applimited <<= 8; 16614 log.u_bbr.applimited |= tp->t_tcpreq_req; 16615 if (tcp_req) { 16616 /* Copy out any client req info */ 16617 /* seconds */ 16618 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); 16619 /* useconds */ 16620 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); 16621 log.u_bbr.rttProp = tcp_req->timestamp; 16622 log.u_bbr.cur_del_rate = tcp_req->start; 16623 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { 16624 log.u_bbr.flex8 |= 1; 16625 } else { 16626 log.u_bbr.flex8 |= 2; 16627 log.u_bbr.bw_inuse = tcp_req->end; 16628 } 16629 log.u_bbr.flex6 = tcp_req->start_seq; 16630 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { 16631 log.u_bbr.flex8 |= 4; 16632 log.u_bbr.epoch = tcp_req->end_seq; 16633 } 16634 } 16635 #endif 16636 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, 16637 tlen, &log, true, <v); 16638 } 16639 /* Remove ack required flag if set, we have one */ 16640 if (thflags & TH_ACK) 16641 rack->rc_ack_required = 0; 16642 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__); 16643 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) { 16644 way_out = 4; 16645 retval = 0; 16646 m_freem(m); 16647 goto done_with_input; 16648 } 16649 /* 16650 * If a segment with the ACK-bit set arrives in the SYN-SENT 
state 16651 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9. 16652 */ 16653 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 16654 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 16655 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 16656 ctf_do_dropwithreset(m, tp, th, BANDLIM_TCP_RST, tlen); 16657 #ifdef TCP_ACCOUNTING 16658 sched_unpin(); 16659 #endif 16660 return (1); 16661 } 16662 /* 16663 * If timestamps were negotiated during SYN/ACK and a 16664 * segment without a timestamp is received, silently drop 16665 * the segment, unless it is a RST segment or missing timestamps are 16666 * tolerated. 16667 * See section 3.2 of RFC 7323. 16668 */ 16669 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 16670 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 16671 way_out = 5; 16672 retval = 0; 16673 m_freem(m); 16674 goto done_with_input; 16675 } 16676 /* 16677 * Segment received on connection. Reset idle time and keep-alive 16678 * timer. XXX: This should be done after segment validation to 16679 * ignore broken/spoofed segs. 16680 */ 16681 if (tp->t_idle_reduce && 16682 (tp->snd_max == tp->snd_una) && 16683 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 16684 counter_u64_add(rack_input_idle_reduces, 1); 16685 rack_cc_after_idle(rack, tp); 16686 } 16687 tp->t_rcvtime = ticks; 16688 #ifdef STATS 16689 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 16690 #endif 16691 if (tiwin > rack->r_ctl.rc_high_rwnd) 16692 rack->r_ctl.rc_high_rwnd = tiwin; 16693 /* 16694 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 16695 * this to occur after we've validated the segment. 16696 */ 16697 if (tcp_ecn_input_segment(tp, thflags, tlen, 16698 tcp_packets_this_ack(tp, th->th_ack), 16699 iptos)) 16700 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__); 16701 16702 /* 16703 * If echoed timestamp is later than the current time, fall back to 16704 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 16705 * were used when this connection was established. 16706 */ 16707 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 16708 to.to_tsecr -= tp->ts_offset; 16709 if (TSTMP_GT(to.to_tsecr, ms_cts)) 16710 to.to_tsecr = 0; 16711 } 16712 if ((rack->r_rcvpath_rtt_up == 1) && 16713 (to.to_flags & TOF_TS) && 16714 (TSTMP_GEQ(to.to_tsecr, rack->r_ctl.last_rcv_tstmp_for_rtt))) { 16715 uint32_t rtt = 0; 16716 16717 /* 16718 * We are receiving only and thus not sending 16719 * data to do an RTT. We set a flag when we first 16720 * sent this TS to the peer. We now have it back 16721 * and have an RTT to share. We log it as a conf 16722 * 4, we are not so sure about it.. since we 16723 * may have lost an ack. 16724 */ 16725 if (TSTMP_GT(cts, rack->r_ctl.last_time_of_arm_rcv)) 16726 rtt = (cts - rack->r_ctl.last_time_of_arm_rcv); 16727 rack->r_rcvpath_rtt_up = 0; 16728 /* Submit and commit the timer */ 16729 if (rtt > 0) { 16730 tcp_rack_xmit_timer(rack, rtt, 0, rtt, 4, NULL, 1); 16731 tcp_rack_xmit_timer_commit(rack, tp); 16732 } 16733 } 16734 /* 16735 * If its the first time in we need to take care of options and 16736 * verify we can do SACK for rack! 16737 */ 16738 if (rack->r_state == 0) { 16739 /* Should be init'd by rack_init() */ 16740 KASSERT(rack->rc_inp != NULL, 16741 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 16742 if (rack->rc_inp == NULL) { 16743 rack->rc_inp = inp; 16744 } 16745 16746 /* 16747 * Process options only when we get SYN/ACK back. 
The SYN 16748 * case for incoming connections is handled in tcp_syncache. 16749 * According to RFC1323 the window field in a SYN (i.e., a 16750 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 16751 * this is traditional behavior, may need to be cleaned up. 16752 */ 16753 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 16754 /* Handle parallel SYN for ECN */ 16755 tcp_ecn_input_parallel_syn(tp, thflags, iptos); 16756 if ((to.to_flags & TOF_SCALE) && 16757 (tp->t_flags & TF_REQ_SCALE)) { 16758 tp->t_flags |= TF_RCVD_SCALE; 16759 tp->snd_scale = to.to_wscale; 16760 } else 16761 tp->t_flags &= ~TF_REQ_SCALE; 16762 /* 16763 * Initial send window. It will be updated with the 16764 * next incoming segment to the scaled value. 16765 */ 16766 tp->snd_wnd = th->th_win; 16767 rack_validate_fo_sendwin_up(tp, rack); 16768 if ((to.to_flags & TOF_TS) && 16769 (tp->t_flags & TF_REQ_TSTMP)) { 16770 tp->t_flags |= TF_RCVD_TSTMP; 16771 tp->ts_recent = to.to_tsval; 16772 tp->ts_recent_age = cts; 16773 } else 16774 tp->t_flags &= ~TF_REQ_TSTMP; 16775 if (to.to_flags & TOF_MSS) { 16776 tcp_mss(tp, to.to_mss); 16777 } 16778 if ((tp->t_flags & TF_SACK_PERMIT) && 16779 (to.to_flags & TOF_SACKPERM) == 0) 16780 tp->t_flags &= ~TF_SACK_PERMIT; 16781 if (tp->t_flags & TF_FASTOPEN) { 16782 if (to.to_flags & TOF_FASTOPEN) { 16783 uint16_t mss; 16784 16785 if (to.to_flags & TOF_MSS) 16786 mss = to.to_mss; 16787 else 16788 if ((inp->inp_vflag & INP_IPV6) != 0) 16789 mss = TCP6_MSS; 16790 else 16791 mss = TCP_MSS; 16792 tcp_fastopen_update_cache(tp, mss, 16793 to.to_tfo_len, to.to_tfo_cookie); 16794 } else 16795 tcp_fastopen_disable_path(tp); 16796 } 16797 } 16798 /* 16799 * At this point we are at the initial call. Here we decide 16800 * if we are doing RACK or not. We do this by seeing if 16801 * TF_SACK_PERMIT is set and the sack-not-required is clear. 16802 * The code now does do dup-ack counting so if you don't 16803 * switch back you won't get rack & TLP, but you will still 16804 * get this stack. 16805 */ 16806 16807 if ((rack_sack_not_required == 0) && 16808 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 16809 tcp_switch_back_to_default(tp); 16810 (*tp->t_fb->tfb_tcp_do_segment)(tp, m, th, drop_hdrlen, 16811 tlen, iptos); 16812 #ifdef TCP_ACCOUNTING 16813 sched_unpin(); 16814 #endif 16815 return (1); 16816 } 16817 tcp_set_hpts(tp); 16818 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 16819 } 16820 if (thflags & TH_FIN) 16821 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 16822 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 16823 if ((rack->rc_gp_dyn_mul) && 16824 (rack->use_fixed_rate == 0) && 16825 (rack->rc_always_pace)) { 16826 /* Check in on probertt */ 16827 rack_check_probe_rtt(rack, cts); 16828 } 16829 rack_clear_rate_sample(rack); 16830 if ((rack->forced_ack) && 16831 ((tcp_get_flags(th) & TH_RST) == 0)) { 16832 rack_handle_probe_response(rack, tiwin, us_cts); 16833 } 16834 /* 16835 * This is the one exception case where we set the rack state 16836 * always. All other times (timers etc) we must have a rack-state 16837 * set (so we assure we have done the checks above for SACK). 
16838 */ 16839 rack->r_ctl.rc_rcvtime = cts; 16840 if (rack->r_state != tp->t_state) 16841 rack_set_state(tp, rack); 16842 if (SEQ_GT(th->th_ack, tp->snd_una) && 16843 (rsm = tqhash_min(rack->r_ctl.tqh)) != NULL) 16844 kern_prefetch(rsm, &prev_state); 16845 prev_state = rack->r_state; 16846 if ((thflags & TH_RST) && 16847 ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 16848 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || 16849 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq))) { 16850 /* The connection will be killed by a reset check the tracepoint */ 16851 tcp_trace_point(rack->rc_tp, TCP_TP_RESET_RCV); 16852 } 16853 retval = (*rack->r_substate) (m, th, so, 16854 tp, &to, drop_hdrlen, 16855 tlen, tiwin, thflags, nxt_pkt, iptos); 16856 if (retval == 0) { 16857 /* 16858 * If retval is 1 the tcb is unlocked and most likely the tp 16859 * is gone. 16860 */ 16861 INP_WLOCK_ASSERT(inp); 16862 if ((rack->rc_gp_dyn_mul) && 16863 (rack->rc_always_pace) && 16864 (rack->use_fixed_rate == 0) && 16865 rack->in_probe_rtt && 16866 (rack->r_ctl.rc_time_probertt_starts == 0)) { 16867 /* 16868 * If we are going for target, lets recheck before 16869 * we output. 16870 */ 16871 rack_check_probe_rtt(rack, cts); 16872 } 16873 if (rack->set_pacing_done_a_iw == 0) { 16874 /* How much has been acked? */ 16875 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 16876 /* We have enough to set in the pacing segment size */ 16877 rack->set_pacing_done_a_iw = 1; 16878 rack_set_pace_segments(tp, rack, __LINE__, NULL); 16879 } 16880 } 16881 tcp_rack_xmit_timer_commit(rack, tp); 16882 #ifdef TCP_ACCOUNTING 16883 /* 16884 * If we set the ack_val_se to what ack processing we are doing 16885 * we also want to track how many cycles we burned. Note 16886 * the bits after tcp_output we let be "free". This is because 16887 * we are also tracking the tcp_output times as well. Note the 16888 * use of 0xf here since we only have 11 counter (0 - 0xa) and 16889 * 0xf cannot be returned and is what we initialize it too to 16890 * indicate we are not doing the tabulations. 16891 */ 16892 if (ack_val_set != 0xf) { 16893 uint64_t crtsc; 16894 16895 crtsc = get_cyclecount(); 16896 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16897 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 16898 } 16899 } 16900 #endif 16901 if ((nxt_pkt == 0) && (no_output == 0)) { 16902 if ((rack->r_wanted_output != 0) || 16903 (tp->t_flags & TF_ACKNOW) || 16904 (rack->r_fast_output != 0)) { 16905 16906 do_output_now: 16907 if (tcp_output(tp) < 0) { 16908 #ifdef TCP_ACCOUNTING 16909 sched_unpin(); 16910 #endif 16911 return (1); 16912 } 16913 did_out = 1; 16914 } 16915 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16916 rack_free_trim(rack); 16917 } else if ((nxt_pkt == 0) && (tp->t_flags & TF_ACKNOW)) { 16918 goto do_output_now; 16919 } else if ((no_output == 1) && 16920 (nxt_pkt == 0) && 16921 (tcp_in_hpts(rack->rc_tp) == 0)) { 16922 /* 16923 * We are not in hpts and we had a pacing timer up. Use 16924 * the remaining time (slot_remaining) to restart the timer. 
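 * For example, if the pacing deadline was 3,000 usec away when this
 * ack arrived and we elected not to send, the timer is re-armed with
 * that remaining 3,000 usec rather than a freshly computed pacing
 * slot (illustrative numbers only).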
16925 */ 16926 KASSERT ((slot_remaining != 0), ("slot remaining is zero for rack:%p tp:%p", rack, tp)); 16927 rack_start_hpts_timer(rack, tp, cts, slot_remaining, 0, 0); 16928 rack_free_trim(rack); 16929 } 16930 /* Clear the flag, it may have been cleared by output but we may not have */ 16931 if ((nxt_pkt == 0) && (tp->t_flags2 & TF2_HPTS_CALLS)) 16932 tp->t_flags2 &= ~TF2_HPTS_CALLS; 16933 /* 16934 * The draft (v3) calls for us to use SEQ_GEQ, but that 16935 * causes issues when we are just going app limited. Lets 16936 * instead use SEQ_GT <or> where its equal but more data 16937 * is outstanding. 16938 * 16939 * Also make sure we are on the last ack of a series. We 16940 * have to have all the ack's processed in queue to know 16941 * if there is something left outstanding. 16942 */ 16943 if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends) && 16944 (rack->rc_new_rnd_needed == 0) && 16945 (nxt_pkt == 0)) { 16946 /* 16947 * We have crossed into a new round with 16948 * the new snd_unae. 16949 */ 16950 rack_new_round_setup(tp, rack, tp->snd_una); 16951 } 16952 if ((nxt_pkt == 0) && 16953 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 16954 (SEQ_GT(tp->snd_max, tp->snd_una) || 16955 (tp->t_flags & TF_DELACK) || 16956 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 16957 (tp->t_state <= TCPS_CLOSING)))) { 16958 /* We could not send (probably in the hpts but stopped the timer earlier)? */ 16959 if ((tp->snd_max == tp->snd_una) && 16960 ((tp->t_flags & TF_DELACK) == 0) && 16961 (tcp_in_hpts(rack->rc_tp)) && 16962 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 16963 /* keep alive not needed if we are hptsi output yet */ 16964 ; 16965 } else { 16966 int late = 0; 16967 if (tcp_in_hpts(tp)) { 16968 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 16969 us_cts = tcp_get_usecs(NULL); 16970 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 16971 rack->r_early = 1; 16972 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 16973 } else 16974 late = 1; 16975 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 16976 } 16977 tcp_hpts_remove(tp); 16978 } 16979 if (late && (did_out == 0)) { 16980 /* 16981 * We are late in the sending 16982 * and we did not call the output 16983 * (this probably should not happen). 16984 */ 16985 goto do_output_now; 16986 } 16987 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 16988 } 16989 way_out = 1; 16990 } else if (nxt_pkt == 0) { 16991 /* Do we have the correct timer running? */ 16992 rack_timer_audit(tp, rack, &so->so_snd); 16993 way_out = 2; 16994 } 16995 done_with_input: 16996 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); 16997 if (did_out) 16998 rack->r_wanted_output = 0; 16999 } 17000 17001 #ifdef TCP_ACCOUNTING 17002 sched_unpin(); 17003 #endif 17004 return (retval); 17005 } 17006 17007 static void 17008 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 17009 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 17010 { 17011 struct timeval tv; 17012 17013 /* First lets see if we have old packets */ 17014 if (!STAILQ_EMPTY(&tp->t_inqueue)) { 17015 if (ctf_do_queued_segments(tp, 1)) { 17016 m_freem(m); 17017 return; 17018 } 17019 } 17020 if (m->m_flags & M_TSTMP_LRO) { 17021 mbuf_tstmp2timeval(m, &tv); 17022 } else { 17023 /* Should not be should we kassert instead? 
*/ 17024 tcp_get_usecs(&tv); 17025 } 17026 if (rack_do_segment_nounlock(tp, m, th, drop_hdrlen, tlen, iptos, 0, 17027 &tv) == 0) { 17028 INP_WUNLOCK(tptoinpcb(tp)); 17029 } 17030 } 17031 17032 struct rack_sendmap * 17033 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 17034 { 17035 struct rack_sendmap *rsm = NULL; 17036 int32_t idx; 17037 uint32_t srtt = 0, thresh = 0, ts_low = 0; 17038 17039 /* Return the next guy to be re-transmitted */ 17040 if (tqhash_empty(rack->r_ctl.tqh)) { 17041 return (NULL); 17042 } 17043 if (tp->t_flags & TF_SENTFIN) { 17044 /* retran the end FIN? */ 17045 return (NULL); 17046 } 17047 /* ok lets look at this one */ 17048 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 17049 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) { 17050 return (rsm); 17051 } 17052 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 17053 goto check_it; 17054 } 17055 rsm = rack_find_lowest_rsm(rack); 17056 if (rsm == NULL) { 17057 return (NULL); 17058 } 17059 check_it: 17060 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && 17061 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 17062 /* 17063 * No sack so we automatically do the 3 strikes and 17064 * retransmit (no rack timer would be started). 17065 */ 17066 return (rsm); 17067 } 17068 if (rsm->r_flags & RACK_ACKED) { 17069 return (NULL); 17070 } 17071 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 17072 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 17073 /* Its not yet ready */ 17074 return (NULL); 17075 } 17076 srtt = rack_grab_rtt(tp, rack); 17077 idx = rsm->r_rtr_cnt - 1; 17078 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 17079 thresh = rack_calc_thresh_rack(rack, srtt, tsused, __LINE__, 1); 17080 if ((tsused == ts_low) || 17081 (TSTMP_LT(tsused, ts_low))) { 17082 /* No time since sending */ 17083 return (NULL); 17084 } 17085 if ((tsused - ts_low) < thresh) { 17086 /* It has not been long enough yet */ 17087 return (NULL); 17088 } 17089 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 17090 ((rsm->r_flags & RACK_SACK_PASSED))) { 17091 /* 17092 * We have passed the dup-ack threshold <or> 17093 * a SACK has indicated this is missing. 17094 * Note that if you are a declared attacker 17095 * it is only the dup-ack threshold that 17096 * will cause retransmits. 17097 */ 17098 /* log retransmit reason */ 17099 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 17100 rack->r_fast_output = 0; 17101 return (rsm); 17102 } 17103 return (NULL); 17104 } 17105 17106 static void 17107 rack_log_pacing_delay_calc (struct tcp_rack *rack, uint32_t len, uint32_t slot, 17108 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 17109 int line, struct rack_sendmap *rsm, uint8_t quality) 17110 { 17111 if (tcp_bblogging_on(rack->rc_tp)) { 17112 union tcp_log_stackspecific log; 17113 struct timeval tv; 17114 17115 if (rack_verbose_logging == 0) { 17116 /* 17117 * We are not verbose screen out all but 17118 * ones we always want. 
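 * Of the method numbers kept below, the ones used at nearby call
 * sites are 20 (the FIN/RST bandwidth dump), 7 and 14 (the
 * non-pacing/old-method delay calculation) and 2 (the paced delay
 * calculation).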
17119 */ 17120 if ((method != 2) && 17121 (method != 3) && 17122 (method != 7) && 17123 (method != 89) && 17124 (method != 14) && 17125 (method != 20)) { 17126 return; 17127 } 17128 } 17129 memset(&log, 0, sizeof(log)); 17130 log.u_bbr.flex1 = slot; 17131 log.u_bbr.flex2 = len; 17132 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 17133 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 17134 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 17135 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 17136 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 17137 log.u_bbr.use_lt_bw <<= 1; 17138 log.u_bbr.use_lt_bw |= rack->r_late; 17139 log.u_bbr.use_lt_bw <<= 1; 17140 log.u_bbr.use_lt_bw |= rack->r_early; 17141 log.u_bbr.use_lt_bw <<= 1; 17142 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 17143 log.u_bbr.use_lt_bw <<= 1; 17144 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 17145 log.u_bbr.use_lt_bw <<= 1; 17146 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 17147 log.u_bbr.use_lt_bw <<= 1; 17148 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 17149 log.u_bbr.use_lt_bw <<= 1; 17150 log.u_bbr.use_lt_bw |= rack->gp_ready; 17151 log.u_bbr.pkt_epoch = line; 17152 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 17153 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 17154 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 17155 log.u_bbr.bw_inuse = bw_est; 17156 log.u_bbr.delRate = bw; 17157 if (rack->r_ctl.gp_bw == 0) 17158 log.u_bbr.cur_del_rate = 0; 17159 else 17160 log.u_bbr.cur_del_rate = rack_get_bw(rack); 17161 log.u_bbr.rttProp = len_time; 17162 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 17163 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 17164 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 17165 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 17166 /* We are in slow start */ 17167 log.u_bbr.flex7 = 1; 17168 } else { 17169 /* we are on congestion avoidance */ 17170 log.u_bbr.flex7 = 0; 17171 } 17172 log.u_bbr.flex8 = method; 17173 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 17174 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 17175 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 17176 log.u_bbr.cwnd_gain <<= 1; 17177 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 17178 log.u_bbr.cwnd_gain <<= 1; 17179 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 17180 log.u_bbr.cwnd_gain <<= 1; 17181 log.u_bbr.cwnd_gain |= rack->use_fixed_rate; 17182 log.u_bbr.cwnd_gain <<= 1; 17183 log.u_bbr.cwnd_gain |= rack->rc_always_pace; 17184 log.u_bbr.cwnd_gain <<= 1; 17185 log.u_bbr.cwnd_gain |= rack->gp_ready; 17186 log.u_bbr.bbr_substate = quality; 17187 log.u_bbr.bbr_state = rack->dgp_on; 17188 log.u_bbr.bbr_state <<= 1; 17189 log.u_bbr.bbr_state |= rack->rc_pace_to_cwnd; 17190 log.u_bbr.bbr_state <<= 2; 17191 TCP_LOG_EVENTP(rack->rc_tp, NULL, 17192 &rack->rc_inp->inp_socket->so_rcv, 17193 &rack->rc_inp->inp_socket->so_snd, 17194 BBR_LOG_HPTSI_CALC, 0, 17195 0, &log, false, &tv); 17196 } 17197 } 17198 17199 static uint32_t 17200 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 17201 { 17202 uint32_t new_tso, user_max, pace_one; 17203 17204 user_max = rack->rc_user_set_max_segs * mss; 17205 if (rack->rc_force_max_seg) { 17206 return (user_max); 17207 } 17208 if (rack->use_fixed_rate && 17209 ((rack->r_ctl.crte == NULL) || 17210 (bw != rack->r_ctl.crte->rate))) { 17211 /* Use the user mss since we are not exactly matched */ 17212 return (user_max); 17213 } 17214 if (rack_pace_one_seg || 17215 (rack->r_ctl.rc_user_set_min_segs == 1)) 17216 
pace_one = 1; 17217 else 17218 pace_one = 0; 17219 17220 new_tso = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, bw, mss, 17221 pace_one, rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); 17222 if (new_tso > user_max) 17223 new_tso = user_max; 17224 if (rack->rc_hybrid_mode && rack->r_ctl.client_suggested_maxseg) { 17225 if (((uint32_t)rack->r_ctl.client_suggested_maxseg * mss) > new_tso) 17226 new_tso = (uint32_t)rack->r_ctl.client_suggested_maxseg * mss; 17227 } 17228 if (rack->r_ctl.rc_user_set_min_segs && 17229 ((rack->r_ctl.rc_user_set_min_segs * mss) > new_tso)) 17230 new_tso = rack->r_ctl.rc_user_set_min_segs * mss; 17231 return (new_tso); 17232 } 17233 17234 static uint64_t 17235 rack_arrive_at_discounted_rate(struct tcp_rack *rack, uint64_t window_input, uint32_t *rate_set, uint32_t *gain_b) 17236 { 17237 uint64_t reduced_win; 17238 uint32_t gain; 17239 17240 if (window_input < rc_init_window(rack)) { 17241 /* 17242 * The cwnd is collapsed to 17243 * nearly zero, maybe because of a time-out? 17244 * Lets drop back to the lt-bw. 17245 */ 17246 reduced_win = rack_get_lt_bw(rack); 17247 /* Set the flag so the caller knows its a rate and not a reduced window */ 17248 *rate_set = 1; 17249 gain = 100; 17250 } else if (IN_RECOVERY(rack->rc_tp->t_flags)) { 17251 /* 17252 * If we are in recover our cwnd needs to be less for 17253 * our pacing consideration. 17254 */ 17255 if (rack->rack_hibeta == 0) { 17256 reduced_win = window_input / 2; 17257 gain = 50; 17258 } else { 17259 reduced_win = window_input * rack->r_ctl.saved_hibeta; 17260 reduced_win /= 100; 17261 gain = rack->r_ctl.saved_hibeta; 17262 } 17263 } else { 17264 /* 17265 * Apply Timely factor to increase/decrease the 17266 * amount we are pacing at. 17267 */ 17268 gain = rack_get_output_gain(rack, NULL); 17269 if (gain > rack_gain_p5_ub) { 17270 gain = rack_gain_p5_ub; 17271 } 17272 reduced_win = window_input * gain; 17273 reduced_win /= 100; 17274 } 17275 if (gain_b != NULL) 17276 *gain_b = gain; 17277 /* 17278 * What is being returned here is a trimmed down 17279 * window values in all cases where rate_set is left 17280 * at 0. In one case we actually return the rate (lt_bw). 17281 * the "reduced_win" is returned as a slimmed down cwnd that 17282 * is then calculated by the caller into a rate when rate_set 17283 * is 0. 17284 */ 17285 return (reduced_win); 17286 } 17287 17288 static int32_t 17289 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced) 17290 { 17291 uint64_t lentim, fill_bw; 17292 17293 rack->r_via_fill_cw = 0; 17294 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) 17295 return (slot); 17296 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) 17297 return (slot); 17298 if (rack->r_ctl.rc_last_us_rtt == 0) 17299 return (slot); 17300 if (rack->rc_pace_fill_if_rttin_range && 17301 (rack->r_ctl.rc_last_us_rtt >= 17302 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { 17303 /* The rtt is huge, N * smallest, lets not fill */ 17304 return (slot); 17305 } 17306 if (rack->r_ctl.fillcw_cap && *rate_wanted >= rack->r_ctl.fillcw_cap) 17307 return (slot); 17308 /* 17309 * first lets calculate the b/w based on the last us-rtt 17310 * and the the smallest send window. 
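 * For example, a 64,000 byte window with a last measured RTT of
 * 20,000 usec yields
 *
 *	fill_bw = 64000 * HPTS_USEC_IN_SEC / 20000
 *		= 3,200,000 bytes/sec (~25.6 Mbit/s)
 *
 * (illustrative numbers only).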
17311 */ 17312 fill_bw = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); 17313 if (rack->rc_fillcw_apply_discount) { 17314 uint32_t rate_set = 0; 17315 17316 fill_bw = rack_arrive_at_discounted_rate(rack, fill_bw, &rate_set, NULL); 17317 if (rate_set) { 17318 goto at_lt_bw; 17319 } 17320 } 17321 /* Take the rwnd if its smaller */ 17322 if (fill_bw > rack->rc_tp->snd_wnd) 17323 fill_bw = rack->rc_tp->snd_wnd; 17324 /* Now lets make it into a b/w */ 17325 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 17326 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 17327 /* Adjust to any cap */ 17328 if (rack->r_ctl.fillcw_cap && fill_bw >= rack->r_ctl.fillcw_cap) 17329 fill_bw = rack->r_ctl.fillcw_cap; 17330 17331 at_lt_bw: 17332 if (rack_bw_multipler > 0) { 17333 /* 17334 * We want to limit fill-cw to the some multiplier 17335 * of the max(lt_bw, gp_est). The normal default 17336 * is 0 for off, so a sysctl has enabled it. 17337 */ 17338 uint64_t lt_bw, gp, rate; 17339 17340 gp = rack_get_gp_est(rack); 17341 lt_bw = rack_get_lt_bw(rack); 17342 if (lt_bw > gp) 17343 rate = lt_bw; 17344 else 17345 rate = gp; 17346 rate *= rack_bw_multipler; 17347 rate /= 100; 17348 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 17349 union tcp_log_stackspecific log; 17350 struct timeval tv; 17351 17352 memset(&log, 0, sizeof(log)); 17353 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 17354 log.u_bbr.flex1 = rack_bw_multipler; 17355 log.u_bbr.flex2 = len; 17356 log.u_bbr.cur_del_rate = gp; 17357 log.u_bbr.delRate = lt_bw; 17358 log.u_bbr.bw_inuse = rate; 17359 log.u_bbr.rttProp = fill_bw; 17360 log.u_bbr.flex8 = 44; 17361 tcp_log_event(rack->rc_tp, NULL, NULL, NULL, 17362 BBR_LOG_CWND, 0, 17363 0, &log, false, NULL, 17364 __func__, __LINE__, &tv); 17365 } 17366 if (fill_bw > rate) 17367 fill_bw = rate; 17368 } 17369 /* We are below the min b/w */ 17370 if (non_paced) 17371 *rate_wanted = fill_bw; 17372 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 17373 return (slot); 17374 rack->r_via_fill_cw = 1; 17375 if (rack->r_rack_hw_rate_caps && 17376 (rack->r_ctl.crte != NULL)) { 17377 uint64_t high_rate; 17378 17379 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 17380 if (fill_bw > high_rate) { 17381 /* We are capping bw at the highest rate table entry */ 17382 if (*rate_wanted > high_rate) { 17383 /* The original rate was also capped */ 17384 rack->r_via_fill_cw = 0; 17385 } 17386 rack_log_hdwr_pacing(rack, 17387 fill_bw, high_rate, __LINE__, 17388 0, 3); 17389 fill_bw = high_rate; 17390 if (capped) 17391 *capped = 1; 17392 } 17393 } else if ((rack->r_ctl.crte == NULL) && 17394 (rack->rack_hdrw_pacing == 0) && 17395 (rack->rack_hdw_pace_ena) && 17396 rack->r_rack_hw_rate_caps && 17397 (rack->rack_attempt_hdwr_pace == 0) && 17398 (rack->rc_inp->inp_route.ro_nh != NULL) && 17399 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 17400 /* 17401 * Ok we may have a first attempt that is greater than our top rate 17402 * lets check. 
17403 */ 17404 uint64_t high_rate; 17405 17406 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 17407 if (high_rate) { 17408 if (fill_bw > high_rate) { 17409 fill_bw = high_rate; 17410 if (capped) 17411 *capped = 1; 17412 } 17413 } 17414 } 17415 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) { 17416 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 17417 fill_bw, 0, 0, HYBRID_LOG_RATE_CAP, 2, NULL, __LINE__); 17418 fill_bw = rack->r_ctl.bw_rate_cap; 17419 } 17420 /* 17421 * Ok fill_bw holds our mythical b/w to fill the cwnd 17422 * in an rtt (unless it was capped), what does that 17423 * time wise equate too? 17424 */ 17425 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 17426 lentim /= fill_bw; 17427 *rate_wanted = fill_bw; 17428 if (non_paced || (lentim < slot)) { 17429 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, 17430 0, lentim, 12, __LINE__, NULL, 0); 17431 return ((int32_t)lentim); 17432 } else 17433 return (slot); 17434 } 17435 17436 static int32_t 17437 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz, int line) 17438 { 17439 uint64_t srtt; 17440 int32_t slot = 0; 17441 int can_start_hw_pacing = 1; 17442 int err; 17443 int pace_one; 17444 17445 if (rack_pace_one_seg || 17446 (rack->r_ctl.rc_user_set_min_segs == 1)) 17447 pace_one = 1; 17448 else 17449 pace_one = 0; 17450 if (rack->rc_always_pace == 0) { 17451 /* 17452 * We use the most optimistic possible cwnd/srtt for 17453 * sending calculations. This will make our 17454 * calculation anticipate getting more through 17455 * quicker then possible. But thats ok we don't want 17456 * the peer to have a gap in data sending. 17457 */ 17458 uint64_t cwnd, tr_perms = 0; 17459 int32_t reduce; 17460 17461 old_method: 17462 /* 17463 * We keep no precise pacing with the old method 17464 * instead we use the pacer to mitigate bursts. 17465 */ 17466 if (rack->r_ctl.rc_rack_min_rtt) 17467 srtt = rack->r_ctl.rc_rack_min_rtt; 17468 else 17469 srtt = max(tp->t_srtt, 1); 17470 if (rack->r_ctl.rc_rack_largest_cwnd) 17471 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 17472 else 17473 cwnd = rack->r_ctl.cwnd_to_use; 17474 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 17475 tr_perms = (cwnd * 1000) / srtt; 17476 if (tr_perms == 0) { 17477 tr_perms = ctf_fixed_maxseg(tp); 17478 } 17479 /* 17480 * Calculate how long this will take to drain, if 17481 * the calculation comes out to zero, thats ok we 17482 * will use send_a_lot to possibly spin around for 17483 * more increasing tot_len_this_send to the point 17484 * that its going to require a pace, or we hit the 17485 * cwnd. Which in that case we are just waiting for 17486 * a ACK. 17487 */ 17488 slot = len / tr_perms; 17489 /* Now do we reduce the time so we don't run dry? 
*/ 17490 if (slot && rack_slot_reduction) { 17491 reduce = (slot / rack_slot_reduction); 17492 if (reduce < slot) { 17493 slot -= reduce; 17494 } else 17495 slot = 0; 17496 } else 17497 reduce = 0; 17498 slot *= HPTS_USEC_IN_MSEC; 17499 if (rack->rc_pace_to_cwnd) { 17500 uint64_t rate_wanted = 0; 17501 17502 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); 17503 rack->rc_ack_can_sendout_data = 1; 17504 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); 17505 } else 17506 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); 17507 /*******************************************************/ 17508 /* RRS: We insert non-paced call to stats here for len */ 17509 /*******************************************************/ 17510 } else { 17511 uint64_t bw_est, res, lentim, rate_wanted; 17512 uint32_t segs, oh; 17513 int capped = 0; 17514 int prev_fill; 17515 17516 if ((rack->r_rr_config == 1) && rsm) { 17517 return (rack->r_ctl.rc_min_to); 17518 } 17519 if (rack->use_fixed_rate) { 17520 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 17521 } else if ((rack->r_ctl.init_rate == 0) && 17522 (rack->r_ctl.gp_bw == 0)) { 17523 /* no way to yet do an estimate */ 17524 bw_est = rate_wanted = 0; 17525 } else if (rack->dgp_on) { 17526 bw_est = rack_get_bw(rack); 17527 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 17528 } else { 17529 uint32_t gain, rate_set = 0; 17530 17531 rate_wanted = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); 17532 rate_wanted = rack_arrive_at_discounted_rate(rack, rate_wanted, &rate_set, &gain); 17533 if (rate_set == 0) { 17534 if (rate_wanted > rack->rc_tp->snd_wnd) 17535 rate_wanted = rack->rc_tp->snd_wnd; 17536 /* Now lets make it into a b/w */ 17537 rate_wanted *= (uint64_t)HPTS_USEC_IN_SEC; 17538 rate_wanted /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 17539 } 17540 bw_est = rate_wanted; 17541 rack_log_pacing_delay_calc(rack, rack->rc_tp->snd_cwnd, 17542 rack->r_ctl.cwnd_to_use, 17543 rate_wanted, bw_est, 17544 rack->r_ctl.rc_last_us_rtt, 17545 88, __LINE__, NULL, gain); 17546 } 17547 if (((bw_est == 0) || (rate_wanted == 0) || (rack->gp_ready == 0)) && 17548 (rack->use_fixed_rate == 0)) { 17549 /* 17550 * No way yet to make a b/w estimate or 17551 * our raise is set incorrectly. 17552 */ 17553 goto old_method; 17554 } 17555 rack_rate_cap_bw(rack, &rate_wanted, &capped); 17556 /* We need to account for all the overheads */ 17557 segs = (len + segsiz - 1) / segsiz; 17558 /* 17559 * We need the diff between 1514 bytes (e-mtu with e-hdr) 17560 * and how much data we put in each packet. Yes this 17561 * means we may be off if we are larger than 1500 bytes 17562 * or smaller. But this just makes us more conservative. 17563 */ 17564 17565 oh = (tp->t_maxseg - segsiz) + sizeof(struct tcphdr); 17566 if (rack->r_is_v6) { 17567 #ifdef INET6 17568 oh += sizeof(struct ip6_hdr); 17569 #endif 17570 } else { 17571 #ifdef INET 17572 oh += sizeof(struct ip); 17573 #endif 17574 } 17575 /* We add a fixed 14 for the ethernet header */ 17576 oh += 14; 17577 segs *= oh; 17578 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 17579 res = lentim / rate_wanted; 17580 slot = (uint32_t)res; 17581 if (rack_hw_rate_min && 17582 (rate_wanted < rack_hw_rate_min)) { 17583 can_start_hw_pacing = 0; 17584 if (rack->r_ctl.crte) { 17585 /* 17586 * Ok we need to release it, we 17587 * have fallen too low. 
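 * For example, if rack_hw_rate_min were 1,500,000 bytes/sec and
 * rate_wanted came out to 800,000 bytes/sec, hardware pacing is both
 * blocked for this send and any rate already held is handed back to
 * the card (illustrative numbers only).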
17588 */ 17589 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17590 rack->r_ctl.crte = NULL; 17591 rack->rack_attempt_hdwr_pace = 0; 17592 rack->rack_hdrw_pacing = 0; 17593 } 17594 } 17595 if (rack->r_ctl.crte && 17596 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { 17597 /* 17598 * We want more than the hardware can give us, 17599 * don't start any hw pacing. 17600 */ 17601 can_start_hw_pacing = 0; 17602 if (rack->r_rack_hw_rate_caps == 0) { 17603 /* 17604 * Ok we need to release it, we 17605 * want more than the card can give us and 17606 * no rate cap is in place. Set it up so 17607 * when we want less we can retry. 17608 */ 17609 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17610 rack->r_ctl.crte = NULL; 17611 rack->rack_attempt_hdwr_pace = 0; 17612 rack->rack_hdrw_pacing = 0; 17613 } 17614 } 17615 if ((rack->r_ctl.crte != NULL) && (rack->rc_inp->inp_snd_tag == NULL)) { 17616 /* 17617 * We lost our rate somehow, this can happen 17618 * if the interface changed underneath us. 17619 */ 17620 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17621 rack->r_ctl.crte = NULL; 17622 /* Lets re-allow attempting to setup pacing */ 17623 rack->rack_hdrw_pacing = 0; 17624 rack->rack_attempt_hdwr_pace = 0; 17625 rack_log_hdwr_pacing(rack, 17626 rate_wanted, bw_est, __LINE__, 17627 0, 6); 17628 } 17629 prev_fill = rack->r_via_fill_cw; 17630 if ((rack->rc_pace_to_cwnd) && 17631 (capped == 0) && 17632 (rack->dgp_on == 1) && 17633 (rack->use_fixed_rate == 0) && 17634 (rack->in_probe_rtt == 0) && 17635 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 17636 /* 17637 * We want to pace at our rate *or* faster to 17638 * fill the cwnd to the max if its not full. 17639 */ 17640 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); 17641 /* Re-check to make sure we are not exceeding our max b/w */ 17642 if ((rack->r_ctl.crte != NULL) && 17643 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { 17644 /* 17645 * We want more than the hardware can give us, 17646 * don't start any hw pacing. 17647 */ 17648 can_start_hw_pacing = 0; 17649 if (rack->r_rack_hw_rate_caps == 0) { 17650 /* 17651 * Ok we need to release it, we 17652 * want more than the card can give us and 17653 * no rate cap is in place. Set it up so 17654 * when we want less we can retry. 17655 */ 17656 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17657 rack->r_ctl.crte = NULL; 17658 rack->rack_attempt_hdwr_pace = 0; 17659 rack->rack_hdrw_pacing = 0; 17660 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 17661 } 17662 } 17663 } 17664 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 17665 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 17666 if ((rack->rack_hdw_pace_ena) && 17667 (can_start_hw_pacing > 0) && 17668 (rack->rack_hdrw_pacing == 0) && 17669 (rack->rack_attempt_hdwr_pace == 0)) { 17670 /* 17671 * Lets attempt to turn on hardware pacing 17672 * if we can. 
17673 */ 17674 rack->rack_attempt_hdwr_pace = 1; 17675 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 17676 rack->rc_inp->inp_route.ro_nh->nh_ifp, 17677 rate_wanted, 17678 RS_PACING_GEQ, 17679 &err, &rack->r_ctl.crte_prev_rate); 17680 if (rack->r_ctl.crte) { 17681 rack->rack_hdrw_pacing = 1; 17682 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, segsiz, 17683 pace_one, rack->r_ctl.crte, 17684 NULL, rack->r_ctl.pace_len_divisor); 17685 rack_log_hdwr_pacing(rack, 17686 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 17687 err, 0); 17688 rack->r_ctl.last_hw_bw_req = rate_wanted; 17689 } else { 17690 counter_u64_add(rack_hw_pace_init_fail, 1); 17691 } 17692 } else if (rack->rack_hdrw_pacing && 17693 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 17694 /* Do we need to adjust our rate? */ 17695 const struct tcp_hwrate_limit_table *nrte; 17696 17697 if (rack->r_up_only && 17698 (rate_wanted < rack->r_ctl.crte->rate)) { 17699 /** 17700 * We have four possible states here 17701 * having to do with the previous time 17702 * and this time. 17703 * previous | this-time 17704 * A) 0 | 0 -- fill_cw not in the picture 17705 * B) 1 | 0 -- we were doing a fill-cw but now are not 17706 * C) 1 | 1 -- all rates from fill_cw 17707 * D) 0 | 1 -- we were doing non-fill and now we are filling 17708 * 17709 * For case A, C and D we don't allow a drop. But for 17710 * case B where we now our on our steady rate we do 17711 * allow a drop. 17712 * 17713 */ 17714 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 17715 goto done_w_hdwr; 17716 } 17717 if ((rate_wanted > rack->r_ctl.crte->rate) || 17718 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 17719 if (rack_hw_rate_to_low && 17720 (bw_est < rack_hw_rate_to_low)) { 17721 /* 17722 * The pacing rate is too low for hardware, but 17723 * do allow hardware pacing to be restarted. 17724 */ 17725 rack_log_hdwr_pacing(rack, 17726 bw_est, rack->r_ctl.crte->rate, __LINE__, 17727 0, 5); 17728 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17729 rack->r_ctl.crte = NULL; 17730 rack->rack_attempt_hdwr_pace = 0; 17731 rack->rack_hdrw_pacing = 0; 17732 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 17733 goto done_w_hdwr; 17734 } 17735 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 17736 rack->rc_tp, 17737 rack->rc_inp->inp_route.ro_nh->nh_ifp, 17738 rate_wanted, 17739 RS_PACING_GEQ, 17740 &err, &rack->r_ctl.crte_prev_rate); 17741 if (nrte == NULL) { 17742 /* 17743 * Lost the rate, lets drop hardware pacing 17744 * period. 
17745 */ 17746 rack->rack_hdrw_pacing = 0; 17747 rack->r_ctl.crte = NULL; 17748 rack_log_hdwr_pacing(rack, 17749 rate_wanted, 0, __LINE__, 17750 err, 1); 17751 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 17752 counter_u64_add(rack_hw_pace_lost, 1); 17753 } else if (nrte != rack->r_ctl.crte) { 17754 rack->r_ctl.crte = nrte; 17755 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, 17756 segsiz, pace_one, rack->r_ctl.crte, 17757 NULL, rack->r_ctl.pace_len_divisor); 17758 rack_log_hdwr_pacing(rack, 17759 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 17760 err, 2); 17761 rack->r_ctl.last_hw_bw_req = rate_wanted; 17762 } 17763 } else { 17764 /* We just need to adjust the segment size */ 17765 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 17766 rack_log_hdwr_pacing(rack, 17767 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 17768 0, 4); 17769 rack->r_ctl.last_hw_bw_req = rate_wanted; 17770 } 17771 } 17772 } 17773 done_w_hdwr: 17774 if (rack_limit_time_with_srtt && 17775 (rack->use_fixed_rate == 0) && 17776 (rack->rack_hdrw_pacing == 0)) { 17777 /* 17778 * Sanity check, we do not allow the pacing delay 17779 * to be longer than the SRTT of the path. If it is 17780 * a slow path, then adding a packet should increase 17781 * the RTT and compensate for this i.e. the srtt will 17782 * be greater so the allowed pacing time will be greater. 17783 * 17784 * Note this restriction is not for where a peak rate 17785 * is set, we are doing fixed pacing or hardware pacing. 17786 */ 17787 if (rack->rc_tp->t_srtt) 17788 srtt = rack->rc_tp->t_srtt; 17789 else 17790 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 17791 if (srtt < (uint64_t)slot) { 17792 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); 17793 slot = srtt; 17794 } 17795 } 17796 /*******************************************************************/ 17797 /* RRS: We insert paced call to stats here for len and rate_wanted */ 17798 /*******************************************************************/ 17799 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); 17800 } 17801 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 17802 /* 17803 * If this rate is seeing enobufs when it 17804 * goes to send then either the nic is out 17805 * of gas or we are mis-estimating the time 17806 * somehow and not letting the queue empty 17807 * completely. Lets add to the pacing time. 17808 */ 17809 int hw_boost_delay; 17810 17811 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 17812 if (hw_boost_delay > rack_enobuf_hw_max) 17813 hw_boost_delay = rack_enobuf_hw_max; 17814 else if (hw_boost_delay < rack_enobuf_hw_min) 17815 hw_boost_delay = rack_enobuf_hw_min; 17816 slot += hw_boost_delay; 17817 } 17818 return (slot); 17819 } 17820 17821 static void 17822 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 17823 tcp_seq startseq, uint32_t sb_offset) 17824 { 17825 struct rack_sendmap *my_rsm = NULL; 17826 17827 if (tp->t_state < TCPS_ESTABLISHED) { 17828 /* 17829 * We don't start any measurements if we are 17830 * not at least established. 17831 */ 17832 return; 17833 } 17834 if (tp->t_state >= TCPS_FIN_WAIT_1) { 17835 /* 17836 * We will get no more data into the SB 17837 * this means we need to have the data available 17838 * before we start a measurement. 
17839 */ 17840 17841 if (sbavail(&tptosocket(tp)->so_snd) < 17842 max(rc_init_window(rack), 17843 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { 17844 /* Nope not enough data */ 17845 return; 17846 } 17847 } 17848 tp->t_flags |= TF_GPUTINPROG; 17849 rack->r_ctl.rc_gp_cumack_ts = 0; 17850 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 17851 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 17852 tp->gput_seq = startseq; 17853 rack->app_limited_needs_set = 0; 17854 if (rack->in_probe_rtt) 17855 rack->measure_saw_probe_rtt = 1; 17856 else if ((rack->measure_saw_probe_rtt) && 17857 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 17858 rack->measure_saw_probe_rtt = 0; 17859 if (rack->rc_gp_filled) 17860 tp->gput_ts = rack->r_ctl.last_cumack_advance; 17861 else { 17862 /* Special case initial measurement */ 17863 struct timeval tv; 17864 17865 tp->gput_ts = tcp_get_usecs(&tv); 17866 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 17867 } 17868 /* 17869 * We take a guess out into the future, 17870 * if we have no measurement and no 17871 * initial rate, we measure the first 17872 * initial-windows worth of data to 17873 * speed up getting some GP measurement and 17874 * thus start pacing. 17875 */ 17876 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 17877 rack->app_limited_needs_set = 1; 17878 tp->gput_ack = startseq + max(rc_init_window(rack), 17879 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 17880 rack_log_pacing_delay_calc(rack, 17881 tp->gput_seq, 17882 tp->gput_ack, 17883 0, 17884 tp->gput_ts, 17885 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 17886 9, 17887 __LINE__, NULL, 0); 17888 rack_tend_gp_marks(tp, rack); 17889 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 17890 return; 17891 } 17892 if (sb_offset) { 17893 /* 17894 * We are out somewhere in the sb 17895 * can we use the already outstanding data? 17896 */ 17897 17898 if (rack->r_ctl.rc_app_limited_cnt == 0) { 17899 /* 17900 * Yes first one is good and in this case 17901 * the tp->gput_ts is correctly set based on 17902 * the last ack that arrived (no need to 17903 * set things up when an ack comes in). 17904 */ 17905 my_rsm = tqhash_min(rack->r_ctl.tqh); 17906 if ((my_rsm == NULL) || 17907 (my_rsm->r_rtr_cnt != 1)) { 17908 /* retransmission? */ 17909 goto use_latest; 17910 } 17911 } else { 17912 if (rack->r_ctl.rc_first_appl == NULL) { 17913 /* 17914 * If rc_first_appl is NULL 17915 * then the cnt should be 0. 17916 * This is probably an error, maybe 17917 * a KASSERT would be approprate. 17918 */ 17919 goto use_latest; 17920 } 17921 /* 17922 * If we have a marker pointer to the last one that is 17923 * app limited we can use that, but we need to set 17924 * things up so that when it gets ack'ed we record 17925 * the ack time (if its not already acked). 17926 */ 17927 rack->app_limited_needs_set = 1; 17928 /* 17929 * We want to get to the rsm that is either 17930 * next with space i.e. over 1 MSS or the one 17931 * after that (after the app-limited). 
17932 */ 17933 my_rsm = tqhash_next(rack->r_ctl.tqh, rack->r_ctl.rc_first_appl); 17934 if (my_rsm) { 17935 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 17936 /* Have to use the next one */ 17937 my_rsm = tqhash_next(rack->r_ctl.tqh, my_rsm); 17938 else { 17939 /* Use after the first MSS of it is acked */ 17940 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 17941 goto start_set; 17942 } 17943 } 17944 if ((my_rsm == NULL) || 17945 (my_rsm->r_rtr_cnt != 1)) { 17946 /* 17947 * Either its a retransmit or 17948 * the last is the app-limited one. 17949 */ 17950 goto use_latest; 17951 } 17952 } 17953 tp->gput_seq = my_rsm->r_start; 17954 start_set: 17955 if (my_rsm->r_flags & RACK_ACKED) { 17956 /* 17957 * This one has been acked use the arrival ack time 17958 */ 17959 struct rack_sendmap *nrsm; 17960 17961 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 17962 rack->app_limited_needs_set = 0; 17963 /* 17964 * Ok in this path we need to use the r_end now 17965 * since this guy is the starting ack. 17966 */ 17967 tp->gput_seq = my_rsm->r_end; 17968 /* 17969 * We also need to adjust up the sendtime 17970 * to the send of the next data after my_rsm. 17971 */ 17972 nrsm = tqhash_next(rack->r_ctl.tqh, my_rsm); 17973 if (nrsm != NULL) 17974 my_rsm = nrsm; 17975 else { 17976 /* 17977 * The next as not been sent, thats the 17978 * case for using the latest. 17979 */ 17980 goto use_latest; 17981 } 17982 } 17983 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; 17984 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 17985 rack->r_ctl.rc_gp_cumack_ts = 0; 17986 if ((rack->r_ctl.cleared_app_ack == 1) && 17987 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.cleared_app_ack_seq))) { 17988 /* 17989 * We just cleared an application limited period 17990 * so the next seq out needs to skip the first 17991 * ack. 17992 */ 17993 rack->app_limited_needs_set = 1; 17994 rack->r_ctl.cleared_app_ack = 0; 17995 } 17996 rack_log_pacing_delay_calc(rack, 17997 tp->gput_seq, 17998 tp->gput_ack, 17999 (uintptr_t)my_rsm, 18000 tp->gput_ts, 18001 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 18002 9, 18003 __LINE__, my_rsm, 0); 18004 /* Now lets make sure all are marked as they should be */ 18005 rack_tend_gp_marks(tp, rack); 18006 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 18007 return; 18008 } 18009 18010 use_latest: 18011 /* 18012 * We don't know how long we may have been 18013 * idle or if this is the first-send. Lets 18014 * setup the flag so we will trim off 18015 * the first ack'd data so we get a true 18016 * measurement. 18017 */ 18018 rack->app_limited_needs_set = 1; 18019 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 18020 rack->r_ctl.rc_gp_cumack_ts = 0; 18021 /* Find this guy so we can pull the send time */ 18022 my_rsm = tqhash_find(rack->r_ctl.tqh, startseq); 18023 if (my_rsm) { 18024 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; 18025 if (my_rsm->r_flags & RACK_ACKED) { 18026 /* 18027 * Unlikely since its probably what was 18028 * just transmitted (but I am paranoid). 18029 */ 18030 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 18031 rack->app_limited_needs_set = 0; 18032 } 18033 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 18034 /* This also is unlikely */ 18035 tp->gput_seq = my_rsm->r_start; 18036 } 18037 } else { 18038 /* 18039 * TSNH unless we have some send-map limit, 18040 * and even at that it should not be hitting 18041 * that limit (we should have stopped sending). 
18042 */ 18043 struct timeval tv; 18044 18045 microuptime(&tv); 18046 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 18047 } 18048 rack_tend_gp_marks(tp, rack); 18049 rack_log_pacing_delay_calc(rack, 18050 tp->gput_seq, 18051 tp->gput_ack, 18052 (uintptr_t)my_rsm, 18053 tp->gput_ts, 18054 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 18055 9, __LINE__, NULL, 0); 18056 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 18057 } 18058 18059 static inline uint32_t 18060 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 18061 uint32_t avail, int32_t sb_offset) 18062 { 18063 uint32_t len; 18064 uint32_t sendwin; 18065 18066 if (tp->snd_wnd > cwnd_to_use) 18067 sendwin = cwnd_to_use; 18068 else 18069 sendwin = tp->snd_wnd; 18070 if (ctf_outstanding(tp) >= tp->snd_wnd) { 18071 /* We never want to go over our peers rcv-window */ 18072 len = 0; 18073 } else { 18074 uint32_t flight; 18075 18076 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 18077 if (flight >= sendwin) { 18078 /* 18079 * We have in flight what we are allowed by cwnd (if 18080 * it was rwnd blocking it would have hit above out 18081 * >= tp->snd_wnd). 18082 */ 18083 return (0); 18084 } 18085 len = sendwin - flight; 18086 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 18087 /* We would send too much (beyond the rwnd) */ 18088 len = tp->snd_wnd - ctf_outstanding(tp); 18089 } 18090 if ((len + sb_offset) > avail) { 18091 /* 18092 * We don't have that much in the SB, how much is 18093 * there? 18094 */ 18095 len = avail - sb_offset; 18096 } 18097 } 18098 return (len); 18099 } 18100 18101 static void 18102 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 18103 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 18104 int rsm_is_null, int optlen, int line, uint16_t mode) 18105 { 18106 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 18107 union tcp_log_stackspecific log; 18108 struct timeval tv; 18109 18110 memset(&log, 0, sizeof(log)); 18111 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18112 log.u_bbr.flex1 = error; 18113 log.u_bbr.flex2 = flags; 18114 log.u_bbr.flex3 = rsm_is_null; 18115 log.u_bbr.flex4 = ipoptlen; 18116 log.u_bbr.flex5 = tp->rcv_numsacks; 18117 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18118 log.u_bbr.flex7 = optlen; 18119 log.u_bbr.flex8 = rack->r_fsb_inited; 18120 log.u_bbr.applimited = rack->r_fast_output; 18121 log.u_bbr.bw_inuse = rack_get_bw(rack); 18122 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 18123 log.u_bbr.cwnd_gain = mode; 18124 log.u_bbr.pkts_out = orig_len; 18125 log.u_bbr.lt_epoch = len; 18126 log.u_bbr.delivered = line; 18127 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 18128 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18129 tcp_log_event(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 18130 len, &log, false, NULL, __func__, __LINE__, &tv); 18131 } 18132 } 18133 18134 18135 static struct mbuf * 18136 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 18137 struct rack_fast_send_blk *fsb, 18138 int32_t seglimit, int32_t segsize, int hw_tls) 18139 { 18140 #ifdef KERN_TLS 18141 struct ktls_session *tls, *ntls; 18142 #ifdef INVARIANTS 18143 struct mbuf *start; 18144 #endif 18145 #endif 18146 struct mbuf *m, *n, **np, *smb; 18147 struct mbuf *top; 18148 int32_t off, soff; 18149 int32_t len = *plen; 18150 int32_t fragsize; 18151 int32_t len_cp = 0; 18152 uint32_t mlen, frags; 18153 
18154 soff = off = the_off; 18155 smb = m = the_m; 18156 np = ⊤ 18157 top = NULL; 18158 #ifdef KERN_TLS 18159 if (hw_tls && (m->m_flags & M_EXTPG)) 18160 tls = m->m_epg_tls; 18161 else 18162 tls = NULL; 18163 #ifdef INVARIANTS 18164 start = m; 18165 #endif 18166 #endif 18167 while (len > 0) { 18168 if (m == NULL) { 18169 *plen = len_cp; 18170 break; 18171 } 18172 #ifdef KERN_TLS 18173 if (hw_tls) { 18174 if (m->m_flags & M_EXTPG) 18175 ntls = m->m_epg_tls; 18176 else 18177 ntls = NULL; 18178 18179 /* 18180 * Avoid mixing TLS records with handshake 18181 * data or TLS records from different 18182 * sessions. 18183 */ 18184 if (tls != ntls) { 18185 MPASS(m != start); 18186 *plen = len_cp; 18187 break; 18188 } 18189 } 18190 #endif 18191 mlen = min(len, m->m_len - off); 18192 if (seglimit) { 18193 /* 18194 * For M_EXTPG mbufs, add 3 segments 18195 * + 1 in case we are crossing page boundaries 18196 * + 2 in case the TLS hdr/trailer are used 18197 * It is cheaper to just add the segments 18198 * than it is to take the cache miss to look 18199 * at the mbuf ext_pgs state in detail. 18200 */ 18201 if (m->m_flags & M_EXTPG) { 18202 fragsize = min(segsize, PAGE_SIZE); 18203 frags = 3; 18204 } else { 18205 fragsize = segsize; 18206 frags = 0; 18207 } 18208 18209 /* Break if we really can't fit anymore. */ 18210 if ((frags + 1) >= seglimit) { 18211 *plen = len_cp; 18212 break; 18213 } 18214 18215 /* 18216 * Reduce size if you can't copy the whole 18217 * mbuf. If we can't copy the whole mbuf, also 18218 * adjust len so the loop will end after this 18219 * mbuf. 18220 */ 18221 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 18222 mlen = (seglimit - frags - 1) * fragsize; 18223 len = mlen; 18224 *plen = len_cp + len; 18225 } 18226 frags += howmany(mlen, fragsize); 18227 if (frags == 0) 18228 frags++; 18229 seglimit -= frags; 18230 KASSERT(seglimit > 0, 18231 ("%s: seglimit went too low", __func__)); 18232 } 18233 n = m_get(M_NOWAIT, m->m_type); 18234 *np = n; 18235 if (n == NULL) 18236 goto nospace; 18237 n->m_len = mlen; 18238 soff += mlen; 18239 len_cp += n->m_len; 18240 if (m->m_flags & (M_EXT | M_EXTPG)) { 18241 n->m_data = m->m_data + off; 18242 mb_dupcl(n, m); 18243 } else { 18244 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 18245 (u_int)n->m_len); 18246 } 18247 len -= n->m_len; 18248 off = 0; 18249 m = m->m_next; 18250 np = &n->m_next; 18251 if (len || (soff == smb->m_len)) { 18252 /* 18253 * We have more so we move forward or 18254 * we have consumed the entire mbuf and 18255 * len has fell to 0. 18256 */ 18257 soff = 0; 18258 smb = m; 18259 } 18260 18261 } 18262 if (fsb != NULL) { 18263 fsb->m = smb; 18264 fsb->off = soff; 18265 if (smb) { 18266 /* 18267 * Save off the size of the mbuf. We do 18268 * this so that we can recognize when it 18269 * has been trimmed by sbcut() as acks 18270 * come in. 18271 */ 18272 fsb->o_m_len = smb->m_len; 18273 fsb->o_t_len = M_TRAILINGROOM(smb); 18274 } else { 18275 /* 18276 * This is the case where the next mbuf went to NULL. This 18277 * means with this copy we have sent everything in the sb. 18278 * In theory we could clear the fast_output flag, but lets 18279 * not since its possible that we could get more added 18280 * and acks that call the extend function which would let 18281 * us send more. 
18282 */ 18283 fsb->o_m_len = 0; 18284 fsb->o_t_len = 0; 18285 } 18286 } 18287 return (top); 18288 nospace: 18289 if (top) 18290 m_freem(top); 18291 return (NULL); 18292 18293 } 18294 18295 /* 18296 * This is a copy of m_copym(), taking the TSO segment size/limit 18297 * constraints into account, and advancing the sndptr as it goes. 18298 */ 18299 static struct mbuf * 18300 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen, 18301 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff) 18302 { 18303 struct mbuf *m, *n; 18304 int32_t soff; 18305 18306 m = rack->r_ctl.fsb.m; 18307 if (M_TRAILINGROOM(m) != rack->r_ctl.fsb.o_t_len) { 18308 /* 18309 * The trailing space changed, mbufs can grow 18310 * at the tail but they can't shrink from 18311 * it, KASSERT that. Adjust the orig_m_len to 18312 * compensate for this change. 18313 */ 18314 KASSERT((rack->r_ctl.fsb.o_t_len > M_TRAILINGROOM(m)), 18315 ("mbuf:%p rack:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n", 18316 m, 18317 rack, 18318 (intmax_t)M_TRAILINGROOM(m), 18319 rack->r_ctl.fsb.o_t_len, 18320 rack->r_ctl.fsb.o_m_len, 18321 m->m_len)); 18322 rack->r_ctl.fsb.o_m_len += (rack->r_ctl.fsb.o_t_len - M_TRAILINGROOM(m)); 18323 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(m); 18324 } 18325 if (m->m_len < rack->r_ctl.fsb.o_m_len) { 18326 /* 18327 * Mbuf shrank, trimmed off the top by an ack, our 18328 * offset changes. 18329 */ 18330 KASSERT((rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len)), 18331 ("mbuf:%p len:%u rack:%p oml:%u soff:%u\n", 18332 m, m->m_len, 18333 rack, rack->r_ctl.fsb.o_m_len, 18334 rack->r_ctl.fsb.off)); 18335 18336 if (rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len- m->m_len)) 18337 rack->r_ctl.fsb.off -= (rack->r_ctl.fsb.o_m_len - m->m_len); 18338 else 18339 rack->r_ctl.fsb.off = 0; 18340 rack->r_ctl.fsb.o_m_len = m->m_len; 18341 #ifdef INVARIANTS 18342 } else if (m->m_len > rack->r_ctl.fsb.o_m_len) { 18343 panic("rack:%p m:%p m_len grew outside of t_space compensation", 18344 rack, m); 18345 #endif 18346 } 18347 soff = rack->r_ctl.fsb.off; 18348 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff)); 18349 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen)); 18350 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?", 18351 __FUNCTION__, 18352 rack, *plen, m, m->m_len)); 18353 /* Save off the right location before we copy and advance */ 18354 *s_soff = soff; 18355 *s_mb = rack->r_ctl.fsb.m; 18356 n = rack_fo_base_copym(m, soff, plen, 18357 &rack->r_ctl.fsb, 18358 seglimit, segsize, rack->r_ctl.fsb.hw_tls); 18359 return (n); 18360 } 18361 18362 /* Log the buffer level */ 18363 static void 18364 rack_log_queue_level(struct tcpcb *tp, struct tcp_rack *rack, 18365 int len, struct timeval *tv, 18366 uint32_t cts) 18367 { 18368 uint32_t p_rate = 0, p_queue = 0, err = 0; 18369 union tcp_log_stackspecific log; 18370 18371 #ifdef RATELIMIT 18372 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); 18373 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); 18374 #endif 18375 memset(&log, 0, sizeof(log)); 18376 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18377 log.u_bbr.flex1 = p_rate; 18378 log.u_bbr.flex2 = p_queue; 18379 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; 18380 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; 18381 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; 18382 log.u_bbr.flex7 = 99; 18383 log.u_bbr.flex8 = 0; 18384 log.u_bbr.pkts_out = err; 18385 log.u_bbr.delRate = rack->r_ctl.crte->rate; 18386 log.u_bbr.timeStamp = cts; 18387 
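	/*
	 * flex1/flex2 above carry the hardware pacer state: p_rate is the
	 * configured rate limit (bits/sec, see the p_rate / 8 conversion in
	 * rack_check_queue_level()) and p_queue the reported queue fill
	 * level (0 is empty, 0xffff is full).  Both stay zero when
	 * RATELIMIT is not compiled in or the query fails.
	 */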
log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
18388 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0,
18389 len, &log, false, NULL, __func__, __LINE__, tv);
18390
18391 }
18392
18393 static uint32_t
18394 rack_check_queue_level(struct tcp_rack *rack, struct tcpcb *tp,
18395 struct timeval *tv, uint32_t cts, int len, uint32_t segsiz)
18396 {
18397 uint64_t lentime = 0;
18398 #ifdef RATELIMIT
18399 uint32_t p_rate = 0, p_queue = 0, err;
18400 union tcp_log_stackspecific log;
18401 uint64_t bw;
18402
18403 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue);
18404 /* Failed or queue is zero */
18405 if (err || (p_queue == 0)) {
18406 lentime = 0;
18407 goto out;
18408 }
18409 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate);
18410 if (err) {
18411 lentime = 0;
18412 goto out;
18413 }
18414 /*
18415 * If we reach here we have some bytes in
18416 * the queue. The number returned is a value
18417 * between 0 and 0xffff where ffff is full
18418 * and 0 is empty. So how best to make this into
18419 * something usable?
18420 *
18421 * The "safer" way is to take the b/w gotten
18422 * from the query (which should be our b/w rate)
18423 * and pretend that a full send (our rc_pace_max_segs)
18424 * is outstanding. We scale it as if a full
18425 * number of our MSS-sized segments, in terms of full
18426 * ethernet segments, were outstanding.
18427 */
18428 bw = p_rate / 8;
18429 if (bw) {
18430 lentime = (rack->r_ctl.rc_pace_max_segs / segsiz);
18431 lentime *= ETHERNET_SEGMENT_SIZE;
18432 lentime *= (uint64_t)HPTS_USEC_IN_SEC;
18433 lentime /= bw;
18434 } else {
18435 /* TSNH -- KASSERT? */
18436 lentime = 0;
18437 }
18438 out:
18439 if (tcp_bblogging_on(tp)) {
18440 memset(&log, 0, sizeof(log));
18441 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
18442 log.u_bbr.flex1 = p_rate;
18443 log.u_bbr.flex2 = p_queue;
18444 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using;
18445 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs;
18446 log.u_bbr.flex6 = rack->r_ctl.crte->time_between;
18447 log.u_bbr.flex7 = 99;
18448 log.u_bbr.flex8 = 0;
18449 log.u_bbr.pkts_out = err;
18450 log.u_bbr.delRate = rack->r_ctl.crte->rate;
18451 log.u_bbr.cur_del_rate = lentime;
18452 log.u_bbr.timeStamp = cts;
18453 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
18454 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0,
18455 len, &log, false, NULL, __func__, __LINE__, tv);
18456 }
18457 #endif
18458 return ((uint32_t)lentime);
18459 }
18460
18461 static int
18462 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm,
18463 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp)
18464 {
18465 /*
18466 * Enter the fast retransmit path. We are given that a sched_pin is
18467 * in place (if accounting is compiled in) and the cycle count taken
18468 * at the entry is in the ts_val. The concept here is that the rsm
18469 * now holds the mbuf offsets and such so we can directly transmit
18470 * without a lot of overhead, the len field is already set
18471 * to prohibit us from sending too much (usually it's 1 MSS).
18472 */
18473 struct ip *ip = NULL;
18474 struct udphdr *udp = NULL;
18475 struct tcphdr *th = NULL;
18476 struct mbuf *m = NULL;
18477 struct inpcb *inp;
18478 uint8_t *cpto;
18479 struct tcp_log_buffer *lgb;
18480 #ifdef TCP_ACCOUNTING
18481 uint64_t crtsc;
18482 int cnt_thru = 1;
18483 #endif
18484 struct tcpopt to;
18485 u_char opt[TCP_MAXOLEN];
18486 uint32_t hdrlen, optlen;
18487 int32_t slot, segsiz, max_val, tso = 0, error = 0, ulen = 0;
18488 uint16_t flags;
18489 uint32_t if_hw_tsomaxsegcount = 0, startseq;
18490 uint32_t if_hw_tsomaxsegsize;
18491 int32_t ip_sendflag = IP_NO_SND_TAG_RL;
18492
18493 #ifdef INET6
18494 struct ip6_hdr *ip6 = NULL;
18495
18496 if (rack->r_is_v6) {
18497 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
18498 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
18499 } else
18500 #endif /* INET6 */
18501 {
18502 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
18503 hdrlen = sizeof(struct tcpiphdr);
18504 }
18505 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) {
18506 goto failed;
18507 }
18508 if (doing_tlp) {
18509 /* It's a TLP; add the flag, it may already be there but be sure */
18510 rsm->r_flags |= RACK_TLP;
18511 } else {
18512 /* If it was a TLP, it is not one on this retransmit */
18513 rsm->r_flags &= ~RACK_TLP;
18514 }
18515 startseq = rsm->r_start;
18516 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
18517 inp = rack->rc_inp;
18518 to.to_flags = 0;
18519 flags = tcp_outflags[tp->t_state];
18520 if (flags & (TH_SYN|TH_RST)) {
18521 goto failed;
18522 }
18523 if (rsm->r_flags & RACK_HAS_FIN) {
18524 /* We can't send a FIN here */
18525 goto failed;
18526 }
18527 if (flags & TH_FIN) {
18528 /* We never send a FIN */
18529 flags &= ~TH_FIN;
18530 }
18531 if (tp->t_flags & TF_RCVD_TSTMP) {
18532 to.to_tsval = ms_cts + tp->ts_offset;
18533 to.to_tsecr = tp->ts_recent;
18534 to.to_flags = TOF_TS;
18535 }
18536 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
18537 /* TCP-MD5 (RFC2385).
*/ 18538 if (tp->t_flags & TF_SIGNATURE) 18539 to.to_flags |= TOF_SIGNATURE; 18540 #endif 18541 optlen = tcp_addoptions(&to, opt); 18542 hdrlen += optlen; 18543 udp = rack->r_ctl.fsb.udp; 18544 if (udp) 18545 hdrlen += sizeof(struct udphdr); 18546 if (rack->r_ctl.rc_pace_max_segs) 18547 max_val = rack->r_ctl.rc_pace_max_segs; 18548 else if (rack->rc_user_set_max_segs) 18549 max_val = rack->rc_user_set_max_segs * segsiz; 18550 else 18551 max_val = len; 18552 if ((tp->t_flags & TF_TSO) && 18553 V_tcp_do_tso && 18554 (len > segsiz) && 18555 (tp->t_port == 0)) 18556 tso = 1; 18557 #ifdef INET6 18558 if (MHLEN < hdrlen + max_linkhdr) 18559 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 18560 else 18561 #endif 18562 m = m_gethdr(M_NOWAIT, MT_DATA); 18563 if (m == NULL) 18564 goto failed; 18565 m->m_data += max_linkhdr; 18566 m->m_len = hdrlen; 18567 th = rack->r_ctl.fsb.th; 18568 /* Establish the len to send */ 18569 if (len > max_val) 18570 len = max_val; 18571 if ((tso) && (len + optlen > segsiz)) { 18572 uint32_t if_hw_tsomax; 18573 int32_t max_len; 18574 18575 /* extract TSO information */ 18576 if_hw_tsomax = tp->t_tsomax; 18577 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 18578 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 18579 /* 18580 * Check if we should limit by maximum payload 18581 * length: 18582 */ 18583 if (if_hw_tsomax != 0) { 18584 /* compute maximum TSO length */ 18585 max_len = (if_hw_tsomax - hdrlen - 18586 max_linkhdr); 18587 if (max_len <= 0) { 18588 goto failed; 18589 } else if (len > max_len) { 18590 len = max_len; 18591 } 18592 } 18593 if (len <= segsiz) { 18594 /* 18595 * In case there are too many small fragments don't 18596 * use TSO: 18597 */ 18598 tso = 0; 18599 } 18600 } else { 18601 tso = 0; 18602 } 18603 if ((tso == 0) && (len > segsiz)) 18604 len = segsiz; 18605 (void)tcp_get_usecs(tv); 18606 if ((len == 0) || 18607 (len <= MHLEN - hdrlen - max_linkhdr)) { 18608 goto failed; 18609 } 18610 th->th_seq = htonl(rsm->r_start); 18611 th->th_ack = htonl(tp->rcv_nxt); 18612 /* 18613 * The PUSH bit should only be applied 18614 * if the full retransmission is made. If 18615 * we are sending less than this is the 18616 * left hand edge and should not have 18617 * the PUSH bit. 
18618 */ 18619 if ((rsm->r_flags & RACK_HAD_PUSH) && 18620 (len == (rsm->r_end - rsm->r_start))) 18621 flags |= TH_PUSH; 18622 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 18623 if (th->th_win == 0) { 18624 tp->t_sndzerowin++; 18625 tp->t_flags |= TF_RXWIN0SENT; 18626 } else 18627 tp->t_flags &= ~TF_RXWIN0SENT; 18628 if (rsm->r_flags & RACK_TLP) { 18629 /* 18630 * TLP should not count in retran count, but 18631 * in its own bin 18632 */ 18633 counter_u64_add(rack_tlp_retran, 1); 18634 counter_u64_add(rack_tlp_retran_bytes, len); 18635 } else { 18636 tp->t_sndrexmitpack++; 18637 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 18638 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 18639 } 18640 #ifdef STATS 18641 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 18642 len); 18643 #endif 18644 if (rsm->m == NULL) 18645 goto failed; 18646 if (rsm->m && 18647 ((rsm->orig_m_len != rsm->m->m_len) || 18648 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { 18649 /* Fix up the orig_m_len and possibly the mbuf offset */ 18650 rack_adjust_orig_mlen(rsm); 18651 } 18652 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 18653 if (len <= segsiz) { 18654 /* 18655 * Must have ran out of mbufs for the copy 18656 * shorten it to no longer need tso. Lets 18657 * not put on sendalot since we are low on 18658 * mbufs. 18659 */ 18660 tso = 0; 18661 } 18662 if ((m->m_next == NULL) || (len <= 0)){ 18663 goto failed; 18664 } 18665 if (udp) { 18666 if (rack->r_is_v6) 18667 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18668 else 18669 ulen = hdrlen + len - sizeof(struct ip); 18670 udp->uh_ulen = htons(ulen); 18671 } 18672 m->m_pkthdr.rcvif = (struct ifnet *)0; 18673 if (TCPS_HAVERCVDSYN(tp->t_state) && 18674 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 18675 int ect = tcp_ecn_output_established(tp, &flags, len, true); 18676 if ((tp->t_state == TCPS_SYN_RECEIVED) && 18677 (tp->t_flags2 & TF2_ECN_SND_ECE)) 18678 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 18679 #ifdef INET6 18680 if (rack->r_is_v6) { 18681 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 18682 ip6->ip6_flow |= htonl(ect << 20); 18683 } 18684 else 18685 #endif 18686 { 18687 ip->ip_tos &= ~IPTOS_ECN_MASK; 18688 ip->ip_tos |= ect; 18689 } 18690 } 18691 if (rack->r_ctl.crte != NULL) { 18692 /* See if we can send via the hw queue */ 18693 slot = rack_check_queue_level(rack, tp, tv, cts, len, segsiz); 18694 /* If there is nothing in queue (no pacing time) we can send via the hw queue */ 18695 if (slot == 0) 18696 ip_sendflag = 0; 18697 } 18698 tcp_set_flags(th, flags); 18699 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 18700 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18701 if (to.to_flags & TOF_SIGNATURE) { 18702 /* 18703 * Calculate MD5 signature and put it into the place 18704 * determined before. 18705 * NOTE: since TCP options buffer doesn't point into 18706 * mbuf's data, calculate offset and use it. 18707 */ 18708 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 18709 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 18710 /* 18711 * Do not send segment if the calculation of MD5 18712 * digest has failed. 
18713 */ 18714 goto failed; 18715 } 18716 } 18717 #endif 18718 #ifdef INET6 18719 if (rack->r_is_v6) { 18720 if (tp->t_port) { 18721 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 18722 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18723 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 18724 th->th_sum = htons(0); 18725 UDPSTAT_INC(udps_opackets); 18726 } else { 18727 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 18728 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18729 th->th_sum = in6_cksum_pseudo(ip6, 18730 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 18731 0); 18732 } 18733 } 18734 #endif 18735 #if defined(INET6) && defined(INET) 18736 else 18737 #endif 18738 #ifdef INET 18739 { 18740 if (tp->t_port) { 18741 m->m_pkthdr.csum_flags = CSUM_UDP; 18742 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18743 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 18744 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 18745 th->th_sum = htons(0); 18746 UDPSTAT_INC(udps_opackets); 18747 } else { 18748 m->m_pkthdr.csum_flags = CSUM_TCP; 18749 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18750 th->th_sum = in_pseudo(ip->ip_src.s_addr, 18751 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 18752 IPPROTO_TCP + len + optlen)); 18753 } 18754 /* IP version must be set here for ipv4/ipv6 checking later */ 18755 KASSERT(ip->ip_v == IPVERSION, 18756 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 18757 } 18758 #endif 18759 if (tso) { 18760 /* 18761 * Here we use segsiz since we have no added options besides 18762 * any standard timestamp options (no DSACKs or SACKS are sent 18763 * via either fast-path). 18764 */ 18765 KASSERT(len > segsiz, 18766 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 18767 m->m_pkthdr.csum_flags |= CSUM_TSO; 18768 m->m_pkthdr.tso_segsz = segsiz; 18769 } 18770 #ifdef INET6 18771 if (rack->r_is_v6) { 18772 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 18773 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 18774 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 18775 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18776 else 18777 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18778 } 18779 #endif 18780 #if defined(INET) && defined(INET6) 18781 else 18782 #endif 18783 #ifdef INET 18784 { 18785 ip->ip_len = htons(m->m_pkthdr.len); 18786 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 18787 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 18788 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18789 if (tp->t_port == 0 || len < V_tcp_minmss) { 18790 ip->ip_off |= htons(IP_DF); 18791 } 18792 } else { 18793 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18794 } 18795 } 18796 #endif 18797 if (doing_tlp == 0) { 18798 /* Set we retransmitted */ 18799 rack->rc_gp_saw_rec = 1; 18800 } else { 18801 /* Its a TLP set ca or ss */ 18802 if (tp->snd_cwnd > tp->snd_ssthresh) { 18803 /* Set we sent in CA */ 18804 rack->rc_gp_saw_ca = 1; 18805 } else { 18806 /* Set we sent in SS */ 18807 rack->rc_gp_saw_ss = 1; 18808 } 18809 } 18810 /* Time to copy in our header */ 18811 cpto = mtod(m, uint8_t *); 18812 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 18813 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 18814 if (optlen) { 18815 bcopy(opt, th + 1, optlen); 18816 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 18817 } else { 18818 th->th_off = sizeof(struct tcphdr) >> 2; 18819 } 18820 if (tcp_bblogging_on(rack->rc_tp)) { 18821 union tcp_log_stackspecific log; 18822 18823 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 18824 
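			/*
			 * This rsm is being resent because the peer collapsed
			 * its window over the range; note it in the BB log and
			 * bump the collapsed-window retransmit counters so the
			 * forced retransmission can be attributed correctly.
			 */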
rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 18825 counter_u64_add(rack_collapsed_win_rxt, 1); 18826 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 18827 } 18828 memset(&log, 0, sizeof(log)); 18829 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18830 if (rack->rack_no_prr) 18831 log.u_bbr.flex1 = 0; 18832 else 18833 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 18834 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 18835 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 18836 log.u_bbr.flex4 = max_val; 18837 /* Save off the early/late values */ 18838 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18839 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 18840 log.u_bbr.bw_inuse = rack_get_bw(rack); 18841 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 18842 if (doing_tlp == 0) 18843 log.u_bbr.flex8 = 1; 18844 else 18845 log.u_bbr.flex8 = 2; 18846 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 18847 log.u_bbr.flex7 = 55; 18848 log.u_bbr.pkts_out = tp->t_maxseg; 18849 log.u_bbr.timeStamp = cts; 18850 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18851 if (rsm->r_rtr_cnt > 0) { 18852 /* 18853 * When we have a retransmit we want to log the 18854 * burst at send and flight at send from before. 18855 */ 18856 log.u_bbr.flex5 = rsm->r_fas; 18857 log.u_bbr.bbr_substate = rsm->r_bas; 18858 } else { 18859 /* 18860 * This is currently unlikely until we do the 18861 * packet pair probes but I will add it for completeness. 18862 */ 18863 log.u_bbr.flex5 = log.u_bbr.inflight; 18864 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 18865 } 18866 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 18867 log.u_bbr.delivered = 0; 18868 log.u_bbr.rttProp = (uintptr_t)rsm; 18869 log.u_bbr.delRate = rsm->r_flags; 18870 log.u_bbr.delRate <<= 31; 18871 log.u_bbr.delRate |= rack->r_must_retran; 18872 log.u_bbr.delRate <<= 1; 18873 log.u_bbr.delRate |= 1; 18874 log.u_bbr.pkt_epoch = __LINE__; 18875 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 18876 len, &log, false, NULL, __func__, __LINE__, tv); 18877 } else 18878 lgb = NULL; 18879 if ((rack->r_ctl.crte != NULL) && 18880 tcp_bblogging_on(tp)) { 18881 rack_log_queue_level(tp, rack, len, tv, cts); 18882 } 18883 #ifdef INET6 18884 if (rack->r_is_v6) { 18885 error = ip6_output(m, inp->in6p_outputopts, 18886 &inp->inp_route6, 18887 ip_sendflag, NULL, NULL, inp); 18888 } 18889 else 18890 #endif 18891 #ifdef INET 18892 { 18893 error = ip_output(m, NULL, 18894 &inp->inp_route, 18895 ip_sendflag, 0, inp); 18896 } 18897 #endif 18898 m = NULL; 18899 if (lgb) { 18900 lgb->tlb_errno = error; 18901 lgb = NULL; 18902 } 18903 /* Move snd_nxt to snd_max so we don't have false retransmissions */ 18904 tp->snd_nxt = tp->snd_max; 18905 if (error) { 18906 goto failed; 18907 } else if (rack->rc_hw_nobuf && (ip_sendflag != IP_NO_SND_TAG_RL)) { 18908 rack->rc_hw_nobuf = 0; 18909 rack->r_ctl.rc_agg_delayed = 0; 18910 rack->r_early = 0; 18911 rack->r_late = 0; 18912 rack->r_ctl.rc_agg_early = 0; 18913 } 18914 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 18915 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls, segsiz); 18916 if (doing_tlp) { 18917 rack->rc_tlp_in_progress = 1; 18918 rack->r_ctl.rc_tlp_cnt_out++; 18919 } 18920 if (error == 0) { 18921 counter_u64_add(rack_total_bytes, len); 18922 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 18923 if (doing_tlp) { 18924 rack->rc_last_sent_tlp_past_cumack = 0; 18925 
rack->rc_last_sent_tlp_seq_valid = 1; 18926 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 18927 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 18928 } 18929 if (rack->r_ctl.rc_prr_sndcnt >= len) 18930 rack->r_ctl.rc_prr_sndcnt -= len; 18931 else 18932 rack->r_ctl.rc_prr_sndcnt = 0; 18933 } 18934 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 18935 rack->forced_ack = 0; /* If we send something zap the FA flag */ 18936 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 18937 rack->r_ctl.retran_during_recovery += len; 18938 { 18939 int idx; 18940 18941 idx = (len / segsiz) + 3; 18942 if (idx >= TCP_MSS_ACCT_ATIMER) 18943 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 18944 else 18945 counter_u64_add(rack_out_size[idx], 1); 18946 } 18947 if (tp->t_rtttime == 0) { 18948 tp->t_rtttime = ticks; 18949 tp->t_rtseq = startseq; 18950 KMOD_TCPSTAT_INC(tcps_segstimed); 18951 } 18952 counter_u64_add(rack_fto_rsm_send, 1); 18953 if (error && (error == ENOBUFS)) { 18954 if (rack->r_ctl.crte != NULL) { 18955 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); 18956 if (tcp_bblogging_on(rack->rc_tp)) 18957 rack_log_queue_level(tp, rack, len, tv, cts); 18958 } else 18959 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); 18960 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 18961 if (rack->rc_enobuf < 0x7f) 18962 rack->rc_enobuf++; 18963 if (slot < (10 * HPTS_USEC_IN_MSEC)) 18964 slot = 10 * HPTS_USEC_IN_MSEC; 18965 if (rack->r_ctl.crte != NULL) { 18966 counter_u64_add(rack_saw_enobuf_hw, 1); 18967 tcp_rl_log_enobuf(rack->r_ctl.crte); 18968 } 18969 counter_u64_add(rack_saw_enobuf, 1); 18970 } else { 18971 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz, __LINE__); 18972 } 18973 rack_start_hpts_timer(rack, tp, cts, slot, len, 0); 18974 #ifdef TCP_ACCOUNTING 18975 crtsc = get_cyclecount(); 18976 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18977 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 18978 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 18979 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); 18980 } 18981 sched_unpin(); 18982 #endif 18983 return (0); 18984 failed: 18985 if (m) 18986 m_free(m); 18987 return (-1); 18988 } 18989 18990 static void 18991 rack_sndbuf_autoscale(struct tcp_rack *rack) 18992 { 18993 /* 18994 * Automatic sizing of send socket buffer. Often the send buffer 18995 * size is not optimally adjusted to the actual network conditions 18996 * at hand (delay bandwidth product). Setting the buffer size too 18997 * small limits throughput on links with high bandwidth and high 18998 * delay (eg. trans-continental/oceanic links). Setting the 18999 * buffer size too big consumes too much real kernel memory, 19000 * especially with many connections on busy servers. 19001 * 19002 * The criteria to step up the send buffer one notch are: 19003 * 1. receive window of remote host is larger than send buffer 19004 * (with a fudge factor of 5/4th); 19005 * 2. send buffer is filled to 7/8th with data (so we actually 19006 * have data to make use of it); 19007 * 3. send buffer fill has not hit maximal automatic size; 19008 * 4. our send window (slow start and cogestion controlled) is 19009 * larger than sent but unacknowledged data in send buffer. 19010 * 19011 * Note that the rack version moves things much faster since 19012 * we want to avoid hitting cache lines in the rack_fast_output() 19013 * path so this is called much less often and thus moves 19014 * the SB forward by a percentage. 
19015 */ 19016 struct socket *so; 19017 struct tcpcb *tp; 19018 uint32_t sendwin, scaleup; 19019 19020 tp = rack->rc_tp; 19021 so = rack->rc_inp->inp_socket; 19022 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); 19023 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { 19024 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && 19025 sbused(&so->so_snd) >= 19026 (so->so_snd.sb_hiwat / 8 * 7) && 19027 sbused(&so->so_snd) < V_tcp_autosndbuf_max && 19028 sendwin >= (sbused(&so->so_snd) - 19029 (tp->snd_max - tp->snd_una))) { 19030 if (rack_autosndbuf_inc) 19031 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; 19032 else 19033 scaleup = V_tcp_autosndbuf_inc; 19034 if (scaleup < V_tcp_autosndbuf_inc) 19035 scaleup = V_tcp_autosndbuf_inc; 19036 scaleup += so->so_snd.sb_hiwat; 19037 if (scaleup > V_tcp_autosndbuf_max) 19038 scaleup = V_tcp_autosndbuf_max; 19039 if (!sbreserve_locked(so, SO_SND, scaleup, curthread)) 19040 so->so_snd.sb_flags &= ~SB_AUTOSIZE; 19041 } 19042 } 19043 } 19044 19045 static int 19046 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val, 19047 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long *tot_len, int *send_err, int line) 19048 { 19049 /* 19050 * Enter to do fast output. We are given that the sched_pin is 19051 * in place (if accounting is compiled in) and the cycle count taken 19052 * at entry is in place in ts_val. The idea here is that 19053 * we know how many more bytes needs to be sent (presumably either 19054 * during pacing or to fill the cwnd and that was greater than 19055 * the max-burst). We have how much to send and all the info we 19056 * need to just send. 19057 */ 19058 #ifdef INET 19059 struct ip *ip = NULL; 19060 #endif 19061 struct udphdr *udp = NULL; 19062 struct tcphdr *th = NULL; 19063 struct mbuf *m, *s_mb; 19064 struct inpcb *inp; 19065 uint8_t *cpto; 19066 struct tcp_log_buffer *lgb; 19067 #ifdef TCP_ACCOUNTING 19068 uint64_t crtsc; 19069 #endif 19070 struct tcpopt to; 19071 u_char opt[TCP_MAXOLEN]; 19072 uint32_t hdrlen, optlen; 19073 #ifdef TCP_ACCOUNTING 19074 int cnt_thru = 1; 19075 #endif 19076 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0; 19077 uint16_t flags; 19078 uint32_t s_soff; 19079 uint32_t if_hw_tsomaxsegcount = 0, startseq; 19080 uint32_t if_hw_tsomaxsegsize; 19081 uint32_t add_flag = RACK_SENT_FP; 19082 #ifdef INET6 19083 struct ip6_hdr *ip6 = NULL; 19084 19085 if (rack->r_is_v6) { 19086 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 19087 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 19088 } else 19089 #endif /* INET6 */ 19090 { 19091 #ifdef INET 19092 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 19093 hdrlen = sizeof(struct tcpiphdr); 19094 #endif 19095 } 19096 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 19097 m = NULL; 19098 goto failed; 19099 } 19100 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 19101 startseq = tp->snd_max; 19102 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 19103 inp = rack->rc_inp; 19104 len = rack->r_ctl.fsb.left_to_send; 19105 to.to_flags = 0; 19106 flags = rack->r_ctl.fsb.tcp_flags; 19107 if (tp->t_flags & TF_RCVD_TSTMP) { 19108 to.to_tsval = ms_cts + tp->ts_offset; 19109 to.to_tsecr = tp->ts_recent; 19110 to.to_flags = TOF_TS; 19111 } 19112 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 19113 /* TCP-MD5 (RFC2385). 
*/ 19114 if (tp->t_flags & TF_SIGNATURE) 19115 to.to_flags |= TOF_SIGNATURE; 19116 #endif 19117 optlen = tcp_addoptions(&to, opt); 19118 hdrlen += optlen; 19119 udp = rack->r_ctl.fsb.udp; 19120 if (udp) 19121 hdrlen += sizeof(struct udphdr); 19122 if (rack->r_ctl.rc_pace_max_segs) 19123 max_val = rack->r_ctl.rc_pace_max_segs; 19124 else if (rack->rc_user_set_max_segs) 19125 max_val = rack->rc_user_set_max_segs * segsiz; 19126 else 19127 max_val = len; 19128 if ((tp->t_flags & TF_TSO) && 19129 V_tcp_do_tso && 19130 (len > segsiz) && 19131 (tp->t_port == 0)) 19132 tso = 1; 19133 again: 19134 #ifdef INET6 19135 if (MHLEN < hdrlen + max_linkhdr) 19136 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 19137 else 19138 #endif 19139 m = m_gethdr(M_NOWAIT, MT_DATA); 19140 if (m == NULL) 19141 goto failed; 19142 m->m_data += max_linkhdr; 19143 m->m_len = hdrlen; 19144 th = rack->r_ctl.fsb.th; 19145 /* Establish the len to send */ 19146 if (len > max_val) 19147 len = max_val; 19148 if ((tso) && (len + optlen > segsiz)) { 19149 uint32_t if_hw_tsomax; 19150 int32_t max_len; 19151 19152 /* extract TSO information */ 19153 if_hw_tsomax = tp->t_tsomax; 19154 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 19155 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 19156 /* 19157 * Check if we should limit by maximum payload 19158 * length: 19159 */ 19160 if (if_hw_tsomax != 0) { 19161 /* compute maximum TSO length */ 19162 max_len = (if_hw_tsomax - hdrlen - 19163 max_linkhdr); 19164 if (max_len <= 0) { 19165 goto failed; 19166 } else if (len > max_len) { 19167 len = max_len; 19168 } 19169 } 19170 if (len <= segsiz) { 19171 /* 19172 * In case there are too many small fragments don't 19173 * use TSO: 19174 */ 19175 tso = 0; 19176 } 19177 } else { 19178 tso = 0; 19179 } 19180 if ((tso == 0) && (len > segsiz)) 19181 len = segsiz; 19182 (void)tcp_get_usecs(tv); 19183 if ((len == 0) || 19184 (len <= MHLEN - hdrlen - max_linkhdr)) { 19185 goto failed; 19186 } 19187 sb_offset = tp->snd_max - tp->snd_una; 19188 th->th_seq = htonl(tp->snd_max); 19189 th->th_ack = htonl(tp->rcv_nxt); 19190 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 19191 if (th->th_win == 0) { 19192 tp->t_sndzerowin++; 19193 tp->t_flags |= TF_RXWIN0SENT; 19194 } else 19195 tp->t_flags &= ~TF_RXWIN0SENT; 19196 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 19197 KMOD_TCPSTAT_INC(tcps_sndpack); 19198 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 19199 #ifdef STATS 19200 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 19201 len); 19202 #endif 19203 if (rack->r_ctl.fsb.m == NULL) 19204 goto failed; 19205 19206 /* s_mb and s_soff are saved for rack_log_output */ 19207 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 19208 &s_mb, &s_soff); 19209 if (len <= segsiz) { 19210 /* 19211 * Must have ran out of mbufs for the copy 19212 * shorten it to no longer need tso. Lets 19213 * not put on sendalot since we are low on 19214 * mbufs. 
19215 */ 19216 tso = 0; 19217 } 19218 if (rack->r_ctl.fsb.rfo_apply_push && 19219 (len == rack->r_ctl.fsb.left_to_send)) { 19220 flags |= TH_PUSH; 19221 add_flag |= RACK_HAD_PUSH; 19222 } 19223 if ((m->m_next == NULL) || (len <= 0)){ 19224 goto failed; 19225 } 19226 if (udp) { 19227 if (rack->r_is_v6) 19228 ulen = hdrlen + len - sizeof(struct ip6_hdr); 19229 else 19230 ulen = hdrlen + len - sizeof(struct ip); 19231 udp->uh_ulen = htons(ulen); 19232 } 19233 m->m_pkthdr.rcvif = (struct ifnet *)0; 19234 if (TCPS_HAVERCVDSYN(tp->t_state) && 19235 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 19236 int ect = tcp_ecn_output_established(tp, &flags, len, false); 19237 if ((tp->t_state == TCPS_SYN_RECEIVED) && 19238 (tp->t_flags2 & TF2_ECN_SND_ECE)) 19239 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 19240 #ifdef INET6 19241 if (rack->r_is_v6) { 19242 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 19243 ip6->ip6_flow |= htonl(ect << 20); 19244 } 19245 else 19246 #endif 19247 { 19248 #ifdef INET 19249 ip->ip_tos &= ~IPTOS_ECN_MASK; 19250 ip->ip_tos |= ect; 19251 #endif 19252 } 19253 } 19254 tcp_set_flags(th, flags); 19255 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 19256 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 19257 if (to.to_flags & TOF_SIGNATURE) { 19258 /* 19259 * Calculate MD5 signature and put it into the place 19260 * determined before. 19261 * NOTE: since TCP options buffer doesn't point into 19262 * mbuf's data, calculate offset and use it. 19263 */ 19264 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 19265 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 19266 /* 19267 * Do not send segment if the calculation of MD5 19268 * digest has failed. 19269 */ 19270 goto failed; 19271 } 19272 } 19273 #endif 19274 #ifdef INET6 19275 if (rack->r_is_v6) { 19276 if (tp->t_port) { 19277 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 19278 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 19279 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 19280 th->th_sum = htons(0); 19281 UDPSTAT_INC(udps_opackets); 19282 } else { 19283 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 19284 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 19285 th->th_sum = in6_cksum_pseudo(ip6, 19286 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 19287 0); 19288 } 19289 } 19290 #endif 19291 #if defined(INET6) && defined(INET) 19292 else 19293 #endif 19294 #ifdef INET 19295 { 19296 if (tp->t_port) { 19297 m->m_pkthdr.csum_flags = CSUM_UDP; 19298 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 19299 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 19300 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 19301 th->th_sum = htons(0); 19302 UDPSTAT_INC(udps_opackets); 19303 } else { 19304 m->m_pkthdr.csum_flags = CSUM_TCP; 19305 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 19306 th->th_sum = in_pseudo(ip->ip_src.s_addr, 19307 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 19308 IPPROTO_TCP + len + optlen)); 19309 } 19310 /* IP version must be set here for ipv4/ipv6 checking later */ 19311 KASSERT(ip->ip_v == IPVERSION, 19312 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 19313 } 19314 #endif 19315 if (tso) { 19316 /* 19317 * Here we use segsiz since we have no added options besides 19318 * any standard timestamp options (no DSACKs or SACKS are sent 19319 * via either fast-path). 
19320 */ 19321 KASSERT(len > segsiz, 19322 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 19323 m->m_pkthdr.csum_flags |= CSUM_TSO; 19324 m->m_pkthdr.tso_segsz = segsiz; 19325 } 19326 #ifdef INET6 19327 if (rack->r_is_v6) { 19328 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 19329 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 19330 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 19331 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 19332 else 19333 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 19334 } 19335 #endif 19336 #if defined(INET) && defined(INET6) 19337 else 19338 #endif 19339 #ifdef INET 19340 { 19341 ip->ip_len = htons(m->m_pkthdr.len); 19342 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 19343 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 19344 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 19345 if (tp->t_port == 0 || len < V_tcp_minmss) { 19346 ip->ip_off |= htons(IP_DF); 19347 } 19348 } else { 19349 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 19350 } 19351 } 19352 #endif 19353 if (tp->snd_cwnd > tp->snd_ssthresh) { 19354 /* Set we sent in CA */ 19355 rack->rc_gp_saw_ca = 1; 19356 } else { 19357 /* Set we sent in SS */ 19358 rack->rc_gp_saw_ss = 1; 19359 } 19360 /* Time to copy in our header */ 19361 cpto = mtod(m, uint8_t *); 19362 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 19363 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 19364 if (optlen) { 19365 bcopy(opt, th + 1, optlen); 19366 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 19367 } else { 19368 th->th_off = sizeof(struct tcphdr) >> 2; 19369 } 19370 if ((rack->r_ctl.crte != NULL) && 19371 tcp_bblogging_on(tp)) { 19372 rack_log_queue_level(tp, rack, len, tv, cts); 19373 } 19374 if (tcp_bblogging_on(rack->rc_tp)) { 19375 union tcp_log_stackspecific log; 19376 19377 memset(&log, 0, sizeof(log)); 19378 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 19379 if (rack->rack_no_prr) 19380 log.u_bbr.flex1 = 0; 19381 else 19382 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 19383 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 19384 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 19385 log.u_bbr.flex4 = max_val; 19386 /* Save off the early/late values */ 19387 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 19388 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 19389 log.u_bbr.bw_inuse = rack_get_bw(rack); 19390 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 19391 log.u_bbr.flex8 = 0; 19392 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 19393 log.u_bbr.flex7 = 44; 19394 log.u_bbr.pkts_out = tp->t_maxseg; 19395 log.u_bbr.timeStamp = cts; 19396 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 19397 log.u_bbr.flex5 = log.u_bbr.inflight; 19398 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 19399 log.u_bbr.delivered = rack->r_ctl.fsb.left_to_send; 19400 log.u_bbr.rttProp = 0; 19401 log.u_bbr.delRate = rack->r_must_retran; 19402 log.u_bbr.delRate <<= 1; 19403 log.u_bbr.pkt_epoch = line; 19404 /* For fast output no retrans so just inflight and how many mss we send */ 19405 log.u_bbr.flex5 = log.u_bbr.inflight; 19406 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 19407 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 19408 len, &log, false, NULL, __func__, __LINE__, tv); 19409 } else 19410 lgb = NULL; 19411 #ifdef INET6 19412 if (rack->r_is_v6) { 19413 error = ip6_output(m, inp->in6p_outputopts, 19414 &inp->inp_route6, 19415 0, NULL, NULL, inp); 19416 } 19417 #endif 19418 #if defined(INET) && defined(INET6) 19419 else 19420 #endif 
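	/*
	 * IPv4 counterpart of the branch above: the fully formed segment is
	 * handed straight to ip_output() using the cached route in the
	 * inpcb; any error is recorded in the BB log entry and handled
	 * after the call.
	 */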
19421 #ifdef INET 19422 { 19423 error = ip_output(m, NULL, 19424 &inp->inp_route, 19425 0, 0, inp); 19426 } 19427 #endif 19428 if (lgb) { 19429 lgb->tlb_errno = error; 19430 lgb = NULL; 19431 } 19432 if (error) { 19433 *send_err = error; 19434 m = NULL; 19435 goto failed; 19436 } else if (rack->rc_hw_nobuf) { 19437 rack->rc_hw_nobuf = 0; 19438 rack->r_ctl.rc_agg_delayed = 0; 19439 rack->r_early = 0; 19440 rack->r_late = 0; 19441 rack->r_ctl.rc_agg_early = 0; 19442 } 19443 if ((error == 0) && (rack->lt_bw_up == 0)) { 19444 /* Unlikely */ 19445 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(tv); 19446 rack->r_ctl.lt_seq = tp->snd_una; 19447 rack->lt_bw_up = 1; 19448 } else if ((error == 0) && 19449 (((tp->snd_max + len) - rack->r_ctl.lt_seq) > 0x7fffffff)) { 19450 /* 19451 * Need to record what we have since we are 19452 * approaching seq wrap. 19453 */ 19454 struct timeval tv; 19455 uint64_t tmark; 19456 19457 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); 19458 rack->r_ctl.lt_seq = tp->snd_una; 19459 tmark = tcp_get_u64_usecs(&tv); 19460 if (tmark > rack->r_ctl.lt_timemark) { 19461 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 19462 rack->r_ctl.lt_timemark = tmark; 19463 } 19464 } 19465 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 19466 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls, segsiz); 19467 if (tp->snd_una == tp->snd_max) { 19468 rack->r_ctl.rc_tlp_rxt_last_time = cts; 19469 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 19470 tp->t_acktime = ticks; 19471 } 19472 counter_u64_add(rack_total_bytes, len); 19473 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 19474 19475 rack->forced_ack = 0; /* If we send something zap the FA flag */ 19476 *tot_len += len; 19477 if ((tp->t_flags & TF_GPUTINPROG) == 0) 19478 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 19479 tp->snd_max += len; 19480 tp->snd_nxt = tp->snd_max; 19481 if (rack->rc_new_rnd_needed) { 19482 rack_new_round_starts(tp, rack, tp->snd_max); 19483 } 19484 { 19485 int idx; 19486 19487 idx = (len / segsiz) + 3; 19488 if (idx >= TCP_MSS_ACCT_ATIMER) 19489 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 19490 else 19491 counter_u64_add(rack_out_size[idx], 1); 19492 } 19493 if (len <= rack->r_ctl.fsb.left_to_send) 19494 rack->r_ctl.fsb.left_to_send -= len; 19495 else 19496 rack->r_ctl.fsb.left_to_send = 0; 19497 if (rack->r_ctl.fsb.left_to_send < segsiz) { 19498 rack->r_fast_output = 0; 19499 rack->r_ctl.fsb.left_to_send = 0; 19500 /* At the end of fast_output scale up the sb */ 19501 SOCK_SENDBUF_LOCK(rack->rc_inp->inp_socket); 19502 rack_sndbuf_autoscale(rack); 19503 SOCK_SENDBUF_UNLOCK(rack->rc_inp->inp_socket); 19504 } 19505 if (tp->t_rtttime == 0) { 19506 tp->t_rtttime = ticks; 19507 tp->t_rtseq = startseq; 19508 KMOD_TCPSTAT_INC(tcps_segstimed); 19509 } 19510 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 19511 (max_val > len) && 19512 (*tot_len < rack->r_ctl.rc_pace_max_segs) && 19513 (tso == 0)) { 19514 max_val -= len; 19515 len = segsiz; 19516 th = rack->r_ctl.fsb.th; 19517 #ifdef TCP_ACCOUNTING 19518 cnt_thru++; 19519 #endif 19520 goto again; 19521 } 19522 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 19523 counter_u64_add(rack_fto_send, 1); 19524 slot = rack_get_pacing_delay(rack, tp, *tot_len, NULL, segsiz, __LINE__); 19525 rack_start_hpts_timer(rack, tp, cts, slot, *tot_len, 0); 19526 #ifdef TCP_ACCOUNTING 19527 crtsc = get_cyclecount(); 19528 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19529 
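		/*
		 * Charge this send to the per-connection accounting buckets:
		 * cnt_thru counts how many passes the again: loop made (one
		 * packet per pass), the cycle delta is the CPU time spent
		 * since entry, and CNT_OF_MSS_OUT counts MSS-sized units
		 * sent.  As an illustration (made-up numbers), *tot_len =
		 * 4344 with segsiz = 1448 adds (4344 + 1448 - 1) / 1448 = 3
		 * to the MSS counter.
		 */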
tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 19530 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 19531 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((*tot_len + segsiz - 1) / segsiz); 19532 } 19533 sched_unpin(); 19534 #endif 19535 return (0); 19536 failed: 19537 if (m) 19538 m_free(m); 19539 rack->r_fast_output = 0; 19540 return (-1); 19541 } 19542 19543 static inline void 19544 rack_setup_fast_output(struct tcpcb *tp, struct tcp_rack *rack, 19545 struct sockbuf *sb, 19546 int len, int orig_len, int segsiz, uint32_t pace_max_seg, 19547 bool hw_tls, 19548 uint16_t flags) 19549 { 19550 rack->r_fast_output = 1; 19551 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19552 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19553 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); 19554 rack->r_ctl.fsb.tcp_flags = flags; 19555 rack->r_ctl.fsb.left_to_send = orig_len - len; 19556 if (rack->r_ctl.fsb.left_to_send < pace_max_seg) { 19557 /* Less than a full sized pace, lets not */ 19558 rack->r_fast_output = 0; 19559 return; 19560 } else { 19561 /* Round down to the nearest pace_max_seg */ 19562 rack->r_ctl.fsb.left_to_send = rounddown(rack->r_ctl.fsb.left_to_send, pace_max_seg); 19563 } 19564 if (hw_tls) 19565 rack->r_ctl.fsb.hw_tls = 1; 19566 else 19567 rack->r_ctl.fsb.hw_tls = 0; 19568 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19569 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19570 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19571 (tp->snd_max - tp->snd_una))); 19572 if (rack->r_ctl.fsb.left_to_send < segsiz) 19573 rack->r_fast_output = 0; 19574 else { 19575 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19576 rack->r_ctl.fsb.rfo_apply_push = 1; 19577 else 19578 rack->r_ctl.fsb.rfo_apply_push = 0; 19579 } 19580 } 19581 19582 static uint32_t 19583 rack_get_hpts_pacing_min_for_bw(struct tcp_rack *rack, int32_t segsiz) 19584 { 19585 uint64_t min_time; 19586 uint32_t maxlen; 19587 19588 min_time = (uint64_t)get_hpts_min_sleep_time(); 19589 maxlen = (uint32_t)((rack->r_ctl.gp_bw * min_time) / (uint64_t)HPTS_USEC_IN_SEC); 19590 maxlen = roundup(maxlen, segsiz); 19591 return (maxlen); 19592 } 19593 19594 static struct rack_sendmap * 19595 rack_check_collapsed(struct tcp_rack *rack, uint32_t cts) 19596 { 19597 struct rack_sendmap *rsm = NULL; 19598 int thresh; 19599 19600 restart: 19601 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); 19602 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) { 19603 /* Nothing, strange turn off validity */ 19604 rack->r_collapse_point_valid = 0; 19605 return (NULL); 19606 } 19607 /* Can we send it yet? */ 19608 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) { 19609 /* 19610 * Receiver window has not grown enough for 19611 * the segment to be put on the wire. 19612 */ 19613 return (NULL); 19614 } 19615 if (rsm->r_flags & RACK_ACKED) { 19616 /* 19617 * It has been sacked, lets move to the 19618 * next one if possible. 19619 */ 19620 rack->r_ctl.last_collapse_point = rsm->r_end; 19621 /* Are we done? */ 19622 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 19623 rack->r_ctl.high_collapse_point)) { 19624 rack->r_collapse_point_valid = 0; 19625 return (NULL); 19626 } 19627 goto restart; 19628 } 19629 /* Now has it been long enough ? 
*/ 19630 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts, __LINE__, 1); 19631 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) { 19632 rack_log_collapse(rack, rsm->r_start, 19633 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 19634 thresh, __LINE__, 6, rsm->r_flags, rsm); 19635 return (rsm); 19636 } 19637 /* Not enough time */ 19638 rack_log_collapse(rack, rsm->r_start, 19639 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 19640 thresh, __LINE__, 7, rsm->r_flags, rsm); 19641 return (NULL); 19642 } 19643 19644 static inline void 19645 rack_validate_sizes(struct tcp_rack *rack, int32_t *len, int32_t segsiz, uint32_t pace_max_seg) 19646 { 19647 if ((rack->full_size_rxt == 0) && 19648 (rack->shape_rxt_to_pacing_min == 0) && 19649 (*len >= segsiz)) { 19650 *len = segsiz; 19651 } else if (rack->shape_rxt_to_pacing_min && 19652 rack->gp_ready) { 19653 /* We use pacing min as shaping len req */ 19654 uint32_t maxlen; 19655 19656 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 19657 if (*len > maxlen) 19658 *len = maxlen; 19659 } else { 19660 /* 19661 * The else is full_size_rxt is on so send it all 19662 * note we do need to check this for exceeding 19663 * our max segment size due to the fact that 19664 * we do sometimes merge chunks together i.e. 19665 * we cannot just assume that we will never have 19666 * a chunk greater than pace_max_seg 19667 */ 19668 if (*len > pace_max_seg) 19669 *len = pace_max_seg; 19670 } 19671 } 19672 19673 static int 19674 rack_output(struct tcpcb *tp) 19675 { 19676 struct socket *so; 19677 uint32_t recwin; 19678 uint32_t sb_offset, s_moff = 0; 19679 int32_t len, error = 0; 19680 uint16_t flags; 19681 struct mbuf *m, *s_mb = NULL; 19682 struct mbuf *mb; 19683 uint32_t if_hw_tsomaxsegcount = 0; 19684 uint32_t if_hw_tsomaxsegsize; 19685 int32_t segsiz, minseg; 19686 long tot_len_this_send = 0; 19687 #ifdef INET 19688 struct ip *ip = NULL; 19689 #endif 19690 struct udphdr *udp = NULL; 19691 struct tcp_rack *rack; 19692 struct tcphdr *th; 19693 uint8_t pass = 0; 19694 uint8_t mark = 0; 19695 uint8_t check_done = 0; 19696 uint8_t wanted_cookie = 0; 19697 u_char opt[TCP_MAXOLEN]; 19698 unsigned ipoptlen, optlen, hdrlen, ulen=0; 19699 uint32_t rack_seq; 19700 19701 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 19702 unsigned ipsec_optlen = 0; 19703 19704 #endif 19705 int32_t idle, sendalot; 19706 uint32_t tot_idle; 19707 int32_t sub_from_prr = 0; 19708 volatile int32_t sack_rxmit; 19709 struct rack_sendmap *rsm = NULL; 19710 int32_t tso, mtu; 19711 struct tcpopt to; 19712 int32_t slot = 0; 19713 int32_t sup_rack = 0; 19714 uint32_t cts, ms_cts, delayed, early; 19715 uint32_t add_flag = RACK_SENT_SP; 19716 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */ 19717 uint8_t doing_tlp = 0; 19718 uint32_t cwnd_to_use, pace_max_seg; 19719 int32_t do_a_prefetch = 0; 19720 int32_t prefetch_rsm = 0; 19721 int32_t orig_len = 0; 19722 struct timeval tv; 19723 int32_t prefetch_so_done = 0; 19724 struct tcp_log_buffer *lgb; 19725 struct inpcb *inp = tptoinpcb(tp); 19726 struct sockbuf *sb; 19727 uint64_t ts_val = 0; 19728 #ifdef TCP_ACCOUNTING 19729 uint64_t crtsc; 19730 #endif 19731 #ifdef INET6 19732 struct ip6_hdr *ip6 = NULL; 19733 int32_t isipv6; 19734 #endif 19735 bool hpts_calling, hw_tls = false; 19736 19737 NET_EPOCH_ASSERT(); 19738 INP_WLOCK_ASSERT(inp); 19739 19740 /* setup and take the cache hits here */ 19741 rack = (struct tcp_rack *)tp->t_fb_ptr; 19742 #ifdef 
TCP_ACCOUNTING 19743 sched_pin(); 19744 ts_val = get_cyclecount(); 19745 #endif 19746 hpts_calling = !!(tp->t_flags2 & TF2_HPTS_CALLS); 19747 tp->t_flags2 &= ~TF2_HPTS_CALLS; 19748 #ifdef TCP_OFFLOAD 19749 if (tp->t_flags & TF_TOE) { 19750 #ifdef TCP_ACCOUNTING 19751 sched_unpin(); 19752 #endif 19753 return (tcp_offload_output(tp)); 19754 } 19755 #endif 19756 if (rack->rack_deferred_inited == 0) { 19757 /* 19758 * If we are the connecting socket we will 19759 * hit rack_init() when no sequence numbers 19760 * are setup. This makes it so we must defer 19761 * some initialization. Call that now. 19762 */ 19763 rack_deferred_init(tp, rack); 19764 } 19765 /* 19766 * For TFO connections in SYN_RECEIVED, only allow the initial 19767 * SYN|ACK and those sent by the retransmit timer. 19768 */ 19769 if ((tp->t_flags & TF_FASTOPEN) && 19770 (tp->t_state == TCPS_SYN_RECEIVED) && 19771 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 19772 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 19773 #ifdef TCP_ACCOUNTING 19774 sched_unpin(); 19775 #endif 19776 return (0); 19777 } 19778 #ifdef INET6 19779 if (rack->r_state) { 19780 /* Use the cache line loaded if possible */ 19781 isipv6 = rack->r_is_v6; 19782 } else { 19783 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 19784 } 19785 #endif 19786 early = 0; 19787 cts = tcp_get_usecs(&tv); 19788 ms_cts = tcp_tv_to_mssectick(&tv); 19789 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 19790 tcp_in_hpts(rack->rc_tp)) { 19791 /* 19792 * We are on the hpts for some timer but not hptsi output. 19793 * Remove from the hpts unconditionally. 19794 */ 19795 rack_timer_cancel(tp, rack, cts, __LINE__); 19796 } 19797 /* Are we pacing and late? */ 19798 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 19799 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 19800 /* We are delayed */ 19801 delayed = cts - rack->r_ctl.rc_last_output_to; 19802 } else { 19803 delayed = 0; 19804 } 19805 /* Do the timers, which may override the pacer */ 19806 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 19807 int retval; 19808 19809 retval = rack_process_timers(tp, rack, cts, hpts_calling, 19810 &doing_tlp); 19811 if (retval != 0) { 19812 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 19813 #ifdef TCP_ACCOUNTING 19814 sched_unpin(); 19815 #endif 19816 /* 19817 * If timers want tcp_drop(), then pass error out, 19818 * otherwise suppress it. 19819 */ 19820 return (retval < 0 ? retval : 0); 19821 } 19822 } 19823 if (rack->rc_in_persist) { 19824 if (tcp_in_hpts(rack->rc_tp) == 0) { 19825 /* Timer is not running */ 19826 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 19827 } 19828 #ifdef TCP_ACCOUNTING 19829 sched_unpin(); 19830 #endif 19831 return (0); 19832 } 19833 if ((rack->rc_ack_required == 1) && 19834 (rack->r_timer_override == 0)){ 19835 /* A timeout occurred and no ack has arrived */ 19836 if (tcp_in_hpts(rack->rc_tp) == 0) { 19837 /* Timer is not running */ 19838 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 19839 } 19840 #ifdef TCP_ACCOUNTING 19841 sched_unpin(); 19842 #endif 19843 return (0); 19844 } 19845 if ((rack->r_timer_override) || 19846 (rack->rc_ack_can_sendout_data) || 19847 (delayed) || 19848 (tp->t_state < TCPS_ESTABLISHED)) { 19849 rack->rc_ack_can_sendout_data = 0; 19850 if (tcp_in_hpts(rack->rc_tp)) 19851 tcp_hpts_remove(rack->rc_tp); 19852 } else if (tcp_in_hpts(rack->rc_tp)) { 19853 /* 19854 * On the hpts you can't pass even if ACKNOW is on, we will 19855 * when the hpts fires. 
19856 */ 19857 #ifdef TCP_ACCOUNTING 19858 crtsc = get_cyclecount(); 19859 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19860 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 19861 tp->tcp_cnt_counters[SND_BLOCKED]++; 19862 } 19863 sched_unpin(); 19864 #endif 19865 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 19866 return (0); 19867 } 19868 /* Finish out both pacing early and late accounting */ 19869 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 19870 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 19871 early = rack->r_ctl.rc_last_output_to - cts; 19872 } else 19873 early = 0; 19874 if (delayed && (rack->rc_always_pace == 1)) { 19875 rack->r_ctl.rc_agg_delayed += delayed; 19876 rack->r_late = 1; 19877 } else if (early && (rack->rc_always_pace == 1)) { 19878 rack->r_ctl.rc_agg_early += early; 19879 rack->r_early = 1; 19880 } else if (rack->rc_always_pace == 0) { 19881 /* Non-paced we are not late */ 19882 rack->r_ctl.rc_agg_delayed = rack->r_ctl.rc_agg_early = 0; 19883 rack->r_early = rack->r_late = 0; 19884 } 19885 /* Now that early/late accounting is done turn off the flag */ 19886 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 19887 rack->r_wanted_output = 0; 19888 rack->r_timer_override = 0; 19889 if ((tp->t_state != rack->r_state) && 19890 TCPS_HAVEESTABLISHED(tp->t_state)) { 19891 rack_set_state(tp, rack); 19892 } 19893 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 19894 minseg = segsiz; 19895 if (rack->r_ctl.rc_pace_max_segs == 0) 19896 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 19897 else 19898 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 19899 if ((rack->r_fast_output) && 19900 (doing_tlp == 0) && 19901 (tp->rcv_numsacks == 0)) { 19902 int ret; 19903 19904 error = 0; 19905 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, &tot_len_this_send, &error, __LINE__); 19906 if (ret > 0) 19907 return(ret); 19908 else if (error) { 19909 inp = rack->rc_inp; 19910 so = inp->inp_socket; 19911 sb = &so->so_snd; 19912 goto nomore; 19913 } else { 19914 /* Return == 0, if there is more we can send tot_len wise fall through and send */ 19915 if (tot_len_this_send >= pace_max_seg) 19916 return (ret); 19917 #ifdef TCP_ACCOUNTING 19918 /* We need to re-pin since fast_output un-pined */ 19919 sched_pin(); 19920 ts_val = get_cyclecount(); 19921 #endif 19922 /* Fall back out so we can send any more that may bring us to pace_max_seg */ 19923 } 19924 } 19925 inp = rack->rc_inp; 19926 /* 19927 * For TFO connections in SYN_SENT or SYN_RECEIVED, 19928 * only allow the initial SYN or SYN|ACK and those sent 19929 * by the retransmit timer. 19930 */ 19931 if ((tp->t_flags & TF_FASTOPEN) && 19932 ((tp->t_state == TCPS_SYN_RECEIVED) || 19933 (tp->t_state == TCPS_SYN_SENT)) && 19934 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 19935 (tp->t_rxtshift == 0)) { /* not a retransmit */ 19936 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 19937 #ifdef TCP_ACCOUNTING 19938 sched_unpin(); 19939 #endif 19940 return (0); 19941 } 19942 /* 19943 * Determine length of data that should be transmitted, and flags 19944 * that will be used. If there is some data or critical controls 19945 * (SYN, RST) to send, then transmit; otherwise, investigate 19946 * further. 
19947 */ 19948 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 19949 if (tp->t_idle_reduce) { 19950 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 19951 rack_cc_after_idle(rack, tp); 19952 } 19953 tp->t_flags &= ~TF_LASTIDLE; 19954 if (idle) { 19955 if (tp->t_flags & TF_MORETOCOME) { 19956 tp->t_flags |= TF_LASTIDLE; 19957 idle = 0; 19958 } 19959 } 19960 if ((tp->snd_una == tp->snd_max) && 19961 rack->r_ctl.rc_went_idle_time && 19962 (cts > rack->r_ctl.rc_went_idle_time)) { 19963 tot_idle = (cts - rack->r_ctl.rc_went_idle_time); 19964 if (tot_idle > rack_min_probertt_hold) { 19965 /* Count as a probe rtt */ 19966 if (rack->in_probe_rtt == 0) { 19967 rack->r_ctl.rc_lower_rtt_us_cts = cts; 19968 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 19969 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 19970 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 19971 } else { 19972 rack_exit_probertt(rack, cts); 19973 } 19974 } 19975 } else 19976 tot_idle = 0; 19977 if (rack_use_fsb && 19978 (rack->r_ctl.fsb.tcp_ip_hdr) && 19979 (rack->r_fsb_inited == 0) && 19980 (rack->r_state != TCPS_CLOSED)) 19981 rack_init_fsb_block(tp, rack, tcp_outflags[tp->t_state]); 19982 if (rack->rc_sendvars_notset == 1) { 19983 rack->rc_sendvars_notset = 0; 19984 /* 19985 * Make sure any TCP timers (keep-alive) is not running. 19986 */ 19987 tcp_timer_stop(tp); 19988 } 19989 if ((rack->rack_no_prr == 1) && 19990 (rack->rc_always_pace == 0)) { 19991 /* 19992 * Sanity check before sending, if we have 19993 * no-pacing enabled and prr is turned off that 19994 * is a logistics error. Correct this by turnning 19995 * prr back on. A user *must* set some form of 19996 * pacing in order to turn PRR off. We do this 19997 * in the output path so that we can avoid socket 19998 * option ordering issues that would occur if we 19999 * tried to do it while setting rack_no_prr on. 20000 */ 20001 rack->rack_no_prr = 0; 20002 } 20003 if ((rack->pcm_enabled == 1) && 20004 (rack->pcm_needed == 0) && 20005 (tot_idle > 0)) { 20006 /* 20007 * We have been idle some micro seconds. We need 20008 * to factor this in to see if a PCM is needed. 20009 */ 20010 uint32_t rtts_idle, rnds; 20011 20012 if (tp->t_srtt) 20013 rtts_idle = tot_idle / tp->t_srtt; 20014 else 20015 rtts_idle = 0; 20016 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; 20017 rack->r_ctl.pcm_idle_rounds += rtts_idle; 20018 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { 20019 rack->pcm_needed = 1; 20020 rack_log_pcm(rack, 8, rack->r_ctl.last_pcm_round, rtts_idle, rack->r_ctl.current_round ); 20021 } 20022 } 20023 again: 20024 sendalot = 0; 20025 cts = tcp_get_usecs(&tv); 20026 ms_cts = tcp_tv_to_mssectick(&tv); 20027 tso = 0; 20028 mtu = 0; 20029 if (TCPS_HAVEESTABLISHED(tp->t_state) && 20030 (rack->r_ctl.pcm_max_seg == 0)) { 20031 /* 20032 * We set in our first send so we know that the ctf_fixed_maxseg 20033 * has been fully set. If we do it in rack_init() we most likely 20034 * see 512 bytes so we end up at 5120, not desirable. 20035 */ 20036 rack->r_ctl.pcm_max_seg = rc_init_window(rack); 20037 if (rack->r_ctl.pcm_max_seg < (ctf_fixed_maxseg(tp) * 10)) { 20038 /* 20039 * Assure our initial PCM probe is at least 10 MSS. 
20040 */ 20041 rack->r_ctl.pcm_max_seg = ctf_fixed_maxseg(tp) * 10; 20042 } 20043 } 20044 if ((rack->r_ctl.pcm_max_seg != 0) && (rack->pcm_needed == 1)) { 20045 uint32_t rw_avail, cwa; 20046 20047 if (tp->snd_wnd > ctf_outstanding(tp)) 20048 rw_avail = tp->snd_wnd - ctf_outstanding(tp); 20049 else 20050 rw_avail = 0; 20051 if (tp->snd_cwnd > ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked)) 20052 cwa = tp->snd_cwnd -ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 20053 else 20054 cwa = 0; 20055 if ((cwa >= rack->r_ctl.pcm_max_seg) && 20056 (rw_avail > rack->r_ctl.pcm_max_seg)) { 20057 /* Raise up the max seg for this trip through */ 20058 pace_max_seg = rack->r_ctl.pcm_max_seg; 20059 /* Disable any fast output */ 20060 rack->r_fast_output = 0; 20061 } 20062 if (rack_verbose_logging) { 20063 rack_log_pcm(rack, 4, 20064 cwa, rack->r_ctl.pcm_max_seg, rw_avail); 20065 } 20066 } 20067 sb_offset = tp->snd_max - tp->snd_una; 20068 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 20069 flags = tcp_outflags[tp->t_state]; 20070 while (rack->rc_free_cnt < rack_free_cache) { 20071 rsm = rack_alloc(rack); 20072 if (rsm == NULL) { 20073 if (hpts_calling) 20074 /* Retry in a ms */ 20075 slot = (1 * HPTS_USEC_IN_MSEC); 20076 so = inp->inp_socket; 20077 sb = &so->so_snd; 20078 goto just_return_nolock; 20079 } 20080 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 20081 rack->rc_free_cnt++; 20082 rsm = NULL; 20083 } 20084 sack_rxmit = 0; 20085 len = 0; 20086 rsm = NULL; 20087 if (flags & TH_RST) { 20088 SOCK_SENDBUF_LOCK(inp->inp_socket); 20089 so = inp->inp_socket; 20090 sb = &so->so_snd; 20091 goto send; 20092 } 20093 if (rack->r_ctl.rc_resend) { 20094 /* Retransmit timer */ 20095 rsm = rack->r_ctl.rc_resend; 20096 rack->r_ctl.rc_resend = NULL; 20097 len = rsm->r_end - rsm->r_start; 20098 sack_rxmit = 1; 20099 sendalot = 0; 20100 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 20101 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 20102 __func__, __LINE__, 20103 rsm->r_start, tp->snd_una, tp, rack, rsm)); 20104 sb_offset = rsm->r_start - tp->snd_una; 20105 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 20106 } else if (rack->r_collapse_point_valid && 20107 ((rsm = rack_check_collapsed(rack, cts)) != NULL)) { 20108 /* 20109 * If an RSM is returned then enough time has passed 20110 * for us to retransmit it. Move up the collapse point, 20111 * since this rsm has its chance to retransmit now. 20112 */ 20113 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_RXT); 20114 rack->r_ctl.last_collapse_point = rsm->r_end; 20115 /* Are we done? 
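 * (That is, has last_collapse_point advanced to or past
 * high_collapse_point; once it has, every rsm that was marked
 * when the peer collapsed its window has had its chance to go
 * out again and the valid flag can be cleared.)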
*/ 20116 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 20117 rack->r_ctl.high_collapse_point)) 20118 rack->r_collapse_point_valid = 0; 20119 sack_rxmit = 1; 20120 /* We are not doing a TLP */ 20121 doing_tlp = 0; 20122 len = rsm->r_end - rsm->r_start; 20123 sb_offset = rsm->r_start - tp->snd_una; 20124 sendalot = 0; 20125 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 20126 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) { 20127 /* We have a retransmit that takes precedence */ 20128 if ((!IN_FASTRECOVERY(tp->t_flags)) && 20129 ((rsm->r_flags & RACK_MUST_RXT) == 0) && 20130 ((tp->t_flags & TF_WASFRECOVERY) == 0)) { 20131 /* Enter recovery if not induced by a time-out */ 20132 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 20133 } 20134 #ifdef INVARIANTS 20135 if (SEQ_LT(rsm->r_start, tp->snd_una)) { 20136 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n", 20137 tp, rack, rsm, rsm->r_start, tp->snd_una); 20138 } 20139 #endif 20140 len = rsm->r_end - rsm->r_start; 20141 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 20142 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 20143 __func__, __LINE__, 20144 rsm->r_start, tp->snd_una, tp, rack, rsm)); 20145 sb_offset = rsm->r_start - tp->snd_una; 20146 sendalot = 0; 20147 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 20148 if (len > 0) { 20149 sack_rxmit = 1; 20150 KMOD_TCPSTAT_INC(tcps_sack_rexmits); 20151 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes, 20152 min(len, segsiz)); 20153 } 20154 } else if (rack->r_ctl.rc_tlpsend) { 20155 /* Tail loss probe */ 20156 long cwin; 20157 long tlen; 20158 20159 /* 20160 * Check if we can do a TLP with a RACK'd packet 20161 * this can happen if we are not doing the rack 20162 * cheat and we skipped to a TLP and it 20163 * went off. 20164 */ 20165 rsm = rack->r_ctl.rc_tlpsend; 20166 /* We are doing a TLP make sure the flag is preent */ 20167 rsm->r_flags |= RACK_TLP; 20168 rack->r_ctl.rc_tlpsend = NULL; 20169 sack_rxmit = 1; 20170 tlen = rsm->r_end - rsm->r_start; 20171 if (tlen > segsiz) 20172 tlen = segsiz; 20173 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 20174 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 20175 __func__, __LINE__, 20176 rsm->r_start, tp->snd_una, tp, rack, rsm)); 20177 sb_offset = rsm->r_start - tp->snd_una; 20178 cwin = min(tp->snd_wnd, tlen); 20179 len = cwin; 20180 } 20181 if (rack->r_must_retran && 20182 (doing_tlp == 0) && 20183 (SEQ_GT(tp->snd_max, tp->snd_una)) && 20184 (rsm == NULL)) { 20185 /* 20186 * There are two different ways that we 20187 * can get into this block: 20188 * a) This is a non-sack connection, we had a time-out 20189 * and thus r_must_retran was set and everything 20190 * left outstanding as been marked for retransmit. 20191 * b) The MTU of the path shrank, so that everything 20192 * was marked to be retransmitted with the smaller 20193 * mtu and r_must_retran was set. 20194 * 20195 * This means that we expect the sendmap (outstanding) 20196 * to all be marked must. We can use the tmap to 20197 * look at them. 20198 * 20199 */ 20200 int sendwin, flight; 20201 20202 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 20203 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto); 20204 if (flight >= sendwin) { 20205 /* 20206 * We can't send yet. 20207 */ 20208 so = inp->inp_socket; 20209 sb = &so->so_snd; 20210 goto just_return_nolock; 20211 } 20212 /* 20213 * This is the case a/b mentioned above. All 20214 * outstanding/not-acked should be marked. 20215 * We can use the tmap to find them. 
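 *
 * In outline (a paraphrase of the code below, not additional
 * logic): take the head of rc_tmap, the transmit-time ordered
 * send map.  If it no longer carries RACK_MUST_RXT the forced
 * pass is complete and r_must_retran/rc_out_at_rto are cleared;
 * otherwise retransmit from rsm->r_start, capping the length at
 * one segsiz (or at the hardware-pacing minimum when
 * shape_rxt_to_pacing_min is set and gp_ready), unless
 * full_size_rxt allows the whole rsm to go.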
20216 */ 20217 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 20218 if (rsm == NULL) { 20219 /* TSNH */ 20220 rack->r_must_retran = 0; 20221 rack->r_ctl.rc_out_at_rto = 0; 20222 so = inp->inp_socket; 20223 sb = &so->so_snd; 20224 goto just_return_nolock; 20225 } 20226 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { 20227 /* 20228 * The first one does not have the flag, did we collapse 20229 * further up in our list? 20230 */ 20231 rack->r_must_retran = 0; 20232 rack->r_ctl.rc_out_at_rto = 0; 20233 rsm = NULL; 20234 sack_rxmit = 0; 20235 } else { 20236 sack_rxmit = 1; 20237 len = rsm->r_end - rsm->r_start; 20238 sb_offset = rsm->r_start - tp->snd_una; 20239 sendalot = 0; 20240 if ((rack->full_size_rxt == 0) && 20241 (rack->shape_rxt_to_pacing_min == 0) && 20242 (len >= segsiz)) 20243 len = segsiz; 20244 else if (rack->shape_rxt_to_pacing_min && 20245 rack->gp_ready) { 20246 /* We use pacing min as shaping len req */ 20247 uint32_t maxlen; 20248 20249 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 20250 if (len > maxlen) 20251 len = maxlen; 20252 } 20253 /* 20254 * Delay removing the flag RACK_MUST_RXT so 20255 * that the fastpath for retransmit will 20256 * work with this rsm. 20257 */ 20258 } 20259 } 20260 /* 20261 * Enforce a connection sendmap count limit if set 20262 * as long as we are not retransmiting. 20263 */ 20264 if ((rsm == NULL) && 20265 (V_tcp_map_entries_limit > 0) && 20266 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 20267 counter_u64_add(rack_to_alloc_limited, 1); 20268 if (!rack->alloc_limit_reported) { 20269 rack->alloc_limit_reported = 1; 20270 counter_u64_add(rack_alloc_limited_conns, 1); 20271 } 20272 so = inp->inp_socket; 20273 sb = &so->so_snd; 20274 goto just_return_nolock; 20275 } 20276 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 20277 /* we are retransmitting the fin */ 20278 len--; 20279 if (len) { 20280 /* 20281 * When retransmitting data do *not* include the 20282 * FIN. This could happen from a TLP probe. 
20283 */ 20284 flags &= ~TH_FIN; 20285 } 20286 } 20287 if (rsm && rack->r_fsb_inited && 20288 rack_use_rsm_rfo && 20289 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 20290 int ret; 20291 20292 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); 20293 if (ret == 0) 20294 return (0); 20295 } 20296 so = inp->inp_socket; 20297 sb = &so->so_snd; 20298 if (do_a_prefetch == 0) { 20299 kern_prefetch(sb, &do_a_prefetch); 20300 do_a_prefetch = 1; 20301 } 20302 #ifdef NETFLIX_SHARED_CWND 20303 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 20304 rack->rack_enable_scwnd) { 20305 /* We are doing cwnd sharing */ 20306 if (rack->gp_ready && 20307 (rack->rack_attempted_scwnd == 0) && 20308 (rack->r_ctl.rc_scw == NULL) && 20309 tp->t_lib) { 20310 /* The pcbid is in, lets make an attempt */ 20311 counter_u64_add(rack_try_scwnd, 1); 20312 rack->rack_attempted_scwnd = 1; 20313 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 20314 &rack->r_ctl.rc_scw_index, 20315 segsiz); 20316 } 20317 if (rack->r_ctl.rc_scw && 20318 (rack->rack_scwnd_is_idle == 1) && 20319 sbavail(&so->so_snd)) { 20320 /* we are no longer out of data */ 20321 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 20322 rack->rack_scwnd_is_idle = 0; 20323 } 20324 if (rack->r_ctl.rc_scw) { 20325 /* First lets update and get the cwnd */ 20326 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 20327 rack->r_ctl.rc_scw_index, 20328 tp->snd_cwnd, tp->snd_wnd, segsiz); 20329 } 20330 } 20331 #endif 20332 /* 20333 * Get standard flags, and add SYN or FIN if requested by 'hidden' 20334 * state flags. 20335 */ 20336 if (tp->t_flags & TF_NEEDFIN) 20337 flags |= TH_FIN; 20338 if (tp->t_flags & TF_NEEDSYN) 20339 flags |= TH_SYN; 20340 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 20341 void *end_rsm; 20342 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 20343 if (end_rsm) 20344 kern_prefetch(end_rsm, &prefetch_rsm); 20345 prefetch_rsm = 1; 20346 } 20347 SOCK_SENDBUF_LOCK(so); 20348 if ((sack_rxmit == 0) && 20349 (TCPS_HAVEESTABLISHED(tp->t_state) || 20350 (tp->t_flags & TF_FASTOPEN))) { 20351 /* 20352 * We are not retransmitting (sack_rxmit is 0) so we 20353 * are sending new data. This is always based on snd_max. 20354 * Now in theory snd_max may be equal to snd_una, if so 20355 * then nothing is outstanding and the offset would be 0. 20356 */ 20357 uint32_t avail; 20358 20359 avail = sbavail(sb); 20360 if (SEQ_GT(tp->snd_max, tp->snd_una) && avail) 20361 sb_offset = tp->snd_max - tp->snd_una; 20362 else 20363 sb_offset = 0; 20364 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { 20365 if (rack->r_ctl.rc_tlp_new_data) { 20366 /* TLP is forcing out new data */ 20367 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 20368 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 20369 } 20370 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { 20371 if (tp->snd_wnd > sb_offset) 20372 len = tp->snd_wnd - sb_offset; 20373 else 20374 len = 0; 20375 } else { 20376 len = rack->r_ctl.rc_tlp_new_data; 20377 } 20378 rack->r_ctl.rc_tlp_new_data = 0; 20379 } else { 20380 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); 20381 } 20382 if ((rack->r_ctl.crte == NULL) && 20383 IN_FASTRECOVERY(tp->t_flags) && 20384 (rack->full_size_rxt == 0) && 20385 (rack->shape_rxt_to_pacing_min == 0) && 20386 (len > segsiz)) { 20387 /* 20388 * For prr=off, we need to send only 1 MSS 20389 * at a time. 
We do this because another sack could 20390 * be arriving that causes us to send retransmits and 20391 * we don't want to be on a long pace due to a larger send 20392 * that keeps us from sending out the retransmit. 20393 */ 20394 len = segsiz; 20395 } else if (rack->shape_rxt_to_pacing_min && 20396 rack->gp_ready) { 20397 /* We use pacing min as shaping len req */ 20398 uint32_t maxlen; 20399 20400 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 20401 if (len > maxlen) 20402 len = maxlen; 20403 }/* The else is full_size_rxt is on so send it all */ 20404 } else { 20405 uint32_t outstanding; 20406 /* 20407 * We are inside of a Fast recovery episode, this 20408 * is caused by a SACK or 3 dup acks. At this point 20409 * we have sent all the retransmissions and we rely 20410 * on PRR to dictate what we will send in the form of 20411 * new data. 20412 */ 20413 20414 outstanding = tp->snd_max - tp->snd_una; 20415 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 20416 if (tp->snd_wnd > outstanding) { 20417 len = tp->snd_wnd - outstanding; 20418 /* Check to see if we have the data */ 20419 if ((sb_offset + len) > avail) { 20420 /* It does not all fit */ 20421 if (avail > sb_offset) 20422 len = avail - sb_offset; 20423 else 20424 len = 0; 20425 } 20426 } else { 20427 len = 0; 20428 } 20429 } else if (avail > sb_offset) { 20430 len = avail - sb_offset; 20431 } else { 20432 len = 0; 20433 } 20434 if (len > 0) { 20435 if (len > rack->r_ctl.rc_prr_sndcnt) { 20436 len = rack->r_ctl.rc_prr_sndcnt; 20437 } 20438 if (len > 0) { 20439 sub_from_prr = 1; 20440 } 20441 } 20442 if (len > segsiz) { 20443 /* 20444 * We should never send more than a MSS when 20445 * retransmitting or sending new data in prr 20446 * mode unless the override flag is on. Most 20447 * likely the PRR algorithm is not going to 20448 * let us send a lot as well :-) 20449 */ 20450 if (rack->r_ctl.rc_prr_sendalot == 0) { 20451 len = segsiz; 20452 } 20453 } else if (len < segsiz) { 20454 /* 20455 * Do we send any? The idea here is if the 20456 * send empty's the socket buffer we want to 20457 * do it. However if not then lets just wait 20458 * for our prr_sndcnt to get bigger. 20459 */ 20460 long leftinsb; 20461 20462 leftinsb = sbavail(sb) - sb_offset; 20463 if (leftinsb > len) { 20464 /* This send does not empty the sb */ 20465 len = 0; 20466 } 20467 } 20468 } 20469 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 20470 /* 20471 * If you have not established 20472 * and are not doing FAST OPEN 20473 * no data please. 20474 */ 20475 if ((sack_rxmit == 0) && 20476 !(tp->t_flags & TF_FASTOPEN)) { 20477 len = 0; 20478 sb_offset = 0; 20479 } 20480 } 20481 if (prefetch_so_done == 0) { 20482 kern_prefetch(so, &prefetch_so_done); 20483 prefetch_so_done = 1; 20484 } 20485 orig_len = len; 20486 /* 20487 * Lop off SYN bit if it has already been sent. However, if this is 20488 * SYN-SENT state and if segment contains data and if we don't know 20489 * that foreign host supports TAO, suppress sending segment. 20490 */ 20491 if ((flags & TH_SYN) && 20492 SEQ_GT(tp->snd_max, tp->snd_una) && 20493 ((sack_rxmit == 0) && 20494 (tp->t_rxtshift == 0))) { 20495 /* 20496 * When sending additional segments following a TFO SYN|ACK, 20497 * do not include the SYN bit. 20498 */ 20499 if ((tp->t_flags & TF_FASTOPEN) && 20500 (tp->t_state == TCPS_SYN_RECEIVED)) 20501 flags &= ~TH_SYN; 20502 } 20503 /* 20504 * Be careful not to send data and/or FIN on SYN segments. 
This 20505 * measure is needed to prevent interoperability problems with not 20506 * fully conformant TCP implementations. 20507 */ 20508 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 20509 len = 0; 20510 flags &= ~TH_FIN; 20511 } 20512 /* 20513 * On TFO sockets, ensure no data is sent in the following cases: 20514 * 20515 * - When retransmitting SYN|ACK on a passively-created socket 20516 * 20517 * - When retransmitting SYN on an actively created socket 20518 * 20519 * - When sending a zero-length cookie (cookie request) on an 20520 * actively created socket 20521 * 20522 * - When the socket is in the CLOSED state (RST is being sent) 20523 */ 20524 if ((tp->t_flags & TF_FASTOPEN) && 20525 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 20526 ((tp->t_state == TCPS_SYN_SENT) && 20527 (tp->t_tfo_client_cookie_len == 0)) || 20528 (flags & TH_RST))) { 20529 sack_rxmit = 0; 20530 len = 0; 20531 } 20532 /* Without fast-open there should never be data sent on a SYN */ 20533 if ((flags & TH_SYN) && !(tp->t_flags & TF_FASTOPEN)) { 20534 len = 0; 20535 } 20536 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 20537 /* We only send 1 MSS if we have a DSACK block */ 20538 add_flag |= RACK_SENT_W_DSACK; 20539 len = segsiz; 20540 } 20541 if (len <= 0) { 20542 /* 20543 * We have nothing to send, or the window shrank, or 20544 * is closed, do we need to go into persists? 20545 */ 20546 len = 0; 20547 if ((tp->snd_wnd == 0) && 20548 (TCPS_HAVEESTABLISHED(tp->t_state)) && 20549 (tp->snd_una == tp->snd_max) && 20550 (sb_offset < (int)sbavail(sb))) { 20551 rack_enter_persist(tp, rack, cts, tp->snd_una); 20552 } 20553 } else if ((rsm == NULL) && 20554 (doing_tlp == 0) && 20555 (len < pace_max_seg)) { 20556 /* 20557 * We are not sending a maximum sized segment for 20558 * some reason. Should we not send anything (think 20559 * sws or persists)? 20560 */ 20561 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 20562 (TCPS_HAVEESTABLISHED(tp->t_state)) && 20563 (len < minseg) && 20564 (len < (int)(sbavail(sb) - sb_offset))) { 20565 /* 20566 * Here the rwnd is less than 20567 * the minimum pacing size, this is not a retransmit, 20568 * we are established and 20569 * the send is not the last in the socket buffer 20570 * we send nothing, and we may enter persists 20571 * if nothing is outstanding. 20572 */ 20573 len = 0; 20574 if (tp->snd_max == tp->snd_una) { 20575 /* 20576 * Nothing out we can 20577 * go into persists. 20578 */ 20579 rack_enter_persist(tp, rack, cts, tp->snd_una); 20580 } 20581 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 20582 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 20583 (len < (int)(sbavail(sb) - sb_offset)) && 20584 (len < minseg)) { 20585 /* 20586 * Here we are not retransmitting, and 20587 * the cwnd is not so small that we could 20588 * not send at least a min size (rxt timer 20589 * not having gone off), We have 2 segments or 20590 * more already in flight, its not the tail end 20591 * of the socket buffer and the cwnd is blocking 20592 * us from sending out a minimum pacing segment size. 20593 * Lets not send anything. 20594 */ 20595 len = 0; 20596 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 20597 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 20598 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 20599 (len < (int)(sbavail(sb) - sb_offset)) && 20600 (TCPS_HAVEESTABLISHED(tp->t_state))) { 20601 /* 20602 * Here we have a send window but we have 20603 * filled it up and we can't send another pacing segment. 
20604 * We also have in flight more than 2 segments 20605 * and we are not completing the sb i.e. we allow 20606 * the last bytes of the sb to go out even if 20607 * its not a full pacing segment. 20608 */ 20609 len = 0; 20610 } else if ((rack->r_ctl.crte != NULL) && 20611 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && 20612 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) && 20613 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && 20614 (len < (int)(sbavail(sb) - sb_offset))) { 20615 /* 20616 * Here we are doing hardware pacing, this is not a TLP, 20617 * we are not sending a pace max segment size, there is rwnd 20618 * room to send at least N pace_max_seg, the cwnd is greater 20619 * than or equal to a full pacing segments plus 4 mss and we have 2 or 20620 * more segments in flight and its not the tail of the socket buffer. 20621 * 20622 * We don't want to send instead we need to get more ack's in to 20623 * allow us to send a full pacing segment. Normally, if we are pacing 20624 * about the right speed, we should have finished our pacing 20625 * send as most of the acks have come back if we are at the 20626 * right rate. This is a bit fuzzy since return path delay 20627 * can delay the acks, which is why we want to make sure we 20628 * have cwnd space to have a bit more than a max pace segments in flight. 20629 * 20630 * If we have not gotten our acks back we are pacing at too high a 20631 * rate delaying will not hurt and will bring our GP estimate down by 20632 * injecting the delay. If we don't do this we will send 20633 * 2 MSS out in response to the acks being clocked in which 20634 * defeats the point of hw-pacing (i.e. to help us get 20635 * larger TSO's out). 20636 */ 20637 len = 0; 20638 } 20639 20640 } 20641 /* len will be >= 0 after this point. */ 20642 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 20643 rack_sndbuf_autoscale(rack); 20644 /* 20645 * Decide if we can use TCP Segmentation Offloading (if supported by 20646 * hardware). 20647 * 20648 * TSO may only be used if we are in a pure bulk sending state. The 20649 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 20650 * options prevent using TSO. With TSO the TCP header is the same 20651 * (except for the sequence number) for all generated packets. This 20652 * makes it impossible to transmit any options which vary per 20653 * generated segment or packet. 20654 * 20655 * IPv4 handling has a clear separation of ip options and ip header 20656 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 20657 * the right thing below to provide length of just ip options and thus 20658 * checking for ipoptlen is enough to decide if ip options are present. 20659 */ 20660 ipoptlen = 0; 20661 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 20662 /* 20663 * Pre-calculate here as we save another lookup into the darknesses 20664 * of IPsec that way and can actually decide if TSO is ok. 
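 * The result is folded into ipoptlen just below, and a non-zero
 * ipoptlen is one of the conditions that disables TSO for this
 * send (alongside UDP tunneling, TF_SIGNATURE and SACK
 * retransmits).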
20665 */ 20666 #ifdef INET6 20667 if (isipv6 && IPSEC_ENABLED(ipv6)) 20668 ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp); 20669 #ifdef INET 20670 else 20671 #endif 20672 #endif /* INET6 */ 20673 #ifdef INET 20674 if (IPSEC_ENABLED(ipv4)) 20675 ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp); 20676 #endif /* INET */ 20677 #endif 20678 20679 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 20680 ipoptlen += ipsec_optlen; 20681 #endif 20682 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && 20683 (tp->t_port == 0) && 20684 ((tp->t_flags & TF_SIGNATURE) == 0) && 20685 sack_rxmit == 0 && 20686 ipoptlen == 0) 20687 tso = 1; 20688 { 20689 uint32_t outstanding __unused; 20690 20691 outstanding = tp->snd_max - tp->snd_una; 20692 if (tp->t_flags & TF_SENTFIN) { 20693 /* 20694 * If we sent a fin, snd_max is 1 higher than 20695 * snd_una 20696 */ 20697 outstanding--; 20698 } 20699 if (sack_rxmit) { 20700 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 20701 flags &= ~TH_FIN; 20702 } 20703 } 20704 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), 20705 (long)TCP_MAXWIN << tp->rcv_scale); 20706 20707 /* 20708 * Sender silly window avoidance. We transmit under the following 20709 * conditions when len is non-zero: 20710 * 20711 * - We have a full segment (or more with TSO) - This is the last 20712 * buffer in a write()/send() and we are either idle or running 20713 * NODELAY - we've timed out (e.g. persist timer) - we have more 20714 * then 1/2 the maximum send window's worth of data (receiver may be 20715 * limited the window size) - we need to retransmit 20716 */ 20717 if (len) { 20718 if (len >= segsiz) { 20719 goto send; 20720 } 20721 /* 20722 * NOTE! on localhost connections an 'ack' from the remote 20723 * end may occur synchronously with the output and cause us 20724 * to flush a buffer queued with moretocome. XXX 20725 * 20726 */ 20727 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 20728 (idle || (tp->t_flags & TF_NODELAY)) && 20729 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 20730 (tp->t_flags & TF_NOPUSH) == 0) { 20731 pass = 2; 20732 goto send; 20733 } 20734 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 20735 pass = 22; 20736 goto send; 20737 } 20738 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 20739 pass = 4; 20740 goto send; 20741 } 20742 if (sack_rxmit) { 20743 pass = 6; 20744 goto send; 20745 } 20746 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && 20747 (ctf_outstanding(tp) < (segsiz * 2))) { 20748 /* 20749 * We have less than two MSS outstanding (delayed ack) 20750 * and our rwnd will not let us send a full sized 20751 * MSS. Lets go ahead and let this small segment 20752 * out because we want to try to have at least two 20753 * packets inflight to not be caught by delayed ack. 20754 */ 20755 pass = 12; 20756 goto send; 20757 } 20758 } 20759 /* 20760 * Sending of standalone window updates. 20761 * 20762 * Window updates are important when we close our window due to a 20763 * full socket buffer and are opening it again after the application 20764 * reads data from it. Once the window has opened again and the 20765 * remote end starts to send again the ACK clock takes over and 20766 * provides the most current window information. 20767 * 20768 * We must avoid the silly window syndrome whereas every read from 20769 * the receive buffer, no matter how small, causes a window update 20770 * to be sent. 
We also should avoid sending a flurry of window 20771 * updates when the socket buffer had queued a lot of data and the 20772 * application is doing small reads. 20773 * 20774 * Prevent a flurry of pointless window updates by only sending an 20775 * update when we can increase the advertized window by more than 20776 * 1/4th of the socket buffer capacity. When the buffer is getting 20777 * full or is very small be more aggressive and send an update 20778 * whenever we can increase by two mss sized segments. In all other 20779 * situations the ACK's to new incoming data will carry further 20780 * window increases. 20781 * 20782 * Don't send an independent window update if a delayed ACK is 20783 * pending (it will get piggy-backed on it) or the remote side 20784 * already has done a half-close and won't send more data. Skip 20785 * this if the connection is in T/TCP half-open state. 20786 */ 20787 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 20788 !(tp->t_flags & TF_DELACK) && 20789 !TCPS_HAVERCVDFIN(tp->t_state)) { 20790 /* 20791 * "adv" is the amount we could increase the window, taking 20792 * into account that we are limited by TCP_MAXWIN << 20793 * tp->rcv_scale. 20794 */ 20795 int32_t adv; 20796 int oldwin; 20797 20798 adv = recwin; 20799 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 20800 oldwin = (tp->rcv_adv - tp->rcv_nxt); 20801 if (adv > oldwin) 20802 adv -= oldwin; 20803 else { 20804 /* We can't increase the window */ 20805 adv = 0; 20806 } 20807 } else 20808 oldwin = 0; 20809 20810 /* 20811 * If the new window size ends up being the same as or less 20812 * than the old size when it is scaled, then don't force 20813 * a window update. 20814 */ 20815 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 20816 goto dontupdate; 20817 20818 if (adv >= (int32_t)(2 * segsiz) && 20819 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 20820 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 20821 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 20822 pass = 7; 20823 goto send; 20824 } 20825 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 20826 pass = 23; 20827 goto send; 20828 } 20829 } 20830 dontupdate: 20831 20832 /* 20833 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 20834 * is also a catch-all for the retransmit timer timeout case. 20835 */ 20836 if (tp->t_flags & TF_ACKNOW) { 20837 pass = 8; 20838 goto send; 20839 } 20840 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 20841 pass = 9; 20842 goto send; 20843 } 20844 /* 20845 * If our state indicates that FIN should be sent and we have not 20846 * yet done so, then we need to send. 20847 */ 20848 if ((flags & TH_FIN) && 20849 (tp->snd_max == tp->snd_una)) { 20850 pass = 11; 20851 goto send; 20852 } 20853 /* 20854 * No reason to send a segment, just return. 20855 */ 20856 just_return: 20857 SOCK_SENDBUF_UNLOCK(so); 20858 just_return_nolock: 20859 { 20860 int app_limited = CTF_JR_SENT_DATA; 20861 20862 if ((tp->t_flags & TF_FASTOPEN) == 0 && 20863 (flags & TH_FIN) && 20864 (len == 0) && 20865 (sbused(sb) == (tp->snd_max - tp->snd_una)) && 20866 ((tp->snd_max - tp->snd_una) <= segsiz)) { 20867 /* 20868 * Ok less than or right at a MSS is 20869 * outstanding. The original FreeBSD stack would 20870 * have sent a FIN, which can speed things up for 20871 * a transactional application doing a MSG_WAITALL. 20872 * To speed things up since we do *not* send a FIN 20873 * if data is outstanding, we send a "challenge ack". 
20874 * The idea behind that is instead of having to have 20875 * the peer wait for the delayed-ack timer to run off 20876 * we send an ack that makes the peer send us an ack. 20877 */ 20878 rack_send_ack_challange(rack); 20879 } 20880 if (tot_len_this_send > 0) { 20881 rack->r_ctl.fsb.recwin = recwin; 20882 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz, __LINE__); 20883 if ((error == 0) && 20884 rack_use_rfo && 20885 ((flags & (TH_SYN|TH_FIN)) == 0) && 20886 (ipoptlen == 0) && 20887 rack->r_fsb_inited && 20888 TCPS_HAVEESTABLISHED(tp->t_state) && 20889 ((IN_RECOVERY(tp->t_flags)) == 0) && 20890 (doing_tlp == 0) && 20891 (rack->r_must_retran == 0) && 20892 ((tp->t_flags & TF_NEEDFIN) == 0) && 20893 (len > 0) && (orig_len > 0) && 20894 (orig_len > len) && 20895 ((orig_len - len) >= segsiz) && 20896 ((optlen == 0) || 20897 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 20898 /* We can send at least one more MSS using our fsb */ 20899 rack_setup_fast_output(tp, rack, sb, len, orig_len, 20900 segsiz, pace_max_seg, hw_tls, flags); 20901 } else 20902 rack->r_fast_output = 0; 20903 rack_log_fsb(rack, tp, so, flags, 20904 ipoptlen, orig_len, len, 0, 20905 1, optlen, __LINE__, 1); 20906 /* Assure when we leave that snd_nxt will point to top */ 20907 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 20908 tp->snd_nxt = tp->snd_max; 20909 } else { 20910 int end_window = 0; 20911 uint32_t seq = tp->gput_ack; 20912 20913 rsm = tqhash_max(rack->r_ctl.tqh); 20914 if (rsm) { 20915 /* 20916 * Mark the last sent that we just-returned (hinting 20917 * that delayed ack may play a role in any rtt measurement). 20918 */ 20919 rsm->r_just_ret = 1; 20920 } 20921 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 20922 rack->r_ctl.rc_agg_delayed = 0; 20923 rack->r_early = 0; 20924 rack->r_late = 0; 20925 rack->r_ctl.rc_agg_early = 0; 20926 if ((ctf_outstanding(tp) + 20927 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), 20928 minseg)) >= tp->snd_wnd) { 20929 /* We are limited by the rwnd */ 20930 app_limited = CTF_JR_RWND_LIMITED; 20931 if (IN_FASTRECOVERY(tp->t_flags)) 20932 rack->r_ctl.rc_prr_sndcnt = 0; 20933 } else if (ctf_outstanding(tp) >= sbavail(sb)) { 20934 /* We are limited by whats available -- app limited */ 20935 app_limited = CTF_JR_APP_LIMITED; 20936 if (IN_FASTRECOVERY(tp->t_flags)) 20937 rack->r_ctl.rc_prr_sndcnt = 0; 20938 } else if ((idle == 0) && 20939 ((tp->t_flags & TF_NODELAY) == 0) && 20940 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 20941 (len < segsiz)) { 20942 /* 20943 * No delay is not on and the 20944 * user is sending less than 1MSS. This 20945 * brings out SWS avoidance so we 20946 * don't send. Another app-limited case. 20947 */ 20948 app_limited = CTF_JR_APP_LIMITED; 20949 } else if (tp->t_flags & TF_NOPUSH) { 20950 /* 20951 * The user has requested no push of 20952 * the last segment and we are 20953 * at the last segment. Another app 20954 * limited case. 20955 */ 20956 app_limited = CTF_JR_APP_LIMITED; 20957 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) { 20958 /* Its the cwnd */ 20959 app_limited = CTF_JR_CWND_LIMITED; 20960 } else if (IN_FASTRECOVERY(tp->t_flags) && 20961 (rack->rack_no_prr == 0) && 20962 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { 20963 app_limited = CTF_JR_PRR; 20964 } else { 20965 /* Now why here are we not sending? 
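 * None of the known limiting reasons matched, so we fall into
 * CTF_JR_ASSESSING below; under INVARIANTS (with the NOW guard
 * compiled in) this would panic, otherwise it is only logged so
 * the missed case can be investigated.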
*/ 20966 #ifdef NOW 20967 #ifdef INVARIANTS 20968 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use); 20969 #endif 20970 #endif 20971 app_limited = CTF_JR_ASSESSING; 20972 } 20973 /* 20974 * App limited in some fashion, for our pacing GP 20975 * measurements we don't want any gap (even cwnd). 20976 * Close down the measurement window. 20977 */ 20978 if (rack_cwnd_block_ends_measure && 20979 ((app_limited == CTF_JR_CWND_LIMITED) || 20980 (app_limited == CTF_JR_PRR))) { 20981 /* 20982 * The reason we are not sending is 20983 * the cwnd (or prr). We have been configured 20984 * to end the measurement window in 20985 * this case. 20986 */ 20987 end_window = 1; 20988 } else if (rack_rwnd_block_ends_measure && 20989 (app_limited == CTF_JR_RWND_LIMITED)) { 20990 /* 20991 * We are rwnd limited and have been 20992 * configured to end the measurement 20993 * window in this case. 20994 */ 20995 end_window = 1; 20996 } else if (app_limited == CTF_JR_APP_LIMITED) { 20997 /* 20998 * A true application limited period, we have 20999 * ran out of data. 21000 */ 21001 end_window = 1; 21002 } else if (app_limited == CTF_JR_ASSESSING) { 21003 /* 21004 * In the assessing case we hit the end of 21005 * the if/else and had no known reason 21006 * This will panic us under invariants.. 21007 * 21008 * If we get this out in logs we need to 21009 * investagate which reason we missed. 21010 */ 21011 end_window = 1; 21012 } 21013 if (end_window) { 21014 uint8_t log = 0; 21015 21016 /* Adjust the Gput measurement */ 21017 if ((tp->t_flags & TF_GPUTINPROG) && 21018 SEQ_GT(tp->gput_ack, tp->snd_max)) { 21019 tp->gput_ack = tp->snd_max; 21020 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 21021 /* 21022 * There is not enough to measure. 21023 */ 21024 tp->t_flags &= ~TF_GPUTINPROG; 21025 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 21026 rack->r_ctl.rc_gp_srtt /*flex1*/, 21027 tp->gput_seq, 21028 0, 0, 18, __LINE__, NULL, 0); 21029 } else 21030 log = 1; 21031 } 21032 /* Mark the last packet as app limited */ 21033 rsm = tqhash_max(rack->r_ctl.tqh); 21034 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 21035 if (rack->r_ctl.rc_app_limited_cnt == 0) 21036 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 21037 else { 21038 /* 21039 * Go out to the end app limited and mark 21040 * this new one as next and move the end_appl up 21041 * to this guy. 
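 * rc_first_appl .. rc_end_appl therefore form a chain of
 * app-limited sendmap entries linked through r_nseq_appl (each
 * entry's r_nseq_appl holds the r_start of the next app-limited
 * entry), and rc_app_limited_cnt counts the entries in that
 * chain.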
21042 */ 21043 if (rack->r_ctl.rc_end_appl) 21044 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 21045 rack->r_ctl.rc_end_appl = rsm; 21046 } 21047 rsm->r_flags |= RACK_APP_LIMITED; 21048 rack->r_ctl.rc_app_limited_cnt++; 21049 } 21050 if (log) 21051 rack_log_pacing_delay_calc(rack, 21052 rack->r_ctl.rc_app_limited_cnt, seq, 21053 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); 21054 } 21055 } 21056 /* Check if we need to go into persists or not */ 21057 if ((tp->snd_max == tp->snd_una) && 21058 TCPS_HAVEESTABLISHED(tp->t_state) && 21059 sbavail(sb) && 21060 (sbavail(sb) > tp->snd_wnd) && 21061 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 21062 /* Yes lets make sure to move to persist before timer-start */ 21063 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); 21064 } 21065 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 21066 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use); 21067 } 21068 #ifdef NETFLIX_SHARED_CWND 21069 if ((sbavail(sb) == 0) && 21070 rack->r_ctl.rc_scw) { 21071 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 21072 rack->rack_scwnd_is_idle = 1; 21073 } 21074 #endif 21075 #ifdef TCP_ACCOUNTING 21076 if (tot_len_this_send > 0) { 21077 crtsc = get_cyclecount(); 21078 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21079 tp->tcp_cnt_counters[SND_OUT_DATA]++; 21080 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 21081 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 21082 } 21083 } else { 21084 crtsc = get_cyclecount(); 21085 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21086 tp->tcp_cnt_counters[SND_LIMITED]++; 21087 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 21088 } 21089 } 21090 sched_unpin(); 21091 #endif 21092 return (0); 21093 21094 send: 21095 if ((rack->r_ctl.crte != NULL) && 21096 (rsm == NULL) && 21097 ((rack->rc_hw_nobuf == 1) || 21098 (rack_hw_check_queue && (check_done == 0)))) { 21099 /* 21100 * We only want to do this once with the hw_check_queue, 21101 * for the enobuf case we would only do it once if 21102 * we come around to again, the flag will be clear. 21103 */ 21104 check_done = 1; 21105 slot = rack_check_queue_level(rack, tp, &tv, cts, len, segsiz); 21106 if (slot) { 21107 rack->r_ctl.rc_agg_delayed = 0; 21108 rack->r_ctl.rc_agg_early = 0; 21109 rack->r_early = 0; 21110 rack->r_late = 0; 21111 SOCK_SENDBUF_UNLOCK(so); 21112 goto skip_all_send; 21113 } 21114 } 21115 if (rsm || sack_rxmit) 21116 counter_u64_add(rack_nfto_resend, 1); 21117 else 21118 counter_u64_add(rack_non_fto_send, 1); 21119 if ((flags & TH_FIN) && 21120 sbavail(sb)) { 21121 /* 21122 * We do not transmit a FIN 21123 * with data outstanding. We 21124 * need to make it so all data 21125 * is acked first. 21126 */ 21127 flags &= ~TH_FIN; 21128 if (TCPS_HAVEESTABLISHED(tp->t_state) && 21129 (sbused(sb) == (tp->snd_max - tp->snd_una)) && 21130 ((tp->snd_max - tp->snd_una) <= segsiz)) { 21131 /* 21132 * Ok less than or right at a MSS is 21133 * outstanding. The original FreeBSD stack would 21134 * have sent a FIN, which can speed things up for 21135 * a transactional application doing a MSG_WAITALL. 21136 * To speed things up since we do *not* send a FIN 21137 * if data is outstanding, we send a "challenge ack". 21138 * The idea behind that is instead of having to have 21139 * the peer wait for the delayed-ack timer to run off 21140 * we send an ack that makes the peer send us an ack. 
21141 */ 21142 rack_send_ack_challange(rack); 21143 } 21144 } 21145 /* Enforce stack imposed max seg size if we have one */ 21146 if (pace_max_seg && 21147 (len > pace_max_seg)) { 21148 mark = 1; 21149 len = pace_max_seg; 21150 } 21151 if ((rsm == NULL) && 21152 (rack->pcm_in_progress == 0) && 21153 (rack->r_ctl.pcm_max_seg > 0) && 21154 (len >= rack->r_ctl.pcm_max_seg)) { 21155 /* It is large enough for a measurement */ 21156 add_flag |= RACK_IS_PCM; 21157 rack_log_pcm(rack, 5, len, rack->r_ctl.pcm_max_seg, add_flag); 21158 } else if (rack_verbose_logging) { 21159 rack_log_pcm(rack, 6, len, rack->r_ctl.pcm_max_seg, add_flag); 21160 } 21161 21162 SOCKBUF_LOCK_ASSERT(sb); 21163 if (len > 0) { 21164 if (len >= segsiz) 21165 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 21166 else 21167 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 21168 } 21169 /* 21170 * Before ESTABLISHED, force sending of initial options unless TCP 21171 * set not to do any options. NOTE: we assume that the IP/TCP header 21172 * plus TCP options always fit in a single mbuf, leaving room for a 21173 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 21174 * + optlen <= MCLBYTES 21175 */ 21176 optlen = 0; 21177 #ifdef INET6 21178 if (isipv6) 21179 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 21180 else 21181 #endif 21182 hdrlen = sizeof(struct tcpiphdr); 21183 21184 /* 21185 * Ok what seq are we sending from. If we have 21186 * no rsm to use, then we look at various bits, 21187 * if we are putting out a SYN it will be ISS. 21188 * If we are retransmitting a FIN it will 21189 * be snd_max-1 else its snd_max. 21190 */ 21191 if (rsm == NULL) { 21192 if (flags & TH_SYN) 21193 rack_seq = tp->iss; 21194 else if ((flags & TH_FIN) && 21195 (tp->t_flags & TF_SENTFIN)) 21196 rack_seq = tp->snd_max - 1; 21197 else 21198 rack_seq = tp->snd_max; 21199 } else { 21200 rack_seq = rsm->r_start; 21201 } 21202 /* 21203 * Compute options for segment. We only have to care about SYN and 21204 * established connection segments. Options for SYN-ACK segments 21205 * are handled in TCP syncache. 21206 */ 21207 to.to_flags = 0; 21208 if ((tp->t_flags & TF_NOOPT) == 0) { 21209 /* Maximum segment size. */ 21210 if (flags & TH_SYN) { 21211 to.to_mss = tcp_mssopt(&inp->inp_inc); 21212 if (tp->t_port) 21213 to.to_mss -= V_tcp_udp_tunneling_overhead; 21214 to.to_flags |= TOF_MSS; 21215 21216 /* 21217 * On SYN or SYN|ACK transmits on TFO connections, 21218 * only include the TFO option if it is not a 21219 * retransmit, as the presence of the TFO option may 21220 * have caused the original SYN or SYN|ACK to have 21221 * been dropped by a middlebox. 21222 */ 21223 if ((tp->t_flags & TF_FASTOPEN) && 21224 (tp->t_rxtshift == 0)) { 21225 if (tp->t_state == TCPS_SYN_RECEIVED) { 21226 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 21227 to.to_tfo_cookie = 21228 (u_int8_t *)&tp->t_tfo_cookie.server; 21229 to.to_flags |= TOF_FASTOPEN; 21230 wanted_cookie = 1; 21231 } else if (tp->t_state == TCPS_SYN_SENT) { 21232 to.to_tfo_len = 21233 tp->t_tfo_client_cookie_len; 21234 to.to_tfo_cookie = 21235 tp->t_tfo_cookie.client; 21236 to.to_flags |= TOF_FASTOPEN; 21237 wanted_cookie = 1; 21238 /* 21239 * If we wind up having more data to 21240 * send with the SYN than can fit in 21241 * one segment, don't send any more 21242 * until the SYN|ACK comes back from 21243 * the other end. 21244 */ 21245 sendalot = 0; 21246 } 21247 } 21248 } 21249 /* Window scaling. 
*/ 21250 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 21251 to.to_wscale = tp->request_r_scale; 21252 to.to_flags |= TOF_SCALE; 21253 } 21254 /* Timestamps. */ 21255 if ((tp->t_flags & TF_RCVD_TSTMP) || 21256 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 21257 uint32_t ts_to_use; 21258 21259 if ((rack->r_rcvpath_rtt_up == 1) && 21260 (ms_cts == rack->r_ctl.last_rcv_tstmp_for_rtt)) { 21261 /* 21262 * When we are doing a rcv_rtt probe all 21263 * other timestamps use the next msec. This 21264 * is safe since our previous ack is in the 21265 * air and we will just have a few more 21266 * on the next ms. This assures that only 21267 * the one ack has the ms_cts that was on 21268 * our ack-probe. 21269 */ 21270 ts_to_use = ms_cts + 1; 21271 } else { 21272 ts_to_use = ms_cts; 21273 } 21274 to.to_tsval = ts_to_use + tp->ts_offset; 21275 to.to_tsecr = tp->ts_recent; 21276 to.to_flags |= TOF_TS; 21277 if ((len == 0) && 21278 (TCPS_HAVEESTABLISHED(tp->t_state)) && 21279 ((ms_cts - rack->r_ctl.last_rcv_tstmp_for_rtt) > RCV_PATH_RTT_MS) && 21280 (tp->snd_una == tp->snd_max) && 21281 (flags & TH_ACK) && 21282 (sbavail(sb) == 0) && 21283 (rack->r_ctl.current_round != 0) && 21284 ((flags & (TH_SYN|TH_FIN)) == 0) && 21285 (rack->r_rcvpath_rtt_up == 0)) { 21286 rack->r_ctl.last_rcv_tstmp_for_rtt = ms_cts; 21287 rack->r_ctl.last_time_of_arm_rcv = cts; 21288 rack->r_rcvpath_rtt_up = 1; 21289 /* Subtract 1 from seq to force a response */ 21290 rack_seq--; 21291 } 21292 } 21293 /* Set receive buffer autosizing timestamp. */ 21294 if (tp->rfbuf_ts == 0 && 21295 (so->so_rcv.sb_flags & SB_AUTOSIZE)) { 21296 tp->rfbuf_ts = ms_cts; 21297 } 21298 /* Selective ACK's. */ 21299 if (tp->t_flags & TF_SACK_PERMIT) { 21300 if (flags & TH_SYN) 21301 to.to_flags |= TOF_SACKPERM; 21302 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 21303 tp->rcv_numsacks > 0) { 21304 to.to_flags |= TOF_SACK; 21305 to.to_nsacks = tp->rcv_numsacks; 21306 to.to_sacks = (u_char *)tp->sackblks; 21307 } 21308 } 21309 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 21310 /* TCP-MD5 (RFC2385). */ 21311 if (tp->t_flags & TF_SIGNATURE) 21312 to.to_flags |= TOF_SIGNATURE; 21313 #endif 21314 21315 /* Processing the options. */ 21316 hdrlen += optlen = tcp_addoptions(&to, opt); 21317 /* 21318 * If we wanted a TFO option to be added, but it was unable 21319 * to fit, ensure no data is sent. 21320 */ 21321 if ((tp->t_flags & TF_FASTOPEN) && wanted_cookie && 21322 !(to.to_flags & TOF_FASTOPEN)) 21323 len = 0; 21324 } 21325 if (tp->t_port) { 21326 if (V_tcp_udp_tunneling_port == 0) { 21327 /* The port was removed?? */ 21328 SOCK_SENDBUF_UNLOCK(so); 21329 #ifdef TCP_ACCOUNTING 21330 crtsc = get_cyclecount(); 21331 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21332 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 21333 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 21334 } 21335 sched_unpin(); 21336 #endif 21337 return (EHOSTUNREACH); 21338 } 21339 hdrlen += sizeof(struct udphdr); 21340 } 21341 #ifdef INET6 21342 if (isipv6) 21343 ipoptlen = ip6_optlen(inp); 21344 else 21345 #endif 21346 if (inp->inp_options) 21347 ipoptlen = inp->inp_options->m_len - 21348 offsetof(struct ipoption, ipopt_list); 21349 else 21350 ipoptlen = 0; 21351 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 21352 ipoptlen += ipsec_optlen; 21353 #endif 21354 21355 /* 21356 * Adjust data length if insertion of options will bump the packet 21357 * length beyond the t_maxseg length. Clear the FIN bit because we 21358 * cut off the tail of the segment. 
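 *
 * A rough example with made-up numbers: a t_maxseg of 1460 with
 * 12 bytes of timestamp options leaves 1448 bytes of payload per
 * segment.  If the hardware reports an if_hw_tsomax of 65535 the
 * burst is first clipped to 65535 - hdrlen - max_linkhdr, then
 * trimmed down to a multiple of 1448 (the len -= moff step)
 * unless this send would drain the socket buffer, in which case
 * the fractional tail is allowed out.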
21359 */ 21360 if (len + optlen + ipoptlen > tp->t_maxseg) { 21361 if (tso) { 21362 uint32_t if_hw_tsomax; 21363 uint32_t moff; 21364 int32_t max_len; 21365 21366 /* extract TSO information */ 21367 if_hw_tsomax = tp->t_tsomax; 21368 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 21369 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 21370 KASSERT(ipoptlen == 0, 21371 ("%s: TSO can't do IP options", __func__)); 21372 21373 /* 21374 * Check if we should limit by maximum payload 21375 * length: 21376 */ 21377 if (if_hw_tsomax != 0) { 21378 /* compute maximum TSO length */ 21379 max_len = (if_hw_tsomax - hdrlen - 21380 max_linkhdr); 21381 if (max_len <= 0) { 21382 len = 0; 21383 } else if (len > max_len) { 21384 if (doing_tlp == 0) 21385 sendalot = 1; 21386 len = max_len; 21387 mark = 2; 21388 } 21389 } 21390 /* 21391 * Prevent the last segment from being fractional 21392 * unless the send sockbuf can be emptied: 21393 */ 21394 max_len = (tp->t_maxseg - optlen); 21395 if ((sb_offset + len) < sbavail(sb)) { 21396 moff = len % (u_int)max_len; 21397 if (moff != 0) { 21398 mark = 3; 21399 len -= moff; 21400 } 21401 } 21402 /* 21403 * In case there are too many small fragments don't 21404 * use TSO: 21405 */ 21406 if (len <= max_len) { 21407 mark = 4; 21408 tso = 0; 21409 } 21410 /* 21411 * Send the FIN in a separate segment after the bulk 21412 * sending is done. We don't trust the TSO 21413 * implementations to clear the FIN flag on all but 21414 * the last segment. 21415 */ 21416 if (tp->t_flags & TF_NEEDFIN) { 21417 sendalot = 4; 21418 } 21419 } else { 21420 mark = 5; 21421 if (optlen + ipoptlen >= tp->t_maxseg) { 21422 /* 21423 * Since we don't have enough space to put 21424 * the IP header chain and the TCP header in 21425 * one packet as required by RFC 7112, don't 21426 * send it. Also ensure that at least one 21427 * byte of the payload can be put into the 21428 * TCP segment. 21429 */ 21430 SOCK_SENDBUF_UNLOCK(so); 21431 error = EMSGSIZE; 21432 sack_rxmit = 0; 21433 goto out; 21434 } 21435 len = tp->t_maxseg - optlen - ipoptlen; 21436 sendalot = 5; 21437 } 21438 } else { 21439 tso = 0; 21440 mark = 6; 21441 } 21442 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 21443 ("%s: len > IP_MAXPACKET", __func__)); 21444 #ifdef DIAGNOSTIC 21445 #ifdef INET6 21446 if (max_linkhdr + hdrlen > MCLBYTES) 21447 #else 21448 if (max_linkhdr + hdrlen > MHLEN) 21449 #endif 21450 panic("tcphdr too big"); 21451 #endif 21452 21453 /* 21454 * This KASSERT is here to catch edge cases at a well defined place. 21455 * Before, those had triggered (random) panic conditions further 21456 * down. 21457 */ 21458 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 21459 if ((len == 0) && 21460 (flags & TH_FIN) && 21461 (sbused(sb))) { 21462 /* 21463 * We have outstanding data, don't send a fin by itself!. 21464 * 21465 * Check to see if we need to send a challenge ack. 21466 */ 21467 if ((sbused(sb) == (tp->snd_max - tp->snd_una)) && 21468 ((tp->snd_max - tp->snd_una) <= segsiz)) { 21469 /* 21470 * Ok less than or right at a MSS is 21471 * outstanding. The original FreeBSD stack would 21472 * have sent a FIN, which can speed things up for 21473 * a transactional application doing a MSG_WAITALL. 21474 * To speed things up since we do *not* send a FIN 21475 * if data is outstanding, we send a "challenge ack". 21476 * The idea behind that is instead of having to have 21477 * the peer wait for the delayed-ack timer to run off 21478 * we send an ack that makes the peer send us an ack. 
21479 */ 21480 rack_send_ack_challange(rack); 21481 } 21482 goto just_return; 21483 } 21484 /* 21485 * Grab a header mbuf, attaching a copy of data to be transmitted, 21486 * and initialize the header from the template for sends on this 21487 * connection. 21488 */ 21489 hw_tls = tp->t_nic_ktls_xmit != 0; 21490 if (len) { 21491 uint32_t max_val; 21492 uint32_t moff; 21493 21494 if (pace_max_seg) 21495 max_val = pace_max_seg; 21496 else 21497 max_val = len; 21498 /* 21499 * We allow a limit on sending with hptsi. 21500 */ 21501 if (len > max_val) { 21502 mark = 7; 21503 len = max_val; 21504 } 21505 #ifdef INET6 21506 if (MHLEN < hdrlen + max_linkhdr) 21507 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 21508 else 21509 #endif 21510 m = m_gethdr(M_NOWAIT, MT_DATA); 21511 21512 if (m == NULL) { 21513 SOCK_SENDBUF_UNLOCK(so); 21514 error = ENOBUFS; 21515 sack_rxmit = 0; 21516 goto out; 21517 } 21518 m->m_data += max_linkhdr; 21519 m->m_len = hdrlen; 21520 21521 /* 21522 * Start the m_copy functions from the closest mbuf to the 21523 * sb_offset in the socket buffer chain. 21524 */ 21525 mb = sbsndptr_noadv(sb, sb_offset, &moff); 21526 s_mb = mb; 21527 s_moff = moff; 21528 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 21529 m_copydata(mb, moff, (int)len, 21530 mtod(m, caddr_t)+hdrlen); 21531 /* 21532 * If we are not retransmitting advance the 21533 * sndptr to help remember the next place in 21534 * the sb. 21535 */ 21536 if (rsm == NULL) 21537 sbsndptr_adv(sb, mb, len); 21538 m->m_len += len; 21539 } else { 21540 struct sockbuf *msb; 21541 21542 /* 21543 * If we are not retransmitting pass in msb so 21544 * the socket buffer can be advanced. Otherwise 21545 * set it to NULL if its a retransmission since 21546 * we don't want to change the sb remembered 21547 * location. 21548 */ 21549 if (rsm == NULL) 21550 msb = sb; 21551 else 21552 msb = NULL; 21553 m->m_next = tcp_m_copym( 21554 mb, moff, &len, 21555 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 21556 ((rsm == NULL) ? hw_tls : 0)); 21557 if (len <= (tp->t_maxseg - optlen)) { 21558 /* 21559 * Must have ran out of mbufs for the copy 21560 * shorten it to no longer need tso. Lets 21561 * not put on sendalot since we are low on 21562 * mbufs. 21563 */ 21564 tso = 0; 21565 } 21566 if (m->m_next == NULL) { 21567 SOCK_SENDBUF_UNLOCK(so); 21568 (void)m_free(m); 21569 error = ENOBUFS; 21570 sack_rxmit = 0; 21571 goto out; 21572 } 21573 } 21574 if (sack_rxmit) { 21575 if (rsm && (rsm->r_flags & RACK_TLP)) { 21576 /* 21577 * TLP should not count in retran count, but 21578 * in its own bin 21579 */ 21580 counter_u64_add(rack_tlp_retran, 1); 21581 counter_u64_add(rack_tlp_retran_bytes, len); 21582 } else { 21583 tp->t_sndrexmitpack++; 21584 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 21585 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 21586 } 21587 #ifdef STATS 21588 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 21589 len); 21590 #endif 21591 } else { 21592 KMOD_TCPSTAT_INC(tcps_sndpack); 21593 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 21594 #ifdef STATS 21595 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 21596 len); 21597 #endif 21598 } 21599 /* 21600 * If we're sending everything we've got, set PUSH. (This 21601 * will keep happy those implementations which only give 21602 * data to the user when a buffer fills or a PUSH comes in.) 
21603 */ 21604 if (sb_offset + len == sbused(sb) && 21605 sbused(sb) && 21606 !(flags & TH_SYN)) { 21607 flags |= TH_PUSH; 21608 add_flag |= RACK_HAD_PUSH; 21609 } 21610 SOCK_SENDBUF_UNLOCK(so); 21611 } else { 21612 SOCK_SENDBUF_UNLOCK(so); 21613 if (tp->t_flags & TF_ACKNOW) 21614 KMOD_TCPSTAT_INC(tcps_sndacks); 21615 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 21616 KMOD_TCPSTAT_INC(tcps_sndctrl); 21617 else 21618 KMOD_TCPSTAT_INC(tcps_sndwinup); 21619 21620 m = m_gethdr(M_NOWAIT, MT_DATA); 21621 if (m == NULL) { 21622 error = ENOBUFS; 21623 sack_rxmit = 0; 21624 goto out; 21625 } 21626 #ifdef INET6 21627 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 21628 MHLEN >= hdrlen) { 21629 M_ALIGN(m, hdrlen); 21630 } else 21631 #endif 21632 m->m_data += max_linkhdr; 21633 m->m_len = hdrlen; 21634 } 21635 SOCK_SENDBUF_UNLOCK_ASSERT(so); 21636 m->m_pkthdr.rcvif = (struct ifnet *)0; 21637 #ifdef MAC 21638 mac_inpcb_create_mbuf(inp, m); 21639 #endif 21640 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 21641 #ifdef INET6 21642 if (isipv6) 21643 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 21644 else 21645 #endif /* INET6 */ 21646 #ifdef INET 21647 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 21648 #endif 21649 th = rack->r_ctl.fsb.th; 21650 udp = rack->r_ctl.fsb.udp; 21651 if (udp) { 21652 #ifdef INET6 21653 if (isipv6) 21654 ulen = hdrlen + len - sizeof(struct ip6_hdr); 21655 else 21656 #endif /* INET6 */ 21657 ulen = hdrlen + len - sizeof(struct ip); 21658 udp->uh_ulen = htons(ulen); 21659 } 21660 } else { 21661 #ifdef INET6 21662 if (isipv6) { 21663 ip6 = mtod(m, struct ip6_hdr *); 21664 if (tp->t_port) { 21665 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 21666 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 21667 udp->uh_dport = tp->t_port; 21668 ulen = hdrlen + len - sizeof(struct ip6_hdr); 21669 udp->uh_ulen = htons(ulen); 21670 th = (struct tcphdr *)(udp + 1); 21671 } else 21672 th = (struct tcphdr *)(ip6 + 1); 21673 tcpip_fillheaders(inp, tp->t_port, ip6, th); 21674 } else 21675 #endif /* INET6 */ 21676 { 21677 #ifdef INET 21678 ip = mtod(m, struct ip *); 21679 if (tp->t_port) { 21680 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 21681 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 21682 udp->uh_dport = tp->t_port; 21683 ulen = hdrlen + len - sizeof(struct ip); 21684 udp->uh_ulen = htons(ulen); 21685 th = (struct tcphdr *)(udp + 1); 21686 } else 21687 th = (struct tcphdr *)(ip + 1); 21688 tcpip_fillheaders(inp, tp->t_port, ip, th); 21689 #endif 21690 } 21691 } 21692 /* 21693 * If we are starting a connection, send ECN setup SYN packet. If we 21694 * are on a retransmit, we may resend those bits a number of times 21695 * as per RFC 3168. 
21696 */ 21697 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { 21698 flags |= tcp_ecn_output_syn_sent(tp); 21699 } 21700 /* Also handle parallel SYN for ECN */ 21701 if (TCPS_HAVERCVDSYN(tp->t_state) && 21702 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 21703 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit); 21704 if ((tp->t_state == TCPS_SYN_RECEIVED) && 21705 (tp->t_flags2 & TF2_ECN_SND_ECE)) 21706 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 21707 #ifdef INET6 21708 if (isipv6) { 21709 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 21710 ip6->ip6_flow |= htonl(ect << 20); 21711 } 21712 else 21713 #endif 21714 { 21715 #ifdef INET 21716 ip->ip_tos &= ~IPTOS_ECN_MASK; 21717 ip->ip_tos |= ect; 21718 #endif 21719 } 21720 } 21721 th->th_seq = htonl(rack_seq); 21722 th->th_ack = htonl(tp->rcv_nxt); 21723 tcp_set_flags(th, flags); 21724 /* 21725 * Calculate receive window. Don't shrink window, but avoid silly 21726 * window syndrome. 21727 * If a RST segment is sent, advertise a window of zero. 21728 */ 21729 if (flags & TH_RST) { 21730 recwin = 0; 21731 } else { 21732 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 21733 recwin < (long)segsiz) { 21734 recwin = 0; 21735 } 21736 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 21737 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 21738 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 21739 } 21740 21741 /* 21742 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 21743 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 21744 * handled in syncache. 21745 */ 21746 if (flags & TH_SYN) 21747 th->th_win = htons((u_short) 21748 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 21749 else { 21750 /* Avoid shrinking window with window scaling. */ 21751 recwin = roundup2(recwin, 1 << tp->rcv_scale); 21752 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 21753 } 21754 /* 21755 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 21756 * window. This may cause the remote transmitter to stall. This 21757 * flag tells soreceive() to disable delayed acknowledgements when 21758 * draining the buffer. This can occur if the receiver is 21759 * attempting to read more data than can be buffered prior to 21760 * transmitting on the connection. 21761 */ 21762 if (th->th_win == 0) { 21763 tp->t_sndzerowin++; 21764 tp->t_flags |= TF_RXWIN0SENT; 21765 } else 21766 tp->t_flags &= ~TF_RXWIN0SENT; 21767 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 21768 /* Now are we using fsb?, if so copy the template data to the mbuf */ 21769 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 21770 uint8_t *cpto; 21771 21772 cpto = mtod(m, uint8_t *); 21773 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 21774 /* 21775 * We have just copied in: 21776 * IP/IP6 21777 * <optional udphdr> 21778 * tcphdr (no options) 21779 * 21780 * We need to grab the correct pointers into the mbuf 21781 * for both the tcp header, and possibly the udp header (if tunneling). 21782 * We do this by using the offset in the copy buffer and adding it 21783 * to the mbuf base pointer (cpto). 
21784 */ 21785 #ifdef INET6 21786 if (isipv6) 21787 ip6 = mtod(m, struct ip6_hdr *); 21788 else 21789 #endif /* INET6 */ 21790 #ifdef INET 21791 ip = mtod(m, struct ip *); 21792 #endif 21793 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 21794 /* If we have a udp header lets set it into the mbuf as well */ 21795 if (udp) 21796 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); 21797 } 21798 if (optlen) { 21799 bcopy(opt, th + 1, optlen); 21800 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 21801 } 21802 /* 21803 * Put TCP length in extended header, and then checksum extended 21804 * header and data. 21805 */ 21806 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 21807 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 21808 if (to.to_flags & TOF_SIGNATURE) { 21809 /* 21810 * Calculate MD5 signature and put it into the place 21811 * determined before. 21812 * NOTE: since TCP options buffer doesn't point into 21813 * mbuf's data, calculate offset and use it. 21814 */ 21815 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 21816 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 21817 /* 21818 * Do not send segment if the calculation of MD5 21819 * digest has failed. 21820 */ 21821 goto out; 21822 } 21823 } 21824 #endif 21825 #ifdef INET6 21826 if (isipv6) { 21827 /* 21828 * ip6_plen is not need to be filled now, and will be filled 21829 * in ip6_output. 21830 */ 21831 if (tp->t_port) { 21832 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 21833 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 21834 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 21835 th->th_sum = htons(0); 21836 UDPSTAT_INC(udps_opackets); 21837 } else { 21838 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 21839 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 21840 th->th_sum = in6_cksum_pseudo(ip6, 21841 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 21842 0); 21843 } 21844 } 21845 #endif 21846 #if defined(INET6) && defined(INET) 21847 else 21848 #endif 21849 #ifdef INET 21850 { 21851 if (tp->t_port) { 21852 m->m_pkthdr.csum_flags = CSUM_UDP; 21853 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 21854 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 21855 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 21856 th->th_sum = htons(0); 21857 UDPSTAT_INC(udps_opackets); 21858 } else { 21859 m->m_pkthdr.csum_flags = CSUM_TCP; 21860 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 21861 th->th_sum = in_pseudo(ip->ip_src.s_addr, 21862 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 21863 IPPROTO_TCP + len + optlen)); 21864 } 21865 /* IP version must be set here for ipv4/ipv6 checking later */ 21866 KASSERT(ip->ip_v == IPVERSION, 21867 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 21868 } 21869 #endif 21870 /* 21871 * Enable TSO and specify the size of the segments. The TCP pseudo 21872 * header checksum is always provided. XXX: Fixme: This is currently 21873 * not the case for IPv6. 21874 */ 21875 if (tso) { 21876 /* 21877 * Here we must use t_maxseg and the optlen since 21878 * the optlen may include SACK's (or DSACK). 
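 * That way tso_segsz reflects the payload room left after this segment's options.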
21879 */ 21880 KASSERT(len > tp->t_maxseg - optlen, 21881 ("%s: len <= tso_segsz", __func__)); 21882 m->m_pkthdr.csum_flags |= CSUM_TSO; 21883 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 21884 } 21885 KASSERT(len + hdrlen == m_length(m, NULL), 21886 ("%s: mbuf chain different than expected: %d + %u != %u", 21887 __func__, len, hdrlen, m_length(m, NULL))); 21888 21889 #ifdef TCP_HHOOK 21890 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 21891 hhook_run_tcp_est_out(tp, th, &to, len, tso); 21892 #endif 21893 if ((rack->r_ctl.crte != NULL) && 21894 (rack->rc_hw_nobuf == 0) && 21895 tcp_bblogging_on(tp)) { 21896 rack_log_queue_level(tp, rack, len, &tv, cts); 21897 } 21898 /* We're getting ready to send; log now. */ 21899 if (tcp_bblogging_on(rack->rc_tp)) { 21900 union tcp_log_stackspecific log; 21901 21902 memset(&log, 0, sizeof(log)); 21903 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 21904 if (rack->rack_no_prr) 21905 log.u_bbr.flex1 = 0; 21906 else 21907 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 21908 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 21909 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 21910 log.u_bbr.flex4 = orig_len; 21911 /* Save off the early/late values */ 21912 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 21913 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 21914 log.u_bbr.bw_inuse = rack_get_bw(rack); 21915 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 21916 log.u_bbr.flex8 = 0; 21917 if (rsm) { 21918 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 21919 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 21920 counter_u64_add(rack_collapsed_win_rxt, 1); 21921 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 21922 } 21923 if (doing_tlp) 21924 log.u_bbr.flex8 = 2; 21925 else 21926 log.u_bbr.flex8 = 1; 21927 } else { 21928 if (doing_tlp) 21929 log.u_bbr.flex8 = 3; 21930 } 21931 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 21932 log.u_bbr.flex7 = mark; 21933 log.u_bbr.flex7 <<= 8; 21934 log.u_bbr.flex7 |= pass; 21935 log.u_bbr.pkts_out = tp->t_maxseg; 21936 log.u_bbr.timeStamp = cts; 21937 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 21938 if (rsm && (rsm->r_rtr_cnt > 0)) { 21939 /* 21940 * When we have a retransmit we want to log the 21941 * burst at send and flight at send from before. 21942 */ 21943 log.u_bbr.flex5 = rsm->r_fas; 21944 log.u_bbr.bbr_substate = rsm->r_bas; 21945 } else { 21946 /* 21947 * New transmits we log in flex5 the inflight again as 21948 * well as the number of segments in our send in the 21949 * substate field. 21950 */ 21951 log.u_bbr.flex5 = log.u_bbr.inflight; 21952 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 21953 } 21954 log.u_bbr.lt_epoch = cwnd_to_use; 21955 log.u_bbr.delivered = sendalot; 21956 log.u_bbr.rttProp = (uintptr_t)rsm; 21957 log.u_bbr.pkt_epoch = __LINE__; 21958 if (rsm) { 21959 log.u_bbr.delRate = rsm->r_flags; 21960 log.u_bbr.delRate <<= 31; 21961 log.u_bbr.delRate |= rack->r_must_retran; 21962 log.u_bbr.delRate <<= 1; 21963 log.u_bbr.delRate |= (sack_rxmit & 0x00000001); 21964 } else { 21965 log.u_bbr.delRate = rack->r_must_retran; 21966 log.u_bbr.delRate <<= 1; 21967 log.u_bbr.delRate |= (sack_rxmit & 0x00000001); 21968 } 21969 lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 21970 len, &log, false, NULL, __func__, __LINE__, &tv); 21971 } else 21972 lgb = NULL; 21973 21974 /* 21975 * Fill in IP length and desired time to live and send to IP level. 
21976 * There should be a better way to handle ttl and tos; we could keep 21977 * them in the template, but need a way to checksum without them. 21978 */ 21979 /* 21980 * m->m_pkthdr.len should have been set before cksum calculation, 21981 * because in6_cksum() needs it. 21982 */ 21983 #ifdef INET6 21984 if (isipv6) { 21985 /* 21986 * We separately set hoplimit for every segment, since the 21987 * user might want to change the value via setsockopt. Also, 21988 * desired default hop limit might be changed via Neighbor 21989 * Discovery. 21990 */ 21991 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); 21992 21993 /* 21994 * Set the packet size here for the benefit of DTrace 21995 * probes. ip6_output() will set it properly; it's supposed 21996 * to include the option header lengths as well. 21997 */ 21998 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 21999 22000 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 22001 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 22002 else 22003 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 22004 22005 if (tp->t_state == TCPS_SYN_SENT) 22006 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 22007 22008 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 22009 /* TODO: IPv6 IP6TOS_ECT bit on */ 22010 error = ip6_output(m, 22011 inp->in6p_outputopts, 22012 &inp->inp_route6, 22013 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 22014 NULL, NULL, inp); 22015 22016 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) 22017 mtu = inp->inp_route6.ro_nh->nh_mtu; 22018 } 22019 #endif /* INET6 */ 22020 #if defined(INET) && defined(INET6) 22021 else 22022 #endif 22023 #ifdef INET 22024 { 22025 ip->ip_len = htons(m->m_pkthdr.len); 22026 #ifdef INET6 22027 if (inp->inp_vflag & INP_IPV6PROTO) 22028 ip->ip_ttl = in6_selecthlim(inp, NULL); 22029 #endif /* INET6 */ 22030 rack->r_ctl.fsb.hoplimit = ip->ip_ttl; 22031 /* 22032 * If we do path MTU discovery, then we set DF on every 22033 * packet. This might not be the best thing to do according 22034 * to RFC3390 Section 2. However the tcp hostcache mitigates 22035 * the problem so it affects only the first tcp connection 22036 * with a host. 22037 * 22038 * NB: Don't set DF on small MTU/MSS to have a safe 22039 * fallback. 22040 */ 22041 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 22042 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 22043 if (tp->t_port == 0 || len < V_tcp_minmss) { 22044 ip->ip_off |= htons(IP_DF); 22045 } 22046 } else { 22047 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 22048 } 22049 22050 if (tp->t_state == TCPS_SYN_SENT) 22051 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 22052 22053 TCP_PROBE5(send, NULL, tp, ip, tp, th); 22054 22055 error = ip_output(m, 22056 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 22057 inp->inp_options, 22058 #else 22059 NULL, 22060 #endif 22061 &inp->inp_route, 22062 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0, 22063 inp); 22064 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) 22065 mtu = inp->inp_route.ro_nh->nh_mtu; 22066 } 22067 #endif /* INET */ 22068 if (lgb) { 22069 lgb->tlb_errno = error; 22070 lgb = NULL; 22071 } 22072 22073 out: 22074 /* 22075 * In transmit state, time the transmission and arrange for the 22076 * retransmit. In persist state, just set snd_max.
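 * Note that out: is also reached on earlier failures, in which case error is non-zero and most of the accounting below is skipped.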
22077 */ 22078 if ((rsm == NULL) && doing_tlp) 22079 add_flag |= RACK_TLP; 22080 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 22081 rack_to_usec_ts(&tv), 22082 rsm, add_flag, s_mb, s_moff, hw_tls, segsiz); 22083 if (error == 0) { 22084 if (add_flag & RACK_IS_PCM) { 22085 /* We just launched a PCM */ 22086 /* rrs here log */ 22087 rack->pcm_in_progress = 1; 22088 rack->pcm_needed = 0; 22089 rack_log_pcm(rack, 7, len, rack->r_ctl.pcm_max_seg, add_flag); 22090 } 22091 if (rsm == NULL) { 22092 if (rack->lt_bw_up == 0) { 22093 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv); 22094 rack->r_ctl.lt_seq = tp->snd_una; 22095 rack->lt_bw_up = 1; 22096 } else if (((rack_seq + len) - rack->r_ctl.lt_seq) > 0x7fffffff) { 22097 /* 22098 * Need to record what we have since we are 22099 * approaching seq wrap. 22100 */ 22101 uint64_t tmark; 22102 22103 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); 22104 rack->r_ctl.lt_seq = tp->snd_una; 22105 tmark = tcp_get_u64_usecs(&tv); 22106 if (tmark > rack->r_ctl.lt_timemark) { 22107 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 22108 rack->r_ctl.lt_timemark = tmark; 22109 } 22110 } 22111 } 22112 rack->forced_ack = 0; /* If we send something zap the FA flag */ 22113 counter_u64_add(rack_total_bytes, len); 22114 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 22115 if (rsm && doing_tlp) { 22116 rack->rc_last_sent_tlp_past_cumack = 0; 22117 rack->rc_last_sent_tlp_seq_valid = 1; 22118 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 22119 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 22120 } 22121 if (rack->rc_hw_nobuf) { 22122 rack->rc_hw_nobuf = 0; 22123 rack->r_ctl.rc_agg_delayed = 0; 22124 rack->r_early = 0; 22125 rack->r_late = 0; 22126 rack->r_ctl.rc_agg_early = 0; 22127 } 22128 if (rsm && (doing_tlp == 0)) { 22129 /* Set we retransmitted */ 22130 rack->rc_gp_saw_rec = 1; 22131 } else { 22132 if (cwnd_to_use > tp->snd_ssthresh) { 22133 /* Set we sent in CA */ 22134 rack->rc_gp_saw_ca = 1; 22135 } else { 22136 /* Set we sent in SS */ 22137 rack->rc_gp_saw_ss = 1; 22138 } 22139 } 22140 if (TCPS_HAVEESTABLISHED(tp->t_state) && 22141 (tp->t_flags & TF_SACK_PERMIT) && 22142 tp->rcv_numsacks > 0) 22143 tcp_clean_dsack_blocks(tp); 22144 tot_len_this_send += len; 22145 if (len == 0) { 22146 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 22147 } else { 22148 int idx; 22149 22150 idx = (len / segsiz) + 3; 22151 if (idx >= TCP_MSS_ACCT_ATIMER) 22152 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 22153 else 22154 counter_u64_add(rack_out_size[idx], 1); 22155 } 22156 } 22157 if ((rack->rack_no_prr == 0) && 22158 sub_from_prr && 22159 (error == 0)) { 22160 if (rack->r_ctl.rc_prr_sndcnt >= len) 22161 rack->r_ctl.rc_prr_sndcnt -= len; 22162 else 22163 rack->r_ctl.rc_prr_sndcnt = 0; 22164 } 22165 sub_from_prr = 0; 22166 if (rsm != NULL) { 22167 if (doing_tlp) 22168 /* Make sure the TLP is added */ 22169 rsm->r_flags |= RACK_TLP; 22170 else 22171 /* If its a resend without TLP then it must not have the flag */ 22172 rsm->r_flags &= ~RACK_TLP; 22173 } 22174 if ((error == 0) && 22175 (len > 0) && 22176 (tp->snd_una == tp->snd_max)) 22177 rack->r_ctl.rc_tlp_rxt_last_time = cts; 22178 22179 { 22180 /* 22181 * This block is not associated with the above error == 0 test. 22182 * It is used to advance snd_max if we have a new transmit. 
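 * It also starts the RTT timer and, when needed, a goodput measurement for the new data.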
22183 */ 22184 tcp_seq startseq = tp->snd_max; 22185 22186 22187 if (rsm && (doing_tlp == 0)) 22188 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 22189 if (error) 22190 /* We don't log or do anything with errors */ 22191 goto nomore; 22192 if (doing_tlp == 0) { 22193 if (rsm == NULL) { 22194 /* 22195 * Not a retransmission of some 22196 * sort, new data is going out so 22197 * clear our TLP count and flag. 22198 */ 22199 rack->rc_tlp_in_progress = 0; 22200 rack->r_ctl.rc_tlp_cnt_out = 0; 22201 } 22202 } else { 22203 /* 22204 * We have just sent a TLP, mark that it is true 22205 * and make sure our in progress is set so we 22206 * continue to check the count. 22207 */ 22208 rack->rc_tlp_in_progress = 1; 22209 rack->r_ctl.rc_tlp_cnt_out++; 22210 } 22211 /* 22212 * If we are retransmitting we are done, snd_max 22213 * does not get updated. 22214 */ 22215 if (sack_rxmit) 22216 goto nomore; 22217 if ((tp->snd_una == tp->snd_max) && (len > 0)) { 22218 /* 22219 * Update the time we just added data since 22220 * nothing was outstanding. 22221 */ 22222 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 22223 tp->t_acktime = ticks; 22224 } 22225 /* 22226 * Now for special SYN/FIN handling. 22227 */ 22228 if (flags & (TH_SYN | TH_FIN)) { 22229 if ((flags & TH_SYN) && 22230 ((tp->t_flags & TF_SENTSYN) == 0)) { 22231 tp->snd_max++; 22232 tp->t_flags |= TF_SENTSYN; 22233 } 22234 if ((flags & TH_FIN) && 22235 ((tp->t_flags & TF_SENTFIN) == 0)) { 22236 tp->snd_max++; 22237 tp->t_flags |= TF_SENTFIN; 22238 } 22239 } 22240 tp->snd_max += len; 22241 if (rack->rc_new_rnd_needed) { 22242 rack_new_round_starts(tp, rack, tp->snd_max); 22243 } 22244 /* 22245 * Time this transmission if not a retransmission and 22246 * not currently timing anything. 22247 * This is only relevant in case of switching back to 22248 * the base stack. 22249 */ 22250 if (tp->t_rtttime == 0) { 22251 tp->t_rtttime = ticks; 22252 tp->t_rtseq = startseq; 22253 KMOD_TCPSTAT_INC(tcps_segstimed); 22254 } 22255 if (len && 22256 ((tp->t_flags & TF_GPUTINPROG) == 0)) 22257 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 22258 /* 22259 * If we are doing FO we need to update the mbuf position and subtract 22260 * this happens when the peer sends us duplicate information and 22261 * we thus want to send a DSACK. 22262 * 22263 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 22264 * turned off? If not then we are going to echo multiple DSACK blocks 22265 * out (with the TSO), which we should not be doing. 22266 */ 22267 if (rack->r_fast_output && len) { 22268 if (rack->r_ctl.fsb.left_to_send > len) 22269 rack->r_ctl.fsb.left_to_send -= len; 22270 else 22271 rack->r_ctl.fsb.left_to_send = 0; 22272 if (rack->r_ctl.fsb.left_to_send < segsiz) 22273 rack->r_fast_output = 0; 22274 if (rack->r_fast_output) { 22275 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 22276 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 22277 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); 22278 } 22279 } 22280 if (rack_pcm_blast == 0) { 22281 if ((orig_len > len) && 22282 (add_flag & RACK_IS_PCM) && 22283 (len < pace_max_seg) && 22284 ((pace_max_seg - len) > segsiz)) { 22285 /* 22286 * We are doing a PCM measurement and we did 22287 * not get enough data in the TSO to meet the 22288 * burst requirement. 
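 * Trim the lengths by what was sent and loop back to send to finish the burst.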
22289 */ 22290 uint32_t n_len; 22291 22292 n_len = (orig_len - len); 22293 orig_len -= len; 22294 pace_max_seg -= len; 22295 len = n_len; 22296 sb_offset = tp->snd_max - tp->snd_una; 22297 /* Re-lock for the next spin */ 22298 SOCK_SENDBUF_LOCK(so); 22299 goto send; 22300 } 22301 } else { 22302 if ((orig_len > len) && 22303 (add_flag & RACK_IS_PCM) && 22304 ((orig_len - len) > segsiz)) { 22305 /* 22306 * We are doing a PCM measurement and we did 22307 * not get enough data in the TSO to meet the 22308 * burst requirement. 22309 */ 22310 uint32_t n_len; 22311 22312 n_len = (orig_len - len); 22313 orig_len -= len; 22314 len = n_len; 22315 sb_offset = tp->snd_max - tp->snd_una; 22316 /* Re-lock for the next spin */ 22317 SOCK_SENDBUF_LOCK(so); 22318 goto send; 22319 } 22320 } 22321 } 22322 nomore: 22323 if (error) { 22324 rack->r_ctl.rc_agg_delayed = 0; 22325 rack->r_early = 0; 22326 rack->r_late = 0; 22327 rack->r_ctl.rc_agg_early = 0; 22328 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 22329 /* 22330 * Failures do not advance the seq counter above. For the 22331 * case of ENOBUFS we will fall out and retry in 1ms with 22332 * the hpts. Everything else will just have to retransmit 22333 * with the timer. 22334 * 22335 * In any case, we do not want to loop around for another 22336 * send without a good reason. 22337 */ 22338 sendalot = 0; 22339 switch (error) { 22340 case EPERM: 22341 case EACCES: 22342 tp->t_softerror = error; 22343 #ifdef TCP_ACCOUNTING 22344 crtsc = get_cyclecount(); 22345 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22346 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22347 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22348 } 22349 sched_unpin(); 22350 #endif 22351 return (error); 22352 case ENOBUFS: 22353 /* 22354 * Pace us right away to retry in a short 22355 * time 22356 */ 22357 if (rack->r_ctl.crte != NULL) { 22358 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); 22359 if (tcp_bblogging_on(rack->rc_tp)) 22360 rack_log_queue_level(tp, rack, len, &tv, cts); 22361 } else 22362 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); 22363 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 22364 if (rack->rc_enobuf < 0x7f) 22365 rack->rc_enobuf++; 22366 if (slot < (10 * HPTS_USEC_IN_MSEC)) 22367 slot = 10 * HPTS_USEC_IN_MSEC; 22368 if (rack->r_ctl.crte != NULL) { 22369 counter_u64_add(rack_saw_enobuf_hw, 1); 22370 tcp_rl_log_enobuf(rack->r_ctl.crte); 22371 } 22372 counter_u64_add(rack_saw_enobuf, 1); 22373 goto enobufs; 22374 case EMSGSIZE: 22375 /* 22376 * For some reason the interface we used initially 22377 * to send segments changed to another or lowered 22378 * its MTU. If TSO was active we either got an 22379 * interface without TSO capabilities or TSO was 22380 * turned off. If we obtained mtu from ip_output() 22381 * then update it and try again.
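 * Otherwise fall back to the hpts timer and retry in 10ms.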
22382 */ 22383 if (tso) 22384 tp->t_flags &= ~TF_TSO; 22385 if (mtu != 0) { 22386 int saved_mtu; 22387 22388 saved_mtu = tp->t_maxseg; 22389 tcp_mss_update(tp, -1, mtu, NULL, NULL); 22390 if (saved_mtu > tp->t_maxseg) { 22391 goto again; 22392 } 22393 } 22394 slot = 10 * HPTS_USEC_IN_MSEC; 22395 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 22396 #ifdef TCP_ACCOUNTING 22397 crtsc = get_cyclecount(); 22398 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22399 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22400 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22401 } 22402 sched_unpin(); 22403 #endif 22404 return (error); 22405 case ENETUNREACH: 22406 counter_u64_add(rack_saw_enetunreach, 1); 22407 /* FALLTHROUGH */ 22408 case EHOSTDOWN: 22409 case EHOSTUNREACH: 22410 case ENETDOWN: 22411 if (TCPS_HAVERCVDSYN(tp->t_state)) { 22412 tp->t_softerror = error; 22413 error = 0; 22414 } 22415 /* FALLTHROUGH */ 22416 default: 22417 slot = 10 * HPTS_USEC_IN_MSEC; 22418 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 22419 #ifdef TCP_ACCOUNTING 22420 crtsc = get_cyclecount(); 22421 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22422 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22423 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22424 } 22425 sched_unpin(); 22426 #endif 22427 return (error); 22428 } 22429 } else { 22430 rack->rc_enobuf = 0; 22431 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 22432 rack->r_ctl.retran_during_recovery += len; 22433 } 22434 KMOD_TCPSTAT_INC(tcps_sndtotal); 22435 22436 /* 22437 * Data sent (as far as we can tell). If this advertises a larger 22438 * window than any other segment, then remember the size of the 22439 * advertised window. Any pending ACK has now been sent. 22440 */ 22441 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 22442 tp->rcv_adv = tp->rcv_nxt + recwin; 22443 22444 tp->last_ack_sent = tp->rcv_nxt; 22445 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 22446 enobufs: 22447 if (sendalot) { 22448 /* Do we need to turn off sendalot? */ 22449 if (pace_max_seg && 22450 (tot_len_this_send >= pace_max_seg)) { 22451 /* We hit our max. */ 22452 sendalot = 0; 22453 } 22454 } 22455 if ((error == 0) && (flags & TH_FIN)) 22456 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 22457 if (flags & TH_RST) { 22458 /* 22459 * We don't send again after sending a RST. 22460 */ 22461 slot = 0; 22462 sendalot = 0; 22463 if (error == 0) 22464 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 22465 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) { 22466 /* 22467 * Get our pacing rate, if an error 22468 * occurred in sending (ENOBUF) we would 22469 * hit the else if with slot preset. Other 22470 * errors return. 22471 */ 22472 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz, __LINE__); 22473 } 22474 /* We have sent clear the flag */ 22475 rack->r_ent_rec_ns = 0; 22476 if (rack->r_must_retran) { 22477 if (rsm) { 22478 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 22479 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 22480 /* 22481 * We have retransmitted all. 22482 */ 22483 rack->r_must_retran = 0; 22484 rack->r_ctl.rc_out_at_rto = 0; 22485 } 22486 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 22487 /* 22488 * Sending new data will also kill 22489 * the loop. 
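 * (snd_max has then passed rc_snd_max_at_rto).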
22490 */ 22491 rack->r_must_retran = 0; 22492 rack->r_ctl.rc_out_at_rto = 0; 22493 } 22494 } 22495 rack->r_ctl.fsb.recwin = recwin; 22496 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 22497 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 22498 /* 22499 * We hit an RTO and now have past snd_max at the RTO 22500 * clear all the WAS flags. 22501 */ 22502 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 22503 } 22504 if (slot) { 22505 /* set the rack tcb into the slot N */ 22506 if ((error == 0) && 22507 rack_use_rfo && 22508 ((flags & (TH_SYN|TH_FIN)) == 0) && 22509 (rsm == NULL) && 22510 (ipoptlen == 0) && 22511 (doing_tlp == 0) && 22512 rack->r_fsb_inited && 22513 TCPS_HAVEESTABLISHED(tp->t_state) && 22514 ((IN_RECOVERY(tp->t_flags)) == 0) && 22515 (rack->r_must_retran == 0) && 22516 ((tp->t_flags & TF_NEEDFIN) == 0) && 22517 (len > 0) && (orig_len > 0) && 22518 (orig_len > len) && 22519 ((orig_len - len) >= segsiz) && 22520 ((optlen == 0) || 22521 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 22522 /* We can send at least one more MSS using our fsb */ 22523 rack_setup_fast_output(tp, rack, sb, len, orig_len, 22524 segsiz, pace_max_seg, hw_tls, flags); 22525 } else 22526 rack->r_fast_output = 0; 22527 rack_log_fsb(rack, tp, so, flags, 22528 ipoptlen, orig_len, len, error, 22529 (rsm == NULL), optlen, __LINE__, 2); 22530 } else if (sendalot) { 22531 int ret; 22532 22533 sack_rxmit = 0; 22534 if ((error == 0) && 22535 rack_use_rfo && 22536 ((flags & (TH_SYN|TH_FIN)) == 0) && 22537 (rsm == NULL) && 22538 (doing_tlp == 0) && 22539 (ipoptlen == 0) && 22540 (rack->r_must_retran == 0) && 22541 rack->r_fsb_inited && 22542 TCPS_HAVEESTABLISHED(tp->t_state) && 22543 ((IN_RECOVERY(tp->t_flags)) == 0) && 22544 ((tp->t_flags & TF_NEEDFIN) == 0) && 22545 (len > 0) && (orig_len > 0) && 22546 (orig_len > len) && 22547 ((orig_len - len) >= segsiz) && 22548 ((optlen == 0) || 22549 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 22550 /* we can use fast_output for more */ 22551 rack_setup_fast_output(tp, rack, sb, len, orig_len, 22552 segsiz, pace_max_seg, hw_tls, flags); 22553 if (rack->r_fast_output) { 22554 error = 0; 22555 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, &tot_len_this_send, &error, __LINE__); 22556 if (ret >= 0) 22557 return (ret); 22558 else if (error) 22559 goto nomore; 22560 22561 } 22562 } 22563 goto again; 22564 } 22565 skip_all_send: 22566 /* Assure when we leave that snd_nxt will point to top */ 22567 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 22568 tp->snd_nxt = tp->snd_max; 22569 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 22570 #ifdef TCP_ACCOUNTING 22571 crtsc = get_cyclecount() - ts_val; 22572 if (tot_len_this_send) { 22573 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22574 tp->tcp_cnt_counters[SND_OUT_DATA]++; 22575 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 22576 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 22577 } 22578 } else { 22579 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22580 tp->tcp_cnt_counters[SND_OUT_ACK]++; 22581 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 22582 } 22583 } 22584 sched_unpin(); 22585 #endif 22586 if (error == ENOBUFS) 22587 error = 0; 22588 return (error); 22589 } 22590 22591 static void 22592 rack_update_seg(struct tcp_rack *rack) 22593 { 22594 uint32_t orig_val; 22595 22596 orig_val = rack->r_ctl.rc_pace_max_segs; 22597 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 22598 if (orig_val != rack->r_ctl.rc_pace_max_segs) 22599 
rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); 22600 } 22601 22602 static void 22603 rack_mtu_change(struct tcpcb *tp) 22604 { 22605 /* 22606 * The MSS may have changed 22607 */ 22608 struct tcp_rack *rack; 22609 struct rack_sendmap *rsm; 22610 22611 rack = (struct tcp_rack *)tp->t_fb_ptr; 22612 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 22613 /* 22614 * The MTU has changed we need to resend everything 22615 * since all we have sent is lost. We first fix 22616 * up the mtu though. 22617 */ 22618 rack_set_pace_segments(tp, rack, __LINE__, NULL); 22619 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 22620 rack_remxt_tmr(tp); 22621 rack->r_fast_output = 0; 22622 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 22623 rack->r_ctl.rc_sacked); 22624 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 22625 rack->r_must_retran = 1; 22626 /* Mark all inflight to needing to be rxt'd */ 22627 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 22628 rsm->r_flags |= (RACK_MUST_RXT|RACK_PMTU_CHG); 22629 } 22630 } 22631 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 22632 /* We don't use snd_nxt to retransmit */ 22633 tp->snd_nxt = tp->snd_max; 22634 } 22635 22636 static int 22637 rack_set_dgp(struct tcp_rack *rack) 22638 { 22639 if (rack->dgp_on == 1) 22640 return(0); 22641 if ((rack->use_fixed_rate == 1) && 22642 (rack->rc_always_pace == 1)) { 22643 /* 22644 * We are already pacing another 22645 * way. 22646 */ 22647 return (EBUSY); 22648 } 22649 if (rack->rc_always_pace == 1) { 22650 rack_remove_pacing(rack); 22651 } 22652 if (tcp_incr_dgp_pacing_cnt() == 0) 22653 return (ENOSPC); 22654 rack->r_ctl.pacing_method |= RACK_DGP_PACING; 22655 rack->rc_fillcw_apply_discount = 0; 22656 rack->dgp_on = 1; 22657 rack->rc_always_pace = 1; 22658 rack->rc_pace_dnd = 1; 22659 rack->use_fixed_rate = 0; 22660 if (rack->gp_ready) 22661 rack_set_cc_pacing(rack); 22662 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 22663 rack->rack_attempt_hdwr_pace = 0; 22664 /* rxt settings */ 22665 rack->full_size_rxt = 1; 22666 rack->shape_rxt_to_pacing_min = 0; 22667 /* cmpack=1 */ 22668 rack->r_use_cmp_ack = 1; 22669 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 22670 rack->r_use_cmp_ack) 22671 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 22672 /* scwnd=1 */ 22673 rack->rack_enable_scwnd = 1; 22674 /* dynamic=100 */ 22675 rack->rc_gp_dyn_mul = 1; 22676 /* gp_inc_ca */ 22677 rack->r_ctl.rack_per_of_gp_ca = 100; 22678 /* rrr_conf=3 */ 22679 rack->r_rr_config = 3; 22680 /* npush=2 */ 22681 rack->r_ctl.rc_no_push_at_mrtt = 2; 22682 /* fillcw=1 */ 22683 rack->rc_pace_to_cwnd = 1; 22684 rack->rc_pace_fill_if_rttin_range = 0; 22685 rack->rtt_limit_mul = 0; 22686 /* noprr=1 */ 22687 rack->rack_no_prr = 1; 22688 /* lscwnd=1 */ 22689 rack->r_limit_scw = 1; 22690 /* gp_inc_rec */ 22691 rack->r_ctl.rack_per_of_gp_rec = 90; 22692 return (0); 22693 } 22694 22695 static int 22696 rack_set_profile(struct tcp_rack *rack, int prof) 22697 { 22698 int err = EINVAL; 22699 if (prof == 1) { 22700 /* 22701 * Profile 1 is "standard" DGP. It ignores 22702 * client buffer level. 22703 */ 22704 err = rack_set_dgp(rack); 22705 if (err) 22706 return (err); 22707 } else if (prof == 6) { 22708 err = rack_set_dgp(rack); 22709 if (err) 22710 return (err); 22711 /* 22712 * Profile 6 tweaks DGP so that it will apply to 22713 * fill-cw the same settings that profile5 does 22714 * to replace DGP. It gets then the max(dgp-rate, fillcw(discounted). 
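 * In effect the discounted fill-cw rate becomes a floor under the DGP pacing rate.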
22715 */ 22716 rack->rc_fillcw_apply_discount = 1; 22717 } else if (prof == 0) { 22718 /* This changes things back to the default settings */ 22719 if (rack->rc_always_pace == 1) { 22720 rack_remove_pacing(rack); 22721 } else { 22722 /* Make sure any stray flags are off */ 22723 rack->dgp_on = 0; 22724 rack->rc_hybrid_mode = 0; 22725 rack->use_fixed_rate = 0; 22726 } 22727 err = 0; 22728 if (rack_fill_cw_state) 22729 rack->rc_pace_to_cwnd = 1; 22730 else 22731 rack->rc_pace_to_cwnd = 0; 22732 22733 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 22734 rack->r_ctl.pacing_method |= RACK_REG_PACING; 22735 rack->rc_always_pace = 1; 22736 if (rack->rack_hibeta) 22737 rack_set_cc_pacing(rack); 22738 } else 22739 rack->rc_always_pace = 0; 22740 if (rack_dsack_std_based & 0x1) { 22741 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 22742 rack->rc_rack_tmr_std_based = 1; 22743 } 22744 if (rack_dsack_std_based & 0x2) { 22745 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 22746 rack->rc_rack_use_dsack = 1; 22747 } 22748 if (rack_use_cmp_acks) 22749 rack->r_use_cmp_ack = 1; 22750 else 22751 rack->r_use_cmp_ack = 0; 22752 if (rack_disable_prr) 22753 rack->rack_no_prr = 1; 22754 else 22755 rack->rack_no_prr = 0; 22756 if (rack_gp_no_rec_chg) 22757 rack->rc_gp_no_rec_chg = 1; 22758 else 22759 rack->rc_gp_no_rec_chg = 0; 22760 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 22761 rack->r_mbuf_queue = 1; 22762 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 22763 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 22764 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 22765 } else { 22766 rack->r_mbuf_queue = 0; 22767 rack->rc_tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 22768 } 22769 if (rack_enable_shared_cwnd) 22770 rack->rack_enable_scwnd = 1; 22771 else 22772 rack->rack_enable_scwnd = 0; 22773 if (rack_do_dyn_mul) { 22774 /* When dynamic adjustment is on CA needs to start at 100% */ 22775 rack->rc_gp_dyn_mul = 1; 22776 if (rack_do_dyn_mul >= 100) 22777 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 22778 } else { 22779 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 22780 rack->rc_gp_dyn_mul = 0; 22781 } 22782 rack->r_rr_config = 0; 22783 rack->r_ctl.rc_no_push_at_mrtt = 0; 22784 rack->rc_pace_fill_if_rttin_range = 0; 22785 rack->rtt_limit_mul = 0; 22786 22787 if (rack_enable_hw_pacing) 22788 rack->rack_hdw_pace_ena = 1; 22789 else 22790 rack->rack_hdw_pace_ena = 0; 22791 if (rack_disable_prr) 22792 rack->rack_no_prr = 1; 22793 else 22794 rack->rack_no_prr = 0; 22795 if (rack_limits_scwnd) 22796 rack->r_limit_scw = 1; 22797 else 22798 rack->r_limit_scw = 0; 22799 rack_init_retransmit_value(rack, rack_rxt_controls); 22800 err = 0; 22801 } 22802 return (err); 22803 } 22804 22805 static int 22806 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 22807 { 22808 struct deferred_opt_list *dol; 22809 22810 dol = malloc(sizeof(struct deferred_opt_list), 22811 M_TCPDO, M_NOWAIT|M_ZERO); 22812 if (dol == NULL) { 22813 /* 22814 * No space yikes -- fail out.. 
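 * (the M_NOWAIT allocation can fail under memory pressure)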
22815 */ 22816 return (0); 22817 } 22818 dol->optname = sopt_name; 22819 dol->optval = loptval; 22820 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 22821 return (1); 22822 } 22823 22824 static int 22825 process_hybrid_pacing(struct tcp_rack *rack, struct tcp_hybrid_req *hybrid) 22826 { 22827 #ifdef TCP_REQUEST_TRK 22828 struct tcp_sendfile_track *sft; 22829 struct timeval tv; 22830 tcp_seq seq; 22831 int err; 22832 22833 microuptime(&tv); 22834 22835 /* Make sure no fixed rate is on */ 22836 rack->use_fixed_rate = 0; 22837 rack->r_ctl.rc_fixed_pacing_rate_rec = 0; 22838 rack->r_ctl.rc_fixed_pacing_rate_ca = 0; 22839 rack->r_ctl.rc_fixed_pacing_rate_ss = 0; 22840 /* Now allocate or find our entry that will have these settings */ 22841 sft = tcp_req_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusectick(&tv), 0); 22842 if (sft == NULL) { 22843 rack->rc_tp->tcp_hybrid_error++; 22844 /* no space, where would it have gone? */ 22845 seq = rack->rc_tp->snd_una + rack->rc_tp->t_inpcb.inp_socket->so_snd.sb_ccc; 22846 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_ROOM, __LINE__, 0); 22847 return (ENOSPC); 22848 } 22849 /* mask our internal flags */ 22850 hybrid->hybrid_flags &= TCP_HYBRID_PACING_USER_MASK; 22851 /* The seq will be snd_una + everything in the buffer */ 22852 seq = sft->start_seq; 22853 if ((hybrid->hybrid_flags & TCP_HYBRID_PACING_ENABLE) == 0) { 22854 /* Disabling hybrid pacing */ 22855 if (rack->rc_hybrid_mode) { 22856 rack_set_profile(rack, 0); 22857 rack->rc_tp->tcp_hybrid_stop++; 22858 } 22859 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_TURNED_OFF, __LINE__, 0); 22860 return (0); 22861 } 22862 if (rack->dgp_on == 0) { 22863 /* 22864 * If we have not yet turned DGP on, do so 22865 * now setting pure DGP mode, no buffer level 22866 * response. 22867 */ 22868 if ((err = rack_set_profile(rack, 1)) != 0){ 22869 /* Failed to turn pacing on */ 22870 rack->rc_tp->tcp_hybrid_error++; 22871 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_NO_PACING, __LINE__, 0); 22872 return (err); 22873 } 22874 } 22875 /* 22876 * Now we must switch to hybrid mode as well which also 22877 * means moving to regular pacing. 22878 */ 22879 if (rack->rc_hybrid_mode == 0) { 22880 /* First time */ 22881 if (tcp_can_enable_pacing()) { 22882 rack->r_ctl.pacing_method |= RACK_REG_PACING; 22883 rack->rc_hybrid_mode = 1; 22884 } else { 22885 return (ENOSPC); 22886 } 22887 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) { 22888 /* 22889 * This should be true. 
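 * DGP pacing is active at this point, so release its pacer count now that hybrid mode uses regular pacing.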
22890 */ 22891 tcp_dec_dgp_pacing_cnt(); 22892 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 22893 } 22894 } 22895 /* Now set in our flags */ 22896 sft->hybrid_flags = hybrid->hybrid_flags | TCP_HYBRID_PACING_WASSET; 22897 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_CSPR) 22898 sft->cspr = hybrid->cspr; 22899 else 22900 sft->cspr = 0; 22901 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_H_MS) 22902 sft->hint_maxseg = hybrid->hint_maxseg; 22903 else 22904 sft->hint_maxseg = 0; 22905 rack->rc_tp->tcp_hybrid_start++; 22906 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_RULES_SET, __LINE__,0); 22907 return (0); 22908 #else 22909 return (ENOTSUP); 22910 #endif 22911 } 22912 22913 static int 22914 rack_stack_information(struct tcpcb *tp, struct stack_specific_info *si) 22915 { 22916 /* We pulled a SSI info log out what was there */ 22917 si->bytes_transmitted = tp->t_sndbytes; 22918 si->bytes_retransmitted = tp->t_snd_rxt_bytes; 22919 return (0); 22920 } 22921 22922 static int 22923 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 22924 uint32_t optval, uint64_t loptval, struct tcp_hybrid_req *hybrid) 22925 22926 { 22927 struct epoch_tracker et; 22928 struct sockopt sopt; 22929 struct cc_newreno_opts opt; 22930 uint64_t val; 22931 int error = 0; 22932 uint16_t ca, ss; 22933 22934 switch (sopt_name) { 22935 case TCP_RACK_SET_RXT_OPTIONS: 22936 if (optval <= 2) { 22937 rack_init_retransmit_value(rack, optval); 22938 } else { 22939 /* 22940 * You must send in 0, 1 or 2 all else is 22941 * invalid. 22942 */ 22943 error = EINVAL; 22944 } 22945 break; 22946 case TCP_RACK_DSACK_OPT: 22947 RACK_OPTS_INC(tcp_rack_dsack_opt); 22948 if (optval & 0x1) { 22949 rack->rc_rack_tmr_std_based = 1; 22950 } else { 22951 rack->rc_rack_tmr_std_based = 0; 22952 } 22953 if (optval & 0x2) { 22954 rack->rc_rack_use_dsack = 1; 22955 } else { 22956 rack->rc_rack_use_dsack = 0; 22957 } 22958 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); 22959 break; 22960 case TCP_RACK_PACING_DIVISOR: 22961 RACK_OPTS_INC(tcp_rack_pacing_divisor); 22962 if (optval == 0) { 22963 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; 22964 } else { 22965 if (optval < RL_MIN_DIVISOR) 22966 rack->r_ctl.pace_len_divisor = RL_MIN_DIVISOR; 22967 else 22968 rack->r_ctl.pace_len_divisor = optval; 22969 } 22970 break; 22971 case TCP_RACK_HI_BETA: 22972 RACK_OPTS_INC(tcp_rack_hi_beta); 22973 if (optval > 0) { 22974 rack->rack_hibeta = 1; 22975 if ((optval >= 50) && 22976 (optval <= 100)) { 22977 /* 22978 * User wants to set a custom beta. 22979 */ 22980 rack->r_ctl.saved_hibeta = optval; 22981 if (rack->rc_pacing_cc_set) 22982 rack_undo_cc_pacing(rack); 22983 rack->r_ctl.rc_saved_beta = optval; 22984 } 22985 if (rack->rc_pacing_cc_set == 0) 22986 rack_set_cc_pacing(rack); 22987 } else { 22988 rack->rack_hibeta = 0; 22989 if (rack->rc_pacing_cc_set) 22990 rack_undo_cc_pacing(rack); 22991 } 22992 break; 22993 case TCP_RACK_PACING_BETA: 22994 error = EINVAL; 22995 break; 22996 case TCP_RACK_TIMER_SLOP: 22997 RACK_OPTS_INC(tcp_rack_timer_slop); 22998 rack->r_ctl.timer_slop = optval; 22999 if (rack->rc_tp->t_srtt) { 23000 /* 23001 * If we have an SRTT lets update t_rxtcur 23002 * to have the new slop. 23003 */ 23004 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 23005 rack_rto_min, rack_rto_max, 23006 rack->r_ctl.timer_slop); 23007 } 23008 break; 23009 case TCP_RACK_PACING_BETA_ECN: 23010 RACK_OPTS_INC(tcp_rack_beta_ecn); 23011 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 23012 /* This only works for newreno. 
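 * CC_NEWRENO_BETA_ECN is a newreno-specific control.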
*/ 23013 error = EINVAL; 23014 break; 23015 } 23016 if (rack->rc_pacing_cc_set) { 23017 /* 23018 * Set them into the real CC module 23019 * whats in the rack pcb is the old values 23020 * to be used on restoral/ 23021 */ 23022 sopt.sopt_dir = SOPT_SET; 23023 opt.name = CC_NEWRENO_BETA_ECN; 23024 opt.val = optval; 23025 if (CC_ALGO(tp)->ctl_output != NULL) 23026 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 23027 else 23028 error = ENOENT; 23029 } else { 23030 /* 23031 * Not pacing yet so set it into our local 23032 * rack pcb storage. 23033 */ 23034 rack->r_ctl.rc_saved_beta_ecn = optval; 23035 } 23036 break; 23037 case TCP_DEFER_OPTIONS: 23038 RACK_OPTS_INC(tcp_defer_opt); 23039 if (optval) { 23040 if (rack->gp_ready) { 23041 /* Too late */ 23042 error = EINVAL; 23043 break; 23044 } 23045 rack->defer_options = 1; 23046 } else 23047 rack->defer_options = 0; 23048 break; 23049 case TCP_RACK_MEASURE_CNT: 23050 RACK_OPTS_INC(tcp_rack_measure_cnt); 23051 if (optval && (optval <= 0xff)) { 23052 rack->r_ctl.req_measurements = optval; 23053 } else 23054 error = EINVAL; 23055 break; 23056 case TCP_REC_ABC_VAL: 23057 RACK_OPTS_INC(tcp_rec_abc_val); 23058 if (optval > 0) 23059 rack->r_use_labc_for_rec = 1; 23060 else 23061 rack->r_use_labc_for_rec = 0; 23062 break; 23063 case TCP_RACK_ABC_VAL: 23064 RACK_OPTS_INC(tcp_rack_abc_val); 23065 if ((optval > 0) && (optval < 255)) 23066 rack->rc_labc = optval; 23067 else 23068 error = EINVAL; 23069 break; 23070 case TCP_HDWR_UP_ONLY: 23071 RACK_OPTS_INC(tcp_pacing_up_only); 23072 if (optval) 23073 rack->r_up_only = 1; 23074 else 23075 rack->r_up_only = 0; 23076 break; 23077 case TCP_FILLCW_RATE_CAP: /* URL:fillcw_cap */ 23078 RACK_OPTS_INC(tcp_fillcw_rate_cap); 23079 rack->r_ctl.fillcw_cap = loptval; 23080 break; 23081 case TCP_PACING_RATE_CAP: 23082 RACK_OPTS_INC(tcp_pacing_rate_cap); 23083 if ((rack->dgp_on == 1) && 23084 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { 23085 /* 23086 * If we are doing DGP we need to switch 23087 * to using the pacing limit. 23088 */ 23089 if (tcp_can_enable_pacing() == 0) { 23090 error = ENOSPC; 23091 break; 23092 } 23093 /* 23094 * Now change up the flags and counts to be correct. 23095 */ 23096 rack->r_ctl.pacing_method |= RACK_REG_PACING; 23097 tcp_dec_dgp_pacing_cnt(); 23098 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 23099 } 23100 rack->r_ctl.bw_rate_cap = loptval; 23101 break; 23102 case TCP_HYBRID_PACING: 23103 if (hybrid == NULL) { 23104 error = EINVAL; 23105 break; 23106 } 23107 if (rack->r_ctl.side_chan_dis_mask & HYBRID_DIS_MASK) { 23108 error = EPERM; 23109 break; 23110 } 23111 error = process_hybrid_pacing(rack, hybrid); 23112 break; 23113 case TCP_SIDECHAN_DIS: /* URL:scodm */ 23114 if (optval) 23115 rack->r_ctl.side_chan_dis_mask = optval; 23116 else 23117 rack->r_ctl.side_chan_dis_mask = 0; 23118 break; 23119 case TCP_RACK_PROFILE: 23120 RACK_OPTS_INC(tcp_profile); 23121 error = rack_set_profile(rack, optval); 23122 break; 23123 case TCP_USE_CMP_ACKS: 23124 RACK_OPTS_INC(tcp_use_cmp_acks); 23125 if ((optval == 0) && (tp->t_flags2 & TF2_MBUF_ACKCMP)) { 23126 /* You can't turn it off once its on! 
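 * Compressed acks may already be queued for this connection.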
*/ 23127 error = EINVAL; 23128 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { 23129 rack->r_use_cmp_ack = 1; 23130 rack->r_mbuf_queue = 1; 23131 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23132 } 23133 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 23134 tp->t_flags2 |= TF2_MBUF_ACKCMP; 23135 break; 23136 case TCP_SHARED_CWND_TIME_LIMIT: 23137 RACK_OPTS_INC(tcp_lscwnd); 23138 if (optval) 23139 rack->r_limit_scw = 1; 23140 else 23141 rack->r_limit_scw = 0; 23142 break; 23143 case TCP_RACK_DGP_IN_REC: 23144 error = EINVAL; 23145 break; 23146 case TCP_RACK_PACE_TO_FILL: 23147 RACK_OPTS_INC(tcp_fillcw); 23148 if (optval == 0) 23149 rack->rc_pace_to_cwnd = 0; 23150 else { 23151 rack->rc_pace_to_cwnd = 1; 23152 } 23153 if ((optval >= rack_gp_rtt_maxmul) && 23154 rack_gp_rtt_maxmul && 23155 (optval < 0xf)) { 23156 rack->rc_pace_fill_if_rttin_range = 1; 23157 rack->rtt_limit_mul = optval; 23158 } else { 23159 rack->rc_pace_fill_if_rttin_range = 0; 23160 rack->rtt_limit_mul = 0; 23161 } 23162 break; 23163 case TCP_RACK_NO_PUSH_AT_MAX: 23164 RACK_OPTS_INC(tcp_npush); 23165 if (optval == 0) 23166 rack->r_ctl.rc_no_push_at_mrtt = 0; 23167 else if (optval < 0xff) 23168 rack->r_ctl.rc_no_push_at_mrtt = optval; 23169 else 23170 error = EINVAL; 23171 break; 23172 case TCP_SHARED_CWND_ENABLE: 23173 RACK_OPTS_INC(tcp_rack_scwnd); 23174 if (optval == 0) 23175 rack->rack_enable_scwnd = 0; 23176 else 23177 rack->rack_enable_scwnd = 1; 23178 break; 23179 case TCP_RACK_MBUF_QUEUE: 23180 /* Now do we use the LRO mbuf-queue feature */ 23181 RACK_OPTS_INC(tcp_rack_mbufq); 23182 if (optval || rack->r_use_cmp_ack) 23183 rack->r_mbuf_queue = 1; 23184 else 23185 rack->r_mbuf_queue = 0; 23186 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 23187 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23188 else 23189 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 23190 break; 23191 case TCP_RACK_NONRXT_CFG_RATE: 23192 RACK_OPTS_INC(tcp_rack_cfg_rate); 23193 if (optval == 0) 23194 rack->rack_rec_nonrxt_use_cr = 0; 23195 else 23196 rack->rack_rec_nonrxt_use_cr = 1; 23197 break; 23198 case TCP_NO_PRR: 23199 RACK_OPTS_INC(tcp_rack_noprr); 23200 if (optval == 0) 23201 rack->rack_no_prr = 0; 23202 else if (optval == 1) 23203 rack->rack_no_prr = 1; 23204 else if (optval == 2) 23205 rack->no_prr_addback = 1; 23206 else 23207 error = EINVAL; 23208 break; 23209 case RACK_CSPR_IS_FCC: /* URL:csprisfcc */ 23210 if (optval > 0) 23211 rack->cspr_is_fcc = 1; 23212 else 23213 rack->cspr_is_fcc = 0; 23214 break; 23215 case TCP_TIMELY_DYN_ADJ: 23216 RACK_OPTS_INC(tcp_timely_dyn); 23217 if (optval == 0) 23218 rack->rc_gp_dyn_mul = 0; 23219 else { 23220 rack->rc_gp_dyn_mul = 1; 23221 if (optval >= 100) { 23222 /* 23223 * If the user sets something 100 or more 23224 * its the gp_ca value. 23225 */ 23226 rack->r_ctl.rack_per_of_gp_ca = optval; 23227 } 23228 } 23229 break; 23230 case TCP_RACK_DO_DETECTION: 23231 error = EINVAL; 23232 break; 23233 case TCP_RACK_TLP_USE: 23234 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 23235 error = EINVAL; 23236 break; 23237 } 23238 RACK_OPTS_INC(tcp_tlp_use); 23239 rack->rack_tlp_threshold_use = optval; 23240 break; 23241 case TCP_RACK_TLP_REDUCE: 23242 /* RACK TLP cwnd reduction (bool) */ 23243 RACK_OPTS_INC(tcp_rack_tlp_reduce); 23244 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 23245 break; 23246 /* Pacing related ones */ 23247 case TCP_RACK_PACE_ALWAYS: 23248 /* 23249 * zero is old rack method, 1 is new 23250 * method using a pacing rate. 
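 * Turning pacing on also requires tcp_can_enable_pacing() to succeed; otherwise ENOSPC is returned.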
23251 */ 23252 RACK_OPTS_INC(tcp_rack_pace_always); 23253 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 23254 error = EPERM; 23255 break; 23256 } 23257 if (optval > 0) { 23258 if (rack->rc_always_pace) { 23259 error = EALREADY; 23260 break; 23261 } else if (tcp_can_enable_pacing()) { 23262 rack->r_ctl.pacing_method |= RACK_REG_PACING; 23263 rack->rc_always_pace = 1; 23264 if (rack->rack_hibeta) 23265 rack_set_cc_pacing(rack); 23266 } 23267 else { 23268 error = ENOSPC; 23269 break; 23270 } 23271 } else { 23272 if (rack->rc_always_pace == 1) { 23273 rack_remove_pacing(rack); 23274 } 23275 } 23276 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 23277 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23278 else 23279 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 23280 /* A rate may be set irate or other, if so set seg size */ 23281 rack_update_seg(rack); 23282 break; 23283 case TCP_BBR_RACK_INIT_RATE: 23284 RACK_OPTS_INC(tcp_initial_rate); 23285 val = optval; 23286 /* Change from kbits per second to bytes per second */ 23287 val *= 1000; 23288 val /= 8; 23289 rack->r_ctl.init_rate = val; 23290 if (rack->rc_always_pace) 23291 rack_update_seg(rack); 23292 break; 23293 case TCP_BBR_IWINTSO: 23294 error = EINVAL; 23295 break; 23296 case TCP_RACK_FORCE_MSEG: 23297 RACK_OPTS_INC(tcp_rack_force_max_seg); 23298 if (optval) 23299 rack->rc_force_max_seg = 1; 23300 else 23301 rack->rc_force_max_seg = 0; 23302 break; 23303 case TCP_RACK_PACE_MIN_SEG: 23304 RACK_OPTS_INC(tcp_rack_min_seg); 23305 rack->r_ctl.rc_user_set_min_segs = (0x0000ffff & optval); 23306 rack_set_pace_segments(tp, rack, __LINE__, NULL); 23307 break; 23308 case TCP_RACK_PACE_MAX_SEG: 23309 /* Max segments size in a pace in bytes */ 23310 RACK_OPTS_INC(tcp_rack_max_seg); 23311 if ((rack->dgp_on == 1) && 23312 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { 23313 /* 23314 * If we set a max-seg and are doing DGP then 23315 * we now fall under the pacing limits not the 23316 * DGP ones. 23317 */ 23318 if (tcp_can_enable_pacing() == 0) { 23319 error = ENOSPC; 23320 break; 23321 } 23322 /* 23323 * Now change up the flags and counts to be correct. 23324 */ 23325 rack->r_ctl.pacing_method |= RACK_REG_PACING; 23326 tcp_dec_dgp_pacing_cnt(); 23327 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; 23328 } 23329 if (optval <= MAX_USER_SET_SEG) 23330 rack->rc_user_set_max_segs = optval; 23331 else 23332 rack->rc_user_set_max_segs = MAX_USER_SET_SEG; 23333 rack_set_pace_segments(tp, rack, __LINE__, NULL); 23334 break; 23335 case TCP_RACK_PACE_RATE_REC: 23336 /* Set the fixed pacing rate in Bytes per second ca */ 23337 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 23338 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 23339 error = EPERM; 23340 break; 23341 } 23342 if (rack->dgp_on) { 23343 /* 23344 * We are already pacing another 23345 * way. 
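 * Fixed-rate pacing cannot be combined with DGP.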
23346 */ 23347 error = EBUSY; 23348 break; 23349 } 23350 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 23351 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 23352 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 23353 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 23354 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 23355 rack->use_fixed_rate = 1; 23356 if (rack->rack_hibeta) 23357 rack_set_cc_pacing(rack); 23358 rack_log_pacing_delay_calc(rack, 23359 rack->r_ctl.rc_fixed_pacing_rate_ss, 23360 rack->r_ctl.rc_fixed_pacing_rate_ca, 23361 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 23362 __LINE__, NULL,0); 23363 break; 23364 23365 case TCP_RACK_PACE_RATE_SS: 23366 /* Set the fixed pacing rate in Bytes per second ca */ 23367 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 23368 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 23369 error = EPERM; 23370 break; 23371 } 23372 if (rack->dgp_on) { 23373 /* 23374 * We are already pacing another 23375 * way. 23376 */ 23377 error = EBUSY; 23378 break; 23379 } 23380 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 23381 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 23382 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 23383 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 23384 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 23385 rack->use_fixed_rate = 1; 23386 if (rack->rack_hibeta) 23387 rack_set_cc_pacing(rack); 23388 rack_log_pacing_delay_calc(rack, 23389 rack->r_ctl.rc_fixed_pacing_rate_ss, 23390 rack->r_ctl.rc_fixed_pacing_rate_ca, 23391 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 23392 __LINE__, NULL, 0); 23393 break; 23394 23395 case TCP_RACK_PACE_RATE_CA: 23396 /* Set the fixed pacing rate in Bytes per second ca */ 23397 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 23398 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { 23399 error = EPERM; 23400 break; 23401 } 23402 if (rack->dgp_on) { 23403 /* 23404 * We are already pacing another 23405 * way. 23406 */ 23407 error = EBUSY; 23408 break; 23409 } 23410 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 23411 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 23412 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 23413 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 23414 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 23415 rack->use_fixed_rate = 1; 23416 if (rack->rack_hibeta) 23417 rack_set_cc_pacing(rack); 23418 rack_log_pacing_delay_calc(rack, 23419 rack->r_ctl.rc_fixed_pacing_rate_ss, 23420 rack->r_ctl.rc_fixed_pacing_rate_ca, 23421 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 23422 __LINE__, NULL, 0); 23423 break; 23424 case TCP_RACK_GP_INCREASE_REC: 23425 RACK_OPTS_INC(tcp_gp_inc_rec); 23426 rack->r_ctl.rack_per_of_gp_rec = optval; 23427 rack_log_pacing_delay_calc(rack, 23428 rack->r_ctl.rack_per_of_gp_ss, 23429 rack->r_ctl.rack_per_of_gp_ca, 23430 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 23431 __LINE__, NULL, 0); 23432 break; 23433 case TCP_RACK_GP_INCREASE_CA: 23434 RACK_OPTS_INC(tcp_gp_inc_ca); 23435 ca = optval; 23436 if (ca < 100) { 23437 /* 23438 * We don't allow any reduction 23439 * over the GP b/w. 23440 */ 23441 error = EINVAL; 23442 break; 23443 } 23444 rack->r_ctl.rack_per_of_gp_ca = ca; 23445 rack_log_pacing_delay_calc(rack, 23446 rack->r_ctl.rack_per_of_gp_ss, 23447 rack->r_ctl.rack_per_of_gp_ca, 23448 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 23449 __LINE__, NULL, 0); 23450 break; 23451 case TCP_RACK_GP_INCREASE_SS: 23452 RACK_OPTS_INC(tcp_gp_inc_ss); 23453 ss = optval; 23454 if (ss < 100) { 23455 /* 23456 * We don't allow any reduction 23457 * over the GP b/w. 
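 * e.g. a value of 250 paces slow start at 2.5 times the measured goodput.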
23458 */ 23459 error = EINVAL; 23460 break; 23461 } 23462 rack->r_ctl.rack_per_of_gp_ss = ss; 23463 rack_log_pacing_delay_calc(rack, 23464 rack->r_ctl.rack_per_of_gp_ss, 23465 rack->r_ctl.rack_per_of_gp_ca, 23466 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 23467 __LINE__, NULL, 0); 23468 break; 23469 case TCP_RACK_RR_CONF: 23470 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 23471 if (optval && optval <= 3) 23472 rack->r_rr_config = optval; 23473 else 23474 rack->r_rr_config = 0; 23475 break; 23476 case TCP_PACING_DND: /* URL:dnd */ 23477 if (optval > 0) 23478 rack->rc_pace_dnd = 1; 23479 else 23480 rack->rc_pace_dnd = 0; 23481 break; 23482 case TCP_HDWR_RATE_CAP: 23483 RACK_OPTS_INC(tcp_hdwr_rate_cap); 23484 if (optval) { 23485 if (rack->r_rack_hw_rate_caps == 0) 23486 rack->r_rack_hw_rate_caps = 1; 23487 else 23488 error = EALREADY; 23489 } else { 23490 rack->r_rack_hw_rate_caps = 0; 23491 } 23492 break; 23493 case TCP_DGP_UPPER_BOUNDS: 23494 { 23495 uint8_t val; 23496 val = optval & 0x0000ff; 23497 rack->r_ctl.rack_per_upper_bound_ca = val; 23498 val = (optval >> 16) & 0x0000ff; 23499 rack->r_ctl.rack_per_upper_bound_ss = val; 23500 break; 23501 } 23502 case TCP_SS_EEXIT: /* URL:eexit */ 23503 if (optval > 0) { 23504 rack->r_ctl.gp_rnd_thresh = optval & 0x0ff; 23505 if (optval & 0x10000) { 23506 rack->r_ctl.gate_to_fs = 1; 23507 } else { 23508 rack->r_ctl.gate_to_fs = 0; 23509 } 23510 if (optval & 0x20000) { 23511 rack->r_ctl.use_gp_not_last = 1; 23512 } else { 23513 rack->r_ctl.use_gp_not_last = 0; 23514 } 23515 if (optval & 0xfffc0000) { 23516 uint32_t v; 23517 23518 v = (optval >> 18) & 0x00003fff; 23519 if (v >= 1000) 23520 rack->r_ctl.gp_gain_req = v; 23521 } 23522 } else { 23523 /* We do not do ss early exit at all */ 23524 rack->rc_initial_ss_comp = 1; 23525 rack->r_ctl.gp_rnd_thresh = 0; 23526 } 23527 break; 23528 case TCP_RACK_SPLIT_LIMIT: 23529 RACK_OPTS_INC(tcp_split_limit); 23530 rack->r_ctl.rc_split_limit = optval; 23531 break; 23532 case TCP_BBR_HDWR_PACE: 23533 RACK_OPTS_INC(tcp_hdwr_pacing); 23534 if (optval){ 23535 if (rack->rack_hdrw_pacing == 0) { 23536 rack->rack_hdw_pace_ena = 1; 23537 rack->rack_attempt_hdwr_pace = 0; 23538 } else 23539 error = EALREADY; 23540 } else { 23541 rack->rack_hdw_pace_ena = 0; 23542 #ifdef RATELIMIT 23543 if (rack->r_ctl.crte != NULL) { 23544 rack->rack_hdrw_pacing = 0; 23545 rack->rack_attempt_hdwr_pace = 0; 23546 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 23547 rack->r_ctl.crte = NULL; 23548 } 23549 #endif 23550 } 23551 break; 23552 /* End Pacing related ones */ 23553 case TCP_RACK_PRR_SENDALOT: 23554 /* Allow PRR to send more than one seg */ 23555 RACK_OPTS_INC(tcp_rack_prr_sendalot); 23556 rack->r_ctl.rc_prr_sendalot = optval; 23557 break; 23558 case TCP_RACK_MIN_TO: 23559 /* Minimum time between rack t-o's in ms */ 23560 RACK_OPTS_INC(tcp_rack_min_to); 23561 rack->r_ctl.rc_min_to = optval; 23562 break; 23563 case TCP_RACK_EARLY_SEG: 23564 /* If early recovery max segments */ 23565 RACK_OPTS_INC(tcp_rack_early_seg); 23566 rack->r_ctl.rc_early_recovery_segs = optval; 23567 break; 23568 case TCP_RACK_ENABLE_HYSTART: 23569 { 23570 if (optval) { 23571 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 23572 if (rack_do_hystart > RACK_HYSTART_ON) 23573 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 23574 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 23575 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 23576 } else { 23577 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 23578 } 23579 } 23580 break; 23581 case 
TCP_RACK_REORD_THRESH: 23582 /* RACK reorder threshold (shift amount) */ 23583 RACK_OPTS_INC(tcp_rack_reord_thresh); 23584 if ((optval > 0) && (optval < 31)) 23585 rack->r_ctl.rc_reorder_shift = optval; 23586 else 23587 error = EINVAL; 23588 break; 23589 case TCP_RACK_REORD_FADE: 23590 /* Does reordering fade after ms time */ 23591 RACK_OPTS_INC(tcp_rack_reord_fade); 23592 rack->r_ctl.rc_reorder_fade = optval; 23593 break; 23594 case TCP_RACK_TLP_THRESH: 23595 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 23596 RACK_OPTS_INC(tcp_rack_tlp_thresh); 23597 if (optval) 23598 rack->r_ctl.rc_tlp_threshold = optval; 23599 else 23600 error = EINVAL; 23601 break; 23602 case TCP_BBR_USE_RACK_RR: 23603 RACK_OPTS_INC(tcp_rack_rr); 23604 if (optval) 23605 rack->use_rack_rr = 1; 23606 else 23607 rack->use_rack_rr = 0; 23608 break; 23609 case TCP_RACK_PKT_DELAY: 23610 /* RACK added ms i.e. rack-rtt + reord + N */ 23611 RACK_OPTS_INC(tcp_rack_pkt_delay); 23612 rack->r_ctl.rc_pkt_delay = optval; 23613 break; 23614 case TCP_DELACK: 23615 RACK_OPTS_INC(tcp_rack_delayed_ack); 23616 if (optval == 0) 23617 tp->t_delayed_ack = 0; 23618 else 23619 tp->t_delayed_ack = 1; 23620 if (tp->t_flags & TF_DELACK) { 23621 tp->t_flags &= ~TF_DELACK; 23622 tp->t_flags |= TF_ACKNOW; 23623 NET_EPOCH_ENTER(et); 23624 rack_output(tp); 23625 NET_EPOCH_EXIT(et); 23626 } 23627 break; 23628 23629 case TCP_BBR_RACK_RTT_USE: 23630 RACK_OPTS_INC(tcp_rack_rtt_use); 23631 if ((optval != USE_RTT_HIGH) && 23632 (optval != USE_RTT_LOW) && 23633 (optval != USE_RTT_AVG)) 23634 error = EINVAL; 23635 else 23636 rack->r_ctl.rc_rate_sample_method = optval; 23637 break; 23638 case TCP_HONOR_HPTS_MIN: 23639 RACK_OPTS_INC(tcp_honor_hpts); 23640 if (optval) { 23641 rack->r_use_hpts_min = 1; 23642 /* 23643 * Must be between 2 - 80% to be a reduction else 23644 * we keep the default (10%). 23645 */ 23646 if ((optval > 1) && (optval <= 80)) { 23647 rack->r_ctl.max_reduction = optval; 23648 } 23649 } else 23650 rack->r_use_hpts_min = 0; 23651 break; 23652 case TCP_REC_IS_DYN: /* URL:dynrec */ 23653 RACK_OPTS_INC(tcp_dyn_rec); 23654 if (optval) 23655 rack->rc_gp_no_rec_chg = 1; 23656 else 23657 rack->rc_gp_no_rec_chg = 0; 23658 break; 23659 case TCP_NO_TIMELY: 23660 RACK_OPTS_INC(tcp_notimely); 23661 if (optval) { 23662 rack->rc_skip_timely = 1; 23663 rack->r_ctl.rack_per_of_gp_rec = 90; 23664 rack->r_ctl.rack_per_of_gp_ca = 100; 23665 rack->r_ctl.rack_per_of_gp_ss = 250; 23666 } else { 23667 rack->rc_skip_timely = 0; 23668 } 23669 break; 23670 case TCP_GP_USE_LTBW: 23671 if (optval == 0) { 23672 rack->use_lesser_lt_bw = 0; 23673 rack->dis_lt_bw = 1; 23674 } else if (optval == 1) { 23675 rack->use_lesser_lt_bw = 1; 23676 rack->dis_lt_bw = 0; 23677 } else if (optval == 2) { 23678 rack->use_lesser_lt_bw = 0; 23679 rack->dis_lt_bw = 0; 23680 } 23681 break; 23682 case TCP_DATA_AFTER_CLOSE: 23683 RACK_OPTS_INC(tcp_data_after_close); 23684 if (optval) 23685 rack->rc_allow_data_af_clo = 1; 23686 else 23687 rack->rc_allow_data_af_clo = 0; 23688 break; 23689 default: 23690 break; 23691 } 23692 tcp_log_socket_option(tp, sopt_name, optval, error); 23693 return (error); 23694 } 23695 23696 static void 23697 rack_inherit(struct tcpcb *tp, struct inpcb *parent) 23698 { 23699 /* 23700 * A new connection has been created (tp) and 23701 * the parent is the inpcb given. 
We want to 23702 * apply a read-lock to the parent (we are already 23703 * holding a write lock on the tp) and copy anything 23704 * out of the rack specific data as long as its tfb is 23705 * the same as ours i.e. we are the same stack. Otherwise 23706 * we just return. 23707 */ 23708 struct tcpcb *par; 23709 struct tcp_rack *dest, *src; 23710 int cnt = 0; 23711 23712 par = intotcpcb(parent); 23713 if (par->t_fb != tp->t_fb) { 23714 /* Not the same stack */ 23715 tcp_log_socket_option(tp, 0, 0, 1); 23716 return; 23717 } 23718 /* Ok if we reach here lets setup the two rack pointers */ 23719 dest = (struct tcp_rack *)tp->t_fb_ptr; 23720 src = (struct tcp_rack *)par->t_fb_ptr; 23721 if ((src == NULL) || (dest == NULL)) { 23722 /* Huh? */ 23723 tcp_log_socket_option(tp, 0, 0, 2); 23724 return; 23725 } 23726 /* Now copy out anything we wish to inherit i.e. things in socket-options */ 23727 /* TCP_RACK_PROFILE we can't know but we can set DGP if its on */ 23728 if ((src->dgp_on) && (dest->dgp_on == 0)) { 23729 /* Profile 1 had to be set via sock opt */ 23730 rack_set_dgp(dest); 23731 cnt++; 23732 } 23733 /* TCP_RACK_SET_RXT_OPTIONS */ 23734 if (dest->full_size_rxt != src->full_size_rxt) { 23735 dest->full_size_rxt = src->full_size_rxt; 23736 cnt++; 23737 } 23738 if (dest->shape_rxt_to_pacing_min != src->shape_rxt_to_pacing_min) { 23739 dest->shape_rxt_to_pacing_min = src->shape_rxt_to_pacing_min; 23740 cnt++; 23741 } 23742 /* TCP_RACK_DSACK_OPT */ 23743 if (dest->rc_rack_tmr_std_based != src->rc_rack_tmr_std_based) { 23744 dest->rc_rack_tmr_std_based = src->rc_rack_tmr_std_based; 23745 cnt++; 23746 } 23747 if (dest->rc_rack_use_dsack != src->rc_rack_use_dsack) { 23748 dest->rc_rack_use_dsack = src->rc_rack_use_dsack; 23749 cnt++; 23750 } 23751 /* TCP_RACK_PACING_DIVISOR */ 23752 if (dest->r_ctl.pace_len_divisor != src->r_ctl.pace_len_divisor) { 23753 dest->r_ctl.pace_len_divisor = src->r_ctl.pace_len_divisor; 23754 cnt++; 23755 } 23756 /* TCP_RACK_HI_BETA */ 23757 if (src->rack_hibeta != dest->rack_hibeta) { 23758 cnt++; 23759 if (src->rack_hibeta) { 23760 dest->r_ctl.rc_saved_beta = src->r_ctl.rc_saved_beta; 23761 dest->rack_hibeta = 1; 23762 } else { 23763 dest->rack_hibeta = 0; 23764 } 23765 } 23766 /* TCP_RACK_TIMER_SLOP */ 23767 if (dest->r_ctl.timer_slop != src->r_ctl.timer_slop) { 23768 dest->r_ctl.timer_slop = src->r_ctl.timer_slop; 23769 cnt++; 23770 } 23771 /* TCP_RACK_PACING_BETA_ECN */ 23772 if (dest->r_ctl.rc_saved_beta_ecn != src->r_ctl.rc_saved_beta_ecn) { 23773 dest->r_ctl.rc_saved_beta_ecn = src->r_ctl.rc_saved_beta_ecn; 23774 cnt++; 23775 } 23776 /* We do not do TCP_DEFER_OPTIONS */ 23777 /* TCP_RACK_MEASURE_CNT */ 23778 if (dest->r_ctl.req_measurements != src->r_ctl.req_measurements) { 23779 dest->r_ctl.req_measurements = src->r_ctl.req_measurements; 23780 cnt++; 23781 } 23782 /* TCP_HDWR_UP_ONLY */ 23783 if (dest->r_up_only != src->r_up_only) { 23784 dest->r_up_only = src->r_up_only; 23785 cnt++; 23786 } 23787 /* TCP_FILLCW_RATE_CAP */ 23788 if (dest->r_ctl.fillcw_cap != src->r_ctl.fillcw_cap) { 23789 dest->r_ctl.fillcw_cap = src->r_ctl.fillcw_cap; 23790 cnt++; 23791 } 23792 /* TCP_PACING_RATE_CAP */ 23793 if (dest->r_ctl.bw_rate_cap != src->r_ctl.bw_rate_cap) { 23794 dest->r_ctl.bw_rate_cap = src->r_ctl.bw_rate_cap; 23795 cnt++; 23796 } 23797 /* A listener can't set TCP_HYBRID_PACING */ 23798 /* TCP_SIDECHAN_DIS */ 23799 if (dest->r_ctl.side_chan_dis_mask != src->r_ctl.side_chan_dis_mask) { 23800 dest->r_ctl.side_chan_dis_mask = src->r_ctl.side_chan_dis_mask; 23801 cnt++; 
23802 } 23803 /* TCP_SHARED_CWND_TIME_LIMIT */ 23804 if (dest->r_limit_scw != src->r_limit_scw) { 23805 dest->r_limit_scw = src->r_limit_scw; 23806 cnt++; 23807 } 23808 /* TCP_RACK_PACE_TO_FILL */ 23809 if (dest->rc_pace_to_cwnd != src->rc_pace_to_cwnd) { 23810 dest->rc_pace_to_cwnd = src->rc_pace_to_cwnd; 23811 cnt++; 23812 } 23813 if (dest->rc_pace_fill_if_rttin_range != src->rc_pace_fill_if_rttin_range) { 23814 dest->rc_pace_fill_if_rttin_range = src->rc_pace_fill_if_rttin_range; 23815 cnt++; 23816 } 23817 if (dest->rtt_limit_mul != src->rtt_limit_mul) { 23818 dest->rtt_limit_mul = src->rtt_limit_mul; 23819 cnt++; 23820 } 23821 /* TCP_RACK_NO_PUSH_AT_MAX */ 23822 if (dest->r_ctl.rc_no_push_at_mrtt != src->r_ctl.rc_no_push_at_mrtt) { 23823 dest->r_ctl.rc_no_push_at_mrtt = src->r_ctl.rc_no_push_at_mrtt; 23824 cnt++; 23825 } 23826 /* TCP_SHARED_CWND_ENABLE */ 23827 if (dest->rack_enable_scwnd != src->rack_enable_scwnd) { 23828 dest->rack_enable_scwnd = src->rack_enable_scwnd; 23829 cnt++; 23830 } 23831 /* TCP_USE_CMP_ACKS */ 23832 if (dest->r_use_cmp_ack != src->r_use_cmp_ack) { 23833 dest->r_use_cmp_ack = src->r_use_cmp_ack; 23834 cnt++; 23835 } 23836 23837 if (dest->r_mbuf_queue != src->r_mbuf_queue) { 23838 dest->r_mbuf_queue = src->r_mbuf_queue; 23839 cnt++; 23840 } 23841 /* TCP_RACK_MBUF_QUEUE */ 23842 if (dest->r_mbuf_queue != src->r_mbuf_queue) { 23843 dest->r_mbuf_queue = src->r_mbuf_queue; 23844 cnt++; 23845 } 23846 if (dest->r_mbuf_queue || dest->rc_always_pace || dest->r_use_cmp_ack) { 23847 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23848 } else { 23849 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 23850 } 23851 if (dest->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) { 23852 tp->t_flags2 |= TF2_MBUF_ACKCMP; 23853 } 23854 /* TCP_RACK_NONRXT_CFG_RATE */ 23855 if (dest->rack_rec_nonrxt_use_cr != src->rack_rec_nonrxt_use_cr) { 23856 dest->rack_rec_nonrxt_use_cr = src->rack_rec_nonrxt_use_cr; 23857 cnt++; 23858 } 23859 /* TCP_NO_PRR */ 23860 if (dest->rack_no_prr != src->rack_no_prr) { 23861 dest->rack_no_prr = src->rack_no_prr; 23862 cnt++; 23863 } 23864 if (dest->no_prr_addback != src->no_prr_addback) { 23865 dest->no_prr_addback = src->no_prr_addback; 23866 cnt++; 23867 } 23868 /* RACK_CSPR_IS_FCC */ 23869 if (dest->cspr_is_fcc != src->cspr_is_fcc) { 23870 dest->cspr_is_fcc = src->cspr_is_fcc; 23871 cnt++; 23872 } 23873 /* TCP_TIMELY_DYN_ADJ */ 23874 if (dest->rc_gp_dyn_mul != src->rc_gp_dyn_mul) { 23875 dest->rc_gp_dyn_mul = src->rc_gp_dyn_mul; 23876 cnt++; 23877 } 23878 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { 23879 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; 23880 cnt++; 23881 } 23882 /* TCP_RACK_TLP_USE */ 23883 if (dest->rack_tlp_threshold_use != src->rack_tlp_threshold_use) { 23884 dest->rack_tlp_threshold_use = src->rack_tlp_threshold_use; 23885 cnt++; 23886 } 23887 /* we don't allow inheritence of TCP_RACK_PACE_ALWAYS */ 23888 /* TCP_BBR_RACK_INIT_RATE */ 23889 if (dest->r_ctl.init_rate != src->r_ctl.init_rate) { 23890 dest->r_ctl.init_rate = src->r_ctl.init_rate; 23891 cnt++; 23892 } 23893 /* TCP_RACK_FORCE_MSEG */ 23894 if (dest->rc_force_max_seg != src->rc_force_max_seg) { 23895 dest->rc_force_max_seg = src->rc_force_max_seg; 23896 cnt++; 23897 } 23898 /* TCP_RACK_PACE_MIN_SEG */ 23899 if (dest->r_ctl.rc_user_set_min_segs != src->r_ctl.rc_user_set_min_segs) { 23900 dest->r_ctl.rc_user_set_min_segs = src->r_ctl.rc_user_set_min_segs; 23901 cnt++; 23902 } 23903 /* we don't allow TCP_RACK_PACE_MAX_SEG */ 23904 /* TCP_RACK_PACE_RATE_REC, 
TCP_RACK_PACE_RATE_SS, TCP_RACK_PACE_RATE_CA */ 23905 if (dest->r_ctl.rc_fixed_pacing_rate_ca != src->r_ctl.rc_fixed_pacing_rate_ca) { 23906 dest->r_ctl.rc_fixed_pacing_rate_ca = src->r_ctl.rc_fixed_pacing_rate_ca; 23907 cnt++; 23908 } 23909 if (dest->r_ctl.rc_fixed_pacing_rate_ss != src->r_ctl.rc_fixed_pacing_rate_ss) { 23910 dest->r_ctl.rc_fixed_pacing_rate_ss = src->r_ctl.rc_fixed_pacing_rate_ss; 23911 cnt++; 23912 } 23913 if (dest->r_ctl.rc_fixed_pacing_rate_rec != src->r_ctl.rc_fixed_pacing_rate_rec) { 23914 dest->r_ctl.rc_fixed_pacing_rate_rec = src->r_ctl.rc_fixed_pacing_rate_rec; 23915 cnt++; 23916 } 23917 /* TCP_RACK_GP_INCREASE_REC, TCP_RACK_GP_INCREASE_CA, TCP_RACK_GP_INCREASE_SS */ 23918 if (dest->r_ctl.rack_per_of_gp_rec != src->r_ctl.rack_per_of_gp_rec) { 23919 dest->r_ctl.rack_per_of_gp_rec = src->r_ctl.rack_per_of_gp_rec; 23920 cnt++; 23921 } 23922 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { 23923 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; 23924 cnt++; 23925 } 23926 23927 if (dest->r_ctl.rack_per_of_gp_ss != src->r_ctl.rack_per_of_gp_ss) { 23928 dest->r_ctl.rack_per_of_gp_ss = src->r_ctl.rack_per_of_gp_ss; 23929 cnt++; 23930 } 23931 /* TCP_RACK_RR_CONF */ 23932 if (dest->r_rr_config != src->r_rr_config) { 23933 dest->r_rr_config = src->r_rr_config; 23934 cnt++; 23935 } 23936 /* TCP_PACING_DND */ 23937 if (dest->rc_pace_dnd != src->rc_pace_dnd) { 23938 dest->rc_pace_dnd = src->rc_pace_dnd; 23939 cnt++; 23940 } 23941 /* TCP_HDWR_RATE_CAP */ 23942 if (dest->r_rack_hw_rate_caps != src->r_rack_hw_rate_caps) { 23943 dest->r_rack_hw_rate_caps = src->r_rack_hw_rate_caps; 23944 cnt++; 23945 } 23946 /* TCP_DGP_UPPER_BOUNDS */ 23947 if (dest->r_ctl.rack_per_upper_bound_ca != src->r_ctl.rack_per_upper_bound_ca) { 23948 dest->r_ctl.rack_per_upper_bound_ca = src->r_ctl.rack_per_upper_bound_ca; 23949 cnt++; 23950 } 23951 if (dest->r_ctl.rack_per_upper_bound_ss != src->r_ctl.rack_per_upper_bound_ss) { 23952 dest->r_ctl.rack_per_upper_bound_ss = src->r_ctl.rack_per_upper_bound_ss; 23953 cnt++; 23954 } 23955 /* TCP_SS_EEXIT */ 23956 if (dest->r_ctl.gp_rnd_thresh != src->r_ctl.gp_rnd_thresh) { 23957 dest->r_ctl.gp_rnd_thresh = src->r_ctl.gp_rnd_thresh; 23958 cnt++; 23959 } 23960 if (dest->r_ctl.gate_to_fs != src->r_ctl.gate_to_fs) { 23961 dest->r_ctl.gate_to_fs = src->r_ctl.gate_to_fs; 23962 cnt++; 23963 } 23964 if (dest->r_ctl.use_gp_not_last != src->r_ctl.use_gp_not_last) { 23965 dest->r_ctl.use_gp_not_last = src->r_ctl.use_gp_not_last; 23966 cnt++; 23967 } 23968 if (dest->r_ctl.gp_gain_req != src->r_ctl.gp_gain_req) { 23969 dest->r_ctl.gp_gain_req = src->r_ctl.gp_gain_req; 23970 cnt++; 23971 } 23972 /* TCP_BBR_HDWR_PACE */ 23973 if (dest->rack_hdw_pace_ena != src->rack_hdw_pace_ena) { 23974 dest->rack_hdw_pace_ena = src->rack_hdw_pace_ena; 23975 cnt++; 23976 } 23977 if (dest->rack_attempt_hdwr_pace != src->rack_attempt_hdwr_pace) { 23978 dest->rack_attempt_hdwr_pace = src->rack_attempt_hdwr_pace; 23979 cnt++; 23980 } 23981 /* TCP_RACK_PRR_SENDALOT */ 23982 if (dest->r_ctl.rc_prr_sendalot != src->r_ctl.rc_prr_sendalot) { 23983 dest->r_ctl.rc_prr_sendalot = src->r_ctl.rc_prr_sendalot; 23984 cnt++; 23985 } 23986 /* TCP_RACK_MIN_TO */ 23987 if (dest->r_ctl.rc_min_to != src->r_ctl.rc_min_to) { 23988 dest->r_ctl.rc_min_to = src->r_ctl.rc_min_to; 23989 cnt++; 23990 } 23991 /* TCP_RACK_EARLY_SEG */ 23992 if (dest->r_ctl.rc_early_recovery_segs != src->r_ctl.rc_early_recovery_segs) { 23993 dest->r_ctl.rc_early_recovery_segs = 
src->r_ctl.rc_early_recovery_segs;
		cnt++;
	}
	/* TCP_RACK_ENABLE_HYSTART */
	if (par->t_ccv.flags != tp->t_ccv.flags) {
		cnt++;
		if (par->t_ccv.flags & CCF_HYSTART_ALLOWED) {
			tp->t_ccv.flags |= CCF_HYSTART_ALLOWED;
			if (rack_do_hystart > RACK_HYSTART_ON)
				tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND;
			if (rack_do_hystart > RACK_HYSTART_ON_W_SC)
				tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH;
		} else {
			tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH);
		}
	}
	/* TCP_RACK_REORD_THRESH */
	if (dest->r_ctl.rc_reorder_shift != src->r_ctl.rc_reorder_shift) {
		dest->r_ctl.rc_reorder_shift = src->r_ctl.rc_reorder_shift;
		cnt++;
	}
	/* TCP_RACK_REORD_FADE */
	if (dest->r_ctl.rc_reorder_fade != src->r_ctl.rc_reorder_fade) {
		dest->r_ctl.rc_reorder_fade = src->r_ctl.rc_reorder_fade;
		cnt++;
	}
	/* TCP_RACK_TLP_THRESH */
	if (dest->r_ctl.rc_tlp_threshold != src->r_ctl.rc_tlp_threshold) {
		dest->r_ctl.rc_tlp_threshold = src->r_ctl.rc_tlp_threshold;
		cnt++;
	}
	/* TCP_BBR_USE_RACK_RR */
	if (dest->use_rack_rr != src->use_rack_rr) {
		dest->use_rack_rr = src->use_rack_rr;
		cnt++;
	}
	/* TCP_RACK_PKT_DELAY */
	if (dest->r_ctl.rc_pkt_delay != src->r_ctl.rc_pkt_delay) {
		dest->r_ctl.rc_pkt_delay = src->r_ctl.rc_pkt_delay;
		cnt++;
	}
	/* TCP_DELACK will get copied via the main code if applicable */
	/* TCP_BBR_RACK_RTT_USE */
	if (dest->r_ctl.rc_rate_sample_method != src->r_ctl.rc_rate_sample_method) {
		dest->r_ctl.rc_rate_sample_method = src->r_ctl.rc_rate_sample_method;
		cnt++;
	}
	/* TCP_HONOR_HPTS_MIN */
	if (dest->r_use_hpts_min != src->r_use_hpts_min) {
		dest->r_use_hpts_min = src->r_use_hpts_min;
		cnt++;
	}
	if (dest->r_ctl.max_reduction != src->r_ctl.max_reduction) {
		dest->r_ctl.max_reduction = src->r_ctl.max_reduction;
		cnt++;
	}
	/* TCP_REC_IS_DYN */
	if (dest->rc_gp_no_rec_chg != src->rc_gp_no_rec_chg) {
		dest->rc_gp_no_rec_chg = src->rc_gp_no_rec_chg;
		cnt++;
	}
	if (dest->rc_skip_timely != src->rc_skip_timely) {
		dest->rc_skip_timely = src->rc_skip_timely;
		cnt++;
	}
	/* TCP_DATA_AFTER_CLOSE */
	if (dest->rc_allow_data_af_clo != src->rc_allow_data_af_clo) {
		dest->rc_allow_data_af_clo = src->rc_allow_data_af_clo;
		cnt++;
	}
	/* TCP_GP_USE_LTBW */
	if (src->use_lesser_lt_bw != dest->use_lesser_lt_bw) {
		dest->use_lesser_lt_bw = src->use_lesser_lt_bw;
		cnt++;
	}
	if (dest->dis_lt_bw != src->dis_lt_bw) {
		dest->dis_lt_bw = src->dis_lt_bw;
		cnt++;
	}
	tcp_log_socket_option(tp, 0, cnt, 0);
}

static void
rack_apply_deferred_options(struct tcp_rack *rack)
{
	struct deferred_opt_list *dol, *sdol;
	uint32_t s_optval;

	TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) {
		TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
		/* Disadvantage of deferral is you lose the error return */
		s_optval = (uint32_t)dol->optval;
		(void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval, NULL);
		free(dol, M_TCPDO);
	}
}

static void
rack_hw_tls_change(struct tcpcb *tp, int chg)
{
	/* Update HW TLS state */
	struct tcp_rack *rack;

	rack
= (struct tcp_rack *)tp->t_fb_ptr; 24098 if (chg) 24099 rack->r_ctl.fsb.hw_tls = 1; 24100 else 24101 rack->r_ctl.fsb.hw_tls = 0; 24102 } 24103 24104 static int 24105 rack_pru_options(struct tcpcb *tp, int flags) 24106 { 24107 if (flags & PRUS_OOB) 24108 return (EOPNOTSUPP); 24109 return (0); 24110 } 24111 24112 static bool 24113 rack_wake_check(struct tcpcb *tp) 24114 { 24115 struct tcp_rack *rack; 24116 struct timeval tv; 24117 uint32_t cts; 24118 24119 rack = (struct tcp_rack *)tp->t_fb_ptr; 24120 if (rack->r_ctl.rc_hpts_flags) { 24121 cts = tcp_get_usecs(&tv); 24122 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == PACE_PKT_OUTPUT){ 24123 /* 24124 * Pacing timer is up, check if we are ready. 24125 */ 24126 if (TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) 24127 return (true); 24128 } else if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) != 0) { 24129 /* 24130 * A timer is up, check if we are ready. 24131 */ 24132 if (TSTMP_GEQ(cts, rack->r_ctl.rc_timer_exp)) 24133 return (true); 24134 } 24135 } 24136 return (false); 24137 } 24138 24139 static struct tcp_function_block __tcp_rack = { 24140 .tfb_tcp_block_name = __XSTRING(STACKNAME), 24141 .tfb_tcp_output = rack_output, 24142 .tfb_do_queued_segments = ctf_do_queued_segments, 24143 .tfb_do_segment_nounlock = rack_do_segment_nounlock, 24144 .tfb_tcp_do_segment = rack_do_segment, 24145 .tfb_tcp_ctloutput = rack_ctloutput, 24146 .tfb_tcp_fb_init = rack_init, 24147 .tfb_tcp_fb_fini = rack_fini, 24148 .tfb_tcp_timer_stop_all = rack_stopall, 24149 .tfb_tcp_rexmit_tmr = rack_remxt_tmr, 24150 .tfb_tcp_handoff_ok = rack_handoff_ok, 24151 .tfb_tcp_mtu_chg = rack_mtu_change, 24152 .tfb_pru_options = rack_pru_options, 24153 .tfb_hwtls_change = rack_hw_tls_change, 24154 .tfb_chg_query = rack_chg_query, 24155 .tfb_switch_failed = rack_switch_failed, 24156 .tfb_early_wake_check = rack_wake_check, 24157 .tfb_compute_pipe = rack_compute_pipe, 24158 .tfb_stack_info = rack_stack_information, 24159 .tfb_inherit = rack_inherit, 24160 .tfb_flags = TCP_FUNC_OUTPUT_CANDROP | TCP_FUNC_DEFAULT_OK, 24161 24162 }; 24163 24164 /* 24165 * rack_ctloutput() must drop the inpcb lock before performing copyin on 24166 * socket option arguments. When it re-acquires the lock after the copy, it 24167 * has to revalidate that the connection is still valid for the socket 24168 * option. 24169 */ 24170 static int 24171 rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt) 24172 { 24173 struct inpcb *inp = tptoinpcb(tp); 24174 #ifdef INET 24175 struct ip *ip; 24176 #endif 24177 struct tcp_rack *rack; 24178 struct tcp_hybrid_req hybrid; 24179 uint64_t loptval; 24180 int32_t error = 0, optval; 24181 24182 rack = (struct tcp_rack *)tp->t_fb_ptr; 24183 if (rack == NULL) { 24184 INP_WUNLOCK(inp); 24185 return (EINVAL); 24186 } 24187 #ifdef INET 24188 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 24189 #endif 24190 24191 switch (sopt->sopt_level) { 24192 #ifdef INET6 24193 case IPPROTO_IPV6: 24194 MPASS(inp->inp_vflag & INP_IPV6PROTO); 24195 switch (sopt->sopt_name) { 24196 case IPV6_USE_MIN_MTU: 24197 tcp6_use_min_mtu(tp); 24198 break; 24199 } 24200 INP_WUNLOCK(inp); 24201 return (0); 24202 #endif 24203 #ifdef INET 24204 case IPPROTO_IP: 24205 switch (sopt->sopt_name) { 24206 case IP_TOS: 24207 /* 24208 * The DSCP codepoint has changed, update the fsb. 24209 */ 24210 ip->ip_tos = rack->rc_inp->inp_ip_tos; 24211 break; 24212 case IP_TTL: 24213 /* 24214 * The TTL has changed, update the fsb. 
24215 */ 24216 ip->ip_ttl = rack->rc_inp->inp_ip_ttl; 24217 break; 24218 } 24219 INP_WUNLOCK(inp); 24220 return (0); 24221 #endif 24222 #ifdef SO_PEERPRIO 24223 case SOL_SOCKET: 24224 switch (sopt->sopt_name) { 24225 case SO_PEERPRIO: /* SC-URL:bs */ 24226 /* Already read in and sanity checked in sosetopt(). */ 24227 if (inp->inp_socket) { 24228 rack->client_bufferlvl = inp->inp_socket->so_peerprio; 24229 } 24230 break; 24231 } 24232 INP_WUNLOCK(inp); 24233 return (0); 24234 #endif 24235 case IPPROTO_TCP: 24236 switch (sopt->sopt_name) { 24237 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */ 24238 /* Pacing related ones */ 24239 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */ 24240 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */ 24241 case TCP_RACK_PACE_MIN_SEG: /* URL:pace_min_seg */ 24242 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */ 24243 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */ 24244 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */ 24245 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss*/ 24246 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */ 24247 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */ 24248 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */ 24249 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */ 24250 case TCP_RACK_RR_CONF: /* URL:rrr_conf */ 24251 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */ 24252 case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */ 24253 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */ 24254 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */ 24255 case TCP_FILLCW_RATE_CAP: /* URL:fillcw_cap */ 24256 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */ 24257 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */ 24258 /* End pacing related */ 24259 case TCP_DELACK: /* URL:delack (in base TCP i.e. tcp_hints along with cc etc ) */ 24260 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */ 24261 case TCP_RACK_MIN_TO: /* URL:min_to */ 24262 case TCP_RACK_EARLY_SEG: /* URL:early_seg */ 24263 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */ 24264 case TCP_RACK_REORD_FADE: /* URL:reord_fade */ 24265 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */ 24266 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */ 24267 case TCP_RACK_TLP_USE: /* URL:tlp_use */ 24268 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */ 24269 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */ 24270 case TCP_NO_PRR: /* URL:noprr */ 24271 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */ 24272 case TCP_DATA_AFTER_CLOSE: /* no URL */ 24273 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */ 24274 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */ 24275 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */ 24276 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */ 24277 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */ 24278 case TCP_RACK_PROFILE: /* URL:profile */ 24279 case TCP_SIDECHAN_DIS: /* URL:scodm */ 24280 case TCP_HYBRID_PACING: /* URL:pacing=hybrid */ 24281 case TCP_USE_CMP_ACKS: /* URL:cmpack */ 24282 case TCP_RACK_ABC_VAL: /* URL:labc */ 24283 case TCP_REC_ABC_VAL: /* URL:reclabc */ 24284 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */ 24285 case TCP_DEFER_OPTIONS: /* URL:defer */ 24286 case TCP_RACK_DSACK_OPT: /* URL:dsack */ 24287 case TCP_RACK_TIMER_SLOP: /* URL:timer_slop */ 24288 case TCP_RACK_ENABLE_HYSTART: /* URL:hystart */ 24289 case TCP_RACK_SET_RXT_OPTIONS: /* URL:rxtsz */ 24290 case TCP_RACK_HI_BETA: /* URL:hibeta */ 24291 case TCP_RACK_SPLIT_LIMIT: /* URL:split */ 24292 case TCP_SS_EEXIT: /* URL:eexit */ 24293 case TCP_DGP_UPPER_BOUNDS: /* URL:upper */ 24294 case TCP_RACK_PACING_DIVISOR: /* URL:divisor */ 24295 
case TCP_PACING_DND: /* URL:dnd */ 24296 case TCP_NO_TIMELY: /* URL:notimely */ 24297 case RACK_CSPR_IS_FCC: /* URL:csprisfcc */ 24298 case TCP_HONOR_HPTS_MIN: /* URL:hptsmin */ 24299 case TCP_REC_IS_DYN: /* URL:dynrec */ 24300 case TCP_GP_USE_LTBW: /* URL:useltbw */ 24301 goto process_opt; 24302 break; 24303 default: 24304 /* Filter off all unknown options to the base stack */ 24305 return (tcp_default_ctloutput(tp, sopt)); 24306 break; 24307 } 24308 default: 24309 INP_WUNLOCK(inp); 24310 return (0); 24311 } 24312 process_opt: 24313 INP_WUNLOCK(inp); 24314 if ((sopt->sopt_name == TCP_PACING_RATE_CAP) || 24315 (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) { 24316 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval)); 24317 /* 24318 * We truncate it down to 32 bits for the socket-option trace this 24319 * means rates > 34Gbps won't show right, but thats probably ok. 24320 */ 24321 optval = (uint32_t)loptval; 24322 } else if (sopt->sopt_name == TCP_HYBRID_PACING) { 24323 error = sooptcopyin(sopt, &hybrid, sizeof(hybrid), sizeof(hybrid)); 24324 } else { 24325 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 24326 /* Save it in 64 bit form too */ 24327 loptval = optval; 24328 } 24329 if (error) 24330 return (error); 24331 INP_WLOCK(inp); 24332 if (tp->t_fb != &__tcp_rack) { 24333 INP_WUNLOCK(inp); 24334 return (ENOPROTOOPT); 24335 } 24336 if (rack->defer_options && (rack->gp_ready == 0) && 24337 (sopt->sopt_name != TCP_DEFER_OPTIONS) && 24338 (sopt->sopt_name != TCP_HYBRID_PACING) && 24339 (sopt->sopt_name != TCP_RACK_SET_RXT_OPTIONS) && 24340 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) && 24341 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) { 24342 /* Options are being deferred */ 24343 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { 24344 INP_WUNLOCK(inp); 24345 return (0); 24346 } else { 24347 /* No memory to defer, fail */ 24348 INP_WUNLOCK(inp); 24349 return (ENOMEM); 24350 } 24351 } 24352 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval, &hybrid); 24353 INP_WUNLOCK(inp); 24354 return (error); 24355 } 24356 24357 static void 24358 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti) 24359 { 24360 24361 INP_WLOCK_ASSERT(tptoinpcb(tp)); 24362 bzero(ti, sizeof(*ti)); 24363 24364 ti->tcpi_state = tp->t_state; 24365 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP)) 24366 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; 24367 if (tp->t_flags & TF_SACK_PERMIT) 24368 ti->tcpi_options |= TCPI_OPT_SACK; 24369 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) { 24370 ti->tcpi_options |= TCPI_OPT_WSCALE; 24371 ti->tcpi_snd_wscale = tp->snd_scale; 24372 ti->tcpi_rcv_wscale = tp->rcv_scale; 24373 } 24374 if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT)) 24375 ti->tcpi_options |= TCPI_OPT_ECN; 24376 if (tp->t_flags & TF_FASTOPEN) 24377 ti->tcpi_options |= TCPI_OPT_TFO; 24378 /* still kept in ticks is t_rcvtime */ 24379 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick; 24380 /* Since we hold everything in precise useconds this is easy */ 24381 ti->tcpi_rtt = tp->t_srtt; 24382 ti->tcpi_rttvar = tp->t_rttvar; 24383 ti->tcpi_rto = tp->t_rxtcur; 24384 ti->tcpi_snd_ssthresh = tp->snd_ssthresh; 24385 ti->tcpi_snd_cwnd = tp->snd_cwnd; 24386 /* 24387 * FreeBSD-specific extension fields for tcp_info. 24388 */ 24389 ti->tcpi_rcv_space = tp->rcv_wnd; 24390 ti->tcpi_rcv_nxt = tp->rcv_nxt; 24391 ti->tcpi_snd_wnd = tp->snd_wnd; 24392 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. 
*/ 24393 ti->tcpi_snd_nxt = tp->snd_nxt; 24394 ti->tcpi_snd_mss = tp->t_maxseg; 24395 ti->tcpi_rcv_mss = tp->t_maxseg; 24396 ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack; 24397 ti->tcpi_rcv_ooopack = tp->t_rcvoopack; 24398 ti->tcpi_snd_zerowin = tp->t_sndzerowin; 24399 ti->tcpi_total_tlp = tp->t_sndtlppack; 24400 ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte; 24401 ti->tcpi_rttmin = tp->t_rttlow; 24402 #ifdef NETFLIX_STATS 24403 memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo)); 24404 #endif 24405 #ifdef TCP_OFFLOAD 24406 if (tp->t_flags & TF_TOE) { 24407 ti->tcpi_options |= TCPI_OPT_TOE; 24408 tcp_offload_tcp_info(tp, ti); 24409 } 24410 #endif 24411 } 24412 24413 static int 24414 rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt) 24415 { 24416 struct inpcb *inp = tptoinpcb(tp); 24417 struct tcp_rack *rack; 24418 int32_t error, optval; 24419 uint64_t val, loptval; 24420 struct tcp_info ti; 24421 /* 24422 * Because all our options are either boolean or an int, we can just 24423 * pull everything into optval and then unlock and copy. If we ever 24424 * add a option that is not a int, then this will have quite an 24425 * impact to this routine. 24426 */ 24427 error = 0; 24428 rack = (struct tcp_rack *)tp->t_fb_ptr; 24429 if (rack == NULL) { 24430 INP_WUNLOCK(inp); 24431 return (EINVAL); 24432 } 24433 switch (sopt->sopt_name) { 24434 case TCP_INFO: 24435 /* First get the info filled */ 24436 rack_fill_info(tp, &ti); 24437 /* Fix up the rtt related fields if needed */ 24438 INP_WUNLOCK(inp); 24439 error = sooptcopyout(sopt, &ti, sizeof ti); 24440 return (error); 24441 /* 24442 * Beta is the congestion control value for NewReno that influences how 24443 * much of a backoff happens when loss is detected. It is normally set 24444 * to 50 for 50% i.e. the cwnd is reduced to 50% of its previous value 24445 * when you exit recovery. 24446 */ 24447 case TCP_RACK_PACING_BETA: 24448 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) 24449 error = EINVAL; 24450 else if (rack->rc_pacing_cc_set == 0) 24451 optval = rack->r_ctl.rc_saved_beta; 24452 else { 24453 /* 24454 * Reach out into the CC data and report back what 24455 * I have previously set. Yeah it looks hackish but 24456 * we don't want to report the saved values. 24457 */ 24458 if (tp->t_ccv.cc_data) 24459 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta; 24460 else 24461 error = EINVAL; 24462 } 24463 break; 24464 /* 24465 * Beta_ecn is the congestion control value for NewReno that influences how 24466 * much of a backoff happens when a ECN mark is detected. It is normally set 24467 * to 80 for 80% i.e. the cwnd is reduced by 20% of its previous value when 24468 * you exit recovery. Note that classic ECN has a beta of 50, it is only 24469 * ABE Ecn that uses this "less" value, but we do too with pacing :) 24470 */ 24471 case TCP_RACK_PACING_BETA_ECN: 24472 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) 24473 error = EINVAL; 24474 else if (rack->rc_pacing_cc_set == 0) 24475 optval = rack->r_ctl.rc_saved_beta_ecn; 24476 else { 24477 /* 24478 * Reach out into the CC data and report back what 24479 * I have previously set. Yeah it looks hackish but 24480 * we don't want to report the saved values. 
24481 */ 24482 if (tp->t_ccv.cc_data) 24483 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn; 24484 else 24485 error = EINVAL; 24486 } 24487 break; 24488 case TCP_RACK_DSACK_OPT: 24489 optval = 0; 24490 if (rack->rc_rack_tmr_std_based) { 24491 optval |= 1; 24492 } 24493 if (rack->rc_rack_use_dsack) { 24494 optval |= 2; 24495 } 24496 break; 24497 case TCP_RACK_ENABLE_HYSTART: 24498 { 24499 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { 24500 optval = RACK_HYSTART_ON; 24501 if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND) 24502 optval = RACK_HYSTART_ON_W_SC; 24503 if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH) 24504 optval = RACK_HYSTART_ON_W_SC_C; 24505 } else { 24506 optval = RACK_HYSTART_OFF; 24507 } 24508 } 24509 break; 24510 case TCP_RACK_DGP_IN_REC: 24511 error = EINVAL; 24512 break; 24513 case TCP_RACK_HI_BETA: 24514 optval = rack->rack_hibeta; 24515 break; 24516 case TCP_DEFER_OPTIONS: 24517 optval = rack->defer_options; 24518 break; 24519 case TCP_RACK_MEASURE_CNT: 24520 optval = rack->r_ctl.req_measurements; 24521 break; 24522 case TCP_REC_ABC_VAL: 24523 optval = rack->r_use_labc_for_rec; 24524 break; 24525 case TCP_RACK_ABC_VAL: 24526 optval = rack->rc_labc; 24527 break; 24528 case TCP_HDWR_UP_ONLY: 24529 optval= rack->r_up_only; 24530 break; 24531 case TCP_FILLCW_RATE_CAP: 24532 loptval = rack->r_ctl.fillcw_cap; 24533 break; 24534 case TCP_PACING_RATE_CAP: 24535 loptval = rack->r_ctl.bw_rate_cap; 24536 break; 24537 case TCP_RACK_PROFILE: 24538 /* You cannot retrieve a profile, its write only */ 24539 error = EINVAL; 24540 break; 24541 case TCP_SIDECHAN_DIS: 24542 optval = rack->r_ctl.side_chan_dis_mask; 24543 break; 24544 case TCP_HYBRID_PACING: 24545 /* You cannot retrieve hybrid pacing information, its write only */ 24546 error = EINVAL; 24547 break; 24548 case TCP_USE_CMP_ACKS: 24549 optval = rack->r_use_cmp_ack; 24550 break; 24551 case TCP_RACK_PACE_TO_FILL: 24552 optval = rack->rc_pace_to_cwnd; 24553 break; 24554 case TCP_RACK_NO_PUSH_AT_MAX: 24555 optval = rack->r_ctl.rc_no_push_at_mrtt; 24556 break; 24557 case TCP_SHARED_CWND_ENABLE: 24558 optval = rack->rack_enable_scwnd; 24559 break; 24560 case TCP_RACK_NONRXT_CFG_RATE: 24561 optval = rack->rack_rec_nonrxt_use_cr; 24562 break; 24563 case TCP_NO_PRR: 24564 if (rack->rack_no_prr == 1) 24565 optval = 1; 24566 else if (rack->no_prr_addback == 1) 24567 optval = 2; 24568 else 24569 optval = 0; 24570 break; 24571 case TCP_GP_USE_LTBW: 24572 if (rack->dis_lt_bw) { 24573 /* It is not used */ 24574 optval = 0; 24575 } else if (rack->use_lesser_lt_bw) { 24576 /* we use min() */ 24577 optval = 1; 24578 } else { 24579 /* we use max() */ 24580 optval = 2; 24581 } 24582 break; 24583 case TCP_RACK_DO_DETECTION: 24584 error = EINVAL; 24585 break; 24586 case TCP_RACK_MBUF_QUEUE: 24587 /* Now do we use the LRO mbuf-queue feature */ 24588 optval = rack->r_mbuf_queue; 24589 break; 24590 case RACK_CSPR_IS_FCC: 24591 optval = rack->cspr_is_fcc; 24592 break; 24593 case TCP_TIMELY_DYN_ADJ: 24594 optval = rack->rc_gp_dyn_mul; 24595 break; 24596 case TCP_BBR_IWINTSO: 24597 error = EINVAL; 24598 break; 24599 case TCP_RACK_TLP_REDUCE: 24600 /* RACK TLP cwnd reduction (bool) */ 24601 optval = rack->r_ctl.rc_tlp_cwnd_reduce; 24602 break; 24603 case TCP_BBR_RACK_INIT_RATE: 24604 val = rack->r_ctl.init_rate; 24605 /* convert to kbits per sec */ 24606 val *= 8; 24607 val /= 1000; 24608 optval = (uint32_t)val; 24609 break; 24610 case TCP_RACK_FORCE_MSEG: 24611 optval = rack->rc_force_max_seg; 24612 break; 24613 case TCP_RACK_PACE_MIN_SEG: 24614 optval = 
rack->r_ctl.rc_user_set_min_segs;
		break;
	case TCP_RACK_PACE_MAX_SEG:
		/* Max segments in a pace */
		optval = rack->rc_user_set_max_segs;
		break;
	case TCP_RACK_PACE_ALWAYS:
		/* Use the always pace method */
		optval = rack->rc_always_pace;
		break;
	case TCP_RACK_PRR_SENDALOT:
		/* Allow PRR to send more than one seg */
		optval = rack->r_ctl.rc_prr_sendalot;
		break;
	case TCP_RACK_MIN_TO:
		/* Minimum time between rack t-o's in ms */
		optval = rack->r_ctl.rc_min_to;
		break;
	case TCP_RACK_SPLIT_LIMIT:
		optval = rack->r_ctl.rc_split_limit;
		break;
	case TCP_RACK_EARLY_SEG:
		/* If early recovery max segments */
		optval = rack->r_ctl.rc_early_recovery_segs;
		break;
	case TCP_RACK_REORD_THRESH:
		/* RACK reorder threshold (shift amount) */
		optval = rack->r_ctl.rc_reorder_shift;
		break;
	case TCP_SS_EEXIT:
		if (rack->r_ctl.gp_rnd_thresh) {
			uint32_t v;

			v = rack->r_ctl.gp_gain_req;
			v <<= 17;
			optval = v | (rack->r_ctl.gp_rnd_thresh & 0xff);
			if (rack->r_ctl.gate_to_fs == 1)
				optval |= 0x10000;
		} else
			optval = 0;
		break;
	case TCP_RACK_REORD_FADE:
		/* Does reordering fade after ms time */
		optval = rack->r_ctl.rc_reorder_fade;
		break;
	case TCP_BBR_USE_RACK_RR:
		/* Do we use the rack cheat for rxt */
		optval = rack->use_rack_rr;
		break;
	case TCP_RACK_RR_CONF:
		optval = rack->r_rr_config;
		break;
	case TCP_HDWR_RATE_CAP:
		optval = rack->r_rack_hw_rate_caps;
		break;
	case TCP_BBR_HDWR_PACE:
		optval = rack->rack_hdw_pace_ena;
		break;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
		optval = rack->r_ctl.rc_tlp_threshold;
		break;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms i.e. rack-rtt + reord + N */
		optval = rack->r_ctl.rc_pkt_delay;
		break;
	case TCP_RACK_TLP_USE:
		optval = rack->rack_tlp_threshold_use;
		break;
	case TCP_PACING_DND:
		optval = rack->rc_pace_dnd;
		break;
	case TCP_RACK_PACE_RATE_CA:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
		break;
	case TCP_RACK_PACE_RATE_SS:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
		break;
	case TCP_RACK_PACE_RATE_REC:
		optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
		break;
	case TCP_DGP_UPPER_BOUNDS:
		optval = rack->r_ctl.rack_per_upper_bound_ss;
		optval <<= 16;
		optval |= rack->r_ctl.rack_per_upper_bound_ca;
		break;
	case TCP_RACK_GP_INCREASE_SS:
		optval = rack->r_ctl.rack_per_of_gp_ss;
		break;
	case TCP_RACK_GP_INCREASE_CA:
		optval = rack->r_ctl.rack_per_of_gp_ca;
		break;
	case TCP_RACK_PACING_DIVISOR:
		optval = rack->r_ctl.pace_len_divisor;
		break;
	case TCP_BBR_RACK_RTT_USE:
		optval = rack->r_ctl.rc_rate_sample_method;
		break;
	case TCP_DELACK:
		optval = tp->t_delayed_ack;
		break;
	case TCP_DATA_AFTER_CLOSE:
		optval = rack->rc_allow_data_af_clo;
		break;
	case TCP_SHARED_CWND_TIME_LIMIT:
		optval = rack->r_limit_scw;
		break;
	case TCP_HONOR_HPTS_MIN:
		if (rack->r_use_hpts_min)
			optval = rack->r_ctl.max_reduction;
		else
			optval = 0;
		break;
	case TCP_REC_IS_DYN:
		optval = rack->rc_gp_no_rec_chg;
		break;
	case TCP_NO_TIMELY:
		optval = rack->rc_skip_timely;
		break;
	case TCP_RACK_TIMER_SLOP:
		optval = rack->r_ctl.timer_slop;
		break;
	default:
		return (tcp_default_ctloutput(tp, sopt));
		break;
	}
	INP_WUNLOCK(inp);
	if (error == 0) {
		if ((sopt->sopt_name == TCP_PACING_RATE_CAP) ||
		    (sopt->sopt_name == TCP_FILLCW_RATE_CAP))
			error = sooptcopyout(sopt, &loptval, sizeof loptval);
		else
			error = sooptcopyout(sopt, &optval, sizeof optval);
	}
	return (error);
}
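
/*
 * Illustrative sketch (not part of the kernel build): the per-connection
 * knobs handled by rack_set_sockopt()/rack_get_sockopt() above are plain
 * setsockopt()/getsockopt() calls at the IPPROTO_TCP level from userland.
 * This assumes the TCP_RACK_* option constants are exported through
 * <netinet/tcp.h> and that the socket is already attached to this stack;
 * error handling is trimmed for brevity.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *
 *	static int
 *	tune_rack_min_to(int fd, int min_to_ms)
 *	{
 *		int cur;
 *		socklen_t len = sizeof(cur);
 *
 *		// Set the minimum time between rack timeouts (ms).
 *		if (setsockopt(fd, IPPROTO_TCP, TCP_RACK_MIN_TO,
 *		    &min_to_ms, sizeof(min_to_ms)) == -1)
 *			return (-1);
 *		// Read it back; most rack options round-trip as an int.
 *		if (getsockopt(fd, IPPROTO_TCP, TCP_RACK_MIN_TO,
 *		    &cur, &len) == -1)
 *			return (-1);
 *		return (cur);
 *	}
 */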
static int
rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt)
{
	if (sopt->sopt_dir == SOPT_SET) {
		return (rack_set_sockopt(tp, sopt));
	} else if (sopt->sopt_dir == SOPT_GET) {
		return (rack_get_sockopt(tp, sopt));
	} else {
		panic("%s: sopt_dir $%d", __func__, sopt->sopt_dir);
	}
}

static const char *rack_stack_names[] = {
	__XSTRING(STACKNAME),
#ifdef STACKALIAS
	__XSTRING(STACKALIAS),
#endif
};

static int
rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
{
	memset(mem, 0, size);
	return (0);
}

static void
rack_dtor(void *mem, int32_t size, void *arg)
{

}

static bool rack_mod_inited = false;

static int
tcp_addrack(module_t mod, int32_t type, void *data)
{
	int32_t err = 0;
	int num_stacks;

	switch (type) {
	case MOD_LOAD:
		rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
		    sizeof(struct rack_sendmap),
		    rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);

		rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
		    sizeof(struct tcp_rack),
		    rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);

		sysctl_ctx_init(&rack_sysctl_ctx);
		rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
SYSCTL_STATIC_CHILDREN(_net_inet_tcp), 24804 OID_AUTO, 24805 #ifdef STACKALIAS 24806 __XSTRING(STACKALIAS), 24807 #else 24808 __XSTRING(STACKNAME), 24809 #endif 24810 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 24811 ""); 24812 if (rack_sysctl_root == NULL) { 24813 printf("Failed to add sysctl node\n"); 24814 err = EFAULT; 24815 goto free_uma; 24816 } 24817 rack_init_sysctls(); 24818 num_stacks = nitems(rack_stack_names); 24819 err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK, 24820 rack_stack_names, &num_stacks); 24821 if (err) { 24822 printf("Failed to register %s stack name for " 24823 "%s module\n", rack_stack_names[num_stacks], 24824 __XSTRING(MODNAME)); 24825 sysctl_ctx_free(&rack_sysctl_ctx); 24826 free_uma: 24827 uma_zdestroy(rack_zone); 24828 uma_zdestroy(rack_pcb_zone); 24829 rack_counter_destroy(); 24830 printf("Failed to register rack module -- err:%d\n", err); 24831 return (err); 24832 } 24833 tcp_lro_reg_mbufq(); 24834 rack_mod_inited = true; 24835 break; 24836 case MOD_QUIESCE: 24837 err = deregister_tcp_functions(&__tcp_rack, true, false); 24838 break; 24839 case MOD_UNLOAD: 24840 err = deregister_tcp_functions(&__tcp_rack, false, true); 24841 if (err == EBUSY) 24842 break; 24843 if (rack_mod_inited) { 24844 uma_zdestroy(rack_zone); 24845 uma_zdestroy(rack_pcb_zone); 24846 sysctl_ctx_free(&rack_sysctl_ctx); 24847 rack_counter_destroy(); 24848 rack_mod_inited = false; 24849 } 24850 tcp_lro_dereg_mbufq(); 24851 err = 0; 24852 break; 24853 default: 24854 return (EOPNOTSUPP); 24855 } 24856 return (err); 24857 } 24858 24859 static moduledata_t tcp_rack = { 24860 .name = __XSTRING(MODNAME), 24861 .evhand = tcp_addrack, 24862 .priv = 0 24863 }; 24864 24865 MODULE_VERSION(MODNAME, 1); 24866 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY); 24867 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1); 24868 24869 #endif /* #if !defined(INET) && !defined(INET6) */ 24870
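
/*
 * Illustrative sketch (not compiled here): once this module is loaded,
 * e.g. with kldload, the stack names registered by tcp_addrack() above
 * become selectable per socket with the TCP_FUNCTION_BLK socket option,
 * and the system-wide default can be switched with the
 * net.inet.tcp.functions_default sysctl. The snippet assumes the
 * userland declarations of struct tcp_function_set and TCP_FUNCTION_BLK
 * in <netinet/tcp.h>; treat it as an example, not a reference.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *
 *	static int
 *	use_rack_stack(int fd)
 *	{
 *		struct tcp_function_set tfs;
 *
 *		memset(&tfs, 0, sizeof(tfs));
 *		strlcpy(tfs.function_set_name, "rack",
 *		    sizeof(tfs.function_set_name));
 *		// Hand this connection to the rack stack registered above.
 *		return (setsockopt(fd, IPPROTO_TCP, TCP_FUNCTION_BLK,
 *		    &tfs, sizeof(tfs)));
 *	}
 */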