1 /*- 2 * Copyright (c) 2016-2020 Netflix, Inc. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 */ 26 27 #include <sys/cdefs.h> 28 #include "opt_inet.h" 29 #include "opt_inet6.h" 30 #include "opt_ipsec.h" 31 #include "opt_ratelimit.h" 32 #include "opt_kern_tls.h" 33 #if defined(INET) || defined(INET6) 34 #include <sys/param.h> 35 #include <sys/arb.h> 36 #include <sys/module.h> 37 #include <sys/kernel.h> 38 #ifdef TCP_HHOOK 39 #include <sys/hhook.h> 40 #endif 41 #include <sys/lock.h> 42 #include <sys/malloc.h> 43 #include <sys/lock.h> 44 #include <sys/mutex.h> 45 #include <sys/mbuf.h> 46 #include <sys/proc.h> /* for proc0 declaration */ 47 #include <sys/socket.h> 48 #include <sys/socketvar.h> 49 #include <sys/sysctl.h> 50 #include <sys/systm.h> 51 #ifdef STATS 52 #include <sys/qmath.h> 53 #include <sys/tree.h> 54 #include <sys/stats.h> /* Must come after qmath.h and tree.h */ 55 #else 56 #include <sys/tree.h> 57 #endif 58 #include <sys/refcount.h> 59 #include <sys/queue.h> 60 #include <sys/tim_filter.h> 61 #include <sys/smp.h> 62 #include <sys/kthread.h> 63 #include <sys/kern_prefetch.h> 64 #include <sys/protosw.h> 65 #ifdef TCP_ACCOUNTING 66 #include <sys/sched.h> 67 #include <machine/cpu.h> 68 #endif 69 #include <vm/uma.h> 70 71 #include <net/route.h> 72 #include <net/route/nhop.h> 73 #include <net/vnet.h> 74 75 #define TCPSTATES /* for logging */ 76 77 #include <netinet/in.h> 78 #include <netinet/in_kdtrace.h> 79 #include <netinet/in_pcb.h> 80 #include <netinet/ip.h> 81 #include <netinet/ip_icmp.h> /* required for icmp_var.h */ 82 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */ 83 #include <netinet/ip_var.h> 84 #include <netinet/ip6.h> 85 #include <netinet6/in6_pcb.h> 86 #include <netinet6/ip6_var.h> 87 #include <netinet/tcp.h> 88 #define TCPOUTFLAGS 89 #include <netinet/tcp_fsm.h> 90 #include <netinet/tcp_seq.h> 91 #include <netinet/tcp_timer.h> 92 #include <netinet/tcp_var.h> 93 #include <netinet/tcp_log_buf.h> 94 #include <netinet/tcp_syncache.h> 95 #include <netinet/tcp_hpts.h> 96 #include <netinet/tcp_ratelimit.h> 97 #include <netinet/tcp_accounting.h> 98 #include <netinet/tcpip.h> 99 #include <netinet/cc/cc.h> 100 #include <netinet/cc/cc_newreno.h> 101 #include <netinet/tcp_fastopen.h> 102 #include <netinet/tcp_lro.h> 103 #ifdef NETFLIX_SHARED_CWND 104 #include <netinet/tcp_shared_cwnd.h> 105 #endif 106 #ifdef TCP_OFFLOAD 107 #include <netinet/tcp_offload.h> 108 #endif 109 #ifdef INET6 110 #include <netinet6/tcp6_var.h> 111 #endif 112 #include <netinet/tcp_ecn.h> 113 114 #include <netipsec/ipsec_support.h> 115 116 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 117 #include <netipsec/ipsec.h> 118 #include <netipsec/ipsec6.h> 119 #endif /* IPSEC */ 120 121 #include <netinet/udp.h> 122 #include <netinet/udp_var.h> 123 #include <machine/in_cksum.h> 124 125 #ifdef MAC 126 #include <security/mac/mac_framework.h> 127 #endif 128 #include "sack_filter.h" 129 #include "tcp_rack.h" 130 #include "tailq_hash.h" 131 #include "rack_bbr_common.h" 132 133 uma_zone_t rack_zone; 134 uma_zone_t rack_pcb_zone; 135 136 #ifndef TICKS2SBT 137 #define TICKS2SBT(__t) (tick_sbt * ((sbintime_t)(__t))) 138 #endif 139 140 VNET_DECLARE(uint32_t, newreno_beta); 141 VNET_DECLARE(uint32_t, newreno_beta_ecn); 142 #define V_newreno_beta VNET(newreno_beta) 143 #define V_newreno_beta_ecn VNET(newreno_beta_ecn) 144 145 146 MALLOC_DEFINE(M_TCPFSB, "tcp_fsb", "TCP fast send block"); 147 MALLOC_DEFINE(M_TCPDO, "tcp_do", "TCP deferred options"); 148 149 struct sysctl_ctx_list rack_sysctl_ctx; 150 struct sysctl_oid *rack_sysctl_root; 151 152 #define CUM_ACKED 1 153 #define SACKED 2 154 155 /* 156 * The RACK module incorporates a number of 157 * TCP ideas that have been put out into the IETF 158 * over the last few years: 159 * - Matt Mathis's Rate Halving which slowly drops 160 * the congestion window so that the ack clock can 161 * be maintained during a recovery. 162 * - Yuchung Cheng's RACK TCP (for which its named) that 163 * will stop us using the number of dup acks and instead 164 * use time as the gage of when we retransmit. 165 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft 166 * of Dukkipati et.al. 167 * RACK depends on SACK, so if an endpoint arrives that 168 * cannot do SACK the state machine below will shuttle the 169 * connection back to using the "default" TCP stack that is 170 * in FreeBSD. 171 * 172 * To implement RACK the original TCP stack was first decomposed 173 * into a functional state machine with individual states 174 * for each of the possible TCP connection states. The do_segment 175 * functions role in life is to mandate the connection supports SACK 176 * initially and then assure that the RACK state matches the conenction 177 * state before calling the states do_segment function. Each 178 * state is simplified due to the fact that the original do_segment 179 * has been decomposed and we *know* what state we are in (no 180 * switches on the state) and all tests for SACK are gone. This 181 * greatly simplifies what each state does. 182 * 183 * TCP output is also over-written with a new version since it 184 * must maintain the new rack scoreboard. 185 * 186 */ 187 static int32_t rack_tlp_thresh = 1; 188 static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs w-out new data */ 189 static int32_t rack_tlp_use_greater = 1; 190 static int32_t rack_reorder_thresh = 2; 191 static int32_t rack_reorder_fade = 60000000; /* 0 - never fade, def 60,000,000 192 * - 60 seconds */ 193 static uint32_t rack_clamp_ss_upper = 110; 194 static uint32_t rack_clamp_ca_upper = 105; 195 static uint32_t rack_rxt_min_rnds = 10; /* Min rounds if drastic rxt clamp is in place */ 196 static uint32_t rack_unclamp_round_thresh = 100; /* number of perfect rounds before we unclamp */ 197 static uint32_t rack_unclamp_rxt_thresh = 5; /* .5% and under */ 198 static uint64_t rack_rxt_clamp_thresh = 0; /* Do we do the rxt clamp thing */ 199 static int32_t rack_dnd_default = 0; /* For rr_conf = 3, what is the default for dnd */ 200 static int32_t rack_rxt_controls = 0; 201 static int32_t rack_fill_cw_state = 0; 202 static uint8_t rack_req_measurements = 1; 203 /* Attack threshold detections */ 204 static uint32_t rack_highest_sack_thresh_seen = 0; 205 static uint32_t rack_highest_move_thresh_seen = 0; 206 static uint32_t rack_merge_out_sacks_on_attack = 0; 207 static int32_t rack_enable_hw_pacing = 0; /* Due to CCSP keep it off by default */ 208 static int32_t rack_hw_pace_extra_slots = 0; /* 2 extra MSS time betweens */ 209 static int32_t rack_hw_rate_caps = 0; /* 1; */ 210 static int32_t rack_hw_rate_cap_per = 0; /* 0 -- off */ 211 static int32_t rack_hw_rate_min = 0; /* 1500000;*/ 212 static int32_t rack_hw_rate_to_low = 0; /* 1200000; */ 213 static int32_t rack_hw_up_only = 0; 214 static int32_t rack_stats_gets_ms_rtt = 1; 215 static int32_t rack_prr_addbackmax = 2; 216 static int32_t rack_do_hystart = 0; 217 static int32_t rack_apply_rtt_with_reduced_conf = 0; 218 static int32_t rack_hibeta_setting = 0; 219 static int32_t rack_default_pacing_divisor = 250; 220 static int32_t rack_uses_full_dgp_in_rec = 1; 221 static uint16_t rack_pacing_min_seg = 0; 222 223 224 static uint32_t sad_seg_size_per = 800; /* 80.0 % */ 225 static int32_t rack_pkt_delay = 1000; 226 static int32_t rack_send_a_lot_in_prr = 1; 227 static int32_t rack_min_to = 1000; /* Number of microsecond min timeout */ 228 static int32_t rack_verbose_logging = 0; 229 static int32_t rack_ignore_data_after_close = 1; 230 static int32_t rack_enable_shared_cwnd = 1; 231 static int32_t rack_use_cmp_acks = 1; 232 static int32_t rack_use_fsb = 1; 233 static int32_t rack_use_rfo = 1; 234 static int32_t rack_use_rsm_rfo = 1; 235 static int32_t rack_max_abc_post_recovery = 2; 236 static int32_t rack_client_low_buf = 0; 237 static int32_t rack_dsack_std_based = 0x3; /* bit field bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */ 238 static int32_t rack_bw_multipler = 2; /* Limit on fill cw's jump up to be this x gp_est */ 239 #ifdef TCP_ACCOUNTING 240 static int32_t rack_tcp_accounting = 0; 241 #endif 242 static int32_t rack_limits_scwnd = 1; 243 static int32_t rack_enable_mqueue_for_nonpaced = 0; 244 static int32_t rack_hybrid_allow_set_maxseg = 0; 245 static int32_t rack_disable_prr = 0; 246 static int32_t use_rack_rr = 1; 247 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/ca)? */ 248 static int32_t rack_persist_min = 250000; /* 250usec */ 249 static int32_t rack_persist_max = 2000000; /* 2 Second in usec's */ 250 static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */ 251 static int32_t rack_default_init_window = 0; /* Use system default */ 252 static int32_t rack_limit_time_with_srtt = 0; 253 static int32_t rack_autosndbuf_inc = 20; /* In percentage form */ 254 static int32_t rack_enobuf_hw_boost_mult = 0; /* How many times the hw rate we boost slot using time_between */ 255 static int32_t rack_enobuf_hw_max = 12000; /* 12 ms in usecs */ 256 static int32_t rack_enobuf_hw_min = 10000; /* 10 ms in usecs */ 257 static int32_t rack_hw_rwnd_factor = 2; /* How many max_segs the rwnd must be before we hold off sending */ 258 static int32_t rack_hw_check_queue = 0; /* Do we always pre-check queue depth of a hw queue */ 259 static int32_t rack_full_buffer_discount = 10; 260 /* 261 * Currently regular tcp has a rto_min of 30ms 262 * the backoff goes 12 times so that ends up 263 * being a total of 122.850 seconds before a 264 * connection is killed. 265 */ 266 static uint32_t rack_def_data_window = 20; 267 static uint32_t rack_goal_bdp = 2; 268 static uint32_t rack_min_srtts = 1; 269 static uint32_t rack_min_measure_usec = 0; 270 static int32_t rack_tlp_min = 10000; /* 10ms */ 271 static int32_t rack_rto_min = 30000; /* 30,000 usec same as main freebsd */ 272 static int32_t rack_rto_max = 4000000; /* 4 seconds in usec's */ 273 static const int32_t rack_free_cache = 2; 274 static int32_t rack_hptsi_segments = 40; 275 static int32_t rack_rate_sample_method = USE_RTT_LOW; 276 static int32_t rack_pace_every_seg = 0; 277 static int32_t rack_delayed_ack_time = 40000; /* 40ms in usecs */ 278 static int32_t rack_slot_reduction = 4; 279 static int32_t rack_wma_divisor = 8; /* For WMA calculation */ 280 static int32_t rack_cwnd_block_ends_measure = 0; 281 static int32_t rack_rwnd_block_ends_measure = 0; 282 static int32_t rack_def_profile = 0; 283 284 static int32_t rack_lower_cwnd_at_tlp = 0; 285 static int32_t rack_limited_retran = 0; 286 static int32_t rack_always_send_oldest = 0; 287 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE; 288 289 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */ 290 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */ 291 static uint16_t rack_per_of_gp_rec = 200; /* 200 % of bw */ 292 293 /* Probertt */ 294 static uint16_t rack_per_of_gp_probertt = 60; /* 60% of bw */ 295 static uint16_t rack_per_of_gp_lowthresh = 40; /* 40% is bottom */ 296 static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */ 297 static uint16_t rack_atexit_prtt_hbp = 130; /* Clamp to 130% on exit prtt if highly buffered path */ 298 static uint16_t rack_atexit_prtt = 130; /* Clamp to 100% on exit prtt if non highly buffered path */ 299 300 static uint32_t rack_max_drain_wait = 2; /* How man gp srtt's before we give up draining */ 301 static uint32_t rack_must_drain = 1; /* How many GP srtt's we *must* wait */ 302 static uint32_t rack_probertt_use_min_rtt_entry = 1; /* Use the min to calculate the goal else gp_srtt */ 303 static uint32_t rack_probertt_use_min_rtt_exit = 0; 304 static uint32_t rack_probe_rtt_sets_cwnd = 0; 305 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */ 306 static uint32_t rack_time_between_probertt = 9600000; /* 9.6 sec in usecs */ 307 static uint32_t rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt periods does probe-rtt last top fraction */ 308 static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last bottom fraction */ 309 static uint32_t rack_min_probertt_hold = 40000; /* Equal to delayed ack time */ 310 static uint32_t rack_probertt_filter_life = 10000000; 311 static uint32_t rack_probertt_lower_within = 10; 312 static uint32_t rack_min_rtt_movement = 250000; /* Must move at least 250ms (in microseconds) to count as a lowering */ 313 static int32_t rack_pace_one_seg = 0; /* Shall we pace for less than 1.4Meg 1MSS at a time */ 314 static int32_t rack_probertt_clear_is = 1; 315 static int32_t rack_max_drain_hbp = 1; /* Extra drain times gpsrtt for highly buffered paths */ 316 static int32_t rack_hbp_thresh = 3; /* what is the divisor max_rtt/min_rtt to decided a hbp */ 317 318 /* Part of pacing */ 319 static int32_t rack_max_per_above = 30; /* When we go to increment stop if above 100+this% */ 320 321 /* Timely information: 322 * 323 * Here we have various control parameters on how 324 * timely may change the multiplier. rack_gain_p5_ub 325 * is associated with timely but not directly influencing 326 * the rate decision like the other variables. It controls 327 * the way fill-cw interacts with timely and caps how much 328 * timely can boost the fill-cw b/w. 329 * 330 * The other values are various boost/shrink numbers as well 331 * as potential caps when adjustments are made to the timely 332 * gain (returned by rack_get_output_gain(). Remember too that 333 * the gain returned can be overriden by other factors such as 334 * probeRTT as well as fixed-rate-pacing. 335 */ 336 static int32_t rack_gain_p5_ub = 250; 337 static int32_t rack_gp_per_bw_mul_up = 2; /* 2% */ 338 static int32_t rack_gp_per_bw_mul_down = 4; /* 4% */ 339 static int32_t rack_gp_rtt_maxmul = 3; /* 3 x maxmin */ 340 static int32_t rack_gp_rtt_minmul = 1; /* minrtt + (minrtt/mindiv) is lower rtt */ 341 static int32_t rack_gp_rtt_mindiv = 4; /* minrtt + (minrtt * minmul/mindiv) is lower rtt */ 342 static int32_t rack_gp_decrease_per = 80; /* Beta value of timely decrease (.8) = 80 */ 343 static int32_t rack_gp_increase_per = 2; /* 2% increase in multiplier */ 344 static int32_t rack_per_lower_bound = 50; /* Don't allow to drop below this multiplier */ 345 static int32_t rack_per_upper_bound_ss = 0; /* Don't allow SS to grow above this */ 346 static int32_t rack_per_upper_bound_ca = 0; /* Don't allow CA to grow above this */ 347 static int32_t rack_do_dyn_mul = 0; /* Are the rack gp multipliers dynamic */ 348 static int32_t rack_gp_no_rec_chg = 1; /* Prohibit recovery from reducing it's multiplier */ 349 static int32_t rack_timely_dec_clear = 6; /* Do we clear decrement count at a value (6)? */ 350 static int32_t rack_timely_max_push_rise = 3; /* One round of pushing */ 351 static int32_t rack_timely_max_push_drop = 3; /* Three round of pushing */ 352 static int32_t rack_timely_min_segs = 4; /* 4 segment minimum */ 353 static int32_t rack_use_max_for_nobackoff = 0; 354 static int32_t rack_timely_int_timely_only = 0; /* do interim timely's only use the timely algo (no b/w changes)? */ 355 static int32_t rack_timely_no_stopping = 0; 356 static int32_t rack_down_raise_thresh = 100; 357 static int32_t rack_req_segs = 1; 358 static uint64_t rack_bw_rate_cap = 0; 359 360 361 /* Rack specific counters */ 362 counter_u64_t rack_saw_enobuf; 363 counter_u64_t rack_saw_enobuf_hw; 364 counter_u64_t rack_saw_enetunreach; 365 counter_u64_t rack_persists_sends; 366 counter_u64_t rack_persists_acks; 367 counter_u64_t rack_persists_loss; 368 counter_u64_t rack_persists_lost_ends; 369 counter_u64_t rack_total_bytes; 370 #ifdef INVARIANTS 371 counter_u64_t rack_adjust_map_bw; 372 #endif 373 /* Tail loss probe counters */ 374 counter_u64_t rack_tlp_tot; 375 counter_u64_t rack_tlp_newdata; 376 counter_u64_t rack_tlp_retran; 377 counter_u64_t rack_tlp_retran_bytes; 378 counter_u64_t rack_to_tot; 379 counter_u64_t rack_hot_alloc; 380 counter_u64_t rack_to_alloc; 381 counter_u64_t rack_to_alloc_hard; 382 counter_u64_t rack_to_alloc_emerg; 383 counter_u64_t rack_to_alloc_limited; 384 counter_u64_t rack_alloc_limited_conns; 385 counter_u64_t rack_split_limited; 386 counter_u64_t rack_rxt_clamps_cwnd; 387 counter_u64_t rack_rxt_clamps_cwnd_uniq; 388 389 counter_u64_t rack_multi_single_eq; 390 counter_u64_t rack_proc_non_comp_ack; 391 392 counter_u64_t rack_fto_send; 393 counter_u64_t rack_fto_rsm_send; 394 counter_u64_t rack_nfto_resend; 395 counter_u64_t rack_non_fto_send; 396 counter_u64_t rack_extended_rfo; 397 398 counter_u64_t rack_sack_proc_all; 399 counter_u64_t rack_sack_proc_short; 400 counter_u64_t rack_sack_proc_restart; 401 counter_u64_t rack_sack_attacks_detected; 402 counter_u64_t rack_sack_attacks_reversed; 403 counter_u64_t rack_sack_attacks_suspect; 404 counter_u64_t rack_sack_used_next_merge; 405 counter_u64_t rack_sack_splits; 406 counter_u64_t rack_sack_used_prev_merge; 407 counter_u64_t rack_sack_skipped_acked; 408 counter_u64_t rack_ack_total; 409 counter_u64_t rack_express_sack; 410 counter_u64_t rack_sack_total; 411 counter_u64_t rack_move_none; 412 counter_u64_t rack_move_some; 413 414 counter_u64_t rack_input_idle_reduces; 415 counter_u64_t rack_collapsed_win; 416 counter_u64_t rack_collapsed_win_seen; 417 counter_u64_t rack_collapsed_win_rxt; 418 counter_u64_t rack_collapsed_win_rxt_bytes; 419 counter_u64_t rack_try_scwnd; 420 counter_u64_t rack_hw_pace_init_fail; 421 counter_u64_t rack_hw_pace_lost; 422 423 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE]; 424 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE]; 425 426 427 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2))) 428 429 #define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do { \ 430 (tv) = (value) + slop; \ 431 if ((u_long)(tv) < (u_long)(tvmin)) \ 432 (tv) = (tvmin); \ 433 if ((u_long)(tv) > (u_long)(tvmax)) \ 434 (tv) = (tvmax); \ 435 } while (0) 436 437 static void 438 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line); 439 440 static int 441 rack_process_ack(struct mbuf *m, struct tcphdr *th, 442 struct socket *so, struct tcpcb *tp, struct tcpopt *to, 443 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val); 444 static int 445 rack_process_data(struct mbuf *m, struct tcphdr *th, 446 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 447 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 448 static void 449 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, 450 uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery); 451 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack); 452 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack, 453 uint8_t limit_type); 454 static struct rack_sendmap * 455 rack_check_recovery_mode(struct tcpcb *tp, 456 uint32_t tsused); 457 static void 458 rack_cong_signal(struct tcpcb *tp, 459 uint32_t type, uint32_t ack, int ); 460 static void rack_counter_destroy(void); 461 static int 462 rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt); 463 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how); 464 static void 465 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override); 466 static void 467 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 468 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos); 469 static void rack_dtor(void *mem, int32_t size, void *arg); 470 static void 471 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 472 uint32_t flex1, uint32_t flex2, 473 uint32_t flex3, uint32_t flex4, 474 uint32_t flex5, uint32_t flex6, 475 uint16_t flex7, uint8_t mod); 476 477 static void 478 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 479 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line, 480 struct rack_sendmap *rsm, uint8_t quality); 481 static struct rack_sendmap * 482 rack_find_high_nonack(struct tcp_rack *rack, 483 struct rack_sendmap *rsm); 484 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack); 485 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm); 486 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged); 487 static int rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt); 488 static void 489 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 490 tcp_seq th_ack, int line, uint8_t quality); 491 static void 492 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm); 493 494 static uint32_t 495 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss); 496 static int32_t rack_handoff_ok(struct tcpcb *tp); 497 static int32_t rack_init(struct tcpcb *tp, void **ptr); 498 static void rack_init_sysctls(void); 499 500 static void 501 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, 502 struct tcphdr *th, int entered_rec, int dup_ack_struck, 503 int *dsack_seen, int *sacks_seen); 504 static void 505 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 506 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t ts, 507 struct rack_sendmap *hintrsm, uint16_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls, int segsiz); 508 509 static uint64_t rack_get_gp_est(struct tcp_rack *rack); 510 511 static void 512 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack, 513 struct rack_sendmap *rsm); 514 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm); 515 static int32_t rack_output(struct tcpcb *tp); 516 517 static uint32_t 518 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, 519 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm, 520 uint32_t cts, int *no_extra, int *moved_two, uint32_t segsiz); 521 static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq); 522 static void rack_remxt_tmr(struct tcpcb *tp); 523 static int rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt); 524 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack); 525 static int32_t rack_stopall(struct tcpcb *tp); 526 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line); 527 static uint32_t 528 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 529 struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint16_t add_flag, int segsiz); 530 static void 531 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 532 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag, int segsiz); 533 static int 534 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 535 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack); 536 static int32_t tcp_addrack(module_t mod, int32_t type, void *data); 537 static int 538 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, 539 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 540 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 541 static int 542 rack_do_closing(struct mbuf *m, struct tcphdr *th, 543 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 544 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 545 static int 546 rack_do_established(struct mbuf *m, struct tcphdr *th, 547 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 548 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 549 static int 550 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, 551 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 552 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos); 553 static int 554 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, 555 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 556 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 557 static int 558 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, 559 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 560 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 561 static int 562 rack_do_lastack(struct mbuf *m, struct tcphdr *th, 563 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 564 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 565 static int 566 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, 567 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 568 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 569 static int 570 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, 571 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 572 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 573 static void rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts); 574 struct rack_sendmap * 575 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, 576 uint32_t tsused); 577 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, 578 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt); 579 static void 580 tcp_rack_partialack(struct tcpcb *tp); 581 static int 582 rack_set_profile(struct tcp_rack *rack, int prof); 583 static void 584 rack_apply_deferred_options(struct tcp_rack *rack); 585 586 int32_t rack_clear_counter=0; 587 588 static uint64_t 589 rack_get_lt_bw(struct tcp_rack *rack) 590 { 591 struct timeval tv; 592 uint64_t tim, bytes; 593 594 tim = rack->r_ctl.lt_bw_time; 595 bytes = rack->r_ctl.lt_bw_bytes; 596 if (rack->lt_bw_up) { 597 /* Include all the current bytes too */ 598 microuptime(&tv); 599 bytes += (rack->rc_tp->snd_una - rack->r_ctl.lt_seq); 600 tim += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark); 601 } 602 if ((bytes != 0) && (tim != 0)) 603 return ((bytes * (uint64_t)1000000) / tim); 604 else 605 return (0); 606 } 607 608 static void 609 rack_swap_beta_values(struct tcp_rack *rack, uint8_t flex8) 610 { 611 struct sockopt sopt; 612 struct cc_newreno_opts opt; 613 struct newreno old; 614 struct tcpcb *tp; 615 int error, failed = 0; 616 617 tp = rack->rc_tp; 618 if (tp->t_cc == NULL) { 619 /* Tcb is leaving */ 620 return; 621 } 622 rack->rc_pacing_cc_set = 1; 623 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 624 /* Not new-reno we can't play games with beta! */ 625 failed = 1; 626 goto out; 627 628 } 629 if (CC_ALGO(tp)->ctl_output == NULL) { 630 /* Huh, not using new-reno so no swaps.? */ 631 failed = 2; 632 goto out; 633 } 634 /* Get the current values out */ 635 sopt.sopt_valsize = sizeof(struct cc_newreno_opts); 636 sopt.sopt_dir = SOPT_GET; 637 opt.name = CC_NEWRENO_BETA; 638 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 639 if (error) { 640 failed = 3; 641 goto out; 642 } 643 old.beta = opt.val; 644 opt.name = CC_NEWRENO_BETA_ECN; 645 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 646 if (error) { 647 failed = 4; 648 goto out; 649 } 650 old.beta_ecn = opt.val; 651 652 /* Now lets set in the values we have stored */ 653 sopt.sopt_dir = SOPT_SET; 654 opt.name = CC_NEWRENO_BETA; 655 opt.val = rack->r_ctl.rc_saved_beta.beta; 656 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 657 if (error) { 658 failed = 5; 659 goto out; 660 } 661 opt.name = CC_NEWRENO_BETA_ECN; 662 opt.val = rack->r_ctl.rc_saved_beta.beta_ecn; 663 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 664 if (error) { 665 failed = 6; 666 goto out; 667 } 668 /* Save off the values for restoral */ 669 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno)); 670 out: 671 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 672 union tcp_log_stackspecific log; 673 struct timeval tv; 674 struct newreno *ptr; 675 676 ptr = ((struct newreno *)tp->t_ccv.cc_data); 677 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 678 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 679 log.u_bbr.flex1 = ptr->beta; 680 log.u_bbr.flex2 = ptr->beta_ecn; 681 log.u_bbr.flex3 = ptr->newreno_flags; 682 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta; 683 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn; 684 log.u_bbr.flex6 = failed; 685 log.u_bbr.flex7 = rack->gp_ready; 686 log.u_bbr.flex7 <<= 1; 687 log.u_bbr.flex7 |= rack->use_fixed_rate; 688 log.u_bbr.flex7 <<= 1; 689 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; 690 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 691 log.u_bbr.flex8 = flex8; 692 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, error, 693 0, &log, false, NULL, NULL, 0, &tv); 694 } 695 } 696 697 static void 698 rack_set_cc_pacing(struct tcp_rack *rack) 699 { 700 if (rack->rc_pacing_cc_set) 701 return; 702 /* 703 * Use the swap utility placing in 3 for flex8 to id a 704 * set of a new set of values. 705 */ 706 rack->rc_pacing_cc_set = 1; 707 rack_swap_beta_values(rack, 3); 708 } 709 710 static void 711 rack_undo_cc_pacing(struct tcp_rack *rack) 712 { 713 if (rack->rc_pacing_cc_set == 0) 714 return; 715 /* 716 * Use the swap utility placing in 4 for flex8 to id a 717 * restoral of the old values. 718 */ 719 rack->rc_pacing_cc_set = 0; 720 rack_swap_beta_values(rack, 4); 721 } 722 723 static void 724 rack_log_gpset(struct tcp_rack *rack, uint32_t seq_end, uint32_t ack_end_t, 725 uint32_t send_end_t, int line, uint8_t mode, struct rack_sendmap *rsm) 726 { 727 if (tcp_bblogging_on(rack->rc_tp) && (rack_verbose_logging != 0)) { 728 union tcp_log_stackspecific log; 729 struct timeval tv; 730 731 memset(&log, 0, sizeof(log)); 732 log.u_bbr.flex1 = seq_end; 733 log.u_bbr.flex2 = rack->rc_tp->gput_seq; 734 log.u_bbr.flex3 = ack_end_t; 735 log.u_bbr.flex4 = rack->rc_tp->gput_ts; 736 log.u_bbr.flex5 = send_end_t; 737 log.u_bbr.flex6 = rack->rc_tp->gput_ack; 738 log.u_bbr.flex7 = mode; 739 log.u_bbr.flex8 = 69; 740 log.u_bbr.rttProp = rack->r_ctl.rc_gp_cumack_ts; 741 log.u_bbr.delRate = rack->r_ctl.rc_gp_output_ts; 742 log.u_bbr.pkts_out = line; 743 log.u_bbr.cwnd_gain = rack->app_limited_needs_set; 744 log.u_bbr.pkt_epoch = rack->r_ctl.rc_app_limited_cnt; 745 if (rsm != NULL) { 746 log.u_bbr.applimited = rsm->r_start; 747 log.u_bbr.delivered = rsm->r_end; 748 log.u_bbr.epoch = rsm->r_flags; 749 } 750 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 751 TCP_LOG_EVENTP(rack->rc_tp, NULL, 752 &rack->rc_inp->inp_socket->so_rcv, 753 &rack->rc_inp->inp_socket->so_snd, 754 BBR_LOG_HPTSI_CALC, 0, 755 0, &log, false, &tv); 756 } 757 } 758 759 static int 760 sysctl_rack_clear(SYSCTL_HANDLER_ARGS) 761 { 762 uint32_t stat; 763 int32_t error; 764 765 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t)); 766 if (error || req->newptr == NULL) 767 return error; 768 769 error = SYSCTL_IN(req, &stat, sizeof(uint32_t)); 770 if (error) 771 return (error); 772 if (stat == 1) { 773 #ifdef INVARIANTS 774 printf("Clearing RACK counters\n"); 775 #endif 776 counter_u64_zero(rack_tlp_tot); 777 counter_u64_zero(rack_tlp_newdata); 778 counter_u64_zero(rack_tlp_retran); 779 counter_u64_zero(rack_tlp_retran_bytes); 780 counter_u64_zero(rack_to_tot); 781 counter_u64_zero(rack_saw_enobuf); 782 counter_u64_zero(rack_saw_enobuf_hw); 783 counter_u64_zero(rack_saw_enetunreach); 784 counter_u64_zero(rack_persists_sends); 785 counter_u64_zero(rack_total_bytes); 786 counter_u64_zero(rack_persists_acks); 787 counter_u64_zero(rack_persists_loss); 788 counter_u64_zero(rack_persists_lost_ends); 789 #ifdef INVARIANTS 790 counter_u64_zero(rack_adjust_map_bw); 791 #endif 792 counter_u64_zero(rack_to_alloc_hard); 793 counter_u64_zero(rack_to_alloc_emerg); 794 counter_u64_zero(rack_sack_proc_all); 795 counter_u64_zero(rack_fto_send); 796 counter_u64_zero(rack_fto_rsm_send); 797 counter_u64_zero(rack_extended_rfo); 798 counter_u64_zero(rack_hw_pace_init_fail); 799 counter_u64_zero(rack_hw_pace_lost); 800 counter_u64_zero(rack_non_fto_send); 801 counter_u64_zero(rack_nfto_resend); 802 counter_u64_zero(rack_sack_proc_short); 803 counter_u64_zero(rack_sack_proc_restart); 804 counter_u64_zero(rack_to_alloc); 805 counter_u64_zero(rack_to_alloc_limited); 806 counter_u64_zero(rack_alloc_limited_conns); 807 counter_u64_zero(rack_split_limited); 808 counter_u64_zero(rack_rxt_clamps_cwnd); 809 counter_u64_zero(rack_rxt_clamps_cwnd_uniq); 810 counter_u64_zero(rack_multi_single_eq); 811 counter_u64_zero(rack_proc_non_comp_ack); 812 counter_u64_zero(rack_sack_attacks_detected); 813 counter_u64_zero(rack_sack_attacks_reversed); 814 counter_u64_zero(rack_sack_attacks_suspect); 815 counter_u64_zero(rack_sack_used_next_merge); 816 counter_u64_zero(rack_sack_used_prev_merge); 817 counter_u64_zero(rack_sack_splits); 818 counter_u64_zero(rack_sack_skipped_acked); 819 counter_u64_zero(rack_ack_total); 820 counter_u64_zero(rack_express_sack); 821 counter_u64_zero(rack_sack_total); 822 counter_u64_zero(rack_move_none); 823 counter_u64_zero(rack_move_some); 824 counter_u64_zero(rack_try_scwnd); 825 counter_u64_zero(rack_collapsed_win); 826 counter_u64_zero(rack_collapsed_win_rxt); 827 counter_u64_zero(rack_collapsed_win_seen); 828 counter_u64_zero(rack_collapsed_win_rxt_bytes); 829 } else if (stat == 2) { 830 #ifdef INVARIANTS 831 printf("Clearing RACK option array\n"); 832 #endif 833 COUNTER_ARRAY_ZERO(rack_opts_arry, RACK_OPTS_SIZE); 834 } else if (stat == 3) { 835 printf("Rack has no stats counters to clear (use 1 to clear all stats in sysctl node)\n"); 836 } else if (stat == 4) { 837 #ifdef INVARIANTS 838 printf("Clearing RACK out size array\n"); 839 #endif 840 COUNTER_ARRAY_ZERO(rack_out_size, TCP_MSS_ACCT_SIZE); 841 } 842 rack_clear_counter = 0; 843 return (0); 844 } 845 846 static void 847 rack_init_sysctls(void) 848 { 849 struct sysctl_oid *rack_counters; 850 struct sysctl_oid *rack_attack; 851 struct sysctl_oid *rack_pacing; 852 struct sysctl_oid *rack_timely; 853 struct sysctl_oid *rack_timers; 854 struct sysctl_oid *rack_tlp; 855 struct sysctl_oid *rack_misc; 856 struct sysctl_oid *rack_features; 857 struct sysctl_oid *rack_measure; 858 struct sysctl_oid *rack_probertt; 859 struct sysctl_oid *rack_hw_pacing; 860 861 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 862 SYSCTL_CHILDREN(rack_sysctl_root), 863 OID_AUTO, 864 "sack_attack", 865 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 866 "Rack Sack Attack Counters and Controls"); 867 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 868 SYSCTL_CHILDREN(rack_sysctl_root), 869 OID_AUTO, 870 "stats", 871 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 872 "Rack Counters"); 873 SYSCTL_ADD_S32(&rack_sysctl_ctx, 874 SYSCTL_CHILDREN(rack_sysctl_root), 875 OID_AUTO, "rate_sample_method", CTLFLAG_RW, 876 &rack_rate_sample_method , USE_RTT_LOW, 877 "What method should we use for rate sampling 0=high, 1=low "); 878 /* Probe rtt related controls */ 879 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 880 SYSCTL_CHILDREN(rack_sysctl_root), 881 OID_AUTO, 882 "probertt", 883 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 884 "ProbeRTT related Controls"); 885 SYSCTL_ADD_U16(&rack_sysctl_ctx, 886 SYSCTL_CHILDREN(rack_probertt), 887 OID_AUTO, "exit_per_hpb", CTLFLAG_RW, 888 &rack_atexit_prtt_hbp, 130, 889 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%"); 890 SYSCTL_ADD_U16(&rack_sysctl_ctx, 891 SYSCTL_CHILDREN(rack_probertt), 892 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW, 893 &rack_atexit_prtt, 130, 894 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%"); 895 SYSCTL_ADD_U16(&rack_sysctl_ctx, 896 SYSCTL_CHILDREN(rack_probertt), 897 OID_AUTO, "gp_per_mul", CTLFLAG_RW, 898 &rack_per_of_gp_probertt, 60, 899 "What percentage of goodput do we pace at in probertt"); 900 SYSCTL_ADD_U16(&rack_sysctl_ctx, 901 SYSCTL_CHILDREN(rack_probertt), 902 OID_AUTO, "gp_per_reduce", CTLFLAG_RW, 903 &rack_per_of_gp_probertt_reduce, 10, 904 "What percentage of goodput do we reduce every gp_srtt"); 905 SYSCTL_ADD_U16(&rack_sysctl_ctx, 906 SYSCTL_CHILDREN(rack_probertt), 907 OID_AUTO, "gp_per_low", CTLFLAG_RW, 908 &rack_per_of_gp_lowthresh, 40, 909 "What percentage of goodput do we allow the multiplier to fall to"); 910 SYSCTL_ADD_U32(&rack_sysctl_ctx, 911 SYSCTL_CHILDREN(rack_probertt), 912 OID_AUTO, "time_between", CTLFLAG_RW, 913 & rack_time_between_probertt, 96000000, 914 "How many useconds between the lowest rtt falling must past before we enter probertt"); 915 SYSCTL_ADD_U32(&rack_sysctl_ctx, 916 SYSCTL_CHILDREN(rack_probertt), 917 OID_AUTO, "safety", CTLFLAG_RW, 918 &rack_probe_rtt_safety_val, 2000000, 919 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)"); 920 SYSCTL_ADD_U32(&rack_sysctl_ctx, 921 SYSCTL_CHILDREN(rack_probertt), 922 OID_AUTO, "sets_cwnd", CTLFLAG_RW, 923 &rack_probe_rtt_sets_cwnd, 0, 924 "Do we set the cwnd too (if always_lower is on)"); 925 SYSCTL_ADD_U32(&rack_sysctl_ctx, 926 SYSCTL_CHILDREN(rack_probertt), 927 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW, 928 &rack_max_drain_wait, 2, 929 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal"); 930 SYSCTL_ADD_U32(&rack_sysctl_ctx, 931 SYSCTL_CHILDREN(rack_probertt), 932 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW, 933 &rack_must_drain, 1, 934 "We must drain this many gp_srtt's waiting for flight to reach goal"); 935 SYSCTL_ADD_U32(&rack_sysctl_ctx, 936 SYSCTL_CHILDREN(rack_probertt), 937 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW, 938 &rack_probertt_use_min_rtt_entry, 1, 939 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); 940 SYSCTL_ADD_U32(&rack_sysctl_ctx, 941 SYSCTL_CHILDREN(rack_probertt), 942 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW, 943 &rack_probertt_use_min_rtt_exit, 0, 944 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); 945 SYSCTL_ADD_U32(&rack_sysctl_ctx, 946 SYSCTL_CHILDREN(rack_probertt), 947 OID_AUTO, "length_div", CTLFLAG_RW, 948 &rack_probertt_gpsrtt_cnt_div, 0, 949 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)"); 950 SYSCTL_ADD_U32(&rack_sysctl_ctx, 951 SYSCTL_CHILDREN(rack_probertt), 952 OID_AUTO, "length_mul", CTLFLAG_RW, 953 &rack_probertt_gpsrtt_cnt_mul, 0, 954 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)"); 955 SYSCTL_ADD_U32(&rack_sysctl_ctx, 956 SYSCTL_CHILDREN(rack_probertt), 957 OID_AUTO, "holdtim_at_target", CTLFLAG_RW, 958 &rack_min_probertt_hold, 200000, 959 "What is the minimum time we hold probertt at target"); 960 SYSCTL_ADD_U32(&rack_sysctl_ctx, 961 SYSCTL_CHILDREN(rack_probertt), 962 OID_AUTO, "filter_life", CTLFLAG_RW, 963 &rack_probertt_filter_life, 10000000, 964 "What is the time for the filters life in useconds"); 965 SYSCTL_ADD_U32(&rack_sysctl_ctx, 966 SYSCTL_CHILDREN(rack_probertt), 967 OID_AUTO, "lower_within", CTLFLAG_RW, 968 &rack_probertt_lower_within, 10, 969 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); 970 SYSCTL_ADD_U32(&rack_sysctl_ctx, 971 SYSCTL_CHILDREN(rack_probertt), 972 OID_AUTO, "must_move", CTLFLAG_RW, 973 &rack_min_rtt_movement, 250, 974 "How much is the minimum movement in rtt to count as a drop for probertt purposes"); 975 SYSCTL_ADD_U32(&rack_sysctl_ctx, 976 SYSCTL_CHILDREN(rack_probertt), 977 OID_AUTO, "clear_is_cnts", CTLFLAG_RW, 978 &rack_probertt_clear_is, 1, 979 "Do we clear I/S counts on exiting probe-rtt"); 980 SYSCTL_ADD_S32(&rack_sysctl_ctx, 981 SYSCTL_CHILDREN(rack_probertt), 982 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW, 983 &rack_max_drain_hbp, 1, 984 "How many extra drain gpsrtt's do we get in highly buffered paths"); 985 SYSCTL_ADD_S32(&rack_sysctl_ctx, 986 SYSCTL_CHILDREN(rack_probertt), 987 OID_AUTO, "hbp_threshold", CTLFLAG_RW, 988 &rack_hbp_thresh, 3, 989 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); 990 /* Pacing related sysctls */ 991 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 992 SYSCTL_CHILDREN(rack_sysctl_root), 993 OID_AUTO, 994 "pacing", 995 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 996 "Pacing related Controls"); 997 SYSCTL_ADD_S32(&rack_sysctl_ctx, 998 SYSCTL_CHILDREN(rack_pacing), 999 OID_AUTO, "fulldgpinrec", CTLFLAG_RW, 1000 &rack_uses_full_dgp_in_rec, 1, 1001 "Do we use all DGP features in recovery (fillcw, timely et.al.)?"); 1002 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1003 SYSCTL_CHILDREN(rack_pacing), 1004 OID_AUTO, "fullbufdisc", CTLFLAG_RW, 1005 &rack_full_buffer_discount, 10, 1006 "What percentage b/w reduction over the GP estimate for a full buffer (default=0 off)?"); 1007 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1008 SYSCTL_CHILDREN(rack_pacing), 1009 OID_AUTO, "fillcw", CTLFLAG_RW, 1010 &rack_fill_cw_state, 0, 1011 "Enable fillcw on new connections (default=0 off)?"); 1012 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1013 SYSCTL_CHILDREN(rack_pacing), 1014 OID_AUTO, "min_burst", CTLFLAG_RW, 1015 &rack_pacing_min_seg, 0, 1016 "What is the min burst size for pacing (0 disables)?"); 1017 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1018 SYSCTL_CHILDREN(rack_pacing), 1019 OID_AUTO, "divisor", CTLFLAG_RW, 1020 &rack_default_pacing_divisor, 4, 1021 "What is the default divisor given to the rl code?"); 1022 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1023 SYSCTL_CHILDREN(rack_pacing), 1024 OID_AUTO, "fillcw_max_mult", CTLFLAG_RW, 1025 &rack_bw_multipler, 2, 1026 "What is the multiplier of the current gp_est that fillcw can increase the b/w too?"); 1027 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1028 SYSCTL_CHILDREN(rack_pacing), 1029 OID_AUTO, "max_pace_over", CTLFLAG_RW, 1030 &rack_max_per_above, 30, 1031 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)"); 1032 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1033 SYSCTL_CHILDREN(rack_pacing), 1034 OID_AUTO, "allow1mss", CTLFLAG_RW, 1035 &rack_pace_one_seg, 0, 1036 "Do we allow low b/w pacing of 1MSS instead of two (1.2Meg and less)?"); 1037 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1038 SYSCTL_CHILDREN(rack_pacing), 1039 OID_AUTO, "limit_wsrtt", CTLFLAG_RW, 1040 &rack_limit_time_with_srtt, 0, 1041 "Do we limit pacing time based on srtt"); 1042 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1043 SYSCTL_CHILDREN(rack_pacing), 1044 OID_AUTO, "init_win", CTLFLAG_RW, 1045 &rack_default_init_window, 0, 1046 "Do we have a rack initial window 0 = system default"); 1047 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1048 SYSCTL_CHILDREN(rack_pacing), 1049 OID_AUTO, "gp_per_ss", CTLFLAG_RW, 1050 &rack_per_of_gp_ss, 250, 1051 "If non zero, what percentage of goodput to pace at in slow start"); 1052 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1053 SYSCTL_CHILDREN(rack_pacing), 1054 OID_AUTO, "gp_per_ca", CTLFLAG_RW, 1055 &rack_per_of_gp_ca, 150, 1056 "If non zero, what percentage of goodput to pace at in congestion avoidance"); 1057 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1058 SYSCTL_CHILDREN(rack_pacing), 1059 OID_AUTO, "gp_per_rec", CTLFLAG_RW, 1060 &rack_per_of_gp_rec, 200, 1061 "If non zero, what percentage of goodput to pace at in recovery"); 1062 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1063 SYSCTL_CHILDREN(rack_pacing), 1064 OID_AUTO, "pace_max_seg", CTLFLAG_RW, 1065 &rack_hptsi_segments, 40, 1066 "What size is the max for TSO segments in pacing and burst mitigation"); 1067 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1068 SYSCTL_CHILDREN(rack_pacing), 1069 OID_AUTO, "burst_reduces", CTLFLAG_RW, 1070 &rack_slot_reduction, 4, 1071 "When doing only burst mitigation what is the reduce divisor"); 1072 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1073 SYSCTL_CHILDREN(rack_sysctl_root), 1074 OID_AUTO, "use_pacing", CTLFLAG_RW, 1075 &rack_pace_every_seg, 0, 1076 "If set we use pacing, if clear we use only the original burst mitigation"); 1077 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1078 SYSCTL_CHILDREN(rack_pacing), 1079 OID_AUTO, "rate_cap", CTLFLAG_RW, 1080 &rack_bw_rate_cap, 0, 1081 "If set we apply this value to the absolute rate cap used by pacing"); 1082 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1083 SYSCTL_CHILDREN(rack_sysctl_root), 1084 OID_AUTO, "req_measure_cnt", CTLFLAG_RW, 1085 &rack_req_measurements, 1, 1086 "If doing dynamic pacing, how many measurements must be in before we start pacing?"); 1087 /* Hardware pacing */ 1088 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1089 SYSCTL_CHILDREN(rack_sysctl_root), 1090 OID_AUTO, 1091 "hdwr_pacing", 1092 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1093 "Pacing related Controls"); 1094 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1095 SYSCTL_CHILDREN(rack_hw_pacing), 1096 OID_AUTO, "rwnd_factor", CTLFLAG_RW, 1097 &rack_hw_rwnd_factor, 2, 1098 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?"); 1099 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1100 SYSCTL_CHILDREN(rack_hw_pacing), 1101 OID_AUTO, "precheck", CTLFLAG_RW, 1102 &rack_hw_check_queue, 0, 1103 "Do we always precheck the hdwr pacing queue to avoid ENOBUF's?"); 1104 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1105 SYSCTL_CHILDREN(rack_hw_pacing), 1106 OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW, 1107 &rack_enobuf_hw_boost_mult, 0, 1108 "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?"); 1109 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1110 SYSCTL_CHILDREN(rack_hw_pacing), 1111 OID_AUTO, "pace_enobuf_max", CTLFLAG_RW, 1112 &rack_enobuf_hw_max, 2, 1113 "What is the max boost the pacing time if we see a ENOBUFS?"); 1114 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1115 SYSCTL_CHILDREN(rack_hw_pacing), 1116 OID_AUTO, "pace_enobuf_min", CTLFLAG_RW, 1117 &rack_enobuf_hw_min, 2, 1118 "What is the min boost the pacing time if we see a ENOBUFS?"); 1119 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1120 SYSCTL_CHILDREN(rack_hw_pacing), 1121 OID_AUTO, "enable", CTLFLAG_RW, 1122 &rack_enable_hw_pacing, 0, 1123 "Should RACK attempt to use hw pacing?"); 1124 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1125 SYSCTL_CHILDREN(rack_hw_pacing), 1126 OID_AUTO, "rate_cap", CTLFLAG_RW, 1127 &rack_hw_rate_caps, 0, 1128 "Does the highest hardware pacing rate cap the rate we will send at??"); 1129 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1130 SYSCTL_CHILDREN(rack_hw_pacing), 1131 OID_AUTO, "uncap_per", CTLFLAG_RW, 1132 &rack_hw_rate_cap_per, 0, 1133 "If you go over b/w by this amount you will be uncapped (0 = never)"); 1134 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1135 SYSCTL_CHILDREN(rack_hw_pacing), 1136 OID_AUTO, "rate_min", CTLFLAG_RW, 1137 &rack_hw_rate_min, 0, 1138 "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?"); 1139 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1140 SYSCTL_CHILDREN(rack_hw_pacing), 1141 OID_AUTO, "rate_to_low", CTLFLAG_RW, 1142 &rack_hw_rate_to_low, 0, 1143 "If we fall below this rate, dis-engage hw pacing?"); 1144 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1145 SYSCTL_CHILDREN(rack_hw_pacing), 1146 OID_AUTO, "up_only", CTLFLAG_RW, 1147 &rack_hw_up_only, 0, 1148 "Do we allow hw pacing to lower the rate selected?"); 1149 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1150 SYSCTL_CHILDREN(rack_hw_pacing), 1151 OID_AUTO, "extra_mss_precise", CTLFLAG_RW, 1152 &rack_hw_pace_extra_slots, 0, 1153 "If the rates between software and hardware match precisely how many extra time_betweens do we get?"); 1154 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1155 SYSCTL_CHILDREN(rack_sysctl_root), 1156 OID_AUTO, 1157 "timely", 1158 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1159 "Rack Timely RTT Controls"); 1160 /* Timely based GP dynmics */ 1161 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1162 SYSCTL_CHILDREN(rack_timely), 1163 OID_AUTO, "upper", CTLFLAG_RW, 1164 &rack_gp_per_bw_mul_up, 2, 1165 "Rack timely upper range for equal b/w (in percentage)"); 1166 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1167 SYSCTL_CHILDREN(rack_timely), 1168 OID_AUTO, "lower", CTLFLAG_RW, 1169 &rack_gp_per_bw_mul_down, 4, 1170 "Rack timely lower range for equal b/w (in percentage)"); 1171 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1172 SYSCTL_CHILDREN(rack_timely), 1173 OID_AUTO, "rtt_max_mul", CTLFLAG_RW, 1174 &rack_gp_rtt_maxmul, 3, 1175 "Rack timely multiplier of lowest rtt for rtt_max"); 1176 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1177 SYSCTL_CHILDREN(rack_timely), 1178 OID_AUTO, "rtt_min_div", CTLFLAG_RW, 1179 &rack_gp_rtt_mindiv, 4, 1180 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1181 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1182 SYSCTL_CHILDREN(rack_timely), 1183 OID_AUTO, "rtt_min_mul", CTLFLAG_RW, 1184 &rack_gp_rtt_minmul, 1, 1185 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1186 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1187 SYSCTL_CHILDREN(rack_timely), 1188 OID_AUTO, "decrease", CTLFLAG_RW, 1189 &rack_gp_decrease_per, 80, 1190 "Rack timely Beta value 80 = .8 (scaled by 100)"); 1191 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1192 SYSCTL_CHILDREN(rack_timely), 1193 OID_AUTO, "increase", CTLFLAG_RW, 1194 &rack_gp_increase_per, 2, 1195 "Rack timely increase perentage of our GP multiplication factor"); 1196 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1197 SYSCTL_CHILDREN(rack_timely), 1198 OID_AUTO, "lowerbound", CTLFLAG_RW, 1199 &rack_per_lower_bound, 50, 1200 "Rack timely lowest percentage we allow GP multiplier to fall to"); 1201 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1202 SYSCTL_CHILDREN(rack_timely), 1203 OID_AUTO, "p5_upper", CTLFLAG_RW, 1204 &rack_gain_p5_ub, 250, 1205 "Profile 5 upper bound to timely gain"); 1206 1207 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1208 SYSCTL_CHILDREN(rack_timely), 1209 OID_AUTO, "upperboundss", CTLFLAG_RW, 1210 &rack_per_upper_bound_ss, 0, 1211 "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)"); 1212 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1213 SYSCTL_CHILDREN(rack_timely), 1214 OID_AUTO, "upperboundca", CTLFLAG_RW, 1215 &rack_per_upper_bound_ca, 0, 1216 "Rack timely highest percentage we allow GP multiplier to CA raise to (0 is no upperbound)"); 1217 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1218 SYSCTL_CHILDREN(rack_timely), 1219 OID_AUTO, "dynamicgp", CTLFLAG_RW, 1220 &rack_do_dyn_mul, 0, 1221 "Rack timely do we enable dynmaic timely goodput by default"); 1222 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1223 SYSCTL_CHILDREN(rack_timely), 1224 OID_AUTO, "no_rec_red", CTLFLAG_RW, 1225 &rack_gp_no_rec_chg, 1, 1226 "Rack timely do we prohibit the recovery multiplier from being lowered"); 1227 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1228 SYSCTL_CHILDREN(rack_timely), 1229 OID_AUTO, "red_clear_cnt", CTLFLAG_RW, 1230 &rack_timely_dec_clear, 6, 1231 "Rack timely what threshold do we count to before another boost during b/w decent"); 1232 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1233 SYSCTL_CHILDREN(rack_timely), 1234 OID_AUTO, "max_push_rise", CTLFLAG_RW, 1235 &rack_timely_max_push_rise, 3, 1236 "Rack timely how many times do we push up with b/w increase"); 1237 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1238 SYSCTL_CHILDREN(rack_timely), 1239 OID_AUTO, "max_push_drop", CTLFLAG_RW, 1240 &rack_timely_max_push_drop, 3, 1241 "Rack timely how many times do we push back on b/w decent"); 1242 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1243 SYSCTL_CHILDREN(rack_timely), 1244 OID_AUTO, "min_segs", CTLFLAG_RW, 1245 &rack_timely_min_segs, 4, 1246 "Rack timely when setting the cwnd what is the min num segments"); 1247 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1248 SYSCTL_CHILDREN(rack_timely), 1249 OID_AUTO, "noback_max", CTLFLAG_RW, 1250 &rack_use_max_for_nobackoff, 0, 1251 "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min"); 1252 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1253 SYSCTL_CHILDREN(rack_timely), 1254 OID_AUTO, "interim_timely_only", CTLFLAG_RW, 1255 &rack_timely_int_timely_only, 0, 1256 "Rack timely when doing interim timely's do we only do timely (no b/w consideration)"); 1257 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1258 SYSCTL_CHILDREN(rack_timely), 1259 OID_AUTO, "nonstop", CTLFLAG_RW, 1260 &rack_timely_no_stopping, 0, 1261 "Rack timely don't stop increase"); 1262 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1263 SYSCTL_CHILDREN(rack_timely), 1264 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW, 1265 &rack_down_raise_thresh, 100, 1266 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)"); 1267 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1268 SYSCTL_CHILDREN(rack_timely), 1269 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW, 1270 &rack_req_segs, 1, 1271 "Bottom dragging if not these many segments outstanding and room"); 1272 1273 /* TLP and Rack related parameters */ 1274 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1275 SYSCTL_CHILDREN(rack_sysctl_root), 1276 OID_AUTO, 1277 "tlp", 1278 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1279 "TLP and Rack related Controls"); 1280 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1281 SYSCTL_CHILDREN(rack_tlp), 1282 OID_AUTO, "use_rrr", CTLFLAG_RW, 1283 &use_rack_rr, 1, 1284 "Do we use Rack Rapid Recovery"); 1285 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1286 SYSCTL_CHILDREN(rack_tlp), 1287 OID_AUTO, "post_rec_labc", CTLFLAG_RW, 1288 &rack_max_abc_post_recovery, 2, 1289 "Since we do early recovery, do we override the l_abc to a value, if so what?"); 1290 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1291 SYSCTL_CHILDREN(rack_tlp), 1292 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW, 1293 &rack_non_rxt_use_cr, 0, 1294 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk"); 1295 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1296 SYSCTL_CHILDREN(rack_tlp), 1297 OID_AUTO, "tlpmethod", CTLFLAG_RW, 1298 &rack_tlp_threshold_use, TLP_USE_TWO_ONE, 1299 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); 1300 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1301 SYSCTL_CHILDREN(rack_tlp), 1302 OID_AUTO, "limit", CTLFLAG_RW, 1303 &rack_tlp_limit, 2, 1304 "How many TLP's can be sent without sending new data"); 1305 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1306 SYSCTL_CHILDREN(rack_tlp), 1307 OID_AUTO, "use_greater", CTLFLAG_RW, 1308 &rack_tlp_use_greater, 1, 1309 "Should we use the rack_rtt time if its greater than srtt"); 1310 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1311 SYSCTL_CHILDREN(rack_tlp), 1312 OID_AUTO, "tlpminto", CTLFLAG_RW, 1313 &rack_tlp_min, 10000, 1314 "TLP minimum timeout per the specification (in microseconds)"); 1315 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1316 SYSCTL_CHILDREN(rack_tlp), 1317 OID_AUTO, "send_oldest", CTLFLAG_RW, 1318 &rack_always_send_oldest, 0, 1319 "Should we always send the oldest TLP and RACK-TLP"); 1320 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1321 SYSCTL_CHILDREN(rack_tlp), 1322 OID_AUTO, "rack_tlimit", CTLFLAG_RW, 1323 &rack_limited_retran, 0, 1324 "How many times can a rack timeout drive out sends"); 1325 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1326 SYSCTL_CHILDREN(rack_tlp), 1327 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW, 1328 &rack_lower_cwnd_at_tlp, 0, 1329 "When a TLP completes a retran should we enter recovery"); 1330 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1331 SYSCTL_CHILDREN(rack_tlp), 1332 OID_AUTO, "reorder_thresh", CTLFLAG_RW, 1333 &rack_reorder_thresh, 2, 1334 "What factor for rack will be added when seeing reordering (shift right)"); 1335 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1336 SYSCTL_CHILDREN(rack_tlp), 1337 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW, 1338 &rack_tlp_thresh, 1, 1339 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); 1340 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1341 SYSCTL_CHILDREN(rack_tlp), 1342 OID_AUTO, "reorder_fade", CTLFLAG_RW, 1343 &rack_reorder_fade, 60000000, 1344 "Does reorder detection fade, if so how many microseconds (0 means never)"); 1345 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1346 SYSCTL_CHILDREN(rack_tlp), 1347 OID_AUTO, "pktdelay", CTLFLAG_RW, 1348 &rack_pkt_delay, 1000, 1349 "Extra RACK time (in microseconds) besides reordering thresh"); 1350 1351 /* Timer related controls */ 1352 rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1353 SYSCTL_CHILDREN(rack_sysctl_root), 1354 OID_AUTO, 1355 "timers", 1356 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1357 "Timer related controls"); 1358 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1359 SYSCTL_CHILDREN(rack_timers), 1360 OID_AUTO, "persmin", CTLFLAG_RW, 1361 &rack_persist_min, 250000, 1362 "What is the minimum time in microseconds between persists"); 1363 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1364 SYSCTL_CHILDREN(rack_timers), 1365 OID_AUTO, "persmax", CTLFLAG_RW, 1366 &rack_persist_max, 2000000, 1367 "What is the largest delay in microseconds between persists"); 1368 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1369 SYSCTL_CHILDREN(rack_timers), 1370 OID_AUTO, "delayed_ack", CTLFLAG_RW, 1371 &rack_delayed_ack_time, 40000, 1372 "Delayed ack time (40ms in microseconds)"); 1373 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1374 SYSCTL_CHILDREN(rack_timers), 1375 OID_AUTO, "minrto", CTLFLAG_RW, 1376 &rack_rto_min, 30000, 1377 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP"); 1378 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1379 SYSCTL_CHILDREN(rack_timers), 1380 OID_AUTO, "maxrto", CTLFLAG_RW, 1381 &rack_rto_max, 4000000, 1382 "Maximum RTO in microseconds -- should be at least as large as min_rto"); 1383 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1384 SYSCTL_CHILDREN(rack_timers), 1385 OID_AUTO, "minto", CTLFLAG_RW, 1386 &rack_min_to, 1000, 1387 "Minimum rack timeout in microseconds"); 1388 /* Measure controls */ 1389 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1390 SYSCTL_CHILDREN(rack_sysctl_root), 1391 OID_AUTO, 1392 "measure", 1393 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1394 "Measure related controls"); 1395 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1396 SYSCTL_CHILDREN(rack_measure), 1397 OID_AUTO, "wma_divisor", CTLFLAG_RW, 1398 &rack_wma_divisor, 8, 1399 "When doing b/w calculation what is the divisor for the WMA"); 1400 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1401 SYSCTL_CHILDREN(rack_measure), 1402 OID_AUTO, "end_cwnd", CTLFLAG_RW, 1403 &rack_cwnd_block_ends_measure, 0, 1404 "Does a cwnd just-return end the measurement window (app limited)"); 1405 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1406 SYSCTL_CHILDREN(rack_measure), 1407 OID_AUTO, "end_rwnd", CTLFLAG_RW, 1408 &rack_rwnd_block_ends_measure, 0, 1409 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); 1410 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1411 SYSCTL_CHILDREN(rack_measure), 1412 OID_AUTO, "min_target", CTLFLAG_RW, 1413 &rack_def_data_window, 20, 1414 "What is the minimum target window (in mss) for a GP measurements"); 1415 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1416 SYSCTL_CHILDREN(rack_measure), 1417 OID_AUTO, "goal_bdp", CTLFLAG_RW, 1418 &rack_goal_bdp, 2, 1419 "What is the goal BDP to measure"); 1420 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1421 SYSCTL_CHILDREN(rack_measure), 1422 OID_AUTO, "min_srtts", CTLFLAG_RW, 1423 &rack_min_srtts, 1, 1424 "What is the goal BDP to measure"); 1425 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1426 SYSCTL_CHILDREN(rack_measure), 1427 OID_AUTO, "min_measure_tim", CTLFLAG_RW, 1428 &rack_min_measure_usec, 0, 1429 "What is the Minimum time time for a measurement if 0, this is off"); 1430 /* Features */ 1431 rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1432 SYSCTL_CHILDREN(rack_sysctl_root), 1433 OID_AUTO, 1434 "features", 1435 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1436 "Feature controls"); 1437 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1438 SYSCTL_CHILDREN(rack_features), 1439 OID_AUTO, "rxt_clamp_thresh", CTLFLAG_RW, 1440 &rack_rxt_clamp_thresh, 0, 1441 "Bit encoded clamping setup bits CCCC CCCCC UUUU UULF PPPP PPPP PPPP PPPP"); 1442 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1443 SYSCTL_CHILDREN(rack_features), 1444 OID_AUTO, "hybrid_set_maxseg", CTLFLAG_RW, 1445 &rack_hybrid_allow_set_maxseg, 0, 1446 "Should hybrid pacing allow the setmss command"); 1447 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1448 SYSCTL_CHILDREN(rack_features), 1449 OID_AUTO, "cmpack", CTLFLAG_RW, 1450 &rack_use_cmp_acks, 1, 1451 "Should RACK have LRO send compressed acks"); 1452 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1453 SYSCTL_CHILDREN(rack_features), 1454 OID_AUTO, "fsb", CTLFLAG_RW, 1455 &rack_use_fsb, 1, 1456 "Should RACK use the fast send block?"); 1457 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1458 SYSCTL_CHILDREN(rack_features), 1459 OID_AUTO, "rfo", CTLFLAG_RW, 1460 &rack_use_rfo, 1, 1461 "Should RACK use rack_fast_output()?"); 1462 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1463 SYSCTL_CHILDREN(rack_features), 1464 OID_AUTO, "rsmrfo", CTLFLAG_RW, 1465 &rack_use_rsm_rfo, 1, 1466 "Should RACK use rack_fast_rsm_output()?"); 1467 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1468 SYSCTL_CHILDREN(rack_features), 1469 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW, 1470 &rack_enable_mqueue_for_nonpaced, 0, 1471 "Should RACK use mbuf queuing for non-paced connections"); 1472 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1473 SYSCTL_CHILDREN(rack_features), 1474 OID_AUTO, "hystartplusplus", CTLFLAG_RW, 1475 &rack_do_hystart, 0, 1476 "Should RACK enable HyStart++ on connections?"); 1477 /* Misc rack controls */ 1478 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1479 SYSCTL_CHILDREN(rack_sysctl_root), 1480 OID_AUTO, 1481 "misc", 1482 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1483 "Misc related controls"); 1484 #ifdef TCP_ACCOUNTING 1485 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1486 SYSCTL_CHILDREN(rack_misc), 1487 OID_AUTO, "tcp_acct", CTLFLAG_RW, 1488 &rack_tcp_accounting, 0, 1489 "Should we turn on TCP accounting for all rack sessions?"); 1490 #endif 1491 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1492 SYSCTL_CHILDREN(rack_misc), 1493 OID_AUTO, "dnd", CTLFLAG_RW, 1494 &rack_dnd_default, 0, 1495 "Do not disturb default for rack_rrr = 3"); 1496 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1497 SYSCTL_CHILDREN(rack_misc), 1498 OID_AUTO, "sad_seg_per", CTLFLAG_RW, 1499 &sad_seg_size_per, 800, 1500 "Percentage of segment size needed in a sack 800 = 80.0?"); 1501 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1502 SYSCTL_CHILDREN(rack_misc), 1503 OID_AUTO, "rxt_controls", CTLFLAG_RW, 1504 &rack_rxt_controls, 0, 1505 "Retransmit sending size controls (valid values 0, 1, 2 default=1)?"); 1506 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1507 SYSCTL_CHILDREN(rack_misc), 1508 OID_AUTO, "rack_hibeta", CTLFLAG_RW, 1509 &rack_hibeta_setting, 0, 1510 "Do we ue a high beta (80 instead of 50)?"); 1511 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1512 SYSCTL_CHILDREN(rack_misc), 1513 OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW, 1514 &rack_apply_rtt_with_reduced_conf, 0, 1515 "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?"); 1516 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1517 SYSCTL_CHILDREN(rack_misc), 1518 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW, 1519 &rack_dsack_std_based, 3, 1520 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?"); 1521 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1522 SYSCTL_CHILDREN(rack_misc), 1523 OID_AUTO, "prr_addback_max", CTLFLAG_RW, 1524 &rack_prr_addbackmax, 2, 1525 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?"); 1526 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1527 SYSCTL_CHILDREN(rack_misc), 1528 OID_AUTO, "stats_gets_ms", CTLFLAG_RW, 1529 &rack_stats_gets_ms_rtt, 1, 1530 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); 1531 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1532 SYSCTL_CHILDREN(rack_misc), 1533 OID_AUTO, "clientlowbuf", CTLFLAG_RW, 1534 &rack_client_low_buf, 0, 1535 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?"); 1536 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1537 SYSCTL_CHILDREN(rack_misc), 1538 OID_AUTO, "defprofile", CTLFLAG_RW, 1539 &rack_def_profile, 0, 1540 "Should RACK use a default profile (0=no, num == profile num)?"); 1541 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1542 SYSCTL_CHILDREN(rack_misc), 1543 OID_AUTO, "shared_cwnd", CTLFLAG_RW, 1544 &rack_enable_shared_cwnd, 1, 1545 "Should RACK try to use the shared cwnd on connections where allowed"); 1546 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1547 SYSCTL_CHILDREN(rack_misc), 1548 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW, 1549 &rack_limits_scwnd, 1, 1550 "Should RACK place low end time limits on the shared cwnd feature"); 1551 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1552 SYSCTL_CHILDREN(rack_misc), 1553 OID_AUTO, "no_prr", CTLFLAG_RW, 1554 &rack_disable_prr, 0, 1555 "Should RACK not use prr and only pace (must have pacing on)"); 1556 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1557 SYSCTL_CHILDREN(rack_misc), 1558 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1559 &rack_verbose_logging, 0, 1560 "Should RACK black box logging be verbose"); 1561 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1562 SYSCTL_CHILDREN(rack_misc), 1563 OID_AUTO, "data_after_close", CTLFLAG_RW, 1564 &rack_ignore_data_after_close, 1, 1565 "Do we hold off sending a RST until all pending data is ack'd"); 1566 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1567 SYSCTL_CHILDREN(rack_misc), 1568 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1569 &rack_sack_not_required, 1, 1570 "Do we allow rack to run on connections not supporting SACK"); 1571 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1572 SYSCTL_CHILDREN(rack_misc), 1573 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 1574 &rack_send_a_lot_in_prr, 1, 1575 "Send a lot in prr"); 1576 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1577 SYSCTL_CHILDREN(rack_misc), 1578 OID_AUTO, "autoscale", CTLFLAG_RW, 1579 &rack_autosndbuf_inc, 20, 1580 "What percentage should rack scale up its snd buffer by?"); 1581 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1582 SYSCTL_CHILDREN(rack_misc), 1583 OID_AUTO, "rnds_for_rxt_clamp", CTLFLAG_RW, 1584 &rack_rxt_min_rnds, 10, 1585 "Number of rounds needed between RTT clamps due to high loss rates"); 1586 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1587 SYSCTL_CHILDREN(rack_misc), 1588 OID_AUTO, "rnds_for_unclamp", CTLFLAG_RW, 1589 &rack_unclamp_round_thresh, 100, 1590 "Number of rounds needed with no loss to unclamp"); 1591 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1592 SYSCTL_CHILDREN(rack_misc), 1593 OID_AUTO, "rxt_threshs_for_unclamp", CTLFLAG_RW, 1594 &rack_unclamp_rxt_thresh, 5, 1595 "Percentage of retransmits we need to be under to unclamp (5 = .5 percent)\n"); 1596 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1597 SYSCTL_CHILDREN(rack_misc), 1598 OID_AUTO, "clamp_ss_upper", CTLFLAG_RW, 1599 &rack_clamp_ss_upper, 110, 1600 "Clamp percentage ceiling in SS?"); 1601 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1602 SYSCTL_CHILDREN(rack_misc), 1603 OID_AUTO, "clamp_ca_upper", CTLFLAG_RW, 1604 &rack_clamp_ca_upper, 110, 1605 "Clamp percentage ceiling in CA?"); 1606 /* Sack Attacker detection stuff */ 1607 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1608 SYSCTL_CHILDREN(rack_attack), 1609 OID_AUTO, "merge_out", CTLFLAG_RW, 1610 &rack_merge_out_sacks_on_attack, 0, 1611 "Do we merge the sendmap when we decide we are being attacked?"); 1612 1613 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1614 SYSCTL_CHILDREN(rack_attack), 1615 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 1616 &rack_highest_sack_thresh_seen, 0, 1617 "Highest sack to ack ratio seen"); 1618 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1619 SYSCTL_CHILDREN(rack_attack), 1620 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW, 1621 &rack_highest_move_thresh_seen, 0, 1622 "Highest move to non-move ratio seen"); 1623 rack_ack_total = counter_u64_alloc(M_WAITOK); 1624 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1625 SYSCTL_CHILDREN(rack_attack), 1626 OID_AUTO, "acktotal", CTLFLAG_RD, 1627 &rack_ack_total, 1628 "Total number of Ack's"); 1629 rack_express_sack = counter_u64_alloc(M_WAITOK); 1630 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1631 SYSCTL_CHILDREN(rack_attack), 1632 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1633 &rack_express_sack, 1634 "Total expresss number of Sack's"); 1635 rack_sack_total = counter_u64_alloc(M_WAITOK); 1636 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1637 SYSCTL_CHILDREN(rack_attack), 1638 OID_AUTO, "sacktotal", CTLFLAG_RD, 1639 &rack_sack_total, 1640 "Total number of SACKs"); 1641 rack_move_none = counter_u64_alloc(M_WAITOK); 1642 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1643 SYSCTL_CHILDREN(rack_attack), 1644 OID_AUTO, "move_none", CTLFLAG_RD, 1645 &rack_move_none, 1646 "Total number of SACK index reuse of positions under threshold"); 1647 rack_move_some = counter_u64_alloc(M_WAITOK); 1648 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1649 SYSCTL_CHILDREN(rack_attack), 1650 OID_AUTO, "move_some", CTLFLAG_RD, 1651 &rack_move_some, 1652 "Total number of SACK index reuse of positions over threshold"); 1653 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 1654 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1655 SYSCTL_CHILDREN(rack_attack), 1656 OID_AUTO, "attacks", CTLFLAG_RD, 1657 &rack_sack_attacks_detected, 1658 "Total number of SACK attackers that had sack disabled"); 1659 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 1660 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1661 SYSCTL_CHILDREN(rack_attack), 1662 OID_AUTO, "reversed", CTLFLAG_RD, 1663 &rack_sack_attacks_reversed, 1664 "Total number of SACK attackers that were later determined false positive"); 1665 rack_sack_attacks_suspect = counter_u64_alloc(M_WAITOK); 1666 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1667 SYSCTL_CHILDREN(rack_attack), 1668 OID_AUTO, "suspect", CTLFLAG_RD, 1669 &rack_sack_attacks_suspect, 1670 "Total number of SACKs that triggered early detection"); 1671 1672 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 1673 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1674 SYSCTL_CHILDREN(rack_attack), 1675 OID_AUTO, "nextmerge", CTLFLAG_RD, 1676 &rack_sack_used_next_merge, 1677 "Total number of times we used the next merge"); 1678 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 1679 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1680 SYSCTL_CHILDREN(rack_attack), 1681 OID_AUTO, "prevmerge", CTLFLAG_RD, 1682 &rack_sack_used_prev_merge, 1683 "Total number of times we used the prev merge"); 1684 /* Counters */ 1685 rack_total_bytes = counter_u64_alloc(M_WAITOK); 1686 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1687 SYSCTL_CHILDREN(rack_counters), 1688 OID_AUTO, "totalbytes", CTLFLAG_RD, 1689 &rack_total_bytes, 1690 "Total number of bytes sent"); 1691 rack_fto_send = counter_u64_alloc(M_WAITOK); 1692 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1693 SYSCTL_CHILDREN(rack_counters), 1694 OID_AUTO, "fto_send", CTLFLAG_RD, 1695 &rack_fto_send, "Total number of rack_fast_output sends"); 1696 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK); 1697 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1698 SYSCTL_CHILDREN(rack_counters), 1699 OID_AUTO, "fto_rsm_send", CTLFLAG_RD, 1700 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends"); 1701 rack_nfto_resend = counter_u64_alloc(M_WAITOK); 1702 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1703 SYSCTL_CHILDREN(rack_counters), 1704 OID_AUTO, "nfto_resend", CTLFLAG_RD, 1705 &rack_nfto_resend, "Total number of rack_output retransmissions"); 1706 rack_non_fto_send = counter_u64_alloc(M_WAITOK); 1707 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1708 SYSCTL_CHILDREN(rack_counters), 1709 OID_AUTO, "nfto_send", CTLFLAG_RD, 1710 &rack_non_fto_send, "Total number of rack_output first sends"); 1711 rack_extended_rfo = counter_u64_alloc(M_WAITOK); 1712 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1713 SYSCTL_CHILDREN(rack_counters), 1714 OID_AUTO, "rfo_extended", CTLFLAG_RD, 1715 &rack_extended_rfo, "Total number of times we extended rfo"); 1716 1717 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK); 1718 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1719 SYSCTL_CHILDREN(rack_counters), 1720 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD, 1721 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing"); 1722 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK); 1723 1724 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1725 SYSCTL_CHILDREN(rack_counters), 1726 OID_AUTO, "hwpace_lost", CTLFLAG_RD, 1727 &rack_hw_pace_lost, "Total number of times we failed to initialize hw pacing"); 1728 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 1729 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1730 SYSCTL_CHILDREN(rack_counters), 1731 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 1732 &rack_tlp_tot, 1733 "Total number of tail loss probe expirations"); 1734 rack_tlp_newdata = counter_u64_alloc(M_WAITOK); 1735 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1736 SYSCTL_CHILDREN(rack_counters), 1737 OID_AUTO, "tlp_new", CTLFLAG_RD, 1738 &rack_tlp_newdata, 1739 "Total number of tail loss probe sending new data"); 1740 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 1741 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1742 SYSCTL_CHILDREN(rack_counters), 1743 OID_AUTO, "tlp_retran", CTLFLAG_RD, 1744 &rack_tlp_retran, 1745 "Total number of tail loss probe sending retransmitted data"); 1746 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 1747 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1748 SYSCTL_CHILDREN(rack_counters), 1749 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 1750 &rack_tlp_retran_bytes, 1751 "Total bytes of tail loss probe sending retransmitted data"); 1752 rack_to_tot = counter_u64_alloc(M_WAITOK); 1753 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1754 SYSCTL_CHILDREN(rack_counters), 1755 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 1756 &rack_to_tot, 1757 "Total number of times the rack to expired"); 1758 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 1759 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1760 SYSCTL_CHILDREN(rack_counters), 1761 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 1762 &rack_saw_enobuf, 1763 "Total number of times a sends returned enobuf for non-hdwr paced connections"); 1764 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK); 1765 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1766 SYSCTL_CHILDREN(rack_counters), 1767 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD, 1768 &rack_saw_enobuf_hw, 1769 "Total number of times a send returned enobuf for hdwr paced connections"); 1770 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 1771 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1772 SYSCTL_CHILDREN(rack_counters), 1773 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 1774 &rack_saw_enetunreach, 1775 "Total number of times a send received a enetunreachable"); 1776 rack_hot_alloc = counter_u64_alloc(M_WAITOK); 1777 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1778 SYSCTL_CHILDREN(rack_counters), 1779 OID_AUTO, "alloc_hot", CTLFLAG_RD, 1780 &rack_hot_alloc, 1781 "Total allocations from the top of our list"); 1782 rack_to_alloc = counter_u64_alloc(M_WAITOK); 1783 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1784 SYSCTL_CHILDREN(rack_counters), 1785 OID_AUTO, "allocs", CTLFLAG_RD, 1786 &rack_to_alloc, 1787 "Total allocations of tracking structures"); 1788 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 1789 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1790 SYSCTL_CHILDREN(rack_counters), 1791 OID_AUTO, "allochard", CTLFLAG_RD, 1792 &rack_to_alloc_hard, 1793 "Total allocations done with sleeping the hard way"); 1794 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 1795 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1796 SYSCTL_CHILDREN(rack_counters), 1797 OID_AUTO, "allocemerg", CTLFLAG_RD, 1798 &rack_to_alloc_emerg, 1799 "Total allocations done from emergency cache"); 1800 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 1801 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1802 SYSCTL_CHILDREN(rack_counters), 1803 OID_AUTO, "alloc_limited", CTLFLAG_RD, 1804 &rack_to_alloc_limited, 1805 "Total allocations dropped due to limit"); 1806 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 1807 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1808 SYSCTL_CHILDREN(rack_counters), 1809 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 1810 &rack_alloc_limited_conns, 1811 "Connections with allocations dropped due to limit"); 1812 rack_split_limited = counter_u64_alloc(M_WAITOK); 1813 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1814 SYSCTL_CHILDREN(rack_counters), 1815 OID_AUTO, "split_limited", CTLFLAG_RD, 1816 &rack_split_limited, 1817 "Split allocations dropped due to limit"); 1818 rack_rxt_clamps_cwnd = counter_u64_alloc(M_WAITOK); 1819 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1820 SYSCTL_CHILDREN(rack_counters), 1821 OID_AUTO, "rxt_clamps_cwnd", CTLFLAG_RD, 1822 &rack_rxt_clamps_cwnd, 1823 "Number of times that excessive rxt clamped the cwnd down"); 1824 rack_rxt_clamps_cwnd_uniq = counter_u64_alloc(M_WAITOK); 1825 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1826 SYSCTL_CHILDREN(rack_counters), 1827 OID_AUTO, "rxt_clamps_cwnd_uniq", CTLFLAG_RD, 1828 &rack_rxt_clamps_cwnd_uniq, 1829 "Number of connections that have had excessive rxt clamped the cwnd down"); 1830 rack_persists_sends = counter_u64_alloc(M_WAITOK); 1831 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1832 SYSCTL_CHILDREN(rack_counters), 1833 OID_AUTO, "persist_sends", CTLFLAG_RD, 1834 &rack_persists_sends, 1835 "Number of times we sent a persist probe"); 1836 rack_persists_acks = counter_u64_alloc(M_WAITOK); 1837 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1838 SYSCTL_CHILDREN(rack_counters), 1839 OID_AUTO, "persist_acks", CTLFLAG_RD, 1840 &rack_persists_acks, 1841 "Number of times a persist probe was acked"); 1842 rack_persists_loss = counter_u64_alloc(M_WAITOK); 1843 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1844 SYSCTL_CHILDREN(rack_counters), 1845 OID_AUTO, "persist_loss", CTLFLAG_RD, 1846 &rack_persists_loss, 1847 "Number of times we detected a lost persist probe (no ack)"); 1848 rack_persists_lost_ends = counter_u64_alloc(M_WAITOK); 1849 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1850 SYSCTL_CHILDREN(rack_counters), 1851 OID_AUTO, "persist_loss_ends", CTLFLAG_RD, 1852 &rack_persists_lost_ends, 1853 "Number of lost persist probe (no ack) that the run ended with a PERSIST abort"); 1854 #ifdef INVARIANTS 1855 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK); 1856 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1857 SYSCTL_CHILDREN(rack_counters), 1858 OID_AUTO, "map_adjust_req", CTLFLAG_RD, 1859 &rack_adjust_map_bw, 1860 "Number of times we hit the case where the sb went up and down on a sendmap entry"); 1861 #endif 1862 rack_multi_single_eq = counter_u64_alloc(M_WAITOK); 1863 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1864 SYSCTL_CHILDREN(rack_counters), 1865 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD, 1866 &rack_multi_single_eq, 1867 "Number of compressed acks total represented"); 1868 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK); 1869 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1870 SYSCTL_CHILDREN(rack_counters), 1871 OID_AUTO, "cmp_ack_not", CTLFLAG_RD, 1872 &rack_proc_non_comp_ack, 1873 "Number of non compresseds acks that we processed"); 1874 1875 1876 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 1877 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1878 SYSCTL_CHILDREN(rack_counters), 1879 OID_AUTO, "sack_long", CTLFLAG_RD, 1880 &rack_sack_proc_all, 1881 "Total times we had to walk whole list for sack processing"); 1882 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 1883 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1884 SYSCTL_CHILDREN(rack_counters), 1885 OID_AUTO, "sack_restart", CTLFLAG_RD, 1886 &rack_sack_proc_restart, 1887 "Total times we had to walk whole list due to a restart"); 1888 rack_sack_proc_short = counter_u64_alloc(M_WAITOK); 1889 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1890 SYSCTL_CHILDREN(rack_counters), 1891 OID_AUTO, "sack_short", CTLFLAG_RD, 1892 &rack_sack_proc_short, 1893 "Total times we took shortcut for sack processing"); 1894 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK); 1895 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1896 SYSCTL_CHILDREN(rack_attack), 1897 OID_AUTO, "skipacked", CTLFLAG_RD, 1898 &rack_sack_skipped_acked, 1899 "Total number of times we skipped previously sacked"); 1900 rack_sack_splits = counter_u64_alloc(M_WAITOK); 1901 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1902 SYSCTL_CHILDREN(rack_attack), 1903 OID_AUTO, "ofsplit", CTLFLAG_RD, 1904 &rack_sack_splits, 1905 "Total number of times we did the old fashion tree split"); 1906 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 1907 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1908 SYSCTL_CHILDREN(rack_counters), 1909 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 1910 &rack_input_idle_reduces, 1911 "Total number of idle reductions on input"); 1912 rack_collapsed_win_seen = counter_u64_alloc(M_WAITOK); 1913 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1914 SYSCTL_CHILDREN(rack_counters), 1915 OID_AUTO, "collapsed_win_seen", CTLFLAG_RD, 1916 &rack_collapsed_win_seen, 1917 "Total number of collapsed window events seen (where our window shrinks)"); 1918 1919 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 1920 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1921 SYSCTL_CHILDREN(rack_counters), 1922 OID_AUTO, "collapsed_win", CTLFLAG_RD, 1923 &rack_collapsed_win, 1924 "Total number of collapsed window events where we mark packets"); 1925 rack_collapsed_win_rxt = counter_u64_alloc(M_WAITOK); 1926 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1927 SYSCTL_CHILDREN(rack_counters), 1928 OID_AUTO, "collapsed_win_rxt", CTLFLAG_RD, 1929 &rack_collapsed_win_rxt, 1930 "Total number of packets that were retransmitted"); 1931 rack_collapsed_win_rxt_bytes = counter_u64_alloc(M_WAITOK); 1932 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1933 SYSCTL_CHILDREN(rack_counters), 1934 OID_AUTO, "collapsed_win_bytes", CTLFLAG_RD, 1935 &rack_collapsed_win_rxt_bytes, 1936 "Total number of bytes that were retransmitted"); 1937 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 1938 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1939 SYSCTL_CHILDREN(rack_counters), 1940 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 1941 &rack_try_scwnd, 1942 "Total number of scwnd attempts"); 1943 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1944 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1945 OID_AUTO, "outsize", CTLFLAG_RD, 1946 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 1947 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 1948 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1949 OID_AUTO, "opts", CTLFLAG_RD, 1950 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 1951 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 1952 SYSCTL_CHILDREN(rack_sysctl_root), 1953 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1954 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 1955 } 1956 1957 static uint32_t 1958 rc_init_window(struct tcp_rack *rack) 1959 { 1960 uint32_t win; 1961 1962 if (rack->rc_init_win == 0) { 1963 /* 1964 * Nothing set by the user, use the system stack 1965 * default. 1966 */ 1967 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 1968 } 1969 win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win; 1970 return (win); 1971 } 1972 1973 static uint64_t 1974 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 1975 { 1976 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 1977 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 1978 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1979 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 1980 else 1981 return (rack->r_ctl.rc_fixed_pacing_rate_ca); 1982 } 1983 1984 static void 1985 rack_log_hybrid_bw(struct tcp_rack *rack, uint32_t seq, uint64_t cbw, uint64_t tim, 1986 uint64_t data, uint8_t mod, uint16_t aux, 1987 struct tcp_sendfile_track *cur, int line) 1988 { 1989 #ifdef TCP_REQUEST_TRK 1990 int do_log = 0; 1991 1992 /* 1993 * The rate cap one is noisy and only should come out when normal BB logging 1994 * is enabled, the other logs (not RATE_CAP and NOT CAP_CALC) only come out 1995 * once per chunk and make up the BBpoint that can be turned on by the client. 1996 */ 1997 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) { 1998 /* 1999 * The very noisy two need to only come out when 2000 * we have verbose logging on. 2001 */ 2002 if (rack_verbose_logging != 0) 2003 do_log = tcp_bblogging_on(rack->rc_tp); 2004 else 2005 do_log = 0; 2006 } else if (mod != HYBRID_LOG_BW_MEASURE) { 2007 /* 2008 * All other less noisy logs here except the measure which 2009 * also needs to come out on the point and the log. 2010 */ 2011 do_log = tcp_bblogging_on(rack->rc_tp); 2012 } else { 2013 do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING); 2014 } 2015 2016 if (do_log) { 2017 union tcp_log_stackspecific log; 2018 struct timeval tv; 2019 uint64_t lt_bw; 2020 2021 /* Convert our ms to a microsecond */ 2022 memset(&log, 0, sizeof(log)); 2023 2024 log.u_bbr.cwnd_gain = line; 2025 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2026 log.u_bbr.rttProp = tim; 2027 log.u_bbr.bw_inuse = cbw; 2028 log.u_bbr.delRate = rack_get_gp_est(rack); 2029 lt_bw = rack_get_lt_bw(rack); 2030 log.u_bbr.flex1 = seq; 2031 log.u_bbr.pacing_gain = aux; 2032 /* lt_bw = < flex3 | flex2 > */ 2033 log.u_bbr.flex2 = (uint32_t)(lt_bw & 0x00000000ffffffff); 2034 log.u_bbr.flex3 = (uint32_t)((lt_bw >> 32) & 0x00000000ffffffff); 2035 /* Record the last obtained us rtt in inflight */ 2036 if (cur == NULL) { 2037 /* Make sure we are looking at the right log if an overide comes in */ 2038 cur = rack->r_ctl.rc_last_sft; 2039 } 2040 if (rack->r_ctl.rack_rs.rs_flags != RACK_RTT_EMPTY) 2041 log.u_bbr.inflight = rack->r_ctl.rack_rs.rs_us_rtt; 2042 else { 2043 /* Use the last known rtt i.e. the rack-rtt */ 2044 log.u_bbr.inflight = rack->rc_rack_rtt; 2045 } 2046 if (cur != NULL) { 2047 uint64_t off; 2048 2049 log.u_bbr.cur_del_rate = cur->deadline; 2050 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) { 2051 /* start = < lost | pkt_epoch > */ 2052 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); 2053 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2054 log.u_bbr.flex6 = cur->start_seq; 2055 log.u_bbr.pkts_out = cur->end_seq; 2056 } else { 2057 /* start = < lost | pkt_epoch > */ 2058 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); 2059 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2060 /* end = < pkts_out | flex6 > */ 2061 log.u_bbr.flex6 = (uint32_t)(cur->end & 0x00000000ffffffff); 2062 log.u_bbr.pkts_out = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); 2063 } 2064 /* first_send = <lt_epoch | epoch> */ 2065 log.u_bbr.epoch = (uint32_t)(cur->first_send & 0x00000000ffffffff); 2066 log.u_bbr.lt_epoch = (uint32_t)((cur->first_send >> 32) & 0x00000000ffffffff); 2067 /* localtime = <delivered | applimited>*/ 2068 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); 2069 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 2070 #ifdef TCP_REQUEST_TRK 2071 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 2072 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 2073 #endif 2074 log.u_bbr.flex4 = (uint32_t)(rack->rc_tp->t_sndbytes - cur->sent_at_fs); 2075 log.u_bbr.flex5 = (uint32_t)(rack->rc_tp->t_snd_rxt_bytes - cur->rxt_at_fs); 2076 log.u_bbr.flex7 = (uint16_t)cur->hybrid_flags; 2077 } else { 2078 log.u_bbr.flex7 = 0xffff; 2079 log.u_bbr.cur_del_rate = 0xffffffffffffffff; 2080 } 2081 /* 2082 * Compose bbr_state to be a bit wise 0000ADHF 2083 * where A is the always_pace flag 2084 * where D is the dgp_on flag 2085 * where H is the hybrid_mode on flag 2086 * where F is the use_fixed_rate flag. 2087 */ 2088 log.u_bbr.bbr_state = rack->rc_always_pace; 2089 log.u_bbr.bbr_state <<= 1; 2090 log.u_bbr.bbr_state |= rack->dgp_on; 2091 log.u_bbr.bbr_state <<= 1; 2092 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 2093 log.u_bbr.bbr_state <<= 1; 2094 log.u_bbr.bbr_state |= rack->use_fixed_rate; 2095 log.u_bbr.flex8 = mod; 2096 tcp_log_event(rack->rc_tp, NULL, 2097 &rack->rc_inp->inp_socket->so_rcv, 2098 &rack->rc_inp->inp_socket->so_snd, 2099 TCP_HYBRID_PACING_LOG, 0, 2100 0, &log, false, NULL, __func__, __LINE__, &tv); 2101 2102 } 2103 #endif 2104 } 2105 2106 #ifdef TCP_REQUEST_TRK 2107 static void 2108 rack_log_hybrid_sends(struct tcp_rack *rack, struct tcp_sendfile_track *cur, int line) 2109 { 2110 if (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) { 2111 union tcp_log_stackspecific log; 2112 struct timeval tv; 2113 uint64_t off; 2114 2115 /* Convert our ms to a microsecond */ 2116 memset(&log, 0, sizeof(log)); 2117 2118 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2119 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 2120 log.u_bbr.delRate = cur->sent_at_fs; 2121 log.u_bbr.rttProp = rack->rc_tp->t_snd_rxt_bytes; 2122 log.u_bbr.bw_inuse = cur->rxt_at_fs; 2123 log.u_bbr.cwnd_gain = line; 2124 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 2125 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 2126 /* start = < flex1 | flex2 > */ 2127 log.u_bbr.flex2 = (uint32_t)(cur->start & 0x00000000ffffffff); 2128 log.u_bbr.flex1 = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2129 /* end = < flex3 | flex4 > */ 2130 log.u_bbr.flex4 = (uint32_t)(cur->end & 0x00000000ffffffff); 2131 log.u_bbr.flex3 = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); 2132 2133 /* localtime = <delivered | applimited>*/ 2134 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); 2135 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 2136 /* client timestamp = <lt_epoch | epoch>*/ 2137 log.u_bbr.epoch = (uint32_t)(cur->timestamp & 0x00000000ffffffff); 2138 log.u_bbr.lt_epoch = (uint32_t)((cur->timestamp >> 32) & 0x00000000ffffffff); 2139 /* now set all the flags in */ 2140 log.u_bbr.pkts_out = cur->hybrid_flags; 2141 log.u_bbr.flex6 = cur->flags; 2142 /* 2143 * Last send time = <flex5 | pkt_epoch> note we do not distinguish cases 2144 * where a false retransmit occurred so first_send <-> lastsend may 2145 * include longer time then it actually took if we have a false rxt. 2146 */ 2147 log.u_bbr.pkt_epoch = (uint32_t)(rack->r_ctl.last_tmit_time_acked & 0x00000000ffffffff); 2148 log.u_bbr.flex5 = (uint32_t)((rack->r_ctl.last_tmit_time_acked >> 32) & 0x00000000ffffffff); 2149 2150 log.u_bbr.flex8 = HYBRID_LOG_SENT_LOST; 2151 tcp_log_event(rack->rc_tp, NULL, 2152 &rack->rc_inp->inp_socket->so_rcv, 2153 &rack->rc_inp->inp_socket->so_snd, 2154 TCP_HYBRID_PACING_LOG, 0, 2155 0, &log, false, NULL, __func__, __LINE__, &tv); 2156 } 2157 } 2158 #endif 2159 2160 static inline uint64_t 2161 rack_compensate_for_linerate(struct tcp_rack *rack, uint64_t bw) 2162 { 2163 uint64_t ret_bw, ether; 2164 uint64_t u_segsiz; 2165 2166 ether = rack->rc_tp->t_maxseg + sizeof(struct tcphdr); 2167 if (rack->r_is_v6){ 2168 #ifdef INET6 2169 ether += sizeof(struct ip6_hdr); 2170 #endif 2171 ether += 14; /* eheader size 6+6+2 */ 2172 } else { 2173 #ifdef INET 2174 ether += sizeof(struct ip); 2175 #endif 2176 ether += 14; /* eheader size 6+6+2 */ 2177 } 2178 u_segsiz = (uint64_t)min(ctf_fixed_maxseg(rack->rc_tp), rack->r_ctl.rc_pace_min_segs); 2179 ret_bw = bw; 2180 ret_bw *= ether; 2181 ret_bw /= u_segsiz; 2182 return (ret_bw); 2183 } 2184 2185 static void 2186 rack_rate_cap_bw(struct tcp_rack *rack, uint64_t *bw, int *capped) 2187 { 2188 #ifdef TCP_REQUEST_TRK 2189 struct timeval tv; 2190 uint64_t timenow, timeleft, lenleft, lengone, calcbw; 2191 #endif 2192 2193 if (rack->r_ctl.bw_rate_cap == 0) 2194 return; 2195 #ifdef TCP_REQUEST_TRK 2196 if (rack->rc_catch_up && rack->rc_hybrid_mode && 2197 (rack->r_ctl.rc_last_sft != NULL)) { 2198 /* 2199 * We have a dynamic cap. The original target 2200 * is in bw_rate_cap, but we need to look at 2201 * how long it is until we hit the deadline. 2202 */ 2203 struct tcp_sendfile_track *ent; 2204 2205 ent = rack->r_ctl.rc_last_sft; 2206 microuptime(&tv); 2207 timenow = tcp_tv_to_lusectick(&tv); 2208 if (timenow >= ent->deadline) { 2209 /* No time left we do DGP only */ 2210 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2211 0, 0, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__); 2212 rack->r_ctl.bw_rate_cap = 0; 2213 return; 2214 } 2215 /* We have the time */ 2216 timeleft = rack->r_ctl.rc_last_sft->deadline - timenow; 2217 if (timeleft < HPTS_MSEC_IN_SEC) { 2218 /* If there is less than a ms left just use DGPs rate */ 2219 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2220 0, timeleft, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__); 2221 rack->r_ctl.bw_rate_cap = 0; 2222 return; 2223 } 2224 /* 2225 * Now lets find the amount of data left to send. 2226 * 2227 * Now ideally we want to use the end_seq to figure out how much more 2228 * but it might not be possible (only if we have the TRACK_FG_COMP on the entry.. 2229 */ 2230 if (ent->flags & TCP_TRK_TRACK_FLG_COMP) { 2231 if (SEQ_GT(ent->end_seq, rack->rc_tp->snd_una)) 2232 lenleft = ent->end_seq - rack->rc_tp->snd_una; 2233 else { 2234 /* TSNH, we should catch it at the send */ 2235 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2236 0, timeleft, 0, HYBRID_LOG_CAPERROR, 0, ent, __LINE__); 2237 rack->r_ctl.bw_rate_cap = 0; 2238 return; 2239 } 2240 } else { 2241 /* 2242 * The hard way, figure out how much is gone and then 2243 * take that away from the total the client asked for 2244 * (thats off by tls overhead if this is tls). 2245 */ 2246 if (SEQ_GT(rack->rc_tp->snd_una, ent->start_seq)) 2247 lengone = rack->rc_tp->snd_una - ent->start_seq; 2248 else 2249 lengone = 0; 2250 if (lengone < (ent->end - ent->start)) 2251 lenleft = (ent->end - ent->start) - lengone; 2252 else { 2253 /* TSNH, we should catch it at the send */ 2254 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2255 0, timeleft, lengone, HYBRID_LOG_CAPERROR, 0, ent, __LINE__); 2256 rack->r_ctl.bw_rate_cap = 0; 2257 return; 2258 } 2259 } 2260 if (lenleft == 0) { 2261 /* We have it all sent */ 2262 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2263 0, timeleft, lenleft, HYBRID_LOG_ALLSENT, 0, ent, __LINE__); 2264 if (rack->r_ctl.bw_rate_cap) 2265 goto normal_ratecap; 2266 else 2267 return; 2268 } 2269 calcbw = lenleft * HPTS_USEC_IN_SEC; 2270 calcbw /= timeleft; 2271 /* Now we must compensate for IP/TCP overhead */ 2272 calcbw = rack_compensate_for_linerate(rack, calcbw); 2273 /* Update the bit rate cap */ 2274 rack->r_ctl.bw_rate_cap = calcbw; 2275 if ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && 2276 (rack_hybrid_allow_set_maxseg == 1) && 2277 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { 2278 /* Lets set in a smaller mss possibly here to match our rate-cap */ 2279 uint32_t orig_max; 2280 2281 orig_max = rack->r_ctl.rc_pace_max_segs; 2282 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; 2283 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, calcbw, ctf_fixed_maxseg(rack->rc_tp)); 2284 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5); 2285 } 2286 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2287 calcbw, timeleft, lenleft, HYBRID_LOG_CAP_CALC, 0, ent, __LINE__); 2288 if ((calcbw > 0) && (*bw > calcbw)) { 2289 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2290 *bw, ent->deadline, lenleft, HYBRID_LOG_RATE_CAP, 0, ent, __LINE__); 2291 *capped = 1; 2292 *bw = calcbw; 2293 } 2294 return; 2295 } 2296 normal_ratecap: 2297 #endif 2298 if ((rack->r_ctl.bw_rate_cap > 0) && (*bw > rack->r_ctl.bw_rate_cap)) { 2299 #ifdef TCP_REQUEST_TRK 2300 if (rack->rc_hybrid_mode && 2301 rack->rc_catch_up && 2302 (rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && 2303 (rack_hybrid_allow_set_maxseg == 1) && 2304 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { 2305 /* Lets set in a smaller mss possibly here to match our rate-cap */ 2306 uint32_t orig_max; 2307 2308 orig_max = rack->r_ctl.rc_pace_max_segs; 2309 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; 2310 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, rack->r_ctl.bw_rate_cap, ctf_fixed_maxseg(rack->rc_tp)); 2311 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5); 2312 } 2313 #endif 2314 *capped = 1; 2315 *bw = rack->r_ctl.bw_rate_cap; 2316 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2317 *bw, 0, 0, 2318 HYBRID_LOG_RATE_CAP, 1, NULL, __LINE__); 2319 } 2320 } 2321 2322 static uint64_t 2323 rack_get_gp_est(struct tcp_rack *rack) 2324 { 2325 uint64_t bw, lt_bw, ret_bw; 2326 2327 if (rack->rc_gp_filled == 0) { 2328 /* 2329 * We have yet no b/w measurement, 2330 * if we have a user set initial bw 2331 * return it. If we don't have that and 2332 * we have an srtt, use the tcp IW (10) to 2333 * calculate a fictional b/w over the SRTT 2334 * which is more or less a guess. Note 2335 * we don't use our IW from rack on purpose 2336 * so if we have like IW=30, we are not 2337 * calculating a "huge" b/w. 2338 */ 2339 uint64_t srtt; 2340 2341 lt_bw = rack_get_lt_bw(rack); 2342 if (lt_bw) { 2343 /* 2344 * No goodput bw but a long-term b/w does exist 2345 * lets use that. 2346 */ 2347 ret_bw = lt_bw; 2348 goto compensate; 2349 } 2350 if (rack->r_ctl.init_rate) 2351 return (rack->r_ctl.init_rate); 2352 2353 /* Ok lets come up with the IW guess, if we have a srtt */ 2354 if (rack->rc_tp->t_srtt == 0) { 2355 /* 2356 * Go with old pacing method 2357 * i.e. burst mitigation only. 2358 */ 2359 return (0); 2360 } 2361 /* Ok lets get the initial TCP win (not racks) */ 2362 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 2363 srtt = (uint64_t)rack->rc_tp->t_srtt; 2364 bw *= (uint64_t)USECS_IN_SECOND; 2365 bw /= srtt; 2366 ret_bw = bw; 2367 goto compensate; 2368 2369 } 2370 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 2371 /* Averaging is done, we can return the value */ 2372 bw = rack->r_ctl.gp_bw; 2373 } else { 2374 /* Still doing initial average must calculate */ 2375 bw = rack->r_ctl.gp_bw / max(rack->r_ctl.num_measurements, 1); 2376 } 2377 lt_bw = rack_get_lt_bw(rack); 2378 if (lt_bw == 0) { 2379 /* If we don't have one then equate it to the gp_bw */ 2380 lt_bw = rack->r_ctl.gp_bw; 2381 } 2382 if ((rack->r_cwnd_was_clamped == 1) && (rack->r_clamped_gets_lower > 0)){ 2383 /* if clamped take the lowest */ 2384 if (lt_bw < bw) 2385 ret_bw = lt_bw; 2386 else 2387 ret_bw = bw; 2388 } else { 2389 /* If not set for clamped to get lowest, take the highest */ 2390 if (lt_bw > bw) 2391 ret_bw = lt_bw; 2392 else 2393 ret_bw = bw; 2394 } 2395 /* 2396 * Now lets compensate based on the TCP/IP overhead. Our 2397 * Goodput estimate does not include this so we must pace out 2398 * a bit faster since our pacing calculations do. The pacing 2399 * calculations use the base ETHERNET_SEGMENT_SIZE and the segsiz 2400 * we are using to do this, so we do that here in the opposite 2401 * direction as well. This means that if we are tunneled and the 2402 * segsiz is say 1200 bytes we will get quite a boost, but its 2403 * compensated for in the pacing time the opposite way. 2404 */ 2405 compensate: 2406 ret_bw = rack_compensate_for_linerate(rack, ret_bw); 2407 return(ret_bw); 2408 } 2409 2410 2411 static uint64_t 2412 rack_get_bw(struct tcp_rack *rack) 2413 { 2414 uint64_t bw; 2415 2416 if (rack->use_fixed_rate) { 2417 /* Return the fixed pacing rate */ 2418 return (rack_get_fixed_pacing_bw(rack)); 2419 } 2420 bw = rack_get_gp_est(rack); 2421 return (bw); 2422 } 2423 2424 static uint16_t 2425 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 2426 { 2427 if (rack->use_fixed_rate) { 2428 return (100); 2429 } else if (rack->in_probe_rtt && (rsm == NULL)) 2430 return (rack->r_ctl.rack_per_of_gp_probertt); 2431 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 2432 rack->r_ctl.rack_per_of_gp_rec)) { 2433 if (rsm) { 2434 /* a retransmission always use the recovery rate */ 2435 return (rack->r_ctl.rack_per_of_gp_rec); 2436 } else if (rack->rack_rec_nonrxt_use_cr) { 2437 /* Directed to use the configured rate */ 2438 goto configured_rate; 2439 } else if (rack->rack_no_prr && 2440 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 2441 /* No PRR, lets just use the b/w estimate only */ 2442 return (100); 2443 } else { 2444 /* 2445 * Here we may have a non-retransmit but we 2446 * have no overrides, so just use the recovery 2447 * rate (prr is in effect). 2448 */ 2449 return (rack->r_ctl.rack_per_of_gp_rec); 2450 } 2451 } 2452 configured_rate: 2453 /* For the configured rate we look at our cwnd vs the ssthresh */ 2454 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 2455 return (rack->r_ctl.rack_per_of_gp_ss); 2456 else 2457 return (rack->r_ctl.rack_per_of_gp_ca); 2458 } 2459 2460 static void 2461 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6) 2462 { 2463 /* 2464 * Types of logs (mod value) 2465 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. 2466 * 2 = a dsack round begins, persist is reset to 16. 2467 * 3 = a dsack round ends 2468 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh 2469 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack 2470 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. 2471 */ 2472 if (tcp_bblogging_on(rack->rc_tp)) { 2473 union tcp_log_stackspecific log; 2474 struct timeval tv; 2475 2476 memset(&log, 0, sizeof(log)); 2477 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; 2478 log.u_bbr.flex1 <<= 1; 2479 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; 2480 log.u_bbr.flex1 <<= 1; 2481 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; 2482 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; 2483 log.u_bbr.flex3 = rack->r_ctl.num_dsack; 2484 log.u_bbr.flex4 = flex4; 2485 log.u_bbr.flex5 = flex5; 2486 log.u_bbr.flex6 = flex6; 2487 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; 2488 log.u_bbr.flex8 = mod; 2489 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2490 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2491 &rack->rc_inp->inp_socket->so_rcv, 2492 &rack->rc_inp->inp_socket->so_snd, 2493 RACK_DSACK_HANDLING, 0, 2494 0, &log, false, &tv); 2495 } 2496 } 2497 2498 static void 2499 rack_log_hdwr_pacing(struct tcp_rack *rack, 2500 uint64_t rate, uint64_t hw_rate, int line, 2501 int error, uint16_t mod) 2502 { 2503 if (tcp_bblogging_on(rack->rc_tp)) { 2504 union tcp_log_stackspecific log; 2505 struct timeval tv; 2506 const struct ifnet *ifp; 2507 2508 memset(&log, 0, sizeof(log)); 2509 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 2510 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 2511 if (rack->r_ctl.crte) { 2512 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 2513 } else if (rack->rc_inp->inp_route.ro_nh && 2514 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 2515 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 2516 } else 2517 ifp = NULL; 2518 if (ifp) { 2519 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff); 2520 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff); 2521 } 2522 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2523 log.u_bbr.bw_inuse = rate; 2524 log.u_bbr.flex5 = line; 2525 log.u_bbr.flex6 = error; 2526 log.u_bbr.flex7 = mod; 2527 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 2528 log.u_bbr.flex8 = rack->use_fixed_rate; 2529 log.u_bbr.flex8 <<= 1; 2530 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 2531 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 2532 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 2533 if (rack->r_ctl.crte) 2534 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 2535 else 2536 log.u_bbr.cur_del_rate = 0; 2537 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 2538 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2539 &rack->rc_inp->inp_socket->so_rcv, 2540 &rack->rc_inp->inp_socket->so_snd, 2541 BBR_LOG_HDWR_PACE, 0, 2542 0, &log, false, &tv); 2543 } 2544 } 2545 2546 static uint64_t 2547 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) 2548 { 2549 /* 2550 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 2551 */ 2552 uint64_t bw_est, high_rate; 2553 uint64_t gain; 2554 2555 if ((rack->r_pacing_discount == 0) || 2556 (rack_full_buffer_discount == 0)) { 2557 /* 2558 * No buffer level based discount from client buffer 2559 * level is enabled or the feature is disabled. 2560 */ 2561 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2562 bw_est = bw * gain; 2563 bw_est /= (uint64_t)100; 2564 } else { 2565 /* 2566 * We have a discount in place apply it with 2567 * just a 100% gain (we get no boost if the buffer 2568 * is full). 2569 */ 2570 uint64_t discount; 2571 2572 discount = bw * (uint64_t)(rack_full_buffer_discount * rack->r_ctl.pacing_discount_amm); 2573 discount /= 100; 2574 /* What %% of the b/w do we discount */ 2575 bw_est = bw - discount; 2576 } 2577 /* Never fall below the minimum (def 64kbps) */ 2578 if (bw_est < RACK_MIN_BW) 2579 bw_est = RACK_MIN_BW; 2580 if (rack->r_rack_hw_rate_caps) { 2581 /* Rate caps are in place */ 2582 if (rack->r_ctl.crte != NULL) { 2583 /* We have a hdwr rate already */ 2584 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2585 if (bw_est >= high_rate) { 2586 /* We are capping bw at the highest rate table entry */ 2587 if (rack_hw_rate_cap_per && 2588 (((high_rate * (100 + rack_hw_rate_cap_per)) / 100) < bw_est)) { 2589 rack->r_rack_hw_rate_caps = 0; 2590 goto done; 2591 } 2592 rack_log_hdwr_pacing(rack, 2593 bw_est, high_rate, __LINE__, 2594 0, 3); 2595 bw_est = high_rate; 2596 if (capped) 2597 *capped = 1; 2598 } 2599 } else if ((rack->rack_hdrw_pacing == 0) && 2600 (rack->rack_hdw_pace_ena) && 2601 (rack->rack_attempt_hdwr_pace == 0) && 2602 (rack->rc_inp->inp_route.ro_nh != NULL) && 2603 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2604 /* 2605 * Special case, we have not yet attempted hardware 2606 * pacing, and yet we may, when we do, find out if we are 2607 * above the highest rate. We need to know the maxbw for the interface 2608 * in question (if it supports ratelimiting). We get back 2609 * a 0, if the interface is not found in the RL lists. 2610 */ 2611 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2612 if (high_rate) { 2613 /* Yep, we have a rate is it above this rate? */ 2614 if (bw_est > high_rate) { 2615 bw_est = high_rate; 2616 if (capped) 2617 *capped = 1; 2618 } 2619 } 2620 } 2621 } 2622 done: 2623 return (bw_est); 2624 } 2625 2626 static void 2627 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2628 { 2629 if (tcp_bblogging_on(rack->rc_tp)) { 2630 union tcp_log_stackspecific log; 2631 struct timeval tv; 2632 2633 if (rack->sack_attack_disable > 0) 2634 goto log_anyway; 2635 if ((mod != 1) && (rack_verbose_logging == 0)) { 2636 /* 2637 * We get 3 values currently for mod 2638 * 1 - We are retransmitting and this tells the reason. 2639 * 2 - We are clearing a dup-ack count. 2640 * 3 - We are incrementing a dup-ack count. 2641 * 2642 * The clear/increment are only logged 2643 * if you have BBverbose on. 2644 */ 2645 return; 2646 } 2647 log_anyway: 2648 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2649 log.u_bbr.flex1 = tsused; 2650 log.u_bbr.flex2 = thresh; 2651 log.u_bbr.flex3 = rsm->r_flags; 2652 log.u_bbr.flex4 = rsm->r_dupack; 2653 log.u_bbr.flex5 = rsm->r_start; 2654 log.u_bbr.flex6 = rsm->r_end; 2655 log.u_bbr.flex8 = mod; 2656 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2657 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2658 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2659 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2660 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2661 log.u_bbr.pacing_gain = rack->r_must_retran; 2662 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2663 &rack->rc_inp->inp_socket->so_rcv, 2664 &rack->rc_inp->inp_socket->so_snd, 2665 BBR_LOG_SETTINGS_CHG, 0, 2666 0, &log, false, &tv); 2667 } 2668 } 2669 2670 static void 2671 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2672 { 2673 if (tcp_bblogging_on(rack->rc_tp)) { 2674 union tcp_log_stackspecific log; 2675 struct timeval tv; 2676 2677 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2678 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2679 log.u_bbr.flex2 = to; 2680 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2681 log.u_bbr.flex4 = slot; 2682 log.u_bbr.flex5 = rack->rc_tp->t_hpts_slot; 2683 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2684 log.u_bbr.flex7 = rack->rc_in_persist; 2685 log.u_bbr.flex8 = which; 2686 if (rack->rack_no_prr) 2687 log.u_bbr.pkts_out = 0; 2688 else 2689 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2690 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2691 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2692 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2693 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2694 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2695 log.u_bbr.pacing_gain = rack->r_must_retran; 2696 log.u_bbr.cwnd_gain = rack->rack_deferred_inited; 2697 log.u_bbr.pkt_epoch = rack->rc_has_collapsed; 2698 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2699 log.u_bbr.lost = rack_rto_min; 2700 log.u_bbr.epoch = rack->r_ctl.roundends; 2701 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2702 &rack->rc_inp->inp_socket->so_rcv, 2703 &rack->rc_inp->inp_socket->so_snd, 2704 BBR_LOG_TIMERSTAR, 0, 2705 0, &log, false, &tv); 2706 } 2707 } 2708 2709 static void 2710 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2711 { 2712 if (tcp_bblogging_on(rack->rc_tp)) { 2713 union tcp_log_stackspecific log; 2714 struct timeval tv; 2715 2716 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2717 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2718 log.u_bbr.flex8 = to_num; 2719 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2720 log.u_bbr.flex2 = rack->rc_rack_rtt; 2721 if (rsm == NULL) 2722 log.u_bbr.flex3 = 0; 2723 else 2724 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2725 if (rack->rack_no_prr) 2726 log.u_bbr.flex5 = 0; 2727 else 2728 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2729 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2730 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2731 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2732 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2733 log.u_bbr.pacing_gain = rack->r_must_retran; 2734 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2735 &rack->rc_inp->inp_socket->so_rcv, 2736 &rack->rc_inp->inp_socket->so_snd, 2737 BBR_LOG_RTO, 0, 2738 0, &log, false, &tv); 2739 } 2740 } 2741 2742 static void 2743 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, 2744 struct rack_sendmap *prev, 2745 struct rack_sendmap *rsm, 2746 struct rack_sendmap *next, 2747 int flag, uint32_t th_ack, int line) 2748 { 2749 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2750 union tcp_log_stackspecific log; 2751 struct timeval tv; 2752 2753 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2754 log.u_bbr.flex8 = flag; 2755 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2756 log.u_bbr.cur_del_rate = (uint64_t)prev; 2757 log.u_bbr.delRate = (uint64_t)rsm; 2758 log.u_bbr.rttProp = (uint64_t)next; 2759 log.u_bbr.flex7 = 0; 2760 if (prev) { 2761 log.u_bbr.flex1 = prev->r_start; 2762 log.u_bbr.flex2 = prev->r_end; 2763 log.u_bbr.flex7 |= 0x4; 2764 } 2765 if (rsm) { 2766 log.u_bbr.flex3 = rsm->r_start; 2767 log.u_bbr.flex4 = rsm->r_end; 2768 log.u_bbr.flex7 |= 0x2; 2769 } 2770 if (next) { 2771 log.u_bbr.flex5 = next->r_start; 2772 log.u_bbr.flex6 = next->r_end; 2773 log.u_bbr.flex7 |= 0x1; 2774 } 2775 log.u_bbr.applimited = line; 2776 log.u_bbr.pkts_out = th_ack; 2777 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2778 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2779 if (rack->rack_no_prr) 2780 log.u_bbr.lost = 0; 2781 else 2782 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2783 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2784 &rack->rc_inp->inp_socket->so_rcv, 2785 &rack->rc_inp->inp_socket->so_snd, 2786 TCP_LOG_MAPCHG, 0, 2787 0, &log, false, &tv); 2788 } 2789 } 2790 2791 static void 2792 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2793 struct rack_sendmap *rsm, int conf) 2794 { 2795 if (tcp_bblogging_on(tp)) { 2796 union tcp_log_stackspecific log; 2797 struct timeval tv; 2798 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2799 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2800 log.u_bbr.flex1 = t; 2801 log.u_bbr.flex2 = len; 2802 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2803 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2804 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2805 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2806 log.u_bbr.flex7 = conf; 2807 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2808 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2809 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2810 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2811 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2812 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2813 if (rsm) { 2814 log.u_bbr.pkt_epoch = rsm->r_start; 2815 log.u_bbr.lost = rsm->r_end; 2816 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2817 /* We loose any upper of the 24 bits */ 2818 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; 2819 } else { 2820 /* Its a SYN */ 2821 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2822 log.u_bbr.lost = 0; 2823 log.u_bbr.cwnd_gain = 0; 2824 log.u_bbr.pacing_gain = 0; 2825 } 2826 /* Write out general bits of interest rrs here */ 2827 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; 2828 log.u_bbr.use_lt_bw <<= 1; 2829 log.u_bbr.use_lt_bw |= rack->forced_ack; 2830 log.u_bbr.use_lt_bw <<= 1; 2831 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2832 log.u_bbr.use_lt_bw <<= 1; 2833 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2834 log.u_bbr.use_lt_bw <<= 1; 2835 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2836 log.u_bbr.use_lt_bw <<= 1; 2837 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 2838 log.u_bbr.use_lt_bw <<= 1; 2839 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2840 log.u_bbr.use_lt_bw <<= 1; 2841 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; 2842 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; 2843 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; 2844 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; 2845 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; 2846 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; 2847 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 2848 log.u_bbr.bw_inuse <<= 32; 2849 if (rsm) 2850 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 2851 TCP_LOG_EVENTP(tp, NULL, 2852 &rack->rc_inp->inp_socket->so_rcv, 2853 &rack->rc_inp->inp_socket->so_snd, 2854 BBR_LOG_BBRRTT, 0, 2855 0, &log, false, &tv); 2856 2857 2858 } 2859 } 2860 2861 static void 2862 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 2863 { 2864 /* 2865 * Log the rtt sample we are 2866 * applying to the srtt algorithm in 2867 * useconds. 2868 */ 2869 if (tcp_bblogging_on(rack->rc_tp)) { 2870 union tcp_log_stackspecific log; 2871 struct timeval tv; 2872 2873 /* Convert our ms to a microsecond */ 2874 memset(&log, 0, sizeof(log)); 2875 log.u_bbr.flex1 = rtt; 2876 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2877 log.u_bbr.flex3 = rack->r_ctl.sack_count; 2878 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2879 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra; 2880 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2881 log.u_bbr.flex7 = 1; 2882 log.u_bbr.flex8 = rack->sack_attack_disable; 2883 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2884 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2885 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2886 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2887 log.u_bbr.pacing_gain = rack->r_must_retran; 2888 /* 2889 * We capture in delRate the upper 32 bits as 2890 * the confidence level we had declared, and the 2891 * lower 32 bits as the actual RTT using the arrival 2892 * timestamp. 2893 */ 2894 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 2895 log.u_bbr.delRate <<= 32; 2896 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 2897 /* Lets capture all the things that make up t_rtxcur */ 2898 log.u_bbr.applimited = rack_rto_min; 2899 log.u_bbr.epoch = rack_rto_max; 2900 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 2901 log.u_bbr.lost = rack_rto_min; 2902 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 2903 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 2904 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 2905 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 2906 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 2907 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2908 &rack->rc_inp->inp_socket->so_rcv, 2909 &rack->rc_inp->inp_socket->so_snd, 2910 TCP_LOG_RTT, 0, 2911 0, &log, false, &tv); 2912 } 2913 } 2914 2915 static void 2916 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 2917 { 2918 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2919 union tcp_log_stackspecific log; 2920 struct timeval tv; 2921 2922 /* Convert our ms to a microsecond */ 2923 memset(&log, 0, sizeof(log)); 2924 log.u_bbr.flex1 = rtt; 2925 log.u_bbr.flex2 = send_time; 2926 log.u_bbr.flex3 = ack_time; 2927 log.u_bbr.flex4 = where; 2928 log.u_bbr.flex7 = 2; 2929 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2930 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2931 &rack->rc_inp->inp_socket->so_rcv, 2932 &rack->rc_inp->inp_socket->so_snd, 2933 TCP_LOG_RTT, 0, 2934 0, &log, false, &tv); 2935 } 2936 } 2937 2938 2939 static void 2940 rack_log_rtt_sendmap(struct tcp_rack *rack, uint32_t idx, uint64_t tsv, uint32_t tsecho) 2941 { 2942 if (tcp_bblogging_on(rack->rc_tp)) { 2943 union tcp_log_stackspecific log; 2944 struct timeval tv; 2945 2946 /* Convert our ms to a microsecond */ 2947 memset(&log, 0, sizeof(log)); 2948 log.u_bbr.flex1 = idx; 2949 log.u_bbr.flex2 = rack_ts_to_msec(tsv); 2950 log.u_bbr.flex3 = tsecho; 2951 log.u_bbr.flex7 = 3; 2952 log.u_bbr.rttProp = tsv; 2953 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2954 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2955 &rack->rc_inp->inp_socket->so_rcv, 2956 &rack->rc_inp->inp_socket->so_snd, 2957 TCP_LOG_RTT, 0, 2958 0, &log, false, &tv); 2959 } 2960 } 2961 2962 2963 static inline void 2964 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 2965 { 2966 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2967 union tcp_log_stackspecific log; 2968 struct timeval tv; 2969 2970 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2971 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2972 log.u_bbr.flex1 = line; 2973 log.u_bbr.flex2 = tick; 2974 log.u_bbr.flex3 = tp->t_maxunacktime; 2975 log.u_bbr.flex4 = tp->t_acktime; 2976 log.u_bbr.flex8 = event; 2977 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2978 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2979 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2980 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2981 log.u_bbr.pacing_gain = rack->r_must_retran; 2982 TCP_LOG_EVENTP(tp, NULL, 2983 &rack->rc_inp->inp_socket->so_rcv, 2984 &rack->rc_inp->inp_socket->so_snd, 2985 BBR_LOG_PROGRESS, 0, 2986 0, &log, false, &tv); 2987 } 2988 } 2989 2990 static void 2991 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv, int line) 2992 { 2993 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2994 union tcp_log_stackspecific log; 2995 2996 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2997 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2998 log.u_bbr.flex1 = slot; 2999 if (rack->rack_no_prr) 3000 log.u_bbr.flex2 = 0; 3001 else 3002 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 3003 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3004 log.u_bbr.flex5 = rack->r_ctl.ack_during_sd; 3005 log.u_bbr.flex6 = line; 3006 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 3007 log.u_bbr.flex8 = rack->rc_in_persist; 3008 log.u_bbr.timeStamp = cts; 3009 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3010 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3011 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3012 log.u_bbr.pacing_gain = rack->r_must_retran; 3013 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3014 &rack->rc_inp->inp_socket->so_rcv, 3015 &rack->rc_inp->inp_socket->so_snd, 3016 BBR_LOG_BBRSND, 0, 3017 0, &log, false, tv); 3018 } 3019 } 3020 3021 static void 3022 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 3023 { 3024 if (tcp_bblogging_on(rack->rc_tp)) { 3025 union tcp_log_stackspecific log; 3026 struct timeval tv; 3027 3028 memset(&log, 0, sizeof(log)); 3029 log.u_bbr.flex1 = did_out; 3030 log.u_bbr.flex2 = nxt_pkt; 3031 log.u_bbr.flex3 = way_out; 3032 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3033 if (rack->rack_no_prr) 3034 log.u_bbr.flex5 = 0; 3035 else 3036 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3037 log.u_bbr.flex6 = nsegs; 3038 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 3039 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 3040 log.u_bbr.flex7 <<= 1; 3041 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 3042 log.u_bbr.flex7 <<= 1; 3043 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 3044 log.u_bbr.flex8 = rack->rc_in_persist; 3045 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3046 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3047 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3048 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 3049 log.u_bbr.use_lt_bw <<= 1; 3050 log.u_bbr.use_lt_bw |= rack->r_might_revert; 3051 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3052 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3053 log.u_bbr.pacing_gain = rack->r_must_retran; 3054 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3055 &rack->rc_inp->inp_socket->so_rcv, 3056 &rack->rc_inp->inp_socket->so_snd, 3057 BBR_LOG_DOSEG_DONE, 0, 3058 0, &log, false, &tv); 3059 } 3060 } 3061 3062 static void 3063 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 3064 { 3065 if (tcp_bblogging_on(rack->rc_tp)) { 3066 union tcp_log_stackspecific log; 3067 struct timeval tv; 3068 3069 memset(&log, 0, sizeof(log)); 3070 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 3071 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 3072 log.u_bbr.flex4 = arg1; 3073 log.u_bbr.flex5 = arg2; 3074 log.u_bbr.flex7 = rack->r_ctl.rc_user_set_min_segs; 3075 log.u_bbr.flex6 = arg3; 3076 log.u_bbr.flex8 = frm; 3077 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3078 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3079 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3080 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 3081 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3082 log.u_bbr.pacing_gain = rack->r_must_retran; 3083 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv, 3084 &tptosocket(tp)->so_snd, 3085 TCP_HDWR_PACE_SIZE, 0, 0, &log, false, &tv); 3086 } 3087 } 3088 3089 static void 3090 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, 3091 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 3092 { 3093 if (tcp_bblogging_on(rack->rc_tp)) { 3094 union tcp_log_stackspecific log; 3095 struct timeval tv; 3096 3097 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3098 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3099 log.u_bbr.flex1 = slot; 3100 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 3101 log.u_bbr.flex4 = reason; 3102 if (rack->rack_no_prr) 3103 log.u_bbr.flex5 = 0; 3104 else 3105 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3106 log.u_bbr.flex7 = hpts_calling; 3107 log.u_bbr.flex8 = rack->rc_in_persist; 3108 log.u_bbr.lt_epoch = cwnd_to_use; 3109 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3110 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3111 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3112 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3113 log.u_bbr.pacing_gain = rack->r_must_retran; 3114 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 3115 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3116 &rack->rc_inp->inp_socket->so_rcv, 3117 &rack->rc_inp->inp_socket->so_snd, 3118 BBR_LOG_JUSTRET, 0, 3119 tlen, &log, false, &tv); 3120 } 3121 } 3122 3123 static void 3124 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 3125 struct timeval *tv, uint32_t flags_on_entry) 3126 { 3127 if (tcp_bblogging_on(rack->rc_tp)) { 3128 union tcp_log_stackspecific log; 3129 3130 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3131 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3132 log.u_bbr.flex1 = line; 3133 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 3134 log.u_bbr.flex3 = flags_on_entry; 3135 log.u_bbr.flex4 = us_cts; 3136 if (rack->rack_no_prr) 3137 log.u_bbr.flex5 = 0; 3138 else 3139 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3140 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 3141 log.u_bbr.flex7 = hpts_removed; 3142 log.u_bbr.flex8 = 1; 3143 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 3144 log.u_bbr.timeStamp = us_cts; 3145 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3146 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3147 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3148 log.u_bbr.pacing_gain = rack->r_must_retran; 3149 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3150 &rack->rc_inp->inp_socket->so_rcv, 3151 &rack->rc_inp->inp_socket->so_snd, 3152 BBR_LOG_TIMERCANC, 0, 3153 0, &log, false, tv); 3154 } 3155 } 3156 3157 static void 3158 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 3159 uint32_t flex1, uint32_t flex2, 3160 uint32_t flex3, uint32_t flex4, 3161 uint32_t flex5, uint32_t flex6, 3162 uint16_t flex7, uint8_t mod) 3163 { 3164 if (tcp_bblogging_on(rack->rc_tp)) { 3165 union tcp_log_stackspecific log; 3166 struct timeval tv; 3167 3168 if (mod == 1) { 3169 /* No you can't use 1, its for the real to cancel */ 3170 return; 3171 } 3172 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3173 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3174 log.u_bbr.flex1 = flex1; 3175 log.u_bbr.flex2 = flex2; 3176 log.u_bbr.flex3 = flex3; 3177 log.u_bbr.flex4 = flex4; 3178 log.u_bbr.flex5 = flex5; 3179 log.u_bbr.flex6 = flex6; 3180 log.u_bbr.flex7 = flex7; 3181 log.u_bbr.flex8 = mod; 3182 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3183 &rack->rc_inp->inp_socket->so_rcv, 3184 &rack->rc_inp->inp_socket->so_snd, 3185 BBR_LOG_TIMERCANC, 0, 3186 0, &log, false, &tv); 3187 } 3188 } 3189 3190 static void 3191 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 3192 { 3193 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3194 union tcp_log_stackspecific log; 3195 struct timeval tv; 3196 3197 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3198 log.u_bbr.flex1 = timers; 3199 log.u_bbr.flex2 = ret; 3200 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 3201 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3202 log.u_bbr.flex5 = cts; 3203 if (rack->rack_no_prr) 3204 log.u_bbr.flex6 = 0; 3205 else 3206 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; 3207 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3208 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3209 log.u_bbr.pacing_gain = rack->r_must_retran; 3210 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3211 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3212 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3213 &rack->rc_inp->inp_socket->so_rcv, 3214 &rack->rc_inp->inp_socket->so_snd, 3215 BBR_LOG_TO_PROCESS, 0, 3216 0, &log, false, &tv); 3217 } 3218 } 3219 3220 static void 3221 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line) 3222 { 3223 if (tcp_bblogging_on(rack->rc_tp)) { 3224 union tcp_log_stackspecific log; 3225 struct timeval tv; 3226 3227 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3228 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 3229 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 3230 if (rack->rack_no_prr) 3231 log.u_bbr.flex3 = 0; 3232 else 3233 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 3234 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 3235 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 3236 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 3237 log.u_bbr.flex7 = line; 3238 log.u_bbr.flex8 = frm; 3239 log.u_bbr.pkts_out = orig_cwnd; 3240 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3241 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3242 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 3243 log.u_bbr.use_lt_bw <<= 1; 3244 log.u_bbr.use_lt_bw |= rack->r_might_revert; 3245 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3246 &rack->rc_inp->inp_socket->so_rcv, 3247 &rack->rc_inp->inp_socket->so_snd, 3248 BBR_LOG_BBRUPD, 0, 3249 0, &log, false, &tv); 3250 } 3251 } 3252 3253 #ifdef TCP_SAD_DETECTION 3254 static void 3255 rack_log_sad(struct tcp_rack *rack, int event) 3256 { 3257 if (tcp_bblogging_on(rack->rc_tp)) { 3258 union tcp_log_stackspecific log; 3259 struct timeval tv; 3260 3261 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3262 log.u_bbr.flex1 = rack->r_ctl.sack_count; 3263 log.u_bbr.flex2 = rack->r_ctl.ack_count; 3264 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra; 3265 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 3266 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced; 3267 log.u_bbr.flex6 = tcp_sack_to_ack_thresh; 3268 log.u_bbr.pkts_out = tcp_sack_to_move_thresh; 3269 log.u_bbr.lt_epoch = (tcp_force_detection << 8); 3270 log.u_bbr.lt_epoch |= rack->do_detection; 3271 log.u_bbr.applimited = tcp_map_minimum; 3272 log.u_bbr.flex7 = rack->sack_attack_disable; 3273 log.u_bbr.flex8 = event; 3274 log.u_bbr.bbr_state = rack->rc_suspicious; 3275 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3276 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3277 log.u_bbr.delivered = tcp_sad_decay_val; 3278 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3279 &rack->rc_inp->inp_socket->so_rcv, 3280 &rack->rc_inp->inp_socket->so_snd, 3281 TCP_SAD_DETECT, 0, 3282 0, &log, false, &tv); 3283 } 3284 } 3285 #endif 3286 3287 static void 3288 rack_counter_destroy(void) 3289 { 3290 counter_u64_free(rack_total_bytes); 3291 counter_u64_free(rack_fto_send); 3292 counter_u64_free(rack_fto_rsm_send); 3293 counter_u64_free(rack_nfto_resend); 3294 counter_u64_free(rack_hw_pace_init_fail); 3295 counter_u64_free(rack_hw_pace_lost); 3296 counter_u64_free(rack_non_fto_send); 3297 counter_u64_free(rack_extended_rfo); 3298 counter_u64_free(rack_ack_total); 3299 counter_u64_free(rack_express_sack); 3300 counter_u64_free(rack_sack_total); 3301 counter_u64_free(rack_move_none); 3302 counter_u64_free(rack_move_some); 3303 counter_u64_free(rack_sack_attacks_detected); 3304 counter_u64_free(rack_sack_attacks_reversed); 3305 counter_u64_free(rack_sack_attacks_suspect); 3306 counter_u64_free(rack_sack_used_next_merge); 3307 counter_u64_free(rack_sack_used_prev_merge); 3308 counter_u64_free(rack_tlp_tot); 3309 counter_u64_free(rack_tlp_newdata); 3310 counter_u64_free(rack_tlp_retran); 3311 counter_u64_free(rack_tlp_retran_bytes); 3312 counter_u64_free(rack_to_tot); 3313 counter_u64_free(rack_saw_enobuf); 3314 counter_u64_free(rack_saw_enobuf_hw); 3315 counter_u64_free(rack_saw_enetunreach); 3316 counter_u64_free(rack_hot_alloc); 3317 counter_u64_free(rack_to_alloc); 3318 counter_u64_free(rack_to_alloc_hard); 3319 counter_u64_free(rack_to_alloc_emerg); 3320 counter_u64_free(rack_to_alloc_limited); 3321 counter_u64_free(rack_alloc_limited_conns); 3322 counter_u64_free(rack_split_limited); 3323 counter_u64_free(rack_multi_single_eq); 3324 counter_u64_free(rack_rxt_clamps_cwnd); 3325 counter_u64_free(rack_rxt_clamps_cwnd_uniq); 3326 counter_u64_free(rack_proc_non_comp_ack); 3327 counter_u64_free(rack_sack_proc_all); 3328 counter_u64_free(rack_sack_proc_restart); 3329 counter_u64_free(rack_sack_proc_short); 3330 counter_u64_free(rack_sack_skipped_acked); 3331 counter_u64_free(rack_sack_splits); 3332 counter_u64_free(rack_input_idle_reduces); 3333 counter_u64_free(rack_collapsed_win); 3334 counter_u64_free(rack_collapsed_win_rxt); 3335 counter_u64_free(rack_collapsed_win_rxt_bytes); 3336 counter_u64_free(rack_collapsed_win_seen); 3337 counter_u64_free(rack_try_scwnd); 3338 counter_u64_free(rack_persists_sends); 3339 counter_u64_free(rack_persists_acks); 3340 counter_u64_free(rack_persists_loss); 3341 counter_u64_free(rack_persists_lost_ends); 3342 #ifdef INVARIANTS 3343 counter_u64_free(rack_adjust_map_bw); 3344 #endif 3345 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 3346 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 3347 } 3348 3349 static struct rack_sendmap * 3350 rack_alloc(struct tcp_rack *rack) 3351 { 3352 struct rack_sendmap *rsm; 3353 3354 /* 3355 * First get the top of the list it in 3356 * theory is the "hottest" rsm we have, 3357 * possibly just freed by ack processing. 3358 */ 3359 if (rack->rc_free_cnt > rack_free_cache) { 3360 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 3361 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3362 counter_u64_add(rack_hot_alloc, 1); 3363 rack->rc_free_cnt--; 3364 return (rsm); 3365 } 3366 /* 3367 * Once we get under our free cache we probably 3368 * no longer have a "hot" one available. Lets 3369 * get one from UMA. 3370 */ 3371 rsm = uma_zalloc(rack_zone, M_NOWAIT); 3372 if (rsm) { 3373 rack->r_ctl.rc_num_maps_alloced++; 3374 counter_u64_add(rack_to_alloc, 1); 3375 return (rsm); 3376 } 3377 /* 3378 * Dig in to our aux rsm's (the last two) since 3379 * UMA failed to get us one. 3380 */ 3381 if (rack->rc_free_cnt) { 3382 counter_u64_add(rack_to_alloc_emerg, 1); 3383 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 3384 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3385 rack->rc_free_cnt--; 3386 return (rsm); 3387 } 3388 return (NULL); 3389 } 3390 3391 static struct rack_sendmap * 3392 rack_alloc_full_limit(struct tcp_rack *rack) 3393 { 3394 if ((V_tcp_map_entries_limit > 0) && 3395 (rack->do_detection == 0) && 3396 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 3397 counter_u64_add(rack_to_alloc_limited, 1); 3398 if (!rack->alloc_limit_reported) { 3399 rack->alloc_limit_reported = 1; 3400 counter_u64_add(rack_alloc_limited_conns, 1); 3401 } 3402 return (NULL); 3403 } 3404 return (rack_alloc(rack)); 3405 } 3406 3407 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 3408 static struct rack_sendmap * 3409 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 3410 { 3411 struct rack_sendmap *rsm; 3412 3413 if (limit_type) { 3414 /* currently there is only one limit type */ 3415 if (rack->r_ctl.rc_split_limit > 0 && 3416 (rack->do_detection == 0) && 3417 rack->r_ctl.rc_num_split_allocs >= rack->r_ctl.rc_split_limit) { 3418 counter_u64_add(rack_split_limited, 1); 3419 if (!rack->alloc_limit_reported) { 3420 rack->alloc_limit_reported = 1; 3421 counter_u64_add(rack_alloc_limited_conns, 1); 3422 } 3423 return (NULL); 3424 #ifdef TCP_SAD_DETECTION 3425 } else if ((tcp_sad_limit != 0) && 3426 (rack->do_detection == 1) && 3427 (rack->r_ctl.rc_num_split_allocs >= tcp_sad_limit)) { 3428 counter_u64_add(rack_split_limited, 1); 3429 if (!rack->alloc_limit_reported) { 3430 rack->alloc_limit_reported = 1; 3431 counter_u64_add(rack_alloc_limited_conns, 1); 3432 } 3433 return (NULL); 3434 #endif 3435 } 3436 } 3437 3438 /* allocate and mark in the limit type, if set */ 3439 rsm = rack_alloc(rack); 3440 if (rsm != NULL && limit_type) { 3441 rsm->r_limit_type = limit_type; 3442 rack->r_ctl.rc_num_split_allocs++; 3443 } 3444 return (rsm); 3445 } 3446 3447 static void 3448 rack_free_trim(struct tcp_rack *rack) 3449 { 3450 struct rack_sendmap *rsm; 3451 3452 /* 3453 * Free up all the tail entries until 3454 * we get our list down to the limit. 3455 */ 3456 while (rack->rc_free_cnt > rack_free_cache) { 3457 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); 3458 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3459 rack->rc_free_cnt--; 3460 rack->r_ctl.rc_num_maps_alloced--; 3461 uma_zfree(rack_zone, rsm); 3462 } 3463 } 3464 3465 static void 3466 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 3467 { 3468 if (rsm->r_flags & RACK_APP_LIMITED) { 3469 if (rack->r_ctl.rc_app_limited_cnt > 0) { 3470 rack->r_ctl.rc_app_limited_cnt--; 3471 } 3472 } 3473 if (rsm->r_limit_type) { 3474 /* currently there is only one limit type */ 3475 rack->r_ctl.rc_num_split_allocs--; 3476 } 3477 if (rsm == rack->r_ctl.rc_first_appl) { 3478 if (rack->r_ctl.rc_app_limited_cnt == 0) 3479 rack->r_ctl.rc_first_appl = NULL; 3480 else 3481 rack->r_ctl.rc_first_appl = tqhash_find(rack->r_ctl.tqh, rsm->r_nseq_appl); 3482 } 3483 if (rsm == rack->r_ctl.rc_resend) 3484 rack->r_ctl.rc_resend = NULL; 3485 if (rsm == rack->r_ctl.rc_end_appl) 3486 rack->r_ctl.rc_end_appl = NULL; 3487 if (rack->r_ctl.rc_tlpsend == rsm) 3488 rack->r_ctl.rc_tlpsend = NULL; 3489 if (rack->r_ctl.rc_sacklast == rsm) 3490 rack->r_ctl.rc_sacklast = NULL; 3491 memset(rsm, 0, sizeof(struct rack_sendmap)); 3492 /* Make sure we are not going to overrun our count limit of 0xff */ 3493 if ((rack->rc_free_cnt + 1) > 0xff) { 3494 rack_free_trim(rack); 3495 } 3496 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); 3497 rack->rc_free_cnt++; 3498 } 3499 3500 static uint32_t 3501 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack) 3502 { 3503 uint64_t srtt, bw, len, tim; 3504 uint32_t segsiz, def_len, minl; 3505 3506 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3507 def_len = rack_def_data_window * segsiz; 3508 if (rack->rc_gp_filled == 0) { 3509 /* 3510 * We have no measurement (IW is in flight?) so 3511 * we can only guess using our data_window sysctl 3512 * value (usually 20MSS). 3513 */ 3514 return (def_len); 3515 } 3516 /* 3517 * Now we have a number of factors to consider. 3518 * 3519 * 1) We have a desired BDP which is usually 3520 * at least 2. 3521 * 2) We have a minimum number of rtt's usually 1 SRTT 3522 * but we allow it too to be more. 3523 * 3) We want to make sure a measurement last N useconds (if 3524 * we have set rack_min_measure_usec. 3525 * 3526 * We handle the first concern here by trying to create a data 3527 * window of max(rack_def_data_window, DesiredBDP). The 3528 * second concern we handle in not letting the measurement 3529 * window end normally until at least the required SRTT's 3530 * have gone by which is done further below in 3531 * rack_enough_for_measurement(). Finally the third concern 3532 * we also handle here by calculating how long that time 3533 * would take at the current BW and then return the 3534 * max of our first calculation and that length. Note 3535 * that if rack_min_measure_usec is 0, we don't deal 3536 * with concern 3. Also for both Concern 1 and 3 an 3537 * application limited period could end the measurement 3538 * earlier. 3539 * 3540 * So lets calculate the BDP with the "known" b/w using 3541 * the SRTT has our rtt and then multiply it by the 3542 * goal. 3543 */ 3544 bw = rack_get_bw(rack); 3545 srtt = (uint64_t)tp->t_srtt; 3546 len = bw * srtt; 3547 len /= (uint64_t)HPTS_USEC_IN_SEC; 3548 len *= max(1, rack_goal_bdp); 3549 /* Now we need to round up to the nearest MSS */ 3550 len = roundup(len, segsiz); 3551 if (rack_min_measure_usec) { 3552 /* Now calculate our min length for this b/w */ 3553 tim = rack_min_measure_usec; 3554 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC; 3555 if (minl == 0) 3556 minl = 1; 3557 minl = roundup(minl, segsiz); 3558 if (len < minl) 3559 len = minl; 3560 } 3561 /* 3562 * Now if we have a very small window we want 3563 * to attempt to get the window that is 3564 * as small as possible. This happens on 3565 * low b/w connections and we don't want to 3566 * span huge numbers of rtt's between measurements. 3567 * 3568 * We basically include 2 over our "MIN window" so 3569 * that the measurement can be shortened (possibly) by 3570 * an ack'ed packet. 3571 */ 3572 if (len < def_len) 3573 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz))); 3574 else 3575 return (max((uint32_t)len, def_len)); 3576 3577 } 3578 3579 static int 3580 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality) 3581 { 3582 uint32_t tim, srtts, segsiz; 3583 3584 /* 3585 * Has enough time passed for the GP measurement to be valid? 3586 */ 3587 if (SEQ_LT(th_ack, tp->gput_seq)) { 3588 /* Not enough bytes yet */ 3589 return (0); 3590 } 3591 if ((tp->snd_max == tp->snd_una) || 3592 (th_ack == tp->snd_max)){ 3593 /* 3594 * All is acked quality of all acked is 3595 * usually low or medium, but we in theory could split 3596 * all acked into two cases, where you got 3597 * a signifigant amount of your window and 3598 * where you did not. For now we leave it 3599 * but it is something to contemplate in the 3600 * future. The danger here is that delayed ack 3601 * is effecting the last byte (which is a 50:50 chance). 3602 */ 3603 *quality = RACK_QUALITY_ALLACKED; 3604 return (1); 3605 } 3606 if (SEQ_GEQ(th_ack, tp->gput_ack)) { 3607 /* 3608 * We obtained our entire window of data we wanted 3609 * no matter if we are in recovery or not then 3610 * its ok since expanding the window does not 3611 * make things fuzzy (or at least not as much). 3612 */ 3613 *quality = RACK_QUALITY_HIGH; 3614 return (1); 3615 } 3616 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3617 if (SEQ_LT(th_ack, tp->gput_ack) && 3618 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 3619 /* Not enough bytes yet */ 3620 return (0); 3621 } 3622 if (rack->r_ctl.rc_first_appl && 3623 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { 3624 /* 3625 * We are up to the app limited send point 3626 * we have to measure irrespective of the time.. 3627 */ 3628 *quality = RACK_QUALITY_APPLIMITED; 3629 return (1); 3630 } 3631 /* Now what about time? */ 3632 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); 3633 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; 3634 if ((tim >= srtts) && (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) { 3635 /* 3636 * We do not allow a measurement if we are in recovery 3637 * that would shrink the goodput window we wanted. 3638 * This is to prevent cloudyness of when the last send 3639 * was actually made. 3640 */ 3641 *quality = RACK_QUALITY_HIGH; 3642 return (1); 3643 } 3644 /* Nope not even a full SRTT has passed */ 3645 return (0); 3646 } 3647 3648 static void 3649 rack_log_timely(struct tcp_rack *rack, 3650 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd, 3651 uint64_t up_bnd, int line, uint8_t method) 3652 { 3653 if (tcp_bblogging_on(rack->rc_tp)) { 3654 union tcp_log_stackspecific log; 3655 struct timeval tv; 3656 3657 memset(&log, 0, sizeof(log)); 3658 log.u_bbr.flex1 = logged; 3659 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; 3660 log.u_bbr.flex2 <<= 4; 3661 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; 3662 log.u_bbr.flex2 <<= 4; 3663 log.u_bbr.flex2 |= rack->rc_gp_incr; 3664 log.u_bbr.flex2 <<= 4; 3665 log.u_bbr.flex2 |= rack->rc_gp_bwred; 3666 log.u_bbr.flex3 = rack->rc_gp_incr; 3667 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3668 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; 3669 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; 3670 log.u_bbr.flex7 = rack->rc_gp_bwred; 3671 log.u_bbr.flex8 = method; 3672 log.u_bbr.cur_del_rate = cur_bw; 3673 log.u_bbr.delRate = low_bnd; 3674 log.u_bbr.bw_inuse = up_bnd; 3675 log.u_bbr.rttProp = rack_get_bw(rack); 3676 log.u_bbr.pkt_epoch = line; 3677 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3678 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3679 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3680 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3681 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3682 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; 3683 log.u_bbr.cwnd_gain <<= 1; 3684 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; 3685 log.u_bbr.cwnd_gain <<= 1; 3686 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 3687 log.u_bbr.cwnd_gain <<= 1; 3688 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 3689 log.u_bbr.lost = rack->r_ctl.rc_loss_count; 3690 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3691 &rack->rc_inp->inp_socket->so_rcv, 3692 &rack->rc_inp->inp_socket->so_snd, 3693 TCP_TIMELY_WORK, 0, 3694 0, &log, false, &tv); 3695 } 3696 } 3697 3698 static int 3699 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) 3700 { 3701 /* 3702 * Before we increase we need to know if 3703 * the estimate just made was less than 3704 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est) 3705 * 3706 * If we already are pacing at a fast enough 3707 * rate to push us faster there is no sense of 3708 * increasing. 3709 * 3710 * We first caculate our actual pacing rate (ss or ca multiplier 3711 * times our cur_bw). 3712 * 3713 * Then we take the last measured rate and multipy by our 3714 * maximum pacing overage to give us a max allowable rate. 3715 * 3716 * If our act_rate is smaller than our max_allowable rate 3717 * then we should increase. Else we should hold steady. 3718 * 3719 */ 3720 uint64_t act_rate, max_allow_rate; 3721 3722 if (rack_timely_no_stopping) 3723 return (1); 3724 3725 if ((cur_bw == 0) || (last_bw_est == 0)) { 3726 /* 3727 * Initial startup case or 3728 * everything is acked case. 3729 */ 3730 rack_log_timely(rack, mult, cur_bw, 0, 0, 3731 __LINE__, 9); 3732 return (1); 3733 } 3734 if (mult <= 100) { 3735 /* 3736 * We can always pace at or slightly above our rate. 3737 */ 3738 rack_log_timely(rack, mult, cur_bw, 0, 0, 3739 __LINE__, 9); 3740 return (1); 3741 } 3742 act_rate = cur_bw * (uint64_t)mult; 3743 act_rate /= 100; 3744 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100); 3745 max_allow_rate /= 100; 3746 if (act_rate < max_allow_rate) { 3747 /* 3748 * Here the rate we are actually pacing at 3749 * is smaller than 10% above our last measurement. 3750 * This means we are pacing below what we would 3751 * like to try to achieve (plus some wiggle room). 3752 */ 3753 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3754 __LINE__, 9); 3755 return (1); 3756 } else { 3757 /* 3758 * Here we are already pacing at least rack_max_per_above(10%) 3759 * what we are getting back. This indicates most likely 3760 * that we are being limited (cwnd/rwnd/app) and can't 3761 * get any more b/w. There is no sense of trying to 3762 * raise up the pacing rate its not speeding us up 3763 * and we already are pacing faster than we are getting. 3764 */ 3765 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3766 __LINE__, 8); 3767 return (0); 3768 } 3769 } 3770 3771 static void 3772 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack) 3773 { 3774 /* 3775 * When we drag bottom, we want to assure 3776 * that no multiplier is below 1.0, if so 3777 * we want to restore it to at least that. 3778 */ 3779 if (rack->r_ctl.rack_per_of_gp_rec < 100) { 3780 /* This is unlikely we usually do not touch recovery */ 3781 rack->r_ctl.rack_per_of_gp_rec = 100; 3782 } 3783 if (rack->r_ctl.rack_per_of_gp_ca < 100) { 3784 rack->r_ctl.rack_per_of_gp_ca = 100; 3785 } 3786 if (rack->r_ctl.rack_per_of_gp_ss < 100) { 3787 rack->r_ctl.rack_per_of_gp_ss = 100; 3788 } 3789 } 3790 3791 static void 3792 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack) 3793 { 3794 if (rack->r_ctl.rack_per_of_gp_ca > 100) { 3795 rack->r_ctl.rack_per_of_gp_ca = 100; 3796 } 3797 if (rack->r_ctl.rack_per_of_gp_ss > 100) { 3798 rack->r_ctl.rack_per_of_gp_ss = 100; 3799 } 3800 } 3801 3802 static void 3803 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override) 3804 { 3805 int32_t calc, logged, plus; 3806 3807 logged = 0; 3808 3809 if (override) { 3810 /* 3811 * override is passed when we are 3812 * loosing b/w and making one last 3813 * gasp at trying to not loose out 3814 * to a new-reno flow. 3815 */ 3816 goto extra_boost; 3817 } 3818 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */ 3819 if (rack->rc_gp_incr && 3820 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { 3821 /* 3822 * Reset and get 5 strokes more before the boost. Note 3823 * that the count is 0 based so we have to add one. 3824 */ 3825 extra_boost: 3826 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3827 rack->rc_gp_timely_inc_cnt = 0; 3828 } else 3829 plus = (uint32_t)rack_gp_increase_per; 3830 /* Must be at least 1% increase for true timely increases */ 3831 if ((plus < 1) && 3832 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3833 plus = 1; 3834 if (rack->rc_gp_saw_rec && 3835 (rack->rc_gp_no_rec_chg == 0) && 3836 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3837 rack->r_ctl.rack_per_of_gp_rec)) { 3838 /* We have been in recovery ding it too */ 3839 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 3840 if (calc > 0xffff) 3841 calc = 0xffff; 3842 logged |= 1; 3843 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 3844 if (rack->r_ctl.rack_per_upper_bound_ca && 3845 (rack->rc_dragged_bottom == 0) && 3846 (rack->r_ctl.rack_per_of_gp_rec > rack->r_ctl.rack_per_upper_bound_ca)) 3847 rack->r_ctl.rack_per_of_gp_rec = rack->r_ctl.rack_per_upper_bound_ca; 3848 } 3849 if (rack->rc_gp_saw_ca && 3850 (rack->rc_gp_saw_ss == 0) && 3851 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3852 rack->r_ctl.rack_per_of_gp_ca)) { 3853 /* In CA */ 3854 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 3855 if (calc > 0xffff) 3856 calc = 0xffff; 3857 logged |= 2; 3858 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 3859 if (rack->r_ctl.rack_per_upper_bound_ca && 3860 (rack->rc_dragged_bottom == 0) && 3861 (rack->r_ctl.rack_per_of_gp_ca > rack->r_ctl.rack_per_upper_bound_ca)) 3862 rack->r_ctl.rack_per_of_gp_ca = rack->r_ctl.rack_per_upper_bound_ca; 3863 } 3864 if (rack->rc_gp_saw_ss && 3865 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3866 rack->r_ctl.rack_per_of_gp_ss)) { 3867 /* In SS */ 3868 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 3869 if (calc > 0xffff) 3870 calc = 0xffff; 3871 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 3872 if (rack->r_ctl.rack_per_upper_bound_ss && 3873 (rack->rc_dragged_bottom == 0) && 3874 (rack->r_ctl.rack_per_of_gp_ss > rack->r_ctl.rack_per_upper_bound_ss)) 3875 rack->r_ctl.rack_per_of_gp_ss = rack->r_ctl.rack_per_upper_bound_ss; 3876 logged |= 4; 3877 } 3878 if (logged && 3879 (rack->rc_gp_incr == 0)){ 3880 /* Go into increment mode */ 3881 rack->rc_gp_incr = 1; 3882 rack->rc_gp_timely_inc_cnt = 0; 3883 } 3884 if (rack->rc_gp_incr && 3885 logged && 3886 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 3887 rack->rc_gp_timely_inc_cnt++; 3888 } 3889 rack_log_timely(rack, logged, plus, 0, 0, 3890 __LINE__, 1); 3891 } 3892 3893 static uint32_t 3894 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 3895 { 3896 /*- 3897 * norm_grad = rtt_diff / minrtt; 3898 * new_per = curper * (1 - B * norm_grad) 3899 * 3900 * B = rack_gp_decrease_per (default 80%) 3901 * rtt_dif = input var current rtt-diff 3902 * curper = input var current percentage 3903 * minrtt = from rack filter 3904 * 3905 * In order to do the floating point calculations above we 3906 * do an integer conversion. The code looks confusing so let me 3907 * translate it into something that use more variables and 3908 * is clearer for us humans :) 3909 * 3910 * uint64_t norm_grad, inverse, reduce_by, final_result; 3911 * uint32_t perf; 3912 * 3913 * norm_grad = (((uint64_t)rtt_diff * 1000000) / 3914 * (uint64_t)get_filter_small(&rack->r_ctl.rc_gp_min_rtt)); 3915 * inverse = ((uint64_t)rack_gp_decrease * (uint64_t)1000000) * norm_grad; 3916 * inverse /= 1000000; 3917 * reduce_by = (1000000 - inverse); 3918 * final_result = (cur_per * reduce_by) / 1000000; 3919 * perf = (uint32_t)final_result; 3920 */ 3921 uint64_t perf; 3922 3923 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3924 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 3925 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 3926 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 3927 (uint64_t)1000000)) / 3928 (uint64_t)1000000); 3929 if (perf > curper) { 3930 /* TSNH */ 3931 perf = curper - 1; 3932 } 3933 return ((uint32_t)perf); 3934 } 3935 3936 static uint32_t 3937 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 3938 { 3939 /* 3940 * highrttthresh 3941 * result = curper * (1 - (B * ( 1 - ------ )) 3942 * gp_srtt 3943 * 3944 * B = rack_gp_decrease_per (default .8 i.e. 80) 3945 * highrttthresh = filter_min * rack_gp_rtt_maxmul 3946 */ 3947 uint64_t perf; 3948 uint32_t highrttthresh; 3949 3950 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 3951 3952 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3953 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 3954 ((uint64_t)highrttthresh * (uint64_t)1000000) / 3955 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 3956 if (tcp_bblogging_on(rack->rc_tp)) { 3957 uint64_t log1; 3958 3959 log1 = rtt; 3960 log1 <<= 32; 3961 log1 |= highrttthresh; 3962 rack_log_timely(rack, 3963 rack_gp_decrease_per, 3964 (uint64_t)curper, 3965 log1, 3966 perf, 3967 __LINE__, 3968 15); 3969 } 3970 return (perf); 3971 } 3972 3973 static void 3974 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 3975 { 3976 uint64_t logvar, logvar2, logvar3; 3977 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 3978 3979 if (rack->rc_gp_incr) { 3980 /* Turn off increment counting */ 3981 rack->rc_gp_incr = 0; 3982 rack->rc_gp_timely_inc_cnt = 0; 3983 } 3984 ss_red = ca_red = rec_red = 0; 3985 logged = 0; 3986 /* Calculate the reduction value */ 3987 if (rtt_diff < 0) { 3988 rtt_diff *= -1; 3989 } 3990 /* Must be at least 1% reduction */ 3991 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 3992 /* We have been in recovery ding it too */ 3993 if (timely_says == 2) { 3994 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 3995 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3996 if (alt < new_per) 3997 val = alt; 3998 else 3999 val = new_per; 4000 } else 4001 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 4002 if (rack->r_ctl.rack_per_of_gp_rec > val) { 4003 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 4004 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 4005 } else { 4006 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 4007 rec_red = 0; 4008 } 4009 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 4010 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 4011 logged |= 1; 4012 } 4013 if (rack->rc_gp_saw_ss) { 4014 /* Sent in SS */ 4015 if (timely_says == 2) { 4016 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 4017 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 4018 if (alt < new_per) 4019 val = alt; 4020 else 4021 val = new_per; 4022 } else 4023 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 4024 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 4025 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 4026 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 4027 } else { 4028 ss_red = new_per; 4029 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 4030 logvar = new_per; 4031 logvar <<= 32; 4032 logvar |= alt; 4033 logvar2 = (uint32_t)rtt; 4034 logvar2 <<= 32; 4035 logvar2 |= (uint32_t)rtt_diff; 4036 logvar3 = rack_gp_rtt_maxmul; 4037 logvar3 <<= 32; 4038 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4039 rack_log_timely(rack, timely_says, 4040 logvar2, logvar3, 4041 logvar, __LINE__, 10); 4042 } 4043 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 4044 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 4045 logged |= 4; 4046 } else if (rack->rc_gp_saw_ca) { 4047 /* Sent in CA */ 4048 if (timely_says == 2) { 4049 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 4050 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 4051 if (alt < new_per) 4052 val = alt; 4053 else 4054 val = new_per; 4055 } else 4056 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 4057 if (rack->r_ctl.rack_per_of_gp_ca > val) { 4058 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 4059 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val; 4060 } else { 4061 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 4062 ca_red = 0; 4063 logvar = new_per; 4064 logvar <<= 32; 4065 logvar |= alt; 4066 logvar2 = (uint32_t)rtt; 4067 logvar2 <<= 32; 4068 logvar2 |= (uint32_t)rtt_diff; 4069 logvar3 = rack_gp_rtt_maxmul; 4070 logvar3 <<= 32; 4071 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4072 rack_log_timely(rack, timely_says, 4073 logvar2, logvar3, 4074 logvar, __LINE__, 10); 4075 } 4076 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 4077 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 4078 logged |= 2; 4079 } 4080 if (rack->rc_gp_timely_dec_cnt < 0x7) { 4081 rack->rc_gp_timely_dec_cnt++; 4082 if (rack_timely_dec_clear && 4083 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 4084 rack->rc_gp_timely_dec_cnt = 0; 4085 } 4086 logvar = ss_red; 4087 logvar <<= 32; 4088 logvar |= ca_red; 4089 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 4090 __LINE__, 2); 4091 } 4092 4093 static void 4094 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 4095 uint32_t rtt, uint32_t line, uint8_t reas) 4096 { 4097 if (tcp_bblogging_on(rack->rc_tp)) { 4098 union tcp_log_stackspecific log; 4099 struct timeval tv; 4100 4101 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4102 log.u_bbr.flex1 = line; 4103 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 4104 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 4105 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 4106 log.u_bbr.flex5 = rtt; 4107 log.u_bbr.flex6 = rack->rc_highly_buffered; 4108 log.u_bbr.flex6 <<= 1; 4109 log.u_bbr.flex6 |= rack->forced_ack; 4110 log.u_bbr.flex6 <<= 1; 4111 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 4112 log.u_bbr.flex6 <<= 1; 4113 log.u_bbr.flex6 |= rack->in_probe_rtt; 4114 log.u_bbr.flex6 <<= 1; 4115 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 4116 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; 4117 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 4118 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 4119 log.u_bbr.flex8 = reas; 4120 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4121 log.u_bbr.delRate = rack_get_bw(rack); 4122 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 4123 log.u_bbr.cur_del_rate <<= 32; 4124 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 4125 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 4126 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 4127 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 4128 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 4129 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 4130 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 4131 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 4132 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4133 log.u_bbr.rttProp = us_cts; 4134 log.u_bbr.rttProp <<= 32; 4135 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 4136 TCP_LOG_EVENTP(rack->rc_tp, NULL, 4137 &rack->rc_inp->inp_socket->so_rcv, 4138 &rack->rc_inp->inp_socket->so_snd, 4139 BBR_LOG_RTT_SHRINKS, 0, 4140 0, &log, false, &rack->r_ctl.act_rcv_time); 4141 } 4142 } 4143 4144 static void 4145 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 4146 { 4147 uint64_t bwdp; 4148 4149 bwdp = rack_get_bw(rack); 4150 bwdp *= (uint64_t)rtt; 4151 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 4152 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 4153 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) { 4154 /* 4155 * A window protocol must be able to have 4 packets 4156 * outstanding as the floor in order to function 4157 * (especially considering delayed ack :D). 4158 */ 4159 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 4160 } 4161 } 4162 4163 static void 4164 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 4165 { 4166 /** 4167 * ProbeRTT is a bit different in rack_pacing than in 4168 * BBR. It is like BBR in that it uses the lowering of 4169 * the RTT as a signal that we saw something new and 4170 * counts from there for how long between. But it is 4171 * different in that its quite simple. It does not 4172 * play with the cwnd and wait until we get down 4173 * to N segments outstanding and hold that for 4174 * 200ms. Instead it just sets the pacing reduction 4175 * rate to a set percentage (70 by default) and hold 4176 * that for a number of recent GP Srtt's. 4177 */ 4178 uint32_t segsiz; 4179 4180 if (rack->rc_gp_dyn_mul == 0) 4181 return; 4182 4183 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 4184 /* We are idle */ 4185 return; 4186 } 4187 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 4188 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 4189 /* 4190 * Stop the goodput now, the idea here is 4191 * that future measurements with in_probe_rtt 4192 * won't register if they are not greater so 4193 * we want to get what info (if any) is available 4194 * now. 4195 */ 4196 rack_do_goodput_measurement(rack->rc_tp, rack, 4197 rack->rc_tp->snd_una, __LINE__, 4198 RACK_QUALITY_PROBERTT); 4199 } 4200 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 4201 rack->r_ctl.rc_time_probertt_entered = us_cts; 4202 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 4203 rack->r_ctl.rc_pace_min_segs); 4204 rack->in_probe_rtt = 1; 4205 rack->measure_saw_probe_rtt = 1; 4206 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 4207 rack->r_ctl.rc_time_probertt_starts = 0; 4208 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 4209 if (rack_probertt_use_min_rtt_entry) 4210 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 4211 else 4212 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 4213 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4214 __LINE__, RACK_RTTS_ENTERPROBE); 4215 } 4216 4217 static void 4218 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 4219 { 4220 struct rack_sendmap *rsm; 4221 uint32_t segsiz; 4222 4223 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 4224 rack->r_ctl.rc_pace_min_segs); 4225 rack->in_probe_rtt = 0; 4226 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 4227 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 4228 /* 4229 * Stop the goodput now, the idea here is 4230 * that future measurements with in_probe_rtt 4231 * won't register if they are not greater so 4232 * we want to get what info (if any) is available 4233 * now. 4234 */ 4235 rack_do_goodput_measurement(rack->rc_tp, rack, 4236 rack->rc_tp->snd_una, __LINE__, 4237 RACK_QUALITY_PROBERTT); 4238 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 4239 /* 4240 * We don't have enough data to make a measurement. 4241 * So lets just stop and start here after exiting 4242 * probe-rtt. We probably are not interested in 4243 * the results anyway. 4244 */ 4245 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 4246 } 4247 /* 4248 * Measurements through the current snd_max are going 4249 * to be limited by the slower pacing rate. 4250 * 4251 * We need to mark these as app-limited so we 4252 * don't collapse the b/w. 4253 */ 4254 rsm = tqhash_max(rack->r_ctl.tqh); 4255 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 4256 if (rack->r_ctl.rc_app_limited_cnt == 0) 4257 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 4258 else { 4259 /* 4260 * Go out to the end app limited and mark 4261 * this new one as next and move the end_appl up 4262 * to this guy. 4263 */ 4264 if (rack->r_ctl.rc_end_appl) 4265 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 4266 rack->r_ctl.rc_end_appl = rsm; 4267 } 4268 rsm->r_flags |= RACK_APP_LIMITED; 4269 rack->r_ctl.rc_app_limited_cnt++; 4270 } 4271 /* 4272 * Now, we need to examine our pacing rate multipliers. 4273 * If its under 100%, we need to kick it back up to 4274 * 100%. We also don't let it be over our "max" above 4275 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 4276 * Note setting clamp_atexit_prtt to 0 has the effect 4277 * of setting CA/SS to 100% always at exit (which is 4278 * the default behavior). 4279 */ 4280 if (rack_probertt_clear_is) { 4281 rack->rc_gp_incr = 0; 4282 rack->rc_gp_bwred = 0; 4283 rack->rc_gp_timely_inc_cnt = 0; 4284 rack->rc_gp_timely_dec_cnt = 0; 4285 } 4286 /* Do we do any clamping at exit? */ 4287 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 4288 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 4289 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 4290 } 4291 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 4292 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 4293 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 4294 } 4295 /* 4296 * Lets set rtt_diff to 0, so that we will get a "boost" 4297 * after exiting. 4298 */ 4299 rack->r_ctl.rc_rtt_diff = 0; 4300 4301 /* Clear all flags so we start fresh */ 4302 rack->rc_tp->t_bytes_acked = 0; 4303 rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 4304 /* 4305 * If configured to, set the cwnd and ssthresh to 4306 * our targets. 4307 */ 4308 if (rack_probe_rtt_sets_cwnd) { 4309 uint64_t ebdp; 4310 uint32_t setto; 4311 4312 /* Set ssthresh so we get into CA once we hit our target */ 4313 if (rack_probertt_use_min_rtt_exit == 1) { 4314 /* Set to min rtt */ 4315 rack_set_prtt_target(rack, segsiz, 4316 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 4317 } else if (rack_probertt_use_min_rtt_exit == 2) { 4318 /* Set to current gp rtt */ 4319 rack_set_prtt_target(rack, segsiz, 4320 rack->r_ctl.rc_gp_srtt); 4321 } else if (rack_probertt_use_min_rtt_exit == 3) { 4322 /* Set to entry gp rtt */ 4323 rack_set_prtt_target(rack, segsiz, 4324 rack->r_ctl.rc_entry_gp_rtt); 4325 } else { 4326 uint64_t sum; 4327 uint32_t setval; 4328 4329 sum = rack->r_ctl.rc_entry_gp_rtt; 4330 sum *= 10; 4331 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 4332 if (sum >= 20) { 4333 /* 4334 * A highly buffered path needs 4335 * cwnd space for timely to work. 4336 * Lets set things up as if 4337 * we are heading back here again. 4338 */ 4339 setval = rack->r_ctl.rc_entry_gp_rtt; 4340 } else if (sum >= 15) { 4341 /* 4342 * Lets take the smaller of the 4343 * two since we are just somewhat 4344 * buffered. 4345 */ 4346 setval = rack->r_ctl.rc_gp_srtt; 4347 if (setval > rack->r_ctl.rc_entry_gp_rtt) 4348 setval = rack->r_ctl.rc_entry_gp_rtt; 4349 } else { 4350 /* 4351 * Here we are not highly buffered 4352 * and should pick the min we can to 4353 * keep from causing loss. 4354 */ 4355 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4356 } 4357 rack_set_prtt_target(rack, segsiz, 4358 setval); 4359 } 4360 if (rack_probe_rtt_sets_cwnd > 1) { 4361 /* There is a percentage here to boost */ 4362 ebdp = rack->r_ctl.rc_target_probertt_flight; 4363 ebdp *= rack_probe_rtt_sets_cwnd; 4364 ebdp /= 100; 4365 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 4366 } else 4367 setto = rack->r_ctl.rc_target_probertt_flight; 4368 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 4369 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 4370 /* Enforce a min */ 4371 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 4372 } 4373 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 4374 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 4375 } 4376 rack_log_rtt_shrinks(rack, us_cts, 4377 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4378 __LINE__, RACK_RTTS_EXITPROBE); 4379 /* Clear times last so log has all the info */ 4380 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 4381 rack->r_ctl.rc_time_probertt_entered = us_cts; 4382 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 4383 rack->r_ctl.rc_time_of_last_probertt = us_cts; 4384 } 4385 4386 static void 4387 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 4388 { 4389 /* Check in on probe-rtt */ 4390 if (rack->rc_gp_filled == 0) { 4391 /* We do not do p-rtt unless we have gp measurements */ 4392 return; 4393 } 4394 if (rack->in_probe_rtt) { 4395 uint64_t no_overflow; 4396 uint32_t endtime, must_stay; 4397 4398 if (rack->r_ctl.rc_went_idle_time && 4399 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 4400 /* 4401 * We went idle during prtt, just exit now. 4402 */ 4403 rack_exit_probertt(rack, us_cts); 4404 } else if (rack_probe_rtt_safety_val && 4405 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 4406 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 4407 /* 4408 * Probe RTT safety value triggered! 4409 */ 4410 rack_log_rtt_shrinks(rack, us_cts, 4411 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4412 __LINE__, RACK_RTTS_SAFETY); 4413 rack_exit_probertt(rack, us_cts); 4414 } 4415 /* Calculate the max we will wait */ 4416 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 4417 if (rack->rc_highly_buffered) 4418 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 4419 /* Calculate the min we must wait */ 4420 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 4421 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 4422 TSTMP_LT(us_cts, endtime)) { 4423 uint32_t calc; 4424 /* Do we lower more? */ 4425 no_exit: 4426 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 4427 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 4428 else 4429 calc = 0; 4430 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 4431 if (calc) { 4432 /* Maybe */ 4433 calc *= rack_per_of_gp_probertt_reduce; 4434 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 4435 /* Limit it too */ 4436 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 4437 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 4438 } 4439 /* We must reach target or the time set */ 4440 return; 4441 } 4442 if (rack->r_ctl.rc_time_probertt_starts == 0) { 4443 if ((TSTMP_LT(us_cts, must_stay) && 4444 rack->rc_highly_buffered) || 4445 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 4446 rack->r_ctl.rc_target_probertt_flight)) { 4447 /* We are not past the must_stay time */ 4448 goto no_exit; 4449 } 4450 rack_log_rtt_shrinks(rack, us_cts, 4451 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4452 __LINE__, RACK_RTTS_REACHTARGET); 4453 rack->r_ctl.rc_time_probertt_starts = us_cts; 4454 if (rack->r_ctl.rc_time_probertt_starts == 0) 4455 rack->r_ctl.rc_time_probertt_starts = 1; 4456 /* Restore back to our rate we want to pace at in prtt */ 4457 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 4458 } 4459 /* 4460 * Setup our end time, some number of gp_srtts plus 200ms. 4461 */ 4462 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 4463 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 4464 if (rack_probertt_gpsrtt_cnt_div) 4465 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 4466 else 4467 endtime = 0; 4468 endtime += rack_min_probertt_hold; 4469 endtime += rack->r_ctl.rc_time_probertt_starts; 4470 if (TSTMP_GEQ(us_cts, endtime)) { 4471 /* yes, exit probertt */ 4472 rack_exit_probertt(rack, us_cts); 4473 } 4474 4475 } else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) { 4476 /* Go into probertt, its been too long since we went lower */ 4477 rack_enter_probertt(rack, us_cts); 4478 } 4479 } 4480 4481 static void 4482 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 4483 uint32_t rtt, int32_t rtt_diff) 4484 { 4485 uint64_t cur_bw, up_bnd, low_bnd, subfr; 4486 uint32_t losses; 4487 4488 if ((rack->rc_gp_dyn_mul == 0) || 4489 (rack->use_fixed_rate) || 4490 (rack->in_probe_rtt) || 4491 (rack->rc_always_pace == 0)) { 4492 /* No dynamic GP multiplier in play */ 4493 return; 4494 } 4495 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 4496 cur_bw = rack_get_bw(rack); 4497 /* Calculate our up and down range */ 4498 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 4499 up_bnd /= 100; 4500 up_bnd += rack->r_ctl.last_gp_comp_bw; 4501 4502 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 4503 subfr /= 100; 4504 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 4505 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 4506 /* 4507 * This is the case where our RTT is above 4508 * the max target and we have been configured 4509 * to just do timely no bonus up stuff in that case. 4510 * 4511 * There are two configurations, set to 1, and we 4512 * just do timely if we are over our max. If its 4513 * set above 1 then we slam the multipliers down 4514 * to 100 and then decrement per timely. 4515 */ 4516 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4517 __LINE__, 3); 4518 if (rack->r_ctl.rc_no_push_at_mrtt > 1) 4519 rack_validate_multipliers_at_or_below_100(rack); 4520 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4521 } else if ((timely_says != 0) && (last_bw_est < low_bnd) && !losses) { 4522 /* 4523 * We are decreasing this is a bit complicated this 4524 * means we are loosing ground. This could be 4525 * because another flow entered and we are competing 4526 * for b/w with it. This will push the RTT up which 4527 * makes timely unusable unless we want to get shoved 4528 * into a corner and just be backed off (the age 4529 * old problem with delay based CC). 4530 * 4531 * On the other hand if it was a route change we 4532 * would like to stay somewhat contained and not 4533 * blow out the buffers. 4534 */ 4535 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4536 __LINE__, 3); 4537 rack->r_ctl.last_gp_comp_bw = cur_bw; 4538 if (rack->rc_gp_bwred == 0) { 4539 /* Go into reduction counting */ 4540 rack->rc_gp_bwred = 1; 4541 rack->rc_gp_timely_dec_cnt = 0; 4542 } 4543 if (rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) { 4544 /* 4545 * Push another time with a faster pacing 4546 * to try to gain back (we include override to 4547 * get a full raise factor). 4548 */ 4549 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || 4550 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || 4551 (timely_says == 0) || 4552 (rack_down_raise_thresh == 0)) { 4553 /* 4554 * Do an override up in b/w if we were 4555 * below the threshold or if the threshold 4556 * is zero we always do the raise. 4557 */ 4558 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1); 4559 } else { 4560 /* Log it stays the same */ 4561 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0, 4562 __LINE__, 11); 4563 } 4564 rack->rc_gp_timely_dec_cnt++; 4565 /* We are not incrementing really no-count */ 4566 rack->rc_gp_incr = 0; 4567 rack->rc_gp_timely_inc_cnt = 0; 4568 } else { 4569 /* 4570 * Lets just use the RTT 4571 * information and give up 4572 * pushing. 4573 */ 4574 goto use_timely; 4575 } 4576 } else if ((timely_says != 2) && 4577 !losses && 4578 (last_bw_est > up_bnd)) { 4579 /* 4580 * We are increasing b/w lets keep going, updating 4581 * our b/w and ignoring any timely input, unless 4582 * of course we are at our max raise (if there is one). 4583 */ 4584 4585 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4586 __LINE__, 3); 4587 rack->r_ctl.last_gp_comp_bw = cur_bw; 4588 if (rack->rc_gp_saw_ss && 4589 rack->r_ctl.rack_per_upper_bound_ss && 4590 (rack->r_ctl.rack_per_of_gp_ss == rack->r_ctl.rack_per_upper_bound_ss)) { 4591 /* 4592 * In cases where we can't go higher 4593 * we should just use timely. 4594 */ 4595 goto use_timely; 4596 } 4597 if (rack->rc_gp_saw_ca && 4598 rack->r_ctl.rack_per_upper_bound_ca && 4599 (rack->r_ctl.rack_per_of_gp_ca == rack->r_ctl.rack_per_upper_bound_ca)) { 4600 /* 4601 * In cases where we can't go higher 4602 * we should just use timely. 4603 */ 4604 goto use_timely; 4605 } 4606 rack->rc_gp_bwred = 0; 4607 rack->rc_gp_timely_dec_cnt = 0; 4608 /* You get a set number of pushes if timely is trying to reduce */ 4609 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { 4610 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4611 } else { 4612 /* Log it stays the same */ 4613 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0, 4614 __LINE__, 12); 4615 } 4616 return; 4617 } else { 4618 /* 4619 * We are staying between the lower and upper range bounds 4620 * so use timely to decide. 4621 */ 4622 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4623 __LINE__, 3); 4624 use_timely: 4625 if (timely_says) { 4626 rack->rc_gp_incr = 0; 4627 rack->rc_gp_timely_inc_cnt = 0; 4628 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && 4629 !losses && 4630 (last_bw_est < low_bnd)) { 4631 /* We are loosing ground */ 4632 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4633 rack->rc_gp_timely_dec_cnt++; 4634 /* We are not incrementing really no-count */ 4635 rack->rc_gp_incr = 0; 4636 rack->rc_gp_timely_inc_cnt = 0; 4637 } else 4638 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4639 } else { 4640 rack->rc_gp_bwred = 0; 4641 rack->rc_gp_timely_dec_cnt = 0; 4642 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4643 } 4644 } 4645 } 4646 4647 static int32_t 4648 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt) 4649 { 4650 int32_t timely_says; 4651 uint64_t log_mult, log_rtt_a_diff; 4652 4653 log_rtt_a_diff = rtt; 4654 log_rtt_a_diff <<= 32; 4655 log_rtt_a_diff |= (uint32_t)rtt_diff; 4656 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * 4657 rack_gp_rtt_maxmul)) { 4658 /* Reduce the b/w multiplier */ 4659 timely_says = 2; 4660 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 4661 log_mult <<= 32; 4662 log_mult |= prev_rtt; 4663 rack_log_timely(rack, timely_says, log_mult, 4664 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4665 log_rtt_a_diff, __LINE__, 4); 4666 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4667 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4668 max(rack_gp_rtt_mindiv , 1)))) { 4669 /* Increase the b/w multiplier */ 4670 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4671 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4672 max(rack_gp_rtt_mindiv , 1)); 4673 log_mult <<= 32; 4674 log_mult |= prev_rtt; 4675 timely_says = 0; 4676 rack_log_timely(rack, timely_says, log_mult , 4677 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4678 log_rtt_a_diff, __LINE__, 5); 4679 } else { 4680 /* 4681 * Use a gradient to find it the timely gradient 4682 * is: 4683 * grad = rc_rtt_diff / min_rtt; 4684 * 4685 * anything below or equal to 0 will be 4686 * a increase indication. Anything above 4687 * zero is a decrease. Note we take care 4688 * of the actual gradient calculation 4689 * in the reduction (its not needed for 4690 * increase). 4691 */ 4692 log_mult = prev_rtt; 4693 if (rtt_diff <= 0) { 4694 /* 4695 * Rttdiff is less than zero, increase the 4696 * b/w multiplier (its 0 or negative) 4697 */ 4698 timely_says = 0; 4699 rack_log_timely(rack, timely_says, log_mult, 4700 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4701 } else { 4702 /* Reduce the b/w multiplier */ 4703 timely_says = 1; 4704 rack_log_timely(rack, timely_says, log_mult, 4705 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4706 } 4707 } 4708 return (timely_says); 4709 } 4710 4711 static __inline int 4712 rack_in_gp_window(struct tcpcb *tp, struct rack_sendmap *rsm) 4713 { 4714 if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && 4715 SEQ_LEQ(rsm->r_end, tp->gput_ack)) { 4716 /** 4717 * This covers the case that the 4718 * resent is completely inside 4719 * the gp range or up to it. 4720 * |----------------| 4721 * |-----| <or> 4722 * |----| 4723 * <or> |---| 4724 */ 4725 return (1); 4726 } else if (SEQ_LT(rsm->r_start, tp->gput_seq) && 4727 SEQ_GT(rsm->r_end, tp->gput_seq)){ 4728 /** 4729 * This covers the case of 4730 * |--------------| 4731 * |-------->| 4732 */ 4733 return (1); 4734 } else if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && 4735 SEQ_LT(rsm->r_start, tp->gput_ack) && 4736 SEQ_GEQ(rsm->r_end, tp->gput_ack)) { 4737 4738 /** 4739 * This covers the case of 4740 * |--------------| 4741 * |-------->| 4742 */ 4743 return (1); 4744 } 4745 return (0); 4746 } 4747 4748 static __inline void 4749 rack_mark_in_gp_win(struct tcpcb *tp, struct rack_sendmap *rsm) 4750 { 4751 4752 if ((tp->t_flags & TF_GPUTINPROG) == 0) 4753 return; 4754 /* 4755 * We have a Goodput measurement in progress. Mark 4756 * the send if its within the window. If its not 4757 * in the window make sure it does not have the mark. 4758 */ 4759 if (rack_in_gp_window(tp, rsm)) 4760 rsm->r_flags |= RACK_IN_GP_WIN; 4761 else 4762 rsm->r_flags &= ~RACK_IN_GP_WIN; 4763 } 4764 4765 static __inline void 4766 rack_clear_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) 4767 { 4768 /* A GP measurement is ending, clear all marks on the send map*/ 4769 struct rack_sendmap *rsm = NULL; 4770 4771 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 4772 if (rsm == NULL) { 4773 rsm = tqhash_min(rack->r_ctl.tqh); 4774 } 4775 /* Nothing left? */ 4776 while ((rsm != NULL) && (SEQ_GEQ(tp->gput_ack, rsm->r_start))){ 4777 rsm->r_flags &= ~RACK_IN_GP_WIN; 4778 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4779 } 4780 } 4781 4782 4783 static __inline void 4784 rack_tend_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) 4785 { 4786 struct rack_sendmap *rsm = NULL; 4787 4788 if (tp->snd_una == tp->snd_max) { 4789 /* Nothing outstanding yet, nothing to do here */ 4790 return; 4791 } 4792 if (SEQ_GT(tp->gput_seq, tp->snd_una)) { 4793 /* 4794 * We are measuring ahead of some outstanding 4795 * data. We need to walk through up until we get 4796 * to gp_seq marking so that no rsm is set incorrectly 4797 * with RACK_IN_GP_WIN. 4798 */ 4799 rsm = tqhash_min(rack->r_ctl.tqh); 4800 while (rsm != NULL) { 4801 rack_mark_in_gp_win(tp, rsm); 4802 if (SEQ_GEQ(rsm->r_end, tp->gput_seq)) 4803 break; 4804 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4805 } 4806 } 4807 if (rsm == NULL) { 4808 /* 4809 * Need to find the GP seq, if rsm is 4810 * set we stopped as we hit it. 4811 */ 4812 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 4813 if (rsm == NULL) 4814 return; 4815 rack_mark_in_gp_win(tp, rsm); 4816 } 4817 /* 4818 * Now we may need to mark already sent rsm, ahead of 4819 * gput_seq in the window since they may have been sent 4820 * *before* we started our measurment. The rsm, if non-null 4821 * has been marked (note if rsm would have been NULL we would have 4822 * returned in the previous block). So we go to the next, and continue 4823 * until we run out of entries or we exceed the gp_ack value. 4824 */ 4825 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4826 while (rsm) { 4827 rack_mark_in_gp_win(tp, rsm); 4828 if (SEQ_GT(rsm->r_end, tp->gput_ack)) 4829 break; 4830 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4831 } 4832 } 4833 4834 static void 4835 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 4836 tcp_seq th_ack, int line, uint8_t quality) 4837 { 4838 uint64_t tim, bytes_ps, stim, utim; 4839 uint32_t segsiz, bytes, reqbytes, us_cts; 4840 int32_t gput, new_rtt_diff, timely_says; 4841 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 4842 int did_add = 0; 4843 4844 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4845 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4846 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 4847 tim = us_cts - tp->gput_ts; 4848 else 4849 tim = 0; 4850 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 4851 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 4852 else 4853 stim = 0; 4854 /* 4855 * Use the larger of the send time or ack time. This prevents us 4856 * from being influenced by ack artifacts to come up with too 4857 * high of measurement. Note that since we are spanning over many more 4858 * bytes in most of our measurements hopefully that is less likely to 4859 * occur. 4860 */ 4861 if (tim > stim) 4862 utim = max(tim, 1); 4863 else 4864 utim = max(stim, 1); 4865 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 4866 rack_log_gpset(rack, th_ack, us_cts, rack->r_ctl.rc_gp_cumack_ts, __LINE__, 3, NULL); 4867 if ((tim == 0) && (stim == 0)) { 4868 /* 4869 * Invalid measurement time, maybe 4870 * all on one ack/one send? 4871 */ 4872 bytes = 0; 4873 bytes_ps = 0; 4874 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4875 0, 0, 0, 10, __LINE__, NULL, quality); 4876 goto skip_measurement; 4877 } 4878 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 4879 /* We never made a us_rtt measurement? */ 4880 bytes = 0; 4881 bytes_ps = 0; 4882 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4883 0, 0, 0, 10, __LINE__, NULL, quality); 4884 goto skip_measurement; 4885 } 4886 /* 4887 * Calculate the maximum possible b/w this connection 4888 * could have. We base our calculation on the lowest 4889 * rtt we have seen during the measurement and the 4890 * largest rwnd the client has given us in that time. This 4891 * forms a BDP that is the maximum that we could ever 4892 * get to the client. Anything larger is not valid. 4893 * 4894 * I originally had code here that rejected measurements 4895 * where the time was less than 1/2 the latest us_rtt. 4896 * But after thinking on that I realized its wrong since 4897 * say you had a 150Mbps or even 1Gbps link, and you 4898 * were a long way away.. example I am in Europe (100ms rtt) 4899 * talking to my 1Gbps link in S.C. Now measuring say 150,000 4900 * bytes my time would be 1.2ms, and yet my rtt would say 4901 * the measurement was invalid the time was < 50ms. The 4902 * same thing is true for 150Mb (8ms of time). 4903 * 4904 * A better way I realized is to look at what the maximum 4905 * the connection could possibly do. This is gated on 4906 * the lowest RTT we have seen and the highest rwnd. 4907 * We should in theory never exceed that, if we are 4908 * then something on the path is storing up packets 4909 * and then feeding them all at once to our endpoint 4910 * messing up our measurement. 4911 */ 4912 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 4913 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 4914 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 4915 if (SEQ_LT(th_ack, tp->gput_seq)) { 4916 /* No measurement can be made */ 4917 bytes = 0; 4918 bytes_ps = 0; 4919 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4920 0, 0, 0, 10, __LINE__, NULL, quality); 4921 goto skip_measurement; 4922 } else 4923 bytes = (th_ack - tp->gput_seq); 4924 bytes_ps = (uint64_t)bytes; 4925 /* 4926 * Don't measure a b/w for pacing unless we have gotten at least 4927 * an initial windows worth of data in this measurement interval. 4928 * 4929 * Small numbers of bytes get badly influenced by delayed ack and 4930 * other artifacts. Note we take the initial window or our 4931 * defined minimum GP (defaulting to 10 which hopefully is the 4932 * IW). 4933 */ 4934 if (rack->rc_gp_filled == 0) { 4935 /* 4936 * The initial estimate is special. We 4937 * have blasted out an IW worth of packets 4938 * without a real valid ack ts results. We 4939 * then setup the app_limited_needs_set flag, 4940 * this should get the first ack in (probably 2 4941 * MSS worth) to be recorded as the timestamp. 4942 * We thus allow a smaller number of bytes i.e. 4943 * IW - 2MSS. 4944 */ 4945 reqbytes -= (2 * segsiz); 4946 /* Also lets fill previous for our first measurement to be neutral */ 4947 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4948 } 4949 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 4950 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4951 rack->r_ctl.rc_app_limited_cnt, 4952 0, 0, 10, __LINE__, NULL, quality); 4953 goto skip_measurement; 4954 } 4955 /* 4956 * We now need to calculate the Timely like status so 4957 * we can update (possibly) the b/w multipliers. 4958 */ 4959 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 4960 if (rack->rc_gp_filled == 0) { 4961 /* No previous reading */ 4962 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 4963 } else { 4964 if (rack->measure_saw_probe_rtt == 0) { 4965 /* 4966 * We don't want a probertt to be counted 4967 * since it will be negative incorrectly. We 4968 * expect to be reducing the RTT when we 4969 * pace at a slower rate. 4970 */ 4971 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 4972 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 4973 } 4974 } 4975 timely_says = rack_make_timely_judgement(rack, 4976 rack->r_ctl.rc_gp_srtt, 4977 rack->r_ctl.rc_rtt_diff, 4978 rack->r_ctl.rc_prev_gp_srtt 4979 ); 4980 bytes_ps *= HPTS_USEC_IN_SEC; 4981 bytes_ps /= utim; 4982 if (bytes_ps > rack->r_ctl.last_max_bw) { 4983 /* 4984 * Something is on path playing 4985 * since this b/w is not possible based 4986 * on our BDP (highest rwnd and lowest rtt 4987 * we saw in the measurement window). 4988 * 4989 * Another option here would be to 4990 * instead skip the measurement. 4991 */ 4992 rack_log_pacing_delay_calc(rack, bytes, reqbytes, 4993 bytes_ps, rack->r_ctl.last_max_bw, 0, 4994 11, __LINE__, NULL, quality); 4995 bytes_ps = rack->r_ctl.last_max_bw; 4996 } 4997 /* We store gp for b/w in bytes per second */ 4998 if (rack->rc_gp_filled == 0) { 4999 /* Initial measurement */ 5000 if (bytes_ps) { 5001 rack->r_ctl.gp_bw = bytes_ps; 5002 rack->rc_gp_filled = 1; 5003 rack->r_ctl.num_measurements = 1; 5004 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 5005 } else { 5006 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5007 rack->r_ctl.rc_app_limited_cnt, 5008 0, 0, 10, __LINE__, NULL, quality); 5009 } 5010 if (tcp_in_hpts(rack->rc_tp) && 5011 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 5012 /* 5013 * Ok we can't trust the pacer in this case 5014 * where we transition from un-paced to paced. 5015 * Or for that matter when the burst mitigation 5016 * was making a wild guess and got it wrong. 5017 * Stop the pacer and clear up all the aggregate 5018 * delays etc. 5019 */ 5020 tcp_hpts_remove(rack->rc_tp); 5021 rack->r_ctl.rc_hpts_flags = 0; 5022 rack->r_ctl.rc_last_output_to = 0; 5023 } 5024 did_add = 2; 5025 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { 5026 /* Still a small number run an average */ 5027 rack->r_ctl.gp_bw += bytes_ps; 5028 addpart = rack->r_ctl.num_measurements; 5029 rack->r_ctl.num_measurements++; 5030 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 5031 /* We have collected enough to move forward */ 5032 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; 5033 } 5034 rack_set_pace_segments(tp, rack, __LINE__, NULL); 5035 did_add = 3; 5036 } else { 5037 /* 5038 * We want to take 1/wma of the goodput and add in to 7/8th 5039 * of the old value weighted by the srtt. So if your measurement 5040 * period is say 2 SRTT's long you would get 1/4 as the 5041 * value, if it was like 1/2 SRTT then you would get 1/16th. 5042 * 5043 * But we must be careful not to take too much i.e. if the 5044 * srtt is say 20ms and the measurement is taken over 5045 * 400ms our weight would be 400/20 i.e. 20. On the 5046 * other hand if we get a measurement over 1ms with a 5047 * 10ms rtt we only want to take a much smaller portion. 5048 */ 5049 if (rack->r_ctl.num_measurements < 0xff) { 5050 rack->r_ctl.num_measurements++; 5051 } 5052 srtt = (uint64_t)tp->t_srtt; 5053 if (srtt == 0) { 5054 /* 5055 * Strange why did t_srtt go back to zero? 5056 */ 5057 if (rack->r_ctl.rc_rack_min_rtt) 5058 srtt = rack->r_ctl.rc_rack_min_rtt; 5059 else 5060 srtt = HPTS_USEC_IN_MSEC; 5061 } 5062 /* 5063 * XXXrrs: Note for reviewers, in playing with 5064 * dynamic pacing I discovered this GP calculation 5065 * as done originally leads to some undesired results. 5066 * Basically you can get longer measurements contributing 5067 * too much to the WMA. Thus I changed it if you are doing 5068 * dynamic adjustments to only do the aportioned adjustment 5069 * if we have a very small (time wise) measurement. Longer 5070 * measurements just get there weight (defaulting to 1/8) 5071 * add to the WMA. We may want to think about changing 5072 * this to always do that for both sides i.e. dynamic 5073 * and non-dynamic... but considering lots of folks 5074 * were playing with this I did not want to change the 5075 * calculation per.se. without your thoughts.. Lawerence? 5076 * Peter?? 5077 */ 5078 if (rack->rc_gp_dyn_mul == 0) { 5079 subpart = rack->r_ctl.gp_bw * utim; 5080 subpart /= (srtt * 8); 5081 if (subpart < (rack->r_ctl.gp_bw / 2)) { 5082 /* 5083 * The b/w update takes no more 5084 * away then 1/2 our running total 5085 * so factor it in. 5086 */ 5087 addpart = bytes_ps * utim; 5088 addpart /= (srtt * 8); 5089 } else { 5090 /* 5091 * Don't allow a single measurement 5092 * to account for more than 1/2 of the 5093 * WMA. This could happen on a retransmission 5094 * where utim becomes huge compared to 5095 * srtt (multiple retransmissions when using 5096 * the sending rate which factors in all the 5097 * transmissions from the first one). 5098 */ 5099 subpart = rack->r_ctl.gp_bw / 2; 5100 addpart = bytes_ps / 2; 5101 } 5102 resid_bw = rack->r_ctl.gp_bw - subpart; 5103 rack->r_ctl.gp_bw = resid_bw + addpart; 5104 did_add = 1; 5105 } else { 5106 if ((utim / srtt) <= 1) { 5107 /* 5108 * The b/w update was over a small period 5109 * of time. The idea here is to prevent a small 5110 * measurement time period from counting 5111 * too much. So we scale it based on the 5112 * time so it attributes less than 1/rack_wma_divisor 5113 * of its measurement. 5114 */ 5115 subpart = rack->r_ctl.gp_bw * utim; 5116 subpart /= (srtt * rack_wma_divisor); 5117 addpart = bytes_ps * utim; 5118 addpart /= (srtt * rack_wma_divisor); 5119 } else { 5120 /* 5121 * The scaled measurement was long 5122 * enough so lets just add in the 5123 * portion of the measurement i.e. 1/rack_wma_divisor 5124 */ 5125 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; 5126 addpart = bytes_ps / rack_wma_divisor; 5127 } 5128 if ((rack->measure_saw_probe_rtt == 0) || 5129 (bytes_ps > rack->r_ctl.gp_bw)) { 5130 /* 5131 * For probe-rtt we only add it in 5132 * if its larger, all others we just 5133 * add in. 5134 */ 5135 did_add = 1; 5136 resid_bw = rack->r_ctl.gp_bw - subpart; 5137 rack->r_ctl.gp_bw = resid_bw + addpart; 5138 } 5139 } 5140 rack_set_pace_segments(tp, rack, __LINE__, NULL); 5141 } 5142 if ((rack->gp_ready == 0) && 5143 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 5144 /* We have enough measurements now */ 5145 rack->gp_ready = 1; 5146 if (rack->dgp_on || 5147 rack->rack_hibeta) 5148 rack_set_cc_pacing(rack); 5149 if (rack->defer_options) 5150 rack_apply_deferred_options(rack); 5151 } 5152 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 5153 rack_get_bw(rack), 22, did_add, NULL, quality); 5154 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 5155 if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set) 5156 rack_update_multiplier(rack, timely_says, bytes_ps, 5157 rack->r_ctl.rc_gp_srtt, 5158 rack->r_ctl.rc_rtt_diff); 5159 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 5160 rack_get_bw(rack), 3, line, NULL, quality); 5161 rack_log_pacing_delay_calc(rack, 5162 bytes, /* flex2 */ 5163 tim, /* flex1 */ 5164 bytes_ps, /* bw_inuse */ 5165 rack->r_ctl.gp_bw, /* delRate */ 5166 rack_get_lt_bw(rack), /* rttProp */ 5167 20, line, NULL, 0); 5168 /* reset the gp srtt and setup the new prev */ 5169 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 5170 /* Record the lost count for the next measurement */ 5171 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 5172 skip_measurement: 5173 /* 5174 * We restart our diffs based on the gpsrtt in the 5175 * measurement window. 5176 */ 5177 rack->rc_gp_rtt_set = 0; 5178 rack->rc_gp_saw_rec = 0; 5179 rack->rc_gp_saw_ca = 0; 5180 rack->rc_gp_saw_ss = 0; 5181 rack->rc_dragged_bottom = 0; 5182 5183 if (quality == RACK_QUALITY_HIGH) { 5184 /* 5185 * Gput in the stats world is in kbps where bytes_ps is 5186 * bytes per second so we do ((x * 8)/ 1000). 5187 */ 5188 gput = (int32_t)((bytes_ps << 3) / (uint64_t)1000); 5189 #ifdef STATS 5190 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 5191 gput); 5192 /* 5193 * XXXLAS: This is a temporary hack, and should be 5194 * chained off VOI_TCP_GPUT when stats(9) grows an 5195 * API to deal with chained VOIs. 5196 */ 5197 if (tp->t_stats_gput_prev > 0) 5198 stats_voi_update_abs_s32(tp->t_stats, 5199 VOI_TCP_GPUT_ND, 5200 ((gput - tp->t_stats_gput_prev) * 100) / 5201 tp->t_stats_gput_prev); 5202 #endif 5203 tp->t_stats_gput_prev = gput; 5204 } 5205 tp->t_flags &= ~TF_GPUTINPROG; 5206 /* 5207 * Now are we app limited now and there is space from where we 5208 * were to where we want to go? 5209 * 5210 * We don't do the other case i.e. non-applimited here since 5211 * the next send will trigger us picking up the missing data. 5212 */ 5213 if (rack->r_ctl.rc_first_appl && 5214 TCPS_HAVEESTABLISHED(tp->t_state) && 5215 rack->r_ctl.rc_app_limited_cnt && 5216 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 5217 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > 5218 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 5219 /* 5220 * Yep there is enough outstanding to make a measurement here. 5221 */ 5222 struct rack_sendmap *rsm; 5223 5224 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 5225 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 5226 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 5227 rack->app_limited_needs_set = 0; 5228 tp->gput_seq = th_ack; 5229 if (rack->in_probe_rtt) 5230 rack->measure_saw_probe_rtt = 1; 5231 else if ((rack->measure_saw_probe_rtt) && 5232 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 5233 rack->measure_saw_probe_rtt = 0; 5234 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { 5235 /* There is a full window to gain info from */ 5236 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 5237 } else { 5238 /* We can only measure up to the applimited point */ 5239 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); 5240 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 5241 /* 5242 * We don't have enough to make a measurement. 5243 */ 5244 tp->t_flags &= ~TF_GPUTINPROG; 5245 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 5246 0, 0, 0, 6, __LINE__, NULL, quality); 5247 return; 5248 } 5249 } 5250 if (tp->t_state >= TCPS_FIN_WAIT_1) { 5251 /* 5252 * We will get no more data into the SB 5253 * this means we need to have the data available 5254 * before we start a measurement. 5255 */ 5256 if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) { 5257 /* Nope not enough data. */ 5258 return; 5259 } 5260 } 5261 tp->t_flags |= TF_GPUTINPROG; 5262 /* 5263 * Now we need to find the timestamp of the send at tp->gput_seq 5264 * for the send based measurement. 5265 */ 5266 rack->r_ctl.rc_gp_cumack_ts = 0; 5267 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 5268 if (rsm) { 5269 /* Ok send-based limit is set */ 5270 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 5271 /* 5272 * Move back to include the earlier part 5273 * so our ack time lines up right (this may 5274 * make an overlapping measurement but thats 5275 * ok). 5276 */ 5277 tp->gput_seq = rsm->r_start; 5278 } 5279 if (rsm->r_flags & RACK_ACKED) { 5280 struct rack_sendmap *nrsm; 5281 5282 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 5283 tp->gput_seq = rsm->r_end; 5284 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); 5285 if (nrsm) 5286 rsm = nrsm; 5287 else { 5288 rack->app_limited_needs_set = 1; 5289 } 5290 } else 5291 rack->app_limited_needs_set = 1; 5292 /* We always go from the first send */ 5293 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; 5294 } else { 5295 /* 5296 * If we don't find the rsm due to some 5297 * send-limit set the current time, which 5298 * basically disables the send-limit. 5299 */ 5300 struct timeval tv; 5301 5302 microuptime(&tv); 5303 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 5304 } 5305 rack_tend_gp_marks(tp, rack); 5306 rack_log_pacing_delay_calc(rack, 5307 tp->gput_seq, 5308 tp->gput_ack, 5309 (uint64_t)rsm, 5310 tp->gput_ts, 5311 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 5312 9, 5313 __LINE__, rsm, quality); 5314 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 5315 } else { 5316 /* 5317 * To make sure proper timestamp merging occurs, we need to clear 5318 * all GP marks if we don't start a measurement. 5319 */ 5320 rack_clear_gp_marks(tp, rack); 5321 } 5322 } 5323 5324 /* 5325 * CC wrapper hook functions 5326 */ 5327 static void 5328 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 5329 uint16_t type, int32_t recovery) 5330 { 5331 uint32_t prior_cwnd, acked; 5332 struct tcp_log_buffer *lgb = NULL; 5333 uint8_t labc_to_use, quality; 5334 5335 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5336 tp->t_ccv.nsegs = nsegs; 5337 acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una); 5338 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 5339 uint32_t max; 5340 5341 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 5342 if (tp->t_ccv.bytes_this_ack > max) { 5343 tp->t_ccv.bytes_this_ack = max; 5344 } 5345 } 5346 #ifdef STATS 5347 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 5348 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 5349 #endif 5350 if ((th_ack == tp->snd_max) && rack->lt_bw_up) { 5351 /* We will ack all, time 5352 * to end any lt_bw_up we 5353 * have running until something 5354 * new is sent. 5355 */ 5356 struct timeval tv; 5357 5358 rack->r_ctl.lt_bw_bytes += (tp->snd_max - rack->r_ctl.lt_seq); 5359 rack->r_ctl.lt_seq = tp->snd_max; 5360 (void)tcp_get_usecs(&tv); 5361 rack->r_ctl.lt_bw_time += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark); 5362 rack->lt_bw_up = 0; 5363 } 5364 quality = RACK_QUALITY_NONE; 5365 if ((tp->t_flags & TF_GPUTINPROG) && 5366 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { 5367 /* Measure the Goodput */ 5368 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); 5369 } 5370 /* Which way our we limited, if not cwnd limited no advance in CA */ 5371 if (tp->snd_cwnd <= tp->snd_wnd) 5372 tp->t_ccv.flags |= CCF_CWND_LIMITED; 5373 else 5374 tp->t_ccv.flags &= ~CCF_CWND_LIMITED; 5375 if (tp->snd_cwnd > tp->snd_ssthresh) { 5376 tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack, 5377 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 5378 /* For the setting of a window past use the actual scwnd we are using */ 5379 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 5380 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 5381 tp->t_ccv.flags |= CCF_ABC_SENTAWND; 5382 } 5383 } else { 5384 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 5385 tp->t_bytes_acked = 0; 5386 } 5387 prior_cwnd = tp->snd_cwnd; 5388 if ((recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 5389 (rack_client_low_buf && rack->client_bufferlvl && 5390 (rack->client_bufferlvl < rack_client_low_buf))) 5391 labc_to_use = rack->rc_labc; 5392 else 5393 labc_to_use = rack_max_abc_post_recovery; 5394 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5395 union tcp_log_stackspecific log; 5396 struct timeval tv; 5397 5398 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5399 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5400 log.u_bbr.flex1 = th_ack; 5401 log.u_bbr.flex2 = tp->t_ccv.flags; 5402 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 5403 log.u_bbr.flex4 = tp->t_ccv.nsegs; 5404 log.u_bbr.flex5 = labc_to_use; 5405 log.u_bbr.flex6 = prior_cwnd; 5406 log.u_bbr.flex7 = V_tcp_do_newsack; 5407 log.u_bbr.flex8 = 1; 5408 lgb = tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5409 0, &log, false, NULL, __func__, __LINE__,&tv); 5410 } 5411 if (CC_ALGO(tp)->ack_received != NULL) { 5412 /* XXXLAS: Find a way to live without this */ 5413 tp->t_ccv.curack = th_ack; 5414 tp->t_ccv.labc = labc_to_use; 5415 tp->t_ccv.flags |= CCF_USE_LOCAL_ABC; 5416 CC_ALGO(tp)->ack_received(&tp->t_ccv, type); 5417 } 5418 if (lgb) { 5419 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; 5420 } 5421 if (rack->r_must_retran) { 5422 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { 5423 /* 5424 * We now are beyond the rxt point so lets disable 5425 * the flag. 5426 */ 5427 rack->r_ctl.rc_out_at_rto = 0; 5428 rack->r_must_retran = 0; 5429 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { 5430 /* 5431 * Only decrement the rc_out_at_rto if the cwnd advances 5432 * at least a whole segment. Otherwise next time the peer 5433 * acks, we won't be able to send this generaly happens 5434 * when we are in Congestion Avoidance. 5435 */ 5436 if (acked <= rack->r_ctl.rc_out_at_rto){ 5437 rack->r_ctl.rc_out_at_rto -= acked; 5438 } else { 5439 rack->r_ctl.rc_out_at_rto = 0; 5440 } 5441 } 5442 } 5443 #ifdef STATS 5444 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); 5445 #endif 5446 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { 5447 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; 5448 } 5449 } 5450 5451 static void 5452 tcp_rack_partialack(struct tcpcb *tp) 5453 { 5454 struct tcp_rack *rack; 5455 5456 rack = (struct tcp_rack *)tp->t_fb_ptr; 5457 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5458 /* 5459 * If we are doing PRR and have enough 5460 * room to send <or> we are pacing and prr 5461 * is disabled we will want to see if we 5462 * can send data (by setting r_wanted_output to 5463 * true). 5464 */ 5465 if ((rack->r_ctl.rc_prr_sndcnt > 0) || 5466 rack->rack_no_prr) 5467 rack->r_wanted_output = 1; 5468 } 5469 5470 static inline void 5471 rack_set_most_aggr(struct tcp_rack *rack) 5472 { 5473 rack->r_fill_less_agg = 0; 5474 /* Once the cwnd as been clamped we don't do fill_cw */ 5475 if (rack->r_cwnd_was_clamped == 0) 5476 rack->rc_pace_to_cwnd = 1; 5477 rack->r_pacing_discount = 0; 5478 } 5479 5480 static inline void 5481 rack_limit_fillcw(struct tcp_rack *rack) 5482 { 5483 rack->r_fill_less_agg = 1; 5484 /* Once the cwnd as been clamped we don't do fill_cw */ 5485 if (rack->r_cwnd_was_clamped == 0) 5486 rack->rc_pace_to_cwnd = 1; 5487 rack->r_pacing_discount = 0; 5488 } 5489 5490 static inline void 5491 rack_disable_fillcw(struct tcp_rack *rack) 5492 { 5493 rack->r_fill_less_agg = 1; 5494 rack->rc_pace_to_cwnd = 0; 5495 rack->r_pacing_discount = 0; 5496 } 5497 5498 static void 5499 rack_client_buffer_level_set(struct tcp_rack *rack) 5500 { 5501 /* 5502 * Only if DGP is on do we do anything that 5503 * changes stack behavior. If DGP is off all 5504 * we will do is issue a BB log (if BB logging is 5505 * on) and return. 5506 */ 5507 if (rack->dgp_on == 0) { 5508 rack_log_pacing_delay_calc(rack, 0, rack->client_bufferlvl, 5509 0, 0, 0, 30, __LINE__, NULL, 0); 5510 return; 5511 } 5512 if (IN_RECOVERY(rack->rc_tp->t_flags) && rack->r_ctl.full_dgp_in_rec) { 5513 goto set_most_agg; 5514 } 5515 /* 5516 * We are in DGP so what setting should we 5517 * apply based on where the client is? 5518 */ 5519 switch(rack->r_ctl.rc_dgp_bl_agg) { 5520 default: 5521 case DGP_LEVEL0: 5522 set_most_agg: 5523 rack_set_most_aggr(rack); 5524 break; 5525 case DGP_LEVEL1: 5526 if (rack->client_bufferlvl == 4) 5527 rack_limit_fillcw(rack); 5528 else if (rack->client_bufferlvl == 5) 5529 rack_disable_fillcw(rack); 5530 else 5531 rack_set_most_aggr(rack); 5532 break; 5533 case DGP_LEVEL2: 5534 if (rack->client_bufferlvl == 3) 5535 rack_limit_fillcw(rack); 5536 else if (rack->client_bufferlvl == 4) 5537 rack_disable_fillcw(rack); 5538 else if (rack->client_bufferlvl == 5) { 5539 rack_disable_fillcw(rack); 5540 rack->r_pacing_discount = 1; 5541 rack->r_ctl.pacing_discount_amm = 1; 5542 } else 5543 rack_set_most_aggr(rack); 5544 break; 5545 case DGP_LEVEL3: 5546 if (rack->client_bufferlvl == 2) 5547 rack_limit_fillcw(rack); 5548 else if (rack->client_bufferlvl == 3) 5549 rack_disable_fillcw(rack); 5550 else if (rack->client_bufferlvl == 4) { 5551 rack_disable_fillcw(rack); 5552 rack->r_pacing_discount = 1; 5553 rack->r_ctl.pacing_discount_amm = 1; 5554 } else if (rack->client_bufferlvl == 5) { 5555 rack_disable_fillcw(rack); 5556 rack->r_pacing_discount = 1; 5557 rack->r_ctl.pacing_discount_amm = 2; 5558 } else 5559 rack_set_most_aggr(rack); 5560 break; 5561 } 5562 rack_log_pacing_delay_calc(rack, rack->r_ctl.rc_dgp_bl_agg, rack->client_bufferlvl, 0, 5563 0, 0, 30, __LINE__, NULL, 0); 5564 } 5565 5566 static void 5567 do_rack_check_for_unclamp(struct tcpcb *tp, struct tcp_rack *rack) 5568 { 5569 /* 5570 * Can we unclamp. We unclamp if more than 5571 * N rounds have transpired with no loss. 5572 */ 5573 uint64_t snds, rxts, rxt_per; 5574 uint32_t rnds; 5575 5576 rnds = rack->r_ctl.current_round - rack->r_ctl.last_rnd_rxt_clamped; 5577 if ((rack_unclamp_round_thresh > 0) && 5578 (rnds >= rack_unclamp_round_thresh)) { 5579 snds = tp->t_sndbytes - rack->r_ctl.last_sndbytes; 5580 KASSERT ((snds > 0), ("rack:%p tp:%p snds:%ju is 0", rack, tp, 5581 (uintmax_t)snds)); 5582 rxts = tp->t_snd_rxt_bytes - rack->r_ctl.last_snd_rxt_bytes; 5583 rxt_per = rxts * 1000; 5584 rxt_per /= snds; 5585 if ((uint32_t)rxt_per <= rack_unclamp_rxt_thresh) { 5586 /* Unclamp */ 5587 if (tcp_bblogging_on(rack->rc_tp)) { 5588 union tcp_log_stackspecific log; 5589 struct timeval tv; 5590 5591 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5592 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5593 log.u_bbr.flex3 = rnds; 5594 log.u_bbr.flex4 = rack_unclamp_round_thresh; 5595 log.u_bbr.flex5 = (uint32_t)rxt_per; 5596 log.u_bbr.flex8 = 6; 5597 log.u_bbr.pkt_epoch = rack->r_ctl.rc_pace_max_segs; 5598 log.u_bbr.bbr_state = rack->rc_pace_to_cwnd; 5599 log.u_bbr.delivered = rack->r_ctl.num_of_clamps_applied; 5600 log.u_bbr.applimited = rack->r_ctl.max_clamps; 5601 log.u_bbr.epoch = rack->r_ctl.clamp_options; 5602 log.u_bbr.cur_del_rate = rxts; 5603 log.u_bbr.bw_inuse = rack_get_lt_bw(rack); 5604 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 5605 log.u_bbr.lt_epoch = (uint32_t)((rack->r_ctl.gp_bw >> 32) & 0x00000000ffffffff); 5606 log.u_bbr.pkts_out = (uint32_t)(rack->r_ctl.gp_bw & 0x00000000ffffffff); 5607 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5608 0, &log, false, NULL, NULL, 0, &tv); 5609 } 5610 rack->r_ctl.num_of_clamps_applied = 0; 5611 rack->r_cwnd_was_clamped = 0; 5612 rack->excess_rxt_on = 1; 5613 if (rack->r_ctl.clamp_options) { 5614 /* 5615 * We only allow fillcw to be toggled 5616 * if you are setting a max seg too. 5617 */ 5618 if (rack->r_ctl.clamp_options & 0x1) { 5619 if ((rack->rc_pace_to_cwnd == 0) && (rack->dgp_on == 0)) { 5620 /* turn on fill cw for non-dgp*/ 5621 rack->rc_pace_to_cwnd = 0; 5622 } else if ((rack->dgp_on == 1) && (rack->rc_pace_to_cwnd == 1)) { 5623 /* For DGP we want it off */ 5624 rack->rc_pace_to_cwnd = 1; 5625 } 5626 } 5627 } 5628 if (rack->dgp_on) { 5629 /* Reset all multipliers to 100.0 so just the measured bw */ 5630 /* Crash any per boosts down to 100% */ 5631 rack->r_ctl.rack_per_of_gp_rec = 100; 5632 rack->r_ctl.rack_per_of_gp_ss = 100; 5633 rack->r_ctl.rack_per_of_gp_ca = 100; 5634 /* Set in an upper bound for ss/ca % increase */ 5635 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss; 5636 rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_per_upper_bound_ca; 5637 } 5638 } 5639 } 5640 } 5641 5642 static void 5643 do_rack_excess_rxt(struct tcpcb *tp, struct tcp_rack *rack) 5644 { 5645 /* 5646 * Rack excess rxt accounting is turned on. If we 5647 * are above a threshold of rxt's in at least N 5648 * rounds, then back off the cwnd and ssthresh 5649 * to fit into the long-term b/w. 5650 */ 5651 uint64_t snds, rxts, rxt_per, lt_bw, bdp; 5652 uint32_t rnds, new_cwnd, new_ssthresh, rtt, shared_cwnd_was_enabled = 0; 5653 5654 /* Is it shut off by 0 rounds? */ 5655 if (rack_rxt_min_rnds == 0) 5656 return; 5657 if ((rack->r_ctl.max_clamps > 0) && 5658 (rack->r_ctl.num_of_clamps_applied >= rack->r_ctl.max_clamps)) { 5659 /* 5660 * The idea, if max_clamps is set, is that if clamping it 5661 * N times did not work again, then there is no sense 5662 * clamping it again. The link is just a lossy link and 5663 * our clamps are doing no good. Turn it off so we don't come 5664 * back here again. 5665 */ 5666 rack->excess_rxt_on = 0; 5667 rack->r_cwnd_was_clamped = 0; 5668 rack->r_ctl.num_of_clamps_applied = 0; 5669 return; 5670 } 5671 snds = tp->t_sndbytes - rack->r_ctl.last_sndbytes; 5672 rxts = tp->t_snd_rxt_bytes - rack->r_ctl.last_snd_rxt_bytes; 5673 rnds = rack->r_ctl.current_round - rack->r_ctl.last_rnd_rxt_clamped; 5674 /* Has enough rounds progressed for us to re-measure? */ 5675 if ((rnds >= rack_rxt_min_rnds) && 5676 (rack->r_ctl.rxt_threshold > 0)){ 5677 rxt_per = rxts * 1000; 5678 rxt_per /= snds; 5679 if (rxt_per >= rack->r_ctl.rxt_threshold) { 5680 /* 5681 * Action required: 5682 * We are above our excess retransmit level, lets 5683 * cut down the cwnd and ssthresh to match the long-term 5684 * b/w we are getting. 5685 */ 5686 /* First disable scwnd if enabled */ 5687 #ifdef NETFLIX_SHARED_CWND 5688 rack->rack_enable_scwnd = 0; 5689 if (rack->r_ctl.rc_scw) { 5690 uint32_t limit; 5691 5692 shared_cwnd_was_enabled = 1; 5693 if (rack->r_limit_scw) 5694 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 5695 else 5696 limit = 0; 5697 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 5698 rack->r_ctl.rc_scw_index, 5699 limit); 5700 rack->r_ctl.rc_scw = NULL; 5701 } 5702 5703 #endif 5704 /* Calculate what the cwnd and ssthresh should be */ 5705 tcp_trace_point(rack->rc_tp, TCP_TP_EXCESS_RXT); 5706 lt_bw = rack_get_lt_bw(rack); 5707 if (lt_bw == 0) { 5708 /* 5709 * No lt_bw, lets chop things to one MSS 5710 * and the ssthresh to the iwnd. 5711 */ 5712 reset_to_iw: 5713 new_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 5714 new_ssthresh = tcp_compute_initwnd(tcp_maxseg(tp)); 5715 } else { 5716 rtt = rack->rc_rack_rtt; 5717 if (rtt == 0) { 5718 /* If we have no rack_rtt drop to the IW situation */ 5719 goto reset_to_iw; 5720 } 5721 bdp = lt_bw * (uint64_t)rtt; 5722 bdp /= HPTS_USEC_IN_SEC; 5723 new_cwnd = (uint32_t)bdp; 5724 new_ssthresh = new_cwnd - 1; 5725 if (new_cwnd < ctf_fixed_maxseg(tp)) { 5726 /* Rock bottom, goto IW settings */ 5727 goto reset_to_iw; 5728 } 5729 } 5730 rack->r_cwnd_was_clamped = 1; 5731 rack->r_ctl.num_of_clamps_applied++; 5732 /* Reset the counter fromn now */ 5733 tp->t_bytes_acked = 0; 5734 /* 5735 * Now what about options? 5736 * We look at the bottom 8 bits: 5737 * F = fill cw bit (toggle it if set) 5738 * S = Segment bits 5739 * M = set max segment bit 5740 * 5741 * SSSS SSMF 5742 */ 5743 if (rack->r_ctl.clamp_options) { 5744 if (rack->r_ctl.clamp_options & 0x1) { 5745 if ((rack->rc_pace_to_cwnd == 0) && (rack->dgp_on == 0)) { 5746 /* turn on fill cw for non-dgp*/ 5747 rack->rc_pace_to_cwnd = 1; 5748 } else if ((rack->dgp_on == 1) && (rack->rc_pace_to_cwnd == 1)) { 5749 /* For DGP we want it off */ 5750 rack->rc_pace_to_cwnd = 0; 5751 } 5752 } 5753 } 5754 if (rack->dgp_on) { 5755 /* Reset all multipliers to 100.0 so just the measured bw */ 5756 /* Crash any per boosts down to 100% */ 5757 rack->r_ctl.rack_per_of_gp_rec = 100; 5758 rack->r_ctl.rack_per_of_gp_ss = 100; 5759 rack->r_ctl.rack_per_of_gp_ca = 100; 5760 /* Set in an upper bound for ss/ca % increase */ 5761 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_clamp_ss_upper; 5762 rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_clamp_ca_upper; 5763 /* Now move to the lt_bw */ 5764 rack->r_ctl.gp_bw = lt_bw; 5765 rack->rc_gp_filled = 1; 5766 rack->r_ctl.num_measurements = RACK_REQ_AVG; 5767 } 5768 if (tcp_bblogging_on(rack->rc_tp)) { 5769 union tcp_log_stackspecific log; 5770 struct timeval tv; 5771 5772 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5773 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5774 log.u_bbr.flex1 = new_cwnd; 5775 log.u_bbr.flex2 = new_ssthresh; 5776 log.u_bbr.flex3 = rnds; 5777 log.u_bbr.flex4 = rack_rxt_min_rnds; 5778 log.u_bbr.flex5 = rtt; 5779 log.u_bbr.flex6 = shared_cwnd_was_enabled; 5780 log.u_bbr.flex8 = 5; 5781 log.u_bbr.pkt_epoch = rack->r_ctl.rc_pace_max_segs; 5782 log.u_bbr.bbr_state = rack->rc_pace_to_cwnd; 5783 log.u_bbr.delivered = rack->r_ctl.num_of_clamps_applied; 5784 log.u_bbr.applimited = rack->r_ctl.max_clamps; 5785 log.u_bbr.epoch = rack->r_ctl.clamp_options; 5786 log.u_bbr.cur_del_rate = rxts; 5787 log.u_bbr.delRate = snds; 5788 log.u_bbr.rttProp = rack->r_ctl.rxt_threshold; 5789 log.u_bbr.bw_inuse = lt_bw; 5790 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 5791 log.u_bbr.lt_epoch = (uint32_t)((rack->r_ctl.gp_bw >> 32) & 0x00000000ffffffff); 5792 log.u_bbr.pkts_out = (uint32_t)(rack->r_ctl.gp_bw & 0x00000000ffffffff); 5793 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5794 0, &log, false, NULL, NULL, 0, &tv); 5795 } 5796 /* Update our point where we did it */ 5797 if (rack->r_ctl.already_had_a_excess == 0) { 5798 rack->r_ctl.already_had_a_excess = 1; 5799 counter_u64_add(rack_rxt_clamps_cwnd_uniq, 1); 5800 } 5801 counter_u64_add(rack_rxt_clamps_cwnd, 1); 5802 rack->r_ctl.last_sndbytes = tp->t_sndbytes; 5803 rack->r_ctl.last_snd_rxt_bytes = tp->t_snd_rxt_bytes; 5804 rack->r_ctl.last_rnd_rxt_clamped = rack->r_ctl.current_round; 5805 if (new_cwnd < tp->snd_cwnd) 5806 tp->snd_cwnd = new_cwnd; 5807 if (new_ssthresh < tp->snd_ssthresh) 5808 tp->snd_ssthresh = new_ssthresh; 5809 } 5810 } 5811 } 5812 5813 static void 5814 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack) 5815 { 5816 struct tcp_rack *rack; 5817 uint32_t orig_cwnd; 5818 5819 orig_cwnd = tp->snd_cwnd; 5820 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5821 rack = (struct tcp_rack *)tp->t_fb_ptr; 5822 /* only alert CC if we alerted when we entered */ 5823 if (CC_ALGO(tp)->post_recovery != NULL) { 5824 tp->t_ccv.curack = th_ack; 5825 CC_ALGO(tp)->post_recovery(&tp->t_ccv); 5826 if (tp->snd_cwnd < tp->snd_ssthresh) { 5827 /* 5828 * Rack has burst control and pacing 5829 * so lets not set this any lower than 5830 * snd_ssthresh per RFC-6582 (option 2). 5831 */ 5832 tp->snd_cwnd = tp->snd_ssthresh; 5833 } 5834 } 5835 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5836 union tcp_log_stackspecific log; 5837 struct timeval tv; 5838 5839 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5840 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5841 log.u_bbr.flex1 = th_ack; 5842 log.u_bbr.flex2 = tp->t_ccv.flags; 5843 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 5844 log.u_bbr.flex4 = tp->t_ccv.nsegs; 5845 log.u_bbr.flex5 = V_tcp_abc_l_var; 5846 log.u_bbr.flex6 = orig_cwnd; 5847 log.u_bbr.flex7 = V_tcp_do_newsack; 5848 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 5849 log.u_bbr.flex8 = 2; 5850 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5851 0, &log, false, NULL, __func__, __LINE__, &tv); 5852 } 5853 if ((rack->rack_no_prr == 0) && 5854 (rack->no_prr_addback == 0) && 5855 (rack->r_ctl.rc_prr_sndcnt > 0)) { 5856 /* 5857 * Suck the next prr cnt back into cwnd, but 5858 * only do that if we are not application limited. 5859 */ 5860 if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) { 5861 /* 5862 * We are allowed to add back to the cwnd the amount we did 5863 * not get out if: 5864 * a) no_prr_addback is off. 5865 * b) we are not app limited 5866 * c) we are doing prr 5867 * <and> 5868 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 5869 */ 5870 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 5871 rack->r_ctl.rc_prr_sndcnt); 5872 } 5873 rack->r_ctl.rc_prr_sndcnt = 0; 5874 rack_log_to_prr(rack, 1, 0, __LINE__); 5875 } 5876 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 5877 tp->snd_recover = tp->snd_una; 5878 if (rack->r_ctl.dsack_persist) { 5879 rack->r_ctl.dsack_persist--; 5880 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 5881 rack->r_ctl.num_dsack = 0; 5882 } 5883 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 5884 } 5885 EXIT_RECOVERY(tp->t_flags); 5886 if (rack->r_ctl.full_dgp_in_rec) 5887 rack_client_buffer_level_set(rack); 5888 } 5889 5890 static void 5891 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line) 5892 { 5893 struct tcp_rack *rack; 5894 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 5895 5896 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5897 #ifdef STATS 5898 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 5899 #endif 5900 if (IN_RECOVERY(tp->t_flags) == 0) { 5901 in_rec_at_entry = 0; 5902 ssthresh_enter = tp->snd_ssthresh; 5903 cwnd_enter = tp->snd_cwnd; 5904 } else 5905 in_rec_at_entry = 1; 5906 rack = (struct tcp_rack *)tp->t_fb_ptr; 5907 switch (type) { 5908 case CC_NDUPACK: 5909 tp->t_flags &= ~TF_WASFRECOVERY; 5910 tp->t_flags &= ~TF_WASCRECOVERY; 5911 if (!IN_FASTRECOVERY(tp->t_flags)) { 5912 if (rack->dgp_on && rack->r_cwnd_was_clamped) { 5913 /* Reset the gains so that on exit we will be softer longer */ 5914 rack->r_ctl.rack_per_of_gp_rec = 100; 5915 rack->r_ctl.rack_per_of_gp_ss = 98; 5916 rack->r_ctl.rack_per_of_gp_ca = 98; 5917 } 5918 rack->r_ctl.rc_prr_delivered = 0; 5919 rack->r_ctl.rc_prr_out = 0; 5920 rack->r_fast_output = 0; 5921 if (rack->rack_no_prr == 0) { 5922 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 5923 rack_log_to_prr(rack, 2, in_rec_at_entry, line); 5924 } 5925 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 5926 tp->snd_recover = tp->snd_max; 5927 if (tp->t_flags2 & TF2_ECN_PERMIT) 5928 tp->t_flags2 |= TF2_ECN_SND_CWR; 5929 } 5930 break; 5931 case CC_ECN: 5932 if (!IN_CONGRECOVERY(tp->t_flags) || 5933 /* 5934 * Allow ECN reaction on ACK to CWR, if 5935 * that data segment was also CE marked. 5936 */ 5937 SEQ_GEQ(ack, tp->snd_recover)) { 5938 EXIT_CONGRECOVERY(tp->t_flags); 5939 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 5940 rack->r_fast_output = 0; 5941 tp->snd_recover = tp->snd_max + 1; 5942 if (tp->t_flags2 & TF2_ECN_PERMIT) 5943 tp->t_flags2 |= TF2_ECN_SND_CWR; 5944 } 5945 break; 5946 case CC_RTO: 5947 tp->t_dupacks = 0; 5948 tp->t_bytes_acked = 0; 5949 rack->r_fast_output = 0; 5950 EXIT_RECOVERY(tp->t_flags); 5951 tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 5952 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 5953 orig_cwnd = tp->snd_cwnd; 5954 tp->snd_cwnd = ctf_fixed_maxseg(tp); 5955 rack_log_to_prr(rack, 16, orig_cwnd, line); 5956 if (tp->t_flags2 & TF2_ECN_PERMIT) 5957 tp->t_flags2 |= TF2_ECN_SND_CWR; 5958 break; 5959 case CC_RTO_ERR: 5960 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 5961 /* RTO was unnecessary, so reset everything. */ 5962 tp->snd_cwnd = tp->snd_cwnd_prev; 5963 tp->snd_ssthresh = tp->snd_ssthresh_prev; 5964 tp->snd_recover = tp->snd_recover_prev; 5965 if (tp->t_flags & TF_WASFRECOVERY) { 5966 ENTER_FASTRECOVERY(tp->t_flags); 5967 tp->t_flags &= ~TF_WASFRECOVERY; 5968 } 5969 if (tp->t_flags & TF_WASCRECOVERY) { 5970 ENTER_CONGRECOVERY(tp->t_flags); 5971 tp->t_flags &= ~TF_WASCRECOVERY; 5972 } 5973 tp->snd_nxt = tp->snd_max; 5974 tp->t_badrxtwin = 0; 5975 break; 5976 } 5977 if ((CC_ALGO(tp)->cong_signal != NULL) && 5978 (type != CC_RTO)){ 5979 tp->t_ccv.curack = ack; 5980 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type); 5981 } 5982 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 5983 rack_log_to_prr(rack, 15, cwnd_enter, line); 5984 if (rack->r_ctl.full_dgp_in_rec) 5985 rack_client_buffer_level_set(rack); 5986 rack->r_ctl.dsack_byte_cnt = 0; 5987 rack->r_ctl.retran_during_recovery = 0; 5988 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 5989 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 5990 rack->r_ent_rec_ns = 1; 5991 } 5992 } 5993 5994 static inline void 5995 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 5996 { 5997 uint32_t i_cwnd; 5998 5999 INP_WLOCK_ASSERT(tptoinpcb(tp)); 6000 6001 if (CC_ALGO(tp)->after_idle != NULL) 6002 CC_ALGO(tp)->after_idle(&tp->t_ccv); 6003 6004 if (tp->snd_cwnd == 1) 6005 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 6006 else 6007 i_cwnd = rc_init_window(rack); 6008 6009 /* 6010 * Being idle is no different than the initial window. If the cc 6011 * clamps it down below the initial window raise it to the initial 6012 * window. 6013 */ 6014 if (tp->snd_cwnd < i_cwnd) { 6015 tp->snd_cwnd = i_cwnd; 6016 } 6017 } 6018 6019 /* 6020 * Indicate whether this ack should be delayed. We can delay the ack if 6021 * following conditions are met: 6022 * - There is no delayed ack timer in progress. 6023 * - Our last ack wasn't a 0-sized window. We never want to delay 6024 * the ack that opens up a 0-sized window. 6025 * - LRO wasn't used for this segment. We make sure by checking that the 6026 * segment size is not larger than the MSS. 6027 * - Delayed acks are enabled or this is a half-synchronized T/TCP 6028 * connection. 6029 */ 6030 #define DELAY_ACK(tp, tlen) \ 6031 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 6032 ((tp->t_flags & TF_DELACK) == 0) && \ 6033 (tlen <= tp->t_maxseg) && \ 6034 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 6035 6036 static struct rack_sendmap * 6037 rack_find_lowest_rsm(struct tcp_rack *rack) 6038 { 6039 struct rack_sendmap *rsm; 6040 6041 /* 6042 * Walk the time-order transmitted list looking for an rsm that is 6043 * not acked. This will be the one that was sent the longest time 6044 * ago that is still outstanding. 6045 */ 6046 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 6047 if (rsm->r_flags & RACK_ACKED) { 6048 continue; 6049 } 6050 goto finish; 6051 } 6052 finish: 6053 return (rsm); 6054 } 6055 6056 static struct rack_sendmap * 6057 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm) 6058 { 6059 struct rack_sendmap *prsm; 6060 6061 /* 6062 * Walk the sequence order list backward until we hit and arrive at 6063 * the highest seq not acked. In theory when this is called it 6064 * should be the last segment (which it was not). 6065 */ 6066 prsm = rsm; 6067 6068 TQHASH_FOREACH_REVERSE_FROM(prsm, rack->r_ctl.tqh) { 6069 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) { 6070 continue; 6071 } 6072 return (prsm); 6073 } 6074 return (NULL); 6075 } 6076 6077 static uint32_t 6078 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts) 6079 { 6080 int32_t lro; 6081 uint32_t thresh; 6082 6083 /* 6084 * lro is the flag we use to determine if we have seen reordering. 6085 * If it gets set we have seen reordering. The reorder logic either 6086 * works in one of two ways: 6087 * 6088 * If reorder-fade is configured, then we track the last time we saw 6089 * re-ordering occur. If we reach the point where enough time as 6090 * passed we no longer consider reordering has occuring. 6091 * 6092 * Or if reorder-face is 0, then once we see reordering we consider 6093 * the connection to alway be subject to reordering and just set lro 6094 * to 1. 6095 * 6096 * In the end if lro is non-zero we add the extra time for 6097 * reordering in. 6098 */ 6099 if (srtt == 0) 6100 srtt = 1; 6101 if (rack->r_ctl.rc_reorder_ts) { 6102 if (rack->r_ctl.rc_reorder_fade) { 6103 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) { 6104 lro = cts - rack->r_ctl.rc_reorder_ts; 6105 if (lro == 0) { 6106 /* 6107 * No time as passed since the last 6108 * reorder, mark it as reordering. 6109 */ 6110 lro = 1; 6111 } 6112 } else { 6113 /* Negative time? */ 6114 lro = 0; 6115 } 6116 if (lro > rack->r_ctl.rc_reorder_fade) { 6117 /* Turn off reordering seen too */ 6118 rack->r_ctl.rc_reorder_ts = 0; 6119 lro = 0; 6120 } 6121 } else { 6122 /* Reodering does not fade */ 6123 lro = 1; 6124 } 6125 } else { 6126 lro = 0; 6127 } 6128 if (rack->rc_rack_tmr_std_based == 0) { 6129 thresh = srtt + rack->r_ctl.rc_pkt_delay; 6130 } else { 6131 /* Standards based pkt-delay is 1/4 srtt */ 6132 thresh = srtt + (srtt >> 2); 6133 } 6134 if (lro && (rack->rc_rack_tmr_std_based == 0)) { 6135 /* It must be set, if not you get 1/4 rtt */ 6136 if (rack->r_ctl.rc_reorder_shift) 6137 thresh += (srtt >> rack->r_ctl.rc_reorder_shift); 6138 else 6139 thresh += (srtt >> 2); 6140 } 6141 if (rack->rc_rack_use_dsack && 6142 lro && 6143 (rack->r_ctl.num_dsack > 0)) { 6144 /* 6145 * We only increase the reordering window if we 6146 * have seen reordering <and> we have a DSACK count. 6147 */ 6148 thresh += rack->r_ctl.num_dsack * (srtt >> 2); 6149 rack_log_dsack_event(rack, 4, __LINE__, srtt, thresh); 6150 } 6151 /* SRTT * 2 is the ceiling */ 6152 if (thresh > (srtt * 2)) { 6153 thresh = srtt * 2; 6154 } 6155 /* And we don't want it above the RTO max either */ 6156 if (thresh > rack_rto_max) { 6157 thresh = rack_rto_max; 6158 } 6159 rack_log_dsack_event(rack, 6, __LINE__, srtt, thresh); 6160 return (thresh); 6161 } 6162 6163 static uint32_t 6164 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 6165 struct rack_sendmap *rsm, uint32_t srtt) 6166 { 6167 struct rack_sendmap *prsm; 6168 uint32_t thresh, len; 6169 int segsiz; 6170 6171 if (srtt == 0) 6172 srtt = 1; 6173 if (rack->r_ctl.rc_tlp_threshold) 6174 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 6175 else 6176 thresh = (srtt * 2); 6177 6178 /* Get the previous sent packet, if any */ 6179 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 6180 len = rsm->r_end - rsm->r_start; 6181 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 6182 /* Exactly like the ID */ 6183 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { 6184 uint32_t alt_thresh; 6185 /* 6186 * Compensate for delayed-ack with the d-ack time. 6187 */ 6188 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6189 if (alt_thresh > thresh) 6190 thresh = alt_thresh; 6191 } 6192 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 6193 /* 2.1 behavior */ 6194 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 6195 if (prsm && (len <= segsiz)) { 6196 /* 6197 * Two packets outstanding, thresh should be (2*srtt) + 6198 * possible inter-packet delay (if any). 6199 */ 6200 uint32_t inter_gap = 0; 6201 int idx, nidx; 6202 6203 idx = rsm->r_rtr_cnt - 1; 6204 nidx = prsm->r_rtr_cnt - 1; 6205 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 6206 /* Yes it was sent later (or at the same time) */ 6207 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 6208 } 6209 thresh += inter_gap; 6210 } else if (len <= segsiz) { 6211 /* 6212 * Possibly compensate for delayed-ack. 6213 */ 6214 uint32_t alt_thresh; 6215 6216 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6217 if (alt_thresh > thresh) 6218 thresh = alt_thresh; 6219 } 6220 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 6221 /* 2.2 behavior */ 6222 if (len <= segsiz) { 6223 uint32_t alt_thresh; 6224 /* 6225 * Compensate for delayed-ack with the d-ack time. 6226 */ 6227 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6228 if (alt_thresh > thresh) 6229 thresh = alt_thresh; 6230 } 6231 } 6232 /* Not above an RTO */ 6233 if (thresh > tp->t_rxtcur) { 6234 thresh = tp->t_rxtcur; 6235 } 6236 /* Not above a RTO max */ 6237 if (thresh > rack_rto_max) { 6238 thresh = rack_rto_max; 6239 } 6240 /* Apply user supplied min TLP */ 6241 if (thresh < rack_tlp_min) { 6242 thresh = rack_tlp_min; 6243 } 6244 return (thresh); 6245 } 6246 6247 static uint32_t 6248 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 6249 { 6250 /* 6251 * We want the rack_rtt which is the 6252 * last rtt we measured. However if that 6253 * does not exist we fallback to the srtt (which 6254 * we probably will never do) and then as a last 6255 * resort we use RACK_INITIAL_RTO if no srtt is 6256 * yet set. 6257 */ 6258 if (rack->rc_rack_rtt) 6259 return (rack->rc_rack_rtt); 6260 else if (tp->t_srtt == 0) 6261 return (RACK_INITIAL_RTO); 6262 return (tp->t_srtt); 6263 } 6264 6265 static struct rack_sendmap * 6266 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 6267 { 6268 /* 6269 * Check to see that we don't need to fall into recovery. We will 6270 * need to do so if our oldest transmit is past the time we should 6271 * have had an ack. 6272 */ 6273 struct tcp_rack *rack; 6274 struct rack_sendmap *rsm; 6275 int32_t idx; 6276 uint32_t srtt, thresh; 6277 6278 rack = (struct tcp_rack *)tp->t_fb_ptr; 6279 if (tqhash_empty(rack->r_ctl.tqh)) { 6280 return (NULL); 6281 } 6282 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6283 if (rsm == NULL) 6284 return (NULL); 6285 6286 6287 if (rsm->r_flags & RACK_ACKED) { 6288 rsm = rack_find_lowest_rsm(rack); 6289 if (rsm == NULL) 6290 return (NULL); 6291 } 6292 idx = rsm->r_rtr_cnt - 1; 6293 srtt = rack_grab_rtt(tp, rack); 6294 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 6295 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 6296 return (NULL); 6297 } 6298 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 6299 return (NULL); 6300 } 6301 /* Ok if we reach here we are over-due and this guy can be sent */ 6302 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 6303 return (rsm); 6304 } 6305 6306 static uint32_t 6307 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 6308 { 6309 int32_t t; 6310 int32_t tt; 6311 uint32_t ret_val; 6312 6313 t = (tp->t_srtt + (tp->t_rttvar << 2)); 6314 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 6315 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 6316 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 6317 ret_val = (uint32_t)tt; 6318 return (ret_val); 6319 } 6320 6321 static uint32_t 6322 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 6323 { 6324 /* 6325 * Start the FR timer, we do this based on getting the first one in 6326 * the rc_tmap. Note that if its NULL we must stop the timer. in all 6327 * events we need to stop the running timer (if its running) before 6328 * starting the new one. 6329 */ 6330 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 6331 uint32_t srtt_cur; 6332 int32_t idx; 6333 int32_t is_tlp_timer = 0; 6334 struct rack_sendmap *rsm; 6335 6336 if (rack->t_timers_stopped) { 6337 /* All timers have been stopped none are to run */ 6338 return (0); 6339 } 6340 if (rack->rc_in_persist) { 6341 /* We can't start any timer in persists */ 6342 return (rack_get_persists_timer_val(tp, rack)); 6343 } 6344 rack->rc_on_min_to = 0; 6345 if ((tp->t_state < TCPS_ESTABLISHED) || 6346 (rack->sack_attack_disable > 0) || 6347 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 6348 goto activate_rxt; 6349 } 6350 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6351 if ((rsm == NULL) || sup_rack) { 6352 /* Nothing on the send map or no rack */ 6353 activate_rxt: 6354 time_since_sent = 0; 6355 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6356 if (rsm) { 6357 /* 6358 * Should we discount the RTX timer any? 6359 * 6360 * We want to discount it the smallest amount. 6361 * If a timer (Rack/TLP or RXT) has gone off more 6362 * recently thats the discount we want to use (now - timer time). 6363 * If the retransmit of the oldest packet was more recent then 6364 * we want to use that (now - oldest-packet-last_transmit_time). 6365 * 6366 */ 6367 idx = rsm->r_rtr_cnt - 1; 6368 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 6369 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 6370 else 6371 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 6372 if (TSTMP_GT(cts, tstmp_touse)) 6373 time_since_sent = cts - tstmp_touse; 6374 } 6375 if (SEQ_LT(tp->snd_una, tp->snd_max) || 6376 sbavail(&tptosocket(tp)->so_snd)) { 6377 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 6378 to = tp->t_rxtcur; 6379 if (to > time_since_sent) 6380 to -= time_since_sent; 6381 else 6382 to = rack->r_ctl.rc_min_to; 6383 if (to == 0) 6384 to = 1; 6385 /* Special case for KEEPINIT */ 6386 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6387 (TP_KEEPINIT(tp) != 0) && 6388 rsm) { 6389 /* 6390 * We have to put a ceiling on the rxt timer 6391 * of the keep-init timeout. 6392 */ 6393 uint32_t max_time, red; 6394 6395 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 6396 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 6397 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 6398 if (red < max_time) 6399 max_time -= red; 6400 else 6401 max_time = 1; 6402 } 6403 /* Reduce timeout to the keep value if needed */ 6404 if (max_time < to) 6405 to = max_time; 6406 } 6407 return (to); 6408 } 6409 return (0); 6410 } 6411 if (rsm->r_flags & RACK_ACKED) { 6412 rsm = rack_find_lowest_rsm(rack); 6413 if (rsm == NULL) { 6414 /* No lowest? */ 6415 goto activate_rxt; 6416 } 6417 } 6418 if (rack->sack_attack_disable) { 6419 /* 6420 * We don't want to do 6421 * any TLP's if you are an attacker. 6422 * Though if you are doing what 6423 * is expected you may still have 6424 * SACK-PASSED marks. 6425 */ 6426 goto activate_rxt; 6427 } 6428 /* Convert from ms to usecs */ 6429 if ((rsm->r_flags & RACK_SACK_PASSED) || 6430 (rsm->r_flags & RACK_RWND_COLLAPSED) || 6431 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 6432 if ((tp->t_flags & TF_SENTFIN) && 6433 ((tp->snd_max - tp->snd_una) == 1) && 6434 (rsm->r_flags & RACK_HAS_FIN)) { 6435 /* 6436 * We don't start a rack timer if all we have is a 6437 * FIN outstanding. 6438 */ 6439 goto activate_rxt; 6440 } 6441 if ((rack->use_rack_rr == 0) && 6442 (IN_FASTRECOVERY(tp->t_flags)) && 6443 (rack->rack_no_prr == 0) && 6444 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 6445 /* 6446 * We are not cheating, in recovery and 6447 * not enough ack's to yet get our next 6448 * retransmission out. 6449 * 6450 * Note that classified attackers do not 6451 * get to use the rack-cheat. 6452 */ 6453 goto activate_tlp; 6454 } 6455 srtt = rack_grab_rtt(tp, rack); 6456 thresh = rack_calc_thresh_rack(rack, srtt, cts); 6457 idx = rsm->r_rtr_cnt - 1; 6458 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 6459 if (SEQ_GEQ(exp, cts)) { 6460 to = exp - cts; 6461 if (to < rack->r_ctl.rc_min_to) { 6462 to = rack->r_ctl.rc_min_to; 6463 if (rack->r_rr_config == 3) 6464 rack->rc_on_min_to = 1; 6465 } 6466 } else { 6467 to = rack->r_ctl.rc_min_to; 6468 if (rack->r_rr_config == 3) 6469 rack->rc_on_min_to = 1; 6470 } 6471 } else { 6472 /* Ok we need to do a TLP not RACK */ 6473 activate_tlp: 6474 if ((rack->rc_tlp_in_progress != 0) && 6475 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 6476 /* 6477 * The previous send was a TLP and we have sent 6478 * N TLP's without sending new data. 6479 */ 6480 goto activate_rxt; 6481 } 6482 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 6483 if (rsm == NULL) { 6484 /* We found no rsm to TLP with. */ 6485 goto activate_rxt; 6486 } 6487 if (rsm->r_flags & RACK_HAS_FIN) { 6488 /* If its a FIN we dont do TLP */ 6489 rsm = NULL; 6490 goto activate_rxt; 6491 } 6492 idx = rsm->r_rtr_cnt - 1; 6493 time_since_sent = 0; 6494 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 6495 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 6496 else 6497 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 6498 if (TSTMP_GT(cts, tstmp_touse)) 6499 time_since_sent = cts - tstmp_touse; 6500 is_tlp_timer = 1; 6501 if (tp->t_srtt) { 6502 if ((rack->rc_srtt_measure_made == 0) && 6503 (tp->t_srtt == 1)) { 6504 /* 6505 * If another stack as run and set srtt to 1, 6506 * then the srtt was 0, so lets use the initial. 6507 */ 6508 srtt = RACK_INITIAL_RTO; 6509 } else { 6510 srtt_cur = tp->t_srtt; 6511 srtt = srtt_cur; 6512 } 6513 } else 6514 srtt = RACK_INITIAL_RTO; 6515 /* 6516 * If the SRTT is not keeping up and the 6517 * rack RTT has spiked we want to use 6518 * the last RTT not the smoothed one. 6519 */ 6520 if (rack_tlp_use_greater && 6521 tp->t_srtt && 6522 (srtt < rack_grab_rtt(tp, rack))) { 6523 srtt = rack_grab_rtt(tp, rack); 6524 } 6525 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 6526 if (thresh > time_since_sent) { 6527 to = thresh - time_since_sent; 6528 } else { 6529 to = rack->r_ctl.rc_min_to; 6530 rack_log_alt_to_to_cancel(rack, 6531 thresh, /* flex1 */ 6532 time_since_sent, /* flex2 */ 6533 tstmp_touse, /* flex3 */ 6534 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 6535 (uint32_t)rsm->r_tim_lastsent[idx], 6536 srtt, 6537 idx, 99); 6538 } 6539 if (to < rack_tlp_min) { 6540 to = rack_tlp_min; 6541 } 6542 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) { 6543 /* 6544 * If the TLP time works out to larger than the max 6545 * RTO lets not do TLP.. just RTO. 6546 */ 6547 goto activate_rxt; 6548 } 6549 } 6550 if (is_tlp_timer == 0) { 6551 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 6552 } else { 6553 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 6554 } 6555 if (to == 0) 6556 to = 1; 6557 return (to); 6558 } 6559 6560 static void 6561 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, tcp_seq snd_una) 6562 { 6563 struct timeval tv; 6564 6565 if (rack->rc_in_persist == 0) { 6566 if (tp->t_flags & TF_GPUTINPROG) { 6567 /* 6568 * Stop the goodput now, the calling of the 6569 * measurement function clears the flag. 6570 */ 6571 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, 6572 RACK_QUALITY_PERSIST); 6573 } 6574 #ifdef NETFLIX_SHARED_CWND 6575 if (rack->r_ctl.rc_scw) { 6576 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 6577 rack->rack_scwnd_is_idle = 1; 6578 } 6579 #endif 6580 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(&tv); 6581 if (rack->lt_bw_up) { 6582 /* Suspend our LT BW measurement */ 6583 uint64_t tmark; 6584 6585 rack->r_ctl.lt_bw_bytes += (snd_una - rack->r_ctl.lt_seq); 6586 rack->r_ctl.lt_seq = snd_una; 6587 tmark = tcp_tv_to_lusectick(&tv); 6588 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 6589 rack->r_ctl.lt_timemark = tmark; 6590 rack->lt_bw_up = 0; 6591 rack->r_persist_lt_bw_off = 1; 6592 } 6593 if (rack->r_ctl.rc_went_idle_time == 0) 6594 rack->r_ctl.rc_went_idle_time = 1; 6595 rack_timer_cancel(tp, rack, cts, __LINE__); 6596 rack->r_ctl.persist_lost_ends = 0; 6597 rack->probe_not_answered = 0; 6598 rack->forced_ack = 0; 6599 tp->t_rxtshift = 0; 6600 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 6601 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 6602 rack->rc_in_persist = 1; 6603 } 6604 } 6605 6606 static void 6607 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6608 { 6609 struct timeval tv; 6610 uint32_t t_time; 6611 6612 if (tcp_in_hpts(rack->rc_tp)) { 6613 tcp_hpts_remove(rack->rc_tp); 6614 rack->r_ctl.rc_hpts_flags = 0; 6615 } 6616 #ifdef NETFLIX_SHARED_CWND 6617 if (rack->r_ctl.rc_scw) { 6618 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 6619 rack->rack_scwnd_is_idle = 0; 6620 } 6621 #endif 6622 t_time = tcp_get_usecs(&tv); 6623 if (rack->rc_gp_dyn_mul && 6624 (rack->use_fixed_rate == 0) && 6625 (rack->rc_always_pace)) { 6626 /* 6627 * Do we count this as if a probe-rtt just 6628 * finished? 6629 */ 6630 uint32_t time_idle, idle_min; 6631 6632 time_idle = t_time - rack->r_ctl.rc_went_idle_time; 6633 idle_min = rack_min_probertt_hold; 6634 if (rack_probertt_gpsrtt_cnt_div) { 6635 uint64_t extra; 6636 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 6637 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 6638 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 6639 idle_min += (uint32_t)extra; 6640 } 6641 if (time_idle >= idle_min) { 6642 /* Yes, we count it as a probe-rtt. */ 6643 uint32_t us_cts; 6644 6645 us_cts = tcp_get_usecs(NULL); 6646 if (rack->in_probe_rtt == 0) { 6647 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 6648 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 6649 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 6650 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 6651 } else { 6652 rack_exit_probertt(rack, us_cts); 6653 } 6654 } 6655 } 6656 if (rack->r_persist_lt_bw_off) { 6657 /* Continue where we left off */ 6658 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv); 6659 rack->lt_bw_up = 1; 6660 rack->r_persist_lt_bw_off = 0; 6661 } 6662 rack->rc_in_persist = 0; 6663 rack->r_ctl.rc_went_idle_time = 0; 6664 tp->t_rxtshift = 0; 6665 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 6666 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 6667 rack->r_ctl.rc_agg_delayed = 0; 6668 rack->r_early = 0; 6669 rack->r_late = 0; 6670 rack->r_ctl.rc_agg_early = 0; 6671 } 6672 6673 static void 6674 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 6675 struct hpts_diag *diag, struct timeval *tv) 6676 { 6677 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 6678 union tcp_log_stackspecific log; 6679 6680 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 6681 log.u_bbr.flex1 = diag->p_nxt_slot; 6682 log.u_bbr.flex2 = diag->p_cur_slot; 6683 log.u_bbr.flex3 = diag->slot_req; 6684 log.u_bbr.flex4 = diag->inp_hptsslot; 6685 log.u_bbr.flex5 = diag->slot_remaining; 6686 log.u_bbr.flex6 = diag->need_new_to; 6687 log.u_bbr.flex7 = diag->p_hpts_active; 6688 log.u_bbr.flex8 = diag->p_on_min_sleep; 6689 /* Hijack other fields as needed */ 6690 log.u_bbr.epoch = diag->have_slept; 6691 log.u_bbr.lt_epoch = diag->yet_to_sleep; 6692 log.u_bbr.pkts_out = diag->co_ret; 6693 log.u_bbr.applimited = diag->hpts_sleep_time; 6694 log.u_bbr.delivered = diag->p_prev_slot; 6695 log.u_bbr.inflight = diag->p_runningslot; 6696 log.u_bbr.bw_inuse = diag->wheel_slot; 6697 log.u_bbr.rttProp = diag->wheel_cts; 6698 log.u_bbr.timeStamp = cts; 6699 log.u_bbr.delRate = diag->maxslots; 6700 log.u_bbr.cur_del_rate = diag->p_curtick; 6701 log.u_bbr.cur_del_rate <<= 32; 6702 log.u_bbr.cur_del_rate |= diag->p_lasttick; 6703 TCP_LOG_EVENTP(rack->rc_tp, NULL, 6704 &rack->rc_inp->inp_socket->so_rcv, 6705 &rack->rc_inp->inp_socket->so_snd, 6706 BBR_LOG_HPTSDIAG, 0, 6707 0, &log, false, tv); 6708 } 6709 6710 } 6711 6712 static void 6713 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 6714 { 6715 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 6716 union tcp_log_stackspecific log; 6717 struct timeval tv; 6718 6719 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 6720 log.u_bbr.flex1 = sb->sb_flags; 6721 log.u_bbr.flex2 = len; 6722 log.u_bbr.flex3 = sb->sb_state; 6723 log.u_bbr.flex8 = type; 6724 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 6725 TCP_LOG_EVENTP(rack->rc_tp, NULL, 6726 &rack->rc_inp->inp_socket->so_rcv, 6727 &rack->rc_inp->inp_socket->so_snd, 6728 TCP_LOG_SB_WAKE, 0, 6729 len, &log, false, &tv); 6730 } 6731 } 6732 6733 static void 6734 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 6735 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 6736 { 6737 struct hpts_diag diag; 6738 struct inpcb *inp = tptoinpcb(tp); 6739 struct timeval tv; 6740 uint32_t delayed_ack = 0; 6741 uint32_t hpts_timeout; 6742 uint32_t entry_slot = slot; 6743 uint8_t stopped; 6744 uint32_t left = 0; 6745 uint32_t us_cts; 6746 6747 if ((tp->t_state == TCPS_CLOSED) || 6748 (tp->t_state == TCPS_LISTEN)) { 6749 return; 6750 } 6751 if (tcp_in_hpts(tp)) { 6752 /* Already on the pacer */ 6753 return; 6754 } 6755 stopped = rack->rc_tmr_stopped; 6756 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 6757 left = rack->r_ctl.rc_timer_exp - cts; 6758 } 6759 rack->r_ctl.rc_timer_exp = 0; 6760 rack->r_ctl.rc_hpts_flags = 0; 6761 us_cts = tcp_get_usecs(&tv); 6762 /* Now early/late accounting */ 6763 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0); 6764 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { 6765 /* 6766 * We have a early carry over set, 6767 * we can always add more time so we 6768 * can always make this compensation. 6769 * 6770 * Note if ack's are allowed to wake us do not 6771 * penalize the next timer for being awoke 6772 * by an ack aka the rc_agg_early (non-paced mode). 6773 */ 6774 slot += rack->r_ctl.rc_agg_early; 6775 rack->r_early = 0; 6776 rack->r_ctl.rc_agg_early = 0; 6777 } 6778 if (rack->r_late) { 6779 /* 6780 * This is harder, we can 6781 * compensate some but it 6782 * really depends on what 6783 * the current pacing time is. 6784 */ 6785 if (rack->r_ctl.rc_agg_delayed >= slot) { 6786 /* 6787 * We can't compensate for it all. 6788 * And we have to have some time 6789 * on the clock. We always have a min 6790 * 10 slots (10 x 10 i.e. 100 usecs). 6791 */ 6792 if (slot <= HPTS_TICKS_PER_SLOT) { 6793 /* We gain delay */ 6794 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); 6795 slot = HPTS_TICKS_PER_SLOT; 6796 } else { 6797 /* We take off some */ 6798 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); 6799 slot = HPTS_TICKS_PER_SLOT; 6800 } 6801 } else { 6802 slot -= rack->r_ctl.rc_agg_delayed; 6803 rack->r_ctl.rc_agg_delayed = 0; 6804 /* Make sure we have 100 useconds at minimum */ 6805 if (slot < HPTS_TICKS_PER_SLOT) { 6806 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; 6807 slot = HPTS_TICKS_PER_SLOT; 6808 } 6809 if (rack->r_ctl.rc_agg_delayed == 0) 6810 rack->r_late = 0; 6811 } 6812 } 6813 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 6814 #ifdef TCP_SAD_DETECTION 6815 if (rack->sack_attack_disable && 6816 (rack->r_ctl.ack_during_sd > 0) && 6817 (slot < tcp_sad_pacing_interval)) { 6818 /* 6819 * We have a potential attacker on 6820 * the line. We have possibly some 6821 * (or now) pacing time set. We want to 6822 * slow down the processing of sacks by some 6823 * amount (if it is an attacker). Set the default 6824 * slot for attackers in place (unless the original 6825 * interval is longer). Its stored in 6826 * micro-seconds, so lets convert to msecs. 6827 */ 6828 slot = tcp_sad_pacing_interval; 6829 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv, __LINE__); 6830 rack->r_ctl.ack_during_sd = 0; 6831 } 6832 #endif 6833 if (tp->t_flags & TF_DELACK) { 6834 delayed_ack = TICKS_2_USEC(tcp_delacktime); 6835 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 6836 } 6837 if (delayed_ack && ((hpts_timeout == 0) || 6838 (delayed_ack < hpts_timeout))) 6839 hpts_timeout = delayed_ack; 6840 else 6841 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 6842 /* 6843 * If no timers are going to run and we will fall off the hptsi 6844 * wheel, we resort to a keep-alive timer if its configured. 6845 */ 6846 if ((hpts_timeout == 0) && 6847 (slot == 0)) { 6848 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 6849 (tp->t_state <= TCPS_CLOSING)) { 6850 /* 6851 * Ok we have no timer (persists, rack, tlp, rxt or 6852 * del-ack), we don't have segments being paced. So 6853 * all that is left is the keepalive timer. 6854 */ 6855 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 6856 /* Get the established keep-alive time */ 6857 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 6858 } else { 6859 /* 6860 * Get the initial setup keep-alive time, 6861 * note that this is probably not going to 6862 * happen, since rack will be running a rxt timer 6863 * if a SYN of some sort is outstanding. It is 6864 * actually handled in rack_timeout_rxt(). 6865 */ 6866 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 6867 } 6868 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 6869 if (rack->in_probe_rtt) { 6870 /* 6871 * We want to instead not wake up a long time from 6872 * now but to wake up about the time we would 6873 * exit probe-rtt and initiate a keep-alive ack. 6874 * This will get us out of probe-rtt and update 6875 * our min-rtt. 6876 */ 6877 hpts_timeout = rack_min_probertt_hold; 6878 } 6879 } 6880 } 6881 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 6882 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 6883 /* 6884 * RACK, TLP, persists and RXT timers all are restartable 6885 * based on actions input .. i.e we received a packet (ack 6886 * or sack) and that changes things (rw, or snd_una etc). 6887 * Thus we can restart them with a new value. For 6888 * keep-alive, delayed_ack we keep track of what was left 6889 * and restart the timer with a smaller value. 6890 */ 6891 if (left < hpts_timeout) 6892 hpts_timeout = left; 6893 } 6894 if (hpts_timeout) { 6895 /* 6896 * Hack alert for now we can't time-out over 2,147,483 6897 * seconds (a bit more than 596 hours), which is probably ok 6898 * :). 6899 */ 6900 if (hpts_timeout > 0x7ffffffe) 6901 hpts_timeout = 0x7ffffffe; 6902 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 6903 } 6904 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); 6905 if ((rack->gp_ready == 0) && 6906 (rack->use_fixed_rate == 0) && 6907 (hpts_timeout < slot) && 6908 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 6909 /* 6910 * We have no good estimate yet for the 6911 * old clunky burst mitigation or the 6912 * real pacing. And the tlp or rxt is smaller 6913 * than the pacing calculation. Lets not 6914 * pace that long since we know the calculation 6915 * so far is not accurate. 6916 */ 6917 slot = hpts_timeout; 6918 } 6919 /** 6920 * Turn off all the flags for queuing by default. The 6921 * flags have important meanings to what happens when 6922 * LRO interacts with the transport. Most likely (by default now) 6923 * mbuf_queueing and ack compression are on. So the transport 6924 * has a couple of flags that control what happens (if those 6925 * are not on then these flags won't have any effect since it 6926 * won't go through the queuing LRO path). 6927 * 6928 * TF2_MBUF_QUEUE_READY - This flags says that I am busy 6929 * pacing output, so don't disturb. But 6930 * it also means LRO can wake me if there 6931 * is a SACK arrival. 6932 * 6933 * TF2_DONT_SACK_QUEUE - This flag is used in conjunction 6934 * with the above flag (QUEUE_READY) and 6935 * when present it says don't even wake me 6936 * if a SACK arrives. 6937 * 6938 * The idea behind these flags is that if we are pacing we 6939 * set the MBUF_QUEUE_READY and only get woken up if 6940 * a SACK arrives (which could change things) or if 6941 * our pacing timer expires. If, however, we have a rack 6942 * timer running, then we don't even want a sack to wake 6943 * us since the rack timer has to expire before we can send. 6944 * 6945 * Other cases should usually have none of the flags set 6946 * so LRO can call into us. 6947 */ 6948 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE|TF2_MBUF_QUEUE_READY); 6949 if (slot) { 6950 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 6951 rack->r_ctl.rc_last_output_to = us_cts + slot; 6952 /* 6953 * A pacing timer (slot) is being set, in 6954 * such a case we cannot send (we are blocked by 6955 * the timer). So lets tell LRO that it should not 6956 * wake us unless there is a SACK. Note this only 6957 * will be effective if mbuf queueing is on or 6958 * compressed acks are being processed. 6959 */ 6960 tp->t_flags2 |= TF2_MBUF_QUEUE_READY; 6961 /* 6962 * But wait if we have a Rack timer running 6963 * even a SACK should not disturb us (with 6964 * the exception of r_rr_config 3). 6965 */ 6966 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) || 6967 (IN_RECOVERY(tp->t_flags))) { 6968 if (rack->r_rr_config != 3) 6969 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; 6970 else if (rack->rc_pace_dnd) { 6971 /* 6972 * When DND is on, we only let a sack 6973 * interrupt us if we are not in recovery. 6974 * 6975 * If DND is off, then we never hit here 6976 * and let all sacks wake us up. 6977 * 6978 */ 6979 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; 6980 } 6981 } 6982 /* For sack attackers we want to ignore sack */ 6983 if (rack->sack_attack_disable == 1) { 6984 tp->t_flags2 |= (TF2_DONT_SACK_QUEUE | 6985 TF2_MBUF_QUEUE_READY); 6986 } else if (rack->rc_ack_can_sendout_data) { 6987 /* 6988 * Ahh but wait, this is that special case 6989 * where the pacing timer can be disturbed 6990 * backout the changes (used for non-paced 6991 * burst limiting). 6992 */ 6993 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE | 6994 TF2_MBUF_QUEUE_READY); 6995 } 6996 if ((rack->use_rack_rr) && 6997 (rack->r_rr_config < 2) && 6998 ((hpts_timeout) && (hpts_timeout < slot))) { 6999 /* 7000 * Arrange for the hpts to kick back in after the 7001 * t-o if the t-o does not cause a send. 7002 */ 7003 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout), 7004 __LINE__, &diag); 7005 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 7006 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 7007 } else { 7008 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(slot), 7009 __LINE__, &diag); 7010 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 7011 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 7012 } 7013 } else if (hpts_timeout) { 7014 /* 7015 * With respect to t_flags2(?) here, lets let any new acks wake 7016 * us up here. Since we are not pacing (no pacing timer), output 7017 * can happen so we should let it. If its a Rack timer, then any inbound 7018 * packet probably won't change the sending (we will be blocked) 7019 * but it may change the prr stats so letting it in (the set defaults 7020 * at the start of this block) are good enough. 7021 */ 7022 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 7023 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout), 7024 __LINE__, &diag); 7025 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 7026 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 7027 } else { 7028 /* No timer starting */ 7029 #ifdef INVARIANTS 7030 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 7031 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 7032 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 7033 } 7034 #endif 7035 } 7036 rack->rc_tmr_stopped = 0; 7037 if (slot) 7038 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv, __LINE__); 7039 } 7040 7041 /* 7042 * RACK Timer, here we simply do logging and house keeping. 7043 * the normal rack_output() function will call the 7044 * appropriate thing to check if we need to do a RACK retransmit. 7045 * We return 1, saying don't proceed with rack_output only 7046 * when all timers have been stopped (destroyed PCB?). 7047 */ 7048 static int 7049 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7050 { 7051 /* 7052 * This timer simply provides an internal trigger to send out data. 7053 * The check_recovery_mode call will see if there are needed 7054 * retransmissions, if so we will enter fast-recovery. The output 7055 * call may or may not do the same thing depending on sysctl 7056 * settings. 7057 */ 7058 struct rack_sendmap *rsm; 7059 7060 counter_u64_add(rack_to_tot, 1); 7061 if (rack->r_state && (rack->r_state != tp->t_state)) 7062 rack_set_state(tp, rack); 7063 rack->rc_on_min_to = 0; 7064 rsm = rack_check_recovery_mode(tp, cts); 7065 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 7066 if (rsm) { 7067 rack->r_ctl.rc_resend = rsm; 7068 rack->r_timer_override = 1; 7069 if (rack->use_rack_rr) { 7070 /* 7071 * Don't accumulate extra pacing delay 7072 * we are allowing the rack timer to 7073 * over-ride pacing i.e. rrr takes precedence 7074 * if the pacing interval is longer than the rrr 7075 * time (in other words we get the min pacing 7076 * time versus rrr pacing time). 7077 */ 7078 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 7079 } 7080 } 7081 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 7082 if (rsm == NULL) { 7083 /* restart a timer and return 1 */ 7084 rack_start_hpts_timer(rack, tp, cts, 7085 0, 0, 0); 7086 return (1); 7087 } 7088 return (0); 7089 } 7090 7091 7092 7093 static void 7094 rack_adjust_orig_mlen(struct rack_sendmap *rsm) 7095 { 7096 7097 if ((M_TRAILINGROOM(rsm->m) != rsm->orig_t_space)) { 7098 /* 7099 * The trailing space changed, mbufs can grow 7100 * at the tail but they can't shrink from 7101 * it, KASSERT that. Adjust the orig_m_len to 7102 * compensate for this change. 7103 */ 7104 KASSERT((rsm->orig_t_space > M_TRAILINGROOM(rsm->m)), 7105 ("mbuf:%p rsm:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n", 7106 rsm->m, 7107 rsm, 7108 (intmax_t)M_TRAILINGROOM(rsm->m), 7109 rsm->orig_t_space, 7110 rsm->orig_m_len, 7111 rsm->m->m_len)); 7112 rsm->orig_m_len += (rsm->orig_t_space - M_TRAILINGROOM(rsm->m)); 7113 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7114 } 7115 if (rsm->m->m_len < rsm->orig_m_len) { 7116 /* 7117 * Mbuf shrank, trimmed off the top by an ack, our 7118 * offset changes. 7119 */ 7120 KASSERT((rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)), 7121 ("mbuf:%p len:%u rsm:%p oml:%u soff:%u\n", 7122 rsm->m, rsm->m->m_len, 7123 rsm, rsm->orig_m_len, 7124 rsm->soff)); 7125 if (rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)) 7126 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); 7127 else 7128 rsm->soff = 0; 7129 rsm->orig_m_len = rsm->m->m_len; 7130 #ifdef INVARIANTS 7131 } else if (rsm->m->m_len > rsm->orig_m_len) { 7132 panic("rsm:%p m:%p m_len grew outside of t_space compensation", 7133 rsm, rsm->m); 7134 #endif 7135 } 7136 } 7137 7138 static void 7139 rack_setup_offset_for_rsm(struct tcp_rack *rack, struct rack_sendmap *src_rsm, struct rack_sendmap *rsm) 7140 { 7141 struct mbuf *m; 7142 uint32_t soff; 7143 7144 if (src_rsm->m && 7145 ((src_rsm->orig_m_len != src_rsm->m->m_len) || 7146 (M_TRAILINGROOM(src_rsm->m) != src_rsm->orig_t_space))) { 7147 /* Fix up the orig_m_len and possibly the mbuf offset */ 7148 rack_adjust_orig_mlen(src_rsm); 7149 } 7150 m = src_rsm->m; 7151 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); 7152 while (soff >= m->m_len) { 7153 /* Move out past this mbuf */ 7154 soff -= m->m_len; 7155 m = m->m_next; 7156 KASSERT((m != NULL), 7157 ("rsm:%p nrsm:%p hit at soff:%u null m", 7158 src_rsm, rsm, soff)); 7159 if (m == NULL) { 7160 /* This should *not* happen which is why there is a kassert */ 7161 src_rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 7162 (src_rsm->r_start - rack->rc_tp->snd_una), 7163 &src_rsm->soff); 7164 src_rsm->orig_m_len = src_rsm->m->m_len; 7165 src_rsm->orig_t_space = M_TRAILINGROOM(src_rsm->m); 7166 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 7167 (rsm->r_start - rack->rc_tp->snd_una), 7168 &rsm->soff); 7169 rsm->orig_m_len = rsm->m->m_len; 7170 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7171 return; 7172 } 7173 } 7174 rsm->m = m; 7175 rsm->soff = soff; 7176 rsm->orig_m_len = m->m_len; 7177 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7178 } 7179 7180 static __inline void 7181 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 7182 struct rack_sendmap *rsm, uint32_t start) 7183 { 7184 int idx; 7185 7186 nrsm->r_start = start; 7187 nrsm->r_end = rsm->r_end; 7188 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 7189 nrsm->r_flags = rsm->r_flags; 7190 nrsm->r_dupack = rsm->r_dupack; 7191 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; 7192 nrsm->r_rtr_bytes = 0; 7193 nrsm->r_fas = rsm->r_fas; 7194 nrsm->r_bas = rsm->r_bas; 7195 rsm->r_end = nrsm->r_start; 7196 nrsm->r_just_ret = rsm->r_just_ret; 7197 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 7198 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 7199 } 7200 /* Now if we have SYN flag we keep it on the left edge */ 7201 if (nrsm->r_flags & RACK_HAS_SYN) 7202 nrsm->r_flags &= ~RACK_HAS_SYN; 7203 /* Now if we have a FIN flag we keep it on the right edge */ 7204 if (rsm->r_flags & RACK_HAS_FIN) 7205 rsm->r_flags &= ~RACK_HAS_FIN; 7206 /* Push bit must go to the right edge as well */ 7207 if (rsm->r_flags & RACK_HAD_PUSH) 7208 rsm->r_flags &= ~RACK_HAD_PUSH; 7209 /* Clone over the state of the hw_tls flag */ 7210 nrsm->r_hw_tls = rsm->r_hw_tls; 7211 /* 7212 * Now we need to find nrsm's new location in the mbuf chain 7213 * we basically calculate a new offset, which is soff + 7214 * how much is left in original rsm. Then we walk out the mbuf 7215 * chain to find the righ position, it may be the same mbuf 7216 * or maybe not. 7217 */ 7218 KASSERT(((rsm->m != NULL) || 7219 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 7220 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 7221 if (rsm->m) 7222 rack_setup_offset_for_rsm(rack, rsm, nrsm); 7223 } 7224 7225 static struct rack_sendmap * 7226 rack_merge_rsm(struct tcp_rack *rack, 7227 struct rack_sendmap *l_rsm, 7228 struct rack_sendmap *r_rsm) 7229 { 7230 /* 7231 * We are merging two ack'd RSM's, 7232 * the l_rsm is on the left (lower seq 7233 * values) and the r_rsm is on the right 7234 * (higher seq value). The simplest way 7235 * to merge these is to move the right 7236 * one into the left. I don't think there 7237 * is any reason we need to try to find 7238 * the oldest (or last oldest retransmitted). 7239 */ 7240 rack_log_map_chg(rack->rc_tp, rack, NULL, 7241 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 7242 l_rsm->r_end = r_rsm->r_end; 7243 if (l_rsm->r_dupack < r_rsm->r_dupack) 7244 l_rsm->r_dupack = r_rsm->r_dupack; 7245 if (r_rsm->r_rtr_bytes) 7246 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 7247 if (r_rsm->r_in_tmap) { 7248 /* This really should not happen */ 7249 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 7250 r_rsm->r_in_tmap = 0; 7251 } 7252 7253 /* Now the flags */ 7254 if (r_rsm->r_flags & RACK_HAS_FIN) 7255 l_rsm->r_flags |= RACK_HAS_FIN; 7256 if (r_rsm->r_flags & RACK_TLP) 7257 l_rsm->r_flags |= RACK_TLP; 7258 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 7259 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 7260 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 7261 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 7262 /* 7263 * If both are app-limited then let the 7264 * free lower the count. If right is app 7265 * limited and left is not, transfer. 7266 */ 7267 l_rsm->r_flags |= RACK_APP_LIMITED; 7268 r_rsm->r_flags &= ~RACK_APP_LIMITED; 7269 if (r_rsm == rack->r_ctl.rc_first_appl) 7270 rack->r_ctl.rc_first_appl = l_rsm; 7271 } 7272 tqhash_remove(rack->r_ctl.tqh, r_rsm, REMOVE_TYPE_MERGE); 7273 /* 7274 * We keep the largest value, which is the newest 7275 * send. We do this in case a segment that is 7276 * joined together and not part of a GP estimate 7277 * later gets expanded into the GP estimate. 7278 * 7279 * We prohibit the merging of unlike kinds i.e. 7280 * all pieces that are in the GP estimate can be 7281 * merged and all pieces that are not in a GP estimate 7282 * can be merged, but not disimilar pieces. Combine 7283 * this with taking the highest here and we should 7284 * be ok unless of course the client reneges. Then 7285 * all bets are off. 7286 */ 7287 if(l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] < 7288 r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]) { 7289 l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] = r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]; 7290 } 7291 /* 7292 * When merging two RSM's we also need to consider the ack time and keep 7293 * newest. If the ack gets merged into a measurement then that is the 7294 * one we will want to be using. 7295 */ 7296 if(l_rsm->r_ack_arrival < r_rsm->r_ack_arrival) 7297 l_rsm->r_ack_arrival = r_rsm->r_ack_arrival; 7298 7299 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 7300 /* Transfer the split limit to the map we free */ 7301 r_rsm->r_limit_type = l_rsm->r_limit_type; 7302 l_rsm->r_limit_type = 0; 7303 } 7304 rack_free(rack, r_rsm); 7305 l_rsm->r_flags |= RACK_MERGED; 7306 return (l_rsm); 7307 } 7308 7309 /* 7310 * TLP Timer, here we simply setup what segment we want to 7311 * have the TLP expire on, the normal rack_output() will then 7312 * send it out. 7313 * 7314 * We return 1, saying don't proceed with rack_output only 7315 * when all timers have been stopped (destroyed PCB?). 7316 */ 7317 static int 7318 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp) 7319 { 7320 /* 7321 * Tail Loss Probe. 7322 */ 7323 struct rack_sendmap *rsm = NULL; 7324 int insret __diagused; 7325 struct socket *so = tptosocket(tp); 7326 uint32_t amm; 7327 uint32_t out, avail; 7328 int collapsed_win = 0; 7329 7330 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 7331 /* Its not time yet */ 7332 return (0); 7333 } 7334 if (ctf_progress_timeout_check(tp, true)) { 7335 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7336 return (-ETIMEDOUT); /* tcp_drop() */ 7337 } 7338 /* 7339 * A TLP timer has expired. We have been idle for 2 rtts. So we now 7340 * need to figure out how to force a full MSS segment out. 7341 */ 7342 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); 7343 rack->r_ctl.retran_during_recovery = 0; 7344 rack->r_ctl.dsack_byte_cnt = 0; 7345 counter_u64_add(rack_tlp_tot, 1); 7346 if (rack->r_state && (rack->r_state != tp->t_state)) 7347 rack_set_state(tp, rack); 7348 avail = sbavail(&so->so_snd); 7349 out = tp->snd_max - tp->snd_una; 7350 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) { 7351 /* special case, we need a retransmission */ 7352 collapsed_win = 1; 7353 goto need_retran; 7354 } 7355 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { 7356 rack->r_ctl.dsack_persist--; 7357 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 7358 rack->r_ctl.num_dsack = 0; 7359 } 7360 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 7361 } 7362 if ((tp->t_flags & TF_GPUTINPROG) && 7363 (rack->r_ctl.rc_tlp_cnt_out == 1)) { 7364 /* 7365 * If this is the second in a row 7366 * TLP and we are doing a measurement 7367 * its time to abandon the measurement. 7368 * Something is likely broken on 7369 * the clients network and measuring a 7370 * broken network does us no good. 7371 */ 7372 tp->t_flags &= ~TF_GPUTINPROG; 7373 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7374 rack->r_ctl.rc_gp_srtt /*flex1*/, 7375 tp->gput_seq, 7376 0, 0, 18, __LINE__, NULL, 0); 7377 } 7378 /* 7379 * Check our send oldest always settings, and if 7380 * there is an oldest to send jump to the need_retran. 7381 */ 7382 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 7383 goto need_retran; 7384 7385 if (avail > out) { 7386 /* New data is available */ 7387 amm = avail - out; 7388 if (amm > ctf_fixed_maxseg(tp)) { 7389 amm = ctf_fixed_maxseg(tp); 7390 if ((amm + out) > tp->snd_wnd) { 7391 /* We are rwnd limited */ 7392 goto need_retran; 7393 } 7394 } else if (amm < ctf_fixed_maxseg(tp)) { 7395 /* not enough to fill a MTU */ 7396 goto need_retran; 7397 } 7398 if (IN_FASTRECOVERY(tp->t_flags)) { 7399 /* Unlikely */ 7400 if (rack->rack_no_prr == 0) { 7401 if (out + amm <= tp->snd_wnd) { 7402 rack->r_ctl.rc_prr_sndcnt = amm; 7403 rack->r_ctl.rc_tlp_new_data = amm; 7404 rack_log_to_prr(rack, 4, 0, __LINE__); 7405 } 7406 } else 7407 goto need_retran; 7408 } else { 7409 /* Set the send-new override */ 7410 if (out + amm <= tp->snd_wnd) 7411 rack->r_ctl.rc_tlp_new_data = amm; 7412 else 7413 goto need_retran; 7414 } 7415 rack->r_ctl.rc_tlpsend = NULL; 7416 counter_u64_add(rack_tlp_newdata, 1); 7417 goto send; 7418 } 7419 need_retran: 7420 /* 7421 * Ok we need to arrange the last un-acked segment to be re-sent, or 7422 * optionally the first un-acked segment. 7423 */ 7424 if (collapsed_win == 0) { 7425 if (rack_always_send_oldest) 7426 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 7427 else { 7428 rsm = tqhash_max(rack->r_ctl.tqh); 7429 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 7430 rsm = rack_find_high_nonack(rack, rsm); 7431 } 7432 } 7433 if (rsm == NULL) { 7434 #ifdef TCP_BLACKBOX 7435 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 7436 #endif 7437 goto out; 7438 } 7439 } else { 7440 /* 7441 * We had a collapsed window, lets find 7442 * the point before the collapse. 7443 */ 7444 if (SEQ_GT((rack->r_ctl.last_collapse_point - 1), rack->rc_tp->snd_una)) 7445 rsm = tqhash_find(rack->r_ctl.tqh, (rack->r_ctl.last_collapse_point - 1)); 7446 else { 7447 rsm = tqhash_min(rack->r_ctl.tqh); 7448 } 7449 if (rsm == NULL) { 7450 /* Huh */ 7451 goto out; 7452 } 7453 } 7454 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 7455 /* 7456 * We need to split this the last segment in two. 7457 */ 7458 struct rack_sendmap *nrsm; 7459 7460 nrsm = rack_alloc_full_limit(rack); 7461 if (nrsm == NULL) { 7462 /* 7463 * No memory to split, we will just exit and punt 7464 * off to the RXT timer. 7465 */ 7466 goto out; 7467 } 7468 rack_clone_rsm(rack, nrsm, rsm, 7469 (rsm->r_end - ctf_fixed_maxseg(tp))); 7470 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7471 #ifndef INVARIANTS 7472 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 7473 #else 7474 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 7475 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 7476 nrsm, insret, rack, rsm); 7477 } 7478 #endif 7479 if (rsm->r_in_tmap) { 7480 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7481 nrsm->r_in_tmap = 1; 7482 } 7483 rsm = nrsm; 7484 } 7485 rack->r_ctl.rc_tlpsend = rsm; 7486 send: 7487 /* Make sure output path knows we are doing a TLP */ 7488 *doing_tlp = 1; 7489 rack->r_timer_override = 1; 7490 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 7491 return (0); 7492 out: 7493 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 7494 return (0); 7495 } 7496 7497 /* 7498 * Delayed ack Timer, here we simply need to setup the 7499 * ACK_NOW flag and remove the DELACK flag. From there 7500 * the output routine will send the ack out. 7501 * 7502 * We only return 1, saying don't proceed, if all timers 7503 * are stopped (destroyed PCB?). 7504 */ 7505 static int 7506 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7507 { 7508 7509 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 7510 tp->t_flags &= ~TF_DELACK; 7511 tp->t_flags |= TF_ACKNOW; 7512 KMOD_TCPSTAT_INC(tcps_delack); 7513 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 7514 return (0); 7515 } 7516 7517 /* 7518 * Persists timer, here we simply send the 7519 * same thing as a keepalive will. 7520 * the one byte send. 7521 * 7522 * We only return 1, saying don't proceed, if all timers 7523 * are stopped (destroyed PCB?). 7524 */ 7525 static int 7526 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7527 { 7528 struct tcptemp *t_template; 7529 int32_t retval = 1; 7530 7531 if (rack->rc_in_persist == 0) 7532 return (0); 7533 if (ctf_progress_timeout_check(tp, false)) { 7534 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 7535 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7536 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 7537 return (-ETIMEDOUT); /* tcp_drop() */ 7538 } 7539 /* 7540 * Persistence timer into zero window. Force a byte to be output, if 7541 * possible. 7542 */ 7543 KMOD_TCPSTAT_INC(tcps_persisttimeo); 7544 /* 7545 * Hack: if the peer is dead/unreachable, we do not time out if the 7546 * window is closed. After a full backoff, drop the connection if 7547 * the idle time (no responses to probes) reaches the maximum 7548 * backoff that we would use if retransmitting. 7549 */ 7550 if (tp->t_rxtshift >= V_tcp_retries && 7551 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 7552 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 7553 KMOD_TCPSTAT_INC(tcps_persistdrop); 7554 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 7555 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 7556 retval = -ETIMEDOUT; /* tcp_drop() */ 7557 goto out; 7558 } 7559 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 7560 tp->snd_una == tp->snd_max) 7561 rack_exit_persist(tp, rack, cts); 7562 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 7563 /* 7564 * If the user has closed the socket then drop a persisting 7565 * connection after a much reduced timeout. 7566 */ 7567 if (tp->t_state > TCPS_CLOSE_WAIT && 7568 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 7569 KMOD_TCPSTAT_INC(tcps_persistdrop); 7570 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 7571 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 7572 retval = -ETIMEDOUT; /* tcp_drop() */ 7573 goto out; 7574 } 7575 t_template = tcpip_maketemplate(rack->rc_inp); 7576 if (t_template) { 7577 /* only set it if we were answered */ 7578 if (rack->forced_ack == 0) { 7579 rack->forced_ack = 1; 7580 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 7581 } else { 7582 rack->probe_not_answered = 1; 7583 counter_u64_add(rack_persists_loss, 1); 7584 rack->r_ctl.persist_lost_ends++; 7585 } 7586 counter_u64_add(rack_persists_sends, 1); 7587 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 7588 tcp_respond(tp, t_template->tt_ipgen, 7589 &t_template->tt_t, (struct mbuf *)NULL, 7590 tp->rcv_nxt, tp->snd_una - 1, 0); 7591 /* This sends an ack */ 7592 if (tp->t_flags & TF_DELACK) 7593 tp->t_flags &= ~TF_DELACK; 7594 free(t_template, M_TEMP); 7595 } 7596 if (tp->t_rxtshift < V_tcp_retries) 7597 tp->t_rxtshift++; 7598 out: 7599 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 7600 rack_start_hpts_timer(rack, tp, cts, 7601 0, 0, 0); 7602 return (retval); 7603 } 7604 7605 /* 7606 * If a keepalive goes off, we had no other timers 7607 * happening. We always return 1 here since this 7608 * routine either drops the connection or sends 7609 * out a segment with respond. 7610 */ 7611 static int 7612 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7613 { 7614 struct tcptemp *t_template; 7615 struct inpcb *inp = tptoinpcb(tp); 7616 7617 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 7618 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 7619 /* 7620 * Keep-alive timer went off; send something or drop connection if 7621 * idle for too long. 7622 */ 7623 KMOD_TCPSTAT_INC(tcps_keeptimeo); 7624 if (tp->t_state < TCPS_ESTABLISHED) 7625 goto dropit; 7626 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 7627 tp->t_state <= TCPS_CLOSING) { 7628 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 7629 goto dropit; 7630 /* 7631 * Send a packet designed to force a response if the peer is 7632 * up and reachable: either an ACK if the connection is 7633 * still alive, or an RST if the peer has closed the 7634 * connection due to timeout or reboot. Using sequence 7635 * number tp->snd_una-1 causes the transmitted zero-length 7636 * segment to lie outside the receive window; by the 7637 * protocol spec, this requires the correspondent TCP to 7638 * respond. 7639 */ 7640 KMOD_TCPSTAT_INC(tcps_keepprobe); 7641 t_template = tcpip_maketemplate(inp); 7642 if (t_template) { 7643 if (rack->forced_ack == 0) { 7644 rack->forced_ack = 1; 7645 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 7646 } else { 7647 rack->probe_not_answered = 1; 7648 } 7649 tcp_respond(tp, t_template->tt_ipgen, 7650 &t_template->tt_t, (struct mbuf *)NULL, 7651 tp->rcv_nxt, tp->snd_una - 1, 0); 7652 free(t_template, M_TEMP); 7653 } 7654 } 7655 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 7656 return (1); 7657 dropit: 7658 KMOD_TCPSTAT_INC(tcps_keepdrops); 7659 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 7660 return (-ETIMEDOUT); /* tcp_drop() */ 7661 } 7662 7663 /* 7664 * Retransmit helper function, clear up all the ack 7665 * flags and take care of important book keeping. 7666 */ 7667 static void 7668 rack_remxt_tmr(struct tcpcb *tp) 7669 { 7670 /* 7671 * The retransmit timer went off, all sack'd blocks must be 7672 * un-acked. 7673 */ 7674 struct rack_sendmap *rsm, *trsm = NULL; 7675 struct tcp_rack *rack; 7676 7677 rack = (struct tcp_rack *)tp->t_fb_ptr; 7678 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 7679 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 7680 if (rack->r_state && (rack->r_state != tp->t_state)) 7681 rack_set_state(tp, rack); 7682 /* 7683 * Ideally we would like to be able to 7684 * mark SACK-PASS on anything not acked here. 7685 * 7686 * However, if we do that we would burst out 7687 * all that data 1ms apart. This would be unwise, 7688 * so for now we will just let the normal rxt timer 7689 * and tlp timer take care of it. 7690 * 7691 * Also we really need to stick them back in sequence 7692 * order. This way we send in the proper order and any 7693 * sacks that come floating in will "re-ack" the data. 7694 * To do this we zap the tmap with an INIT and then 7695 * walk through and place every rsm in the tail queue 7696 * hash table back in its seq ordered place. 7697 */ 7698 TAILQ_INIT(&rack->r_ctl.rc_tmap); 7699 7700 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 7701 rsm->r_dupack = 0; 7702 if (rack_verbose_logging) 7703 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7704 /* We must re-add it back to the tlist */ 7705 if (trsm == NULL) { 7706 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7707 } else { 7708 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 7709 } 7710 rsm->r_in_tmap = 1; 7711 trsm = rsm; 7712 if (rsm->r_flags & RACK_ACKED) 7713 rsm->r_flags |= RACK_WAS_ACKED; 7714 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED); 7715 rsm->r_flags |= RACK_MUST_RXT; 7716 } 7717 /* Clear the count (we just un-acked them) */ 7718 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 7719 rack->r_ctl.rc_sacked = 0; 7720 rack->r_ctl.rc_sacklast = NULL; 7721 rack->r_ctl.rc_agg_delayed = 0; 7722 rack->r_early = 0; 7723 rack->r_ctl.rc_agg_early = 0; 7724 rack->r_late = 0; 7725 /* Clear the tlp rtx mark */ 7726 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); 7727 if (rack->r_ctl.rc_resend != NULL) 7728 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 7729 rack->r_ctl.rc_prr_sndcnt = 0; 7730 rack_log_to_prr(rack, 6, 0, __LINE__); 7731 rack->r_timer_override = 1; 7732 if ((((tp->t_flags & TF_SACK_PERMIT) == 0) 7733 #ifdef TCP_SAD_DETECTION 7734 || (rack->sack_attack_disable != 0) 7735 #endif 7736 ) && ((tp->t_flags & TF_SENTFIN) == 0)) { 7737 /* 7738 * For non-sack customers new data 7739 * needs to go out as retransmits until 7740 * we retransmit up to snd_max. 7741 */ 7742 rack->r_must_retran = 1; 7743 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 7744 rack->r_ctl.rc_sacked); 7745 } 7746 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 7747 } 7748 7749 static void 7750 rack_convert_rtts(struct tcpcb *tp) 7751 { 7752 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC); 7753 tp->t_rxtcur = RACK_REXMTVAL(tp); 7754 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 7755 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 7756 } 7757 if (tp->t_rxtcur > rack_rto_max) { 7758 tp->t_rxtcur = rack_rto_max; 7759 } 7760 } 7761 7762 static void 7763 rack_cc_conn_init(struct tcpcb *tp) 7764 { 7765 struct tcp_rack *rack; 7766 uint32_t srtt; 7767 7768 rack = (struct tcp_rack *)tp->t_fb_ptr; 7769 srtt = tp->t_srtt; 7770 cc_conn_init(tp); 7771 /* 7772 * Now convert to rack's internal format, 7773 * if required. 7774 */ 7775 if ((srtt == 0) && (tp->t_srtt != 0)) 7776 rack_convert_rtts(tp); 7777 /* 7778 * We want a chance to stay in slowstart as 7779 * we create a connection. TCP spec says that 7780 * initially ssthresh is infinite. For our 7781 * purposes that is the snd_wnd. 7782 */ 7783 if (tp->snd_ssthresh < tp->snd_wnd) { 7784 tp->snd_ssthresh = tp->snd_wnd; 7785 } 7786 /* 7787 * We also want to assure a IW worth of 7788 * data can get inflight. 7789 */ 7790 if (rc_init_window(rack) < tp->snd_cwnd) 7791 tp->snd_cwnd = rc_init_window(rack); 7792 } 7793 7794 /* 7795 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 7796 * we will setup to retransmit the lowest seq number outstanding. 7797 */ 7798 static int 7799 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7800 { 7801 struct inpcb *inp = tptoinpcb(tp); 7802 int32_t rexmt; 7803 int32_t retval = 0; 7804 bool isipv6; 7805 7806 if ((tp->t_flags & TF_GPUTINPROG) && 7807 (tp->t_rxtshift)) { 7808 /* 7809 * We have had a second timeout 7810 * measurements on successive rxt's are not profitable. 7811 * It is unlikely to be of any use (the network is 7812 * broken or the client went away). 7813 */ 7814 tp->t_flags &= ~TF_GPUTINPROG; 7815 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7816 rack->r_ctl.rc_gp_srtt /*flex1*/, 7817 tp->gput_seq, 7818 0, 0, 18, __LINE__, NULL, 0); 7819 } 7820 if (ctf_progress_timeout_check(tp, false)) { 7821 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 7822 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7823 return (-ETIMEDOUT); /* tcp_drop() */ 7824 } 7825 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 7826 rack->r_ctl.retran_during_recovery = 0; 7827 rack->rc_ack_required = 1; 7828 rack->r_ctl.dsack_byte_cnt = 0; 7829 if (IN_FASTRECOVERY(tp->t_flags)) 7830 tp->t_flags |= TF_WASFRECOVERY; 7831 else 7832 tp->t_flags &= ~TF_WASFRECOVERY; 7833 if (IN_CONGRECOVERY(tp->t_flags)) 7834 tp->t_flags |= TF_WASCRECOVERY; 7835 else 7836 tp->t_flags &= ~TF_WASCRECOVERY; 7837 if (TCPS_HAVEESTABLISHED(tp->t_state) && 7838 (tp->snd_una == tp->snd_max)) { 7839 /* Nothing outstanding .. nothing to do */ 7840 return (0); 7841 } 7842 if (rack->r_ctl.dsack_persist) { 7843 rack->r_ctl.dsack_persist--; 7844 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 7845 rack->r_ctl.num_dsack = 0; 7846 } 7847 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 7848 } 7849 /* 7850 * Rack can only run one timer at a time, so we cannot 7851 * run a KEEPINIT (gating SYN sending) and a retransmit 7852 * timer for the SYN. So if we are in a front state and 7853 * have a KEEPINIT timer we need to check the first transmit 7854 * against now to see if we have exceeded the KEEPINIT time 7855 * (if one is set). 7856 */ 7857 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 7858 (TP_KEEPINIT(tp) != 0)) { 7859 struct rack_sendmap *rsm; 7860 7861 rsm = tqhash_min(rack->r_ctl.tqh); 7862 if (rsm) { 7863 /* Ok we have something outstanding to test keepinit with */ 7864 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 7865 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 7866 /* We have exceeded the KEEPINIT time */ 7867 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 7868 goto drop_it; 7869 } 7870 } 7871 } 7872 /* 7873 * Retransmission timer went off. Message has not been acked within 7874 * retransmit interval. Back off to a longer retransmit interval 7875 * and retransmit one segment. 7876 */ 7877 rack_remxt_tmr(tp); 7878 if ((rack->r_ctl.rc_resend == NULL) || 7879 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 7880 /* 7881 * If the rwnd collapsed on 7882 * the one we are retransmitting 7883 * it does not count against the 7884 * rxt count. 7885 */ 7886 tp->t_rxtshift++; 7887 } 7888 if (tp->t_rxtshift > V_tcp_retries) { 7889 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 7890 drop_it: 7891 tp->t_rxtshift = V_tcp_retries; 7892 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 7893 /* XXXGL: previously t_softerror was casted to uint16_t */ 7894 MPASS(tp->t_softerror >= 0); 7895 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; 7896 goto out; /* tcp_drop() */ 7897 } 7898 if (tp->t_state == TCPS_SYN_SENT) { 7899 /* 7900 * If the SYN was retransmitted, indicate CWND to be limited 7901 * to 1 segment in cc_conn_init(). 7902 */ 7903 tp->snd_cwnd = 1; 7904 } else if (tp->t_rxtshift == 1) { 7905 /* 7906 * first retransmit; record ssthresh and cwnd so they can be 7907 * recovered if this turns out to be a "bad" retransmit. A 7908 * retransmit is considered "bad" if an ACK for this segment 7909 * is received within RTT/2 interval; the assumption here is 7910 * that the ACK was already in flight. See "On Estimating 7911 * End-to-End Network Path Properties" by Allman and Paxson 7912 * for more details. 7913 */ 7914 tp->snd_cwnd_prev = tp->snd_cwnd; 7915 tp->snd_ssthresh_prev = tp->snd_ssthresh; 7916 tp->snd_recover_prev = tp->snd_recover; 7917 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 7918 tp->t_flags |= TF_PREVVALID; 7919 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 7920 tp->t_flags &= ~TF_PREVVALID; 7921 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 7922 if ((tp->t_state == TCPS_SYN_SENT) || 7923 (tp->t_state == TCPS_SYN_RECEIVED)) 7924 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 7925 else 7926 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 7927 7928 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 7929 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 7930 /* 7931 * We enter the path for PLMTUD if connection is established or, if 7932 * connection is FIN_WAIT_1 status, reason for the last is that if 7933 * amount of data we send is very small, we could send it in couple 7934 * of packets and process straight to FIN. In that case we won't 7935 * catch ESTABLISHED state. 7936 */ 7937 #ifdef INET6 7938 isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false; 7939 #else 7940 isipv6 = false; 7941 #endif 7942 if (((V_tcp_pmtud_blackhole_detect == 1) || 7943 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 7944 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 7945 ((tp->t_state == TCPS_ESTABLISHED) || 7946 (tp->t_state == TCPS_FIN_WAIT_1))) { 7947 /* 7948 * Idea here is that at each stage of mtu probe (usually, 7949 * 1448 -> 1188 -> 524) should be given 2 chances to recover 7950 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 7951 * should take care of that. 7952 */ 7953 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 7954 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 7955 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 7956 tp->t_rxtshift % 2 == 0)) { 7957 /* 7958 * Enter Path MTU Black-hole Detection mechanism: - 7959 * Disable Path MTU Discovery (IP "DF" bit). - 7960 * Reduce MTU to lower value than what we negotiated 7961 * with peer. 7962 */ 7963 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 7964 /* Record that we may have found a black hole. */ 7965 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 7966 /* Keep track of previous MSS. */ 7967 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 7968 } 7969 7970 /* 7971 * Reduce the MSS to blackhole value or to the 7972 * default in an attempt to retransmit. 7973 */ 7974 #ifdef INET6 7975 if (isipv6 && 7976 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 7977 /* Use the sysctl tuneable blackhole MSS. */ 7978 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 7979 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 7980 } else if (isipv6) { 7981 /* Use the default MSS. */ 7982 tp->t_maxseg = V_tcp_v6mssdflt; 7983 /* 7984 * Disable Path MTU Discovery when we switch 7985 * to minmss. 7986 */ 7987 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 7988 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 7989 } 7990 #endif 7991 #if defined(INET6) && defined(INET) 7992 else 7993 #endif 7994 #ifdef INET 7995 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 7996 /* Use the sysctl tuneable blackhole MSS. */ 7997 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 7998 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 7999 } else { 8000 /* Use the default MSS. */ 8001 tp->t_maxseg = V_tcp_mssdflt; 8002 /* 8003 * Disable Path MTU Discovery when we switch 8004 * to minmss. 8005 */ 8006 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 8007 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 8008 } 8009 #endif 8010 } else { 8011 /* 8012 * If further retransmissions are still unsuccessful 8013 * with a lowered MTU, maybe this isn't a blackhole 8014 * and we restore the previous MSS and blackhole 8015 * detection flags. The limit '6' is determined by 8016 * giving each probe stage (1448, 1188, 524) 2 8017 * chances to recover. 8018 */ 8019 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 8020 (tp->t_rxtshift >= 6)) { 8021 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 8022 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 8023 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 8024 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 8025 } 8026 } 8027 } 8028 /* 8029 * Disable RFC1323 and SACK if we haven't got any response to 8030 * our third SYN to work-around some broken terminal servers 8031 * (most of which have hopefully been retired) that have bad VJ 8032 * header compression code which trashes TCP segments containing 8033 * unknown-to-them TCP options. 8034 */ 8035 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 8036 (tp->t_rxtshift == 3)) 8037 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 8038 /* 8039 * If we backed off this far, our srtt estimate is probably bogus. 8040 * Clobber it so we'll take the next rtt measurement as our srtt; 8041 * move the current srtt into rttvar to keep the current retransmit 8042 * times until then. 8043 */ 8044 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 8045 #ifdef INET6 8046 if ((inp->inp_vflag & INP_IPV6) != 0) 8047 in6_losing(inp); 8048 else 8049 #endif 8050 in_losing(inp); 8051 tp->t_rttvar += tp->t_srtt; 8052 tp->t_srtt = 0; 8053 } 8054 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 8055 tp->snd_recover = tp->snd_max; 8056 tp->t_flags |= TF_ACKNOW; 8057 tp->t_rtttime = 0; 8058 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__); 8059 out: 8060 return (retval); 8061 } 8062 8063 static int 8064 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp) 8065 { 8066 int32_t ret = 0; 8067 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 8068 8069 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 8070 (tp->t_flags & TF_GPUTINPROG)) { 8071 /* 8072 * We have a goodput in progress 8073 * and we have entered a late state. 8074 * Do we have enough data in the sb 8075 * to handle the GPUT request? 8076 */ 8077 uint32_t bytes; 8078 8079 bytes = tp->gput_ack - tp->gput_seq; 8080 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 8081 bytes += tp->gput_seq - tp->snd_una; 8082 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 8083 /* 8084 * There are not enough bytes in the socket 8085 * buffer that have been sent to cover this 8086 * measurement. Cancel it. 8087 */ 8088 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 8089 rack->r_ctl.rc_gp_srtt /*flex1*/, 8090 tp->gput_seq, 8091 0, 0, 18, __LINE__, NULL, 0); 8092 tp->t_flags &= ~TF_GPUTINPROG; 8093 } 8094 } 8095 if (timers == 0) { 8096 return (0); 8097 } 8098 if (tp->t_state == TCPS_LISTEN) { 8099 /* no timers on listen sockets */ 8100 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 8101 return (0); 8102 return (1); 8103 } 8104 if ((timers & PACE_TMR_RACK) && 8105 rack->rc_on_min_to) { 8106 /* 8107 * For the rack timer when we 8108 * are on a min-timeout (which means rrr_conf = 3) 8109 * we don't want to check the timer. It may 8110 * be going off for a pace and thats ok we 8111 * want to send the retransmit (if its ready). 8112 * 8113 * If its on a normal rack timer (non-min) then 8114 * we will check if its expired. 8115 */ 8116 goto skip_time_check; 8117 } 8118 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 8119 uint32_t left; 8120 8121 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 8122 ret = -1; 8123 rack_log_to_processing(rack, cts, ret, 0); 8124 return (0); 8125 } 8126 if (hpts_calling == 0) { 8127 /* 8128 * A user send or queued mbuf (sack) has called us? We 8129 * return 0 and let the pacing guards 8130 * deal with it if they should or 8131 * should not cause a send. 8132 */ 8133 ret = -2; 8134 rack_log_to_processing(rack, cts, ret, 0); 8135 return (0); 8136 } 8137 /* 8138 * Ok our timer went off early and we are not paced false 8139 * alarm, go back to sleep. We make sure we don't have 8140 * no-sack wakeup on since we no longer have a PKT_OUTPUT 8141 * flag in place. 8142 */ 8143 rack->rc_tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE; 8144 ret = -3; 8145 left = rack->r_ctl.rc_timer_exp - cts; 8146 tcp_hpts_insert(tp, HPTS_MS_TO_SLOTS(left)); 8147 rack_log_to_processing(rack, cts, ret, left); 8148 return (1); 8149 } 8150 skip_time_check: 8151 rack->rc_tmr_stopped = 0; 8152 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 8153 if (timers & PACE_TMR_DELACK) { 8154 ret = rack_timeout_delack(tp, rack, cts); 8155 } else if (timers & PACE_TMR_RACK) { 8156 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8157 rack->r_fast_output = 0; 8158 ret = rack_timeout_rack(tp, rack, cts); 8159 } else if (timers & PACE_TMR_TLP) { 8160 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8161 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); 8162 } else if (timers & PACE_TMR_RXT) { 8163 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8164 rack->r_fast_output = 0; 8165 ret = rack_timeout_rxt(tp, rack, cts); 8166 } else if (timers & PACE_TMR_PERSIT) { 8167 ret = rack_timeout_persist(tp, rack, cts); 8168 } else if (timers & PACE_TMR_KEEP) { 8169 ret = rack_timeout_keepalive(tp, rack, cts); 8170 } 8171 rack_log_to_processing(rack, cts, ret, timers); 8172 return (ret); 8173 } 8174 8175 static void 8176 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 8177 { 8178 struct timeval tv; 8179 uint32_t us_cts, flags_on_entry; 8180 uint8_t hpts_removed = 0; 8181 8182 flags_on_entry = rack->r_ctl.rc_hpts_flags; 8183 us_cts = tcp_get_usecs(&tv); 8184 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 8185 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 8186 ((tp->snd_max - tp->snd_una) == 0))) { 8187 tcp_hpts_remove(rack->rc_tp); 8188 hpts_removed = 1; 8189 /* If we were not delayed cancel out the flag. */ 8190 if ((tp->snd_max - tp->snd_una) == 0) 8191 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 8192 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 8193 } 8194 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 8195 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 8196 if (tcp_in_hpts(rack->rc_tp) && 8197 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 8198 /* 8199 * Canceling timer's when we have no output being 8200 * paced. We also must remove ourselves from the 8201 * hpts. 8202 */ 8203 tcp_hpts_remove(rack->rc_tp); 8204 hpts_removed = 1; 8205 } 8206 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 8207 } 8208 if (hpts_removed == 0) 8209 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 8210 } 8211 8212 static int 8213 rack_stopall(struct tcpcb *tp) 8214 { 8215 struct tcp_rack *rack; 8216 8217 rack = (struct tcp_rack *)tp->t_fb_ptr; 8218 rack->t_timers_stopped = 1; 8219 8220 tcp_hpts_remove(tp); 8221 8222 return (0); 8223 } 8224 8225 static void 8226 rack_stop_all_timers(struct tcpcb *tp, struct tcp_rack *rack) 8227 { 8228 /* 8229 * Assure no timers are running. 8230 */ 8231 if (tcp_timer_active(tp, TT_PERSIST)) { 8232 /* We enter in persists, set the flag appropriately */ 8233 rack->rc_in_persist = 1; 8234 } 8235 if (tcp_in_hpts(rack->rc_tp)) { 8236 tcp_hpts_remove(rack->rc_tp); 8237 } 8238 } 8239 8240 static void 8241 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 8242 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag, int segsiz) 8243 { 8244 int32_t idx; 8245 8246 rsm->r_rtr_cnt++; 8247 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8248 rsm->r_dupack = 0; 8249 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 8250 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 8251 rsm->r_flags |= RACK_OVERMAX; 8252 } 8253 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { 8254 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 8255 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 8256 } 8257 idx = rsm->r_rtr_cnt - 1; 8258 rsm->r_tim_lastsent[idx] = ts; 8259 /* 8260 * Here we don't add in the len of send, since its already 8261 * in snduna <->snd_max. 8262 */ 8263 rsm->r_fas = ctf_flight_size(rack->rc_tp, 8264 rack->r_ctl.rc_sacked); 8265 if (rsm->r_flags & RACK_ACKED) { 8266 /* Problably MTU discovery messing with us */ 8267 rsm->r_flags &= ~RACK_ACKED; 8268 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8269 } 8270 if (rsm->r_in_tmap) { 8271 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8272 rsm->r_in_tmap = 0; 8273 } 8274 /* Lets make sure it really is in or not the GP window */ 8275 rack_mark_in_gp_win(tp, rsm); 8276 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8277 rsm->r_in_tmap = 1; 8278 rsm->r_bas = (uint8_t)(((rsm->r_end - rsm->r_start) + segsiz - 1) / segsiz); 8279 /* Take off the must retransmit flag, if its on */ 8280 if (rsm->r_flags & RACK_MUST_RXT) { 8281 if (rack->r_must_retran) 8282 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 8283 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 8284 /* 8285 * We have retransmitted all we need. Clear 8286 * any must retransmit flags. 8287 */ 8288 rack->r_must_retran = 0; 8289 rack->r_ctl.rc_out_at_rto = 0; 8290 } 8291 rsm->r_flags &= ~RACK_MUST_RXT; 8292 } 8293 /* Remove any collapsed flag */ 8294 rsm->r_flags &= ~RACK_RWND_COLLAPSED; 8295 if (rsm->r_flags & RACK_SACK_PASSED) { 8296 /* We have retransmitted due to the SACK pass */ 8297 rsm->r_flags &= ~RACK_SACK_PASSED; 8298 rsm->r_flags |= RACK_WAS_SACKPASS; 8299 } 8300 } 8301 8302 static uint32_t 8303 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 8304 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag, int segsiz) 8305 { 8306 /* 8307 * We (re-)transmitted starting at rsm->r_start for some length 8308 * (possibly less than r_end. 8309 */ 8310 struct rack_sendmap *nrsm; 8311 int insret __diagused; 8312 uint32_t c_end; 8313 int32_t len; 8314 8315 len = *lenp; 8316 c_end = rsm->r_start + len; 8317 if (SEQ_GEQ(c_end, rsm->r_end)) { 8318 /* 8319 * We retransmitted the whole piece or more than the whole 8320 * slopping into the next rsm. 8321 */ 8322 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); 8323 if (c_end == rsm->r_end) { 8324 *lenp = 0; 8325 return (0); 8326 } else { 8327 int32_t act_len; 8328 8329 /* Hangs over the end return whats left */ 8330 act_len = rsm->r_end - rsm->r_start; 8331 *lenp = (len - act_len); 8332 return (rsm->r_end); 8333 } 8334 /* We don't get out of this block. */ 8335 } 8336 /* 8337 * Here we retransmitted less than the whole thing which means we 8338 * have to split this into what was transmitted and what was not. 8339 */ 8340 nrsm = rack_alloc_full_limit(rack); 8341 if (nrsm == NULL) { 8342 /* 8343 * We can't get memory, so lets not proceed. 8344 */ 8345 *lenp = 0; 8346 return (0); 8347 } 8348 /* 8349 * So here we are going to take the original rsm and make it what we 8350 * retransmitted. nrsm will be the tail portion we did not 8351 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 8352 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 8353 * 1, 6 and the new piece will be 6, 11. 8354 */ 8355 rack_clone_rsm(rack, nrsm, rsm, c_end); 8356 nrsm->r_dupack = 0; 8357 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 8358 #ifndef INVARIANTS 8359 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 8360 #else 8361 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 8362 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 8363 nrsm, insret, rack, rsm); 8364 } 8365 #endif 8366 if (rsm->r_in_tmap) { 8367 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8368 nrsm->r_in_tmap = 1; 8369 } 8370 rsm->r_flags &= (~RACK_HAS_FIN); 8371 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); 8372 /* Log a split of rsm into rsm and nrsm */ 8373 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 8374 *lenp = 0; 8375 return (0); 8376 } 8377 8378 static void 8379 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 8380 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts, 8381 struct rack_sendmap *hintrsm, uint16_t add_flag, struct mbuf *s_mb, 8382 uint32_t s_moff, int hw_tls, int segsiz) 8383 { 8384 struct tcp_rack *rack; 8385 struct rack_sendmap *rsm, *nrsm; 8386 int insret __diagused; 8387 8388 register uint32_t snd_max, snd_una; 8389 8390 /* 8391 * Add to the RACK log of packets in flight or retransmitted. If 8392 * there is a TS option we will use the TS echoed, if not we will 8393 * grab a TS. 8394 * 8395 * Retransmissions will increment the count and move the ts to its 8396 * proper place. Note that if options do not include TS's then we 8397 * won't be able to effectively use the ACK for an RTT on a retran. 8398 * 8399 * Notes about r_start and r_end. Lets consider a send starting at 8400 * sequence 1 for 10 bytes. In such an example the r_start would be 8401 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 8402 * This means that r_end is actually the first sequence for the next 8403 * slot (11). 8404 * 8405 */ 8406 /* 8407 * If err is set what do we do XXXrrs? should we not add the thing? 8408 * -- i.e. return if err != 0 or should we pretend we sent it? -- 8409 * i.e. proceed with add ** do this for now. 8410 */ 8411 INP_WLOCK_ASSERT(tptoinpcb(tp)); 8412 if (err) 8413 /* 8414 * We don't log errors -- we could but snd_max does not 8415 * advance in this case either. 8416 */ 8417 return; 8418 8419 if (th_flags & TH_RST) { 8420 /* 8421 * We don't log resets and we return immediately from 8422 * sending 8423 */ 8424 return; 8425 } 8426 rack = (struct tcp_rack *)tp->t_fb_ptr; 8427 snd_una = tp->snd_una; 8428 snd_max = tp->snd_max; 8429 if (th_flags & (TH_SYN | TH_FIN)) { 8430 /* 8431 * The call to rack_log_output is made before bumping 8432 * snd_max. This means we can record one extra byte on a SYN 8433 * or FIN if seq_out is adding more on and a FIN is present 8434 * (and we are not resending). 8435 */ 8436 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 8437 len++; 8438 if (th_flags & TH_FIN) 8439 len++; 8440 if (SEQ_LT(snd_max, tp->snd_nxt)) { 8441 /* 8442 * The add/update as not been done for the FIN/SYN 8443 * yet. 8444 */ 8445 snd_max = tp->snd_nxt; 8446 } 8447 } 8448 if (SEQ_LEQ((seq_out + len), snd_una)) { 8449 /* Are sending an old segment to induce an ack (keep-alive)? */ 8450 return; 8451 } 8452 if (SEQ_LT(seq_out, snd_una)) { 8453 /* huh? should we panic? */ 8454 uint32_t end; 8455 8456 end = seq_out + len; 8457 seq_out = snd_una; 8458 if (SEQ_GEQ(end, seq_out)) 8459 len = end - seq_out; 8460 else 8461 len = 0; 8462 } 8463 if (len == 0) { 8464 /* We don't log zero window probes */ 8465 return; 8466 } 8467 if (IN_FASTRECOVERY(tp->t_flags)) { 8468 rack->r_ctl.rc_prr_out += len; 8469 } 8470 /* First question is it a retransmission or new? */ 8471 if (seq_out == snd_max) { 8472 /* Its new */ 8473 rack_chk_req_and_hybrid_on_out(rack, seq_out, len, cts); 8474 again: 8475 rsm = rack_alloc(rack); 8476 if (rsm == NULL) { 8477 /* 8478 * Hmm out of memory and the tcb got destroyed while 8479 * we tried to wait. 8480 */ 8481 return; 8482 } 8483 if (th_flags & TH_FIN) { 8484 rsm->r_flags = RACK_HAS_FIN|add_flag; 8485 } else { 8486 rsm->r_flags = add_flag; 8487 } 8488 if (hw_tls) 8489 rsm->r_hw_tls = 1; 8490 rsm->r_tim_lastsent[0] = cts; 8491 rsm->r_rtr_cnt = 1; 8492 rsm->r_rtr_bytes = 0; 8493 if (th_flags & TH_SYN) { 8494 /* The data space is one beyond snd_una */ 8495 rsm->r_flags |= RACK_HAS_SYN; 8496 } 8497 rsm->r_start = seq_out; 8498 rsm->r_end = rsm->r_start + len; 8499 rack_mark_in_gp_win(tp, rsm); 8500 rsm->r_dupack = 0; 8501 /* 8502 * save off the mbuf location that 8503 * sndmbuf_noadv returned (which is 8504 * where we started copying from).. 8505 */ 8506 rsm->m = s_mb; 8507 rsm->soff = s_moff; 8508 /* 8509 * Here we do add in the len of send, since its not yet 8510 * reflected in in snduna <->snd_max 8511 */ 8512 rsm->r_fas = (ctf_flight_size(rack->rc_tp, 8513 rack->r_ctl.rc_sacked) + 8514 (rsm->r_end - rsm->r_start)); 8515 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ 8516 if (rsm->m) { 8517 if (rsm->m->m_len <= rsm->soff) { 8518 /* 8519 * XXXrrs Question, will this happen? 8520 * 8521 * If sbsndptr is set at the correct place 8522 * then s_moff should always be somewhere 8523 * within rsm->m. But if the sbsndptr was 8524 * off then that won't be true. If it occurs 8525 * we need to walkout to the correct location. 8526 */ 8527 struct mbuf *lm; 8528 8529 lm = rsm->m; 8530 while (lm->m_len <= rsm->soff) { 8531 rsm->soff -= lm->m_len; 8532 lm = lm->m_next; 8533 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", 8534 __func__, rack, s_moff, s_mb, rsm->soff)); 8535 } 8536 rsm->m = lm; 8537 } 8538 rsm->orig_m_len = rsm->m->m_len; 8539 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 8540 } else { 8541 rsm->orig_m_len = 0; 8542 rsm->orig_t_space = 0; 8543 } 8544 rsm->r_bas = (uint8_t)((len + segsiz - 1) / segsiz); 8545 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8546 /* Log a new rsm */ 8547 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); 8548 #ifndef INVARIANTS 8549 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 8550 #else 8551 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 8552 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 8553 nrsm, insret, rack, rsm); 8554 } 8555 #endif 8556 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8557 rsm->r_in_tmap = 1; 8558 /* 8559 * Special case detection, is there just a single 8560 * packet outstanding when we are not in recovery? 8561 * 8562 * If this is true mark it so. 8563 */ 8564 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 8565 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 8566 struct rack_sendmap *prsm; 8567 8568 prsm = tqhash_prev(rack->r_ctl.tqh, rsm); 8569 if (prsm) 8570 prsm->r_one_out_nr = 1; 8571 } 8572 return; 8573 } 8574 /* 8575 * If we reach here its a retransmission and we need to find it. 8576 */ 8577 more: 8578 if (hintrsm && (hintrsm->r_start == seq_out)) { 8579 rsm = hintrsm; 8580 hintrsm = NULL; 8581 } else { 8582 /* No hints sorry */ 8583 rsm = NULL; 8584 } 8585 if ((rsm) && (rsm->r_start == seq_out)) { 8586 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); 8587 if (len == 0) { 8588 return; 8589 } else { 8590 goto more; 8591 } 8592 } 8593 /* Ok it was not the last pointer go through it the hard way. */ 8594 refind: 8595 rsm = tqhash_find(rack->r_ctl.tqh, seq_out); 8596 if (rsm) { 8597 if (rsm->r_start == seq_out) { 8598 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); 8599 if (len == 0) { 8600 return; 8601 } else { 8602 goto refind; 8603 } 8604 } 8605 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 8606 /* Transmitted within this piece */ 8607 /* 8608 * Ok we must split off the front and then let the 8609 * update do the rest 8610 */ 8611 nrsm = rack_alloc_full_limit(rack); 8612 if (nrsm == NULL) { 8613 rack_update_rsm(tp, rack, rsm, cts, add_flag, segsiz); 8614 return; 8615 } 8616 /* 8617 * copy rsm to nrsm and then trim the front of rsm 8618 * to not include this part. 8619 */ 8620 rack_clone_rsm(rack, nrsm, rsm, seq_out); 8621 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 8622 #ifndef INVARIANTS 8623 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 8624 #else 8625 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 8626 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 8627 nrsm, insret, rack, rsm); 8628 } 8629 #endif 8630 if (rsm->r_in_tmap) { 8631 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8632 nrsm->r_in_tmap = 1; 8633 } 8634 rsm->r_flags &= (~RACK_HAS_FIN); 8635 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag, segsiz); 8636 if (len == 0) { 8637 return; 8638 } else if (len > 0) 8639 goto refind; 8640 } 8641 } 8642 /* 8643 * Hmm not found in map did they retransmit both old and on into the 8644 * new? 8645 */ 8646 if (seq_out == tp->snd_max) { 8647 goto again; 8648 } else if (SEQ_LT(seq_out, tp->snd_max)) { 8649 #ifdef INVARIANTS 8650 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 8651 seq_out, len, tp->snd_una, tp->snd_max); 8652 printf("Starting Dump of all rack entries\n"); 8653 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 8654 printf("rsm:%p start:%u end:%u\n", 8655 rsm, rsm->r_start, rsm->r_end); 8656 } 8657 printf("Dump complete\n"); 8658 panic("seq_out not found rack:%p tp:%p", 8659 rack, tp); 8660 #endif 8661 } else { 8662 #ifdef INVARIANTS 8663 /* 8664 * Hmm beyond sndmax? (only if we are using the new rtt-pack 8665 * flag) 8666 */ 8667 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 8668 seq_out, len, tp->snd_max, tp); 8669 #endif 8670 } 8671 } 8672 8673 /* 8674 * Record one of the RTT updates from an ack into 8675 * our sample structure. 8676 */ 8677 8678 static void 8679 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt, 8680 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt) 8681 { 8682 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 8683 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { 8684 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; 8685 } 8686 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 8687 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { 8688 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; 8689 } 8690 if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 8691 if (us_rtt < rack->r_ctl.rc_gp_lowrtt) 8692 rack->r_ctl.rc_gp_lowrtt = us_rtt; 8693 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd) 8694 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 8695 } 8696 if ((confidence == 1) && 8697 ((rsm == NULL) || 8698 (rsm->r_just_ret) || 8699 (rsm->r_one_out_nr && 8700 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) { 8701 /* 8702 * If the rsm had a just return 8703 * hit it then we can't trust the 8704 * rtt measurement for buffer deterimination 8705 * Note that a confidence of 2, indicates 8706 * SACK'd which overrides the r_just_ret or 8707 * the r_one_out_nr. If it was a CUM-ACK and 8708 * we had only two outstanding, but get an 8709 * ack for only 1. Then that also lowers our 8710 * confidence. 8711 */ 8712 confidence = 0; 8713 } 8714 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 8715 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) { 8716 if (rack->r_ctl.rack_rs.confidence == 0) { 8717 /* 8718 * We take anything with no current confidence 8719 * saved. 8720 */ 8721 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 8722 rack->r_ctl.rack_rs.confidence = confidence; 8723 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 8724 } else if (confidence != 0) { 8725 /* 8726 * Once we have a confident number, 8727 * we can update it with a smaller 8728 * value since this confident number 8729 * may include the DSACK time until 8730 * the next segment (the second one) arrived. 8731 */ 8732 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 8733 rack->r_ctl.rack_rs.confidence = confidence; 8734 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 8735 } 8736 } 8737 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence); 8738 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; 8739 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; 8740 rack->r_ctl.rack_rs.rs_rtt_cnt++; 8741 } 8742 8743 /* 8744 * Collect new round-trip time estimate 8745 * and update averages and current timeout. 8746 */ 8747 static void 8748 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 8749 { 8750 int32_t delta; 8751 int32_t rtt; 8752 8753 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 8754 /* No valid sample */ 8755 return; 8756 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 8757 /* We are to use the lowest RTT seen in a single ack */ 8758 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 8759 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 8760 /* We are to use the highest RTT seen in a single ack */ 8761 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 8762 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 8763 /* We are to use the average RTT seen in a single ack */ 8764 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 8765 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 8766 } else { 8767 #ifdef INVARIANTS 8768 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 8769 #endif 8770 return; 8771 } 8772 if (rtt == 0) 8773 rtt = 1; 8774 if (rack->rc_gp_rtt_set == 0) { 8775 /* 8776 * With no RTT we have to accept 8777 * even one we are not confident of. 8778 */ 8779 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 8780 rack->rc_gp_rtt_set = 1; 8781 } else if (rack->r_ctl.rack_rs.confidence) { 8782 /* update the running gp srtt */ 8783 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 8784 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 8785 } 8786 if (rack->r_ctl.rack_rs.confidence) { 8787 /* 8788 * record the low and high for highly buffered path computation, 8789 * we only do this if we are confident (not a retransmission). 8790 */ 8791 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 8792 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 8793 } 8794 if (rack->rc_highly_buffered == 0) { 8795 /* 8796 * Currently once we declare a path has 8797 * highly buffered there is no going 8798 * back, which may be a problem... 8799 */ 8800 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 8801 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 8802 rack->r_ctl.rc_highest_us_rtt, 8803 rack->r_ctl.rc_lowest_us_rtt, 8804 RACK_RTTS_SEEHBP); 8805 rack->rc_highly_buffered = 1; 8806 } 8807 } 8808 } 8809 if ((rack->r_ctl.rack_rs.confidence) || 8810 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 8811 /* 8812 * If we are highly confident of it <or> it was 8813 * never retransmitted we accept it as the last us_rtt. 8814 */ 8815 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 8816 /* The lowest rtt can be set if its was not retransmited */ 8817 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 8818 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 8819 if (rack->r_ctl.rc_lowest_us_rtt == 0) 8820 rack->r_ctl.rc_lowest_us_rtt = 1; 8821 } 8822 } 8823 rack = (struct tcp_rack *)tp->t_fb_ptr; 8824 if (tp->t_srtt != 0) { 8825 /* 8826 * We keep a simple srtt in microseconds, like our rtt 8827 * measurement. We don't need to do any tricks with shifting 8828 * etc. Instead we just add in 1/8th of the new measurement 8829 * and subtract out 1/8 of the old srtt. We do the same with 8830 * the variance after finding the absolute value of the 8831 * difference between this sample and the current srtt. 8832 */ 8833 delta = tp->t_srtt - rtt; 8834 /* Take off 1/8th of the current sRTT */ 8835 tp->t_srtt -= (tp->t_srtt >> 3); 8836 /* Add in 1/8th of the new RTT just measured */ 8837 tp->t_srtt += (rtt >> 3); 8838 if (tp->t_srtt <= 0) 8839 tp->t_srtt = 1; 8840 /* Now lets make the absolute value of the variance */ 8841 if (delta < 0) 8842 delta = -delta; 8843 /* Subtract out 1/8th */ 8844 tp->t_rttvar -= (tp->t_rttvar >> 3); 8845 /* Add in 1/8th of the new variance we just saw */ 8846 tp->t_rttvar += (delta >> 3); 8847 if (tp->t_rttvar <= 0) 8848 tp->t_rttvar = 1; 8849 } else { 8850 /* 8851 * No rtt measurement yet - use the unsmoothed rtt. Set the 8852 * variance to half the rtt (so our first retransmit happens 8853 * at 3*rtt). 8854 */ 8855 tp->t_srtt = rtt; 8856 tp->t_rttvar = rtt >> 1; 8857 } 8858 rack->rc_srtt_measure_made = 1; 8859 KMOD_TCPSTAT_INC(tcps_rttupdated); 8860 if (tp->t_rttupdated < UCHAR_MAX) 8861 tp->t_rttupdated++; 8862 #ifdef STATS 8863 if (rack_stats_gets_ms_rtt == 0) { 8864 /* Send in the microsecond rtt used for rxt timeout purposes */ 8865 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 8866 } else if (rack_stats_gets_ms_rtt == 1) { 8867 /* Send in the millisecond rtt used for rxt timeout purposes */ 8868 int32_t ms_rtt; 8869 8870 /* Round up */ 8871 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 8872 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 8873 } else if (rack_stats_gets_ms_rtt == 2) { 8874 /* Send in the millisecond rtt has close to the path RTT as we can get */ 8875 int32_t ms_rtt; 8876 8877 /* Round up */ 8878 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 8879 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 8880 } else { 8881 /* Send in the microsecond rtt has close to the path RTT as we can get */ 8882 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 8883 } 8884 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_PATHRTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 8885 #endif 8886 /* 8887 * the retransmit should happen at rtt + 4 * rttvar. Because of the 8888 * way we do the smoothing, srtt and rttvar will each average +1/2 8889 * tick of bias. When we compute the retransmit timer, we want 1/2 8890 * tick of rounding and 1 extra tick because of +-1/2 tick 8891 * uncertainty in the firing of the timer. The bias will give us 8892 * exactly the 1.5 tick we need. But, because the bias is 8893 * statistical, we have to test that we don't drop below the minimum 8894 * feasible timer (which is 2 ticks). 8895 */ 8896 tp->t_rxtshift = 0; 8897 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 8898 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 8899 rack_log_rtt_sample(rack, rtt); 8900 tp->t_softerror = 0; 8901 } 8902 8903 8904 static void 8905 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 8906 { 8907 /* 8908 * Apply to filter the inbound us-rtt at us_cts. 8909 */ 8910 uint32_t old_rtt; 8911 8912 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 8913 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 8914 us_rtt, us_cts); 8915 if (old_rtt > us_rtt) { 8916 /* We just hit a new lower rtt time */ 8917 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 8918 __LINE__, RACK_RTTS_NEWRTT); 8919 /* 8920 * Only count it if its lower than what we saw within our 8921 * calculated range. 8922 */ 8923 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 8924 if (rack_probertt_lower_within && 8925 rack->rc_gp_dyn_mul && 8926 (rack->use_fixed_rate == 0) && 8927 (rack->rc_always_pace)) { 8928 /* 8929 * We are seeing a new lower rtt very close 8930 * to the time that we would have entered probe-rtt. 8931 * This is probably due to the fact that a peer flow 8932 * has entered probe-rtt. Lets go in now too. 8933 */ 8934 uint32_t val; 8935 8936 val = rack_probertt_lower_within * rack_time_between_probertt; 8937 val /= 100; 8938 if ((rack->in_probe_rtt == 0) && 8939 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 8940 rack_enter_probertt(rack, us_cts); 8941 } 8942 } 8943 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 8944 } 8945 } 8946 } 8947 8948 static int 8949 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 8950 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 8951 { 8952 uint32_t us_rtt; 8953 int32_t i, all; 8954 uint32_t t, len_acked; 8955 8956 if ((rsm->r_flags & RACK_ACKED) || 8957 (rsm->r_flags & RACK_WAS_ACKED)) 8958 /* Already done */ 8959 return (0); 8960 if (rsm->r_no_rtt_allowed) { 8961 /* Not allowed */ 8962 return (0); 8963 } 8964 if (ack_type == CUM_ACKED) { 8965 if (SEQ_GT(th_ack, rsm->r_end)) { 8966 len_acked = rsm->r_end - rsm->r_start; 8967 all = 1; 8968 } else { 8969 len_acked = th_ack - rsm->r_start; 8970 all = 0; 8971 } 8972 } else { 8973 len_acked = rsm->r_end - rsm->r_start; 8974 all = 0; 8975 } 8976 if (rsm->r_rtr_cnt == 1) { 8977 8978 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 8979 if ((int)t <= 0) 8980 t = 1; 8981 if (!tp->t_rttlow || tp->t_rttlow > t) 8982 tp->t_rttlow = t; 8983 if (!rack->r_ctl.rc_rack_min_rtt || 8984 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8985 rack->r_ctl.rc_rack_min_rtt = t; 8986 if (rack->r_ctl.rc_rack_min_rtt == 0) { 8987 rack->r_ctl.rc_rack_min_rtt = 1; 8988 } 8989 } 8990 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 8991 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8992 else 8993 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8994 if (us_rtt == 0) 8995 us_rtt = 1; 8996 if (CC_ALGO(tp)->rttsample != NULL) { 8997 /* Kick the RTT to the CC */ 8998 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 8999 } 9000 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 9001 if (ack_type == SACKED) { 9002 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 9003 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 9004 } else { 9005 /* 9006 * We need to setup what our confidence 9007 * is in this ack. 9008 * 9009 * If the rsm was app limited and it is 9010 * less than a mss in length (the end 9011 * of the send) then we have a gap. If we 9012 * were app limited but say we were sending 9013 * multiple MSS's then we are more confident 9014 * int it. 9015 * 9016 * When we are not app-limited then we see if 9017 * the rsm is being included in the current 9018 * measurement, we tell this by the app_limited_needs_set 9019 * flag. 9020 * 9021 * Note that being cwnd blocked is not applimited 9022 * as well as the pacing delay between packets which 9023 * are sending only 1 or 2 MSS's also will show up 9024 * in the RTT. We probably need to examine this algorithm 9025 * a bit more and enhance it to account for the delay 9026 * between rsm's. We could do that by saving off the 9027 * pacing delay of each rsm (in an rsm) and then 9028 * factoring that in somehow though for now I am 9029 * not sure how :) 9030 */ 9031 int calc_conf = 0; 9032 9033 if (rsm->r_flags & RACK_APP_LIMITED) { 9034 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 9035 calc_conf = 0; 9036 else 9037 calc_conf = 1; 9038 } else if (rack->app_limited_needs_set == 0) { 9039 calc_conf = 1; 9040 } else { 9041 calc_conf = 0; 9042 } 9043 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 9044 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 9045 calc_conf, rsm, rsm->r_rtr_cnt); 9046 } 9047 if ((rsm->r_flags & RACK_TLP) && 9048 (!IN_FASTRECOVERY(tp->t_flags))) { 9049 /* Segment was a TLP and our retrans matched */ 9050 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 9051 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 9052 } 9053 } 9054 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9055 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9056 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { 9057 /* New more recent rack_tmit_time */ 9058 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 9059 if (rack->r_ctl.rc_rack_tmit_time == 0) 9060 rack->r_ctl.rc_rack_tmit_time = 1; 9061 rack->rc_rack_rtt = t; 9062 } 9063 return (1); 9064 } 9065 /* 9066 * We clear the soft/rxtshift since we got an ack. 9067 * There is no assurance we will call the commit() function 9068 * so we need to clear these to avoid incorrect handling. 9069 */ 9070 tp->t_rxtshift = 0; 9071 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 9072 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 9073 tp->t_softerror = 0; 9074 if (to && (to->to_flags & TOF_TS) && 9075 (ack_type == CUM_ACKED) && 9076 (to->to_tsecr) && 9077 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 9078 /* 9079 * Now which timestamp does it match? In this block the ACK 9080 * must be coming from a previous transmission. 9081 */ 9082 for (i = 0; i < rsm->r_rtr_cnt; i++) { 9083 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 9084 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 9085 if ((int)t <= 0) 9086 t = 1; 9087 if (CC_ALGO(tp)->rttsample != NULL) { 9088 /* 9089 * Kick the RTT to the CC, here 9090 * we lie a bit in that we know the 9091 * retransmission is correct even though 9092 * we retransmitted. This is because 9093 * we match the timestamps. 9094 */ 9095 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) 9096 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; 9097 else 9098 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; 9099 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 9100 } 9101 if ((i + 1) < rsm->r_rtr_cnt) { 9102 /* 9103 * The peer ack'd from our previous 9104 * transmission. We have a spurious 9105 * retransmission and thus we dont 9106 * want to update our rack_rtt. 9107 * 9108 * Hmm should there be a CC revert here? 9109 * 9110 */ 9111 return (0); 9112 } 9113 if (!tp->t_rttlow || tp->t_rttlow > t) 9114 tp->t_rttlow = t; 9115 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9116 rack->r_ctl.rc_rack_min_rtt = t; 9117 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9118 rack->r_ctl.rc_rack_min_rtt = 1; 9119 } 9120 } 9121 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9122 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9123 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { 9124 /* New more recent rack_tmit_time */ 9125 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 9126 if (rack->r_ctl.rc_rack_tmit_time == 0) 9127 rack->r_ctl.rc_rack_tmit_time = 1; 9128 rack->rc_rack_rtt = t; 9129 } 9130 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); 9131 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, 9132 rsm->r_rtr_cnt); 9133 return (1); 9134 } 9135 } 9136 /* If we are logging log out the sendmap */ 9137 if (tcp_bblogging_on(rack->rc_tp)) { 9138 for (i = 0; i < rsm->r_rtr_cnt; i++) { 9139 rack_log_rtt_sendmap(rack, i, rsm->r_tim_lastsent[i], to->to_tsecr); 9140 } 9141 } 9142 goto ts_not_found; 9143 } else { 9144 /* 9145 * Ok its a SACK block that we retransmitted. or a windows 9146 * machine without timestamps. We can tell nothing from the 9147 * time-stamp since its not there or the time the peer last 9148 * received a segment that moved forward its cum-ack point. 9149 */ 9150 ts_not_found: 9151 i = rsm->r_rtr_cnt - 1; 9152 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 9153 if ((int)t <= 0) 9154 t = 1; 9155 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9156 /* 9157 * We retransmitted and the ack came back in less 9158 * than the smallest rtt we have observed. We most 9159 * likely did an improper retransmit as outlined in 9160 * 6.2 Step 2 point 2 in the rack-draft so we 9161 * don't want to update our rack_rtt. We in 9162 * theory (in future) might want to think about reverting our 9163 * cwnd state but we won't for now. 9164 */ 9165 return (0); 9166 } else if (rack->r_ctl.rc_rack_min_rtt) { 9167 /* 9168 * We retransmitted it and the retransmit did the 9169 * job. 9170 */ 9171 if (!rack->r_ctl.rc_rack_min_rtt || 9172 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9173 rack->r_ctl.rc_rack_min_rtt = t; 9174 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9175 rack->r_ctl.rc_rack_min_rtt = 1; 9176 } 9177 } 9178 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9179 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9180 (uint32_t)rsm->r_tim_lastsent[i]))) { 9181 /* New more recent rack_tmit_time */ 9182 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; 9183 if (rack->r_ctl.rc_rack_tmit_time == 0) 9184 rack->r_ctl.rc_rack_tmit_time = 1; 9185 rack->rc_rack_rtt = t; 9186 } 9187 return (1); 9188 } 9189 } 9190 return (0); 9191 } 9192 9193 /* 9194 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 9195 */ 9196 static void 9197 rack_log_sack_passed(struct tcpcb *tp, 9198 struct tcp_rack *rack, struct rack_sendmap *rsm) 9199 { 9200 struct rack_sendmap *nrsm; 9201 9202 nrsm = rsm; 9203 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 9204 rack_head, r_tnext) { 9205 if (nrsm == rsm) { 9206 /* Skip original segment he is acked */ 9207 continue; 9208 } 9209 if (nrsm->r_flags & RACK_ACKED) { 9210 /* 9211 * Skip ack'd segments, though we 9212 * should not see these, since tmap 9213 * should not have ack'd segments. 9214 */ 9215 continue; 9216 } 9217 if (nrsm->r_flags & RACK_RWND_COLLAPSED) { 9218 /* 9219 * If the peer dropped the rwnd on 9220 * these then we don't worry about them. 9221 */ 9222 continue; 9223 } 9224 if (nrsm->r_flags & RACK_SACK_PASSED) { 9225 /* 9226 * We found one that is already marked 9227 * passed, we have been here before and 9228 * so all others below this are marked. 9229 */ 9230 break; 9231 } 9232 nrsm->r_flags |= RACK_SACK_PASSED; 9233 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 9234 } 9235 } 9236 9237 static void 9238 rack_need_set_test(struct tcpcb *tp, 9239 struct tcp_rack *rack, 9240 struct rack_sendmap *rsm, 9241 tcp_seq th_ack, 9242 int line, 9243 int use_which) 9244 { 9245 struct rack_sendmap *s_rsm; 9246 9247 if ((tp->t_flags & TF_GPUTINPROG) && 9248 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 9249 /* 9250 * We were app limited, and this ack 9251 * butts up or goes beyond the point where we want 9252 * to start our next measurement. We need 9253 * to record the new gput_ts as here and 9254 * possibly update the start sequence. 9255 */ 9256 uint32_t seq, ts; 9257 9258 if (rsm->r_rtr_cnt > 1) { 9259 /* 9260 * This is a retransmit, can we 9261 * really make any assessment at this 9262 * point? We are not really sure of 9263 * the timestamp, is it this or the 9264 * previous transmission? 9265 * 9266 * Lets wait for something better that 9267 * is not retransmitted. 9268 */ 9269 return; 9270 } 9271 seq = tp->gput_seq; 9272 ts = tp->gput_ts; 9273 rack->app_limited_needs_set = 0; 9274 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 9275 /* Do we start at a new end? */ 9276 if ((use_which == RACK_USE_BEG) && 9277 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { 9278 /* 9279 * When we get an ACK that just eats 9280 * up some of the rsm, we set RACK_USE_BEG 9281 * since whats at r_start (i.e. th_ack) 9282 * is left unacked and thats where the 9283 * measurement now starts. 9284 */ 9285 tp->gput_seq = rsm->r_start; 9286 } 9287 if ((use_which == RACK_USE_END) && 9288 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 9289 /* 9290 * We use the end when the cumack 9291 * is moving forward and completely 9292 * deleting the rsm passed so basically 9293 * r_end holds th_ack. 9294 * 9295 * For SACK's we also want to use the end 9296 * since this piece just got sacked and 9297 * we want to target anything after that 9298 * in our measurement. 9299 */ 9300 tp->gput_seq = rsm->r_end; 9301 } 9302 if (use_which == RACK_USE_END_OR_THACK) { 9303 /* 9304 * special case for ack moving forward, 9305 * not a sack, we need to move all the 9306 * way up to where this ack cum-ack moves 9307 * to. 9308 */ 9309 if (SEQ_GT(th_ack, rsm->r_end)) 9310 tp->gput_seq = th_ack; 9311 else 9312 tp->gput_seq = rsm->r_end; 9313 } 9314 if (SEQ_LT(tp->gput_seq, tp->snd_max)) 9315 s_rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 9316 else 9317 s_rsm = NULL; 9318 /* 9319 * Pick up the correct send time if we can the rsm passed in 9320 * may be equal to s_rsm if the RACK_USE_BEG was set. For the other 9321 * two cases (RACK_USE_THACK or RACK_USE_END) most likely we will 9322 * find a different seq i.e. the next send up. 9323 * 9324 * If that has not been sent, s_rsm will be NULL and we must 9325 * arrange it so this function will get called again by setting 9326 * app_limited_needs_set. 9327 */ 9328 if (s_rsm) 9329 rack->r_ctl.rc_gp_output_ts = s_rsm->r_tim_lastsent[0]; 9330 else { 9331 /* If we hit here we have to have *not* sent tp->gput_seq */ 9332 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; 9333 /* Set it up so we will go through here again */ 9334 rack->app_limited_needs_set = 1; 9335 } 9336 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { 9337 /* 9338 * We moved beyond this guy's range, re-calculate 9339 * the new end point. 9340 */ 9341 if (rack->rc_gp_filled == 0) { 9342 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 9343 } else { 9344 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 9345 } 9346 } 9347 /* 9348 * We are moving the goal post, we may be able to clear the 9349 * measure_saw_probe_rtt flag. 9350 */ 9351 if ((rack->in_probe_rtt == 0) && 9352 (rack->measure_saw_probe_rtt) && 9353 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 9354 rack->measure_saw_probe_rtt = 0; 9355 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 9356 seq, tp->gput_seq, 9357 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | 9358 (uint64_t)rack->r_ctl.rc_gp_output_ts), 9359 5, line, NULL, 0); 9360 if (rack->rc_gp_filled && 9361 ((tp->gput_ack - tp->gput_seq) < 9362 max(rc_init_window(rack), (MIN_GP_WIN * 9363 ctf_fixed_maxseg(tp))))) { 9364 uint32_t ideal_amount; 9365 9366 ideal_amount = rack_get_measure_window(tp, rack); 9367 if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) { 9368 /* 9369 * There is no sense of continuing this measurement 9370 * because its too small to gain us anything we 9371 * trust. Skip it and that way we can start a new 9372 * measurement quicker. 9373 */ 9374 tp->t_flags &= ~TF_GPUTINPROG; 9375 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 9376 0, 0, 9377 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | 9378 (uint64_t)rack->r_ctl.rc_gp_output_ts), 9379 6, __LINE__, NULL, 0); 9380 } else { 9381 /* 9382 * Reset the window further out. 9383 */ 9384 tp->gput_ack = tp->gput_seq + ideal_amount; 9385 } 9386 } 9387 rack_tend_gp_marks(tp, rack); 9388 rack_log_gpset(rack, tp->gput_ack, 0, 0, line, 2, rsm); 9389 } 9390 } 9391 9392 static inline int 9393 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) 9394 { 9395 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { 9396 /* Behind our TLP definition or right at */ 9397 return (0); 9398 } 9399 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { 9400 /* The start is beyond or right at our end of TLP definition */ 9401 return (0); 9402 } 9403 /* It has to be a sub-part of the original TLP recorded */ 9404 return (1); 9405 } 9406 9407 9408 9409 static uint32_t 9410 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 9411 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, 9412 int *no_extra, 9413 int *moved_two, uint32_t segsiz) 9414 { 9415 uint32_t start, end, changed = 0; 9416 struct rack_sendmap stack_map; 9417 struct rack_sendmap *rsm, *nrsm, *prev, *next; 9418 int insret __diagused; 9419 int32_t used_ref = 1; 9420 int moved = 0; 9421 #ifdef TCP_SAD_DETECTION 9422 int allow_segsiz; 9423 int first_time_through = 1; 9424 #endif 9425 int noextra = 0; 9426 int can_use_hookery = 0; 9427 9428 start = sack->start; 9429 end = sack->end; 9430 rsm = *prsm; 9431 9432 #ifdef TCP_SAD_DETECTION 9433 /* 9434 * There are a strange number of proxys and meddle boxes in the world 9435 * that seem to cut up segments on different boundaries. This gets us 9436 * smaller sacks that are still ok in terms of it being an attacker. 9437 * We use the base segsiz to calculate an allowable smallness but 9438 * also enforce a min on the segsiz in case it is an attacker playing 9439 * games with MSS. So basically if the sack arrives and it is 9440 * larger than a worse case 960 bytes, we don't classify the guy 9441 * as supicious. 9442 */ 9443 allow_segsiz = max(segsiz, 1200) * sad_seg_size_per; 9444 allow_segsiz /= 1000; 9445 #endif 9446 do_rest_ofb: 9447 if ((rsm == NULL) || 9448 (SEQ_LT(end, rsm->r_start)) || 9449 (SEQ_GEQ(start, rsm->r_end)) || 9450 (SEQ_LT(start, rsm->r_start))) { 9451 /* 9452 * We are not in the right spot, 9453 * find the correct spot in the tree. 9454 */ 9455 used_ref = 0; 9456 rsm = tqhash_find(rack->r_ctl.tqh, start); 9457 moved++; 9458 } 9459 if (rsm == NULL) { 9460 /* TSNH */ 9461 goto out; 9462 } 9463 #ifdef TCP_SAD_DETECTION 9464 /* Now we must check for suspicous activity */ 9465 if ((first_time_through == 1) && 9466 ((end - start) < min((rsm->r_end - rsm->r_start), allow_segsiz)) && 9467 ((rsm->r_flags & RACK_PMTU_CHG) == 0) && 9468 ((rsm->r_flags & RACK_TLP) == 0)) { 9469 /* 9470 * Its less than a full MSS or the segment being acked 9471 * this should only happen if the rsm in question had the 9472 * r_just_ret flag set <and> the end matches the end of 9473 * the rsm block. 9474 * 9475 * Note we do not look at segments that have had TLP's on 9476 * them since we can get un-reported rwnd collapses that 9477 * basically we TLP on and then we get back a sack block 9478 * that goes from the start to only a small way. 9479 * 9480 */ 9481 int loss, ok; 9482 9483 ok = 0; 9484 if (SEQ_GEQ(end, rsm->r_end)) { 9485 if (rsm->r_just_ret == 1) { 9486 /* This was at the end of a send which is ok */ 9487 ok = 1; 9488 } else { 9489 /* A bit harder was it the end of our segment */ 9490 int segs, len; 9491 9492 len = (rsm->r_end - rsm->r_start); 9493 segs = len / segsiz; 9494 segs *= segsiz; 9495 if ((segs + (rsm->r_end - start)) == len) { 9496 /* 9497 * So this last bit was the 9498 * end of our send if we cut it 9499 * up into segsiz pieces so its ok. 9500 */ 9501 ok = 1; 9502 } 9503 } 9504 } 9505 if (ok == 0) { 9506 /* 9507 * This guy is doing something suspicious 9508 * lets start detection. 9509 */ 9510 if (rack->rc_suspicious == 0) { 9511 tcp_trace_point(rack->rc_tp, TCP_TP_SAD_SUSPECT); 9512 counter_u64_add(rack_sack_attacks_suspect, 1); 9513 rack->rc_suspicious = 1; 9514 rack_log_sad(rack, 4); 9515 if (tcp_bblogging_on(rack->rc_tp)) { 9516 union tcp_log_stackspecific log; 9517 struct timeval tv; 9518 9519 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 9520 log.u_bbr.flex1 = end; 9521 log.u_bbr.flex2 = start; 9522 log.u_bbr.flex3 = rsm->r_end; 9523 log.u_bbr.flex4 = rsm->r_start; 9524 log.u_bbr.flex5 = segsiz; 9525 log.u_bbr.flex6 = rsm->r_fas; 9526 log.u_bbr.flex7 = rsm->r_bas; 9527 log.u_bbr.flex8 = 5; 9528 log.u_bbr.pkts_out = rsm->r_flags; 9529 log.u_bbr.bbr_state = rack->rc_suspicious; 9530 log.u_bbr.bbr_substate = rsm->r_just_ret; 9531 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 9532 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 9533 TCP_LOG_EVENTP(rack->rc_tp, NULL, 9534 &rack->rc_inp->inp_socket->so_rcv, 9535 &rack->rc_inp->inp_socket->so_snd, 9536 TCP_SAD_DETECTION, 0, 9537 0, &log, false, &tv); 9538 } 9539 } 9540 /* You loose some ack count every time you sack 9541 * a small bit that is not butting to the end of 9542 * what we have sent. This is because we never 9543 * send small bits unless its the end of the sb. 9544 * Anyone sending a sack that is not at the end 9545 * is thus very very suspicious. 9546 */ 9547 loss = (segsiz/2) / (end - start); 9548 if (loss < rack->r_ctl.ack_count) 9549 rack->r_ctl.ack_count -= loss; 9550 else 9551 rack->r_ctl.ack_count = 0; 9552 } 9553 } 9554 first_time_through = 0; 9555 #endif 9556 /* Ok we have an ACK for some piece of this rsm */ 9557 if (rsm->r_start != start) { 9558 if ((rsm->r_flags & RACK_ACKED) == 0) { 9559 /* 9560 * Before any splitting or hookery is 9561 * done is it a TLP of interest i.e. rxt? 9562 */ 9563 if ((rsm->r_flags & RACK_TLP) && 9564 (rsm->r_rtr_cnt > 1)) { 9565 /* 9566 * We are splitting a rxt TLP, check 9567 * if we need to save off the start/end 9568 */ 9569 if (rack->rc_last_tlp_acked_set && 9570 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9571 /* 9572 * We already turned this on since we are inside 9573 * the previous one was a partially sack now we 9574 * are getting another one (maybe all of it). 9575 * 9576 */ 9577 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9578 /* 9579 * Lets make sure we have all of it though. 9580 */ 9581 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9582 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9583 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9584 rack->r_ctl.last_tlp_acked_end); 9585 } 9586 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9587 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9588 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9589 rack->r_ctl.last_tlp_acked_end); 9590 } 9591 } else { 9592 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9593 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9594 rack->rc_last_tlp_past_cumack = 0; 9595 rack->rc_last_tlp_acked_set = 1; 9596 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9597 } 9598 } 9599 /** 9600 * Need to split this in two pieces the before and after, 9601 * the before remains in the map, the after must be 9602 * added. In other words we have: 9603 * rsm |--------------| 9604 * sackblk |-------> 9605 * rsm will become 9606 * rsm |---| 9607 * and nrsm will be the sacked piece 9608 * nrsm |----------| 9609 * 9610 * But before we start down that path lets 9611 * see if the sack spans over on top of 9612 * the next guy and it is already sacked. 9613 * 9614 */ 9615 /* 9616 * Hookery can only be used if the two entries 9617 * are in the same bucket and neither one of 9618 * them staddle the bucket line. 9619 */ 9620 next = tqhash_next(rack->r_ctl.tqh, rsm); 9621 if (next && 9622 (rsm->bindex == next->bindex) && 9623 ((rsm->r_flags & RACK_STRADDLE) == 0) && 9624 ((next->r_flags & RACK_STRADDLE) == 0) && 9625 (rsm->r_flags & RACK_IN_GP_WIN) && 9626 (next->r_flags & RACK_IN_GP_WIN)) 9627 can_use_hookery = 1; 9628 else if (next && 9629 (rsm->bindex == next->bindex) && 9630 ((rsm->r_flags & RACK_STRADDLE) == 0) && 9631 ((next->r_flags & RACK_STRADDLE) == 0) && 9632 ((rsm->r_flags & RACK_IN_GP_WIN) == 0) && 9633 ((next->r_flags & RACK_IN_GP_WIN) == 0)) 9634 can_use_hookery = 1; 9635 else 9636 can_use_hookery = 0; 9637 if (next && can_use_hookery && 9638 (next->r_flags & RACK_ACKED) && 9639 SEQ_GEQ(end, next->r_start)) { 9640 /** 9641 * So the next one is already acked, and 9642 * we can thus by hookery use our stack_map 9643 * to reflect the piece being sacked and 9644 * then adjust the two tree entries moving 9645 * the start and ends around. So we start like: 9646 * rsm |------------| (not-acked) 9647 * next |-----------| (acked) 9648 * sackblk |--------> 9649 * We want to end like so: 9650 * rsm |------| (not-acked) 9651 * next |-----------------| (acked) 9652 * nrsm |-----| 9653 * Where nrsm is a temporary stack piece we 9654 * use to update all the gizmos. 9655 */ 9656 /* Copy up our fudge block */ 9657 noextra++; 9658 nrsm = &stack_map; 9659 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 9660 /* Now adjust our tree blocks */ 9661 rsm->r_end = start; 9662 next->r_start = start; 9663 rsm->r_flags |= RACK_SHUFFLED; 9664 next->r_flags |= RACK_SHUFFLED; 9665 /* Now we must adjust back where next->m is */ 9666 rack_setup_offset_for_rsm(rack, rsm, next); 9667 /* 9668 * Which timestamp do we keep? It is rather 9669 * important in GP measurements to have the 9670 * accurate end of the send window. 9671 * 9672 * We keep the largest value, which is the newest 9673 * send. We do this in case a segment that is 9674 * joined together and not part of a GP estimate 9675 * later gets expanded into the GP estimate. 9676 * 9677 * We prohibit the merging of unlike kinds i.e. 9678 * all pieces that are in the GP estimate can be 9679 * merged and all pieces that are not in a GP estimate 9680 * can be merged, but not disimilar pieces. Combine 9681 * this with taking the highest here and we should 9682 * be ok unless of course the client reneges. Then 9683 * all bets are off. 9684 */ 9685 if (next->r_tim_lastsent[(next->r_rtr_cnt-1)] < 9686 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) 9687 next->r_tim_lastsent[(next->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]; 9688 /* 9689 * And we must keep the newest ack arrival time. 9690 */ 9691 if (next->r_ack_arrival < 9692 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) 9693 next->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9694 9695 9696 /* We don't need to adjust rsm, it did not change */ 9697 /* Clear out the dup ack count of the remainder */ 9698 rsm->r_dupack = 0; 9699 rsm->r_just_ret = 0; 9700 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9701 /* Now lets make sure our fudge block is right */ 9702 nrsm->r_start = start; 9703 /* Now lets update all the stats and such */ 9704 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 9705 if (rack->app_limited_needs_set) 9706 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 9707 changed += (nrsm->r_end - nrsm->r_start); 9708 /* You get a count for acking a whole segment or more */ 9709 if ((nrsm->r_end - nrsm->r_start) >= segsiz) 9710 rack->r_ctl.ack_count += ((nrsm->r_end - nrsm->r_start) / segsiz); 9711 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 9712 if (nrsm->r_flags & RACK_SACK_PASSED) { 9713 rack->r_ctl.rc_reorder_ts = cts; 9714 if (rack->r_ctl.rc_reorder_ts == 0) 9715 rack->r_ctl.rc_reorder_ts = 1; 9716 } 9717 /* 9718 * Now we want to go up from rsm (the 9719 * one left un-acked) to the next one 9720 * in the tmap. We do this so when 9721 * we walk backwards we include marking 9722 * sack-passed on rsm (The one passed in 9723 * is skipped since it is generally called 9724 * on something sacked before removing it 9725 * from the tmap). 9726 */ 9727 if (rsm->r_in_tmap) { 9728 nrsm = TAILQ_NEXT(rsm, r_tnext); 9729 /* 9730 * Now that we have the next 9731 * one walk backwards from there. 9732 */ 9733 if (nrsm && nrsm->r_in_tmap) 9734 rack_log_sack_passed(tp, rack, nrsm); 9735 } 9736 /* Now are we done? */ 9737 if (SEQ_LT(end, next->r_end) || 9738 (end == next->r_end)) { 9739 /* Done with block */ 9740 goto out; 9741 } 9742 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); 9743 counter_u64_add(rack_sack_used_next_merge, 1); 9744 /* Postion for the next block */ 9745 start = next->r_end; 9746 rsm = tqhash_next(rack->r_ctl.tqh, next); 9747 if (rsm == NULL) 9748 goto out; 9749 } else { 9750 /** 9751 * We can't use any hookery here, so we 9752 * need to split the map. We enter like 9753 * so: 9754 * rsm |--------| 9755 * sackblk |-----> 9756 * We will add the new block nrsm and 9757 * that will be the new portion, and then 9758 * fall through after reseting rsm. So we 9759 * split and look like this: 9760 * rsm |----| 9761 * sackblk |-----> 9762 * nrsm |---| 9763 * We then fall through reseting 9764 * rsm to nrsm, so the next block 9765 * picks it up. 9766 */ 9767 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 9768 if (nrsm == NULL) { 9769 /* 9770 * failed XXXrrs what can we do but loose the sack 9771 * info? 9772 */ 9773 goto out; 9774 } 9775 counter_u64_add(rack_sack_splits, 1); 9776 rack_clone_rsm(rack, nrsm, rsm, start); 9777 moved++; 9778 rsm->r_just_ret = 0; 9779 #ifndef INVARIANTS 9780 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 9781 #else 9782 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 9783 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 9784 nrsm, insret, rack, rsm); 9785 } 9786 #endif 9787 if (rsm->r_in_tmap) { 9788 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 9789 nrsm->r_in_tmap = 1; 9790 } 9791 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 9792 rsm->r_flags &= (~RACK_HAS_FIN); 9793 /* Position us to point to the new nrsm that starts the sack blk */ 9794 rsm = nrsm; 9795 } 9796 } else { 9797 /* Already sacked this piece */ 9798 counter_u64_add(rack_sack_skipped_acked, 1); 9799 moved++; 9800 if (end == rsm->r_end) { 9801 /* Done with block */ 9802 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 9803 goto out; 9804 } else if (SEQ_LT(end, rsm->r_end)) { 9805 /* A partial sack to a already sacked block */ 9806 moved++; 9807 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 9808 goto out; 9809 } else { 9810 /* 9811 * The end goes beyond this guy 9812 * reposition the start to the 9813 * next block. 9814 */ 9815 start = rsm->r_end; 9816 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 9817 if (rsm == NULL) 9818 goto out; 9819 } 9820 } 9821 } 9822 if (SEQ_GEQ(end, rsm->r_end)) { 9823 /** 9824 * The end of this block is either beyond this guy or right 9825 * at this guy. I.e.: 9826 * rsm --- |-----| 9827 * end |-----| 9828 * <or> 9829 * end |---------| 9830 */ 9831 if ((rsm->r_flags & RACK_ACKED) == 0) { 9832 /* 9833 * Is it a TLP of interest? 9834 */ 9835 if ((rsm->r_flags & RACK_TLP) && 9836 (rsm->r_rtr_cnt > 1)) { 9837 /* 9838 * We are splitting a rxt TLP, check 9839 * if we need to save off the start/end 9840 */ 9841 if (rack->rc_last_tlp_acked_set && 9842 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9843 /* 9844 * We already turned this on since we are inside 9845 * the previous one was a partially sack now we 9846 * are getting another one (maybe all of it). 9847 */ 9848 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9849 /* 9850 * Lets make sure we have all of it though. 9851 */ 9852 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9853 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9854 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9855 rack->r_ctl.last_tlp_acked_end); 9856 } 9857 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9858 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9859 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9860 rack->r_ctl.last_tlp_acked_end); 9861 } 9862 } else { 9863 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9864 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9865 rack->rc_last_tlp_past_cumack = 0; 9866 rack->rc_last_tlp_acked_set = 1; 9867 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9868 } 9869 } 9870 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 9871 changed += (rsm->r_end - rsm->r_start); 9872 /* You get a count for acking a whole segment or more */ 9873 if ((rsm->r_end - rsm->r_start) >= segsiz) 9874 rack->r_ctl.ack_count += ((rsm->r_end - rsm->r_start) / segsiz); 9875 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 9876 if (rsm->r_in_tmap) /* should be true */ 9877 rack_log_sack_passed(tp, rack, rsm); 9878 /* Is Reordering occuring? */ 9879 if (rsm->r_flags & RACK_SACK_PASSED) { 9880 rsm->r_flags &= ~RACK_SACK_PASSED; 9881 rack->r_ctl.rc_reorder_ts = cts; 9882 if (rack->r_ctl.rc_reorder_ts == 0) 9883 rack->r_ctl.rc_reorder_ts = 1; 9884 } 9885 if (rack->app_limited_needs_set) 9886 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 9887 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9888 rsm->r_flags |= RACK_ACKED; 9889 if (rsm->r_in_tmap) { 9890 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 9891 rsm->r_in_tmap = 0; 9892 } 9893 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); 9894 } else { 9895 counter_u64_add(rack_sack_skipped_acked, 1); 9896 moved++; 9897 } 9898 if (end == rsm->r_end) { 9899 /* This block only - done, setup for next */ 9900 goto out; 9901 } 9902 /* 9903 * There is more not coverend by this rsm move on 9904 * to the next block in the tail queue hash table. 9905 */ 9906 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); 9907 start = rsm->r_end; 9908 rsm = nrsm; 9909 if (rsm == NULL) 9910 goto out; 9911 goto do_rest_ofb; 9912 } 9913 /** 9914 * The end of this sack block is smaller than 9915 * our rsm i.e.: 9916 * rsm --- |-----| 9917 * end |--| 9918 */ 9919 if ((rsm->r_flags & RACK_ACKED) == 0) { 9920 /* 9921 * Is it a TLP of interest? 9922 */ 9923 if ((rsm->r_flags & RACK_TLP) && 9924 (rsm->r_rtr_cnt > 1)) { 9925 /* 9926 * We are splitting a rxt TLP, check 9927 * if we need to save off the start/end 9928 */ 9929 if (rack->rc_last_tlp_acked_set && 9930 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9931 /* 9932 * We already turned this on since we are inside 9933 * the previous one was a partially sack now we 9934 * are getting another one (maybe all of it). 9935 */ 9936 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9937 /* 9938 * Lets make sure we have all of it though. 9939 */ 9940 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9941 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9942 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9943 rack->r_ctl.last_tlp_acked_end); 9944 } 9945 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9946 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9947 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9948 rack->r_ctl.last_tlp_acked_end); 9949 } 9950 } else { 9951 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9952 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9953 rack->rc_last_tlp_past_cumack = 0; 9954 rack->rc_last_tlp_acked_set = 1; 9955 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9956 } 9957 } 9958 /* 9959 * Hookery can only be used if the two entries 9960 * are in the same bucket and neither one of 9961 * them staddle the bucket line. 9962 */ 9963 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 9964 if (prev && 9965 (rsm->bindex == prev->bindex) && 9966 ((rsm->r_flags & RACK_STRADDLE) == 0) && 9967 ((prev->r_flags & RACK_STRADDLE) == 0) && 9968 (rsm->r_flags & RACK_IN_GP_WIN) && 9969 (prev->r_flags & RACK_IN_GP_WIN)) 9970 can_use_hookery = 1; 9971 else if (prev && 9972 (rsm->bindex == prev->bindex) && 9973 ((rsm->r_flags & RACK_STRADDLE) == 0) && 9974 ((prev->r_flags & RACK_STRADDLE) == 0) && 9975 ((rsm->r_flags & RACK_IN_GP_WIN) == 0) && 9976 ((prev->r_flags & RACK_IN_GP_WIN) == 0)) 9977 can_use_hookery = 1; 9978 else 9979 can_use_hookery = 0; 9980 9981 if (prev && can_use_hookery && 9982 (prev->r_flags & RACK_ACKED)) { 9983 /** 9984 * Goal, we want the right remainder of rsm to shrink 9985 * in place and span from (rsm->r_start = end) to rsm->r_end. 9986 * We want to expand prev to go all the way 9987 * to prev->r_end <- end. 9988 * so in the tree we have before: 9989 * prev |--------| (acked) 9990 * rsm |-------| (non-acked) 9991 * sackblk |-| 9992 * We churn it so we end up with 9993 * prev |----------| (acked) 9994 * rsm |-----| (non-acked) 9995 * nrsm |-| (temporary) 9996 * 9997 * Note if either prev/rsm is a TLP we don't 9998 * do this. 9999 */ 10000 noextra++; 10001 nrsm = &stack_map; 10002 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 10003 prev->r_end = end; 10004 rsm->r_start = end; 10005 rsm->r_flags |= RACK_SHUFFLED; 10006 prev->r_flags |= RACK_SHUFFLED; 10007 /* Now adjust nrsm (stack copy) to be 10008 * the one that is the small 10009 * piece that was "sacked". 10010 */ 10011 nrsm->r_end = end; 10012 rsm->r_dupack = 0; 10013 /* 10014 * Which timestamp do we keep? It is rather 10015 * important in GP measurements to have the 10016 * accurate end of the send window. 10017 * 10018 * We keep the largest value, which is the newest 10019 * send. We do this in case a segment that is 10020 * joined together and not part of a GP estimate 10021 * later gets expanded into the GP estimate. 10022 * 10023 * We prohibit the merging of unlike kinds i.e. 10024 * all pieces that are in the GP estimate can be 10025 * merged and all pieces that are not in a GP estimate 10026 * can be merged, but not disimilar pieces. Combine 10027 * this with taking the highest here and we should 10028 * be ok unless of course the client reneges. Then 10029 * all bets are off. 10030 */ 10031 if(prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] < 10032 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) { 10033 prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 10034 } 10035 /* 10036 * And we must keep the newest ack arrival time. 10037 */ 10038 10039 if(prev->r_ack_arrival < 10040 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) 10041 prev->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10042 10043 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 10044 /* 10045 * Now that the rsm has had its start moved forward 10046 * lets go ahead and get its new place in the world. 10047 */ 10048 rack_setup_offset_for_rsm(rack, prev, rsm); 10049 /* 10050 * Now nrsm is our new little piece 10051 * that is acked (which was merged 10052 * to prev). Update the rtt and changed 10053 * based on that. Also check for reordering. 10054 */ 10055 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 10056 if (rack->app_limited_needs_set) 10057 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 10058 changed += (nrsm->r_end - nrsm->r_start); 10059 /* You get a count for acking a whole segment or more */ 10060 if ((nrsm->r_end - nrsm->r_start) >= segsiz) 10061 rack->r_ctl.ack_count += ((nrsm->r_end - nrsm->r_start) / segsiz); 10062 10063 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 10064 if (nrsm->r_flags & RACK_SACK_PASSED) { 10065 rack->r_ctl.rc_reorder_ts = cts; 10066 if (rack->r_ctl.rc_reorder_ts == 0) 10067 rack->r_ctl.rc_reorder_ts = 1; 10068 } 10069 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); 10070 rsm = prev; 10071 counter_u64_add(rack_sack_used_prev_merge, 1); 10072 } else { 10073 /** 10074 * This is the case where our previous 10075 * block is not acked either, so we must 10076 * split the block in two. 10077 */ 10078 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 10079 if (nrsm == NULL) { 10080 /* failed rrs what can we do but loose the sack info? */ 10081 goto out; 10082 } 10083 if ((rsm->r_flags & RACK_TLP) && 10084 (rsm->r_rtr_cnt > 1)) { 10085 /* 10086 * We are splitting a rxt TLP, check 10087 * if we need to save off the start/end 10088 */ 10089 if (rack->rc_last_tlp_acked_set && 10090 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 10091 /* 10092 * We already turned this on since this block is inside 10093 * the previous one was a partially sack now we 10094 * are getting another one (maybe all of it). 10095 */ 10096 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 10097 /* 10098 * Lets make sure we have all of it though. 10099 */ 10100 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 10101 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10102 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10103 rack->r_ctl.last_tlp_acked_end); 10104 } 10105 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 10106 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10107 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10108 rack->r_ctl.last_tlp_acked_end); 10109 } 10110 } else { 10111 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10112 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10113 rack->rc_last_tlp_acked_set = 1; 10114 rack->rc_last_tlp_past_cumack = 0; 10115 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 10116 } 10117 } 10118 /** 10119 * In this case nrsm becomes 10120 * nrsm->r_start = end; 10121 * nrsm->r_end = rsm->r_end; 10122 * which is un-acked. 10123 * <and> 10124 * rsm->r_end = nrsm->r_start; 10125 * i.e. the remaining un-acked 10126 * piece is left on the left 10127 * hand side. 10128 * 10129 * So we start like this 10130 * rsm |----------| (not acked) 10131 * sackblk |---| 10132 * build it so we have 10133 * rsm |---| (acked) 10134 * nrsm |------| (not acked) 10135 */ 10136 counter_u64_add(rack_sack_splits, 1); 10137 rack_clone_rsm(rack, nrsm, rsm, end); 10138 moved++; 10139 rsm->r_flags &= (~RACK_HAS_FIN); 10140 rsm->r_just_ret = 0; 10141 #ifndef INVARIANTS 10142 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 10143 #else 10144 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 10145 panic("Insert in tailq_hash of %p fails ret:% rack:%p rsm:%p", 10146 nrsm, insret, rack, rsm); 10147 } 10148 #endif 10149 if (rsm->r_in_tmap) { 10150 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 10151 nrsm->r_in_tmap = 1; 10152 } 10153 nrsm->r_dupack = 0; 10154 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 10155 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 10156 changed += (rsm->r_end - rsm->r_start); 10157 /* You get a count for acking a whole segment or more */ 10158 if ((rsm->r_end - rsm->r_start) >= segsiz) 10159 rack->r_ctl.ack_count += ((rsm->r_end - rsm->r_start) / segsiz); 10160 10161 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 10162 if (rsm->r_in_tmap) /* should be true */ 10163 rack_log_sack_passed(tp, rack, rsm); 10164 /* Is Reordering occuring? */ 10165 if (rsm->r_flags & RACK_SACK_PASSED) { 10166 rsm->r_flags &= ~RACK_SACK_PASSED; 10167 rack->r_ctl.rc_reorder_ts = cts; 10168 if (rack->r_ctl.rc_reorder_ts == 0) 10169 rack->r_ctl.rc_reorder_ts = 1; 10170 } 10171 if (rack->app_limited_needs_set) 10172 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 10173 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10174 rsm->r_flags |= RACK_ACKED; 10175 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__); 10176 if (rsm->r_in_tmap) { 10177 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10178 rsm->r_in_tmap = 0; 10179 } 10180 } 10181 } else if (start != end){ 10182 /* 10183 * The block was already acked. 10184 */ 10185 counter_u64_add(rack_sack_skipped_acked, 1); 10186 moved++; 10187 } 10188 out: 10189 if (rsm && 10190 ((rsm->r_flags & RACK_TLP) == 0) && 10191 (rsm->r_flags & RACK_ACKED)) { 10192 /* 10193 * Now can we merge where we worked 10194 * with either the previous or 10195 * next block? 10196 */ 10197 next = tqhash_next(rack->r_ctl.tqh, rsm); 10198 while (next) { 10199 if (next->r_flags & RACK_TLP) 10200 break; 10201 /* Only allow merges between ones in or out of GP window */ 10202 if ((next->r_flags & RACK_IN_GP_WIN) && 10203 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { 10204 break; 10205 } 10206 if ((rsm->r_flags & RACK_IN_GP_WIN) && 10207 ((next->r_flags & RACK_IN_GP_WIN) == 0)) { 10208 break; 10209 } 10210 if (rsm->bindex != next->bindex) 10211 break; 10212 if (rsm->r_flags & RACK_STRADDLE) 10213 break; 10214 if (next->r_flags & RACK_STRADDLE) 10215 break; 10216 if (next->r_flags & RACK_ACKED) { 10217 /* yep this and next can be merged */ 10218 rsm = rack_merge_rsm(rack, rsm, next); 10219 noextra++; 10220 next = tqhash_next(rack->r_ctl.tqh, rsm); 10221 } else 10222 break; 10223 } 10224 /* Now what about the previous? */ 10225 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10226 while (prev) { 10227 if (prev->r_flags & RACK_TLP) 10228 break; 10229 /* Only allow merges between ones in or out of GP window */ 10230 if ((prev->r_flags & RACK_IN_GP_WIN) && 10231 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { 10232 break; 10233 } 10234 if ((rsm->r_flags & RACK_IN_GP_WIN) && 10235 ((prev->r_flags & RACK_IN_GP_WIN) == 0)) { 10236 break; 10237 } 10238 if (rsm->bindex != prev->bindex) 10239 break; 10240 if (rsm->r_flags & RACK_STRADDLE) 10241 break; 10242 if (prev->r_flags & RACK_STRADDLE) 10243 break; 10244 if (prev->r_flags & RACK_ACKED) { 10245 /* yep the previous and this can be merged */ 10246 rsm = rack_merge_rsm(rack, prev, rsm); 10247 noextra++; 10248 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10249 } else 10250 break; 10251 } 10252 } 10253 if (used_ref == 0) { 10254 counter_u64_add(rack_sack_proc_all, 1); 10255 } else { 10256 counter_u64_add(rack_sack_proc_short, 1); 10257 } 10258 /* Save off the next one for quick reference. */ 10259 nrsm = tqhash_find(rack->r_ctl.tqh, end); 10260 *prsm = rack->r_ctl.rc_sacklast = nrsm; 10261 /* Pass back the moved. */ 10262 *moved_two = moved; 10263 *no_extra = noextra; 10264 return (changed); 10265 } 10266 10267 static void inline 10268 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 10269 { 10270 struct rack_sendmap *tmap; 10271 10272 tmap = NULL; 10273 while (rsm && (rsm->r_flags & RACK_ACKED)) { 10274 /* Its no longer sacked, mark it so */ 10275 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 10276 #ifdef INVARIANTS 10277 if (rsm->r_in_tmap) { 10278 panic("rack:%p rsm:%p flags:0x%x in tmap?", 10279 rack, rsm, rsm->r_flags); 10280 } 10281 #endif 10282 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 10283 /* Rebuild it into our tmap */ 10284 if (tmap == NULL) { 10285 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10286 tmap = rsm; 10287 } else { 10288 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 10289 tmap = rsm; 10290 } 10291 tmap->r_in_tmap = 1; 10292 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 10293 } 10294 /* 10295 * Now lets possibly clear the sack filter so we start 10296 * recognizing sacks that cover this area. 10297 */ 10298 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 10299 10300 } 10301 10302 static void 10303 rack_do_decay(struct tcp_rack *rack) 10304 { 10305 struct timeval res; 10306 10307 #define timersub(tvp, uvp, vvp) \ 10308 do { \ 10309 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ 10310 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ 10311 if ((vvp)->tv_usec < 0) { \ 10312 (vvp)->tv_sec--; \ 10313 (vvp)->tv_usec += 1000000; \ 10314 } \ 10315 } while (0) 10316 10317 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res); 10318 #undef timersub 10319 10320 rack->r_ctl.input_pkt++; 10321 if ((rack->rc_in_persist) || 10322 (res.tv_sec >= 1) || 10323 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) { 10324 /* 10325 * Check for decay of non-SAD, 10326 * we want all SAD detection metrics to 10327 * decay 1/4 per second (or more) passed. 10328 * Current default is 800 so it decays 10329 * 80% every second. 10330 */ 10331 #ifdef TCP_SAD_DETECTION 10332 uint32_t pkt_delta; 10333 10334 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt; 10335 #endif 10336 /* Update our saved tracking values */ 10337 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt; 10338 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 10339 /* Now do we escape without decay? */ 10340 #ifdef TCP_SAD_DETECTION 10341 if (rack->rc_in_persist || 10342 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) || 10343 (pkt_delta < tcp_sad_low_pps)){ 10344 /* 10345 * We don't decay idle connections 10346 * or ones that have a low input pps. 10347 */ 10348 return; 10349 } 10350 /* Decay the counters */ 10351 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count, 10352 tcp_sad_decay_val); 10353 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count, 10354 tcp_sad_decay_val); 10355 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra, 10356 tcp_sad_decay_val); 10357 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move, 10358 tcp_sad_decay_val); 10359 #endif 10360 } 10361 } 10362 10363 static void inline 10364 rack_rsm_sender_update(struct tcp_rack *rack, struct tcpcb *tp, struct rack_sendmap *rsm, uint8_t from) 10365 { 10366 /* 10367 * We look at advancing the end send time for our GP 10368 * measurement tracking only as the cumulative acknowledgment 10369 * moves forward. You might wonder about this, why not 10370 * at every transmission or retransmission within the 10371 * GP window update the rc_gp_cumack_ts? Well its rather 10372 * nuanced but basically the GP window *may* expand (as 10373 * it does below) or worse and harder to track it may shrink. 10374 * 10375 * This last makes it impossible to track at the time of 10376 * the send, since you may set forward your rc_gp_cumack_ts 10377 * when you send, because that send *is* in your currently 10378 * "guessed" window, but then it shrinks. Now which was 10379 * the send time of the last bytes in the window, by the 10380 * time you ask that question that part of the sendmap 10381 * is freed. So you don't know and you will have too 10382 * long of send window. Instead by updating the time 10383 * marker only when the cumack advances this assures us 10384 * that we will have only the sends in the window of our 10385 * GP measurement. 10386 * 10387 * Another complication from this is the 10388 * merging of sendmap entries. During SACK processing this 10389 * can happen to conserve the sendmap size. That breaks 10390 * everything down in tracking the send window of the GP 10391 * estimate. So to prevent that and keep it working with 10392 * a tiny bit more limited merging, we only allow like 10393 * types to be merged. I.e. if two sends are in the GP window 10394 * then its ok to merge them together. If two sends are not 10395 * in the GP window its ok to merge them together too. Though 10396 * one send in and one send out cannot be merged. We combine 10397 * this with never allowing the shrinking of the GP window when 10398 * we are in recovery so that we can properly calculate the 10399 * sending times. 10400 * 10401 * This all of course seems complicated, because it is.. :) 10402 * 10403 * The cum-ack is being advanced upon the sendmap. 10404 * If we are not doing a GP estimate don't 10405 * proceed. 10406 */ 10407 uint64_t ts; 10408 10409 if ((tp->t_flags & TF_GPUTINPROG) == 0) 10410 return; 10411 /* 10412 * If this sendmap entry is going 10413 * beyond the measurement window we had picked, 10414 * expand the measurement window by that much. 10415 */ 10416 if (SEQ_GT(rsm->r_end, tp->gput_ack)) { 10417 tp->gput_ack = rsm->r_end; 10418 } 10419 /* 10420 * If we have not setup a ack, then we 10421 * have no idea if the newly acked pieces 10422 * will be "in our seq measurement range". If 10423 * it is when we clear the app_limited_needs_set 10424 * flag the timestamp will be updated. 10425 */ 10426 if (rack->app_limited_needs_set) 10427 return; 10428 /* 10429 * Finally, we grab out the latest timestamp 10430 * that this packet was sent and then see 10431 * if: 10432 * a) The packet touches are newly defined GP range. 10433 * b) The time is greater than (newer) than the 10434 * one we currently have. If so we update 10435 * our sending end time window. 10436 * 10437 * Note we *do not* do this at send time. The reason 10438 * is that if you do you *may* pick up a newer timestamp 10439 * for a range you are not going to measure. We project 10440 * out how far and then sometimes modify that to be 10441 * smaller. If that occurs then you will have a send 10442 * that does not belong to the range included. 10443 */ 10444 if ((ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) <= 10445 rack->r_ctl.rc_gp_cumack_ts) 10446 return; 10447 if (rack_in_gp_window(tp, rsm)) { 10448 rack->r_ctl.rc_gp_cumack_ts = ts; 10449 rack_log_gpset(rack, tp->gput_ack, (uint32_t)ts, rsm->r_end, 10450 __LINE__, from, rsm); 10451 } 10452 } 10453 10454 static void 10455 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to, uint64_t acktime) 10456 { 10457 struct rack_sendmap *rsm; 10458 /* 10459 * The ACK point is advancing to th_ack, we must drop off 10460 * the packets in the rack log and calculate any eligble 10461 * RTT's. 10462 */ 10463 10464 rack->r_wanted_output = 1; 10465 if (SEQ_GT(th_ack, tp->snd_una)) 10466 rack->r_ctl.last_cumack_advance = acktime; 10467 10468 /* Tend any TLP that has been marked for 1/2 the seq space (its old) */ 10469 if ((rack->rc_last_tlp_acked_set == 1)&& 10470 (rack->rc_last_tlp_past_cumack == 1) && 10471 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { 10472 /* 10473 * We have reached the point where our last rack 10474 * tlp retransmit sequence is ahead of the cum-ack. 10475 * This can only happen when the cum-ack moves all 10476 * the way around (its been a full 2^^31+1 bytes 10477 * or more since we sent a retransmitted TLP). Lets 10478 * turn off the valid flag since its not really valid. 10479 * 10480 * Note since sack's also turn on this event we have 10481 * a complication, we have to wait to age it out until 10482 * the cum-ack is by the TLP before checking which is 10483 * what the next else clause does. 10484 */ 10485 rack_log_dsack_event(rack, 9, __LINE__, 10486 rack->r_ctl.last_tlp_acked_start, 10487 rack->r_ctl.last_tlp_acked_end); 10488 rack->rc_last_tlp_acked_set = 0; 10489 rack->rc_last_tlp_past_cumack = 0; 10490 } else if ((rack->rc_last_tlp_acked_set == 1) && 10491 (rack->rc_last_tlp_past_cumack == 0) && 10492 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { 10493 /* 10494 * It is safe to start aging TLP's out. 10495 */ 10496 rack->rc_last_tlp_past_cumack = 1; 10497 } 10498 /* We do the same for the tlp send seq as well */ 10499 if ((rack->rc_last_sent_tlp_seq_valid == 1) && 10500 (rack->rc_last_sent_tlp_past_cumack == 1) && 10501 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { 10502 rack_log_dsack_event(rack, 9, __LINE__, 10503 rack->r_ctl.last_sent_tlp_seq, 10504 (rack->r_ctl.last_sent_tlp_seq + 10505 rack->r_ctl.last_sent_tlp_len)); 10506 rack->rc_last_sent_tlp_seq_valid = 0; 10507 rack->rc_last_sent_tlp_past_cumack = 0; 10508 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && 10509 (rack->rc_last_sent_tlp_past_cumack == 0) && 10510 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { 10511 /* 10512 * It is safe to start aging TLP's send. 10513 */ 10514 rack->rc_last_sent_tlp_past_cumack = 1; 10515 } 10516 more: 10517 rsm = tqhash_min(rack->r_ctl.tqh); 10518 if (rsm == NULL) { 10519 if ((th_ack - 1) == tp->iss) { 10520 /* 10521 * For the SYN incoming case we will not 10522 * have called tcp_output for the sending of 10523 * the SYN, so there will be no map. All 10524 * other cases should probably be a panic. 10525 */ 10526 return; 10527 } 10528 if (tp->t_flags & TF_SENTFIN) { 10529 /* if we sent a FIN we often will not have map */ 10530 return; 10531 } 10532 #ifdef INVARIANTS 10533 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u snd_nxt:%u\n", 10534 tp, 10535 tp->t_state, th_ack, rack, 10536 tp->snd_una, tp->snd_max, tp->snd_nxt); 10537 #endif 10538 return; 10539 } 10540 if (SEQ_LT(th_ack, rsm->r_start)) { 10541 /* Huh map is missing this */ 10542 #ifdef INVARIANTS 10543 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 10544 rsm->r_start, 10545 th_ack, tp->t_state, rack->r_state); 10546 #endif 10547 return; 10548 } 10549 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); 10550 10551 /* Now was it a retransmitted TLP? */ 10552 if ((rsm->r_flags & RACK_TLP) && 10553 (rsm->r_rtr_cnt > 1)) { 10554 /* 10555 * Yes, this rsm was a TLP and retransmitted, remember that 10556 * since if a DSACK comes back on this we don't want 10557 * to think of it as a reordered segment. This may 10558 * get updated again with possibly even other TLPs 10559 * in flight, but thats ok. Only when we don't send 10560 * a retransmitted TLP for 1/2 the sequences space 10561 * will it get turned off (above). 10562 */ 10563 if (rack->rc_last_tlp_acked_set && 10564 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 10565 /* 10566 * We already turned this on since the end matches, 10567 * the previous one was a partially ack now we 10568 * are getting another one (maybe all of it). 10569 */ 10570 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 10571 /* 10572 * Lets make sure we have all of it though. 10573 */ 10574 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 10575 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10576 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10577 rack->r_ctl.last_tlp_acked_end); 10578 } 10579 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 10580 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10581 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10582 rack->r_ctl.last_tlp_acked_end); 10583 } 10584 } else { 10585 rack->rc_last_tlp_past_cumack = 1; 10586 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10587 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10588 rack->rc_last_tlp_acked_set = 1; 10589 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 10590 } 10591 } 10592 /* Now do we consume the whole thing? */ 10593 rack->r_ctl.last_tmit_time_acked = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 10594 if (SEQ_GEQ(th_ack, rsm->r_end)) { 10595 /* Its all consumed. */ 10596 uint32_t left; 10597 uint8_t newly_acked; 10598 10599 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 10600 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 10601 rsm->r_rtr_bytes = 0; 10602 /* 10603 * Record the time of highest cumack sent if its in our measurement 10604 * window and possibly bump out the end. 10605 */ 10606 rack_rsm_sender_update(rack, tp, rsm, 4); 10607 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); 10608 if (rsm->r_in_tmap) { 10609 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10610 rsm->r_in_tmap = 0; 10611 } 10612 newly_acked = 1; 10613 if (rsm->r_flags & RACK_ACKED) { 10614 /* 10615 * It was acked on the scoreboard -- remove 10616 * it from total 10617 */ 10618 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 10619 newly_acked = 0; 10620 } else if (rsm->r_flags & RACK_SACK_PASSED) { 10621 /* 10622 * There are segments ACKED on the 10623 * scoreboard further up. We are seeing 10624 * reordering. 10625 */ 10626 rsm->r_flags &= ~RACK_SACK_PASSED; 10627 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10628 rsm->r_flags |= RACK_ACKED; 10629 rack->r_ctl.rc_reorder_ts = cts; 10630 if (rack->r_ctl.rc_reorder_ts == 0) 10631 rack->r_ctl.rc_reorder_ts = 1; 10632 if (rack->r_ent_rec_ns) { 10633 /* 10634 * We have sent no more, and we saw an sack 10635 * then ack arrive. 10636 */ 10637 rack->r_might_revert = 1; 10638 } 10639 } 10640 if ((rsm->r_flags & RACK_TO_REXT) && 10641 (tp->t_flags & TF_RCVD_TSTMP) && 10642 (to->to_flags & TOF_TS) && 10643 (to->to_tsecr != 0) && 10644 (tp->t_flags & TF_PREVVALID)) { 10645 /* 10646 * We can use the timestamp to see 10647 * if this retransmission was from the 10648 * first transmit. If so we made a mistake. 10649 */ 10650 tp->t_flags &= ~TF_PREVVALID; 10651 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 10652 /* The first transmit is what this ack is for */ 10653 rack_cong_signal(tp, CC_RTO_ERR, th_ack, __LINE__); 10654 } 10655 } 10656 left = th_ack - rsm->r_end; 10657 if (rack->app_limited_needs_set && newly_acked) 10658 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 10659 /* Free back to zone */ 10660 rack_free(rack, rsm); 10661 if (left) { 10662 goto more; 10663 } 10664 /* Check for reneging */ 10665 rsm = tqhash_min(rack->r_ctl.tqh); 10666 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 10667 /* 10668 * The peer has moved snd_una up to 10669 * the edge of this send, i.e. one 10670 * that it had previously acked. The only 10671 * way that can be true if the peer threw 10672 * away data (space issues) that it had 10673 * previously sacked (else it would have 10674 * given us snd_una up to (rsm->r_end). 10675 * We need to undo the acked markings here. 10676 * 10677 * Note we have to look to make sure th_ack is 10678 * our rsm->r_start in case we get an old ack 10679 * where th_ack is behind snd_una. 10680 */ 10681 rack_peer_reneges(rack, rsm, th_ack); 10682 } 10683 return; 10684 } 10685 if (rsm->r_flags & RACK_ACKED) { 10686 /* 10687 * It was acked on the scoreboard -- remove it from 10688 * total for the part being cum-acked. 10689 */ 10690 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 10691 } 10692 /* 10693 * Clear the dup ack count for 10694 * the piece that remains. 10695 */ 10696 rsm->r_dupack = 0; 10697 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 10698 if (rsm->r_rtr_bytes) { 10699 /* 10700 * It was retransmitted adjust the 10701 * sack holes for what was acked. 10702 */ 10703 int ack_am; 10704 10705 ack_am = (th_ack - rsm->r_start); 10706 if (ack_am >= rsm->r_rtr_bytes) { 10707 rack->r_ctl.rc_holes_rxt -= ack_am; 10708 rsm->r_rtr_bytes -= ack_am; 10709 } 10710 } 10711 /* 10712 * Update where the piece starts and record 10713 * the time of send of highest cumack sent if 10714 * its in our GP range. 10715 */ 10716 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 10717 /* Now we need to move our offset forward too */ 10718 if (rsm->m && 10719 ((rsm->orig_m_len != rsm->m->m_len) || 10720 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { 10721 /* Fix up the orig_m_len and possibly the mbuf offset */ 10722 rack_adjust_orig_mlen(rsm); 10723 } 10724 rsm->soff += (th_ack - rsm->r_start); 10725 rack_rsm_sender_update(rack, tp, rsm, 5); 10726 /* The trim will move th_ack into r_start for us */ 10727 tqhash_trim(rack->r_ctl.tqh, th_ack); 10728 /* Now do we need to move the mbuf fwd too? */ 10729 { 10730 struct mbuf *m; 10731 uint32_t soff; 10732 10733 m = rsm->m; 10734 soff = rsm->soff; 10735 if (m) { 10736 while (soff >= m->m_len) { 10737 soff -= m->m_len; 10738 KASSERT((m->m_next != NULL), 10739 (" rsm:%p off:%u soff:%u m:%p", 10740 rsm, rsm->soff, soff, m)); 10741 m = m->m_next; 10742 if (m == NULL) { 10743 /* 10744 * This is a fall-back that prevents a panic. In reality 10745 * we should be able to walk the mbuf's and find our place. 10746 * At this point snd_una has not been updated with the sbcut() yet 10747 * but tqhash_trim did update rsm->r_start so the offset calcuation 10748 * should work fine. This is undesirable since we will take cache 10749 * hits to access the socket buffer. And even more puzzling is that 10750 * it happens occasionally. It should not :( 10751 */ 10752 m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 10753 (rsm->r_start - tp->snd_una), 10754 &soff); 10755 break; 10756 } 10757 } 10758 /* 10759 * Now save in our updated values. 10760 */ 10761 rsm->m = m; 10762 rsm->soff = soff; 10763 rsm->orig_m_len = rsm->m->m_len; 10764 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 10765 } 10766 } 10767 if (rack->app_limited_needs_set && 10768 SEQ_GEQ(th_ack, tp->gput_seq)) 10769 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 10770 } 10771 10772 static void 10773 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 10774 { 10775 struct rack_sendmap *rsm; 10776 int sack_pass_fnd = 0; 10777 10778 if (rack->r_might_revert) { 10779 /* 10780 * Ok we have reordering, have not sent anything, we 10781 * might want to revert the congestion state if nothing 10782 * further has SACK_PASSED on it. Lets check. 10783 * 10784 * We also get here when we have DSACKs come in for 10785 * all the data that we FR'd. Note that a rxt or tlp 10786 * timer clears this from happening. 10787 */ 10788 10789 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 10790 if (rsm->r_flags & RACK_SACK_PASSED) { 10791 sack_pass_fnd = 1; 10792 break; 10793 } 10794 } 10795 if (sack_pass_fnd == 0) { 10796 /* 10797 * We went into recovery 10798 * incorrectly due to reordering! 10799 */ 10800 int orig_cwnd; 10801 10802 rack->r_ent_rec_ns = 0; 10803 orig_cwnd = tp->snd_cwnd; 10804 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 10805 tp->snd_recover = tp->snd_una; 10806 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 10807 EXIT_RECOVERY(tp->t_flags); 10808 } 10809 rack->r_might_revert = 0; 10810 } 10811 } 10812 10813 #ifdef TCP_SAD_DETECTION 10814 10815 static void 10816 rack_merge_out_sacks(struct tcp_rack *rack) 10817 { 10818 struct rack_sendmap *cur, *next, *rsm, *trsm = NULL; 10819 10820 cur = tqhash_min(rack->r_ctl.tqh); 10821 while(cur) { 10822 next = tqhash_next(rack->r_ctl.tqh, cur); 10823 /* 10824 * The idea is to go through all and merge back 10825 * together the pieces sent together, 10826 */ 10827 if ((next != NULL) && 10828 (cur->r_tim_lastsent[0] == next->r_tim_lastsent[0])) { 10829 rack_merge_rsm(rack, cur, next); 10830 } else { 10831 cur = next; 10832 } 10833 } 10834 /* 10835 * now treat it like a rxt event, everything is outstanding 10836 * and sent nothing acvked and dupacks are all zero. If this 10837 * is not an attacker it will have to dupack its way through 10838 * it all. 10839 */ 10840 TAILQ_INIT(&rack->r_ctl.rc_tmap); 10841 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 10842 rsm->r_dupack = 0; 10843 /* We must re-add it back to the tlist */ 10844 if (trsm == NULL) { 10845 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10846 } else { 10847 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 10848 } 10849 rsm->r_in_tmap = 1; 10850 trsm = rsm; 10851 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED); 10852 } 10853 sack_filter_clear(&rack->r_ctl.rack_sf, rack->rc_tp->snd_una); 10854 } 10855 10856 static void 10857 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack, uint32_t bytes_this_ack, uint32_t segsiz) 10858 { 10859 int do_detection = 0; 10860 10861 if (rack->sack_attack_disable || rack->rc_suspicious) { 10862 /* 10863 * If we have been disabled we must detect 10864 * to possibly reverse it. Or if the guy has 10865 * sent in suspicious sacks we want to do detection too. 10866 */ 10867 do_detection = 1; 10868 10869 } else if ((rack->do_detection || tcp_force_detection) && 10870 (tcp_sack_to_ack_thresh > 0) && 10871 (tcp_sack_to_move_thresh > 0) && 10872 (rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum)) { 10873 /* 10874 * We only detect here if: 10875 * 1) System wide forcing is on <or> do_detection is on 10876 * <and> 10877 * 2) We have thresholds for move and ack (set one to 0 and we are off) 10878 * <and> 10879 * 3) We have maps allocated larger than our min (500). 10880 */ 10881 do_detection = 1; 10882 } 10883 if (do_detection > 0) { 10884 /* 10885 * We have thresholds set to find 10886 * possible attackers and disable sack. 10887 * Check them. 10888 */ 10889 uint64_t ackratio, moveratio, movetotal; 10890 10891 /* Log detecting */ 10892 rack_log_sad(rack, 1); 10893 /* Do we establish a ack ratio */ 10894 if ((rack->r_ctl.sack_count > tcp_map_minimum) || 10895 (rack->rc_suspicious == 1) || 10896 (rack->sack_attack_disable > 0)) { 10897 ackratio = (uint64_t)(rack->r_ctl.sack_count); 10898 ackratio *= (uint64_t)(1000); 10899 if (rack->r_ctl.ack_count) 10900 ackratio /= (uint64_t)(rack->r_ctl.ack_count); 10901 else { 10902 /* We can hit this due to ack totals degregation (via small sacks) */ 10903 ackratio = 1000; 10904 } 10905 } else { 10906 /* 10907 * No ack ratio needed if we have not 10908 * seen more sacks then the number of map entries. 10909 * The exception to that is if we have disabled sack then 10910 * we need to find a ratio. 10911 */ 10912 ackratio = 0; 10913 } 10914 10915 if ((rack->sack_attack_disable == 0) && 10916 (ackratio > rack_highest_sack_thresh_seen)) 10917 rack_highest_sack_thresh_seen = (uint32_t)ackratio; 10918 /* Do we establish a move ratio? */ 10919 if ((rack->r_ctl.sack_moved_extra > tcp_map_minimum) || 10920 (rack->rc_suspicious == 1) || 10921 (rack->sack_attack_disable > 0)) { 10922 /* 10923 * We need to have more sack moves than maps 10924 * allocated to have a move ratio considered. 10925 */ 10926 movetotal = rack->r_ctl.sack_moved_extra; 10927 movetotal += rack->r_ctl.sack_noextra_move; 10928 moveratio = rack->r_ctl.sack_moved_extra; 10929 moveratio *= (uint64_t)1000; 10930 if (movetotal) 10931 moveratio /= movetotal; 10932 else { 10933 /* No moves, thats pretty good */ 10934 moveratio = 0; 10935 } 10936 } else { 10937 /* 10938 * Not enough moves have occured to consider 10939 * if we are out of whack in that ratio. 10940 * The exception to that is if we have disabled sack then 10941 * we need to find a ratio. 10942 */ 10943 moveratio = 0; 10944 } 10945 if ((rack->sack_attack_disable == 0) && 10946 (moveratio > rack_highest_move_thresh_seen)) 10947 rack_highest_move_thresh_seen = (uint32_t)moveratio; 10948 /* Now the tests */ 10949 if (rack->sack_attack_disable == 0) { 10950 /* Not disabled, do we need to disable? */ 10951 if ((ackratio > tcp_sack_to_ack_thresh) && 10952 (moveratio > tcp_sack_to_move_thresh)) { 10953 /* Disable sack processing */ 10954 tcp_trace_point(rack->rc_tp, TCP_TP_SAD_TRIGGERED); 10955 rack->sack_attack_disable = 1; 10956 /* set it so we have the built in delay */ 10957 rack->r_ctl.ack_during_sd = 1; 10958 if (rack_merge_out_sacks_on_attack) 10959 rack_merge_out_sacks(rack); 10960 counter_u64_add(rack_sack_attacks_detected, 1); 10961 tcp_trace_point(rack->rc_tp, TCP_TP_SAD_TRIGGERED); 10962 /* Clamp the cwnd at flight size */ 10963 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd; 10964 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 10965 rack_log_sad(rack, 2); 10966 } 10967 } else { 10968 /* We are sack-disabled check for false positives */ 10969 if ((ackratio <= tcp_restoral_thresh) || 10970 ((rack_merge_out_sacks_on_attack == 0) && 10971 (rack->rc_suspicious == 0) && 10972 (rack->r_ctl.rc_num_maps_alloced <= (tcp_map_minimum/2)))) { 10973 rack->sack_attack_disable = 0; 10974 rack_log_sad(rack, 3); 10975 /* Restart counting */ 10976 rack->r_ctl.sack_count = 0; 10977 rack->r_ctl.sack_moved_extra = 0; 10978 rack->r_ctl.sack_noextra_move = 1; 10979 rack->rc_suspicious = 0; 10980 rack->r_ctl.ack_count = max(1, 10981 (bytes_this_ack / segsiz)); 10982 10983 counter_u64_add(rack_sack_attacks_reversed, 1); 10984 /* Restore the cwnd */ 10985 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd) 10986 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd; 10987 } 10988 } 10989 } 10990 } 10991 #endif 10992 10993 static int 10994 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 10995 { 10996 10997 uint32_t am, l_end; 10998 int was_tlp = 0; 10999 11000 if (SEQ_GT(end, start)) 11001 am = end - start; 11002 else 11003 am = 0; 11004 if ((rack->rc_last_tlp_acked_set ) && 11005 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && 11006 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { 11007 /* 11008 * The DSACK is because of a TLP which we don't 11009 * do anything with the reordering window over since 11010 * it was not reordering that caused the DSACK but 11011 * our previous retransmit TLP. 11012 */ 11013 rack_log_dsack_event(rack, 7, __LINE__, start, end); 11014 was_tlp = 1; 11015 goto skip_dsack_round; 11016 } 11017 if (rack->rc_last_sent_tlp_seq_valid) { 11018 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; 11019 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && 11020 (SEQ_LEQ(end, l_end))) { 11021 /* 11022 * This dsack is from the last sent TLP, ignore it 11023 * for reordering purposes. 11024 */ 11025 rack_log_dsack_event(rack, 7, __LINE__, start, end); 11026 was_tlp = 1; 11027 goto skip_dsack_round; 11028 } 11029 } 11030 if (rack->rc_dsack_round_seen == 0) { 11031 rack->rc_dsack_round_seen = 1; 11032 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; 11033 rack->r_ctl.num_dsack++; 11034 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ 11035 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); 11036 } 11037 skip_dsack_round: 11038 /* 11039 * We keep track of how many DSACK blocks we get 11040 * after a recovery incident. 11041 */ 11042 rack->r_ctl.dsack_byte_cnt += am; 11043 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 11044 rack->r_ctl.retran_during_recovery && 11045 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 11046 /* 11047 * False recovery most likely culprit is reordering. If 11048 * nothing else is missing we need to revert. 11049 */ 11050 rack->r_might_revert = 1; 11051 rack_handle_might_revert(rack->rc_tp, rack); 11052 rack->r_might_revert = 0; 11053 rack->r_ctl.retran_during_recovery = 0; 11054 rack->r_ctl.dsack_byte_cnt = 0; 11055 } 11056 return (was_tlp); 11057 } 11058 11059 static uint32_t 11060 do_rack_compute_pipe(struct tcpcb *tp, struct tcp_rack *rack, uint32_t snd_una) 11061 { 11062 return (((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt); 11063 } 11064 11065 static int32_t 11066 rack_compute_pipe(struct tcpcb *tp) 11067 { 11068 return ((int32_t)do_rack_compute_pipe(tp, 11069 (struct tcp_rack *)tp->t_fb_ptr, 11070 tp->snd_una)); 11071 } 11072 11073 static void 11074 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 11075 { 11076 /* Deal with changed and PRR here (in recovery only) */ 11077 uint32_t pipe, snd_una; 11078 11079 rack->r_ctl.rc_prr_delivered += changed; 11080 11081 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 11082 /* 11083 * It is all outstanding, we are application limited 11084 * and thus we don't need more room to send anything. 11085 * Note we use tp->snd_una here and not th_ack because 11086 * the data as yet not been cut from the sb. 11087 */ 11088 rack->r_ctl.rc_prr_sndcnt = 0; 11089 return; 11090 } 11091 /* Compute prr_sndcnt */ 11092 if (SEQ_GT(tp->snd_una, th_ack)) { 11093 snd_una = tp->snd_una; 11094 } else { 11095 snd_una = th_ack; 11096 } 11097 pipe = do_rack_compute_pipe(tp, rack, snd_una); 11098 if (pipe > tp->snd_ssthresh) { 11099 long sndcnt; 11100 11101 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 11102 if (rack->r_ctl.rc_prr_recovery_fs > 0) 11103 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 11104 else { 11105 rack->r_ctl.rc_prr_sndcnt = 0; 11106 rack_log_to_prr(rack, 9, 0, __LINE__); 11107 sndcnt = 0; 11108 } 11109 sndcnt++; 11110 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 11111 sndcnt -= rack->r_ctl.rc_prr_out; 11112 else 11113 sndcnt = 0; 11114 rack->r_ctl.rc_prr_sndcnt = sndcnt; 11115 rack_log_to_prr(rack, 10, 0, __LINE__); 11116 } else { 11117 uint32_t limit; 11118 11119 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 11120 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 11121 else 11122 limit = 0; 11123 if (changed > limit) 11124 limit = changed; 11125 limit += ctf_fixed_maxseg(tp); 11126 if (tp->snd_ssthresh > pipe) { 11127 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 11128 rack_log_to_prr(rack, 11, 0, __LINE__); 11129 } else { 11130 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 11131 rack_log_to_prr(rack, 12, 0, __LINE__); 11132 } 11133 } 11134 } 11135 11136 static void 11137 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck, 11138 int *dsack_seen, int *sacks_seen) 11139 { 11140 uint32_t changed; 11141 struct tcp_rack *rack; 11142 struct rack_sendmap *rsm; 11143 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 11144 register uint32_t th_ack; 11145 int32_t i, j, k, num_sack_blks = 0; 11146 uint32_t cts, acked, ack_point; 11147 int loop_start = 0, moved_two = 0, no_extra = 0; 11148 uint32_t tsused; 11149 uint32_t segsiz, o_cnt; 11150 11151 11152 INP_WLOCK_ASSERT(tptoinpcb(tp)); 11153 if (tcp_get_flags(th) & TH_RST) { 11154 /* We don't log resets */ 11155 return; 11156 } 11157 rack = (struct tcp_rack *)tp->t_fb_ptr; 11158 cts = tcp_get_usecs(NULL); 11159 rsm = tqhash_min(rack->r_ctl.tqh); 11160 changed = 0; 11161 th_ack = th->th_ack; 11162 if (rack->sack_attack_disable == 0) 11163 rack_do_decay(rack); 11164 segsiz = ctf_fixed_maxseg(rack->rc_tp); 11165 if (BYTES_THIS_ACK(tp, th) >= segsiz) { 11166 /* 11167 * You only get credit for 11168 * MSS and greater (and you get extra 11169 * credit for larger cum-ack moves). 11170 */ 11171 int ac; 11172 11173 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 11174 rack->r_ctl.ack_count += ac; 11175 counter_u64_add(rack_ack_total, ac); 11176 } 11177 if (rack->r_ctl.ack_count > 0xfff00000) { 11178 /* 11179 * reduce the number to keep us under 11180 * a uint32_t. 11181 */ 11182 rack->r_ctl.ack_count /= 2; 11183 rack->r_ctl.sack_count /= 2; 11184 } 11185 if (SEQ_GT(th_ack, tp->snd_una)) { 11186 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 11187 tp->t_acktime = ticks; 11188 } 11189 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 11190 changed = th_ack - rsm->r_start; 11191 if (changed) { 11192 rack_process_to_cumack(tp, rack, th_ack, cts, to, 11193 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); 11194 } 11195 if ((to->to_flags & TOF_SACK) == 0) { 11196 /* We are done nothing left and no sack. */ 11197 rack_handle_might_revert(tp, rack); 11198 /* 11199 * For cases where we struck a dup-ack 11200 * with no SACK, add to the changes so 11201 * PRR will work right. 11202 */ 11203 if (dup_ack_struck && (changed == 0)) { 11204 changed += ctf_fixed_maxseg(rack->rc_tp); 11205 } 11206 goto out; 11207 } 11208 /* Sack block processing */ 11209 if (SEQ_GT(th_ack, tp->snd_una)) 11210 ack_point = th_ack; 11211 else 11212 ack_point = tp->snd_una; 11213 for (i = 0; i < to->to_nsacks; i++) { 11214 bcopy((to->to_sacks + i * TCPOLEN_SACK), 11215 &sack, sizeof(sack)); 11216 sack.start = ntohl(sack.start); 11217 sack.end = ntohl(sack.end); 11218 if (SEQ_GT(sack.end, sack.start) && 11219 SEQ_GT(sack.start, ack_point) && 11220 SEQ_LT(sack.start, tp->snd_max) && 11221 SEQ_GT(sack.end, ack_point) && 11222 SEQ_LEQ(sack.end, tp->snd_max)) { 11223 sack_blocks[num_sack_blks] = sack; 11224 num_sack_blks++; 11225 } else if (SEQ_LEQ(sack.start, th_ack) && 11226 SEQ_LEQ(sack.end, th_ack)) { 11227 int was_tlp; 11228 11229 if (dsack_seen != NULL) 11230 *dsack_seen = 1; 11231 was_tlp = rack_note_dsack(rack, sack.start, sack.end); 11232 /* 11233 * Its a D-SACK block. 11234 */ 11235 tcp_record_dsack(tp, sack.start, sack.end, was_tlp); 11236 } 11237 } 11238 if (rack->rc_dsack_round_seen) { 11239 /* Is the dsack roound over? */ 11240 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { 11241 /* Yes it is */ 11242 rack->rc_dsack_round_seen = 0; 11243 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 11244 } 11245 } 11246 /* 11247 * Sort the SACK blocks so we can update the rack scoreboard with 11248 * just one pass. 11249 */ 11250 o_cnt = num_sack_blks; 11251 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks, 11252 num_sack_blks, th->th_ack); 11253 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 11254 if (sacks_seen != NULL) 11255 *sacks_seen = num_sack_blks; 11256 if (num_sack_blks == 0) { 11257 /* Nothing to sack, but we need to update counts */ 11258 if ((o_cnt == 1) && 11259 (*dsack_seen != 1)) 11260 rack->r_ctl.sack_count++; 11261 else if (o_cnt > 1) 11262 rack->r_ctl.sack_count++; 11263 goto out_with_totals; 11264 } 11265 if (rack->sack_attack_disable) { 11266 /* 11267 * An attacker disablement is in place, for 11268 * every sack block that is not at least a full MSS 11269 * count up sack_count. 11270 */ 11271 for (i = 0; i < num_sack_blks; i++) { 11272 if ((sack_blocks[i].end - sack_blocks[i].start) < segsiz) { 11273 rack->r_ctl.sack_count++; 11274 } 11275 if (rack->r_ctl.sack_count > 0xfff00000) { 11276 /* 11277 * reduce the number to keep us under 11278 * a uint32_t. 11279 */ 11280 rack->r_ctl.ack_count /= 2; 11281 rack->r_ctl.sack_count /= 2; 11282 } 11283 } 11284 goto out; 11285 } 11286 /* Its a sack of some sort */ 11287 rack->r_ctl.sack_count += num_sack_blks; 11288 if (rack->r_ctl.sack_count > 0xfff00000) { 11289 /* 11290 * reduce the number to keep us under 11291 * a uint32_t. 11292 */ 11293 rack->r_ctl.ack_count /= 2; 11294 rack->r_ctl.sack_count /= 2; 11295 } 11296 if (num_sack_blks < 2) { 11297 /* Only one, we don't need to sort */ 11298 goto do_sack_work; 11299 } 11300 /* Sort the sacks */ 11301 for (i = 0; i < num_sack_blks; i++) { 11302 for (j = i + 1; j < num_sack_blks; j++) { 11303 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 11304 sack = sack_blocks[i]; 11305 sack_blocks[i] = sack_blocks[j]; 11306 sack_blocks[j] = sack; 11307 } 11308 } 11309 } 11310 /* 11311 * Now are any of the sack block ends the same (yes some 11312 * implementations send these)? 11313 */ 11314 again: 11315 if (num_sack_blks == 0) 11316 goto out_with_totals; 11317 if (num_sack_blks > 1) { 11318 for (i = 0; i < num_sack_blks; i++) { 11319 for (j = i + 1; j < num_sack_blks; j++) { 11320 if (sack_blocks[i].end == sack_blocks[j].end) { 11321 /* 11322 * Ok these two have the same end we 11323 * want the smallest end and then 11324 * throw away the larger and start 11325 * again. 11326 */ 11327 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 11328 /* 11329 * The second block covers 11330 * more area use that 11331 */ 11332 sack_blocks[i].start = sack_blocks[j].start; 11333 } 11334 /* 11335 * Now collapse out the dup-sack and 11336 * lower the count 11337 */ 11338 for (k = (j + 1); k < num_sack_blks; k++) { 11339 sack_blocks[j].start = sack_blocks[k].start; 11340 sack_blocks[j].end = sack_blocks[k].end; 11341 j++; 11342 } 11343 num_sack_blks--; 11344 goto again; 11345 } 11346 } 11347 } 11348 } 11349 do_sack_work: 11350 /* 11351 * First lets look to see if 11352 * we have retransmitted and 11353 * can use the transmit next? 11354 */ 11355 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 11356 if (rsm && 11357 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 11358 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 11359 /* 11360 * We probably did the FR and the next 11361 * SACK in continues as we would expect. 11362 */ 11363 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &no_extra, &moved_two, segsiz); 11364 if (acked) { 11365 rack->r_wanted_output = 1; 11366 changed += acked; 11367 } 11368 if (num_sack_blks == 1) { 11369 /* 11370 * This is what we would expect from 11371 * a normal implementation to happen 11372 * after we have retransmitted the FR, 11373 * i.e the sack-filter pushes down 11374 * to 1 block and the next to be retransmitted 11375 * is the sequence in the sack block (has more 11376 * are acked). Count this as ACK'd data to boost 11377 * up the chances of recovering any false positives. 11378 */ 11379 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp)); 11380 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 11381 counter_u64_add(rack_express_sack, 1); 11382 if (rack->r_ctl.ack_count > 0xfff00000) { 11383 /* 11384 * reduce the number to keep us under 11385 * a uint32_t. 11386 */ 11387 rack->r_ctl.ack_count /= 2; 11388 rack->r_ctl.sack_count /= 2; 11389 } 11390 if (moved_two) { 11391 /* 11392 * If we did not get a SACK for at least a MSS and 11393 * had to move at all, or if we moved more than our 11394 * threshold, it counts against the "extra" move. 11395 */ 11396 rack->r_ctl.sack_moved_extra += moved_two; 11397 rack->r_ctl.sack_noextra_move += no_extra; 11398 counter_u64_add(rack_move_some, 1); 11399 } else { 11400 /* 11401 * else we did not have to move 11402 * any more than we would expect. 11403 */ 11404 rack->r_ctl.sack_noextra_move += no_extra; 11405 rack->r_ctl.sack_noextra_move++; 11406 counter_u64_add(rack_move_none, 1); 11407 } 11408 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) || 11409 (rack->r_ctl.sack_noextra_move > 0xfff00000)) { 11410 rack->r_ctl.sack_moved_extra /= 2; 11411 rack->r_ctl.sack_noextra_move /= 2; 11412 } 11413 goto out_with_totals; 11414 } else { 11415 /* 11416 * Start the loop through the 11417 * rest of blocks, past the first block. 11418 */ 11419 loop_start = 1; 11420 } 11421 } 11422 counter_u64_add(rack_sack_total, 1); 11423 rsm = rack->r_ctl.rc_sacklast; 11424 for (i = loop_start; i < num_sack_blks; i++) { 11425 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &no_extra, &moved_two, segsiz); 11426 if (acked) { 11427 rack->r_wanted_output = 1; 11428 changed += acked; 11429 } 11430 if (moved_two) { 11431 /* 11432 * If we did not get a SACK for at least a MSS and 11433 * had to move at all, or if we moved more than our 11434 * threshold, it counts against the "extra" move. 11435 */ 11436 rack->r_ctl.sack_moved_extra += moved_two; 11437 rack->r_ctl.sack_noextra_move += no_extra; 11438 counter_u64_add(rack_move_some, 1); 11439 } else { 11440 /* 11441 * else we did not have to move 11442 * any more than we would expect. 11443 */ 11444 rack->r_ctl.sack_noextra_move += no_extra; 11445 rack->r_ctl.sack_noextra_move++; 11446 counter_u64_add(rack_move_none, 1); 11447 } 11448 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) || 11449 (rack->r_ctl.sack_noextra_move > 0xfff00000)) { 11450 rack->r_ctl.sack_moved_extra /= 2; 11451 rack->r_ctl.sack_noextra_move /= 2; 11452 } 11453 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) { 11454 /* 11455 * If the SACK was not a full MSS then 11456 * we add to sack_count the number of 11457 * MSS's (or possibly more than 11458 * a MSS if its a TSO send) we had to skip by. 11459 */ 11460 rack->r_ctl.sack_count += moved_two; 11461 if (rack->r_ctl.sack_count > 0xfff00000) { 11462 rack->r_ctl.ack_count /= 2; 11463 rack->r_ctl.sack_count /= 2; 11464 } 11465 counter_u64_add(rack_sack_total, moved_two); 11466 } 11467 /* 11468 * Now we need to setup for the next 11469 * round. First we make sure we won't 11470 * exceed the size of our uint32_t on 11471 * the various counts, and then clear out 11472 * moved_two. 11473 */ 11474 moved_two = 0; 11475 no_extra = 0; 11476 } 11477 out_with_totals: 11478 if (num_sack_blks > 1) { 11479 /* 11480 * You get an extra stroke if 11481 * you have more than one sack-blk, this 11482 * could be where we are skipping forward 11483 * and the sack-filter is still working, or 11484 * it could be an attacker constantly 11485 * moving us. 11486 */ 11487 rack->r_ctl.sack_moved_extra++; 11488 counter_u64_add(rack_move_some, 1); 11489 } 11490 out: 11491 #ifdef TCP_SAD_DETECTION 11492 rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp)); 11493 #endif 11494 if (changed) { 11495 /* Something changed cancel the rack timer */ 11496 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11497 } 11498 tsused = tcp_get_usecs(NULL); 11499 rsm = tcp_rack_output(tp, rack, tsused); 11500 if ((!IN_FASTRECOVERY(tp->t_flags)) && 11501 rsm && 11502 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { 11503 /* Enter recovery */ 11504 entered_recovery = 1; 11505 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 11506 /* 11507 * When we enter recovery we need to assure we send 11508 * one packet. 11509 */ 11510 if (rack->rack_no_prr == 0) { 11511 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 11512 rack_log_to_prr(rack, 8, 0, __LINE__); 11513 } 11514 rack->r_timer_override = 1; 11515 rack->r_early = 0; 11516 rack->r_ctl.rc_agg_early = 0; 11517 } else if (IN_FASTRECOVERY(tp->t_flags) && 11518 rsm && 11519 (rack->r_rr_config == 3)) { 11520 /* 11521 * Assure we can output and we get no 11522 * remembered pace time except the retransmit. 11523 */ 11524 rack->r_timer_override = 1; 11525 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 11526 rack->r_ctl.rc_resend = rsm; 11527 } 11528 if (IN_FASTRECOVERY(tp->t_flags) && 11529 (rack->rack_no_prr == 0) && 11530 (entered_recovery == 0)) { 11531 rack_update_prr(tp, rack, changed, th_ack); 11532 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 11533 ((tcp_in_hpts(rack->rc_tp) == 0) && 11534 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 11535 /* 11536 * If you are pacing output you don't want 11537 * to override. 11538 */ 11539 rack->r_early = 0; 11540 rack->r_ctl.rc_agg_early = 0; 11541 rack->r_timer_override = 1; 11542 } 11543 } 11544 } 11545 11546 static void 11547 rack_strike_dupack(struct tcp_rack *rack) 11548 { 11549 struct rack_sendmap *rsm; 11550 11551 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 11552 while (rsm) { 11553 /* 11554 * We need to skip anything already set 11555 * to be retransmitted. 11556 */ 11557 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 11558 (rsm->r_flags & RACK_MUST_RXT)) { 11559 rsm = TAILQ_NEXT(rsm, r_tnext); 11560 continue; 11561 } 11562 break; 11563 } 11564 if (rsm && (rsm->r_dupack < 0xff)) { 11565 rsm->r_dupack++; 11566 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 11567 struct timeval tv; 11568 uint32_t cts; 11569 /* 11570 * Here we see if we need to retransmit. For 11571 * a SACK type connection if enough time has passed 11572 * we will get a return of the rsm. For a non-sack 11573 * connection we will get the rsm returned if the 11574 * dupack value is 3 or more. 11575 */ 11576 cts = tcp_get_usecs(&tv); 11577 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); 11578 if (rack->r_ctl.rc_resend != NULL) { 11579 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { 11580 rack_cong_signal(rack->rc_tp, CC_NDUPACK, 11581 rack->rc_tp->snd_una, __LINE__); 11582 } 11583 rack->r_wanted_output = 1; 11584 rack->r_timer_override = 1; 11585 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 11586 } 11587 } else { 11588 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 11589 } 11590 } 11591 } 11592 11593 static void 11594 rack_check_bottom_drag(struct tcpcb *tp, 11595 struct tcp_rack *rack, 11596 struct socket *so) 11597 { 11598 uint32_t segsiz, minseg; 11599 11600 segsiz = ctf_fixed_maxseg(tp); 11601 minseg = segsiz; 11602 if (tp->snd_max == tp->snd_una) { 11603 /* 11604 * We are doing dynamic pacing and we are way 11605 * under. Basically everything got acked while 11606 * we were still waiting on the pacer to expire. 11607 * 11608 * This means we need to boost the b/w in 11609 * addition to any earlier boosting of 11610 * the multiplier. 11611 */ 11612 uint64_t lt_bw; 11613 11614 lt_bw = rack_get_lt_bw(rack); 11615 rack->rc_dragged_bottom = 1; 11616 rack_validate_multipliers_at_or_above100(rack); 11617 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && 11618 (lt_bw > 0)) { 11619 /* 11620 * Lets use the long-term b/w we have 11621 * been getting as a base. 11622 */ 11623 if (rack->rc_gp_filled == 0) { 11624 if (lt_bw > ONE_POINT_TWO_MEG) { 11625 /* 11626 * If we have no measurement 11627 * don't let us set in more than 11628 * 1.2Mbps. If we are still too 11629 * low after pacing with this we 11630 * will hopefully have a max b/w 11631 * available to sanity check things. 11632 */ 11633 lt_bw = ONE_POINT_TWO_MEG; 11634 } 11635 rack->r_ctl.rc_rtt_diff = 0; 11636 rack->r_ctl.gp_bw = lt_bw; 11637 rack->rc_gp_filled = 1; 11638 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 11639 rack->r_ctl.num_measurements = RACK_REQ_AVG; 11640 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 11641 } else if (lt_bw > rack->r_ctl.gp_bw) { 11642 rack->r_ctl.rc_rtt_diff = 0; 11643 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 11644 rack->r_ctl.num_measurements = RACK_REQ_AVG; 11645 rack->r_ctl.gp_bw = lt_bw; 11646 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 11647 } else 11648 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11649 if ((rack->gp_ready == 0) && 11650 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 11651 /* We have enough measurements now */ 11652 rack->gp_ready = 1; 11653 if (rack->dgp_on || 11654 rack->rack_hibeta) 11655 rack_set_cc_pacing(rack); 11656 if (rack->defer_options) 11657 rack_apply_deferred_options(rack); 11658 } 11659 } else { 11660 /* 11661 * zero rtt possibly?, settle for just an old increase. 11662 */ 11663 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11664 } 11665 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 11666 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), 11667 minseg)) && 11668 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && 11669 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && 11670 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= 11671 (segsiz * rack_req_segs))) { 11672 /* 11673 * We are doing dynamic GP pacing and 11674 * we have everything except 1MSS or less 11675 * bytes left out. We are still pacing away. 11676 * And there is data that could be sent, This 11677 * means we are inserting delayed ack time in 11678 * our measurements because we are pacing too slow. 11679 */ 11680 rack_validate_multipliers_at_or_above100(rack); 11681 rack->rc_dragged_bottom = 1; 11682 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11683 } 11684 } 11685 11686 #ifdef TCP_REQUEST_TRK 11687 static void 11688 rack_log_hybrid(struct tcp_rack *rack, uint32_t seq, 11689 struct tcp_sendfile_track *cur, uint8_t mod, int line, int err) 11690 { 11691 int do_log; 11692 11693 do_log = tcp_bblogging_on(rack->rc_tp); 11694 if (do_log == 0) { 11695 if ((do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) )== 0) 11696 return; 11697 /* We only allow the three below with point logging on */ 11698 if ((mod != HYBRID_LOG_RULES_APP) && 11699 (mod != HYBRID_LOG_RULES_SET) && 11700 (mod != HYBRID_LOG_REQ_COMP)) 11701 return; 11702 11703 } 11704 if (do_log) { 11705 union tcp_log_stackspecific log; 11706 struct timeval tv; 11707 11708 /* Convert our ms to a microsecond */ 11709 memset(&log, 0, sizeof(log)); 11710 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 11711 log.u_bbr.flex1 = seq; 11712 log.u_bbr.cwnd_gain = line; 11713 if (cur != NULL) { 11714 uint64_t off; 11715 11716 log.u_bbr.flex2 = cur->start_seq; 11717 log.u_bbr.flex3 = cur->end_seq; 11718 log.u_bbr.flex4 = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 11719 log.u_bbr.flex5 = (uint32_t)(cur->localtime & 0x00000000ffffffff); 11720 log.u_bbr.flex6 = cur->flags; 11721 log.u_bbr.pkts_out = cur->hybrid_flags; 11722 log.u_bbr.rttProp = cur->timestamp; 11723 log.u_bbr.cur_del_rate = cur->cspr; 11724 log.u_bbr.bw_inuse = cur->start; 11725 log.u_bbr.applimited = (uint32_t)(cur->end & 0x00000000ffffffff); 11726 log.u_bbr.delivered = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff) ; 11727 log.u_bbr.epoch = (uint32_t)(cur->deadline & 0x00000000ffffffff); 11728 log.u_bbr.lt_epoch = (uint32_t)((cur->deadline >> 32) & 0x00000000ffffffff) ; 11729 log.u_bbr.bbr_state = 1; 11730 #ifdef TCP_REQUEST_TRK 11731 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 11732 log.u_bbr.use_lt_bw = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 11733 #endif 11734 } else { 11735 log.u_bbr.flex2 = err; 11736 } 11737 /* 11738 * Fill in flex7 to be CHD (catchup|hybrid|DGP) 11739 */ 11740 log.u_bbr.flex7 = rack->rc_catch_up; 11741 log.u_bbr.flex7 <<= 1; 11742 log.u_bbr.flex7 |= rack->rc_hybrid_mode; 11743 log.u_bbr.flex7 <<= 1; 11744 log.u_bbr.flex7 |= rack->dgp_on; 11745 log.u_bbr.flex8 = mod; 11746 log.u_bbr.delRate = rack->r_ctl.bw_rate_cap; 11747 log.u_bbr.bbr_substate = rack->r_ctl.client_suggested_maxseg; 11748 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 11749 log.u_bbr.pkt_epoch = rack->rc_tp->tcp_hybrid_start; 11750 log.u_bbr.lost = rack->rc_tp->tcp_hybrid_error; 11751 log.u_bbr.pacing_gain = (uint16_t)rack->rc_tp->tcp_hybrid_stop; 11752 tcp_log_event(rack->rc_tp, NULL, 11753 &rack->rc_inp->inp_socket->so_rcv, 11754 &rack->rc_inp->inp_socket->so_snd, 11755 TCP_HYBRID_PACING_LOG, 0, 11756 0, &log, false, NULL, __func__, __LINE__, &tv); 11757 } 11758 } 11759 #endif 11760 11761 #ifdef TCP_REQUEST_TRK 11762 static void 11763 rack_set_dgp_hybrid_mode(struct tcp_rack *rack, tcp_seq seq, uint32_t len) 11764 { 11765 struct tcp_sendfile_track *rc_cur; 11766 struct tcpcb *tp; 11767 int err = 0; 11768 11769 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, seq); 11770 if (rc_cur == NULL) { 11771 /* If not in the beginning what about the end piece */ 11772 if (rack->rc_hybrid_mode) 11773 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); 11774 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, (seq + len - 1)); 11775 } else { 11776 err = 12345; 11777 } 11778 /* If we find no parameters we are in straight DGP mode */ 11779 if(rc_cur == NULL) { 11780 /* None found for this seq, just DGP for now */ 11781 rack->r_ctl.client_suggested_maxseg = 0; 11782 rack->rc_catch_up = 0; 11783 rack->r_ctl.bw_rate_cap = 0; 11784 if (rack->rc_hybrid_mode) 11785 rack_log_hybrid(rack, (seq + len - 1), NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); 11786 if (rack->r_ctl.rc_last_sft) { 11787 rack->r_ctl.rc_last_sft = NULL; 11788 } 11789 return; 11790 } 11791 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_WASSET) == 0) { 11792 /* This entry was never setup for hybrid pacing on/off etc */ 11793 return; 11794 } 11795 /* 11796 * Ok if we have a new entry *or* have never 11797 * set up an entry we need to proceed. If 11798 * we have already set it up this entry we 11799 * just continue along with what we already 11800 * setup. 11801 */ 11802 tp = rack->rc_tp; 11803 if ((rack->r_ctl.rc_last_sft != NULL) && 11804 (rack->r_ctl.rc_last_sft == rc_cur)) { 11805 /* Its already in place */ 11806 if (rack->rc_hybrid_mode) 11807 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_ISSAME, __LINE__, 0); 11808 return; 11809 } 11810 if (rack->rc_hybrid_mode == 0) { 11811 rack->r_ctl.rc_last_sft = rc_cur; 11812 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0); 11813 return; 11814 } 11815 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CSPR) && rc_cur->cspr){ 11816 /* Compensate for all the header overhead's */ 11817 rack->r_ctl.bw_rate_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); 11818 } else 11819 rack->r_ctl.bw_rate_cap = 0; 11820 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_H_MS) 11821 rack->r_ctl.client_suggested_maxseg = rc_cur->hint_maxseg; 11822 else 11823 rack->r_ctl.client_suggested_maxseg = 0; 11824 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CU) && 11825 (rc_cur->cspr > 0)) { 11826 uint64_t len; 11827 11828 rack->rc_catch_up = 1; 11829 /* 11830 * Calculate the deadline time, first set the 11831 * time to when the request arrived. 11832 */ 11833 rc_cur->deadline = rc_cur->localtime; 11834 /* 11835 * Next calculate the length and compensate for 11836 * TLS if need be. 11837 */ 11838 len = rc_cur->end - rc_cur->start; 11839 if (tp->t_inpcb.inp_socket->so_snd.sb_tls_info) { 11840 /* 11841 * This session is doing TLS. Take a swag guess 11842 * at the overhead. 11843 */ 11844 len += tcp_estimate_tls_overhead(tp->t_inpcb.inp_socket, len); 11845 } 11846 /* 11847 * Now considering the size, and the cspr, what is the time that 11848 * would be required at the cspr rate. Here we use the raw 11849 * cspr value since the client only looks at the raw data. We 11850 * do use len which includes TLS overhead, but not the TCP/IP etc. 11851 * That will get made up for in the CU pacing rate set. 11852 */ 11853 len *= HPTS_USEC_IN_SEC; 11854 len /= rc_cur->cspr; 11855 rc_cur->deadline += len; 11856 } else { 11857 rack->rc_catch_up = 0; 11858 rc_cur->deadline = 0; 11859 } 11860 if (rack->r_ctl.client_suggested_maxseg != 0) { 11861 /* 11862 * We need to reset the max pace segs if we have a 11863 * client_suggested_maxseg. 11864 */ 11865 rack_set_pace_segments(tp, rack, __LINE__, NULL); 11866 } 11867 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0); 11868 /* Remember it for next time and for CU mode */ 11869 rack->r_ctl.rc_last_sft = rc_cur; 11870 } 11871 #endif 11872 11873 static void 11874 rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts) 11875 { 11876 #ifdef TCP_REQUEST_TRK 11877 struct tcp_sendfile_track *ent; 11878 11879 ent = rack->r_ctl.rc_last_sft; 11880 if ((ent == NULL) || 11881 (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) || 11882 (SEQ_GEQ(seq, ent->end_seq))) { 11883 /* Time to update the track. */ 11884 rack_set_dgp_hybrid_mode(rack, seq, len); 11885 ent = rack->r_ctl.rc_last_sft; 11886 } 11887 /* Out of all */ 11888 if (ent == NULL) { 11889 return; 11890 } 11891 if (SEQ_LT(ent->end_seq, (seq + len))) { 11892 /* 11893 * This is the case where our end_seq guess 11894 * was wrong. This is usually due to TLS having 11895 * more bytes then our guess. It could also be the 11896 * case that the client sent in two requests closely 11897 * and the SB is full of both so we are sending part 11898 * of each (end|beg). In such a case lets move this 11899 * guys end to match the end of this send. That 11900 * way it will complete when all of it is acked. 11901 */ 11902 ent->end_seq = (seq + len); 11903 if (rack->rc_hybrid_mode) 11904 rack_log_hybrid_bw(rack, seq, len, 0, 0, HYBRID_LOG_EXTEND, 0, ent, __LINE__); 11905 } 11906 /* Now validate we have set the send time of this one */ 11907 if ((ent->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { 11908 ent->flags |= TCP_TRK_TRACK_FLG_FSND; 11909 ent->first_send = cts; 11910 ent->sent_at_fs = rack->rc_tp->t_sndbytes; 11911 ent->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; 11912 } 11913 #endif 11914 } 11915 11916 static void 11917 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount) 11918 { 11919 /* 11920 * The fast output path is enabled and we 11921 * have moved the cumack forward. Lets see if 11922 * we can expand forward the fast path length by 11923 * that amount. What we would ideally like to 11924 * do is increase the number of bytes in the 11925 * fast path block (left_to_send) by the 11926 * acked amount. However we have to gate that 11927 * by two factors: 11928 * 1) The amount outstanding and the rwnd of the peer 11929 * (i.e. we don't want to exceed the rwnd of the peer). 11930 * <and> 11931 * 2) The amount of data left in the socket buffer (i.e. 11932 * we can't send beyond what is in the buffer). 11933 * 11934 * Note that this does not take into account any increase 11935 * in the cwnd. We will only extend the fast path by 11936 * what was acked. 11937 */ 11938 uint32_t new_total, gating_val; 11939 11940 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; 11941 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), 11942 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); 11943 if (new_total <= gating_val) { 11944 /* We can increase left_to_send by the acked amount */ 11945 counter_u64_add(rack_extended_rfo, 1); 11946 rack->r_ctl.fsb.left_to_send = new_total; 11947 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))), 11948 ("rack:%p left_to_send:%u sbavail:%u out:%u", 11949 rack, rack->r_ctl.fsb.left_to_send, 11950 sbavail(&rack->rc_inp->inp_socket->so_snd), 11951 (tp->snd_max - tp->snd_una))); 11952 11953 } 11954 } 11955 11956 static void 11957 rack_adjust_sendmap_head(struct tcp_rack *rack, struct sockbuf *sb) 11958 { 11959 /* 11960 * Here any sendmap entry that points to the 11961 * beginning mbuf must be adjusted to the correct 11962 * offset. This must be called with: 11963 * 1) The socket buffer locked 11964 * 2) snd_una adjusted to its new position. 11965 * 11966 * Note that (2) implies rack_ack_received has also 11967 * been called and all the sbcut's have been done. 11968 * 11969 * We grab the first mbuf in the socket buffer and 11970 * then go through the front of the sendmap, recalculating 11971 * the stored offset for any sendmap entry that has 11972 * that mbuf. We must use the sb functions to do this 11973 * since its possible an add was done has well as 11974 * the subtraction we may have just completed. This should 11975 * not be a penalty though, since we just referenced the sb 11976 * to go in and trim off the mbufs that we freed (of course 11977 * there will be a penalty for the sendmap references though). 11978 * 11979 * Note also with INVARIANT on, we validate with a KASSERT 11980 * that the first sendmap entry has a soff of 0. 11981 * 11982 */ 11983 struct mbuf *m; 11984 struct rack_sendmap *rsm; 11985 tcp_seq snd_una; 11986 #ifdef INVARIANTS 11987 int first_processed = 0; 11988 #endif 11989 11990 snd_una = rack->rc_tp->snd_una; 11991 SOCKBUF_LOCK_ASSERT(sb); 11992 m = sb->sb_mb; 11993 rsm = tqhash_min(rack->r_ctl.tqh); 11994 if ((rsm == NULL) || (m == NULL)) { 11995 /* Nothing outstanding */ 11996 return; 11997 } 11998 /* The very first RSM's mbuf must point to the head mbuf in the sb */ 11999 KASSERT((rsm->m == m), 12000 ("Rack:%p sb:%p rsm:%p -- first rsm mbuf not aligned to sb", 12001 rack, sb, rsm)); 12002 while (rsm->m && (rsm->m == m)) { 12003 /* one to adjust */ 12004 #ifdef INVARIANTS 12005 struct mbuf *tm; 12006 uint32_t soff; 12007 12008 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); 12009 if ((rsm->orig_m_len != m->m_len) || 12010 (rsm->orig_t_space != M_TRAILINGROOM(m))){ 12011 rack_adjust_orig_mlen(rsm); 12012 } 12013 if (first_processed == 0) { 12014 KASSERT((rsm->soff == 0), 12015 ("Rack:%p rsm:%p -- rsm at head but soff not zero", 12016 rack, rsm)); 12017 first_processed = 1; 12018 } 12019 if ((rsm->soff != soff) || (rsm->m != tm)) { 12020 /* 12021 * This is not a fatal error, we anticipate it 12022 * might happen (the else code), so we count it here 12023 * so that under invariant we can see that it really 12024 * does happen. 12025 */ 12026 counter_u64_add(rack_adjust_map_bw, 1); 12027 } 12028 rsm->m = tm; 12029 rsm->soff = soff; 12030 if (tm) { 12031 rsm->orig_m_len = rsm->m->m_len; 12032 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 12033 } else { 12034 rsm->orig_m_len = 0; 12035 rsm->orig_t_space = 0; 12036 } 12037 #else 12038 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); 12039 if (rsm->m) { 12040 rsm->orig_m_len = rsm->m->m_len; 12041 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 12042 } else { 12043 rsm->orig_m_len = 0; 12044 rsm->orig_t_space = 0; 12045 } 12046 #endif 12047 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 12048 if (rsm == NULL) 12049 break; 12050 } 12051 } 12052 12053 #ifdef TCP_REQUEST_TRK 12054 static inline void 12055 rack_req_check_for_comp(struct tcp_rack *rack, tcp_seq th_ack) 12056 { 12057 struct tcp_sendfile_track *ent; 12058 int i; 12059 12060 if ((rack->rc_hybrid_mode == 0) && 12061 (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) == 0)) { 12062 /* 12063 * Just do normal completions hybrid pacing is not on 12064 * and CLDL is off as well. 12065 */ 12066 tcp_req_check_for_comp(rack->rc_tp, th_ack); 12067 return; 12068 } 12069 /* 12070 * Originally I was just going to find the th_ack associated 12071 * with an entry. But then I realized a large strech ack could 12072 * in theory ack two or more requests at once. So instead we 12073 * need to find all entries that are completed by th_ack not 12074 * just a single entry and do our logging. 12075 */ 12076 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); 12077 while (ent != NULL) { 12078 /* 12079 * We may be doing hybrid pacing or CLDL and need more details possibly 12080 * so we do it manually instead of calling 12081 * tcp_req_check_for_comp() 12082 */ 12083 uint64_t laa, tim, data, cbw, ftim; 12084 12085 /* Ok this ack frees it */ 12086 rack_log_hybrid(rack, th_ack, 12087 ent, HYBRID_LOG_REQ_COMP, __LINE__, 0); 12088 rack_log_hybrid_sends(rack, ent, __LINE__); 12089 /* calculate the time based on the ack arrival */ 12090 data = ent->end - ent->start; 12091 laa = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 12092 if (ent->flags & TCP_TRK_TRACK_FLG_FSND) { 12093 if (ent->first_send > ent->localtime) 12094 ftim = ent->first_send; 12095 else 12096 ftim = ent->localtime; 12097 } else { 12098 /* TSNH */ 12099 ftim = ent->localtime; 12100 } 12101 if (laa > ent->localtime) 12102 tim = laa - ftim; 12103 else 12104 tim = 0; 12105 cbw = data * HPTS_USEC_IN_SEC; 12106 if (tim > 0) 12107 cbw /= tim; 12108 else 12109 cbw = 0; 12110 rack_log_hybrid_bw(rack, th_ack, cbw, tim, data, HYBRID_LOG_BW_MEASURE, 0, ent, __LINE__); 12111 /* 12112 * Check to see if we are freeing what we are pointing to send wise 12113 * if so be sure to NULL the pointer so we know we are no longer 12114 * set to anything. 12115 */ 12116 if (ent == rack->r_ctl.rc_last_sft) 12117 rack->r_ctl.rc_last_sft = NULL; 12118 /* Generate the log that the tcp_netflix call would have */ 12119 tcp_req_log_req_info(rack->rc_tp, ent, 12120 i, TCP_TRK_REQ_LOG_FREED, 0, 0); 12121 /* Free it and see if there is another one */ 12122 tcp_req_free_a_slot(rack->rc_tp, ent); 12123 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); 12124 } 12125 } 12126 #endif 12127 12128 12129 /* 12130 * Return value of 1, we do not need to call rack_process_data(). 12131 * return value of 0, rack_process_data can be called. 12132 * For ret_val if its 0 the TCP is locked, if its non-zero 12133 * its unlocked and probably unsafe to touch the TCB. 12134 */ 12135 static int 12136 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 12137 struct tcpcb *tp, struct tcpopt *to, 12138 uint32_t tiwin, int32_t tlen, 12139 int32_t * ofia, int32_t thflags, int32_t *ret_val) 12140 { 12141 int32_t ourfinisacked = 0; 12142 int32_t nsegs, acked_amount; 12143 int32_t acked; 12144 struct mbuf *mfree; 12145 struct tcp_rack *rack; 12146 int32_t under_pacing = 0; 12147 int32_t recovery = 0; 12148 12149 INP_WLOCK_ASSERT(tptoinpcb(tp)); 12150 12151 rack = (struct tcp_rack *)tp->t_fb_ptr; 12152 if (SEQ_GT(th->th_ack, tp->snd_max)) { 12153 __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val, 12154 &rack->r_ctl.challenge_ack_ts, 12155 &rack->r_ctl.challenge_ack_cnt); 12156 rack->r_wanted_output = 1; 12157 return (1); 12158 } 12159 if (rack->gp_ready && 12160 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 12161 under_pacing = 1; 12162 } 12163 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 12164 int in_rec, dup_ack_struck = 0; 12165 int dsack_seen = 0, sacks_seen = 0; 12166 12167 in_rec = IN_FASTRECOVERY(tp->t_flags); 12168 if (rack->rc_in_persist) { 12169 tp->t_rxtshift = 0; 12170 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 12171 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 12172 } 12173 12174 if ((th->th_ack == tp->snd_una) && 12175 (tiwin == tp->snd_wnd) && 12176 ((to->to_flags & TOF_SACK) == 0)) { 12177 rack_strike_dupack(rack); 12178 dup_ack_struck = 1; 12179 } 12180 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), 12181 dup_ack_struck, &dsack_seen, &sacks_seen); 12182 if ((rack->sack_attack_disable > 0) && 12183 (th->th_ack == tp->snd_una) && 12184 (tiwin == tp->snd_wnd) && 12185 (dsack_seen == 0) && 12186 (sacks_seen > 0)) { 12187 /* 12188 * If sacks have been disabled we may 12189 * want to strike a dup-ack "ignoring" the 12190 * sack as long as the sack was not a "dsack". Note 12191 * that if no sack is sent (TOF_SACK is off) then the 12192 * normal dsack code above rack_log_ack() would have 12193 * already struck. So this is just to catch the case 12194 * were we are ignoring sacks from this guy due to 12195 * it being a suspected attacker. 12196 */ 12197 rack_strike_dupack(rack); 12198 } 12199 12200 } 12201 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 12202 /* 12203 * Old ack, behind (or duplicate to) the last one rcv'd 12204 * Note: We mark reordering is occuring if its 12205 * less than and we have not closed our window. 12206 */ 12207 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { 12208 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 12209 if (rack->r_ctl.rc_reorder_ts == 0) 12210 rack->r_ctl.rc_reorder_ts = 1; 12211 } 12212 return (0); 12213 } 12214 /* 12215 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 12216 * something we sent. 12217 */ 12218 if (tp->t_flags & TF_NEEDSYN) { 12219 /* 12220 * T/TCP: Connection was half-synchronized, and our SYN has 12221 * been ACK'd (so connection is now fully synchronized). Go 12222 * to non-starred state, increment snd_una for ACK of SYN, 12223 * and check if we can do window scaling. 12224 */ 12225 tp->t_flags &= ~TF_NEEDSYN; 12226 tp->snd_una++; 12227 /* Do window scaling? */ 12228 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 12229 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 12230 tp->rcv_scale = tp->request_r_scale; 12231 /* Send window already scaled. */ 12232 } 12233 } 12234 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12235 12236 acked = BYTES_THIS_ACK(tp, th); 12237 if (acked) { 12238 /* 12239 * Any time we move the cum-ack forward clear 12240 * keep-alive tied probe-not-answered. The 12241 * persists clears its own on entry. 12242 */ 12243 rack->probe_not_answered = 0; 12244 } 12245 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 12246 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 12247 /* 12248 * If we just performed our first retransmit, and the ACK arrives 12249 * within our recovery window, then it was a mistake to do the 12250 * retransmit in the first place. Recover our original cwnd and 12251 * ssthresh, and proceed to transmit where we left off. 12252 */ 12253 if ((tp->t_flags & TF_PREVVALID) && 12254 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 12255 tp->t_flags &= ~TF_PREVVALID; 12256 if (tp->t_rxtshift == 1 && 12257 (int)(ticks - tp->t_badrxtwin) < 0) 12258 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 12259 } 12260 if (acked) { 12261 /* assure we are not backed off */ 12262 tp->t_rxtshift = 0; 12263 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 12264 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 12265 rack->rc_tlp_in_progress = 0; 12266 rack->r_ctl.rc_tlp_cnt_out = 0; 12267 /* 12268 * If it is the RXT timer we want to 12269 * stop it, so we can restart a TLP. 12270 */ 12271 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 12272 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12273 #ifdef TCP_REQUEST_TRK 12274 rack_req_check_for_comp(rack, th->th_ack); 12275 #endif 12276 } 12277 /* 12278 * If we have a timestamp reply, update smoothed round trip time. If 12279 * no timestamp is present but transmit timer is running and timed 12280 * sequence number was acked, update smoothed round trip time. Since 12281 * we now have an rtt measurement, cancel the timer backoff (cf., 12282 * Phil Karn's retransmit alg.). Recompute the initial retransmit 12283 * timer. 12284 * 12285 * Some boxes send broken timestamp replies during the SYN+ACK 12286 * phase, ignore timestamps of 0 or we could calculate a huge RTT 12287 * and blow up the retransmit timer. 12288 */ 12289 /* 12290 * If all outstanding data is acked, stop retransmit timer and 12291 * remember to restart (more output or persist). If there is more 12292 * data to be acked, restart retransmit timer, using current 12293 * (possibly backed-off) value. 12294 */ 12295 if (acked == 0) { 12296 if (ofia) 12297 *ofia = ourfinisacked; 12298 return (0); 12299 } 12300 if (IN_RECOVERY(tp->t_flags)) { 12301 if (SEQ_LT(th->th_ack, tp->snd_recover) && 12302 (SEQ_LT(th->th_ack, tp->snd_max))) { 12303 tcp_rack_partialack(tp); 12304 } else { 12305 rack_post_recovery(tp, th->th_ack); 12306 recovery = 1; 12307 } 12308 } 12309 /* 12310 * Let the congestion control algorithm update congestion control 12311 * related information. This typically means increasing the 12312 * congestion window. 12313 */ 12314 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, recovery); 12315 SOCKBUF_LOCK(&so->so_snd); 12316 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 12317 tp->snd_wnd -= acked_amount; 12318 mfree = sbcut_locked(&so->so_snd, acked_amount); 12319 if ((sbused(&so->so_snd) == 0) && 12320 (acked > acked_amount) && 12321 (tp->t_state >= TCPS_FIN_WAIT_1) && 12322 (tp->t_flags & TF_SENTFIN)) { 12323 /* 12324 * We must be sure our fin 12325 * was sent and acked (we can be 12326 * in FIN_WAIT_1 without having 12327 * sent the fin). 12328 */ 12329 ourfinisacked = 1; 12330 } 12331 tp->snd_una = th->th_ack; 12332 /* wakeups? */ 12333 if (acked_amount && sbavail(&so->so_snd)) 12334 rack_adjust_sendmap_head(rack, &so->so_snd); 12335 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 12336 /* NB: sowwakeup_locked() does an implicit unlock. */ 12337 sowwakeup_locked(so); 12338 /* now check the rxt clamps */ 12339 if ((recovery == 1) && 12340 (rack->excess_rxt_on) && 12341 (rack->r_cwnd_was_clamped == 0)) { 12342 do_rack_excess_rxt(tp, rack); 12343 } else if (rack->r_cwnd_was_clamped) 12344 do_rack_check_for_unclamp(tp, rack); 12345 m_freem(mfree); 12346 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 12347 tp->snd_recover = tp->snd_una; 12348 12349 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { 12350 tp->snd_nxt = tp->snd_max; 12351 } 12352 if (under_pacing && 12353 (rack->use_fixed_rate == 0) && 12354 (rack->in_probe_rtt == 0) && 12355 rack->rc_gp_dyn_mul && 12356 rack->rc_always_pace) { 12357 /* Check if we are dragging bottom */ 12358 rack_check_bottom_drag(tp, rack, so); 12359 } 12360 if (tp->snd_una == tp->snd_max) { 12361 /* Nothing left outstanding */ 12362 tp->t_flags &= ~TF_PREVVALID; 12363 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 12364 rack->r_ctl.retran_during_recovery = 0; 12365 rack->r_ctl.dsack_byte_cnt = 0; 12366 if (rack->r_ctl.rc_went_idle_time == 0) 12367 rack->r_ctl.rc_went_idle_time = 1; 12368 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 12369 if (sbavail(&tptosocket(tp)->so_snd) == 0) 12370 tp->t_acktime = 0; 12371 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12372 rack->rc_suspicious = 0; 12373 /* Set need output so persist might get set */ 12374 rack->r_wanted_output = 1; 12375 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 12376 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 12377 (sbavail(&so->so_snd) == 0) && 12378 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 12379 /* 12380 * The socket was gone and the 12381 * peer sent data (now or in the past), time to 12382 * reset him. 12383 */ 12384 *ret_val = 1; 12385 /* tcp_close will kill the inp pre-log the Reset */ 12386 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 12387 tp = tcp_close(tp); 12388 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 12389 return (1); 12390 } 12391 } 12392 if (ofia) 12393 *ofia = ourfinisacked; 12394 return (0); 12395 } 12396 12397 12398 static void 12399 rack_log_collapse(struct tcp_rack *rack, uint32_t cnt, uint32_t split, uint32_t out, int line, 12400 int dir, uint32_t flags, struct rack_sendmap *rsm) 12401 { 12402 if (tcp_bblogging_on(rack->rc_tp)) { 12403 union tcp_log_stackspecific log; 12404 struct timeval tv; 12405 12406 memset(&log, 0, sizeof(log)); 12407 log.u_bbr.flex1 = cnt; 12408 log.u_bbr.flex2 = split; 12409 log.u_bbr.flex3 = out; 12410 log.u_bbr.flex4 = line; 12411 log.u_bbr.flex5 = rack->r_must_retran; 12412 log.u_bbr.flex6 = flags; 12413 log.u_bbr.flex7 = rack->rc_has_collapsed; 12414 log.u_bbr.flex8 = dir; /* 12415 * 1 is collapsed, 0 is uncollapsed, 12416 * 2 is log of a rsm being marked, 3 is a split. 12417 */ 12418 if (rsm == NULL) 12419 log.u_bbr.rttProp = 0; 12420 else 12421 log.u_bbr.rttProp = (uint64_t)rsm; 12422 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 12423 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 12424 TCP_LOG_EVENTP(rack->rc_tp, NULL, 12425 &rack->rc_inp->inp_socket->so_rcv, 12426 &rack->rc_inp->inp_socket->so_snd, 12427 TCP_RACK_LOG_COLLAPSE, 0, 12428 0, &log, false, &tv); 12429 } 12430 } 12431 12432 static void 12433 rack_collapsed_window(struct tcp_rack *rack, uint32_t out, tcp_seq th_ack, int line) 12434 { 12435 /* 12436 * Here all we do is mark the collapsed point and set the flag. 12437 * This may happen again and again, but there is no 12438 * sense splitting our map until we know where the 12439 * peer finally lands in the collapse. 12440 */ 12441 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); 12442 if ((rack->rc_has_collapsed == 0) || 12443 (rack->r_ctl.last_collapse_point != (th_ack + rack->rc_tp->snd_wnd))) 12444 counter_u64_add(rack_collapsed_win_seen, 1); 12445 rack->r_ctl.last_collapse_point = th_ack + rack->rc_tp->snd_wnd; 12446 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max; 12447 rack->rc_has_collapsed = 1; 12448 rack->r_collapse_point_valid = 1; 12449 rack_log_collapse(rack, 0, th_ack, rack->r_ctl.last_collapse_point, line, 1, 0, NULL); 12450 } 12451 12452 static void 12453 rack_un_collapse_window(struct tcp_rack *rack, int line) 12454 { 12455 struct rack_sendmap *nrsm, *rsm; 12456 int cnt = 0, split = 0; 12457 int insret __diagused; 12458 12459 12460 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); 12461 rack->rc_has_collapsed = 0; 12462 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); 12463 if (rsm == NULL) { 12464 /* Nothing to do maybe the peer ack'ed it all */ 12465 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 12466 return; 12467 } 12468 /* Now do we need to split this one? */ 12469 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) { 12470 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 12471 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm); 12472 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 12473 if (nrsm == NULL) { 12474 /* We can't get a rsm, mark all? */ 12475 nrsm = rsm; 12476 goto no_split; 12477 } 12478 /* Clone it */ 12479 split = 1; 12480 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point); 12481 #ifndef INVARIANTS 12482 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 12483 #else 12484 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 12485 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p", 12486 nrsm, insret, rack, rsm); 12487 } 12488 #endif 12489 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 12490 rack->r_ctl.last_collapse_point, __LINE__); 12491 if (rsm->r_in_tmap) { 12492 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 12493 nrsm->r_in_tmap = 1; 12494 } 12495 /* 12496 * Set in the new RSM as the 12497 * collapsed starting point 12498 */ 12499 rsm = nrsm; 12500 } 12501 12502 no_split: 12503 TQHASH_FOREACH_FROM(nrsm, rack->r_ctl.tqh, rsm) { 12504 cnt++; 12505 nrsm->r_flags |= RACK_RWND_COLLAPSED; 12506 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm); 12507 cnt++; 12508 } 12509 if (cnt) { 12510 counter_u64_add(rack_collapsed_win, 1); 12511 } 12512 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 12513 } 12514 12515 static void 12516 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 12517 int32_t tlen, int32_t tfo_syn) 12518 { 12519 if (DELAY_ACK(tp, tlen) || tfo_syn) { 12520 rack_timer_cancel(tp, rack, 12521 rack->r_ctl.rc_rcvtime, __LINE__); 12522 tp->t_flags |= TF_DELACK; 12523 } else { 12524 rack->r_wanted_output = 1; 12525 tp->t_flags |= TF_ACKNOW; 12526 } 12527 } 12528 12529 static void 12530 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack) 12531 { 12532 /* 12533 * If fast output is in progress, lets validate that 12534 * the new window did not shrink on us and make it 12535 * so fast output should end. 12536 */ 12537 if (rack->r_fast_output) { 12538 uint32_t out; 12539 12540 /* 12541 * Calculate what we will send if left as is 12542 * and compare that to our send window. 12543 */ 12544 out = ctf_outstanding(tp); 12545 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 12546 /* ok we have an issue */ 12547 if (out >= tp->snd_wnd) { 12548 /* Turn off fast output the window is met or collapsed */ 12549 rack->r_fast_output = 0; 12550 } else { 12551 /* we have some room left */ 12552 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 12553 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 12554 /* If not at least 1 full segment never mind */ 12555 rack->r_fast_output = 0; 12556 } 12557 } 12558 } 12559 } 12560 } 12561 12562 12563 /* 12564 * Return value of 1, the TCB is unlocked and most 12565 * likely gone, return value of 0, the TCP is still 12566 * locked. 12567 */ 12568 static int 12569 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 12570 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 12571 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 12572 { 12573 /* 12574 * Update window information. Don't look at window if no ACK: TAC's 12575 * send garbage on first SYN. 12576 */ 12577 int32_t nsegs; 12578 int32_t tfo_syn; 12579 struct tcp_rack *rack; 12580 12581 INP_WLOCK_ASSERT(tptoinpcb(tp)); 12582 12583 rack = (struct tcp_rack *)tp->t_fb_ptr; 12584 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12585 if ((thflags & TH_ACK) && 12586 (SEQ_LT(tp->snd_wl1, th->th_seq) || 12587 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 12588 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 12589 /* keep track of pure window updates */ 12590 if (tlen == 0 && 12591 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 12592 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 12593 tp->snd_wnd = tiwin; 12594 rack_validate_fo_sendwin_up(tp, rack); 12595 tp->snd_wl1 = th->th_seq; 12596 tp->snd_wl2 = th->th_ack; 12597 if (tp->snd_wnd > tp->max_sndwnd) 12598 tp->max_sndwnd = tp->snd_wnd; 12599 rack->r_wanted_output = 1; 12600 } else if (thflags & TH_ACK) { 12601 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 12602 tp->snd_wnd = tiwin; 12603 rack_validate_fo_sendwin_up(tp, rack); 12604 tp->snd_wl1 = th->th_seq; 12605 tp->snd_wl2 = th->th_ack; 12606 } 12607 } 12608 if (tp->snd_wnd < ctf_outstanding(tp)) 12609 /* The peer collapsed the window */ 12610 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); 12611 else if (rack->rc_has_collapsed) 12612 rack_un_collapse_window(rack, __LINE__); 12613 if ((rack->r_collapse_point_valid) && 12614 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point))) 12615 rack->r_collapse_point_valid = 0; 12616 /* Was persist timer active and now we have window space? */ 12617 if ((rack->rc_in_persist != 0) && 12618 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 12619 rack->r_ctl.rc_pace_min_segs))) { 12620 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 12621 tp->snd_nxt = tp->snd_max; 12622 /* Make sure we output to start the timer */ 12623 rack->r_wanted_output = 1; 12624 } 12625 /* Do we enter persists? */ 12626 if ((rack->rc_in_persist == 0) && 12627 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 12628 TCPS_HAVEESTABLISHED(tp->t_state) && 12629 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 12630 sbavail(&tptosocket(tp)->so_snd) && 12631 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 12632 /* 12633 * Here the rwnd is less than 12634 * the pacing size, we are established, 12635 * nothing is outstanding, and there is 12636 * data to send. Enter persists. 12637 */ 12638 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); 12639 } 12640 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 12641 m_freem(m); 12642 return (0); 12643 } 12644 /* 12645 * don't process the URG bit, ignore them drag 12646 * along the up. 12647 */ 12648 tp->rcv_up = tp->rcv_nxt; 12649 12650 /* 12651 * Process the segment text, merging it into the TCP sequencing 12652 * queue, and arranging for acknowledgment of receipt if necessary. 12653 * This process logically involves adjusting tp->rcv_wnd as data is 12654 * presented to the user (this happens in tcp_usrreq.c, case 12655 * PRU_RCVD). If a FIN has already been received on this connection 12656 * then we just ignore the text. 12657 */ 12658 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 12659 IS_FASTOPEN(tp->t_flags)); 12660 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 12661 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 12662 tcp_seq save_start = th->th_seq; 12663 tcp_seq save_rnxt = tp->rcv_nxt; 12664 int save_tlen = tlen; 12665 12666 m_adj(m, drop_hdrlen); /* delayed header drop */ 12667 /* 12668 * Insert segment which includes th into TCP reassembly 12669 * queue with control block tp. Set thflags to whether 12670 * reassembly now includes a segment with FIN. This handles 12671 * the common case inline (segment is the next to be 12672 * received on an established connection, and the queue is 12673 * empty), avoiding linkage into and removal from the queue 12674 * and repetition of various conversions. Set DELACK for 12675 * segments received in order, but ack immediately when 12676 * segments are out of order (so fast retransmit can work). 12677 */ 12678 if (th->th_seq == tp->rcv_nxt && 12679 SEGQ_EMPTY(tp) && 12680 (TCPS_HAVEESTABLISHED(tp->t_state) || 12681 tfo_syn)) { 12682 #ifdef NETFLIX_SB_LIMITS 12683 u_int mcnt, appended; 12684 12685 if (so->so_rcv.sb_shlim) { 12686 mcnt = m_memcnt(m); 12687 appended = 0; 12688 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 12689 CFO_NOSLEEP, NULL) == false) { 12690 counter_u64_add(tcp_sb_shlim_fails, 1); 12691 m_freem(m); 12692 return (0); 12693 } 12694 } 12695 #endif 12696 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); 12697 tp->rcv_nxt += tlen; 12698 if (tlen && 12699 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 12700 (tp->t_fbyte_in == 0)) { 12701 tp->t_fbyte_in = ticks; 12702 if (tp->t_fbyte_in == 0) 12703 tp->t_fbyte_in = 1; 12704 if (tp->t_fbyte_out && tp->t_fbyte_in) 12705 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 12706 } 12707 thflags = tcp_get_flags(th) & TH_FIN; 12708 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 12709 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 12710 SOCKBUF_LOCK(&so->so_rcv); 12711 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 12712 m_freem(m); 12713 } else 12714 #ifdef NETFLIX_SB_LIMITS 12715 appended = 12716 #endif 12717 sbappendstream_locked(&so->so_rcv, m, 0); 12718 12719 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 12720 /* NB: sorwakeup_locked() does an implicit unlock. */ 12721 sorwakeup_locked(so); 12722 #ifdef NETFLIX_SB_LIMITS 12723 if (so->so_rcv.sb_shlim && appended != mcnt) 12724 counter_fo_release(so->so_rcv.sb_shlim, 12725 mcnt - appended); 12726 #endif 12727 } else { 12728 /* 12729 * XXX: Due to the header drop above "th" is 12730 * theoretically invalid by now. Fortunately 12731 * m_adj() doesn't actually frees any mbufs when 12732 * trimming from the head. 12733 */ 12734 tcp_seq temp = save_start; 12735 12736 thflags = tcp_reass(tp, th, &temp, &tlen, m); 12737 tp->t_flags |= TF_ACKNOW; 12738 if (tp->t_flags & TF_WAKESOR) { 12739 tp->t_flags &= ~TF_WAKESOR; 12740 /* NB: sorwakeup_locked() does an implicit unlock. */ 12741 sorwakeup_locked(so); 12742 } 12743 } 12744 if ((tp->t_flags & TF_SACK_PERMIT) && 12745 (save_tlen > 0) && 12746 TCPS_HAVEESTABLISHED(tp->t_state)) { 12747 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 12748 /* 12749 * DSACK actually handled in the fastpath 12750 * above. 12751 */ 12752 tcp_update_sack_list(tp, save_start, 12753 save_start + save_tlen); 12754 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 12755 if ((tp->rcv_numsacks >= 1) && 12756 (tp->sackblks[0].end == save_start)) { 12757 /* 12758 * Partial overlap, recorded at todrop 12759 * above. 12760 */ 12761 tcp_update_sack_list(tp, 12762 tp->sackblks[0].start, 12763 tp->sackblks[0].end); 12764 } else { 12765 tcp_update_dsack_list(tp, save_start, 12766 save_start + save_tlen); 12767 } 12768 } else if (tlen >= save_tlen) { 12769 /* Update of sackblks. */ 12770 tcp_update_dsack_list(tp, save_start, 12771 save_start + save_tlen); 12772 } else if (tlen > 0) { 12773 tcp_update_dsack_list(tp, save_start, 12774 save_start + tlen); 12775 } 12776 } 12777 } else { 12778 m_freem(m); 12779 thflags &= ~TH_FIN; 12780 } 12781 12782 /* 12783 * If FIN is received ACK the FIN and let the user know that the 12784 * connection is closing. 12785 */ 12786 if (thflags & TH_FIN) { 12787 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 12788 /* The socket upcall is handled by socantrcvmore. */ 12789 socantrcvmore(so); 12790 /* 12791 * If connection is half-synchronized (ie NEEDSYN 12792 * flag on) then delay ACK, so it may be piggybacked 12793 * when SYN is sent. Otherwise, since we received a 12794 * FIN then no more input can be expected, send ACK 12795 * now. 12796 */ 12797 if (tp->t_flags & TF_NEEDSYN) { 12798 rack_timer_cancel(tp, rack, 12799 rack->r_ctl.rc_rcvtime, __LINE__); 12800 tp->t_flags |= TF_DELACK; 12801 } else { 12802 tp->t_flags |= TF_ACKNOW; 12803 } 12804 tp->rcv_nxt++; 12805 } 12806 switch (tp->t_state) { 12807 /* 12808 * In SYN_RECEIVED and ESTABLISHED STATES enter the 12809 * CLOSE_WAIT state. 12810 */ 12811 case TCPS_SYN_RECEIVED: 12812 tp->t_starttime = ticks; 12813 /* FALLTHROUGH */ 12814 case TCPS_ESTABLISHED: 12815 rack_timer_cancel(tp, rack, 12816 rack->r_ctl.rc_rcvtime, __LINE__); 12817 tcp_state_change(tp, TCPS_CLOSE_WAIT); 12818 break; 12819 12820 /* 12821 * If still in FIN_WAIT_1 STATE FIN has not been 12822 * acked so enter the CLOSING state. 12823 */ 12824 case TCPS_FIN_WAIT_1: 12825 rack_timer_cancel(tp, rack, 12826 rack->r_ctl.rc_rcvtime, __LINE__); 12827 tcp_state_change(tp, TCPS_CLOSING); 12828 break; 12829 12830 /* 12831 * In FIN_WAIT_2 state enter the TIME_WAIT state, 12832 * starting the time-wait timer, turning off the 12833 * other standard timers. 12834 */ 12835 case TCPS_FIN_WAIT_2: 12836 rack_timer_cancel(tp, rack, 12837 rack->r_ctl.rc_rcvtime, __LINE__); 12838 tcp_twstart(tp); 12839 return (1); 12840 } 12841 } 12842 /* 12843 * Return any desired output. 12844 */ 12845 if ((tp->t_flags & TF_ACKNOW) || 12846 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 12847 rack->r_wanted_output = 1; 12848 } 12849 return (0); 12850 } 12851 12852 /* 12853 * Here nothing is really faster, its just that we 12854 * have broken out the fast-data path also just like 12855 * the fast-ack. 12856 */ 12857 static int 12858 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 12859 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12860 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 12861 { 12862 int32_t nsegs; 12863 int32_t newsize = 0; /* automatic sockbuf scaling */ 12864 struct tcp_rack *rack; 12865 #ifdef NETFLIX_SB_LIMITS 12866 u_int mcnt, appended; 12867 #endif 12868 12869 /* 12870 * If last ACK falls within this segment's sequence numbers, record 12871 * the timestamp. NOTE that the test is modified according to the 12872 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 12873 */ 12874 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 12875 return (0); 12876 } 12877 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 12878 return (0); 12879 } 12880 if (tiwin && tiwin != tp->snd_wnd) { 12881 return (0); 12882 } 12883 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 12884 return (0); 12885 } 12886 if (__predict_false((to->to_flags & TOF_TS) && 12887 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 12888 return (0); 12889 } 12890 if (__predict_false((th->th_ack != tp->snd_una))) { 12891 return (0); 12892 } 12893 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 12894 return (0); 12895 } 12896 if ((to->to_flags & TOF_TS) != 0 && 12897 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 12898 tp->ts_recent_age = tcp_ts_getticks(); 12899 tp->ts_recent = to->to_tsval; 12900 } 12901 rack = (struct tcp_rack *)tp->t_fb_ptr; 12902 /* 12903 * This is a pure, in-sequence data packet with nothing on the 12904 * reassembly queue and we have enough buffer space to take it. 12905 */ 12906 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12907 12908 #ifdef NETFLIX_SB_LIMITS 12909 if (so->so_rcv.sb_shlim) { 12910 mcnt = m_memcnt(m); 12911 appended = 0; 12912 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 12913 CFO_NOSLEEP, NULL) == false) { 12914 counter_u64_add(tcp_sb_shlim_fails, 1); 12915 m_freem(m); 12916 return (1); 12917 } 12918 } 12919 #endif 12920 /* Clean receiver SACK report if present */ 12921 if (tp->rcv_numsacks) 12922 tcp_clean_sackreport(tp); 12923 KMOD_TCPSTAT_INC(tcps_preddat); 12924 tp->rcv_nxt += tlen; 12925 if (tlen && 12926 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 12927 (tp->t_fbyte_in == 0)) { 12928 tp->t_fbyte_in = ticks; 12929 if (tp->t_fbyte_in == 0) 12930 tp->t_fbyte_in = 1; 12931 if (tp->t_fbyte_out && tp->t_fbyte_in) 12932 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 12933 } 12934 /* 12935 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 12936 */ 12937 tp->snd_wl1 = th->th_seq; 12938 /* 12939 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 12940 */ 12941 tp->rcv_up = tp->rcv_nxt; 12942 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 12943 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 12944 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 12945 12946 /* Add data to socket buffer. */ 12947 SOCKBUF_LOCK(&so->so_rcv); 12948 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 12949 m_freem(m); 12950 } else { 12951 /* 12952 * Set new socket buffer size. Give up when limit is 12953 * reached. 12954 */ 12955 if (newsize) 12956 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 12957 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 12958 m_adj(m, drop_hdrlen); /* delayed header drop */ 12959 #ifdef NETFLIX_SB_LIMITS 12960 appended = 12961 #endif 12962 sbappendstream_locked(&so->so_rcv, m, 0); 12963 ctf_calc_rwin(so, tp); 12964 } 12965 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 12966 /* NB: sorwakeup_locked() does an implicit unlock. */ 12967 sorwakeup_locked(so); 12968 #ifdef NETFLIX_SB_LIMITS 12969 if (so->so_rcv.sb_shlim && mcnt != appended) 12970 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 12971 #endif 12972 rack_handle_delayed_ack(tp, rack, tlen, 0); 12973 if (tp->snd_una == tp->snd_max) 12974 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 12975 return (1); 12976 } 12977 12978 /* 12979 * This subfunction is used to try to highly optimize the 12980 * fast path. We again allow window updates that are 12981 * in sequence to remain in the fast-path. We also add 12982 * in the __predict's to attempt to help the compiler. 12983 * Note that if we return a 0, then we can *not* process 12984 * it and the caller should push the packet into the 12985 * slow-path. 12986 */ 12987 static int 12988 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 12989 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12990 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 12991 { 12992 int32_t acked; 12993 int32_t nsegs; 12994 int32_t under_pacing = 0; 12995 struct tcp_rack *rack; 12996 12997 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 12998 /* Old ack, behind (or duplicate to) the last one rcv'd */ 12999 return (0); 13000 } 13001 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 13002 /* Above what we have sent? */ 13003 return (0); 13004 } 13005 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 13006 /* We are retransmitting */ 13007 return (0); 13008 } 13009 if (__predict_false(tiwin == 0)) { 13010 /* zero window */ 13011 return (0); 13012 } 13013 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 13014 /* We need a SYN or a FIN, unlikely.. */ 13015 return (0); 13016 } 13017 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 13018 /* Timestamp is behind .. old ack with seq wrap? */ 13019 return (0); 13020 } 13021 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 13022 /* Still recovering */ 13023 return (0); 13024 } 13025 rack = (struct tcp_rack *)tp->t_fb_ptr; 13026 if (rack->r_ctl.rc_sacked) { 13027 /* We have sack holes on our scoreboard */ 13028 return (0); 13029 } 13030 /* Ok if we reach here, we can process a fast-ack */ 13031 if (rack->gp_ready && 13032 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 13033 under_pacing = 1; 13034 } 13035 nsegs = max(1, m->m_pkthdr.lro_nsegs); 13036 rack_log_ack(tp, to, th, 0, 0, NULL, NULL); 13037 /* Did the window get updated? */ 13038 if (tiwin != tp->snd_wnd) { 13039 tp->snd_wnd = tiwin; 13040 rack_validate_fo_sendwin_up(tp, rack); 13041 tp->snd_wl1 = th->th_seq; 13042 if (tp->snd_wnd > tp->max_sndwnd) 13043 tp->max_sndwnd = tp->snd_wnd; 13044 } 13045 /* Do we exit persists? */ 13046 if ((rack->rc_in_persist != 0) && 13047 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 13048 rack->r_ctl.rc_pace_min_segs))) { 13049 rack_exit_persist(tp, rack, cts); 13050 } 13051 /* Do we enter persists? */ 13052 if ((rack->rc_in_persist == 0) && 13053 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 13054 TCPS_HAVEESTABLISHED(tp->t_state) && 13055 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 13056 sbavail(&tptosocket(tp)->so_snd) && 13057 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 13058 /* 13059 * Here the rwnd is less than 13060 * the pacing size, we are established, 13061 * nothing is outstanding, and there is 13062 * data to send. Enter persists. 13063 */ 13064 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, th->th_ack); 13065 } 13066 /* 13067 * If last ACK falls within this segment's sequence numbers, record 13068 * the timestamp. NOTE that the test is modified according to the 13069 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 13070 */ 13071 if ((to->to_flags & TOF_TS) != 0 && 13072 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 13073 tp->ts_recent_age = tcp_ts_getticks(); 13074 tp->ts_recent = to->to_tsval; 13075 } 13076 /* 13077 * This is a pure ack for outstanding data. 13078 */ 13079 KMOD_TCPSTAT_INC(tcps_predack); 13080 13081 /* 13082 * "bad retransmit" recovery. 13083 */ 13084 if ((tp->t_flags & TF_PREVVALID) && 13085 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 13086 tp->t_flags &= ~TF_PREVVALID; 13087 if (tp->t_rxtshift == 1 && 13088 (int)(ticks - tp->t_badrxtwin) < 0) 13089 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 13090 } 13091 /* 13092 * Recalculate the transmit timer / rtt. 13093 * 13094 * Some boxes send broken timestamp replies during the SYN+ACK 13095 * phase, ignore timestamps of 0 or we could calculate a huge RTT 13096 * and blow up the retransmit timer. 13097 */ 13098 acked = BYTES_THIS_ACK(tp, th); 13099 13100 #ifdef TCP_HHOOK 13101 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 13102 hhook_run_tcp_est_in(tp, th, to); 13103 #endif 13104 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 13105 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 13106 if (acked) { 13107 struct mbuf *mfree; 13108 13109 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 13110 SOCKBUF_LOCK(&so->so_snd); 13111 mfree = sbcut_locked(&so->so_snd, acked); 13112 tp->snd_una = th->th_ack; 13113 /* Note we want to hold the sb lock through the sendmap adjust */ 13114 rack_adjust_sendmap_head(rack, &so->so_snd); 13115 /* Wake up the socket if we have room to write more */ 13116 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 13117 sowwakeup_locked(so); 13118 m_freem(mfree); 13119 tp->t_rxtshift = 0; 13120 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 13121 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 13122 rack->rc_tlp_in_progress = 0; 13123 rack->r_ctl.rc_tlp_cnt_out = 0; 13124 /* 13125 * If it is the RXT timer we want to 13126 * stop it, so we can restart a TLP. 13127 */ 13128 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 13129 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13130 13131 #ifdef TCP_REQUEST_TRK 13132 rack_req_check_for_comp(rack, th->th_ack); 13133 #endif 13134 } 13135 /* 13136 * Let the congestion control algorithm update congestion control 13137 * related information. This typically means increasing the 13138 * congestion window. 13139 */ 13140 if (tp->snd_wnd < ctf_outstanding(tp)) { 13141 /* The peer collapsed the window */ 13142 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); 13143 } else if (rack->rc_has_collapsed) 13144 rack_un_collapse_window(rack, __LINE__); 13145 if ((rack->r_collapse_point_valid) && 13146 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point))) 13147 rack->r_collapse_point_valid = 0; 13148 /* 13149 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 13150 */ 13151 tp->snd_wl2 = th->th_ack; 13152 tp->t_dupacks = 0; 13153 m_freem(m); 13154 /* ND6_HINT(tp); *//* Some progress has been made. */ 13155 13156 /* 13157 * If all outstanding data are acked, stop retransmit timer, 13158 * otherwise restart timer using current (possibly backed-off) 13159 * value. If process is waiting for space, wakeup/selwakeup/signal. 13160 * If data are ready to send, let tcp_output decide between more 13161 * output or persist. 13162 */ 13163 if (under_pacing && 13164 (rack->use_fixed_rate == 0) && 13165 (rack->in_probe_rtt == 0) && 13166 rack->rc_gp_dyn_mul && 13167 rack->rc_always_pace) { 13168 /* Check if we are dragging bottom */ 13169 rack_check_bottom_drag(tp, rack, so); 13170 } 13171 if (tp->snd_una == tp->snd_max) { 13172 tp->t_flags &= ~TF_PREVVALID; 13173 rack->r_ctl.retran_during_recovery = 0; 13174 rack->rc_suspicious = 0; 13175 rack->r_ctl.dsack_byte_cnt = 0; 13176 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 13177 if (rack->r_ctl.rc_went_idle_time == 0) 13178 rack->r_ctl.rc_went_idle_time = 1; 13179 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 13180 if (sbavail(&tptosocket(tp)->so_snd) == 0) 13181 tp->t_acktime = 0; 13182 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13183 } 13184 if (acked && rack->r_fast_output) 13185 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 13186 if (sbavail(&so->so_snd)) { 13187 rack->r_wanted_output = 1; 13188 } 13189 return (1); 13190 } 13191 13192 /* 13193 * Return value of 1, the TCB is unlocked and most 13194 * likely gone, return value of 0, the TCP is still 13195 * locked. 13196 */ 13197 static int 13198 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 13199 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13200 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13201 { 13202 int32_t ret_val = 0; 13203 int32_t todrop; 13204 int32_t ourfinisacked = 0; 13205 struct tcp_rack *rack; 13206 13207 INP_WLOCK_ASSERT(tptoinpcb(tp)); 13208 13209 ctf_calc_rwin(so, tp); 13210 /* 13211 * If the state is SYN_SENT: if seg contains an ACK, but not for our 13212 * SYN, drop the input. if seg contains a RST, then drop the 13213 * connection. if seg does not contain SYN, then drop it. Otherwise 13214 * this is an acceptable SYN segment initialize tp->rcv_nxt and 13215 * tp->irs if seg contains ack then advance tp->snd_una if seg 13216 * contains an ECE and ECN support is enabled, the stream is ECN 13217 * capable. if SYN has been acked change to ESTABLISHED else 13218 * SYN_RCVD state arrange for segment to be acked (eventually) 13219 * continue processing rest of data/controls. 13220 */ 13221 if ((thflags & TH_ACK) && 13222 (SEQ_LEQ(th->th_ack, tp->iss) || 13223 SEQ_GT(th->th_ack, tp->snd_max))) { 13224 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13225 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13226 return (1); 13227 } 13228 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 13229 TCP_PROBE5(connect__refused, NULL, tp, 13230 mtod(m, const char *), tp, th); 13231 tp = tcp_drop(tp, ECONNREFUSED); 13232 ctf_do_drop(m, tp); 13233 return (1); 13234 } 13235 if (thflags & TH_RST) { 13236 ctf_do_drop(m, tp); 13237 return (1); 13238 } 13239 if (!(thflags & TH_SYN)) { 13240 ctf_do_drop(m, tp); 13241 return (1); 13242 } 13243 tp->irs = th->th_seq; 13244 tcp_rcvseqinit(tp); 13245 rack = (struct tcp_rack *)tp->t_fb_ptr; 13246 if (thflags & TH_ACK) { 13247 int tfo_partial = 0; 13248 13249 KMOD_TCPSTAT_INC(tcps_connects); 13250 soisconnected(so); 13251 #ifdef MAC 13252 mac_socketpeer_set_from_mbuf(m, so); 13253 #endif 13254 /* Do window scaling on this connection? */ 13255 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 13256 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 13257 tp->rcv_scale = tp->request_r_scale; 13258 } 13259 tp->rcv_adv += min(tp->rcv_wnd, 13260 TCP_MAXWIN << tp->rcv_scale); 13261 /* 13262 * If not all the data that was sent in the TFO SYN 13263 * has been acked, resend the remainder right away. 13264 */ 13265 if (IS_FASTOPEN(tp->t_flags) && 13266 (tp->snd_una != tp->snd_max)) { 13267 tp->snd_nxt = th->th_ack; 13268 tfo_partial = 1; 13269 } 13270 /* 13271 * If there's data, delay ACK; if there's also a FIN ACKNOW 13272 * will be turned on later. 13273 */ 13274 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 13275 rack_timer_cancel(tp, rack, 13276 rack->r_ctl.rc_rcvtime, __LINE__); 13277 tp->t_flags |= TF_DELACK; 13278 } else { 13279 rack->r_wanted_output = 1; 13280 tp->t_flags |= TF_ACKNOW; 13281 } 13282 13283 tcp_ecn_input_syn_sent(tp, thflags, iptos); 13284 13285 if (SEQ_GT(th->th_ack, tp->snd_una)) { 13286 /* 13287 * We advance snd_una for the 13288 * fast open case. If th_ack is 13289 * acknowledging data beyond 13290 * snd_una we can't just call 13291 * ack-processing since the 13292 * data stream in our send-map 13293 * will start at snd_una + 1 (one 13294 * beyond the SYN). If its just 13295 * equal we don't need to do that 13296 * and there is no send_map. 13297 */ 13298 tp->snd_una++; 13299 } 13300 /* 13301 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 13302 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 13303 */ 13304 tp->t_starttime = ticks; 13305 if (tp->t_flags & TF_NEEDFIN) { 13306 tcp_state_change(tp, TCPS_FIN_WAIT_1); 13307 tp->t_flags &= ~TF_NEEDFIN; 13308 thflags &= ~TH_SYN; 13309 } else { 13310 tcp_state_change(tp, TCPS_ESTABLISHED); 13311 TCP_PROBE5(connect__established, NULL, tp, 13312 mtod(m, const char *), tp, th); 13313 rack_cc_conn_init(tp); 13314 } 13315 } else { 13316 /* 13317 * Received initial SYN in SYN-SENT[*] state => simultaneous 13318 * open. If segment contains CC option and there is a 13319 * cached CC, apply TAO test. If it succeeds, connection is * 13320 * half-synchronized. Otherwise, do 3-way handshake: 13321 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 13322 * there was no CC option, clear cached CC value. 13323 */ 13324 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); 13325 tcp_state_change(tp, TCPS_SYN_RECEIVED); 13326 } 13327 /* 13328 * Advance th->th_seq to correspond to first data byte. If data, 13329 * trim to stay within window, dropping FIN if necessary. 13330 */ 13331 th->th_seq++; 13332 if (tlen > tp->rcv_wnd) { 13333 todrop = tlen - tp->rcv_wnd; 13334 m_adj(m, -todrop); 13335 tlen = tp->rcv_wnd; 13336 thflags &= ~TH_FIN; 13337 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 13338 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 13339 } 13340 tp->snd_wl1 = th->th_seq - 1; 13341 tp->rcv_up = th->th_seq; 13342 /* 13343 * Client side of transaction: already sent SYN and data. If the 13344 * remote host used T/TCP to validate the SYN, our data will be 13345 * ACK'd; if so, enter normal data segment processing in the middle 13346 * of step 5, ack processing. Otherwise, goto step 6. 13347 */ 13348 if (thflags & TH_ACK) { 13349 /* For syn-sent we need to possibly update the rtt */ 13350 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 13351 uint32_t t, mcts; 13352 13353 mcts = tcp_ts_getticks(); 13354 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 13355 if (!tp->t_rttlow || tp->t_rttlow > t) 13356 tp->t_rttlow = t; 13357 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 13358 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 13359 tcp_rack_xmit_timer_commit(rack, tp); 13360 } 13361 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) 13362 return (ret_val); 13363 /* We may have changed to FIN_WAIT_1 above */ 13364 if (tp->t_state == TCPS_FIN_WAIT_1) { 13365 /* 13366 * In FIN_WAIT_1 STATE in addition to the processing 13367 * for the ESTABLISHED state if our FIN is now 13368 * acknowledged then enter FIN_WAIT_2. 13369 */ 13370 if (ourfinisacked) { 13371 /* 13372 * If we can't receive any more data, then 13373 * closing user can proceed. Starting the 13374 * timer is contrary to the specification, 13375 * but if we don't get a FIN we'll hang 13376 * forever. 13377 * 13378 * XXXjl: we should release the tp also, and 13379 * use a compressed state. 13380 */ 13381 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13382 soisdisconnected(so); 13383 tcp_timer_activate(tp, TT_2MSL, 13384 (tcp_fast_finwait2_recycle ? 13385 tcp_finwait2_timeout : 13386 TP_MAXIDLE(tp))); 13387 } 13388 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13389 } 13390 } 13391 } 13392 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13393 tiwin, thflags, nxt_pkt)); 13394 } 13395 13396 /* 13397 * Return value of 1, the TCB is unlocked and most 13398 * likely gone, return value of 0, the TCP is still 13399 * locked. 13400 */ 13401 static int 13402 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 13403 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13404 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13405 { 13406 struct tcp_rack *rack; 13407 int32_t ret_val = 0; 13408 int32_t ourfinisacked = 0; 13409 13410 rack = (struct tcp_rack *)tp->t_fb_ptr; 13411 ctf_calc_rwin(so, tp); 13412 if ((thflags & TH_RST) || 13413 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13414 return (__ctf_process_rst(m, th, so, tp, 13415 &rack->r_ctl.challenge_ack_ts, 13416 &rack->r_ctl.challenge_ack_cnt)); 13417 if ((thflags & TH_ACK) && 13418 (SEQ_LEQ(th->th_ack, tp->snd_una) || 13419 SEQ_GT(th->th_ack, tp->snd_max))) { 13420 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13421 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13422 return (1); 13423 } 13424 if (IS_FASTOPEN(tp->t_flags)) { 13425 /* 13426 * When a TFO connection is in SYN_RECEIVED, the 13427 * only valid packets are the initial SYN, a 13428 * retransmit/copy of the initial SYN (possibly with 13429 * a subset of the original data), a valid ACK, a 13430 * FIN, or a RST. 13431 */ 13432 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 13433 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13434 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13435 return (1); 13436 } else if (thflags & TH_SYN) { 13437 /* non-initial SYN is ignored */ 13438 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 13439 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 13440 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 13441 ctf_do_drop(m, NULL); 13442 return (0); 13443 } 13444 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 13445 ctf_do_drop(m, NULL); 13446 return (0); 13447 } 13448 } 13449 13450 /* 13451 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13452 * it's less than ts_recent, drop it. 13453 */ 13454 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13455 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13456 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13457 return (ret_val); 13458 } 13459 /* 13460 * In the SYN-RECEIVED state, validate that the packet belongs to 13461 * this connection before trimming the data to fit the receive 13462 * window. Check the sequence number versus IRS since we know the 13463 * sequence numbers haven't wrapped. This is a partial fix for the 13464 * "LAND" DoS attack. 13465 */ 13466 if (SEQ_LT(th->th_seq, tp->irs)) { 13467 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13468 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13469 return (1); 13470 } 13471 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 13472 &rack->r_ctl.challenge_ack_ts, 13473 &rack->r_ctl.challenge_ack_cnt)) { 13474 return (ret_val); 13475 } 13476 /* 13477 * If last ACK falls within this segment's sequence numbers, record 13478 * its timestamp. NOTE: 1) That the test incorporates suggestions 13479 * from the latest proposal of the tcplw@cray.com list (Braden 13480 * 1993/04/26). 2) That updating only on newer timestamps interferes 13481 * with our earlier PAWS tests, so this check should be solely 13482 * predicated on the sequence space of this segment. 3) That we 13483 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13484 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13485 * SEG.Len, This modified check allows us to overcome RFC1323's 13486 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13487 * p.869. In such cases, we can still calculate the RTT correctly 13488 * when RCV.NXT == Last.ACK.Sent. 13489 */ 13490 if ((to->to_flags & TOF_TS) != 0 && 13491 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13492 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13493 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13494 tp->ts_recent_age = tcp_ts_getticks(); 13495 tp->ts_recent = to->to_tsval; 13496 } 13497 tp->snd_wnd = tiwin; 13498 rack_validate_fo_sendwin_up(tp, rack); 13499 /* 13500 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13501 * is on (half-synchronized state), then queue data for later 13502 * processing; else drop segment and return. 13503 */ 13504 if ((thflags & TH_ACK) == 0) { 13505 if (IS_FASTOPEN(tp->t_flags)) { 13506 rack_cc_conn_init(tp); 13507 } 13508 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13509 tiwin, thflags, nxt_pkt)); 13510 } 13511 KMOD_TCPSTAT_INC(tcps_connects); 13512 if (tp->t_flags & TF_SONOTCONN) { 13513 tp->t_flags &= ~TF_SONOTCONN; 13514 soisconnected(so); 13515 } 13516 /* Do window scaling? */ 13517 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 13518 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 13519 tp->rcv_scale = tp->request_r_scale; 13520 } 13521 /* 13522 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 13523 * FIN-WAIT-1 13524 */ 13525 tp->t_starttime = ticks; 13526 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 13527 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 13528 tp->t_tfo_pending = NULL; 13529 } 13530 if (tp->t_flags & TF_NEEDFIN) { 13531 tcp_state_change(tp, TCPS_FIN_WAIT_1); 13532 tp->t_flags &= ~TF_NEEDFIN; 13533 } else { 13534 tcp_state_change(tp, TCPS_ESTABLISHED); 13535 TCP_PROBE5(accept__established, NULL, tp, 13536 mtod(m, const char *), tp, th); 13537 /* 13538 * TFO connections call cc_conn_init() during SYN 13539 * processing. Calling it again here for such connections 13540 * is not harmless as it would undo the snd_cwnd reduction 13541 * that occurs when a TFO SYN|ACK is retransmitted. 13542 */ 13543 if (!IS_FASTOPEN(tp->t_flags)) 13544 rack_cc_conn_init(tp); 13545 } 13546 /* 13547 * Account for the ACK of our SYN prior to 13548 * regular ACK processing below, except for 13549 * simultaneous SYN, which is handled later. 13550 */ 13551 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 13552 tp->snd_una++; 13553 /* 13554 * If segment contains data or ACK, will call tcp_reass() later; if 13555 * not, do so now to pass queued data to user. 13556 */ 13557 if (tlen == 0 && (thflags & TH_FIN) == 0) { 13558 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 13559 (struct mbuf *)0); 13560 if (tp->t_flags & TF_WAKESOR) { 13561 tp->t_flags &= ~TF_WAKESOR; 13562 /* NB: sorwakeup_locked() does an implicit unlock. */ 13563 sorwakeup_locked(so); 13564 } 13565 } 13566 tp->snd_wl1 = th->th_seq - 1; 13567 /* For syn-recv we need to possibly update the rtt */ 13568 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 13569 uint32_t t, mcts; 13570 13571 mcts = tcp_ts_getticks(); 13572 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 13573 if (!tp->t_rttlow || tp->t_rttlow > t) 13574 tp->t_rttlow = t; 13575 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); 13576 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 13577 tcp_rack_xmit_timer_commit(rack, tp); 13578 } 13579 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 13580 return (ret_val); 13581 } 13582 if (tp->t_state == TCPS_FIN_WAIT_1) { 13583 /* We could have went to FIN_WAIT_1 (or EST) above */ 13584 /* 13585 * In FIN_WAIT_1 STATE in addition to the processing for the 13586 * ESTABLISHED state if our FIN is now acknowledged then 13587 * enter FIN_WAIT_2. 13588 */ 13589 if (ourfinisacked) { 13590 /* 13591 * If we can't receive any more data, then closing 13592 * user can proceed. Starting the timer is contrary 13593 * to the specification, but if we don't get a FIN 13594 * we'll hang forever. 13595 * 13596 * XXXjl: we should release the tp also, and use a 13597 * compressed state. 13598 */ 13599 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13600 soisdisconnected(so); 13601 tcp_timer_activate(tp, TT_2MSL, 13602 (tcp_fast_finwait2_recycle ? 13603 tcp_finwait2_timeout : 13604 TP_MAXIDLE(tp))); 13605 } 13606 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13607 } 13608 } 13609 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13610 tiwin, thflags, nxt_pkt)); 13611 } 13612 13613 /* 13614 * Return value of 1, the TCB is unlocked and most 13615 * likely gone, return value of 0, the TCP is still 13616 * locked. 13617 */ 13618 static int 13619 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 13620 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13621 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13622 { 13623 int32_t ret_val = 0; 13624 struct tcp_rack *rack; 13625 13626 /* 13627 * Header prediction: check for the two common cases of a 13628 * uni-directional data xfer. If the packet has no control flags, 13629 * is in-sequence, the window didn't change and we're not 13630 * retransmitting, it's a candidate. If the length is zero and the 13631 * ack moved forward, we're the sender side of the xfer. Just free 13632 * the data acked & wake any higher level process that was blocked 13633 * waiting for space. If the length is non-zero and the ack didn't 13634 * move, we're the receiver side. If we're getting packets in-order 13635 * (the reassembly queue is empty), add the data toc The socket 13636 * buffer and note that we need a delayed ack. Make sure that the 13637 * hidden state-flags are also off. Since we check for 13638 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 13639 */ 13640 rack = (struct tcp_rack *)tp->t_fb_ptr; 13641 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 13642 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) && 13643 __predict_true(SEGQ_EMPTY(tp)) && 13644 __predict_true(th->th_seq == tp->rcv_nxt)) { 13645 if (tlen == 0) { 13646 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 13647 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 13648 return (0); 13649 } 13650 } else { 13651 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 13652 tiwin, nxt_pkt, iptos)) { 13653 return (0); 13654 } 13655 } 13656 } 13657 ctf_calc_rwin(so, tp); 13658 13659 if ((thflags & TH_RST) || 13660 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13661 return (__ctf_process_rst(m, th, so, tp, 13662 &rack->r_ctl.challenge_ack_ts, 13663 &rack->r_ctl.challenge_ack_cnt)); 13664 13665 /* 13666 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13667 * synchronized state. 13668 */ 13669 if (thflags & TH_SYN) { 13670 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13671 return (ret_val); 13672 } 13673 /* 13674 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13675 * it's less than ts_recent, drop it. 13676 */ 13677 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13678 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13679 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13680 return (ret_val); 13681 } 13682 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 13683 &rack->r_ctl.challenge_ack_ts, 13684 &rack->r_ctl.challenge_ack_cnt)) { 13685 return (ret_val); 13686 } 13687 /* 13688 * If last ACK falls within this segment's sequence numbers, record 13689 * its timestamp. NOTE: 1) That the test incorporates suggestions 13690 * from the latest proposal of the tcplw@cray.com list (Braden 13691 * 1993/04/26). 2) That updating only on newer timestamps interferes 13692 * with our earlier PAWS tests, so this check should be solely 13693 * predicated on the sequence space of this segment. 3) That we 13694 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13695 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13696 * SEG.Len, This modified check allows us to overcome RFC1323's 13697 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13698 * p.869. In such cases, we can still calculate the RTT correctly 13699 * when RCV.NXT == Last.ACK.Sent. 13700 */ 13701 if ((to->to_flags & TOF_TS) != 0 && 13702 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13703 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13704 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13705 tp->ts_recent_age = tcp_ts_getticks(); 13706 tp->ts_recent = to->to_tsval; 13707 } 13708 /* 13709 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13710 * is on (half-synchronized state), then queue data for later 13711 * processing; else drop segment and return. 13712 */ 13713 if ((thflags & TH_ACK) == 0) { 13714 if (tp->t_flags & TF_NEEDSYN) { 13715 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13716 tiwin, thflags, nxt_pkt)); 13717 13718 } else if (tp->t_flags & TF_ACKNOW) { 13719 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13720 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13721 return (ret_val); 13722 } else { 13723 ctf_do_drop(m, NULL); 13724 return (0); 13725 } 13726 } 13727 /* 13728 * Ack processing. 13729 */ 13730 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 13731 return (ret_val); 13732 } 13733 if (sbavail(&so->so_snd)) { 13734 if (ctf_progress_timeout_check(tp, true)) { 13735 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 13736 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13737 return (1); 13738 } 13739 } 13740 /* State changes only happen in rack_process_data() */ 13741 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13742 tiwin, thflags, nxt_pkt)); 13743 } 13744 13745 /* 13746 * Return value of 1, the TCB is unlocked and most 13747 * likely gone, return value of 0, the TCP is still 13748 * locked. 13749 */ 13750 static int 13751 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 13752 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13753 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13754 { 13755 int32_t ret_val = 0; 13756 struct tcp_rack *rack; 13757 13758 rack = (struct tcp_rack *)tp->t_fb_ptr; 13759 ctf_calc_rwin(so, tp); 13760 if ((thflags & TH_RST) || 13761 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13762 return (__ctf_process_rst(m, th, so, tp, 13763 &rack->r_ctl.challenge_ack_ts, 13764 &rack->r_ctl.challenge_ack_cnt)); 13765 /* 13766 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13767 * synchronized state. 13768 */ 13769 if (thflags & TH_SYN) { 13770 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13771 return (ret_val); 13772 } 13773 /* 13774 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13775 * it's less than ts_recent, drop it. 13776 */ 13777 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13778 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13779 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13780 return (ret_val); 13781 } 13782 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 13783 &rack->r_ctl.challenge_ack_ts, 13784 &rack->r_ctl.challenge_ack_cnt)) { 13785 return (ret_val); 13786 } 13787 /* 13788 * If last ACK falls within this segment's sequence numbers, record 13789 * its timestamp. NOTE: 1) That the test incorporates suggestions 13790 * from the latest proposal of the tcplw@cray.com list (Braden 13791 * 1993/04/26). 2) That updating only on newer timestamps interferes 13792 * with our earlier PAWS tests, so this check should be solely 13793 * predicated on the sequence space of this segment. 3) That we 13794 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13795 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13796 * SEG.Len, This modified check allows us to overcome RFC1323's 13797 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13798 * p.869. In such cases, we can still calculate the RTT correctly 13799 * when RCV.NXT == Last.ACK.Sent. 13800 */ 13801 if ((to->to_flags & TOF_TS) != 0 && 13802 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13803 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13804 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13805 tp->ts_recent_age = tcp_ts_getticks(); 13806 tp->ts_recent = to->to_tsval; 13807 } 13808 /* 13809 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13810 * is on (half-synchronized state), then queue data for later 13811 * processing; else drop segment and return. 13812 */ 13813 if ((thflags & TH_ACK) == 0) { 13814 if (tp->t_flags & TF_NEEDSYN) { 13815 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13816 tiwin, thflags, nxt_pkt)); 13817 13818 } else if (tp->t_flags & TF_ACKNOW) { 13819 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13820 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13821 return (ret_val); 13822 } else { 13823 ctf_do_drop(m, NULL); 13824 return (0); 13825 } 13826 } 13827 /* 13828 * Ack processing. 13829 */ 13830 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 13831 return (ret_val); 13832 } 13833 if (sbavail(&so->so_snd)) { 13834 if (ctf_progress_timeout_check(tp, true)) { 13835 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13836 tp, tick, PROGRESS_DROP, __LINE__); 13837 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13838 return (1); 13839 } 13840 } 13841 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13842 tiwin, thflags, nxt_pkt)); 13843 } 13844 13845 static int 13846 rack_check_data_after_close(struct mbuf *m, 13847 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 13848 { 13849 struct tcp_rack *rack; 13850 13851 rack = (struct tcp_rack *)tp->t_fb_ptr; 13852 if (rack->rc_allow_data_af_clo == 0) { 13853 close_now: 13854 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 13855 /* tcp_close will kill the inp pre-log the Reset */ 13856 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 13857 tp = tcp_close(tp); 13858 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 13859 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 13860 return (1); 13861 } 13862 if (sbavail(&so->so_snd) == 0) 13863 goto close_now; 13864 /* Ok we allow data that is ignored and a followup reset */ 13865 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 13866 tp->rcv_nxt = th->th_seq + *tlen; 13867 tp->t_flags2 |= TF2_DROP_AF_DATA; 13868 rack->r_wanted_output = 1; 13869 *tlen = 0; 13870 return (0); 13871 } 13872 13873 /* 13874 * Return value of 1, the TCB is unlocked and most 13875 * likely gone, return value of 0, the TCP is still 13876 * locked. 13877 */ 13878 static int 13879 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 13880 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13881 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13882 { 13883 int32_t ret_val = 0; 13884 int32_t ourfinisacked = 0; 13885 struct tcp_rack *rack; 13886 13887 rack = (struct tcp_rack *)tp->t_fb_ptr; 13888 ctf_calc_rwin(so, tp); 13889 13890 if ((thflags & TH_RST) || 13891 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13892 return (__ctf_process_rst(m, th, so, tp, 13893 &rack->r_ctl.challenge_ack_ts, 13894 &rack->r_ctl.challenge_ack_cnt)); 13895 /* 13896 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13897 * synchronized state. 13898 */ 13899 if (thflags & TH_SYN) { 13900 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13901 return (ret_val); 13902 } 13903 /* 13904 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13905 * it's less than ts_recent, drop it. 13906 */ 13907 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13908 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13909 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13910 return (ret_val); 13911 } 13912 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 13913 &rack->r_ctl.challenge_ack_ts, 13914 &rack->r_ctl.challenge_ack_cnt)) { 13915 return (ret_val); 13916 } 13917 /* 13918 * If new data are received on a connection after the user processes 13919 * are gone, then RST the other end. 13920 */ 13921 if ((tp->t_flags & TF_CLOSED) && tlen && 13922 rack_check_data_after_close(m, tp, &tlen, th, so)) 13923 return (1); 13924 /* 13925 * If last ACK falls within this segment's sequence numbers, record 13926 * its timestamp. NOTE: 1) That the test incorporates suggestions 13927 * from the latest proposal of the tcplw@cray.com list (Braden 13928 * 1993/04/26). 2) That updating only on newer timestamps interferes 13929 * with our earlier PAWS tests, so this check should be solely 13930 * predicated on the sequence space of this segment. 3) That we 13931 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13932 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13933 * SEG.Len, This modified check allows us to overcome RFC1323's 13934 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13935 * p.869. In such cases, we can still calculate the RTT correctly 13936 * when RCV.NXT == Last.ACK.Sent. 13937 */ 13938 if ((to->to_flags & TOF_TS) != 0 && 13939 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13940 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13941 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13942 tp->ts_recent_age = tcp_ts_getticks(); 13943 tp->ts_recent = to->to_tsval; 13944 } 13945 /* 13946 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13947 * is on (half-synchronized state), then queue data for later 13948 * processing; else drop segment and return. 13949 */ 13950 if ((thflags & TH_ACK) == 0) { 13951 if (tp->t_flags & TF_NEEDSYN) { 13952 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13953 tiwin, thflags, nxt_pkt)); 13954 } else if (tp->t_flags & TF_ACKNOW) { 13955 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13956 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13957 return (ret_val); 13958 } else { 13959 ctf_do_drop(m, NULL); 13960 return (0); 13961 } 13962 } 13963 /* 13964 * Ack processing. 13965 */ 13966 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 13967 return (ret_val); 13968 } 13969 if (ourfinisacked) { 13970 /* 13971 * If we can't receive any more data, then closing user can 13972 * proceed. Starting the timer is contrary to the 13973 * specification, but if we don't get a FIN we'll hang 13974 * forever. 13975 * 13976 * XXXjl: we should release the tp also, and use a 13977 * compressed state. 13978 */ 13979 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13980 soisdisconnected(so); 13981 tcp_timer_activate(tp, TT_2MSL, 13982 (tcp_fast_finwait2_recycle ? 13983 tcp_finwait2_timeout : 13984 TP_MAXIDLE(tp))); 13985 } 13986 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13987 } 13988 if (sbavail(&so->so_snd)) { 13989 if (ctf_progress_timeout_check(tp, true)) { 13990 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13991 tp, tick, PROGRESS_DROP, __LINE__); 13992 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13993 return (1); 13994 } 13995 } 13996 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13997 tiwin, thflags, nxt_pkt)); 13998 } 13999 14000 /* 14001 * Return value of 1, the TCB is unlocked and most 14002 * likely gone, return value of 0, the TCP is still 14003 * locked. 14004 */ 14005 static int 14006 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 14007 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14008 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14009 { 14010 int32_t ret_val = 0; 14011 int32_t ourfinisacked = 0; 14012 struct tcp_rack *rack; 14013 14014 rack = (struct tcp_rack *)tp->t_fb_ptr; 14015 ctf_calc_rwin(so, tp); 14016 14017 if ((thflags & TH_RST) || 14018 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14019 return (__ctf_process_rst(m, th, so, tp, 14020 &rack->r_ctl.challenge_ack_ts, 14021 &rack->r_ctl.challenge_ack_cnt)); 14022 /* 14023 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14024 * synchronized state. 14025 */ 14026 if (thflags & TH_SYN) { 14027 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14028 return (ret_val); 14029 } 14030 /* 14031 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14032 * it's less than ts_recent, drop it. 14033 */ 14034 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14035 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14036 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14037 return (ret_val); 14038 } 14039 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14040 &rack->r_ctl.challenge_ack_ts, 14041 &rack->r_ctl.challenge_ack_cnt)) { 14042 return (ret_val); 14043 } 14044 /* 14045 * If new data are received on a connection after the user processes 14046 * are gone, then RST the other end. 14047 */ 14048 if ((tp->t_flags & TF_CLOSED) && tlen && 14049 rack_check_data_after_close(m, tp, &tlen, th, so)) 14050 return (1); 14051 /* 14052 * If last ACK falls within this segment's sequence numbers, record 14053 * its timestamp. NOTE: 1) That the test incorporates suggestions 14054 * from the latest proposal of the tcplw@cray.com list (Braden 14055 * 1993/04/26). 2) That updating only on newer timestamps interferes 14056 * with our earlier PAWS tests, so this check should be solely 14057 * predicated on the sequence space of this segment. 3) That we 14058 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14059 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14060 * SEG.Len, This modified check allows us to overcome RFC1323's 14061 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14062 * p.869. In such cases, we can still calculate the RTT correctly 14063 * when RCV.NXT == Last.ACK.Sent. 14064 */ 14065 if ((to->to_flags & TOF_TS) != 0 && 14066 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14067 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14068 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14069 tp->ts_recent_age = tcp_ts_getticks(); 14070 tp->ts_recent = to->to_tsval; 14071 } 14072 /* 14073 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14074 * is on (half-synchronized state), then queue data for later 14075 * processing; else drop segment and return. 14076 */ 14077 if ((thflags & TH_ACK) == 0) { 14078 if (tp->t_flags & TF_NEEDSYN) { 14079 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14080 tiwin, thflags, nxt_pkt)); 14081 } else if (tp->t_flags & TF_ACKNOW) { 14082 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 14083 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 14084 return (ret_val); 14085 } else { 14086 ctf_do_drop(m, NULL); 14087 return (0); 14088 } 14089 } 14090 /* 14091 * Ack processing. 14092 */ 14093 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 14094 return (ret_val); 14095 } 14096 if (ourfinisacked) { 14097 tcp_twstart(tp); 14098 m_freem(m); 14099 return (1); 14100 } 14101 if (sbavail(&so->so_snd)) { 14102 if (ctf_progress_timeout_check(tp, true)) { 14103 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 14104 tp, tick, PROGRESS_DROP, __LINE__); 14105 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14106 return (1); 14107 } 14108 } 14109 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14110 tiwin, thflags, nxt_pkt)); 14111 } 14112 14113 /* 14114 * Return value of 1, the TCB is unlocked and most 14115 * likely gone, return value of 0, the TCP is still 14116 * locked. 14117 */ 14118 static int 14119 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 14120 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14121 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14122 { 14123 int32_t ret_val = 0; 14124 int32_t ourfinisacked = 0; 14125 struct tcp_rack *rack; 14126 14127 rack = (struct tcp_rack *)tp->t_fb_ptr; 14128 ctf_calc_rwin(so, tp); 14129 14130 if ((thflags & TH_RST) || 14131 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14132 return (__ctf_process_rst(m, th, so, tp, 14133 &rack->r_ctl.challenge_ack_ts, 14134 &rack->r_ctl.challenge_ack_cnt)); 14135 /* 14136 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14137 * synchronized state. 14138 */ 14139 if (thflags & TH_SYN) { 14140 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14141 return (ret_val); 14142 } 14143 /* 14144 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14145 * it's less than ts_recent, drop it. 14146 */ 14147 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14148 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14149 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14150 return (ret_val); 14151 } 14152 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14153 &rack->r_ctl.challenge_ack_ts, 14154 &rack->r_ctl.challenge_ack_cnt)) { 14155 return (ret_val); 14156 } 14157 /* 14158 * If new data are received on a connection after the user processes 14159 * are gone, then RST the other end. 14160 */ 14161 if ((tp->t_flags & TF_CLOSED) && tlen && 14162 rack_check_data_after_close(m, tp, &tlen, th, so)) 14163 return (1); 14164 /* 14165 * If last ACK falls within this segment's sequence numbers, record 14166 * its timestamp. NOTE: 1) That the test incorporates suggestions 14167 * from the latest proposal of the tcplw@cray.com list (Braden 14168 * 1993/04/26). 2) That updating only on newer timestamps interferes 14169 * with our earlier PAWS tests, so this check should be solely 14170 * predicated on the sequence space of this segment. 3) That we 14171 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14172 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14173 * SEG.Len, This modified check allows us to overcome RFC1323's 14174 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14175 * p.869. In such cases, we can still calculate the RTT correctly 14176 * when RCV.NXT == Last.ACK.Sent. 14177 */ 14178 if ((to->to_flags & TOF_TS) != 0 && 14179 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14180 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14181 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14182 tp->ts_recent_age = tcp_ts_getticks(); 14183 tp->ts_recent = to->to_tsval; 14184 } 14185 /* 14186 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14187 * is on (half-synchronized state), then queue data for later 14188 * processing; else drop segment and return. 14189 */ 14190 if ((thflags & TH_ACK) == 0) { 14191 if (tp->t_flags & TF_NEEDSYN) { 14192 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14193 tiwin, thflags, nxt_pkt)); 14194 } else if (tp->t_flags & TF_ACKNOW) { 14195 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 14196 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 14197 return (ret_val); 14198 } else { 14199 ctf_do_drop(m, NULL); 14200 return (0); 14201 } 14202 } 14203 /* 14204 * case TCPS_LAST_ACK: Ack processing. 14205 */ 14206 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 14207 return (ret_val); 14208 } 14209 if (ourfinisacked) { 14210 tp = tcp_close(tp); 14211 ctf_do_drop(m, tp); 14212 return (1); 14213 } 14214 if (sbavail(&so->so_snd)) { 14215 if (ctf_progress_timeout_check(tp, true)) { 14216 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 14217 tp, tick, PROGRESS_DROP, __LINE__); 14218 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14219 return (1); 14220 } 14221 } 14222 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14223 tiwin, thflags, nxt_pkt)); 14224 } 14225 14226 /* 14227 * Return value of 1, the TCB is unlocked and most 14228 * likely gone, return value of 0, the TCP is still 14229 * locked. 14230 */ 14231 static int 14232 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 14233 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14234 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14235 { 14236 int32_t ret_val = 0; 14237 int32_t ourfinisacked = 0; 14238 struct tcp_rack *rack; 14239 14240 rack = (struct tcp_rack *)tp->t_fb_ptr; 14241 ctf_calc_rwin(so, tp); 14242 14243 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 14244 if ((thflags & TH_RST) || 14245 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14246 return (__ctf_process_rst(m, th, so, tp, 14247 &rack->r_ctl.challenge_ack_ts, 14248 &rack->r_ctl.challenge_ack_cnt)); 14249 /* 14250 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14251 * synchronized state. 14252 */ 14253 if (thflags & TH_SYN) { 14254 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14255 return (ret_val); 14256 } 14257 /* 14258 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14259 * it's less than ts_recent, drop it. 14260 */ 14261 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14262 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14263 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14264 return (ret_val); 14265 } 14266 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14267 &rack->r_ctl.challenge_ack_ts, 14268 &rack->r_ctl.challenge_ack_cnt)) { 14269 return (ret_val); 14270 } 14271 /* 14272 * If new data are received on a connection after the user processes 14273 * are gone, then RST the other end. 14274 */ 14275 if ((tp->t_flags & TF_CLOSED) && tlen && 14276 rack_check_data_after_close(m, tp, &tlen, th, so)) 14277 return (1); 14278 /* 14279 * If last ACK falls within this segment's sequence numbers, record 14280 * its timestamp. NOTE: 1) That the test incorporates suggestions 14281 * from the latest proposal of the tcplw@cray.com list (Braden 14282 * 1993/04/26). 2) That updating only on newer timestamps interferes 14283 * with our earlier PAWS tests, so this check should be solely 14284 * predicated on the sequence space of this segment. 3) That we 14285 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14286 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14287 * SEG.Len, This modified check allows us to overcome RFC1323's 14288 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14289 * p.869. In such cases, we can still calculate the RTT correctly 14290 * when RCV.NXT == Last.ACK.Sent. 14291 */ 14292 if ((to->to_flags & TOF_TS) != 0 && 14293 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14294 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14295 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14296 tp->ts_recent_age = tcp_ts_getticks(); 14297 tp->ts_recent = to->to_tsval; 14298 } 14299 /* 14300 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14301 * is on (half-synchronized state), then queue data for later 14302 * processing; else drop segment and return. 14303 */ 14304 if ((thflags & TH_ACK) == 0) { 14305 if (tp->t_flags & TF_NEEDSYN) { 14306 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14307 tiwin, thflags, nxt_pkt)); 14308 } else if (tp->t_flags & TF_ACKNOW) { 14309 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 14310 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 14311 return (ret_val); 14312 } else { 14313 ctf_do_drop(m, NULL); 14314 return (0); 14315 } 14316 } 14317 /* 14318 * Ack processing. 14319 */ 14320 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 14321 return (ret_val); 14322 } 14323 if (sbavail(&so->so_snd)) { 14324 if (ctf_progress_timeout_check(tp, true)) { 14325 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 14326 tp, tick, PROGRESS_DROP, __LINE__); 14327 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14328 return (1); 14329 } 14330 } 14331 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14332 tiwin, thflags, nxt_pkt)); 14333 } 14334 14335 static void inline 14336 rack_clear_rate_sample(struct tcp_rack *rack) 14337 { 14338 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 14339 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 14340 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 14341 } 14342 14343 static void 14344 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 14345 { 14346 uint64_t bw_est, rate_wanted; 14347 int chged = 0; 14348 uint32_t user_max, orig_min, orig_max; 14349 14350 #ifdef TCP_REQUEST_TRK 14351 if (rack->rc_hybrid_mode && 14352 (rack->r_ctl.rc_pace_max_segs != 0) && 14353 (rack_hybrid_allow_set_maxseg == 1) && 14354 (rack->r_ctl.rc_last_sft != NULL)) { 14355 rack->r_ctl.rc_last_sft->hybrid_flags &= ~TCP_HYBRID_PACING_SETMSS; 14356 return; 14357 } 14358 #endif 14359 orig_min = rack->r_ctl.rc_pace_min_segs; 14360 orig_max = rack->r_ctl.rc_pace_max_segs; 14361 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 14362 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 14363 chged = 1; 14364 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 14365 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 14366 if (user_max != rack->r_ctl.rc_pace_max_segs) 14367 chged = 1; 14368 } 14369 if (rack->rc_force_max_seg) { 14370 rack->r_ctl.rc_pace_max_segs = user_max; 14371 } else if (rack->use_fixed_rate) { 14372 bw_est = rack_get_bw(rack); 14373 if ((rack->r_ctl.crte == NULL) || 14374 (bw_est != rack->r_ctl.crte->rate)) { 14375 rack->r_ctl.rc_pace_max_segs = user_max; 14376 } else { 14377 /* We are pacing right at the hardware rate */ 14378 uint32_t segsiz, pace_one; 14379 14380 if (rack_pace_one_seg || 14381 (rack->r_ctl.rc_user_set_min_segs == 1)) 14382 pace_one = 1; 14383 else 14384 pace_one = 0; 14385 segsiz = min(ctf_fixed_maxseg(tp), 14386 rack->r_ctl.rc_pace_min_segs); 14387 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor( 14388 tp, bw_est, segsiz, pace_one, 14389 rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); 14390 } 14391 } else if (rack->rc_always_pace) { 14392 if (rack->r_ctl.gp_bw || 14393 rack->r_ctl.init_rate) { 14394 /* We have a rate of some sort set */ 14395 uint32_t orig; 14396 14397 bw_est = rack_get_bw(rack); 14398 orig = rack->r_ctl.rc_pace_max_segs; 14399 if (fill_override) 14400 rate_wanted = *fill_override; 14401 else 14402 rate_wanted = rack_get_gp_est(rack); 14403 if (rate_wanted) { 14404 /* We have something */ 14405 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 14406 rate_wanted, 14407 ctf_fixed_maxseg(rack->rc_tp)); 14408 } else 14409 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 14410 if (orig != rack->r_ctl.rc_pace_max_segs) 14411 chged = 1; 14412 } else if ((rack->r_ctl.gp_bw == 0) && 14413 (rack->r_ctl.rc_pace_max_segs == 0)) { 14414 /* 14415 * If we have nothing limit us to bursting 14416 * out IW sized pieces. 14417 */ 14418 chged = 1; 14419 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 14420 } 14421 } 14422 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 14423 chged = 1; 14424 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 14425 } 14426 if (chged) 14427 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 14428 } 14429 14430 14431 static void 14432 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack, int32_t flags) 14433 { 14434 #ifdef INET6 14435 struct ip6_hdr *ip6 = NULL; 14436 #endif 14437 #ifdef INET 14438 struct ip *ip = NULL; 14439 #endif 14440 struct udphdr *udp = NULL; 14441 14442 /* Ok lets fill in the fast block, it can only be used with no IP options! */ 14443 #ifdef INET6 14444 if (rack->r_is_v6) { 14445 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 14446 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 14447 if (tp->t_port) { 14448 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 14449 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 14450 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 14451 udp->uh_dport = tp->t_port; 14452 rack->r_ctl.fsb.udp = udp; 14453 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 14454 } else 14455 { 14456 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 14457 rack->r_ctl.fsb.udp = NULL; 14458 } 14459 tcpip_fillheaders(rack->rc_inp, 14460 tp->t_port, 14461 ip6, rack->r_ctl.fsb.th); 14462 rack->r_ctl.fsb.hoplimit = in6_selecthlim(rack->rc_inp, NULL); 14463 } else 14464 #endif /* INET6 */ 14465 #ifdef INET 14466 { 14467 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 14468 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 14469 if (tp->t_port) { 14470 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 14471 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 14472 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 14473 udp->uh_dport = tp->t_port; 14474 rack->r_ctl.fsb.udp = udp; 14475 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 14476 } else 14477 { 14478 rack->r_ctl.fsb.udp = NULL; 14479 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 14480 } 14481 tcpip_fillheaders(rack->rc_inp, 14482 tp->t_port, 14483 ip, rack->r_ctl.fsb.th); 14484 rack->r_ctl.fsb.hoplimit = tptoinpcb(tp)->inp_ip_ttl; 14485 } 14486 #endif 14487 rack->r_ctl.fsb.recwin = lmin(lmax(sbspace(&tptosocket(tp)->so_rcv), 0), 14488 (long)TCP_MAXWIN << tp->rcv_scale); 14489 rack->r_fsb_inited = 1; 14490 } 14491 14492 static int 14493 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 14494 { 14495 /* 14496 * Allocate the larger of spaces V6 if available else just 14497 * V4 and include udphdr (overbook) 14498 */ 14499 #ifdef INET6 14500 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 14501 #else 14502 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 14503 #endif 14504 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 14505 M_TCPFSB, M_NOWAIT|M_ZERO); 14506 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 14507 return (ENOMEM); 14508 } 14509 rack->r_fsb_inited = 0; 14510 return (0); 14511 } 14512 14513 static void 14514 rack_log_hystart_event(struct tcp_rack *rack, uint32_t high_seq, uint8_t mod) 14515 { 14516 /* 14517 * Types of logs (mod value) 14518 * 20 - Initial round setup 14519 * 21 - Rack declares a new round. 14520 */ 14521 struct tcpcb *tp; 14522 14523 tp = rack->rc_tp; 14524 if (tcp_bblogging_on(tp)) { 14525 union tcp_log_stackspecific log; 14526 struct timeval tv; 14527 14528 memset(&log, 0, sizeof(log)); 14529 log.u_bbr.flex1 = rack->r_ctl.current_round; 14530 log.u_bbr.flex2 = rack->r_ctl.roundends; 14531 log.u_bbr.flex3 = high_seq; 14532 log.u_bbr.flex4 = tp->snd_max; 14533 log.u_bbr.flex8 = mod; 14534 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14535 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 14536 log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes; 14537 TCP_LOG_EVENTP(tp, NULL, 14538 &tptosocket(tp)->so_rcv, 14539 &tptosocket(tp)->so_snd, 14540 TCP_HYSTART, 0, 14541 0, &log, false, &tv); 14542 } 14543 } 14544 14545 static void 14546 rack_deferred_init(struct tcpcb *tp, struct tcp_rack *rack) 14547 { 14548 rack->rack_deferred_inited = 1; 14549 rack->r_ctl.roundends = tp->snd_max; 14550 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 14551 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 14552 } 14553 14554 static void 14555 rack_init_retransmit_value(struct tcp_rack *rack, int ctl) 14556 { 14557 /* Retransmit bit controls. 14558 * 14559 * The setting of these values control one of 14560 * three settings you can have and dictate 14561 * how rack does retransmissions. Note this 14562 * is in *any* mode i.e. pacing on or off DGP 14563 * fixed rate pacing, or just bursting rack. 14564 * 14565 * 1 - Use full sized retransmits i.e. limit 14566 * the size to whatever the pace_max_segments 14567 * size is. 14568 * 14569 * 2 - Use pacer min granularity as a guide to 14570 * the size combined with the current calculated 14571 * goodput b/w measurement. So for example if 14572 * the goodput is measured at 20Mbps we would 14573 * calculate 8125 (pacer minimum 250usec in 14574 * that b/w) and then round it up to the next 14575 * MSS i.e. for 1448 mss 6 MSS or 8688 bytes. 14576 * 14577 * 0 - The rack default 1 MSS (anything not 0/1/2 14578 * fall here too if we are setting via rack_init()). 14579 * 14580 */ 14581 if (ctl == 1) { 14582 rack->full_size_rxt = 1; 14583 rack->shape_rxt_to_pacing_min = 0; 14584 } else if (ctl == 2) { 14585 rack->full_size_rxt = 0; 14586 rack->shape_rxt_to_pacing_min = 1; 14587 } else { 14588 rack->full_size_rxt = 0; 14589 rack->shape_rxt_to_pacing_min = 0; 14590 } 14591 } 14592 14593 static void 14594 rack_log_chg_info(struct tcpcb *tp, struct tcp_rack *rack, uint8_t mod, 14595 uint32_t flex1, 14596 uint32_t flex2, 14597 uint32_t flex3) 14598 { 14599 if (tcp_bblogging_on(rack->rc_tp)) { 14600 union tcp_log_stackspecific log; 14601 struct timeval tv; 14602 14603 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14604 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14605 log.u_bbr.flex8 = mod; 14606 log.u_bbr.flex1 = flex1; 14607 log.u_bbr.flex2 = flex2; 14608 log.u_bbr.flex3 = flex3; 14609 tcp_log_event(tp, NULL, NULL, NULL, TCP_CHG_QUERY, 0, 14610 0, &log, false, NULL, __func__, __LINE__, &tv); 14611 } 14612 } 14613 14614 static int 14615 rack_chg_query(struct tcpcb *tp, struct tcp_query_resp *reqr) 14616 { 14617 struct tcp_rack *rack; 14618 struct rack_sendmap *rsm; 14619 int i; 14620 14621 14622 rack = (struct tcp_rack *)tp->t_fb_ptr; 14623 switch (reqr->req) { 14624 case TCP_QUERY_SENDMAP: 14625 if ((reqr->req_param == tp->snd_max) || 14626 (tp->snd_max == tp->snd_una)){ 14627 /* Unlikely */ 14628 return (0); 14629 } 14630 rsm = tqhash_find(rack->r_ctl.tqh, reqr->req_param); 14631 if (rsm == NULL) { 14632 /* Can't find that seq -- unlikely */ 14633 return (0); 14634 } 14635 reqr->sendmap_start = rsm->r_start; 14636 reqr->sendmap_end = rsm->r_end; 14637 reqr->sendmap_send_cnt = rsm->r_rtr_cnt; 14638 reqr->sendmap_fas = rsm->r_fas; 14639 if (reqr->sendmap_send_cnt > SNDMAP_NRTX) 14640 reqr->sendmap_send_cnt = SNDMAP_NRTX; 14641 for(i=0; i<reqr->sendmap_send_cnt; i++) 14642 reqr->sendmap_time[i] = rsm->r_tim_lastsent[i]; 14643 reqr->sendmap_ack_arrival = rsm->r_ack_arrival; 14644 reqr->sendmap_flags = rsm->r_flags & SNDMAP_MASK; 14645 reqr->sendmap_r_rtr_bytes = rsm->r_rtr_bytes; 14646 reqr->sendmap_dupacks = rsm->r_dupack; 14647 rack_log_chg_info(tp, rack, 1, 14648 rsm->r_start, 14649 rsm->r_end, 14650 rsm->r_flags); 14651 return(1); 14652 break; 14653 case TCP_QUERY_TIMERS_UP: 14654 if (rack->r_ctl.rc_hpts_flags == 0) { 14655 /* no timers up */ 14656 return (0); 14657 } 14658 reqr->timer_hpts_flags = rack->r_ctl.rc_hpts_flags; 14659 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14660 reqr->timer_pacing_to = rack->r_ctl.rc_last_output_to; 14661 } 14662 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 14663 reqr->timer_timer_exp = rack->r_ctl.rc_timer_exp; 14664 } 14665 rack_log_chg_info(tp, rack, 2, 14666 rack->r_ctl.rc_hpts_flags, 14667 rack->r_ctl.rc_last_output_to, 14668 rack->r_ctl.rc_timer_exp); 14669 return (1); 14670 break; 14671 case TCP_QUERY_RACK_TIMES: 14672 /* Reordering items */ 14673 reqr->rack_num_dsacks = rack->r_ctl.num_dsack; 14674 reqr->rack_reorder_ts = rack->r_ctl.rc_reorder_ts; 14675 /* Timerstamps and timers */ 14676 reqr->rack_rxt_last_time = rack->r_ctl.rc_tlp_rxt_last_time; 14677 reqr->rack_min_rtt = rack->r_ctl.rc_rack_min_rtt; 14678 reqr->rack_rtt = rack->rc_rack_rtt; 14679 reqr->rack_tmit_time = rack->r_ctl.rc_rack_tmit_time; 14680 reqr->rack_srtt_measured = rack->rc_srtt_measure_made; 14681 /* PRR data */ 14682 reqr->rack_sacked = rack->r_ctl.rc_sacked; 14683 reqr->rack_holes_rxt = rack->r_ctl.rc_holes_rxt; 14684 reqr->rack_prr_delivered = rack->r_ctl.rc_prr_delivered; 14685 reqr->rack_prr_recovery_fs = rack->r_ctl.rc_prr_recovery_fs; 14686 reqr->rack_prr_sndcnt = rack->r_ctl.rc_prr_sndcnt; 14687 reqr->rack_prr_out = rack->r_ctl.rc_prr_out; 14688 /* TLP and persists info */ 14689 reqr->rack_tlp_out = rack->rc_tlp_in_progress; 14690 reqr->rack_tlp_cnt_out = rack->r_ctl.rc_tlp_cnt_out; 14691 if (rack->rc_in_persist) { 14692 reqr->rack_time_went_idle = rack->r_ctl.rc_went_idle_time; 14693 reqr->rack_in_persist = 1; 14694 } else { 14695 reqr->rack_time_went_idle = 0; 14696 reqr->rack_in_persist = 0; 14697 } 14698 if (rack->r_wanted_output) 14699 reqr->rack_wanted_output = 1; 14700 else 14701 reqr->rack_wanted_output = 0; 14702 return (1); 14703 break; 14704 default: 14705 return (-EINVAL); 14706 } 14707 } 14708 14709 static void 14710 rack_switch_failed(struct tcpcb *tp) 14711 { 14712 /* 14713 * This method gets called if a stack switch was 14714 * attempted and it failed. We are left 14715 * but our hpts timers were stopped and we 14716 * need to validate time units and t_flags2. 14717 */ 14718 struct tcp_rack *rack; 14719 struct timeval tv; 14720 uint32_t cts; 14721 uint32_t toval; 14722 struct hpts_diag diag; 14723 14724 rack = (struct tcp_rack *)tp->t_fb_ptr; 14725 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC); 14726 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 14727 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 14728 else 14729 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 14730 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 14731 tp->t_flags2 |= TF2_MBUF_ACKCMP; 14732 if (tp->t_in_hpts > IHPTS_NONE) { 14733 /* Strange */ 14734 return; 14735 } 14736 cts = tcp_get_usecs(&tv); 14737 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14738 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 14739 toval = rack->r_ctl.rc_last_output_to - cts; 14740 } else { 14741 /* one slot please */ 14742 toval = HPTS_TICKS_PER_SLOT; 14743 } 14744 } else if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 14745 if (TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 14746 toval = rack->r_ctl.rc_timer_exp - cts; 14747 } else { 14748 /* one slot please */ 14749 toval = HPTS_TICKS_PER_SLOT; 14750 } 14751 } else 14752 toval = HPTS_TICKS_PER_SLOT; 14753 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(toval), 14754 __LINE__, &diag); 14755 rack_log_hpts_diag(rack, cts, &diag, &tv); 14756 } 14757 14758 static int 14759 rack_init_outstanding(struct tcpcb *tp, struct tcp_rack *rack, uint32_t us_cts, void *ptr) 14760 { 14761 struct rack_sendmap *rsm, *ersm; 14762 int insret __diagused; 14763 /* 14764 * When initing outstanding, we must be quite careful 14765 * to not refer to tp->t_fb_ptr. This has the old rack 14766 * pointer in it, not the "new" one (when we are doing 14767 * a stack switch). 14768 */ 14769 14770 14771 if (tp->t_fb->tfb_chg_query == NULL) { 14772 /* Create a send map for the current outstanding data */ 14773 14774 rsm = rack_alloc(rack); 14775 if (rsm == NULL) { 14776 uma_zfree(rack_pcb_zone, ptr); 14777 return (ENOMEM); 14778 } 14779 rsm->r_no_rtt_allowed = 1; 14780 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 14781 rsm->r_rtr_cnt = 1; 14782 rsm->r_rtr_bytes = 0; 14783 if (tp->t_flags & TF_SENTFIN) 14784 rsm->r_flags |= RACK_HAS_FIN; 14785 rsm->r_end = tp->snd_max; 14786 if (tp->snd_una == tp->iss) { 14787 /* The data space is one beyond snd_una */ 14788 rsm->r_flags |= RACK_HAS_SYN; 14789 rsm->r_start = tp->iss; 14790 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); 14791 } else 14792 rsm->r_start = tp->snd_una; 14793 rsm->r_dupack = 0; 14794 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 14795 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 14796 if (rsm->m) { 14797 rsm->orig_m_len = rsm->m->m_len; 14798 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 14799 } else { 14800 rsm->orig_m_len = 0; 14801 rsm->orig_t_space = 0; 14802 } 14803 } else { 14804 /* 14805 * This can happen if we have a stand-alone FIN or 14806 * SYN. 14807 */ 14808 rsm->m = NULL; 14809 rsm->orig_m_len = 0; 14810 rsm->orig_t_space = 0; 14811 rsm->soff = 0; 14812 } 14813 #ifdef INVARIANTS 14814 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 14815 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p", 14816 insret, rack, rsm); 14817 } 14818 #else 14819 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 14820 #endif 14821 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 14822 rsm->r_in_tmap = 1; 14823 } else { 14824 /* We have a query mechanism, lets use it */ 14825 struct tcp_query_resp qr; 14826 int i; 14827 tcp_seq at; 14828 14829 at = tp->snd_una; 14830 while (at != tp->snd_max) { 14831 memset(&qr, 0, sizeof(qr)); 14832 qr.req = TCP_QUERY_SENDMAP; 14833 qr.req_param = at; 14834 if ((*tp->t_fb->tfb_chg_query)(tp, &qr) == 0) 14835 break; 14836 /* Move forward */ 14837 at = qr.sendmap_end; 14838 /* Now lets build the entry for this one */ 14839 rsm = rack_alloc(rack); 14840 if (rsm == NULL) { 14841 uma_zfree(rack_pcb_zone, ptr); 14842 return (ENOMEM); 14843 } 14844 memset(rsm, 0, sizeof(struct rack_sendmap)); 14845 /* Now configure the rsm and insert it */ 14846 rsm->r_dupack = qr.sendmap_dupacks; 14847 rsm->r_start = qr.sendmap_start; 14848 rsm->r_end = qr.sendmap_end; 14849 if (qr.sendmap_fas) 14850 rsm->r_fas = qr.sendmap_end; 14851 else 14852 rsm->r_fas = rsm->r_start - tp->snd_una; 14853 /* 14854 * We have carefully aligned the bits 14855 * so that all we have to do is copy over 14856 * the bits with the mask. 14857 */ 14858 rsm->r_flags = qr.sendmap_flags & SNDMAP_MASK; 14859 rsm->r_rtr_bytes = qr.sendmap_r_rtr_bytes; 14860 rsm->r_rtr_cnt = qr.sendmap_send_cnt; 14861 rsm->r_ack_arrival = qr.sendmap_ack_arrival; 14862 for (i=0 ; i<rsm->r_rtr_cnt; i++) 14863 rsm->r_tim_lastsent[i] = qr.sendmap_time[i]; 14864 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 14865 (rsm->r_start - tp->snd_una), &rsm->soff); 14866 if (rsm->m) { 14867 rsm->orig_m_len = rsm->m->m_len; 14868 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 14869 } else { 14870 rsm->orig_m_len = 0; 14871 rsm->orig_t_space = 0; 14872 } 14873 #ifdef INVARIANTS 14874 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 14875 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p", 14876 insret, rack, rsm); 14877 } 14878 #else 14879 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 14880 #endif 14881 if ((rsm->r_flags & RACK_ACKED) == 0) { 14882 TAILQ_FOREACH(ersm, &rack->r_ctl.rc_tmap, r_tnext) { 14883 if (ersm->r_tim_lastsent[(ersm->r_rtr_cnt-1)] > 14884 rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) { 14885 /* 14886 * If the existing ersm was sent at 14887 * a later time than the new one, then 14888 * the new one should appear ahead of this 14889 * ersm. 14890 */ 14891 rsm->r_in_tmap = 1; 14892 TAILQ_INSERT_BEFORE(ersm, rsm, r_tnext); 14893 break; 14894 } 14895 } 14896 if (rsm->r_in_tmap == 0) { 14897 /* 14898 * Not found so shove it on the tail. 14899 */ 14900 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 14901 rsm->r_in_tmap = 1; 14902 } 14903 } else { 14904 if ((rack->r_ctl.rc_sacklast == NULL) || 14905 (SEQ_GT(rsm->r_end, rack->r_ctl.rc_sacklast->r_end))) { 14906 rack->r_ctl.rc_sacklast = rsm; 14907 } 14908 } 14909 rack_log_chg_info(tp, rack, 3, 14910 rsm->r_start, 14911 rsm->r_end, 14912 rsm->r_flags); 14913 } 14914 } 14915 return (0); 14916 } 14917 14918 static void 14919 rack_translate_clamp_value(struct tcp_rack *rack, uint32_t optval) 14920 { 14921 /* 14922 * P = percent bits 14923 * F = fill cw bit -- Toggle fillcw if this bit is set. 14924 * S = Segment bits 14925 * M = set max segment bit 14926 * U = Unclamined 14927 * C = If set to non-zero override the max number of clamps. 14928 * L = Bit to indicate if clamped gets lower. 14929 * 14930 * CCCC CCCCC UUUU UULF PPPP PPPP PPPP PPPP 14931 * 14932 * The lowest 3 nibbles is the perentage .1 - 6553.5% 14933 * where 10.1 = 101, max 6553.5 14934 * The upper 16 bits holds some options. 14935 * The F bit will turn on fill-cw on if you are 14936 * not pacing, it will turn it off if dgp is on. 14937 * The L bit will change it so when clamped we get 14938 * the min(gp, lt-bw) for dgp. 14939 */ 14940 uint16_t per; 14941 14942 rack->r_ctl.saved_rxt_clamp_val = optval; 14943 per = optval & 0x0000ffff; 14944 rack->r_ctl.rxt_threshold = (uint64_t)(per & 0xffff); 14945 if (optval > 0) { 14946 uint16_t clamp_opt; 14947 14948 rack->excess_rxt_on = 1; 14949 clamp_opt = ((optval & 0xffff0000) >> 16); 14950 rack->r_ctl.clamp_options = clamp_opt & 0x00ff; 14951 if (clamp_opt & 0xff00) { 14952 /* A max clamps is also present */ 14953 rack->r_ctl.max_clamps = (clamp_opt >> 8); 14954 } else { 14955 /* No specified clamps means no limit */ 14956 rack->r_ctl.max_clamps = 0; 14957 } 14958 if (rack->r_ctl.clamp_options & 0x0002) { 14959 rack->r_clamped_gets_lower = 1; 14960 } else { 14961 rack->r_clamped_gets_lower = 0; 14962 } 14963 } else { 14964 /* Turn it off back to default */ 14965 rack->excess_rxt_on = 0; 14966 rack->r_clamped_gets_lower = 0; 14967 } 14968 14969 } 14970 14971 14972 static int32_t 14973 rack_init(struct tcpcb *tp, void **ptr) 14974 { 14975 struct inpcb *inp = tptoinpcb(tp); 14976 struct tcp_rack *rack = NULL; 14977 uint32_t iwin, snt, us_cts; 14978 int err, no_query; 14979 14980 tcp_hpts_init(tp); 14981 14982 /* 14983 * First are we the initial or are we a switched stack? 14984 * If we are initing via tcp_newtcppcb the ptr passed 14985 * will be tp->t_fb_ptr. If its a stack switch that 14986 * has a previous stack we can query it will be a local 14987 * var that will in the end be set into t_fb_ptr. 14988 */ 14989 if (ptr == &tp->t_fb_ptr) 14990 no_query = 1; 14991 else 14992 no_query = 0; 14993 *ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 14994 if (*ptr == NULL) { 14995 /* 14996 * We need to allocate memory but cant. The INP and INP_INFO 14997 * locks and they are recursive (happens during setup. So a 14998 * scheme to drop the locks fails :( 14999 * 15000 */ 15001 return(ENOMEM); 15002 } 15003 memset(*ptr, 0, sizeof(struct tcp_rack)); 15004 rack = (struct tcp_rack *)*ptr; 15005 rack->r_ctl.tqh = malloc(sizeof(struct tailq_hash), M_TCPFSB, M_NOWAIT); 15006 if (rack->r_ctl.tqh == NULL) { 15007 uma_zfree(rack_pcb_zone, rack); 15008 return(ENOMEM); 15009 } 15010 tqhash_init(rack->r_ctl.tqh); 15011 TAILQ_INIT(&rack->r_ctl.rc_free); 15012 TAILQ_INIT(&rack->r_ctl.rc_tmap); 15013 rack->rc_tp = tp; 15014 rack->rc_inp = inp; 15015 /* Set the flag */ 15016 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0; 15017 /* Probably not needed but lets be sure */ 15018 rack_clear_rate_sample(rack); 15019 /* 15020 * Save off the default values, socket options will poke 15021 * at these if pacing is not on or we have not yet 15022 * reached where pacing is on (gp_ready/fixed enabled). 15023 * When they get set into the CC module (when gp_ready 15024 * is enabled or we enable fixed) then we will set these 15025 * values into the CC and place in here the old values 15026 * so we have a restoral. Then we will set the flag 15027 * rc_pacing_cc_set. That way whenever we turn off pacing 15028 * or switch off this stack, we will know to go restore 15029 * the saved values. 15030 * 15031 * We specifically put into the beta the ecn value for pacing. 15032 */ 15033 rack->rc_new_rnd_needed = 1; 15034 rack->r_ctl.rc_split_limit = V_tcp_map_split_limit; 15035 /* We want abe like behavior as well */ 15036 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; 15037 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 15038 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 15039 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 15040 if (rack_rxt_clamp_thresh) { 15041 rack_translate_clamp_value(rack, rack_rxt_clamp_thresh); 15042 rack->excess_rxt_on = 1; 15043 } 15044 if (rack_uses_full_dgp_in_rec) 15045 rack->r_ctl.full_dgp_in_rec = 1; 15046 if (rack_fill_cw_state) 15047 rack->rc_pace_to_cwnd = 1; 15048 if (rack_pacing_min_seg) 15049 rack->r_ctl.rc_user_set_min_segs = rack_pacing_min_seg; 15050 if (use_rack_rr) 15051 rack->use_rack_rr = 1; 15052 if (rack_dnd_default) { 15053 rack->rc_pace_dnd = 1; 15054 } 15055 if (V_tcp_delack_enabled) 15056 tp->t_delayed_ack = 1; 15057 else 15058 tp->t_delayed_ack = 0; 15059 #ifdef TCP_ACCOUNTING 15060 if (rack_tcp_accounting) { 15061 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 15062 } 15063 #endif 15064 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss; 15065 rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_per_upper_bound_ca; 15066 if (rack_enable_shared_cwnd) 15067 rack->rack_enable_scwnd = 1; 15068 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; 15069 rack->rc_user_set_max_segs = rack_hptsi_segments; 15070 rack->rc_force_max_seg = 0; 15071 TAILQ_INIT(&rack->r_ctl.opt_list); 15072 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn; 15073 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn; 15074 if (rack_hibeta_setting) { 15075 rack->rack_hibeta = 1; 15076 if ((rack_hibeta_setting >= 50) && 15077 (rack_hibeta_setting <= 100)) { 15078 rack->r_ctl.rc_saved_beta.beta = rack_hibeta_setting; 15079 rack->r_ctl.saved_hibeta = rack_hibeta_setting; 15080 } 15081 } else { 15082 rack->r_ctl.saved_hibeta = 50; 15083 } 15084 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 15085 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 15086 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 15087 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 15088 rack->r_ctl.rc_highest_us_rtt = 0; 15089 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 15090 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 15091 if (rack_use_cmp_acks) 15092 rack->r_use_cmp_ack = 1; 15093 if (rack_disable_prr) 15094 rack->rack_no_prr = 1; 15095 if (rack_gp_no_rec_chg) 15096 rack->rc_gp_no_rec_chg = 1; 15097 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 15098 rack->rc_always_pace = 1; 15099 if (rack->rack_hibeta) 15100 rack_set_cc_pacing(rack); 15101 } else 15102 rack->rc_always_pace = 0; 15103 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 15104 rack->r_mbuf_queue = 1; 15105 else 15106 rack->r_mbuf_queue = 0; 15107 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15108 if (rack_limits_scwnd) 15109 rack->r_limit_scw = 1; 15110 else 15111 rack->r_limit_scw = 0; 15112 rack_init_retransmit_value(rack, rack_rxt_controls); 15113 rack->rc_labc = V_tcp_abc_l_var; 15114 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; 15115 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 15116 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 15117 rack->r_ctl.rc_min_to = rack_min_to; 15118 microuptime(&rack->r_ctl.act_rcv_time); 15119 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 15120 rack->rc_init_win = rack_default_init_window; 15121 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; 15122 if (rack_hw_up_only) 15123 rack->r_up_only = 1; 15124 if (rack_do_dyn_mul) { 15125 /* When dynamic adjustment is on CA needs to start at 100% */ 15126 rack->rc_gp_dyn_mul = 1; 15127 if (rack_do_dyn_mul >= 100) 15128 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 15129 } else 15130 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 15131 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 15132 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 15133 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 15134 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 15135 rack_probertt_filter_life); 15136 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 15137 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 15138 rack->r_ctl.rc_time_of_last_probertt = us_cts; 15139 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks(); 15140 rack->r_ctl.rc_time_probertt_starts = 0; 15141 if (rack_dsack_std_based & 0x1) { 15142 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 15143 rack->rc_rack_tmr_std_based = 1; 15144 } 15145 if (rack_dsack_std_based & 0x2) { 15146 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 15147 rack->rc_rack_use_dsack = 1; 15148 } 15149 /* We require at least one measurement, even if the sysctl is 0 */ 15150 if (rack_req_measurements) 15151 rack->r_ctl.req_measurements = rack_req_measurements; 15152 else 15153 rack->r_ctl.req_measurements = 1; 15154 if (rack_enable_hw_pacing) 15155 rack->rack_hdw_pace_ena = 1; 15156 if (rack_hw_rate_caps) 15157 rack->r_rack_hw_rate_caps = 1; 15158 #ifdef TCP_SAD_DETECTION 15159 rack->do_detection = 1; 15160 #else 15161 rack->do_detection = 0; 15162 #endif 15163 if (rack_non_rxt_use_cr) 15164 rack->rack_rec_nonrxt_use_cr = 1; 15165 /* Lets setup the fsb block */ 15166 err = rack_init_fsb(tp, rack); 15167 if (err) { 15168 uma_zfree(rack_pcb_zone, *ptr); 15169 *ptr = NULL; 15170 return (err); 15171 } 15172 if (rack_do_hystart) { 15173 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 15174 if (rack_do_hystart > 1) 15175 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 15176 if (rack_do_hystart > 2) 15177 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 15178 } 15179 /* Log what we will do with queries */ 15180 rack_log_chg_info(tp, rack, 7, 15181 no_query, 0, 0); 15182 if (rack_def_profile) 15183 rack_set_profile(rack, rack_def_profile); 15184 /* Cancel the GP measurement in progress */ 15185 tp->t_flags &= ~TF_GPUTINPROG; 15186 if ((tp->t_state != TCPS_CLOSED) && 15187 (tp->t_state != TCPS_TIME_WAIT)) { 15188 /* 15189 * We are already open, we may 15190 * need to adjust a few things. 15191 */ 15192 if (SEQ_GT(tp->snd_max, tp->iss)) 15193 snt = tp->snd_max - tp->iss; 15194 else 15195 snt = 0; 15196 iwin = rc_init_window(rack); 15197 if ((snt < iwin) && 15198 (no_query == 1)) { 15199 /* We are not past the initial window 15200 * on the first init (i.e. a stack switch 15201 * has not yet occured) so we need to make 15202 * sure cwnd and ssthresh is correct. 15203 */ 15204 if (tp->snd_cwnd < iwin) 15205 tp->snd_cwnd = iwin; 15206 /* 15207 * If we are within the initial window 15208 * we want ssthresh to be unlimited. Setting 15209 * it to the rwnd (which the default stack does 15210 * and older racks) is not really a good idea 15211 * since we want to be in SS and grow both the 15212 * cwnd and the rwnd (via dynamic rwnd growth). If 15213 * we set it to the rwnd then as the peer grows its 15214 * rwnd we will be stuck in CA and never hit SS. 15215 * 15216 * Its far better to raise it up high (this takes the 15217 * risk that there as been a loss already, probably 15218 * we should have an indicator in all stacks of loss 15219 * but we don't), but considering the normal use this 15220 * is a risk worth taking. The consequences of not 15221 * hitting SS are far worse than going one more time 15222 * into it early on (before we have sent even a IW). 15223 * It is highly unlikely that we will have had a loss 15224 * before getting the IW out. 15225 */ 15226 tp->snd_ssthresh = 0xffffffff; 15227 } 15228 /* 15229 * Any init based on sequence numbers 15230 * should be done in the deferred init path 15231 * since we can be CLOSED and not have them 15232 * inited when rack_init() is called. We 15233 * are not closed so lets call it. 15234 */ 15235 rack_deferred_init(tp, rack); 15236 } 15237 if ((tp->t_state != TCPS_CLOSED) && 15238 (tp->t_state != TCPS_TIME_WAIT) && 15239 (no_query == 0) && 15240 (tp->snd_una != tp->snd_max)) { 15241 err = rack_init_outstanding(tp, rack, us_cts, *ptr); 15242 if (err) { 15243 *ptr = NULL; 15244 return(err); 15245 } 15246 } 15247 rack_stop_all_timers(tp, rack); 15248 /* Setup all the t_flags2 */ 15249 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 15250 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 15251 else 15252 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 15253 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 15254 tp->t_flags2 |= TF2_MBUF_ACKCMP; 15255 /* 15256 * Timers in Rack are kept in microseconds so lets 15257 * convert any initial incoming variables 15258 * from ticks into usecs. Note that we 15259 * also change the values of t_srtt and t_rttvar, if 15260 * they are non-zero. They are kept with a 5 15261 * bit decimal so we have to carefully convert 15262 * these to get the full precision. 15263 */ 15264 rack_convert_rtts(tp); 15265 rack_log_hystart_event(rack, rack->r_ctl.roundends, 20); 15266 if ((tptoinpcb(tp)->inp_flags & INP_DROPPED) == 0) { 15267 /* We do not start any timers on DROPPED connections */ 15268 if (tp->t_fb->tfb_chg_query == NULL) { 15269 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 15270 } else { 15271 struct tcp_query_resp qr; 15272 int ret; 15273 15274 memset(&qr, 0, sizeof(qr)); 15275 15276 /* Get the misc time stamps and such for rack */ 15277 qr.req = TCP_QUERY_RACK_TIMES; 15278 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); 15279 if (ret == 1) { 15280 rack->r_ctl.rc_reorder_ts = qr.rack_reorder_ts; 15281 rack->r_ctl.num_dsack = qr.rack_num_dsacks; 15282 rack->r_ctl.rc_tlp_rxt_last_time = qr.rack_rxt_last_time; 15283 rack->r_ctl.rc_rack_min_rtt = qr.rack_min_rtt; 15284 rack->rc_rack_rtt = qr.rack_rtt; 15285 rack->r_ctl.rc_rack_tmit_time = qr.rack_tmit_time; 15286 rack->r_ctl.rc_sacked = qr.rack_sacked; 15287 rack->r_ctl.rc_holes_rxt = qr.rack_holes_rxt; 15288 rack->r_ctl.rc_prr_delivered = qr.rack_prr_delivered; 15289 rack->r_ctl.rc_prr_recovery_fs = qr.rack_prr_recovery_fs; 15290 rack->r_ctl.rc_prr_sndcnt = qr.rack_prr_sndcnt; 15291 rack->r_ctl.rc_prr_out = qr.rack_prr_out; 15292 if (qr.rack_tlp_out) { 15293 rack->rc_tlp_in_progress = 1; 15294 rack->r_ctl.rc_tlp_cnt_out = qr.rack_tlp_cnt_out; 15295 } else { 15296 rack->rc_tlp_in_progress = 0; 15297 rack->r_ctl.rc_tlp_cnt_out = 0; 15298 } 15299 if (qr.rack_srtt_measured) 15300 rack->rc_srtt_measure_made = 1; 15301 if (qr.rack_in_persist == 1) { 15302 rack->r_ctl.rc_went_idle_time = qr.rack_time_went_idle; 15303 #ifdef NETFLIX_SHARED_CWND 15304 if (rack->r_ctl.rc_scw) { 15305 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 15306 rack->rack_scwnd_is_idle = 1; 15307 } 15308 #endif 15309 rack->r_ctl.persist_lost_ends = 0; 15310 rack->probe_not_answered = 0; 15311 rack->forced_ack = 0; 15312 tp->t_rxtshift = 0; 15313 rack->rc_in_persist = 1; 15314 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 15315 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 15316 } 15317 if (qr.rack_wanted_output) 15318 rack->r_wanted_output = 1; 15319 rack_log_chg_info(tp, rack, 6, 15320 qr.rack_min_rtt, 15321 qr.rack_rtt, 15322 qr.rack_reorder_ts); 15323 } 15324 /* Get the old stack timers */ 15325 qr.req_param = 0; 15326 qr.req = TCP_QUERY_TIMERS_UP; 15327 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); 15328 if (ret) { 15329 /* 15330 * non-zero return means we have a timer('s) 15331 * to start. Zero means no timer (no keepalive 15332 * I suppose). 15333 */ 15334 uint32_t tov = 0; 15335 15336 rack->r_ctl.rc_hpts_flags = qr.timer_hpts_flags; 15337 if (qr.timer_hpts_flags & PACE_PKT_OUTPUT) { 15338 rack->r_ctl.rc_last_output_to = qr.timer_pacing_to; 15339 if (TSTMP_GT(qr.timer_pacing_to, us_cts)) 15340 tov = qr.timer_pacing_to - us_cts; 15341 else 15342 tov = HPTS_TICKS_PER_SLOT; 15343 } 15344 if (qr.timer_hpts_flags & PACE_TMR_MASK) { 15345 rack->r_ctl.rc_timer_exp = qr.timer_timer_exp; 15346 if (tov == 0) { 15347 if (TSTMP_GT(qr.timer_timer_exp, us_cts)) 15348 tov = qr.timer_timer_exp - us_cts; 15349 else 15350 tov = HPTS_TICKS_PER_SLOT; 15351 } 15352 } 15353 rack_log_chg_info(tp, rack, 4, 15354 rack->r_ctl.rc_hpts_flags, 15355 rack->r_ctl.rc_last_output_to, 15356 rack->r_ctl.rc_timer_exp); 15357 if (tov) { 15358 struct hpts_diag diag; 15359 15360 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(tov), 15361 __LINE__, &diag); 15362 rack_log_hpts_diag(rack, us_cts, &diag, &rack->r_ctl.act_rcv_time); 15363 } 15364 } 15365 } 15366 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, 15367 __LINE__, RACK_RTTS_INIT); 15368 } 15369 return (0); 15370 } 15371 15372 static int 15373 rack_handoff_ok(struct tcpcb *tp) 15374 { 15375 if ((tp->t_state == TCPS_CLOSED) || 15376 (tp->t_state == TCPS_LISTEN)) { 15377 /* Sure no problem though it may not stick */ 15378 return (0); 15379 } 15380 if ((tp->t_state == TCPS_SYN_SENT) || 15381 (tp->t_state == TCPS_SYN_RECEIVED)) { 15382 /* 15383 * We really don't know if you support sack, 15384 * you have to get to ESTAB or beyond to tell. 15385 */ 15386 return (EAGAIN); 15387 } 15388 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { 15389 /* 15390 * Rack will only send a FIN after all data is acknowledged. 15391 * So in this case we have more data outstanding. We can't 15392 * switch stacks until either all data and only the FIN 15393 * is left (in which case rack_init() now knows how 15394 * to deal with that) <or> all is acknowledged and we 15395 * are only left with incoming data, though why you 15396 * would want to switch to rack after all data is acknowledged 15397 * I have no idea (rrs)! 15398 */ 15399 return (EAGAIN); 15400 } 15401 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 15402 return (0); 15403 } 15404 /* 15405 * If we reach here we don't do SACK on this connection so we can 15406 * never do rack. 15407 */ 15408 return (EINVAL); 15409 } 15410 15411 static void 15412 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 15413 { 15414 15415 if (tp->t_fb_ptr) { 15416 uint32_t cnt_free = 0; 15417 struct tcp_rack *rack; 15418 struct rack_sendmap *rsm; 15419 15420 tcp_handle_orphaned_packets(tp); 15421 tp->t_flags &= ~TF_FORCEDATA; 15422 rack = (struct tcp_rack *)tp->t_fb_ptr; 15423 rack_log_pacing_delay_calc(rack, 15424 0, 15425 0, 15426 0, 15427 rack_get_gp_est(rack), /* delRate */ 15428 rack_get_lt_bw(rack), /* rttProp */ 15429 20, __LINE__, NULL, 0); 15430 #ifdef NETFLIX_SHARED_CWND 15431 if (rack->r_ctl.rc_scw) { 15432 uint32_t limit; 15433 15434 if (rack->r_limit_scw) 15435 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 15436 else 15437 limit = 0; 15438 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 15439 rack->r_ctl.rc_scw_index, 15440 limit); 15441 rack->r_ctl.rc_scw = NULL; 15442 } 15443 #endif 15444 if (rack->r_ctl.fsb.tcp_ip_hdr) { 15445 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); 15446 rack->r_ctl.fsb.tcp_ip_hdr = NULL; 15447 rack->r_ctl.fsb.th = NULL; 15448 } 15449 if (rack->rc_always_pace) { 15450 tcp_decrement_paced_conn(); 15451 rack_undo_cc_pacing(rack); 15452 rack->rc_always_pace = 0; 15453 } 15454 /* Clean up any options if they were not applied */ 15455 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 15456 struct deferred_opt_list *dol; 15457 15458 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 15459 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 15460 free(dol, M_TCPDO); 15461 } 15462 /* rack does not use force data but other stacks may clear it */ 15463 if (rack->r_ctl.crte != NULL) { 15464 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 15465 rack->rack_hdrw_pacing = 0; 15466 rack->r_ctl.crte = NULL; 15467 } 15468 #ifdef TCP_BLACKBOX 15469 tcp_log_flowend(tp); 15470 #endif 15471 /* 15472 * Lets take a different approach to purging just 15473 * get each one and free it like a cum-ack would and 15474 * not use a foreach loop. 15475 */ 15476 rsm = tqhash_min(rack->r_ctl.tqh); 15477 while (rsm) { 15478 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); 15479 rack->r_ctl.rc_num_maps_alloced--; 15480 uma_zfree(rack_zone, rsm); 15481 rsm = tqhash_min(rack->r_ctl.tqh); 15482 } 15483 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15484 while (rsm) { 15485 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 15486 rack->r_ctl.rc_num_maps_alloced--; 15487 rack->rc_free_cnt--; 15488 cnt_free++; 15489 uma_zfree(rack_zone, rsm); 15490 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15491 } 15492 if ((rack->r_ctl.rc_num_maps_alloced > 0) && 15493 (tcp_bblogging_on(tp))) { 15494 union tcp_log_stackspecific log; 15495 struct timeval tv; 15496 15497 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15498 log.u_bbr.flex8 = 10; 15499 log.u_bbr.flex1 = rack->r_ctl.rc_num_maps_alloced; 15500 log.u_bbr.flex2 = rack->rc_free_cnt; 15501 log.u_bbr.flex3 = cnt_free; 15502 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15503 rsm = tqhash_min(rack->r_ctl.tqh); 15504 log.u_bbr.delRate = (uint64_t)rsm; 15505 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15506 log.u_bbr.cur_del_rate = (uint64_t)rsm; 15507 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15508 log.u_bbr.pkt_epoch = __LINE__; 15509 (void)tcp_log_event(tp, NULL, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 15510 0, &log, false, NULL, NULL, 0, &tv); 15511 } 15512 KASSERT((rack->r_ctl.rc_num_maps_alloced == 0), 15513 ("rack:%p num_aloc:%u after freeing all?", 15514 rack, 15515 rack->r_ctl.rc_num_maps_alloced)); 15516 rack->rc_free_cnt = 0; 15517 free(rack->r_ctl.tqh, M_TCPFSB); 15518 rack->r_ctl.tqh = NULL; 15519 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 15520 tp->t_fb_ptr = NULL; 15521 } 15522 /* Make sure snd_nxt is correctly set */ 15523 tp->snd_nxt = tp->snd_max; 15524 } 15525 15526 static void 15527 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 15528 { 15529 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 15530 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0; 15531 } 15532 switch (tp->t_state) { 15533 case TCPS_SYN_SENT: 15534 rack->r_state = TCPS_SYN_SENT; 15535 rack->r_substate = rack_do_syn_sent; 15536 break; 15537 case TCPS_SYN_RECEIVED: 15538 rack->r_state = TCPS_SYN_RECEIVED; 15539 rack->r_substate = rack_do_syn_recv; 15540 break; 15541 case TCPS_ESTABLISHED: 15542 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15543 rack->r_state = TCPS_ESTABLISHED; 15544 rack->r_substate = rack_do_established; 15545 break; 15546 case TCPS_CLOSE_WAIT: 15547 rack->r_state = TCPS_CLOSE_WAIT; 15548 rack->r_substate = rack_do_close_wait; 15549 break; 15550 case TCPS_FIN_WAIT_1: 15551 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15552 rack->r_state = TCPS_FIN_WAIT_1; 15553 rack->r_substate = rack_do_fin_wait_1; 15554 break; 15555 case TCPS_CLOSING: 15556 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15557 rack->r_state = TCPS_CLOSING; 15558 rack->r_substate = rack_do_closing; 15559 break; 15560 case TCPS_LAST_ACK: 15561 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15562 rack->r_state = TCPS_LAST_ACK; 15563 rack->r_substate = rack_do_lastack; 15564 break; 15565 case TCPS_FIN_WAIT_2: 15566 rack->r_state = TCPS_FIN_WAIT_2; 15567 rack->r_substate = rack_do_fin_wait_2; 15568 break; 15569 case TCPS_LISTEN: 15570 case TCPS_CLOSED: 15571 case TCPS_TIME_WAIT: 15572 default: 15573 break; 15574 }; 15575 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 15576 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 15577 15578 } 15579 15580 static void 15581 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 15582 { 15583 /* 15584 * We received an ack, and then did not 15585 * call send or were bounced out due to the 15586 * hpts was running. Now a timer is up as well, is 15587 * it the right timer? 15588 */ 15589 struct rack_sendmap *rsm; 15590 int tmr_up; 15591 15592 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 15593 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 15594 return; 15595 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 15596 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 15597 (tmr_up == PACE_TMR_RXT)) { 15598 /* Should be an RXT */ 15599 return; 15600 } 15601 if (rsm == NULL) { 15602 /* Nothing outstanding? */ 15603 if (tp->t_flags & TF_DELACK) { 15604 if (tmr_up == PACE_TMR_DELACK) 15605 /* We are supposed to have delayed ack up and we do */ 15606 return; 15607 } else if (sbavail(&tptosocket(tp)->so_snd) && (tmr_up == PACE_TMR_RXT)) { 15608 /* 15609 * if we hit enobufs then we would expect the possibility 15610 * of nothing outstanding and the RXT up (and the hptsi timer). 15611 */ 15612 return; 15613 } else if (((V_tcp_always_keepalive || 15614 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 15615 (tp->t_state <= TCPS_CLOSING)) && 15616 (tmr_up == PACE_TMR_KEEP) && 15617 (tp->snd_max == tp->snd_una)) { 15618 /* We should have keep alive up and we do */ 15619 return; 15620 } 15621 } 15622 if (SEQ_GT(tp->snd_max, tp->snd_una) && 15623 ((tmr_up == PACE_TMR_TLP) || 15624 (tmr_up == PACE_TMR_RACK) || 15625 (tmr_up == PACE_TMR_RXT))) { 15626 /* 15627 * Either a Rack, TLP or RXT is fine if we 15628 * have outstanding data. 15629 */ 15630 return; 15631 } else if (tmr_up == PACE_TMR_DELACK) { 15632 /* 15633 * If the delayed ack was going to go off 15634 * before the rtx/tlp/rack timer were going to 15635 * expire, then that would be the timer in control. 15636 * Note we don't check the time here trusting the 15637 * code is correct. 15638 */ 15639 return; 15640 } 15641 /* 15642 * Ok the timer originally started is not what we want now. 15643 * We will force the hpts to be stopped if any, and restart 15644 * with the slot set to what was in the saved slot. 15645 */ 15646 if (tcp_in_hpts(rack->rc_tp)) { 15647 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 15648 uint32_t us_cts; 15649 15650 us_cts = tcp_get_usecs(NULL); 15651 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 15652 rack->r_early = 1; 15653 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 15654 } 15655 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 15656 } 15657 tcp_hpts_remove(rack->rc_tp); 15658 } 15659 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 15660 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 15661 } 15662 15663 15664 static void 15665 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts) 15666 { 15667 if ((SEQ_LT(tp->snd_wl1, seq) || 15668 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || 15669 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { 15670 /* keep track of pure window updates */ 15671 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) 15672 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 15673 tp->snd_wnd = tiwin; 15674 rack_validate_fo_sendwin_up(tp, rack); 15675 tp->snd_wl1 = seq; 15676 tp->snd_wl2 = ack; 15677 if (tp->snd_wnd > tp->max_sndwnd) 15678 tp->max_sndwnd = tp->snd_wnd; 15679 rack->r_wanted_output = 1; 15680 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { 15681 tp->snd_wnd = tiwin; 15682 rack_validate_fo_sendwin_up(tp, rack); 15683 tp->snd_wl1 = seq; 15684 tp->snd_wl2 = ack; 15685 } else { 15686 /* Not a valid win update */ 15687 return; 15688 } 15689 if (tp->snd_wnd > tp->max_sndwnd) 15690 tp->max_sndwnd = tp->snd_wnd; 15691 /* Do we exit persists? */ 15692 if ((rack->rc_in_persist != 0) && 15693 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 15694 rack->r_ctl.rc_pace_min_segs))) { 15695 rack_exit_persist(tp, rack, cts); 15696 } 15697 /* Do we enter persists? */ 15698 if ((rack->rc_in_persist == 0) && 15699 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 15700 TCPS_HAVEESTABLISHED(tp->t_state) && 15701 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 15702 sbavail(&tptosocket(tp)->so_snd) && 15703 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 15704 /* 15705 * Here the rwnd is less than 15706 * the pacing size, we are established, 15707 * nothing is outstanding, and there is 15708 * data to send. Enter persists. 15709 */ 15710 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, ack); 15711 } 15712 } 15713 15714 static void 15715 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq) 15716 { 15717 15718 if (tcp_bblogging_on(rack->rc_tp)) { 15719 struct inpcb *inp = tptoinpcb(tp); 15720 union tcp_log_stackspecific log; 15721 struct timeval ltv; 15722 char tcp_hdr_buf[60]; 15723 struct tcphdr *th; 15724 struct timespec ts; 15725 uint32_t orig_snd_una; 15726 uint8_t xx = 0; 15727 15728 #ifdef TCP_REQUEST_TRK 15729 struct tcp_sendfile_track *tcp_req; 15730 15731 if (SEQ_GT(ae->ack, tp->snd_una)) { 15732 tcp_req = tcp_req_find_req_for_seq(tp, (ae->ack-1)); 15733 } else { 15734 tcp_req = tcp_req_find_req_for_seq(tp, ae->ack); 15735 } 15736 #endif 15737 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15738 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 15739 if (rack->rack_no_prr == 0) 15740 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 15741 else 15742 log.u_bbr.flex1 = 0; 15743 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 15744 log.u_bbr.use_lt_bw <<= 1; 15745 log.u_bbr.use_lt_bw |= rack->r_might_revert; 15746 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 15747 log.u_bbr.bbr_state = rack->rc_free_cnt; 15748 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 15749 log.u_bbr.pkts_out = tp->t_maxseg; 15750 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 15751 log.u_bbr.flex7 = 1; 15752 log.u_bbr.lost = ae->flags; 15753 log.u_bbr.cwnd_gain = ackval; 15754 log.u_bbr.pacing_gain = 0x2; 15755 if (ae->flags & TSTMP_HDWR) { 15756 /* Record the hardware timestamp if present */ 15757 log.u_bbr.flex3 = M_TSTMP; 15758 ts.tv_sec = ae->timestamp / 1000000000; 15759 ts.tv_nsec = ae->timestamp % 1000000000; 15760 ltv.tv_sec = ts.tv_sec; 15761 ltv.tv_usec = ts.tv_nsec / 1000; 15762 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 15763 } else if (ae->flags & TSTMP_LRO) { 15764 /* Record the LRO the arrival timestamp */ 15765 log.u_bbr.flex3 = M_TSTMP_LRO; 15766 ts.tv_sec = ae->timestamp / 1000000000; 15767 ts.tv_nsec = ae->timestamp % 1000000000; 15768 ltv.tv_sec = ts.tv_sec; 15769 ltv.tv_usec = ts.tv_nsec / 1000; 15770 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 15771 } 15772 log.u_bbr.timeStamp = tcp_get_usecs(<v); 15773 /* Log the rcv time */ 15774 log.u_bbr.delRate = ae->timestamp; 15775 #ifdef TCP_REQUEST_TRK 15776 log.u_bbr.applimited = tp->t_tcpreq_closed; 15777 log.u_bbr.applimited <<= 8; 15778 log.u_bbr.applimited |= tp->t_tcpreq_open; 15779 log.u_bbr.applimited <<= 8; 15780 log.u_bbr.applimited |= tp->t_tcpreq_req; 15781 if (tcp_req) { 15782 /* Copy out any client req info */ 15783 /* seconds */ 15784 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); 15785 /* useconds */ 15786 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); 15787 log.u_bbr.rttProp = tcp_req->timestamp; 15788 log.u_bbr.cur_del_rate = tcp_req->start; 15789 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { 15790 log.u_bbr.flex8 |= 1; 15791 } else { 15792 log.u_bbr.flex8 |= 2; 15793 log.u_bbr.bw_inuse = tcp_req->end; 15794 } 15795 log.u_bbr.flex6 = tcp_req->start_seq; 15796 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { 15797 log.u_bbr.flex8 |= 4; 15798 log.u_bbr.epoch = tcp_req->end_seq; 15799 } 15800 } 15801 #endif 15802 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf)); 15803 th = (struct tcphdr *)tcp_hdr_buf; 15804 th->th_seq = ae->seq; 15805 th->th_ack = ae->ack; 15806 th->th_win = ae->win; 15807 /* Now fill in the ports */ 15808 th->th_sport = inp->inp_fport; 15809 th->th_dport = inp->inp_lport; 15810 tcp_set_flags(th, ae->flags); 15811 /* Now do we have a timestamp option? */ 15812 if (ae->flags & HAS_TSTMP) { 15813 u_char *cp; 15814 uint32_t val; 15815 15816 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); 15817 cp = (u_char *)(th + 1); 15818 *cp = TCPOPT_NOP; 15819 cp++; 15820 *cp = TCPOPT_NOP; 15821 cp++; 15822 *cp = TCPOPT_TIMESTAMP; 15823 cp++; 15824 *cp = TCPOLEN_TIMESTAMP; 15825 cp++; 15826 val = htonl(ae->ts_value); 15827 bcopy((char *)&val, 15828 (char *)cp, sizeof(uint32_t)); 15829 val = htonl(ae->ts_echo); 15830 bcopy((char *)&val, 15831 (char *)(cp + 4), sizeof(uint32_t)); 15832 } else 15833 th->th_off = (sizeof(struct tcphdr) >> 2); 15834 15835 /* 15836 * For sane logging we need to play a little trick. 15837 * If the ack were fully processed we would have moved 15838 * snd_una to high_seq, but since compressed acks are 15839 * processed in two phases, at this point (logging) snd_una 15840 * won't be advanced. So we would see multiple acks showing 15841 * the advancement. We can prevent that by "pretending" that 15842 * snd_una was advanced and then un-advancing it so that the 15843 * logging code has the right value for tlb_snd_una. 15844 */ 15845 if (tp->snd_una != high_seq) { 15846 orig_snd_una = tp->snd_una; 15847 tp->snd_una = high_seq; 15848 xx = 1; 15849 } else 15850 xx = 0; 15851 TCP_LOG_EVENTP(tp, th, 15852 &tptosocket(tp)->so_rcv, 15853 &tptosocket(tp)->so_snd, TCP_LOG_IN, 0, 15854 0, &log, true, <v); 15855 if (xx) { 15856 tp->snd_una = orig_snd_una; 15857 } 15858 } 15859 15860 } 15861 15862 static void 15863 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts) 15864 { 15865 uint32_t us_rtt; 15866 /* 15867 * A persist or keep-alive was forced out, update our 15868 * min rtt time. Note now worry about lost responses. 15869 * When a subsequent keep-alive or persist times out 15870 * and forced_ack is still on, then the last probe 15871 * was not responded to. In such cases we have a 15872 * sysctl that controls the behavior. Either we apply 15873 * the rtt but with reduced confidence (0). Or we just 15874 * plain don't apply the rtt estimate. Having data flow 15875 * will clear the probe_not_answered flag i.e. cum-ack 15876 * move forward <or> exiting and reentering persists. 15877 */ 15878 15879 rack->forced_ack = 0; 15880 rack->rc_tp->t_rxtshift = 0; 15881 if ((rack->rc_in_persist && 15882 (tiwin == rack->rc_tp->snd_wnd)) || 15883 (rack->rc_in_persist == 0)) { 15884 /* 15885 * In persists only apply the RTT update if this is 15886 * a response to our window probe. And that 15887 * means the rwnd sent must match the current 15888 * snd_wnd. If it does not, then we got a 15889 * window update ack instead. For keepalive 15890 * we allow the answer no matter what the window. 15891 * 15892 * Note that if the probe_not_answered is set then 15893 * the forced_ack_ts is the oldest one i.e. the first 15894 * probe sent that might have been lost. This assures 15895 * us that if we do calculate an RTT it is longer not 15896 * some short thing. 15897 */ 15898 if (rack->rc_in_persist) 15899 counter_u64_add(rack_persists_acks, 1); 15900 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 15901 if (us_rtt == 0) 15902 us_rtt = 1; 15903 if (rack->probe_not_answered == 0) { 15904 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 15905 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); 15906 } else { 15907 /* We have a retransmitted probe here too */ 15908 if (rack_apply_rtt_with_reduced_conf) { 15909 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 15910 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1); 15911 } 15912 } 15913 } 15914 } 15915 15916 static int 15917 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) 15918 { 15919 /* 15920 * Handle a "special" compressed ack mbuf. Each incoming 15921 * ack has only four possible dispositions: 15922 * 15923 * A) It moves the cum-ack forward 15924 * B) It is behind the cum-ack. 15925 * C) It is a window-update ack. 15926 * D) It is a dup-ack. 15927 * 15928 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES 15929 * in the incoming mbuf. We also need to still pay attention 15930 * to nxt_pkt since there may be another packet after this 15931 * one. 15932 */ 15933 #ifdef TCP_ACCOUNTING 15934 uint64_t ts_val; 15935 uint64_t rdstc; 15936 #endif 15937 int segsiz; 15938 struct timespec ts; 15939 struct tcp_rack *rack; 15940 struct tcp_ackent *ae; 15941 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack; 15942 int cnt, i, did_out, ourfinisacked = 0; 15943 struct tcpopt to_holder, *to = NULL; 15944 #ifdef TCP_ACCOUNTING 15945 int win_up_req = 0; 15946 #endif 15947 int nsegs = 0; 15948 int under_pacing = 0; 15949 int recovery = 0; 15950 #ifdef TCP_ACCOUNTING 15951 sched_pin(); 15952 #endif 15953 rack = (struct tcp_rack *)tp->t_fb_ptr; 15954 if (rack->gp_ready && 15955 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) 15956 under_pacing = 1; 15957 15958 if (rack->r_state != tp->t_state) 15959 rack_set_state(tp, rack); 15960 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 15961 (tp->t_flags & TF_GPUTINPROG)) { 15962 /* 15963 * We have a goodput in progress 15964 * and we have entered a late state. 15965 * Do we have enough data in the sb 15966 * to handle the GPUT request? 15967 */ 15968 uint32_t bytes; 15969 15970 bytes = tp->gput_ack - tp->gput_seq; 15971 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 15972 bytes += tp->gput_seq - tp->snd_una; 15973 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 15974 /* 15975 * There are not enough bytes in the socket 15976 * buffer that have been sent to cover this 15977 * measurement. Cancel it. 15978 */ 15979 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 15980 rack->r_ctl.rc_gp_srtt /*flex1*/, 15981 tp->gput_seq, 15982 0, 0, 18, __LINE__, NULL, 0); 15983 tp->t_flags &= ~TF_GPUTINPROG; 15984 } 15985 } 15986 to = &to_holder; 15987 to->to_flags = 0; 15988 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 15989 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 15990 cnt = m->m_len / sizeof(struct tcp_ackent); 15991 counter_u64_add(rack_multi_single_eq, cnt); 15992 high_seq = tp->snd_una; 15993 the_win = tp->snd_wnd; 15994 win_seq = tp->snd_wl1; 15995 win_upd_ack = tp->snd_wl2; 15996 cts = tcp_tv_to_usectick(tv); 15997 ms_cts = tcp_tv_to_mssectick(tv); 15998 rack->r_ctl.rc_rcvtime = cts; 15999 segsiz = ctf_fixed_maxseg(tp); 16000 if ((rack->rc_gp_dyn_mul) && 16001 (rack->use_fixed_rate == 0) && 16002 (rack->rc_always_pace)) { 16003 /* Check in on probertt */ 16004 rack_check_probe_rtt(rack, cts); 16005 } 16006 for (i = 0; i < cnt; i++) { 16007 #ifdef TCP_ACCOUNTING 16008 ts_val = get_cyclecount(); 16009 #endif 16010 rack_clear_rate_sample(rack); 16011 ae = ((mtod(m, struct tcp_ackent *)) + i); 16012 if (ae->flags & TH_FIN) 16013 rack_log_pacing_delay_calc(rack, 16014 0, 16015 0, 16016 0, 16017 rack_get_gp_est(rack), /* delRate */ 16018 rack_get_lt_bw(rack), /* rttProp */ 16019 20, __LINE__, NULL, 0); 16020 /* Setup the window */ 16021 tiwin = ae->win << tp->snd_scale; 16022 if (tiwin > rack->r_ctl.rc_high_rwnd) 16023 rack->r_ctl.rc_high_rwnd = tiwin; 16024 /* figure out the type of ack */ 16025 if (SEQ_LT(ae->ack, high_seq)) { 16026 /* Case B*/ 16027 ae->ack_val_set = ACK_BEHIND; 16028 } else if (SEQ_GT(ae->ack, high_seq)) { 16029 /* Case A */ 16030 ae->ack_val_set = ACK_CUMACK; 16031 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ 16032 /* Case D */ 16033 ae->ack_val_set = ACK_DUPACK; 16034 } else { 16035 /* Case C */ 16036 ae->ack_val_set = ACK_RWND; 16037 } 16038 if (rack->sack_attack_disable > 0) { 16039 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__); 16040 rack->r_ctl.ack_during_sd++; 16041 } 16042 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 16043 /* Validate timestamp */ 16044 if (ae->flags & HAS_TSTMP) { 16045 /* Setup for a timestamp */ 16046 to->to_flags = TOF_TS; 16047 ae->ts_echo -= tp->ts_offset; 16048 to->to_tsecr = ae->ts_echo; 16049 to->to_tsval = ae->ts_value; 16050 /* 16051 * If echoed timestamp is later than the current time, fall back to 16052 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 16053 * were used when this connection was established. 16054 */ 16055 if (TSTMP_GT(ae->ts_echo, ms_cts)) 16056 to->to_tsecr = 0; 16057 if (tp->ts_recent && 16058 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 16059 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 16060 #ifdef TCP_ACCOUNTING 16061 rdstc = get_cyclecount(); 16062 if (rdstc > ts_val) { 16063 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16064 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 16065 } 16066 } 16067 #endif 16068 continue; 16069 } 16070 } 16071 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 16072 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 16073 tp->ts_recent_age = tcp_ts_getticks(); 16074 tp->ts_recent = ae->ts_value; 16075 } 16076 } else { 16077 /* Setup for a no options */ 16078 to->to_flags = 0; 16079 } 16080 /* Update the rcv time and perform idle reduction possibly */ 16081 if (tp->t_idle_reduce && 16082 (tp->snd_max == tp->snd_una) && 16083 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 16084 counter_u64_add(rack_input_idle_reduces, 1); 16085 rack_cc_after_idle(rack, tp); 16086 } 16087 tp->t_rcvtime = ticks; 16088 /* Now what about ECN of a chain of pure ACKs? */ 16089 if (tcp_ecn_input_segment(tp, ae->flags, 0, 16090 tcp_packets_this_ack(tp, ae->ack), 16091 ae->codepoint)) 16092 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__); 16093 #ifdef TCP_ACCOUNTING 16094 /* Count for the specific type of ack in */ 16095 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16096 tp->tcp_cnt_counters[ae->ack_val_set]++; 16097 } 16098 #endif 16099 /* 16100 * Note how we could move up these in the determination 16101 * above, but we don't so that way the timestamp checks (and ECN) 16102 * is done first before we do any processing on the ACK. 16103 * The non-compressed path through the code has this 16104 * weakness (noted by @jtl) that it actually does some 16105 * processing before verifying the timestamp information. 16106 * We don't take that path here which is why we set 16107 * the ack_val_set first, do the timestamp and ecn 16108 * processing, and then look at what we have setup. 16109 */ 16110 if (ae->ack_val_set == ACK_BEHIND) { 16111 /* 16112 * Case B flag reordering, if window is not closed 16113 * or it could be a keep-alive or persists 16114 */ 16115 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { 16116 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 16117 if (rack->r_ctl.rc_reorder_ts == 0) 16118 rack->r_ctl.rc_reorder_ts = 1; 16119 } 16120 } else if (ae->ack_val_set == ACK_DUPACK) { 16121 /* Case D */ 16122 rack_strike_dupack(rack); 16123 } else if (ae->ack_val_set == ACK_RWND) { 16124 /* Case C */ 16125 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 16126 ts.tv_sec = ae->timestamp / 1000000000; 16127 ts.tv_nsec = ae->timestamp % 1000000000; 16128 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 16129 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 16130 } else { 16131 rack->r_ctl.act_rcv_time = *tv; 16132 } 16133 if (rack->forced_ack) { 16134 rack_handle_probe_response(rack, tiwin, 16135 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 16136 } 16137 #ifdef TCP_ACCOUNTING 16138 win_up_req = 1; 16139 #endif 16140 win_upd_ack = ae->ack; 16141 win_seq = ae->seq; 16142 the_win = tiwin; 16143 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts); 16144 } else { 16145 /* Case A */ 16146 if (SEQ_GT(ae->ack, tp->snd_max)) { 16147 /* 16148 * We just send an ack since the incoming 16149 * ack is beyond the largest seq we sent. 16150 */ 16151 if ((tp->t_flags & TF_ACKNOW) == 0) { 16152 ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt); 16153 if (tp->t_flags && TF_ACKNOW) 16154 rack->r_wanted_output = 1; 16155 } 16156 } else { 16157 nsegs++; 16158 /* If the window changed setup to update */ 16159 if (tiwin != tp->snd_wnd) { 16160 win_upd_ack = ae->ack; 16161 win_seq = ae->seq; 16162 the_win = tiwin; 16163 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts); 16164 } 16165 #ifdef TCP_ACCOUNTING 16166 /* Account for the acks */ 16167 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16168 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); 16169 } 16170 #endif 16171 high_seq = ae->ack; 16172 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) 16173 rack_log_hystart_event(rack, high_seq, 8); 16174 /* Setup our act_rcv_time */ 16175 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 16176 ts.tv_sec = ae->timestamp / 1000000000; 16177 ts.tv_nsec = ae->timestamp % 1000000000; 16178 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 16179 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 16180 } else { 16181 rack->r_ctl.act_rcv_time = *tv; 16182 } 16183 rack_process_to_cumack(tp, rack, ae->ack, cts, to, 16184 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); 16185 #ifdef TCP_REQUEST_TRK 16186 rack_req_check_for_comp(rack, high_seq); 16187 #endif 16188 if (rack->rc_dsack_round_seen) { 16189 /* Is the dsack round over? */ 16190 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { 16191 /* Yes it is */ 16192 rack->rc_dsack_round_seen = 0; 16193 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 16194 } 16195 } 16196 } 16197 } 16198 /* And lets be sure to commit the rtt measurements for this ack */ 16199 tcp_rack_xmit_timer_commit(rack, tp); 16200 #ifdef TCP_ACCOUNTING 16201 rdstc = get_cyclecount(); 16202 if (rdstc > ts_val) { 16203 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16204 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 16205 if (ae->ack_val_set == ACK_CUMACK) 16206 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); 16207 } 16208 } 16209 #endif 16210 } 16211 #ifdef TCP_ACCOUNTING 16212 ts_val = get_cyclecount(); 16213 #endif 16214 /* Tend to any collapsed window */ 16215 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) { 16216 /* The peer collapsed the window */ 16217 rack_collapsed_window(rack, (tp->snd_max - high_seq), high_seq, __LINE__); 16218 } else if (rack->rc_has_collapsed) 16219 rack_un_collapse_window(rack, __LINE__); 16220 if ((rack->r_collapse_point_valid) && 16221 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point))) 16222 rack->r_collapse_point_valid = 0; 16223 acked_amount = acked = (high_seq - tp->snd_una); 16224 if (acked) { 16225 /* 16226 * The draft (v3) calls for us to use SEQ_GEQ, but that 16227 * causes issues when we are just going app limited. Lets 16228 * instead use SEQ_GT <or> where its equal but more data 16229 * is outstanding. 16230 * 16231 * Also make sure we are on the last ack of a series. We 16232 * have to have all the ack's processed in queue to know 16233 * if there is something left outstanding. 16234 * 16235 */ 16236 if (SEQ_GEQ(high_seq, rack->r_ctl.roundends) && 16237 (rack->rc_new_rnd_needed == 0) && 16238 (nxt_pkt == 0)) { 16239 rack_log_hystart_event(rack, high_seq, 21); 16240 rack->r_ctl.current_round++; 16241 /* Force the next send to setup the next round */ 16242 rack->rc_new_rnd_needed = 1; 16243 if (CC_ALGO(tp)->newround != NULL) { 16244 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); 16245 } 16246 } 16247 /* 16248 * Clear the probe not answered flag 16249 * since cum-ack moved forward. 16250 */ 16251 rack->probe_not_answered = 0; 16252 if (rack->sack_attack_disable == 0) 16253 rack_do_decay(rack); 16254 if (acked >= segsiz) { 16255 /* 16256 * You only get credit for 16257 * MSS and greater (and you get extra 16258 * credit for larger cum-ack moves). 16259 */ 16260 int ac; 16261 16262 ac = acked / segsiz; 16263 rack->r_ctl.ack_count += ac; 16264 counter_u64_add(rack_ack_total, ac); 16265 } 16266 if (rack->r_ctl.ack_count > 0xfff00000) { 16267 /* 16268 * reduce the number to keep us under 16269 * a uint32_t. 16270 */ 16271 rack->r_ctl.ack_count /= 2; 16272 rack->r_ctl.sack_count /= 2; 16273 } 16274 if (tp->t_flags & TF_NEEDSYN) { 16275 /* 16276 * T/TCP: Connection was half-synchronized, and our SYN has 16277 * been ACK'd (so connection is now fully synchronized). Go 16278 * to non-starred state, increment snd_una for ACK of SYN, 16279 * and check if we can do window scaling. 16280 */ 16281 tp->t_flags &= ~TF_NEEDSYN; 16282 tp->snd_una++; 16283 acked_amount = acked = (high_seq - tp->snd_una); 16284 } 16285 if (acked > sbavail(&so->so_snd)) 16286 acked_amount = sbavail(&so->so_snd); 16287 #ifdef TCP_SAD_DETECTION 16288 /* 16289 * We only care on a cum-ack move if we are in a sack-disabled 16290 * state. We have already added in to the ack_count, and we never 16291 * would disable on a cum-ack move, so we only care to do the 16292 * detection if it may "undo" it, i.e. we were in disabled already. 16293 */ 16294 if (rack->sack_attack_disable) 16295 rack_do_detection(tp, rack, acked_amount, segsiz); 16296 #endif 16297 if (IN_FASTRECOVERY(tp->t_flags) && 16298 (rack->rack_no_prr == 0)) 16299 rack_update_prr(tp, rack, acked_amount, high_seq); 16300 if (IN_RECOVERY(tp->t_flags)) { 16301 if (SEQ_LT(high_seq, tp->snd_recover) && 16302 (SEQ_LT(high_seq, tp->snd_max))) { 16303 tcp_rack_partialack(tp); 16304 } else { 16305 rack_post_recovery(tp, high_seq); 16306 recovery = 1; 16307 } 16308 } 16309 /* Handle the rack-log-ack part (sendmap) */ 16310 if ((sbused(&so->so_snd) == 0) && 16311 (acked > acked_amount) && 16312 (tp->t_state >= TCPS_FIN_WAIT_1) && 16313 (tp->t_flags & TF_SENTFIN)) { 16314 /* 16315 * We must be sure our fin 16316 * was sent and acked (we can be 16317 * in FIN_WAIT_1 without having 16318 * sent the fin). 16319 */ 16320 ourfinisacked = 1; 16321 /* 16322 * Lets make sure snd_una is updated 16323 * since most likely acked_amount = 0 (it 16324 * should be). 16325 */ 16326 tp->snd_una = high_seq; 16327 } 16328 /* Did we make a RTO error? */ 16329 if ((tp->t_flags & TF_PREVVALID) && 16330 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 16331 tp->t_flags &= ~TF_PREVVALID; 16332 if (tp->t_rxtshift == 1 && 16333 (int)(ticks - tp->t_badrxtwin) < 0) 16334 rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__); 16335 } 16336 /* Handle the data in the socket buffer */ 16337 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 16338 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 16339 if (acked_amount > 0) { 16340 struct mbuf *mfree; 16341 16342 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, recovery); 16343 SOCKBUF_LOCK(&so->so_snd); 16344 mfree = sbcut_locked(&so->so_snd, acked_amount); 16345 tp->snd_una = high_seq; 16346 /* Note we want to hold the sb lock through the sendmap adjust */ 16347 rack_adjust_sendmap_head(rack, &so->so_snd); 16348 /* Wake up the socket if we have room to write more */ 16349 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 16350 sowwakeup_locked(so); 16351 if ((recovery == 1) && 16352 (rack->excess_rxt_on) && 16353 (rack->r_cwnd_was_clamped == 0)) { 16354 do_rack_excess_rxt(tp, rack); 16355 } else if (rack->r_cwnd_was_clamped) 16356 do_rack_check_for_unclamp(tp, rack); 16357 m_freem(mfree); 16358 } 16359 /* update progress */ 16360 tp->t_acktime = ticks; 16361 rack_log_progress_event(rack, tp, tp->t_acktime, 16362 PROGRESS_UPDATE, __LINE__); 16363 /* Clear out shifts and such */ 16364 tp->t_rxtshift = 0; 16365 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 16366 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 16367 rack->rc_tlp_in_progress = 0; 16368 rack->r_ctl.rc_tlp_cnt_out = 0; 16369 /* Send recover and snd_nxt must be dragged along */ 16370 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 16371 tp->snd_recover = tp->snd_una; 16372 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 16373 tp->snd_nxt = tp->snd_max; 16374 /* 16375 * If the RXT timer is running we want to 16376 * stop it, so we can restart a TLP (or new RXT). 16377 */ 16378 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 16379 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16380 tp->snd_wl2 = high_seq; 16381 tp->t_dupacks = 0; 16382 if (under_pacing && 16383 (rack->use_fixed_rate == 0) && 16384 (rack->in_probe_rtt == 0) && 16385 rack->rc_gp_dyn_mul && 16386 rack->rc_always_pace) { 16387 /* Check if we are dragging bottom */ 16388 rack_check_bottom_drag(tp, rack, so); 16389 } 16390 if (tp->snd_una == tp->snd_max) { 16391 tp->t_flags &= ~TF_PREVVALID; 16392 rack->r_ctl.retran_during_recovery = 0; 16393 rack->rc_suspicious = 0; 16394 rack->r_ctl.dsack_byte_cnt = 0; 16395 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 16396 if (rack->r_ctl.rc_went_idle_time == 0) 16397 rack->r_ctl.rc_went_idle_time = 1; 16398 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 16399 if (sbavail(&tptosocket(tp)->so_snd) == 0) 16400 tp->t_acktime = 0; 16401 /* Set so we might enter persists... */ 16402 rack->r_wanted_output = 1; 16403 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16404 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 16405 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 16406 (sbavail(&so->so_snd) == 0) && 16407 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 16408 /* 16409 * The socket was gone and the 16410 * peer sent data (not now in the past), time to 16411 * reset him. 16412 */ 16413 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16414 /* tcp_close will kill the inp pre-log the Reset */ 16415 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 16416 #ifdef TCP_ACCOUNTING 16417 rdstc = get_cyclecount(); 16418 if (rdstc > ts_val) { 16419 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16420 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16421 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16422 } 16423 } 16424 #endif 16425 m_freem(m); 16426 tp = tcp_close(tp); 16427 if (tp == NULL) { 16428 #ifdef TCP_ACCOUNTING 16429 sched_unpin(); 16430 #endif 16431 return (1); 16432 } 16433 /* 16434 * We would normally do drop-with-reset which would 16435 * send back a reset. We can't since we don't have 16436 * all the needed bits. Instead lets arrange for 16437 * a call to tcp_output(). That way since we 16438 * are in the closed state we will generate a reset. 16439 * 16440 * Note if tcp_accounting is on we don't unpin since 16441 * we do that after the goto label. 16442 */ 16443 goto send_out_a_rst; 16444 } 16445 if ((sbused(&so->so_snd) == 0) && 16446 (tp->t_state >= TCPS_FIN_WAIT_1) && 16447 (tp->t_flags & TF_SENTFIN)) { 16448 /* 16449 * If we can't receive any more data, then closing user can 16450 * proceed. Starting the timer is contrary to the 16451 * specification, but if we don't get a FIN we'll hang 16452 * forever. 16453 * 16454 */ 16455 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 16456 soisdisconnected(so); 16457 tcp_timer_activate(tp, TT_2MSL, 16458 (tcp_fast_finwait2_recycle ? 16459 tcp_finwait2_timeout : 16460 TP_MAXIDLE(tp))); 16461 } 16462 if (ourfinisacked == 0) { 16463 /* 16464 * We don't change to fin-wait-2 if we have our fin acked 16465 * which means we are probably in TCPS_CLOSING. 16466 */ 16467 tcp_state_change(tp, TCPS_FIN_WAIT_2); 16468 } 16469 } 16470 } 16471 /* Wake up the socket if we have room to write more */ 16472 if (sbavail(&so->so_snd)) { 16473 rack->r_wanted_output = 1; 16474 if (ctf_progress_timeout_check(tp, true)) { 16475 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 16476 tp, tick, PROGRESS_DROP, __LINE__); 16477 /* 16478 * We cheat here and don't send a RST, we should send one 16479 * when the pacer drops the connection. 16480 */ 16481 #ifdef TCP_ACCOUNTING 16482 rdstc = get_cyclecount(); 16483 if (rdstc > ts_val) { 16484 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16485 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16486 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16487 } 16488 } 16489 sched_unpin(); 16490 #endif 16491 (void)tcp_drop(tp, ETIMEDOUT); 16492 m_freem(m); 16493 return (1); 16494 } 16495 } 16496 if (ourfinisacked) { 16497 switch(tp->t_state) { 16498 case TCPS_CLOSING: 16499 #ifdef TCP_ACCOUNTING 16500 rdstc = get_cyclecount(); 16501 if (rdstc > ts_val) { 16502 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16503 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16504 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16505 } 16506 } 16507 sched_unpin(); 16508 #endif 16509 tcp_twstart(tp); 16510 m_freem(m); 16511 return (1); 16512 break; 16513 case TCPS_LAST_ACK: 16514 #ifdef TCP_ACCOUNTING 16515 rdstc = get_cyclecount(); 16516 if (rdstc > ts_val) { 16517 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16518 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16519 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16520 } 16521 } 16522 sched_unpin(); 16523 #endif 16524 tp = tcp_close(tp); 16525 ctf_do_drop(m, tp); 16526 return (1); 16527 break; 16528 case TCPS_FIN_WAIT_1: 16529 #ifdef TCP_ACCOUNTING 16530 rdstc = get_cyclecount(); 16531 if (rdstc > ts_val) { 16532 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16533 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16534 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16535 } 16536 } 16537 #endif 16538 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 16539 soisdisconnected(so); 16540 tcp_timer_activate(tp, TT_2MSL, 16541 (tcp_fast_finwait2_recycle ? 16542 tcp_finwait2_timeout : 16543 TP_MAXIDLE(tp))); 16544 } 16545 tcp_state_change(tp, TCPS_FIN_WAIT_2); 16546 break; 16547 default: 16548 break; 16549 } 16550 } 16551 if (rack->r_fast_output) { 16552 /* 16553 * We re doing fast output.. can we expand that? 16554 */ 16555 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 16556 } 16557 #ifdef TCP_ACCOUNTING 16558 rdstc = get_cyclecount(); 16559 if (rdstc > ts_val) { 16560 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16561 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16562 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16563 } 16564 } 16565 16566 } else if (win_up_req) { 16567 rdstc = get_cyclecount(); 16568 if (rdstc > ts_val) { 16569 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16570 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 16571 } 16572 } 16573 #endif 16574 } 16575 /* Now is there a next packet, if so we are done */ 16576 m_freem(m); 16577 did_out = 0; 16578 if (nxt_pkt) { 16579 #ifdef TCP_ACCOUNTING 16580 sched_unpin(); 16581 #endif 16582 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 16583 return (0); 16584 } 16585 rack_handle_might_revert(tp, rack); 16586 ctf_calc_rwin(so, tp); 16587 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 16588 send_out_a_rst: 16589 if (tcp_output(tp) < 0) { 16590 #ifdef TCP_ACCOUNTING 16591 sched_unpin(); 16592 #endif 16593 return (1); 16594 } 16595 did_out = 1; 16596 } 16597 if (tp->t_flags2 & TF2_HPTS_CALLS) 16598 tp->t_flags2 &= ~TF2_HPTS_CALLS; 16599 rack_free_trim(rack); 16600 #ifdef TCP_ACCOUNTING 16601 sched_unpin(); 16602 #endif 16603 rack_timer_audit(tp, rack, &so->so_snd); 16604 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 16605 return (0); 16606 } 16607 16608 #define TCP_LRO_TS_OPTION \ 16609 ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \ 16610 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP) 16611 16612 static int 16613 rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 16614 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, int32_t nxt_pkt, 16615 struct timeval *tv) 16616 { 16617 struct inpcb *inp = tptoinpcb(tp); 16618 struct socket *so = tptosocket(tp); 16619 #ifdef TCP_ACCOUNTING 16620 uint64_t ts_val; 16621 #endif 16622 int32_t thflags, retval, did_out = 0; 16623 int32_t way_out = 0; 16624 /* 16625 * cts - is the current time from tv (caller gets ts) in microseconds. 16626 * ms_cts - is the current time from tv in milliseconds. 16627 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. 16628 */ 16629 uint32_t cts, us_cts, ms_cts; 16630 uint32_t tiwin, high_seq; 16631 struct timespec ts; 16632 struct tcpopt to; 16633 struct tcp_rack *rack; 16634 struct rack_sendmap *rsm; 16635 int32_t prev_state = 0; 16636 int no_output = 0; 16637 int slot_remaining = 0; 16638 #ifdef TCP_ACCOUNTING 16639 int ack_val_set = 0xf; 16640 #endif 16641 int nsegs; 16642 16643 NET_EPOCH_ASSERT(); 16644 INP_WLOCK_ASSERT(inp); 16645 16646 /* 16647 * tv passed from common code is from either M_TSTMP_LRO or 16648 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 16649 */ 16650 rack = (struct tcp_rack *)tp->t_fb_ptr; 16651 if (rack->rack_deferred_inited == 0) { 16652 /* 16653 * If we are the connecting socket we will 16654 * hit rack_init() when no sequence numbers 16655 * are setup. This makes it so we must defer 16656 * some initialization. Call that now. 16657 */ 16658 rack_deferred_init(tp, rack); 16659 } 16660 /* 16661 * Check to see if we need to skip any output plans. This 16662 * can happen in the non-LRO path where we are pacing and 16663 * must process the ack coming in but need to defer sending 16664 * anything becase a pacing timer is running. 16665 */ 16666 us_cts = tcp_tv_to_usectick(tv); 16667 if (m->m_flags & M_ACKCMP) { 16668 /* 16669 * All compressed ack's are ack's by definition so 16670 * remove any ack required flag and then do the processing. 16671 */ 16672 rack->rc_ack_required = 0; 16673 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 16674 } 16675 thflags = tcp_get_flags(th); 16676 if ((rack->rc_always_pace == 1) && 16677 (rack->rc_ack_can_sendout_data == 0) && 16678 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16679 (TSTMP_LT(us_cts, rack->r_ctl.rc_last_output_to))) { 16680 /* 16681 * Ok conditions are right for queuing the packets 16682 * but we do have to check the flags in the inp, it 16683 * could be, if a sack is present, we want to be awoken and 16684 * so should process the packets. 16685 */ 16686 slot_remaining = rack->r_ctl.rc_last_output_to - us_cts; 16687 if (rack->rc_tp->t_flags2 & TF2_DONT_SACK_QUEUE) { 16688 no_output = 1; 16689 } else { 16690 /* 16691 * If there is no options, or just a 16692 * timestamp option, we will want to queue 16693 * the packets. This is the same that LRO does 16694 * and will need to change with accurate ECN. 16695 */ 16696 uint32_t *ts_ptr; 16697 int optlen; 16698 16699 optlen = (th->th_off << 2) - sizeof(struct tcphdr); 16700 ts_ptr = (uint32_t *)(th + 1); 16701 if ((optlen == 0) || 16702 ((optlen == TCPOLEN_TSTAMP_APPA) && 16703 (*ts_ptr == TCP_LRO_TS_OPTION))) 16704 no_output = 1; 16705 } 16706 if ((no_output == 1) && (slot_remaining < tcp_min_hptsi_time)) { 16707 /* 16708 * It is unrealistic to think we can pace in less than 16709 * the minimum granularity of the pacer (def:250usec). So 16710 * if we have less than that time remaining we should go 16711 * ahead and allow output to be "early". We will attempt to 16712 * make up for it in any pacing time we try to apply on 16713 * the outbound packet. 16714 */ 16715 no_output = 0; 16716 } 16717 } 16718 /* 16719 * If there is a RST or FIN lets dump out the bw 16720 * with a FIN the connection may go on but we 16721 * may not. 16722 */ 16723 if ((thflags & TH_FIN) || (thflags & TH_RST)) 16724 rack_log_pacing_delay_calc(rack, 16725 rack->r_ctl.gp_bw, 16726 0, 16727 0, 16728 rack_get_gp_est(rack), /* delRate */ 16729 rack_get_lt_bw(rack), /* rttProp */ 16730 20, __LINE__, NULL, 0); 16731 if (m->m_flags & M_ACKCMP) { 16732 panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp); 16733 } 16734 cts = tcp_tv_to_usectick(tv); 16735 ms_cts = tcp_tv_to_mssectick(tv); 16736 nsegs = m->m_pkthdr.lro_nsegs; 16737 counter_u64_add(rack_proc_non_comp_ack, 1); 16738 #ifdef TCP_ACCOUNTING 16739 sched_pin(); 16740 if (thflags & TH_ACK) 16741 ts_val = get_cyclecount(); 16742 #endif 16743 if ((m->m_flags & M_TSTMP) || 16744 (m->m_flags & M_TSTMP_LRO)) { 16745 mbuf_tstmp2timespec(m, &ts); 16746 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 16747 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 16748 } else 16749 rack->r_ctl.act_rcv_time = *tv; 16750 kern_prefetch(rack, &prev_state); 16751 prev_state = 0; 16752 /* 16753 * Unscale the window into a 32-bit value. For the SYN_SENT state 16754 * the scale is zero. 16755 */ 16756 tiwin = th->th_win << tp->snd_scale; 16757 #ifdef TCP_ACCOUNTING 16758 if (thflags & TH_ACK) { 16759 /* 16760 * We have a tradeoff here. We can either do what we are 16761 * doing i.e. pinning to this CPU and then doing the accounting 16762 * <or> we could do a critical enter, setup the rdtsc and cpu 16763 * as in below, and then validate we are on the same CPU on 16764 * exit. I have choosen to not do the critical enter since 16765 * that often will gain you a context switch, and instead lock 16766 * us (line above this if) to the same CPU with sched_pin(). This 16767 * means we may be context switched out for a higher priority 16768 * interupt but we won't be moved to another CPU. 16769 * 16770 * If this occurs (which it won't very often since we most likely 16771 * are running this code in interupt context and only a higher 16772 * priority will bump us ... clock?) we will falsely add in 16773 * to the time the interupt processing time plus the ack processing 16774 * time. This is ok since its a rare event. 16775 */ 16776 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin, 16777 ctf_fixed_maxseg(tp)); 16778 } 16779 #endif 16780 /* 16781 * Parse options on any incoming segment. 16782 */ 16783 memset(&to, 0, sizeof(to)); 16784 tcp_dooptions(&to, (u_char *)(th + 1), 16785 (th->th_off << 2) - sizeof(struct tcphdr), 16786 (thflags & TH_SYN) ? TO_SYN : 0); 16787 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 16788 __func__)); 16789 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 16790 __func__)); 16791 16792 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 16793 (tp->t_flags & TF_GPUTINPROG)) { 16794 /* 16795 * We have a goodput in progress 16796 * and we have entered a late state. 16797 * Do we have enough data in the sb 16798 * to handle the GPUT request? 16799 */ 16800 uint32_t bytes; 16801 16802 bytes = tp->gput_ack - tp->gput_seq; 16803 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 16804 bytes += tp->gput_seq - tp->snd_una; 16805 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 16806 /* 16807 * There are not enough bytes in the socket 16808 * buffer that have been sent to cover this 16809 * measurement. Cancel it. 16810 */ 16811 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 16812 rack->r_ctl.rc_gp_srtt /*flex1*/, 16813 tp->gput_seq, 16814 0, 0, 18, __LINE__, NULL, 0); 16815 tp->t_flags &= ~TF_GPUTINPROG; 16816 } 16817 } 16818 high_seq = th->th_ack; 16819 if (tcp_bblogging_on(rack->rc_tp)) { 16820 union tcp_log_stackspecific log; 16821 struct timeval ltv; 16822 #ifdef TCP_REQUEST_TRK 16823 struct tcp_sendfile_track *tcp_req; 16824 16825 if (SEQ_GT(th->th_ack, tp->snd_una)) { 16826 tcp_req = tcp_req_find_req_for_seq(tp, (th->th_ack-1)); 16827 } else { 16828 tcp_req = tcp_req_find_req_for_seq(tp, th->th_ack); 16829 } 16830 #endif 16831 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16832 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 16833 if (rack->rack_no_prr == 0) 16834 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16835 else 16836 log.u_bbr.flex1 = 0; 16837 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 16838 log.u_bbr.use_lt_bw <<= 1; 16839 log.u_bbr.use_lt_bw |= rack->r_might_revert; 16840 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 16841 log.u_bbr.bbr_state = rack->rc_free_cnt; 16842 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16843 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 16844 log.u_bbr.flex3 = m->m_flags; 16845 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 16846 log.u_bbr.lost = thflags; 16847 log.u_bbr.pacing_gain = 0x1; 16848 #ifdef TCP_ACCOUNTING 16849 log.u_bbr.cwnd_gain = ack_val_set; 16850 #endif 16851 log.u_bbr.flex7 = 2; 16852 if (m->m_flags & M_TSTMP) { 16853 /* Record the hardware timestamp if present */ 16854 mbuf_tstmp2timespec(m, &ts); 16855 ltv.tv_sec = ts.tv_sec; 16856 ltv.tv_usec = ts.tv_nsec / 1000; 16857 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 16858 } else if (m->m_flags & M_TSTMP_LRO) { 16859 /* Record the LRO the arrival timestamp */ 16860 mbuf_tstmp2timespec(m, &ts); 16861 ltv.tv_sec = ts.tv_sec; 16862 ltv.tv_usec = ts.tv_nsec / 1000; 16863 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 16864 } 16865 log.u_bbr.timeStamp = tcp_get_usecs(<v); 16866 /* Log the rcv time */ 16867 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp; 16868 #ifdef TCP_REQUEST_TRK 16869 log.u_bbr.applimited = tp->t_tcpreq_closed; 16870 log.u_bbr.applimited <<= 8; 16871 log.u_bbr.applimited |= tp->t_tcpreq_open; 16872 log.u_bbr.applimited <<= 8; 16873 log.u_bbr.applimited |= tp->t_tcpreq_req; 16874 if (tcp_req) { 16875 /* Copy out any client req info */ 16876 /* seconds */ 16877 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); 16878 /* useconds */ 16879 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); 16880 log.u_bbr.rttProp = tcp_req->timestamp; 16881 log.u_bbr.cur_del_rate = tcp_req->start; 16882 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { 16883 log.u_bbr.flex8 |= 1; 16884 } else { 16885 log.u_bbr.flex8 |= 2; 16886 log.u_bbr.bw_inuse = tcp_req->end; 16887 } 16888 log.u_bbr.flex6 = tcp_req->start_seq; 16889 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { 16890 log.u_bbr.flex8 |= 4; 16891 log.u_bbr.epoch = tcp_req->end_seq; 16892 } 16893 } 16894 #endif 16895 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, 16896 tlen, &log, true, <v); 16897 } 16898 /* Remove ack required flag if set, we have one */ 16899 if (thflags & TH_ACK) 16900 rack->rc_ack_required = 0; 16901 if (rack->sack_attack_disable > 0) { 16902 rack->r_ctl.ack_during_sd++; 16903 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__); 16904 } 16905 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) { 16906 way_out = 4; 16907 retval = 0; 16908 m_freem(m); 16909 goto done_with_input; 16910 } 16911 /* 16912 * If a segment with the ACK-bit set arrives in the SYN-SENT state 16913 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9. 16914 */ 16915 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 16916 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 16917 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 16918 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 16919 #ifdef TCP_ACCOUNTING 16920 sched_unpin(); 16921 #endif 16922 return (1); 16923 } 16924 /* 16925 * If timestamps were negotiated during SYN/ACK and a 16926 * segment without a timestamp is received, silently drop 16927 * the segment, unless it is a RST segment or missing timestamps are 16928 * tolerated. 16929 * See section 3.2 of RFC 7323. 16930 */ 16931 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 16932 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 16933 way_out = 5; 16934 retval = 0; 16935 m_freem(m); 16936 goto done_with_input; 16937 } 16938 16939 /* 16940 * Segment received on connection. Reset idle time and keep-alive 16941 * timer. XXX: This should be done after segment validation to 16942 * ignore broken/spoofed segs. 16943 */ 16944 if (tp->t_idle_reduce && 16945 (tp->snd_max == tp->snd_una) && 16946 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 16947 counter_u64_add(rack_input_idle_reduces, 1); 16948 rack_cc_after_idle(rack, tp); 16949 } 16950 tp->t_rcvtime = ticks; 16951 #ifdef STATS 16952 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 16953 #endif 16954 if (tiwin > rack->r_ctl.rc_high_rwnd) 16955 rack->r_ctl.rc_high_rwnd = tiwin; 16956 /* 16957 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 16958 * this to occur after we've validated the segment. 16959 */ 16960 if (tcp_ecn_input_segment(tp, thflags, tlen, 16961 tcp_packets_this_ack(tp, th->th_ack), 16962 iptos)) 16963 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__); 16964 16965 /* 16966 * If echoed timestamp is later than the current time, fall back to 16967 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 16968 * were used when this connection was established. 16969 */ 16970 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 16971 to.to_tsecr -= tp->ts_offset; 16972 if (TSTMP_GT(to.to_tsecr, ms_cts)) 16973 to.to_tsecr = 0; 16974 } 16975 16976 /* 16977 * If its the first time in we need to take care of options and 16978 * verify we can do SACK for rack! 16979 */ 16980 if (rack->r_state == 0) { 16981 /* Should be init'd by rack_init() */ 16982 KASSERT(rack->rc_inp != NULL, 16983 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 16984 if (rack->rc_inp == NULL) { 16985 rack->rc_inp = inp; 16986 } 16987 16988 /* 16989 * Process options only when we get SYN/ACK back. The SYN 16990 * case for incoming connections is handled in tcp_syncache. 16991 * According to RFC1323 the window field in a SYN (i.e., a 16992 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 16993 * this is traditional behavior, may need to be cleaned up. 16994 */ 16995 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 16996 /* Handle parallel SYN for ECN */ 16997 tcp_ecn_input_parallel_syn(tp, thflags, iptos); 16998 if ((to.to_flags & TOF_SCALE) && 16999 (tp->t_flags & TF_REQ_SCALE)) { 17000 tp->t_flags |= TF_RCVD_SCALE; 17001 tp->snd_scale = to.to_wscale; 17002 } else 17003 tp->t_flags &= ~TF_REQ_SCALE; 17004 /* 17005 * Initial send window. It will be updated with the 17006 * next incoming segment to the scaled value. 17007 */ 17008 tp->snd_wnd = th->th_win; 17009 rack_validate_fo_sendwin_up(tp, rack); 17010 if ((to.to_flags & TOF_TS) && 17011 (tp->t_flags & TF_REQ_TSTMP)) { 17012 tp->t_flags |= TF_RCVD_TSTMP; 17013 tp->ts_recent = to.to_tsval; 17014 tp->ts_recent_age = cts; 17015 } else 17016 tp->t_flags &= ~TF_REQ_TSTMP; 17017 if (to.to_flags & TOF_MSS) { 17018 tcp_mss(tp, to.to_mss); 17019 } 17020 if ((tp->t_flags & TF_SACK_PERMIT) && 17021 (to.to_flags & TOF_SACKPERM) == 0) 17022 tp->t_flags &= ~TF_SACK_PERMIT; 17023 if (IS_FASTOPEN(tp->t_flags)) { 17024 if (to.to_flags & TOF_FASTOPEN) { 17025 uint16_t mss; 17026 17027 if (to.to_flags & TOF_MSS) 17028 mss = to.to_mss; 17029 else 17030 if ((inp->inp_vflag & INP_IPV6) != 0) 17031 mss = TCP6_MSS; 17032 else 17033 mss = TCP_MSS; 17034 tcp_fastopen_update_cache(tp, mss, 17035 to.to_tfo_len, to.to_tfo_cookie); 17036 } else 17037 tcp_fastopen_disable_path(tp); 17038 } 17039 } 17040 /* 17041 * At this point we are at the initial call. Here we decide 17042 * if we are doing RACK or not. We do this by seeing if 17043 * TF_SACK_PERMIT is set and the sack-not-required is clear. 17044 * The code now does do dup-ack counting so if you don't 17045 * switch back you won't get rack & TLP, but you will still 17046 * get this stack. 17047 */ 17048 17049 if ((rack_sack_not_required == 0) && 17050 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 17051 tcp_switch_back_to_default(tp); 17052 (*tp->t_fb->tfb_tcp_do_segment)(tp, m, th, drop_hdrlen, 17053 tlen, iptos); 17054 #ifdef TCP_ACCOUNTING 17055 sched_unpin(); 17056 #endif 17057 return (1); 17058 } 17059 tcp_set_hpts(tp); 17060 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 17061 } 17062 if (thflags & TH_FIN) 17063 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 17064 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 17065 if ((rack->rc_gp_dyn_mul) && 17066 (rack->use_fixed_rate == 0) && 17067 (rack->rc_always_pace)) { 17068 /* Check in on probertt */ 17069 rack_check_probe_rtt(rack, us_cts); 17070 } 17071 rack_clear_rate_sample(rack); 17072 if ((rack->forced_ack) && 17073 ((tcp_get_flags(th) & TH_RST) == 0)) { 17074 rack_handle_probe_response(rack, tiwin, us_cts); 17075 } 17076 /* 17077 * This is the one exception case where we set the rack state 17078 * always. All other times (timers etc) we must have a rack-state 17079 * set (so we assure we have done the checks above for SACK). 17080 */ 17081 rack->r_ctl.rc_rcvtime = cts; 17082 if (rack->r_state != tp->t_state) 17083 rack_set_state(tp, rack); 17084 if (SEQ_GT(th->th_ack, tp->snd_una) && 17085 (rsm = tqhash_min(rack->r_ctl.tqh)) != NULL) 17086 kern_prefetch(rsm, &prev_state); 17087 prev_state = rack->r_state; 17088 if ((thflags & TH_RST) && 17089 ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 17090 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || 17091 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq))) { 17092 /* The connection will be killed by a reset check the tracepoint */ 17093 tcp_trace_point(rack->rc_tp, TCP_TP_RESET_RCV); 17094 } 17095 retval = (*rack->r_substate) (m, th, so, 17096 tp, &to, drop_hdrlen, 17097 tlen, tiwin, thflags, nxt_pkt, iptos); 17098 if (retval == 0) { 17099 /* 17100 * If retval is 1 the tcb is unlocked and most likely the tp 17101 * is gone. 17102 */ 17103 INP_WLOCK_ASSERT(inp); 17104 if ((rack->rc_gp_dyn_mul) && 17105 (rack->rc_always_pace) && 17106 (rack->use_fixed_rate == 0) && 17107 rack->in_probe_rtt && 17108 (rack->r_ctl.rc_time_probertt_starts == 0)) { 17109 /* 17110 * If we are going for target, lets recheck before 17111 * we output. 17112 */ 17113 rack_check_probe_rtt(rack, us_cts); 17114 } 17115 if (rack->set_pacing_done_a_iw == 0) { 17116 /* How much has been acked? */ 17117 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 17118 /* We have enough to set in the pacing segment size */ 17119 rack->set_pacing_done_a_iw = 1; 17120 rack_set_pace_segments(tp, rack, __LINE__, NULL); 17121 } 17122 } 17123 tcp_rack_xmit_timer_commit(rack, tp); 17124 #ifdef TCP_ACCOUNTING 17125 /* 17126 * If we set the ack_val_se to what ack processing we are doing 17127 * we also want to track how many cycles we burned. Note 17128 * the bits after tcp_output we let be "free". This is because 17129 * we are also tracking the tcp_output times as well. Note the 17130 * use of 0xf here since we only have 11 counter (0 - 0xa) and 17131 * 0xf cannot be returned and is what we initialize it too to 17132 * indicate we are not doing the tabulations. 17133 */ 17134 if (ack_val_set != 0xf) { 17135 uint64_t crtsc; 17136 17137 crtsc = get_cyclecount(); 17138 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17139 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 17140 } 17141 } 17142 #endif 17143 if ((nxt_pkt == 0) && (no_output == 0)) { 17144 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 17145 do_output_now: 17146 if (tcp_output(tp) < 0) { 17147 #ifdef TCP_ACCOUNTING 17148 sched_unpin(); 17149 #endif 17150 return (1); 17151 } 17152 did_out = 1; 17153 } 17154 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 17155 rack_free_trim(rack); 17156 } else if ((no_output == 1) && 17157 (nxt_pkt == 0) && 17158 (tcp_in_hpts(rack->rc_tp) == 0)) { 17159 /* 17160 * We are not in hpts and we had a pacing timer up. Use 17161 * the remaining time (slot_remaining) to restart the timer. 17162 */ 17163 KASSERT ((slot_remaining != 0), ("slot remaining is zero for rack:%p tp:%p", rack, tp)); 17164 rack_start_hpts_timer(rack, tp, cts, slot_remaining, 0, 0); 17165 rack_free_trim(rack); 17166 } 17167 /* Clear the flag, it may have been cleared by output but we may not have */ 17168 if ((nxt_pkt == 0) && (tp->t_flags2 & TF2_HPTS_CALLS)) 17169 tp->t_flags2 &= ~TF2_HPTS_CALLS; 17170 /* Update any rounds needed */ 17171 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) 17172 rack_log_hystart_event(rack, high_seq, 8); 17173 /* 17174 * The draft (v3) calls for us to use SEQ_GEQ, but that 17175 * causes issues when we are just going app limited. Lets 17176 * instead use SEQ_GT <or> where its equal but more data 17177 * is outstanding. 17178 * 17179 * Also make sure we are on the last ack of a series. We 17180 * have to have all the ack's processed in queue to know 17181 * if there is something left outstanding. 17182 */ 17183 if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends) && 17184 (rack->rc_new_rnd_needed == 0) && 17185 (nxt_pkt == 0)) { 17186 rack_log_hystart_event(rack, tp->snd_una, 21); 17187 rack->r_ctl.current_round++; 17188 /* Force the next send to setup the next round */ 17189 rack->rc_new_rnd_needed = 1; 17190 if (CC_ALGO(tp)->newround != NULL) { 17191 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); 17192 } 17193 } 17194 if ((nxt_pkt == 0) && 17195 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 17196 (SEQ_GT(tp->snd_max, tp->snd_una) || 17197 (tp->t_flags & TF_DELACK) || 17198 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 17199 (tp->t_state <= TCPS_CLOSING)))) { 17200 /* We could not send (probably in the hpts but stopped the timer earlier)? */ 17201 if ((tp->snd_max == tp->snd_una) && 17202 ((tp->t_flags & TF_DELACK) == 0) && 17203 (tcp_in_hpts(rack->rc_tp)) && 17204 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 17205 /* keep alive not needed if we are hptsi output yet */ 17206 ; 17207 } else { 17208 int late = 0; 17209 if (tcp_in_hpts(tp)) { 17210 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 17211 us_cts = tcp_get_usecs(NULL); 17212 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 17213 rack->r_early = 1; 17214 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 17215 } else 17216 late = 1; 17217 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 17218 } 17219 tcp_hpts_remove(tp); 17220 } 17221 if (late && (did_out == 0)) { 17222 /* 17223 * We are late in the sending 17224 * and we did not call the output 17225 * (this probably should not happen). 17226 */ 17227 goto do_output_now; 17228 } 17229 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 17230 } 17231 way_out = 1; 17232 } else if (nxt_pkt == 0) { 17233 /* Do we have the correct timer running? */ 17234 rack_timer_audit(tp, rack, &so->so_snd); 17235 way_out = 2; 17236 } 17237 done_with_input: 17238 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); 17239 if (did_out) 17240 rack->r_wanted_output = 0; 17241 } 17242 #ifdef TCP_ACCOUNTING 17243 sched_unpin(); 17244 #endif 17245 return (retval); 17246 } 17247 17248 static void 17249 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 17250 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 17251 { 17252 struct timeval tv; 17253 17254 /* First lets see if we have old packets */ 17255 if (!STAILQ_EMPTY(&tp->t_inqueue)) { 17256 if (ctf_do_queued_segments(tp, 1)) { 17257 m_freem(m); 17258 return; 17259 } 17260 } 17261 if (m->m_flags & M_TSTMP_LRO) { 17262 mbuf_tstmp2timeval(m, &tv); 17263 } else { 17264 /* Should not be should we kassert instead? */ 17265 tcp_get_usecs(&tv); 17266 } 17267 if (rack_do_segment_nounlock(tp, m, th, drop_hdrlen, tlen, iptos, 0, 17268 &tv) == 0) { 17269 INP_WUNLOCK(tptoinpcb(tp)); 17270 } 17271 } 17272 17273 struct rack_sendmap * 17274 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 17275 { 17276 struct rack_sendmap *rsm = NULL; 17277 int32_t idx; 17278 uint32_t srtt = 0, thresh = 0, ts_low = 0; 17279 int no_sack = 0; 17280 17281 /* Return the next guy to be re-transmitted */ 17282 if (tqhash_empty(rack->r_ctl.tqh)) { 17283 return (NULL); 17284 } 17285 if (tp->t_flags & TF_SENTFIN) { 17286 /* retran the end FIN? */ 17287 return (NULL); 17288 } 17289 /* ok lets look at this one */ 17290 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 17291 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) { 17292 return (rsm); 17293 } 17294 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 17295 goto check_it; 17296 } 17297 rsm = rack_find_lowest_rsm(rack); 17298 if (rsm == NULL) { 17299 return (NULL); 17300 } 17301 check_it: 17302 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) || 17303 (rack->sack_attack_disable > 0)) { 17304 no_sack = 1; 17305 } 17306 if ((no_sack > 0) && 17307 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 17308 /* 17309 * No sack so we automatically do the 3 strikes and 17310 * retransmit (no rack timer would be started). 17311 */ 17312 return (rsm); 17313 } 17314 if (rsm->r_flags & RACK_ACKED) { 17315 return (NULL); 17316 } 17317 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 17318 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 17319 /* Its not yet ready */ 17320 return (NULL); 17321 } 17322 srtt = rack_grab_rtt(tp, rack); 17323 idx = rsm->r_rtr_cnt - 1; 17324 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 17325 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 17326 if ((tsused == ts_low) || 17327 (TSTMP_LT(tsused, ts_low))) { 17328 /* No time since sending */ 17329 return (NULL); 17330 } 17331 if ((tsused - ts_low) < thresh) { 17332 /* It has not been long enough yet */ 17333 return (NULL); 17334 } 17335 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 17336 ((rsm->r_flags & RACK_SACK_PASSED) && 17337 (rack->sack_attack_disable == 0))) { 17338 /* 17339 * We have passed the dup-ack threshold <or> 17340 * a SACK has indicated this is missing. 17341 * Note that if you are a declared attacker 17342 * it is only the dup-ack threshold that 17343 * will cause retransmits. 17344 */ 17345 /* log retransmit reason */ 17346 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 17347 rack->r_fast_output = 0; 17348 return (rsm); 17349 } 17350 return (NULL); 17351 } 17352 17353 static void 17354 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 17355 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 17356 int line, struct rack_sendmap *rsm, uint8_t quality) 17357 { 17358 if (tcp_bblogging_on(rack->rc_tp)) { 17359 union tcp_log_stackspecific log; 17360 struct timeval tv; 17361 17362 if (rack_verbose_logging == 0) { 17363 /* 17364 * We are not verbose screen out all but 17365 * ones we always want. 17366 */ 17367 if ((method != 2) && 17368 (method != 3) && 17369 (method != 7) && 17370 (method != 14) && 17371 (method != 20)) { 17372 return; 17373 } 17374 } 17375 memset(&log, 0, sizeof(log)); 17376 log.u_bbr.flex1 = slot; 17377 log.u_bbr.flex2 = len; 17378 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 17379 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 17380 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 17381 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 17382 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 17383 log.u_bbr.use_lt_bw <<= 1; 17384 log.u_bbr.use_lt_bw |= rack->r_late; 17385 log.u_bbr.use_lt_bw <<= 1; 17386 log.u_bbr.use_lt_bw |= rack->r_early; 17387 log.u_bbr.use_lt_bw <<= 1; 17388 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 17389 log.u_bbr.use_lt_bw <<= 1; 17390 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 17391 log.u_bbr.use_lt_bw <<= 1; 17392 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 17393 log.u_bbr.use_lt_bw <<= 1; 17394 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 17395 log.u_bbr.use_lt_bw <<= 1; 17396 log.u_bbr.use_lt_bw |= rack->gp_ready; 17397 log.u_bbr.pkt_epoch = line; 17398 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 17399 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 17400 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 17401 log.u_bbr.bw_inuse = bw_est; 17402 log.u_bbr.delRate = bw; 17403 if (rack->r_ctl.gp_bw == 0) 17404 log.u_bbr.cur_del_rate = 0; 17405 else 17406 log.u_bbr.cur_del_rate = rack_get_bw(rack); 17407 log.u_bbr.rttProp = len_time; 17408 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 17409 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 17410 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 17411 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 17412 /* We are in slow start */ 17413 log.u_bbr.flex7 = 1; 17414 } else { 17415 /* we are on congestion avoidance */ 17416 log.u_bbr.flex7 = 0; 17417 } 17418 log.u_bbr.flex8 = method; 17419 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 17420 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 17421 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 17422 log.u_bbr.cwnd_gain <<= 1; 17423 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 17424 log.u_bbr.cwnd_gain <<= 1; 17425 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 17426 log.u_bbr.bbr_substate = quality; 17427 log.u_bbr.bbr_state = rack->dgp_on; 17428 log.u_bbr.bbr_state <<= 1; 17429 log.u_bbr.bbr_state |= rack->r_fill_less_agg; 17430 log.u_bbr.bbr_state <<= 1; 17431 log.u_bbr.bbr_state |= rack->rc_pace_to_cwnd; 17432 log.u_bbr.bbr_state <<= 2; 17433 log.u_bbr.bbr_state |= rack->r_pacing_discount; 17434 log.u_bbr.flex7 = ((rack->r_ctl.pacing_discount_amm << 1) | log.u_bbr.flex7); 17435 TCP_LOG_EVENTP(rack->rc_tp, NULL, 17436 &rack->rc_inp->inp_socket->so_rcv, 17437 &rack->rc_inp->inp_socket->so_snd, 17438 BBR_LOG_HPTSI_CALC, 0, 17439 0, &log, false, &tv); 17440 } 17441 } 17442 17443 static uint32_t 17444 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 17445 { 17446 uint32_t new_tso, user_max, pace_one; 17447 17448 user_max = rack->rc_user_set_max_segs * mss; 17449 if (rack->rc_force_max_seg) { 17450 return (user_max); 17451 } 17452 if (rack->use_fixed_rate && 17453 ((rack->r_ctl.crte == NULL) || 17454 (bw != rack->r_ctl.crte->rate))) { 17455 /* Use the user mss since we are not exactly matched */ 17456 return (user_max); 17457 } 17458 if (rack_pace_one_seg || 17459 (rack->r_ctl.rc_user_set_min_segs == 1)) 17460 pace_one = 1; 17461 else 17462 pace_one = 0; 17463 17464 new_tso = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, bw, mss, 17465 pace_one, rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); 17466 if (new_tso > user_max) 17467 new_tso = user_max; 17468 if (rack->rc_hybrid_mode && rack->r_ctl.client_suggested_maxseg) { 17469 if (((uint32_t)rack->r_ctl.client_suggested_maxseg * mss) > new_tso) 17470 new_tso = (uint32_t)rack->r_ctl.client_suggested_maxseg * mss; 17471 } 17472 if (rack->r_ctl.rc_user_set_min_segs && 17473 ((rack->r_ctl.rc_user_set_min_segs * mss) > new_tso)) 17474 new_tso = rack->r_ctl.rc_user_set_min_segs * mss; 17475 return (new_tso); 17476 } 17477 17478 static uint64_t 17479 rack_arrive_at_discounted_rate(struct tcp_rack *rack, uint64_t window_input, uint32_t *rate_set, uint32_t *gain_b) 17480 { 17481 uint64_t reduced_win; 17482 uint32_t gain; 17483 17484 if (window_input < rc_init_window(rack)) { 17485 /* 17486 * The cwnd is collapsed to 17487 * nearly zero, maybe because of a time-out? 17488 * Lets drop back to the lt-bw. 17489 */ 17490 reduced_win = rack_get_lt_bw(rack); 17491 /* Set the flag so the caller knows its a rate and not a reduced window */ 17492 *rate_set = 1; 17493 gain = 100; 17494 } else if (IN_RECOVERY(rack->rc_tp->t_flags)) { 17495 /* 17496 * If we are in recover our cwnd needs to be less for 17497 * our pacing consideration. 17498 */ 17499 if (rack->rack_hibeta == 0) { 17500 reduced_win = window_input / 2; 17501 gain = 50; 17502 } else { 17503 reduced_win = window_input * rack->r_ctl.saved_hibeta; 17504 reduced_win /= 100; 17505 gain = rack->r_ctl.saved_hibeta; 17506 } 17507 } else { 17508 /* 17509 * Apply Timely factor to increase/decrease the 17510 * amount we are pacing at. 17511 */ 17512 gain = rack_get_output_gain(rack, NULL); 17513 if (gain > rack_gain_p5_ub) { 17514 gain = rack_gain_p5_ub; 17515 } 17516 reduced_win = window_input * gain; 17517 reduced_win /= 100; 17518 } 17519 if (gain_b != NULL) 17520 *gain_b = gain; 17521 /* 17522 * What is being returned here is a trimmed down 17523 * window values in all cases where rate_set is left 17524 * at 0. In one case we actually return the rate (lt_bw). 17525 * the "reduced_win" is returned as a slimmed down cwnd that 17526 * is then calculated by the caller into a rate when rate_set 17527 * is 0. 17528 */ 17529 return (reduced_win); 17530 } 17531 17532 static int32_t 17533 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced) 17534 { 17535 uint64_t lentim, fill_bw; 17536 17537 /* Lets first see if we are full, if so continue with normal rate */ 17538 rack->r_via_fill_cw = 0; 17539 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) 17540 return (slot); 17541 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) 17542 return (slot); 17543 if (rack->r_ctl.rc_last_us_rtt == 0) 17544 return (slot); 17545 if (rack->rc_pace_fill_if_rttin_range && 17546 (rack->r_ctl.rc_last_us_rtt >= 17547 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { 17548 /* The rtt is huge, N * smallest, lets not fill */ 17549 return (slot); 17550 } 17551 /* 17552 * first lets calculate the b/w based on the last us-rtt 17553 * and the the smallest send window. 17554 */ 17555 fill_bw = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); 17556 if (rack->rc_fillcw_apply_discount) { 17557 uint32_t rate_set = 0; 17558 17559 fill_bw = rack_arrive_at_discounted_rate(rack, fill_bw, &rate_set, NULL); 17560 if (rate_set) { 17561 goto at_lt_bw; 17562 } 17563 } 17564 /* Take the rwnd if its smaller */ 17565 if (fill_bw > rack->rc_tp->snd_wnd) 17566 fill_bw = rack->rc_tp->snd_wnd; 17567 /* Now lets make it into a b/w */ 17568 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 17569 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 17570 at_lt_bw: 17571 if (rack->r_fill_less_agg) { 17572 /* 17573 * We want the average of the rate_wanted 17574 * and our fill-cw calculated bw. We also want 17575 * to cap any increase to be no more than 17576 * X times the lt_bw (where X is the rack_bw_multipler). 17577 */ 17578 uint64_t lt_bw, rate; 17579 17580 lt_bw = rack_get_lt_bw(rack); 17581 if (lt_bw > *rate_wanted) 17582 rate = lt_bw; 17583 else 17584 rate = *rate_wanted; 17585 fill_bw += rate; 17586 fill_bw /= 2; 17587 if (rack_bw_multipler && (fill_bw > (rate * rack_bw_multipler))) { 17588 fill_bw = rate * rack_bw_multipler; 17589 } 17590 } 17591 /* We are below the min b/w */ 17592 if (non_paced) 17593 *rate_wanted = fill_bw; 17594 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 17595 return (slot); 17596 rack->r_via_fill_cw = 1; 17597 if (rack->r_rack_hw_rate_caps && 17598 (rack->r_ctl.crte != NULL)) { 17599 uint64_t high_rate; 17600 17601 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 17602 if (fill_bw > high_rate) { 17603 /* We are capping bw at the highest rate table entry */ 17604 if (*rate_wanted > high_rate) { 17605 /* The original rate was also capped */ 17606 rack->r_via_fill_cw = 0; 17607 } 17608 rack_log_hdwr_pacing(rack, 17609 fill_bw, high_rate, __LINE__, 17610 0, 3); 17611 fill_bw = high_rate; 17612 if (capped) 17613 *capped = 1; 17614 } 17615 } else if ((rack->r_ctl.crte == NULL) && 17616 (rack->rack_hdrw_pacing == 0) && 17617 (rack->rack_hdw_pace_ena) && 17618 rack->r_rack_hw_rate_caps && 17619 (rack->rack_attempt_hdwr_pace == 0) && 17620 (rack->rc_inp->inp_route.ro_nh != NULL) && 17621 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 17622 /* 17623 * Ok we may have a first attempt that is greater than our top rate 17624 * lets check. 17625 */ 17626 uint64_t high_rate; 17627 17628 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 17629 if (high_rate) { 17630 if (fill_bw > high_rate) { 17631 fill_bw = high_rate; 17632 if (capped) 17633 *capped = 1; 17634 } 17635 } 17636 } 17637 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) { 17638 if (rack->rc_hybrid_mode) 17639 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 17640 fill_bw, 0, 0, HYBRID_LOG_RATE_CAP, 2, NULL, __LINE__); 17641 fill_bw = rack->r_ctl.bw_rate_cap; 17642 } 17643 /* 17644 * Ok fill_bw holds our mythical b/w to fill the cwnd 17645 * in an rtt (unless it was capped), what does that 17646 * time wise equate too? 17647 */ 17648 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 17649 lentim /= fill_bw; 17650 *rate_wanted = fill_bw; 17651 if (non_paced || (lentim < slot)) { 17652 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, 17653 0, lentim, 12, __LINE__, NULL, 0); 17654 return ((int32_t)lentim); 17655 } else 17656 return (slot); 17657 } 17658 17659 static int32_t 17660 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz) 17661 { 17662 uint64_t srtt; 17663 int32_t slot = 0; 17664 int32_t minslot = 0; 17665 int can_start_hw_pacing = 1; 17666 int err; 17667 int pace_one; 17668 17669 if (rack_pace_one_seg || 17670 (rack->r_ctl.rc_user_set_min_segs == 1)) 17671 pace_one = 1; 17672 else 17673 pace_one = 0; 17674 if (rack->rc_always_pace == 0) { 17675 /* 17676 * We use the most optimistic possible cwnd/srtt for 17677 * sending calculations. This will make our 17678 * calculation anticipate getting more through 17679 * quicker then possible. But thats ok we don't want 17680 * the peer to have a gap in data sending. 17681 */ 17682 uint64_t cwnd, tr_perms = 0; 17683 int32_t reduce = 0; 17684 17685 old_method: 17686 /* 17687 * We keep no precise pacing with the old method 17688 * instead we use the pacer to mitigate bursts. 17689 */ 17690 if (rack->r_ctl.rc_rack_min_rtt) 17691 srtt = rack->r_ctl.rc_rack_min_rtt; 17692 else 17693 srtt = max(tp->t_srtt, 1); 17694 if (rack->r_ctl.rc_rack_largest_cwnd) 17695 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 17696 else 17697 cwnd = rack->r_ctl.cwnd_to_use; 17698 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 17699 tr_perms = (cwnd * 1000) / srtt; 17700 if (tr_perms == 0) { 17701 tr_perms = ctf_fixed_maxseg(tp); 17702 } 17703 /* 17704 * Calculate how long this will take to drain, if 17705 * the calculation comes out to zero, thats ok we 17706 * will use send_a_lot to possibly spin around for 17707 * more increasing tot_len_this_send to the point 17708 * that its going to require a pace, or we hit the 17709 * cwnd. Which in that case we are just waiting for 17710 * a ACK. 17711 */ 17712 slot = len / tr_perms; 17713 /* Now do we reduce the time so we don't run dry? */ 17714 if (slot && rack_slot_reduction) { 17715 reduce = (slot / rack_slot_reduction); 17716 if (reduce < slot) { 17717 slot -= reduce; 17718 } else 17719 slot = 0; 17720 } 17721 slot *= HPTS_USEC_IN_MSEC; 17722 if (rack->rc_pace_to_cwnd) { 17723 uint64_t rate_wanted = 0; 17724 17725 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); 17726 rack->rc_ack_can_sendout_data = 1; 17727 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); 17728 } else 17729 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); 17730 /*******************************************************/ 17731 /* RRS: We insert non-paced call to stats here for len */ 17732 /*******************************************************/ 17733 } else { 17734 uint64_t bw_est, res, lentim, rate_wanted; 17735 uint32_t segs, oh; 17736 int capped = 0; 17737 int prev_fill; 17738 17739 if ((rack->r_rr_config == 1) && rsm) { 17740 return (rack->r_ctl.rc_min_to); 17741 } 17742 if (rack->use_fixed_rate) { 17743 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 17744 } else if ((rack->r_ctl.init_rate == 0) && 17745 (rack->r_ctl.gp_bw == 0)) { 17746 /* no way to yet do an estimate */ 17747 bw_est = rate_wanted = 0; 17748 } else if (rack->dgp_on) { 17749 bw_est = rack_get_bw(rack); 17750 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 17751 } else { 17752 uint32_t gain, rate_set = 0; 17753 17754 rate_wanted = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); 17755 rate_wanted = rack_arrive_at_discounted_rate(rack, rate_wanted, &rate_set, &gain); 17756 if (rate_set == 0) { 17757 if (rate_wanted > rack->rc_tp->snd_wnd) 17758 rate_wanted = rack->rc_tp->snd_wnd; 17759 /* Now lets make it into a b/w */ 17760 rate_wanted *= (uint64_t)HPTS_USEC_IN_SEC; 17761 rate_wanted /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 17762 } 17763 bw_est = rate_wanted; 17764 rack_log_pacing_delay_calc(rack, rack->rc_tp->snd_cwnd, 17765 rack->r_ctl.cwnd_to_use, 17766 rate_wanted, bw_est, 17767 rack->r_ctl.rc_last_us_rtt, 17768 88, __LINE__, NULL, gain); 17769 } 17770 if ((bw_est == 0) || (rate_wanted == 0) || 17771 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) { 17772 /* 17773 * No way yet to make a b/w estimate or 17774 * our raise is set incorrectly. 17775 */ 17776 goto old_method; 17777 } 17778 rack_rate_cap_bw(rack, &rate_wanted, &capped); 17779 /* We need to account for all the overheads */ 17780 segs = (len + segsiz - 1) / segsiz; 17781 /* 17782 * We need the diff between 1514 bytes (e-mtu with e-hdr) 17783 * and how much data we put in each packet. Yes this 17784 * means we may be off if we are larger than 1500 bytes 17785 * or smaller. But this just makes us more conservative. 17786 */ 17787 17788 oh = (tp->t_maxseg - segsiz) + sizeof(struct tcphdr); 17789 if (rack->r_is_v6) { 17790 #ifdef INET6 17791 oh += sizeof(struct ip6_hdr); 17792 #endif 17793 } else { 17794 #ifdef INET 17795 oh += sizeof(struct ip); 17796 #endif 17797 } 17798 /* We add a fixed 14 for the ethernet header */ 17799 oh += 14; 17800 segs *= oh; 17801 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 17802 res = lentim / rate_wanted; 17803 slot = (uint32_t)res; 17804 if (rack_hw_rate_min && 17805 (rate_wanted < rack_hw_rate_min)) { 17806 can_start_hw_pacing = 0; 17807 if (rack->r_ctl.crte) { 17808 /* 17809 * Ok we need to release it, we 17810 * have fallen too low. 17811 */ 17812 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17813 rack->r_ctl.crte = NULL; 17814 rack->rack_attempt_hdwr_pace = 0; 17815 rack->rack_hdrw_pacing = 0; 17816 } 17817 } 17818 if (rack->r_ctl.crte && 17819 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { 17820 /* 17821 * We want more than the hardware can give us, 17822 * don't start any hw pacing. 17823 */ 17824 can_start_hw_pacing = 0; 17825 if (rack->r_rack_hw_rate_caps == 0) { 17826 /* 17827 * Ok we need to release it, we 17828 * want more than the card can give us and 17829 * no rate cap is in place. Set it up so 17830 * when we want less we can retry. 17831 */ 17832 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17833 rack->r_ctl.crte = NULL; 17834 rack->rack_attempt_hdwr_pace = 0; 17835 rack->rack_hdrw_pacing = 0; 17836 } 17837 } 17838 if ((rack->r_ctl.crte != NULL) && (rack->rc_inp->inp_snd_tag == NULL)) { 17839 /* 17840 * We lost our rate somehow, this can happen 17841 * if the interface changed underneath us. 17842 */ 17843 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17844 rack->r_ctl.crte = NULL; 17845 /* Lets re-allow attempting to setup pacing */ 17846 rack->rack_hdrw_pacing = 0; 17847 rack->rack_attempt_hdwr_pace = 0; 17848 rack_log_hdwr_pacing(rack, 17849 rate_wanted, bw_est, __LINE__, 17850 0, 6); 17851 } 17852 prev_fill = rack->r_via_fill_cw; 17853 if ((rack->rc_pace_to_cwnd) && 17854 (capped == 0) && 17855 (rack->dgp_on == 1) && 17856 (rack->use_fixed_rate == 0) && 17857 (rack->in_probe_rtt == 0) && 17858 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 17859 /* 17860 * We want to pace at our rate *or* faster to 17861 * fill the cwnd to the max if its not full. 17862 */ 17863 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); 17864 /* Re-check to make sure we are not exceeding our max b/w */ 17865 if ((rack->r_ctl.crte != NULL) && 17866 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { 17867 /* 17868 * We want more than the hardware can give us, 17869 * don't start any hw pacing. 17870 */ 17871 can_start_hw_pacing = 0; 17872 if (rack->r_rack_hw_rate_caps == 0) { 17873 /* 17874 * Ok we need to release it, we 17875 * want more than the card can give us and 17876 * no rate cap is in place. Set it up so 17877 * when we want less we can retry. 17878 */ 17879 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17880 rack->r_ctl.crte = NULL; 17881 rack->rack_attempt_hdwr_pace = 0; 17882 rack->rack_hdrw_pacing = 0; 17883 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 17884 } 17885 } 17886 } 17887 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 17888 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 17889 if ((rack->rack_hdw_pace_ena) && 17890 (can_start_hw_pacing > 0) && 17891 (rack->rack_hdrw_pacing == 0) && 17892 (rack->rack_attempt_hdwr_pace == 0)) { 17893 /* 17894 * Lets attempt to turn on hardware pacing 17895 * if we can. 17896 */ 17897 rack->rack_attempt_hdwr_pace = 1; 17898 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 17899 rack->rc_inp->inp_route.ro_nh->nh_ifp, 17900 rate_wanted, 17901 RS_PACING_GEQ, 17902 &err, &rack->r_ctl.crte_prev_rate); 17903 if (rack->r_ctl.crte) { 17904 rack->rack_hdrw_pacing = 1; 17905 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, segsiz, 17906 pace_one, rack->r_ctl.crte, 17907 NULL, rack->r_ctl.pace_len_divisor); 17908 rack_log_hdwr_pacing(rack, 17909 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 17910 err, 0); 17911 rack->r_ctl.last_hw_bw_req = rate_wanted; 17912 } else { 17913 counter_u64_add(rack_hw_pace_init_fail, 1); 17914 } 17915 } else if (rack->rack_hdrw_pacing && 17916 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 17917 /* Do we need to adjust our rate? */ 17918 const struct tcp_hwrate_limit_table *nrte; 17919 17920 if (rack->r_up_only && 17921 (rate_wanted < rack->r_ctl.crte->rate)) { 17922 /** 17923 * We have four possible states here 17924 * having to do with the previous time 17925 * and this time. 17926 * previous | this-time 17927 * A) 0 | 0 -- fill_cw not in the picture 17928 * B) 1 | 0 -- we were doing a fill-cw but now are not 17929 * C) 1 | 1 -- all rates from fill_cw 17930 * D) 0 | 1 -- we were doing non-fill and now we are filling 17931 * 17932 * For case A, C and D we don't allow a drop. But for 17933 * case B where we now our on our steady rate we do 17934 * allow a drop. 17935 * 17936 */ 17937 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 17938 goto done_w_hdwr; 17939 } 17940 if ((rate_wanted > rack->r_ctl.crte->rate) || 17941 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 17942 if (rack_hw_rate_to_low && 17943 (bw_est < rack_hw_rate_to_low)) { 17944 /* 17945 * The pacing rate is too low for hardware, but 17946 * do allow hardware pacing to be restarted. 17947 */ 17948 rack_log_hdwr_pacing(rack, 17949 bw_est, rack->r_ctl.crte->rate, __LINE__, 17950 0, 5); 17951 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17952 rack->r_ctl.crte = NULL; 17953 rack->rack_attempt_hdwr_pace = 0; 17954 rack->rack_hdrw_pacing = 0; 17955 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 17956 goto done_w_hdwr; 17957 } 17958 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 17959 rack->rc_tp, 17960 rack->rc_inp->inp_route.ro_nh->nh_ifp, 17961 rate_wanted, 17962 RS_PACING_GEQ, 17963 &err, &rack->r_ctl.crte_prev_rate); 17964 if (nrte == NULL) { 17965 /* 17966 * Lost the rate, lets drop hardware pacing 17967 * period. 17968 */ 17969 rack->rack_hdrw_pacing = 0; 17970 rack->r_ctl.crte = NULL; 17971 rack_log_hdwr_pacing(rack, 17972 rate_wanted, 0, __LINE__, 17973 err, 1); 17974 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 17975 counter_u64_add(rack_hw_pace_lost, 1); 17976 } else if (nrte != rack->r_ctl.crte) { 17977 rack->r_ctl.crte = nrte; 17978 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, 17979 segsiz, pace_one, rack->r_ctl.crte, 17980 NULL, rack->r_ctl.pace_len_divisor); 17981 rack_log_hdwr_pacing(rack, 17982 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 17983 err, 2); 17984 rack->r_ctl.last_hw_bw_req = rate_wanted; 17985 } 17986 } else { 17987 /* We just need to adjust the segment size */ 17988 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 17989 rack_log_hdwr_pacing(rack, 17990 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 17991 0, 4); 17992 rack->r_ctl.last_hw_bw_req = rate_wanted; 17993 } 17994 } 17995 } 17996 if (minslot && (minslot > slot)) { 17997 rack_log_pacing_delay_calc(rack, minslot, slot, rack->r_ctl.crte->rate, bw_est, lentim, 17998 98, __LINE__, NULL, 0); 17999 slot = minslot; 18000 } 18001 done_w_hdwr: 18002 if (rack_limit_time_with_srtt && 18003 (rack->use_fixed_rate == 0) && 18004 (rack->rack_hdrw_pacing == 0)) { 18005 /* 18006 * Sanity check, we do not allow the pacing delay 18007 * to be longer than the SRTT of the path. If it is 18008 * a slow path, then adding a packet should increase 18009 * the RTT and compensate for this i.e. the srtt will 18010 * be greater so the allowed pacing time will be greater. 18011 * 18012 * Note this restriction is not for where a peak rate 18013 * is set, we are doing fixed pacing or hardware pacing. 18014 */ 18015 if (rack->rc_tp->t_srtt) 18016 srtt = rack->rc_tp->t_srtt; 18017 else 18018 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 18019 if (srtt < (uint64_t)slot) { 18020 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); 18021 slot = srtt; 18022 } 18023 } 18024 /*******************************************************************/ 18025 /* RRS: We insert paced call to stats here for len and rate_wanted */ 18026 /*******************************************************************/ 18027 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); 18028 } 18029 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 18030 /* 18031 * If this rate is seeing enobufs when it 18032 * goes to send then either the nic is out 18033 * of gas or we are mis-estimating the time 18034 * somehow and not letting the queue empty 18035 * completely. Lets add to the pacing time. 18036 */ 18037 int hw_boost_delay; 18038 18039 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 18040 if (hw_boost_delay > rack_enobuf_hw_max) 18041 hw_boost_delay = rack_enobuf_hw_max; 18042 else if (hw_boost_delay < rack_enobuf_hw_min) 18043 hw_boost_delay = rack_enobuf_hw_min; 18044 slot += hw_boost_delay; 18045 } 18046 return (slot); 18047 } 18048 18049 static void 18050 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 18051 tcp_seq startseq, uint32_t sb_offset) 18052 { 18053 struct rack_sendmap *my_rsm = NULL; 18054 18055 if (tp->t_state < TCPS_ESTABLISHED) { 18056 /* 18057 * We don't start any measurements if we are 18058 * not at least established. 18059 */ 18060 return; 18061 } 18062 if (tp->t_state >= TCPS_FIN_WAIT_1) { 18063 /* 18064 * We will get no more data into the SB 18065 * this means we need to have the data available 18066 * before we start a measurement. 18067 */ 18068 18069 if (sbavail(&tptosocket(tp)->so_snd) < 18070 max(rc_init_window(rack), 18071 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { 18072 /* Nope not enough data */ 18073 return; 18074 } 18075 } 18076 tp->t_flags |= TF_GPUTINPROG; 18077 rack->r_ctl.rc_gp_cumack_ts = 0; 18078 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 18079 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 18080 tp->gput_seq = startseq; 18081 rack->app_limited_needs_set = 0; 18082 if (rack->in_probe_rtt) 18083 rack->measure_saw_probe_rtt = 1; 18084 else if ((rack->measure_saw_probe_rtt) && 18085 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 18086 rack->measure_saw_probe_rtt = 0; 18087 if (rack->rc_gp_filled) 18088 tp->gput_ts = rack->r_ctl.last_cumack_advance; 18089 else { 18090 /* Special case initial measurement */ 18091 struct timeval tv; 18092 18093 tp->gput_ts = tcp_get_usecs(&tv); 18094 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 18095 } 18096 /* 18097 * We take a guess out into the future, 18098 * if we have no measurement and no 18099 * initial rate, we measure the first 18100 * initial-windows worth of data to 18101 * speed up getting some GP measurement and 18102 * thus start pacing. 18103 */ 18104 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 18105 rack->app_limited_needs_set = 1; 18106 tp->gput_ack = startseq + max(rc_init_window(rack), 18107 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 18108 rack_log_pacing_delay_calc(rack, 18109 tp->gput_seq, 18110 tp->gput_ack, 18111 0, 18112 tp->gput_ts, 18113 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 18114 9, 18115 __LINE__, NULL, 0); 18116 rack_tend_gp_marks(tp, rack); 18117 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 18118 return; 18119 } 18120 if (sb_offset) { 18121 /* 18122 * We are out somewhere in the sb 18123 * can we use the already outstanding data? 18124 */ 18125 18126 if (rack->r_ctl.rc_app_limited_cnt == 0) { 18127 /* 18128 * Yes first one is good and in this case 18129 * the tp->gput_ts is correctly set based on 18130 * the last ack that arrived (no need to 18131 * set things up when an ack comes in). 18132 */ 18133 my_rsm = tqhash_min(rack->r_ctl.tqh); 18134 if ((my_rsm == NULL) || 18135 (my_rsm->r_rtr_cnt != 1)) { 18136 /* retransmission? */ 18137 goto use_latest; 18138 } 18139 } else { 18140 if (rack->r_ctl.rc_first_appl == NULL) { 18141 /* 18142 * If rc_first_appl is NULL 18143 * then the cnt should be 0. 18144 * This is probably an error, maybe 18145 * a KASSERT would be approprate. 18146 */ 18147 goto use_latest; 18148 } 18149 /* 18150 * If we have a marker pointer to the last one that is 18151 * app limited we can use that, but we need to set 18152 * things up so that when it gets ack'ed we record 18153 * the ack time (if its not already acked). 18154 */ 18155 rack->app_limited_needs_set = 1; 18156 /* 18157 * We want to get to the rsm that is either 18158 * next with space i.e. over 1 MSS or the one 18159 * after that (after the app-limited). 18160 */ 18161 my_rsm = tqhash_next(rack->r_ctl.tqh, rack->r_ctl.rc_first_appl); 18162 if (my_rsm) { 18163 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 18164 /* Have to use the next one */ 18165 my_rsm = tqhash_next(rack->r_ctl.tqh, my_rsm); 18166 else { 18167 /* Use after the first MSS of it is acked */ 18168 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 18169 goto start_set; 18170 } 18171 } 18172 if ((my_rsm == NULL) || 18173 (my_rsm->r_rtr_cnt != 1)) { 18174 /* 18175 * Either its a retransmit or 18176 * the last is the app-limited one. 18177 */ 18178 goto use_latest; 18179 } 18180 } 18181 tp->gput_seq = my_rsm->r_start; 18182 start_set: 18183 if (my_rsm->r_flags & RACK_ACKED) { 18184 /* 18185 * This one has been acked use the arrival ack time 18186 */ 18187 struct rack_sendmap *nrsm; 18188 18189 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 18190 rack->app_limited_needs_set = 0; 18191 /* 18192 * Ok in this path we need to use the r_end now 18193 * since this guy is the starting ack. 18194 */ 18195 tp->gput_seq = my_rsm->r_end; 18196 /* 18197 * We also need to adjust up the sendtime 18198 * to the send of the next data after my_rsm. 18199 */ 18200 nrsm = tqhash_next(rack->r_ctl.tqh, my_rsm); 18201 if (nrsm != NULL) 18202 my_rsm = nrsm; 18203 else { 18204 /* 18205 * The next as not been sent, thats the 18206 * case for using the latest. 18207 */ 18208 goto use_latest; 18209 } 18210 } 18211 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; 18212 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 18213 rack->r_ctl.rc_gp_cumack_ts = 0; 18214 rack_log_pacing_delay_calc(rack, 18215 tp->gput_seq, 18216 tp->gput_ack, 18217 (uint64_t)my_rsm, 18218 tp->gput_ts, 18219 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 18220 9, 18221 __LINE__, my_rsm, 0); 18222 /* Now lets make sure all are marked as they should be */ 18223 rack_tend_gp_marks(tp, rack); 18224 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 18225 return; 18226 } 18227 18228 use_latest: 18229 /* 18230 * We don't know how long we may have been 18231 * idle or if this is the first-send. Lets 18232 * setup the flag so we will trim off 18233 * the first ack'd data so we get a true 18234 * measurement. 18235 */ 18236 rack->app_limited_needs_set = 1; 18237 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 18238 rack->r_ctl.rc_gp_cumack_ts = 0; 18239 /* Find this guy so we can pull the send time */ 18240 my_rsm = tqhash_find(rack->r_ctl.tqh, startseq); 18241 if (my_rsm) { 18242 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; 18243 if (my_rsm->r_flags & RACK_ACKED) { 18244 /* 18245 * Unlikely since its probably what was 18246 * just transmitted (but I am paranoid). 18247 */ 18248 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 18249 rack->app_limited_needs_set = 0; 18250 } 18251 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 18252 /* This also is unlikely */ 18253 tp->gput_seq = my_rsm->r_start; 18254 } 18255 } else { 18256 /* 18257 * TSNH unless we have some send-map limit, 18258 * and even at that it should not be hitting 18259 * that limit (we should have stopped sending). 18260 */ 18261 struct timeval tv; 18262 18263 microuptime(&tv); 18264 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 18265 } 18266 rack_tend_gp_marks(tp, rack); 18267 rack_log_pacing_delay_calc(rack, 18268 tp->gput_seq, 18269 tp->gput_ack, 18270 (uint64_t)my_rsm, 18271 tp->gput_ts, 18272 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 18273 9, __LINE__, NULL, 0); 18274 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 18275 } 18276 18277 static inline uint32_t 18278 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 18279 uint32_t avail, int32_t sb_offset) 18280 { 18281 uint32_t len; 18282 uint32_t sendwin; 18283 18284 if (tp->snd_wnd > cwnd_to_use) 18285 sendwin = cwnd_to_use; 18286 else 18287 sendwin = tp->snd_wnd; 18288 if (ctf_outstanding(tp) >= tp->snd_wnd) { 18289 /* We never want to go over our peers rcv-window */ 18290 len = 0; 18291 } else { 18292 uint32_t flight; 18293 18294 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 18295 if (flight >= sendwin) { 18296 /* 18297 * We have in flight what we are allowed by cwnd (if 18298 * it was rwnd blocking it would have hit above out 18299 * >= tp->snd_wnd). 18300 */ 18301 return (0); 18302 } 18303 len = sendwin - flight; 18304 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 18305 /* We would send too much (beyond the rwnd) */ 18306 len = tp->snd_wnd - ctf_outstanding(tp); 18307 } 18308 if ((len + sb_offset) > avail) { 18309 /* 18310 * We don't have that much in the SB, how much is 18311 * there? 18312 */ 18313 len = avail - sb_offset; 18314 } 18315 } 18316 return (len); 18317 } 18318 18319 static void 18320 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 18321 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 18322 int rsm_is_null, int optlen, int line, uint16_t mode) 18323 { 18324 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 18325 union tcp_log_stackspecific log; 18326 struct timeval tv; 18327 18328 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18329 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18330 log.u_bbr.flex1 = error; 18331 log.u_bbr.flex2 = flags; 18332 log.u_bbr.flex3 = rsm_is_null; 18333 log.u_bbr.flex4 = ipoptlen; 18334 log.u_bbr.flex5 = tp->rcv_numsacks; 18335 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18336 log.u_bbr.flex7 = optlen; 18337 log.u_bbr.flex8 = rack->r_fsb_inited; 18338 log.u_bbr.applimited = rack->r_fast_output; 18339 log.u_bbr.bw_inuse = rack_get_bw(rack); 18340 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 18341 log.u_bbr.cwnd_gain = mode; 18342 log.u_bbr.pkts_out = orig_len; 18343 log.u_bbr.lt_epoch = len; 18344 log.u_bbr.delivered = line; 18345 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 18346 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18347 tcp_log_event(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 18348 len, &log, false, NULL, __func__, __LINE__, &tv); 18349 } 18350 } 18351 18352 18353 static struct mbuf * 18354 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 18355 struct rack_fast_send_blk *fsb, 18356 int32_t seglimit, int32_t segsize, int hw_tls) 18357 { 18358 #ifdef KERN_TLS 18359 struct ktls_session *tls, *ntls; 18360 #ifdef INVARIANTS 18361 struct mbuf *start; 18362 #endif 18363 #endif 18364 struct mbuf *m, *n, **np, *smb; 18365 struct mbuf *top; 18366 int32_t off, soff; 18367 int32_t len = *plen; 18368 int32_t fragsize; 18369 int32_t len_cp = 0; 18370 uint32_t mlen, frags; 18371 18372 soff = off = the_off; 18373 smb = m = the_m; 18374 np = ⊤ 18375 top = NULL; 18376 #ifdef KERN_TLS 18377 if (hw_tls && (m->m_flags & M_EXTPG)) 18378 tls = m->m_epg_tls; 18379 else 18380 tls = NULL; 18381 #ifdef INVARIANTS 18382 start = m; 18383 #endif 18384 #endif 18385 while (len > 0) { 18386 if (m == NULL) { 18387 *plen = len_cp; 18388 break; 18389 } 18390 #ifdef KERN_TLS 18391 if (hw_tls) { 18392 if (m->m_flags & M_EXTPG) 18393 ntls = m->m_epg_tls; 18394 else 18395 ntls = NULL; 18396 18397 /* 18398 * Avoid mixing TLS records with handshake 18399 * data or TLS records from different 18400 * sessions. 18401 */ 18402 if (tls != ntls) { 18403 MPASS(m != start); 18404 *plen = len_cp; 18405 break; 18406 } 18407 } 18408 #endif 18409 mlen = min(len, m->m_len - off); 18410 if (seglimit) { 18411 /* 18412 * For M_EXTPG mbufs, add 3 segments 18413 * + 1 in case we are crossing page boundaries 18414 * + 2 in case the TLS hdr/trailer are used 18415 * It is cheaper to just add the segments 18416 * than it is to take the cache miss to look 18417 * at the mbuf ext_pgs state in detail. 18418 */ 18419 if (m->m_flags & M_EXTPG) { 18420 fragsize = min(segsize, PAGE_SIZE); 18421 frags = 3; 18422 } else { 18423 fragsize = segsize; 18424 frags = 0; 18425 } 18426 18427 /* Break if we really can't fit anymore. */ 18428 if ((frags + 1) >= seglimit) { 18429 *plen = len_cp; 18430 break; 18431 } 18432 18433 /* 18434 * Reduce size if you can't copy the whole 18435 * mbuf. If we can't copy the whole mbuf, also 18436 * adjust len so the loop will end after this 18437 * mbuf. 18438 */ 18439 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 18440 mlen = (seglimit - frags - 1) * fragsize; 18441 len = mlen; 18442 *plen = len_cp + len; 18443 } 18444 frags += howmany(mlen, fragsize); 18445 if (frags == 0) 18446 frags++; 18447 seglimit -= frags; 18448 KASSERT(seglimit > 0, 18449 ("%s: seglimit went too low", __func__)); 18450 } 18451 n = m_get(M_NOWAIT, m->m_type); 18452 *np = n; 18453 if (n == NULL) 18454 goto nospace; 18455 n->m_len = mlen; 18456 soff += mlen; 18457 len_cp += n->m_len; 18458 if (m->m_flags & (M_EXT | M_EXTPG)) { 18459 n->m_data = m->m_data + off; 18460 mb_dupcl(n, m); 18461 } else { 18462 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 18463 (u_int)n->m_len); 18464 } 18465 len -= n->m_len; 18466 off = 0; 18467 m = m->m_next; 18468 np = &n->m_next; 18469 if (len || (soff == smb->m_len)) { 18470 /* 18471 * We have more so we move forward or 18472 * we have consumed the entire mbuf and 18473 * len has fell to 0. 18474 */ 18475 soff = 0; 18476 smb = m; 18477 } 18478 18479 } 18480 if (fsb != NULL) { 18481 fsb->m = smb; 18482 fsb->off = soff; 18483 if (smb) { 18484 /* 18485 * Save off the size of the mbuf. We do 18486 * this so that we can recognize when it 18487 * has been trimmed by sbcut() as acks 18488 * come in. 18489 */ 18490 fsb->o_m_len = smb->m_len; 18491 fsb->o_t_len = M_TRAILINGROOM(smb); 18492 } else { 18493 /* 18494 * This is the case where the next mbuf went to NULL. This 18495 * means with this copy we have sent everything in the sb. 18496 * In theory we could clear the fast_output flag, but lets 18497 * not since its possible that we could get more added 18498 * and acks that call the extend function which would let 18499 * us send more. 18500 */ 18501 fsb->o_m_len = 0; 18502 fsb->o_t_len = 0; 18503 } 18504 } 18505 return (top); 18506 nospace: 18507 if (top) 18508 m_freem(top); 18509 return (NULL); 18510 18511 } 18512 18513 /* 18514 * This is a copy of m_copym(), taking the TSO segment size/limit 18515 * constraints into account, and advancing the sndptr as it goes. 18516 */ 18517 static struct mbuf * 18518 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen, 18519 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff) 18520 { 18521 struct mbuf *m, *n; 18522 int32_t soff; 18523 18524 m = rack->r_ctl.fsb.m; 18525 if (M_TRAILINGROOM(m) != rack->r_ctl.fsb.o_t_len) { 18526 /* 18527 * The trailing space changed, mbufs can grow 18528 * at the tail but they can't shrink from 18529 * it, KASSERT that. Adjust the orig_m_len to 18530 * compensate for this change. 18531 */ 18532 KASSERT((rack->r_ctl.fsb.o_t_len > M_TRAILINGROOM(m)), 18533 ("mbuf:%p rack:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n", 18534 m, 18535 rack, 18536 (intmax_t)M_TRAILINGROOM(m), 18537 rack->r_ctl.fsb.o_t_len, 18538 rack->r_ctl.fsb.o_m_len, 18539 m->m_len)); 18540 rack->r_ctl.fsb.o_m_len += (rack->r_ctl.fsb.o_t_len - M_TRAILINGROOM(m)); 18541 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(m); 18542 } 18543 if (m->m_len < rack->r_ctl.fsb.o_m_len) { 18544 /* 18545 * Mbuf shrank, trimmed off the top by an ack, our 18546 * offset changes. 18547 */ 18548 KASSERT((rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len)), 18549 ("mbuf:%p len:%u rack:%p oml:%u soff:%u\n", 18550 m, m->m_len, 18551 rack, rack->r_ctl.fsb.o_m_len, 18552 rack->r_ctl.fsb.off)); 18553 18554 if (rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len- m->m_len)) 18555 rack->r_ctl.fsb.off -= (rack->r_ctl.fsb.o_m_len - m->m_len); 18556 else 18557 rack->r_ctl.fsb.off = 0; 18558 rack->r_ctl.fsb.o_m_len = m->m_len; 18559 #ifdef INVARIANTS 18560 } else if (m->m_len > rack->r_ctl.fsb.o_m_len) { 18561 panic("rack:%p m:%p m_len grew outside of t_space compensation", 18562 rack, m); 18563 #endif 18564 } 18565 soff = rack->r_ctl.fsb.off; 18566 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff)); 18567 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen)); 18568 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?", 18569 __FUNCTION__, 18570 rack, *plen, m, m->m_len)); 18571 /* Save off the right location before we copy and advance */ 18572 *s_soff = soff; 18573 *s_mb = rack->r_ctl.fsb.m; 18574 n = rack_fo_base_copym(m, soff, plen, 18575 &rack->r_ctl.fsb, 18576 seglimit, segsize, rack->r_ctl.fsb.hw_tls); 18577 return (n); 18578 } 18579 18580 /* Log the buffer level */ 18581 static void 18582 rack_log_queue_level(struct tcpcb *tp, struct tcp_rack *rack, 18583 int len, struct timeval *tv, 18584 uint32_t cts) 18585 { 18586 uint32_t p_rate = 0, p_queue = 0, err = 0; 18587 union tcp_log_stackspecific log; 18588 18589 #ifdef RATELIMIT 18590 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); 18591 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); 18592 #endif 18593 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18594 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18595 log.u_bbr.flex1 = p_rate; 18596 log.u_bbr.flex2 = p_queue; 18597 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; 18598 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; 18599 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; 18600 log.u_bbr.flex7 = 99; 18601 log.u_bbr.flex8 = 0; 18602 log.u_bbr.pkts_out = err; 18603 log.u_bbr.delRate = rack->r_ctl.crte->rate; 18604 log.u_bbr.timeStamp = cts; 18605 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18606 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0, 18607 len, &log, false, NULL, __func__, __LINE__, tv); 18608 18609 } 18610 18611 static uint32_t 18612 rack_check_queue_level(struct tcp_rack *rack, struct tcpcb *tp, 18613 struct timeval *tv, uint32_t cts, int len, uint32_t segsiz) 18614 { 18615 uint64_t lentime = 0; 18616 #ifdef RATELIMIT 18617 uint32_t p_rate = 0, p_queue = 0, err; 18618 union tcp_log_stackspecific log; 18619 uint64_t bw; 18620 18621 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); 18622 /* Failed or queue is zero */ 18623 if (err || (p_queue == 0)) { 18624 lentime = 0; 18625 goto out; 18626 } 18627 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); 18628 if (err) { 18629 lentime = 0; 18630 goto out; 18631 } 18632 /* 18633 * If we reach here we have some bytes in 18634 * the queue. The number returned is a value 18635 * between 0 and 0xffff where ffff is full 18636 * and 0 is empty. So how best to make this into 18637 * something usable? 18638 * 18639 * The "safer" way is lets take the b/w gotten 18640 * from the query (which should be our b/w rate) 18641 * and pretend that a full send (our rc_pace_max_segs) 18642 * is outstanding. We factor it so its as if a full 18643 * number of our MSS segment is terms of full 18644 * ethernet segments are outstanding. 18645 */ 18646 bw = p_rate / 8; 18647 if (bw) { 18648 lentime = (rack->r_ctl.rc_pace_max_segs / segsiz); 18649 lentime *= ETHERNET_SEGMENT_SIZE; 18650 lentime *= (uint64_t)HPTS_USEC_IN_SEC; 18651 lentime /= bw; 18652 } else { 18653 /* TSNH -- KASSERT? */ 18654 lentime = 0; 18655 } 18656 out: 18657 if (tcp_bblogging_on(tp)) { 18658 memset(&log, 0, sizeof(log)); 18659 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18660 log.u_bbr.flex1 = p_rate; 18661 log.u_bbr.flex2 = p_queue; 18662 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; 18663 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; 18664 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; 18665 log.u_bbr.flex7 = 99; 18666 log.u_bbr.flex8 = 0; 18667 log.u_bbr.pkts_out = err; 18668 log.u_bbr.delRate = rack->r_ctl.crte->rate; 18669 log.u_bbr.cur_del_rate = lentime; 18670 log.u_bbr.timeStamp = cts; 18671 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18672 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0, 18673 len, &log, false, NULL, __func__, __LINE__,tv); 18674 } 18675 #endif 18676 return ((uint32_t)lentime); 18677 } 18678 18679 static int 18680 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm, 18681 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp) 18682 { 18683 /* 18684 * Enter the fast retransmit path. We are given that a sched_pin is 18685 * in place (if accounting is compliled in) and the cycle count taken 18686 * at the entry is in the ts_val. The concept her is that the rsm 18687 * now holds the mbuf offsets and such so we can directly transmit 18688 * without a lot of overhead, the len field is already set for 18689 * us to prohibit us from sending too much (usually its 1MSS). 18690 */ 18691 struct ip *ip = NULL; 18692 struct udphdr *udp = NULL; 18693 struct tcphdr *th = NULL; 18694 struct mbuf *m = NULL; 18695 struct inpcb *inp; 18696 uint8_t *cpto; 18697 struct tcp_log_buffer *lgb; 18698 #ifdef TCP_ACCOUNTING 18699 uint64_t crtsc; 18700 int cnt_thru = 1; 18701 #endif 18702 struct tcpopt to; 18703 u_char opt[TCP_MAXOLEN]; 18704 uint32_t hdrlen, optlen; 18705 int32_t slot, segsiz, max_val, tso = 0, error = 0, ulen = 0; 18706 uint16_t flags; 18707 uint32_t if_hw_tsomaxsegcount = 0, startseq; 18708 uint32_t if_hw_tsomaxsegsize; 18709 int32_t ip_sendflag = IP_NO_SND_TAG_RL; 18710 18711 #ifdef INET6 18712 struct ip6_hdr *ip6 = NULL; 18713 18714 if (rack->r_is_v6) { 18715 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 18716 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 18717 } else 18718 #endif /* INET6 */ 18719 { 18720 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 18721 hdrlen = sizeof(struct tcpiphdr); 18722 } 18723 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 18724 goto failed; 18725 } 18726 if (doing_tlp) { 18727 /* Its a TLP add the flag, it may already be there but be sure */ 18728 rsm->r_flags |= RACK_TLP; 18729 } else { 18730 /* If it was a TLP it is not not on this retransmit */ 18731 rsm->r_flags &= ~RACK_TLP; 18732 } 18733 startseq = rsm->r_start; 18734 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 18735 inp = rack->rc_inp; 18736 to.to_flags = 0; 18737 flags = tcp_outflags[tp->t_state]; 18738 if (flags & (TH_SYN|TH_RST)) { 18739 goto failed; 18740 } 18741 if (rsm->r_flags & RACK_HAS_FIN) { 18742 /* We can't send a FIN here */ 18743 goto failed; 18744 } 18745 if (flags & TH_FIN) { 18746 /* We never send a FIN */ 18747 flags &= ~TH_FIN; 18748 } 18749 if (tp->t_flags & TF_RCVD_TSTMP) { 18750 to.to_tsval = ms_cts + tp->ts_offset; 18751 to.to_tsecr = tp->ts_recent; 18752 to.to_flags = TOF_TS; 18753 } 18754 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18755 /* TCP-MD5 (RFC2385). */ 18756 if (tp->t_flags & TF_SIGNATURE) 18757 to.to_flags |= TOF_SIGNATURE; 18758 #endif 18759 optlen = tcp_addoptions(&to, opt); 18760 hdrlen += optlen; 18761 udp = rack->r_ctl.fsb.udp; 18762 if (udp) 18763 hdrlen += sizeof(struct udphdr); 18764 if (rack->r_ctl.rc_pace_max_segs) 18765 max_val = rack->r_ctl.rc_pace_max_segs; 18766 else if (rack->rc_user_set_max_segs) 18767 max_val = rack->rc_user_set_max_segs * segsiz; 18768 else 18769 max_val = len; 18770 if ((tp->t_flags & TF_TSO) && 18771 V_tcp_do_tso && 18772 (len > segsiz) && 18773 (tp->t_port == 0)) 18774 tso = 1; 18775 #ifdef INET6 18776 if (MHLEN < hdrlen + max_linkhdr) 18777 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 18778 else 18779 #endif 18780 m = m_gethdr(M_NOWAIT, MT_DATA); 18781 if (m == NULL) 18782 goto failed; 18783 m->m_data += max_linkhdr; 18784 m->m_len = hdrlen; 18785 th = rack->r_ctl.fsb.th; 18786 /* Establish the len to send */ 18787 if (len > max_val) 18788 len = max_val; 18789 if ((tso) && (len + optlen > segsiz)) { 18790 uint32_t if_hw_tsomax; 18791 int32_t max_len; 18792 18793 /* extract TSO information */ 18794 if_hw_tsomax = tp->t_tsomax; 18795 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 18796 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 18797 /* 18798 * Check if we should limit by maximum payload 18799 * length: 18800 */ 18801 if (if_hw_tsomax != 0) { 18802 /* compute maximum TSO length */ 18803 max_len = (if_hw_tsomax - hdrlen - 18804 max_linkhdr); 18805 if (max_len <= 0) { 18806 goto failed; 18807 } else if (len > max_len) { 18808 len = max_len; 18809 } 18810 } 18811 if (len <= segsiz) { 18812 /* 18813 * In case there are too many small fragments don't 18814 * use TSO: 18815 */ 18816 tso = 0; 18817 } 18818 } else { 18819 tso = 0; 18820 } 18821 if ((tso == 0) && (len > segsiz)) 18822 len = segsiz; 18823 (void)tcp_get_usecs(tv); 18824 if ((len == 0) || 18825 (len <= MHLEN - hdrlen - max_linkhdr)) { 18826 goto failed; 18827 } 18828 th->th_seq = htonl(rsm->r_start); 18829 th->th_ack = htonl(tp->rcv_nxt); 18830 /* 18831 * The PUSH bit should only be applied 18832 * if the full retransmission is made. If 18833 * we are sending less than this is the 18834 * left hand edge and should not have 18835 * the PUSH bit. 18836 */ 18837 if ((rsm->r_flags & RACK_HAD_PUSH) && 18838 (len == (rsm->r_end - rsm->r_start))) 18839 flags |= TH_PUSH; 18840 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 18841 if (th->th_win == 0) { 18842 tp->t_sndzerowin++; 18843 tp->t_flags |= TF_RXWIN0SENT; 18844 } else 18845 tp->t_flags &= ~TF_RXWIN0SENT; 18846 if (rsm->r_flags & RACK_TLP) { 18847 /* 18848 * TLP should not count in retran count, but 18849 * in its own bin 18850 */ 18851 counter_u64_add(rack_tlp_retran, 1); 18852 counter_u64_add(rack_tlp_retran_bytes, len); 18853 } else { 18854 tp->t_sndrexmitpack++; 18855 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 18856 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 18857 } 18858 #ifdef STATS 18859 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 18860 len); 18861 #endif 18862 if (rsm->m == NULL) 18863 goto failed; 18864 if (rsm->m && 18865 ((rsm->orig_m_len != rsm->m->m_len) || 18866 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { 18867 /* Fix up the orig_m_len and possibly the mbuf offset */ 18868 rack_adjust_orig_mlen(rsm); 18869 } 18870 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 18871 if (len <= segsiz) { 18872 /* 18873 * Must have ran out of mbufs for the copy 18874 * shorten it to no longer need tso. Lets 18875 * not put on sendalot since we are low on 18876 * mbufs. 18877 */ 18878 tso = 0; 18879 } 18880 if ((m->m_next == NULL) || (len <= 0)){ 18881 goto failed; 18882 } 18883 if (udp) { 18884 if (rack->r_is_v6) 18885 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18886 else 18887 ulen = hdrlen + len - sizeof(struct ip); 18888 udp->uh_ulen = htons(ulen); 18889 } 18890 m->m_pkthdr.rcvif = (struct ifnet *)0; 18891 if (TCPS_HAVERCVDSYN(tp->t_state) && 18892 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 18893 int ect = tcp_ecn_output_established(tp, &flags, len, true); 18894 if ((tp->t_state == TCPS_SYN_RECEIVED) && 18895 (tp->t_flags2 & TF2_ECN_SND_ECE)) 18896 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 18897 #ifdef INET6 18898 if (rack->r_is_v6) { 18899 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 18900 ip6->ip6_flow |= htonl(ect << 20); 18901 } 18902 else 18903 #endif 18904 { 18905 ip->ip_tos &= ~IPTOS_ECN_MASK; 18906 ip->ip_tos |= ect; 18907 } 18908 } 18909 if (rack->r_ctl.crte != NULL) { 18910 /* See if we can send via the hw queue */ 18911 slot = rack_check_queue_level(rack, tp, tv, cts, len, segsiz); 18912 /* If there is nothing in queue (no pacing time) we can send via the hw queue */ 18913 if (slot == 0) 18914 ip_sendflag = 0; 18915 } 18916 tcp_set_flags(th, flags); 18917 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 18918 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18919 if (to.to_flags & TOF_SIGNATURE) { 18920 /* 18921 * Calculate MD5 signature and put it into the place 18922 * determined before. 18923 * NOTE: since TCP options buffer doesn't point into 18924 * mbuf's data, calculate offset and use it. 18925 */ 18926 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 18927 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 18928 /* 18929 * Do not send segment if the calculation of MD5 18930 * digest has failed. 18931 */ 18932 goto failed; 18933 } 18934 } 18935 #endif 18936 #ifdef INET6 18937 if (rack->r_is_v6) { 18938 if (tp->t_port) { 18939 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 18940 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18941 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 18942 th->th_sum = htons(0); 18943 UDPSTAT_INC(udps_opackets); 18944 } else { 18945 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 18946 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18947 th->th_sum = in6_cksum_pseudo(ip6, 18948 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 18949 0); 18950 } 18951 } 18952 #endif 18953 #if defined(INET6) && defined(INET) 18954 else 18955 #endif 18956 #ifdef INET 18957 { 18958 if (tp->t_port) { 18959 m->m_pkthdr.csum_flags = CSUM_UDP; 18960 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18961 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 18962 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 18963 th->th_sum = htons(0); 18964 UDPSTAT_INC(udps_opackets); 18965 } else { 18966 m->m_pkthdr.csum_flags = CSUM_TCP; 18967 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18968 th->th_sum = in_pseudo(ip->ip_src.s_addr, 18969 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 18970 IPPROTO_TCP + len + optlen)); 18971 } 18972 /* IP version must be set here for ipv4/ipv6 checking later */ 18973 KASSERT(ip->ip_v == IPVERSION, 18974 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 18975 } 18976 #endif 18977 if (tso) { 18978 /* 18979 * Here we use segsiz since we have no added options besides 18980 * any standard timestamp options (no DSACKs or SACKS are sent 18981 * via either fast-path). 18982 */ 18983 KASSERT(len > segsiz, 18984 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 18985 m->m_pkthdr.csum_flags |= CSUM_TSO; 18986 m->m_pkthdr.tso_segsz = segsiz; 18987 } 18988 #ifdef INET6 18989 if (rack->r_is_v6) { 18990 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 18991 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 18992 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 18993 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18994 else 18995 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18996 } 18997 #endif 18998 #if defined(INET) && defined(INET6) 18999 else 19000 #endif 19001 #ifdef INET 19002 { 19003 ip->ip_len = htons(m->m_pkthdr.len); 19004 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 19005 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 19006 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 19007 if (tp->t_port == 0 || len < V_tcp_minmss) { 19008 ip->ip_off |= htons(IP_DF); 19009 } 19010 } else { 19011 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 19012 } 19013 } 19014 #endif 19015 if (doing_tlp == 0) { 19016 /* Set we retransmitted */ 19017 rack->rc_gp_saw_rec = 1; 19018 } else { 19019 /* Its a TLP set ca or ss */ 19020 if (tp->snd_cwnd > tp->snd_ssthresh) { 19021 /* Set we sent in CA */ 19022 rack->rc_gp_saw_ca = 1; 19023 } else { 19024 /* Set we sent in SS */ 19025 rack->rc_gp_saw_ss = 1; 19026 } 19027 } 19028 /* Time to copy in our header */ 19029 cpto = mtod(m, uint8_t *); 19030 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 19031 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 19032 if (optlen) { 19033 bcopy(opt, th + 1, optlen); 19034 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 19035 } else { 19036 th->th_off = sizeof(struct tcphdr) >> 2; 19037 } 19038 if (tcp_bblogging_on(rack->rc_tp)) { 19039 union tcp_log_stackspecific log; 19040 19041 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 19042 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 19043 counter_u64_add(rack_collapsed_win_rxt, 1); 19044 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 19045 } 19046 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 19047 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 19048 if (rack->rack_no_prr) 19049 log.u_bbr.flex1 = 0; 19050 else 19051 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 19052 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 19053 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 19054 log.u_bbr.flex4 = max_val; 19055 /* Save off the early/late values */ 19056 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 19057 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 19058 log.u_bbr.bw_inuse = rack_get_bw(rack); 19059 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 19060 if (doing_tlp == 0) 19061 log.u_bbr.flex8 = 1; 19062 else 19063 log.u_bbr.flex8 = 2; 19064 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 19065 log.u_bbr.flex7 = 55; 19066 log.u_bbr.pkts_out = tp->t_maxseg; 19067 log.u_bbr.timeStamp = cts; 19068 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 19069 if (rsm && (rsm->r_rtr_cnt > 0)) { 19070 /* 19071 * When we have a retransmit we want to log the 19072 * burst at send and flight at send from before. 19073 */ 19074 log.u_bbr.flex5 = rsm->r_fas; 19075 log.u_bbr.bbr_substate = rsm->r_bas; 19076 } else { 19077 /* 19078 * This is currently unlikely until we do the 19079 * packet pair probes but I will add it for completeness. 19080 */ 19081 log.u_bbr.flex5 = log.u_bbr.inflight; 19082 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 19083 } 19084 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 19085 log.u_bbr.delivered = 0; 19086 log.u_bbr.rttProp = (uint64_t)rsm; 19087 log.u_bbr.delRate = rsm->r_flags; 19088 log.u_bbr.delRate <<= 31; 19089 log.u_bbr.delRate |= rack->r_must_retran; 19090 log.u_bbr.delRate <<= 1; 19091 log.u_bbr.delRate |= 1; 19092 log.u_bbr.pkt_epoch = __LINE__; 19093 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 19094 len, &log, false, NULL, __func__, __LINE__, tv); 19095 } else 19096 lgb = NULL; 19097 if ((rack->r_ctl.crte != NULL) && 19098 tcp_bblogging_on(tp)) { 19099 rack_log_queue_level(tp, rack, len, tv, cts); 19100 } 19101 #ifdef INET6 19102 if (rack->r_is_v6) { 19103 error = ip6_output(m, inp->in6p_outputopts, 19104 &inp->inp_route6, 19105 ip_sendflag, NULL, NULL, inp); 19106 } 19107 else 19108 #endif 19109 #ifdef INET 19110 { 19111 error = ip_output(m, NULL, 19112 &inp->inp_route, 19113 ip_sendflag, 0, inp); 19114 } 19115 #endif 19116 m = NULL; 19117 if (lgb) { 19118 lgb->tlb_errno = error; 19119 lgb = NULL; 19120 } 19121 /* Move snd_nxt to snd_max so we don't have false retransmissions */ 19122 tp->snd_nxt = tp->snd_max; 19123 if (error) { 19124 goto failed; 19125 } else if (rack->rc_hw_nobuf && (ip_sendflag != IP_NO_SND_TAG_RL)) { 19126 rack->rc_hw_nobuf = 0; 19127 rack->r_ctl.rc_agg_delayed = 0; 19128 rack->r_early = 0; 19129 rack->r_late = 0; 19130 rack->r_ctl.rc_agg_early = 0; 19131 } 19132 19133 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 19134 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls, segsiz); 19135 if (doing_tlp) { 19136 rack->rc_tlp_in_progress = 1; 19137 rack->r_ctl.rc_tlp_cnt_out++; 19138 } 19139 if (error == 0) { 19140 counter_u64_add(rack_total_bytes, len); 19141 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 19142 if (doing_tlp) { 19143 rack->rc_last_sent_tlp_past_cumack = 0; 19144 rack->rc_last_sent_tlp_seq_valid = 1; 19145 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 19146 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 19147 } 19148 if (rack->r_ctl.rc_prr_sndcnt >= len) 19149 rack->r_ctl.rc_prr_sndcnt -= len; 19150 else 19151 rack->r_ctl.rc_prr_sndcnt = 0; 19152 } 19153 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 19154 rack->forced_ack = 0; /* If we send something zap the FA flag */ 19155 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 19156 rack->r_ctl.retran_during_recovery += len; 19157 { 19158 int idx; 19159 19160 idx = (len / segsiz) + 3; 19161 if (idx >= TCP_MSS_ACCT_ATIMER) 19162 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 19163 else 19164 counter_u64_add(rack_out_size[idx], 1); 19165 } 19166 if (tp->t_rtttime == 0) { 19167 tp->t_rtttime = ticks; 19168 tp->t_rtseq = startseq; 19169 KMOD_TCPSTAT_INC(tcps_segstimed); 19170 } 19171 counter_u64_add(rack_fto_rsm_send, 1); 19172 if (error && (error == ENOBUFS)) { 19173 if (rack->r_ctl.crte != NULL) { 19174 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); 19175 if (tcp_bblogging_on(rack->rc_tp)) 19176 rack_log_queue_level(tp, rack, len, tv, cts); 19177 } else 19178 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); 19179 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 19180 if (rack->rc_enobuf < 0x7f) 19181 rack->rc_enobuf++; 19182 if (slot < (10 * HPTS_USEC_IN_MSEC)) 19183 slot = 10 * HPTS_USEC_IN_MSEC; 19184 if (rack->r_ctl.crte != NULL) { 19185 counter_u64_add(rack_saw_enobuf_hw, 1); 19186 tcp_rl_log_enobuf(rack->r_ctl.crte); 19187 } 19188 counter_u64_add(rack_saw_enobuf, 1); 19189 } else 19190 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz); 19191 if ((slot == 0) || 19192 (rack->rc_always_pace == 0) || 19193 (rack->r_rr_config == 1)) { 19194 /* 19195 * We have no pacing set or we 19196 * are using old-style rack or 19197 * we are overridden to use the old 1ms pacing. 19198 */ 19199 slot = rack->r_ctl.rc_min_to; 19200 } 19201 rack_start_hpts_timer(rack, tp, cts, slot, len, 0); 19202 #ifdef TCP_ACCOUNTING 19203 crtsc = get_cyclecount(); 19204 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19205 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 19206 } 19207 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19208 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 19209 } 19210 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19211 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); 19212 } 19213 sched_unpin(); 19214 #endif 19215 return (0); 19216 failed: 19217 if (m) 19218 m_free(m); 19219 return (-1); 19220 } 19221 19222 static void 19223 rack_sndbuf_autoscale(struct tcp_rack *rack) 19224 { 19225 /* 19226 * Automatic sizing of send socket buffer. Often the send buffer 19227 * size is not optimally adjusted to the actual network conditions 19228 * at hand (delay bandwidth product). Setting the buffer size too 19229 * small limits throughput on links with high bandwidth and high 19230 * delay (eg. trans-continental/oceanic links). Setting the 19231 * buffer size too big consumes too much real kernel memory, 19232 * especially with many connections on busy servers. 19233 * 19234 * The criteria to step up the send buffer one notch are: 19235 * 1. receive window of remote host is larger than send buffer 19236 * (with a fudge factor of 5/4th); 19237 * 2. send buffer is filled to 7/8th with data (so we actually 19238 * have data to make use of it); 19239 * 3. send buffer fill has not hit maximal automatic size; 19240 * 4. our send window (slow start and cogestion controlled) is 19241 * larger than sent but unacknowledged data in send buffer. 19242 * 19243 * Note that the rack version moves things much faster since 19244 * we want to avoid hitting cache lines in the rack_fast_output() 19245 * path so this is called much less often and thus moves 19246 * the SB forward by a percentage. 19247 */ 19248 struct socket *so; 19249 struct tcpcb *tp; 19250 uint32_t sendwin, scaleup; 19251 19252 tp = rack->rc_tp; 19253 so = rack->rc_inp->inp_socket; 19254 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); 19255 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { 19256 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && 19257 sbused(&so->so_snd) >= 19258 (so->so_snd.sb_hiwat / 8 * 7) && 19259 sbused(&so->so_snd) < V_tcp_autosndbuf_max && 19260 sendwin >= (sbused(&so->so_snd) - 19261 (tp->snd_nxt - tp->snd_una))) { 19262 if (rack_autosndbuf_inc) 19263 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; 19264 else 19265 scaleup = V_tcp_autosndbuf_inc; 19266 if (scaleup < V_tcp_autosndbuf_inc) 19267 scaleup = V_tcp_autosndbuf_inc; 19268 scaleup += so->so_snd.sb_hiwat; 19269 if (scaleup > V_tcp_autosndbuf_max) 19270 scaleup = V_tcp_autosndbuf_max; 19271 if (!sbreserve_locked(so, SO_SND, scaleup, curthread)) 19272 so->so_snd.sb_flags &= ~SB_AUTOSIZE; 19273 } 19274 } 19275 } 19276 19277 static int 19278 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val, 19279 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err) 19280 { 19281 /* 19282 * Enter to do fast output. We are given that the sched_pin is 19283 * in place (if accounting is compiled in) and the cycle count taken 19284 * at entry is in place in ts_val. The idea here is that 19285 * we know how many more bytes needs to be sent (presumably either 19286 * during pacing or to fill the cwnd and that was greater than 19287 * the max-burst). We have how much to send and all the info we 19288 * need to just send. 19289 */ 19290 #ifdef INET 19291 struct ip *ip = NULL; 19292 #endif 19293 struct udphdr *udp = NULL; 19294 struct tcphdr *th = NULL; 19295 struct mbuf *m, *s_mb; 19296 struct inpcb *inp; 19297 uint8_t *cpto; 19298 struct tcp_log_buffer *lgb; 19299 #ifdef TCP_ACCOUNTING 19300 uint64_t crtsc; 19301 #endif 19302 struct tcpopt to; 19303 u_char opt[TCP_MAXOLEN]; 19304 uint32_t hdrlen, optlen; 19305 #ifdef TCP_ACCOUNTING 19306 int cnt_thru = 1; 19307 #endif 19308 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0; 19309 uint16_t flags; 19310 uint32_t s_soff; 19311 uint32_t if_hw_tsomaxsegcount = 0, startseq; 19312 uint32_t if_hw_tsomaxsegsize; 19313 uint16_t add_flag = RACK_SENT_FP; 19314 #ifdef INET6 19315 struct ip6_hdr *ip6 = NULL; 19316 19317 if (rack->r_is_v6) { 19318 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 19319 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 19320 } else 19321 #endif /* INET6 */ 19322 { 19323 #ifdef INET 19324 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 19325 hdrlen = sizeof(struct tcpiphdr); 19326 #endif 19327 } 19328 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 19329 m = NULL; 19330 goto failed; 19331 } 19332 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 19333 startseq = tp->snd_max; 19334 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 19335 inp = rack->rc_inp; 19336 len = rack->r_ctl.fsb.left_to_send; 19337 to.to_flags = 0; 19338 flags = rack->r_ctl.fsb.tcp_flags; 19339 if (tp->t_flags & TF_RCVD_TSTMP) { 19340 to.to_tsval = ms_cts + tp->ts_offset; 19341 to.to_tsecr = tp->ts_recent; 19342 to.to_flags = TOF_TS; 19343 } 19344 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 19345 /* TCP-MD5 (RFC2385). */ 19346 if (tp->t_flags & TF_SIGNATURE) 19347 to.to_flags |= TOF_SIGNATURE; 19348 #endif 19349 optlen = tcp_addoptions(&to, opt); 19350 hdrlen += optlen; 19351 udp = rack->r_ctl.fsb.udp; 19352 if (udp) 19353 hdrlen += sizeof(struct udphdr); 19354 if (rack->r_ctl.rc_pace_max_segs) 19355 max_val = rack->r_ctl.rc_pace_max_segs; 19356 else if (rack->rc_user_set_max_segs) 19357 max_val = rack->rc_user_set_max_segs * segsiz; 19358 else 19359 max_val = len; 19360 if ((tp->t_flags & TF_TSO) && 19361 V_tcp_do_tso && 19362 (len > segsiz) && 19363 (tp->t_port == 0)) 19364 tso = 1; 19365 again: 19366 #ifdef INET6 19367 if (MHLEN < hdrlen + max_linkhdr) 19368 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 19369 else 19370 #endif 19371 m = m_gethdr(M_NOWAIT, MT_DATA); 19372 if (m == NULL) 19373 goto failed; 19374 m->m_data += max_linkhdr; 19375 m->m_len = hdrlen; 19376 th = rack->r_ctl.fsb.th; 19377 /* Establish the len to send */ 19378 if (len > max_val) 19379 len = max_val; 19380 if ((tso) && (len + optlen > segsiz)) { 19381 uint32_t if_hw_tsomax; 19382 int32_t max_len; 19383 19384 /* extract TSO information */ 19385 if_hw_tsomax = tp->t_tsomax; 19386 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 19387 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 19388 /* 19389 * Check if we should limit by maximum payload 19390 * length: 19391 */ 19392 if (if_hw_tsomax != 0) { 19393 /* compute maximum TSO length */ 19394 max_len = (if_hw_tsomax - hdrlen - 19395 max_linkhdr); 19396 if (max_len <= 0) { 19397 goto failed; 19398 } else if (len > max_len) { 19399 len = max_len; 19400 } 19401 } 19402 if (len <= segsiz) { 19403 /* 19404 * In case there are too many small fragments don't 19405 * use TSO: 19406 */ 19407 tso = 0; 19408 } 19409 } else { 19410 tso = 0; 19411 } 19412 if ((tso == 0) && (len > segsiz)) 19413 len = segsiz; 19414 (void)tcp_get_usecs(tv); 19415 if ((len == 0) || 19416 (len <= MHLEN - hdrlen - max_linkhdr)) { 19417 goto failed; 19418 } 19419 sb_offset = tp->snd_max - tp->snd_una; 19420 th->th_seq = htonl(tp->snd_max); 19421 th->th_ack = htonl(tp->rcv_nxt); 19422 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 19423 if (th->th_win == 0) { 19424 tp->t_sndzerowin++; 19425 tp->t_flags |= TF_RXWIN0SENT; 19426 } else 19427 tp->t_flags &= ~TF_RXWIN0SENT; 19428 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 19429 KMOD_TCPSTAT_INC(tcps_sndpack); 19430 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 19431 #ifdef STATS 19432 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 19433 len); 19434 #endif 19435 if (rack->r_ctl.fsb.m == NULL) 19436 goto failed; 19437 19438 /* s_mb and s_soff are saved for rack_log_output */ 19439 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 19440 &s_mb, &s_soff); 19441 if (len <= segsiz) { 19442 /* 19443 * Must have ran out of mbufs for the copy 19444 * shorten it to no longer need tso. Lets 19445 * not put on sendalot since we are low on 19446 * mbufs. 19447 */ 19448 tso = 0; 19449 } 19450 if (rack->r_ctl.fsb.rfo_apply_push && 19451 (len == rack->r_ctl.fsb.left_to_send)) { 19452 tcp_set_flags(th, flags | TH_PUSH); 19453 add_flag |= RACK_HAD_PUSH; 19454 } 19455 if ((m->m_next == NULL) || (len <= 0)){ 19456 goto failed; 19457 } 19458 if (udp) { 19459 if (rack->r_is_v6) 19460 ulen = hdrlen + len - sizeof(struct ip6_hdr); 19461 else 19462 ulen = hdrlen + len - sizeof(struct ip); 19463 udp->uh_ulen = htons(ulen); 19464 } 19465 m->m_pkthdr.rcvif = (struct ifnet *)0; 19466 if (TCPS_HAVERCVDSYN(tp->t_state) && 19467 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 19468 int ect = tcp_ecn_output_established(tp, &flags, len, false); 19469 if ((tp->t_state == TCPS_SYN_RECEIVED) && 19470 (tp->t_flags2 & TF2_ECN_SND_ECE)) 19471 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 19472 #ifdef INET6 19473 if (rack->r_is_v6) { 19474 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 19475 ip6->ip6_flow |= htonl(ect << 20); 19476 } 19477 else 19478 #endif 19479 { 19480 #ifdef INET 19481 ip->ip_tos &= ~IPTOS_ECN_MASK; 19482 ip->ip_tos |= ect; 19483 #endif 19484 } 19485 } 19486 tcp_set_flags(th, flags); 19487 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 19488 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 19489 if (to.to_flags & TOF_SIGNATURE) { 19490 /* 19491 * Calculate MD5 signature and put it into the place 19492 * determined before. 19493 * NOTE: since TCP options buffer doesn't point into 19494 * mbuf's data, calculate offset and use it. 19495 */ 19496 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 19497 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 19498 /* 19499 * Do not send segment if the calculation of MD5 19500 * digest has failed. 19501 */ 19502 goto failed; 19503 } 19504 } 19505 #endif 19506 #ifdef INET6 19507 if (rack->r_is_v6) { 19508 if (tp->t_port) { 19509 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 19510 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 19511 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 19512 th->th_sum = htons(0); 19513 UDPSTAT_INC(udps_opackets); 19514 } else { 19515 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 19516 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 19517 th->th_sum = in6_cksum_pseudo(ip6, 19518 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 19519 0); 19520 } 19521 } 19522 #endif 19523 #if defined(INET6) && defined(INET) 19524 else 19525 #endif 19526 #ifdef INET 19527 { 19528 if (tp->t_port) { 19529 m->m_pkthdr.csum_flags = CSUM_UDP; 19530 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 19531 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 19532 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 19533 th->th_sum = htons(0); 19534 UDPSTAT_INC(udps_opackets); 19535 } else { 19536 m->m_pkthdr.csum_flags = CSUM_TCP; 19537 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 19538 th->th_sum = in_pseudo(ip->ip_src.s_addr, 19539 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 19540 IPPROTO_TCP + len + optlen)); 19541 } 19542 /* IP version must be set here for ipv4/ipv6 checking later */ 19543 KASSERT(ip->ip_v == IPVERSION, 19544 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 19545 } 19546 #endif 19547 if (tso) { 19548 /* 19549 * Here we use segsiz since we have no added options besides 19550 * any standard timestamp options (no DSACKs or SACKS are sent 19551 * via either fast-path). 19552 */ 19553 KASSERT(len > segsiz, 19554 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 19555 m->m_pkthdr.csum_flags |= CSUM_TSO; 19556 m->m_pkthdr.tso_segsz = segsiz; 19557 } 19558 #ifdef INET6 19559 if (rack->r_is_v6) { 19560 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 19561 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 19562 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 19563 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 19564 else 19565 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 19566 } 19567 #endif 19568 #if defined(INET) && defined(INET6) 19569 else 19570 #endif 19571 #ifdef INET 19572 { 19573 ip->ip_len = htons(m->m_pkthdr.len); 19574 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 19575 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 19576 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 19577 if (tp->t_port == 0 || len < V_tcp_minmss) { 19578 ip->ip_off |= htons(IP_DF); 19579 } 19580 } else { 19581 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 19582 } 19583 } 19584 #endif 19585 if (tp->snd_cwnd > tp->snd_ssthresh) { 19586 /* Set we sent in CA */ 19587 rack->rc_gp_saw_ca = 1; 19588 } else { 19589 /* Set we sent in SS */ 19590 rack->rc_gp_saw_ss = 1; 19591 } 19592 /* Time to copy in our header */ 19593 cpto = mtod(m, uint8_t *); 19594 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 19595 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 19596 if (optlen) { 19597 bcopy(opt, th + 1, optlen); 19598 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 19599 } else { 19600 th->th_off = sizeof(struct tcphdr) >> 2; 19601 } 19602 if ((rack->r_ctl.crte != NULL) && 19603 tcp_bblogging_on(tp)) { 19604 rack_log_queue_level(tp, rack, len, tv, cts); 19605 } 19606 if (tcp_bblogging_on(rack->rc_tp)) { 19607 union tcp_log_stackspecific log; 19608 19609 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 19610 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 19611 if (rack->rack_no_prr) 19612 log.u_bbr.flex1 = 0; 19613 else 19614 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 19615 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 19616 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 19617 log.u_bbr.flex4 = max_val; 19618 /* Save off the early/late values */ 19619 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 19620 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 19621 log.u_bbr.bw_inuse = rack_get_bw(rack); 19622 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 19623 log.u_bbr.flex8 = 0; 19624 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 19625 log.u_bbr.flex7 = 44; 19626 log.u_bbr.pkts_out = tp->t_maxseg; 19627 log.u_bbr.timeStamp = cts; 19628 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 19629 log.u_bbr.flex5 = log.u_bbr.inflight; 19630 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 19631 log.u_bbr.delivered = 0; 19632 log.u_bbr.rttProp = 0; 19633 log.u_bbr.delRate = rack->r_must_retran; 19634 log.u_bbr.delRate <<= 1; 19635 log.u_bbr.pkt_epoch = __LINE__; 19636 /* For fast output no retrans so just inflight and how many mss we send */ 19637 log.u_bbr.flex5 = log.u_bbr.inflight; 19638 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 19639 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 19640 len, &log, false, NULL, __func__, __LINE__, tv); 19641 } else 19642 lgb = NULL; 19643 #ifdef INET6 19644 if (rack->r_is_v6) { 19645 error = ip6_output(m, inp->in6p_outputopts, 19646 &inp->inp_route6, 19647 0, NULL, NULL, inp); 19648 } 19649 #endif 19650 #if defined(INET) && defined(INET6) 19651 else 19652 #endif 19653 #ifdef INET 19654 { 19655 error = ip_output(m, NULL, 19656 &inp->inp_route, 19657 0, 0, inp); 19658 } 19659 #endif 19660 if (lgb) { 19661 lgb->tlb_errno = error; 19662 lgb = NULL; 19663 } 19664 if (error) { 19665 *send_err = error; 19666 m = NULL; 19667 goto failed; 19668 } else if (rack->rc_hw_nobuf) { 19669 rack->rc_hw_nobuf = 0; 19670 rack->r_ctl.rc_agg_delayed = 0; 19671 rack->r_early = 0; 19672 rack->r_late = 0; 19673 rack->r_ctl.rc_agg_early = 0; 19674 } 19675 if ((error == 0) && (rack->lt_bw_up == 0)) { 19676 /* Unlikely */ 19677 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(tv); 19678 rack->r_ctl.lt_seq = tp->snd_una; 19679 rack->lt_bw_up = 1; 19680 } 19681 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 19682 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls, segsiz); 19683 m = NULL; 19684 if (tp->snd_una == tp->snd_max) { 19685 rack->r_ctl.rc_tlp_rxt_last_time = cts; 19686 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 19687 tp->t_acktime = ticks; 19688 } 19689 counter_u64_add(rack_total_bytes, len); 19690 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 19691 19692 rack->forced_ack = 0; /* If we send something zap the FA flag */ 19693 tot_len += len; 19694 if ((tp->t_flags & TF_GPUTINPROG) == 0) 19695 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 19696 tp->snd_max += len; 19697 tp->snd_nxt = tp->snd_max; 19698 if (rack->rc_new_rnd_needed) { 19699 /* 19700 * Update the rnd to start ticking not 19701 * that from a time perspective all of 19702 * the preceding idle time is "in the round" 19703 */ 19704 rack->rc_new_rnd_needed = 0; 19705 rack->r_ctl.roundends = tp->snd_max; 19706 } 19707 { 19708 int idx; 19709 19710 idx = (len / segsiz) + 3; 19711 if (idx >= TCP_MSS_ACCT_ATIMER) 19712 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 19713 else 19714 counter_u64_add(rack_out_size[idx], 1); 19715 } 19716 if (len <= rack->r_ctl.fsb.left_to_send) 19717 rack->r_ctl.fsb.left_to_send -= len; 19718 else 19719 rack->r_ctl.fsb.left_to_send = 0; 19720 if (rack->r_ctl.fsb.left_to_send < segsiz) { 19721 rack->r_fast_output = 0; 19722 rack->r_ctl.fsb.left_to_send = 0; 19723 /* At the end of fast_output scale up the sb */ 19724 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd); 19725 rack_sndbuf_autoscale(rack); 19726 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd); 19727 } 19728 if (tp->t_rtttime == 0) { 19729 tp->t_rtttime = ticks; 19730 tp->t_rtseq = startseq; 19731 KMOD_TCPSTAT_INC(tcps_segstimed); 19732 } 19733 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 19734 (max_val > len) && 19735 (tso == 0)) { 19736 max_val -= len; 19737 len = segsiz; 19738 th = rack->r_ctl.fsb.th; 19739 #ifdef TCP_ACCOUNTING 19740 cnt_thru++; 19741 #endif 19742 goto again; 19743 } 19744 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 19745 counter_u64_add(rack_fto_send, 1); 19746 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz); 19747 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0); 19748 #ifdef TCP_ACCOUNTING 19749 crtsc = get_cyclecount(); 19750 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19751 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 19752 } 19753 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19754 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 19755 } 19756 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19757 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz); 19758 } 19759 sched_unpin(); 19760 #endif 19761 return (0); 19762 failed: 19763 if (m) 19764 m_free(m); 19765 rack->r_fast_output = 0; 19766 return (-1); 19767 } 19768 19769 static inline void 19770 rack_setup_fast_output(struct tcpcb *tp, struct tcp_rack *rack, 19771 struct sockbuf *sb, 19772 int len, int orig_len, int segsiz, uint32_t pace_max_seg, 19773 bool hw_tls, 19774 uint16_t flags) 19775 { 19776 rack->r_fast_output = 1; 19777 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19778 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19779 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); 19780 rack->r_ctl.fsb.tcp_flags = flags; 19781 rack->r_ctl.fsb.left_to_send = orig_len - len; 19782 if (rack->r_ctl.fsb.left_to_send < pace_max_seg) { 19783 /* Less than a full sized pace, lets not */ 19784 rack->r_fast_output = 0; 19785 return; 19786 } else { 19787 /* Round down to the nearest pace_max_seg */ 19788 rack->r_ctl.fsb.left_to_send = rounddown(rack->r_ctl.fsb.left_to_send, pace_max_seg); 19789 } 19790 if (hw_tls) 19791 rack->r_ctl.fsb.hw_tls = 1; 19792 else 19793 rack->r_ctl.fsb.hw_tls = 0; 19794 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19795 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19796 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19797 (tp->snd_max - tp->snd_una))); 19798 if (rack->r_ctl.fsb.left_to_send < segsiz) 19799 rack->r_fast_output = 0; 19800 else { 19801 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19802 rack->r_ctl.fsb.rfo_apply_push = 1; 19803 else 19804 rack->r_ctl.fsb.rfo_apply_push = 0; 19805 } 19806 } 19807 19808 static uint32_t 19809 rack_get_hpts_pacing_min_for_bw(struct tcp_rack *rack, int32_t segsiz) 19810 { 19811 uint64_t min_time; 19812 uint32_t maxlen; 19813 19814 min_time = (uint64_t)get_hpts_min_sleep_time(); 19815 maxlen = (uint32_t)((rack->r_ctl.gp_bw * min_time) / (uint64_t)HPTS_USEC_IN_SEC); 19816 maxlen = roundup(maxlen, segsiz); 19817 return (maxlen); 19818 } 19819 19820 static struct rack_sendmap * 19821 rack_check_collapsed(struct tcp_rack *rack, uint32_t cts) 19822 { 19823 struct rack_sendmap *rsm = NULL; 19824 int thresh; 19825 19826 restart: 19827 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); 19828 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) { 19829 /* Nothing, strange turn off validity */ 19830 rack->r_collapse_point_valid = 0; 19831 return (NULL); 19832 } 19833 /* Can we send it yet? */ 19834 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) { 19835 /* 19836 * Receiver window has not grown enough for 19837 * the segment to be put on the wire. 19838 */ 19839 return (NULL); 19840 } 19841 if (rsm->r_flags & RACK_ACKED) { 19842 /* 19843 * It has been sacked, lets move to the 19844 * next one if possible. 19845 */ 19846 rack->r_ctl.last_collapse_point = rsm->r_end; 19847 /* Are we done? */ 19848 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 19849 rack->r_ctl.high_collapse_point)) { 19850 rack->r_collapse_point_valid = 0; 19851 return (NULL); 19852 } 19853 goto restart; 19854 } 19855 /* Now has it been long enough ? */ 19856 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts); 19857 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) { 19858 rack_log_collapse(rack, rsm->r_start, 19859 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 19860 thresh, __LINE__, 6, rsm->r_flags, rsm); 19861 return (rsm); 19862 } 19863 /* Not enough time */ 19864 rack_log_collapse(rack, rsm->r_start, 19865 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 19866 thresh, __LINE__, 7, rsm->r_flags, rsm); 19867 return (NULL); 19868 } 19869 19870 static inline void 19871 rack_validate_sizes(struct tcp_rack *rack, int32_t *len, int32_t segsiz, uint32_t pace_max_seg) 19872 { 19873 if ((rack->full_size_rxt == 0) && 19874 (rack->shape_rxt_to_pacing_min == 0) && 19875 (*len >= segsiz)) { 19876 *len = segsiz; 19877 } else if (rack->shape_rxt_to_pacing_min && 19878 rack->gp_ready) { 19879 /* We use pacing min as shaping len req */ 19880 uint32_t maxlen; 19881 19882 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 19883 if (*len > maxlen) 19884 *len = maxlen; 19885 } else { 19886 /* 19887 * The else is full_size_rxt is on so send it all 19888 * note we do need to check this for exceeding 19889 * our max segment size due to the fact that 19890 * we do sometimes merge chunks together i.e. 19891 * we cannot just assume that we will never have 19892 * a chunk greater than pace_max_seg 19893 */ 19894 if (*len > pace_max_seg) 19895 *len = pace_max_seg; 19896 } 19897 } 19898 19899 static int 19900 rack_output(struct tcpcb *tp) 19901 { 19902 struct socket *so; 19903 uint32_t recwin; 19904 uint32_t sb_offset, s_moff = 0; 19905 int32_t len, error = 0; 19906 uint16_t flags; 19907 struct mbuf *m, *s_mb = NULL; 19908 struct mbuf *mb; 19909 uint32_t if_hw_tsomaxsegcount = 0; 19910 uint32_t if_hw_tsomaxsegsize; 19911 int32_t segsiz, minseg; 19912 long tot_len_this_send = 0; 19913 #ifdef INET 19914 struct ip *ip = NULL; 19915 #endif 19916 struct udphdr *udp = NULL; 19917 struct tcp_rack *rack; 19918 struct tcphdr *th; 19919 uint8_t pass = 0; 19920 uint8_t mark = 0; 19921 uint8_t check_done = 0; 19922 uint8_t wanted_cookie = 0; 19923 u_char opt[TCP_MAXOLEN]; 19924 unsigned ipoptlen, optlen, hdrlen, ulen=0; 19925 uint32_t rack_seq; 19926 19927 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 19928 unsigned ipsec_optlen = 0; 19929 19930 #endif 19931 int32_t idle, sendalot; 19932 int32_t sub_from_prr = 0; 19933 volatile int32_t sack_rxmit; 19934 struct rack_sendmap *rsm = NULL; 19935 int32_t tso, mtu; 19936 struct tcpopt to; 19937 int32_t slot = 0; 19938 int32_t sup_rack = 0; 19939 uint32_t cts, ms_cts, delayed, early; 19940 uint16_t add_flag = RACK_SENT_SP; 19941 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */ 19942 uint8_t doing_tlp = 0; 19943 uint32_t cwnd_to_use, pace_max_seg; 19944 int32_t do_a_prefetch = 0; 19945 int32_t prefetch_rsm = 0; 19946 int32_t orig_len = 0; 19947 struct timeval tv; 19948 int32_t prefetch_so_done = 0; 19949 struct tcp_log_buffer *lgb; 19950 struct inpcb *inp = tptoinpcb(tp); 19951 struct sockbuf *sb; 19952 uint64_t ts_val = 0; 19953 #ifdef TCP_ACCOUNTING 19954 uint64_t crtsc; 19955 #endif 19956 #ifdef INET6 19957 struct ip6_hdr *ip6 = NULL; 19958 int32_t isipv6; 19959 #endif 19960 bool hpts_calling, hw_tls = false; 19961 19962 NET_EPOCH_ASSERT(); 19963 INP_WLOCK_ASSERT(inp); 19964 19965 /* setup and take the cache hits here */ 19966 rack = (struct tcp_rack *)tp->t_fb_ptr; 19967 #ifdef TCP_ACCOUNTING 19968 sched_pin(); 19969 ts_val = get_cyclecount(); 19970 #endif 19971 hpts_calling = !!(tp->t_flags2 & TF2_HPTS_CALLS); 19972 tp->t_flags2 &= ~TF2_HPTS_CALLS; 19973 #ifdef TCP_OFFLOAD 19974 if (tp->t_flags & TF_TOE) { 19975 #ifdef TCP_ACCOUNTING 19976 sched_unpin(); 19977 #endif 19978 return (tcp_offload_output(tp)); 19979 } 19980 #endif 19981 if (rack->rack_deferred_inited == 0) { 19982 /* 19983 * If we are the connecting socket we will 19984 * hit rack_init() when no sequence numbers 19985 * are setup. This makes it so we must defer 19986 * some initialization. Call that now. 19987 */ 19988 rack_deferred_init(tp, rack); 19989 } 19990 /* 19991 * For TFO connections in SYN_RECEIVED, only allow the initial 19992 * SYN|ACK and those sent by the retransmit timer. 19993 */ 19994 if (IS_FASTOPEN(tp->t_flags) && 19995 (tp->t_state == TCPS_SYN_RECEIVED) && 19996 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 19997 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 19998 #ifdef TCP_ACCOUNTING 19999 sched_unpin(); 20000 #endif 20001 return (0); 20002 } 20003 #ifdef INET6 20004 if (rack->r_state) { 20005 /* Use the cache line loaded if possible */ 20006 isipv6 = rack->r_is_v6; 20007 } else { 20008 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 20009 } 20010 #endif 20011 early = 0; 20012 cts = tcp_get_usecs(&tv); 20013 ms_cts = tcp_tv_to_mssectick(&tv); 20014 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 20015 tcp_in_hpts(rack->rc_tp)) { 20016 /* 20017 * We are on the hpts for some timer but not hptsi output. 20018 * Remove from the hpts unconditionally. 20019 */ 20020 rack_timer_cancel(tp, rack, cts, __LINE__); 20021 } 20022 /* Are we pacing and late? */ 20023 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 20024 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 20025 /* We are delayed */ 20026 delayed = cts - rack->r_ctl.rc_last_output_to; 20027 } else { 20028 delayed = 0; 20029 } 20030 /* Do the timers, which may override the pacer */ 20031 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 20032 int retval; 20033 20034 retval = rack_process_timers(tp, rack, cts, hpts_calling, 20035 &doing_tlp); 20036 if (retval != 0) { 20037 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 20038 #ifdef TCP_ACCOUNTING 20039 sched_unpin(); 20040 #endif 20041 /* 20042 * If timers want tcp_drop(), then pass error out, 20043 * otherwise suppress it. 20044 */ 20045 return (retval < 0 ? retval : 0); 20046 } 20047 } 20048 if (rack->rc_in_persist) { 20049 if (tcp_in_hpts(rack->rc_tp) == 0) { 20050 /* Timer is not running */ 20051 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 20052 } 20053 #ifdef TCP_ACCOUNTING 20054 sched_unpin(); 20055 #endif 20056 return (0); 20057 } 20058 if ((rack->rc_ack_required == 1) && 20059 (rack->r_timer_override == 0)){ 20060 /* A timeout occurred and no ack has arrived */ 20061 if (tcp_in_hpts(rack->rc_tp) == 0) { 20062 /* Timer is not running */ 20063 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 20064 } 20065 #ifdef TCP_ACCOUNTING 20066 sched_unpin(); 20067 #endif 20068 return (0); 20069 } 20070 if ((rack->r_timer_override) || 20071 (rack->rc_ack_can_sendout_data) || 20072 (delayed) || 20073 (tp->t_state < TCPS_ESTABLISHED)) { 20074 rack->rc_ack_can_sendout_data = 0; 20075 if (tcp_in_hpts(rack->rc_tp)) 20076 tcp_hpts_remove(rack->rc_tp); 20077 } else if (tcp_in_hpts(rack->rc_tp)) { 20078 /* 20079 * On the hpts you can't pass even if ACKNOW is on, we will 20080 * when the hpts fires. 20081 */ 20082 #ifdef TCP_ACCOUNTING 20083 crtsc = get_cyclecount(); 20084 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 20085 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 20086 } 20087 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 20088 tp->tcp_cnt_counters[SND_BLOCKED]++; 20089 } 20090 sched_unpin(); 20091 #endif 20092 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 20093 return (0); 20094 } 20095 /* Finish out both pacing early and late accounting */ 20096 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 20097 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 20098 early = rack->r_ctl.rc_last_output_to - cts; 20099 } else 20100 early = 0; 20101 if (delayed) { 20102 rack->r_ctl.rc_agg_delayed += delayed; 20103 rack->r_late = 1; 20104 } else if (early) { 20105 rack->r_ctl.rc_agg_early += early; 20106 rack->r_early = 1; 20107 } 20108 /* Now that early/late accounting is done turn off the flag */ 20109 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 20110 rack->r_wanted_output = 0; 20111 rack->r_timer_override = 0; 20112 if ((tp->t_state != rack->r_state) && 20113 TCPS_HAVEESTABLISHED(tp->t_state)) { 20114 rack_set_state(tp, rack); 20115 } 20116 if ((rack->r_fast_output) && 20117 (doing_tlp == 0) && 20118 (tp->rcv_numsacks == 0)) { 20119 int ret; 20120 20121 error = 0; 20122 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 20123 if (ret >= 0) 20124 return(ret); 20125 else if (error) { 20126 inp = rack->rc_inp; 20127 so = inp->inp_socket; 20128 sb = &so->so_snd; 20129 goto nomore; 20130 } 20131 } 20132 inp = rack->rc_inp; 20133 /* 20134 * For TFO connections in SYN_SENT or SYN_RECEIVED, 20135 * only allow the initial SYN or SYN|ACK and those sent 20136 * by the retransmit timer. 20137 */ 20138 if (IS_FASTOPEN(tp->t_flags) && 20139 ((tp->t_state == TCPS_SYN_RECEIVED) || 20140 (tp->t_state == TCPS_SYN_SENT)) && 20141 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 20142 (tp->t_rxtshift == 0)) { /* not a retransmit */ 20143 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 20144 so = inp->inp_socket; 20145 sb = &so->so_snd; 20146 goto just_return_nolock; 20147 } 20148 /* 20149 * Determine length of data that should be transmitted, and flags 20150 * that will be used. If there is some data or critical controls 20151 * (SYN, RST) to send, then transmit; otherwise, investigate 20152 * further. 20153 */ 20154 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 20155 if (tp->t_idle_reduce) { 20156 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 20157 rack_cc_after_idle(rack, tp); 20158 } 20159 tp->t_flags &= ~TF_LASTIDLE; 20160 if (idle) { 20161 if (tp->t_flags & TF_MORETOCOME) { 20162 tp->t_flags |= TF_LASTIDLE; 20163 idle = 0; 20164 } 20165 } 20166 if ((tp->snd_una == tp->snd_max) && 20167 rack->r_ctl.rc_went_idle_time && 20168 TSTMP_GT(cts, rack->r_ctl.rc_went_idle_time)) { 20169 idle = cts - rack->r_ctl.rc_went_idle_time; 20170 if (idle > rack_min_probertt_hold) { 20171 /* Count as a probe rtt */ 20172 if (rack->in_probe_rtt == 0) { 20173 rack->r_ctl.rc_lower_rtt_us_cts = cts; 20174 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 20175 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 20176 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 20177 } else { 20178 rack_exit_probertt(rack, cts); 20179 } 20180 } 20181 idle = 0; 20182 } 20183 if (rack_use_fsb && 20184 (rack->r_ctl.fsb.tcp_ip_hdr) && 20185 (rack->r_fsb_inited == 0) && 20186 (rack->r_state != TCPS_CLOSED)) 20187 rack_init_fsb_block(tp, rack, tcp_outflags[tp->t_state]); 20188 again: 20189 /* 20190 * If we've recently taken a timeout, snd_max will be greater than 20191 * snd_nxt. There may be SACK information that allows us to avoid 20192 * resending already delivered data. Adjust snd_nxt accordingly. 20193 */ 20194 sendalot = 0; 20195 cts = tcp_get_usecs(&tv); 20196 ms_cts = tcp_tv_to_mssectick(&tv); 20197 tso = 0; 20198 mtu = 0; 20199 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 20200 minseg = segsiz; 20201 if (rack->r_ctl.rc_pace_max_segs == 0) 20202 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 20203 else 20204 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 20205 sb_offset = tp->snd_max - tp->snd_una; 20206 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 20207 flags = tcp_outflags[tp->t_state]; 20208 while (rack->rc_free_cnt < rack_free_cache) { 20209 rsm = rack_alloc(rack); 20210 if (rsm == NULL) { 20211 if (hpts_calling) 20212 /* Retry in a ms */ 20213 slot = (1 * HPTS_USEC_IN_MSEC); 20214 so = inp->inp_socket; 20215 sb = &so->so_snd; 20216 goto just_return_nolock; 20217 } 20218 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 20219 rack->rc_free_cnt++; 20220 rsm = NULL; 20221 } 20222 sack_rxmit = 0; 20223 len = 0; 20224 rsm = NULL; 20225 if (flags & TH_RST) { 20226 SOCKBUF_LOCK(&inp->inp_socket->so_snd); 20227 so = inp->inp_socket; 20228 sb = &so->so_snd; 20229 goto send; 20230 } 20231 if (rack->r_ctl.rc_resend) { 20232 /* Retransmit timer */ 20233 rsm = rack->r_ctl.rc_resend; 20234 rack->r_ctl.rc_resend = NULL; 20235 len = rsm->r_end - rsm->r_start; 20236 sack_rxmit = 1; 20237 sendalot = 0; 20238 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 20239 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 20240 __func__, __LINE__, 20241 rsm->r_start, tp->snd_una, tp, rack, rsm)); 20242 sb_offset = rsm->r_start - tp->snd_una; 20243 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 20244 } else if (rack->r_collapse_point_valid && 20245 ((rsm = rack_check_collapsed(rack, cts)) != NULL)) { 20246 /* 20247 * If an RSM is returned then enough time has passed 20248 * for us to retransmit it. Move up the collapse point, 20249 * since this rsm has its chance to retransmit now. 20250 */ 20251 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_RXT); 20252 rack->r_ctl.last_collapse_point = rsm->r_end; 20253 /* Are we done? */ 20254 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 20255 rack->r_ctl.high_collapse_point)) 20256 rack->r_collapse_point_valid = 0; 20257 sack_rxmit = 1; 20258 /* We are not doing a TLP */ 20259 doing_tlp = 0; 20260 len = rsm->r_end - rsm->r_start; 20261 sb_offset = rsm->r_start - tp->snd_una; 20262 sendalot = 0; 20263 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 20264 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) { 20265 /* We have a retransmit that takes precedence */ 20266 if ((!IN_FASTRECOVERY(tp->t_flags)) && 20267 ((rsm->r_flags & RACK_MUST_RXT) == 0) && 20268 ((tp->t_flags & TF_WASFRECOVERY) == 0)) { 20269 /* Enter recovery if not induced by a time-out */ 20270 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 20271 } 20272 #ifdef INVARIANTS 20273 if (SEQ_LT(rsm->r_start, tp->snd_una)) { 20274 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n", 20275 tp, rack, rsm, rsm->r_start, tp->snd_una); 20276 } 20277 #endif 20278 len = rsm->r_end - rsm->r_start; 20279 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 20280 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 20281 __func__, __LINE__, 20282 rsm->r_start, tp->snd_una, tp, rack, rsm)); 20283 sb_offset = rsm->r_start - tp->snd_una; 20284 sendalot = 0; 20285 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 20286 if (len > 0) { 20287 sack_rxmit = 1; 20288 KMOD_TCPSTAT_INC(tcps_sack_rexmits); 20289 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes, 20290 min(len, segsiz)); 20291 } 20292 } else if (rack->r_ctl.rc_tlpsend) { 20293 /* Tail loss probe */ 20294 long cwin; 20295 long tlen; 20296 20297 /* 20298 * Check if we can do a TLP with a RACK'd packet 20299 * this can happen if we are not doing the rack 20300 * cheat and we skipped to a TLP and it 20301 * went off. 20302 */ 20303 rsm = rack->r_ctl.rc_tlpsend; 20304 /* We are doing a TLP make sure the flag is preent */ 20305 rsm->r_flags |= RACK_TLP; 20306 rack->r_ctl.rc_tlpsend = NULL; 20307 sack_rxmit = 1; 20308 tlen = rsm->r_end - rsm->r_start; 20309 if (tlen > segsiz) 20310 tlen = segsiz; 20311 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 20312 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 20313 __func__, __LINE__, 20314 rsm->r_start, tp->snd_una, tp, rack, rsm)); 20315 sb_offset = rsm->r_start - tp->snd_una; 20316 cwin = min(tp->snd_wnd, tlen); 20317 len = cwin; 20318 } 20319 if (rack->r_must_retran && 20320 (doing_tlp == 0) && 20321 (SEQ_GT(tp->snd_max, tp->snd_una)) && 20322 (rsm == NULL)) { 20323 /* 20324 * There are two different ways that we 20325 * can get into this block: 20326 * a) This is a non-sack connection, we had a time-out 20327 * and thus r_must_retran was set and everything 20328 * left outstanding as been marked for retransmit. 20329 * b) The MTU of the path shrank, so that everything 20330 * was marked to be retransmitted with the smaller 20331 * mtu and r_must_retran was set. 20332 * 20333 * This means that we expect the sendmap (outstanding) 20334 * to all be marked must. We can use the tmap to 20335 * look at them. 20336 * 20337 */ 20338 int sendwin, flight; 20339 20340 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 20341 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto); 20342 if (flight >= sendwin) { 20343 /* 20344 * We can't send yet. 20345 */ 20346 so = inp->inp_socket; 20347 sb = &so->so_snd; 20348 goto just_return_nolock; 20349 } 20350 /* 20351 * This is the case a/b mentioned above. All 20352 * outstanding/not-acked should be marked. 20353 * We can use the tmap to find them. 20354 */ 20355 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 20356 if (rsm == NULL) { 20357 /* TSNH */ 20358 rack->r_must_retran = 0; 20359 rack->r_ctl.rc_out_at_rto = 0; 20360 so = inp->inp_socket; 20361 sb = &so->so_snd; 20362 goto just_return_nolock; 20363 } 20364 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { 20365 /* 20366 * The first one does not have the flag, did we collapse 20367 * further up in our list? 20368 */ 20369 rack->r_must_retran = 0; 20370 rack->r_ctl.rc_out_at_rto = 0; 20371 rsm = NULL; 20372 sack_rxmit = 0; 20373 } else { 20374 sack_rxmit = 1; 20375 len = rsm->r_end - rsm->r_start; 20376 sb_offset = rsm->r_start - tp->snd_una; 20377 sendalot = 0; 20378 if ((rack->full_size_rxt == 0) && 20379 (rack->shape_rxt_to_pacing_min == 0) && 20380 (len >= segsiz)) 20381 len = segsiz; 20382 else if (rack->shape_rxt_to_pacing_min && 20383 rack->gp_ready) { 20384 /* We use pacing min as shaping len req */ 20385 uint32_t maxlen; 20386 20387 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 20388 if (len > maxlen) 20389 len = maxlen; 20390 } 20391 /* 20392 * Delay removing the flag RACK_MUST_RXT so 20393 * that the fastpath for retransmit will 20394 * work with this rsm. 20395 */ 20396 } 20397 } 20398 /* 20399 * Enforce a connection sendmap count limit if set 20400 * as long as we are not retransmiting. 20401 */ 20402 if ((rsm == NULL) && 20403 (rack->do_detection == 0) && 20404 (V_tcp_map_entries_limit > 0) && 20405 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 20406 counter_u64_add(rack_to_alloc_limited, 1); 20407 if (!rack->alloc_limit_reported) { 20408 rack->alloc_limit_reported = 1; 20409 counter_u64_add(rack_alloc_limited_conns, 1); 20410 } 20411 so = inp->inp_socket; 20412 sb = &so->so_snd; 20413 goto just_return_nolock; 20414 } 20415 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 20416 /* we are retransmitting the fin */ 20417 len--; 20418 if (len) { 20419 /* 20420 * When retransmitting data do *not* include the 20421 * FIN. This could happen from a TLP probe. 20422 */ 20423 flags &= ~TH_FIN; 20424 } 20425 } 20426 if (rsm && rack->r_fsb_inited && 20427 rack_use_rsm_rfo && 20428 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 20429 int ret; 20430 20431 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); 20432 if (ret == 0) 20433 return (0); 20434 } 20435 so = inp->inp_socket; 20436 sb = &so->so_snd; 20437 if (do_a_prefetch == 0) { 20438 kern_prefetch(sb, &do_a_prefetch); 20439 do_a_prefetch = 1; 20440 } 20441 #ifdef NETFLIX_SHARED_CWND 20442 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 20443 rack->rack_enable_scwnd) { 20444 /* We are doing cwnd sharing */ 20445 if (rack->gp_ready && 20446 (rack->rack_attempted_scwnd == 0) && 20447 (rack->r_ctl.rc_scw == NULL) && 20448 tp->t_lib) { 20449 /* The pcbid is in, lets make an attempt */ 20450 counter_u64_add(rack_try_scwnd, 1); 20451 rack->rack_attempted_scwnd = 1; 20452 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 20453 &rack->r_ctl.rc_scw_index, 20454 segsiz); 20455 } 20456 if (rack->r_ctl.rc_scw && 20457 (rack->rack_scwnd_is_idle == 1) && 20458 sbavail(&so->so_snd)) { 20459 /* we are no longer out of data */ 20460 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 20461 rack->rack_scwnd_is_idle = 0; 20462 } 20463 if (rack->r_ctl.rc_scw) { 20464 /* First lets update and get the cwnd */ 20465 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 20466 rack->r_ctl.rc_scw_index, 20467 tp->snd_cwnd, tp->snd_wnd, segsiz); 20468 } 20469 } 20470 #endif 20471 /* 20472 * Get standard flags, and add SYN or FIN if requested by 'hidden' 20473 * state flags. 20474 */ 20475 if (tp->t_flags & TF_NEEDFIN) 20476 flags |= TH_FIN; 20477 if (tp->t_flags & TF_NEEDSYN) 20478 flags |= TH_SYN; 20479 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 20480 void *end_rsm; 20481 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 20482 if (end_rsm) 20483 kern_prefetch(end_rsm, &prefetch_rsm); 20484 prefetch_rsm = 1; 20485 } 20486 SOCKBUF_LOCK(sb); 20487 /* 20488 * If snd_nxt == snd_max and we have transmitted a FIN, the 20489 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a 20490 * negative length. This can also occur when TCP opens up its 20491 * congestion window while receiving additional duplicate acks after 20492 * fast-retransmit because TCP will reset snd_nxt to snd_max after 20493 * the fast-retransmit. 20494 * 20495 * In the normal retransmit-FIN-only case, however, snd_nxt will be 20496 * set to snd_una, the sb_offset will be 0, and the length may wind 20497 * up 0. 20498 * 20499 * If sack_rxmit is true we are retransmitting from the scoreboard 20500 * in which case len is already set. 20501 */ 20502 if ((sack_rxmit == 0) && 20503 (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) { 20504 uint32_t avail; 20505 20506 avail = sbavail(sb); 20507 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail) 20508 sb_offset = tp->snd_nxt - tp->snd_una; 20509 else 20510 sb_offset = 0; 20511 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { 20512 if (rack->r_ctl.rc_tlp_new_data) { 20513 /* TLP is forcing out new data */ 20514 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 20515 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 20516 } 20517 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { 20518 if (tp->snd_wnd > sb_offset) 20519 len = tp->snd_wnd - sb_offset; 20520 else 20521 len = 0; 20522 } else { 20523 len = rack->r_ctl.rc_tlp_new_data; 20524 } 20525 rack->r_ctl.rc_tlp_new_data = 0; 20526 } else { 20527 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); 20528 } 20529 if ((rack->r_ctl.crte == NULL) && 20530 IN_FASTRECOVERY(tp->t_flags) && 20531 (rack->full_size_rxt == 0) && 20532 (rack->shape_rxt_to_pacing_min == 0) && 20533 (len > segsiz)) { 20534 /* 20535 * For prr=off, we need to send only 1 MSS 20536 * at a time. We do this because another sack could 20537 * be arriving that causes us to send retransmits and 20538 * we don't want to be on a long pace due to a larger send 20539 * that keeps us from sending out the retransmit. 20540 */ 20541 len = segsiz; 20542 } else if (rack->shape_rxt_to_pacing_min && 20543 rack->gp_ready) { 20544 /* We use pacing min as shaping len req */ 20545 uint32_t maxlen; 20546 20547 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 20548 if (len > maxlen) 20549 len = maxlen; 20550 }/* The else is full_size_rxt is on so send it all */ 20551 } else { 20552 uint32_t outstanding; 20553 /* 20554 * We are inside of a Fast recovery episode, this 20555 * is caused by a SACK or 3 dup acks. At this point 20556 * we have sent all the retransmissions and we rely 20557 * on PRR to dictate what we will send in the form of 20558 * new data. 20559 */ 20560 20561 outstanding = tp->snd_max - tp->snd_una; 20562 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 20563 if (tp->snd_wnd > outstanding) { 20564 len = tp->snd_wnd - outstanding; 20565 /* Check to see if we have the data */ 20566 if ((sb_offset + len) > avail) { 20567 /* It does not all fit */ 20568 if (avail > sb_offset) 20569 len = avail - sb_offset; 20570 else 20571 len = 0; 20572 } 20573 } else { 20574 len = 0; 20575 } 20576 } else if (avail > sb_offset) { 20577 len = avail - sb_offset; 20578 } else { 20579 len = 0; 20580 } 20581 if (len > 0) { 20582 if (len > rack->r_ctl.rc_prr_sndcnt) { 20583 len = rack->r_ctl.rc_prr_sndcnt; 20584 } 20585 if (len > 0) { 20586 sub_from_prr = 1; 20587 } 20588 } 20589 if (len > segsiz) { 20590 /* 20591 * We should never send more than a MSS when 20592 * retransmitting or sending new data in prr 20593 * mode unless the override flag is on. Most 20594 * likely the PRR algorithm is not going to 20595 * let us send a lot as well :-) 20596 */ 20597 if (rack->r_ctl.rc_prr_sendalot == 0) { 20598 len = segsiz; 20599 } 20600 } else if (len < segsiz) { 20601 /* 20602 * Do we send any? The idea here is if the 20603 * send empty's the socket buffer we want to 20604 * do it. However if not then lets just wait 20605 * for our prr_sndcnt to get bigger. 20606 */ 20607 long leftinsb; 20608 20609 leftinsb = sbavail(sb) - sb_offset; 20610 if (leftinsb > len) { 20611 /* This send does not empty the sb */ 20612 len = 0; 20613 } 20614 } 20615 } 20616 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 20617 /* 20618 * If you have not established 20619 * and are not doing FAST OPEN 20620 * no data please. 20621 */ 20622 if ((sack_rxmit == 0) && 20623 (!IS_FASTOPEN(tp->t_flags))){ 20624 len = 0; 20625 sb_offset = 0; 20626 } 20627 } 20628 if (prefetch_so_done == 0) { 20629 kern_prefetch(so, &prefetch_so_done); 20630 prefetch_so_done = 1; 20631 } 20632 /* 20633 * Lop off SYN bit if it has already been sent. However, if this is 20634 * SYN-SENT state and if segment contains data and if we don't know 20635 * that foreign host supports TAO, suppress sending segment. 20636 */ 20637 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) && 20638 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) { 20639 /* 20640 * When sending additional segments following a TFO SYN|ACK, 20641 * do not include the SYN bit. 20642 */ 20643 if (IS_FASTOPEN(tp->t_flags) && 20644 (tp->t_state == TCPS_SYN_RECEIVED)) 20645 flags &= ~TH_SYN; 20646 } 20647 /* 20648 * Be careful not to send data and/or FIN on SYN segments. This 20649 * measure is needed to prevent interoperability problems with not 20650 * fully conformant TCP implementations. 20651 */ 20652 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 20653 len = 0; 20654 flags &= ~TH_FIN; 20655 } 20656 /* 20657 * On TFO sockets, ensure no data is sent in the following cases: 20658 * 20659 * - When retransmitting SYN|ACK on a passively-created socket 20660 * 20661 * - When retransmitting SYN on an actively created socket 20662 * 20663 * - When sending a zero-length cookie (cookie request) on an 20664 * actively created socket 20665 * 20666 * - When the socket is in the CLOSED state (RST is being sent) 20667 */ 20668 if (IS_FASTOPEN(tp->t_flags) && 20669 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 20670 ((tp->t_state == TCPS_SYN_SENT) && 20671 (tp->t_tfo_client_cookie_len == 0)) || 20672 (flags & TH_RST))) { 20673 sack_rxmit = 0; 20674 len = 0; 20675 } 20676 /* Without fast-open there should never be data sent on a SYN */ 20677 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) { 20678 tp->snd_nxt = tp->iss; 20679 len = 0; 20680 } 20681 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 20682 /* We only send 1 MSS if we have a DSACK block */ 20683 add_flag |= RACK_SENT_W_DSACK; 20684 len = segsiz; 20685 } 20686 orig_len = len; 20687 if (len <= 0) { 20688 /* 20689 * If FIN has been sent but not acked, but we haven't been 20690 * called to retransmit, len will be < 0. Otherwise, window 20691 * shrank after we sent into it. If window shrank to 0, 20692 * cancel pending retransmit, pull snd_nxt back to (closed) 20693 * window, and set the persist timer if it isn't already 20694 * going. If the window didn't close completely, just wait 20695 * for an ACK. 20696 * 20697 * We also do a general check here to ensure that we will 20698 * set the persist timer when we have data to send, but a 20699 * 0-byte window. This makes sure the persist timer is set 20700 * even if the packet hits one of the "goto send" lines 20701 * below. 20702 */ 20703 len = 0; 20704 if ((tp->snd_wnd == 0) && 20705 (TCPS_HAVEESTABLISHED(tp->t_state)) && 20706 (tp->snd_una == tp->snd_max) && 20707 (sb_offset < (int)sbavail(sb))) { 20708 rack_enter_persist(tp, rack, cts, tp->snd_una); 20709 } 20710 } else if ((rsm == NULL) && 20711 (doing_tlp == 0) && 20712 (len < pace_max_seg)) { 20713 /* 20714 * We are not sending a maximum sized segment for 20715 * some reason. Should we not send anything (think 20716 * sws or persists)? 20717 */ 20718 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 20719 (TCPS_HAVEESTABLISHED(tp->t_state)) && 20720 (len < minseg) && 20721 (len < (int)(sbavail(sb) - sb_offset))) { 20722 /* 20723 * Here the rwnd is less than 20724 * the minimum pacing size, this is not a retransmit, 20725 * we are established and 20726 * the send is not the last in the socket buffer 20727 * we send nothing, and we may enter persists 20728 * if nothing is outstanding. 20729 */ 20730 len = 0; 20731 if (tp->snd_max == tp->snd_una) { 20732 /* 20733 * Nothing out we can 20734 * go into persists. 20735 */ 20736 rack_enter_persist(tp, rack, cts, tp->snd_una); 20737 } 20738 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 20739 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 20740 (len < (int)(sbavail(sb) - sb_offset)) && 20741 (len < minseg)) { 20742 /* 20743 * Here we are not retransmitting, and 20744 * the cwnd is not so small that we could 20745 * not send at least a min size (rxt timer 20746 * not having gone off), We have 2 segments or 20747 * more already in flight, its not the tail end 20748 * of the socket buffer and the cwnd is blocking 20749 * us from sending out a minimum pacing segment size. 20750 * Lets not send anything. 20751 */ 20752 len = 0; 20753 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 20754 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 20755 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 20756 (len < (int)(sbavail(sb) - sb_offset)) && 20757 (TCPS_HAVEESTABLISHED(tp->t_state))) { 20758 /* 20759 * Here we have a send window but we have 20760 * filled it up and we can't send another pacing segment. 20761 * We also have in flight more than 2 segments 20762 * and we are not completing the sb i.e. we allow 20763 * the last bytes of the sb to go out even if 20764 * its not a full pacing segment. 20765 */ 20766 len = 0; 20767 } else if ((rack->r_ctl.crte != NULL) && 20768 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && 20769 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) && 20770 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && 20771 (len < (int)(sbavail(sb) - sb_offset))) { 20772 /* 20773 * Here we are doing hardware pacing, this is not a TLP, 20774 * we are not sending a pace max segment size, there is rwnd 20775 * room to send at least N pace_max_seg, the cwnd is greater 20776 * than or equal to a full pacing segments plus 4 mss and we have 2 or 20777 * more segments in flight and its not the tail of the socket buffer. 20778 * 20779 * We don't want to send instead we need to get more ack's in to 20780 * allow us to send a full pacing segment. Normally, if we are pacing 20781 * about the right speed, we should have finished our pacing 20782 * send as most of the acks have come back if we are at the 20783 * right rate. This is a bit fuzzy since return path delay 20784 * can delay the acks, which is why we want to make sure we 20785 * have cwnd space to have a bit more than a max pace segments in flight. 20786 * 20787 * If we have not gotten our acks back we are pacing at too high a 20788 * rate delaying will not hurt and will bring our GP estimate down by 20789 * injecting the delay. If we don't do this we will send 20790 * 2 MSS out in response to the acks being clocked in which 20791 * defeats the point of hw-pacing (i.e. to help us get 20792 * larger TSO's out). 20793 */ 20794 len = 0; 20795 } 20796 20797 } 20798 /* len will be >= 0 after this point. */ 20799 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 20800 rack_sndbuf_autoscale(rack); 20801 /* 20802 * Decide if we can use TCP Segmentation Offloading (if supported by 20803 * hardware). 20804 * 20805 * TSO may only be used if we are in a pure bulk sending state. The 20806 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 20807 * options prevent using TSO. With TSO the TCP header is the same 20808 * (except for the sequence number) for all generated packets. This 20809 * makes it impossible to transmit any options which vary per 20810 * generated segment or packet. 20811 * 20812 * IPv4 handling has a clear separation of ip options and ip header 20813 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 20814 * the right thing below to provide length of just ip options and thus 20815 * checking for ipoptlen is enough to decide if ip options are present. 20816 */ 20817 ipoptlen = 0; 20818 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 20819 /* 20820 * Pre-calculate here as we save another lookup into the darknesses 20821 * of IPsec that way and can actually decide if TSO is ok. 20822 */ 20823 #ifdef INET6 20824 if (isipv6 && IPSEC_ENABLED(ipv6)) 20825 ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp); 20826 #ifdef INET 20827 else 20828 #endif 20829 #endif /* INET6 */ 20830 #ifdef INET 20831 if (IPSEC_ENABLED(ipv4)) 20832 ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp); 20833 #endif /* INET */ 20834 #endif 20835 20836 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 20837 ipoptlen += ipsec_optlen; 20838 #endif 20839 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && 20840 (tp->t_port == 0) && 20841 ((tp->t_flags & TF_SIGNATURE) == 0) && 20842 tp->rcv_numsacks == 0 && sack_rxmit == 0 && 20843 ipoptlen == 0) 20844 tso = 1; 20845 { 20846 uint32_t outstanding __unused; 20847 20848 outstanding = tp->snd_max - tp->snd_una; 20849 if (tp->t_flags & TF_SENTFIN) { 20850 /* 20851 * If we sent a fin, snd_max is 1 higher than 20852 * snd_una 20853 */ 20854 outstanding--; 20855 } 20856 if (sack_rxmit) { 20857 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 20858 flags &= ~TH_FIN; 20859 } else { 20860 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + 20861 sbused(sb))) 20862 flags &= ~TH_FIN; 20863 } 20864 } 20865 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), 20866 (long)TCP_MAXWIN << tp->rcv_scale); 20867 20868 /* 20869 * Sender silly window avoidance. We transmit under the following 20870 * conditions when len is non-zero: 20871 * 20872 * - We have a full segment (or more with TSO) - This is the last 20873 * buffer in a write()/send() and we are either idle or running 20874 * NODELAY - we've timed out (e.g. persist timer) - we have more 20875 * then 1/2 the maximum send window's worth of data (receiver may be 20876 * limited the window size) - we need to retransmit 20877 */ 20878 if (len) { 20879 if (len >= segsiz) { 20880 goto send; 20881 } 20882 /* 20883 * NOTE! on localhost connections an 'ack' from the remote 20884 * end may occur synchronously with the output and cause us 20885 * to flush a buffer queued with moretocome. XXX 20886 * 20887 */ 20888 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 20889 (idle || (tp->t_flags & TF_NODELAY)) && 20890 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 20891 (tp->t_flags & TF_NOPUSH) == 0) { 20892 pass = 2; 20893 goto send; 20894 } 20895 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 20896 pass = 22; 20897 goto send; 20898 } 20899 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 20900 pass = 4; 20901 goto send; 20902 } 20903 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */ 20904 pass = 5; 20905 goto send; 20906 } 20907 if (sack_rxmit) { 20908 pass = 6; 20909 goto send; 20910 } 20911 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && 20912 (ctf_outstanding(tp) < (segsiz * 2))) { 20913 /* 20914 * We have less than two MSS outstanding (delayed ack) 20915 * and our rwnd will not let us send a full sized 20916 * MSS. Lets go ahead and let this small segment 20917 * out because we want to try to have at least two 20918 * packets inflight to not be caught by delayed ack. 20919 */ 20920 pass = 12; 20921 goto send; 20922 } 20923 } 20924 /* 20925 * Sending of standalone window updates. 20926 * 20927 * Window updates are important when we close our window due to a 20928 * full socket buffer and are opening it again after the application 20929 * reads data from it. Once the window has opened again and the 20930 * remote end starts to send again the ACK clock takes over and 20931 * provides the most current window information. 20932 * 20933 * We must avoid the silly window syndrome whereas every read from 20934 * the receive buffer, no matter how small, causes a window update 20935 * to be sent. We also should avoid sending a flurry of window 20936 * updates when the socket buffer had queued a lot of data and the 20937 * application is doing small reads. 20938 * 20939 * Prevent a flurry of pointless window updates by only sending an 20940 * update when we can increase the advertized window by more than 20941 * 1/4th of the socket buffer capacity. When the buffer is getting 20942 * full or is very small be more aggressive and send an update 20943 * whenever we can increase by two mss sized segments. In all other 20944 * situations the ACK's to new incoming data will carry further 20945 * window increases. 20946 * 20947 * Don't send an independent window update if a delayed ACK is 20948 * pending (it will get piggy-backed on it) or the remote side 20949 * already has done a half-close and won't send more data. Skip 20950 * this if the connection is in T/TCP half-open state. 20951 */ 20952 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 20953 !(tp->t_flags & TF_DELACK) && 20954 !TCPS_HAVERCVDFIN(tp->t_state)) { 20955 /* 20956 * "adv" is the amount we could increase the window, taking 20957 * into account that we are limited by TCP_MAXWIN << 20958 * tp->rcv_scale. 20959 */ 20960 int32_t adv; 20961 int oldwin; 20962 20963 adv = recwin; 20964 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 20965 oldwin = (tp->rcv_adv - tp->rcv_nxt); 20966 if (adv > oldwin) 20967 adv -= oldwin; 20968 else { 20969 /* We can't increase the window */ 20970 adv = 0; 20971 } 20972 } else 20973 oldwin = 0; 20974 20975 /* 20976 * If the new window size ends up being the same as or less 20977 * than the old size when it is scaled, then don't force 20978 * a window update. 20979 */ 20980 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 20981 goto dontupdate; 20982 20983 if (adv >= (int32_t)(2 * segsiz) && 20984 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 20985 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 20986 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 20987 pass = 7; 20988 goto send; 20989 } 20990 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 20991 pass = 23; 20992 goto send; 20993 } 20994 } 20995 dontupdate: 20996 20997 /* 20998 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 20999 * is also a catch-all for the retransmit timer timeout case. 21000 */ 21001 if (tp->t_flags & TF_ACKNOW) { 21002 pass = 8; 21003 goto send; 21004 } 21005 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 21006 pass = 9; 21007 goto send; 21008 } 21009 /* 21010 * If our state indicates that FIN should be sent and we have not 21011 * yet done so, then we need to send. 21012 */ 21013 if ((flags & TH_FIN) && 21014 (tp->snd_nxt == tp->snd_una)) { 21015 pass = 11; 21016 goto send; 21017 } 21018 /* 21019 * No reason to send a segment, just return. 21020 */ 21021 just_return: 21022 SOCKBUF_UNLOCK(sb); 21023 just_return_nolock: 21024 { 21025 int app_limited = CTF_JR_SENT_DATA; 21026 21027 if (tot_len_this_send > 0) { 21028 /* Make sure snd_nxt is up to max */ 21029 rack->r_ctl.fsb.recwin = recwin; 21030 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz); 21031 if ((error == 0) && 21032 rack_use_rfo && 21033 ((flags & (TH_SYN|TH_FIN)) == 0) && 21034 (ipoptlen == 0) && 21035 (tp->snd_nxt == tp->snd_max) && 21036 (tp->rcv_numsacks == 0) && 21037 rack->r_fsb_inited && 21038 TCPS_HAVEESTABLISHED(tp->t_state) && 21039 ((IN_RECOVERY(tp->t_flags)) == 0) && 21040 (rack->r_must_retran == 0) && 21041 ((tp->t_flags & TF_NEEDFIN) == 0) && 21042 (len > 0) && (orig_len > 0) && 21043 (orig_len > len) && 21044 ((orig_len - len) >= segsiz) && 21045 ((optlen == 0) || 21046 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 21047 /* We can send at least one more MSS using our fsb */ 21048 rack_setup_fast_output(tp, rack, sb, len, orig_len, 21049 segsiz, pace_max_seg, hw_tls, flags); 21050 } else 21051 rack->r_fast_output = 0; 21052 21053 21054 rack_log_fsb(rack, tp, so, flags, 21055 ipoptlen, orig_len, len, 0, 21056 1, optlen, __LINE__, 1); 21057 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 21058 tp->snd_nxt = tp->snd_max; 21059 } else { 21060 int end_window = 0; 21061 uint32_t seq = tp->gput_ack; 21062 21063 rsm = tqhash_max(rack->r_ctl.tqh); 21064 if (rsm) { 21065 /* 21066 * Mark the last sent that we just-returned (hinting 21067 * that delayed ack may play a role in any rtt measurement). 21068 */ 21069 rsm->r_just_ret = 1; 21070 } 21071 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 21072 rack->r_ctl.rc_agg_delayed = 0; 21073 rack->r_early = 0; 21074 rack->r_late = 0; 21075 rack->r_ctl.rc_agg_early = 0; 21076 if ((ctf_outstanding(tp) + 21077 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), 21078 minseg)) >= tp->snd_wnd) { 21079 /* We are limited by the rwnd */ 21080 app_limited = CTF_JR_RWND_LIMITED; 21081 if (IN_FASTRECOVERY(tp->t_flags)) 21082 rack->r_ctl.rc_prr_sndcnt = 0; 21083 } else if (ctf_outstanding(tp) >= sbavail(sb)) { 21084 /* We are limited by whats available -- app limited */ 21085 app_limited = CTF_JR_APP_LIMITED; 21086 if (IN_FASTRECOVERY(tp->t_flags)) 21087 rack->r_ctl.rc_prr_sndcnt = 0; 21088 } else if ((idle == 0) && 21089 ((tp->t_flags & TF_NODELAY) == 0) && 21090 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 21091 (len < segsiz)) { 21092 /* 21093 * No delay is not on and the 21094 * user is sending less than 1MSS. This 21095 * brings out SWS avoidance so we 21096 * don't send. Another app-limited case. 21097 */ 21098 app_limited = CTF_JR_APP_LIMITED; 21099 } else if (tp->t_flags & TF_NOPUSH) { 21100 /* 21101 * The user has requested no push of 21102 * the last segment and we are 21103 * at the last segment. Another app 21104 * limited case. 21105 */ 21106 app_limited = CTF_JR_APP_LIMITED; 21107 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) { 21108 /* Its the cwnd */ 21109 app_limited = CTF_JR_CWND_LIMITED; 21110 } else if (IN_FASTRECOVERY(tp->t_flags) && 21111 (rack->rack_no_prr == 0) && 21112 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { 21113 app_limited = CTF_JR_PRR; 21114 } else { 21115 /* Now why here are we not sending? */ 21116 #ifdef NOW 21117 #ifdef INVARIANTS 21118 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use); 21119 #endif 21120 #endif 21121 app_limited = CTF_JR_ASSESSING; 21122 } 21123 /* 21124 * App limited in some fashion, for our pacing GP 21125 * measurements we don't want any gap (even cwnd). 21126 * Close down the measurement window. 21127 */ 21128 if (rack_cwnd_block_ends_measure && 21129 ((app_limited == CTF_JR_CWND_LIMITED) || 21130 (app_limited == CTF_JR_PRR))) { 21131 /* 21132 * The reason we are not sending is 21133 * the cwnd (or prr). We have been configured 21134 * to end the measurement window in 21135 * this case. 21136 */ 21137 end_window = 1; 21138 } else if (rack_rwnd_block_ends_measure && 21139 (app_limited == CTF_JR_RWND_LIMITED)) { 21140 /* 21141 * We are rwnd limited and have been 21142 * configured to end the measurement 21143 * window in this case. 21144 */ 21145 end_window = 1; 21146 } else if (app_limited == CTF_JR_APP_LIMITED) { 21147 /* 21148 * A true application limited period, we have 21149 * ran out of data. 21150 */ 21151 end_window = 1; 21152 } else if (app_limited == CTF_JR_ASSESSING) { 21153 /* 21154 * In the assessing case we hit the end of 21155 * the if/else and had no known reason 21156 * This will panic us under invariants.. 21157 * 21158 * If we get this out in logs we need to 21159 * investagate which reason we missed. 21160 */ 21161 end_window = 1; 21162 } 21163 if (end_window) { 21164 uint8_t log = 0; 21165 21166 /* Adjust the Gput measurement */ 21167 if ((tp->t_flags & TF_GPUTINPROG) && 21168 SEQ_GT(tp->gput_ack, tp->snd_max)) { 21169 tp->gput_ack = tp->snd_max; 21170 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 21171 /* 21172 * There is not enough to measure. 21173 */ 21174 tp->t_flags &= ~TF_GPUTINPROG; 21175 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 21176 rack->r_ctl.rc_gp_srtt /*flex1*/, 21177 tp->gput_seq, 21178 0, 0, 18, __LINE__, NULL, 0); 21179 } else 21180 log = 1; 21181 } 21182 /* Mark the last packet has app limited */ 21183 rsm = tqhash_max(rack->r_ctl.tqh); 21184 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 21185 if (rack->r_ctl.rc_app_limited_cnt == 0) 21186 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 21187 else { 21188 /* 21189 * Go out to the end app limited and mark 21190 * this new one as next and move the end_appl up 21191 * to this guy. 21192 */ 21193 if (rack->r_ctl.rc_end_appl) 21194 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 21195 rack->r_ctl.rc_end_appl = rsm; 21196 } 21197 rsm->r_flags |= RACK_APP_LIMITED; 21198 rack->r_ctl.rc_app_limited_cnt++; 21199 } 21200 if (log) 21201 rack_log_pacing_delay_calc(rack, 21202 rack->r_ctl.rc_app_limited_cnt, seq, 21203 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); 21204 } 21205 } 21206 /* Check if we need to go into persists or not */ 21207 if ((tp->snd_max == tp->snd_una) && 21208 TCPS_HAVEESTABLISHED(tp->t_state) && 21209 sbavail(sb) && 21210 (sbavail(sb) > tp->snd_wnd) && 21211 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 21212 /* Yes lets make sure to move to persist before timer-start */ 21213 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); 21214 } 21215 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 21216 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use); 21217 } 21218 #ifdef NETFLIX_SHARED_CWND 21219 if ((sbavail(sb) == 0) && 21220 rack->r_ctl.rc_scw) { 21221 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 21222 rack->rack_scwnd_is_idle = 1; 21223 } 21224 #endif 21225 #ifdef TCP_ACCOUNTING 21226 if (tot_len_this_send > 0) { 21227 crtsc = get_cyclecount(); 21228 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21229 tp->tcp_cnt_counters[SND_OUT_DATA]++; 21230 } 21231 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21232 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 21233 } 21234 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21235 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 21236 } 21237 } else { 21238 crtsc = get_cyclecount(); 21239 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21240 tp->tcp_cnt_counters[SND_LIMITED]++; 21241 } 21242 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21243 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 21244 } 21245 } 21246 sched_unpin(); 21247 #endif 21248 return (0); 21249 21250 send: 21251 if ((rack->r_ctl.crte != NULL) && 21252 (rsm == NULL) && 21253 ((rack->rc_hw_nobuf == 1) || 21254 (rack_hw_check_queue && (check_done == 0)))) { 21255 /* 21256 * We only want to do this once with the hw_check_queue, 21257 * for the enobuf case we would only do it once if 21258 * we come around to again, the flag will be clear. 21259 */ 21260 check_done = 1; 21261 slot = rack_check_queue_level(rack, tp, &tv, cts, len, segsiz); 21262 if (slot) { 21263 rack->r_ctl.rc_agg_delayed = 0; 21264 rack->r_ctl.rc_agg_early = 0; 21265 rack->r_early = 0; 21266 rack->r_late = 0; 21267 SOCKBUF_UNLOCK(&so->so_snd); 21268 goto skip_all_send; 21269 } 21270 } 21271 if (rsm || sack_rxmit) 21272 counter_u64_add(rack_nfto_resend, 1); 21273 else 21274 counter_u64_add(rack_non_fto_send, 1); 21275 if ((flags & TH_FIN) && 21276 sbavail(sb)) { 21277 /* 21278 * We do not transmit a FIN 21279 * with data outstanding. We 21280 * need to make it so all data 21281 * is acked first. 21282 */ 21283 flags &= ~TH_FIN; 21284 } 21285 /* Enforce stack imposed max seg size if we have one */ 21286 if (rack->r_ctl.rc_pace_max_segs && 21287 (len > rack->r_ctl.rc_pace_max_segs)) { 21288 mark = 1; 21289 len = rack->r_ctl.rc_pace_max_segs; 21290 } 21291 SOCKBUF_LOCK_ASSERT(sb); 21292 if (len > 0) { 21293 if (len >= segsiz) 21294 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 21295 else 21296 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 21297 } 21298 /* 21299 * Before ESTABLISHED, force sending of initial options unless TCP 21300 * set not to do any options. NOTE: we assume that the IP/TCP header 21301 * plus TCP options always fit in a single mbuf, leaving room for a 21302 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 21303 * + optlen <= MCLBYTES 21304 */ 21305 optlen = 0; 21306 #ifdef INET6 21307 if (isipv6) 21308 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 21309 else 21310 #endif 21311 hdrlen = sizeof(struct tcpiphdr); 21312 21313 /* 21314 * Compute options for segment. We only have to care about SYN and 21315 * established connection segments. Options for SYN-ACK segments 21316 * are handled in TCP syncache. 21317 */ 21318 to.to_flags = 0; 21319 if ((tp->t_flags & TF_NOOPT) == 0) { 21320 /* Maximum segment size. */ 21321 if (flags & TH_SYN) { 21322 tp->snd_nxt = tp->iss; 21323 to.to_mss = tcp_mssopt(&inp->inp_inc); 21324 if (tp->t_port) 21325 to.to_mss -= V_tcp_udp_tunneling_overhead; 21326 to.to_flags |= TOF_MSS; 21327 21328 /* 21329 * On SYN or SYN|ACK transmits on TFO connections, 21330 * only include the TFO option if it is not a 21331 * retransmit, as the presence of the TFO option may 21332 * have caused the original SYN or SYN|ACK to have 21333 * been dropped by a middlebox. 21334 */ 21335 if (IS_FASTOPEN(tp->t_flags) && 21336 (tp->t_rxtshift == 0)) { 21337 if (tp->t_state == TCPS_SYN_RECEIVED) { 21338 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 21339 to.to_tfo_cookie = 21340 (u_int8_t *)&tp->t_tfo_cookie.server; 21341 to.to_flags |= TOF_FASTOPEN; 21342 wanted_cookie = 1; 21343 } else if (tp->t_state == TCPS_SYN_SENT) { 21344 to.to_tfo_len = 21345 tp->t_tfo_client_cookie_len; 21346 to.to_tfo_cookie = 21347 tp->t_tfo_cookie.client; 21348 to.to_flags |= TOF_FASTOPEN; 21349 wanted_cookie = 1; 21350 /* 21351 * If we wind up having more data to 21352 * send with the SYN than can fit in 21353 * one segment, don't send any more 21354 * until the SYN|ACK comes back from 21355 * the other end. 21356 */ 21357 sendalot = 0; 21358 } 21359 } 21360 } 21361 /* Window scaling. */ 21362 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 21363 to.to_wscale = tp->request_r_scale; 21364 to.to_flags |= TOF_SCALE; 21365 } 21366 /* Timestamps. */ 21367 if ((tp->t_flags & TF_RCVD_TSTMP) || 21368 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 21369 to.to_tsval = ms_cts + tp->ts_offset; 21370 to.to_tsecr = tp->ts_recent; 21371 to.to_flags |= TOF_TS; 21372 } 21373 /* Set receive buffer autosizing timestamp. */ 21374 if (tp->rfbuf_ts == 0 && 21375 (so->so_rcv.sb_flags & SB_AUTOSIZE)) 21376 tp->rfbuf_ts = tcp_ts_getticks(); 21377 /* Selective ACK's. */ 21378 if (tp->t_flags & TF_SACK_PERMIT) { 21379 if (flags & TH_SYN) 21380 to.to_flags |= TOF_SACKPERM; 21381 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 21382 tp->rcv_numsacks > 0) { 21383 to.to_flags |= TOF_SACK; 21384 to.to_nsacks = tp->rcv_numsacks; 21385 to.to_sacks = (u_char *)tp->sackblks; 21386 } 21387 } 21388 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 21389 /* TCP-MD5 (RFC2385). */ 21390 if (tp->t_flags & TF_SIGNATURE) 21391 to.to_flags |= TOF_SIGNATURE; 21392 #endif 21393 21394 /* Processing the options. */ 21395 hdrlen += optlen = tcp_addoptions(&to, opt); 21396 /* 21397 * If we wanted a TFO option to be added, but it was unable 21398 * to fit, ensure no data is sent. 21399 */ 21400 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie && 21401 !(to.to_flags & TOF_FASTOPEN)) 21402 len = 0; 21403 } 21404 if (tp->t_port) { 21405 if (V_tcp_udp_tunneling_port == 0) { 21406 /* The port was removed?? */ 21407 SOCKBUF_UNLOCK(&so->so_snd); 21408 #ifdef TCP_ACCOUNTING 21409 crtsc = get_cyclecount(); 21410 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21411 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 21412 } 21413 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21414 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 21415 } 21416 sched_unpin(); 21417 #endif 21418 return (EHOSTUNREACH); 21419 } 21420 hdrlen += sizeof(struct udphdr); 21421 } 21422 #ifdef INET6 21423 if (isipv6) 21424 ipoptlen = ip6_optlen(inp); 21425 else 21426 #endif 21427 if (inp->inp_options) 21428 ipoptlen = inp->inp_options->m_len - 21429 offsetof(struct ipoption, ipopt_list); 21430 else 21431 ipoptlen = 0; 21432 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 21433 ipoptlen += ipsec_optlen; 21434 #endif 21435 21436 /* 21437 * Adjust data length if insertion of options will bump the packet 21438 * length beyond the t_maxseg length. Clear the FIN bit because we 21439 * cut off the tail of the segment. 21440 */ 21441 if (len + optlen + ipoptlen > tp->t_maxseg) { 21442 if (tso) { 21443 uint32_t if_hw_tsomax; 21444 uint32_t moff; 21445 int32_t max_len; 21446 21447 /* extract TSO information */ 21448 if_hw_tsomax = tp->t_tsomax; 21449 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 21450 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 21451 KASSERT(ipoptlen == 0, 21452 ("%s: TSO can't do IP options", __func__)); 21453 21454 /* 21455 * Check if we should limit by maximum payload 21456 * length: 21457 */ 21458 if (if_hw_tsomax != 0) { 21459 /* compute maximum TSO length */ 21460 max_len = (if_hw_tsomax - hdrlen - 21461 max_linkhdr); 21462 if (max_len <= 0) { 21463 len = 0; 21464 } else if (len > max_len) { 21465 sendalot = 1; 21466 len = max_len; 21467 mark = 2; 21468 } 21469 } 21470 /* 21471 * Prevent the last segment from being fractional 21472 * unless the send sockbuf can be emptied: 21473 */ 21474 max_len = (tp->t_maxseg - optlen); 21475 if ((sb_offset + len) < sbavail(sb)) { 21476 moff = len % (u_int)max_len; 21477 if (moff != 0) { 21478 mark = 3; 21479 len -= moff; 21480 } 21481 } 21482 /* 21483 * In case there are too many small fragments don't 21484 * use TSO: 21485 */ 21486 if (len <= max_len) { 21487 mark = 4; 21488 tso = 0; 21489 } 21490 /* 21491 * Send the FIN in a separate segment after the bulk 21492 * sending is done. We don't trust the TSO 21493 * implementations to clear the FIN flag on all but 21494 * the last segment. 21495 */ 21496 if (tp->t_flags & TF_NEEDFIN) { 21497 sendalot = 4; 21498 } 21499 } else { 21500 mark = 5; 21501 if (optlen + ipoptlen >= tp->t_maxseg) { 21502 /* 21503 * Since we don't have enough space to put 21504 * the IP header chain and the TCP header in 21505 * one packet as required by RFC 7112, don't 21506 * send it. Also ensure that at least one 21507 * byte of the payload can be put into the 21508 * TCP segment. 21509 */ 21510 SOCKBUF_UNLOCK(&so->so_snd); 21511 error = EMSGSIZE; 21512 sack_rxmit = 0; 21513 goto out; 21514 } 21515 len = tp->t_maxseg - optlen - ipoptlen; 21516 sendalot = 5; 21517 } 21518 } else { 21519 tso = 0; 21520 mark = 6; 21521 } 21522 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 21523 ("%s: len > IP_MAXPACKET", __func__)); 21524 #ifdef DIAGNOSTIC 21525 #ifdef INET6 21526 if (max_linkhdr + hdrlen > MCLBYTES) 21527 #else 21528 if (max_linkhdr + hdrlen > MHLEN) 21529 #endif 21530 panic("tcphdr too big"); 21531 #endif 21532 21533 /* 21534 * This KASSERT is here to catch edge cases at a well defined place. 21535 * Before, those had triggered (random) panic conditions further 21536 * down. 21537 */ 21538 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 21539 if ((len == 0) && 21540 (flags & TH_FIN) && 21541 (sbused(sb))) { 21542 /* 21543 * We have outstanding data, don't send a fin by itself!. 21544 */ 21545 goto just_return; 21546 } 21547 /* 21548 * Grab a header mbuf, attaching a copy of data to be transmitted, 21549 * and initialize the header from the template for sends on this 21550 * connection. 21551 */ 21552 hw_tls = tp->t_nic_ktls_xmit != 0; 21553 if (len) { 21554 uint32_t max_val; 21555 uint32_t moff; 21556 21557 if (rack->r_ctl.rc_pace_max_segs) 21558 max_val = rack->r_ctl.rc_pace_max_segs; 21559 else if (rack->rc_user_set_max_segs) 21560 max_val = rack->rc_user_set_max_segs * segsiz; 21561 else 21562 max_val = len; 21563 /* 21564 * We allow a limit on sending with hptsi. 21565 */ 21566 if (len > max_val) { 21567 mark = 7; 21568 len = max_val; 21569 } 21570 #ifdef INET6 21571 if (MHLEN < hdrlen + max_linkhdr) 21572 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 21573 else 21574 #endif 21575 m = m_gethdr(M_NOWAIT, MT_DATA); 21576 21577 if (m == NULL) { 21578 SOCKBUF_UNLOCK(sb); 21579 error = ENOBUFS; 21580 sack_rxmit = 0; 21581 goto out; 21582 } 21583 m->m_data += max_linkhdr; 21584 m->m_len = hdrlen; 21585 21586 /* 21587 * Start the m_copy functions from the closest mbuf to the 21588 * sb_offset in the socket buffer chain. 21589 */ 21590 mb = sbsndptr_noadv(sb, sb_offset, &moff); 21591 s_mb = mb; 21592 s_moff = moff; 21593 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 21594 m_copydata(mb, moff, (int)len, 21595 mtod(m, caddr_t)+hdrlen); 21596 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 21597 sbsndptr_adv(sb, mb, len); 21598 m->m_len += len; 21599 } else { 21600 struct sockbuf *msb; 21601 21602 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 21603 msb = NULL; 21604 else 21605 msb = sb; 21606 m->m_next = tcp_m_copym( 21607 mb, moff, &len, 21608 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 21609 ((rsm == NULL) ? hw_tls : 0) 21610 #ifdef NETFLIX_COPY_ARGS 21611 , &s_mb, &s_moff 21612 #endif 21613 ); 21614 if (len <= (tp->t_maxseg - optlen)) { 21615 /* 21616 * Must have ran out of mbufs for the copy 21617 * shorten it to no longer need tso. Lets 21618 * not put on sendalot since we are low on 21619 * mbufs. 21620 */ 21621 tso = 0; 21622 } 21623 if (m->m_next == NULL) { 21624 SOCKBUF_UNLOCK(sb); 21625 (void)m_free(m); 21626 error = ENOBUFS; 21627 sack_rxmit = 0; 21628 goto out; 21629 } 21630 } 21631 if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) { 21632 if (rsm && (rsm->r_flags & RACK_TLP)) { 21633 /* 21634 * TLP should not count in retran count, but 21635 * in its own bin 21636 */ 21637 counter_u64_add(rack_tlp_retran, 1); 21638 counter_u64_add(rack_tlp_retran_bytes, len); 21639 } else { 21640 tp->t_sndrexmitpack++; 21641 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 21642 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 21643 } 21644 #ifdef STATS 21645 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 21646 len); 21647 #endif 21648 } else { 21649 KMOD_TCPSTAT_INC(tcps_sndpack); 21650 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 21651 #ifdef STATS 21652 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 21653 len); 21654 #endif 21655 } 21656 /* 21657 * If we're sending everything we've got, set PUSH. (This 21658 * will keep happy those implementations which only give 21659 * data to the user when a buffer fills or a PUSH comes in.) 21660 */ 21661 if (sb_offset + len == sbused(sb) && 21662 sbused(sb) && 21663 !(flags & TH_SYN)) { 21664 flags |= TH_PUSH; 21665 add_flag |= RACK_HAD_PUSH; 21666 } 21667 21668 SOCKBUF_UNLOCK(sb); 21669 } else { 21670 SOCKBUF_UNLOCK(sb); 21671 if (tp->t_flags & TF_ACKNOW) 21672 KMOD_TCPSTAT_INC(tcps_sndacks); 21673 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 21674 KMOD_TCPSTAT_INC(tcps_sndctrl); 21675 else 21676 KMOD_TCPSTAT_INC(tcps_sndwinup); 21677 21678 m = m_gethdr(M_NOWAIT, MT_DATA); 21679 if (m == NULL) { 21680 error = ENOBUFS; 21681 sack_rxmit = 0; 21682 goto out; 21683 } 21684 #ifdef INET6 21685 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 21686 MHLEN >= hdrlen) { 21687 M_ALIGN(m, hdrlen); 21688 } else 21689 #endif 21690 m->m_data += max_linkhdr; 21691 m->m_len = hdrlen; 21692 } 21693 SOCKBUF_UNLOCK_ASSERT(sb); 21694 m->m_pkthdr.rcvif = (struct ifnet *)0; 21695 #ifdef MAC 21696 mac_inpcb_create_mbuf(inp, m); 21697 #endif 21698 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 21699 #ifdef INET6 21700 if (isipv6) 21701 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 21702 else 21703 #endif /* INET6 */ 21704 #ifdef INET 21705 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 21706 #endif 21707 th = rack->r_ctl.fsb.th; 21708 udp = rack->r_ctl.fsb.udp; 21709 if (udp) { 21710 #ifdef INET6 21711 if (isipv6) 21712 ulen = hdrlen + len - sizeof(struct ip6_hdr); 21713 else 21714 #endif /* INET6 */ 21715 ulen = hdrlen + len - sizeof(struct ip); 21716 udp->uh_ulen = htons(ulen); 21717 } 21718 } else { 21719 #ifdef INET6 21720 if (isipv6) { 21721 ip6 = mtod(m, struct ip6_hdr *); 21722 if (tp->t_port) { 21723 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 21724 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 21725 udp->uh_dport = tp->t_port; 21726 ulen = hdrlen + len - sizeof(struct ip6_hdr); 21727 udp->uh_ulen = htons(ulen); 21728 th = (struct tcphdr *)(udp + 1); 21729 } else 21730 th = (struct tcphdr *)(ip6 + 1); 21731 tcpip_fillheaders(inp, tp->t_port, ip6, th); 21732 } else 21733 #endif /* INET6 */ 21734 { 21735 #ifdef INET 21736 ip = mtod(m, struct ip *); 21737 if (tp->t_port) { 21738 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 21739 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 21740 udp->uh_dport = tp->t_port; 21741 ulen = hdrlen + len - sizeof(struct ip); 21742 udp->uh_ulen = htons(ulen); 21743 th = (struct tcphdr *)(udp + 1); 21744 } else 21745 th = (struct tcphdr *)(ip + 1); 21746 tcpip_fillheaders(inp, tp->t_port, ip, th); 21747 #endif 21748 } 21749 } 21750 /* 21751 * Fill in fields, remembering maximum advertised window for use in 21752 * delaying messages about window sizes. If resending a FIN, be sure 21753 * not to use a new sequence number. 21754 */ 21755 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && 21756 tp->snd_nxt == tp->snd_max) 21757 tp->snd_nxt--; 21758 /* 21759 * If we are starting a connection, send ECN setup SYN packet. If we 21760 * are on a retransmit, we may resend those bits a number of times 21761 * as per RFC 3168. 21762 */ 21763 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { 21764 flags |= tcp_ecn_output_syn_sent(tp); 21765 } 21766 /* Also handle parallel SYN for ECN */ 21767 if (TCPS_HAVERCVDSYN(tp->t_state) && 21768 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 21769 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit); 21770 if ((tp->t_state == TCPS_SYN_RECEIVED) && 21771 (tp->t_flags2 & TF2_ECN_SND_ECE)) 21772 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 21773 #ifdef INET6 21774 if (isipv6) { 21775 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 21776 ip6->ip6_flow |= htonl(ect << 20); 21777 } 21778 else 21779 #endif 21780 { 21781 #ifdef INET 21782 ip->ip_tos &= ~IPTOS_ECN_MASK; 21783 ip->ip_tos |= ect; 21784 #endif 21785 } 21786 } 21787 /* 21788 * If we are doing retransmissions, then snd_nxt will not reflect 21789 * the first unsent octet. For ACK only packets, we do not want the 21790 * sequence number of the retransmitted packet, we want the sequence 21791 * number of the next unsent octet. So, if there is no data (and no 21792 * SYN or FIN), use snd_max instead of snd_nxt when filling in 21793 * ti_seq. But if we are in persist state, snd_max might reflect 21794 * one byte beyond the right edge of the window, so use snd_nxt in 21795 * that case, since we know we aren't doing a retransmission. 21796 * (retransmit and persist are mutually exclusive...) 21797 */ 21798 if (sack_rxmit == 0) { 21799 if (len || (flags & (TH_SYN | TH_FIN))) { 21800 th->th_seq = htonl(tp->snd_nxt); 21801 rack_seq = tp->snd_nxt; 21802 } else { 21803 th->th_seq = htonl(tp->snd_max); 21804 rack_seq = tp->snd_max; 21805 } 21806 } else { 21807 th->th_seq = htonl(rsm->r_start); 21808 rack_seq = rsm->r_start; 21809 } 21810 th->th_ack = htonl(tp->rcv_nxt); 21811 tcp_set_flags(th, flags); 21812 /* 21813 * Calculate receive window. Don't shrink window, but avoid silly 21814 * window syndrome. 21815 * If a RST segment is sent, advertise a window of zero. 21816 */ 21817 if (flags & TH_RST) { 21818 recwin = 0; 21819 } else { 21820 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 21821 recwin < (long)segsiz) { 21822 recwin = 0; 21823 } 21824 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 21825 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 21826 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 21827 } 21828 21829 /* 21830 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 21831 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 21832 * handled in syncache. 21833 */ 21834 if (flags & TH_SYN) 21835 th->th_win = htons((u_short) 21836 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 21837 else { 21838 /* Avoid shrinking window with window scaling. */ 21839 recwin = roundup2(recwin, 1 << tp->rcv_scale); 21840 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 21841 } 21842 /* 21843 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 21844 * window. This may cause the remote transmitter to stall. This 21845 * flag tells soreceive() to disable delayed acknowledgements when 21846 * draining the buffer. This can occur if the receiver is 21847 * attempting to read more data than can be buffered prior to 21848 * transmitting on the connection. 21849 */ 21850 if (th->th_win == 0) { 21851 tp->t_sndzerowin++; 21852 tp->t_flags |= TF_RXWIN0SENT; 21853 } else 21854 tp->t_flags &= ~TF_RXWIN0SENT; 21855 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 21856 /* Now are we using fsb?, if so copy the template data to the mbuf */ 21857 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 21858 uint8_t *cpto; 21859 21860 cpto = mtod(m, uint8_t *); 21861 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 21862 /* 21863 * We have just copied in: 21864 * IP/IP6 21865 * <optional udphdr> 21866 * tcphdr (no options) 21867 * 21868 * We need to grab the correct pointers into the mbuf 21869 * for both the tcp header, and possibly the udp header (if tunneling). 21870 * We do this by using the offset in the copy buffer and adding it 21871 * to the mbuf base pointer (cpto). 21872 */ 21873 #ifdef INET6 21874 if (isipv6) 21875 ip6 = mtod(m, struct ip6_hdr *); 21876 else 21877 #endif /* INET6 */ 21878 #ifdef INET 21879 ip = mtod(m, struct ip *); 21880 #endif 21881 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 21882 /* If we have a udp header lets set it into the mbuf as well */ 21883 if (udp) 21884 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); 21885 } 21886 if (optlen) { 21887 bcopy(opt, th + 1, optlen); 21888 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 21889 } 21890 /* 21891 * Put TCP length in extended header, and then checksum extended 21892 * header and data. 21893 */ 21894 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 21895 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 21896 if (to.to_flags & TOF_SIGNATURE) { 21897 /* 21898 * Calculate MD5 signature and put it into the place 21899 * determined before. 21900 * NOTE: since TCP options buffer doesn't point into 21901 * mbuf's data, calculate offset and use it. 21902 */ 21903 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 21904 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 21905 /* 21906 * Do not send segment if the calculation of MD5 21907 * digest has failed. 21908 */ 21909 goto out; 21910 } 21911 } 21912 #endif 21913 #ifdef INET6 21914 if (isipv6) { 21915 /* 21916 * ip6_plen is not need to be filled now, and will be filled 21917 * in ip6_output. 21918 */ 21919 if (tp->t_port) { 21920 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 21921 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 21922 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 21923 th->th_sum = htons(0); 21924 UDPSTAT_INC(udps_opackets); 21925 } else { 21926 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 21927 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 21928 th->th_sum = in6_cksum_pseudo(ip6, 21929 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 21930 0); 21931 } 21932 } 21933 #endif 21934 #if defined(INET6) && defined(INET) 21935 else 21936 #endif 21937 #ifdef INET 21938 { 21939 if (tp->t_port) { 21940 m->m_pkthdr.csum_flags = CSUM_UDP; 21941 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 21942 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 21943 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 21944 th->th_sum = htons(0); 21945 UDPSTAT_INC(udps_opackets); 21946 } else { 21947 m->m_pkthdr.csum_flags = CSUM_TCP; 21948 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 21949 th->th_sum = in_pseudo(ip->ip_src.s_addr, 21950 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 21951 IPPROTO_TCP + len + optlen)); 21952 } 21953 /* IP version must be set here for ipv4/ipv6 checking later */ 21954 KASSERT(ip->ip_v == IPVERSION, 21955 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 21956 } 21957 #endif 21958 /* 21959 * Enable TSO and specify the size of the segments. The TCP pseudo 21960 * header checksum is always provided. XXX: Fixme: This is currently 21961 * not the case for IPv6. 21962 */ 21963 if (tso) { 21964 /* 21965 * Here we must use t_maxseg and the optlen since 21966 * the optlen may include SACK's (or DSACK). 21967 */ 21968 KASSERT(len > tp->t_maxseg - optlen, 21969 ("%s: len <= tso_segsz", __func__)); 21970 m->m_pkthdr.csum_flags |= CSUM_TSO; 21971 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 21972 } 21973 KASSERT(len + hdrlen == m_length(m, NULL), 21974 ("%s: mbuf chain different than expected: %d + %u != %u", 21975 __func__, len, hdrlen, m_length(m, NULL))); 21976 21977 #ifdef TCP_HHOOK 21978 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 21979 hhook_run_tcp_est_out(tp, th, &to, len, tso); 21980 #endif 21981 if ((rack->r_ctl.crte != NULL) && 21982 (rack->rc_hw_nobuf == 0) && 21983 tcp_bblogging_on(tp)) { 21984 rack_log_queue_level(tp, rack, len, &tv, cts); 21985 } 21986 /* We're getting ready to send; log now. */ 21987 if (tcp_bblogging_on(rack->rc_tp)) { 21988 union tcp_log_stackspecific log; 21989 21990 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 21991 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 21992 if (rack->rack_no_prr) 21993 log.u_bbr.flex1 = 0; 21994 else 21995 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 21996 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 21997 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 21998 log.u_bbr.flex4 = orig_len; 21999 /* Save off the early/late values */ 22000 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 22001 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 22002 log.u_bbr.bw_inuse = rack_get_bw(rack); 22003 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 22004 log.u_bbr.flex8 = 0; 22005 if (rsm) { 22006 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 22007 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 22008 counter_u64_add(rack_collapsed_win_rxt, 1); 22009 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 22010 } 22011 if (doing_tlp) 22012 log.u_bbr.flex8 = 2; 22013 else 22014 log.u_bbr.flex8 = 1; 22015 } else { 22016 if (doing_tlp) 22017 log.u_bbr.flex8 = 3; 22018 } 22019 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 22020 log.u_bbr.flex7 = mark; 22021 log.u_bbr.flex7 <<= 8; 22022 log.u_bbr.flex7 |= pass; 22023 log.u_bbr.pkts_out = tp->t_maxseg; 22024 log.u_bbr.timeStamp = cts; 22025 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 22026 if (rsm && (rsm->r_rtr_cnt > 0)) { 22027 /* 22028 * When we have a retransmit we want to log the 22029 * burst at send and flight at send from before. 22030 */ 22031 log.u_bbr.flex5 = rsm->r_fas; 22032 log.u_bbr.bbr_substate = rsm->r_bas; 22033 } else { 22034 /* 22035 * New transmits we log in flex5 the inflight again as 22036 * well as the number of segments in our send in the 22037 * substate field. 22038 */ 22039 log.u_bbr.flex5 = log.u_bbr.inflight; 22040 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 22041 } 22042 log.u_bbr.lt_epoch = cwnd_to_use; 22043 log.u_bbr.delivered = sendalot; 22044 log.u_bbr.rttProp = (uint64_t)rsm; 22045 log.u_bbr.pkt_epoch = __LINE__; 22046 if (rsm) { 22047 log.u_bbr.delRate = rsm->r_flags; 22048 log.u_bbr.delRate <<= 31; 22049 log.u_bbr.delRate |= rack->r_must_retran; 22050 log.u_bbr.delRate <<= 1; 22051 log.u_bbr.delRate |= (sack_rxmit & 0x00000001); 22052 } else { 22053 log.u_bbr.delRate = rack->r_must_retran; 22054 log.u_bbr.delRate <<= 1; 22055 log.u_bbr.delRate |= (sack_rxmit & 0x00000001); 22056 } 22057 lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 22058 len, &log, false, NULL, __func__, __LINE__, &tv); 22059 } else 22060 lgb = NULL; 22061 22062 /* 22063 * Fill in IP length and desired time to live and send to IP level. 22064 * There should be a better way to handle ttl and tos; we could keep 22065 * them in the template, but need a way to checksum without them. 22066 */ 22067 /* 22068 * m->m_pkthdr.len should have been set before cksum calcuration, 22069 * because in6_cksum() need it. 22070 */ 22071 #ifdef INET6 22072 if (isipv6) { 22073 /* 22074 * we separately set hoplimit for every segment, since the 22075 * user might want to change the value via setsockopt. Also, 22076 * desired default hop limit might be changed via Neighbor 22077 * Discovery. 22078 */ 22079 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); 22080 22081 /* 22082 * Set the packet size here for the benefit of DTrace 22083 * probes. ip6_output() will set it properly; it's supposed 22084 * to include the option header lengths as well. 22085 */ 22086 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 22087 22088 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 22089 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 22090 else 22091 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 22092 22093 if (tp->t_state == TCPS_SYN_SENT) 22094 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 22095 22096 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 22097 /* TODO: IPv6 IP6TOS_ECT bit on */ 22098 error = ip6_output(m, 22099 inp->in6p_outputopts, 22100 &inp->inp_route6, 22101 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 22102 NULL, NULL, inp); 22103 22104 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) 22105 mtu = inp->inp_route6.ro_nh->nh_mtu; 22106 } 22107 #endif /* INET6 */ 22108 #if defined(INET) && defined(INET6) 22109 else 22110 #endif 22111 #ifdef INET 22112 { 22113 ip->ip_len = htons(m->m_pkthdr.len); 22114 #ifdef INET6 22115 if (inp->inp_vflag & INP_IPV6PROTO) 22116 ip->ip_ttl = in6_selecthlim(inp, NULL); 22117 #endif /* INET6 */ 22118 rack->r_ctl.fsb.hoplimit = ip->ip_ttl; 22119 /* 22120 * If we do path MTU discovery, then we set DF on every 22121 * packet. This might not be the best thing to do according 22122 * to RFC3390 Section 2. However the tcp hostcache migitates 22123 * the problem so it affects only the first tcp connection 22124 * with a host. 22125 * 22126 * NB: Don't set DF on small MTU/MSS to have a safe 22127 * fallback. 22128 */ 22129 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 22130 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 22131 if (tp->t_port == 0 || len < V_tcp_minmss) { 22132 ip->ip_off |= htons(IP_DF); 22133 } 22134 } else { 22135 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 22136 } 22137 22138 if (tp->t_state == TCPS_SYN_SENT) 22139 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 22140 22141 TCP_PROBE5(send, NULL, tp, ip, tp, th); 22142 22143 error = ip_output(m, 22144 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 22145 inp->inp_options, 22146 #else 22147 NULL, 22148 #endif 22149 &inp->inp_route, 22150 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0, 22151 inp); 22152 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) 22153 mtu = inp->inp_route.ro_nh->nh_mtu; 22154 } 22155 #endif /* INET */ 22156 22157 out: 22158 if (lgb) { 22159 lgb->tlb_errno = error; 22160 lgb = NULL; 22161 } 22162 /* 22163 * In transmit state, time the transmission and arrange for the 22164 * retransmit. In persist state, just set snd_max. 22165 */ 22166 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 22167 rack_to_usec_ts(&tv), 22168 rsm, add_flag, s_mb, s_moff, hw_tls, segsiz); 22169 if (error == 0) { 22170 if (rsm == NULL) { 22171 if (rack->lt_bw_up == 0) { 22172 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv); 22173 rack->r_ctl.lt_seq = tp->snd_una; 22174 rack->lt_bw_up = 1; 22175 } else if (((rack_seq + len) - rack->r_ctl.lt_seq) > 0x7fffffff) { 22176 /* 22177 * Need to record what we have since we are 22178 * approaching seq wrap. 22179 */ 22180 uint64_t tmark; 22181 22182 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); 22183 rack->r_ctl.lt_seq = tp->snd_una; 22184 tmark = tcp_tv_to_lusectick(&tv); 22185 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 22186 rack->r_ctl.lt_timemark = tmark; 22187 } 22188 } 22189 rack->forced_ack = 0; /* If we send something zap the FA flag */ 22190 counter_u64_add(rack_total_bytes, len); 22191 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 22192 if (rsm && doing_tlp) { 22193 rack->rc_last_sent_tlp_past_cumack = 0; 22194 rack->rc_last_sent_tlp_seq_valid = 1; 22195 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 22196 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 22197 } 22198 if (rack->rc_hw_nobuf) { 22199 rack->rc_hw_nobuf = 0; 22200 rack->r_ctl.rc_agg_delayed = 0; 22201 rack->r_early = 0; 22202 rack->r_late = 0; 22203 rack->r_ctl.rc_agg_early = 0; 22204 } 22205 if (rsm && (doing_tlp == 0)) { 22206 /* Set we retransmitted */ 22207 rack->rc_gp_saw_rec = 1; 22208 } else { 22209 if (cwnd_to_use > tp->snd_ssthresh) { 22210 /* Set we sent in CA */ 22211 rack->rc_gp_saw_ca = 1; 22212 } else { 22213 /* Set we sent in SS */ 22214 rack->rc_gp_saw_ss = 1; 22215 } 22216 } 22217 if (TCPS_HAVEESTABLISHED(tp->t_state) && 22218 (tp->t_flags & TF_SACK_PERMIT) && 22219 tp->rcv_numsacks > 0) 22220 tcp_clean_dsack_blocks(tp); 22221 tot_len_this_send += len; 22222 if (len == 0) { 22223 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 22224 } else { 22225 int idx; 22226 22227 idx = (len / segsiz) + 3; 22228 if (idx >= TCP_MSS_ACCT_ATIMER) 22229 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 22230 else 22231 counter_u64_add(rack_out_size[idx], 1); 22232 } 22233 } 22234 if ((rack->rack_no_prr == 0) && 22235 sub_from_prr && 22236 (error == 0)) { 22237 if (rack->r_ctl.rc_prr_sndcnt >= len) 22238 rack->r_ctl.rc_prr_sndcnt -= len; 22239 else 22240 rack->r_ctl.rc_prr_sndcnt = 0; 22241 } 22242 sub_from_prr = 0; 22243 if (doing_tlp) { 22244 /* Make sure the TLP is added */ 22245 add_flag |= RACK_TLP; 22246 } else if (rsm) { 22247 /* If its a resend without TLP then it must not have the flag */ 22248 rsm->r_flags &= ~RACK_TLP; 22249 } 22250 22251 22252 if ((error == 0) && 22253 (len > 0) && 22254 (tp->snd_una == tp->snd_max)) 22255 rack->r_ctl.rc_tlp_rxt_last_time = cts; 22256 { 22257 tcp_seq startseq = tp->snd_nxt; 22258 22259 /* Track our lost count */ 22260 if (rsm && (doing_tlp == 0)) 22261 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 22262 /* 22263 * Advance snd_nxt over sequence space of this segment. 22264 */ 22265 if (error) 22266 /* We don't log or do anything with errors */ 22267 goto nomore; 22268 if (doing_tlp == 0) { 22269 if (rsm == NULL) { 22270 /* 22271 * Not a retransmission of some 22272 * sort, new data is going out so 22273 * clear our TLP count and flag. 22274 */ 22275 rack->rc_tlp_in_progress = 0; 22276 rack->r_ctl.rc_tlp_cnt_out = 0; 22277 } 22278 } else { 22279 /* 22280 * We have just sent a TLP, mark that it is true 22281 * and make sure our in progress is set so we 22282 * continue to check the count. 22283 */ 22284 rack->rc_tlp_in_progress = 1; 22285 rack->r_ctl.rc_tlp_cnt_out++; 22286 } 22287 if (flags & (TH_SYN | TH_FIN)) { 22288 if (flags & TH_SYN) 22289 tp->snd_nxt++; 22290 if (flags & TH_FIN) { 22291 tp->snd_nxt++; 22292 tp->t_flags |= TF_SENTFIN; 22293 } 22294 } 22295 /* In the ENOBUFS case we do *not* update snd_max */ 22296 if (sack_rxmit) 22297 goto nomore; 22298 22299 tp->snd_nxt += len; 22300 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { 22301 if (tp->snd_una == tp->snd_max) { 22302 /* 22303 * Update the time we just added data since 22304 * none was outstanding. 22305 */ 22306 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 22307 tp->t_acktime = ticks; 22308 } 22309 tp->snd_max = tp->snd_nxt; 22310 if (rack->rc_new_rnd_needed) { 22311 /* 22312 * Update the rnd to start ticking not 22313 * that from a time perspective all of 22314 * the preceding idle time is "in the round" 22315 */ 22316 rack->rc_new_rnd_needed = 0; 22317 rack->r_ctl.roundends = tp->snd_max; 22318 } 22319 /* 22320 * Time this transmission if not a retransmission and 22321 * not currently timing anything. 22322 * This is only relevant in case of switching back to 22323 * the base stack. 22324 */ 22325 if (tp->t_rtttime == 0) { 22326 tp->t_rtttime = ticks; 22327 tp->t_rtseq = startseq; 22328 KMOD_TCPSTAT_INC(tcps_segstimed); 22329 } 22330 if (len && 22331 ((tp->t_flags & TF_GPUTINPROG) == 0)) 22332 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 22333 } 22334 /* 22335 * If we are doing FO we need to update the mbuf position and subtract 22336 * this happens when the peer sends us duplicate information and 22337 * we thus want to send a DSACK. 22338 * 22339 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 22340 * turned off? If not then we are going to echo multiple DSACK blocks 22341 * out (with the TSO), which we should not be doing. 22342 */ 22343 if (rack->r_fast_output && len) { 22344 if (rack->r_ctl.fsb.left_to_send > len) 22345 rack->r_ctl.fsb.left_to_send -= len; 22346 else 22347 rack->r_ctl.fsb.left_to_send = 0; 22348 if (rack->r_ctl.fsb.left_to_send < segsiz) 22349 rack->r_fast_output = 0; 22350 if (rack->r_fast_output) { 22351 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 22352 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 22353 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); 22354 } 22355 } 22356 } 22357 nomore: 22358 if (error) { 22359 rack->r_ctl.rc_agg_delayed = 0; 22360 rack->r_early = 0; 22361 rack->r_late = 0; 22362 rack->r_ctl.rc_agg_early = 0; 22363 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 22364 /* 22365 * Failures do not advance the seq counter above. For the 22366 * case of ENOBUFS we will fall out and retry in 1ms with 22367 * the hpts. Everything else will just have to retransmit 22368 * with the timer. 22369 * 22370 * In any case, we do not want to loop around for another 22371 * send without a good reason. 22372 */ 22373 sendalot = 0; 22374 switch (error) { 22375 case EPERM: 22376 tp->t_softerror = error; 22377 #ifdef TCP_ACCOUNTING 22378 crtsc = get_cyclecount(); 22379 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22380 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22381 } 22382 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22383 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22384 } 22385 sched_unpin(); 22386 #endif 22387 return (error); 22388 case ENOBUFS: 22389 /* 22390 * Pace us right away to retry in a some 22391 * time 22392 */ 22393 if (rack->r_ctl.crte != NULL) { 22394 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); 22395 if (tcp_bblogging_on(rack->rc_tp)) 22396 rack_log_queue_level(tp, rack, len, &tv, cts); 22397 } else 22398 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); 22399 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 22400 if (rack->rc_enobuf < 0x7f) 22401 rack->rc_enobuf++; 22402 if (slot < (10 * HPTS_USEC_IN_MSEC)) 22403 slot = 10 * HPTS_USEC_IN_MSEC; 22404 if (rack->r_ctl.crte != NULL) { 22405 counter_u64_add(rack_saw_enobuf_hw, 1); 22406 tcp_rl_log_enobuf(rack->r_ctl.crte); 22407 } 22408 counter_u64_add(rack_saw_enobuf, 1); 22409 goto enobufs; 22410 case EMSGSIZE: 22411 /* 22412 * For some reason the interface we used initially 22413 * to send segments changed to another or lowered 22414 * its MTU. If TSO was active we either got an 22415 * interface without TSO capabilits or TSO was 22416 * turned off. If we obtained mtu from ip_output() 22417 * then update it and try again. 22418 */ 22419 if (tso) 22420 tp->t_flags &= ~TF_TSO; 22421 if (mtu != 0) { 22422 int saved_mtu; 22423 22424 saved_mtu = tp->t_maxseg; 22425 tcp_mss_update(tp, -1, mtu, NULL, NULL); 22426 if (saved_mtu > tp->t_maxseg) { 22427 goto again; 22428 } 22429 } 22430 slot = 10 * HPTS_USEC_IN_MSEC; 22431 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 22432 #ifdef TCP_ACCOUNTING 22433 crtsc = get_cyclecount(); 22434 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22435 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22436 } 22437 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22438 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22439 } 22440 sched_unpin(); 22441 #endif 22442 return (error); 22443 case ENETUNREACH: 22444 counter_u64_add(rack_saw_enetunreach, 1); 22445 case EHOSTDOWN: 22446 case EHOSTUNREACH: 22447 case ENETDOWN: 22448 if (TCPS_HAVERCVDSYN(tp->t_state)) { 22449 tp->t_softerror = error; 22450 } 22451 /* FALLTHROUGH */ 22452 default: 22453 slot = 10 * HPTS_USEC_IN_MSEC; 22454 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 22455 #ifdef TCP_ACCOUNTING 22456 crtsc = get_cyclecount(); 22457 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22458 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22459 } 22460 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22461 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22462 } 22463 sched_unpin(); 22464 #endif 22465 return (error); 22466 } 22467 } else { 22468 rack->rc_enobuf = 0; 22469 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 22470 rack->r_ctl.retran_during_recovery += len; 22471 } 22472 KMOD_TCPSTAT_INC(tcps_sndtotal); 22473 22474 /* 22475 * Data sent (as far as we can tell). If this advertises a larger 22476 * window than any other segment, then remember the size of the 22477 * advertised window. Any pending ACK has now been sent. 22478 */ 22479 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 22480 tp->rcv_adv = tp->rcv_nxt + recwin; 22481 22482 tp->last_ack_sent = tp->rcv_nxt; 22483 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 22484 enobufs: 22485 if (sendalot) { 22486 /* Do we need to turn off sendalot? */ 22487 if (rack->r_ctl.rc_pace_max_segs && 22488 (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) { 22489 /* We hit our max. */ 22490 sendalot = 0; 22491 } else if ((rack->rc_user_set_max_segs) && 22492 (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) { 22493 /* We hit the user defined max */ 22494 sendalot = 0; 22495 } 22496 } 22497 if ((error == 0) && (flags & TH_FIN)) 22498 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 22499 if (flags & TH_RST) { 22500 /* 22501 * We don't send again after sending a RST. 22502 */ 22503 slot = 0; 22504 sendalot = 0; 22505 if (error == 0) 22506 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 22507 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) { 22508 /* 22509 * Get our pacing rate, if an error 22510 * occurred in sending (ENOBUF) we would 22511 * hit the else if with slot preset. Other 22512 * errors return. 22513 */ 22514 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz); 22515 } 22516 if (rsm && 22517 (rsm->r_flags & RACK_HAS_SYN) == 0 && 22518 rack->use_rack_rr) { 22519 /* Its a retransmit and we use the rack cheat? */ 22520 if ((slot == 0) || 22521 (rack->rc_always_pace == 0) || 22522 (rack->r_rr_config == 1)) { 22523 /* 22524 * We have no pacing set or we 22525 * are using old-style rack or 22526 * we are overridden to use the old 1ms pacing. 22527 */ 22528 slot = rack->r_ctl.rc_min_to; 22529 } 22530 } 22531 /* We have sent clear the flag */ 22532 rack->r_ent_rec_ns = 0; 22533 if (rack->r_must_retran) { 22534 if (rsm) { 22535 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 22536 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 22537 /* 22538 * We have retransmitted all. 22539 */ 22540 rack->r_must_retran = 0; 22541 rack->r_ctl.rc_out_at_rto = 0; 22542 } 22543 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 22544 /* 22545 * Sending new data will also kill 22546 * the loop. 22547 */ 22548 rack->r_must_retran = 0; 22549 rack->r_ctl.rc_out_at_rto = 0; 22550 } 22551 } 22552 rack->r_ctl.fsb.recwin = recwin; 22553 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 22554 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 22555 /* 22556 * We hit an RTO and now have past snd_max at the RTO 22557 * clear all the WAS flags. 22558 */ 22559 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 22560 } 22561 if (slot) { 22562 /* set the rack tcb into the slot N */ 22563 if ((error == 0) && 22564 rack_use_rfo && 22565 ((flags & (TH_SYN|TH_FIN)) == 0) && 22566 (rsm == NULL) && 22567 (tp->snd_nxt == tp->snd_max) && 22568 (ipoptlen == 0) && 22569 (tp->rcv_numsacks == 0) && 22570 rack->r_fsb_inited && 22571 TCPS_HAVEESTABLISHED(tp->t_state) && 22572 ((IN_RECOVERY(tp->t_flags)) == 0) && 22573 (rack->r_must_retran == 0) && 22574 ((tp->t_flags & TF_NEEDFIN) == 0) && 22575 (len > 0) && (orig_len > 0) && 22576 (orig_len > len) && 22577 ((orig_len - len) >= segsiz) && 22578 ((optlen == 0) || 22579 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 22580 /* We can send at least one more MSS using our fsb */ 22581 rack_setup_fast_output(tp, rack, sb, len, orig_len, 22582 segsiz, pace_max_seg, hw_tls, flags); 22583 } else 22584 rack->r_fast_output = 0; 22585 rack_log_fsb(rack, tp, so, flags, 22586 ipoptlen, orig_len, len, error, 22587 (rsm == NULL), optlen, __LINE__, 2); 22588 } else if (sendalot) { 22589 int ret; 22590 22591 sack_rxmit = 0; 22592 if ((error == 0) && 22593 rack_use_rfo && 22594 ((flags & (TH_SYN|TH_FIN)) == 0) && 22595 (rsm == NULL) && 22596 (ipoptlen == 0) && 22597 (tp->rcv_numsacks == 0) && 22598 (tp->snd_nxt == tp->snd_max) && 22599 (rack->r_must_retran == 0) && 22600 rack->r_fsb_inited && 22601 TCPS_HAVEESTABLISHED(tp->t_state) && 22602 ((IN_RECOVERY(tp->t_flags)) == 0) && 22603 ((tp->t_flags & TF_NEEDFIN) == 0) && 22604 (len > 0) && (orig_len > 0) && 22605 (orig_len > len) && 22606 ((orig_len - len) >= segsiz) && 22607 ((optlen == 0) || 22608 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 22609 /* we can use fast_output for more */ 22610 rack_setup_fast_output(tp, rack, sb, len, orig_len, 22611 segsiz, pace_max_seg, hw_tls, flags); 22612 if (rack->r_fast_output) { 22613 error = 0; 22614 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 22615 if (ret >= 0) 22616 return (ret); 22617 else if (error) 22618 goto nomore; 22619 22620 } 22621 } 22622 goto again; 22623 } 22624 /* Assure when we leave that snd_nxt will point to top */ 22625 skip_all_send: 22626 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 22627 tp->snd_nxt = tp->snd_max; 22628 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 22629 #ifdef TCP_ACCOUNTING 22630 crtsc = get_cyclecount() - ts_val; 22631 if (tot_len_this_send) { 22632 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22633 tp->tcp_cnt_counters[SND_OUT_DATA]++; 22634 } 22635 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22636 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 22637 } 22638 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22639 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 22640 } 22641 } else { 22642 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22643 tp->tcp_cnt_counters[SND_OUT_ACK]++; 22644 } 22645 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22646 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 22647 } 22648 } 22649 sched_unpin(); 22650 #endif 22651 if (error == ENOBUFS) 22652 error = 0; 22653 return (error); 22654 } 22655 22656 static void 22657 rack_update_seg(struct tcp_rack *rack) 22658 { 22659 uint32_t orig_val; 22660 22661 orig_val = rack->r_ctl.rc_pace_max_segs; 22662 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 22663 if (orig_val != rack->r_ctl.rc_pace_max_segs) 22664 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); 22665 } 22666 22667 static void 22668 rack_mtu_change(struct tcpcb *tp) 22669 { 22670 /* 22671 * The MSS may have changed 22672 */ 22673 struct tcp_rack *rack; 22674 struct rack_sendmap *rsm; 22675 22676 rack = (struct tcp_rack *)tp->t_fb_ptr; 22677 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 22678 /* 22679 * The MTU has changed we need to resend everything 22680 * since all we have sent is lost. We first fix 22681 * up the mtu though. 22682 */ 22683 rack_set_pace_segments(tp, rack, __LINE__, NULL); 22684 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 22685 rack_remxt_tmr(tp); 22686 rack->r_fast_output = 0; 22687 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 22688 rack->r_ctl.rc_sacked); 22689 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 22690 rack->r_must_retran = 1; 22691 /* Mark all inflight to needing to be rxt'd */ 22692 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 22693 rsm->r_flags |= (RACK_MUST_RXT|RACK_PMTU_CHG); 22694 } 22695 } 22696 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 22697 /* We don't use snd_nxt to retransmit */ 22698 tp->snd_nxt = tp->snd_max; 22699 } 22700 22701 static int 22702 rack_set_dgp(struct tcp_rack *rack) 22703 { 22704 /* pace_always=1 */ 22705 if (rack->rc_always_pace == 0) { 22706 if (tcp_can_enable_pacing() == 0) 22707 return (EBUSY); 22708 } 22709 rack->rc_fillcw_apply_discount = 0; 22710 rack->dgp_on = 1; 22711 rack->rc_always_pace = 1; 22712 rack->use_fixed_rate = 0; 22713 if (rack->gp_ready) 22714 rack_set_cc_pacing(rack); 22715 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 22716 rack->rack_attempt_hdwr_pace = 0; 22717 /* rxt settings */ 22718 rack->full_size_rxt = 1; 22719 rack->shape_rxt_to_pacing_min = 0; 22720 /* cmpack=1 */ 22721 rack->r_use_cmp_ack = 1; 22722 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 22723 rack->r_use_cmp_ack) 22724 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 22725 /* scwnd=1 */ 22726 rack->rack_enable_scwnd = 1; 22727 /* dynamic=100 */ 22728 rack->rc_gp_dyn_mul = 1; 22729 /* gp_inc_ca */ 22730 rack->r_ctl.rack_per_of_gp_ca = 100; 22731 /* rrr_conf=3 */ 22732 rack->r_rr_config = 3; 22733 /* npush=2 */ 22734 rack->r_ctl.rc_no_push_at_mrtt = 2; 22735 /* fillcw=1 */ 22736 if (rack->r_cwnd_was_clamped == 0) { 22737 rack->rc_pace_to_cwnd = 1; 22738 } else { 22739 rack->rc_pace_to_cwnd = 0; 22740 /* Reset all multipliers to 100.0 so just the measured bw */ 22741 rack->r_ctl.rack_per_of_gp_ss = 100; 22742 rack->r_ctl.rack_per_of_gp_ca = 100; 22743 } 22744 rack->rc_pace_fill_if_rttin_range = 0; 22745 rack->rtt_limit_mul = 0; 22746 /* noprr=1 */ 22747 rack->rack_no_prr = 1; 22748 /* lscwnd=1 */ 22749 rack->r_limit_scw = 1; 22750 /* gp_inc_rec */ 22751 rack->r_ctl.rack_per_of_gp_rec = 90; 22752 rack_client_buffer_level_set(rack); 22753 return (0); 22754 } 22755 22756 22757 22758 static int 22759 rack_set_profile(struct tcp_rack *rack, int prof) 22760 { 22761 int err = EINVAL; 22762 if (prof == 1) { 22763 /* 22764 * Profile 1 is "standard" DGP. It ignores 22765 * client buffer level. 22766 */ 22767 rack->r_ctl.rc_dgp_bl_agg = DGP_LEVEL0; 22768 err = rack_set_dgp(rack); 22769 if (err) 22770 return (err); 22771 } else if (prof == 2) { 22772 /* 22773 * Profile 2 is DGP. Less aggressive with 22774 * respect to client buffer level. 22775 */ 22776 rack->r_ctl.rc_dgp_bl_agg = DGP_LEVEL1; 22777 err = rack_set_dgp(rack); 22778 if (err) 22779 return (err); 22780 } else if (prof == 3) { 22781 /* 22782 * Profile 3 is DGP. Even Less aggressive with 22783 * respect to client buffer level. 22784 */ 22785 rack->r_ctl.rc_dgp_bl_agg = DGP_LEVEL2; 22786 err = rack_set_dgp(rack); 22787 if (err) 22788 return (err); 22789 } else if (prof == 4) { 22790 /* 22791 * Profile 4 is DGP with the most responsiveness 22792 * to client buffer level. 22793 */ 22794 rack->r_ctl.rc_dgp_bl_agg = DGP_LEVEL3; 22795 err = rack_set_dgp(rack); 22796 if (err) 22797 return (err); 22798 } else if (prof == 5) { 22799 err = rack_set_dgp(rack); 22800 if (err) 22801 return (err); 22802 /* 22803 * By turning DGP off we change the rate 22804 * picked to be only the one the cwnd and rtt 22805 * get us. 22806 */ 22807 rack->dgp_on = 0; 22808 } else if (prof == 6) { 22809 err = rack_set_dgp(rack); 22810 if (err) 22811 return (err); 22812 /* 22813 * Profile 6 tweaks DGP so that it will apply to 22814 * fill-cw the same settings that profile5 does 22815 * to replace DGP. It gets then the max(dgp-rate, fillcw(discounted). 22816 */ 22817 rack->rc_fillcw_apply_discount = 1; 22818 } else if (prof == 0) { 22819 /* This changes things back to the default settings */ 22820 rack->dgp_on = 0; 22821 rack->rc_hybrid_mode = 0; 22822 err = 0; 22823 if (rack_fill_cw_state) 22824 rack->rc_pace_to_cwnd = 1; 22825 else 22826 rack->rc_pace_to_cwnd = 0; 22827 if (rack->rc_always_pace) { 22828 tcp_decrement_paced_conn(); 22829 rack_undo_cc_pacing(rack); 22830 rack->rc_always_pace = 0; 22831 } 22832 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 22833 rack->rc_always_pace = 1; 22834 if (rack->rack_hibeta) 22835 rack_set_cc_pacing(rack); 22836 } else 22837 rack->rc_always_pace = 0; 22838 if (rack_dsack_std_based & 0x1) { 22839 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 22840 rack->rc_rack_tmr_std_based = 1; 22841 } 22842 if (rack_dsack_std_based & 0x2) { 22843 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 22844 rack->rc_rack_use_dsack = 1; 22845 } 22846 if (rack_use_cmp_acks) 22847 rack->r_use_cmp_ack = 1; 22848 else 22849 rack->r_use_cmp_ack = 0; 22850 if (rack_disable_prr) 22851 rack->rack_no_prr = 1; 22852 else 22853 rack->rack_no_prr = 0; 22854 if (rack_gp_no_rec_chg) 22855 rack->rc_gp_no_rec_chg = 1; 22856 else 22857 rack->rc_gp_no_rec_chg = 0; 22858 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 22859 rack->r_mbuf_queue = 1; 22860 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 22861 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 22862 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 22863 } else { 22864 rack->r_mbuf_queue = 0; 22865 rack->rc_tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 22866 } 22867 if (rack_enable_shared_cwnd) 22868 rack->rack_enable_scwnd = 1; 22869 else 22870 rack->rack_enable_scwnd = 0; 22871 if (rack_do_dyn_mul) { 22872 /* When dynamic adjustment is on CA needs to start at 100% */ 22873 rack->rc_gp_dyn_mul = 1; 22874 if (rack_do_dyn_mul >= 100) 22875 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 22876 } else { 22877 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 22878 rack->rc_gp_dyn_mul = 0; 22879 } 22880 rack->r_rr_config = 0; 22881 rack->r_ctl.rc_no_push_at_mrtt = 0; 22882 rack->rc_pace_to_cwnd = 0; 22883 rack->rc_pace_fill_if_rttin_range = 0; 22884 rack->rtt_limit_mul = 0; 22885 22886 if (rack_enable_hw_pacing) 22887 rack->rack_hdw_pace_ena = 1; 22888 else 22889 rack->rack_hdw_pace_ena = 0; 22890 if (rack_disable_prr) 22891 rack->rack_no_prr = 1; 22892 else 22893 rack->rack_no_prr = 0; 22894 if (rack_limits_scwnd) 22895 rack->r_limit_scw = 1; 22896 else 22897 rack->r_limit_scw = 0; 22898 rack_init_retransmit_value(rack, rack_rxt_controls); 22899 err = 0; 22900 } 22901 return (err); 22902 } 22903 22904 static int 22905 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 22906 { 22907 struct deferred_opt_list *dol; 22908 22909 dol = malloc(sizeof(struct deferred_opt_list), 22910 M_TCPFSB, M_NOWAIT|M_ZERO); 22911 if (dol == NULL) { 22912 /* 22913 * No space yikes -- fail out.. 22914 */ 22915 return (0); 22916 } 22917 dol->optname = sopt_name; 22918 dol->optval = loptval; 22919 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 22920 return (1); 22921 } 22922 22923 static int 22924 process_hybrid_pacing(struct tcp_rack *rack, struct tcp_hybrid_req *hybrid) 22925 { 22926 #ifdef TCP_REQUEST_TRK 22927 struct tcp_sendfile_track *sft; 22928 struct timeval tv; 22929 tcp_seq seq; 22930 int err; 22931 22932 microuptime(&tv); 22933 22934 /* 22935 * If BB logging is not on we need to look at the DTL flag. 22936 * If its on already then those reasons override the DTL input. 22937 * We do this with any request, you can turn DTL on, but it does 22938 * not turn off at least from hybrid pacing requests. 22939 */ 22940 if (tcp_bblogging_on(rack->rc_tp) == 0) { 22941 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_DTL) { 22942 /* Turn on BB point logging */ 22943 tcp_set_bblog_state(rack->rc_tp, TCP_LOG_VIA_BBPOINTS, 22944 TCP_BBPOINT_REQ_LEVEL_LOGGING); 22945 } 22946 } 22947 /* Make sure no fixed rate is on */ 22948 rack->use_fixed_rate = 0; 22949 rack->r_ctl.rc_fixed_pacing_rate_rec = 0; 22950 rack->r_ctl.rc_fixed_pacing_rate_ca = 0; 22951 rack->r_ctl.rc_fixed_pacing_rate_ss = 0; 22952 /* Now allocate or find our entry that will have these settings */ 22953 sft = tcp_req_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusectick(&tv), 0); 22954 if (sft == NULL) { 22955 rack->rc_tp->tcp_hybrid_error++; 22956 /* no space, where would it have gone? */ 22957 seq = rack->rc_tp->snd_una + rack->rc_tp->t_inpcb.inp_socket->so_snd.sb_ccc; 22958 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_ROOM, __LINE__, 0); 22959 return (ENOSPC); 22960 } 22961 /* The seq will be snd_una + everything in the buffer */ 22962 seq = sft->start_seq; 22963 if ((hybrid->hybrid_flags & TCP_HYBRID_PACING_ENABLE) == 0) { 22964 /* Disabling hybrid pacing */ 22965 if (rack->rc_hybrid_mode) { 22966 rack_set_profile(rack, 0); 22967 rack->rc_tp->tcp_hybrid_stop++; 22968 } 22969 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_TURNED_OFF, __LINE__, 0); 22970 return (0); 22971 } 22972 if (rack->dgp_on == 0) { 22973 /* 22974 * If we have not yet turned DGP on, do so 22975 * now setting pure DGP mode, no buffer level 22976 * response. 22977 */ 22978 if ((err = rack_set_profile(rack, 1)) != 0){ 22979 /* Failed to turn pacing on */ 22980 rack->rc_tp->tcp_hybrid_error++; 22981 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_NO_PACING, __LINE__, 0); 22982 return (err); 22983 } 22984 } 22985 /* Now set in our flags */ 22986 sft->hybrid_flags = hybrid->hybrid_flags | TCP_HYBRID_PACING_WASSET; 22987 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_CSPR) 22988 sft->cspr = hybrid->cspr; 22989 else 22990 sft->cspr = 0; 22991 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_H_MS) 22992 sft->hint_maxseg = hybrid->hint_maxseg; 22993 else 22994 sft->hint_maxseg = 0; 22995 rack->rc_hybrid_mode = 1; 22996 rack->rc_tp->tcp_hybrid_start++; 22997 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_RULES_SET, __LINE__,0); 22998 return (0); 22999 #else 23000 return (ENOTSUP); 23001 #endif 23002 } 23003 23004 static int 23005 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 23006 uint32_t optval, uint64_t loptval, struct tcp_hybrid_req *hybrid) 23007 23008 { 23009 struct epoch_tracker et; 23010 struct sockopt sopt; 23011 struct cc_newreno_opts opt; 23012 uint64_t val; 23013 int error = 0; 23014 uint16_t ca, ss; 23015 23016 switch (sopt_name) { 23017 case TCP_RACK_SET_RXT_OPTIONS: 23018 if ((optval >= 0) && (optval <= 2)) { 23019 rack_init_retransmit_value(rack, optval); 23020 } else { 23021 /* 23022 * You must send in 0, 1 or 2 all else is 23023 * invalid. 23024 */ 23025 error = EINVAL; 23026 } 23027 break; 23028 case TCP_RACK_DSACK_OPT: 23029 RACK_OPTS_INC(tcp_rack_dsack_opt); 23030 if (optval & 0x1) { 23031 rack->rc_rack_tmr_std_based = 1; 23032 } else { 23033 rack->rc_rack_tmr_std_based = 0; 23034 } 23035 if (optval & 0x2) { 23036 rack->rc_rack_use_dsack = 1; 23037 } else { 23038 rack->rc_rack_use_dsack = 0; 23039 } 23040 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); 23041 break; 23042 case TCP_RACK_PACING_DIVISOR: 23043 RACK_OPTS_INC(tcp_rack_pacing_divisor); 23044 if (optval == 0) { 23045 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; 23046 } else { 23047 if (optval < RL_MIN_DIVISOR) 23048 rack->r_ctl.pace_len_divisor = RL_MIN_DIVISOR; 23049 else 23050 rack->r_ctl.pace_len_divisor = optval; 23051 } 23052 break; 23053 case TCP_RACK_HI_BETA: 23054 RACK_OPTS_INC(tcp_rack_hi_beta); 23055 if (optval > 0) { 23056 rack->rack_hibeta = 1; 23057 if ((optval >= 50) && 23058 (optval <= 100)) { 23059 /* 23060 * User wants to set a custom beta. 23061 */ 23062 rack->r_ctl.saved_hibeta = optval; 23063 if (rack->rc_pacing_cc_set) 23064 rack_undo_cc_pacing(rack); 23065 rack->r_ctl.rc_saved_beta.beta = optval; 23066 } 23067 if (rack->rc_pacing_cc_set == 0) 23068 rack_set_cc_pacing(rack); 23069 } else { 23070 rack->rack_hibeta = 0; 23071 if (rack->rc_pacing_cc_set) 23072 rack_undo_cc_pacing(rack); 23073 } 23074 break; 23075 case TCP_RACK_PACING_BETA: 23076 RACK_OPTS_INC(tcp_rack_beta); 23077 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 23078 /* This only works for newreno. */ 23079 error = EINVAL; 23080 break; 23081 } 23082 if (rack->rc_pacing_cc_set) { 23083 /* 23084 * Set them into the real CC module 23085 * whats in the rack pcb is the old values 23086 * to be used on restoral/ 23087 */ 23088 sopt.sopt_dir = SOPT_SET; 23089 opt.name = CC_NEWRENO_BETA; 23090 opt.val = optval; 23091 if (CC_ALGO(tp)->ctl_output != NULL) 23092 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 23093 else { 23094 error = ENOENT; 23095 break; 23096 } 23097 } else { 23098 /* 23099 * Not pacing yet so set it into our local 23100 * rack pcb storage. 23101 */ 23102 rack->r_ctl.rc_saved_beta.beta = optval; 23103 } 23104 break; 23105 case TCP_RACK_TIMER_SLOP: 23106 RACK_OPTS_INC(tcp_rack_timer_slop); 23107 rack->r_ctl.timer_slop = optval; 23108 if (rack->rc_tp->t_srtt) { 23109 /* 23110 * If we have an SRTT lets update t_rxtcur 23111 * to have the new slop. 23112 */ 23113 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 23114 rack_rto_min, rack_rto_max, 23115 rack->r_ctl.timer_slop); 23116 } 23117 break; 23118 case TCP_RACK_PACING_BETA_ECN: 23119 RACK_OPTS_INC(tcp_rack_beta_ecn); 23120 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 23121 /* This only works for newreno. */ 23122 error = EINVAL; 23123 break; 23124 } 23125 if (rack->rc_pacing_cc_set) { 23126 /* 23127 * Set them into the real CC module 23128 * whats in the rack pcb is the old values 23129 * to be used on restoral/ 23130 */ 23131 sopt.sopt_dir = SOPT_SET; 23132 opt.name = CC_NEWRENO_BETA_ECN; 23133 opt.val = optval; 23134 if (CC_ALGO(tp)->ctl_output != NULL) 23135 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 23136 else 23137 error = ENOENT; 23138 } else { 23139 /* 23140 * Not pacing yet so set it into our local 23141 * rack pcb storage. 23142 */ 23143 rack->r_ctl.rc_saved_beta.beta_ecn = optval; 23144 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED; 23145 } 23146 break; 23147 case TCP_DEFER_OPTIONS: 23148 RACK_OPTS_INC(tcp_defer_opt); 23149 if (optval) { 23150 if (rack->gp_ready) { 23151 /* Too late */ 23152 error = EINVAL; 23153 break; 23154 } 23155 rack->defer_options = 1; 23156 } else 23157 rack->defer_options = 0; 23158 break; 23159 case TCP_RACK_MEASURE_CNT: 23160 RACK_OPTS_INC(tcp_rack_measure_cnt); 23161 if (optval && (optval <= 0xff)) { 23162 rack->r_ctl.req_measurements = optval; 23163 } else 23164 error = EINVAL; 23165 break; 23166 case TCP_REC_ABC_VAL: 23167 RACK_OPTS_INC(tcp_rec_abc_val); 23168 if (optval > 0) 23169 rack->r_use_labc_for_rec = 1; 23170 else 23171 rack->r_use_labc_for_rec = 0; 23172 break; 23173 case TCP_RACK_ABC_VAL: 23174 RACK_OPTS_INC(tcp_rack_abc_val); 23175 if ((optval > 0) && (optval < 255)) 23176 rack->rc_labc = optval; 23177 else 23178 error = EINVAL; 23179 break; 23180 case TCP_HDWR_UP_ONLY: 23181 RACK_OPTS_INC(tcp_pacing_up_only); 23182 if (optval) 23183 rack->r_up_only = 1; 23184 else 23185 rack->r_up_only = 0; 23186 break; 23187 case TCP_PACING_RATE_CAP: 23188 RACK_OPTS_INC(tcp_pacing_rate_cap); 23189 rack->r_ctl.bw_rate_cap = loptval; 23190 break; 23191 case TCP_HYBRID_PACING: 23192 if (hybrid == NULL) { 23193 error = EINVAL; 23194 break; 23195 } 23196 error = process_hybrid_pacing(rack, hybrid); 23197 break; 23198 case TCP_RACK_PROFILE: 23199 RACK_OPTS_INC(tcp_profile); 23200 error = rack_set_profile(rack, optval); 23201 break; 23202 case TCP_USE_CMP_ACKS: 23203 RACK_OPTS_INC(tcp_use_cmp_acks); 23204 if ((optval == 0) && (tp->t_flags2 & TF2_MBUF_ACKCMP)) { 23205 /* You can't turn it off once its on! */ 23206 error = EINVAL; 23207 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { 23208 rack->r_use_cmp_ack = 1; 23209 rack->r_mbuf_queue = 1; 23210 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23211 } 23212 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 23213 tp->t_flags2 |= TF2_MBUF_ACKCMP; 23214 break; 23215 case TCP_SHARED_CWND_TIME_LIMIT: 23216 RACK_OPTS_INC(tcp_lscwnd); 23217 if (optval) 23218 rack->r_limit_scw = 1; 23219 else 23220 rack->r_limit_scw = 0; 23221 break; 23222 case TCP_RACK_DGP_IN_REC: 23223 RACK_OPTS_INC(tcp_dgp_in_rec); 23224 if (optval) 23225 rack->r_ctl.full_dgp_in_rec = 1; 23226 else 23227 rack->r_ctl.full_dgp_in_rec = 0; 23228 break; 23229 case TCP_RXT_CLAMP: 23230 RACK_OPTS_INC(tcp_rxt_clamp); 23231 rack_translate_clamp_value(rack, optval); 23232 break; 23233 case TCP_RACK_PACE_TO_FILL: 23234 RACK_OPTS_INC(tcp_fillcw); 23235 if (optval == 0) 23236 rack->rc_pace_to_cwnd = 0; 23237 else { 23238 rack->rc_pace_to_cwnd = 1; 23239 if (optval > 1) 23240 rack->r_fill_less_agg = 1; 23241 } 23242 if ((optval >= rack_gp_rtt_maxmul) && 23243 rack_gp_rtt_maxmul && 23244 (optval < 0xf)) { 23245 rack->rc_pace_fill_if_rttin_range = 1; 23246 rack->rtt_limit_mul = optval; 23247 } else { 23248 rack->rc_pace_fill_if_rttin_range = 0; 23249 rack->rtt_limit_mul = 0; 23250 } 23251 break; 23252 case TCP_RACK_NO_PUSH_AT_MAX: 23253 RACK_OPTS_INC(tcp_npush); 23254 if (optval == 0) 23255 rack->r_ctl.rc_no_push_at_mrtt = 0; 23256 else if (optval < 0xff) 23257 rack->r_ctl.rc_no_push_at_mrtt = optval; 23258 else 23259 error = EINVAL; 23260 break; 23261 case TCP_SHARED_CWND_ENABLE: 23262 RACK_OPTS_INC(tcp_rack_scwnd); 23263 if (optval == 0) 23264 rack->rack_enable_scwnd = 0; 23265 else 23266 rack->rack_enable_scwnd = 1; 23267 break; 23268 case TCP_RACK_MBUF_QUEUE: 23269 /* Now do we use the LRO mbuf-queue feature */ 23270 RACK_OPTS_INC(tcp_rack_mbufq); 23271 if (optval || rack->r_use_cmp_ack) 23272 rack->r_mbuf_queue = 1; 23273 else 23274 rack->r_mbuf_queue = 0; 23275 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 23276 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23277 else 23278 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 23279 break; 23280 case TCP_RACK_NONRXT_CFG_RATE: 23281 RACK_OPTS_INC(tcp_rack_cfg_rate); 23282 if (optval == 0) 23283 rack->rack_rec_nonrxt_use_cr = 0; 23284 else 23285 rack->rack_rec_nonrxt_use_cr = 1; 23286 break; 23287 case TCP_NO_PRR: 23288 RACK_OPTS_INC(tcp_rack_noprr); 23289 if (optval == 0) 23290 rack->rack_no_prr = 0; 23291 else if (optval == 1) 23292 rack->rack_no_prr = 1; 23293 else if (optval == 2) 23294 rack->no_prr_addback = 1; 23295 else 23296 error = EINVAL; 23297 break; 23298 case TCP_TIMELY_DYN_ADJ: 23299 RACK_OPTS_INC(tcp_timely_dyn); 23300 if (optval == 0) 23301 rack->rc_gp_dyn_mul = 0; 23302 else { 23303 rack->rc_gp_dyn_mul = 1; 23304 if (optval >= 100) { 23305 /* 23306 * If the user sets something 100 or more 23307 * its the gp_ca value. 23308 */ 23309 rack->r_ctl.rack_per_of_gp_ca = optval; 23310 } 23311 } 23312 break; 23313 case TCP_RACK_DO_DETECTION: 23314 RACK_OPTS_INC(tcp_rack_do_detection); 23315 if (optval == 0) 23316 rack->do_detection = 0; 23317 else 23318 rack->do_detection = 1; 23319 break; 23320 case TCP_RACK_TLP_USE: 23321 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 23322 error = EINVAL; 23323 break; 23324 } 23325 RACK_OPTS_INC(tcp_tlp_use); 23326 rack->rack_tlp_threshold_use = optval; 23327 break; 23328 case TCP_RACK_TLP_REDUCE: 23329 /* RACK TLP cwnd reduction (bool) */ 23330 RACK_OPTS_INC(tcp_rack_tlp_reduce); 23331 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 23332 break; 23333 /* Pacing related ones */ 23334 case TCP_RACK_PACE_ALWAYS: 23335 /* 23336 * zero is old rack method, 1 is new 23337 * method using a pacing rate. 23338 */ 23339 RACK_OPTS_INC(tcp_rack_pace_always); 23340 if (optval > 0) { 23341 if (rack->rc_always_pace) { 23342 error = EALREADY; 23343 break; 23344 } else if (tcp_can_enable_pacing()) { 23345 rack->rc_always_pace = 1; 23346 if (rack->rack_hibeta) 23347 rack_set_cc_pacing(rack); 23348 } 23349 else { 23350 error = ENOSPC; 23351 break; 23352 } 23353 } else { 23354 if (rack->rc_always_pace) { 23355 tcp_decrement_paced_conn(); 23356 rack->rc_always_pace = 0; 23357 rack_undo_cc_pacing(rack); 23358 } 23359 } 23360 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 23361 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23362 else 23363 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 23364 /* A rate may be set irate or other, if so set seg size */ 23365 rack_update_seg(rack); 23366 break; 23367 case TCP_BBR_RACK_INIT_RATE: 23368 RACK_OPTS_INC(tcp_initial_rate); 23369 val = optval; 23370 /* Change from kbits per second to bytes per second */ 23371 val *= 1000; 23372 val /= 8; 23373 rack->r_ctl.init_rate = val; 23374 if (rack->rc_init_win != rack_default_init_window) { 23375 uint32_t win, snt; 23376 23377 /* 23378 * Options don't always get applied 23379 * in the order you think. So in order 23380 * to assure we update a cwnd we need 23381 * to check and see if we are still 23382 * where we should raise the cwnd. 23383 */ 23384 win = rc_init_window(rack); 23385 if (SEQ_GT(tp->snd_max, tp->iss)) 23386 snt = tp->snd_max - tp->iss; 23387 else 23388 snt = 0; 23389 if ((snt < win) && 23390 (tp->snd_cwnd < win)) 23391 tp->snd_cwnd = win; 23392 } 23393 if (rack->rc_always_pace) 23394 rack_update_seg(rack); 23395 break; 23396 case TCP_BBR_IWINTSO: 23397 RACK_OPTS_INC(tcp_initial_win); 23398 if (optval && (optval <= 0xff)) { 23399 uint32_t win, snt; 23400 23401 rack->rc_init_win = optval; 23402 win = rc_init_window(rack); 23403 if (SEQ_GT(tp->snd_max, tp->iss)) 23404 snt = tp->snd_max - tp->iss; 23405 else 23406 snt = 0; 23407 if ((snt < win) && 23408 (tp->t_srtt | 23409 rack->r_ctl.init_rate)) { 23410 /* 23411 * We are not past the initial window 23412 * and we have some bases for pacing, 23413 * so we need to possibly adjust up 23414 * the cwnd. Note even if we don't set 23415 * the cwnd, its still ok to raise the rc_init_win 23416 * which can be used coming out of idle when we 23417 * would have a rate. 23418 */ 23419 if (tp->snd_cwnd < win) 23420 tp->snd_cwnd = win; 23421 } 23422 if (rack->rc_always_pace) 23423 rack_update_seg(rack); 23424 } else 23425 error = EINVAL; 23426 break; 23427 case TCP_RACK_FORCE_MSEG: 23428 RACK_OPTS_INC(tcp_rack_force_max_seg); 23429 if (optval) 23430 rack->rc_force_max_seg = 1; 23431 else 23432 rack->rc_force_max_seg = 0; 23433 break; 23434 case TCP_RACK_PACE_MIN_SEG: 23435 RACK_OPTS_INC(tcp_rack_min_seg); 23436 rack->r_ctl.rc_user_set_min_segs = (0x0000ffff & optval); 23437 rack_set_pace_segments(tp, rack, __LINE__, NULL); 23438 break; 23439 case TCP_RACK_PACE_MAX_SEG: 23440 /* Max segments size in a pace in bytes */ 23441 RACK_OPTS_INC(tcp_rack_max_seg); 23442 if (optval <= MAX_USER_SET_SEG) 23443 rack->rc_user_set_max_segs = optval; 23444 else 23445 rack->rc_user_set_max_segs = MAX_USER_SET_SEG; 23446 rack_set_pace_segments(tp, rack, __LINE__, NULL); 23447 break; 23448 case TCP_RACK_PACE_RATE_REC: 23449 /* Set the fixed pacing rate in Bytes per second ca */ 23450 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 23451 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 23452 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 23453 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 23454 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 23455 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 23456 rack->use_fixed_rate = 1; 23457 if (rack->rack_hibeta) 23458 rack_set_cc_pacing(rack); 23459 rack_log_pacing_delay_calc(rack, 23460 rack->r_ctl.rc_fixed_pacing_rate_ss, 23461 rack->r_ctl.rc_fixed_pacing_rate_ca, 23462 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 23463 __LINE__, NULL,0); 23464 break; 23465 23466 case TCP_RACK_PACE_RATE_SS: 23467 /* Set the fixed pacing rate in Bytes per second ca */ 23468 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 23469 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 23470 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 23471 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 23472 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 23473 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 23474 rack->use_fixed_rate = 1; 23475 if (rack->rack_hibeta) 23476 rack_set_cc_pacing(rack); 23477 rack_log_pacing_delay_calc(rack, 23478 rack->r_ctl.rc_fixed_pacing_rate_ss, 23479 rack->r_ctl.rc_fixed_pacing_rate_ca, 23480 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 23481 __LINE__, NULL, 0); 23482 break; 23483 23484 case TCP_RACK_PACE_RATE_CA: 23485 /* Set the fixed pacing rate in Bytes per second ca */ 23486 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 23487 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 23488 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 23489 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 23490 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 23491 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 23492 rack->use_fixed_rate = 1; 23493 if (rack->rack_hibeta) 23494 rack_set_cc_pacing(rack); 23495 rack_log_pacing_delay_calc(rack, 23496 rack->r_ctl.rc_fixed_pacing_rate_ss, 23497 rack->r_ctl.rc_fixed_pacing_rate_ca, 23498 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 23499 __LINE__, NULL, 0); 23500 break; 23501 case TCP_RACK_GP_INCREASE_REC: 23502 RACK_OPTS_INC(tcp_gp_inc_rec); 23503 rack->r_ctl.rack_per_of_gp_rec = optval; 23504 rack_log_pacing_delay_calc(rack, 23505 rack->r_ctl.rack_per_of_gp_ss, 23506 rack->r_ctl.rack_per_of_gp_ca, 23507 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 23508 __LINE__, NULL, 0); 23509 break; 23510 case TCP_RACK_GP_INCREASE_CA: 23511 RACK_OPTS_INC(tcp_gp_inc_ca); 23512 ca = optval; 23513 if (ca < 100) { 23514 /* 23515 * We don't allow any reduction 23516 * over the GP b/w. 23517 */ 23518 error = EINVAL; 23519 break; 23520 } 23521 rack->r_ctl.rack_per_of_gp_ca = ca; 23522 rack_log_pacing_delay_calc(rack, 23523 rack->r_ctl.rack_per_of_gp_ss, 23524 rack->r_ctl.rack_per_of_gp_ca, 23525 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 23526 __LINE__, NULL, 0); 23527 break; 23528 case TCP_RACK_GP_INCREASE_SS: 23529 RACK_OPTS_INC(tcp_gp_inc_ss); 23530 ss = optval; 23531 if (ss < 100) { 23532 /* 23533 * We don't allow any reduction 23534 * over the GP b/w. 23535 */ 23536 error = EINVAL; 23537 break; 23538 } 23539 rack->r_ctl.rack_per_of_gp_ss = ss; 23540 rack_log_pacing_delay_calc(rack, 23541 rack->r_ctl.rack_per_of_gp_ss, 23542 rack->r_ctl.rack_per_of_gp_ca, 23543 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 23544 __LINE__, NULL, 0); 23545 break; 23546 case TCP_RACK_RR_CONF: 23547 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 23548 if (optval && optval <= 3) 23549 rack->r_rr_config = optval; 23550 else 23551 rack->r_rr_config = 0; 23552 break; 23553 case TCP_PACING_DND: /* URL:dnd */ 23554 if (optval > 0) 23555 rack->rc_pace_dnd = 1; 23556 else 23557 rack->rc_pace_dnd = 0; 23558 break; 23559 case TCP_HDWR_RATE_CAP: 23560 RACK_OPTS_INC(tcp_hdwr_rate_cap); 23561 if (optval) { 23562 if (rack->r_rack_hw_rate_caps == 0) 23563 rack->r_rack_hw_rate_caps = 1; 23564 else 23565 error = EALREADY; 23566 } else { 23567 rack->r_rack_hw_rate_caps = 0; 23568 } 23569 break; 23570 case TCP_RACK_SPLIT_LIMIT: 23571 RACK_OPTS_INC(tcp_split_limit); 23572 rack->r_ctl.rc_split_limit = optval; 23573 break; 23574 case TCP_BBR_HDWR_PACE: 23575 RACK_OPTS_INC(tcp_hdwr_pacing); 23576 if (optval){ 23577 if (rack->rack_hdrw_pacing == 0) { 23578 rack->rack_hdw_pace_ena = 1; 23579 rack->rack_attempt_hdwr_pace = 0; 23580 } else 23581 error = EALREADY; 23582 } else { 23583 rack->rack_hdw_pace_ena = 0; 23584 #ifdef RATELIMIT 23585 if (rack->r_ctl.crte != NULL) { 23586 rack->rack_hdrw_pacing = 0; 23587 rack->rack_attempt_hdwr_pace = 0; 23588 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 23589 rack->r_ctl.crte = NULL; 23590 } 23591 #endif 23592 } 23593 break; 23594 /* End Pacing related ones */ 23595 case TCP_RACK_PRR_SENDALOT: 23596 /* Allow PRR to send more than one seg */ 23597 RACK_OPTS_INC(tcp_rack_prr_sendalot); 23598 rack->r_ctl.rc_prr_sendalot = optval; 23599 break; 23600 case TCP_RACK_MIN_TO: 23601 /* Minimum time between rack t-o's in ms */ 23602 RACK_OPTS_INC(tcp_rack_min_to); 23603 rack->r_ctl.rc_min_to = optval; 23604 break; 23605 case TCP_RACK_EARLY_SEG: 23606 /* If early recovery max segments */ 23607 RACK_OPTS_INC(tcp_rack_early_seg); 23608 rack->r_ctl.rc_early_recovery_segs = optval; 23609 break; 23610 case TCP_RACK_ENABLE_HYSTART: 23611 { 23612 if (optval) { 23613 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 23614 if (rack_do_hystart > RACK_HYSTART_ON) 23615 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 23616 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 23617 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 23618 } else { 23619 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 23620 } 23621 } 23622 break; 23623 case TCP_RACK_REORD_THRESH: 23624 /* RACK reorder threshold (shift amount) */ 23625 RACK_OPTS_INC(tcp_rack_reord_thresh); 23626 if ((optval > 0) && (optval < 31)) 23627 rack->r_ctl.rc_reorder_shift = optval; 23628 else 23629 error = EINVAL; 23630 break; 23631 case TCP_RACK_REORD_FADE: 23632 /* Does reordering fade after ms time */ 23633 RACK_OPTS_INC(tcp_rack_reord_fade); 23634 rack->r_ctl.rc_reorder_fade = optval; 23635 break; 23636 case TCP_RACK_TLP_THRESH: 23637 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 23638 RACK_OPTS_INC(tcp_rack_tlp_thresh); 23639 if (optval) 23640 rack->r_ctl.rc_tlp_threshold = optval; 23641 else 23642 error = EINVAL; 23643 break; 23644 case TCP_BBR_USE_RACK_RR: 23645 RACK_OPTS_INC(tcp_rack_rr); 23646 if (optval) 23647 rack->use_rack_rr = 1; 23648 else 23649 rack->use_rack_rr = 0; 23650 break; 23651 case TCP_RACK_PKT_DELAY: 23652 /* RACK added ms i.e. rack-rtt + reord + N */ 23653 RACK_OPTS_INC(tcp_rack_pkt_delay); 23654 rack->r_ctl.rc_pkt_delay = optval; 23655 break; 23656 case TCP_DELACK: 23657 RACK_OPTS_INC(tcp_rack_delayed_ack); 23658 if (optval == 0) 23659 tp->t_delayed_ack = 0; 23660 else 23661 tp->t_delayed_ack = 1; 23662 if (tp->t_flags & TF_DELACK) { 23663 tp->t_flags &= ~TF_DELACK; 23664 tp->t_flags |= TF_ACKNOW; 23665 NET_EPOCH_ENTER(et); 23666 rack_output(tp); 23667 NET_EPOCH_EXIT(et); 23668 } 23669 break; 23670 23671 case TCP_BBR_RACK_RTT_USE: 23672 RACK_OPTS_INC(tcp_rack_rtt_use); 23673 if ((optval != USE_RTT_HIGH) && 23674 (optval != USE_RTT_LOW) && 23675 (optval != USE_RTT_AVG)) 23676 error = EINVAL; 23677 else 23678 rack->r_ctl.rc_rate_sample_method = optval; 23679 break; 23680 case TCP_DATA_AFTER_CLOSE: 23681 RACK_OPTS_INC(tcp_data_after_close); 23682 if (optval) 23683 rack->rc_allow_data_af_clo = 1; 23684 else 23685 rack->rc_allow_data_af_clo = 0; 23686 break; 23687 default: 23688 break; 23689 } 23690 tcp_log_socket_option(tp, sopt_name, optval, error); 23691 return (error); 23692 } 23693 23694 23695 static void 23696 rack_apply_deferred_options(struct tcp_rack *rack) 23697 { 23698 struct deferred_opt_list *dol, *sdol; 23699 uint32_t s_optval; 23700 23701 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) { 23702 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 23703 /* Disadvantage of deferal is you loose the error return */ 23704 s_optval = (uint32_t)dol->optval; 23705 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval, NULL); 23706 free(dol, M_TCPDO); 23707 } 23708 } 23709 23710 static void 23711 rack_hw_tls_change(struct tcpcb *tp, int chg) 23712 { 23713 /* Update HW tls state */ 23714 struct tcp_rack *rack; 23715 23716 rack = (struct tcp_rack *)tp->t_fb_ptr; 23717 if (chg) 23718 rack->r_ctl.fsb.hw_tls = 1; 23719 else 23720 rack->r_ctl.fsb.hw_tls = 0; 23721 } 23722 23723 static int 23724 rack_pru_options(struct tcpcb *tp, int flags) 23725 { 23726 if (flags & PRUS_OOB) 23727 return (EOPNOTSUPP); 23728 return (0); 23729 } 23730 23731 static bool 23732 rack_wake_check(struct tcpcb *tp) 23733 { 23734 struct tcp_rack *rack; 23735 struct timeval tv; 23736 uint32_t cts; 23737 23738 rack = (struct tcp_rack *)tp->t_fb_ptr; 23739 if (rack->r_ctl.rc_hpts_flags) { 23740 cts = tcp_get_usecs(&tv); 23741 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == PACE_PKT_OUTPUT){ 23742 /* 23743 * Pacing timer is up, check if we are ready. 23744 */ 23745 if (TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) 23746 return (true); 23747 } else if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) != 0) { 23748 /* 23749 * A timer is up, check if we are ready. 23750 */ 23751 if (TSTMP_GEQ(cts, rack->r_ctl.rc_timer_exp)) 23752 return (true); 23753 } 23754 } 23755 return (false); 23756 } 23757 23758 static struct tcp_function_block __tcp_rack = { 23759 .tfb_tcp_block_name = __XSTRING(STACKNAME), 23760 .tfb_tcp_output = rack_output, 23761 .tfb_do_queued_segments = ctf_do_queued_segments, 23762 .tfb_do_segment_nounlock = rack_do_segment_nounlock, 23763 .tfb_tcp_do_segment = rack_do_segment, 23764 .tfb_tcp_ctloutput = rack_ctloutput, 23765 .tfb_tcp_fb_init = rack_init, 23766 .tfb_tcp_fb_fini = rack_fini, 23767 .tfb_tcp_timer_stop_all = rack_stopall, 23768 .tfb_tcp_rexmit_tmr = rack_remxt_tmr, 23769 .tfb_tcp_handoff_ok = rack_handoff_ok, 23770 .tfb_tcp_mtu_chg = rack_mtu_change, 23771 .tfb_pru_options = rack_pru_options, 23772 .tfb_hwtls_change = rack_hw_tls_change, 23773 .tfb_chg_query = rack_chg_query, 23774 .tfb_switch_failed = rack_switch_failed, 23775 .tfb_early_wake_check = rack_wake_check, 23776 .tfb_compute_pipe = rack_compute_pipe, 23777 .tfb_flags = TCP_FUNC_OUTPUT_CANDROP, 23778 }; 23779 23780 /* 23781 * rack_ctloutput() must drop the inpcb lock before performing copyin on 23782 * socket option arguments. When it re-acquires the lock after the copy, it 23783 * has to revalidate that the connection is still valid for the socket 23784 * option. 23785 */ 23786 static int 23787 rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt) 23788 { 23789 struct inpcb *inp = tptoinpcb(tp); 23790 #ifdef INET 23791 struct ip *ip; 23792 #endif 23793 struct tcp_rack *rack; 23794 struct tcp_hybrid_req hybrid; 23795 uint64_t loptval; 23796 int32_t error = 0, optval; 23797 23798 rack = (struct tcp_rack *)tp->t_fb_ptr; 23799 if (rack == NULL) { 23800 INP_WUNLOCK(inp); 23801 return (EINVAL); 23802 } 23803 #ifdef INET 23804 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 23805 #endif 23806 23807 switch (sopt->sopt_level) { 23808 #ifdef INET6 23809 case IPPROTO_IPV6: 23810 MPASS(inp->inp_vflag & INP_IPV6PROTO); 23811 switch (sopt->sopt_name) { 23812 case IPV6_USE_MIN_MTU: 23813 tcp6_use_min_mtu(tp); 23814 break; 23815 } 23816 INP_WUNLOCK(inp); 23817 return (0); 23818 #endif 23819 #ifdef INET 23820 case IPPROTO_IP: 23821 switch (sopt->sopt_name) { 23822 case IP_TOS: 23823 /* 23824 * The DSCP codepoint has changed, update the fsb. 23825 */ 23826 ip->ip_tos = rack->rc_inp->inp_ip_tos; 23827 break; 23828 case IP_TTL: 23829 /* 23830 * The TTL has changed, update the fsb. 23831 */ 23832 ip->ip_ttl = rack->rc_inp->inp_ip_ttl; 23833 break; 23834 } 23835 INP_WUNLOCK(inp); 23836 return (0); 23837 #endif 23838 #ifdef SO_PEERPRIO 23839 case SOL_SOCKET: 23840 switch (sopt->sopt_name) { 23841 case SO_PEERPRIO: /* SC-URL:bs */ 23842 /* Already read in and sanity checked in sosetopt(). */ 23843 if (inp->inp_socket) { 23844 rack->client_bufferlvl = inp->inp_socket->so_peerprio; 23845 rack_client_buffer_level_set(rack); 23846 } 23847 break; 23848 } 23849 INP_WUNLOCK(inp); 23850 return (0); 23851 #endif 23852 case IPPROTO_TCP: 23853 switch (sopt->sopt_name) { 23854 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */ 23855 /* Pacing related ones */ 23856 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */ 23857 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */ 23858 case TCP_BBR_IWINTSO: /* URL:tso_iwin */ 23859 case TCP_RACK_PACE_MIN_SEG: /* URL:pace_min_seg */ 23860 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */ 23861 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */ 23862 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */ 23863 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss*/ 23864 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */ 23865 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */ 23866 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */ 23867 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */ 23868 case TCP_RACK_RR_CONF: /* URL:rrr_conf */ 23869 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */ 23870 case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */ 23871 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */ 23872 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */ 23873 case TCP_RACK_PACING_BETA: /* URL:pacing_beta */ 23874 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */ 23875 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */ 23876 case TCP_RACK_DGP_IN_REC: /* URL:dgpinrec */ 23877 /* End pacing related */ 23878 case TCP_RXT_CLAMP: /* URL:rxtclamp */ 23879 case TCP_DELACK: /* URL:delack (in base TCP i.e. tcp_hints along with cc etc ) */ 23880 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */ 23881 case TCP_RACK_MIN_TO: /* URL:min_to */ 23882 case TCP_RACK_EARLY_SEG: /* URL:early_seg */ 23883 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */ 23884 case TCP_RACK_REORD_FADE: /* URL:reord_fade */ 23885 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */ 23886 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */ 23887 case TCP_RACK_TLP_USE: /* URL:tlp_use */ 23888 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */ 23889 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */ 23890 case TCP_RACK_DO_DETECTION: /* URL:detect */ 23891 case TCP_NO_PRR: /* URL:noprr */ 23892 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */ 23893 case TCP_DATA_AFTER_CLOSE: /* no URL */ 23894 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */ 23895 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */ 23896 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */ 23897 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */ 23898 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */ 23899 case TCP_RACK_PROFILE: /* URL:profile */ 23900 case TCP_HYBRID_PACING: /* URL:hybrid */ 23901 case TCP_USE_CMP_ACKS: /* URL:cmpack */ 23902 case TCP_RACK_ABC_VAL: /* URL:labc */ 23903 case TCP_REC_ABC_VAL: /* URL:reclabc */ 23904 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */ 23905 case TCP_DEFER_OPTIONS: /* URL:defer */ 23906 case TCP_RACK_DSACK_OPT: /* URL:dsack */ 23907 case TCP_RACK_TIMER_SLOP: /* URL:timer_slop */ 23908 case TCP_RACK_ENABLE_HYSTART: /* URL:hystart */ 23909 case TCP_RACK_SET_RXT_OPTIONS: /* URL:rxtsz */ 23910 case TCP_RACK_HI_BETA: /* URL:hibeta */ 23911 case TCP_RACK_SPLIT_LIMIT: /* URL:split */ 23912 case TCP_RACK_PACING_DIVISOR: /* URL:divisor */ 23913 case TCP_PACING_DND: /* URL:dnd */ 23914 goto process_opt; 23915 break; 23916 default: 23917 /* Filter off all unknown options to the base stack */ 23918 return (tcp_default_ctloutput(tp, sopt)); 23919 break; 23920 } 23921 23922 default: 23923 INP_WUNLOCK(inp); 23924 return (0); 23925 } 23926 process_opt: 23927 INP_WUNLOCK(inp); 23928 if (sopt->sopt_name == TCP_PACING_RATE_CAP) { 23929 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval)); 23930 /* 23931 * We truncate it down to 32 bits for the socket-option trace this 23932 * means rates > 34Gbps won't show right, but thats probably ok. 23933 */ 23934 optval = (uint32_t)loptval; 23935 } else if (sopt->sopt_name == TCP_HYBRID_PACING) { 23936 error = sooptcopyin(sopt, &hybrid, sizeof(hybrid), sizeof(hybrid)); 23937 } else { 23938 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 23939 /* Save it in 64 bit form too */ 23940 loptval = optval; 23941 } 23942 if (error) 23943 return (error); 23944 INP_WLOCK(inp); 23945 if (tp->t_fb != &__tcp_rack) { 23946 INP_WUNLOCK(inp); 23947 return (ENOPROTOOPT); 23948 } 23949 if (rack->defer_options && (rack->gp_ready == 0) && 23950 (sopt->sopt_name != TCP_DEFER_OPTIONS) && 23951 (sopt->sopt_name != TCP_HYBRID_PACING) && 23952 (sopt->sopt_name != TCP_RACK_PACING_BETA) && 23953 (sopt->sopt_name != TCP_RACK_SET_RXT_OPTIONS) && 23954 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) && 23955 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) { 23956 /* Options are beind deferred */ 23957 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { 23958 INP_WUNLOCK(inp); 23959 return (0); 23960 } else { 23961 /* No memory to defer, fail */ 23962 INP_WUNLOCK(inp); 23963 return (ENOMEM); 23964 } 23965 } 23966 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval, &hybrid); 23967 INP_WUNLOCK(inp); 23968 return (error); 23969 } 23970 23971 static void 23972 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti) 23973 { 23974 23975 INP_WLOCK_ASSERT(tptoinpcb(tp)); 23976 bzero(ti, sizeof(*ti)); 23977 23978 ti->tcpi_state = tp->t_state; 23979 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP)) 23980 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; 23981 if (tp->t_flags & TF_SACK_PERMIT) 23982 ti->tcpi_options |= TCPI_OPT_SACK; 23983 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) { 23984 ti->tcpi_options |= TCPI_OPT_WSCALE; 23985 ti->tcpi_snd_wscale = tp->snd_scale; 23986 ti->tcpi_rcv_wscale = tp->rcv_scale; 23987 } 23988 if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT)) 23989 ti->tcpi_options |= TCPI_OPT_ECN; 23990 if (tp->t_flags & TF_FASTOPEN) 23991 ti->tcpi_options |= TCPI_OPT_TFO; 23992 /* still kept in ticks is t_rcvtime */ 23993 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick; 23994 /* Since we hold everything in precise useconds this is easy */ 23995 ti->tcpi_rtt = tp->t_srtt; 23996 ti->tcpi_rttvar = tp->t_rttvar; 23997 ti->tcpi_rto = tp->t_rxtcur; 23998 ti->tcpi_snd_ssthresh = tp->snd_ssthresh; 23999 ti->tcpi_snd_cwnd = tp->snd_cwnd; 24000 /* 24001 * FreeBSD-specific extension fields for tcp_info. 24002 */ 24003 ti->tcpi_rcv_space = tp->rcv_wnd; 24004 ti->tcpi_rcv_nxt = tp->rcv_nxt; 24005 ti->tcpi_snd_wnd = tp->snd_wnd; 24006 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. */ 24007 ti->tcpi_snd_nxt = tp->snd_nxt; 24008 ti->tcpi_snd_mss = tp->t_maxseg; 24009 ti->tcpi_rcv_mss = tp->t_maxseg; 24010 ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack; 24011 ti->tcpi_rcv_ooopack = tp->t_rcvoopack; 24012 ti->tcpi_snd_zerowin = tp->t_sndzerowin; 24013 ti->tcpi_total_tlp = tp->t_sndtlppack; 24014 ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte; 24015 #ifdef NETFLIX_STATS 24016 memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo)); 24017 #endif 24018 #ifdef TCP_OFFLOAD 24019 if (tp->t_flags & TF_TOE) { 24020 ti->tcpi_options |= TCPI_OPT_TOE; 24021 tcp_offload_tcp_info(tp, ti); 24022 } 24023 #endif 24024 } 24025 24026 static int 24027 rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt) 24028 { 24029 struct inpcb *inp = tptoinpcb(tp); 24030 struct tcp_rack *rack; 24031 int32_t error, optval; 24032 uint64_t val, loptval; 24033 struct tcp_info ti; 24034 /* 24035 * Because all our options are either boolean or an int, we can just 24036 * pull everything into optval and then unlock and copy. If we ever 24037 * add a option that is not a int, then this will have quite an 24038 * impact to this routine. 24039 */ 24040 error = 0; 24041 rack = (struct tcp_rack *)tp->t_fb_ptr; 24042 if (rack == NULL) { 24043 INP_WUNLOCK(inp); 24044 return (EINVAL); 24045 } 24046 switch (sopt->sopt_name) { 24047 case TCP_INFO: 24048 /* First get the info filled */ 24049 rack_fill_info(tp, &ti); 24050 /* Fix up the rtt related fields if needed */ 24051 INP_WUNLOCK(inp); 24052 error = sooptcopyout(sopt, &ti, sizeof ti); 24053 return (error); 24054 /* 24055 * Beta is the congestion control value for NewReno that influences how 24056 * much of a backoff happens when loss is detected. It is normally set 24057 * to 50 for 50% i.e. the cwnd is reduced to 50% of its previous value 24058 * when you exit recovery. 24059 */ 24060 case TCP_RACK_PACING_BETA: 24061 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) 24062 error = EINVAL; 24063 else if (rack->rc_pacing_cc_set == 0) 24064 optval = rack->r_ctl.rc_saved_beta.beta; 24065 else { 24066 /* 24067 * Reach out into the CC data and report back what 24068 * I have previously set. Yeah it looks hackish but 24069 * we don't want to report the saved values. 24070 */ 24071 if (tp->t_ccv.cc_data) 24072 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta; 24073 else 24074 error = EINVAL; 24075 } 24076 break; 24077 /* 24078 * Beta_ecn is the congestion control value for NewReno that influences how 24079 * much of a backoff happens when a ECN mark is detected. It is normally set 24080 * to 80 for 80% i.e. the cwnd is reduced by 20% of its previous value when 24081 * you exit recovery. Note that classic ECN has a beta of 50, it is only 24082 * ABE Ecn that uses this "less" value, but we do too with pacing :) 24083 */ 24084 24085 case TCP_RACK_PACING_BETA_ECN: 24086 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) 24087 error = EINVAL; 24088 else if (rack->rc_pacing_cc_set == 0) 24089 optval = rack->r_ctl.rc_saved_beta.beta_ecn; 24090 else { 24091 /* 24092 * Reach out into the CC data and report back what 24093 * I have previously set. Yeah it looks hackish but 24094 * we don't want to report the saved values. 24095 */ 24096 if (tp->t_ccv.cc_data) 24097 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn; 24098 else 24099 error = EINVAL; 24100 } 24101 break; 24102 case TCP_RACK_DSACK_OPT: 24103 optval = 0; 24104 if (rack->rc_rack_tmr_std_based) { 24105 optval |= 1; 24106 } 24107 if (rack->rc_rack_use_dsack) { 24108 optval |= 2; 24109 } 24110 break; 24111 case TCP_RACK_ENABLE_HYSTART: 24112 { 24113 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { 24114 optval = RACK_HYSTART_ON; 24115 if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND) 24116 optval = RACK_HYSTART_ON_W_SC; 24117 if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH) 24118 optval = RACK_HYSTART_ON_W_SC_C; 24119 } else { 24120 optval = RACK_HYSTART_OFF; 24121 } 24122 } 24123 break; 24124 case TCP_RACK_DGP_IN_REC: 24125 optval = rack->r_ctl.full_dgp_in_rec; 24126 break; 24127 case TCP_RACK_HI_BETA: 24128 optval = rack->rack_hibeta; 24129 break; 24130 case TCP_RXT_CLAMP: 24131 optval = rack->r_ctl.saved_rxt_clamp_val; 24132 break; 24133 case TCP_DEFER_OPTIONS: 24134 optval = rack->defer_options; 24135 break; 24136 case TCP_RACK_MEASURE_CNT: 24137 optval = rack->r_ctl.req_measurements; 24138 break; 24139 case TCP_REC_ABC_VAL: 24140 optval = rack->r_use_labc_for_rec; 24141 break; 24142 case TCP_RACK_ABC_VAL: 24143 optval = rack->rc_labc; 24144 break; 24145 case TCP_HDWR_UP_ONLY: 24146 optval= rack->r_up_only; 24147 break; 24148 case TCP_PACING_RATE_CAP: 24149 loptval = rack->r_ctl.bw_rate_cap; 24150 break; 24151 case TCP_RACK_PROFILE: 24152 /* You cannot retrieve a profile, its write only */ 24153 error = EINVAL; 24154 break; 24155 case TCP_HYBRID_PACING: 24156 /* You cannot retrieve hybrid pacing information, its write only */ 24157 error = EINVAL; 24158 break; 24159 case TCP_USE_CMP_ACKS: 24160 optval = rack->r_use_cmp_ack; 24161 break; 24162 case TCP_RACK_PACE_TO_FILL: 24163 optval = rack->rc_pace_to_cwnd; 24164 if (optval && rack->r_fill_less_agg) 24165 optval++; 24166 break; 24167 case TCP_RACK_NO_PUSH_AT_MAX: 24168 optval = rack->r_ctl.rc_no_push_at_mrtt; 24169 break; 24170 case TCP_SHARED_CWND_ENABLE: 24171 optval = rack->rack_enable_scwnd; 24172 break; 24173 case TCP_RACK_NONRXT_CFG_RATE: 24174 optval = rack->rack_rec_nonrxt_use_cr; 24175 break; 24176 case TCP_NO_PRR: 24177 if (rack->rack_no_prr == 1) 24178 optval = 1; 24179 else if (rack->no_prr_addback == 1) 24180 optval = 2; 24181 else 24182 optval = 0; 24183 break; 24184 case TCP_RACK_DO_DETECTION: 24185 optval = rack->do_detection; 24186 break; 24187 case TCP_RACK_MBUF_QUEUE: 24188 /* Now do we use the LRO mbuf-queue feature */ 24189 optval = rack->r_mbuf_queue; 24190 break; 24191 case TCP_TIMELY_DYN_ADJ: 24192 optval = rack->rc_gp_dyn_mul; 24193 break; 24194 case TCP_BBR_IWINTSO: 24195 optval = rack->rc_init_win; 24196 break; 24197 case TCP_RACK_TLP_REDUCE: 24198 /* RACK TLP cwnd reduction (bool) */ 24199 optval = rack->r_ctl.rc_tlp_cwnd_reduce; 24200 break; 24201 case TCP_BBR_RACK_INIT_RATE: 24202 val = rack->r_ctl.init_rate; 24203 /* convert to kbits per sec */ 24204 val *= 8; 24205 val /= 1000; 24206 optval = (uint32_t)val; 24207 break; 24208 case TCP_RACK_FORCE_MSEG: 24209 optval = rack->rc_force_max_seg; 24210 break; 24211 case TCP_RACK_PACE_MIN_SEG: 24212 optval = rack->r_ctl.rc_user_set_min_segs; 24213 break; 24214 case TCP_RACK_PACE_MAX_SEG: 24215 /* Max segments in a pace */ 24216 optval = rack->rc_user_set_max_segs; 24217 break; 24218 case TCP_RACK_PACE_ALWAYS: 24219 /* Use the always pace method */ 24220 optval = rack->rc_always_pace; 24221 break; 24222 case TCP_RACK_PRR_SENDALOT: 24223 /* Allow PRR to send more than one seg */ 24224 optval = rack->r_ctl.rc_prr_sendalot; 24225 break; 24226 case TCP_RACK_MIN_TO: 24227 /* Minimum time between rack t-o's in ms */ 24228 optval = rack->r_ctl.rc_min_to; 24229 break; 24230 case TCP_RACK_SPLIT_LIMIT: 24231 optval = rack->r_ctl.rc_split_limit; 24232 break; 24233 case TCP_RACK_EARLY_SEG: 24234 /* If early recovery max segments */ 24235 optval = rack->r_ctl.rc_early_recovery_segs; 24236 break; 24237 case TCP_RACK_REORD_THRESH: 24238 /* RACK reorder threshold (shift amount) */ 24239 optval = rack->r_ctl.rc_reorder_shift; 24240 break; 24241 case TCP_RACK_REORD_FADE: 24242 /* Does reordering fade after ms time */ 24243 optval = rack->r_ctl.rc_reorder_fade; 24244 break; 24245 case TCP_BBR_USE_RACK_RR: 24246 /* Do we use the rack cheat for rxt */ 24247 optval = rack->use_rack_rr; 24248 break; 24249 case TCP_RACK_RR_CONF: 24250 optval = rack->r_rr_config; 24251 break; 24252 case TCP_HDWR_RATE_CAP: 24253 optval = rack->r_rack_hw_rate_caps; 24254 break; 24255 case TCP_BBR_HDWR_PACE: 24256 optval = rack->rack_hdw_pace_ena; 24257 break; 24258 case TCP_RACK_TLP_THRESH: 24259 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 24260 optval = rack->r_ctl.rc_tlp_threshold; 24261 break; 24262 case TCP_RACK_PKT_DELAY: 24263 /* RACK added ms i.e. rack-rtt + reord + N */ 24264 optval = rack->r_ctl.rc_pkt_delay; 24265 break; 24266 case TCP_RACK_TLP_USE: 24267 optval = rack->rack_tlp_threshold_use; 24268 break; 24269 case TCP_PACING_DND: 24270 optval = rack->rc_pace_dnd; 24271 break; 24272 case TCP_RACK_PACE_RATE_CA: 24273 optval = rack->r_ctl.rc_fixed_pacing_rate_ca; 24274 break; 24275 case TCP_RACK_PACE_RATE_SS: 24276 optval = rack->r_ctl.rc_fixed_pacing_rate_ss; 24277 break; 24278 case TCP_RACK_PACE_RATE_REC: 24279 optval = rack->r_ctl.rc_fixed_pacing_rate_rec; 24280 break; 24281 case TCP_RACK_GP_INCREASE_SS: 24282 optval = rack->r_ctl.rack_per_of_gp_ca; 24283 break; 24284 case TCP_RACK_GP_INCREASE_CA: 24285 optval = rack->r_ctl.rack_per_of_gp_ss; 24286 break; 24287 case TCP_RACK_PACING_DIVISOR: 24288 optval = rack->r_ctl.pace_len_divisor; 24289 break; 24290 case TCP_BBR_RACK_RTT_USE: 24291 optval = rack->r_ctl.rc_rate_sample_method; 24292 break; 24293 case TCP_DELACK: 24294 optval = tp->t_delayed_ack; 24295 break; 24296 case TCP_DATA_AFTER_CLOSE: 24297 optval = rack->rc_allow_data_af_clo; 24298 break; 24299 case TCP_SHARED_CWND_TIME_LIMIT: 24300 optval = rack->r_limit_scw; 24301 break; 24302 case TCP_RACK_TIMER_SLOP: 24303 optval = rack->r_ctl.timer_slop; 24304 break; 24305 default: 24306 return (tcp_default_ctloutput(tp, sopt)); 24307 break; 24308 } 24309 INP_WUNLOCK(inp); 24310 if (error == 0) { 24311 if (TCP_PACING_RATE_CAP) 24312 error = sooptcopyout(sopt, &loptval, sizeof loptval); 24313 else 24314 error = sooptcopyout(sopt, &optval, sizeof optval); 24315 } 24316 return (error); 24317 } 24318 24319 static int 24320 rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt) 24321 { 24322 if (sopt->sopt_dir == SOPT_SET) { 24323 return (rack_set_sockopt(tp, sopt)); 24324 } else if (sopt->sopt_dir == SOPT_GET) { 24325 return (rack_get_sockopt(tp, sopt)); 24326 } else { 24327 panic("%s: sopt_dir $%d", __func__, sopt->sopt_dir); 24328 } 24329 } 24330 24331 static const char *rack_stack_names[] = { 24332 __XSTRING(STACKNAME), 24333 #ifdef STACKALIAS 24334 __XSTRING(STACKALIAS), 24335 #endif 24336 }; 24337 24338 static int 24339 rack_ctor(void *mem, int32_t size, void *arg, int32_t how) 24340 { 24341 memset(mem, 0, size); 24342 return (0); 24343 } 24344 24345 static void 24346 rack_dtor(void *mem, int32_t size, void *arg) 24347 { 24348 24349 } 24350 24351 static bool rack_mod_inited = false; 24352 24353 static int 24354 tcp_addrack(module_t mod, int32_t type, void *data) 24355 { 24356 int32_t err = 0; 24357 int num_stacks; 24358 24359 switch (type) { 24360 case MOD_LOAD: 24361 rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map", 24362 sizeof(struct rack_sendmap), 24363 rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0); 24364 24365 rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb", 24366 sizeof(struct tcp_rack), 24367 rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); 24368 24369 sysctl_ctx_init(&rack_sysctl_ctx); 24370 rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 24371 SYSCTL_STATIC_CHILDREN(_net_inet_tcp), 24372 OID_AUTO, 24373 #ifdef STACKALIAS 24374 __XSTRING(STACKALIAS), 24375 #else 24376 __XSTRING(STACKNAME), 24377 #endif 24378 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 24379 ""); 24380 if (rack_sysctl_root == NULL) { 24381 printf("Failed to add sysctl node\n"); 24382 err = EFAULT; 24383 goto free_uma; 24384 } 24385 rack_init_sysctls(); 24386 num_stacks = nitems(rack_stack_names); 24387 err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK, 24388 rack_stack_names, &num_stacks); 24389 if (err) { 24390 printf("Failed to register %s stack name for " 24391 "%s module\n", rack_stack_names[num_stacks], 24392 __XSTRING(MODNAME)); 24393 sysctl_ctx_free(&rack_sysctl_ctx); 24394 free_uma: 24395 uma_zdestroy(rack_zone); 24396 uma_zdestroy(rack_pcb_zone); 24397 rack_counter_destroy(); 24398 printf("Failed to register rack module -- err:%d\n", err); 24399 return (err); 24400 } 24401 tcp_lro_reg_mbufq(); 24402 rack_mod_inited = true; 24403 break; 24404 case MOD_QUIESCE: 24405 err = deregister_tcp_functions(&__tcp_rack, true, false); 24406 break; 24407 case MOD_UNLOAD: 24408 err = deregister_tcp_functions(&__tcp_rack, false, true); 24409 if (err == EBUSY) 24410 break; 24411 if (rack_mod_inited) { 24412 uma_zdestroy(rack_zone); 24413 uma_zdestroy(rack_pcb_zone); 24414 sysctl_ctx_free(&rack_sysctl_ctx); 24415 rack_counter_destroy(); 24416 rack_mod_inited = false; 24417 } 24418 tcp_lro_dereg_mbufq(); 24419 err = 0; 24420 break; 24421 default: 24422 return (EOPNOTSUPP); 24423 } 24424 return (err); 24425 } 24426 24427 static moduledata_t tcp_rack = { 24428 .name = __XSTRING(MODNAME), 24429 .evhand = tcp_addrack, 24430 .priv = 0 24431 }; 24432 24433 MODULE_VERSION(MODNAME, 1); 24434 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY); 24435 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1); 24436 24437 #endif /* #if !defined(INET) && !defined(INET6) */ 24438