/*-
 * Copyright (c) 2016-2020 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_ratelimit.h"
#include "opt_kern_tls.h"
#if defined(INET) || defined(INET6)
#include <sys/param.h>
#include <sys/arb.h>
#include <sys/module.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#ifdef STATS
#include <sys/qmath.h>
#include <sys/tree.h>
#include <sys/stats.h>		/* Must come after qmath.h and tree.h */
#else
#include <sys/tree.h>
#endif
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/tim_filter.h>
#include <sys/smp.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>
#include <sys/protosw.h>
#ifdef TCP_ACCOUNTING
#include <sys/sched.h>
#include <machine/cpu.h>
#endif
#include <vm/uma.h>

#include <net/route.h>
#include <net/route/nhop.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_syncache.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_ratelimit.h>
#include <netinet/tcp_accounting.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_newreno.h>
#include <netinet/tcp_fastopen.h>
#include <netinet/tcp_lro.h>
#ifdef NETFLIX_SHARED_CWND
#include <netinet/tcp_shared_cwnd.h>
#endif
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcp_ecn.h>

#include <netipsec/ipsec_support.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif /* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
#include "sack_filter.h"
#include "tcp_rack.h"
#include "tailq_hash.h"
#include "rack_bbr_common.h"

uma_zone_t rack_zone;
uma_zone_t rack_pcb_zone;

#ifndef TICKS2SBT
#define TICKS2SBT(__t) (tick_sbt * ((sbintime_t)(__t)))
#endif

VNET_DECLARE(uint32_t, newreno_beta);
VNET_DECLARE(uint32_t, newreno_beta_ecn);
#define V_newreno_beta VNET(newreno_beta)
#define V_newreno_beta_ecn VNET(newreno_beta_ecn)

MALLOC_DEFINE(M_TCPFSB, "tcp_fsb", "TCP fast send block");
MALLOC_DEFINE(M_TCPDO, "tcp_do", "TCP deferred options");

struct sysctl_ctx_list rack_sysctl_ctx;
struct sysctl_oid *rack_sysctl_root;

#define CUM_ACKED 1
#define SACKED 2

/*
 * The RACK module incorporates a number of TCP ideas that have been
 * put out into the IETF over the last few years:
 * - Matt Mathis's Rate Halving, which slowly drops the congestion
 *   window so that the ack clock can be maintained during recovery.
 * - Yuchung Cheng's RACK TCP (for which it's named), which stops us
 *   from using the number of dup acks and instead uses time as the
 *   gauge of when we retransmit.
 * - Reorder Detection of RFC 4737 and the Tail-Loss Probe draft of
 *   Dukkipati et al.
 * RACK depends on SACK, so if an endpoint arrives that cannot do SACK
 * the state machine below will shuttle the connection back to using
 * the "default" TCP stack that is in FreeBSD.
 *
 * To implement RACK the original TCP stack was first decomposed into a
 * functional state machine with individual states for each of the
 * possible TCP connection states.  The do_segment function's role in
 * life is to mandate that the connection supports SACK initially and
 * then assure that the RACK state matches the connection state before
 * calling that state's do_segment function.  Each state is simplified
 * due to the fact that the original do_segment has been decomposed and
 * we *know* what state we are in (no switches on the state) and all
 * tests for SACK are gone.  This greatly simplifies what each state
 * does.
 *
 * TCP output is also overwritten with a new version since it must
 * maintain the new rack scoreboard.
 */
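
/*
 * Illustrative sketch only -- not part of the stack and never compiled
 * (wrapped in #if 0).  It shows the idea behind the decomposition
 * described above: one handler per TCP state, chosen once when the
 * connection changes state (see rack_set_state() declared below)
 * instead of switching on tp->t_state in the hot path.  The handler
 * names are the real per-state functions declared further down in this
 * file; the dispatch table itself is hypothetical.
 */
#if 0
typedef int (*rack_state_fn)(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to,
    int32_t drop_hdrlen, int32_t tlen, uint32_t tiwin, int32_t thflags,
    int32_t nxt_pkt, uint8_t iptos);

static const rack_state_fn rack_state_handlers[TCP_NSTATES] = {
	[TCPS_SYN_SENT]     = rack_do_syn_sent,
	[TCPS_SYN_RECEIVED] = rack_do_syn_recv,
	[TCPS_ESTABLISHED]  = rack_do_established,
	[TCPS_CLOSE_WAIT]   = rack_do_close_wait,
	[TCPS_FIN_WAIT_1]   = rack_do_fin_wait_1,
	[TCPS_CLOSING]      = rack_do_closing,
	[TCPS_LAST_ACK]     = rack_do_lastack,
	[TCPS_FIN_WAIT_2]   = rack_do_fin_wait_2,
};
#endif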
static int32_t rack_tlp_thresh = 1;
static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs w-out new data */
static int32_t rack_tlp_use_greater = 1;
static int32_t rack_reorder_thresh = 2;
static int32_t rack_reorder_fade = 60000000; /* 0 - never fade, def 60,000,000 - 60 seconds */
static uint32_t rack_clamp_ss_upper = 110;
static uint32_t rack_clamp_ca_upper = 105;
static uint32_t rack_rxt_min_rnds = 10; /* Min rounds if drastic rxt clamp is in place */
static uint32_t rack_unclamp_round_thresh = 100; /* number of perfect rounds before we unclamp */
static uint32_t rack_unclamp_rxt_thresh = 5; /* .5% and under */
static uint64_t rack_rxt_clamp_thresh = 0; /* Do we do the rxt clamp thing */
static int32_t rack_dnd_default = 0; /* For rr_conf = 3, what is the default for dnd */
static int32_t rack_rxt_controls = 0;
static int32_t rack_fill_cw_state = 0;
static uint8_t rack_req_measurements = 1;
/* Attack threshold detections */
static uint32_t rack_highest_sack_thresh_seen = 0;
static uint32_t rack_highest_move_thresh_seen = 0;
static uint32_t rack_merge_out_sacks_on_attack = 0;
static int32_t rack_enable_hw_pacing = 0; /* Due to CCSP keep it off by default */
static int32_t rack_hw_pace_extra_slots = 0; /* 2 extra MSS time betweens */
static int32_t rack_hw_rate_caps = 0; /* 1; */
static int32_t rack_hw_rate_cap_per = 0; /* 0 -- off */
static int32_t rack_hw_rate_min = 0; /* 1500000; */
static int32_t rack_hw_rate_to_low = 0; /* 1200000; */
static int32_t rack_hw_up_only = 0;
static int32_t rack_stats_gets_ms_rtt = 1;
static int32_t rack_prr_addbackmax = 2;
static int32_t rack_do_hystart = 0;
static int32_t rack_apply_rtt_with_reduced_conf = 0;
static int32_t rack_hibeta_setting = 0;
static int32_t rack_default_pacing_divisor = 250;
static int32_t rack_uses_full_dgp_in_rec = 1;
static uint16_t rack_pacing_min_seg = 0;

static uint32_t sad_seg_size_per = 800; /* 80.0 % */
static int32_t rack_pkt_delay = 1000;
static int32_t rack_send_a_lot_in_prr = 1;
static int32_t rack_min_to = 1000; /* Minimum rack timeout in microseconds */
static int32_t rack_verbose_logging = 0;
static int32_t rack_ignore_data_after_close = 1;
static int32_t rack_enable_shared_cwnd = 1;
static int32_t rack_use_cmp_acks = 1;
static int32_t rack_use_fsb = 1;
static int32_t rack_use_rfo = 1;
static int32_t rack_use_rsm_rfo = 1;
static int32_t rack_max_abc_post_recovery = 2;
static int32_t rack_client_low_buf = 0;
static int32_t rack_dsack_std_based = 0x3; /* bit field: bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */
static int32_t rack_bw_multipler = 2; /* Limit on fill cw's jump up to be this x gp_est */
#ifdef TCP_ACCOUNTING
static int32_t rack_tcp_accounting = 0;
#endif
static int32_t rack_limits_scwnd = 1;
static int32_t rack_enable_mqueue_for_nonpaced = 0;
static int32_t rack_hybrid_allow_set_maxseg = 0;
static int32_t rack_disable_prr = 0;
static int32_t use_rack_rr = 1;
static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/ca)? */
static int32_t rack_persist_min = 250000; /* 250ms in usecs */
static int32_t rack_persist_max = 2000000; /* 2 seconds in usecs */
static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */
static int32_t rack_default_init_window = 0; /* Use system default */
static int32_t rack_limit_time_with_srtt = 0;
static int32_t rack_autosndbuf_inc = 20; /* In percentage form */
static int32_t rack_enobuf_hw_boost_mult = 0; /* How many times the hw rate we boost slot using time_between */
static int32_t rack_enobuf_hw_max = 12000; /* 12 ms in usecs */
static int32_t rack_enobuf_hw_min = 10000; /* 10 ms in usecs */
static int32_t rack_hw_rwnd_factor = 2; /* How many max_segs the rwnd must be before we hold off sending */
static int32_t rack_hw_check_queue = 0; /* Do we always pre-check queue depth of a hw queue */
static int32_t rack_full_buffer_discount = 10;
/*
 * Currently regular tcp has a rto_min of 30ms; the backoff doubles 12
 * times, so that ends up being a total of 30 ms * (2^12 - 1) = 122.850
 * seconds before a connection is killed.
 */
static uint32_t rack_def_data_window = 20;
static uint32_t rack_goal_bdp = 2;
static uint32_t rack_min_srtts = 1;
static uint32_t rack_min_measure_usec = 0;
static int32_t rack_tlp_min = 10000; /* 10ms */
static int32_t rack_rto_min = 30000; /* 30,000 usec same as main freebsd */
static int32_t rack_rto_max = 4000000; /* 4 seconds in usecs */
static const int32_t rack_free_cache = 2;
static int32_t rack_hptsi_segments = 40;
static int32_t rack_rate_sample_method = USE_RTT_LOW;
static int32_t rack_pace_every_seg = 0;
static int32_t rack_delayed_ack_time = 40000; /* 40ms in usecs */
static int32_t rack_slot_reduction = 4;
static int32_t rack_wma_divisor = 8; /* For WMA calculation */
static int32_t rack_cwnd_block_ends_measure = 0;
static int32_t rack_rwnd_block_ends_measure = 0;
static int32_t rack_def_profile = 0;

static int32_t rack_lower_cwnd_at_tlp = 0;
static int32_t rack_limited_retran = 0;
static int32_t rack_always_send_oldest = 0;
static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;

static uint16_t rack_per_of_gp_ss = 250; /* 250% slow-start */
static uint16_t rack_per_of_gp_ca = 200; /* 200% congestion-avoidance */
static uint16_t rack_per_of_gp_rec = 200; /* 200% of bw */

/* Probertt */
static uint16_t rack_per_of_gp_probertt = 60; /* 60% of bw */
static uint16_t rack_per_of_gp_lowthresh = 40; /* 40% is bottom */
static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */
static uint16_t rack_atexit_prtt_hbp = 130; /* Clamp to 130% on exit prtt if highly buffered path */
static uint16_t rack_atexit_prtt = 130; /* Clamp to 100% on exit prtt if non highly buffered path */

static uint32_t rack_max_drain_wait = 2; /* How many gp srtt's before we give up draining */
static uint32_t rack_must_drain = 1; /* How many GP srtt's we *must* wait */
static uint32_t rack_probertt_use_min_rtt_entry = 1; /* Use the min to calculate the goal else gp_srtt */
static uint32_t rack_probertt_use_min_rtt_exit = 0;
static uint32_t rack_probe_rtt_sets_cwnd = 0;
static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */
static uint32_t rack_time_between_probertt = 9600000; /* 9.6 sec in usecs */
static uint32_t rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt periods does probe-rtt last, top fraction */
static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last, bottom fraction */
static uint32_t rack_min_probertt_hold = 40000; /* Equal to delayed ack time */
static uint32_t rack_probertt_filter_life = 10000000;
static uint32_t rack_probertt_lower_within = 10;
static uint32_t rack_min_rtt_movement = 250000; /* Must move at least 250ms (in microseconds) to count as a lowering */
static int32_t rack_pace_one_seg = 0; /* Shall we pace 1 MSS at a time for low b/w (less than 1.4Meg)? */
static int32_t rack_probertt_clear_is = 1;
static int32_t rack_max_drain_hbp = 1; /* Extra drain times gpsrtt for highly buffered paths */
static int32_t rack_hbp_thresh = 3; /* what is the divisor max_rtt/min_rtt to decide a hbp */

/* Part of pacing */
static int32_t rack_max_per_above = 30; /* When we go to increment stop if above 100+this% */

/*
 * Timely information:
 *
 * Here we have various control parameters on how timely may change the
 * multiplier.  rack_gain_p5_ub is associated with timely but does not
 * directly influence the rate decision like the other variables.  It
 * controls the way fill-cw interacts with timely and caps how much
 * timely can boost the fill-cw b/w.
 *
 * The other values are various boost/shrink numbers as well as
 * potential caps when adjustments are made to the timely gain (returned
 * by rack_get_output_gain()).  Remember too that the gain returned can
 * be overridden by other factors such as probeRTT as well as
 * fixed-rate-pacing.
 */
static int32_t rack_gain_p5_ub = 250;
static int32_t rack_gp_per_bw_mul_up = 2; /* 2% */
static int32_t rack_gp_per_bw_mul_down = 4; /* 4% */
static int32_t rack_gp_rtt_maxmul = 3; /* 3 x maxmin */
static int32_t rack_gp_rtt_minmul = 1; /* minrtt + (minrtt/mindiv) is lower rtt */
static int32_t rack_gp_rtt_mindiv = 4; /* minrtt + (minrtt * minmul/mindiv) is lower rtt */
static int32_t rack_gp_decrease_per = 80; /* Beta value of timely decrease (.8) = 80 */
static int32_t rack_gp_increase_per = 2; /* 2% increase in multiplier */
static int32_t rack_per_lower_bound = 50; /* Don't allow to drop below this multiplier */
static int32_t rack_per_upper_bound_ss = 0; /* Don't allow SS to grow above this */
static int32_t rack_per_upper_bound_ca = 0; /* Don't allow CA to grow above this */
static int32_t rack_do_dyn_mul = 0; /* Are the rack gp multipliers dynamic */
static int32_t rack_gp_no_rec_chg = 1; /* Prohibit recovery from reducing its multiplier */
static int32_t rack_timely_dec_clear = 6; /* Do we clear decrement count at a value (6)? */
static int32_t rack_timely_max_push_rise = 3; /* Three rounds of pushing */
static int32_t rack_timely_max_push_drop = 3; /* Three rounds of pushing */
static int32_t rack_timely_min_segs = 4; /* 4 segment minimum */
static int32_t rack_use_max_for_nobackoff = 0;
static int32_t rack_timely_int_timely_only = 0; /* do interim timely's only use the timely algo (no b/w changes)? */
static int32_t rack_timely_no_stopping = 0;
static int32_t rack_down_raise_thresh = 100;
static int32_t rack_req_segs = 1;
static uint64_t rack_bw_rate_cap = 0;

/* Rack specific counters */
counter_u64_t rack_saw_enobuf;
counter_u64_t rack_saw_enobuf_hw;
counter_u64_t rack_saw_enetunreach;
counter_u64_t rack_persists_sends;
counter_u64_t rack_persists_acks;
counter_u64_t rack_persists_loss;
counter_u64_t rack_persists_lost_ends;
counter_u64_t rack_total_bytes;
#ifdef INVARIANTS
counter_u64_t rack_adjust_map_bw;
#endif
/* Tail loss probe counters */
counter_u64_t rack_tlp_tot;
counter_u64_t rack_tlp_newdata;
counter_u64_t rack_tlp_retran;
counter_u64_t rack_tlp_retran_bytes;
counter_u64_t rack_to_tot;
counter_u64_t rack_hot_alloc;
counter_u64_t rack_to_alloc;
counter_u64_t rack_to_alloc_hard;
counter_u64_t rack_to_alloc_emerg;
counter_u64_t rack_to_alloc_limited;
counter_u64_t rack_alloc_limited_conns;
counter_u64_t rack_split_limited;
counter_u64_t rack_rxt_clamps_cwnd;
counter_u64_t rack_rxt_clamps_cwnd_uniq;

counter_u64_t rack_multi_single_eq;
counter_u64_t rack_proc_non_comp_ack;

counter_u64_t rack_fto_send;
counter_u64_t rack_fto_rsm_send;
counter_u64_t rack_nfto_resend;
counter_u64_t rack_non_fto_send;
counter_u64_t rack_extended_rfo;

counter_u64_t rack_sack_proc_all;
counter_u64_t rack_sack_proc_short;
counter_u64_t rack_sack_proc_restart;
counter_u64_t rack_sack_attacks_detected;
counter_u64_t rack_sack_attacks_reversed;
counter_u64_t rack_sack_attacks_suspect;
counter_u64_t rack_sack_used_next_merge;
counter_u64_t rack_sack_splits;
counter_u64_t rack_sack_used_prev_merge;
counter_u64_t rack_sack_skipped_acked;
counter_u64_t rack_ack_total;
counter_u64_t rack_express_sack;
counter_u64_t rack_sack_total;
counter_u64_t rack_move_none;
counter_u64_t rack_move_some;

counter_u64_t rack_input_idle_reduces;
counter_u64_t rack_collapsed_win;
counter_u64_t rack_collapsed_win_seen;
counter_u64_t rack_collapsed_win_rxt;
counter_u64_t rack_collapsed_win_rxt_bytes;
counter_u64_t rack_try_scwnd;
counter_u64_t rack_hw_pace_init_fail;
counter_u64_t rack_hw_pace_lost;

counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];

#define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2)))

#define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do {	\
	(tv) = (value) + slop;					\
	if ((u_long)(tv) < (u_long)(tvmin))			\
		(tv) = (tvmin);					\
	if ((u_long)(tv) > (u_long)(tvmax))			\
		(tv) = (tvmax);					\
} while (0)

static void
rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line);

static int
rack_process_ack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to,
    uint32_t tiwin, int32_t tlen, int32_t *ofia, int32_t thflags, int32_t *ret_val);
static int
rack_process_data(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static void
rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery);
static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
    uint8_t limit_type);
static struct rack_sendmap *
rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused);
static void
rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int);
static void rack_counter_destroy(void);
static int
rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt);
static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
static void
rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override);
static void
rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
    int32_t drop_hdrlen, int32_t tlen, uint8_t iptos);
static void rack_dtor(void *mem, int32_t size, void *arg);
static void
rack_log_alt_to_to_cancel(struct tcp_rack *rack,
    uint32_t flex1, uint32_t flex2,
    uint32_t flex3, uint32_t flex4,
    uint32_t flex5, uint32_t flex6,
    uint16_t flex7, uint8_t mod);

static void
rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
    uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line,
    struct rack_sendmap *rsm, uint8_t quality);
static struct rack_sendmap *
rack_find_high_nonack(struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
static int rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt);
static void
rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
    tcp_seq th_ack, int line, uint8_t quality);
static void
rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm);

static uint32_t
rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss);
static int32_t rack_handoff_ok(struct tcpcb *tp);
static int32_t rack_init(struct tcpcb *tp, void **ptr);
static void rack_init_sysctls(void);

static void
rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
    struct tcphdr *th, int entered_rec, int dup_ack_struck,
    int *dsack_seen, int *sacks_seen);
static void
rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
    uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t ts,
    struct rack_sendmap *hintrsm, uint16_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls, int segsiz);

static uint64_t rack_get_gp_est(struct tcp_rack *rack);

static void
rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm);
static int32_t rack_output(struct tcpcb *tp);

static uint32_t
rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
    struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
    uint32_t cts, int *no_extra, int *moved_two, uint32_t segsiz);
static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq);
static void rack_remxt_tmr(struct tcpcb *tp);
static int rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt);
static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
static int32_t rack_stopall(struct tcpcb *tp);
static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
static uint32_t
rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag, int segsiz);
static void
rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag, int segsiz);
static int
rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack);
static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
static int
rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_closing(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_established(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_lastack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static void rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts);
struct rack_sendmap *
tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t tsused);
static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt,
    uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt);
static void
tcp_rack_partialack(struct tcpcb *tp);
static int
rack_set_profile(struct tcp_rack *rack, int prof);
static void
rack_apply_deferred_options(struct tcp_rack *rack);

int32_t rack_clear_counter = 0;

static uint64_t
rack_get_lt_bw(struct tcp_rack *rack)
{
    struct timeval tv;
    uint64_t tim, bytes;

    tim = rack->r_ctl.lt_bw_time;
    bytes = rack->r_ctl.lt_bw_bytes;
    if (rack->lt_bw_up) {
        /* Include all the current bytes too */
        microuptime(&tv);
        bytes += (rack->rc_tp->snd_una - rack->r_ctl.lt_seq);
        tim += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark);
    }
    if ((bytes != 0) && (tim != 0))
        return ((bytes * (uint64_t)1000000) / tim);
    else
        return (0);
}
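
/*
 * Worked example for rack_get_lt_bw() above (illustrative numbers only):
 * if lt_bw_bytes has accumulated 1,500,000 bytes over lt_bw_time of
 * 100,000 usec, the function returns
 * (1,500,000 * 1,000,000) / 100,000 = 15,000,000, i.e. the long-term
 * bandwidth estimate is expressed in bytes per second.
 */
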
= rack->r_ctl.lt_bw_time; 597 bytes = rack->r_ctl.lt_bw_bytes; 598 if (rack->lt_bw_up) { 599 /* Include all the current bytes too */ 600 microuptime(&tv); 601 bytes += (rack->rc_tp->snd_una - rack->r_ctl.lt_seq); 602 tim += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark); 603 } 604 if ((bytes != 0) && (tim != 0)) 605 return ((bytes * (uint64_t)1000000) / tim); 606 else 607 return (0); 608 } 609 610 static void 611 rack_swap_beta_values(struct tcp_rack *rack, uint8_t flex8) 612 { 613 struct sockopt sopt; 614 struct cc_newreno_opts opt; 615 struct newreno old; 616 struct tcpcb *tp; 617 int error, failed = 0; 618 619 tp = rack->rc_tp; 620 if (tp->t_cc == NULL) { 621 /* Tcb is leaving */ 622 return; 623 } 624 rack->rc_pacing_cc_set = 1; 625 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 626 /* Not new-reno we can't play games with beta! */ 627 failed = 1; 628 goto out; 629 630 } 631 if (CC_ALGO(tp)->ctl_output == NULL) { 632 /* Huh, not using new-reno so no swaps.? */ 633 failed = 2; 634 goto out; 635 } 636 /* Get the current values out */ 637 sopt.sopt_valsize = sizeof(struct cc_newreno_opts); 638 sopt.sopt_dir = SOPT_GET; 639 opt.name = CC_NEWRENO_BETA; 640 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 641 if (error) { 642 failed = 3; 643 goto out; 644 } 645 old.beta = opt.val; 646 opt.name = CC_NEWRENO_BETA_ECN; 647 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 648 if (error) { 649 failed = 4; 650 goto out; 651 } 652 old.beta_ecn = opt.val; 653 654 /* Now lets set in the values we have stored */ 655 sopt.sopt_dir = SOPT_SET; 656 opt.name = CC_NEWRENO_BETA; 657 opt.val = rack->r_ctl.rc_saved_beta.beta; 658 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 659 if (error) { 660 failed = 5; 661 goto out; 662 } 663 opt.name = CC_NEWRENO_BETA_ECN; 664 opt.val = rack->r_ctl.rc_saved_beta.beta_ecn; 665 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 666 if (error) { 667 failed = 6; 668 goto out; 669 } 670 /* Save off the values for restoral */ 671 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno)); 672 out: 673 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 674 union tcp_log_stackspecific log; 675 struct timeval tv; 676 struct newreno *ptr; 677 678 ptr = ((struct newreno *)tp->t_ccv.cc_data); 679 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 680 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 681 log.u_bbr.flex1 = ptr->beta; 682 log.u_bbr.flex2 = ptr->beta_ecn; 683 log.u_bbr.flex3 = ptr->newreno_flags; 684 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta; 685 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn; 686 log.u_bbr.flex6 = failed; 687 log.u_bbr.flex7 = rack->gp_ready; 688 log.u_bbr.flex7 <<= 1; 689 log.u_bbr.flex7 |= rack->use_fixed_rate; 690 log.u_bbr.flex7 <<= 1; 691 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; 692 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 693 log.u_bbr.flex8 = flex8; 694 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, error, 695 0, &log, false, NULL, NULL, 0, &tv); 696 } 697 } 698 699 static void 700 rack_set_cc_pacing(struct tcp_rack *rack) 701 { 702 if (rack->rc_pacing_cc_set) 703 return; 704 /* 705 * Use the swap utility placing in 3 for flex8 to id a 706 * set of a new set of values. 707 */ 708 rack->rc_pacing_cc_set = 1; 709 rack_swap_beta_values(rack, 3); 710 } 711 712 static void 713 rack_undo_cc_pacing(struct tcp_rack *rack) 714 { 715 if (rack->rc_pacing_cc_set == 0) 716 return; 717 /* 718 * Use the swap utility placing in 4 for flex8 to id a 719 * restoral of the old values. 
720 */ 721 rack->rc_pacing_cc_set = 0; 722 rack_swap_beta_values(rack, 4); 723 } 724 725 static void 726 rack_log_gpset(struct tcp_rack *rack, uint32_t seq_end, uint32_t ack_end_t, 727 uint32_t send_end_t, int line, uint8_t mode, struct rack_sendmap *rsm) 728 { 729 if (tcp_bblogging_on(rack->rc_tp) && (rack_verbose_logging != 0)) { 730 union tcp_log_stackspecific log; 731 struct timeval tv; 732 733 memset(&log, 0, sizeof(log)); 734 log.u_bbr.flex1 = seq_end; 735 log.u_bbr.flex2 = rack->rc_tp->gput_seq; 736 log.u_bbr.flex3 = ack_end_t; 737 log.u_bbr.flex4 = rack->rc_tp->gput_ts; 738 log.u_bbr.flex5 = send_end_t; 739 log.u_bbr.flex6 = rack->rc_tp->gput_ack; 740 log.u_bbr.flex7 = mode; 741 log.u_bbr.flex8 = 69; 742 log.u_bbr.rttProp = rack->r_ctl.rc_gp_cumack_ts; 743 log.u_bbr.delRate = rack->r_ctl.rc_gp_output_ts; 744 log.u_bbr.pkts_out = line; 745 log.u_bbr.cwnd_gain = rack->app_limited_needs_set; 746 log.u_bbr.pkt_epoch = rack->r_ctl.rc_app_limited_cnt; 747 if (rsm != NULL) { 748 log.u_bbr.applimited = rsm->r_start; 749 log.u_bbr.delivered = rsm->r_end; 750 log.u_bbr.epoch = rsm->r_flags; 751 } 752 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 753 TCP_LOG_EVENTP(rack->rc_tp, NULL, 754 &rack->rc_inp->inp_socket->so_rcv, 755 &rack->rc_inp->inp_socket->so_snd, 756 BBR_LOG_HPTSI_CALC, 0, 757 0, &log, false, &tv); 758 } 759 } 760 761 static int 762 sysctl_rack_clear(SYSCTL_HANDLER_ARGS) 763 { 764 uint32_t stat; 765 int32_t error; 766 767 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t)); 768 if (error || req->newptr == NULL) 769 return error; 770 771 error = SYSCTL_IN(req, &stat, sizeof(uint32_t)); 772 if (error) 773 return (error); 774 if (stat == 1) { 775 #ifdef INVARIANTS 776 printf("Clearing RACK counters\n"); 777 #endif 778 counter_u64_zero(rack_tlp_tot); 779 counter_u64_zero(rack_tlp_newdata); 780 counter_u64_zero(rack_tlp_retran); 781 counter_u64_zero(rack_tlp_retran_bytes); 782 counter_u64_zero(rack_to_tot); 783 counter_u64_zero(rack_saw_enobuf); 784 counter_u64_zero(rack_saw_enobuf_hw); 785 counter_u64_zero(rack_saw_enetunreach); 786 counter_u64_zero(rack_persists_sends); 787 counter_u64_zero(rack_total_bytes); 788 counter_u64_zero(rack_persists_acks); 789 counter_u64_zero(rack_persists_loss); 790 counter_u64_zero(rack_persists_lost_ends); 791 #ifdef INVARIANTS 792 counter_u64_zero(rack_adjust_map_bw); 793 #endif 794 counter_u64_zero(rack_to_alloc_hard); 795 counter_u64_zero(rack_to_alloc_emerg); 796 counter_u64_zero(rack_sack_proc_all); 797 counter_u64_zero(rack_fto_send); 798 counter_u64_zero(rack_fto_rsm_send); 799 counter_u64_zero(rack_extended_rfo); 800 counter_u64_zero(rack_hw_pace_init_fail); 801 counter_u64_zero(rack_hw_pace_lost); 802 counter_u64_zero(rack_non_fto_send); 803 counter_u64_zero(rack_nfto_resend); 804 counter_u64_zero(rack_sack_proc_short); 805 counter_u64_zero(rack_sack_proc_restart); 806 counter_u64_zero(rack_to_alloc); 807 counter_u64_zero(rack_to_alloc_limited); 808 counter_u64_zero(rack_alloc_limited_conns); 809 counter_u64_zero(rack_split_limited); 810 counter_u64_zero(rack_rxt_clamps_cwnd); 811 counter_u64_zero(rack_rxt_clamps_cwnd_uniq); 812 counter_u64_zero(rack_multi_single_eq); 813 counter_u64_zero(rack_proc_non_comp_ack); 814 counter_u64_zero(rack_sack_attacks_detected); 815 counter_u64_zero(rack_sack_attacks_reversed); 816 counter_u64_zero(rack_sack_attacks_suspect); 817 counter_u64_zero(rack_sack_used_next_merge); 818 counter_u64_zero(rack_sack_used_prev_merge); 819 counter_u64_zero(rack_sack_splits); 820 
counter_u64_zero(rack_sack_skipped_acked); 821 counter_u64_zero(rack_ack_total); 822 counter_u64_zero(rack_express_sack); 823 counter_u64_zero(rack_sack_total); 824 counter_u64_zero(rack_move_none); 825 counter_u64_zero(rack_move_some); 826 counter_u64_zero(rack_try_scwnd); 827 counter_u64_zero(rack_collapsed_win); 828 counter_u64_zero(rack_collapsed_win_rxt); 829 counter_u64_zero(rack_collapsed_win_seen); 830 counter_u64_zero(rack_collapsed_win_rxt_bytes); 831 } else if (stat == 2) { 832 #ifdef INVARIANTS 833 printf("Clearing RACK option array\n"); 834 #endif 835 COUNTER_ARRAY_ZERO(rack_opts_arry, RACK_OPTS_SIZE); 836 } else if (stat == 3) { 837 printf("Rack has no stats counters to clear (use 1 to clear all stats in sysctl node)\n"); 838 } else if (stat == 4) { 839 #ifdef INVARIANTS 840 printf("Clearing RACK out size array\n"); 841 #endif 842 COUNTER_ARRAY_ZERO(rack_out_size, TCP_MSS_ACCT_SIZE); 843 } 844 rack_clear_counter = 0; 845 return (0); 846 } 847 848 static void 849 rack_init_sysctls(void) 850 { 851 struct sysctl_oid *rack_counters; 852 struct sysctl_oid *rack_attack; 853 struct sysctl_oid *rack_pacing; 854 struct sysctl_oid *rack_timely; 855 struct sysctl_oid *rack_timers; 856 struct sysctl_oid *rack_tlp; 857 struct sysctl_oid *rack_misc; 858 struct sysctl_oid *rack_features; 859 struct sysctl_oid *rack_measure; 860 struct sysctl_oid *rack_probertt; 861 struct sysctl_oid *rack_hw_pacing; 862 863 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 864 SYSCTL_CHILDREN(rack_sysctl_root), 865 OID_AUTO, 866 "sack_attack", 867 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 868 "Rack Sack Attack Counters and Controls"); 869 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 870 SYSCTL_CHILDREN(rack_sysctl_root), 871 OID_AUTO, 872 "stats", 873 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 874 "Rack Counters"); 875 SYSCTL_ADD_S32(&rack_sysctl_ctx, 876 SYSCTL_CHILDREN(rack_sysctl_root), 877 OID_AUTO, "rate_sample_method", CTLFLAG_RW, 878 &rack_rate_sample_method , USE_RTT_LOW, 879 "What method should we use for rate sampling 0=high, 1=low "); 880 /* Probe rtt related controls */ 881 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 882 SYSCTL_CHILDREN(rack_sysctl_root), 883 OID_AUTO, 884 "probertt", 885 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 886 "ProbeRTT related Controls"); 887 SYSCTL_ADD_U16(&rack_sysctl_ctx, 888 SYSCTL_CHILDREN(rack_probertt), 889 OID_AUTO, "exit_per_hpb", CTLFLAG_RW, 890 &rack_atexit_prtt_hbp, 130, 891 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%"); 892 SYSCTL_ADD_U16(&rack_sysctl_ctx, 893 SYSCTL_CHILDREN(rack_probertt), 894 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW, 895 &rack_atexit_prtt, 130, 896 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%"); 897 SYSCTL_ADD_U16(&rack_sysctl_ctx, 898 SYSCTL_CHILDREN(rack_probertt), 899 OID_AUTO, "gp_per_mul", CTLFLAG_RW, 900 &rack_per_of_gp_probertt, 60, 901 "What percentage of goodput do we pace at in probertt"); 902 SYSCTL_ADD_U16(&rack_sysctl_ctx, 903 SYSCTL_CHILDREN(rack_probertt), 904 OID_AUTO, "gp_per_reduce", CTLFLAG_RW, 905 &rack_per_of_gp_probertt_reduce, 10, 906 "What percentage of goodput do we reduce every gp_srtt"); 907 SYSCTL_ADD_U16(&rack_sysctl_ctx, 908 SYSCTL_CHILDREN(rack_probertt), 909 OID_AUTO, "gp_per_low", CTLFLAG_RW, 910 &rack_per_of_gp_lowthresh, 40, 911 "What percentage of goodput do we allow the multiplier to fall to"); 912 SYSCTL_ADD_U32(&rack_sysctl_ctx, 913 SYSCTL_CHILDREN(rack_probertt), 914 OID_AUTO, "time_between", CTLFLAG_RW, 915 & rack_time_between_probertt, 
96000000, 916 "How many useconds between the lowest rtt falling must past before we enter probertt"); 917 SYSCTL_ADD_U32(&rack_sysctl_ctx, 918 SYSCTL_CHILDREN(rack_probertt), 919 OID_AUTO, "safety", CTLFLAG_RW, 920 &rack_probe_rtt_safety_val, 2000000, 921 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)"); 922 SYSCTL_ADD_U32(&rack_sysctl_ctx, 923 SYSCTL_CHILDREN(rack_probertt), 924 OID_AUTO, "sets_cwnd", CTLFLAG_RW, 925 &rack_probe_rtt_sets_cwnd, 0, 926 "Do we set the cwnd too (if always_lower is on)"); 927 SYSCTL_ADD_U32(&rack_sysctl_ctx, 928 SYSCTL_CHILDREN(rack_probertt), 929 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW, 930 &rack_max_drain_wait, 2, 931 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal"); 932 SYSCTL_ADD_U32(&rack_sysctl_ctx, 933 SYSCTL_CHILDREN(rack_probertt), 934 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW, 935 &rack_must_drain, 1, 936 "We must drain this many gp_srtt's waiting for flight to reach goal"); 937 SYSCTL_ADD_U32(&rack_sysctl_ctx, 938 SYSCTL_CHILDREN(rack_probertt), 939 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW, 940 &rack_probertt_use_min_rtt_entry, 1, 941 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); 942 SYSCTL_ADD_U32(&rack_sysctl_ctx, 943 SYSCTL_CHILDREN(rack_probertt), 944 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW, 945 &rack_probertt_use_min_rtt_exit, 0, 946 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); 947 SYSCTL_ADD_U32(&rack_sysctl_ctx, 948 SYSCTL_CHILDREN(rack_probertt), 949 OID_AUTO, "length_div", CTLFLAG_RW, 950 &rack_probertt_gpsrtt_cnt_div, 0, 951 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)"); 952 SYSCTL_ADD_U32(&rack_sysctl_ctx, 953 SYSCTL_CHILDREN(rack_probertt), 954 OID_AUTO, "length_mul", CTLFLAG_RW, 955 &rack_probertt_gpsrtt_cnt_mul, 0, 956 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)"); 957 SYSCTL_ADD_U32(&rack_sysctl_ctx, 958 SYSCTL_CHILDREN(rack_probertt), 959 OID_AUTO, "holdtim_at_target", CTLFLAG_RW, 960 &rack_min_probertt_hold, 200000, 961 "What is the minimum time we hold probertt at target"); 962 SYSCTL_ADD_U32(&rack_sysctl_ctx, 963 SYSCTL_CHILDREN(rack_probertt), 964 OID_AUTO, "filter_life", CTLFLAG_RW, 965 &rack_probertt_filter_life, 10000000, 966 "What is the time for the filters life in useconds"); 967 SYSCTL_ADD_U32(&rack_sysctl_ctx, 968 SYSCTL_CHILDREN(rack_probertt), 969 OID_AUTO, "lower_within", CTLFLAG_RW, 970 &rack_probertt_lower_within, 10, 971 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); 972 SYSCTL_ADD_U32(&rack_sysctl_ctx, 973 SYSCTL_CHILDREN(rack_probertt), 974 OID_AUTO, "must_move", CTLFLAG_RW, 975 &rack_min_rtt_movement, 250, 976 "How much is the minimum movement in rtt to count as a drop for probertt purposes"); 977 SYSCTL_ADD_U32(&rack_sysctl_ctx, 978 SYSCTL_CHILDREN(rack_probertt), 979 OID_AUTO, "clear_is_cnts", CTLFLAG_RW, 980 &rack_probertt_clear_is, 1, 981 "Do we clear I/S counts on exiting probe-rtt"); 982 SYSCTL_ADD_S32(&rack_sysctl_ctx, 983 SYSCTL_CHILDREN(rack_probertt), 984 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW, 985 &rack_max_drain_hbp, 1, 986 "How many extra drain gpsrtt's do we get in highly buffered paths"); 987 SYSCTL_ADD_S32(&rack_sysctl_ctx, 988 SYSCTL_CHILDREN(rack_probertt), 989 OID_AUTO, "hbp_threshold", CTLFLAG_RW, 990 &rack_hbp_thresh, 3, 991 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); 992 /* Pacing 
related sysctls */ 993 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 994 SYSCTL_CHILDREN(rack_sysctl_root), 995 OID_AUTO, 996 "pacing", 997 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 998 "Pacing related Controls"); 999 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1000 SYSCTL_CHILDREN(rack_pacing), 1001 OID_AUTO, "fulldgpinrec", CTLFLAG_RW, 1002 &rack_uses_full_dgp_in_rec, 1, 1003 "Do we use all DGP features in recovery (fillcw, timely et.al.)?"); 1004 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1005 SYSCTL_CHILDREN(rack_pacing), 1006 OID_AUTO, "fullbufdisc", CTLFLAG_RW, 1007 &rack_full_buffer_discount, 10, 1008 "What percentage b/w reduction over the GP estimate for a full buffer (default=0 off)?"); 1009 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1010 SYSCTL_CHILDREN(rack_pacing), 1011 OID_AUTO, "fillcw", CTLFLAG_RW, 1012 &rack_fill_cw_state, 0, 1013 "Enable fillcw on new connections (default=0 off)?"); 1014 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1015 SYSCTL_CHILDREN(rack_pacing), 1016 OID_AUTO, "min_burst", CTLFLAG_RW, 1017 &rack_pacing_min_seg, 0, 1018 "What is the min burst size for pacing (0 disables)?"); 1019 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1020 SYSCTL_CHILDREN(rack_pacing), 1021 OID_AUTO, "divisor", CTLFLAG_RW, 1022 &rack_default_pacing_divisor, 4, 1023 "What is the default divisor given to the rl code?"); 1024 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1025 SYSCTL_CHILDREN(rack_pacing), 1026 OID_AUTO, "fillcw_max_mult", CTLFLAG_RW, 1027 &rack_bw_multipler, 2, 1028 "What is the multiplier of the current gp_est that fillcw can increase the b/w too?"); 1029 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1030 SYSCTL_CHILDREN(rack_pacing), 1031 OID_AUTO, "max_pace_over", CTLFLAG_RW, 1032 &rack_max_per_above, 30, 1033 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)"); 1034 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1035 SYSCTL_CHILDREN(rack_pacing), 1036 OID_AUTO, "allow1mss", CTLFLAG_RW, 1037 &rack_pace_one_seg, 0, 1038 "Do we allow low b/w pacing of 1MSS instead of two (1.2Meg and less)?"); 1039 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1040 SYSCTL_CHILDREN(rack_pacing), 1041 OID_AUTO, "limit_wsrtt", CTLFLAG_RW, 1042 &rack_limit_time_with_srtt, 0, 1043 "Do we limit pacing time based on srtt"); 1044 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1045 SYSCTL_CHILDREN(rack_pacing), 1046 OID_AUTO, "init_win", CTLFLAG_RW, 1047 &rack_default_init_window, 0, 1048 "Do we have a rack initial window 0 = system default"); 1049 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1050 SYSCTL_CHILDREN(rack_pacing), 1051 OID_AUTO, "gp_per_ss", CTLFLAG_RW, 1052 &rack_per_of_gp_ss, 250, 1053 "If non zero, what percentage of goodput to pace at in slow start"); 1054 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1055 SYSCTL_CHILDREN(rack_pacing), 1056 OID_AUTO, "gp_per_ca", CTLFLAG_RW, 1057 &rack_per_of_gp_ca, 150, 1058 "If non zero, what percentage of goodput to pace at in congestion avoidance"); 1059 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1060 SYSCTL_CHILDREN(rack_pacing), 1061 OID_AUTO, "gp_per_rec", CTLFLAG_RW, 1062 &rack_per_of_gp_rec, 200, 1063 "If non zero, what percentage of goodput to pace at in recovery"); 1064 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1065 SYSCTL_CHILDREN(rack_pacing), 1066 OID_AUTO, "pace_max_seg", CTLFLAG_RW, 1067 &rack_hptsi_segments, 40, 1068 "What size is the max for TSO segments in pacing and burst mitigation"); 1069 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1070 SYSCTL_CHILDREN(rack_pacing), 1071 OID_AUTO, "burst_reduces", CTLFLAG_RW, 1072 &rack_slot_reduction, 4, 1073 "When doing only burst mitigation what is the reduce divisor"); 1074 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1075 
SYSCTL_CHILDREN(rack_sysctl_root), 1076 OID_AUTO, "use_pacing", CTLFLAG_RW, 1077 &rack_pace_every_seg, 0, 1078 "If set we use pacing, if clear we use only the original burst mitigation"); 1079 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1080 SYSCTL_CHILDREN(rack_pacing), 1081 OID_AUTO, "rate_cap", CTLFLAG_RW, 1082 &rack_bw_rate_cap, 0, 1083 "If set we apply this value to the absolute rate cap used by pacing"); 1084 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1085 SYSCTL_CHILDREN(rack_sysctl_root), 1086 OID_AUTO, "req_measure_cnt", CTLFLAG_RW, 1087 &rack_req_measurements, 1, 1088 "If doing dynamic pacing, how many measurements must be in before we start pacing?"); 1089 /* Hardware pacing */ 1090 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1091 SYSCTL_CHILDREN(rack_sysctl_root), 1092 OID_AUTO, 1093 "hdwr_pacing", 1094 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1095 "Pacing related Controls"); 1096 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1097 SYSCTL_CHILDREN(rack_hw_pacing), 1098 OID_AUTO, "rwnd_factor", CTLFLAG_RW, 1099 &rack_hw_rwnd_factor, 2, 1100 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?"); 1101 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1102 SYSCTL_CHILDREN(rack_hw_pacing), 1103 OID_AUTO, "precheck", CTLFLAG_RW, 1104 &rack_hw_check_queue, 0, 1105 "Do we always precheck the hdwr pacing queue to avoid ENOBUF's?"); 1106 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1107 SYSCTL_CHILDREN(rack_hw_pacing), 1108 OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW, 1109 &rack_enobuf_hw_boost_mult, 0, 1110 "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?"); 1111 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1112 SYSCTL_CHILDREN(rack_hw_pacing), 1113 OID_AUTO, "pace_enobuf_max", CTLFLAG_RW, 1114 &rack_enobuf_hw_max, 2, 1115 "What is the max boost the pacing time if we see a ENOBUFS?"); 1116 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1117 SYSCTL_CHILDREN(rack_hw_pacing), 1118 OID_AUTO, "pace_enobuf_min", CTLFLAG_RW, 1119 &rack_enobuf_hw_min, 2, 1120 "What is the min boost the pacing time if we see a ENOBUFS?"); 1121 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1122 SYSCTL_CHILDREN(rack_hw_pacing), 1123 OID_AUTO, "enable", CTLFLAG_RW, 1124 &rack_enable_hw_pacing, 0, 1125 "Should RACK attempt to use hw pacing?"); 1126 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1127 SYSCTL_CHILDREN(rack_hw_pacing), 1128 OID_AUTO, "rate_cap", CTLFLAG_RW, 1129 &rack_hw_rate_caps, 0, 1130 "Does the highest hardware pacing rate cap the rate we will send at??"); 1131 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1132 SYSCTL_CHILDREN(rack_hw_pacing), 1133 OID_AUTO, "uncap_per", CTLFLAG_RW, 1134 &rack_hw_rate_cap_per, 0, 1135 "If you go over b/w by this amount you will be uncapped (0 = never)"); 1136 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1137 SYSCTL_CHILDREN(rack_hw_pacing), 1138 OID_AUTO, "rate_min", CTLFLAG_RW, 1139 &rack_hw_rate_min, 0, 1140 "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?"); 1141 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1142 SYSCTL_CHILDREN(rack_hw_pacing), 1143 OID_AUTO, "rate_to_low", CTLFLAG_RW, 1144 &rack_hw_rate_to_low, 0, 1145 "If we fall below this rate, dis-engage hw pacing?"); 1146 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1147 SYSCTL_CHILDREN(rack_hw_pacing), 1148 OID_AUTO, "up_only", CTLFLAG_RW, 1149 &rack_hw_up_only, 0, 1150 "Do we allow hw pacing to lower the rate selected?"); 1151 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1152 SYSCTL_CHILDREN(rack_hw_pacing), 1153 OID_AUTO, "extra_mss_precise", CTLFLAG_RW, 1154 &rack_hw_pace_extra_slots, 0, 1155 "If the rates between software and hardware match precisely how many 
extra time_betweens do we get?"); 1156 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1157 SYSCTL_CHILDREN(rack_sysctl_root), 1158 OID_AUTO, 1159 "timely", 1160 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1161 "Rack Timely RTT Controls"); 1162 /* Timely based GP dynmics */ 1163 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1164 SYSCTL_CHILDREN(rack_timely), 1165 OID_AUTO, "upper", CTLFLAG_RW, 1166 &rack_gp_per_bw_mul_up, 2, 1167 "Rack timely upper range for equal b/w (in percentage)"); 1168 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1169 SYSCTL_CHILDREN(rack_timely), 1170 OID_AUTO, "lower", CTLFLAG_RW, 1171 &rack_gp_per_bw_mul_down, 4, 1172 "Rack timely lower range for equal b/w (in percentage)"); 1173 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1174 SYSCTL_CHILDREN(rack_timely), 1175 OID_AUTO, "rtt_max_mul", CTLFLAG_RW, 1176 &rack_gp_rtt_maxmul, 3, 1177 "Rack timely multiplier of lowest rtt for rtt_max"); 1178 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1179 SYSCTL_CHILDREN(rack_timely), 1180 OID_AUTO, "rtt_min_div", CTLFLAG_RW, 1181 &rack_gp_rtt_mindiv, 4, 1182 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1183 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1184 SYSCTL_CHILDREN(rack_timely), 1185 OID_AUTO, "rtt_min_mul", CTLFLAG_RW, 1186 &rack_gp_rtt_minmul, 1, 1187 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1188 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1189 SYSCTL_CHILDREN(rack_timely), 1190 OID_AUTO, "decrease", CTLFLAG_RW, 1191 &rack_gp_decrease_per, 80, 1192 "Rack timely Beta value 80 = .8 (scaled by 100)"); 1193 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1194 SYSCTL_CHILDREN(rack_timely), 1195 OID_AUTO, "increase", CTLFLAG_RW, 1196 &rack_gp_increase_per, 2, 1197 "Rack timely increase perentage of our GP multiplication factor"); 1198 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1199 SYSCTL_CHILDREN(rack_timely), 1200 OID_AUTO, "lowerbound", CTLFLAG_RW, 1201 &rack_per_lower_bound, 50, 1202 "Rack timely lowest percentage we allow GP multiplier to fall to"); 1203 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1204 SYSCTL_CHILDREN(rack_timely), 1205 OID_AUTO, "p5_upper", CTLFLAG_RW, 1206 &rack_gain_p5_ub, 250, 1207 "Profile 5 upper bound to timely gain"); 1208 1209 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1210 SYSCTL_CHILDREN(rack_timely), 1211 OID_AUTO, "upperboundss", CTLFLAG_RW, 1212 &rack_per_upper_bound_ss, 0, 1213 "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)"); 1214 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1215 SYSCTL_CHILDREN(rack_timely), 1216 OID_AUTO, "upperboundca", CTLFLAG_RW, 1217 &rack_per_upper_bound_ca, 0, 1218 "Rack timely highest percentage we allow GP multiplier to CA raise to (0 is no upperbound)"); 1219 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1220 SYSCTL_CHILDREN(rack_timely), 1221 OID_AUTO, "dynamicgp", CTLFLAG_RW, 1222 &rack_do_dyn_mul, 0, 1223 "Rack timely do we enable dynmaic timely goodput by default"); 1224 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1225 SYSCTL_CHILDREN(rack_timely), 1226 OID_AUTO, "no_rec_red", CTLFLAG_RW, 1227 &rack_gp_no_rec_chg, 1, 1228 "Rack timely do we prohibit the recovery multiplier from being lowered"); 1229 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1230 SYSCTL_CHILDREN(rack_timely), 1231 OID_AUTO, "red_clear_cnt", CTLFLAG_RW, 1232 &rack_timely_dec_clear, 6, 1233 "Rack timely what threshold do we count to before another boost during b/w decent"); 1234 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1235 SYSCTL_CHILDREN(rack_timely), 1236 OID_AUTO, "max_push_rise", CTLFLAG_RW, 1237 &rack_timely_max_push_rise, 3, 1238 "Rack timely how many times do we push up with b/w 
increase"); 1239 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1240 SYSCTL_CHILDREN(rack_timely), 1241 OID_AUTO, "max_push_drop", CTLFLAG_RW, 1242 &rack_timely_max_push_drop, 3, 1243 "Rack timely how many times do we push back on b/w decent"); 1244 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1245 SYSCTL_CHILDREN(rack_timely), 1246 OID_AUTO, "min_segs", CTLFLAG_RW, 1247 &rack_timely_min_segs, 4, 1248 "Rack timely when setting the cwnd what is the min num segments"); 1249 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1250 SYSCTL_CHILDREN(rack_timely), 1251 OID_AUTO, "noback_max", CTLFLAG_RW, 1252 &rack_use_max_for_nobackoff, 0, 1253 "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min"); 1254 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1255 SYSCTL_CHILDREN(rack_timely), 1256 OID_AUTO, "interim_timely_only", CTLFLAG_RW, 1257 &rack_timely_int_timely_only, 0, 1258 "Rack timely when doing interim timely's do we only do timely (no b/w consideration)"); 1259 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1260 SYSCTL_CHILDREN(rack_timely), 1261 OID_AUTO, "nonstop", CTLFLAG_RW, 1262 &rack_timely_no_stopping, 0, 1263 "Rack timely don't stop increase"); 1264 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1265 SYSCTL_CHILDREN(rack_timely), 1266 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW, 1267 &rack_down_raise_thresh, 100, 1268 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)"); 1269 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1270 SYSCTL_CHILDREN(rack_timely), 1271 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW, 1272 &rack_req_segs, 1, 1273 "Bottom dragging if not these many segments outstanding and room"); 1274 1275 /* TLP and Rack related parameters */ 1276 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1277 SYSCTL_CHILDREN(rack_sysctl_root), 1278 OID_AUTO, 1279 "tlp", 1280 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1281 "TLP and Rack related Controls"); 1282 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1283 SYSCTL_CHILDREN(rack_tlp), 1284 OID_AUTO, "use_rrr", CTLFLAG_RW, 1285 &use_rack_rr, 1, 1286 "Do we use Rack Rapid Recovery"); 1287 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1288 SYSCTL_CHILDREN(rack_tlp), 1289 OID_AUTO, "post_rec_labc", CTLFLAG_RW, 1290 &rack_max_abc_post_recovery, 2, 1291 "Since we do early recovery, do we override the l_abc to a value, if so what?"); 1292 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1293 SYSCTL_CHILDREN(rack_tlp), 1294 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW, 1295 &rack_non_rxt_use_cr, 0, 1296 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk"); 1297 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1298 SYSCTL_CHILDREN(rack_tlp), 1299 OID_AUTO, "tlpmethod", CTLFLAG_RW, 1300 &rack_tlp_threshold_use, TLP_USE_TWO_ONE, 1301 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); 1302 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1303 SYSCTL_CHILDREN(rack_tlp), 1304 OID_AUTO, "limit", CTLFLAG_RW, 1305 &rack_tlp_limit, 2, 1306 "How many TLP's can be sent without sending new data"); 1307 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1308 SYSCTL_CHILDREN(rack_tlp), 1309 OID_AUTO, "use_greater", CTLFLAG_RW, 1310 &rack_tlp_use_greater, 1, 1311 "Should we use the rack_rtt time if its greater than srtt"); 1312 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1313 SYSCTL_CHILDREN(rack_tlp), 1314 OID_AUTO, "tlpminto", CTLFLAG_RW, 1315 &rack_tlp_min, 10000, 1316 "TLP minimum timeout per the specification (in microseconds)"); 1317 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1318 SYSCTL_CHILDREN(rack_tlp), 1319 OID_AUTO, "send_oldest", CTLFLAG_RW, 1320 &rack_always_send_oldest, 0, 1321 "Should we always send the oldest TLP and RACK-TLP"); 1322 
SYSCTL_ADD_S32(&rack_sysctl_ctx, 1323 SYSCTL_CHILDREN(rack_tlp), 1324 OID_AUTO, "rack_tlimit", CTLFLAG_RW, 1325 &rack_limited_retran, 0, 1326 "How many times can a rack timeout drive out sends"); 1327 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1328 SYSCTL_CHILDREN(rack_tlp), 1329 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW, 1330 &rack_lower_cwnd_at_tlp, 0, 1331 "When a TLP completes a retran should we enter recovery"); 1332 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1333 SYSCTL_CHILDREN(rack_tlp), 1334 OID_AUTO, "reorder_thresh", CTLFLAG_RW, 1335 &rack_reorder_thresh, 2, 1336 "What factor for rack will be added when seeing reordering (shift right)"); 1337 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1338 SYSCTL_CHILDREN(rack_tlp), 1339 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW, 1340 &rack_tlp_thresh, 1, 1341 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); 1342 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1343 SYSCTL_CHILDREN(rack_tlp), 1344 OID_AUTO, "reorder_fade", CTLFLAG_RW, 1345 &rack_reorder_fade, 60000000, 1346 "Does reorder detection fade, if so how many microseconds (0 means never)"); 1347 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1348 SYSCTL_CHILDREN(rack_tlp), 1349 OID_AUTO, "pktdelay", CTLFLAG_RW, 1350 &rack_pkt_delay, 1000, 1351 "Extra RACK time (in microseconds) besides reordering thresh"); 1352 1353 /* Timer related controls */ 1354 rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1355 SYSCTL_CHILDREN(rack_sysctl_root), 1356 OID_AUTO, 1357 "timers", 1358 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1359 "Timer related controls"); 1360 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1361 SYSCTL_CHILDREN(rack_timers), 1362 OID_AUTO, "persmin", CTLFLAG_RW, 1363 &rack_persist_min, 250000, 1364 "What is the minimum time in microseconds between persists"); 1365 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1366 SYSCTL_CHILDREN(rack_timers), 1367 OID_AUTO, "persmax", CTLFLAG_RW, 1368 &rack_persist_max, 2000000, 1369 "What is the largest delay in microseconds between persists"); 1370 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1371 SYSCTL_CHILDREN(rack_timers), 1372 OID_AUTO, "delayed_ack", CTLFLAG_RW, 1373 &rack_delayed_ack_time, 40000, 1374 "Delayed ack time (40ms in microseconds)"); 1375 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1376 SYSCTL_CHILDREN(rack_timers), 1377 OID_AUTO, "minrto", CTLFLAG_RW, 1378 &rack_rto_min, 30000, 1379 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP"); 1380 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1381 SYSCTL_CHILDREN(rack_timers), 1382 OID_AUTO, "maxrto", CTLFLAG_RW, 1383 &rack_rto_max, 4000000, 1384 "Maximum RTO in microseconds -- should be at least as large as min_rto"); 1385 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1386 SYSCTL_CHILDREN(rack_timers), 1387 OID_AUTO, "minto", CTLFLAG_RW, 1388 &rack_min_to, 1000, 1389 "Minimum rack timeout in microseconds"); 1390 /* Measure controls */ 1391 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1392 SYSCTL_CHILDREN(rack_sysctl_root), 1393 OID_AUTO, 1394 "measure", 1395 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1396 "Measure related controls"); 1397 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1398 SYSCTL_CHILDREN(rack_measure), 1399 OID_AUTO, "wma_divisor", CTLFLAG_RW, 1400 &rack_wma_divisor, 8, 1401 "When doing b/w calculation what is the divisor for the WMA"); 1402 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1403 SYSCTL_CHILDREN(rack_measure), 1404 OID_AUTO, "end_cwnd", CTLFLAG_RW, 1405 &rack_cwnd_block_ends_measure, 0, 1406 "Does a cwnd just-return end the measurement window (app limited)"); 1407 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1408 SYSCTL_CHILDREN(rack_measure), 1409 OID_AUTO, "end_rwnd", CTLFLAG_RW, 1410 
&rack_rwnd_block_ends_measure, 0, 1411 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); 1412 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1413 SYSCTL_CHILDREN(rack_measure), 1414 OID_AUTO, "min_target", CTLFLAG_RW, 1415 &rack_def_data_window, 20, 1416 "What is the minimum target window (in mss) for a GP measurements"); 1417 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1418 SYSCTL_CHILDREN(rack_measure), 1419 OID_AUTO, "goal_bdp", CTLFLAG_RW, 1420 &rack_goal_bdp, 2, 1421 "What is the goal BDP to measure"); 1422 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1423 SYSCTL_CHILDREN(rack_measure), 1424 OID_AUTO, "min_srtts", CTLFLAG_RW, 1425 &rack_min_srtts, 1, 1426 "What is the goal BDP to measure"); 1427 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1428 SYSCTL_CHILDREN(rack_measure), 1429 OID_AUTO, "min_measure_tim", CTLFLAG_RW, 1430 &rack_min_measure_usec, 0, 1431 "What is the Minimum time time for a measurement if 0, this is off"); 1432 /* Features */ 1433 rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1434 SYSCTL_CHILDREN(rack_sysctl_root), 1435 OID_AUTO, 1436 "features", 1437 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1438 "Feature controls"); 1439 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1440 SYSCTL_CHILDREN(rack_features), 1441 OID_AUTO, "rxt_clamp_thresh", CTLFLAG_RW, 1442 &rack_rxt_clamp_thresh, 0, 1443 "Bit encoded clamping setup bits CCCC CCCCC UUUU UULF PPPP PPPP PPPP PPPP"); 1444 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1445 SYSCTL_CHILDREN(rack_features), 1446 OID_AUTO, "hybrid_set_maxseg", CTLFLAG_RW, 1447 &rack_hybrid_allow_set_maxseg, 0, 1448 "Should hybrid pacing allow the setmss command"); 1449 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1450 SYSCTL_CHILDREN(rack_features), 1451 OID_AUTO, "cmpack", CTLFLAG_RW, 1452 &rack_use_cmp_acks, 1, 1453 "Should RACK have LRO send compressed acks"); 1454 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1455 SYSCTL_CHILDREN(rack_features), 1456 OID_AUTO, "fsb", CTLFLAG_RW, 1457 &rack_use_fsb, 1, 1458 "Should RACK use the fast send block?"); 1459 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1460 SYSCTL_CHILDREN(rack_features), 1461 OID_AUTO, "rfo", CTLFLAG_RW, 1462 &rack_use_rfo, 1, 1463 "Should RACK use rack_fast_output()?"); 1464 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1465 SYSCTL_CHILDREN(rack_features), 1466 OID_AUTO, "rsmrfo", CTLFLAG_RW, 1467 &rack_use_rsm_rfo, 1, 1468 "Should RACK use rack_fast_rsm_output()?"); 1469 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1470 SYSCTL_CHILDREN(rack_features), 1471 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW, 1472 &rack_enable_mqueue_for_nonpaced, 0, 1473 "Should RACK use mbuf queuing for non-paced connections"); 1474 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1475 SYSCTL_CHILDREN(rack_features), 1476 OID_AUTO, "hystartplusplus", CTLFLAG_RW, 1477 &rack_do_hystart, 0, 1478 "Should RACK enable HyStart++ on connections?"); 1479 /* Misc rack controls */ 1480 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1481 SYSCTL_CHILDREN(rack_sysctl_root), 1482 OID_AUTO, 1483 "misc", 1484 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1485 "Misc related controls"); 1486 #ifdef TCP_ACCOUNTING 1487 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1488 SYSCTL_CHILDREN(rack_misc), 1489 OID_AUTO, "tcp_acct", CTLFLAG_RW, 1490 &rack_tcp_accounting, 0, 1491 "Should we turn on TCP accounting for all rack sessions?"); 1492 #endif 1493 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1494 SYSCTL_CHILDREN(rack_misc), 1495 OID_AUTO, "dnd", CTLFLAG_RW, 1496 &rack_dnd_default, 0, 1497 "Do not disturb default for rack_rrr = 3"); 1498 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1499 SYSCTL_CHILDREN(rack_misc), 1500 OID_AUTO, "sad_seg_per", CTLFLAG_RW, 1501 &sad_seg_size_per, 800, 1502 
"Percentage of segment size needed in a sack 800 = 80.0?"); 1503 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1504 SYSCTL_CHILDREN(rack_misc), 1505 OID_AUTO, "rxt_controls", CTLFLAG_RW, 1506 &rack_rxt_controls, 0, 1507 "Retransmit sending size controls (valid values 0, 1, 2 default=1)?"); 1508 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1509 SYSCTL_CHILDREN(rack_misc), 1510 OID_AUTO, "rack_hibeta", CTLFLAG_RW, 1511 &rack_hibeta_setting, 0, 1512 "Do we ue a high beta (80 instead of 50)?"); 1513 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1514 SYSCTL_CHILDREN(rack_misc), 1515 OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW, 1516 &rack_apply_rtt_with_reduced_conf, 0, 1517 "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?"); 1518 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1519 SYSCTL_CHILDREN(rack_misc), 1520 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW, 1521 &rack_dsack_std_based, 3, 1522 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?"); 1523 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1524 SYSCTL_CHILDREN(rack_misc), 1525 OID_AUTO, "prr_addback_max", CTLFLAG_RW, 1526 &rack_prr_addbackmax, 2, 1527 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?"); 1528 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1529 SYSCTL_CHILDREN(rack_misc), 1530 OID_AUTO, "stats_gets_ms", CTLFLAG_RW, 1531 &rack_stats_gets_ms_rtt, 1, 1532 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); 1533 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1534 SYSCTL_CHILDREN(rack_misc), 1535 OID_AUTO, "clientlowbuf", CTLFLAG_RW, 1536 &rack_client_low_buf, 0, 1537 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?"); 1538 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1539 SYSCTL_CHILDREN(rack_misc), 1540 OID_AUTO, "defprofile", CTLFLAG_RW, 1541 &rack_def_profile, 0, 1542 "Should RACK use a default profile (0=no, num == profile num)?"); 1543 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1544 SYSCTL_CHILDREN(rack_misc), 1545 OID_AUTO, "shared_cwnd", CTLFLAG_RW, 1546 &rack_enable_shared_cwnd, 1, 1547 "Should RACK try to use the shared cwnd on connections where allowed"); 1548 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1549 SYSCTL_CHILDREN(rack_misc), 1550 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW, 1551 &rack_limits_scwnd, 1, 1552 "Should RACK place low end time limits on the shared cwnd feature"); 1553 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1554 SYSCTL_CHILDREN(rack_misc), 1555 OID_AUTO, "no_prr", CTLFLAG_RW, 1556 &rack_disable_prr, 0, 1557 "Should RACK not use prr and only pace (must have pacing on)"); 1558 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1559 SYSCTL_CHILDREN(rack_misc), 1560 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1561 &rack_verbose_logging, 0, 1562 "Should RACK black box logging be verbose"); 1563 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1564 SYSCTL_CHILDREN(rack_misc), 1565 OID_AUTO, "data_after_close", CTLFLAG_RW, 1566 &rack_ignore_data_after_close, 1, 1567 "Do we hold off sending a RST until all pending data is ack'd"); 1568 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1569 SYSCTL_CHILDREN(rack_misc), 1570 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1571 &rack_sack_not_required, 1, 1572 "Do we allow rack to run on connections not supporting SACK"); 1573 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1574 SYSCTL_CHILDREN(rack_misc), 1575 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 1576 &rack_send_a_lot_in_prr, 1, 1577 "Send a lot in prr"); 1578 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1579 SYSCTL_CHILDREN(rack_misc), 1580 OID_AUTO, "autoscale", CTLFLAG_RW, 1581 &rack_autosndbuf_inc, 20, 
1582 "What percentage should rack scale up its snd buffer by?"); 1583 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1584 SYSCTL_CHILDREN(rack_misc), 1585 OID_AUTO, "rnds_for_rxt_clamp", CTLFLAG_RW, 1586 &rack_rxt_min_rnds, 10, 1587 "Number of rounds needed between RTT clamps due to high loss rates"); 1588 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1589 SYSCTL_CHILDREN(rack_misc), 1590 OID_AUTO, "rnds_for_unclamp", CTLFLAG_RW, 1591 &rack_unclamp_round_thresh, 100, 1592 "Number of rounds needed with no loss to unclamp"); 1593 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1594 SYSCTL_CHILDREN(rack_misc), 1595 OID_AUTO, "rxt_threshs_for_unclamp", CTLFLAG_RW, 1596 &rack_unclamp_rxt_thresh, 5, 1597 "Percentage of retransmits we need to be under to unclamp (5 = .5 percent)\n"); 1598 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1599 SYSCTL_CHILDREN(rack_misc), 1600 OID_AUTO, "clamp_ss_upper", CTLFLAG_RW, 1601 &rack_clamp_ss_upper, 110, 1602 "Clamp percentage ceiling in SS?"); 1603 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1604 SYSCTL_CHILDREN(rack_misc), 1605 OID_AUTO, "clamp_ca_upper", CTLFLAG_RW, 1606 &rack_clamp_ca_upper, 110, 1607 "Clamp percentage ceiling in CA?"); 1608 /* Sack Attacker detection stuff */ 1609 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1610 SYSCTL_CHILDREN(rack_attack), 1611 OID_AUTO, "merge_out", CTLFLAG_RW, 1612 &rack_merge_out_sacks_on_attack, 0, 1613 "Do we merge the sendmap when we decide we are being attacked?"); 1614 1615 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1616 SYSCTL_CHILDREN(rack_attack), 1617 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 1618 &rack_highest_sack_thresh_seen, 0, 1619 "Highest sack to ack ratio seen"); 1620 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1621 SYSCTL_CHILDREN(rack_attack), 1622 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW, 1623 &rack_highest_move_thresh_seen, 0, 1624 "Highest move to non-move ratio seen"); 1625 rack_ack_total = counter_u64_alloc(M_WAITOK); 1626 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1627 SYSCTL_CHILDREN(rack_attack), 1628 OID_AUTO, "acktotal", CTLFLAG_RD, 1629 &rack_ack_total, 1630 "Total number of Ack's"); 1631 rack_express_sack = counter_u64_alloc(M_WAITOK); 1632 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1633 SYSCTL_CHILDREN(rack_attack), 1634 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1635 &rack_express_sack, 1636 "Total expresss number of Sack's"); 1637 rack_sack_total = counter_u64_alloc(M_WAITOK); 1638 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1639 SYSCTL_CHILDREN(rack_attack), 1640 OID_AUTO, "sacktotal", CTLFLAG_RD, 1641 &rack_sack_total, 1642 "Total number of SACKs"); 1643 rack_move_none = counter_u64_alloc(M_WAITOK); 1644 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1645 SYSCTL_CHILDREN(rack_attack), 1646 OID_AUTO, "move_none", CTLFLAG_RD, 1647 &rack_move_none, 1648 "Total number of SACK index reuse of positions under threshold"); 1649 rack_move_some = counter_u64_alloc(M_WAITOK); 1650 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1651 SYSCTL_CHILDREN(rack_attack), 1652 OID_AUTO, "move_some", CTLFLAG_RD, 1653 &rack_move_some, 1654 "Total number of SACK index reuse of positions over threshold"); 1655 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 1656 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1657 SYSCTL_CHILDREN(rack_attack), 1658 OID_AUTO, "attacks", CTLFLAG_RD, 1659 &rack_sack_attacks_detected, 1660 "Total number of SACK attackers that had sack disabled"); 1661 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 1662 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1663 SYSCTL_CHILDREN(rack_attack), 1664 OID_AUTO, "reversed", CTLFLAG_RD, 1665 &rack_sack_attacks_reversed, 1666 "Total number of 
SACK attackers that were later determined false positive"); 1667 rack_sack_attacks_suspect = counter_u64_alloc(M_WAITOK); 1668 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1669 SYSCTL_CHILDREN(rack_attack), 1670 OID_AUTO, "suspect", CTLFLAG_RD, 1671 &rack_sack_attacks_suspect, 1672 "Total number of SACKs that triggered early detection"); 1673 1674 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 1675 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1676 SYSCTL_CHILDREN(rack_attack), 1677 OID_AUTO, "nextmerge", CTLFLAG_RD, 1678 &rack_sack_used_next_merge, 1679 "Total number of times we used the next merge"); 1680 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 1681 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1682 SYSCTL_CHILDREN(rack_attack), 1683 OID_AUTO, "prevmerge", CTLFLAG_RD, 1684 &rack_sack_used_prev_merge, 1685 "Total number of times we used the prev merge"); 1686 /* Counters */ 1687 rack_total_bytes = counter_u64_alloc(M_WAITOK); 1688 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1689 SYSCTL_CHILDREN(rack_counters), 1690 OID_AUTO, "totalbytes", CTLFLAG_RD, 1691 &rack_total_bytes, 1692 "Total number of bytes sent"); 1693 rack_fto_send = counter_u64_alloc(M_WAITOK); 1694 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1695 SYSCTL_CHILDREN(rack_counters), 1696 OID_AUTO, "fto_send", CTLFLAG_RD, 1697 &rack_fto_send, "Total number of rack_fast_output sends"); 1698 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK); 1699 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1700 SYSCTL_CHILDREN(rack_counters), 1701 OID_AUTO, "fto_rsm_send", CTLFLAG_RD, 1702 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends"); 1703 rack_nfto_resend = counter_u64_alloc(M_WAITOK); 1704 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1705 SYSCTL_CHILDREN(rack_counters), 1706 OID_AUTO, "nfto_resend", CTLFLAG_RD, 1707 &rack_nfto_resend, "Total number of rack_output retransmissions"); 1708 rack_non_fto_send = counter_u64_alloc(M_WAITOK); 1709 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1710 SYSCTL_CHILDREN(rack_counters), 1711 OID_AUTO, "nfto_send", CTLFLAG_RD, 1712 &rack_non_fto_send, "Total number of rack_output first sends"); 1713 rack_extended_rfo = counter_u64_alloc(M_WAITOK); 1714 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1715 SYSCTL_CHILDREN(rack_counters), 1716 OID_AUTO, "rfo_extended", CTLFLAG_RD, 1717 &rack_extended_rfo, "Total number of times we extended rfo"); 1718 1719 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK); 1720 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1721 SYSCTL_CHILDREN(rack_counters), 1722 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD, 1723 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing"); 1724 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK); 1725 1726 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1727 SYSCTL_CHILDREN(rack_counters), 1728 OID_AUTO, "hwpace_lost", CTLFLAG_RD, 1729 &rack_hw_pace_lost, "Total number of times we lost hw pacing after it was initialized"); 1730 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 1731 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1732 SYSCTL_CHILDREN(rack_counters), 1733 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 1734 &rack_tlp_tot, 1735 "Total number of tail loss probe expirations"); 1736 rack_tlp_newdata = counter_u64_alloc(M_WAITOK); 1737 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1738 SYSCTL_CHILDREN(rack_counters), 1739 OID_AUTO, "tlp_new", CTLFLAG_RD, 1740 &rack_tlp_newdata, 1741 "Total number of tail loss probe sending new data"); 1742 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 1743 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1744
SYSCTL_CHILDREN(rack_counters), 1745 OID_AUTO, "tlp_retran", CTLFLAG_RD, 1746 &rack_tlp_retran, 1747 "Total number of tail loss probe sending retransmitted data"); 1748 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 1749 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1750 SYSCTL_CHILDREN(rack_counters), 1751 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 1752 &rack_tlp_retran_bytes, 1753 "Total bytes of tail loss probe sending retransmitted data"); 1754 rack_to_tot = counter_u64_alloc(M_WAITOK); 1755 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1756 SYSCTL_CHILDREN(rack_counters), 1757 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 1758 &rack_to_tot, 1759 "Total number of times the rack timeout expired"); 1760 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 1761 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1762 SYSCTL_CHILDREN(rack_counters), 1763 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 1764 &rack_saw_enobuf, 1765 "Total number of times a send returned enobuf for non-hdwr paced connections"); 1766 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK); 1767 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1768 SYSCTL_CHILDREN(rack_counters), 1769 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD, 1770 &rack_saw_enobuf_hw, 1771 "Total number of times a send returned enobuf for hdwr paced connections"); 1772 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 1773 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1774 SYSCTL_CHILDREN(rack_counters), 1775 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 1776 &rack_saw_enetunreach, 1777 "Total number of times a send received an enetunreachable"); 1778 rack_hot_alloc = counter_u64_alloc(M_WAITOK); 1779 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1780 SYSCTL_CHILDREN(rack_counters), 1781 OID_AUTO, "alloc_hot", CTLFLAG_RD, 1782 &rack_hot_alloc, 1783 "Total allocations from the top of our list"); 1784 rack_to_alloc = counter_u64_alloc(M_WAITOK); 1785 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1786 SYSCTL_CHILDREN(rack_counters), 1787 OID_AUTO, "allocs", CTLFLAG_RD, 1788 &rack_to_alloc, 1789 "Total allocations of tracking structures"); 1790 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 1791 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1792 SYSCTL_CHILDREN(rack_counters), 1793 OID_AUTO, "allochard", CTLFLAG_RD, 1794 &rack_to_alloc_hard, 1795 "Total allocations done with sleeping the hard way"); 1796 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 1797 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1798 SYSCTL_CHILDREN(rack_counters), 1799 OID_AUTO, "allocemerg", CTLFLAG_RD, 1800 &rack_to_alloc_emerg, 1801 "Total allocations done from emergency cache"); 1802 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 1803 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1804 SYSCTL_CHILDREN(rack_counters), 1805 OID_AUTO, "alloc_limited", CTLFLAG_RD, 1806 &rack_to_alloc_limited, 1807 "Total allocations dropped due to limit"); 1808 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 1809 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1810 SYSCTL_CHILDREN(rack_counters), 1811 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 1812 &rack_alloc_limited_conns, 1813 "Connections with allocations dropped due to limit"); 1814 rack_split_limited = counter_u64_alloc(M_WAITOK); 1815 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1816 SYSCTL_CHILDREN(rack_counters), 1817 OID_AUTO, "split_limited", CTLFLAG_RD, 1818 &rack_split_limited, 1819 "Split allocations dropped due to limit"); 1820 rack_rxt_clamps_cwnd = counter_u64_alloc(M_WAITOK); 1821 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1822 SYSCTL_CHILDREN(rack_counters), 1823 OID_AUTO, "rxt_clamps_cwnd", CTLFLAG_RD, 1824
&rack_rxt_clamps_cwnd, 1825 "Number of times that excessive rxt clamped the cwnd down"); 1826 rack_rxt_clamps_cwnd_uniq = counter_u64_alloc(M_WAITOK); 1827 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1828 SYSCTL_CHILDREN(rack_counters), 1829 OID_AUTO, "rxt_clamps_cwnd_uniq", CTLFLAG_RD, 1830 &rack_rxt_clamps_cwnd_uniq, 1831 "Number of connections that have had excessive rxt clamped the cwnd down"); 1832 rack_persists_sends = counter_u64_alloc(M_WAITOK); 1833 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1834 SYSCTL_CHILDREN(rack_counters), 1835 OID_AUTO, "persist_sends", CTLFLAG_RD, 1836 &rack_persists_sends, 1837 "Number of times we sent a persist probe"); 1838 rack_persists_acks = counter_u64_alloc(M_WAITOK); 1839 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1840 SYSCTL_CHILDREN(rack_counters), 1841 OID_AUTO, "persist_acks", CTLFLAG_RD, 1842 &rack_persists_acks, 1843 "Number of times a persist probe was acked"); 1844 rack_persists_loss = counter_u64_alloc(M_WAITOK); 1845 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1846 SYSCTL_CHILDREN(rack_counters), 1847 OID_AUTO, "persist_loss", CTLFLAG_RD, 1848 &rack_persists_loss, 1849 "Number of times we detected a lost persist probe (no ack)"); 1850 rack_persists_lost_ends = counter_u64_alloc(M_WAITOK); 1851 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1852 SYSCTL_CHILDREN(rack_counters), 1853 OID_AUTO, "persist_loss_ends", CTLFLAG_RD, 1854 &rack_persists_lost_ends, 1855 "Number of lost persist probe (no ack) that the run ended with a PERSIST abort"); 1856 #ifdef INVARIANTS 1857 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK); 1858 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1859 SYSCTL_CHILDREN(rack_counters), 1860 OID_AUTO, "map_adjust_req", CTLFLAG_RD, 1861 &rack_adjust_map_bw, 1862 "Number of times we hit the case where the sb went up and down on a sendmap entry"); 1863 #endif 1864 rack_multi_single_eq = counter_u64_alloc(M_WAITOK); 1865 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1866 SYSCTL_CHILDREN(rack_counters), 1867 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD, 1868 &rack_multi_single_eq, 1869 "Number of compressed acks total represented"); 1870 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK); 1871 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1872 SYSCTL_CHILDREN(rack_counters), 1873 OID_AUTO, "cmp_ack_not", CTLFLAG_RD, 1874 &rack_proc_non_comp_ack, 1875 "Number of non compresseds acks that we processed"); 1876 1877 1878 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 1879 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1880 SYSCTL_CHILDREN(rack_counters), 1881 OID_AUTO, "sack_long", CTLFLAG_RD, 1882 &rack_sack_proc_all, 1883 "Total times we had to walk whole list for sack processing"); 1884 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 1885 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1886 SYSCTL_CHILDREN(rack_counters), 1887 OID_AUTO, "sack_restart", CTLFLAG_RD, 1888 &rack_sack_proc_restart, 1889 "Total times we had to walk whole list due to a restart"); 1890 rack_sack_proc_short = counter_u64_alloc(M_WAITOK); 1891 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1892 SYSCTL_CHILDREN(rack_counters), 1893 OID_AUTO, "sack_short", CTLFLAG_RD, 1894 &rack_sack_proc_short, 1895 "Total times we took shortcut for sack processing"); 1896 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK); 1897 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1898 SYSCTL_CHILDREN(rack_attack), 1899 OID_AUTO, "skipacked", CTLFLAG_RD, 1900 &rack_sack_skipped_acked, 1901 "Total number of times we skipped previously sacked"); 1902 rack_sack_splits = counter_u64_alloc(M_WAITOK); 1903 
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1904 SYSCTL_CHILDREN(rack_attack), 1905 OID_AUTO, "ofsplit", CTLFLAG_RD, 1906 &rack_sack_splits, 1907 "Total number of times we did the old fashion tree split"); 1908 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 1909 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1910 SYSCTL_CHILDREN(rack_counters), 1911 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 1912 &rack_input_idle_reduces, 1913 "Total number of idle reductions on input"); 1914 rack_collapsed_win_seen = counter_u64_alloc(M_WAITOK); 1915 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1916 SYSCTL_CHILDREN(rack_counters), 1917 OID_AUTO, "collapsed_win_seen", CTLFLAG_RD, 1918 &rack_collapsed_win_seen, 1919 "Total number of collapsed window events seen (where our window shrinks)"); 1920 1921 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 1922 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1923 SYSCTL_CHILDREN(rack_counters), 1924 OID_AUTO, "collapsed_win", CTLFLAG_RD, 1925 &rack_collapsed_win, 1926 "Total number of collapsed window events where we mark packets"); 1927 rack_collapsed_win_rxt = counter_u64_alloc(M_WAITOK); 1928 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1929 SYSCTL_CHILDREN(rack_counters), 1930 OID_AUTO, "collapsed_win_rxt", CTLFLAG_RD, 1931 &rack_collapsed_win_rxt, 1932 "Total number of packets that were retransmitted"); 1933 rack_collapsed_win_rxt_bytes = counter_u64_alloc(M_WAITOK); 1934 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1935 SYSCTL_CHILDREN(rack_counters), 1936 OID_AUTO, "collapsed_win_bytes", CTLFLAG_RD, 1937 &rack_collapsed_win_rxt_bytes, 1938 "Total number of bytes that were retransmitted"); 1939 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 1940 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1941 SYSCTL_CHILDREN(rack_counters), 1942 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 1943 &rack_try_scwnd, 1944 "Total number of scwnd attempts"); 1945 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1946 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1947 OID_AUTO, "outsize", CTLFLAG_RD, 1948 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 1949 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 1950 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1951 OID_AUTO, "opts", CTLFLAG_RD, 1952 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 1953 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 1954 SYSCTL_CHILDREN(rack_sysctl_root), 1955 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1956 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 1957 } 1958 1959 static uint32_t 1960 rc_init_window(struct tcp_rack *rack) 1961 { 1962 uint32_t win; 1963 1964 if (rack->rc_init_win == 0) { 1965 /* 1966 * Nothing set by the user, use the system stack 1967 * default. 
1968 */ 1969 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 1970 } 1971 win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win; 1972 return (win); 1973 } 1974 1975 static uint64_t 1976 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 1977 { 1978 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 1979 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 1980 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1981 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 1982 else 1983 return (rack->r_ctl.rc_fixed_pacing_rate_ca); 1984 } 1985 1986 static void 1987 rack_log_hybrid_bw(struct tcp_rack *rack, uint32_t seq, uint64_t cbw, uint64_t tim, 1988 uint64_t data, uint8_t mod, uint16_t aux, 1989 struct tcp_sendfile_track *cur, int line) 1990 { 1991 #ifdef TCP_REQUEST_TRK 1992 int do_log = 0; 1993 1994 /* 1995 * The rate cap one is noisy and only should come out when normal BB logging 1996 * is enabled, the other logs (not RATE_CAP and NOT CAP_CALC) only come out 1997 * once per chunk and make up the BBpoint that can be turned on by the client. 1998 */ 1999 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) { 2000 /* 2001 * The very noisy two need to only come out when 2002 * we have verbose logging on. 2003 */ 2004 if (rack_verbose_logging != 0) 2005 do_log = tcp_bblogging_on(rack->rc_tp); 2006 else 2007 do_log = 0; 2008 } else if (mod != HYBRID_LOG_BW_MEASURE) { 2009 /* 2010 * All other less noisy logs here except the measure which 2011 * also needs to come out on the point and the log. 2012 */ 2013 do_log = tcp_bblogging_on(rack->rc_tp); 2014 } else { 2015 do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING); 2016 } 2017 2018 if (do_log) { 2019 union tcp_log_stackspecific log; 2020 struct timeval tv; 2021 uint64_t lt_bw; 2022 2023 /* Convert our ms to a microsecond */ 2024 memset(&log, 0, sizeof(log)); 2025 2026 log.u_bbr.cwnd_gain = line; 2027 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2028 log.u_bbr.rttProp = tim; 2029 log.u_bbr.bw_inuse = cbw; 2030 log.u_bbr.delRate = rack_get_gp_est(rack); 2031 lt_bw = rack_get_lt_bw(rack); 2032 log.u_bbr.flex1 = seq; 2033 log.u_bbr.pacing_gain = aux; 2034 /* lt_bw = < flex3 | flex2 > */ 2035 log.u_bbr.flex2 = (uint32_t)(lt_bw & 0x00000000ffffffff); 2036 log.u_bbr.flex3 = (uint32_t)((lt_bw >> 32) & 0x00000000ffffffff); 2037 /* Record the last obtained us rtt in inflight */ 2038 if (cur == NULL) { 2039 /* Make sure we are looking at the right log if an overide comes in */ 2040 cur = rack->r_ctl.rc_last_sft; 2041 } 2042 if (rack->r_ctl.rack_rs.rs_flags != RACK_RTT_EMPTY) 2043 log.u_bbr.inflight = rack->r_ctl.rack_rs.rs_us_rtt; 2044 else { 2045 /* Use the last known rtt i.e. 
the rack-rtt */ 2046 log.u_bbr.inflight = rack->rc_rack_rtt; 2047 } 2048 if (cur != NULL) { 2049 uint64_t off; 2050 2051 log.u_bbr.cur_del_rate = cur->deadline; 2052 if ((mod == HYBRID_LOG_RATE_CAP) || (mod == HYBRID_LOG_CAP_CALC)) { 2053 /* start = < lost | pkt_epoch > */ 2054 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); 2055 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2056 log.u_bbr.flex6 = cur->start_seq; 2057 log.u_bbr.pkts_out = cur->end_seq; 2058 } else { 2059 /* start = < lost | pkt_epoch > */ 2060 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); 2061 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2062 /* end = < pkts_out | flex6 > */ 2063 log.u_bbr.flex6 = (uint32_t)(cur->end & 0x00000000ffffffff); 2064 log.u_bbr.pkts_out = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); 2065 } 2066 /* first_send = <lt_epoch | epoch> */ 2067 log.u_bbr.epoch = (uint32_t)(cur->first_send & 0x00000000ffffffff); 2068 log.u_bbr.lt_epoch = (uint32_t)((cur->first_send >> 32) & 0x00000000ffffffff); 2069 /* localtime = <delivered | applimited>*/ 2070 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); 2071 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 2072 #ifdef TCP_REQUEST_TRK 2073 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 2074 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 2075 #endif 2076 log.u_bbr.flex4 = (uint32_t)(rack->rc_tp->t_sndbytes - cur->sent_at_fs); 2077 log.u_bbr.flex5 = (uint32_t)(rack->rc_tp->t_snd_rxt_bytes - cur->rxt_at_fs); 2078 log.u_bbr.flex7 = (uint16_t)cur->hybrid_flags; 2079 } else { 2080 log.u_bbr.flex7 = 0xffff; 2081 log.u_bbr.cur_del_rate = 0xffffffffffffffff; 2082 } 2083 /* 2084 * Compose bbr_state to be a bit wise 0000ADHF 2085 * where A is the always_pace flag 2086 * where D is the dgp_on flag 2087 * where H is the hybrid_mode on flag 2088 * where F is the use_fixed_rate flag. 
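		 * For example (illustrative values only): always_pace=1,
		 * dgp_on=0, hybrid_mode=1, use_fixed_rate=0 encodes as
		 * binary 0000 1010, i.e. bbr_state = 0x0a.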
2089 */ 2090 log.u_bbr.bbr_state = rack->rc_always_pace; 2091 log.u_bbr.bbr_state <<= 1; 2092 log.u_bbr.bbr_state |= rack->dgp_on; 2093 log.u_bbr.bbr_state <<= 1; 2094 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; 2095 log.u_bbr.bbr_state <<= 1; 2096 log.u_bbr.bbr_state |= rack->use_fixed_rate; 2097 log.u_bbr.flex8 = mod; 2098 tcp_log_event(rack->rc_tp, NULL, 2099 &rack->rc_inp->inp_socket->so_rcv, 2100 &rack->rc_inp->inp_socket->so_snd, 2101 TCP_HYBRID_PACING_LOG, 0, 2102 0, &log, false, NULL, __func__, __LINE__, &tv); 2103 2104 } 2105 #endif 2106 } 2107 2108 #ifdef TCP_REQUEST_TRK 2109 static void 2110 rack_log_hybrid_sends(struct tcp_rack *rack, struct tcp_sendfile_track *cur, int line) 2111 { 2112 if (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) { 2113 union tcp_log_stackspecific log; 2114 struct timeval tv; 2115 uint64_t off; 2116 2117 /* Convert our ms to a microsecond */ 2118 memset(&log, 0, sizeof(log)); 2119 2120 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2121 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 2122 log.u_bbr.delRate = cur->sent_at_fs; 2123 log.u_bbr.rttProp = rack->rc_tp->t_snd_rxt_bytes; 2124 log.u_bbr.bw_inuse = cur->rxt_at_fs; 2125 log.u_bbr.cwnd_gain = line; 2126 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 2127 log.u_bbr.bbr_substate = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 2128 /* start = < flex1 | flex2 > */ 2129 log.u_bbr.flex2 = (uint32_t)(cur->start & 0x00000000ffffffff); 2130 log.u_bbr.flex1 = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); 2131 /* end = < flex3 | flex4 > */ 2132 log.u_bbr.flex4 = (uint32_t)(cur->end & 0x00000000ffffffff); 2133 log.u_bbr.flex3 = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); 2134 2135 /* localtime = <delivered | applimited>*/ 2136 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); 2137 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 2138 /* client timestamp = <lt_epoch | epoch>*/ 2139 log.u_bbr.epoch = (uint32_t)(cur->timestamp & 0x00000000ffffffff); 2140 log.u_bbr.lt_epoch = (uint32_t)((cur->timestamp >> 32) & 0x00000000ffffffff); 2141 /* now set all the flags in */ 2142 log.u_bbr.pkts_out = cur->hybrid_flags; 2143 log.u_bbr.flex6 = cur->flags; 2144 /* 2145 * Last send time = <flex5 | pkt_epoch> note we do not distinguish cases 2146 * where a false retransmit occurred so first_send <-> lastsend may 2147 * include longer time then it actually took if we have a false rxt. 
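		 * As with the other 64-bit fields in this function the value is
		 * split low/high, e.g. a last_tmit_time_acked of 0x100000abcULL
		 * would be logged as pkt_epoch = 0x00000abc and flex5 = 0x1
		 * (illustrative values).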
2148 */ 2149 log.u_bbr.pkt_epoch = (uint32_t)(rack->r_ctl.last_tmit_time_acked & 0x00000000ffffffff); 2150 log.u_bbr.flex5 = (uint32_t)((rack->r_ctl.last_tmit_time_acked >> 32) & 0x00000000ffffffff); 2151 2152 log.u_bbr.flex8 = HYBRID_LOG_SENT_LOST; 2153 tcp_log_event(rack->rc_tp, NULL, 2154 &rack->rc_inp->inp_socket->so_rcv, 2155 &rack->rc_inp->inp_socket->so_snd, 2156 TCP_HYBRID_PACING_LOG, 0, 2157 0, &log, false, NULL, __func__, __LINE__, &tv); 2158 } 2159 } 2160 #endif 2161 2162 static inline uint64_t 2163 rack_compensate_for_linerate(struct tcp_rack *rack, uint64_t bw) 2164 { 2165 uint64_t ret_bw, ether; 2166 uint64_t u_segsiz; 2167 2168 ether = rack->rc_tp->t_maxseg + sizeof(struct tcphdr); 2169 if (rack->r_is_v6){ 2170 #ifdef INET6 2171 ether += sizeof(struct ip6_hdr); 2172 #endif 2173 ether += 14; /* eheader size 6+6+2 */ 2174 } else { 2175 #ifdef INET 2176 ether += sizeof(struct ip); 2177 #endif 2178 ether += 14; /* eheader size 6+6+2 */ 2179 } 2180 u_segsiz = (uint64_t)min(ctf_fixed_maxseg(rack->rc_tp), rack->r_ctl.rc_pace_min_segs); 2181 ret_bw = bw; 2182 ret_bw *= ether; 2183 ret_bw /= u_segsiz; 2184 return (ret_bw); 2185 } 2186 2187 static void 2188 rack_rate_cap_bw(struct tcp_rack *rack, uint64_t *bw, int *capped) 2189 { 2190 #ifdef TCP_REQUEST_TRK 2191 struct timeval tv; 2192 uint64_t timenow, timeleft, lenleft, lengone, calcbw; 2193 #endif 2194 2195 if (rack->r_ctl.bw_rate_cap == 0) 2196 return; 2197 #ifdef TCP_REQUEST_TRK 2198 if (rack->rc_catch_up && rack->rc_hybrid_mode && 2199 (rack->r_ctl.rc_last_sft != NULL)) { 2200 /* 2201 * We have a dynamic cap. The original target 2202 * is in bw_rate_cap, but we need to look at 2203 * how long it is until we hit the deadline. 2204 */ 2205 struct tcp_sendfile_track *ent; 2206 2207 ent = rack->r_ctl.rc_last_sft; 2208 microuptime(&tv); 2209 timenow = tcp_tv_to_lusectick(&tv); 2210 if (timenow >= ent->deadline) { 2211 /* No time left we do DGP only */ 2212 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2213 0, 0, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__); 2214 rack->r_ctl.bw_rate_cap = 0; 2215 return; 2216 } 2217 /* We have the time */ 2218 timeleft = rack->r_ctl.rc_last_sft->deadline - timenow; 2219 if (timeleft < HPTS_MSEC_IN_SEC) { 2220 /* If there is less than a ms left just use DGPs rate */ 2221 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2222 0, timeleft, 0, HYBRID_LOG_OUTOFTIME, 0, ent, __LINE__); 2223 rack->r_ctl.bw_rate_cap = 0; 2224 return; 2225 } 2226 /* 2227 * Now lets find the amount of data left to send. 2228 * 2229 * Now ideally we want to use the end_seq to figure out how much more 2230 * but it might not be possible (only if we have the TRACK_FG_COMP on the entry.. 2231 */ 2232 if (ent->flags & TCP_TRK_TRACK_FLG_COMP) { 2233 if (SEQ_GT(ent->end_seq, rack->rc_tp->snd_una)) 2234 lenleft = ent->end_seq - rack->rc_tp->snd_una; 2235 else { 2236 /* TSNH, we should catch it at the send */ 2237 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2238 0, timeleft, 0, HYBRID_LOG_CAPERROR, 0, ent, __LINE__); 2239 rack->r_ctl.bw_rate_cap = 0; 2240 return; 2241 } 2242 } else { 2243 /* 2244 * The hard way, figure out how much is gone and then 2245 * take that away from the total the client asked for 2246 * (thats off by tls overhead if this is tls). 
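			 * Either way, once timeleft and lenleft are known the
			 * catch-up rate computed below is simply
			 * lenleft * HPTS_USEC_IN_SEC / timeleft, e.g.
			 * (illustrative numbers) 1 MB left with 500 ms to the
			 * deadline works out to roughly 2 MB/s before the
			 * line-rate compensation is applied.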
2247 */ 2248 if (SEQ_GT(rack->rc_tp->snd_una, ent->start_seq)) 2249 lengone = rack->rc_tp->snd_una - ent->start_seq; 2250 else 2251 lengone = 0; 2252 if (lengone < (ent->end - ent->start)) 2253 lenleft = (ent->end - ent->start) - lengone; 2254 else { 2255 /* TSNH, we should catch it at the send */ 2256 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2257 0, timeleft, lengone, HYBRID_LOG_CAPERROR, 0, ent, __LINE__); 2258 rack->r_ctl.bw_rate_cap = 0; 2259 return; 2260 } 2261 } 2262 if (lenleft == 0) { 2263 /* We have it all sent */ 2264 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2265 0, timeleft, lenleft, HYBRID_LOG_ALLSENT, 0, ent, __LINE__); 2266 if (rack->r_ctl.bw_rate_cap) 2267 goto normal_ratecap; 2268 else 2269 return; 2270 } 2271 calcbw = lenleft * HPTS_USEC_IN_SEC; 2272 calcbw /= timeleft; 2273 /* Now we must compensate for IP/TCP overhead */ 2274 calcbw = rack_compensate_for_linerate(rack, calcbw); 2275 /* Update the bit rate cap */ 2276 rack->r_ctl.bw_rate_cap = calcbw; 2277 if ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && 2278 (rack_hybrid_allow_set_maxseg == 1) && 2279 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { 2280 /* Lets set in a smaller mss possibly here to match our rate-cap */ 2281 uint32_t orig_max; 2282 2283 orig_max = rack->r_ctl.rc_pace_max_segs; 2284 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; 2285 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, calcbw, ctf_fixed_maxseg(rack->rc_tp)); 2286 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5); 2287 } 2288 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2289 calcbw, timeleft, lenleft, HYBRID_LOG_CAP_CALC, 0, ent, __LINE__); 2290 if ((calcbw > 0) && (*bw > calcbw)) { 2291 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2292 *bw, ent->deadline, lenleft, HYBRID_LOG_RATE_CAP, 0, ent, __LINE__); 2293 *capped = 1; 2294 *bw = calcbw; 2295 } 2296 return; 2297 } 2298 normal_ratecap: 2299 #endif 2300 if ((rack->r_ctl.bw_rate_cap > 0) && (*bw > rack->r_ctl.bw_rate_cap)) { 2301 #ifdef TCP_REQUEST_TRK 2302 if (rack->rc_hybrid_mode && 2303 rack->rc_catch_up && 2304 (rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && 2305 (rack_hybrid_allow_set_maxseg == 1) && 2306 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { 2307 /* Lets set in a smaller mss possibly here to match our rate-cap */ 2308 uint32_t orig_max; 2309 2310 orig_max = rack->r_ctl.rc_pace_max_segs; 2311 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; 2312 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, rack->r_ctl.bw_rate_cap, ctf_fixed_maxseg(rack->rc_tp)); 2313 rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LINE__, 5); 2314 } 2315 #endif 2316 *capped = 1; 2317 *bw = rack->r_ctl.bw_rate_cap; 2318 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 2319 *bw, 0, 0, 2320 HYBRID_LOG_RATE_CAP, 1, NULL, __LINE__); 2321 } 2322 } 2323 2324 static uint64_t 2325 rack_get_gp_est(struct tcp_rack *rack) 2326 { 2327 uint64_t bw, lt_bw, ret_bw; 2328 2329 if (rack->rc_gp_filled == 0) { 2330 /* 2331 * We have yet no b/w measurement, 2332 * if we have a user set initial bw 2333 * return it. If we don't have that and 2334 * we have an srtt, use the tcp IW (10) to 2335 * calculate a fictional b/w over the SRTT 2336 * which is more or less a guess. 
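		 * (For example, an IW of 10 * 1448 bytes against a 100 ms SRTT
		 * works out to roughly 145 KB/s, about 1.2 Mbit/s; purely an
		 * illustrative guess.)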
Note 2337 * we don't use our IW from rack on purpose 2338 * so if we have like IW=30, we are not 2339 * calculating a "huge" b/w. 2340 */ 2341 uint64_t srtt; 2342 2343 lt_bw = rack_get_lt_bw(rack); 2344 if (lt_bw) { 2345 /* 2346 * No goodput bw but a long-term b/w does exist 2347 * lets use that. 2348 */ 2349 ret_bw = lt_bw; 2350 goto compensate; 2351 } 2352 if (rack->r_ctl.init_rate) 2353 return (rack->r_ctl.init_rate); 2354 2355 /* Ok lets come up with the IW guess, if we have a srtt */ 2356 if (rack->rc_tp->t_srtt == 0) { 2357 /* 2358 * Go with old pacing method 2359 * i.e. burst mitigation only. 2360 */ 2361 return (0); 2362 } 2363 /* Ok lets get the initial TCP win (not racks) */ 2364 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 2365 srtt = (uint64_t)rack->rc_tp->t_srtt; 2366 bw *= (uint64_t)USECS_IN_SECOND; 2367 bw /= srtt; 2368 ret_bw = bw; 2369 goto compensate; 2370 2371 } 2372 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 2373 /* Averaging is done, we can return the value */ 2374 bw = rack->r_ctl.gp_bw; 2375 } else { 2376 /* Still doing initial average must calculate */ 2377 bw = rack->r_ctl.gp_bw / max(rack->r_ctl.num_measurements, 1); 2378 } 2379 lt_bw = rack_get_lt_bw(rack); 2380 if (lt_bw == 0) { 2381 /* If we don't have one then equate it to the gp_bw */ 2382 lt_bw = rack->r_ctl.gp_bw; 2383 } 2384 if ((rack->r_cwnd_was_clamped == 1) && (rack->r_clamped_gets_lower > 0)){ 2385 /* if clamped take the lowest */ 2386 if (lt_bw < bw) 2387 ret_bw = lt_bw; 2388 else 2389 ret_bw = bw; 2390 } else { 2391 /* If not set for clamped to get lowest, take the highest */ 2392 if (lt_bw > bw) 2393 ret_bw = lt_bw; 2394 else 2395 ret_bw = bw; 2396 } 2397 /* 2398 * Now lets compensate based on the TCP/IP overhead. Our 2399 * Goodput estimate does not include this so we must pace out 2400 * a bit faster since our pacing calculations do. The pacing 2401 * calculations use the base ETHERNET_SEGMENT_SIZE and the segsiz 2402 * we are using to do this, so we do that here in the opposite 2403 * direction as well. This means that if we are tunneled and the 2404 * segsiz is say 1200 bytes we will get quite a boost, but its 2405 * compensated for in the pacing time the opposite way. 2406 */ 2407 compensate: 2408 ret_bw = rack_compensate_for_linerate(rack, ret_bw); 2409 return(ret_bw); 2410 } 2411 2412 2413 static uint64_t 2414 rack_get_bw(struct tcp_rack *rack) 2415 { 2416 uint64_t bw; 2417 2418 if (rack->use_fixed_rate) { 2419 /* Return the fixed pacing rate */ 2420 return (rack_get_fixed_pacing_bw(rack)); 2421 } 2422 bw = rack_get_gp_est(rack); 2423 return (bw); 2424 } 2425 2426 static uint16_t 2427 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 2428 { 2429 if (rack->use_fixed_rate) { 2430 return (100); 2431 } else if (rack->in_probe_rtt && (rsm == NULL)) 2432 return (rack->r_ctl.rack_per_of_gp_probertt); 2433 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 2434 rack->r_ctl.rack_per_of_gp_rec)) { 2435 if (rsm) { 2436 /* a retransmission always use the recovery rate */ 2437 return (rack->r_ctl.rack_per_of_gp_rec); 2438 } else if (rack->rack_rec_nonrxt_use_cr) { 2439 /* Directed to use the configured rate */ 2440 goto configured_rate; 2441 } else if (rack->rack_no_prr && 2442 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 2443 /* No PRR, lets just use the b/w estimate only */ 2444 return (100); 2445 } else { 2446 /* 2447 * Here we may have a non-retransmit but we 2448 * have no overrides, so just use the recovery 2449 * rate (prr is in effect). 
2450 */ 2451 return (rack->r_ctl.rack_per_of_gp_rec); 2452 } 2453 } 2454 configured_rate: 2455 /* For the configured rate we look at our cwnd vs the ssthresh */ 2456 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 2457 return (rack->r_ctl.rack_per_of_gp_ss); 2458 else 2459 return (rack->r_ctl.rack_per_of_gp_ca); 2460 } 2461 2462 static void 2463 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6) 2464 { 2465 /* 2466 * Types of logs (mod value) 2467 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. 2468 * 2 = a dsack round begins, persist is reset to 16. 2469 * 3 = a dsack round ends 2470 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh 2471 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack 2472 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. 2473 */ 2474 if (tcp_bblogging_on(rack->rc_tp)) { 2475 union tcp_log_stackspecific log; 2476 struct timeval tv; 2477 2478 memset(&log, 0, sizeof(log)); 2479 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; 2480 log.u_bbr.flex1 <<= 1; 2481 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; 2482 log.u_bbr.flex1 <<= 1; 2483 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; 2484 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; 2485 log.u_bbr.flex3 = rack->r_ctl.num_dsack; 2486 log.u_bbr.flex4 = flex4; 2487 log.u_bbr.flex5 = flex5; 2488 log.u_bbr.flex6 = flex6; 2489 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; 2490 log.u_bbr.flex8 = mod; 2491 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2492 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2493 &rack->rc_inp->inp_socket->so_rcv, 2494 &rack->rc_inp->inp_socket->so_snd, 2495 RACK_DSACK_HANDLING, 0, 2496 0, &log, false, &tv); 2497 } 2498 } 2499 2500 static void 2501 rack_log_hdwr_pacing(struct tcp_rack *rack, 2502 uint64_t rate, uint64_t hw_rate, int line, 2503 int error, uint16_t mod) 2504 { 2505 if (tcp_bblogging_on(rack->rc_tp)) { 2506 union tcp_log_stackspecific log; 2507 struct timeval tv; 2508 const struct ifnet *ifp; 2509 2510 memset(&log, 0, sizeof(log)); 2511 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 2512 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 2513 if (rack->r_ctl.crte) { 2514 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 2515 } else if (rack->rc_inp->inp_route.ro_nh && 2516 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 2517 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 2518 } else 2519 ifp = NULL; 2520 if (ifp) { 2521 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff); 2522 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff); 2523 } 2524 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2525 log.u_bbr.bw_inuse = rate; 2526 log.u_bbr.flex5 = line; 2527 log.u_bbr.flex6 = error; 2528 log.u_bbr.flex7 = mod; 2529 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 2530 log.u_bbr.flex8 = rack->use_fixed_rate; 2531 log.u_bbr.flex8 <<= 1; 2532 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 2533 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 2534 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 2535 if (rack->r_ctl.crte) 2536 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 2537 else 2538 log.u_bbr.cur_del_rate = 0; 2539 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 2540 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2541 &rack->rc_inp->inp_socket->so_rcv, 2542 &rack->rc_inp->inp_socket->so_snd, 2543 BBR_LOG_HDWR_PACE, 0, 2544 0, &log, false, &tv); 2545 } 2546 } 2547 2548 static uint64_t 2549 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap 
*rsm, int *capped) 2550 { 2551 /* 2552 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 2553 */ 2554 uint64_t bw_est, high_rate; 2555 uint64_t gain; 2556 2557 if ((rack->r_pacing_discount == 0) || 2558 (rack_full_buffer_discount == 0)) { 2559 /* 2560 * No buffer level based discount from client buffer 2561 * level is enabled or the feature is disabled. 2562 */ 2563 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2564 bw_est = bw * gain; 2565 bw_est /= (uint64_t)100; 2566 } else { 2567 /* 2568 * We have a discount in place apply it with 2569 * just a 100% gain (we get no boost if the buffer 2570 * is full). 2571 */ 2572 uint64_t discount; 2573 2574 discount = bw * (uint64_t)(rack_full_buffer_discount * rack->r_ctl.pacing_discount_amm); 2575 discount /= 100; 2576 /* What %% of the b/w do we discount */ 2577 bw_est = bw - discount; 2578 } 2579 /* Never fall below the minimum (def 64kbps) */ 2580 if (bw_est < RACK_MIN_BW) 2581 bw_est = RACK_MIN_BW; 2582 if (rack->r_rack_hw_rate_caps) { 2583 /* Rate caps are in place */ 2584 if (rack->r_ctl.crte != NULL) { 2585 /* We have a hdwr rate already */ 2586 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2587 if (bw_est >= high_rate) { 2588 /* We are capping bw at the highest rate table entry */ 2589 if (rack_hw_rate_cap_per && 2590 (((high_rate * (100 + rack_hw_rate_cap_per)) / 100) < bw_est)) { 2591 rack->r_rack_hw_rate_caps = 0; 2592 goto done; 2593 } 2594 rack_log_hdwr_pacing(rack, 2595 bw_est, high_rate, __LINE__, 2596 0, 3); 2597 bw_est = high_rate; 2598 if (capped) 2599 *capped = 1; 2600 } 2601 } else if ((rack->rack_hdrw_pacing == 0) && 2602 (rack->rack_hdw_pace_ena) && 2603 (rack->rack_attempt_hdwr_pace == 0) && 2604 (rack->rc_inp->inp_route.ro_nh != NULL) && 2605 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2606 /* 2607 * Special case, we have not yet attempted hardware 2608 * pacing, and yet we may, when we do, find out if we are 2609 * above the highest rate. We need to know the maxbw for the interface 2610 * in question (if it supports ratelimiting). We get back 2611 * a 0, if the interface is not found in the RL lists. 2612 */ 2613 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2614 if (high_rate) { 2615 /* Yep, we have a rate is it above this rate? */ 2616 if (bw_est > high_rate) { 2617 bw_est = high_rate; 2618 if (capped) 2619 *capped = 1; 2620 } 2621 } 2622 } 2623 } 2624 done: 2625 return (bw_est); 2626 } 2627 2628 static void 2629 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2630 { 2631 if (tcp_bblogging_on(rack->rc_tp)) { 2632 union tcp_log_stackspecific log; 2633 struct timeval tv; 2634 2635 if (rack->sack_attack_disable > 0) 2636 goto log_anyway; 2637 if ((mod != 1) && (rack_verbose_logging == 0)) { 2638 /* 2639 * We get 3 values currently for mod 2640 * 1 - We are retransmitting and this tells the reason. 2641 * 2 - We are clearing a dup-ack count. 2642 * 3 - We are incrementing a dup-ack count. 2643 * 2644 * The clear/increment are only logged 2645 * if you have BBverbose on. 
2646 */ 2647 return; 2648 } 2649 log_anyway: 2650 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2651 log.u_bbr.flex1 = tsused; 2652 log.u_bbr.flex2 = thresh; 2653 log.u_bbr.flex3 = rsm->r_flags; 2654 log.u_bbr.flex4 = rsm->r_dupack; 2655 log.u_bbr.flex5 = rsm->r_start; 2656 log.u_bbr.flex6 = rsm->r_end; 2657 log.u_bbr.flex8 = mod; 2658 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2659 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2660 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2661 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2662 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2663 log.u_bbr.pacing_gain = rack->r_must_retran; 2664 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2665 &rack->rc_inp->inp_socket->so_rcv, 2666 &rack->rc_inp->inp_socket->so_snd, 2667 BBR_LOG_SETTINGS_CHG, 0, 2668 0, &log, false, &tv); 2669 } 2670 } 2671 2672 static void 2673 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2674 { 2675 if (tcp_bblogging_on(rack->rc_tp)) { 2676 union tcp_log_stackspecific log; 2677 struct timeval tv; 2678 2679 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2680 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2681 log.u_bbr.flex2 = to; 2682 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2683 log.u_bbr.flex4 = slot; 2684 log.u_bbr.flex5 = rack->rc_tp->t_hpts_slot; 2685 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2686 log.u_bbr.flex7 = rack->rc_in_persist; 2687 log.u_bbr.flex8 = which; 2688 if (rack->rack_no_prr) 2689 log.u_bbr.pkts_out = 0; 2690 else 2691 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2692 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2693 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2694 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2695 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2696 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2697 log.u_bbr.pacing_gain = rack->r_must_retran; 2698 log.u_bbr.cwnd_gain = rack->rack_deferred_inited; 2699 log.u_bbr.pkt_epoch = rack->rc_has_collapsed; 2700 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2701 log.u_bbr.lost = rack_rto_min; 2702 log.u_bbr.epoch = rack->r_ctl.roundends; 2703 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2704 &rack->rc_inp->inp_socket->so_rcv, 2705 &rack->rc_inp->inp_socket->so_snd, 2706 BBR_LOG_TIMERSTAR, 0, 2707 0, &log, false, &tv); 2708 } 2709 } 2710 2711 static void 2712 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2713 { 2714 if (tcp_bblogging_on(rack->rc_tp)) { 2715 union tcp_log_stackspecific log; 2716 struct timeval tv; 2717 2718 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2719 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2720 log.u_bbr.flex8 = to_num; 2721 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2722 log.u_bbr.flex2 = rack->rc_rack_rtt; 2723 if (rsm == NULL) 2724 log.u_bbr.flex3 = 0; 2725 else 2726 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2727 if (rack->rack_no_prr) 2728 log.u_bbr.flex5 = 0; 2729 else 2730 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2731 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2732 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2733 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2734 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2735 log.u_bbr.pacing_gain = rack->r_must_retran; 2736 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2737 &rack->rc_inp->inp_socket->so_rcv, 2738 &rack->rc_inp->inp_socket->so_snd, 2739 BBR_LOG_RTO, 0, 2740 0, &log, false, &tv); 2741 } 2742 } 2743 2744 static void 2745 rack_log_map_chg(struct tcpcb *tp, struct 
tcp_rack *rack, 2746 struct rack_sendmap *prev, 2747 struct rack_sendmap *rsm, 2748 struct rack_sendmap *next, 2749 int flag, uint32_t th_ack, int line) 2750 { 2751 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2752 union tcp_log_stackspecific log; 2753 struct timeval tv; 2754 2755 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2756 log.u_bbr.flex8 = flag; 2757 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2758 log.u_bbr.cur_del_rate = (uint64_t)prev; 2759 log.u_bbr.delRate = (uint64_t)rsm; 2760 log.u_bbr.rttProp = (uint64_t)next; 2761 log.u_bbr.flex7 = 0; 2762 if (prev) { 2763 log.u_bbr.flex1 = prev->r_start; 2764 log.u_bbr.flex2 = prev->r_end; 2765 log.u_bbr.flex7 |= 0x4; 2766 } 2767 if (rsm) { 2768 log.u_bbr.flex3 = rsm->r_start; 2769 log.u_bbr.flex4 = rsm->r_end; 2770 log.u_bbr.flex7 |= 0x2; 2771 } 2772 if (next) { 2773 log.u_bbr.flex5 = next->r_start; 2774 log.u_bbr.flex6 = next->r_end; 2775 log.u_bbr.flex7 |= 0x1; 2776 } 2777 log.u_bbr.applimited = line; 2778 log.u_bbr.pkts_out = th_ack; 2779 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2780 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2781 if (rack->rack_no_prr) 2782 log.u_bbr.lost = 0; 2783 else 2784 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2785 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2786 &rack->rc_inp->inp_socket->so_rcv, 2787 &rack->rc_inp->inp_socket->so_snd, 2788 TCP_LOG_MAPCHG, 0, 2789 0, &log, false, &tv); 2790 } 2791 } 2792 2793 static void 2794 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2795 struct rack_sendmap *rsm, int conf) 2796 { 2797 if (tcp_bblogging_on(tp)) { 2798 union tcp_log_stackspecific log; 2799 struct timeval tv; 2800 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2801 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2802 log.u_bbr.flex1 = t; 2803 log.u_bbr.flex2 = len; 2804 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2805 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2806 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2807 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2808 log.u_bbr.flex7 = conf; 2809 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2810 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2811 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2812 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2813 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2814 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2815 if (rsm) { 2816 log.u_bbr.pkt_epoch = rsm->r_start; 2817 log.u_bbr.lost = rsm->r_end; 2818 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2819 /* We loose any upper of the 24 bits */ 2820 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; 2821 } else { 2822 /* Its a SYN */ 2823 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2824 log.u_bbr.lost = 0; 2825 log.u_bbr.cwnd_gain = 0; 2826 log.u_bbr.pacing_gain = 0; 2827 } 2828 /* Write out general bits of interest rrs here */ 2829 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; 2830 log.u_bbr.use_lt_bw <<= 1; 2831 log.u_bbr.use_lt_bw |= rack->forced_ack; 2832 log.u_bbr.use_lt_bw <<= 1; 2833 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2834 log.u_bbr.use_lt_bw <<= 1; 2835 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2836 log.u_bbr.use_lt_bw <<= 1; 2837 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2838 log.u_bbr.use_lt_bw <<= 1; 2839 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 2840 log.u_bbr.use_lt_bw <<= 1; 2841 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2842 log.u_bbr.use_lt_bw <<= 1; 2843 log.u_bbr.use_lt_bw |= 
rack->rc_dragged_bottom; 2844 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; 2845 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; 2846 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; 2847 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; 2848 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; 2849 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 2850 log.u_bbr.bw_inuse <<= 32; 2851 if (rsm) 2852 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 2853 TCP_LOG_EVENTP(tp, NULL, 2854 &rack->rc_inp->inp_socket->so_rcv, 2855 &rack->rc_inp->inp_socket->so_snd, 2856 BBR_LOG_BBRRTT, 0, 2857 0, &log, false, &tv); 2858 2859 2860 } 2861 } 2862 2863 static void 2864 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 2865 { 2866 /* 2867 * Log the rtt sample we are 2868 * applying to the srtt algorithm in 2869 * useconds. 2870 */ 2871 if (tcp_bblogging_on(rack->rc_tp)) { 2872 union tcp_log_stackspecific log; 2873 struct timeval tv; 2874 2875 /* Convert our ms to a microsecond */ 2876 memset(&log, 0, sizeof(log)); 2877 log.u_bbr.flex1 = rtt; 2878 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2879 log.u_bbr.flex3 = rack->r_ctl.sack_count; 2880 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2881 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra; 2882 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2883 log.u_bbr.flex7 = 1; 2884 log.u_bbr.flex8 = rack->sack_attack_disable; 2885 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2886 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2887 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2888 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2889 log.u_bbr.pacing_gain = rack->r_must_retran; 2890 /* 2891 * We capture in delRate the upper 32 bits as 2892 * the confidence level we had declared, and the 2893 * lower 32 bits as the actual RTT using the arrival 2894 * timestamp. 
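		 * (e.g. a confidence of 3 with a 25000 us RTT is logged as
		 * (3ULL << 32) | 25000.)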
2895 */ 2896 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 2897 log.u_bbr.delRate <<= 32; 2898 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 2899 /* Lets capture all the things that make up t_rtxcur */ 2900 log.u_bbr.applimited = rack_rto_min; 2901 log.u_bbr.epoch = rack_rto_max; 2902 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 2903 log.u_bbr.lost = rack_rto_min; 2904 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 2905 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 2906 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 2907 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 2908 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 2909 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2910 &rack->rc_inp->inp_socket->so_rcv, 2911 &rack->rc_inp->inp_socket->so_snd, 2912 TCP_LOG_RTT, 0, 2913 0, &log, false, &tv); 2914 } 2915 } 2916 2917 static void 2918 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 2919 { 2920 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2921 union tcp_log_stackspecific log; 2922 struct timeval tv; 2923 2924 /* Convert our ms to a microsecond */ 2925 memset(&log, 0, sizeof(log)); 2926 log.u_bbr.flex1 = rtt; 2927 log.u_bbr.flex2 = send_time; 2928 log.u_bbr.flex3 = ack_time; 2929 log.u_bbr.flex4 = where; 2930 log.u_bbr.flex7 = 2; 2931 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2932 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2933 &rack->rc_inp->inp_socket->so_rcv, 2934 &rack->rc_inp->inp_socket->so_snd, 2935 TCP_LOG_RTT, 0, 2936 0, &log, false, &tv); 2937 } 2938 } 2939 2940 2941 static void 2942 rack_log_rtt_sendmap(struct tcp_rack *rack, uint32_t idx, uint64_t tsv, uint32_t tsecho) 2943 { 2944 if (tcp_bblogging_on(rack->rc_tp)) { 2945 union tcp_log_stackspecific log; 2946 struct timeval tv; 2947 2948 /* Convert our ms to a microsecond */ 2949 memset(&log, 0, sizeof(log)); 2950 log.u_bbr.flex1 = idx; 2951 log.u_bbr.flex2 = rack_ts_to_msec(tsv); 2952 log.u_bbr.flex3 = tsecho; 2953 log.u_bbr.flex7 = 3; 2954 log.u_bbr.rttProp = tsv; 2955 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2956 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2957 &rack->rc_inp->inp_socket->so_rcv, 2958 &rack->rc_inp->inp_socket->so_snd, 2959 TCP_LOG_RTT, 0, 2960 0, &log, false, &tv); 2961 } 2962 } 2963 2964 2965 static inline void 2966 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 2967 { 2968 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2969 union tcp_log_stackspecific log; 2970 struct timeval tv; 2971 2972 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2973 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 2974 log.u_bbr.flex1 = line; 2975 log.u_bbr.flex2 = tick; 2976 log.u_bbr.flex3 = tp->t_maxunacktime; 2977 log.u_bbr.flex4 = tp->t_acktime; 2978 log.u_bbr.flex8 = event; 2979 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2980 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2981 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2982 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2983 log.u_bbr.pacing_gain = rack->r_must_retran; 2984 TCP_LOG_EVENTP(tp, NULL, 2985 &rack->rc_inp->inp_socket->so_rcv, 2986 &rack->rc_inp->inp_socket->so_snd, 2987 BBR_LOG_PROGRESS, 0, 2988 0, &log, false, &tv); 2989 } 2990 } 2991 2992 static void 2993 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv, int line) 2994 { 2995 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2996 union tcp_log_stackspecific log; 2997 2998 
memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2999 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3000 log.u_bbr.flex1 = slot; 3001 if (rack->rack_no_prr) 3002 log.u_bbr.flex2 = 0; 3003 else 3004 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 3005 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3006 log.u_bbr.flex5 = rack->r_ctl.ack_during_sd; 3007 log.u_bbr.flex6 = line; 3008 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 3009 log.u_bbr.flex8 = rack->rc_in_persist; 3010 log.u_bbr.timeStamp = cts; 3011 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3012 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3013 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3014 log.u_bbr.pacing_gain = rack->r_must_retran; 3015 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3016 &rack->rc_inp->inp_socket->so_rcv, 3017 &rack->rc_inp->inp_socket->so_snd, 3018 BBR_LOG_BBRSND, 0, 3019 0, &log, false, tv); 3020 } 3021 } 3022 3023 static void 3024 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 3025 { 3026 if (tcp_bblogging_on(rack->rc_tp)) { 3027 union tcp_log_stackspecific log; 3028 struct timeval tv; 3029 3030 memset(&log, 0, sizeof(log)); 3031 log.u_bbr.flex1 = did_out; 3032 log.u_bbr.flex2 = nxt_pkt; 3033 log.u_bbr.flex3 = way_out; 3034 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3035 if (rack->rack_no_prr) 3036 log.u_bbr.flex5 = 0; 3037 else 3038 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3039 log.u_bbr.flex6 = nsegs; 3040 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 3041 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 3042 log.u_bbr.flex7 <<= 1; 3043 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 3044 log.u_bbr.flex7 <<= 1; 3045 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 3046 log.u_bbr.flex8 = rack->rc_in_persist; 3047 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3048 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3049 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3050 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 3051 log.u_bbr.use_lt_bw <<= 1; 3052 log.u_bbr.use_lt_bw |= rack->r_might_revert; 3053 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3054 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3055 log.u_bbr.pacing_gain = rack->r_must_retran; 3056 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3057 &rack->rc_inp->inp_socket->so_rcv, 3058 &rack->rc_inp->inp_socket->so_snd, 3059 BBR_LOG_DOSEG_DONE, 0, 3060 0, &log, false, &tv); 3061 } 3062 } 3063 3064 static void 3065 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 3066 { 3067 if (tcp_bblogging_on(rack->rc_tp)) { 3068 union tcp_log_stackspecific log; 3069 struct timeval tv; 3070 3071 memset(&log, 0, sizeof(log)); 3072 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 3073 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 3074 log.u_bbr.flex4 = arg1; 3075 log.u_bbr.flex5 = arg2; 3076 log.u_bbr.flex7 = rack->r_ctl.rc_user_set_min_segs; 3077 log.u_bbr.flex6 = arg3; 3078 log.u_bbr.flex8 = frm; 3079 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3080 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3081 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3082 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 3083 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3084 log.u_bbr.pacing_gain = rack->r_must_retran; 3085 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv, 3086 
&tptosocket(tp)->so_snd, 3087 TCP_HDWR_PACE_SIZE, 0, 0, &log, false, &tv); 3088 } 3089 } 3090 3091 static void 3092 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, 3093 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 3094 { 3095 if (tcp_bblogging_on(rack->rc_tp)) { 3096 union tcp_log_stackspecific log; 3097 struct timeval tv; 3098 3099 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3100 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3101 log.u_bbr.flex1 = slot; 3102 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 3103 log.u_bbr.flex4 = reason; 3104 if (rack->rack_no_prr) 3105 log.u_bbr.flex5 = 0; 3106 else 3107 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3108 log.u_bbr.flex7 = hpts_calling; 3109 log.u_bbr.flex8 = rack->rc_in_persist; 3110 log.u_bbr.lt_epoch = cwnd_to_use; 3111 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3112 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3113 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3114 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3115 log.u_bbr.pacing_gain = rack->r_must_retran; 3116 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 3117 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3118 &rack->rc_inp->inp_socket->so_rcv, 3119 &rack->rc_inp->inp_socket->so_snd, 3120 BBR_LOG_JUSTRET, 0, 3121 tlen, &log, false, &tv); 3122 } 3123 } 3124 3125 static void 3126 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 3127 struct timeval *tv, uint32_t flags_on_entry) 3128 { 3129 if (tcp_bblogging_on(rack->rc_tp)) { 3130 union tcp_log_stackspecific log; 3131 3132 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3133 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 3134 log.u_bbr.flex1 = line; 3135 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 3136 log.u_bbr.flex3 = flags_on_entry; 3137 log.u_bbr.flex4 = us_cts; 3138 if (rack->rack_no_prr) 3139 log.u_bbr.flex5 = 0; 3140 else 3141 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 3142 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 3143 log.u_bbr.flex7 = hpts_removed; 3144 log.u_bbr.flex8 = 1; 3145 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 3146 log.u_bbr.timeStamp = us_cts; 3147 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3148 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3149 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3150 log.u_bbr.pacing_gain = rack->r_must_retran; 3151 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3152 &rack->rc_inp->inp_socket->so_rcv, 3153 &rack->rc_inp->inp_socket->so_snd, 3154 BBR_LOG_TIMERCANC, 0, 3155 0, &log, false, tv); 3156 } 3157 } 3158 3159 static void 3160 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 3161 uint32_t flex1, uint32_t flex2, 3162 uint32_t flex3, uint32_t flex4, 3163 uint32_t flex5, uint32_t flex6, 3164 uint16_t flex7, uint8_t mod) 3165 { 3166 if (tcp_bblogging_on(rack->rc_tp)) { 3167 union tcp_log_stackspecific log; 3168 struct timeval tv; 3169 3170 if (mod == 1) { 3171 /* No you can't use 1, its for the real to cancel */ 3172 return; 3173 } 3174 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3175 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3176 log.u_bbr.flex1 = flex1; 3177 log.u_bbr.flex2 = flex2; 3178 log.u_bbr.flex3 = flex3; 3179 log.u_bbr.flex4 = flex4; 3180 log.u_bbr.flex5 = flex5; 3181 log.u_bbr.flex6 = flex6; 3182 log.u_bbr.flex7 = flex7; 3183 log.u_bbr.flex8 = mod; 3184 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3185 &rack->rc_inp->inp_socket->so_rcv, 3186 &rack->rc_inp->inp_socket->so_snd, 3187 BBR_LOG_TIMERCANC, 0, 3188 0, &log, false, &tv); 3189 } 3190 
} 3191 3192 static void 3193 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 3194 { 3195 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 3196 union tcp_log_stackspecific log; 3197 struct timeval tv; 3198 3199 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3200 log.u_bbr.flex1 = timers; 3201 log.u_bbr.flex2 = ret; 3202 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 3203 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 3204 log.u_bbr.flex5 = cts; 3205 if (rack->rack_no_prr) 3206 log.u_bbr.flex6 = 0; 3207 else 3208 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; 3209 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 3210 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 3211 log.u_bbr.pacing_gain = rack->r_must_retran; 3212 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3213 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3214 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3215 &rack->rc_inp->inp_socket->so_rcv, 3216 &rack->rc_inp->inp_socket->so_snd, 3217 BBR_LOG_TO_PROCESS, 0, 3218 0, &log, false, &tv); 3219 } 3220 } 3221 3222 static void 3223 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line) 3224 { 3225 if (tcp_bblogging_on(rack->rc_tp)) { 3226 union tcp_log_stackspecific log; 3227 struct timeval tv; 3228 3229 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3230 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 3231 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 3232 if (rack->rack_no_prr) 3233 log.u_bbr.flex3 = 0; 3234 else 3235 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 3236 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 3237 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 3238 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 3239 log.u_bbr.flex7 = line; 3240 log.u_bbr.flex8 = frm; 3241 log.u_bbr.pkts_out = orig_cwnd; 3242 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3243 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3244 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 3245 log.u_bbr.use_lt_bw <<= 1; 3246 log.u_bbr.use_lt_bw |= rack->r_might_revert; 3247 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3248 &rack->rc_inp->inp_socket->so_rcv, 3249 &rack->rc_inp->inp_socket->so_snd, 3250 BBR_LOG_BBRUPD, 0, 3251 0, &log, false, &tv); 3252 } 3253 } 3254 3255 #ifdef TCP_SAD_DETECTION 3256 static void 3257 rack_log_sad(struct tcp_rack *rack, int event) 3258 { 3259 if (tcp_bblogging_on(rack->rc_tp)) { 3260 union tcp_log_stackspecific log; 3261 struct timeval tv; 3262 3263 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3264 log.u_bbr.flex1 = rack->r_ctl.sack_count; 3265 log.u_bbr.flex2 = rack->r_ctl.ack_count; 3266 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra; 3267 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 3268 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced; 3269 log.u_bbr.flex6 = tcp_sack_to_ack_thresh; 3270 log.u_bbr.pkts_out = tcp_sack_to_move_thresh; 3271 log.u_bbr.lt_epoch = (tcp_force_detection << 8); 3272 log.u_bbr.lt_epoch |= rack->do_detection; 3273 log.u_bbr.applimited = tcp_map_minimum; 3274 log.u_bbr.flex7 = rack->sack_attack_disable; 3275 log.u_bbr.flex8 = event; 3276 log.u_bbr.bbr_state = rack->rc_suspicious; 3277 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3278 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3279 log.u_bbr.delivered = tcp_sad_decay_val; 3280 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3281 &rack->rc_inp->inp_socket->so_rcv, 3282 &rack->rc_inp->inp_socket->so_snd, 3283 TCP_SAD_DETECT, 0, 3284 0, &log, false, &tv); 3285 } 3286 } 3287 #endif 3288 3289 static void 3290 
rack_counter_destroy(void) 3291 { 3292 counter_u64_free(rack_total_bytes); 3293 counter_u64_free(rack_fto_send); 3294 counter_u64_free(rack_fto_rsm_send); 3295 counter_u64_free(rack_nfto_resend); 3296 counter_u64_free(rack_hw_pace_init_fail); 3297 counter_u64_free(rack_hw_pace_lost); 3298 counter_u64_free(rack_non_fto_send); 3299 counter_u64_free(rack_extended_rfo); 3300 counter_u64_free(rack_ack_total); 3301 counter_u64_free(rack_express_sack); 3302 counter_u64_free(rack_sack_total); 3303 counter_u64_free(rack_move_none); 3304 counter_u64_free(rack_move_some); 3305 counter_u64_free(rack_sack_attacks_detected); 3306 counter_u64_free(rack_sack_attacks_reversed); 3307 counter_u64_free(rack_sack_attacks_suspect); 3308 counter_u64_free(rack_sack_used_next_merge); 3309 counter_u64_free(rack_sack_used_prev_merge); 3310 counter_u64_free(rack_tlp_tot); 3311 counter_u64_free(rack_tlp_newdata); 3312 counter_u64_free(rack_tlp_retran); 3313 counter_u64_free(rack_tlp_retran_bytes); 3314 counter_u64_free(rack_to_tot); 3315 counter_u64_free(rack_saw_enobuf); 3316 counter_u64_free(rack_saw_enobuf_hw); 3317 counter_u64_free(rack_saw_enetunreach); 3318 counter_u64_free(rack_hot_alloc); 3319 counter_u64_free(rack_to_alloc); 3320 counter_u64_free(rack_to_alloc_hard); 3321 counter_u64_free(rack_to_alloc_emerg); 3322 counter_u64_free(rack_to_alloc_limited); 3323 counter_u64_free(rack_alloc_limited_conns); 3324 counter_u64_free(rack_split_limited); 3325 counter_u64_free(rack_multi_single_eq); 3326 counter_u64_free(rack_rxt_clamps_cwnd); 3327 counter_u64_free(rack_rxt_clamps_cwnd_uniq); 3328 counter_u64_free(rack_proc_non_comp_ack); 3329 counter_u64_free(rack_sack_proc_all); 3330 counter_u64_free(rack_sack_proc_restart); 3331 counter_u64_free(rack_sack_proc_short); 3332 counter_u64_free(rack_sack_skipped_acked); 3333 counter_u64_free(rack_sack_splits); 3334 counter_u64_free(rack_input_idle_reduces); 3335 counter_u64_free(rack_collapsed_win); 3336 counter_u64_free(rack_collapsed_win_rxt); 3337 counter_u64_free(rack_collapsed_win_rxt_bytes); 3338 counter_u64_free(rack_collapsed_win_seen); 3339 counter_u64_free(rack_try_scwnd); 3340 counter_u64_free(rack_persists_sends); 3341 counter_u64_free(rack_persists_acks); 3342 counter_u64_free(rack_persists_loss); 3343 counter_u64_free(rack_persists_lost_ends); 3344 #ifdef INVARIANTS 3345 counter_u64_free(rack_adjust_map_bw); 3346 #endif 3347 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 3348 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 3349 } 3350 3351 static struct rack_sendmap * 3352 rack_alloc(struct tcp_rack *rack) 3353 { 3354 struct rack_sendmap *rsm; 3355 3356 /* 3357 * First get the top of the list it in 3358 * theory is the "hottest" rsm we have, 3359 * possibly just freed by ack processing. 3360 */ 3361 if (rack->rc_free_cnt > rack_free_cache) { 3362 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 3363 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3364 counter_u64_add(rack_hot_alloc, 1); 3365 rack->rc_free_cnt--; 3366 return (rsm); 3367 } 3368 /* 3369 * Once we get under our free cache we probably 3370 * no longer have a "hot" one available. Lets 3371 * get one from UMA. 3372 */ 3373 rsm = uma_zalloc(rack_zone, M_NOWAIT); 3374 if (rsm) { 3375 rack->r_ctl.rc_num_maps_alloced++; 3376 counter_u64_add(rack_to_alloc, 1); 3377 return (rsm); 3378 } 3379 /* 3380 * Dig in to our aux rsm's (the last two) since 3381 * UMA failed to get us one. 
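 * (Added note, summarizing the fallback order used above as a sketch:
 * rack_alloc() first tries the head of rc_free -- the entry most recently
 * handed back by ack processing and so likely still cache-warm -- then
 * falls back to uma_zalloc(), and only then eats into the last few
 * entries kept on the free list, so a transient UMA failure does not
 * immediately leave us without a sendmap entry.)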
3382 */ 3383 if (rack->rc_free_cnt) { 3384 counter_u64_add(rack_to_alloc_emerg, 1); 3385 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 3386 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3387 rack->rc_free_cnt--; 3388 return (rsm); 3389 } 3390 return (NULL); 3391 } 3392 3393 static struct rack_sendmap * 3394 rack_alloc_full_limit(struct tcp_rack *rack) 3395 { 3396 if ((V_tcp_map_entries_limit > 0) && 3397 (rack->do_detection == 0) && 3398 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 3399 counter_u64_add(rack_to_alloc_limited, 1); 3400 if (!rack->alloc_limit_reported) { 3401 rack->alloc_limit_reported = 1; 3402 counter_u64_add(rack_alloc_limited_conns, 1); 3403 } 3404 return (NULL); 3405 } 3406 return (rack_alloc(rack)); 3407 } 3408 3409 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 3410 static struct rack_sendmap * 3411 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 3412 { 3413 struct rack_sendmap *rsm; 3414 3415 if (limit_type) { 3416 /* currently there is only one limit type */ 3417 if (rack->r_ctl.rc_split_limit > 0 && 3418 (rack->do_detection == 0) && 3419 rack->r_ctl.rc_num_split_allocs >= rack->r_ctl.rc_split_limit) { 3420 counter_u64_add(rack_split_limited, 1); 3421 if (!rack->alloc_limit_reported) { 3422 rack->alloc_limit_reported = 1; 3423 counter_u64_add(rack_alloc_limited_conns, 1); 3424 } 3425 return (NULL); 3426 #ifdef TCP_SAD_DETECTION 3427 } else if ((tcp_sad_limit != 0) && 3428 (rack->do_detection == 1) && 3429 (rack->r_ctl.rc_num_split_allocs >= tcp_sad_limit)) { 3430 counter_u64_add(rack_split_limited, 1); 3431 if (!rack->alloc_limit_reported) { 3432 rack->alloc_limit_reported = 1; 3433 counter_u64_add(rack_alloc_limited_conns, 1); 3434 } 3435 return (NULL); 3436 #endif 3437 } 3438 } 3439 3440 /* allocate and mark in the limit type, if set */ 3441 rsm = rack_alloc(rack); 3442 if (rsm != NULL && limit_type) { 3443 rsm->r_limit_type = limit_type; 3444 rack->r_ctl.rc_num_split_allocs++; 3445 } 3446 return (rsm); 3447 } 3448 3449 static void 3450 rack_free_trim(struct tcp_rack *rack) 3451 { 3452 struct rack_sendmap *rsm; 3453 3454 /* 3455 * Free up all the tail entries until 3456 * we get our list down to the limit. 
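 * (Added note: the trim below pulls from the tail of rc_free, i.e. the
 * least recently returned entries, while the head is kept for the fast
 * re-use path in rack_alloc(); each trimmed entry also decrements
 * rc_num_maps_alloced since it goes back to UMA rather than staying
 * cached on the connection.)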
3457 */ 3458 while (rack->rc_free_cnt > rack_free_cache) { 3459 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); 3460 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3461 rack->rc_free_cnt--; 3462 rack->r_ctl.rc_num_maps_alloced--; 3463 uma_zfree(rack_zone, rsm); 3464 } 3465 } 3466 3467 static void 3468 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 3469 { 3470 if (rsm->r_flags & RACK_APP_LIMITED) { 3471 if (rack->r_ctl.rc_app_limited_cnt > 0) { 3472 rack->r_ctl.rc_app_limited_cnt--; 3473 } 3474 } 3475 if (rsm->r_limit_type) { 3476 /* currently there is only one limit type */ 3477 rack->r_ctl.rc_num_split_allocs--; 3478 } 3479 if (rsm == rack->r_ctl.rc_first_appl) { 3480 if (rack->r_ctl.rc_app_limited_cnt == 0) 3481 rack->r_ctl.rc_first_appl = NULL; 3482 else 3483 rack->r_ctl.rc_first_appl = tqhash_find(rack->r_ctl.tqh, rsm->r_nseq_appl); 3484 } 3485 if (rsm == rack->r_ctl.rc_resend) 3486 rack->r_ctl.rc_resend = NULL; 3487 if (rsm == rack->r_ctl.rc_end_appl) 3488 rack->r_ctl.rc_end_appl = NULL; 3489 if (rack->r_ctl.rc_tlpsend == rsm) 3490 rack->r_ctl.rc_tlpsend = NULL; 3491 if (rack->r_ctl.rc_sacklast == rsm) 3492 rack->r_ctl.rc_sacklast = NULL; 3493 memset(rsm, 0, sizeof(struct rack_sendmap)); 3494 /* Make sure we are not going to overrun our count limit of 0xff */ 3495 if ((rack->rc_free_cnt + 1) > 0xff) { 3496 rack_free_trim(rack); 3497 } 3498 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); 3499 rack->rc_free_cnt++; 3500 } 3501 3502 static uint32_t 3503 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack) 3504 { 3505 uint64_t srtt, bw, len, tim; 3506 uint32_t segsiz, def_len, minl; 3507 3508 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3509 def_len = rack_def_data_window * segsiz; 3510 if (rack->rc_gp_filled == 0) { 3511 /* 3512 * We have no measurement (IW is in flight?) so 3513 * we can only guess using our data_window sysctl 3514 * value (usually 20MSS). 3515 */ 3516 return (def_len); 3517 } 3518 /* 3519 * Now we have a number of factors to consider. 3520 * 3521 * 1) We have a desired BDP which is usually 3522 * at least 2. 3523 * 2) We have a minimum number of rtt's, usually 1 SRTT, 3524 * but we allow it to be more. 3525 * 3) We want to make sure a measurement lasts N useconds (if 3526 * we have set rack_min_measure_usec). 3527 * 3528 * We handle the first concern here by trying to create a data 3529 * window of max(rack_def_data_window, DesiredBDP). The 3530 * second concern we handle by not letting the measurement 3531 * window end normally until at least the required SRTT's 3532 * have gone by which is done further below in 3533 * rack_enough_for_measurement(). Finally the third concern 3534 * we also handle here by calculating how long that time 3535 * would take at the current BW and then return the 3536 * max of our first calculation and that length. Note 3537 * that if rack_min_measure_usec is 0, we don't deal 3538 * with concern 3. Also for both concerns 1 and 3 an 3539 * application limited period could end the measurement 3540 * earlier. 3541 * 3542 * So let's calculate the BDP with the "known" b/w using 3543 * the SRTT as our rtt and then multiply it by the 3544 * goal.
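 * (Illustrative example with made-up numbers, not from the original
 * source: at bw = 12,500,000 bytes/sec (~100Mbps) and srtt = 20,000 usecs
 * the BDP is 12.5e6 * 20e3 / 1e6 = 250,000 bytes; with rack_goal_bdp = 2
 * that becomes 500,000 bytes, rounded up to a segsiz multiple. The
 * rack_min_measure_usec check below can only ever raise that figure.)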
3545 */ 3546 bw = rack_get_bw(rack); 3547 srtt = (uint64_t)tp->t_srtt; 3548 len = bw * srtt; 3549 len /= (uint64_t)HPTS_USEC_IN_SEC; 3550 len *= max(1, rack_goal_bdp); 3551 /* Now we need to round up to the nearest MSS */ 3552 len = roundup(len, segsiz); 3553 if (rack_min_measure_usec) { 3554 /* Now calculate our min length for this b/w */ 3555 tim = rack_min_measure_usec; 3556 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC; 3557 if (minl == 0) 3558 minl = 1; 3559 minl = roundup(minl, segsiz); 3560 if (len < minl) 3561 len = minl; 3562 } 3563 /* 3564 * Now if we have a very small window we want 3565 * to attempt to get the window that is 3566 * as small as possible. This happens on 3567 * low b/w connections and we don't want to 3568 * span huge numbers of rtt's between measurements. 3569 * 3570 * We basically include 2 over our "MIN window" so 3571 * that the measurement can be shortened (possibly) by 3572 * an ack'ed packet. 3573 */ 3574 if (len < def_len) 3575 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz))); 3576 else 3577 return (max((uint32_t)len, def_len)); 3578 3579 } 3580 3581 static int 3582 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality) 3583 { 3584 uint32_t tim, srtts, segsiz; 3585 3586 /* 3587 * Has enough time passed for the GP measurement to be valid? 3588 */ 3589 if (SEQ_LT(th_ack, tp->gput_seq)) { 3590 /* Not enough bytes yet */ 3591 return (0); 3592 } 3593 if ((tp->snd_max == tp->snd_una) || 3594 (th_ack == tp->snd_max)){ 3595 /* 3596 * All is acked; the quality of an all-acked measurement is 3597 * usually low or medium, but in theory we could split 3598 * all acked into two cases, where you got 3599 * a significant amount of your window and 3600 * where you did not. For now we leave it, 3601 * but it is something to contemplate in the 3602 * future. The danger here is that delayed ack 3603 * is affecting the last byte (which is a 50:50 chance). 3604 */ 3605 *quality = RACK_QUALITY_ALLACKED; 3606 return (1); 3607 } 3608 if (SEQ_GEQ(th_ack, tp->gput_ack)) { 3609 /* 3610 * We obtained our entire window of data we wanted; 3611 * no matter if we are in recovery or not, 3612 * it's ok since expanding the window does not 3613 * make things fuzzy (or at least not as much). 3614 */ 3615 *quality = RACK_QUALITY_HIGH; 3616 return (1); 3617 } 3618 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3619 if (SEQ_LT(th_ack, tp->gput_ack) && 3620 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 3621 /* Not enough bytes yet */ 3622 return (0); 3623 } 3624 if (rack->r_ctl.rc_first_appl && 3625 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { 3626 /* 3627 * We are up to the app limited send point so 3628 * we have to measure irrespective of the time. 3629 */ 3630 *quality = RACK_QUALITY_APPLIMITED; 3631 return (1); 3632 } 3633 /* Now what about time? */ 3634 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); 3635 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; 3636 if ((tim >= srtts) && (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) { 3637 /* 3638 * We do not allow a measurement if we are in recovery, 3639 * as that would shrink the goodput window we wanted. 3640 * This is to prevent cloudiness of when the last send 3641 * was actually made.
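 * (Illustrative example with made-up numbers: with rc_gp_srtt = 40,000
 * usecs and rack_min_srtts = 1, any measurement window that has been
 * open for at least 40,000 usecs -- and is not in recovery -- is
 * accepted here as RACK_QUALITY_HIGH even though gput_ack has not yet
 * been reached.)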
3642 */ 3643 *quality = RACK_QUALITY_HIGH; 3644 return (1); 3645 } 3646 /* Nope not even a full SRTT has passed */ 3647 return (0); 3648 } 3649 3650 static void 3651 rack_log_timely(struct tcp_rack *rack, 3652 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd, 3653 uint64_t up_bnd, int line, uint8_t method) 3654 { 3655 if (tcp_bblogging_on(rack->rc_tp)) { 3656 union tcp_log_stackspecific log; 3657 struct timeval tv; 3658 3659 memset(&log, 0, sizeof(log)); 3660 log.u_bbr.flex1 = logged; 3661 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; 3662 log.u_bbr.flex2 <<= 4; 3663 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; 3664 log.u_bbr.flex2 <<= 4; 3665 log.u_bbr.flex2 |= rack->rc_gp_incr; 3666 log.u_bbr.flex2 <<= 4; 3667 log.u_bbr.flex2 |= rack->rc_gp_bwred; 3668 log.u_bbr.flex3 = rack->rc_gp_incr; 3669 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3670 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; 3671 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; 3672 log.u_bbr.flex7 = rack->rc_gp_bwred; 3673 log.u_bbr.flex8 = method; 3674 log.u_bbr.cur_del_rate = cur_bw; 3675 log.u_bbr.delRate = low_bnd; 3676 log.u_bbr.bw_inuse = up_bnd; 3677 log.u_bbr.rttProp = rack_get_bw(rack); 3678 log.u_bbr.pkt_epoch = line; 3679 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3680 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3681 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3682 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3683 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3684 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; 3685 log.u_bbr.cwnd_gain <<= 1; 3686 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; 3687 log.u_bbr.cwnd_gain <<= 1; 3688 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 3689 log.u_bbr.cwnd_gain <<= 1; 3690 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 3691 log.u_bbr.lost = rack->r_ctl.rc_loss_count; 3692 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3693 &rack->rc_inp->inp_socket->so_rcv, 3694 &rack->rc_inp->inp_socket->so_snd, 3695 TCP_TIMELY_WORK, 0, 3696 0, &log, false, &tv); 3697 } 3698 } 3699 3700 static int 3701 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) 3702 { 3703 /* 3704 * Before we increase we need to know if 3705 * the estimate just made was less than 3706 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est) 3707 * 3708 * If we already are pacing at a fast enough 3709 * rate to push us faster there is no sense of 3710 * increasing. 3711 * 3712 * We first caculate our actual pacing rate (ss or ca multiplier 3713 * times our cur_bw). 3714 * 3715 * Then we take the last measured rate and multipy by our 3716 * maximum pacing overage to give us a max allowable rate. 3717 * 3718 * If our act_rate is smaller than our max_allowable rate 3719 * then we should increase. Else we should hold steady. 3720 * 3721 */ 3722 uint64_t act_rate, max_allow_rate; 3723 3724 if (rack_timely_no_stopping) 3725 return (1); 3726 3727 if ((cur_bw == 0) || (last_bw_est == 0)) { 3728 /* 3729 * Initial startup case or 3730 * everything is acked case. 3731 */ 3732 rack_log_timely(rack, mult, cur_bw, 0, 0, 3733 __LINE__, 9); 3734 return (1); 3735 } 3736 if (mult <= 100) { 3737 /* 3738 * We can always pace at or slightly above our rate. 
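 * (Added note: a multiplier at or below 100 means we are pacing at no
 * more than the measured b/w, so there is clearly headroom and the raise
 * is permitted without the act_rate/max_allow_rate comparison below.)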
3739 */ 3740 rack_log_timely(rack, mult, cur_bw, 0, 0, 3741 __LINE__, 9); 3742 return (1); 3743 } 3744 act_rate = cur_bw * (uint64_t)mult; 3745 act_rate /= 100; 3746 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100); 3747 max_allow_rate /= 100; 3748 if (act_rate < max_allow_rate) { 3749 /* 3750 * Here the rate we are actually pacing at 3751 * is smaller than 10% above our last measurement. 3752 * This means we are pacing below what we would 3753 * like to try to achieve (plus some wiggle room). 3754 */ 3755 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3756 __LINE__, 9); 3757 return (1); 3758 } else { 3759 /* 3760 * Here we are already pacing at least rack_max_per_above (10%) 3761 * what we are getting back. This indicates most likely 3762 * that we are being limited (cwnd/rwnd/app) and can't 3763 * get any more b/w. There is no sense of trying to 3764 * raise up the pacing rate; it's not speeding us up 3765 * and we already are pacing faster than we are getting. 3766 */ 3767 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3768 __LINE__, 8); 3769 return (0); 3770 } 3771 } 3772 3773 static void 3774 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack) 3775 { 3776 /* 3777 * When we drag bottom, we want to assure 3778 * that no multiplier is below 1.0; if so 3779 * we want to restore it to at least that. 3780 */ 3781 if (rack->r_ctl.rack_per_of_gp_rec < 100) { 3782 /* This is unlikely, we usually do not touch recovery */ 3783 rack->r_ctl.rack_per_of_gp_rec = 100; 3784 } 3785 if (rack->r_ctl.rack_per_of_gp_ca < 100) { 3786 rack->r_ctl.rack_per_of_gp_ca = 100; 3787 } 3788 if (rack->r_ctl.rack_per_of_gp_ss < 100) { 3789 rack->r_ctl.rack_per_of_gp_ss = 100; 3790 } 3791 } 3792 3793 static void 3794 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack) 3795 { 3796 if (rack->r_ctl.rack_per_of_gp_ca > 100) { 3797 rack->r_ctl.rack_per_of_gp_ca = 100; 3798 } 3799 if (rack->r_ctl.rack_per_of_gp_ss > 100) { 3800 rack->r_ctl.rack_per_of_gp_ss = 100; 3801 } 3802 } 3803 3804 static void 3805 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override) 3806 { 3807 int32_t calc, logged, plus; 3808 3809 logged = 0; 3810 3811 if (override) { 3812 /* 3813 * override is passed when we are 3814 * losing b/w and making one last 3815 * gasp at trying to not lose out 3816 * to a new-reno flow. 3817 */ 3818 goto extra_boost; 3819 } 3820 /* In classic timely we boost by 5x if we have 5 increases in a row, let's not */ 3821 if (rack->rc_gp_incr && 3822 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { 3823 /* 3824 * Reset and get 5 strokes more before the boost. Note 3825 * that the count is 0 based so we have to add one.
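 * (Illustrative example, values assumed rather than taken from the
 * defaults: with rack_gp_increase_per = 2 and RACK_TIMELY_CNT_BOOST = 5
 * the normal step is 2 percentage points, but the boost below applies
 * 2 * 5 = 10 points in one shot, after which rc_gp_timely_inc_cnt
 * restarts from zero.)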
3826 */ 3827 extra_boost: 3828 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3829 rack->rc_gp_timely_inc_cnt = 0; 3830 } else 3831 plus = (uint32_t)rack_gp_increase_per; 3832 /* Must be at least 1% increase for true timely increases */ 3833 if ((plus < 1) && 3834 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3835 plus = 1; 3836 if (rack->rc_gp_saw_rec && 3837 (rack->rc_gp_no_rec_chg == 0) && 3838 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3839 rack->r_ctl.rack_per_of_gp_rec)) { 3840 /* We have been in recovery ding it too */ 3841 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 3842 if (calc > 0xffff) 3843 calc = 0xffff; 3844 logged |= 1; 3845 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 3846 if (rack->r_ctl.rack_per_upper_bound_ca && 3847 (rack->rc_dragged_bottom == 0) && 3848 (rack->r_ctl.rack_per_of_gp_rec > rack->r_ctl.rack_per_upper_bound_ca)) 3849 rack->r_ctl.rack_per_of_gp_rec = rack->r_ctl.rack_per_upper_bound_ca; 3850 } 3851 if (rack->rc_gp_saw_ca && 3852 (rack->rc_gp_saw_ss == 0) && 3853 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3854 rack->r_ctl.rack_per_of_gp_ca)) { 3855 /* In CA */ 3856 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 3857 if (calc > 0xffff) 3858 calc = 0xffff; 3859 logged |= 2; 3860 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 3861 if (rack->r_ctl.rack_per_upper_bound_ca && 3862 (rack->rc_dragged_bottom == 0) && 3863 (rack->r_ctl.rack_per_of_gp_ca > rack->r_ctl.rack_per_upper_bound_ca)) 3864 rack->r_ctl.rack_per_of_gp_ca = rack->r_ctl.rack_per_upper_bound_ca; 3865 } 3866 if (rack->rc_gp_saw_ss && 3867 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3868 rack->r_ctl.rack_per_of_gp_ss)) { 3869 /* In SS */ 3870 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 3871 if (calc > 0xffff) 3872 calc = 0xffff; 3873 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 3874 if (rack->r_ctl.rack_per_upper_bound_ss && 3875 (rack->rc_dragged_bottom == 0) && 3876 (rack->r_ctl.rack_per_of_gp_ss > rack->r_ctl.rack_per_upper_bound_ss)) 3877 rack->r_ctl.rack_per_of_gp_ss = rack->r_ctl.rack_per_upper_bound_ss; 3878 logged |= 4; 3879 } 3880 if (logged && 3881 (rack->rc_gp_incr == 0)){ 3882 /* Go into increment mode */ 3883 rack->rc_gp_incr = 1; 3884 rack->rc_gp_timely_inc_cnt = 0; 3885 } 3886 if (rack->rc_gp_incr && 3887 logged && 3888 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 3889 rack->rc_gp_timely_inc_cnt++; 3890 } 3891 rack_log_timely(rack, logged, plus, 0, 0, 3892 __LINE__, 1); 3893 } 3894 3895 static uint32_t 3896 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 3897 { 3898 /*- 3899 * norm_grad = rtt_diff / minrtt; 3900 * new_per = curper * (1 - B * norm_grad) 3901 * 3902 * B = rack_gp_decrease_per (default 80%) 3903 * rtt_dif = input var current rtt-diff 3904 * curper = input var current percentage 3905 * minrtt = from rack filter 3906 * 3907 * In order to do the floating point calculations above we 3908 * do an integer conversion. 
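 * (Illustrative example with made-up numbers: rtt_diff = 2,000 usecs
 * against a filtered min rtt of 10,000 usecs gives norm_grad = 0.2, so
 * with B = 80 the result is curper * (1 - 0.8 * 0.2), i.e. 84% of the
 * previous percentage.)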
The code looks confusing so let me 3909 * translate it into something that use more variables and 3910 * is clearer for us humans :) 3911 * 3912 * uint64_t norm_grad, inverse, reduce_by, final_result; 3913 * uint32_t perf; 3914 * 3915 * norm_grad = (((uint64_t)rtt_diff * 1000000) / 3916 * (uint64_t)get_filter_small(&rack->r_ctl.rc_gp_min_rtt)); 3917 * inverse = ((uint64_t)rack_gp_decrease * (uint64_t)1000000) * norm_grad; 3918 * inverse /= 1000000; 3919 * reduce_by = (1000000 - inverse); 3920 * final_result = (cur_per * reduce_by) / 1000000; 3921 * perf = (uint32_t)final_result; 3922 */ 3923 uint64_t perf; 3924 3925 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3926 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 3927 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 3928 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 3929 (uint64_t)1000000)) / 3930 (uint64_t)1000000); 3931 if (perf > curper) { 3932 /* TSNH */ 3933 perf = curper - 1; 3934 } 3935 return ((uint32_t)perf); 3936 } 3937 3938 static uint32_t 3939 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 3940 { 3941 /* 3942 * highrttthresh 3943 * result = curper * (1 - (B * ( 1 - ------ )) 3944 * gp_srtt 3945 * 3946 * B = rack_gp_decrease_per (default .8 i.e. 80) 3947 * highrttthresh = filter_min * rack_gp_rtt_maxmul 3948 */ 3949 uint64_t perf; 3950 uint32_t highrttthresh; 3951 3952 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 3953 3954 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3955 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 3956 ((uint64_t)highrttthresh * (uint64_t)1000000) / 3957 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 3958 if (tcp_bblogging_on(rack->rc_tp)) { 3959 uint64_t log1; 3960 3961 log1 = rtt; 3962 log1 <<= 32; 3963 log1 |= highrttthresh; 3964 rack_log_timely(rack, 3965 rack_gp_decrease_per, 3966 (uint64_t)curper, 3967 log1, 3968 perf, 3969 __LINE__, 3970 15); 3971 } 3972 return (perf); 3973 } 3974 3975 static void 3976 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 3977 { 3978 uint64_t logvar, logvar2, logvar3; 3979 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 3980 3981 if (rack->rc_gp_incr) { 3982 /* Turn off increment counting */ 3983 rack->rc_gp_incr = 0; 3984 rack->rc_gp_timely_inc_cnt = 0; 3985 } 3986 ss_red = ca_red = rec_red = 0; 3987 logged = 0; 3988 /* Calculate the reduction value */ 3989 if (rtt_diff < 0) { 3990 rtt_diff *= -1; 3991 } 3992 /* Must be at least 1% reduction */ 3993 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 3994 /* We have been in recovery ding it too */ 3995 if (timely_says == 2) { 3996 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 3997 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3998 if (alt < new_per) 3999 val = alt; 4000 else 4001 val = new_per; 4002 } else 4003 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 4004 if (rack->r_ctl.rack_per_of_gp_rec > val) { 4005 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 4006 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 4007 } else { 4008 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 4009 rec_red = 0; 4010 } 4011 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 4012 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 4013 logged |= 1; 4014 } 4015 if (rack->rc_gp_saw_ss) { 4016 /* Sent in SS */ 4017 if (timely_says == 2) { 4018 new_per = 
rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 4019 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 4020 if (alt < new_per) 4021 val = alt; 4022 else 4023 val = new_per; 4024 } else 4025 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 4026 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 4027 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 4028 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 4029 } else { 4030 ss_red = new_per; 4031 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 4032 logvar = new_per; 4033 logvar <<= 32; 4034 logvar |= alt; 4035 logvar2 = (uint32_t)rtt; 4036 logvar2 <<= 32; 4037 logvar2 |= (uint32_t)rtt_diff; 4038 logvar3 = rack_gp_rtt_maxmul; 4039 logvar3 <<= 32; 4040 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4041 rack_log_timely(rack, timely_says, 4042 logvar2, logvar3, 4043 logvar, __LINE__, 10); 4044 } 4045 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 4046 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 4047 logged |= 4; 4048 } else if (rack->rc_gp_saw_ca) { 4049 /* Sent in CA */ 4050 if (timely_says == 2) { 4051 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 4052 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 4053 if (alt < new_per) 4054 val = alt; 4055 else 4056 val = new_per; 4057 } else 4058 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 4059 if (rack->r_ctl.rack_per_of_gp_ca > val) { 4060 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 4061 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val; 4062 } else { 4063 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 4064 ca_red = 0; 4065 logvar = new_per; 4066 logvar <<= 32; 4067 logvar |= alt; 4068 logvar2 = (uint32_t)rtt; 4069 logvar2 <<= 32; 4070 logvar2 |= (uint32_t)rtt_diff; 4071 logvar3 = rack_gp_rtt_maxmul; 4072 logvar3 <<= 32; 4073 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4074 rack_log_timely(rack, timely_says, 4075 logvar2, logvar3, 4076 logvar, __LINE__, 10); 4077 } 4078 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 4079 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 4080 logged |= 2; 4081 } 4082 if (rack->rc_gp_timely_dec_cnt < 0x7) { 4083 rack->rc_gp_timely_dec_cnt++; 4084 if (rack_timely_dec_clear && 4085 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 4086 rack->rc_gp_timely_dec_cnt = 0; 4087 } 4088 logvar = ss_red; 4089 logvar <<= 32; 4090 logvar |= ca_red; 4091 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 4092 __LINE__, 2); 4093 } 4094 4095 static void 4096 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 4097 uint32_t rtt, uint32_t line, uint8_t reas) 4098 { 4099 if (tcp_bblogging_on(rack->rc_tp)) { 4100 union tcp_log_stackspecific log; 4101 struct timeval tv; 4102 4103 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4104 log.u_bbr.flex1 = line; 4105 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 4106 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 4107 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 4108 log.u_bbr.flex5 = rtt; 4109 log.u_bbr.flex6 = rack->rc_highly_buffered; 4110 log.u_bbr.flex6 <<= 1; 4111 log.u_bbr.flex6 |= rack->forced_ack; 4112 log.u_bbr.flex6 <<= 1; 4113 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 4114 log.u_bbr.flex6 <<= 1; 4115 log.u_bbr.flex6 |= rack->in_probe_rtt; 4116 log.u_bbr.flex6 <<= 1; 4117 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 4118 log.u_bbr.flex7 = 
rack->r_ctl.rack_per_of_gp_probertt; 4119 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 4120 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 4121 log.u_bbr.flex8 = reas; 4122 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4123 log.u_bbr.delRate = rack_get_bw(rack); 4124 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 4125 log.u_bbr.cur_del_rate <<= 32; 4126 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 4127 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 4128 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 4129 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 4130 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 4131 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 4132 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 4133 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 4134 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4135 log.u_bbr.rttProp = us_cts; 4136 log.u_bbr.rttProp <<= 32; 4137 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 4138 TCP_LOG_EVENTP(rack->rc_tp, NULL, 4139 &rack->rc_inp->inp_socket->so_rcv, 4140 &rack->rc_inp->inp_socket->so_snd, 4141 BBR_LOG_RTT_SHRINKS, 0, 4142 0, &log, false, &rack->r_ctl.act_rcv_time); 4143 } 4144 } 4145 4146 static void 4147 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 4148 { 4149 uint64_t bwdp; 4150 4151 bwdp = rack_get_bw(rack); 4152 bwdp *= (uint64_t)rtt; 4153 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 4154 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 4155 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) { 4156 /* 4157 * A window protocol must be able to have 4 packets 4158 * outstanding as the floor in order to function 4159 * (especially considering delayed ack :D). 4160 */ 4161 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 4162 } 4163 } 4164 4165 static void 4166 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 4167 { 4168 /** 4169 * ProbeRTT is a bit different in rack_pacing than in 4170 * BBR. It is like BBR in that it uses the lowering of 4171 * the RTT as a signal that we saw something new and 4172 * counts from there for how long between. But it is 4173 * different in that its quite simple. It does not 4174 * play with the cwnd and wait until we get down 4175 * to N segments outstanding and hold that for 4176 * 200ms. Instead it just sets the pacing reduction 4177 * rate to a set percentage (70 by default) and hold 4178 * that for a number of recent GP Srtt's. 4179 */ 4180 uint32_t segsiz; 4181 4182 if (rack->rc_gp_dyn_mul == 0) 4183 return; 4184 4185 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 4186 /* We are idle */ 4187 return; 4188 } 4189 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 4190 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 4191 /* 4192 * Stop the goodput now, the idea here is 4193 * that future measurements with in_probe_rtt 4194 * won't register if they are not greater so 4195 * we want to get what info (if any) is available 4196 * now. 
4197 */ 4198 rack_do_goodput_measurement(rack->rc_tp, rack, 4199 rack->rc_tp->snd_una, __LINE__, 4200 RACK_QUALITY_PROBERTT); 4201 } 4202 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 4203 rack->r_ctl.rc_time_probertt_entered = us_cts; 4204 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 4205 rack->r_ctl.rc_pace_min_segs); 4206 rack->in_probe_rtt = 1; 4207 rack->measure_saw_probe_rtt = 1; 4208 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 4209 rack->r_ctl.rc_time_probertt_starts = 0; 4210 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 4211 if (rack_probertt_use_min_rtt_entry) 4212 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 4213 else 4214 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 4215 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4216 __LINE__, RACK_RTTS_ENTERPROBE); 4217 } 4218 4219 static void 4220 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 4221 { 4222 struct rack_sendmap *rsm; 4223 uint32_t segsiz; 4224 4225 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 4226 rack->r_ctl.rc_pace_min_segs); 4227 rack->in_probe_rtt = 0; 4228 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 4229 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 4230 /* 4231 * Stop the goodput now, the idea here is 4232 * that future measurements with in_probe_rtt 4233 * won't register if they are not greater so 4234 * we want to get what info (if any) is available 4235 * now. 4236 */ 4237 rack_do_goodput_measurement(rack->rc_tp, rack, 4238 rack->rc_tp->snd_una, __LINE__, 4239 RACK_QUALITY_PROBERTT); 4240 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 4241 /* 4242 * We don't have enough data to make a measurement. 4243 * So lets just stop and start here after exiting 4244 * probe-rtt. We probably are not interested in 4245 * the results anyway. 4246 */ 4247 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 4248 } 4249 /* 4250 * Measurements through the current snd_max are going 4251 * to be limited by the slower pacing rate. 4252 * 4253 * We need to mark these as app-limited so we 4254 * don't collapse the b/w. 4255 */ 4256 rsm = tqhash_max(rack->r_ctl.tqh); 4257 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 4258 if (rack->r_ctl.rc_app_limited_cnt == 0) 4259 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 4260 else { 4261 /* 4262 * Go out to the end app limited and mark 4263 * this new one as next and move the end_appl up 4264 * to this guy. 4265 */ 4266 if (rack->r_ctl.rc_end_appl) 4267 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 4268 rack->r_ctl.rc_end_appl = rsm; 4269 } 4270 rsm->r_flags |= RACK_APP_LIMITED; 4271 rack->r_ctl.rc_app_limited_cnt++; 4272 } 4273 /* 4274 * Now, we need to examine our pacing rate multipliers. 4275 * If its under 100%, we need to kick it back up to 4276 * 100%. We also don't let it be over our "max" above 4277 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 4278 * Note setting clamp_atexit_prtt to 0 has the effect 4279 * of setting CA/SS to 100% always at exit (which is 4280 * the default behavior). 4281 */ 4282 if (rack_probertt_clear_is) { 4283 rack->rc_gp_incr = 0; 4284 rack->rc_gp_bwred = 0; 4285 rack->rc_gp_timely_inc_cnt = 0; 4286 rack->rc_gp_timely_dec_cnt = 0; 4287 } 4288 /* Do we do any clamping at exit? 
*/ 4289 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 4290 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 4291 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 4292 } 4293 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 4294 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 4295 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 4296 } 4297 /* 4298 * Lets set rtt_diff to 0, so that we will get a "boost" 4299 * after exiting. 4300 */ 4301 rack->r_ctl.rc_rtt_diff = 0; 4302 4303 /* Clear all flags so we start fresh */ 4304 rack->rc_tp->t_bytes_acked = 0; 4305 rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 4306 /* 4307 * If configured to, set the cwnd and ssthresh to 4308 * our targets. 4309 */ 4310 if (rack_probe_rtt_sets_cwnd) { 4311 uint64_t ebdp; 4312 uint32_t setto; 4313 4314 /* Set ssthresh so we get into CA once we hit our target */ 4315 if (rack_probertt_use_min_rtt_exit == 1) { 4316 /* Set to min rtt */ 4317 rack_set_prtt_target(rack, segsiz, 4318 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 4319 } else if (rack_probertt_use_min_rtt_exit == 2) { 4320 /* Set to current gp rtt */ 4321 rack_set_prtt_target(rack, segsiz, 4322 rack->r_ctl.rc_gp_srtt); 4323 } else if (rack_probertt_use_min_rtt_exit == 3) { 4324 /* Set to entry gp rtt */ 4325 rack_set_prtt_target(rack, segsiz, 4326 rack->r_ctl.rc_entry_gp_rtt); 4327 } else { 4328 uint64_t sum; 4329 uint32_t setval; 4330 4331 sum = rack->r_ctl.rc_entry_gp_rtt; 4332 sum *= 10; 4333 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 4334 if (sum >= 20) { 4335 /* 4336 * A highly buffered path needs 4337 * cwnd space for timely to work. 4338 * Lets set things up as if 4339 * we are heading back here again. 4340 */ 4341 setval = rack->r_ctl.rc_entry_gp_rtt; 4342 } else if (sum >= 15) { 4343 /* 4344 * Lets take the smaller of the 4345 * two since we are just somewhat 4346 * buffered. 4347 */ 4348 setval = rack->r_ctl.rc_gp_srtt; 4349 if (setval > rack->r_ctl.rc_entry_gp_rtt) 4350 setval = rack->r_ctl.rc_entry_gp_rtt; 4351 } else { 4352 /* 4353 * Here we are not highly buffered 4354 * and should pick the min we can to 4355 * keep from causing loss. 
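 * (Illustrative example: rc_entry_gp_rtt = 30,000 usecs against a
 * rc_gp_srtt of 12,000 usecs gives sum = 25, i.e. the entry RTT is at
 * least twice the current gp srtt, so the highly-buffered branch above
 * is taken; an entry RTT of 11,000 usecs against a 10,000 usec srtt
 * gives sum = 11 and lands here on the filtered min RTT.)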
4356 */ 4357 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 4358 } 4359 rack_set_prtt_target(rack, segsiz, 4360 setval); 4361 } 4362 if (rack_probe_rtt_sets_cwnd > 1) { 4363 /* There is a percentage here to boost */ 4364 ebdp = rack->r_ctl.rc_target_probertt_flight; 4365 ebdp *= rack_probe_rtt_sets_cwnd; 4366 ebdp /= 100; 4367 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 4368 } else 4369 setto = rack->r_ctl.rc_target_probertt_flight; 4370 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 4371 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 4372 /* Enforce a min */ 4373 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 4374 } 4375 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 4376 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 4377 } 4378 rack_log_rtt_shrinks(rack, us_cts, 4379 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4380 __LINE__, RACK_RTTS_EXITPROBE); 4381 /* Clear times last so log has all the info */ 4382 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 4383 rack->r_ctl.rc_time_probertt_entered = us_cts; 4384 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 4385 rack->r_ctl.rc_time_of_last_probertt = us_cts; 4386 } 4387 4388 static void 4389 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 4390 { 4391 /* Check in on probe-rtt */ 4392 if (rack->rc_gp_filled == 0) { 4393 /* We do not do p-rtt unless we have gp measurements */ 4394 return; 4395 } 4396 if (rack->in_probe_rtt) { 4397 uint64_t no_overflow; 4398 uint32_t endtime, must_stay; 4399 4400 if (rack->r_ctl.rc_went_idle_time && 4401 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 4402 /* 4403 * We went idle during prtt, just exit now. 4404 */ 4405 rack_exit_probertt(rack, us_cts); 4406 } else if (rack_probe_rtt_safety_val && 4407 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 4408 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 4409 /* 4410 * Probe RTT safety value triggered! 4411 */ 4412 rack_log_rtt_shrinks(rack, us_cts, 4413 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4414 __LINE__, RACK_RTTS_SAFETY); 4415 rack_exit_probertt(rack, us_cts); 4416 } 4417 /* Calculate the max we will wait */ 4418 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 4419 if (rack->rc_highly_buffered) 4420 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 4421 /* Calculate the min we must wait */ 4422 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 4423 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 4424 TSTMP_LT(us_cts, endtime)) { 4425 uint32_t calc; 4426 /* Do we lower more? 
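 * (Illustrative example with made-up numbers: with rc_gp_srtt = 50,000
 * usecs and 150,000 usecs already spent in probe-rtt, calc = 3, so with
 * rack_per_of_gp_probertt_reduce = 10 the pacing percentage drops 30
 * points below rack_per_of_gp_probertt, floored at
 * rack_per_of_gp_lowthresh.)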
*/ 4427 no_exit: 4428 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 4429 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 4430 else 4431 calc = 0; 4432 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 4433 if (calc) { 4434 /* Maybe */ 4435 calc *= rack_per_of_gp_probertt_reduce; 4436 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 4437 /* Limit it too */ 4438 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 4439 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 4440 } 4441 /* We must reach target or the time set */ 4442 return; 4443 } 4444 if (rack->r_ctl.rc_time_probertt_starts == 0) { 4445 if ((TSTMP_LT(us_cts, must_stay) && 4446 rack->rc_highly_buffered) || 4447 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 4448 rack->r_ctl.rc_target_probertt_flight)) { 4449 /* We are not past the must_stay time */ 4450 goto no_exit; 4451 } 4452 rack_log_rtt_shrinks(rack, us_cts, 4453 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4454 __LINE__, RACK_RTTS_REACHTARGET); 4455 rack->r_ctl.rc_time_probertt_starts = us_cts; 4456 if (rack->r_ctl.rc_time_probertt_starts == 0) 4457 rack->r_ctl.rc_time_probertt_starts = 1; 4458 /* Restore back to our rate we want to pace at in prtt */ 4459 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 4460 } 4461 /* 4462 * Setup our end time, some number of gp_srtts plus 200ms. 4463 */ 4464 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 4465 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 4466 if (rack_probertt_gpsrtt_cnt_div) 4467 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 4468 else 4469 endtime = 0; 4470 endtime += rack_min_probertt_hold; 4471 endtime += rack->r_ctl.rc_time_probertt_starts; 4472 if (TSTMP_GEQ(us_cts, endtime)) { 4473 /* yes, exit probertt */ 4474 rack_exit_probertt(rack, us_cts); 4475 } 4476 4477 } else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) { 4478 /* Go into probertt, its been too long since we went lower */ 4479 rack_enter_probertt(rack, us_cts); 4480 } 4481 } 4482 4483 static void 4484 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 4485 uint32_t rtt, int32_t rtt_diff) 4486 { 4487 uint64_t cur_bw, up_bnd, low_bnd, subfr; 4488 uint32_t losses; 4489 4490 if ((rack->rc_gp_dyn_mul == 0) || 4491 (rack->use_fixed_rate) || 4492 (rack->in_probe_rtt) || 4493 (rack->rc_always_pace == 0)) { 4494 /* No dynamic GP multiplier in play */ 4495 return; 4496 } 4497 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 4498 cur_bw = rack_get_bw(rack); 4499 /* Calculate our up and down range */ 4500 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 4501 up_bnd /= 100; 4502 up_bnd += rack->r_ctl.last_gp_comp_bw; 4503 4504 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 4505 subfr /= 100; 4506 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 4507 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 4508 /* 4509 * This is the case where our RTT is above 4510 * the max target and we have been configured 4511 * to just do timely no bonus up stuff in that case. 4512 * 4513 * There are two configurations, set to 1, and we 4514 * just do timely if we are over our max. If its 4515 * set above 1 then we slam the multipliers down 4516 * to 100 and then decrement per timely. 
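 * (Added note: so rc_no_push_at_mrtt = 1 only applies the timely
 * decrease below, while a value of 2 or more first calls
 * rack_validate_multipliers_at_or_below_100() to clamp the SS/CA
 * multipliers to 100% and then applies the decrease.)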
4517 */ 4518 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4519 __LINE__, 3); 4520 if (rack->r_ctl.rc_no_push_at_mrtt > 1) 4521 rack_validate_multipliers_at_or_below_100(rack); 4522 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4523 } else if ((timely_says != 0) && (last_bw_est < low_bnd) && !losses) { 4524 /* 4525 * We are decreasing this is a bit complicated this 4526 * means we are loosing ground. This could be 4527 * because another flow entered and we are competing 4528 * for b/w with it. This will push the RTT up which 4529 * makes timely unusable unless we want to get shoved 4530 * into a corner and just be backed off (the age 4531 * old problem with delay based CC). 4532 * 4533 * On the other hand if it was a route change we 4534 * would like to stay somewhat contained and not 4535 * blow out the buffers. 4536 */ 4537 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4538 __LINE__, 3); 4539 rack->r_ctl.last_gp_comp_bw = cur_bw; 4540 if (rack->rc_gp_bwred == 0) { 4541 /* Go into reduction counting */ 4542 rack->rc_gp_bwred = 1; 4543 rack->rc_gp_timely_dec_cnt = 0; 4544 } 4545 if (rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) { 4546 /* 4547 * Push another time with a faster pacing 4548 * to try to gain back (we include override to 4549 * get a full raise factor). 4550 */ 4551 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || 4552 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || 4553 (timely_says == 0) || 4554 (rack_down_raise_thresh == 0)) { 4555 /* 4556 * Do an override up in b/w if we were 4557 * below the threshold or if the threshold 4558 * is zero we always do the raise. 4559 */ 4560 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1); 4561 } else { 4562 /* Log it stays the same */ 4563 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0, 4564 __LINE__, 11); 4565 } 4566 rack->rc_gp_timely_dec_cnt++; 4567 /* We are not incrementing really no-count */ 4568 rack->rc_gp_incr = 0; 4569 rack->rc_gp_timely_inc_cnt = 0; 4570 } else { 4571 /* 4572 * Lets just use the RTT 4573 * information and give up 4574 * pushing. 4575 */ 4576 goto use_timely; 4577 } 4578 } else if ((timely_says != 2) && 4579 !losses && 4580 (last_bw_est > up_bnd)) { 4581 /* 4582 * We are increasing b/w lets keep going, updating 4583 * our b/w and ignoring any timely input, unless 4584 * of course we are at our max raise (if there is one). 4585 */ 4586 4587 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4588 __LINE__, 3); 4589 rack->r_ctl.last_gp_comp_bw = cur_bw; 4590 if (rack->rc_gp_saw_ss && 4591 rack->r_ctl.rack_per_upper_bound_ss && 4592 (rack->r_ctl.rack_per_of_gp_ss == rack->r_ctl.rack_per_upper_bound_ss)) { 4593 /* 4594 * In cases where we can't go higher 4595 * we should just use timely. 4596 */ 4597 goto use_timely; 4598 } 4599 if (rack->rc_gp_saw_ca && 4600 rack->r_ctl.rack_per_upper_bound_ca && 4601 (rack->r_ctl.rack_per_of_gp_ca == rack->r_ctl.rack_per_upper_bound_ca)) { 4602 /* 4603 * In cases where we can't go higher 4604 * we should just use timely. 
4605 */ 4606 goto use_timely; 4607 } 4608 rack->rc_gp_bwred = 0; 4609 rack->rc_gp_timely_dec_cnt = 0; 4610 /* You get a set number of pushes if timely is trying to reduce */ 4611 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { 4612 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4613 } else { 4614 /* Log it stays the same */ 4615 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0, 4616 __LINE__, 12); 4617 } 4618 return; 4619 } else { 4620 /* 4621 * We are staying between the lower and upper range bounds 4622 * so use timely to decide. 4623 */ 4624 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4625 __LINE__, 3); 4626 use_timely: 4627 if (timely_says) { 4628 rack->rc_gp_incr = 0; 4629 rack->rc_gp_timely_inc_cnt = 0; 4630 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && 4631 !losses && 4632 (last_bw_est < low_bnd)) { 4633 /* We are loosing ground */ 4634 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4635 rack->rc_gp_timely_dec_cnt++; 4636 /* We are not incrementing really no-count */ 4637 rack->rc_gp_incr = 0; 4638 rack->rc_gp_timely_inc_cnt = 0; 4639 } else 4640 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4641 } else { 4642 rack->rc_gp_bwred = 0; 4643 rack->rc_gp_timely_dec_cnt = 0; 4644 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4645 } 4646 } 4647 } 4648 4649 static int32_t 4650 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt) 4651 { 4652 int32_t timely_says; 4653 uint64_t log_mult, log_rtt_a_diff; 4654 4655 log_rtt_a_diff = rtt; 4656 log_rtt_a_diff <<= 32; 4657 log_rtt_a_diff |= (uint32_t)rtt_diff; 4658 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * 4659 rack_gp_rtt_maxmul)) { 4660 /* Reduce the b/w multiplier */ 4661 timely_says = 2; 4662 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 4663 log_mult <<= 32; 4664 log_mult |= prev_rtt; 4665 rack_log_timely(rack, timely_says, log_mult, 4666 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4667 log_rtt_a_diff, __LINE__, 4); 4668 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4669 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4670 max(rack_gp_rtt_mindiv , 1)))) { 4671 /* Increase the b/w multiplier */ 4672 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4673 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4674 max(rack_gp_rtt_mindiv , 1)); 4675 log_mult <<= 32; 4676 log_mult |= prev_rtt; 4677 timely_says = 0; 4678 rack_log_timely(rack, timely_says, log_mult , 4679 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4680 log_rtt_a_diff, __LINE__, 5); 4681 } else { 4682 /* 4683 * Use a gradient to find it the timely gradient 4684 * is: 4685 * grad = rc_rtt_diff / min_rtt; 4686 * 4687 * anything below or equal to 0 will be 4688 * a increase indication. Anything above 4689 * zero is a decrease. Note we take care 4690 * of the actual gradient calculation 4691 * in the reduction (its not needed for 4692 * increase). 
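 *
 * Worked example (hypothetical numbers): with min_rtt = 20000 usec,
 * an rc_rtt_diff of -1500 gives a non-positive gradient, so we return
 * 0 and allow the multipliers to rise; an rc_rtt_diff of +3000 gives
 * a positive gradient, so we return 1 and the decrease path scales
 * its reduction by that gradient.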
4693 */ 4694 log_mult = prev_rtt; 4695 if (rtt_diff <= 0) { 4696 /* 4697 * Rttdiff is less than zero, increase the 4698 * b/w multiplier (its 0 or negative) 4699 */ 4700 timely_says = 0; 4701 rack_log_timely(rack, timely_says, log_mult, 4702 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4703 } else { 4704 /* Reduce the b/w multiplier */ 4705 timely_says = 1; 4706 rack_log_timely(rack, timely_says, log_mult, 4707 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4708 } 4709 } 4710 return (timely_says); 4711 } 4712 4713 static __inline int 4714 rack_in_gp_window(struct tcpcb *tp, struct rack_sendmap *rsm) 4715 { 4716 if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && 4717 SEQ_LEQ(rsm->r_end, tp->gput_ack)) { 4718 /** 4719 * This covers the case that the 4720 * resent is completely inside 4721 * the gp range or up to it. 4722 * |----------------| 4723 * |-----| <or> 4724 * |----| 4725 * <or> |---| 4726 */ 4727 return (1); 4728 } else if (SEQ_LT(rsm->r_start, tp->gput_seq) && 4729 SEQ_GT(rsm->r_end, tp->gput_seq)){ 4730 /** 4731 * This covers the case of 4732 * |--------------| 4733 * |-------->| 4734 */ 4735 return (1); 4736 } else if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && 4737 SEQ_LT(rsm->r_start, tp->gput_ack) && 4738 SEQ_GEQ(rsm->r_end, tp->gput_ack)) { 4739 4740 /** 4741 * This covers the case of 4742 * |--------------| 4743 * |-------->| 4744 */ 4745 return (1); 4746 } 4747 return (0); 4748 } 4749 4750 static __inline void 4751 rack_mark_in_gp_win(struct tcpcb *tp, struct rack_sendmap *rsm) 4752 { 4753 4754 if ((tp->t_flags & TF_GPUTINPROG) == 0) 4755 return; 4756 /* 4757 * We have a Goodput measurement in progress. Mark 4758 * the send if its within the window. If its not 4759 * in the window make sure it does not have the mark. 4760 */ 4761 if (rack_in_gp_window(tp, rsm)) 4762 rsm->r_flags |= RACK_IN_GP_WIN; 4763 else 4764 rsm->r_flags &= ~RACK_IN_GP_WIN; 4765 } 4766 4767 static __inline void 4768 rack_clear_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) 4769 { 4770 /* A GP measurement is ending, clear all marks on the send map*/ 4771 struct rack_sendmap *rsm = NULL; 4772 4773 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 4774 if (rsm == NULL) { 4775 rsm = tqhash_min(rack->r_ctl.tqh); 4776 } 4777 /* Nothing left? */ 4778 while ((rsm != NULL) && (SEQ_GEQ(tp->gput_ack, rsm->r_start))){ 4779 rsm->r_flags &= ~RACK_IN_GP_WIN; 4780 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4781 } 4782 } 4783 4784 4785 static __inline void 4786 rack_tend_gp_marks(struct tcpcb *tp, struct tcp_rack *rack) 4787 { 4788 struct rack_sendmap *rsm = NULL; 4789 4790 if (tp->snd_una == tp->snd_max) { 4791 /* Nothing outstanding yet, nothing to do here */ 4792 return; 4793 } 4794 if (SEQ_GT(tp->gput_seq, tp->snd_una)) { 4795 /* 4796 * We are measuring ahead of some outstanding 4797 * data. We need to walk through up until we get 4798 * to gp_seq marking so that no rsm is set incorrectly 4799 * with RACK_IN_GP_WIN. 4800 */ 4801 rsm = tqhash_min(rack->r_ctl.tqh); 4802 while (rsm != NULL) { 4803 rack_mark_in_gp_win(tp, rsm); 4804 if (SEQ_GEQ(rsm->r_end, tp->gput_seq)) 4805 break; 4806 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4807 } 4808 } 4809 if (rsm == NULL) { 4810 /* 4811 * Need to find the GP seq, if rsm is 4812 * set we stopped as we hit it. 
4813 */ 4814 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 4815 if (rsm == NULL) 4816 return; 4817 rack_mark_in_gp_win(tp, rsm); 4818 } 4819 /* 4820 * Now we may need to mark already sent rsm, ahead of 4821 * gput_seq in the window since they may have been sent 4822 * *before* we started our measurment. The rsm, if non-null 4823 * has been marked (note if rsm would have been NULL we would have 4824 * returned in the previous block). So we go to the next, and continue 4825 * until we run out of entries or we exceed the gp_ack value. 4826 */ 4827 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4828 while (rsm) { 4829 rack_mark_in_gp_win(tp, rsm); 4830 if (SEQ_GT(rsm->r_end, tp->gput_ack)) 4831 break; 4832 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 4833 } 4834 } 4835 4836 static void 4837 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 4838 tcp_seq th_ack, int line, uint8_t quality) 4839 { 4840 uint64_t tim, bytes_ps, stim, utim; 4841 uint32_t segsiz, bytes, reqbytes, us_cts; 4842 int32_t gput, new_rtt_diff, timely_says; 4843 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 4844 int did_add = 0; 4845 4846 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4847 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4848 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 4849 tim = us_cts - tp->gput_ts; 4850 else 4851 tim = 0; 4852 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 4853 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 4854 else 4855 stim = 0; 4856 /* 4857 * Use the larger of the send time or ack time. This prevents us 4858 * from being influenced by ack artifacts to come up with too 4859 * high of measurement. Note that since we are spanning over many more 4860 * bytes in most of our measurements hopefully that is less likely to 4861 * occur. 4862 */ 4863 if (tim > stim) 4864 utim = max(tim, 1); 4865 else 4866 utim = max(stim, 1); 4867 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 4868 rack_log_gpset(rack, th_ack, us_cts, rack->r_ctl.rc_gp_cumack_ts, __LINE__, 3, NULL); 4869 if ((tim == 0) && (stim == 0)) { 4870 /* 4871 * Invalid measurement time, maybe 4872 * all on one ack/one send? 4873 */ 4874 bytes = 0; 4875 bytes_ps = 0; 4876 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4877 0, 0, 0, 10, __LINE__, NULL, quality); 4878 goto skip_measurement; 4879 } 4880 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 4881 /* We never made a us_rtt measurement? */ 4882 bytes = 0; 4883 bytes_ps = 0; 4884 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4885 0, 0, 0, 10, __LINE__, NULL, quality); 4886 goto skip_measurement; 4887 } 4888 /* 4889 * Calculate the maximum possible b/w this connection 4890 * could have. We base our calculation on the lowest 4891 * rtt we have seen during the measurement and the 4892 * largest rwnd the client has given us in that time. This 4893 * forms a BDP that is the maximum that we could ever 4894 * get to the client. Anything larger is not valid. 4895 * 4896 * I originally had code here that rejected measurements 4897 * where the time was less than 1/2 the latest us_rtt. 4898 * But after thinking on that I realized its wrong since 4899 * say you had a 150Mbps or even 1Gbps link, and you 4900 * were a long way away.. example I am in Europe (100ms rtt) 4901 * talking to my 1Gbps link in S.C. Now measuring say 150,000 4902 * bytes my time would be 1.2ms, and yet my rtt would say 4903 * the measurement was invalid the time was < 50ms. The 4904 * same thing is true for 150Mb (8ms of time). 
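 *
 * (Sanity check on those numbers: 150,000 bytes is 1,200,000 bits,
 * which is 1.2ms of wire time at 1Gbps and 8ms at 150Mbps, both far
 * below the 100ms rtt, so such a cutoff discards perfectly good
 * samples.)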
4905 * 4906 * A better way I realized is to look at what the maximum 4907 * the connection could possibly do. This is gated on 4908 * the lowest RTT we have seen and the highest rwnd. 4909 * We should in theory never exceed that, if we are 4910 * then something on the path is storing up packets 4911 * and then feeding them all at once to our endpoint 4912 * messing up our measurement. 4913 */ 4914 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 4915 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 4916 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 4917 if (SEQ_LT(th_ack, tp->gput_seq)) { 4918 /* No measurement can be made */ 4919 bytes = 0; 4920 bytes_ps = 0; 4921 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4922 0, 0, 0, 10, __LINE__, NULL, quality); 4923 goto skip_measurement; 4924 } else 4925 bytes = (th_ack - tp->gput_seq); 4926 bytes_ps = (uint64_t)bytes; 4927 /* 4928 * Don't measure a b/w for pacing unless we have gotten at least 4929 * an initial windows worth of data in this measurement interval. 4930 * 4931 * Small numbers of bytes get badly influenced by delayed ack and 4932 * other artifacts. Note we take the initial window or our 4933 * defined minimum GP (defaulting to 10 which hopefully is the 4934 * IW). 4935 */ 4936 if (rack->rc_gp_filled == 0) { 4937 /* 4938 * The initial estimate is special. We 4939 * have blasted out an IW worth of packets 4940 * without a real valid ack ts results. We 4941 * then setup the app_limited_needs_set flag, 4942 * this should get the first ack in (probably 2 4943 * MSS worth) to be recorded as the timestamp. 4944 * We thus allow a smaller number of bytes i.e. 4945 * IW - 2MSS. 4946 */ 4947 reqbytes -= (2 * segsiz); 4948 /* Also lets fill previous for our first measurement to be neutral */ 4949 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4950 } 4951 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 4952 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4953 rack->r_ctl.rc_app_limited_cnt, 4954 0, 0, 10, __LINE__, NULL, quality); 4955 goto skip_measurement; 4956 } 4957 /* 4958 * We now need to calculate the Timely like status so 4959 * we can update (possibly) the b/w multipliers. 4960 */ 4961 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 4962 if (rack->rc_gp_filled == 0) { 4963 /* No previous reading */ 4964 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 4965 } else { 4966 if (rack->measure_saw_probe_rtt == 0) { 4967 /* 4968 * We don't want a probertt to be counted 4969 * since it will be negative incorrectly. We 4970 * expect to be reducing the RTT when we 4971 * pace at a slower rate. 4972 */ 4973 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 4974 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 4975 } 4976 } 4977 timely_says = rack_make_timely_judgement(rack, 4978 rack->r_ctl.rc_gp_srtt, 4979 rack->r_ctl.rc_rtt_diff, 4980 rack->r_ctl.rc_prev_gp_srtt 4981 ); 4982 bytes_ps *= HPTS_USEC_IN_SEC; 4983 bytes_ps /= utim; 4984 if (bytes_ps > rack->r_ctl.last_max_bw) { 4985 /* 4986 * Something is on path playing 4987 * since this b/w is not possible based 4988 * on our BDP (highest rwnd and lowest rtt 4989 * we saw in the measurement window). 4990 * 4991 * Another option here would be to 4992 * instead skip the measurement. 
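 *
 * Illustrative cap (hypothetical numbers, using the calculation made
 * above): with rc_gp_high_rwnd = 1,000,000 bytes and rc_gp_lowrtt =
 * 10,000 usec, last_max_bw works out to 100,000,000 bytes/sec; any
 * bytes_ps above that is clamped down to it below.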
4993 */ 4994 rack_log_pacing_delay_calc(rack, bytes, reqbytes, 4995 bytes_ps, rack->r_ctl.last_max_bw, 0, 4996 11, __LINE__, NULL, quality); 4997 bytes_ps = rack->r_ctl.last_max_bw; 4998 } 4999 /* We store gp for b/w in bytes per second */ 5000 if (rack->rc_gp_filled == 0) { 5001 /* Initial measurement */ 5002 if (bytes_ps) { 5003 rack->r_ctl.gp_bw = bytes_ps; 5004 rack->rc_gp_filled = 1; 5005 rack->r_ctl.num_measurements = 1; 5006 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 5007 } else { 5008 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 5009 rack->r_ctl.rc_app_limited_cnt, 5010 0, 0, 10, __LINE__, NULL, quality); 5011 } 5012 if (tcp_in_hpts(rack->rc_tp) && 5013 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 5014 /* 5015 * Ok we can't trust the pacer in this case 5016 * where we transition from un-paced to paced. 5017 * Or for that matter when the burst mitigation 5018 * was making a wild guess and got it wrong. 5019 * Stop the pacer and clear up all the aggregate 5020 * delays etc. 5021 */ 5022 tcp_hpts_remove(rack->rc_tp); 5023 rack->r_ctl.rc_hpts_flags = 0; 5024 rack->r_ctl.rc_last_output_to = 0; 5025 } 5026 did_add = 2; 5027 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { 5028 /* Still a small number run an average */ 5029 rack->r_ctl.gp_bw += bytes_ps; 5030 addpart = rack->r_ctl.num_measurements; 5031 rack->r_ctl.num_measurements++; 5032 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 5033 /* We have collected enough to move forward */ 5034 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; 5035 } 5036 rack_set_pace_segments(tp, rack, __LINE__, NULL); 5037 did_add = 3; 5038 } else { 5039 /* 5040 * We want to take 1/wma of the goodput and add in to 7/8th 5041 * of the old value weighted by the srtt. So if your measurement 5042 * period is say 2 SRTT's long you would get 1/4 as the 5043 * value, if it was like 1/2 SRTT then you would get 1/16th. 5044 * 5045 * But we must be careful not to take too much i.e. if the 5046 * srtt is say 20ms and the measurement is taken over 5047 * 400ms our weight would be 400/20 i.e. 20. On the 5048 * other hand if we get a measurement over 1ms with a 5049 * 10ms rtt we only want to take a much smaller portion. 5050 */ 5051 if (rack->r_ctl.num_measurements < 0xff) { 5052 rack->r_ctl.num_measurements++; 5053 } 5054 srtt = (uint64_t)tp->t_srtt; 5055 if (srtt == 0) { 5056 /* 5057 * Strange why did t_srtt go back to zero? 5058 */ 5059 if (rack->r_ctl.rc_rack_min_rtt) 5060 srtt = rack->r_ctl.rc_rack_min_rtt; 5061 else 5062 srtt = HPTS_USEC_IN_MSEC; 5063 } 5064 /* 5065 * XXXrrs: Note for reviewers, in playing with 5066 * dynamic pacing I discovered this GP calculation 5067 * as done originally leads to some undesired results. 5068 * Basically you can get longer measurements contributing 5069 * too much to the WMA. Thus I changed it if you are doing 5070 * dynamic adjustments to only do the aportioned adjustment 5071 * if we have a very small (time wise) measurement. Longer 5072 * measurements just get there weight (defaulting to 1/8) 5073 * add to the WMA. We may want to think about changing 5074 * this to always do that for both sides i.e. dynamic 5075 * and non-dynamic... but considering lots of folks 5076 * were playing with this I did not want to change the 5077 * calculation per.se. without your thoughts.. Lawerence? 5078 * Peter?? 
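 *
 * Rough sketch of the weighting below (hypothetical numbers): in the
 * dynamic case with srtt = 20000 usec, utim = 5000 usec and
 * rack_wma_divisor = 8, the new sample only contributes
 * utim / (srtt * 8) = 1/32 of its value, i.e.
 * gp_bw = gp_bw - gp_bw/32 + bytes_ps/32, while a measurement whose
 * utim/srtt ratio exceeds 1 just contributes the flat
 * 1/rack_wma_divisor share.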
5079 */ 5080 if (rack->rc_gp_dyn_mul == 0) { 5081 subpart = rack->r_ctl.gp_bw * utim; 5082 subpart /= (srtt * 8); 5083 if (subpart < (rack->r_ctl.gp_bw / 2)) { 5084 /* 5085 * The b/w update takes no more 5086 * away then 1/2 our running total 5087 * so factor it in. 5088 */ 5089 addpart = bytes_ps * utim; 5090 addpart /= (srtt * 8); 5091 } else { 5092 /* 5093 * Don't allow a single measurement 5094 * to account for more than 1/2 of the 5095 * WMA. This could happen on a retransmission 5096 * where utim becomes huge compared to 5097 * srtt (multiple retransmissions when using 5098 * the sending rate which factors in all the 5099 * transmissions from the first one). 5100 */ 5101 subpart = rack->r_ctl.gp_bw / 2; 5102 addpart = bytes_ps / 2; 5103 } 5104 resid_bw = rack->r_ctl.gp_bw - subpart; 5105 rack->r_ctl.gp_bw = resid_bw + addpart; 5106 did_add = 1; 5107 } else { 5108 if ((utim / srtt) <= 1) { 5109 /* 5110 * The b/w update was over a small period 5111 * of time. The idea here is to prevent a small 5112 * measurement time period from counting 5113 * too much. So we scale it based on the 5114 * time so it attributes less than 1/rack_wma_divisor 5115 * of its measurement. 5116 */ 5117 subpart = rack->r_ctl.gp_bw * utim; 5118 subpart /= (srtt * rack_wma_divisor); 5119 addpart = bytes_ps * utim; 5120 addpart /= (srtt * rack_wma_divisor); 5121 } else { 5122 /* 5123 * The scaled measurement was long 5124 * enough so lets just add in the 5125 * portion of the measurement i.e. 1/rack_wma_divisor 5126 */ 5127 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; 5128 addpart = bytes_ps / rack_wma_divisor; 5129 } 5130 if ((rack->measure_saw_probe_rtt == 0) || 5131 (bytes_ps > rack->r_ctl.gp_bw)) { 5132 /* 5133 * For probe-rtt we only add it in 5134 * if its larger, all others we just 5135 * add in. 5136 */ 5137 did_add = 1; 5138 resid_bw = rack->r_ctl.gp_bw - subpart; 5139 rack->r_ctl.gp_bw = resid_bw + addpart; 5140 } 5141 } 5142 rack_set_pace_segments(tp, rack, __LINE__, NULL); 5143 } 5144 if ((rack->gp_ready == 0) && 5145 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 5146 /* We have enough measurements now */ 5147 rack->gp_ready = 1; 5148 if (rack->dgp_on || 5149 rack->rack_hibeta) 5150 rack_set_cc_pacing(rack); 5151 if (rack->defer_options) 5152 rack_apply_deferred_options(rack); 5153 } 5154 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 5155 rack_get_bw(rack), 22, did_add, NULL, quality); 5156 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 5157 if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set) 5158 rack_update_multiplier(rack, timely_says, bytes_ps, 5159 rack->r_ctl.rc_gp_srtt, 5160 rack->r_ctl.rc_rtt_diff); 5161 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 5162 rack_get_bw(rack), 3, line, NULL, quality); 5163 rack_log_pacing_delay_calc(rack, 5164 bytes, /* flex2 */ 5165 tim, /* flex1 */ 5166 bytes_ps, /* bw_inuse */ 5167 rack->r_ctl.gp_bw, /* delRate */ 5168 rack_get_lt_bw(rack), /* rttProp */ 5169 20, line, NULL, 0); 5170 /* reset the gp srtt and setup the new prev */ 5171 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 5172 /* Record the lost count for the next measurement */ 5173 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 5174 skip_measurement: 5175 /* 5176 * We restart our diffs based on the gpsrtt in the 5177 * measurement window. 
5178 */ 5179 rack->rc_gp_rtt_set = 0; 5180 rack->rc_gp_saw_rec = 0; 5181 rack->rc_gp_saw_ca = 0; 5182 rack->rc_gp_saw_ss = 0; 5183 rack->rc_dragged_bottom = 0; 5184 5185 if (quality == RACK_QUALITY_HIGH) { 5186 /* 5187 * Gput in the stats world is in kbps where bytes_ps is 5188 * bytes per second so we do ((x * 8)/ 1000). 5189 */ 5190 gput = (int32_t)((bytes_ps << 3) / (uint64_t)1000); 5191 #ifdef STATS 5192 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 5193 gput); 5194 /* 5195 * XXXLAS: This is a temporary hack, and should be 5196 * chained off VOI_TCP_GPUT when stats(9) grows an 5197 * API to deal with chained VOIs. 5198 */ 5199 if (tp->t_stats_gput_prev > 0) 5200 stats_voi_update_abs_s32(tp->t_stats, 5201 VOI_TCP_GPUT_ND, 5202 ((gput - tp->t_stats_gput_prev) * 100) / 5203 tp->t_stats_gput_prev); 5204 #endif 5205 tp->t_stats_gput_prev = gput; 5206 } 5207 tp->t_flags &= ~TF_GPUTINPROG; 5208 /* 5209 * Now are we app limited now and there is space from where we 5210 * were to where we want to go? 5211 * 5212 * We don't do the other case i.e. non-applimited here since 5213 * the next send will trigger us picking up the missing data. 5214 */ 5215 if (rack->r_ctl.rc_first_appl && 5216 TCPS_HAVEESTABLISHED(tp->t_state) && 5217 rack->r_ctl.rc_app_limited_cnt && 5218 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 5219 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > 5220 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 5221 /* 5222 * Yep there is enough outstanding to make a measurement here. 5223 */ 5224 struct rack_sendmap *rsm; 5225 5226 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 5227 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 5228 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 5229 rack->app_limited_needs_set = 0; 5230 tp->gput_seq = th_ack; 5231 if (rack->in_probe_rtt) 5232 rack->measure_saw_probe_rtt = 1; 5233 else if ((rack->measure_saw_probe_rtt) && 5234 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 5235 rack->measure_saw_probe_rtt = 0; 5236 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { 5237 /* There is a full window to gain info from */ 5238 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 5239 } else { 5240 /* We can only measure up to the applimited point */ 5241 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); 5242 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 5243 /* 5244 * We don't have enough to make a measurement. 5245 */ 5246 tp->t_flags &= ~TF_GPUTINPROG; 5247 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 5248 0, 0, 0, 6, __LINE__, NULL, quality); 5249 return; 5250 } 5251 } 5252 if (tp->t_state >= TCPS_FIN_WAIT_1) { 5253 /* 5254 * We will get no more data into the SB 5255 * this means we need to have the data available 5256 * before we start a measurement. 5257 */ 5258 if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) { 5259 /* Nope not enough data. */ 5260 return; 5261 } 5262 } 5263 tp->t_flags |= TF_GPUTINPROG; 5264 /* 5265 * Now we need to find the timestamp of the send at tp->gput_seq 5266 * for the send based measurement. 5267 */ 5268 rack->r_ctl.rc_gp_cumack_ts = 0; 5269 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 5270 if (rsm) { 5271 /* Ok send-based limit is set */ 5272 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 5273 /* 5274 * Move back to include the earlier part 5275 * so our ack time lines up right (this may 5276 * make an overlapping measurement but thats 5277 * ok). 
5278 */ 5279 tp->gput_seq = rsm->r_start; 5280 } 5281 if (rsm->r_flags & RACK_ACKED) { 5282 struct rack_sendmap *nrsm; 5283 5284 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 5285 tp->gput_seq = rsm->r_end; 5286 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); 5287 if (nrsm) 5288 rsm = nrsm; 5289 else { 5290 rack->app_limited_needs_set = 1; 5291 } 5292 } else 5293 rack->app_limited_needs_set = 1; 5294 /* We always go from the first send */ 5295 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; 5296 } else { 5297 /* 5298 * If we don't find the rsm due to some 5299 * send-limit set the current time, which 5300 * basically disables the send-limit. 5301 */ 5302 struct timeval tv; 5303 5304 microuptime(&tv); 5305 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 5306 } 5307 rack_tend_gp_marks(tp, rack); 5308 rack_log_pacing_delay_calc(rack, 5309 tp->gput_seq, 5310 tp->gput_ack, 5311 (uint64_t)rsm, 5312 tp->gput_ts, 5313 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 5314 9, 5315 __LINE__, rsm, quality); 5316 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 5317 } else { 5318 /* 5319 * To make sure proper timestamp merging occurs, we need to clear 5320 * all GP marks if we don't start a measurement. 5321 */ 5322 rack_clear_gp_marks(tp, rack); 5323 } 5324 } 5325 5326 /* 5327 * CC wrapper hook functions 5328 */ 5329 static void 5330 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 5331 uint16_t type, int32_t recovery) 5332 { 5333 uint32_t prior_cwnd, acked; 5334 struct tcp_log_buffer *lgb = NULL; 5335 uint8_t labc_to_use, quality; 5336 5337 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5338 tp->t_ccv.nsegs = nsegs; 5339 acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una); 5340 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 5341 uint32_t max; 5342 5343 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 5344 if (tp->t_ccv.bytes_this_ack > max) { 5345 tp->t_ccv.bytes_this_ack = max; 5346 } 5347 } 5348 #ifdef STATS 5349 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 5350 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 5351 #endif 5352 if ((th_ack == tp->snd_max) && rack->lt_bw_up) { 5353 /* We will ack all, time 5354 * to end any lt_bw_up we 5355 * have running until something 5356 * new is sent. 
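 *
 * (Descriptive note, not a new calculation: the lines below fold the
 * newly acked span into lt_bw_bytes and the elapsed microseconds
 * since lt_timemark into lt_bw_time, so the long-term rate later
 * reported by rack_get_lt_bw() is, roughly, accumulated bytes over
 * accumulated time.)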
5357 */ 5358 struct timeval tv; 5359 5360 rack->r_ctl.lt_bw_bytes += (tp->snd_max - rack->r_ctl.lt_seq); 5361 rack->r_ctl.lt_seq = tp->snd_max; 5362 (void)tcp_get_usecs(&tv); 5363 rack->r_ctl.lt_bw_time += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark); 5364 rack->lt_bw_up = 0; 5365 } 5366 quality = RACK_QUALITY_NONE; 5367 if ((tp->t_flags & TF_GPUTINPROG) && 5368 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { 5369 /* Measure the Goodput */ 5370 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); 5371 } 5372 /* Which way our we limited, if not cwnd limited no advance in CA */ 5373 if (tp->snd_cwnd <= tp->snd_wnd) 5374 tp->t_ccv.flags |= CCF_CWND_LIMITED; 5375 else 5376 tp->t_ccv.flags &= ~CCF_CWND_LIMITED; 5377 if (tp->snd_cwnd > tp->snd_ssthresh) { 5378 tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack, 5379 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 5380 /* For the setting of a window past use the actual scwnd we are using */ 5381 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 5382 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 5383 tp->t_ccv.flags |= CCF_ABC_SENTAWND; 5384 } 5385 } else { 5386 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 5387 tp->t_bytes_acked = 0; 5388 } 5389 prior_cwnd = tp->snd_cwnd; 5390 if ((recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 5391 (rack_client_low_buf && rack->client_bufferlvl && 5392 (rack->client_bufferlvl < rack_client_low_buf))) 5393 labc_to_use = rack->rc_labc; 5394 else 5395 labc_to_use = rack_max_abc_post_recovery; 5396 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5397 union tcp_log_stackspecific log; 5398 struct timeval tv; 5399 5400 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5401 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5402 log.u_bbr.flex1 = th_ack; 5403 log.u_bbr.flex2 = tp->t_ccv.flags; 5404 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 5405 log.u_bbr.flex4 = tp->t_ccv.nsegs; 5406 log.u_bbr.flex5 = labc_to_use; 5407 log.u_bbr.flex6 = prior_cwnd; 5408 log.u_bbr.flex7 = V_tcp_do_newsack; 5409 log.u_bbr.flex8 = 1; 5410 lgb = tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5411 0, &log, false, NULL, __func__, __LINE__,&tv); 5412 } 5413 if (CC_ALGO(tp)->ack_received != NULL) { 5414 /* XXXLAS: Find a way to live without this */ 5415 tp->t_ccv.curack = th_ack; 5416 tp->t_ccv.labc = labc_to_use; 5417 tp->t_ccv.flags |= CCF_USE_LOCAL_ABC; 5418 CC_ALGO(tp)->ack_received(&tp->t_ccv, type); 5419 } 5420 if (lgb) { 5421 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; 5422 } 5423 if (rack->r_must_retran) { 5424 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { 5425 /* 5426 * We now are beyond the rxt point so lets disable 5427 * the flag. 5428 */ 5429 rack->r_ctl.rc_out_at_rto = 0; 5430 rack->r_must_retran = 0; 5431 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { 5432 /* 5433 * Only decrement the rc_out_at_rto if the cwnd advances 5434 * at least a whole segment. Otherwise next time the peer 5435 * acks, we won't be able to send this generaly happens 5436 * when we are in Congestion Avoidance. 
5437 */ 5438 if (acked <= rack->r_ctl.rc_out_at_rto){ 5439 rack->r_ctl.rc_out_at_rto -= acked; 5440 } else { 5441 rack->r_ctl.rc_out_at_rto = 0; 5442 } 5443 } 5444 } 5445 #ifdef STATS 5446 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); 5447 #endif 5448 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { 5449 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; 5450 } 5451 } 5452 5453 static void 5454 tcp_rack_partialack(struct tcpcb *tp) 5455 { 5456 struct tcp_rack *rack; 5457 5458 rack = (struct tcp_rack *)tp->t_fb_ptr; 5459 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5460 /* 5461 * If we are doing PRR and have enough 5462 * room to send <or> we are pacing and prr 5463 * is disabled we will want to see if we 5464 * can send data (by setting r_wanted_output to 5465 * true). 5466 */ 5467 if ((rack->r_ctl.rc_prr_sndcnt > 0) || 5468 rack->rack_no_prr) 5469 rack->r_wanted_output = 1; 5470 } 5471 5472 static inline void 5473 rack_set_most_aggr(struct tcp_rack *rack) 5474 { 5475 rack->r_fill_less_agg = 0; 5476 /* Once the cwnd as been clamped we don't do fill_cw */ 5477 if (rack->r_cwnd_was_clamped == 0) 5478 rack->rc_pace_to_cwnd = 1; 5479 rack->r_pacing_discount = 0; 5480 } 5481 5482 static inline void 5483 rack_limit_fillcw(struct tcp_rack *rack) 5484 { 5485 rack->r_fill_less_agg = 1; 5486 /* Once the cwnd as been clamped we don't do fill_cw */ 5487 if (rack->r_cwnd_was_clamped == 0) 5488 rack->rc_pace_to_cwnd = 1; 5489 rack->r_pacing_discount = 0; 5490 } 5491 5492 static inline void 5493 rack_disable_fillcw(struct tcp_rack *rack) 5494 { 5495 rack->r_fill_less_agg = 1; 5496 rack->rc_pace_to_cwnd = 0; 5497 rack->r_pacing_discount = 0; 5498 } 5499 5500 static void 5501 rack_client_buffer_level_set(struct tcp_rack *rack) 5502 { 5503 /* 5504 * Only if DGP is on do we do anything that 5505 * changes stack behavior. If DGP is off all 5506 * we will do is issue a BB log (if BB logging is 5507 * on) and return. 5508 */ 5509 if (rack->dgp_on == 0) { 5510 rack_log_pacing_delay_calc(rack, 0, rack->client_bufferlvl, 5511 0, 0, 0, 30, __LINE__, NULL, 0); 5512 return; 5513 } 5514 if (IN_RECOVERY(rack->rc_tp->t_flags) && rack->r_ctl.full_dgp_in_rec) { 5515 goto set_most_agg; 5516 } 5517 /* 5518 * We are in DGP so what setting should we 5519 * apply based on where the client is? 
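 *
 * A condensed reading of the switch that follows (levels are
 * rc_dgp_bl_agg, numbers are client_bufferlvl):
 *
 *   DGP_LEVEL0 (and default): always most aggressive.
 *   DGP_LEVEL1: 4 limits fillcw, 5 disables it, else most aggressive.
 *   DGP_LEVEL2: 3 limits, 4 disables, 5 disables and adds a pacing
 *               discount of 1, else most aggressive.
 *   DGP_LEVEL3: 2 limits, 3 disables, 4 disables with a discount of
 *               1, 5 disables with a discount of 2, else most
 *               aggressive.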
5520 */ 5521 switch(rack->r_ctl.rc_dgp_bl_agg) { 5522 default: 5523 case DGP_LEVEL0: 5524 set_most_agg: 5525 rack_set_most_aggr(rack); 5526 break; 5527 case DGP_LEVEL1: 5528 if (rack->client_bufferlvl == 4) 5529 rack_limit_fillcw(rack); 5530 else if (rack->client_bufferlvl == 5) 5531 rack_disable_fillcw(rack); 5532 else 5533 rack_set_most_aggr(rack); 5534 break; 5535 case DGP_LEVEL2: 5536 if (rack->client_bufferlvl == 3) 5537 rack_limit_fillcw(rack); 5538 else if (rack->client_bufferlvl == 4) 5539 rack_disable_fillcw(rack); 5540 else if (rack->client_bufferlvl == 5) { 5541 rack_disable_fillcw(rack); 5542 rack->r_pacing_discount = 1; 5543 rack->r_ctl.pacing_discount_amm = 1; 5544 } else 5545 rack_set_most_aggr(rack); 5546 break; 5547 case DGP_LEVEL3: 5548 if (rack->client_bufferlvl == 2) 5549 rack_limit_fillcw(rack); 5550 else if (rack->client_bufferlvl == 3) 5551 rack_disable_fillcw(rack); 5552 else if (rack->client_bufferlvl == 4) { 5553 rack_disable_fillcw(rack); 5554 rack->r_pacing_discount = 1; 5555 rack->r_ctl.pacing_discount_amm = 1; 5556 } else if (rack->client_bufferlvl == 5) { 5557 rack_disable_fillcw(rack); 5558 rack->r_pacing_discount = 1; 5559 rack->r_ctl.pacing_discount_amm = 2; 5560 } else 5561 rack_set_most_aggr(rack); 5562 break; 5563 } 5564 rack_log_pacing_delay_calc(rack, rack->r_ctl.rc_dgp_bl_agg, rack->client_bufferlvl, 0, 5565 0, 0, 30, __LINE__, NULL, 0); 5566 } 5567 5568 static void 5569 do_rack_check_for_unclamp(struct tcpcb *tp, struct tcp_rack *rack) 5570 { 5571 /* 5572 * Can we unclamp. We unclamp if more than 5573 * N rounds have transpired with no loss. 5574 */ 5575 uint64_t snds, rxts, rxt_per; 5576 uint32_t rnds; 5577 5578 rnds = rack->r_ctl.current_round - rack->r_ctl.last_rnd_rxt_clamped; 5579 if ((rack_unclamp_round_thresh > 0) && 5580 (rnds >= rack_unclamp_round_thresh)) { 5581 snds = tp->t_sndbytes - rack->r_ctl.last_sndbytes; 5582 KASSERT ((snds > 0), ("rack:%p tp:%p snds:%ju is 0", rack, tp, 5583 (uintmax_t)snds)); 5584 rxts = tp->t_snd_rxt_bytes - rack->r_ctl.last_snd_rxt_bytes; 5585 rxt_per = rxts * 1000; 5586 rxt_per /= snds; 5587 if ((uint32_t)rxt_per <= rack_unclamp_rxt_thresh) { 5588 /* Unclamp */ 5589 if (tcp_bblogging_on(rack->rc_tp)) { 5590 union tcp_log_stackspecific log; 5591 struct timeval tv; 5592 5593 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5594 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5595 log.u_bbr.flex3 = rnds; 5596 log.u_bbr.flex4 = rack_unclamp_round_thresh; 5597 log.u_bbr.flex5 = (uint32_t)rxt_per; 5598 log.u_bbr.flex8 = 6; 5599 log.u_bbr.pkt_epoch = rack->r_ctl.rc_pace_max_segs; 5600 log.u_bbr.bbr_state = rack->rc_pace_to_cwnd; 5601 log.u_bbr.delivered = rack->r_ctl.num_of_clamps_applied; 5602 log.u_bbr.applimited = rack->r_ctl.max_clamps; 5603 log.u_bbr.epoch = rack->r_ctl.clamp_options; 5604 log.u_bbr.cur_del_rate = rxts; 5605 log.u_bbr.bw_inuse = rack_get_lt_bw(rack); 5606 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 5607 log.u_bbr.lt_epoch = (uint32_t)((rack->r_ctl.gp_bw >> 32) & 0x00000000ffffffff); 5608 log.u_bbr.pkts_out = (uint32_t)(rack->r_ctl.gp_bw & 0x00000000ffffffff); 5609 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5610 0, &log, false, NULL, NULL, 0, &tv); 5611 } 5612 rack->r_ctl.num_of_clamps_applied = 0; 5613 rack->r_cwnd_was_clamped = 0; 5614 rack->excess_rxt_on = 1; 5615 if (rack->r_ctl.clamp_options) { 5616 /* 5617 * We only allow fillcw to be toggled 5618 * if you are setting a max seg too. 
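 *
 * (Observation only, no behavior change: as written the two branches
 * below assign back exactly the value their guard just tested for,
 * so on this unclamp path the block is effectively a no-op;
 * presumably the intent was to reverse the fillcw toggle applied in
 * do_rack_excess_rxt().)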
5619 */ 5620 if (rack->r_ctl.clamp_options & 0x1) { 5621 if ((rack->rc_pace_to_cwnd == 0) && (rack->dgp_on == 0)) { 5622 /* turn on fill cw for non-dgp*/ 5623 rack->rc_pace_to_cwnd = 0; 5624 } else if ((rack->dgp_on == 1) && (rack->rc_pace_to_cwnd == 1)) { 5625 /* For DGP we want it off */ 5626 rack->rc_pace_to_cwnd = 1; 5627 } 5628 } 5629 } 5630 if (rack->dgp_on) { 5631 /* Reset all multipliers to 100.0 so just the measured bw */ 5632 /* Crash any per boosts down to 100% */ 5633 rack->r_ctl.rack_per_of_gp_rec = 100; 5634 rack->r_ctl.rack_per_of_gp_ss = 100; 5635 rack->r_ctl.rack_per_of_gp_ca = 100; 5636 /* Set in an upper bound for ss/ca % increase */ 5637 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss; 5638 rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_per_upper_bound_ca; 5639 } 5640 } 5641 } 5642 } 5643 5644 static void 5645 do_rack_excess_rxt(struct tcpcb *tp, struct tcp_rack *rack) 5646 { 5647 /* 5648 * Rack excess rxt accounting is turned on. If we 5649 * are above a threshold of rxt's in at least N 5650 * rounds, then back off the cwnd and ssthresh 5651 * to fit into the long-term b/w. 5652 */ 5653 uint64_t snds, rxts, rxt_per, lt_bw, bdp; 5654 uint32_t rnds, new_cwnd, new_ssthresh, rtt, shared_cwnd_was_enabled = 0; 5655 5656 /* Is it shut off by 0 rounds? */ 5657 if (rack_rxt_min_rnds == 0) 5658 return; 5659 if ((rack->r_ctl.max_clamps > 0) && 5660 (rack->r_ctl.num_of_clamps_applied >= rack->r_ctl.max_clamps)) { 5661 /* 5662 * The idea, if max_clamps is set, is that if clamping it 5663 * N times did not work again, then there is no sense 5664 * clamping it again. The link is just a lossy link and 5665 * our clamps are doing no good. Turn it off so we don't come 5666 * back here again. 5667 */ 5668 rack->excess_rxt_on = 0; 5669 rack->r_cwnd_was_clamped = 0; 5670 rack->r_ctl.num_of_clamps_applied = 0; 5671 return; 5672 } 5673 snds = tp->t_sndbytes - rack->r_ctl.last_sndbytes; 5674 rxts = tp->t_snd_rxt_bytes - rack->r_ctl.last_snd_rxt_bytes; 5675 rnds = rack->r_ctl.current_round - rack->r_ctl.last_rnd_rxt_clamped; 5676 /* Has enough rounds progressed for us to re-measure? */ 5677 if ((rnds >= rack_rxt_min_rnds) && 5678 (rack->r_ctl.rxt_threshold > 0)){ 5679 rxt_per = rxts * 1000; 5680 rxt_per /= snds; 5681 if (rxt_per >= rack->r_ctl.rxt_threshold) { 5682 /* 5683 * Action required: 5684 * We are above our excess retransmit level, lets 5685 * cut down the cwnd and ssthresh to match the long-term 5686 * b/w we are getting. 5687 */ 5688 /* First disable scwnd if enabled */ 5689 #ifdef NETFLIX_SHARED_CWND 5690 rack->rack_enable_scwnd = 0; 5691 if (rack->r_ctl.rc_scw) { 5692 uint32_t limit; 5693 5694 shared_cwnd_was_enabled = 1; 5695 if (rack->r_limit_scw) 5696 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 5697 else 5698 limit = 0; 5699 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 5700 rack->r_ctl.rc_scw_index, 5701 limit); 5702 rack->r_ctl.rc_scw = NULL; 5703 } 5704 5705 #endif 5706 /* Calculate what the cwnd and ssthresh should be */ 5707 tcp_trace_point(rack->rc_tp, TCP_TP_EXCESS_RXT); 5708 lt_bw = rack_get_lt_bw(rack); 5709 if (lt_bw == 0) { 5710 /* 5711 * No lt_bw, lets chop things to one MSS 5712 * and the ssthresh to the iwnd. 
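 *
 * Otherwise the else branch below sizes the clamp from the long-term
 * bandwidth-delay product, bdp = lt_bw * rack_rtt / HPTS_USEC_IN_SEC.
 * Illustrative arithmetic (hypothetical numbers): an lt_bw of
 * 12,500,000 bytes/sec over a 40,000 usec rack rtt yields a 500,000
 * byte cwnd, with ssthresh set one byte below it.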
5713 */ 5714 reset_to_iw: 5715 new_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 5716 new_ssthresh = tcp_compute_initwnd(tcp_maxseg(tp)); 5717 } else { 5718 rtt = rack->rc_rack_rtt; 5719 if (rtt == 0) { 5720 /* If we have no rack_rtt drop to the IW situation */ 5721 goto reset_to_iw; 5722 } 5723 bdp = lt_bw * (uint64_t)rtt; 5724 bdp /= HPTS_USEC_IN_SEC; 5725 new_cwnd = (uint32_t)bdp; 5726 new_ssthresh = new_cwnd - 1; 5727 if (new_cwnd < ctf_fixed_maxseg(tp)) { 5728 /* Rock bottom, goto IW settings */ 5729 goto reset_to_iw; 5730 } 5731 } 5732 rack->r_cwnd_was_clamped = 1; 5733 rack->r_ctl.num_of_clamps_applied++; 5734 /* Reset the counter fromn now */ 5735 tp->t_bytes_acked = 0; 5736 /* 5737 * Now what about options? 5738 * We look at the bottom 8 bits: 5739 * F = fill cw bit (toggle it if set) 5740 * S = Segment bits 5741 * M = set max segment bit 5742 * 5743 * SSSS SSMF 5744 */ 5745 if (rack->r_ctl.clamp_options) { 5746 if (rack->r_ctl.clamp_options & 0x1) { 5747 if ((rack->rc_pace_to_cwnd == 0) && (rack->dgp_on == 0)) { 5748 /* turn on fill cw for non-dgp*/ 5749 rack->rc_pace_to_cwnd = 1; 5750 } else if ((rack->dgp_on == 1) && (rack->rc_pace_to_cwnd == 1)) { 5751 /* For DGP we want it off */ 5752 rack->rc_pace_to_cwnd = 0; 5753 } 5754 } 5755 } 5756 if (rack->dgp_on) { 5757 /* Reset all multipliers to 100.0 so just the measured bw */ 5758 /* Crash any per boosts down to 100% */ 5759 rack->r_ctl.rack_per_of_gp_rec = 100; 5760 rack->r_ctl.rack_per_of_gp_ss = 100; 5761 rack->r_ctl.rack_per_of_gp_ca = 100; 5762 /* Set in an upper bound for ss/ca % increase */ 5763 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_clamp_ss_upper; 5764 rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_clamp_ca_upper; 5765 /* Now move to the lt_bw */ 5766 rack->r_ctl.gp_bw = lt_bw; 5767 rack->rc_gp_filled = 1; 5768 rack->r_ctl.num_measurements = RACK_REQ_AVG; 5769 } 5770 if (tcp_bblogging_on(rack->rc_tp)) { 5771 union tcp_log_stackspecific log; 5772 struct timeval tv; 5773 5774 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5775 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5776 log.u_bbr.flex1 = new_cwnd; 5777 log.u_bbr.flex2 = new_ssthresh; 5778 log.u_bbr.flex3 = rnds; 5779 log.u_bbr.flex4 = rack_rxt_min_rnds; 5780 log.u_bbr.flex5 = rtt; 5781 log.u_bbr.flex6 = shared_cwnd_was_enabled; 5782 log.u_bbr.flex8 = 5; 5783 log.u_bbr.pkt_epoch = rack->r_ctl.rc_pace_max_segs; 5784 log.u_bbr.bbr_state = rack->rc_pace_to_cwnd; 5785 log.u_bbr.delivered = rack->r_ctl.num_of_clamps_applied; 5786 log.u_bbr.applimited = rack->r_ctl.max_clamps; 5787 log.u_bbr.epoch = rack->r_ctl.clamp_options; 5788 log.u_bbr.cur_del_rate = rxts; 5789 log.u_bbr.delRate = snds; 5790 log.u_bbr.rttProp = rack->r_ctl.rxt_threshold; 5791 log.u_bbr.bw_inuse = lt_bw; 5792 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 5793 log.u_bbr.lt_epoch = (uint32_t)((rack->r_ctl.gp_bw >> 32) & 0x00000000ffffffff); 5794 log.u_bbr.pkts_out = (uint32_t)(rack->r_ctl.gp_bw & 0x00000000ffffffff); 5795 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5796 0, &log, false, NULL, NULL, 0, &tv); 5797 } 5798 /* Update our point where we did it */ 5799 if (rack->r_ctl.already_had_a_excess == 0) { 5800 rack->r_ctl.already_had_a_excess = 1; 5801 counter_u64_add(rack_rxt_clamps_cwnd_uniq, 1); 5802 } 5803 counter_u64_add(rack_rxt_clamps_cwnd, 1); 5804 rack->r_ctl.last_sndbytes = tp->t_sndbytes; 5805 rack->r_ctl.last_snd_rxt_bytes = tp->t_snd_rxt_bytes; 5806 rack->r_ctl.last_rnd_rxt_clamped = rack->r_ctl.current_round; 5807 if (new_cwnd < 
tp->snd_cwnd) 5808 tp->snd_cwnd = new_cwnd; 5809 if (new_ssthresh < tp->snd_ssthresh) 5810 tp->snd_ssthresh = new_ssthresh; 5811 } 5812 } 5813 } 5814 5815 static void 5816 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack) 5817 { 5818 struct tcp_rack *rack; 5819 uint32_t orig_cwnd; 5820 5821 orig_cwnd = tp->snd_cwnd; 5822 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5823 rack = (struct tcp_rack *)tp->t_fb_ptr; 5824 /* only alert CC if we alerted when we entered */ 5825 if (CC_ALGO(tp)->post_recovery != NULL) { 5826 tp->t_ccv.curack = th_ack; 5827 CC_ALGO(tp)->post_recovery(&tp->t_ccv); 5828 if (tp->snd_cwnd < tp->snd_ssthresh) { 5829 /* 5830 * Rack has burst control and pacing 5831 * so lets not set this any lower than 5832 * snd_ssthresh per RFC-6582 (option 2). 5833 */ 5834 tp->snd_cwnd = tp->snd_ssthresh; 5835 } 5836 } 5837 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5838 union tcp_log_stackspecific log; 5839 struct timeval tv; 5840 5841 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5842 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5843 log.u_bbr.flex1 = th_ack; 5844 log.u_bbr.flex2 = tp->t_ccv.flags; 5845 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 5846 log.u_bbr.flex4 = tp->t_ccv.nsegs; 5847 log.u_bbr.flex5 = V_tcp_abc_l_var; 5848 log.u_bbr.flex6 = orig_cwnd; 5849 log.u_bbr.flex7 = V_tcp_do_newsack; 5850 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 5851 log.u_bbr.flex8 = 2; 5852 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 5853 0, &log, false, NULL, __func__, __LINE__, &tv); 5854 } 5855 if ((rack->rack_no_prr == 0) && 5856 (rack->no_prr_addback == 0) && 5857 (rack->r_ctl.rc_prr_sndcnt > 0)) { 5858 /* 5859 * Suck the next prr cnt back into cwnd, but 5860 * only do that if we are not application limited. 5861 */ 5862 if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) { 5863 /* 5864 * We are allowed to add back to the cwnd the amount we did 5865 * not get out if: 5866 * a) no_prr_addback is off. 5867 * b) we are not app limited 5868 * c) we are doing prr 5869 * <and> 5870 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 
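 *
 * Illustrative arithmetic (hypothetical numbers): with a 1448 byte
 * maxseg, rack_prr_addbackmax = 2 and 6000 bytes of unspent
 * rc_prr_sndcnt, the cwnd below grows by min(2 * 1448, 6000) = 2896
 * bytes before rc_prr_sndcnt is cleared.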
5871 */ 5872 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 5873 rack->r_ctl.rc_prr_sndcnt); 5874 } 5875 rack->r_ctl.rc_prr_sndcnt = 0; 5876 rack_log_to_prr(rack, 1, 0, __LINE__); 5877 } 5878 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 5879 tp->snd_recover = tp->snd_una; 5880 if (rack->r_ctl.dsack_persist) { 5881 rack->r_ctl.dsack_persist--; 5882 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 5883 rack->r_ctl.num_dsack = 0; 5884 } 5885 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 5886 } 5887 EXIT_RECOVERY(tp->t_flags); 5888 if (rack->r_ctl.full_dgp_in_rec) 5889 rack_client_buffer_level_set(rack); 5890 } 5891 5892 static void 5893 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line) 5894 { 5895 struct tcp_rack *rack; 5896 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 5897 5898 INP_WLOCK_ASSERT(tptoinpcb(tp)); 5899 #ifdef STATS 5900 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 5901 #endif 5902 if (IN_RECOVERY(tp->t_flags) == 0) { 5903 in_rec_at_entry = 0; 5904 ssthresh_enter = tp->snd_ssthresh; 5905 cwnd_enter = tp->snd_cwnd; 5906 } else 5907 in_rec_at_entry = 1; 5908 rack = (struct tcp_rack *)tp->t_fb_ptr; 5909 switch (type) { 5910 case CC_NDUPACK: 5911 tp->t_flags &= ~TF_WASFRECOVERY; 5912 tp->t_flags &= ~TF_WASCRECOVERY; 5913 if (!IN_FASTRECOVERY(tp->t_flags)) { 5914 if (rack->dgp_on && rack->r_cwnd_was_clamped) { 5915 /* Reset the gains so that on exit we will be softer longer */ 5916 rack->r_ctl.rack_per_of_gp_rec = 100; 5917 rack->r_ctl.rack_per_of_gp_ss = 98; 5918 rack->r_ctl.rack_per_of_gp_ca = 98; 5919 } 5920 rack->r_ctl.rc_prr_delivered = 0; 5921 rack->r_ctl.rc_prr_out = 0; 5922 rack->r_fast_output = 0; 5923 if (rack->rack_no_prr == 0) { 5924 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 5925 rack_log_to_prr(rack, 2, in_rec_at_entry, line); 5926 } 5927 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 5928 tp->snd_recover = tp->snd_max; 5929 if (tp->t_flags2 & TF2_ECN_PERMIT) 5930 tp->t_flags2 |= TF2_ECN_SND_CWR; 5931 } 5932 break; 5933 case CC_ECN: 5934 if (!IN_CONGRECOVERY(tp->t_flags) || 5935 /* 5936 * Allow ECN reaction on ACK to CWR, if 5937 * that data segment was also CE marked. 5938 */ 5939 SEQ_GEQ(ack, tp->snd_recover)) { 5940 EXIT_CONGRECOVERY(tp->t_flags); 5941 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 5942 rack->r_fast_output = 0; 5943 tp->snd_recover = tp->snd_max + 1; 5944 if (tp->t_flags2 & TF2_ECN_PERMIT) 5945 tp->t_flags2 |= TF2_ECN_SND_CWR; 5946 } 5947 break; 5948 case CC_RTO: 5949 tp->t_dupacks = 0; 5950 tp->t_bytes_acked = 0; 5951 rack->r_fast_output = 0; 5952 EXIT_RECOVERY(tp->t_flags); 5953 tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 5954 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 5955 orig_cwnd = tp->snd_cwnd; 5956 tp->snd_cwnd = ctf_fixed_maxseg(tp); 5957 rack_log_to_prr(rack, 16, orig_cwnd, line); 5958 if (tp->t_flags2 & TF2_ECN_PERMIT) 5959 tp->t_flags2 |= TF2_ECN_SND_CWR; 5960 break; 5961 case CC_RTO_ERR: 5962 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 5963 /* RTO was unnecessary, so reset everything. 
*/ 5964 tp->snd_cwnd = tp->snd_cwnd_prev; 5965 tp->snd_ssthresh = tp->snd_ssthresh_prev; 5966 tp->snd_recover = tp->snd_recover_prev; 5967 if (tp->t_flags & TF_WASFRECOVERY) { 5968 ENTER_FASTRECOVERY(tp->t_flags); 5969 tp->t_flags &= ~TF_WASFRECOVERY; 5970 } 5971 if (tp->t_flags & TF_WASCRECOVERY) { 5972 ENTER_CONGRECOVERY(tp->t_flags); 5973 tp->t_flags &= ~TF_WASCRECOVERY; 5974 } 5975 tp->snd_nxt = tp->snd_max; 5976 tp->t_badrxtwin = 0; 5977 break; 5978 } 5979 if ((CC_ALGO(tp)->cong_signal != NULL) && 5980 (type != CC_RTO)){ 5981 tp->t_ccv.curack = ack; 5982 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type); 5983 } 5984 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 5985 rack_log_to_prr(rack, 15, cwnd_enter, line); 5986 if (rack->r_ctl.full_dgp_in_rec) 5987 rack_client_buffer_level_set(rack); 5988 rack->r_ctl.dsack_byte_cnt = 0; 5989 rack->r_ctl.retran_during_recovery = 0; 5990 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 5991 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 5992 rack->r_ent_rec_ns = 1; 5993 } 5994 } 5995 5996 static inline void 5997 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 5998 { 5999 uint32_t i_cwnd; 6000 6001 INP_WLOCK_ASSERT(tptoinpcb(tp)); 6002 6003 if (CC_ALGO(tp)->after_idle != NULL) 6004 CC_ALGO(tp)->after_idle(&tp->t_ccv); 6005 6006 if (tp->snd_cwnd == 1) 6007 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 6008 else 6009 i_cwnd = rc_init_window(rack); 6010 6011 /* 6012 * Being idle is no different than the initial window. If the cc 6013 * clamps it down below the initial window raise it to the initial 6014 * window. 6015 */ 6016 if (tp->snd_cwnd < i_cwnd) { 6017 tp->snd_cwnd = i_cwnd; 6018 } 6019 } 6020 6021 /* 6022 * Indicate whether this ack should be delayed. We can delay the ack if 6023 * following conditions are met: 6024 * - There is no delayed ack timer in progress. 6025 * - Our last ack wasn't a 0-sized window. We never want to delay 6026 * the ack that opens up a 0-sized window. 6027 * - LRO wasn't used for this segment. We make sure by checking that the 6028 * segment size is not larger than the MSS. 6029 * - Delayed acks are enabled or this is a half-synchronized T/TCP 6030 * connection. 6031 */ 6032 #define DELAY_ACK(tp, tlen) \ 6033 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 6034 ((tp->t_flags & TF_DELACK) == 0) && \ 6035 (tlen <= tp->t_maxseg) && \ 6036 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 6037 6038 static struct rack_sendmap * 6039 rack_find_lowest_rsm(struct tcp_rack *rack) 6040 { 6041 struct rack_sendmap *rsm; 6042 6043 /* 6044 * Walk the time-order transmitted list looking for an rsm that is 6045 * not acked. This will be the one that was sent the longest time 6046 * ago that is still outstanding. 6047 */ 6048 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 6049 if (rsm->r_flags & RACK_ACKED) { 6050 continue; 6051 } 6052 goto finish; 6053 } 6054 finish: 6055 return (rsm); 6056 } 6057 6058 static struct rack_sendmap * 6059 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm) 6060 { 6061 struct rack_sendmap *prsm; 6062 6063 /* 6064 * Walk the sequence order list backward until we hit and arrive at 6065 * the highest seq not acked. In theory when this is called it 6066 * should be the last segment (which it was not). 
6067 */ 6068 prsm = rsm; 6069 6070 TQHASH_FOREACH_REVERSE_FROM(prsm, rack->r_ctl.tqh) { 6071 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) { 6072 continue; 6073 } 6074 return (prsm); 6075 } 6076 return (NULL); 6077 } 6078 6079 static uint32_t 6080 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts) 6081 { 6082 int32_t lro; 6083 uint32_t thresh; 6084 6085 /* 6086 * lro is the flag we use to determine if we have seen reordering. 6087 * If it gets set we have seen reordering. The reorder logic either 6088 * works in one of two ways: 6089 * 6090 * If reorder-fade is configured, then we track the last time we saw 6091 * re-ordering occur. If we reach the point where enough time as 6092 * passed we no longer consider reordering has occuring. 6093 * 6094 * Or if reorder-face is 0, then once we see reordering we consider 6095 * the connection to alway be subject to reordering and just set lro 6096 * to 1. 6097 * 6098 * In the end if lro is non-zero we add the extra time for 6099 * reordering in. 6100 */ 6101 if (srtt == 0) 6102 srtt = 1; 6103 if (rack->r_ctl.rc_reorder_ts) { 6104 if (rack->r_ctl.rc_reorder_fade) { 6105 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) { 6106 lro = cts - rack->r_ctl.rc_reorder_ts; 6107 if (lro == 0) { 6108 /* 6109 * No time as passed since the last 6110 * reorder, mark it as reordering. 6111 */ 6112 lro = 1; 6113 } 6114 } else { 6115 /* Negative time? */ 6116 lro = 0; 6117 } 6118 if (lro > rack->r_ctl.rc_reorder_fade) { 6119 /* Turn off reordering seen too */ 6120 rack->r_ctl.rc_reorder_ts = 0; 6121 lro = 0; 6122 } 6123 } else { 6124 /* Reodering does not fade */ 6125 lro = 1; 6126 } 6127 } else { 6128 lro = 0; 6129 } 6130 if (rack->rc_rack_tmr_std_based == 0) { 6131 thresh = srtt + rack->r_ctl.rc_pkt_delay; 6132 } else { 6133 /* Standards based pkt-delay is 1/4 srtt */ 6134 thresh = srtt + (srtt >> 2); 6135 } 6136 if (lro && (rack->rc_rack_tmr_std_based == 0)) { 6137 /* It must be set, if not you get 1/4 rtt */ 6138 if (rack->r_ctl.rc_reorder_shift) 6139 thresh += (srtt >> rack->r_ctl.rc_reorder_shift); 6140 else 6141 thresh += (srtt >> 2); 6142 } 6143 if (rack->rc_rack_use_dsack && 6144 lro && 6145 (rack->r_ctl.num_dsack > 0)) { 6146 /* 6147 * We only increase the reordering window if we 6148 * have seen reordering <and> we have a DSACK count. 
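 *
 * Illustrative arithmetic (hypothetical numbers, non standards-based
 * timer, rc_reorder_shift unset): srtt = 40000 usec, rc_pkt_delay =
 * 1000 and two DSACKs seen gives
 * thresh = 40000 + 1000 + (40000 >> 2) + 2 * (40000 >> 2) = 71000
 * usec, which is then still subject to the 2 * srtt ceiling and
 * rack_rto_max below.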
6149 */ 6150 thresh += rack->r_ctl.num_dsack * (srtt >> 2); 6151 rack_log_dsack_event(rack, 4, __LINE__, srtt, thresh); 6152 } 6153 /* SRTT * 2 is the ceiling */ 6154 if (thresh > (srtt * 2)) { 6155 thresh = srtt * 2; 6156 } 6157 /* And we don't want it above the RTO max either */ 6158 if (thresh > rack_rto_max) { 6159 thresh = rack_rto_max; 6160 } 6161 rack_log_dsack_event(rack, 6, __LINE__, srtt, thresh); 6162 return (thresh); 6163 } 6164 6165 static uint32_t 6166 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 6167 struct rack_sendmap *rsm, uint32_t srtt) 6168 { 6169 struct rack_sendmap *prsm; 6170 uint32_t thresh, len; 6171 int segsiz; 6172 6173 if (srtt == 0) 6174 srtt = 1; 6175 if (rack->r_ctl.rc_tlp_threshold) 6176 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 6177 else 6178 thresh = (srtt * 2); 6179 6180 /* Get the previous sent packet, if any */ 6181 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 6182 len = rsm->r_end - rsm->r_start; 6183 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 6184 /* Exactly like the ID */ 6185 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { 6186 uint32_t alt_thresh; 6187 /* 6188 * Compensate for delayed-ack with the d-ack time. 6189 */ 6190 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6191 if (alt_thresh > thresh) 6192 thresh = alt_thresh; 6193 } 6194 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 6195 /* 2.1 behavior */ 6196 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 6197 if (prsm && (len <= segsiz)) { 6198 /* 6199 * Two packets outstanding, thresh should be (2*srtt) + 6200 * possible inter-packet delay (if any). 6201 */ 6202 uint32_t inter_gap = 0; 6203 int idx, nidx; 6204 6205 idx = rsm->r_rtr_cnt - 1; 6206 nidx = prsm->r_rtr_cnt - 1; 6207 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 6208 /* Yes it was sent later (or at the same time) */ 6209 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 6210 } 6211 thresh += inter_gap; 6212 } else if (len <= segsiz) { 6213 /* 6214 * Possibly compensate for delayed-ack. 6215 */ 6216 uint32_t alt_thresh; 6217 6218 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6219 if (alt_thresh > thresh) 6220 thresh = alt_thresh; 6221 } 6222 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 6223 /* 2.2 behavior */ 6224 if (len <= segsiz) { 6225 uint32_t alt_thresh; 6226 /* 6227 * Compensate for delayed-ack with the d-ack time. 6228 */ 6229 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 6230 if (alt_thresh > thresh) 6231 thresh = alt_thresh; 6232 } 6233 } 6234 /* Not above an RTO */ 6235 if (thresh > tp->t_rxtcur) { 6236 thresh = tp->t_rxtcur; 6237 } 6238 /* Not above a RTO max */ 6239 if (thresh > rack_rto_max) { 6240 thresh = rack_rto_max; 6241 } 6242 /* Apply user supplied min TLP */ 6243 if (thresh < rack_tlp_min) { 6244 thresh = rack_tlp_min; 6245 } 6246 return (thresh); 6247 } 6248 6249 static uint32_t 6250 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 6251 { 6252 /* 6253 * We want the rack_rtt which is the 6254 * last rtt we measured. However if that 6255 * does not exist we fallback to the srtt (which 6256 * we probably will never do) and then as a last 6257 * resort we use RACK_INITIAL_RTO if no srtt is 6258 * yet set. 
6259 */ 6260 if (rack->rc_rack_rtt) 6261 return (rack->rc_rack_rtt); 6262 else if (tp->t_srtt == 0) 6263 return (RACK_INITIAL_RTO); 6264 return (tp->t_srtt); 6265 } 6266 6267 static struct rack_sendmap * 6268 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 6269 { 6270 /* 6271 * Check to see that we don't need to fall into recovery. We will 6272 * need to do so if our oldest transmit is past the time we should 6273 * have had an ack. 6274 */ 6275 struct tcp_rack *rack; 6276 struct rack_sendmap *rsm; 6277 int32_t idx; 6278 uint32_t srtt, thresh; 6279 6280 rack = (struct tcp_rack *)tp->t_fb_ptr; 6281 if (tqhash_empty(rack->r_ctl.tqh)) { 6282 return (NULL); 6283 } 6284 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6285 if (rsm == NULL) 6286 return (NULL); 6287 6288 6289 if (rsm->r_flags & RACK_ACKED) { 6290 rsm = rack_find_lowest_rsm(rack); 6291 if (rsm == NULL) 6292 return (NULL); 6293 } 6294 idx = rsm->r_rtr_cnt - 1; 6295 srtt = rack_grab_rtt(tp, rack); 6296 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 6297 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 6298 return (NULL); 6299 } 6300 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 6301 return (NULL); 6302 } 6303 /* Ok if we reach here we are over-due and this guy can be sent */ 6304 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 6305 return (rsm); 6306 } 6307 6308 static uint32_t 6309 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 6310 { 6311 int32_t t; 6312 int32_t tt; 6313 uint32_t ret_val; 6314 6315 t = (tp->t_srtt + (tp->t_rttvar << 2)); 6316 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 6317 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 6318 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 6319 ret_val = (uint32_t)tt; 6320 return (ret_val); 6321 } 6322 6323 static uint32_t 6324 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 6325 { 6326 /* 6327 * Start the FR timer, we do this based on getting the first one in 6328 * the rc_tmap. Note that if its NULL we must stop the timer. in all 6329 * events we need to stop the running timer (if its running) before 6330 * starting the new one. 6331 */ 6332 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 6333 uint32_t srtt_cur; 6334 int32_t idx; 6335 int32_t is_tlp_timer = 0; 6336 struct rack_sendmap *rsm; 6337 6338 if (rack->t_timers_stopped) { 6339 /* All timers have been stopped none are to run */ 6340 return (0); 6341 } 6342 if (rack->rc_in_persist) { 6343 /* We can't start any timer in persists */ 6344 return (rack_get_persists_timer_val(tp, rack)); 6345 } 6346 rack->rc_on_min_to = 0; 6347 if ((tp->t_state < TCPS_ESTABLISHED) || 6348 (rack->sack_attack_disable > 0) || 6349 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 6350 goto activate_rxt; 6351 } 6352 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6353 if ((rsm == NULL) || sup_rack) { 6354 /* Nothing on the send map or no rack */ 6355 activate_rxt: 6356 time_since_sent = 0; 6357 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6358 if (rsm) { 6359 /* 6360 * Should we discount the RTX timer any? 6361 * 6362 * We want to discount it the smallest amount. 6363 * If a timer (Rack/TLP or RXT) has gone off more 6364 * recently thats the discount we want to use (now - timer time). 6365 * If the retransmit of the oldest packet was more recent then 6366 * we want to use that (now - oldest-packet-last_transmit_time). 
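 * As an illustration (made-up numbers): with tp->t_rxtcur at
 * 400000 usecs, a timer that last fired 100000 usecs ago and the
 * oldest outstanding segment last (re)sent 250000 usecs ago, the
 * more recent of those two timestamps is chosen, time_since_sent
 * becomes 100000 and the RXT timer is armed for about 300000 usecs.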
6367 * 6368 */ 6369 idx = rsm->r_rtr_cnt - 1; 6370 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 6371 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 6372 else 6373 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 6374 if (TSTMP_GT(cts, tstmp_touse)) 6375 time_since_sent = cts - tstmp_touse; 6376 } 6377 if (SEQ_LT(tp->snd_una, tp->snd_max) || 6378 sbavail(&tptosocket(tp)->so_snd)) { 6379 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 6380 to = tp->t_rxtcur; 6381 if (to > time_since_sent) 6382 to -= time_since_sent; 6383 else 6384 to = rack->r_ctl.rc_min_to; 6385 if (to == 0) 6386 to = 1; 6387 /* Special case for KEEPINIT */ 6388 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6389 (TP_KEEPINIT(tp) != 0) && 6390 rsm) { 6391 /* 6392 * We have to put a ceiling on the rxt timer 6393 * of the keep-init timeout. 6394 */ 6395 uint32_t max_time, red; 6396 6397 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 6398 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 6399 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 6400 if (red < max_time) 6401 max_time -= red; 6402 else 6403 max_time = 1; 6404 } 6405 /* Reduce timeout to the keep value if needed */ 6406 if (max_time < to) 6407 to = max_time; 6408 } 6409 return (to); 6410 } 6411 return (0); 6412 } 6413 if (rsm->r_flags & RACK_ACKED) { 6414 rsm = rack_find_lowest_rsm(rack); 6415 if (rsm == NULL) { 6416 /* No lowest? */ 6417 goto activate_rxt; 6418 } 6419 } 6420 if (rack->sack_attack_disable) { 6421 /* 6422 * We don't want to do 6423 * any TLP's if you are an attacker. 6424 * Though if you are doing what 6425 * is expected you may still have 6426 * SACK-PASSED marks. 6427 */ 6428 goto activate_rxt; 6429 } 6430 /* Convert from ms to usecs */ 6431 if ((rsm->r_flags & RACK_SACK_PASSED) || 6432 (rsm->r_flags & RACK_RWND_COLLAPSED) || 6433 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 6434 if ((tp->t_flags & TF_SENTFIN) && 6435 ((tp->snd_max - tp->snd_una) == 1) && 6436 (rsm->r_flags & RACK_HAS_FIN)) { 6437 /* 6438 * We don't start a rack timer if all we have is a 6439 * FIN outstanding. 6440 */ 6441 goto activate_rxt; 6442 } 6443 if ((rack->use_rack_rr == 0) && 6444 (IN_FASTRECOVERY(tp->t_flags)) && 6445 (rack->rack_no_prr == 0) && 6446 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 6447 /* 6448 * We are not cheating, in recovery and 6449 * not enough ack's to yet get our next 6450 * retransmission out. 6451 * 6452 * Note that classified attackers do not 6453 * get to use the rack-cheat. 6454 */ 6455 goto activate_tlp; 6456 } 6457 srtt = rack_grab_rtt(tp, rack); 6458 thresh = rack_calc_thresh_rack(rack, srtt, cts); 6459 idx = rsm->r_rtr_cnt - 1; 6460 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 6461 if (SEQ_GEQ(exp, cts)) { 6462 to = exp - cts; 6463 if (to < rack->r_ctl.rc_min_to) { 6464 to = rack->r_ctl.rc_min_to; 6465 if (rack->r_rr_config == 3) 6466 rack->rc_on_min_to = 1; 6467 } 6468 } else { 6469 to = rack->r_ctl.rc_min_to; 6470 if (rack->r_rr_config == 3) 6471 rack->rc_on_min_to = 1; 6472 } 6473 } else { 6474 /* Ok we need to do a TLP not RACK */ 6475 activate_tlp: 6476 if ((rack->rc_tlp_in_progress != 0) && 6477 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 6478 /* 6479 * The previous send was a TLP and we have sent 6480 * N TLP's without sending new data. 6481 */ 6482 goto activate_rxt; 6483 } 6484 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 6485 if (rsm == NULL) { 6486 /* We found no rsm to TLP with. 
*/ 6487 goto activate_rxt; 6488 } 6489 if (rsm->r_flags & RACK_HAS_FIN) { 6490 /* If its a FIN we dont do TLP */ 6491 rsm = NULL; 6492 goto activate_rxt; 6493 } 6494 idx = rsm->r_rtr_cnt - 1; 6495 time_since_sent = 0; 6496 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 6497 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 6498 else 6499 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 6500 if (TSTMP_GT(cts, tstmp_touse)) 6501 time_since_sent = cts - tstmp_touse; 6502 is_tlp_timer = 1; 6503 if (tp->t_srtt) { 6504 if ((rack->rc_srtt_measure_made == 0) && 6505 (tp->t_srtt == 1)) { 6506 /* 6507 * If another stack as run and set srtt to 1, 6508 * then the srtt was 0, so lets use the initial. 6509 */ 6510 srtt = RACK_INITIAL_RTO; 6511 } else { 6512 srtt_cur = tp->t_srtt; 6513 srtt = srtt_cur; 6514 } 6515 } else 6516 srtt = RACK_INITIAL_RTO; 6517 /* 6518 * If the SRTT is not keeping up and the 6519 * rack RTT has spiked we want to use 6520 * the last RTT not the smoothed one. 6521 */ 6522 if (rack_tlp_use_greater && 6523 tp->t_srtt && 6524 (srtt < rack_grab_rtt(tp, rack))) { 6525 srtt = rack_grab_rtt(tp, rack); 6526 } 6527 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 6528 if (thresh > time_since_sent) { 6529 to = thresh - time_since_sent; 6530 } else { 6531 to = rack->r_ctl.rc_min_to; 6532 rack_log_alt_to_to_cancel(rack, 6533 thresh, /* flex1 */ 6534 time_since_sent, /* flex2 */ 6535 tstmp_touse, /* flex3 */ 6536 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 6537 (uint32_t)rsm->r_tim_lastsent[idx], 6538 srtt, 6539 idx, 99); 6540 } 6541 if (to < rack_tlp_min) { 6542 to = rack_tlp_min; 6543 } 6544 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) { 6545 /* 6546 * If the TLP time works out to larger than the max 6547 * RTO lets not do TLP.. just RTO. 6548 */ 6549 goto activate_rxt; 6550 } 6551 } 6552 if (is_tlp_timer == 0) { 6553 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 6554 } else { 6555 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 6556 } 6557 if (to == 0) 6558 to = 1; 6559 return (to); 6560 } 6561 6562 static void 6563 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, tcp_seq snd_una) 6564 { 6565 struct timeval tv; 6566 6567 if (rack->rc_in_persist == 0) { 6568 if (tp->t_flags & TF_GPUTINPROG) { 6569 /* 6570 * Stop the goodput now, the calling of the 6571 * measurement function clears the flag. 
6572 */ 6573 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, 6574 RACK_QUALITY_PERSIST); 6575 } 6576 #ifdef NETFLIX_SHARED_CWND 6577 if (rack->r_ctl.rc_scw) { 6578 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 6579 rack->rack_scwnd_is_idle = 1; 6580 } 6581 #endif 6582 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(&tv); 6583 if (rack->lt_bw_up) { 6584 /* Suspend our LT BW measurement */ 6585 uint64_t tmark; 6586 6587 rack->r_ctl.lt_bw_bytes += (snd_una - rack->r_ctl.lt_seq); 6588 rack->r_ctl.lt_seq = snd_una; 6589 tmark = tcp_tv_to_lusectick(&tv); 6590 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 6591 rack->r_ctl.lt_timemark = tmark; 6592 rack->lt_bw_up = 0; 6593 rack->r_persist_lt_bw_off = 1; 6594 } 6595 if (rack->r_ctl.rc_went_idle_time == 0) 6596 rack->r_ctl.rc_went_idle_time = 1; 6597 rack_timer_cancel(tp, rack, cts, __LINE__); 6598 rack->r_ctl.persist_lost_ends = 0; 6599 rack->probe_not_answered = 0; 6600 rack->forced_ack = 0; 6601 tp->t_rxtshift = 0; 6602 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 6603 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 6604 rack->rc_in_persist = 1; 6605 } 6606 } 6607 6608 static void 6609 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6610 { 6611 struct timeval tv; 6612 uint32_t t_time; 6613 6614 if (tcp_in_hpts(rack->rc_tp)) { 6615 tcp_hpts_remove(rack->rc_tp); 6616 rack->r_ctl.rc_hpts_flags = 0; 6617 } 6618 #ifdef NETFLIX_SHARED_CWND 6619 if (rack->r_ctl.rc_scw) { 6620 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 6621 rack->rack_scwnd_is_idle = 0; 6622 } 6623 #endif 6624 t_time = tcp_get_usecs(&tv); 6625 if (rack->rc_gp_dyn_mul && 6626 (rack->use_fixed_rate == 0) && 6627 (rack->rc_always_pace)) { 6628 /* 6629 * Do we count this as if a probe-rtt just 6630 * finished? 6631 */ 6632 uint32_t time_idle, idle_min; 6633 6634 time_idle = t_time - rack->r_ctl.rc_went_idle_time; 6635 idle_min = rack_min_probertt_hold; 6636 if (rack_probertt_gpsrtt_cnt_div) { 6637 uint64_t extra; 6638 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 6639 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 6640 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 6641 idle_min += (uint32_t)extra; 6642 } 6643 if (time_idle >= idle_min) { 6644 /* Yes, we count it as a probe-rtt. 
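 * A rough illustration (made-up values, the real ones are
 * sysctl driven): with rack_min_probertt_hold at 200000 usecs,
 * a gp_srtt of 30000 usecs and a cnt_mul/cnt_div of 4/1,
 * idle_min works out to 200000 + 120000 = 320000 usecs; any
 * idle period at least that long lands here.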
*/ 6645 uint32_t us_cts; 6646 6647 us_cts = tcp_get_usecs(NULL); 6648 if (rack->in_probe_rtt == 0) { 6649 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 6650 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 6651 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 6652 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 6653 } else { 6654 rack_exit_probertt(rack, us_cts); 6655 } 6656 } 6657 } 6658 if (rack->r_persist_lt_bw_off) { 6659 /* Continue where we left off */ 6660 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv); 6661 rack->lt_bw_up = 1; 6662 rack->r_persist_lt_bw_off = 0; 6663 } 6664 rack->rc_in_persist = 0; 6665 rack->r_ctl.rc_went_idle_time = 0; 6666 tp->t_rxtshift = 0; 6667 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 6668 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 6669 rack->r_ctl.rc_agg_delayed = 0; 6670 rack->r_early = 0; 6671 rack->r_late = 0; 6672 rack->r_ctl.rc_agg_early = 0; 6673 } 6674 6675 static void 6676 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 6677 struct hpts_diag *diag, struct timeval *tv) 6678 { 6679 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 6680 union tcp_log_stackspecific log; 6681 6682 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 6683 log.u_bbr.flex1 = diag->p_nxt_slot; 6684 log.u_bbr.flex2 = diag->p_cur_slot; 6685 log.u_bbr.flex3 = diag->slot_req; 6686 log.u_bbr.flex4 = diag->inp_hptsslot; 6687 log.u_bbr.flex5 = diag->slot_remaining; 6688 log.u_bbr.flex6 = diag->need_new_to; 6689 log.u_bbr.flex7 = diag->p_hpts_active; 6690 log.u_bbr.flex8 = diag->p_on_min_sleep; 6691 /* Hijack other fields as needed */ 6692 log.u_bbr.epoch = diag->have_slept; 6693 log.u_bbr.lt_epoch = diag->yet_to_sleep; 6694 log.u_bbr.pkts_out = diag->co_ret; 6695 log.u_bbr.applimited = diag->hpts_sleep_time; 6696 log.u_bbr.delivered = diag->p_prev_slot; 6697 log.u_bbr.inflight = diag->p_runningslot; 6698 log.u_bbr.bw_inuse = diag->wheel_slot; 6699 log.u_bbr.rttProp = diag->wheel_cts; 6700 log.u_bbr.timeStamp = cts; 6701 log.u_bbr.delRate = diag->maxslots; 6702 log.u_bbr.cur_del_rate = diag->p_curtick; 6703 log.u_bbr.cur_del_rate <<= 32; 6704 log.u_bbr.cur_del_rate |= diag->p_lasttick; 6705 TCP_LOG_EVENTP(rack->rc_tp, NULL, 6706 &rack->rc_inp->inp_socket->so_rcv, 6707 &rack->rc_inp->inp_socket->so_snd, 6708 BBR_LOG_HPTSDIAG, 0, 6709 0, &log, false, tv); 6710 } 6711 6712 } 6713 6714 static void 6715 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 6716 { 6717 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 6718 union tcp_log_stackspecific log; 6719 struct timeval tv; 6720 6721 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 6722 log.u_bbr.flex1 = sb->sb_flags; 6723 log.u_bbr.flex2 = len; 6724 log.u_bbr.flex3 = sb->sb_state; 6725 log.u_bbr.flex8 = type; 6726 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 6727 TCP_LOG_EVENTP(rack->rc_tp, NULL, 6728 &rack->rc_inp->inp_socket->so_rcv, 6729 &rack->rc_inp->inp_socket->so_snd, 6730 TCP_LOG_SB_WAKE, 0, 6731 len, &log, false, &tv); 6732 } 6733 } 6734 6735 static void 6736 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 6737 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 6738 { 6739 struct hpts_diag diag; 6740 struct inpcb *inp = tptoinpcb(tp); 6741 struct timeval tv; 6742 uint32_t delayed_ack = 0; 6743 uint32_t hpts_timeout; 6744 uint32_t entry_slot = slot; 6745 uint8_t stopped; 6746 uint32_t left = 0; 6747 uint32_t us_cts; 6748 6749 if ((tp->t_state == 
TCPS_CLOSED) || 6750 (tp->t_state == TCPS_LISTEN)) { 6751 return; 6752 } 6753 if (tcp_in_hpts(tp)) { 6754 /* Already on the pacer */ 6755 return; 6756 } 6757 stopped = rack->rc_tmr_stopped; 6758 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 6759 left = rack->r_ctl.rc_timer_exp - cts; 6760 } 6761 rack->r_ctl.rc_timer_exp = 0; 6762 rack->r_ctl.rc_hpts_flags = 0; 6763 us_cts = tcp_get_usecs(&tv); 6764 /* Now early/late accounting */ 6765 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0); 6766 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { 6767 /* 6768 * We have a early carry over set, 6769 * we can always add more time so we 6770 * can always make this compensation. 6771 * 6772 * Note if ack's are allowed to wake us do not 6773 * penalize the next timer for being awoke 6774 * by an ack aka the rc_agg_early (non-paced mode). 6775 */ 6776 slot += rack->r_ctl.rc_agg_early; 6777 rack->r_early = 0; 6778 rack->r_ctl.rc_agg_early = 0; 6779 } 6780 if (rack->r_late) { 6781 /* 6782 * This is harder, we can 6783 * compensate some but it 6784 * really depends on what 6785 * the current pacing time is. 6786 */ 6787 if (rack->r_ctl.rc_agg_delayed >= slot) { 6788 /* 6789 * We can't compensate for it all. 6790 * And we have to have some time 6791 * on the clock. We always have a min 6792 * 10 slots (10 x 10 i.e. 100 usecs). 6793 */ 6794 if (slot <= HPTS_TICKS_PER_SLOT) { 6795 /* We gain delay */ 6796 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); 6797 slot = HPTS_TICKS_PER_SLOT; 6798 } else { 6799 /* We take off some */ 6800 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); 6801 slot = HPTS_TICKS_PER_SLOT; 6802 } 6803 } else { 6804 slot -= rack->r_ctl.rc_agg_delayed; 6805 rack->r_ctl.rc_agg_delayed = 0; 6806 /* Make sure we have 100 useconds at minimum */ 6807 if (slot < HPTS_TICKS_PER_SLOT) { 6808 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; 6809 slot = HPTS_TICKS_PER_SLOT; 6810 } 6811 if (rack->r_ctl.rc_agg_delayed == 0) 6812 rack->r_late = 0; 6813 } 6814 } 6815 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 6816 #ifdef TCP_SAD_DETECTION 6817 if (rack->sack_attack_disable && 6818 (rack->r_ctl.ack_during_sd > 0) && 6819 (slot < tcp_sad_pacing_interval)) { 6820 /* 6821 * We have a potential attacker on 6822 * the line. We have possibly some 6823 * (or now) pacing time set. We want to 6824 * slow down the processing of sacks by some 6825 * amount (if it is an attacker). Set the default 6826 * slot for attackers in place (unless the original 6827 * interval is longer). Its stored in 6828 * micro-seconds, so lets convert to msecs. 6829 */ 6830 slot = tcp_sad_pacing_interval; 6831 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv, __LINE__); 6832 rack->r_ctl.ack_during_sd = 0; 6833 } 6834 #endif 6835 if (tp->t_flags & TF_DELACK) { 6836 delayed_ack = TICKS_2_USEC(tcp_delacktime); 6837 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 6838 } 6839 if (delayed_ack && ((hpts_timeout == 0) || 6840 (delayed_ack < hpts_timeout))) 6841 hpts_timeout = delayed_ack; 6842 else 6843 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 6844 /* 6845 * If no timers are going to run and we will fall off the hptsi 6846 * wheel, we resort to a keep-alive timer if its configured. 
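 * As an illustration (the defaults shown are tunable): an idle
 * established connection with keep-alives enabled gets queued
 * for TP_KEEPIDLE (two hours by default), while a connection
 * sitting in probe-rtt is instead woken after
 * rack_min_probertt_hold so it can exit probe-rtt promptly.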
6847 */ 6848 if ((hpts_timeout == 0) && 6849 (slot == 0)) { 6850 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 6851 (tp->t_state <= TCPS_CLOSING)) { 6852 /* 6853 * Ok we have no timer (persists, rack, tlp, rxt or 6854 * del-ack), we don't have segments being paced. So 6855 * all that is left is the keepalive timer. 6856 */ 6857 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 6858 /* Get the established keep-alive time */ 6859 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 6860 } else { 6861 /* 6862 * Get the initial setup keep-alive time, 6863 * note that this is probably not going to 6864 * happen, since rack will be running a rxt timer 6865 * if a SYN of some sort is outstanding. It is 6866 * actually handled in rack_timeout_rxt(). 6867 */ 6868 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 6869 } 6870 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 6871 if (rack->in_probe_rtt) { 6872 /* 6873 * We want to instead not wake up a long time from 6874 * now but to wake up about the time we would 6875 * exit probe-rtt and initiate a keep-alive ack. 6876 * This will get us out of probe-rtt and update 6877 * our min-rtt. 6878 */ 6879 hpts_timeout = rack_min_probertt_hold; 6880 } 6881 } 6882 } 6883 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 6884 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 6885 /* 6886 * RACK, TLP, persists and RXT timers all are restartable 6887 * based on actions input .. i.e we received a packet (ack 6888 * or sack) and that changes things (rw, or snd_una etc). 6889 * Thus we can restart them with a new value. For 6890 * keep-alive, delayed_ack we keep track of what was left 6891 * and restart the timer with a smaller value. 6892 */ 6893 if (left < hpts_timeout) 6894 hpts_timeout = left; 6895 } 6896 if (hpts_timeout) { 6897 /* 6898 * Hack alert for now we can't time-out over 2,147,483 6899 * seconds (a bit more than 596 hours), which is probably ok 6900 * :). 6901 */ 6902 if (hpts_timeout > 0x7ffffffe) 6903 hpts_timeout = 0x7ffffffe; 6904 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 6905 } 6906 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); 6907 if ((rack->gp_ready == 0) && 6908 (rack->use_fixed_rate == 0) && 6909 (hpts_timeout < slot) && 6910 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 6911 /* 6912 * We have no good estimate yet for the 6913 * old clunky burst mitigation or the 6914 * real pacing. And the tlp or rxt is smaller 6915 * than the pacing calculation. Lets not 6916 * pace that long since we know the calculation 6917 * so far is not accurate. 6918 */ 6919 slot = hpts_timeout; 6920 } 6921 /** 6922 * Turn off all the flags for queuing by default. The 6923 * flags have important meanings to what happens when 6924 * LRO interacts with the transport. Most likely (by default now) 6925 * mbuf_queueing and ack compression are on. So the transport 6926 * has a couple of flags that control what happens (if those 6927 * are not on then these flags won't have any effect since it 6928 * won't go through the queuing LRO path). 6929 * 6930 * TF2_MBUF_QUEUE_READY - This flags says that I am busy 6931 * pacing output, so don't disturb. But 6932 * it also means LRO can wake me if there 6933 * is a SACK arrival. 6934 * 6935 * TF2_DONT_SACK_QUEUE - This flag is used in conjunction 6936 * with the above flag (QUEUE_READY) and 6937 * when present it says don't even wake me 6938 * if a SACK arrives. 
6939 * 6940 * The idea behind these flags is that if we are pacing we 6941 * set the MBUF_QUEUE_READY and only get woken up if 6942 * a SACK arrives (which could change things) or if 6943 * our pacing timer expires. If, however, we have a rack 6944 * timer running, then we don't even want a sack to wake 6945 * us since the rack timer has to expire before we can send. 6946 * 6947 * Other cases should usually have none of the flags set 6948 * so LRO can call into us. 6949 */ 6950 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE|TF2_MBUF_QUEUE_READY); 6951 if (slot) { 6952 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 6953 rack->r_ctl.rc_last_output_to = us_cts + slot; 6954 /* 6955 * A pacing timer (slot) is being set, in 6956 * such a case we cannot send (we are blocked by 6957 * the timer). So lets tell LRO that it should not 6958 * wake us unless there is a SACK. Note this only 6959 * will be effective if mbuf queueing is on or 6960 * compressed acks are being processed. 6961 */ 6962 tp->t_flags2 |= TF2_MBUF_QUEUE_READY; 6963 /* 6964 * But wait if we have a Rack timer running 6965 * even a SACK should not disturb us (with 6966 * the exception of r_rr_config 3). 6967 */ 6968 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) || 6969 (IN_RECOVERY(tp->t_flags))) { 6970 if (rack->r_rr_config != 3) 6971 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; 6972 else if (rack->rc_pace_dnd) { 6973 /* 6974 * When DND is on, we only let a sack 6975 * interrupt us if we are not in recovery. 6976 * 6977 * If DND is off, then we never hit here 6978 * and let all sacks wake us up. 6979 * 6980 */ 6981 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; 6982 } 6983 } 6984 /* For sack attackers we want to ignore sack */ 6985 if (rack->sack_attack_disable == 1) { 6986 tp->t_flags2 |= (TF2_DONT_SACK_QUEUE | 6987 TF2_MBUF_QUEUE_READY); 6988 } else if (rack->rc_ack_can_sendout_data) { 6989 /* 6990 * Ahh but wait, this is that special case 6991 * where the pacing timer can be disturbed 6992 * backout the changes (used for non-paced 6993 * burst limiting). 6994 */ 6995 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE | 6996 TF2_MBUF_QUEUE_READY); 6997 } 6998 if ((rack->use_rack_rr) && 6999 (rack->r_rr_config < 2) && 7000 ((hpts_timeout) && (hpts_timeout < slot))) { 7001 /* 7002 * Arrange for the hpts to kick back in after the 7003 * t-o if the t-o does not cause a send. 7004 */ 7005 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout), 7006 __LINE__, &diag); 7007 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 7008 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 7009 } else { 7010 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(slot), 7011 __LINE__, &diag); 7012 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 7013 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 7014 } 7015 } else if (hpts_timeout) { 7016 /* 7017 * With respect to t_flags2(?) here, lets let any new acks wake 7018 * us up here. Since we are not pacing (no pacing timer), output 7019 * can happen so we should let it. If its a Rack timer, then any inbound 7020 * packet probably won't change the sending (we will be blocked) 7021 * but it may change the prr stats so letting it in (the set defaults 7022 * at the start of this block) are good enough. 
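 * A compact recap of the outcomes (illustrative): a pacing slot
 * alone sets TF2_MBUF_QUEUE_READY; a pacing slot plus a RACK
 * timer or recovery (outside r_rr_config 3) also sets
 * TF2_DONT_SACK_QUEUE; a timer with no pacing slot, as handled
 * here, leaves both flags clear so any ack or sack may wake us.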
7023 */ 7024 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 7025 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout), 7026 __LINE__, &diag); 7027 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 7028 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 7029 } else { 7030 /* No timer starting */ 7031 #ifdef INVARIANTS 7032 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 7033 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 7034 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 7035 } 7036 #endif 7037 } 7038 rack->rc_tmr_stopped = 0; 7039 if (slot) 7040 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv, __LINE__); 7041 } 7042 7043 /* 7044 * RACK Timer, here we simply do logging and house keeping. 7045 * the normal rack_output() function will call the 7046 * appropriate thing to check if we need to do a RACK retransmit. 7047 * We return 1, saying don't proceed with rack_output only 7048 * when all timers have been stopped (destroyed PCB?). 7049 */ 7050 static int 7051 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7052 { 7053 /* 7054 * This timer simply provides an internal trigger to send out data. 7055 * The check_recovery_mode call will see if there are needed 7056 * retransmissions, if so we will enter fast-recovery. The output 7057 * call may or may not do the same thing depending on sysctl 7058 * settings. 7059 */ 7060 struct rack_sendmap *rsm; 7061 7062 counter_u64_add(rack_to_tot, 1); 7063 if (rack->r_state && (rack->r_state != tp->t_state)) 7064 rack_set_state(tp, rack); 7065 rack->rc_on_min_to = 0; 7066 rsm = rack_check_recovery_mode(tp, cts); 7067 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 7068 if (rsm) { 7069 rack->r_ctl.rc_resend = rsm; 7070 rack->r_timer_override = 1; 7071 if (rack->use_rack_rr) { 7072 /* 7073 * Don't accumulate extra pacing delay 7074 * we are allowing the rack timer to 7075 * over-ride pacing i.e. rrr takes precedence 7076 * if the pacing interval is longer than the rrr 7077 * time (in other words we get the min pacing 7078 * time versus rrr pacing time). 7079 */ 7080 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 7081 } 7082 } 7083 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 7084 if (rsm == NULL) { 7085 /* restart a timer and return 1 */ 7086 rack_start_hpts_timer(rack, tp, cts, 7087 0, 0, 0); 7088 return (1); 7089 } 7090 return (0); 7091 } 7092 7093 7094 7095 static void 7096 rack_adjust_orig_mlen(struct rack_sendmap *rsm) 7097 { 7098 7099 if ((M_TRAILINGROOM(rsm->m) != rsm->orig_t_space)) { 7100 /* 7101 * The trailing space changed, mbufs can grow 7102 * at the tail but they can't shrink from 7103 * it, KASSERT that. Adjust the orig_m_len to 7104 * compensate for this change. 7105 */ 7106 KASSERT((rsm->orig_t_space > M_TRAILINGROOM(rsm->m)), 7107 ("mbuf:%p rsm:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n", 7108 rsm->m, 7109 rsm, 7110 (intmax_t)M_TRAILINGROOM(rsm->m), 7111 rsm->orig_t_space, 7112 rsm->orig_m_len, 7113 rsm->m->m_len)); 7114 rsm->orig_m_len += (rsm->orig_t_space - M_TRAILINGROOM(rsm->m)); 7115 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7116 } 7117 if (rsm->m->m_len < rsm->orig_m_len) { 7118 /* 7119 * Mbuf shrank, trimmed off the top by an ack, our 7120 * offset changes. 
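 * For example (illustrative sizes): if orig_m_len was 1448 and
 * the mbuf now holds only 1000 bytes, 448 bytes were trimmed
 * from the front, so soff below is pulled back by 448 as well.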
7121 */ 7122 KASSERT((rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)), 7123 ("mbuf:%p len:%u rsm:%p oml:%u soff:%u\n", 7124 rsm->m, rsm->m->m_len, 7125 rsm, rsm->orig_m_len, 7126 rsm->soff)); 7127 if (rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)) 7128 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); 7129 else 7130 rsm->soff = 0; 7131 rsm->orig_m_len = rsm->m->m_len; 7132 #ifdef INVARIANTS 7133 } else if (rsm->m->m_len > rsm->orig_m_len) { 7134 panic("rsm:%p m:%p m_len grew outside of t_space compensation", 7135 rsm, rsm->m); 7136 #endif 7137 } 7138 } 7139 7140 static void 7141 rack_setup_offset_for_rsm(struct tcp_rack *rack, struct rack_sendmap *src_rsm, struct rack_sendmap *rsm) 7142 { 7143 struct mbuf *m; 7144 uint32_t soff; 7145 7146 if (src_rsm->m && 7147 ((src_rsm->orig_m_len != src_rsm->m->m_len) || 7148 (M_TRAILINGROOM(src_rsm->m) != src_rsm->orig_t_space))) { 7149 /* Fix up the orig_m_len and possibly the mbuf offset */ 7150 rack_adjust_orig_mlen(src_rsm); 7151 } 7152 m = src_rsm->m; 7153 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); 7154 while (soff >= m->m_len) { 7155 /* Move out past this mbuf */ 7156 soff -= m->m_len; 7157 m = m->m_next; 7158 KASSERT((m != NULL), 7159 ("rsm:%p nrsm:%p hit at soff:%u null m", 7160 src_rsm, rsm, soff)); 7161 if (m == NULL) { 7162 /* This should *not* happen which is why there is a kassert */ 7163 src_rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 7164 (src_rsm->r_start - rack->rc_tp->snd_una), 7165 &src_rsm->soff); 7166 src_rsm->orig_m_len = src_rsm->m->m_len; 7167 src_rsm->orig_t_space = M_TRAILINGROOM(src_rsm->m); 7168 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 7169 (rsm->r_start - rack->rc_tp->snd_una), 7170 &rsm->soff); 7171 rsm->orig_m_len = rsm->m->m_len; 7172 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7173 return; 7174 } 7175 } 7176 rsm->m = m; 7177 rsm->soff = soff; 7178 rsm->orig_m_len = m->m_len; 7179 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 7180 } 7181 7182 static __inline void 7183 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 7184 struct rack_sendmap *rsm, uint32_t start) 7185 { 7186 int idx; 7187 7188 nrsm->r_start = start; 7189 nrsm->r_end = rsm->r_end; 7190 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 7191 nrsm->r_flags = rsm->r_flags; 7192 nrsm->r_dupack = rsm->r_dupack; 7193 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; 7194 nrsm->r_rtr_bytes = 0; 7195 nrsm->r_fas = rsm->r_fas; 7196 nrsm->r_bas = rsm->r_bas; 7197 rsm->r_end = nrsm->r_start; 7198 nrsm->r_just_ret = rsm->r_just_ret; 7199 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 7200 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 7201 } 7202 /* Now if we have SYN flag we keep it on the left edge */ 7203 if (nrsm->r_flags & RACK_HAS_SYN) 7204 nrsm->r_flags &= ~RACK_HAS_SYN; 7205 /* Now if we have a FIN flag we keep it on the right edge */ 7206 if (rsm->r_flags & RACK_HAS_FIN) 7207 rsm->r_flags &= ~RACK_HAS_FIN; 7208 /* Push bit must go to the right edge as well */ 7209 if (rsm->r_flags & RACK_HAD_PUSH) 7210 rsm->r_flags &= ~RACK_HAD_PUSH; 7211 /* Clone over the state of the hw_tls flag */ 7212 nrsm->r_hw_tls = rsm->r_hw_tls; 7213 /* 7214 * Now we need to find nrsm's new location in the mbuf chain 7215 * we basically calculate a new offset, which is soff + 7216 * how much is left in original rsm. Then we walk out the mbuf 7217 * chain to find the righ position, it may be the same mbuf 7218 * or maybe not. 
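 * A small illustration (made-up sizes): if the left piece keeps
 * 1448 bytes and started at soff 100 of a 1000 byte mbuf, the
 * new offset is 100 + 1448 = 1548; the walk skips past that
 * mbuf and lands 548 bytes into its successor.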
7219 */ 7220 KASSERT(((rsm->m != NULL) || 7221 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 7222 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 7223 if (rsm->m) 7224 rack_setup_offset_for_rsm(rack, rsm, nrsm); 7225 } 7226 7227 static struct rack_sendmap * 7228 rack_merge_rsm(struct tcp_rack *rack, 7229 struct rack_sendmap *l_rsm, 7230 struct rack_sendmap *r_rsm) 7231 { 7232 /* 7233 * We are merging two ack'd RSM's, 7234 * the l_rsm is on the left (lower seq 7235 * values) and the r_rsm is on the right 7236 * (higher seq value). The simplest way 7237 * to merge these is to move the right 7238 * one into the left. I don't think there 7239 * is any reason we need to try to find 7240 * the oldest (or last oldest retransmitted). 7241 */ 7242 rack_log_map_chg(rack->rc_tp, rack, NULL, 7243 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 7244 l_rsm->r_end = r_rsm->r_end; 7245 if (l_rsm->r_dupack < r_rsm->r_dupack) 7246 l_rsm->r_dupack = r_rsm->r_dupack; 7247 if (r_rsm->r_rtr_bytes) 7248 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 7249 if (r_rsm->r_in_tmap) { 7250 /* This really should not happen */ 7251 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 7252 r_rsm->r_in_tmap = 0; 7253 } 7254 7255 /* Now the flags */ 7256 if (r_rsm->r_flags & RACK_HAS_FIN) 7257 l_rsm->r_flags |= RACK_HAS_FIN; 7258 if (r_rsm->r_flags & RACK_TLP) 7259 l_rsm->r_flags |= RACK_TLP; 7260 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 7261 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 7262 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 7263 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 7264 /* 7265 * If both are app-limited then let the 7266 * free lower the count. If right is app 7267 * limited and left is not, transfer. 7268 */ 7269 l_rsm->r_flags |= RACK_APP_LIMITED; 7270 r_rsm->r_flags &= ~RACK_APP_LIMITED; 7271 if (r_rsm == rack->r_ctl.rc_first_appl) 7272 rack->r_ctl.rc_first_appl = l_rsm; 7273 } 7274 tqhash_remove(rack->r_ctl.tqh, r_rsm, REMOVE_TYPE_MERGE); 7275 /* 7276 * We keep the largest value, which is the newest 7277 * send. We do this in case a segment that is 7278 * joined together and not part of a GP estimate 7279 * later gets expanded into the GP estimate. 7280 * 7281 * We prohibit the merging of unlike kinds i.e. 7282 * all pieces that are in the GP estimate can be 7283 * merged and all pieces that are not in a GP estimate 7284 * can be merged, but not disimilar pieces. Combine 7285 * this with taking the highest here and we should 7286 * be ok unless of course the client reneges. Then 7287 * all bets are off. 7288 */ 7289 if(l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] < 7290 r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]) { 7291 l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] = r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]; 7292 } 7293 /* 7294 * When merging two RSM's we also need to consider the ack time and keep 7295 * newest. If the ack gets merged into a measurement then that is the 7296 * one we will want to be using. 7297 */ 7298 if(l_rsm->r_ack_arrival < r_rsm->r_ack_arrival) 7299 l_rsm->r_ack_arrival = r_rsm->r_ack_arrival; 7300 7301 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 7302 /* Transfer the split limit to the map we free */ 7303 r_rsm->r_limit_type = l_rsm->r_limit_type; 7304 l_rsm->r_limit_type = 0; 7305 } 7306 rack_free(rack, r_rsm); 7307 l_rsm->r_flags |= RACK_MERGED; 7308 return (l_rsm); 7309 } 7310 7311 /* 7312 * TLP Timer, here we simply setup what segment we want to 7313 * have the TLP expire on, the normal rack_output() will then 7314 * send it out. 
7315 * 7316 * We return 1, saying don't proceed with rack_output only 7317 * when all timers have been stopped (destroyed PCB?). 7318 */ 7319 static int 7320 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp) 7321 { 7322 /* 7323 * Tail Loss Probe. 7324 */ 7325 struct rack_sendmap *rsm = NULL; 7326 int insret __diagused; 7327 struct socket *so = tptosocket(tp); 7328 uint32_t amm; 7329 uint32_t out, avail; 7330 int collapsed_win = 0; 7331 7332 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 7333 /* Its not time yet */ 7334 return (0); 7335 } 7336 if (ctf_progress_timeout_check(tp, true)) { 7337 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7338 return (-ETIMEDOUT); /* tcp_drop() */ 7339 } 7340 /* 7341 * A TLP timer has expired. We have been idle for 2 rtts. So we now 7342 * need to figure out how to force a full MSS segment out. 7343 */ 7344 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); 7345 rack->r_ctl.retran_during_recovery = 0; 7346 rack->r_ctl.dsack_byte_cnt = 0; 7347 counter_u64_add(rack_tlp_tot, 1); 7348 if (rack->r_state && (rack->r_state != tp->t_state)) 7349 rack_set_state(tp, rack); 7350 avail = sbavail(&so->so_snd); 7351 out = tp->snd_max - tp->snd_una; 7352 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) { 7353 /* special case, we need a retransmission */ 7354 collapsed_win = 1; 7355 goto need_retran; 7356 } 7357 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { 7358 rack->r_ctl.dsack_persist--; 7359 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 7360 rack->r_ctl.num_dsack = 0; 7361 } 7362 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 7363 } 7364 if ((tp->t_flags & TF_GPUTINPROG) && 7365 (rack->r_ctl.rc_tlp_cnt_out == 1)) { 7366 /* 7367 * If this is the second in a row 7368 * TLP and we are doing a measurement 7369 * its time to abandon the measurement. 7370 * Something is likely broken on 7371 * the clients network and measuring a 7372 * broken network does us no good. 7373 */ 7374 tp->t_flags &= ~TF_GPUTINPROG; 7375 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7376 rack->r_ctl.rc_gp_srtt /*flex1*/, 7377 tp->gput_seq, 7378 0, 0, 18, __LINE__, NULL, 0); 7379 } 7380 /* 7381 * Check our send oldest always settings, and if 7382 * there is an oldest to send jump to the need_retran. 7383 */ 7384 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 7385 goto need_retran; 7386 7387 if (avail > out) { 7388 /* New data is available */ 7389 amm = avail - out; 7390 if (amm > ctf_fixed_maxseg(tp)) { 7391 amm = ctf_fixed_maxseg(tp); 7392 if ((amm + out) > tp->snd_wnd) { 7393 /* We are rwnd limited */ 7394 goto need_retran; 7395 } 7396 } else if (amm < ctf_fixed_maxseg(tp)) { 7397 /* not enough to fill a MTU */ 7398 goto need_retran; 7399 } 7400 if (IN_FASTRECOVERY(tp->t_flags)) { 7401 /* Unlikely */ 7402 if (rack->rack_no_prr == 0) { 7403 if (out + amm <= tp->snd_wnd) { 7404 rack->r_ctl.rc_prr_sndcnt = amm; 7405 rack->r_ctl.rc_tlp_new_data = amm; 7406 rack_log_to_prr(rack, 4, 0, __LINE__); 7407 } 7408 } else 7409 goto need_retran; 7410 } else { 7411 /* Set the send-new override */ 7412 if (out + amm <= tp->snd_wnd) 7413 rack->r_ctl.rc_tlp_new_data = amm; 7414 else 7415 goto need_retran; 7416 } 7417 rack->r_ctl.rc_tlpsend = NULL; 7418 counter_u64_add(rack_tlp_newdata, 1); 7419 goto send; 7420 } 7421 need_retran: 7422 /* 7423 * Ok we need to arrange the last un-acked segment to be re-sent, or 7424 * optionally the first un-acked segment. 
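 * For example (illustrative sizes): with a 1448 byte MSS and a
 * trailing 4096 byte rsm, the rsm is split below so that the
 * probe retransmits only its final 1448 bytes.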
7425 */ 7426 if (collapsed_win == 0) { 7427 if (rack_always_send_oldest) 7428 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 7429 else { 7430 rsm = tqhash_max(rack->r_ctl.tqh); 7431 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 7432 rsm = rack_find_high_nonack(rack, rsm); 7433 } 7434 } 7435 if (rsm == NULL) { 7436 #ifdef TCP_BLACKBOX 7437 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 7438 #endif 7439 goto out; 7440 } 7441 } else { 7442 /* 7443 * We had a collapsed window, lets find 7444 * the point before the collapse. 7445 */ 7446 if (SEQ_GT((rack->r_ctl.last_collapse_point - 1), rack->rc_tp->snd_una)) 7447 rsm = tqhash_find(rack->r_ctl.tqh, (rack->r_ctl.last_collapse_point - 1)); 7448 else { 7449 rsm = tqhash_min(rack->r_ctl.tqh); 7450 } 7451 if (rsm == NULL) { 7452 /* Huh */ 7453 goto out; 7454 } 7455 } 7456 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 7457 /* 7458 * We need to split this the last segment in two. 7459 */ 7460 struct rack_sendmap *nrsm; 7461 7462 nrsm = rack_alloc_full_limit(rack); 7463 if (nrsm == NULL) { 7464 /* 7465 * No memory to split, we will just exit and punt 7466 * off to the RXT timer. 7467 */ 7468 goto out; 7469 } 7470 rack_clone_rsm(rack, nrsm, rsm, 7471 (rsm->r_end - ctf_fixed_maxseg(tp))); 7472 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7473 #ifndef INVARIANTS 7474 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 7475 #else 7476 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 7477 panic("Insert in rb tree of %p fails ret:%d rack:%p rsm:%p", 7478 nrsm, insret, rack, rsm); 7479 } 7480 #endif 7481 if (rsm->r_in_tmap) { 7482 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7483 nrsm->r_in_tmap = 1; 7484 } 7485 rsm = nrsm; 7486 } 7487 rack->r_ctl.rc_tlpsend = rsm; 7488 send: 7489 /* Make sure output path knows we are doing a TLP */ 7490 *doing_tlp = 1; 7491 rack->r_timer_override = 1; 7492 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 7493 return (0); 7494 out: 7495 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 7496 return (0); 7497 } 7498 7499 /* 7500 * Delayed ack Timer, here we simply need to setup the 7501 * ACK_NOW flag and remove the DELACK flag. From there 7502 * the output routine will send the ack out. 7503 * 7504 * We only return 1, saying don't proceed, if all timers 7505 * are stopped (destroyed PCB?). 7506 */ 7507 static int 7508 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7509 { 7510 7511 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 7512 tp->t_flags &= ~TF_DELACK; 7513 tp->t_flags |= TF_ACKNOW; 7514 KMOD_TCPSTAT_INC(tcps_delack); 7515 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 7516 return (0); 7517 } 7518 7519 /* 7520 * Persists timer, here we simply send the 7521 * same thing as a keepalive will. 7522 * the one byte send. 7523 * 7524 * We only return 1, saying don't proceed, if all timers 7525 * are stopped (destroyed PCB?). 7526 */ 7527 static int 7528 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7529 { 7530 struct tcptemp *t_template; 7531 int32_t retval = 1; 7532 7533 if (rack->rc_in_persist == 0) 7534 return (0); 7535 if (ctf_progress_timeout_check(tp, false)) { 7536 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 7537 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7538 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 7539 return (-ETIMEDOUT); /* tcp_drop() */ 7540 } 7541 /* 7542 * Persistence timer into zero window. 
Force a byte to be output, if 7543 * possible. 7544 */ 7545 KMOD_TCPSTAT_INC(tcps_persisttimeo); 7546 /* 7547 * Hack: if the peer is dead/unreachable, we do not time out if the 7548 * window is closed. After a full backoff, drop the connection if 7549 * the idle time (no responses to probes) reaches the maximum 7550 * backoff that we would use if retransmitting. 7551 */ 7552 if (tp->t_rxtshift >= V_tcp_retries && 7553 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 7554 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 7555 KMOD_TCPSTAT_INC(tcps_persistdrop); 7556 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 7557 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 7558 retval = -ETIMEDOUT; /* tcp_drop() */ 7559 goto out; 7560 } 7561 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 7562 tp->snd_una == tp->snd_max) 7563 rack_exit_persist(tp, rack, cts); 7564 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 7565 /* 7566 * If the user has closed the socket then drop a persisting 7567 * connection after a much reduced timeout. 7568 */ 7569 if (tp->t_state > TCPS_CLOSE_WAIT && 7570 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 7571 KMOD_TCPSTAT_INC(tcps_persistdrop); 7572 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 7573 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 7574 retval = -ETIMEDOUT; /* tcp_drop() */ 7575 goto out; 7576 } 7577 t_template = tcpip_maketemplate(rack->rc_inp); 7578 if (t_template) { 7579 /* only set it if we were answered */ 7580 if (rack->forced_ack == 0) { 7581 rack->forced_ack = 1; 7582 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 7583 } else { 7584 rack->probe_not_answered = 1; 7585 counter_u64_add(rack_persists_loss, 1); 7586 rack->r_ctl.persist_lost_ends++; 7587 } 7588 counter_u64_add(rack_persists_sends, 1); 7589 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 7590 tcp_respond(tp, t_template->tt_ipgen, 7591 &t_template->tt_t, (struct mbuf *)NULL, 7592 tp->rcv_nxt, tp->snd_una - 1, 0); 7593 /* This sends an ack */ 7594 if (tp->t_flags & TF_DELACK) 7595 tp->t_flags &= ~TF_DELACK; 7596 free(t_template, M_TEMP); 7597 } 7598 if (tp->t_rxtshift < V_tcp_retries) 7599 tp->t_rxtshift++; 7600 out: 7601 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 7602 rack_start_hpts_timer(rack, tp, cts, 7603 0, 0, 0); 7604 return (retval); 7605 } 7606 7607 /* 7608 * If a keepalive goes off, we had no other timers 7609 * happening. We always return 1 here since this 7610 * routine either drops the connection or sends 7611 * out a segment with respond. 7612 */ 7613 static int 7614 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7615 { 7616 struct tcptemp *t_template; 7617 struct inpcb *inp = tptoinpcb(tp); 7618 7619 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 7620 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 7621 /* 7622 * Keep-alive timer went off; send something or drop connection if 7623 * idle for too long. 7624 */ 7625 KMOD_TCPSTAT_INC(tcps_keeptimeo); 7626 if (tp->t_state < TCPS_ESTABLISHED) 7627 goto dropit; 7628 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 7629 tp->t_state <= TCPS_CLOSING) { 7630 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 7631 goto dropit; 7632 /* 7633 * Send a packet designed to force a response if the peer is 7634 * up and reachable: either an ACK if the connection is 7635 * still alive, or an RST if the peer has closed the 7636 * connection due to timeout or reboot. 
Using sequence 7637 * number tp->snd_una-1 causes the transmitted zero-length 7638 * segment to lie outside the receive window; by the 7639 * protocol spec, this requires the correspondent TCP to 7640 * respond. 7641 */ 7642 KMOD_TCPSTAT_INC(tcps_keepprobe); 7643 t_template = tcpip_maketemplate(inp); 7644 if (t_template) { 7645 if (rack->forced_ack == 0) { 7646 rack->forced_ack = 1; 7647 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 7648 } else { 7649 rack->probe_not_answered = 1; 7650 } 7651 tcp_respond(tp, t_template->tt_ipgen, 7652 &t_template->tt_t, (struct mbuf *)NULL, 7653 tp->rcv_nxt, tp->snd_una - 1, 0); 7654 free(t_template, M_TEMP); 7655 } 7656 } 7657 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 7658 return (1); 7659 dropit: 7660 KMOD_TCPSTAT_INC(tcps_keepdrops); 7661 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 7662 return (-ETIMEDOUT); /* tcp_drop() */ 7663 } 7664 7665 /* 7666 * Retransmit helper function, clear up all the ack 7667 * flags and take care of important book keeping. 7668 */ 7669 static void 7670 rack_remxt_tmr(struct tcpcb *tp) 7671 { 7672 /* 7673 * The retransmit timer went off, all sack'd blocks must be 7674 * un-acked. 7675 */ 7676 struct rack_sendmap *rsm, *trsm = NULL; 7677 struct tcp_rack *rack; 7678 7679 rack = (struct tcp_rack *)tp->t_fb_ptr; 7680 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 7681 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 7682 if (rack->r_state && (rack->r_state != tp->t_state)) 7683 rack_set_state(tp, rack); 7684 /* 7685 * Ideally we would like to be able to 7686 * mark SACK-PASS on anything not acked here. 7687 * 7688 * However, if we do that we would burst out 7689 * all that data 1ms apart. This would be unwise, 7690 * so for now we will just let the normal rxt timer 7691 * and tlp timer take care of it. 7692 * 7693 * Also we really need to stick them back in sequence 7694 * order. This way we send in the proper order and any 7695 * sacks that come floating in will "re-ack" the data. 7696 * To do this we zap the tmap with an INIT and then 7697 * walk through and place every rsm in the RB tree 7698 * back in its seq ordered place. 
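 * As an illustration: a tmap ordered by transmit time as
 * [B, A, C] is rebuilt below as [A, B, C] by sequence, with
 * every entry tagged RACK_MUST_RXT and its ACKED/SACK-related
 * marks cleared.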
7699 */ 7700 TAILQ_INIT(&rack->r_ctl.rc_tmap); 7701 7702 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 7703 rsm->r_dupack = 0; 7704 if (rack_verbose_logging) 7705 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7706 /* We must re-add it back to the tlist */ 7707 if (trsm == NULL) { 7708 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7709 } else { 7710 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 7711 } 7712 rsm->r_in_tmap = 1; 7713 trsm = rsm; 7714 if (rsm->r_flags & RACK_ACKED) 7715 rsm->r_flags |= RACK_WAS_ACKED; 7716 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED); 7717 rsm->r_flags |= RACK_MUST_RXT; 7718 } 7719 /* Clear the count (we just un-acked them) */ 7720 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 7721 rack->r_ctl.rc_sacked = 0; 7722 rack->r_ctl.rc_sacklast = NULL; 7723 rack->r_ctl.rc_agg_delayed = 0; 7724 rack->r_early = 0; 7725 rack->r_ctl.rc_agg_early = 0; 7726 rack->r_late = 0; 7727 /* Clear the tlp rtx mark */ 7728 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); 7729 if (rack->r_ctl.rc_resend != NULL) 7730 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 7731 rack->r_ctl.rc_prr_sndcnt = 0; 7732 rack_log_to_prr(rack, 6, 0, __LINE__); 7733 rack->r_timer_override = 1; 7734 if ((((tp->t_flags & TF_SACK_PERMIT) == 0) 7735 #ifdef TCP_SAD_DETECTION 7736 || (rack->sack_attack_disable != 0) 7737 #endif 7738 ) && ((tp->t_flags & TF_SENTFIN) == 0)) { 7739 /* 7740 * For non-sack customers new data 7741 * needs to go out as retransmits until 7742 * we retransmit up to snd_max. 7743 */ 7744 rack->r_must_retran = 1; 7745 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 7746 rack->r_ctl.rc_sacked); 7747 } 7748 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 7749 } 7750 7751 static void 7752 rack_convert_rtts(struct tcpcb *tp) 7753 { 7754 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC); 7755 tp->t_rxtcur = RACK_REXMTVAL(tp); 7756 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 7757 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 7758 } 7759 if (tp->t_rxtcur > rack_rto_max) { 7760 tp->t_rxtcur = rack_rto_max; 7761 } 7762 } 7763 7764 static void 7765 rack_cc_conn_init(struct tcpcb *tp) 7766 { 7767 struct tcp_rack *rack; 7768 uint32_t srtt; 7769 7770 rack = (struct tcp_rack *)tp->t_fb_ptr; 7771 srtt = tp->t_srtt; 7772 cc_conn_init(tp); 7773 /* 7774 * Now convert to rack's internal format, 7775 * if required. 7776 */ 7777 if ((srtt == 0) && (tp->t_srtt != 0)) 7778 rack_convert_rtts(tp); 7779 /* 7780 * We want a chance to stay in slowstart as 7781 * we create a connection. TCP spec says that 7782 * initially ssthresh is infinite. For our 7783 * purposes that is the snd_wnd. 7784 */ 7785 if (tp->snd_ssthresh < tp->snd_wnd) { 7786 tp->snd_ssthresh = tp->snd_wnd; 7787 } 7788 /* 7789 * We also want to assure a IW worth of 7790 * data can get inflight. 7791 */ 7792 if (rc_init_window(rack) < tp->snd_cwnd) 7793 tp->snd_cwnd = rc_init_window(rack); 7794 } 7795 7796 /* 7797 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 7798 * we will setup to retransmit the lowest seq number outstanding. 7799 */ 7800 static int 7801 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 7802 { 7803 struct inpcb *inp = tptoinpcb(tp); 7804 int32_t rexmt; 7805 int32_t retval = 0; 7806 bool isipv6; 7807 7808 if ((tp->t_flags & TF_GPUTINPROG) && 7809 (tp->t_rxtshift)) { 7810 /* 7811 * We have had a second timeout 7812 * measurements on successive rxt's are not profitable. 
7813 * It is unlikely to be of any use (the network is 7814 * broken or the client went away). 7815 */ 7816 tp->t_flags &= ~TF_GPUTINPROG; 7817 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7818 rack->r_ctl.rc_gp_srtt /*flex1*/, 7819 tp->gput_seq, 7820 0, 0, 18, __LINE__, NULL, 0); 7821 } 7822 if (ctf_progress_timeout_check(tp, false)) { 7823 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 7824 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 7825 return (-ETIMEDOUT); /* tcp_drop() */ 7826 } 7827 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 7828 rack->r_ctl.retran_during_recovery = 0; 7829 rack->rc_ack_required = 1; 7830 rack->r_ctl.dsack_byte_cnt = 0; 7831 if (IN_FASTRECOVERY(tp->t_flags)) 7832 tp->t_flags |= TF_WASFRECOVERY; 7833 else 7834 tp->t_flags &= ~TF_WASFRECOVERY; 7835 if (IN_CONGRECOVERY(tp->t_flags)) 7836 tp->t_flags |= TF_WASCRECOVERY; 7837 else 7838 tp->t_flags &= ~TF_WASCRECOVERY; 7839 if (TCPS_HAVEESTABLISHED(tp->t_state) && 7840 (tp->snd_una == tp->snd_max)) { 7841 /* Nothing outstanding .. nothing to do */ 7842 return (0); 7843 } 7844 if (rack->r_ctl.dsack_persist) { 7845 rack->r_ctl.dsack_persist--; 7846 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 7847 rack->r_ctl.num_dsack = 0; 7848 } 7849 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 7850 } 7851 /* 7852 * Rack can only run one timer at a time, so we cannot 7853 * run a KEEPINIT (gating SYN sending) and a retransmit 7854 * timer for the SYN. So if we are in a front state and 7855 * have a KEEPINIT timer we need to check the first transmit 7856 * against now to see if we have exceeded the KEEPINIT time 7857 * (if one is set). 7858 */ 7859 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 7860 (TP_KEEPINIT(tp) != 0)) { 7861 struct rack_sendmap *rsm; 7862 7863 rsm = tqhash_min(rack->r_ctl.tqh); 7864 if (rsm) { 7865 /* Ok we have something outstanding to test keepinit with */ 7866 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 7867 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 7868 /* We have exceeded the KEEPINIT time */ 7869 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 7870 goto drop_it; 7871 } 7872 } 7873 } 7874 /* 7875 * Retransmission timer went off. Message has not been acked within 7876 * retransmit interval. Back off to a longer retransmit interval 7877 * and retransmit one segment. 7878 */ 7879 rack_remxt_tmr(tp); 7880 if ((rack->r_ctl.rc_resend == NULL) || 7881 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 7882 /* 7883 * If the rwnd collapsed on 7884 * the one we are retransmitting 7885 * it does not count against the 7886 * rxt count. 7887 */ 7888 tp->t_rxtshift++; 7889 } 7890 if (tp->t_rxtshift > V_tcp_retries) { 7891 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 7892 drop_it: 7893 tp->t_rxtshift = V_tcp_retries; 7894 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 7895 /* XXXGL: previously t_softerror was casted to uint16_t */ 7896 MPASS(tp->t_softerror >= 0); 7897 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; 7898 goto out; /* tcp_drop() */ 7899 } 7900 if (tp->t_state == TCPS_SYN_SENT) { 7901 /* 7902 * If the SYN was retransmitted, indicate CWND to be limited 7903 * to 1 segment in cc_conn_init(). 7904 */ 7905 tp->snd_cwnd = 1; 7906 } else if (tp->t_rxtshift == 1) { 7907 /* 7908 * first retransmit; record ssthresh and cwnd so they can be 7909 * recovered if this turns out to be a "bad" retransmit. 
A 7910 * retransmit is considered "bad" if an ACK for this segment 7911 * is received within RTT/2 interval; the assumption here is 7912 * that the ACK was already in flight. See "On Estimating 7913 * End-to-End Network Path Properties" by Allman and Paxson 7914 * for more details. 7915 */ 7916 tp->snd_cwnd_prev = tp->snd_cwnd; 7917 tp->snd_ssthresh_prev = tp->snd_ssthresh; 7918 tp->snd_recover_prev = tp->snd_recover; 7919 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 7920 tp->t_flags |= TF_PREVVALID; 7921 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 7922 tp->t_flags &= ~TF_PREVVALID; 7923 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 7924 if ((tp->t_state == TCPS_SYN_SENT) || 7925 (tp->t_state == TCPS_SYN_RECEIVED)) 7926 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 7927 else 7928 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 7929 7930 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 7931 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 7932 /* 7933 * We enter the path for PLMTUD if connection is established or, if 7934 * connection is FIN_WAIT_1 status, reason for the last is that if 7935 * amount of data we send is very small, we could send it in couple 7936 * of packets and process straight to FIN. In that case we won't 7937 * catch ESTABLISHED state. 7938 */ 7939 #ifdef INET6 7940 isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false; 7941 #else 7942 isipv6 = false; 7943 #endif 7944 if (((V_tcp_pmtud_blackhole_detect == 1) || 7945 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 7946 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 7947 ((tp->t_state == TCPS_ESTABLISHED) || 7948 (tp->t_state == TCPS_FIN_WAIT_1))) { 7949 /* 7950 * Idea here is that at each stage of mtu probe (usually, 7951 * 1448 -> 1188 -> 524) should be given 2 chances to recover 7952 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 7953 * should take care of that. 7954 */ 7955 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 7956 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 7957 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 7958 tp->t_rxtshift % 2 == 0)) { 7959 /* 7960 * Enter Path MTU Black-hole Detection mechanism: - 7961 * Disable Path MTU Discovery (IP "DF" bit). - 7962 * Reduce MTU to lower value than what we negotiated 7963 * with peer. 7964 */ 7965 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 7966 /* Record that we may have found a black hole. */ 7967 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 7968 /* Keep track of previous MSS. */ 7969 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 7970 } 7971 7972 /* 7973 * Reduce the MSS to blackhole value or to the 7974 * default in an attempt to retransmit. 7975 */ 7976 #ifdef INET6 7977 if (isipv6 && 7978 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 7979 /* Use the sysctl tuneable blackhole MSS. */ 7980 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 7981 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 7982 } else if (isipv6) { 7983 /* Use the default MSS. */ 7984 tp->t_maxseg = V_tcp_v6mssdflt; 7985 /* 7986 * Disable Path MTU Discovery when we switch 7987 * to minmss. 7988 */ 7989 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 7990 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 7991 } 7992 #endif 7993 #if defined(INET6) && defined(INET) 7994 else 7995 #endif 7996 #ifdef INET 7997 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 7998 /* Use the sysctl tuneable blackhole MSS. 
*/ 7999 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 8000 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 8001 } else { 8002 /* Use the default MSS. */ 8003 tp->t_maxseg = V_tcp_mssdflt; 8004 /* 8005 * Disable Path MTU Discovery when we switch 8006 * to minmss. 8007 */ 8008 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 8009 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 8010 } 8011 #endif 8012 } else { 8013 /* 8014 * If further retransmissions are still unsuccessful 8015 * with a lowered MTU, maybe this isn't a blackhole 8016 * and we restore the previous MSS and blackhole 8017 * detection flags. The limit '6' is determined by 8018 * giving each probe stage (1448, 1188, 524) 2 8019 * chances to recover. 8020 */ 8021 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 8022 (tp->t_rxtshift >= 6)) { 8023 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 8024 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 8025 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 8026 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 8027 } 8028 } 8029 } 8030 /* 8031 * Disable RFC1323 and SACK if we haven't got any response to 8032 * our third SYN to work-around some broken terminal servers 8033 * (most of which have hopefully been retired) that have bad VJ 8034 * header compression code which trashes TCP segments containing 8035 * unknown-to-them TCP options. 8036 */ 8037 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 8038 (tp->t_rxtshift == 3)) 8039 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 8040 /* 8041 * If we backed off this far, our srtt estimate is probably bogus. 8042 * Clobber it so we'll take the next rtt measurement as our srtt; 8043 * move the current srtt into rttvar to keep the current retransmit 8044 * times until then. 8045 */ 8046 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 8047 #ifdef INET6 8048 if ((inp->inp_vflag & INP_IPV6) != 0) 8049 in6_losing(inp); 8050 else 8051 #endif 8052 in_losing(inp); 8053 tp->t_rttvar += tp->t_srtt; 8054 tp->t_srtt = 0; 8055 } 8056 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 8057 tp->snd_recover = tp->snd_max; 8058 tp->t_flags |= TF_ACKNOW; 8059 tp->t_rtttime = 0; 8060 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__); 8061 out: 8062 return (retval); 8063 } 8064 8065 static int 8066 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp) 8067 { 8068 int32_t ret = 0; 8069 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 8070 8071 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 8072 (tp->t_flags & TF_GPUTINPROG)) { 8073 /* 8074 * We have a goodput in progress 8075 * and we have entered a late state. 8076 * Do we have enough data in the sb 8077 * to handle the GPUT request? 8078 */ 8079 uint32_t bytes; 8080 8081 bytes = tp->gput_ack - tp->gput_seq; 8082 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 8083 bytes += tp->gput_seq - tp->snd_una; 8084 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 8085 /* 8086 * There are not enough bytes in the socket 8087 * buffer that have been sent to cover this 8088 * measurement. Cancel it. 
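* (bytes is how much still has to be covered for this measurement; if
* the socket buffer no longer holds that much sent-but-unacked data
* the sample can never complete, so it is logged and abandoned rather
* than left to linger.)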
8089 */ 8090 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 8091 rack->r_ctl.rc_gp_srtt /*flex1*/, 8092 tp->gput_seq, 8093 0, 0, 18, __LINE__, NULL, 0); 8094 tp->t_flags &= ~TF_GPUTINPROG; 8095 } 8096 } 8097 if (timers == 0) { 8098 return (0); 8099 } 8100 if (tp->t_state == TCPS_LISTEN) { 8101 /* no timers on listen sockets */ 8102 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 8103 return (0); 8104 return (1); 8105 } 8106 if ((timers & PACE_TMR_RACK) && 8107 rack->rc_on_min_to) { 8108 /* 8109 * For the rack timer when we 8110 * are on a min-timeout (which means rrr_conf = 3) 8111 * we don't want to check the timer. It may 8112 * be going off for a pace and thats ok we 8113 * want to send the retransmit (if its ready). 8114 * 8115 * If its on a normal rack timer (non-min) then 8116 * we will check if its expired. 8117 */ 8118 goto skip_time_check; 8119 } 8120 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 8121 uint32_t left; 8122 8123 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 8124 ret = -1; 8125 rack_log_to_processing(rack, cts, ret, 0); 8126 return (0); 8127 } 8128 if (hpts_calling == 0) { 8129 /* 8130 * A user send or queued mbuf (sack) has called us? We 8131 * return 0 and let the pacing guards 8132 * deal with it if they should or 8133 * should not cause a send. 8134 */ 8135 ret = -2; 8136 rack_log_to_processing(rack, cts, ret, 0); 8137 return (0); 8138 } 8139 /* 8140 * Ok our timer went off early and we are not paced false 8141 * alarm, go back to sleep. We make sure we don't have 8142 * no-sack wakeup on since we no longer have a PKT_OUTPUT 8143 * flag in place. 8144 */ 8145 rack->rc_tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE; 8146 ret = -3; 8147 left = rack->r_ctl.rc_timer_exp - cts; 8148 tcp_hpts_insert(tp, HPTS_MS_TO_SLOTS(left)); 8149 rack_log_to_processing(rack, cts, ret, left); 8150 return (1); 8151 } 8152 skip_time_check: 8153 rack->rc_tmr_stopped = 0; 8154 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 8155 if (timers & PACE_TMR_DELACK) { 8156 ret = rack_timeout_delack(tp, rack, cts); 8157 } else if (timers & PACE_TMR_RACK) { 8158 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8159 rack->r_fast_output = 0; 8160 ret = rack_timeout_rack(tp, rack, cts); 8161 } else if (timers & PACE_TMR_TLP) { 8162 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8163 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); 8164 } else if (timers & PACE_TMR_RXT) { 8165 rack->r_ctl.rc_tlp_rxt_last_time = cts; 8166 rack->r_fast_output = 0; 8167 ret = rack_timeout_rxt(tp, rack, cts); 8168 } else if (timers & PACE_TMR_PERSIT) { 8169 ret = rack_timeout_persist(tp, rack, cts); 8170 } else if (timers & PACE_TMR_KEEP) { 8171 ret = rack_timeout_keepalive(tp, rack, cts); 8172 } 8173 rack_log_to_processing(rack, cts, ret, timers); 8174 return (ret); 8175 } 8176 8177 static void 8178 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 8179 { 8180 struct timeval tv; 8181 uint32_t us_cts, flags_on_entry; 8182 uint8_t hpts_removed = 0; 8183 8184 flags_on_entry = rack->r_ctl.rc_hpts_flags; 8185 us_cts = tcp_get_usecs(&tv); 8186 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 8187 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 8188 ((tp->snd_max - tp->snd_una) == 0))) { 8189 tcp_hpts_remove(rack->rc_tp); 8190 hpts_removed = 1; 8191 /* If we were not delayed cancel out the flag. 
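* (with nothing outstanding there is no data behind the pending
* output, so the PACE_PKT_OUTPUT flag is cleared as well.)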
*/ 8192 if ((tp->snd_max - tp->snd_una) == 0) 8193 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 8194 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 8195 } 8196 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 8197 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 8198 if (tcp_in_hpts(rack->rc_tp) && 8199 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 8200 /* 8201 * Canceling timer's when we have no output being 8202 * paced. We also must remove ourselves from the 8203 * hpts. 8204 */ 8205 tcp_hpts_remove(rack->rc_tp); 8206 hpts_removed = 1; 8207 } 8208 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 8209 } 8210 if (hpts_removed == 0) 8211 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 8212 } 8213 8214 static int 8215 rack_stopall(struct tcpcb *tp) 8216 { 8217 struct tcp_rack *rack; 8218 rack = (struct tcp_rack *)tp->t_fb_ptr; 8219 rack->t_timers_stopped = 1; 8220 return (0); 8221 } 8222 8223 static void 8224 rack_stop_all_timers(struct tcpcb *tp, struct tcp_rack *rack) 8225 { 8226 /* 8227 * Assure no timers are running. 8228 */ 8229 if (tcp_timer_active(tp, TT_PERSIST)) { 8230 /* We enter in persists, set the flag appropriately */ 8231 rack->rc_in_persist = 1; 8232 } 8233 if (tcp_in_hpts(rack->rc_tp)) { 8234 tcp_hpts_remove(rack->rc_tp); 8235 } 8236 } 8237 8238 static void 8239 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 8240 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag, int segsiz) 8241 { 8242 int32_t idx; 8243 8244 rsm->r_rtr_cnt++; 8245 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8246 rsm->r_dupack = 0; 8247 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 8248 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 8249 rsm->r_flags |= RACK_OVERMAX; 8250 } 8251 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { 8252 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 8253 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 8254 } 8255 idx = rsm->r_rtr_cnt - 1; 8256 rsm->r_tim_lastsent[idx] = ts; 8257 /* 8258 * Here we don't add in the len of send, since its already 8259 * in snduna <->snd_max. 8260 */ 8261 rsm->r_fas = ctf_flight_size(rack->rc_tp, 8262 rack->r_ctl.rc_sacked); 8263 if (rsm->r_flags & RACK_ACKED) { 8264 /* Problably MTU discovery messing with us */ 8265 rsm->r_flags &= ~RACK_ACKED; 8266 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8267 } 8268 if (rsm->r_in_tmap) { 8269 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8270 rsm->r_in_tmap = 0; 8271 } 8272 /* Lets make sure it really is in or not the GP window */ 8273 rack_mark_in_gp_win(tp, rsm); 8274 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8275 rsm->r_in_tmap = 1; 8276 rsm->r_bas = (uint8_t)(((rsm->r_end - rsm->r_start) + segsiz - 1) / segsiz); 8277 /* Take off the must retransmit flag, if its on */ 8278 if (rsm->r_flags & RACK_MUST_RXT) { 8279 if (rack->r_must_retran) 8280 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 8281 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 8282 /* 8283 * We have retransmitted all we need. Clear 8284 * any must retransmit flags. 
8285 */ 8286 rack->r_must_retran = 0; 8287 rack->r_ctl.rc_out_at_rto = 0; 8288 } 8289 rsm->r_flags &= ~RACK_MUST_RXT; 8290 } 8291 /* Remove any collapsed flag */ 8292 rsm->r_flags &= ~RACK_RWND_COLLAPSED; 8293 if (rsm->r_flags & RACK_SACK_PASSED) { 8294 /* We have retransmitted due to the SACK pass */ 8295 rsm->r_flags &= ~RACK_SACK_PASSED; 8296 rsm->r_flags |= RACK_WAS_SACKPASS; 8297 } 8298 } 8299 8300 static uint32_t 8301 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 8302 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag, int segsiz) 8303 { 8304 /* 8305 * We (re-)transmitted starting at rsm->r_start for some length 8306 * (possibly less than r_end. 8307 */ 8308 struct rack_sendmap *nrsm; 8309 int insret __diagused; 8310 uint32_t c_end; 8311 int32_t len; 8312 8313 len = *lenp; 8314 c_end = rsm->r_start + len; 8315 if (SEQ_GEQ(c_end, rsm->r_end)) { 8316 /* 8317 * We retransmitted the whole piece or more than the whole 8318 * slopping into the next rsm. 8319 */ 8320 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); 8321 if (c_end == rsm->r_end) { 8322 *lenp = 0; 8323 return (0); 8324 } else { 8325 int32_t act_len; 8326 8327 /* Hangs over the end return whats left */ 8328 act_len = rsm->r_end - rsm->r_start; 8329 *lenp = (len - act_len); 8330 return (rsm->r_end); 8331 } 8332 /* We don't get out of this block. */ 8333 } 8334 /* 8335 * Here we retransmitted less than the whole thing which means we 8336 * have to split this into what was transmitted and what was not. 8337 */ 8338 nrsm = rack_alloc_full_limit(rack); 8339 if (nrsm == NULL) { 8340 /* 8341 * We can't get memory, so lets not proceed. 8342 */ 8343 *lenp = 0; 8344 return (0); 8345 } 8346 /* 8347 * So here we are going to take the original rsm and make it what we 8348 * retransmitted. nrsm will be the tail portion we did not 8349 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 8350 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 8351 * 1, 6 and the new piece will be 6, 11. 8352 */ 8353 rack_clone_rsm(rack, nrsm, rsm, c_end); 8354 nrsm->r_dupack = 0; 8355 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 8356 #ifndef INVARIANTS 8357 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 8358 #else 8359 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 8360 panic("Insert in rb tree of %p fails ret:%d rack:%p rsm:%p", 8361 nrsm, insret, rack, rsm); 8362 } 8363 #endif 8364 if (rsm->r_in_tmap) { 8365 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8366 nrsm->r_in_tmap = 1; 8367 } 8368 rsm->r_flags &= (~RACK_HAS_FIN); 8369 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz); 8370 /* Log a split of rsm into rsm and nrsm */ 8371 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 8372 *lenp = 0; 8373 return (0); 8374 } 8375 8376 static void 8377 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 8378 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts, 8379 struct rack_sendmap *hintrsm, uint16_t add_flag, struct mbuf *s_mb, 8380 uint32_t s_moff, int hw_tls, int segsiz) 8381 { 8382 struct tcp_rack *rack; 8383 struct rack_sendmap *rsm, *nrsm; 8384 int insret __diagused; 8385 8386 register uint32_t snd_max, snd_una; 8387 8388 /* 8389 * Add to the RACK log of packets in flight or retransmitted. If 8390 * there is a TS option we will use the TS echoed, if not we will 8391 * grab a TS. 8392 * 8393 * Retransmissions will increment the count and move the ts to its 8394 * proper place. 
Note that if options do not include TS's then we 8395 * won't be able to effectively use the ACK for an RTT on a retran. 8396 * 8397 * Notes about r_start and r_end. Lets consider a send starting at 8398 * sequence 1 for 10 bytes. In such an example the r_start would be 8399 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 8400 * This means that r_end is actually the first sequence for the next 8401 * slot (11). 8402 * 8403 */ 8404 /* 8405 * If err is set what do we do XXXrrs? should we not add the thing? 8406 * -- i.e. return if err != 0 or should we pretend we sent it? -- 8407 * i.e. proceed with add ** do this for now. 8408 */ 8409 INP_WLOCK_ASSERT(tptoinpcb(tp)); 8410 if (err) 8411 /* 8412 * We don't log errors -- we could but snd_max does not 8413 * advance in this case either. 8414 */ 8415 return; 8416 8417 if (th_flags & TH_RST) { 8418 /* 8419 * We don't log resets and we return immediately from 8420 * sending 8421 */ 8422 return; 8423 } 8424 rack = (struct tcp_rack *)tp->t_fb_ptr; 8425 snd_una = tp->snd_una; 8426 snd_max = tp->snd_max; 8427 if (th_flags & (TH_SYN | TH_FIN)) { 8428 /* 8429 * The call to rack_log_output is made before bumping 8430 * snd_max. This means we can record one extra byte on a SYN 8431 * or FIN if seq_out is adding more on and a FIN is present 8432 * (and we are not resending). 8433 */ 8434 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 8435 len++; 8436 if (th_flags & TH_FIN) 8437 len++; 8438 if (SEQ_LT(snd_max, tp->snd_nxt)) { 8439 /* 8440 * The add/update as not been done for the FIN/SYN 8441 * yet. 8442 */ 8443 snd_max = tp->snd_nxt; 8444 } 8445 } 8446 if (SEQ_LEQ((seq_out + len), snd_una)) { 8447 /* Are sending an old segment to induce an ack (keep-alive)? */ 8448 return; 8449 } 8450 if (SEQ_LT(seq_out, snd_una)) { 8451 /* huh? should we panic? */ 8452 uint32_t end; 8453 8454 end = seq_out + len; 8455 seq_out = snd_una; 8456 if (SEQ_GEQ(end, seq_out)) 8457 len = end - seq_out; 8458 else 8459 len = 0; 8460 } 8461 if (len == 0) { 8462 /* We don't log zero window probes */ 8463 return; 8464 } 8465 if (IN_FASTRECOVERY(tp->t_flags)) { 8466 rack->r_ctl.rc_prr_out += len; 8467 } 8468 /* First question is it a retransmission or new? */ 8469 if (seq_out == snd_max) { 8470 /* Its new */ 8471 rack_chk_req_and_hybrid_on_out(rack, seq_out, len, cts); 8472 again: 8473 rsm = rack_alloc(rack); 8474 if (rsm == NULL) { 8475 /* 8476 * Hmm out of memory and the tcb got destroyed while 8477 * we tried to wait. 8478 */ 8479 return; 8480 } 8481 if (th_flags & TH_FIN) { 8482 rsm->r_flags = RACK_HAS_FIN|add_flag; 8483 } else { 8484 rsm->r_flags = add_flag; 8485 } 8486 if (hw_tls) 8487 rsm->r_hw_tls = 1; 8488 rsm->r_tim_lastsent[0] = cts; 8489 rsm->r_rtr_cnt = 1; 8490 rsm->r_rtr_bytes = 0; 8491 if (th_flags & TH_SYN) { 8492 /* The data space is one beyond snd_una */ 8493 rsm->r_flags |= RACK_HAS_SYN; 8494 } 8495 rsm->r_start = seq_out; 8496 rsm->r_end = rsm->r_start + len; 8497 rack_mark_in_gp_win(tp, rsm); 8498 rsm->r_dupack = 0; 8499 /* 8500 * save off the mbuf location that 8501 * sndmbuf_noadv returned (which is 8502 * where we started copying from).. 
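* Keeping rsm->m/rsm->soff lets later processing find these bytes
* without re-walking the socket buffer from its head; the
* orig_m_len/orig_t_space snapshots taken below let that code notice
* if the mbuf has since grown or been trimmed and re-derive the offset.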
8503 */ 8504 rsm->m = s_mb; 8505 rsm->soff = s_moff; 8506 /* 8507 * Here we do add in the len of send, since its not yet 8508 * reflected in in snduna <->snd_max 8509 */ 8510 rsm->r_fas = (ctf_flight_size(rack->rc_tp, 8511 rack->r_ctl.rc_sacked) + 8512 (rsm->r_end - rsm->r_start)); 8513 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ 8514 if (rsm->m) { 8515 if (rsm->m->m_len <= rsm->soff) { 8516 /* 8517 * XXXrrs Question, will this happen? 8518 * 8519 * If sbsndptr is set at the correct place 8520 * then s_moff should always be somewhere 8521 * within rsm->m. But if the sbsndptr was 8522 * off then that won't be true. If it occurs 8523 * we need to walkout to the correct location. 8524 */ 8525 struct mbuf *lm; 8526 8527 lm = rsm->m; 8528 while (lm->m_len <= rsm->soff) { 8529 rsm->soff -= lm->m_len; 8530 lm = lm->m_next; 8531 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", 8532 __func__, rack, s_moff, s_mb, rsm->soff)); 8533 } 8534 rsm->m = lm; 8535 } 8536 rsm->orig_m_len = rsm->m->m_len; 8537 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 8538 } else { 8539 rsm->orig_m_len = 0; 8540 rsm->orig_t_space = 0; 8541 } 8542 rsm->r_bas = (uint8_t)((len + segsiz - 1) / segsiz); 8543 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8544 /* Log a new rsm */ 8545 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); 8546 #ifndef INVARIANTS 8547 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 8548 #else 8549 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 8550 panic("Insert in rb tree of %p fails ret:%d rack:%p rsm:%p", 8551 nrsm, insret, rack, rsm); 8552 } 8553 #endif 8554 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8555 rsm->r_in_tmap = 1; 8556 /* 8557 * Special case detection, is there just a single 8558 * packet outstanding when we are not in recovery? 8559 * 8560 * If this is true mark it so. 8561 */ 8562 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 8563 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 8564 struct rack_sendmap *prsm; 8565 8566 prsm = tqhash_prev(rack->r_ctl.tqh, rsm); 8567 if (prsm) 8568 prsm->r_one_out_nr = 1; 8569 } 8570 return; 8571 } 8572 /* 8573 * If we reach here its a retransmission and we need to find it. 8574 */ 8575 more: 8576 if (hintrsm && (hintrsm->r_start == seq_out)) { 8577 rsm = hintrsm; 8578 hintrsm = NULL; 8579 } else { 8580 /* No hints sorry */ 8581 rsm = NULL; 8582 } 8583 if ((rsm) && (rsm->r_start == seq_out)) { 8584 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); 8585 if (len == 0) { 8586 return; 8587 } else { 8588 goto more; 8589 } 8590 } 8591 /* Ok it was not the last pointer go through it the hard way. */ 8592 refind: 8593 rsm = tqhash_find(rack->r_ctl.tqh, seq_out); 8594 if (rsm) { 8595 if (rsm->r_start == seq_out) { 8596 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz); 8597 if (len == 0) { 8598 return; 8599 } else { 8600 goto refind; 8601 } 8602 } 8603 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 8604 /* Transmitted within this piece */ 8605 /* 8606 * Ok we must split off the front and then let the 8607 * update do the rest 8608 */ 8609 nrsm = rack_alloc_full_limit(rack); 8610 if (nrsm == NULL) { 8611 rack_update_rsm(tp, rack, rsm, cts, add_flag, segsiz); 8612 return; 8613 } 8614 /* 8615 * copy rsm to nrsm and then trim the front of rsm 8616 * to not include this part. 
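* For example, if rsm covers [100, 1500) and seq_out is 600, after
* rack_clone_rsm() rsm is [100, 600) and nrsm is [600, 1500); the
* rack_update_entry() call below then charges the retransmission to
* nrsm.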
8617 */ 8618 rack_clone_rsm(rack, nrsm, rsm, seq_out); 8619 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 8620 #ifndef INVARIANTS 8621 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 8622 #else 8623 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 8624 panic("Insert in rb tree of %p fails ret:%d rack:%p rsm:%p", 8625 nrsm, insret, rack, rsm); 8626 } 8627 #endif 8628 if (rsm->r_in_tmap) { 8629 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8630 nrsm->r_in_tmap = 1; 8631 } 8632 rsm->r_flags &= (~RACK_HAS_FIN); 8633 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag, segsiz); 8634 if (len == 0) { 8635 return; 8636 } else if (len > 0) 8637 goto refind; 8638 } 8639 } 8640 /* 8641 * Hmm not found in map did they retransmit both old and on into the 8642 * new? 8643 */ 8644 if (seq_out == tp->snd_max) { 8645 goto again; 8646 } else if (SEQ_LT(seq_out, tp->snd_max)) { 8647 #ifdef INVARIANTS 8648 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 8649 seq_out, len, tp->snd_una, tp->snd_max); 8650 printf("Starting Dump of all rack entries\n"); 8651 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 8652 printf("rsm:%p start:%u end:%u\n", 8653 rsm, rsm->r_start, rsm->r_end); 8654 } 8655 printf("Dump complete\n"); 8656 panic("seq_out not found rack:%p tp:%p", 8657 rack, tp); 8658 #endif 8659 } else { 8660 #ifdef INVARIANTS 8661 /* 8662 * Hmm beyond sndmax? (only if we are using the new rtt-pack 8663 * flag) 8664 */ 8665 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 8666 seq_out, len, tp->snd_max, tp); 8667 #endif 8668 } 8669 } 8670 8671 /* 8672 * Record one of the RTT updates from an ack into 8673 * our sample structure. 8674 */ 8675 8676 static void 8677 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt, 8678 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt) 8679 { 8680 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 8681 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { 8682 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; 8683 } 8684 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 8685 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { 8686 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; 8687 } 8688 if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 8689 if (us_rtt < rack->r_ctl.rc_gp_lowrtt) 8690 rack->r_ctl.rc_gp_lowrtt = us_rtt; 8691 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd) 8692 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 8693 } 8694 if ((confidence == 1) && 8695 ((rsm == NULL) || 8696 (rsm->r_just_ret) || 8697 (rsm->r_one_out_nr && 8698 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) { 8699 /* 8700 * If the rsm had a just return 8701 * hit it then we can't trust the 8702 * rtt measurement for buffer deterimination 8703 * Note that a confidence of 2, indicates 8704 * SACK'd which overrides the r_just_ret or 8705 * the r_one_out_nr. If it was a CUM-ACK and 8706 * we had only two outstanding, but get an 8707 * ack for only 1. Then that also lowers our 8708 * confidence. 8709 */ 8710 confidence = 0; 8711 } 8712 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 8713 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) { 8714 if (rack->r_ctl.rack_rs.confidence == 0) { 8715 /* 8716 * We take anything with no current confidence 8717 * saved. 
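* (e.g. an earlier sample from a retransmitted segment was stored with
* confidence 0; a cleaner sample carried in the same ack can then
* replace it via the else-if below.)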
8718 */ 8719 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 8720 rack->r_ctl.rack_rs.confidence = confidence; 8721 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 8722 } else if (confidence != 0) { 8723 /* 8724 * Once we have a confident number, 8725 * we can update it with a smaller 8726 * value since this confident number 8727 * may include the DSACK time until 8728 * the next segment (the second one) arrived. 8729 */ 8730 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 8731 rack->r_ctl.rack_rs.confidence = confidence; 8732 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 8733 } 8734 } 8735 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence); 8736 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; 8737 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; 8738 rack->r_ctl.rack_rs.rs_rtt_cnt++; 8739 } 8740 8741 /* 8742 * Collect new round-trip time estimate 8743 * and update averages and current timeout. 8744 */ 8745 static void 8746 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 8747 { 8748 int32_t delta; 8749 int32_t rtt; 8750 8751 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 8752 /* No valid sample */ 8753 return; 8754 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 8755 /* We are to use the lowest RTT seen in a single ack */ 8756 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 8757 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 8758 /* We are to use the highest RTT seen in a single ack */ 8759 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 8760 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 8761 /* We are to use the average RTT seen in a single ack */ 8762 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 8763 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 8764 } else { 8765 #ifdef INVARIANTS 8766 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 8767 #endif 8768 return; 8769 } 8770 if (rtt == 0) 8771 rtt = 1; 8772 if (rack->rc_gp_rtt_set == 0) { 8773 /* 8774 * With no RTT we have to accept 8775 * even one we are not confident of. 8776 */ 8777 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 8778 rack->rc_gp_rtt_set = 1; 8779 } else if (rack->r_ctl.rack_rs.confidence) { 8780 /* update the running gp srtt */ 8781 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 8782 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 8783 } 8784 if (rack->r_ctl.rack_rs.confidence) { 8785 /* 8786 * record the low and high for highly buffered path computation, 8787 * we only do this if we are confident (not a retransmission). 8788 */ 8789 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 8790 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 8791 } 8792 if (rack->rc_highly_buffered == 0) { 8793 /* 8794 * Currently once we declare a path has 8795 * highly buffered there is no going 8796 * back, which may be a problem... 8797 */ 8798 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 8799 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 8800 rack->r_ctl.rc_highest_us_rtt, 8801 rack->r_ctl.rc_lowest_us_rtt, 8802 RACK_RTTS_SEEHBP); 8803 rack->rc_highly_buffered = 1; 8804 } 8805 } 8806 } 8807 if ((rack->r_ctl.rack_rs.confidence) || 8808 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 8809 /* 8810 * If we are highly confident of it <or> it was 8811 * never retransmitted we accept it as the last us_rtt. 
8812 */ 8813 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 8814 /* The lowest rtt can be set if its was not retransmited */ 8815 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 8816 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 8817 if (rack->r_ctl.rc_lowest_us_rtt == 0) 8818 rack->r_ctl.rc_lowest_us_rtt = 1; 8819 } 8820 } 8821 rack = (struct tcp_rack *)tp->t_fb_ptr; 8822 if (tp->t_srtt != 0) { 8823 /* 8824 * We keep a simple srtt in microseconds, like our rtt 8825 * measurement. We don't need to do any tricks with shifting 8826 * etc. Instead we just add in 1/8th of the new measurement 8827 * and subtract out 1/8 of the old srtt. We do the same with 8828 * the variance after finding the absolute value of the 8829 * difference between this sample and the current srtt. 8830 */ 8831 delta = tp->t_srtt - rtt; 8832 /* Take off 1/8th of the current sRTT */ 8833 tp->t_srtt -= (tp->t_srtt >> 3); 8834 /* Add in 1/8th of the new RTT just measured */ 8835 tp->t_srtt += (rtt >> 3); 8836 if (tp->t_srtt <= 0) 8837 tp->t_srtt = 1; 8838 /* Now lets make the absolute value of the variance */ 8839 if (delta < 0) 8840 delta = -delta; 8841 /* Subtract out 1/8th */ 8842 tp->t_rttvar -= (tp->t_rttvar >> 3); 8843 /* Add in 1/8th of the new variance we just saw */ 8844 tp->t_rttvar += (delta >> 3); 8845 if (tp->t_rttvar <= 0) 8846 tp->t_rttvar = 1; 8847 } else { 8848 /* 8849 * No rtt measurement yet - use the unsmoothed rtt. Set the 8850 * variance to half the rtt (so our first retransmit happens 8851 * at 3*rtt). 8852 */ 8853 tp->t_srtt = rtt; 8854 tp->t_rttvar = rtt >> 1; 8855 } 8856 rack->rc_srtt_measure_made = 1; 8857 KMOD_TCPSTAT_INC(tcps_rttupdated); 8858 if (tp->t_rttupdated < UCHAR_MAX) 8859 tp->t_rttupdated++; 8860 #ifdef STATS 8861 if (rack_stats_gets_ms_rtt == 0) { 8862 /* Send in the microsecond rtt used for rxt timeout purposes */ 8863 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 8864 } else if (rack_stats_gets_ms_rtt == 1) { 8865 /* Send in the millisecond rtt used for rxt timeout purposes */ 8866 int32_t ms_rtt; 8867 8868 /* Round up */ 8869 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 8870 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 8871 } else if (rack_stats_gets_ms_rtt == 2) { 8872 /* Send in the millisecond rtt has close to the path RTT as we can get */ 8873 int32_t ms_rtt; 8874 8875 /* Round up */ 8876 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 8877 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 8878 } else { 8879 /* Send in the microsecond rtt has close to the path RTT as we can get */ 8880 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 8881 } 8882 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_PATHRTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 8883 #endif 8884 /* 8885 * the retransmit should happen at rtt + 4 * rttvar. Because of the 8886 * way we do the smoothing, srtt and rttvar will each average +1/2 8887 * tick of bias. When we compute the retransmit timer, we want 1/2 8888 * tick of rounding and 1 extra tick because of +-1/2 tick 8889 * uncertainty in the firing of the timer. The bias will give us 8890 * exactly the 1.5 tick we need. But, because the bias is 8891 * statistical, we have to test that we don't drop below the minimum 8892 * feasible timer (which is 2 ticks). 
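* In effect t_rxtcur becomes roughly srtt + 4 * rttvar (RACK_REXMTVAL),
* clamped between max(rack_rto_min, rtt + 2) and rack_rto_max by the
* RACK_TCPT_RANGESET() just below.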
8893 */ 8894 tp->t_rxtshift = 0; 8895 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 8896 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 8897 rack_log_rtt_sample(rack, rtt); 8898 tp->t_softerror = 0; 8899 } 8900 8901 8902 static void 8903 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 8904 { 8905 /* 8906 * Apply to filter the inbound us-rtt at us_cts. 8907 */ 8908 uint32_t old_rtt; 8909 8910 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 8911 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 8912 us_rtt, us_cts); 8913 if (old_rtt > us_rtt) { 8914 /* We just hit a new lower rtt time */ 8915 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 8916 __LINE__, RACK_RTTS_NEWRTT); 8917 /* 8918 * Only count it if its lower than what we saw within our 8919 * calculated range. 8920 */ 8921 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 8922 if (rack_probertt_lower_within && 8923 rack->rc_gp_dyn_mul && 8924 (rack->use_fixed_rate == 0) && 8925 (rack->rc_always_pace)) { 8926 /* 8927 * We are seeing a new lower rtt very close 8928 * to the time that we would have entered probe-rtt. 8929 * This is probably due to the fact that a peer flow 8930 * has entered probe-rtt. Lets go in now too. 8931 */ 8932 uint32_t val; 8933 8934 val = rack_probertt_lower_within * rack_time_between_probertt; 8935 val /= 100; 8936 if ((rack->in_probe_rtt == 0) && 8937 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 8938 rack_enter_probertt(rack, us_cts); 8939 } 8940 } 8941 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 8942 } 8943 } 8944 } 8945 8946 static int 8947 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 8948 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 8949 { 8950 uint32_t us_rtt; 8951 int32_t i, all; 8952 uint32_t t, len_acked; 8953 8954 if ((rsm->r_flags & RACK_ACKED) || 8955 (rsm->r_flags & RACK_WAS_ACKED)) 8956 /* Already done */ 8957 return (0); 8958 if (rsm->r_no_rtt_allowed) { 8959 /* Not allowed */ 8960 return (0); 8961 } 8962 if (ack_type == CUM_ACKED) { 8963 if (SEQ_GT(th_ack, rsm->r_end)) { 8964 len_acked = rsm->r_end - rsm->r_start; 8965 all = 1; 8966 } else { 8967 len_acked = th_ack - rsm->r_start; 8968 all = 0; 8969 } 8970 } else { 8971 len_acked = rsm->r_end - rsm->r_start; 8972 all = 0; 8973 } 8974 if (rsm->r_rtr_cnt == 1) { 8975 8976 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 8977 if ((int)t <= 0) 8978 t = 1; 8979 if (!tp->t_rttlow || tp->t_rttlow > t) 8980 tp->t_rttlow = t; 8981 if (!rack->r_ctl.rc_rack_min_rtt || 8982 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8983 rack->r_ctl.rc_rack_min_rtt = t; 8984 if (rack->r_ctl.rc_rack_min_rtt == 0) { 8985 rack->r_ctl.rc_rack_min_rtt = 1; 8986 } 8987 } 8988 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 8989 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8990 else 8991 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8992 if (us_rtt == 0) 8993 us_rtt = 1; 8994 if (CC_ALGO(tp)->rttsample != NULL) { 8995 /* Kick the RTT to the CC */ 8996 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 8997 } 8998 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 8999 if (ack_type == SACKED) { 9000 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 9001 
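/*
 * A SACK-timed sample of a segment sent only once is passed with
 * confidence 2, the most trusted kind (see the confidence handling
 * in tcp_rack_xmit_timer()).
 */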
tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 9002 } else { 9003 /* 9004 * We need to setup what our confidence 9005 * is in this ack. 9006 * 9007 * If the rsm was app limited and it is 9008 * less than a mss in length (the end 9009 * of the send) then we have a gap. If we 9010 * were app limited but say we were sending 9011 * multiple MSS's then we are more confident 9012 * int it. 9013 * 9014 * When we are not app-limited then we see if 9015 * the rsm is being included in the current 9016 * measurement, we tell this by the app_limited_needs_set 9017 * flag. 9018 * 9019 * Note that being cwnd blocked is not applimited 9020 * as well as the pacing delay between packets which 9021 * are sending only 1 or 2 MSS's also will show up 9022 * in the RTT. We probably need to examine this algorithm 9023 * a bit more and enhance it to account for the delay 9024 * between rsm's. We could do that by saving off the 9025 * pacing delay of each rsm (in an rsm) and then 9026 * factoring that in somehow though for now I am 9027 * not sure how :) 9028 */ 9029 int calc_conf = 0; 9030 9031 if (rsm->r_flags & RACK_APP_LIMITED) { 9032 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 9033 calc_conf = 0; 9034 else 9035 calc_conf = 1; 9036 } else if (rack->app_limited_needs_set == 0) { 9037 calc_conf = 1; 9038 } else { 9039 calc_conf = 0; 9040 } 9041 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 9042 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 9043 calc_conf, rsm, rsm->r_rtr_cnt); 9044 } 9045 if ((rsm->r_flags & RACK_TLP) && 9046 (!IN_FASTRECOVERY(tp->t_flags))) { 9047 /* Segment was a TLP and our retrans matched */ 9048 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 9049 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 9050 } 9051 } 9052 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9053 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9054 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { 9055 /* New more recent rack_tmit_time */ 9056 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 9057 if (rack->r_ctl.rc_rack_tmit_time == 0) 9058 rack->r_ctl.rc_rack_tmit_time = 1; 9059 rack->rc_rack_rtt = t; 9060 } 9061 return (1); 9062 } 9063 /* 9064 * We clear the soft/rxtshift since we got an ack. 9065 * There is no assurance we will call the commit() function 9066 * so we need to clear these to avoid incorrect handling. 9067 */ 9068 tp->t_rxtshift = 0; 9069 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 9070 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 9071 tp->t_softerror = 0; 9072 if (to && (to->to_flags & TOF_TS) && 9073 (ack_type == CUM_ACKED) && 9074 (to->to_tsecr) && 9075 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 9076 /* 9077 * Now which timestamp does it match? In this block the ACK 9078 * must be coming from a previous transmission. 9079 */ 9080 for (i = 0; i < rsm->r_rtr_cnt; i++) { 9081 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 9082 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 9083 if ((int)t <= 0) 9084 t = 1; 9085 if (CC_ALGO(tp)->rttsample != NULL) { 9086 /* 9087 * Kick the RTT to the CC, here 9088 * we lie a bit in that we know the 9089 * retransmission is correct even though 9090 * we retransmitted. This is because 9091 * we match the timestamps. 
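* For example, with r_tim_lastsent[] = {T0, T1} and the echoed tsecr
* matching T0, the peer is acking the original transmission; the
* (i + 1) < r_rtr_cnt check below then treats the retransmit as
* spurious and skips the rack_rtt update.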
9092 */ 9093 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) 9094 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; 9095 else 9096 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; 9097 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 9098 } 9099 if ((i + 1) < rsm->r_rtr_cnt) { 9100 /* 9101 * The peer ack'd from our previous 9102 * transmission. We have a spurious 9103 * retransmission and thus we dont 9104 * want to update our rack_rtt. 9105 * 9106 * Hmm should there be a CC revert here? 9107 * 9108 */ 9109 return (0); 9110 } 9111 if (!tp->t_rttlow || tp->t_rttlow > t) 9112 tp->t_rttlow = t; 9113 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9114 rack->r_ctl.rc_rack_min_rtt = t; 9115 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9116 rack->r_ctl.rc_rack_min_rtt = 1; 9117 } 9118 } 9119 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9120 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9121 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { 9122 /* New more recent rack_tmit_time */ 9123 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 9124 if (rack->r_ctl.rc_rack_tmit_time == 0) 9125 rack->r_ctl.rc_rack_tmit_time = 1; 9126 rack->rc_rack_rtt = t; 9127 } 9128 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); 9129 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, 9130 rsm->r_rtr_cnt); 9131 return (1); 9132 } 9133 } 9134 /* If we are logging log out the sendmap */ 9135 if (tcp_bblogging_on(rack->rc_tp)) { 9136 for (i = 0; i < rsm->r_rtr_cnt; i++) { 9137 rack_log_rtt_sendmap(rack, i, rsm->r_tim_lastsent[i], to->to_tsecr); 9138 } 9139 } 9140 goto ts_not_found; 9141 } else { 9142 /* 9143 * Ok its a SACK block that we retransmitted. or a windows 9144 * machine without timestamps. We can tell nothing from the 9145 * time-stamp since its not there or the time the peer last 9146 * recieved a segment that moved forward its cum-ack point. 9147 */ 9148 ts_not_found: 9149 i = rsm->r_rtr_cnt - 1; 9150 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 9151 if ((int)t <= 0) 9152 t = 1; 9153 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9154 /* 9155 * We retransmitted and the ack came back in less 9156 * than the smallest rtt we have observed. We most 9157 * likely did an improper retransmit as outlined in 9158 * 6.2 Step 2 point 2 in the rack-draft so we 9159 * don't want to update our rack_rtt. We in 9160 * theory (in future) might want to think about reverting our 9161 * cwnd state but we won't for now. 9162 */ 9163 return (0); 9164 } else if (rack->r_ctl.rc_rack_min_rtt) { 9165 /* 9166 * We retransmitted it and the retransmit did the 9167 * job. 9168 */ 9169 if (!rack->r_ctl.rc_rack_min_rtt || 9170 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 9171 rack->r_ctl.rc_rack_min_rtt = t; 9172 if (rack->r_ctl.rc_rack_min_rtt == 0) { 9173 rack->r_ctl.rc_rack_min_rtt = 1; 9174 } 9175 } 9176 if ((rack->r_ctl.rc_rack_tmit_time == 0) || 9177 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 9178 (uint32_t)rsm->r_tim_lastsent[i]))) { 9179 /* New more recent rack_tmit_time */ 9180 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; 9181 if (rack->r_ctl.rc_rack_tmit_time == 0) 9182 rack->r_ctl.rc_rack_tmit_time = 1; 9183 rack->rc_rack_rtt = t; 9184 } 9185 return (1); 9186 } 9187 } 9188 return (0); 9189 } 9190 9191 /* 9192 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 
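* Entries left carrying RACK_SACK_PASSED are the ones RACK's
* time-based loss detection later treats as retransmission candidates
* once the reorder window expires.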
9193 */ 9194 static void 9195 rack_log_sack_passed(struct tcpcb *tp, 9196 struct tcp_rack *rack, struct rack_sendmap *rsm) 9197 { 9198 struct rack_sendmap *nrsm; 9199 9200 nrsm = rsm; 9201 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 9202 rack_head, r_tnext) { 9203 if (nrsm == rsm) { 9204 /* Skip original segment he is acked */ 9205 continue; 9206 } 9207 if (nrsm->r_flags & RACK_ACKED) { 9208 /* 9209 * Skip ack'd segments, though we 9210 * should not see these, since tmap 9211 * should not have ack'd segments. 9212 */ 9213 continue; 9214 } 9215 if (nrsm->r_flags & RACK_RWND_COLLAPSED) { 9216 /* 9217 * If the peer dropped the rwnd on 9218 * these then we don't worry about them. 9219 */ 9220 continue; 9221 } 9222 if (nrsm->r_flags & RACK_SACK_PASSED) { 9223 /* 9224 * We found one that is already marked 9225 * passed, we have been here before and 9226 * so all others below this are marked. 9227 */ 9228 break; 9229 } 9230 nrsm->r_flags |= RACK_SACK_PASSED; 9231 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 9232 } 9233 } 9234 9235 static void 9236 rack_need_set_test(struct tcpcb *tp, 9237 struct tcp_rack *rack, 9238 struct rack_sendmap *rsm, 9239 tcp_seq th_ack, 9240 int line, 9241 int use_which) 9242 { 9243 struct rack_sendmap *s_rsm; 9244 9245 if ((tp->t_flags & TF_GPUTINPROG) && 9246 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 9247 /* 9248 * We were app limited, and this ack 9249 * butts up or goes beyond the point where we want 9250 * to start our next measurement. We need 9251 * to record the new gput_ts as here and 9252 * possibly update the start sequence. 9253 */ 9254 uint32_t seq, ts; 9255 9256 if (rsm->r_rtr_cnt > 1) { 9257 /* 9258 * This is a retransmit, can we 9259 * really make any assessment at this 9260 * point? We are not really sure of 9261 * the timestamp, is it this or the 9262 * previous transmission? 9263 * 9264 * Lets wait for something better that 9265 * is not retransmitted. 9266 */ 9267 return; 9268 } 9269 seq = tp->gput_seq; 9270 ts = tp->gput_ts; 9271 rack->app_limited_needs_set = 0; 9272 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 9273 /* Do we start at a new end? */ 9274 if ((use_which == RACK_USE_BEG) && 9275 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { 9276 /* 9277 * When we get an ACK that just eats 9278 * up some of the rsm, we set RACK_USE_BEG 9279 * since whats at r_start (i.e. th_ack) 9280 * is left unacked and thats where the 9281 * measurement now starts. 9282 */ 9283 tp->gput_seq = rsm->r_start; 9284 } 9285 if ((use_which == RACK_USE_END) && 9286 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 9287 /* 9288 * We use the end when the cumack 9289 * is moving forward and completely 9290 * deleting the rsm passed so basically 9291 * r_end holds th_ack. 9292 * 9293 * For SACK's we also want to use the end 9294 * since this piece just got sacked and 9295 * we want to target anything after that 9296 * in our measurement. 9297 */ 9298 tp->gput_seq = rsm->r_end; 9299 } 9300 if (use_which == RACK_USE_END_OR_THACK) { 9301 /* 9302 * special case for ack moving forward, 9303 * not a sack, we need to move all the 9304 * way up to where this ack cum-ack moves 9305 * to. 9306 */ 9307 if (SEQ_GT(th_ack, rsm->r_end)) 9308 tp->gput_seq = th_ack; 9309 else 9310 tp->gput_seq = rsm->r_end; 9311 } 9312 if (SEQ_LT(tp->gput_seq, tp->snd_max)) 9313 s_rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); 9314 else 9315 s_rsm = NULL; 9316 /* 9317 * Pick up the correct send time if we can the rsm passed in 9318 * may be equal to s_rsm if the RACK_USE_BEG was set. 
For the other 9319 * two cases (RACK_USE_THACK or RACK_USE_END) most likely we will 9320 * find a different seq i.e. the next send up. 9321 * 9322 * If that has not been sent, s_rsm will be NULL and we must 9323 * arrange it so this function will get called again by setting 9324 * app_limited_needs_set. 9325 */ 9326 if (s_rsm) 9327 rack->r_ctl.rc_gp_output_ts = s_rsm->r_tim_lastsent[0]; 9328 else { 9329 /* If we hit here we have to have *not* sent tp->gput_seq */ 9330 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; 9331 /* Set it up so we will go through here again */ 9332 rack->app_limited_needs_set = 1; 9333 } 9334 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { 9335 /* 9336 * We moved beyond this guy's range, re-calculate 9337 * the new end point. 9338 */ 9339 if (rack->rc_gp_filled == 0) { 9340 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 9341 } else { 9342 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 9343 } 9344 } 9345 /* 9346 * We are moving the goal post, we may be able to clear the 9347 * measure_saw_probe_rtt flag. 9348 */ 9349 if ((rack->in_probe_rtt == 0) && 9350 (rack->measure_saw_probe_rtt) && 9351 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 9352 rack->measure_saw_probe_rtt = 0; 9353 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 9354 seq, tp->gput_seq, 9355 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | 9356 (uint64_t)rack->r_ctl.rc_gp_output_ts), 9357 5, line, NULL, 0); 9358 if (rack->rc_gp_filled && 9359 ((tp->gput_ack - tp->gput_seq) < 9360 max(rc_init_window(rack), (MIN_GP_WIN * 9361 ctf_fixed_maxseg(tp))))) { 9362 uint32_t ideal_amount; 9363 9364 ideal_amount = rack_get_measure_window(tp, rack); 9365 if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) { 9366 /* 9367 * There is no sense of continuing this measurement 9368 * because its too small to gain us anything we 9369 * trust. Skip it and that way we can start a new 9370 * measurement quicker. 9371 */ 9372 tp->t_flags &= ~TF_GPUTINPROG; 9373 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 9374 0, 0, 9375 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | 9376 (uint64_t)rack->r_ctl.rc_gp_output_ts), 9377 6, __LINE__, NULL, 0); 9378 } else { 9379 /* 9380 * Reset the window further out. 
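* i.e. gput_ack = gput_seq + ideal_amount, so the measurement again
* spans a full measurement window from its new starting point.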
9381 */ 9382 tp->gput_ack = tp->gput_seq + ideal_amount; 9383 } 9384 } 9385 rack_tend_gp_marks(tp, rack); 9386 rack_log_gpset(rack, tp->gput_ack, 0, 0, line, 2, rsm); 9387 } 9388 } 9389 9390 static inline int 9391 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) 9392 { 9393 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { 9394 /* Behind our TLP definition or right at */ 9395 return (0); 9396 } 9397 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { 9398 /* The start is beyond or right at our end of TLP definition */ 9399 return (0); 9400 } 9401 /* It has to be a sub-part of the original TLP recorded */ 9402 return (1); 9403 } 9404 9405 9406 9407 static uint32_t 9408 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 9409 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, 9410 int *no_extra, 9411 int *moved_two, uint32_t segsiz) 9412 { 9413 uint32_t start, end, changed = 0; 9414 struct rack_sendmap stack_map; 9415 struct rack_sendmap *rsm, *nrsm, *prev, *next; 9416 int insret __diagused; 9417 int32_t used_ref = 1; 9418 int moved = 0; 9419 #ifdef TCP_SAD_DETECTION 9420 int allow_segsiz; 9421 int first_time_through = 1; 9422 #endif 9423 int noextra = 0; 9424 int can_use_hookery = 0; 9425 9426 start = sack->start; 9427 end = sack->end; 9428 rsm = *prsm; 9429 9430 #ifdef TCP_SAD_DETECTION 9431 /* 9432 * There are a strange number of proxys and meddle boxes in the world 9433 * that seem to cut up segments on different boundaries. This gets us 9434 * smaller sacks that are still ok in terms of it being an attacker. 9435 * We use the base segsiz to calculate an allowable smallness but 9436 * also enforce a min on the segsiz in case it is an attacker playing 9437 * games with MSS. So basically if the sack arrives and it is 9438 * larger than a worse case 960 bytes, we don't classify the guy 9439 * as supicious. 9440 */ 9441 allow_segsiz = max(segsiz, 1200) * sad_seg_size_per; 9442 allow_segsiz /= 1000; 9443 #endif 9444 do_rest_ofb: 9445 if ((rsm == NULL) || 9446 (SEQ_LT(end, rsm->r_start)) || 9447 (SEQ_GEQ(start, rsm->r_end)) || 9448 (SEQ_LT(start, rsm->r_start))) { 9449 /* 9450 * We are not in the right spot, 9451 * find the correct spot in the tree. 9452 */ 9453 used_ref = 0; 9454 rsm = tqhash_find(rack->r_ctl.tqh, start); 9455 moved++; 9456 } 9457 if (rsm == NULL) { 9458 /* TSNH */ 9459 goto out; 9460 } 9461 #ifdef TCP_SAD_DETECTION 9462 /* Now we must check for suspicous activity */ 9463 if ((first_time_through == 1) && 9464 ((end - start) < min((rsm->r_end - rsm->r_start), allow_segsiz)) && 9465 ((rsm->r_flags & RACK_PMTU_CHG) == 0) && 9466 ((rsm->r_flags & RACK_TLP) == 0)) { 9467 /* 9468 * Its less than a full MSS or the segment being acked 9469 * this should only happen if the rsm in question had the 9470 * r_just_ret flag set <and> the end matches the end of 9471 * the rsm block. 9472 * 9473 * Note we do not look at segments that have had TLP's on 9474 * them since we can get un-reported rwnd collapses that 9475 * basically we TLP on and then we get back a sack block 9476 * that goes from the start to only a small way. 
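* As a worked example, with sad_seg_size_per at 800 (the setting that
* yields the 960-byte worst case noted above) and a segsiz of 1448,
* allow_segsiz works out to 1448 * 800 / 1000 = 1158 bytes; only sack
* blocks smaller than both that and the rsm itself reach the checks
* below.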
9477 * 9478 */ 9479 int loss, ok; 9480 9481 ok = 0; 9482 if (SEQ_GEQ(end, rsm->r_end)) { 9483 if (rsm->r_just_ret == 1) { 9484 /* This was at the end of a send which is ok */ 9485 ok = 1; 9486 } else { 9487 /* A bit harder was it the end of our segment */ 9488 int segs, len; 9489 9490 len = (rsm->r_end - rsm->r_start); 9491 segs = len / segsiz; 9492 segs *= segsiz; 9493 if ((segs + (rsm->r_end - start)) == len) { 9494 /* 9495 * So this last bit was the 9496 * end of our send if we cut it 9497 * up into segsiz pieces so its ok. 9498 */ 9499 ok = 1; 9500 } 9501 } 9502 } 9503 if (ok == 0) { 9504 /* 9505 * This guy is doing something suspicious 9506 * lets start detection. 9507 */ 9508 if (rack->rc_suspicious == 0) { 9509 tcp_trace_point(rack->rc_tp, TCP_TP_SAD_SUSPECT); 9510 counter_u64_add(rack_sack_attacks_suspect, 1); 9511 rack->rc_suspicious = 1; 9512 rack_log_sad(rack, 4); 9513 if (tcp_bblogging_on(rack->rc_tp)) { 9514 union tcp_log_stackspecific log; 9515 struct timeval tv; 9516 9517 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 9518 log.u_bbr.flex1 = end; 9519 log.u_bbr.flex2 = start; 9520 log.u_bbr.flex3 = rsm->r_end; 9521 log.u_bbr.flex4 = rsm->r_start; 9522 log.u_bbr.flex5 = segsiz; 9523 log.u_bbr.flex6 = rsm->r_fas; 9524 log.u_bbr.flex7 = rsm->r_bas; 9525 log.u_bbr.flex8 = 5; 9526 log.u_bbr.pkts_out = rsm->r_flags; 9527 log.u_bbr.bbr_state = rack->rc_suspicious; 9528 log.u_bbr.bbr_substate = rsm->r_just_ret; 9529 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 9530 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 9531 TCP_LOG_EVENTP(rack->rc_tp, NULL, 9532 &rack->rc_inp->inp_socket->so_rcv, 9533 &rack->rc_inp->inp_socket->so_snd, 9534 TCP_SAD_DETECTION, 0, 9535 0, &log, false, &tv); 9536 } 9537 } 9538 /* You loose some ack count every time you sack 9539 * a small bit that is not butting to the end of 9540 * what we have sent. This is because we never 9541 * send small bits unless its the end of the sb. 9542 * Anyone sending a sack that is not at the end 9543 * is thus very very suspicious. 9544 */ 9545 loss = (segsiz/2) / (end - start); 9546 if (loss < rack->r_ctl.ack_count) 9547 rack->r_ctl.ack_count -= loss; 9548 else 9549 rack->r_ctl.ack_count = 0; 9550 } 9551 } 9552 first_time_through = 0; 9553 #endif 9554 /* Ok we have an ACK for some piece of this rsm */ 9555 if (rsm->r_start != start) { 9556 if ((rsm->r_flags & RACK_ACKED) == 0) { 9557 /* 9558 * Before any splitting or hookery is 9559 * done is it a TLP of interest i.e. rxt? 9560 */ 9561 if ((rsm->r_flags & RACK_TLP) && 9562 (rsm->r_rtr_cnt > 1)) { 9563 /* 9564 * We are splitting a rxt TLP, check 9565 * if we need to save off the start/end 9566 */ 9567 if (rack->rc_last_tlp_acked_set && 9568 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9569 /* 9570 * We already turned this on since we are inside 9571 * the previous one was a partially sack now we 9572 * are getting another one (maybe all of it). 9573 * 9574 */ 9575 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9576 /* 9577 * Lets make sure we have all of it though. 
9578 */ 9579 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9580 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9581 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9582 rack->r_ctl.last_tlp_acked_end); 9583 } 9584 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9585 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9586 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9587 rack->r_ctl.last_tlp_acked_end); 9588 } 9589 } else { 9590 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9591 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9592 rack->rc_last_tlp_past_cumack = 0; 9593 rack->rc_last_tlp_acked_set = 1; 9594 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9595 } 9596 } 9597 /** 9598 * Need to split this in two pieces the before and after, 9599 * the before remains in the map, the after must be 9600 * added. In other words we have: 9601 * rsm |--------------| 9602 * sackblk |-------> 9603 * rsm will become 9604 * rsm |---| 9605 * and nrsm will be the sacked piece 9606 * nrsm |----------| 9607 * 9608 * But before we start down that path lets 9609 * see if the sack spans over on top of 9610 * the next guy and it is already sacked. 9611 * 9612 */ 9613 /* 9614 * Hookery can only be used if the two entries 9615 * are in the same bucket and neither one of 9616 * them staddle the bucket line. 9617 */ 9618 next = tqhash_next(rack->r_ctl.tqh, rsm); 9619 if (next && 9620 (rsm->bindex == next->bindex) && 9621 ((rsm->r_flags & RACK_STRADDLE) == 0) && 9622 ((next->r_flags & RACK_STRADDLE) == 0) && 9623 (rsm->r_flags & RACK_IN_GP_WIN) && 9624 (next->r_flags & RACK_IN_GP_WIN)) 9625 can_use_hookery = 1; 9626 else if (next && 9627 (rsm->bindex == next->bindex) && 9628 ((rsm->r_flags & RACK_STRADDLE) == 0) && 9629 ((next->r_flags & RACK_STRADDLE) == 0) && 9630 ((rsm->r_flags & RACK_IN_GP_WIN) == 0) && 9631 ((next->r_flags & RACK_IN_GP_WIN) == 0)) 9632 can_use_hookery = 1; 9633 else 9634 can_use_hookery = 0; 9635 if (next && can_use_hookery && 9636 (next->r_flags & RACK_ACKED) && 9637 SEQ_GEQ(end, next->r_start)) { 9638 /** 9639 * So the next one is already acked, and 9640 * we can thus by hookery use our stack_map 9641 * to reflect the piece being sacked and 9642 * then adjust the two tree entries moving 9643 * the start and ends around. So we start like: 9644 * rsm |------------| (not-acked) 9645 * next |-----------| (acked) 9646 * sackblk |--------> 9647 * We want to end like so: 9648 * rsm |------| (not-acked) 9649 * next |-----------------| (acked) 9650 * nrsm |-----| 9651 * Where nrsm is a temporary stack piece we 9652 * use to update all the gizmos. 9653 */ 9654 /* Copy up our fudge block */ 9655 noextra++; 9656 nrsm = &stack_map; 9657 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 9658 /* Now adjust our tree blocks */ 9659 rsm->r_end = start; 9660 next->r_start = start; 9661 rsm->r_flags |= RACK_SHUFFLED; 9662 next->r_flags |= RACK_SHUFFLED; 9663 /* Now we must adjust back where next->m is */ 9664 rack_setup_offset_for_rsm(rack, rsm, next); 9665 /* 9666 * Which timestamp do we keep? It is rather 9667 * important in GP measurements to have the 9668 * accurate end of the send window. 9669 * 9670 * We keep the largest value, which is the newest 9671 * send. We do this in case a segment that is 9672 * joined together and not part of a GP estimate 9673 * later gets expanded into the GP estimate. 9674 * 9675 * We prohibit the merging of unlike kinds i.e. 
9676 * all pieces that are in the GP estimate can be 9677 * merged and all pieces that are not in a GP estimate 9678 * can be merged, but not disimilar pieces. Combine 9679 * this with taking the highest here and we should 9680 * be ok unless of course the client reneges. Then 9681 * all bets are off. 9682 */ 9683 if (next->r_tim_lastsent[(next->r_rtr_cnt-1)] < 9684 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) 9685 next->r_tim_lastsent[(next->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]; 9686 /* 9687 * And we must keep the newest ack arrival time. 9688 */ 9689 if (next->r_ack_arrival < 9690 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) 9691 next->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9692 9693 9694 /* We don't need to adjust rsm, it did not change */ 9695 /* Clear out the dup ack count of the remainder */ 9696 rsm->r_dupack = 0; 9697 rsm->r_just_ret = 0; 9698 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9699 /* Now lets make sure our fudge block is right */ 9700 nrsm->r_start = start; 9701 /* Now lets update all the stats and such */ 9702 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 9703 if (rack->app_limited_needs_set) 9704 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 9705 changed += (nrsm->r_end - nrsm->r_start); 9706 /* You get a count for acking a whole segment or more */ 9707 if ((nrsm->r_end - nrsm->r_start) >= segsiz) 9708 rack->r_ctl.ack_count += ((nrsm->r_end - nrsm->r_start) / segsiz); 9709 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 9710 if (nrsm->r_flags & RACK_SACK_PASSED) { 9711 rack->r_ctl.rc_reorder_ts = cts; 9712 if (rack->r_ctl.rc_reorder_ts == 0) 9713 rack->r_ctl.rc_reorder_ts = 1; 9714 } 9715 /* 9716 * Now we want to go up from rsm (the 9717 * one left un-acked) to the next one 9718 * in the tmap. We do this so when 9719 * we walk backwards we include marking 9720 * sack-passed on rsm (The one passed in 9721 * is skipped since it is generally called 9722 * on something sacked before removing it 9723 * from the tmap). 9724 */ 9725 if (rsm->r_in_tmap) { 9726 nrsm = TAILQ_NEXT(rsm, r_tnext); 9727 /* 9728 * Now that we have the next 9729 * one walk backwards from there. 9730 */ 9731 if (nrsm && nrsm->r_in_tmap) 9732 rack_log_sack_passed(tp, rack, nrsm); 9733 } 9734 /* Now are we done? */ 9735 if (SEQ_LT(end, next->r_end) || 9736 (end == next->r_end)) { 9737 /* Done with block */ 9738 goto out; 9739 } 9740 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); 9741 counter_u64_add(rack_sack_used_next_merge, 1); 9742 /* Postion for the next block */ 9743 start = next->r_end; 9744 rsm = tqhash_next(rack->r_ctl.tqh, next); 9745 if (rsm == NULL) 9746 goto out; 9747 } else { 9748 /** 9749 * We can't use any hookery here, so we 9750 * need to split the map. We enter like 9751 * so: 9752 * rsm |--------| 9753 * sackblk |-----> 9754 * We will add the new block nrsm and 9755 * that will be the new portion, and then 9756 * fall through after reseting rsm. So we 9757 * split and look like this: 9758 * rsm |----| 9759 * sackblk |-----> 9760 * nrsm |---| 9761 * We then fall through reseting 9762 * rsm to nrsm, so the next block 9763 * picks it up. 9764 */ 9765 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 9766 if (nrsm == NULL) { 9767 /* 9768 * failed XXXrrs what can we do but loose the sack 9769 * info? 
9770 */ 9771 goto out; 9772 } 9773 counter_u64_add(rack_sack_splits, 1); 9774 rack_clone_rsm(rack, nrsm, rsm, start); 9775 moved++; 9776 rsm->r_just_ret = 0; 9777 #ifndef INVARIANTS 9778 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 9779 #else 9780 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 9781 panic("Insert in rb tree of %p fails ret:%d rack:%p rsm:%p", 9782 nrsm, insret, rack, rsm); 9783 } 9784 #endif 9785 if (rsm->r_in_tmap) { 9786 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 9787 nrsm->r_in_tmap = 1; 9788 } 9789 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 9790 rsm->r_flags &= (~RACK_HAS_FIN); 9791 /* Position us to point to the new nrsm that starts the sack blk */ 9792 rsm = nrsm; 9793 } 9794 } else { 9795 /* Already sacked this piece */ 9796 counter_u64_add(rack_sack_skipped_acked, 1); 9797 moved++; 9798 if (end == rsm->r_end) { 9799 /* Done with block */ 9800 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 9801 goto out; 9802 } else if (SEQ_LT(end, rsm->r_end)) { 9803 /* A partial sack to a already sacked block */ 9804 moved++; 9805 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 9806 goto out; 9807 } else { 9808 /* 9809 * The end goes beyond this guy 9810 * reposition the start to the 9811 * next block. 9812 */ 9813 start = rsm->r_end; 9814 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 9815 if (rsm == NULL) 9816 goto out; 9817 } 9818 } 9819 } 9820 if (SEQ_GEQ(end, rsm->r_end)) { 9821 /** 9822 * The end of this block is either beyond this guy or right 9823 * at this guy. I.e.: 9824 * rsm --- |-----| 9825 * end |-----| 9826 * <or> 9827 * end |---------| 9828 */ 9829 if ((rsm->r_flags & RACK_ACKED) == 0) { 9830 /* 9831 * Is it a TLP of interest? 9832 */ 9833 if ((rsm->r_flags & RACK_TLP) && 9834 (rsm->r_rtr_cnt > 1)) { 9835 /* 9836 * We are splitting a rxt TLP, check 9837 * if we need to save off the start/end 9838 */ 9839 if (rack->rc_last_tlp_acked_set && 9840 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9841 /* 9842 * We already turned this on since we are inside 9843 * the previous one was a partially sack now we 9844 * are getting another one (maybe all of it). 9845 */ 9846 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9847 /* 9848 * Lets make sure we have all of it though. 9849 */ 9850 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9851 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9852 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9853 rack->r_ctl.last_tlp_acked_end); 9854 } 9855 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9856 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9857 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9858 rack->r_ctl.last_tlp_acked_end); 9859 } 9860 } else { 9861 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9862 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9863 rack->rc_last_tlp_past_cumack = 0; 9864 rack->rc_last_tlp_acked_set = 1; 9865 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9866 } 9867 } 9868 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 9869 changed += (rsm->r_end - rsm->r_start); 9870 /* You get a count for acking a whole segment or more */ 9871 if ((rsm->r_end - rsm->r_start) >= segsiz) 9872 rack->r_ctl.ack_count += ((rsm->r_end - rsm->r_start) / segsiz); 9873 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 9874 if (rsm->r_in_tmap) /* should be true */ 9875 rack_log_sack_passed(tp, rack, rsm); 9876 /* Is Reordering occuring? 
*/ 9877 if (rsm->r_flags & RACK_SACK_PASSED) { 9878 rsm->r_flags &= ~RACK_SACK_PASSED; 9879 rack->r_ctl.rc_reorder_ts = cts; 9880 if (rack->r_ctl.rc_reorder_ts == 0) 9881 rack->r_ctl.rc_reorder_ts = 1; 9882 } 9883 if (rack->app_limited_needs_set) 9884 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 9885 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9886 rsm->r_flags |= RACK_ACKED; 9887 if (rsm->r_in_tmap) { 9888 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 9889 rsm->r_in_tmap = 0; 9890 } 9891 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); 9892 } else { 9893 counter_u64_add(rack_sack_skipped_acked, 1); 9894 moved++; 9895 } 9896 if (end == rsm->r_end) { 9897 /* This block only - done, setup for next */ 9898 goto out; 9899 } 9900 /* 9901 * There is more not coverend by this rsm move on 9902 * to the next block in the RB tree. 9903 */ 9904 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); 9905 start = rsm->r_end; 9906 rsm = nrsm; 9907 if (rsm == NULL) 9908 goto out; 9909 goto do_rest_ofb; 9910 } 9911 /** 9912 * The end of this sack block is smaller than 9913 * our rsm i.e.: 9914 * rsm --- |-----| 9915 * end |--| 9916 */ 9917 if ((rsm->r_flags & RACK_ACKED) == 0) { 9918 /* 9919 * Is it a TLP of interest? 9920 */ 9921 if ((rsm->r_flags & RACK_TLP) && 9922 (rsm->r_rtr_cnt > 1)) { 9923 /* 9924 * We are splitting a rxt TLP, check 9925 * if we need to save off the start/end 9926 */ 9927 if (rack->rc_last_tlp_acked_set && 9928 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9929 /* 9930 * We already turned this on since we are inside 9931 * the previous one was a partially sack now we 9932 * are getting another one (maybe all of it). 9933 */ 9934 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9935 /* 9936 * Lets make sure we have all of it though. 9937 */ 9938 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9939 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9940 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9941 rack->r_ctl.last_tlp_acked_end); 9942 } 9943 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9944 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9945 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9946 rack->r_ctl.last_tlp_acked_end); 9947 } 9948 } else { 9949 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9950 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9951 rack->rc_last_tlp_past_cumack = 0; 9952 rack->rc_last_tlp_acked_set = 1; 9953 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9954 } 9955 } 9956 /* 9957 * Hookery can only be used if the two entries 9958 * are in the same bucket and neither one of 9959 * them staddle the bucket line. 
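 *
 * A condensed restatement of the checks just below (a sketch, not
 * a drop-in replacement):
 *
 *    same_bucket  = (prev != NULL) &&
 *                   (rsm->bindex == prev->bindex) &&
 *                   ((rsm->r_flags & RACK_STRADDLE) == 0) &&
 *                   ((prev->r_flags & RACK_STRADDLE) == 0);
 *    same_gp_kind = ((rsm->r_flags & RACK_IN_GP_WIN) ==
 *                    (prev->r_flags & RACK_IN_GP_WIN));
 *    can_use_hookery = same_bucket && same_gp_kind;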
9960 */ 9961 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 9962 if (prev && 9963 (rsm->bindex == prev->bindex) && 9964 ((rsm->r_flags & RACK_STRADDLE) == 0) && 9965 ((prev->r_flags & RACK_STRADDLE) == 0) && 9966 (rsm->r_flags & RACK_IN_GP_WIN) && 9967 (prev->r_flags & RACK_IN_GP_WIN)) 9968 can_use_hookery = 1; 9969 else if (prev && 9970 (rsm->bindex == prev->bindex) && 9971 ((rsm->r_flags & RACK_STRADDLE) == 0) && 9972 ((prev->r_flags & RACK_STRADDLE) == 0) && 9973 ((rsm->r_flags & RACK_IN_GP_WIN) == 0) && 9974 ((prev->r_flags & RACK_IN_GP_WIN) == 0)) 9975 can_use_hookery = 1; 9976 else 9977 can_use_hookery = 0; 9978 9979 if (prev && can_use_hookery && 9980 (prev->r_flags & RACK_ACKED)) { 9981 /** 9982 * Goal, we want the right remainder of rsm to shrink 9983 * in place and span from (rsm->r_start = end) to rsm->r_end. 9984 * We want to expand prev to go all the way 9985 * to prev->r_end <- end. 9986 * so in the tree we have before: 9987 * prev |--------| (acked) 9988 * rsm |-------| (non-acked) 9989 * sackblk |-| 9990 * We churn it so we end up with 9991 * prev |----------| (acked) 9992 * rsm |-----| (non-acked) 9993 * nrsm |-| (temporary) 9994 * 9995 * Note if either prev/rsm is a TLP we don't 9996 * do this. 9997 */ 9998 noextra++; 9999 nrsm = &stack_map; 10000 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 10001 prev->r_end = end; 10002 rsm->r_start = end; 10003 rsm->r_flags |= RACK_SHUFFLED; 10004 prev->r_flags |= RACK_SHUFFLED; 10005 /* Now adjust nrsm (stack copy) to be 10006 * the one that is the small 10007 * piece that was "sacked". 10008 */ 10009 nrsm->r_end = end; 10010 rsm->r_dupack = 0; 10011 /* 10012 * Which timestamp do we keep? It is rather 10013 * important in GP measurements to have the 10014 * accurate end of the send window. 10015 * 10016 * We keep the largest value, which is the newest 10017 * send. We do this in case a segment that is 10018 * joined together and not part of a GP estimate 10019 * later gets expanded into the GP estimate. 10020 * 10021 * We prohibit the merging of unlike kinds i.e. 10022 * all pieces that are in the GP estimate can be 10023 * merged and all pieces that are not in a GP estimate 10024 * can be merged, but not disimilar pieces. Combine 10025 * this with taking the highest here and we should 10026 * be ok unless of course the client reneges. Then 10027 * all bets are off. 10028 */ 10029 if(prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] < 10030 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) { 10031 prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 10032 } 10033 /* 10034 * And we must keep the newest ack arrival time. 10035 */ 10036 10037 if(prev->r_ack_arrival < 10038 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) 10039 prev->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10040 10041 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 10042 /* 10043 * Now that the rsm has had its start moved forward 10044 * lets go ahead and get its new place in the world. 10045 */ 10046 rack_setup_offset_for_rsm(rack, prev, rsm); 10047 /* 10048 * Now nrsm is our new little piece 10049 * that is acked (which was merged 10050 * to prev). Update the rtt and changed 10051 * based on that. Also check for reordering. 
10052 */ 10053 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 10054 if (rack->app_limited_needs_set) 10055 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 10056 changed += (nrsm->r_end - nrsm->r_start); 10057 /* You get a count for acking a whole segment or more */ 10058 if ((nrsm->r_end - nrsm->r_start) >= segsiz) 10059 rack->r_ctl.ack_count += ((nrsm->r_end - nrsm->r_start) / segsiz); 10060 10061 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 10062 if (nrsm->r_flags & RACK_SACK_PASSED) { 10063 rack->r_ctl.rc_reorder_ts = cts; 10064 if (rack->r_ctl.rc_reorder_ts == 0) 10065 rack->r_ctl.rc_reorder_ts = 1; 10066 } 10067 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); 10068 rsm = prev; 10069 counter_u64_add(rack_sack_used_prev_merge, 1); 10070 } else { 10071 /** 10072 * This is the case where our previous 10073 * block is not acked either, so we must 10074 * split the block in two. 10075 */ 10076 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 10077 if (nrsm == NULL) { 10078 /* failed rrs what can we do but loose the sack info? */ 10079 goto out; 10080 } 10081 if ((rsm->r_flags & RACK_TLP) && 10082 (rsm->r_rtr_cnt > 1)) { 10083 /* 10084 * We are splitting a rxt TLP, check 10085 * if we need to save off the start/end 10086 */ 10087 if (rack->rc_last_tlp_acked_set && 10088 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 10089 /* 10090 * We already turned this on since this block is inside 10091 * the previous one was a partially sack now we 10092 * are getting another one (maybe all of it). 10093 */ 10094 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 10095 /* 10096 * Lets make sure we have all of it though. 10097 */ 10098 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 10099 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10100 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10101 rack->r_ctl.last_tlp_acked_end); 10102 } 10103 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 10104 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10105 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10106 rack->r_ctl.last_tlp_acked_end); 10107 } 10108 } else { 10109 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10110 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10111 rack->rc_last_tlp_acked_set = 1; 10112 rack->rc_last_tlp_past_cumack = 0; 10113 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 10114 } 10115 } 10116 /** 10117 * In this case nrsm becomes 10118 * nrsm->r_start = end; 10119 * nrsm->r_end = rsm->r_end; 10120 * which is un-acked. 10121 * <and> 10122 * rsm->r_end = nrsm->r_start; 10123 * i.e. the remaining un-acked 10124 * piece is left on the left 10125 * hand side. 
10126 * 10127 * So we start like this 10128 * rsm |----------| (not acked) 10129 * sackblk |---| 10130 * build it so we have 10131 * rsm |---| (acked) 10132 * nrsm |------| (not acked) 10133 */ 10134 counter_u64_add(rack_sack_splits, 1); 10135 rack_clone_rsm(rack, nrsm, rsm, end); 10136 moved++; 10137 rsm->r_flags &= (~RACK_HAS_FIN); 10138 rsm->r_just_ret = 0; 10139 #ifndef INVARIANTS 10140 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 10141 #else 10142 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 10143 panic("Insert in rb tree of %p fails ret:% rack:%p rsm:%p", 10144 nrsm, insret, rack, rsm); 10145 } 10146 #endif 10147 if (rsm->r_in_tmap) { 10148 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 10149 nrsm->r_in_tmap = 1; 10150 } 10151 nrsm->r_dupack = 0; 10152 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 10153 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 10154 changed += (rsm->r_end - rsm->r_start); 10155 /* You get a count for acking a whole segment or more */ 10156 if ((rsm->r_end - rsm->r_start) >= segsiz) 10157 rack->r_ctl.ack_count += ((rsm->r_end - rsm->r_start) / segsiz); 10158 10159 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 10160 if (rsm->r_in_tmap) /* should be true */ 10161 rack_log_sack_passed(tp, rack, rsm); 10162 /* Is Reordering occuring? */ 10163 if (rsm->r_flags & RACK_SACK_PASSED) { 10164 rsm->r_flags &= ~RACK_SACK_PASSED; 10165 rack->r_ctl.rc_reorder_ts = cts; 10166 if (rack->r_ctl.rc_reorder_ts == 0) 10167 rack->r_ctl.rc_reorder_ts = 1; 10168 } 10169 if (rack->app_limited_needs_set) 10170 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 10171 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10172 rsm->r_flags |= RACK_ACKED; 10173 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__); 10174 if (rsm->r_in_tmap) { 10175 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10176 rsm->r_in_tmap = 0; 10177 } 10178 } 10179 } else if (start != end){ 10180 /* 10181 * The block was already acked. 10182 */ 10183 counter_u64_add(rack_sack_skipped_acked, 1); 10184 moved++; 10185 } 10186 out: 10187 if (rsm && 10188 ((rsm->r_flags & RACK_TLP) == 0) && 10189 (rsm->r_flags & RACK_ACKED)) { 10190 /* 10191 * Now can we merge where we worked 10192 * with either the previous or 10193 * next block? 10194 */ 10195 next = tqhash_next(rack->r_ctl.tqh, rsm); 10196 while (next) { 10197 if (next->r_flags & RACK_TLP) 10198 break; 10199 /* Only allow merges between ones in or out of GP window */ 10200 if ((next->r_flags & RACK_IN_GP_WIN) && 10201 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { 10202 break; 10203 } 10204 if ((rsm->r_flags & RACK_IN_GP_WIN) && 10205 ((next->r_flags & RACK_IN_GP_WIN) == 0)) { 10206 break; 10207 } 10208 if (rsm->bindex != next->bindex) 10209 break; 10210 if (rsm->r_flags & RACK_STRADDLE) 10211 break; 10212 if (next->r_flags & RACK_STRADDLE) 10213 break; 10214 if (next->r_flags & RACK_ACKED) { 10215 /* yep this and next can be merged */ 10216 rsm = rack_merge_rsm(rack, rsm, next); 10217 noextra++; 10218 next = tqhash_next(rack->r_ctl.tqh, rsm); 10219 } else 10220 break; 10221 } 10222 /* Now what about the previous? 
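 * For example (illustrative layout only): if the map now holds
 *    prev |--------| (acked)   rsm |------| (acked)
 * and the two entries share a bindex, neither carries RACK_STRADDLE
 * or RACK_TLP, and they agree on RACK_IN_GP_WIN, the loop below
 * folds them into one entry via rack_merge_rsm() and keeps walking
 * backwards until one of those conditions fails.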
*/ 10223 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10224 while (prev) { 10225 if (prev->r_flags & RACK_TLP) 10226 break; 10227 /* Only allow merges between ones in or out of GP window */ 10228 if ((prev->r_flags & RACK_IN_GP_WIN) && 10229 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { 10230 break; 10231 } 10232 if ((rsm->r_flags & RACK_IN_GP_WIN) && 10233 ((prev->r_flags & RACK_IN_GP_WIN) == 0)) { 10234 break; 10235 } 10236 if (rsm->bindex != prev->bindex) 10237 break; 10238 if (rsm->r_flags & RACK_STRADDLE) 10239 break; 10240 if (prev->r_flags & RACK_STRADDLE) 10241 break; 10242 if (prev->r_flags & RACK_ACKED) { 10243 /* yep the previous and this can be merged */ 10244 rsm = rack_merge_rsm(rack, prev, rsm); 10245 noextra++; 10246 prev = tqhash_prev(rack->r_ctl.tqh, rsm); 10247 } else 10248 break; 10249 } 10250 } 10251 if (used_ref == 0) { 10252 counter_u64_add(rack_sack_proc_all, 1); 10253 } else { 10254 counter_u64_add(rack_sack_proc_short, 1); 10255 } 10256 /* Save off the next one for quick reference. */ 10257 nrsm = tqhash_find(rack->r_ctl.tqh, end); 10258 *prsm = rack->r_ctl.rc_sacklast = nrsm; 10259 /* Pass back the moved. */ 10260 *moved_two = moved; 10261 *no_extra = noextra; 10262 return (changed); 10263 } 10264 10265 static void inline 10266 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 10267 { 10268 struct rack_sendmap *tmap; 10269 10270 tmap = NULL; 10271 while (rsm && (rsm->r_flags & RACK_ACKED)) { 10272 /* Its no longer sacked, mark it so */ 10273 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 10274 #ifdef INVARIANTS 10275 if (rsm->r_in_tmap) { 10276 panic("rack:%p rsm:%p flags:0x%x in tmap?", 10277 rack, rsm, rsm->r_flags); 10278 } 10279 #endif 10280 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 10281 /* Rebuild it into our tmap */ 10282 if (tmap == NULL) { 10283 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10284 tmap = rsm; 10285 } else { 10286 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 10287 tmap = rsm; 10288 } 10289 tmap->r_in_tmap = 1; 10290 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 10291 } 10292 /* 10293 * Now lets possibly clear the sack filter so we start 10294 * recognizing sacks that cover this area. 10295 */ 10296 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 10297 10298 } 10299 10300 static void 10301 rack_do_decay(struct tcp_rack *rack) 10302 { 10303 struct timeval res; 10304 10305 #define timersub(tvp, uvp, vvp) \ 10306 do { \ 10307 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ 10308 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ 10309 if ((vvp)->tv_usec < 0) { \ 10310 (vvp)->tv_sec--; \ 10311 (vvp)->tv_usec += 1000000; \ 10312 } \ 10313 } while (0) 10314 10315 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res); 10316 #undef timersub 10317 10318 rack->r_ctl.input_pkt++; 10319 if ((rack->rc_in_persist) || 10320 (res.tv_sec >= 1) || 10321 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) { 10322 /* 10323 * Check for decay of non-SAD, 10324 * we want all SAD detection metrics to 10325 * decay 1/4 per second (or more) passed. 10326 * Current default is 800 so it decays 10327 * 80% every second. 
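 * A worked example, assuming ctf_decay_count() scales its counter
 * to roughly val/1000 of its value (an assumption; check
 * ctf_decay_count() for the exact rounding): with the default
 * tcp_sad_decay_val of 800, a sack_count of 1000 comes back as
 * about 800 from one pass through the block below.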
10328 */ 10329 #ifdef TCP_SAD_DETECTION 10330 uint32_t pkt_delta; 10331 10332 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt; 10333 #endif 10334 /* Update our saved tracking values */ 10335 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt; 10336 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 10337 /* Now do we escape without decay? */ 10338 #ifdef TCP_SAD_DETECTION 10339 if (rack->rc_in_persist || 10340 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) || 10341 (pkt_delta < tcp_sad_low_pps)){ 10342 /* 10343 * We don't decay idle connections 10344 * or ones that have a low input pps. 10345 */ 10346 return; 10347 } 10348 /* Decay the counters */ 10349 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count, 10350 tcp_sad_decay_val); 10351 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count, 10352 tcp_sad_decay_val); 10353 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra, 10354 tcp_sad_decay_val); 10355 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move, 10356 tcp_sad_decay_val); 10357 #endif 10358 } 10359 } 10360 10361 static void inline 10362 rack_rsm_sender_update(struct tcp_rack *rack, struct tcpcb *tp, struct rack_sendmap *rsm, uint8_t from) 10363 { 10364 /* 10365 * We look at advancing the end send time for our GP 10366 * measurement tracking only as the cumulative acknowledgment 10367 * moves forward. You might wonder about this, why not 10368 * at every transmission or retransmission within the 10369 * GP window update the rc_gp_cumack_ts? Well its rather 10370 * nuanced but basically the GP window *may* expand (as 10371 * it does below) or worse and harder to track it may shrink. 10372 * 10373 * This last makes it impossible to track at the time of 10374 * the send, since you may set forward your rc_gp_cumack_ts 10375 * when you send, because that send *is* in your currently 10376 * "guessed" window, but then it shrinks. Now which was 10377 * the send time of the last bytes in the window, by the 10378 * time you ask that question that part of the sendmap 10379 * is freed. So you don't know and you will have too 10380 * long of send window. Instead by updating the time 10381 * marker only when the cumack advances this assures us 10382 * that we will have only the sends in the window of our 10383 * GP measurement. 10384 * 10385 * Another complication from this is the 10386 * merging of sendmap entries. During SACK processing this 10387 * can happen to conserve the sendmap size. That breaks 10388 * everything down in tracking the send window of the GP 10389 * estimate. So to prevent that and keep it working with 10390 * a tiny bit more limited merging, we only allow like 10391 * types to be merged. I.e. if two sends are in the GP window 10392 * then its ok to merge them together. If two sends are not 10393 * in the GP window its ok to merge them together too. Though 10394 * one send in and one send out cannot be merged. We combine 10395 * this with never allowing the shrinking of the GP window when 10396 * we are in recovery so that we can properly calculate the 10397 * sending times. 10398 * 10399 * This all of course seems complicated, because it is.. :) 10400 * 10401 * The cum-ack is being advanced upon the sendmap. 10402 * If we are not doing a GP estimate don't 10403 * proceed. 
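 *
 * The rule the code below implements, as a sketch (same fields,
 * early-return checks elided):
 *
 *    ts = rsm->r_tim_lastsent[rsm->r_rtr_cnt - 1];
 *    if ((ts > rack->r_ctl.rc_gp_cumack_ts) &&
 *        rack_in_gp_window(tp, rsm))
 *            rack->r_ctl.rc_gp_cumack_ts = ts;
 *
 * i.e. the "end of send window" timestamp only moves forward, and
 * only for sends that lie inside the measurement window.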
10404 */ 10405 uint64_t ts; 10406 10407 if ((tp->t_flags & TF_GPUTINPROG) == 0) 10408 return; 10409 /* 10410 * If this sendmap entry is going 10411 * beyond the measurement window we had picked, 10412 * expand the measurement window by that much. 10413 */ 10414 if (SEQ_GT(rsm->r_end, tp->gput_ack)) { 10415 tp->gput_ack = rsm->r_end; 10416 } 10417 /* 10418 * If we have not setup a ack, then we 10419 * have no idea if the newly acked pieces 10420 * will be "in our seq measurement range". If 10421 * it is when we clear the app_limited_needs_set 10422 * flag the timestamp will be updated. 10423 */ 10424 if (rack->app_limited_needs_set) 10425 return; 10426 /* 10427 * Finally, we grab out the latest timestamp 10428 * that this packet was sent and then see 10429 * if: 10430 * a) The packet touches are newly defined GP range. 10431 * b) The time is greater than (newer) than the 10432 * one we currently have. If so we update 10433 * our sending end time window. 10434 * 10435 * Note we *do not* do this at send time. The reason 10436 * is that if you do you *may* pick up a newer timestamp 10437 * for a range you are not going to measure. We project 10438 * out how far and then sometimes modify that to be 10439 * smaller. If that occurs then you will have a send 10440 * that does not belong to the range included. 10441 */ 10442 if ((ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) <= 10443 rack->r_ctl.rc_gp_cumack_ts) 10444 return; 10445 if (rack_in_gp_window(tp, rsm)) { 10446 rack->r_ctl.rc_gp_cumack_ts = ts; 10447 rack_log_gpset(rack, tp->gput_ack, (uint32_t)ts, rsm->r_end, 10448 __LINE__, from, rsm); 10449 } 10450 } 10451 10452 static void 10453 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to, uint64_t acktime) 10454 { 10455 struct rack_sendmap *rsm; 10456 /* 10457 * The ACK point is advancing to th_ack, we must drop off 10458 * the packets in the rack log and calculate any eligble 10459 * RTT's. 10460 */ 10461 10462 rack->r_wanted_output = 1; 10463 if (SEQ_GT(th_ack, tp->snd_una)) 10464 rack->r_ctl.last_cumack_advance = acktime; 10465 10466 /* Tend any TLP that has been marked for 1/2 the seq space (its old) */ 10467 if ((rack->rc_last_tlp_acked_set == 1)&& 10468 (rack->rc_last_tlp_past_cumack == 1) && 10469 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { 10470 /* 10471 * We have reached the point where our last rack 10472 * tlp retransmit sequence is ahead of the cum-ack. 10473 * This can only happen when the cum-ack moves all 10474 * the way around (its been a full 2^^31+1 bytes 10475 * or more since we sent a retransmitted TLP). Lets 10476 * turn off the valid flag since its not really valid. 10477 * 10478 * Note since sack's also turn on this event we have 10479 * a complication, we have to wait to age it out until 10480 * the cum-ack is by the TLP before checking which is 10481 * what the next else clause does. 10482 */ 10483 rack_log_dsack_event(rack, 9, __LINE__, 10484 rack->r_ctl.last_tlp_acked_start, 10485 rack->r_ctl.last_tlp_acked_end); 10486 rack->rc_last_tlp_acked_set = 0; 10487 rack->rc_last_tlp_past_cumack = 0; 10488 } else if ((rack->rc_last_tlp_acked_set == 1) && 10489 (rack->rc_last_tlp_past_cumack == 0) && 10490 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { 10491 /* 10492 * It is safe to start aging TLP's out. 
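 *
 * A worked example (illustrative sequence numbers only): say the
 * last acked TLP block covered [1000000, 1001448).  Once th_ack
 * reaches 1001448 the assignment just below marks it as past the
 * cum-ack.  The earlier block above then clears the valid flag only
 * when th_ack appears to be behind 1000000 again, which with 32-bit
 * sequence arithmetic takes roughly another 2^31 bytes of cum-ack
 * advancement.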
10493 */ 10494 rack->rc_last_tlp_past_cumack = 1; 10495 } 10496 /* We do the same for the tlp send seq as well */ 10497 if ((rack->rc_last_sent_tlp_seq_valid == 1) && 10498 (rack->rc_last_sent_tlp_past_cumack == 1) && 10499 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { 10500 rack_log_dsack_event(rack, 9, __LINE__, 10501 rack->r_ctl.last_sent_tlp_seq, 10502 (rack->r_ctl.last_sent_tlp_seq + 10503 rack->r_ctl.last_sent_tlp_len)); 10504 rack->rc_last_sent_tlp_seq_valid = 0; 10505 rack->rc_last_sent_tlp_past_cumack = 0; 10506 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && 10507 (rack->rc_last_sent_tlp_past_cumack == 0) && 10508 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { 10509 /* 10510 * It is safe to start aging TLP's send. 10511 */ 10512 rack->rc_last_sent_tlp_past_cumack = 1; 10513 } 10514 more: 10515 rsm = tqhash_min(rack->r_ctl.tqh); 10516 if (rsm == NULL) { 10517 if ((th_ack - 1) == tp->iss) { 10518 /* 10519 * For the SYN incoming case we will not 10520 * have called tcp_output for the sending of 10521 * the SYN, so there will be no map. All 10522 * other cases should probably be a panic. 10523 */ 10524 return; 10525 } 10526 if (tp->t_flags & TF_SENTFIN) { 10527 /* if we sent a FIN we often will not have map */ 10528 return; 10529 } 10530 #ifdef INVARIANTS 10531 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u snd_nxt:%u\n", 10532 tp, 10533 tp->t_state, th_ack, rack, 10534 tp->snd_una, tp->snd_max, tp->snd_nxt); 10535 #endif 10536 return; 10537 } 10538 if (SEQ_LT(th_ack, rsm->r_start)) { 10539 /* Huh map is missing this */ 10540 #ifdef INVARIANTS 10541 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 10542 rsm->r_start, 10543 th_ack, tp->t_state, rack->r_state); 10544 #endif 10545 return; 10546 } 10547 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); 10548 10549 /* Now was it a retransmitted TLP? */ 10550 if ((rsm->r_flags & RACK_TLP) && 10551 (rsm->r_rtr_cnt > 1)) { 10552 /* 10553 * Yes, this rsm was a TLP and retransmitted, remember that 10554 * since if a DSACK comes back on this we don't want 10555 * to think of it as a reordered segment. This may 10556 * get updated again with possibly even other TLPs 10557 * in flight, but thats ok. Only when we don't send 10558 * a retransmitted TLP for 1/2 the sequences space 10559 * will it get turned off (above). 10560 */ 10561 if (rack->rc_last_tlp_acked_set && 10562 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 10563 /* 10564 * We already turned this on since the end matches, 10565 * the previous one was a partially ack now we 10566 * are getting another one (maybe all of it). 10567 */ 10568 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 10569 /* 10570 * Lets make sure we have all of it though. 
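 * For example (made-up ranges): if the recorded block is
 * [5000, 6448) and this rsm covers [4000, 7000), the two checks
 * below pull last_tlp_acked_start back to 4000 and push
 * last_tlp_acked_end out to 7000.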
10571 */ 10572 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 10573 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10574 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10575 rack->r_ctl.last_tlp_acked_end); 10576 } 10577 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 10578 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10579 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 10580 rack->r_ctl.last_tlp_acked_end); 10581 } 10582 } else { 10583 rack->rc_last_tlp_past_cumack = 1; 10584 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 10585 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 10586 rack->rc_last_tlp_acked_set = 1; 10587 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 10588 } 10589 } 10590 /* Now do we consume the whole thing? */ 10591 rack->r_ctl.last_tmit_time_acked = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 10592 if (SEQ_GEQ(th_ack, rsm->r_end)) { 10593 /* Its all consumed. */ 10594 uint32_t left; 10595 uint8_t newly_acked; 10596 10597 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 10598 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 10599 rsm->r_rtr_bytes = 0; 10600 /* 10601 * Record the time of highest cumack sent if its in our measurement 10602 * window and possibly bump out the end. 10603 */ 10604 rack_rsm_sender_update(rack, tp, rsm, 4); 10605 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); 10606 if (rsm->r_in_tmap) { 10607 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10608 rsm->r_in_tmap = 0; 10609 } 10610 newly_acked = 1; 10611 if (rsm->r_flags & RACK_ACKED) { 10612 /* 10613 * It was acked on the scoreboard -- remove 10614 * it from total 10615 */ 10616 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 10617 newly_acked = 0; 10618 } else if (rsm->r_flags & RACK_SACK_PASSED) { 10619 /* 10620 * There are segments ACKED on the 10621 * scoreboard further up. We are seeing 10622 * reordering. 10623 */ 10624 rsm->r_flags &= ~RACK_SACK_PASSED; 10625 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 10626 rsm->r_flags |= RACK_ACKED; 10627 rack->r_ctl.rc_reorder_ts = cts; 10628 if (rack->r_ctl.rc_reorder_ts == 0) 10629 rack->r_ctl.rc_reorder_ts = 1; 10630 if (rack->r_ent_rec_ns) { 10631 /* 10632 * We have sent no more, and we saw an sack 10633 * then ack arrive. 10634 */ 10635 rack->r_might_revert = 1; 10636 } 10637 } 10638 if ((rsm->r_flags & RACK_TO_REXT) && 10639 (tp->t_flags & TF_RCVD_TSTMP) && 10640 (to->to_flags & TOF_TS) && 10641 (to->to_tsecr != 0) && 10642 (tp->t_flags & TF_PREVVALID)) { 10643 /* 10644 * We can use the timestamp to see 10645 * if this retransmission was from the 10646 * first transmit. If so we made a mistake. 10647 */ 10648 tp->t_flags &= ~TF_PREVVALID; 10649 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 10650 /* The first transmit is what this ack is for */ 10651 rack_cong_signal(tp, CC_RTO_ERR, th_ack, __LINE__); 10652 } 10653 } 10654 left = th_ack - rsm->r_end; 10655 if (rack->app_limited_needs_set && newly_acked) 10656 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 10657 /* Free back to zone */ 10658 rack_free(rack, rsm); 10659 if (left) { 10660 goto more; 10661 } 10662 /* Check for reneging */ 10663 rsm = tqhash_min(rack->r_ctl.tqh); 10664 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 10665 /* 10666 * The peer has moved snd_una up to 10667 * the edge of this send, i.e. one 10668 * that it had previously acked. 
The only 10669 * way that can be true if the peer threw 10670 * away data (space issues) that it had 10671 * previously sacked (else it would have 10672 * given us snd_una up to (rsm->r_end). 10673 * We need to undo the acked markings here. 10674 * 10675 * Note we have to look to make sure th_ack is 10676 * our rsm->r_start in case we get an old ack 10677 * where th_ack is behind snd_una. 10678 */ 10679 rack_peer_reneges(rack, rsm, th_ack); 10680 } 10681 return; 10682 } 10683 if (rsm->r_flags & RACK_ACKED) { 10684 /* 10685 * It was acked on the scoreboard -- remove it from 10686 * total for the part being cum-acked. 10687 */ 10688 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 10689 } 10690 /* 10691 * Clear the dup ack count for 10692 * the piece that remains. 10693 */ 10694 rsm->r_dupack = 0; 10695 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 10696 if (rsm->r_rtr_bytes) { 10697 /* 10698 * It was retransmitted adjust the 10699 * sack holes for what was acked. 10700 */ 10701 int ack_am; 10702 10703 ack_am = (th_ack - rsm->r_start); 10704 if (ack_am >= rsm->r_rtr_bytes) { 10705 rack->r_ctl.rc_holes_rxt -= ack_am; 10706 rsm->r_rtr_bytes -= ack_am; 10707 } 10708 } 10709 /* 10710 * Update where the piece starts and record 10711 * the time of send of highest cumack sent if 10712 * its in our GP range. 10713 */ 10714 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 10715 /* Now we need to move our offset forward too */ 10716 if (rsm->m && 10717 ((rsm->orig_m_len != rsm->m->m_len) || 10718 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { 10719 /* Fix up the orig_m_len and possibly the mbuf offset */ 10720 rack_adjust_orig_mlen(rsm); 10721 } 10722 rsm->soff += (th_ack - rsm->r_start); 10723 rack_rsm_sender_update(rack, tp, rsm, 5); 10724 /* The trim will move th_ack into r_start for us */ 10725 tqhash_trim(rack->r_ctl.tqh, th_ack); 10726 /* Now do we need to move the mbuf fwd too? */ 10727 { 10728 struct mbuf *m; 10729 uint32_t soff; 10730 10731 m = rsm->m; 10732 soff = rsm->soff; 10733 if (m) { 10734 while (soff >= m->m_len) { 10735 soff -= m->m_len; 10736 KASSERT((m->m_next != NULL), 10737 (" rsm:%p off:%u soff:%u m:%p", 10738 rsm, rsm->soff, soff, m)); 10739 m = m->m_next; 10740 if (m == NULL) { 10741 /* 10742 * This is a fall-back that prevents a panic. In reality 10743 * we should be able to walk the mbuf's and find our place. 10744 * At this point snd_una has not been updated with the sbcut() yet 10745 * but tqhash_trim did update rsm->r_start so the offset calcuation 10746 * should work fine. This is undesirable since we will take cache 10747 * hits to access the socket buffer. And even more puzzling is that 10748 * it happens occasionally. It should not :( 10749 */ 10750 m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 10751 (rsm->r_start - tp->snd_una), 10752 &soff); 10753 break; 10754 } 10755 } 10756 /* 10757 * Now save in our updated values. 
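 *
 * A worked example of the walk above (illustrative mbuf sizes):
 * with soff = 3000 and a chain of 2048-byte mbufs, the loop steps
 * past the first mbuf, leaving m at the second one with soff = 952,
 * which is what gets stored back into the rsm below.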
10758 */ 10759 rsm->m = m; 10760 rsm->soff = soff; 10761 rsm->orig_m_len = rsm->m->m_len; 10762 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 10763 } 10764 } 10765 if (rack->app_limited_needs_set && 10766 SEQ_GEQ(th_ack, tp->gput_seq)) 10767 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 10768 } 10769 10770 static void 10771 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 10772 { 10773 struct rack_sendmap *rsm; 10774 int sack_pass_fnd = 0; 10775 10776 if (rack->r_might_revert) { 10777 /* 10778 * Ok we have reordering, have not sent anything, we 10779 * might want to revert the congestion state if nothing 10780 * further has SACK_PASSED on it. Lets check. 10781 * 10782 * We also get here when we have DSACKs come in for 10783 * all the data that we FR'd. Note that a rxt or tlp 10784 * timer clears this from happening. 10785 */ 10786 10787 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 10788 if (rsm->r_flags & RACK_SACK_PASSED) { 10789 sack_pass_fnd = 1; 10790 break; 10791 } 10792 } 10793 if (sack_pass_fnd == 0) { 10794 /* 10795 * We went into recovery 10796 * incorrectly due to reordering! 10797 */ 10798 int orig_cwnd; 10799 10800 rack->r_ent_rec_ns = 0; 10801 orig_cwnd = tp->snd_cwnd; 10802 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 10803 tp->snd_recover = tp->snd_una; 10804 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 10805 EXIT_RECOVERY(tp->t_flags); 10806 } 10807 rack->r_might_revert = 0; 10808 } 10809 } 10810 10811 #ifdef TCP_SAD_DETECTION 10812 10813 static void 10814 rack_merge_out_sacks(struct tcp_rack *rack) 10815 { 10816 struct rack_sendmap *cur, *next, *rsm, *trsm = NULL; 10817 10818 cur = tqhash_min(rack->r_ctl.tqh); 10819 while(cur) { 10820 next = tqhash_next(rack->r_ctl.tqh, cur); 10821 /* 10822 * The idea is to go through all and merge back 10823 * together the pieces sent together, 10824 */ 10825 if ((next != NULL) && 10826 (cur->r_tim_lastsent[0] == next->r_tim_lastsent[0])) { 10827 rack_merge_rsm(rack, cur, next); 10828 } else { 10829 cur = next; 10830 } 10831 } 10832 /* 10833 * now treat it like a rxt event, everything is outstanding 10834 * and sent nothing acvked and dupacks are all zero. If this 10835 * is not an attacker it will have to dupack its way through 10836 * it all. 10837 */ 10838 TAILQ_INIT(&rack->r_ctl.rc_tmap); 10839 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { 10840 rsm->r_dupack = 0; 10841 /* We must re-add it back to the tlist */ 10842 if (trsm == NULL) { 10843 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 10844 } else { 10845 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 10846 } 10847 rsm->r_in_tmap = 1; 10848 trsm = rsm; 10849 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED); 10850 } 10851 sack_filter_clear(&rack->r_ctl.rack_sf, rack->rc_tp->snd_una); 10852 } 10853 10854 static void 10855 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack, uint32_t bytes_this_ack, uint32_t segsiz) 10856 { 10857 int do_detection = 0; 10858 10859 if (rack->sack_attack_disable || rack->rc_suspicious) { 10860 /* 10861 * If we have been disabled we must detect 10862 * to possibly reverse it. Or if the guy has 10863 * sent in suspicious sacks we want to do detection too. 
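 *
 * Roughly, a condensed reading of the branches below (a sketch,
 * not a drop-in replacement):
 *
 *    do_detection =
 *        rack->sack_attack_disable || rack->rc_suspicious ||
 *        ((rack->do_detection || tcp_force_detection) &&
 *         (tcp_sack_to_ack_thresh > 0) &&
 *         (tcp_sack_to_move_thresh > 0) &&
 *         (rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum));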
10864 */ 10865 do_detection = 1; 10866 10867 } else if ((rack->do_detection || tcp_force_detection) && 10868 (tcp_sack_to_ack_thresh > 0) && 10869 (tcp_sack_to_move_thresh > 0) && 10870 (rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum)) { 10871 /* 10872 * We only detect here if: 10873 * 1) System wide forcing is on <or> do_detection is on 10874 * <and> 10875 * 2) We have thresholds for move and ack (set one to 0 and we are off) 10876 * <and> 10877 * 3) We have maps allocated larger than our min (500). 10878 */ 10879 do_detection = 1; 10880 } 10881 if (do_detection > 0) { 10882 /* 10883 * We have thresholds set to find 10884 * possible attackers and disable sack. 10885 * Check them. 10886 */ 10887 uint64_t ackratio, moveratio, movetotal; 10888 10889 /* Log detecting */ 10890 rack_log_sad(rack, 1); 10891 /* Do we establish a ack ratio */ 10892 if ((rack->r_ctl.sack_count > tcp_map_minimum) || 10893 (rack->rc_suspicious == 1) || 10894 (rack->sack_attack_disable > 0)) { 10895 ackratio = (uint64_t)(rack->r_ctl.sack_count); 10896 ackratio *= (uint64_t)(1000); 10897 if (rack->r_ctl.ack_count) 10898 ackratio /= (uint64_t)(rack->r_ctl.ack_count); 10899 else { 10900 /* We can hit this due to ack totals degregation (via small sacks) */ 10901 ackratio = 1000; 10902 } 10903 } else { 10904 /* 10905 * No ack ratio needed if we have not 10906 * seen more sacks then the number of map entries. 10907 * The exception to that is if we have disabled sack then 10908 * we need to find a ratio. 10909 */ 10910 ackratio = 0; 10911 } 10912 10913 if ((rack->sack_attack_disable == 0) && 10914 (ackratio > rack_highest_sack_thresh_seen)) 10915 rack_highest_sack_thresh_seen = (uint32_t)ackratio; 10916 /* Do we establish a move ratio? */ 10917 if ((rack->r_ctl.sack_moved_extra > tcp_map_minimum) || 10918 (rack->rc_suspicious == 1) || 10919 (rack->sack_attack_disable > 0)) { 10920 /* 10921 * We need to have more sack moves than maps 10922 * allocated to have a move ratio considered. 10923 */ 10924 movetotal = rack->r_ctl.sack_moved_extra; 10925 movetotal += rack->r_ctl.sack_noextra_move; 10926 moveratio = rack->r_ctl.sack_moved_extra; 10927 moveratio *= (uint64_t)1000; 10928 if (movetotal) 10929 moveratio /= movetotal; 10930 else { 10931 /* No moves, thats pretty good */ 10932 moveratio = 0; 10933 } 10934 } else { 10935 /* 10936 * Not enough moves have occured to consider 10937 * if we are out of whack in that ratio. 10938 * The exception to that is if we have disabled sack then 10939 * we need to find a ratio. 10940 */ 10941 moveratio = 0; 10942 } 10943 if ((rack->sack_attack_disable == 0) && 10944 (moveratio > rack_highest_move_thresh_seen)) 10945 rack_highest_move_thresh_seen = (uint32_t)moveratio; 10946 /* Now the tests */ 10947 if (rack->sack_attack_disable == 0) { 10948 /* Not disabled, do we need to disable? 
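 * A worked example of the test below (all numbers made up): with
 * sack_count = 2500 and ack_count = 400 the scaled ack ratio is
 * 2500 * 1000 / 400 = 6250; with sack_moved_extra = 900 and
 * sack_noextra_move = 100 the move ratio is 900 * 1000 / 1000 = 900.
 * Only if both exceed tcp_sack_to_ack_thresh and
 * tcp_sack_to_move_thresh respectively is SACK processing disabled.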
*/ 10949 if ((ackratio > tcp_sack_to_ack_thresh) && 10950 (moveratio > tcp_sack_to_move_thresh)) { 10951 /* Disable sack processing */ 10952 tcp_trace_point(rack->rc_tp, TCP_TP_SAD_TRIGGERED); 10953 rack->sack_attack_disable = 1; 10954 /* set it so we have the built in delay */ 10955 rack->r_ctl.ack_during_sd = 1; 10956 if (rack_merge_out_sacks_on_attack) 10957 rack_merge_out_sacks(rack); 10958 counter_u64_add(rack_sack_attacks_detected, 1); 10959 tcp_trace_point(rack->rc_tp, TCP_TP_SAD_TRIGGERED); 10960 /* Clamp the cwnd at flight size */ 10961 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd; 10962 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 10963 rack_log_sad(rack, 2); 10964 } 10965 } else { 10966 /* We are sack-disabled check for false positives */ 10967 if ((ackratio <= tcp_restoral_thresh) || 10968 ((rack_merge_out_sacks_on_attack == 0) && 10969 (rack->rc_suspicious == 0) && 10970 (rack->r_ctl.rc_num_maps_alloced <= (tcp_map_minimum/2)))) { 10971 rack->sack_attack_disable = 0; 10972 rack_log_sad(rack, 3); 10973 /* Restart counting */ 10974 rack->r_ctl.sack_count = 0; 10975 rack->r_ctl.sack_moved_extra = 0; 10976 rack->r_ctl.sack_noextra_move = 1; 10977 rack->rc_suspicious = 0; 10978 rack->r_ctl.ack_count = max(1, 10979 (bytes_this_ack / segsiz)); 10980 10981 counter_u64_add(rack_sack_attacks_reversed, 1); 10982 /* Restore the cwnd */ 10983 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd) 10984 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd; 10985 } 10986 } 10987 } 10988 } 10989 #endif 10990 10991 static int 10992 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 10993 { 10994 10995 uint32_t am, l_end; 10996 int was_tlp = 0; 10997 10998 if (SEQ_GT(end, start)) 10999 am = end - start; 11000 else 11001 am = 0; 11002 if ((rack->rc_last_tlp_acked_set ) && 11003 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && 11004 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { 11005 /* 11006 * The DSACK is because of a TLP which we don't 11007 * do anything with the reordering window over since 11008 * it was not reordering that caused the DSACK but 11009 * our previous retransmit TLP. 11010 */ 11011 rack_log_dsack_event(rack, 7, __LINE__, start, end); 11012 was_tlp = 1; 11013 goto skip_dsack_round; 11014 } 11015 if (rack->rc_last_sent_tlp_seq_valid) { 11016 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; 11017 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && 11018 (SEQ_LEQ(end, l_end))) { 11019 /* 11020 * This dsack is from the last sent TLP, ignore it 11021 * for reordering purposes. 11022 */ 11023 rack_log_dsack_event(rack, 7, __LINE__, start, end); 11024 was_tlp = 1; 11025 goto skip_dsack_round; 11026 } 11027 } 11028 if (rack->rc_dsack_round_seen == 0) { 11029 rack->rc_dsack_round_seen = 1; 11030 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; 11031 rack->r_ctl.num_dsack++; 11032 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ 11033 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); 11034 } 11035 skip_dsack_round: 11036 /* 11037 * We keep track of how many DSACK blocks we get 11038 * after a recovery incident. 11039 */ 11040 rack->r_ctl.dsack_byte_cnt += am; 11041 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 11042 rack->r_ctl.retran_during_recovery && 11043 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 11044 /* 11045 * False recovery most likely culprit is reordering. If 11046 * nothing else is missing we need to revert. 
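 * For example (made-up byte counts): if 3 * 1448 bytes were
 * retransmitted during the recovery episode and DSACK blocks have
 * since covered at least that many bytes while we are no longer in
 * fast recovery, the episode is treated as spurious and the revert
 * path below runs; rack_handle_might_revert() still verifies that
 * nothing in the transmit map is marked RACK_SACK_PASSED before
 * actually undoing the congestion response.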
11047 */ 11048 rack->r_might_revert = 1; 11049 rack_handle_might_revert(rack->rc_tp, rack); 11050 rack->r_might_revert = 0; 11051 rack->r_ctl.retran_during_recovery = 0; 11052 rack->r_ctl.dsack_byte_cnt = 0; 11053 } 11054 return (was_tlp); 11055 } 11056 11057 static uint32_t 11058 do_rack_compute_pipe(struct tcpcb *tp, struct tcp_rack *rack, uint32_t snd_una) 11059 { 11060 return (((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt); 11061 } 11062 11063 static int32_t 11064 rack_compute_pipe(struct tcpcb *tp) 11065 { 11066 return ((int32_t)do_rack_compute_pipe(tp, 11067 (struct tcp_rack *)tp->t_fb_ptr, 11068 tp->snd_una)); 11069 } 11070 11071 static void 11072 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 11073 { 11074 /* Deal with changed and PRR here (in recovery only) */ 11075 uint32_t pipe, snd_una; 11076 11077 rack->r_ctl.rc_prr_delivered += changed; 11078 11079 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 11080 /* 11081 * It is all outstanding, we are application limited 11082 * and thus we don't need more room to send anything. 11083 * Note we use tp->snd_una here and not th_ack because 11084 * the data as yet not been cut from the sb. 11085 */ 11086 rack->r_ctl.rc_prr_sndcnt = 0; 11087 return; 11088 } 11089 /* Compute prr_sndcnt */ 11090 if (SEQ_GT(tp->snd_una, th_ack)) { 11091 snd_una = tp->snd_una; 11092 } else { 11093 snd_una = th_ack; 11094 } 11095 pipe = do_rack_compute_pipe(tp, rack, snd_una); 11096 if (pipe > tp->snd_ssthresh) { 11097 long sndcnt; 11098 11099 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 11100 if (rack->r_ctl.rc_prr_recovery_fs > 0) 11101 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 11102 else { 11103 rack->r_ctl.rc_prr_sndcnt = 0; 11104 rack_log_to_prr(rack, 9, 0, __LINE__); 11105 sndcnt = 0; 11106 } 11107 sndcnt++; 11108 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 11109 sndcnt -= rack->r_ctl.rc_prr_out; 11110 else 11111 sndcnt = 0; 11112 rack->r_ctl.rc_prr_sndcnt = sndcnt; 11113 rack_log_to_prr(rack, 10, 0, __LINE__); 11114 } else { 11115 uint32_t limit; 11116 11117 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 11118 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 11119 else 11120 limit = 0; 11121 if (changed > limit) 11122 limit = changed; 11123 limit += ctf_fixed_maxseg(tp); 11124 if (tp->snd_ssthresh > pipe) { 11125 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 11126 rack_log_to_prr(rack, 11, 0, __LINE__); 11127 } else { 11128 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 11129 rack_log_to_prr(rack, 12, 0, __LINE__); 11130 } 11131 } 11132 } 11133 11134 static void 11135 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck, 11136 int *dsack_seen, int *sacks_seen) 11137 { 11138 uint32_t changed; 11139 struct tcp_rack *rack; 11140 struct rack_sendmap *rsm; 11141 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 11142 register uint32_t th_ack; 11143 int32_t i, j, k, num_sack_blks = 0; 11144 uint32_t cts, acked, ack_point; 11145 int loop_start = 0, moved_two = 0, no_extra = 0; 11146 uint32_t tsused; 11147 uint32_t segsiz, o_cnt; 11148 11149 11150 INP_WLOCK_ASSERT(tptoinpcb(tp)); 11151 if (tcp_get_flags(th) & TH_RST) { 11152 /* We don't log resets */ 11153 return; 11154 } 11155 rack = (struct tcp_rack *)tp->t_fb_ptr; 11156 cts = tcp_get_usecs(NULL); 11157 rsm = tqhash_min(rack->r_ctl.tqh); 11158 changed = 0; 11159 th_ack = th->th_ack; 
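	/*
	 * A worked example of the credit taken below (illustrative
	 * numbers only): a cum-ack that advances snd_una by 4344 bytes
	 * with a 1448-byte segsiz adds 3 to ack_count (4344 / 1448),
	 * while a sub-MSS advance adds nothing.
	 */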
11160 if (rack->sack_attack_disable == 0) 11161 rack_do_decay(rack); 11162 segsiz = ctf_fixed_maxseg(rack->rc_tp); 11163 if (BYTES_THIS_ACK(tp, th) >= segsiz) { 11164 /* 11165 * You only get credit for 11166 * MSS and greater (and you get extra 11167 * credit for larger cum-ack moves). 11168 */ 11169 int ac; 11170 11171 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 11172 rack->r_ctl.ack_count += ac; 11173 counter_u64_add(rack_ack_total, ac); 11174 } 11175 if (rack->r_ctl.ack_count > 0xfff00000) { 11176 /* 11177 * reduce the number to keep us under 11178 * a uint32_t. 11179 */ 11180 rack->r_ctl.ack_count /= 2; 11181 rack->r_ctl.sack_count /= 2; 11182 } 11183 if (SEQ_GT(th_ack, tp->snd_una)) { 11184 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 11185 tp->t_acktime = ticks; 11186 } 11187 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 11188 changed = th_ack - rsm->r_start; 11189 if (changed) { 11190 rack_process_to_cumack(tp, rack, th_ack, cts, to, 11191 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); 11192 } 11193 if ((to->to_flags & TOF_SACK) == 0) { 11194 /* We are done nothing left and no sack. */ 11195 rack_handle_might_revert(tp, rack); 11196 /* 11197 * For cases where we struck a dup-ack 11198 * with no SACK, add to the changes so 11199 * PRR will work right. 11200 */ 11201 if (dup_ack_struck && (changed == 0)) { 11202 changed += ctf_fixed_maxseg(rack->rc_tp); 11203 } 11204 goto out; 11205 } 11206 /* Sack block processing */ 11207 if (SEQ_GT(th_ack, tp->snd_una)) 11208 ack_point = th_ack; 11209 else 11210 ack_point = tp->snd_una; 11211 for (i = 0; i < to->to_nsacks; i++) { 11212 bcopy((to->to_sacks + i * TCPOLEN_SACK), 11213 &sack, sizeof(sack)); 11214 sack.start = ntohl(sack.start); 11215 sack.end = ntohl(sack.end); 11216 if (SEQ_GT(sack.end, sack.start) && 11217 SEQ_GT(sack.start, ack_point) && 11218 SEQ_LT(sack.start, tp->snd_max) && 11219 SEQ_GT(sack.end, ack_point) && 11220 SEQ_LEQ(sack.end, tp->snd_max)) { 11221 sack_blocks[num_sack_blks] = sack; 11222 num_sack_blks++; 11223 } else if (SEQ_LEQ(sack.start, th_ack) && 11224 SEQ_LEQ(sack.end, th_ack)) { 11225 int was_tlp; 11226 11227 if (dsack_seen != NULL) 11228 *dsack_seen = 1; 11229 was_tlp = rack_note_dsack(rack, sack.start, sack.end); 11230 /* 11231 * Its a D-SACK block. 11232 */ 11233 tcp_record_dsack(tp, sack.start, sack.end, was_tlp); 11234 } 11235 } 11236 if (rack->rc_dsack_round_seen) { 11237 /* Is the dsack roound over? */ 11238 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { 11239 /* Yes it is */ 11240 rack->rc_dsack_round_seen = 0; 11241 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 11242 } 11243 } 11244 /* 11245 * Sort the SACK blocks so we can update the rack scoreboard with 11246 * just one pass. 11247 */ 11248 o_cnt = num_sack_blks; 11249 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks, 11250 num_sack_blks, th->th_ack); 11251 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 11252 if (sacks_seen != NULL) 11253 *sacks_seen = num_sack_blks; 11254 if (num_sack_blks == 0) { 11255 /* Nothing to sack, but we need to update counts */ 11256 if ((o_cnt == 1) && 11257 (*dsack_seen != 1)) 11258 rack->r_ctl.sack_count++; 11259 else if (o_cnt > 1) 11260 rack->r_ctl.sack_count++; 11261 goto out_with_totals; 11262 } 11263 if (rack->sack_attack_disable) { 11264 /* 11265 * An attacker disablement is in place, for 11266 * every sack block that is not at least a full MSS 11267 * count up sack_count. 
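 * For example (made-up block sizes, segsiz = 1448): SACK blocks of
 * 200, 1448 and 700 bytes add 2 to sack_count in the loop below,
 * since only the two sub-MSS blocks count against the sender.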
11268 */ 11269 for (i = 0; i < num_sack_blks; i++) { 11270 if ((sack_blocks[i].end - sack_blocks[i].start) < segsiz) { 11271 rack->r_ctl.sack_count++; 11272 } 11273 if (rack->r_ctl.sack_count > 0xfff00000) { 11274 /* 11275 * reduce the number to keep us under 11276 * a uint32_t. 11277 */ 11278 rack->r_ctl.ack_count /= 2; 11279 rack->r_ctl.sack_count /= 2; 11280 } 11281 } 11282 goto out; 11283 } 11284 /* Its a sack of some sort */ 11285 rack->r_ctl.sack_count += num_sack_blks; 11286 if (rack->r_ctl.sack_count > 0xfff00000) { 11287 /* 11288 * reduce the number to keep us under 11289 * a uint32_t. 11290 */ 11291 rack->r_ctl.ack_count /= 2; 11292 rack->r_ctl.sack_count /= 2; 11293 } 11294 if (num_sack_blks < 2) { 11295 /* Only one, we don't need to sort */ 11296 goto do_sack_work; 11297 } 11298 /* Sort the sacks */ 11299 for (i = 0; i < num_sack_blks; i++) { 11300 for (j = i + 1; j < num_sack_blks; j++) { 11301 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 11302 sack = sack_blocks[i]; 11303 sack_blocks[i] = sack_blocks[j]; 11304 sack_blocks[j] = sack; 11305 } 11306 } 11307 } 11308 /* 11309 * Now are any of the sack block ends the same (yes some 11310 * implementations send these)? 11311 */ 11312 again: 11313 if (num_sack_blks == 0) 11314 goto out_with_totals; 11315 if (num_sack_blks > 1) { 11316 for (i = 0; i < num_sack_blks; i++) { 11317 for (j = i + 1; j < num_sack_blks; j++) { 11318 if (sack_blocks[i].end == sack_blocks[j].end) { 11319 /* 11320 * Ok these two have the same end we 11321 * want the smallest end and then 11322 * throw away the larger and start 11323 * again. 11324 */ 11325 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 11326 /* 11327 * The second block covers 11328 * more area use that 11329 */ 11330 sack_blocks[i].start = sack_blocks[j].start; 11331 } 11332 /* 11333 * Now collapse out the dup-sack and 11334 * lower the count 11335 */ 11336 for (k = (j + 1); k < num_sack_blks; k++) { 11337 sack_blocks[j].start = sack_blocks[k].start; 11338 sack_blocks[j].end = sack_blocks[k].end; 11339 j++; 11340 } 11341 num_sack_blks--; 11342 goto again; 11343 } 11344 } 11345 } 11346 } 11347 do_sack_work: 11348 /* 11349 * First lets look to see if 11350 * we have retransmitted and 11351 * can use the transmit next? 11352 */ 11353 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 11354 if (rsm && 11355 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 11356 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 11357 /* 11358 * We probably did the FR and the next 11359 * SACK in continues as we would expect. 11360 */ 11361 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &no_extra, &moved_two, segsiz); 11362 if (acked) { 11363 rack->r_wanted_output = 1; 11364 changed += acked; 11365 } 11366 if (num_sack_blks == 1) { 11367 /* 11368 * This is what we would expect from 11369 * a normal implementation to happen 11370 * after we have retransmitted the FR, 11371 * i.e the sack-filter pushes down 11372 * to 1 block and the next to be retransmitted 11373 * is the sequence in the sack block (has more 11374 * are acked). Count this as ACK'd data to boost 11375 * up the chances of recovering any false positives. 11376 */ 11377 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp)); 11378 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 11379 counter_u64_add(rack_express_sack, 1); 11380 if (rack->r_ctl.ack_count > 0xfff00000) { 11381 /* 11382 * reduce the number to keep us under 11383 * a uint32_t. 
11384 */ 11385 rack->r_ctl.ack_count /= 2; 11386 rack->r_ctl.sack_count /= 2; 11387 } 11388 if (moved_two) { 11389 /* 11390 * If we did not get a SACK for at least a MSS and 11391 * had to move at all, or if we moved more than our 11392 * threshold, it counts against the "extra" move. 11393 */ 11394 rack->r_ctl.sack_moved_extra += moved_two; 11395 rack->r_ctl.sack_noextra_move += no_extra; 11396 counter_u64_add(rack_move_some, 1); 11397 } else { 11398 /* 11399 * else we did not have to move 11400 * any more than we would expect. 11401 */ 11402 rack->r_ctl.sack_noextra_move += no_extra; 11403 rack->r_ctl.sack_noextra_move++; 11404 counter_u64_add(rack_move_none, 1); 11405 } 11406 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) || 11407 (rack->r_ctl.sack_noextra_move > 0xfff00000)) { 11408 rack->r_ctl.sack_moved_extra /= 2; 11409 rack->r_ctl.sack_noextra_move /= 2; 11410 } 11411 goto out_with_totals; 11412 } else { 11413 /* 11414 * Start the loop through the 11415 * rest of blocks, past the first block. 11416 */ 11417 loop_start = 1; 11418 } 11419 } 11420 counter_u64_add(rack_sack_total, 1); 11421 rsm = rack->r_ctl.rc_sacklast; 11422 for (i = loop_start; i < num_sack_blks; i++) { 11423 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &no_extra, &moved_two, segsiz); 11424 if (acked) { 11425 rack->r_wanted_output = 1; 11426 changed += acked; 11427 } 11428 if (moved_two) { 11429 /* 11430 * If we did not get a SACK for at least a MSS and 11431 * had to move at all, or if we moved more than our 11432 * threshold, it counts against the "extra" move. 11433 */ 11434 rack->r_ctl.sack_moved_extra += moved_two; 11435 rack->r_ctl.sack_noextra_move += no_extra; 11436 counter_u64_add(rack_move_some, 1); 11437 } else { 11438 /* 11439 * else we did not have to move 11440 * any more than we would expect. 11441 */ 11442 rack->r_ctl.sack_noextra_move += no_extra; 11443 rack->r_ctl.sack_noextra_move++; 11444 counter_u64_add(rack_move_none, 1); 11445 } 11446 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) || 11447 (rack->r_ctl.sack_noextra_move > 0xfff00000)) { 11448 rack->r_ctl.sack_moved_extra /= 2; 11449 rack->r_ctl.sack_noextra_move /= 2; 11450 } 11451 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) { 11452 /* 11453 * If the SACK was not a full MSS then 11454 * we add to sack_count the number of 11455 * MSS's (or possibly more than 11456 * a MSS if its a TSO send) we had to skip by. 11457 */ 11458 rack->r_ctl.sack_count += moved_two; 11459 if (rack->r_ctl.sack_count > 0xfff00000) { 11460 rack->r_ctl.ack_count /= 2; 11461 rack->r_ctl.sack_count /= 2; 11462 } 11463 counter_u64_add(rack_sack_total, moved_two); 11464 } 11465 /* 11466 * Now we need to setup for the next 11467 * round. First we make sure we won't 11468 * exceed the size of our uint32_t on 11469 * the various counts, and then clear out 11470 * moved_two. 11471 */ 11472 moved_two = 0; 11473 no_extra = 0; 11474 } 11475 out_with_totals: 11476 if (num_sack_blks > 1) { 11477 /* 11478 * You get an extra stroke if 11479 * you have more than one sack-blk, this 11480 * could be where we are skipping forward 11481 * and the sack-filter is still working, or 11482 * it could be an attacker constantly 11483 * moving us. 
11484 */ 11485 rack->r_ctl.sack_moved_extra++; 11486 counter_u64_add(rack_move_some, 1); 11487 } 11488 out: 11489 #ifdef TCP_SAD_DETECTION 11490 rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp)); 11491 #endif 11492 if (changed) { 11493 /* Something changed cancel the rack timer */ 11494 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11495 } 11496 tsused = tcp_get_usecs(NULL); 11497 rsm = tcp_rack_output(tp, rack, tsused); 11498 if ((!IN_FASTRECOVERY(tp->t_flags)) && 11499 rsm && 11500 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { 11501 /* Enter recovery */ 11502 entered_recovery = 1; 11503 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 11504 /* 11505 * When we enter recovery we need to assure we send 11506 * one packet. 11507 */ 11508 if (rack->rack_no_prr == 0) { 11509 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 11510 rack_log_to_prr(rack, 8, 0, __LINE__); 11511 } 11512 rack->r_timer_override = 1; 11513 rack->r_early = 0; 11514 rack->r_ctl.rc_agg_early = 0; 11515 } else if (IN_FASTRECOVERY(tp->t_flags) && 11516 rsm && 11517 (rack->r_rr_config == 3)) { 11518 /* 11519 * Assure we can output and we get no 11520 * remembered pace time except the retransmit. 11521 */ 11522 rack->r_timer_override = 1; 11523 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 11524 rack->r_ctl.rc_resend = rsm; 11525 } 11526 if (IN_FASTRECOVERY(tp->t_flags) && 11527 (rack->rack_no_prr == 0) && 11528 (entered_recovery == 0)) { 11529 rack_update_prr(tp, rack, changed, th_ack); 11530 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 11531 ((tcp_in_hpts(rack->rc_tp) == 0) && 11532 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 11533 /* 11534 * If you are pacing output you don't want 11535 * to override. 11536 */ 11537 rack->r_early = 0; 11538 rack->r_ctl.rc_agg_early = 0; 11539 rack->r_timer_override = 1; 11540 } 11541 } 11542 } 11543 11544 static void 11545 rack_strike_dupack(struct tcp_rack *rack) 11546 { 11547 struct rack_sendmap *rsm; 11548 11549 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 11550 while (rsm) { 11551 /* 11552 * We need to skip anything already set 11553 * to be retransmitted. 11554 */ 11555 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 11556 (rsm->r_flags & RACK_MUST_RXT)) { 11557 rsm = TAILQ_NEXT(rsm, r_tnext); 11558 continue; 11559 } 11560 break; 11561 } 11562 if (rsm && (rsm->r_dupack < 0xff)) { 11563 rsm->r_dupack++; 11564 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 11565 struct timeval tv; 11566 uint32_t cts; 11567 /* 11568 * Here we see if we need to retransmit. For 11569 * a SACK type connection if enough time has passed 11570 * we will get a return of the rsm. For a non-sack 11571 * connection we will get the rsm returned if the 11572 * dupack value is 3 or more. 
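			 * (Roughly: tcp_rack_output() keys off the RACK time
			 * criteria for a SACK connection, and otherwise off
			 * r_dupack reaching DUP_ACK_THRESHOLD as counted
			 * just above.)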
11573 */ 11574 cts = tcp_get_usecs(&tv); 11575 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); 11576 if (rack->r_ctl.rc_resend != NULL) { 11577 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { 11578 rack_cong_signal(rack->rc_tp, CC_NDUPACK, 11579 rack->rc_tp->snd_una, __LINE__); 11580 } 11581 rack->r_wanted_output = 1; 11582 rack->r_timer_override = 1; 11583 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 11584 } 11585 } else { 11586 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 11587 } 11588 } 11589 } 11590 11591 static void 11592 rack_check_bottom_drag(struct tcpcb *tp, 11593 struct tcp_rack *rack, 11594 struct socket *so) 11595 { 11596 uint32_t segsiz, minseg; 11597 11598 segsiz = ctf_fixed_maxseg(tp); 11599 minseg = segsiz; 11600 if (tp->snd_max == tp->snd_una) { 11601 /* 11602 * We are doing dynamic pacing and we are way 11603 * under. Basically everything got acked while 11604 * we were still waiting on the pacer to expire. 11605 * 11606 * This means we need to boost the b/w in 11607 * addition to any earlier boosting of 11608 * the multiplier. 11609 */ 11610 uint64_t lt_bw; 11611 11612 lt_bw = rack_get_lt_bw(rack); 11613 rack->rc_dragged_bottom = 1; 11614 rack_validate_multipliers_at_or_above100(rack); 11615 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && 11616 (lt_bw > 0)) { 11617 /* 11618 * Lets use the long-term b/w we have 11619 * been getting as a base. 11620 */ 11621 if (rack->rc_gp_filled == 0) { 11622 if (lt_bw > ONE_POINT_TWO_MEG) { 11623 /* 11624 * If we have no measurement 11625 * don't let us set in more than 11626 * 1.2Mbps. If we are still too 11627 * low after pacing with this we 11628 * will hopefully have a max b/w 11629 * available to sanity check things. 11630 */ 11631 lt_bw = ONE_POINT_TWO_MEG; 11632 } 11633 rack->r_ctl.rc_rtt_diff = 0; 11634 rack->r_ctl.gp_bw = lt_bw; 11635 rack->rc_gp_filled = 1; 11636 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 11637 rack->r_ctl.num_measurements = RACK_REQ_AVG; 11638 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 11639 } else if (lt_bw > rack->r_ctl.gp_bw) { 11640 rack->r_ctl.rc_rtt_diff = 0; 11641 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 11642 rack->r_ctl.num_measurements = RACK_REQ_AVG; 11643 rack->r_ctl.gp_bw = lt_bw; 11644 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 11645 } else 11646 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11647 if ((rack->gp_ready == 0) && 11648 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 11649 /* We have enough measurements now */ 11650 rack->gp_ready = 1; 11651 if (rack->dgp_on || 11652 rack->rack_hibeta) 11653 rack_set_cc_pacing(rack); 11654 if (rack->defer_options) 11655 rack_apply_deferred_options(rack); 11656 } 11657 } else { 11658 /* 11659 * zero rtt possibly?, settle for just an old increase. 11660 */ 11661 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11662 } 11663 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 11664 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), 11665 minseg)) && 11666 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && 11667 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && 11668 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= 11669 (segsiz * rack_req_segs))) { 11670 /* 11671 * We are doing dynamic GP pacing and 11672 * we have everything except 1MSS or less 11673 * bytes left out. We are still pacing away. 
11674 * And there is data that could be sent, This 11675 * means we are inserting delayed ack time in 11676 * our measurements because we are pacing too slow. 11677 */ 11678 rack_validate_multipliers_at_or_above100(rack); 11679 rack->rc_dragged_bottom = 1; 11680 rack_increase_bw_mul(rack, -1, 0, 0, 1); 11681 } 11682 } 11683 11684 #ifdef TCP_REQUEST_TRK 11685 static void 11686 rack_log_hybrid(struct tcp_rack *rack, uint32_t seq, 11687 struct tcp_sendfile_track *cur, uint8_t mod, int line, int err) 11688 { 11689 int do_log; 11690 11691 do_log = tcp_bblogging_on(rack->rc_tp); 11692 if (do_log == 0) { 11693 if ((do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) )== 0) 11694 return; 11695 /* We only allow the three below with point logging on */ 11696 if ((mod != HYBRID_LOG_RULES_APP) && 11697 (mod != HYBRID_LOG_RULES_SET) && 11698 (mod != HYBRID_LOG_REQ_COMP)) 11699 return; 11700 11701 } 11702 if (do_log) { 11703 union tcp_log_stackspecific log; 11704 struct timeval tv; 11705 11706 /* Convert our ms to a microsecond */ 11707 memset(&log, 0, sizeof(log)); 11708 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 11709 log.u_bbr.flex1 = seq; 11710 log.u_bbr.cwnd_gain = line; 11711 if (cur != NULL) { 11712 uint64_t off; 11713 11714 log.u_bbr.flex2 = cur->start_seq; 11715 log.u_bbr.flex3 = cur->end_seq; 11716 log.u_bbr.flex4 = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); 11717 log.u_bbr.flex5 = (uint32_t)(cur->localtime & 0x00000000ffffffff); 11718 log.u_bbr.flex6 = cur->flags; 11719 log.u_bbr.pkts_out = cur->hybrid_flags; 11720 log.u_bbr.rttProp = cur->timestamp; 11721 log.u_bbr.cur_del_rate = cur->cspr; 11722 log.u_bbr.bw_inuse = cur->start; 11723 log.u_bbr.applimited = (uint32_t)(cur->end & 0x00000000ffffffff); 11724 log.u_bbr.delivered = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff) ; 11725 log.u_bbr.epoch = (uint32_t)(cur->deadline & 0x00000000ffffffff); 11726 log.u_bbr.lt_epoch = (uint32_t)((cur->deadline >> 32) & 0x00000000ffffffff) ; 11727 log.u_bbr.bbr_state = 1; 11728 #ifdef TCP_REQUEST_TRK 11729 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); 11730 log.u_bbr.use_lt_bw = (uint8_t)(off / sizeof(struct tcp_sendfile_track)); 11731 #endif 11732 } else { 11733 log.u_bbr.flex2 = err; 11734 } 11735 /* 11736 * Fill in flex7 to be CHD (catchup|hybrid|DGP) 11737 */ 11738 log.u_bbr.flex7 = rack->rc_catch_up; 11739 log.u_bbr.flex7 <<= 1; 11740 log.u_bbr.flex7 |= rack->rc_hybrid_mode; 11741 log.u_bbr.flex7 <<= 1; 11742 log.u_bbr.flex7 |= rack->dgp_on; 11743 log.u_bbr.flex8 = mod; 11744 log.u_bbr.delRate = rack->r_ctl.bw_rate_cap; 11745 log.u_bbr.bbr_substate = rack->r_ctl.client_suggested_maxseg; 11746 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 11747 log.u_bbr.pkt_epoch = rack->rc_tp->tcp_hybrid_start; 11748 log.u_bbr.lost = rack->rc_tp->tcp_hybrid_error; 11749 log.u_bbr.pacing_gain = (uint16_t)rack->rc_tp->tcp_hybrid_stop; 11750 tcp_log_event(rack->rc_tp, NULL, 11751 &rack->rc_inp->inp_socket->so_rcv, 11752 &rack->rc_inp->inp_socket->so_snd, 11753 TCP_HYBRID_PACING_LOG, 0, 11754 0, &log, false, NULL, __func__, __LINE__, &tv); 11755 } 11756 } 11757 #endif 11758 11759 #ifdef TCP_REQUEST_TRK 11760 static void 11761 rack_set_dgp_hybrid_mode(struct tcp_rack *rack, tcp_seq seq, uint32_t len) 11762 { 11763 struct tcp_sendfile_track *rc_cur; 11764 struct tcpcb *tp; 11765 int err = 0; 11766 11767 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, seq); 11768 if (rc_cur == NULL) { 11769 /* If not in the beginning what about the end 
piece */ 11770 if (rack->rc_hybrid_mode) 11771 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); 11772 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, (seq + len - 1)); 11773 } else { 11774 err = 12345; 11775 } 11776 /* If we find no parameters we are in straight DGP mode */ 11777 if(rc_cur == NULL) { 11778 /* None found for this seq, just DGP for now */ 11779 rack->r_ctl.client_suggested_maxseg = 0; 11780 rack->rc_catch_up = 0; 11781 rack->r_ctl.bw_rate_cap = 0; 11782 if (rack->rc_hybrid_mode) 11783 rack_log_hybrid(rack, (seq + len - 1), NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); 11784 if (rack->r_ctl.rc_last_sft) { 11785 rack->r_ctl.rc_last_sft = NULL; 11786 } 11787 return; 11788 } 11789 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_WASSET) == 0) { 11790 /* This entry was never setup for hybrid pacing on/off etc */ 11791 return; 11792 } 11793 /* 11794 * Ok if we have a new entry *or* have never 11795 * set up an entry we need to proceed. If 11796 * we have already set it up this entry we 11797 * just continue along with what we already 11798 * setup. 11799 */ 11800 tp = rack->rc_tp; 11801 if ((rack->r_ctl.rc_last_sft != NULL) && 11802 (rack->r_ctl.rc_last_sft == rc_cur)) { 11803 /* Its already in place */ 11804 if (rack->rc_hybrid_mode) 11805 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_ISSAME, __LINE__, 0); 11806 return; 11807 } 11808 if (rack->rc_hybrid_mode == 0) { 11809 rack->r_ctl.rc_last_sft = rc_cur; 11810 rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0); 11811 return; 11812 } 11813 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CSPR) && rc_cur->cspr){ 11814 /* Compensate for all the header overhead's */ 11815 rack->r_ctl.bw_rate_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); 11816 } else 11817 rack->r_ctl.bw_rate_cap = 0; 11818 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_H_MS) 11819 rack->r_ctl.client_suggested_maxseg = rc_cur->hint_maxseg; 11820 else 11821 rack->r_ctl.client_suggested_maxseg = 0; 11822 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CU) && 11823 (rc_cur->cspr > 0)) { 11824 uint64_t len; 11825 11826 rack->rc_catch_up = 1; 11827 /* 11828 * Calculate the deadline time, first set the 11829 * time to when the request arrived. 11830 */ 11831 rc_cur->deadline = rc_cur->localtime; 11832 /* 11833 * Next calculate the length and compensate for 11834 * TLS if need be. 11835 */ 11836 len = rc_cur->end - rc_cur->start; 11837 if (tp->t_inpcb.inp_socket->so_snd.sb_tls_info) { 11838 /* 11839 * This session is doing TLS. Take a swag guess 11840 * at the overhead. 11841 */ 11842 len += tcp_estimate_tls_overhead(tp->t_inpcb.inp_socket, len); 11843 } 11844 /* 11845 * Now considering the size, and the cspr, what is the time that 11846 * would be required at the cspr rate. Here we use the raw 11847 * cspr value since the client only looks at the raw data. We 11848 * do use len which includes TLS overhead, but not the TCP/IP etc. 11849 * That will get made up for in the CU pacing rate set. 11850 */ 11851 len *= HPTS_USEC_IN_SEC; 11852 len /= rc_cur->cspr; 11853 rc_cur->deadline += len; 11854 } else { 11855 rack->rc_catch_up = 0; 11856 rc_cur->deadline = 0; 11857 } 11858 if (rack->r_ctl.client_suggested_maxseg != 0) { 11859 /* 11860 * We need to reset the max pace segs if we have a 11861 * client_suggested_maxseg. 
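		 * The call below re-runs the pacing segment sizing so
		 * the client's hint is reflected in the pace segment
		 * values used from here on.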
		 */
		rack_set_pace_segments(tp, rack, __LINE__, NULL);
	}
	rack_log_hybrid(rack, seq, rc_cur, HYBRID_LOG_RULES_APP, __LINE__, 0);
	/* Remember it for next time and for CU mode */
	rack->r_ctl.rc_last_sft = rc_cur;
}
#endif

static void
rack_chk_req_and_hybrid_on_out(struct tcp_rack *rack, tcp_seq seq, uint32_t len, uint64_t cts)
{
#ifdef TCP_REQUEST_TRK
	struct tcp_sendfile_track *ent;

	ent = rack->r_ctl.rc_last_sft;
	if ((ent == NULL) ||
	    (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) ||
	    (SEQ_GEQ(seq, ent->end_seq))) {
		/* Time to update the track. */
		rack_set_dgp_hybrid_mode(rack, seq, len);
		ent = rack->r_ctl.rc_last_sft;
	}
	/* Out of all */
	if (ent == NULL) {
		return;
	}
	if (SEQ_LT(ent->end_seq, (seq + len))) {
		/*
		 * This is the case where our end_seq guess
		 * was wrong. This is usually due to TLS having
		 * more bytes than our guess. It could also be the
		 * case that the client sent in two requests close
		 * together and the SB holds both, so we are sending part
		 * of each (end|beg). In such a case let's move this
		 * entry's end to match the end of this send. That
		 * way it will complete when all of it is acked.
		 */
		ent->end_seq = (seq + len);
		if (rack->rc_hybrid_mode)
			rack_log_hybrid_bw(rack, seq, len, 0, 0, HYBRID_LOG_EXTEND, 0, ent, __LINE__);
	}
	/* Now validate we have set the send time of this one */
	if ((ent->flags & TCP_TRK_TRACK_FLG_FSND) == 0) {
		ent->flags |= TCP_TRK_TRACK_FLG_FSND;
		ent->first_send = cts;
		ent->sent_at_fs = rack->rc_tp->t_sndbytes;
		ent->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes;
	}
#endif
}

static void
rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount)
{
	/*
	 * The fast output path is enabled and we
	 * have moved the cumack forward. Let's see if
	 * we can expand the fast path length forward by
	 * that amount. What we would ideally like to
	 * do is increase the number of bytes in the
	 * fast path block (left_to_send) by the
	 * acked amount. However we have to gate that
	 * by two factors:
	 * 1) The amount outstanding and the rwnd of the peer
	 *    (i.e. we don't want to exceed the rwnd of the peer).
	 *    <and>
	 * 2) The amount of data left in the socket buffer (i.e.
	 *    we can't send beyond what is in the buffer).
	 *
	 * Note that this does not take into account any increase
	 * in the cwnd. We will only extend the fast path by
	 * what was acked.
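	 *
	 * In other words, matching the code below:
	 *   new_total  = left_to_send + acked_amount
	 *   gating_val = min(sbavail(&so->so_snd) - (snd_max - snd_una),
	 *                    snd_wnd - (snd_max - snd_una))
	 * and left_to_send is only raised to new_total when
	 * new_total <= gating_val.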
11935 */ 11936 uint32_t new_total, gating_val; 11937 11938 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; 11939 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), 11940 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); 11941 if (new_total <= gating_val) { 11942 /* We can increase left_to_send by the acked amount */ 11943 counter_u64_add(rack_extended_rfo, 1); 11944 rack->r_ctl.fsb.left_to_send = new_total; 11945 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))), 11946 ("rack:%p left_to_send:%u sbavail:%u out:%u", 11947 rack, rack->r_ctl.fsb.left_to_send, 11948 sbavail(&rack->rc_inp->inp_socket->so_snd), 11949 (tp->snd_max - tp->snd_una))); 11950 11951 } 11952 } 11953 11954 static void 11955 rack_adjust_sendmap_head(struct tcp_rack *rack, struct sockbuf *sb) 11956 { 11957 /* 11958 * Here any sendmap entry that points to the 11959 * beginning mbuf must be adjusted to the correct 11960 * offset. This must be called with: 11961 * 1) The socket buffer locked 11962 * 2) snd_una adjusted to its new position. 11963 * 11964 * Note that (2) implies rack_ack_received has also 11965 * been called and all the sbcut's have been done. 11966 * 11967 * We grab the first mbuf in the socket buffer and 11968 * then go through the front of the sendmap, recalculating 11969 * the stored offset for any sendmap entry that has 11970 * that mbuf. We must use the sb functions to do this 11971 * since its possible an add was done has well as 11972 * the subtraction we may have just completed. This should 11973 * not be a penalty though, since we just referenced the sb 11974 * to go in and trim off the mbufs that we freed (of course 11975 * there will be a penalty for the sendmap references though). 11976 * 11977 * Note also with INVARIANT on, we validate with a KASSERT 11978 * that the first sendmap entry has a soff of 0. 11979 * 11980 */ 11981 struct mbuf *m; 11982 struct rack_sendmap *rsm; 11983 tcp_seq snd_una; 11984 #ifdef INVARIANTS 11985 int first_processed = 0; 11986 #endif 11987 11988 snd_una = rack->rc_tp->snd_una; 11989 SOCKBUF_LOCK_ASSERT(sb); 11990 m = sb->sb_mb; 11991 rsm = tqhash_min(rack->r_ctl.tqh); 11992 if ((rsm == NULL) || (m == NULL)) { 11993 /* Nothing outstanding */ 11994 return; 11995 } 11996 /* The very first RSM's mbuf must point to the head mbuf in the sb */ 11997 KASSERT((rsm->m == m), 11998 ("Rack:%p sb:%p rsm:%p -- first rsm mbuf not aligned to sb", 11999 rack, sb, rsm)); 12000 while (rsm->m && (rsm->m == m)) { 12001 /* one to adjust */ 12002 #ifdef INVARIANTS 12003 struct mbuf *tm; 12004 uint32_t soff; 12005 12006 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); 12007 if ((rsm->orig_m_len != m->m_len) || 12008 (rsm->orig_t_space != M_TRAILINGROOM(m))){ 12009 rack_adjust_orig_mlen(rsm); 12010 } 12011 if (first_processed == 0) { 12012 KASSERT((rsm->soff == 0), 12013 ("Rack:%p rsm:%p -- rsm at head but soff not zero", 12014 rack, rsm)); 12015 first_processed = 1; 12016 } 12017 if ((rsm->soff != soff) || (rsm->m != tm)) { 12018 /* 12019 * This is not a fatal error, we anticipate it 12020 * might happen (the else code), so we count it here 12021 * so that under invariant we can see that it really 12022 * does happen. 
12023 */ 12024 counter_u64_add(rack_adjust_map_bw, 1); 12025 } 12026 rsm->m = tm; 12027 rsm->soff = soff; 12028 if (tm) { 12029 rsm->orig_m_len = rsm->m->m_len; 12030 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 12031 } else { 12032 rsm->orig_m_len = 0; 12033 rsm->orig_t_space = 0; 12034 } 12035 #else 12036 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); 12037 if (rsm->m) { 12038 rsm->orig_m_len = rsm->m->m_len; 12039 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 12040 } else { 12041 rsm->orig_m_len = 0; 12042 rsm->orig_t_space = 0; 12043 } 12044 #endif 12045 rsm = tqhash_next(rack->r_ctl.tqh, rsm); 12046 if (rsm == NULL) 12047 break; 12048 } 12049 } 12050 12051 #ifdef TCP_REQUEST_TRK 12052 static inline void 12053 rack_req_check_for_comp(struct tcp_rack *rack, tcp_seq th_ack) 12054 { 12055 struct tcp_sendfile_track *ent; 12056 int i; 12057 12058 if ((rack->rc_hybrid_mode == 0) && 12059 (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) == 0)) { 12060 /* 12061 * Just do normal completions hybrid pacing is not on 12062 * and CLDL is off as well. 12063 */ 12064 tcp_req_check_for_comp(rack->rc_tp, th_ack); 12065 return; 12066 } 12067 /* 12068 * Originally I was just going to find the th_ack associated 12069 * with an entry. But then I realized a large strech ack could 12070 * in theory ack two or more requests at once. So instead we 12071 * need to find all entries that are completed by th_ack not 12072 * just a single entry and do our logging. 12073 */ 12074 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); 12075 while (ent != NULL) { 12076 /* 12077 * We may be doing hybrid pacing or CLDL and need more details possibly 12078 * so we do it manually instead of calling 12079 * tcp_req_check_for_comp() 12080 */ 12081 uint64_t laa, tim, data, cbw, ftim; 12082 12083 /* Ok this ack frees it */ 12084 rack_log_hybrid(rack, th_ack, 12085 ent, HYBRID_LOG_REQ_COMP, __LINE__, 0); 12086 rack_log_hybrid_sends(rack, ent, __LINE__); 12087 /* calculate the time based on the ack arrival */ 12088 data = ent->end - ent->start; 12089 laa = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); 12090 if (ent->flags & TCP_TRK_TRACK_FLG_FSND) { 12091 if (ent->first_send > ent->localtime) 12092 ftim = ent->first_send; 12093 else 12094 ftim = ent->localtime; 12095 } else { 12096 /* TSNH */ 12097 ftim = ent->localtime; 12098 } 12099 if (laa > ent->localtime) 12100 tim = laa - ftim; 12101 else 12102 tim = 0; 12103 cbw = data * HPTS_USEC_IN_SEC; 12104 if (tim > 0) 12105 cbw /= tim; 12106 else 12107 cbw = 0; 12108 rack_log_hybrid_bw(rack, th_ack, cbw, tim, data, HYBRID_LOG_BW_MEASURE, 0, ent, __LINE__); 12109 /* 12110 * Check to see if we are freeing what we are pointing to send wise 12111 * if so be sure to NULL the pointer so we know we are no longer 12112 * set to anything. 12113 */ 12114 if (ent == rack->r_ctl.rc_last_sft) 12115 rack->r_ctl.rc_last_sft = NULL; 12116 /* Generate the log that the tcp_netflix call would have */ 12117 tcp_req_log_req_info(rack->rc_tp, ent, 12118 i, TCP_TRK_REQ_LOG_FREED, 0, 0); 12119 /* Free it and see if there is another one */ 12120 tcp_req_free_a_slot(rack->rc_tp, ent); 12121 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); 12122 } 12123 } 12124 #endif 12125 12126 12127 /* 12128 * Return value of 1, we do not need to call rack_process_data(). 12129 * return value of 0, rack_process_data can be called. 
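 * The lock state is reported separately through the ret_val pointer: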
12130 * For ret_val if its 0 the TCP is locked, if its non-zero 12131 * its unlocked and probably unsafe to touch the TCB. 12132 */ 12133 static int 12134 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 12135 struct tcpcb *tp, struct tcpopt *to, 12136 uint32_t tiwin, int32_t tlen, 12137 int32_t * ofia, int32_t thflags, int32_t *ret_val) 12138 { 12139 int32_t ourfinisacked = 0; 12140 int32_t nsegs, acked_amount; 12141 int32_t acked; 12142 struct mbuf *mfree; 12143 struct tcp_rack *rack; 12144 int32_t under_pacing = 0; 12145 int32_t recovery = 0; 12146 12147 INP_WLOCK_ASSERT(tptoinpcb(tp)); 12148 12149 rack = (struct tcp_rack *)tp->t_fb_ptr; 12150 if (SEQ_GT(th->th_ack, tp->snd_max)) { 12151 __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val, 12152 &rack->r_ctl.challenge_ack_ts, 12153 &rack->r_ctl.challenge_ack_cnt); 12154 rack->r_wanted_output = 1; 12155 return (1); 12156 } 12157 if (rack->gp_ready && 12158 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 12159 under_pacing = 1; 12160 } 12161 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 12162 int in_rec, dup_ack_struck = 0; 12163 int dsack_seen = 0, sacks_seen = 0; 12164 12165 in_rec = IN_FASTRECOVERY(tp->t_flags); 12166 if (rack->rc_in_persist) { 12167 tp->t_rxtshift = 0; 12168 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 12169 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 12170 } 12171 12172 if ((th->th_ack == tp->snd_una) && 12173 (tiwin == tp->snd_wnd) && 12174 ((to->to_flags & TOF_SACK) == 0)) { 12175 rack_strike_dupack(rack); 12176 dup_ack_struck = 1; 12177 } 12178 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), 12179 dup_ack_struck, &dsack_seen, &sacks_seen); 12180 if ((rack->sack_attack_disable > 0) && 12181 (th->th_ack == tp->snd_una) && 12182 (tiwin == tp->snd_wnd) && 12183 (dsack_seen == 0) && 12184 (sacks_seen > 0)) { 12185 /* 12186 * If sacks have been disabled we may 12187 * want to strike a dup-ack "ignoring" the 12188 * sack as long as the sack was not a "dsack". Note 12189 * that if no sack is sent (TOF_SACK is off) then the 12190 * normal dsack code above rack_log_ack() would have 12191 * already struck. So this is just to catch the case 12192 * were we are ignoring sacks from this guy due to 12193 * it being a suspected attacker. 12194 */ 12195 rack_strike_dupack(rack); 12196 } 12197 12198 } 12199 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 12200 /* 12201 * Old ack, behind (or duplicate to) the last one rcv'd 12202 * Note: We mark reordering is occuring if its 12203 * less than and we have not closed our window. 12204 */ 12205 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { 12206 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 12207 if (rack->r_ctl.rc_reorder_ts == 0) 12208 rack->r_ctl.rc_reorder_ts = 1; 12209 } 12210 return (0); 12211 } 12212 /* 12213 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 12214 * something we sent. 12215 */ 12216 if (tp->t_flags & TF_NEEDSYN) { 12217 /* 12218 * T/TCP: Connection was half-synchronized, and our SYN has 12219 * been ACK'd (so connection is now fully synchronized). Go 12220 * to non-starred state, increment snd_una for ACK of SYN, 12221 * and check if we can do window scaling. 12222 */ 12223 tp->t_flags &= ~TF_NEEDSYN; 12224 tp->snd_una++; 12225 /* Do window scaling? 
*/ 12226 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 12227 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 12228 tp->rcv_scale = tp->request_r_scale; 12229 /* Send window already scaled. */ 12230 } 12231 } 12232 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12233 12234 acked = BYTES_THIS_ACK(tp, th); 12235 if (acked) { 12236 /* 12237 * Any time we move the cum-ack forward clear 12238 * keep-alive tied probe-not-answered. The 12239 * persists clears its own on entry. 12240 */ 12241 rack->probe_not_answered = 0; 12242 } 12243 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 12244 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 12245 /* 12246 * If we just performed our first retransmit, and the ACK arrives 12247 * within our recovery window, then it was a mistake to do the 12248 * retransmit in the first place. Recover our original cwnd and 12249 * ssthresh, and proceed to transmit where we left off. 12250 */ 12251 if ((tp->t_flags & TF_PREVVALID) && 12252 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 12253 tp->t_flags &= ~TF_PREVVALID; 12254 if (tp->t_rxtshift == 1 && 12255 (int)(ticks - tp->t_badrxtwin) < 0) 12256 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 12257 } 12258 if (acked) { 12259 /* assure we are not backed off */ 12260 tp->t_rxtshift = 0; 12261 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 12262 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 12263 rack->rc_tlp_in_progress = 0; 12264 rack->r_ctl.rc_tlp_cnt_out = 0; 12265 /* 12266 * If it is the RXT timer we want to 12267 * stop it, so we can restart a TLP. 12268 */ 12269 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 12270 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12271 #ifdef TCP_REQUEST_TRK 12272 rack_req_check_for_comp(rack, th->th_ack); 12273 #endif 12274 } 12275 /* 12276 * If we have a timestamp reply, update smoothed round trip time. If 12277 * no timestamp is present but transmit timer is running and timed 12278 * sequence number was acked, update smoothed round trip time. Since 12279 * we now have an rtt measurement, cancel the timer backoff (cf., 12280 * Phil Karn's retransmit alg.). Recompute the initial retransmit 12281 * timer. 12282 * 12283 * Some boxes send broken timestamp replies during the SYN+ACK 12284 * phase, ignore timestamps of 0 or we could calculate a huge RTT 12285 * and blow up the retransmit timer. 12286 */ 12287 /* 12288 * If all outstanding data is acked, stop retransmit timer and 12289 * remember to restart (more output or persist). If there is more 12290 * data to be acked, restart retransmit timer, using current 12291 * (possibly backed-off) value. 12292 */ 12293 if (acked == 0) { 12294 if (ofia) 12295 *ofia = ourfinisacked; 12296 return (0); 12297 } 12298 if (IN_RECOVERY(tp->t_flags)) { 12299 if (SEQ_LT(th->th_ack, tp->snd_recover) && 12300 (SEQ_LT(th->th_ack, tp->snd_max))) { 12301 tcp_rack_partialack(tp); 12302 } else { 12303 rack_post_recovery(tp, th->th_ack); 12304 recovery = 1; 12305 } 12306 } 12307 /* 12308 * Let the congestion control algorithm update congestion control 12309 * related information. This typically means increasing the 12310 * congestion window. 
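	 * The recovery flag computed above is passed along to
	 * rack_ack_received() so a post-recovery ACK can be treated
	 * differently from an ordinary one.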
12311 */ 12312 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, recovery); 12313 SOCKBUF_LOCK(&so->so_snd); 12314 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 12315 tp->snd_wnd -= acked_amount; 12316 mfree = sbcut_locked(&so->so_snd, acked_amount); 12317 if ((sbused(&so->so_snd) == 0) && 12318 (acked > acked_amount) && 12319 (tp->t_state >= TCPS_FIN_WAIT_1) && 12320 (tp->t_flags & TF_SENTFIN)) { 12321 /* 12322 * We must be sure our fin 12323 * was sent and acked (we can be 12324 * in FIN_WAIT_1 without having 12325 * sent the fin). 12326 */ 12327 ourfinisacked = 1; 12328 } 12329 tp->snd_una = th->th_ack; 12330 /* wakeups? */ 12331 if (acked_amount && sbavail(&so->so_snd)) 12332 rack_adjust_sendmap_head(rack, &so->so_snd); 12333 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 12334 /* NB: sowwakeup_locked() does an implicit unlock. */ 12335 sowwakeup_locked(so); 12336 /* now check the rxt clamps */ 12337 if ((recovery == 1) && 12338 (rack->excess_rxt_on) && 12339 (rack->r_cwnd_was_clamped == 0)) { 12340 do_rack_excess_rxt(tp, rack); 12341 } else if (rack->r_cwnd_was_clamped) 12342 do_rack_check_for_unclamp(tp, rack); 12343 m_freem(mfree); 12344 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 12345 tp->snd_recover = tp->snd_una; 12346 12347 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) { 12348 tp->snd_nxt = tp->snd_una; 12349 } 12350 if (under_pacing && 12351 (rack->use_fixed_rate == 0) && 12352 (rack->in_probe_rtt == 0) && 12353 rack->rc_gp_dyn_mul && 12354 rack->rc_always_pace) { 12355 /* Check if we are dragging bottom */ 12356 rack_check_bottom_drag(tp, rack, so); 12357 } 12358 if (tp->snd_una == tp->snd_max) { 12359 /* Nothing left outstanding */ 12360 tp->t_flags &= ~TF_PREVVALID; 12361 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 12362 rack->r_ctl.retran_during_recovery = 0; 12363 rack->r_ctl.dsack_byte_cnt = 0; 12364 if (rack->r_ctl.rc_went_idle_time == 0) 12365 rack->r_ctl.rc_went_idle_time = 1; 12366 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 12367 if (sbavail(&tptosocket(tp)->so_snd) == 0) 12368 tp->t_acktime = 0; 12369 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12370 rack->rc_suspicious = 0; 12371 /* Set need output so persist might get set */ 12372 rack->r_wanted_output = 1; 12373 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 12374 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 12375 (sbavail(&so->so_snd) == 0) && 12376 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 12377 /* 12378 * The socket was gone and the 12379 * peer sent data (now or in the past), time to 12380 * reset him. 
12381 */ 12382 *ret_val = 1; 12383 /* tcp_close will kill the inp pre-log the Reset */ 12384 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 12385 tp = tcp_close(tp); 12386 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 12387 return (1); 12388 } 12389 } 12390 if (ofia) 12391 *ofia = ourfinisacked; 12392 return (0); 12393 } 12394 12395 12396 static void 12397 rack_log_collapse(struct tcp_rack *rack, uint32_t cnt, uint32_t split, uint32_t out, int line, 12398 int dir, uint32_t flags, struct rack_sendmap *rsm) 12399 { 12400 if (tcp_bblogging_on(rack->rc_tp)) { 12401 union tcp_log_stackspecific log; 12402 struct timeval tv; 12403 12404 memset(&log, 0, sizeof(log)); 12405 log.u_bbr.flex1 = cnt; 12406 log.u_bbr.flex2 = split; 12407 log.u_bbr.flex3 = out; 12408 log.u_bbr.flex4 = line; 12409 log.u_bbr.flex5 = rack->r_must_retran; 12410 log.u_bbr.flex6 = flags; 12411 log.u_bbr.flex7 = rack->rc_has_collapsed; 12412 log.u_bbr.flex8 = dir; /* 12413 * 1 is collapsed, 0 is uncollapsed, 12414 * 2 is log of a rsm being marked, 3 is a split. 12415 */ 12416 if (rsm == NULL) 12417 log.u_bbr.rttProp = 0; 12418 else 12419 log.u_bbr.rttProp = (uint64_t)rsm; 12420 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 12421 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 12422 TCP_LOG_EVENTP(rack->rc_tp, NULL, 12423 &rack->rc_inp->inp_socket->so_rcv, 12424 &rack->rc_inp->inp_socket->so_snd, 12425 TCP_RACK_LOG_COLLAPSE, 0, 12426 0, &log, false, &tv); 12427 } 12428 } 12429 12430 static void 12431 rack_collapsed_window(struct tcp_rack *rack, uint32_t out, tcp_seq th_ack, int line) 12432 { 12433 /* 12434 * Here all we do is mark the collapsed point and set the flag. 12435 * This may happen again and again, but there is no 12436 * sense splitting our map until we know where the 12437 * peer finally lands in the collapse. 12438 */ 12439 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); 12440 if ((rack->rc_has_collapsed == 0) || 12441 (rack->r_ctl.last_collapse_point != (th_ack + rack->rc_tp->snd_wnd))) 12442 counter_u64_add(rack_collapsed_win_seen, 1); 12443 rack->r_ctl.last_collapse_point = th_ack + rack->rc_tp->snd_wnd; 12444 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max; 12445 rack->rc_has_collapsed = 1; 12446 rack->r_collapse_point_valid = 1; 12447 rack_log_collapse(rack, 0, th_ack, rack->r_ctl.last_collapse_point, line, 1, 0, NULL); 12448 } 12449 12450 static void 12451 rack_un_collapse_window(struct tcp_rack *rack, int line) 12452 { 12453 struct rack_sendmap *nrsm, *rsm; 12454 int cnt = 0, split = 0; 12455 int insret __diagused; 12456 12457 12458 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); 12459 rack->rc_has_collapsed = 0; 12460 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); 12461 if (rsm == NULL) { 12462 /* Nothing to do maybe the peer ack'ed it all */ 12463 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 12464 return; 12465 } 12466 /* Now do we need to split this one? */ 12467 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) { 12468 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 12469 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm); 12470 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 12471 if (nrsm == NULL) { 12472 /* We can't get a rsm, mark all? 
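			 * If the split allocation fails we go straight to
			 * no_split and mark from the existing rsm onward,
			 * which may tag a few extra bytes as collapsed but
			 * should otherwise be harmless.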
*/ 12473 nrsm = rsm; 12474 goto no_split; 12475 } 12476 /* Clone it */ 12477 split = 1; 12478 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point); 12479 #ifndef INVARIANTS 12480 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); 12481 #else 12482 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { 12483 panic("Insert in rb tree of %p fails ret:%d rack:%p rsm:%p", 12484 nrsm, insret, rack, rsm); 12485 } 12486 #endif 12487 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 12488 rack->r_ctl.last_collapse_point, __LINE__); 12489 if (rsm->r_in_tmap) { 12490 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 12491 nrsm->r_in_tmap = 1; 12492 } 12493 /* 12494 * Set in the new RSM as the 12495 * collapsed starting point 12496 */ 12497 rsm = nrsm; 12498 } 12499 12500 no_split: 12501 TQHASH_FOREACH_FROM(nrsm, rack->r_ctl.tqh, rsm) { 12502 cnt++; 12503 nrsm->r_flags |= RACK_RWND_COLLAPSED; 12504 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm); 12505 cnt++; 12506 } 12507 if (cnt) { 12508 counter_u64_add(rack_collapsed_win, 1); 12509 } 12510 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 12511 } 12512 12513 static void 12514 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 12515 int32_t tlen, int32_t tfo_syn) 12516 { 12517 if (DELAY_ACK(tp, tlen) || tfo_syn) { 12518 rack_timer_cancel(tp, rack, 12519 rack->r_ctl.rc_rcvtime, __LINE__); 12520 tp->t_flags |= TF_DELACK; 12521 } else { 12522 rack->r_wanted_output = 1; 12523 tp->t_flags |= TF_ACKNOW; 12524 } 12525 } 12526 12527 static void 12528 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack) 12529 { 12530 /* 12531 * If fast output is in progress, lets validate that 12532 * the new window did not shrink on us and make it 12533 * so fast output should end. 12534 */ 12535 if (rack->r_fast_output) { 12536 uint32_t out; 12537 12538 /* 12539 * Calculate what we will send if left as is 12540 * and compare that to our send window. 12541 */ 12542 out = ctf_outstanding(tp); 12543 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 12544 /* ok we have an issue */ 12545 if (out >= tp->snd_wnd) { 12546 /* Turn off fast output the window is met or collapsed */ 12547 rack->r_fast_output = 0; 12548 } else { 12549 /* we have some room left */ 12550 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 12551 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 12552 /* If not at least 1 full segment never mind */ 12553 rack->r_fast_output = 0; 12554 } 12555 } 12556 } 12557 } 12558 } 12559 12560 12561 /* 12562 * Return value of 1, the TCB is unlocked and most 12563 * likely gone, return value of 0, the TCP is still 12564 * locked. 12565 */ 12566 static int 12567 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 12568 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 12569 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 12570 { 12571 /* 12572 * Update window information. Don't look at window if no ACK: TAC's 12573 * send garbage on first SYN. 
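	 * The acceptance test below is the usual one: take the
	 * advertised window only if this segment is newer (the
	 * snd_wl1/snd_wl2 checks) or is the same segment but
	 * advertises a larger window.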
12574 */ 12575 int32_t nsegs; 12576 int32_t tfo_syn; 12577 struct tcp_rack *rack; 12578 12579 INP_WLOCK_ASSERT(tptoinpcb(tp)); 12580 12581 rack = (struct tcp_rack *)tp->t_fb_ptr; 12582 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12583 if ((thflags & TH_ACK) && 12584 (SEQ_LT(tp->snd_wl1, th->th_seq) || 12585 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 12586 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 12587 /* keep track of pure window updates */ 12588 if (tlen == 0 && 12589 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 12590 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 12591 tp->snd_wnd = tiwin; 12592 rack_validate_fo_sendwin_up(tp, rack); 12593 tp->snd_wl1 = th->th_seq; 12594 tp->snd_wl2 = th->th_ack; 12595 if (tp->snd_wnd > tp->max_sndwnd) 12596 tp->max_sndwnd = tp->snd_wnd; 12597 rack->r_wanted_output = 1; 12598 } else if (thflags & TH_ACK) { 12599 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 12600 tp->snd_wnd = tiwin; 12601 rack_validate_fo_sendwin_up(tp, rack); 12602 tp->snd_wl1 = th->th_seq; 12603 tp->snd_wl2 = th->th_ack; 12604 } 12605 } 12606 if (tp->snd_wnd < ctf_outstanding(tp)) 12607 /* The peer collapsed the window */ 12608 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); 12609 else if (rack->rc_has_collapsed) 12610 rack_un_collapse_window(rack, __LINE__); 12611 if ((rack->r_collapse_point_valid) && 12612 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point))) 12613 rack->r_collapse_point_valid = 0; 12614 /* Was persist timer active and now we have window space? */ 12615 if ((rack->rc_in_persist != 0) && 12616 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 12617 rack->r_ctl.rc_pace_min_segs))) { 12618 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 12619 tp->snd_nxt = tp->snd_max; 12620 /* Make sure we output to start the timer */ 12621 rack->r_wanted_output = 1; 12622 } 12623 /* Do we enter persists? */ 12624 if ((rack->rc_in_persist == 0) && 12625 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 12626 TCPS_HAVEESTABLISHED(tp->t_state) && 12627 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 12628 sbavail(&tptosocket(tp)->so_snd) && 12629 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 12630 /* 12631 * Here the rwnd is less than 12632 * the pacing size, we are established, 12633 * nothing is outstanding, and there is 12634 * data to send. Enter persists. 12635 */ 12636 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); 12637 } 12638 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 12639 m_freem(m); 12640 return (0); 12641 } 12642 /* 12643 * don't process the URG bit, ignore them drag 12644 * along the up. 12645 */ 12646 tp->rcv_up = tp->rcv_nxt; 12647 12648 /* 12649 * Process the segment text, merging it into the TCP sequencing 12650 * queue, and arranging for acknowledgment of receipt if necessary. 12651 * This process logically involves adjusting tp->rcv_wnd as data is 12652 * presented to the user (this happens in tcp_usrreq.c, case 12653 * PRU_RCVD). If a FIN has already been received on this connection 12654 * then we just ignore the text. 
12655 */ 12656 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 12657 IS_FASTOPEN(tp->t_flags)); 12658 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 12659 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 12660 tcp_seq save_start = th->th_seq; 12661 tcp_seq save_rnxt = tp->rcv_nxt; 12662 int save_tlen = tlen; 12663 12664 m_adj(m, drop_hdrlen); /* delayed header drop */ 12665 /* 12666 * Insert segment which includes th into TCP reassembly 12667 * queue with control block tp. Set thflags to whether 12668 * reassembly now includes a segment with FIN. This handles 12669 * the common case inline (segment is the next to be 12670 * received on an established connection, and the queue is 12671 * empty), avoiding linkage into and removal from the queue 12672 * and repetition of various conversions. Set DELACK for 12673 * segments received in order, but ack immediately when 12674 * segments are out of order (so fast retransmit can work). 12675 */ 12676 if (th->th_seq == tp->rcv_nxt && 12677 SEGQ_EMPTY(tp) && 12678 (TCPS_HAVEESTABLISHED(tp->t_state) || 12679 tfo_syn)) { 12680 #ifdef NETFLIX_SB_LIMITS 12681 u_int mcnt, appended; 12682 12683 if (so->so_rcv.sb_shlim) { 12684 mcnt = m_memcnt(m); 12685 appended = 0; 12686 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 12687 CFO_NOSLEEP, NULL) == false) { 12688 counter_u64_add(tcp_sb_shlim_fails, 1); 12689 m_freem(m); 12690 return (0); 12691 } 12692 } 12693 #endif 12694 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); 12695 tp->rcv_nxt += tlen; 12696 if (tlen && 12697 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 12698 (tp->t_fbyte_in == 0)) { 12699 tp->t_fbyte_in = ticks; 12700 if (tp->t_fbyte_in == 0) 12701 tp->t_fbyte_in = 1; 12702 if (tp->t_fbyte_out && tp->t_fbyte_in) 12703 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 12704 } 12705 thflags = tcp_get_flags(th) & TH_FIN; 12706 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 12707 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 12708 SOCKBUF_LOCK(&so->so_rcv); 12709 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 12710 m_freem(m); 12711 } else 12712 #ifdef NETFLIX_SB_LIMITS 12713 appended = 12714 #endif 12715 sbappendstream_locked(&so->so_rcv, m, 0); 12716 12717 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 12718 /* NB: sorwakeup_locked() does an implicit unlock. */ 12719 sorwakeup_locked(so); 12720 #ifdef NETFLIX_SB_LIMITS 12721 if (so->so_rcv.sb_shlim && appended != mcnt) 12722 counter_fo_release(so->so_rcv.sb_shlim, 12723 mcnt - appended); 12724 #endif 12725 } else { 12726 /* 12727 * XXX: Due to the header drop above "th" is 12728 * theoretically invalid by now. Fortunately 12729 * m_adj() doesn't actually frees any mbufs when 12730 * trimming from the head. 12731 */ 12732 tcp_seq temp = save_start; 12733 12734 thflags = tcp_reass(tp, th, &temp, &tlen, m); 12735 tp->t_flags |= TF_ACKNOW; 12736 if (tp->t_flags & TF_WAKESOR) { 12737 tp->t_flags &= ~TF_WAKESOR; 12738 /* NB: sorwakeup_locked() does an implicit unlock. */ 12739 sorwakeup_locked(so); 12740 } 12741 } 12742 if ((tp->t_flags & TF_SACK_PERMIT) && 12743 (save_tlen > 0) && 12744 TCPS_HAVEESTABLISHED(tp->t_state)) { 12745 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 12746 /* 12747 * DSACK actually handled in the fastpath 12748 * above. 12749 */ 12750 tcp_update_sack_list(tp, save_start, 12751 save_start + save_tlen); 12752 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 12753 if ((tp->rcv_numsacks >= 1) && 12754 (tp->sackblks[0].end == save_start)) { 12755 /* 12756 * Partial overlap, recorded at todrop 12757 * above. 
12758 */ 12759 tcp_update_sack_list(tp, 12760 tp->sackblks[0].start, 12761 tp->sackblks[0].end); 12762 } else { 12763 tcp_update_dsack_list(tp, save_start, 12764 save_start + save_tlen); 12765 } 12766 } else if (tlen >= save_tlen) { 12767 /* Update of sackblks. */ 12768 tcp_update_dsack_list(tp, save_start, 12769 save_start + save_tlen); 12770 } else if (tlen > 0) { 12771 tcp_update_dsack_list(tp, save_start, 12772 save_start + tlen); 12773 } 12774 } 12775 } else { 12776 m_freem(m); 12777 thflags &= ~TH_FIN; 12778 } 12779 12780 /* 12781 * If FIN is received ACK the FIN and let the user know that the 12782 * connection is closing. 12783 */ 12784 if (thflags & TH_FIN) { 12785 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 12786 /* The socket upcall is handled by socantrcvmore. */ 12787 socantrcvmore(so); 12788 /* 12789 * If connection is half-synchronized (ie NEEDSYN 12790 * flag on) then delay ACK, so it may be piggybacked 12791 * when SYN is sent. Otherwise, since we received a 12792 * FIN then no more input can be expected, send ACK 12793 * now. 12794 */ 12795 if (tp->t_flags & TF_NEEDSYN) { 12796 rack_timer_cancel(tp, rack, 12797 rack->r_ctl.rc_rcvtime, __LINE__); 12798 tp->t_flags |= TF_DELACK; 12799 } else { 12800 tp->t_flags |= TF_ACKNOW; 12801 } 12802 tp->rcv_nxt++; 12803 } 12804 switch (tp->t_state) { 12805 /* 12806 * In SYN_RECEIVED and ESTABLISHED STATES enter the 12807 * CLOSE_WAIT state. 12808 */ 12809 case TCPS_SYN_RECEIVED: 12810 tp->t_starttime = ticks; 12811 /* FALLTHROUGH */ 12812 case TCPS_ESTABLISHED: 12813 rack_timer_cancel(tp, rack, 12814 rack->r_ctl.rc_rcvtime, __LINE__); 12815 tcp_state_change(tp, TCPS_CLOSE_WAIT); 12816 break; 12817 12818 /* 12819 * If still in FIN_WAIT_1 STATE FIN has not been 12820 * acked so enter the CLOSING state. 12821 */ 12822 case TCPS_FIN_WAIT_1: 12823 rack_timer_cancel(tp, rack, 12824 rack->r_ctl.rc_rcvtime, __LINE__); 12825 tcp_state_change(tp, TCPS_CLOSING); 12826 break; 12827 12828 /* 12829 * In FIN_WAIT_2 state enter the TIME_WAIT state, 12830 * starting the time-wait timer, turning off the 12831 * other standard timers. 12832 */ 12833 case TCPS_FIN_WAIT_2: 12834 rack_timer_cancel(tp, rack, 12835 rack->r_ctl.rc_rcvtime, __LINE__); 12836 tcp_twstart(tp); 12837 return (1); 12838 } 12839 } 12840 /* 12841 * Return any desired output. 12842 */ 12843 if ((tp->t_flags & TF_ACKNOW) || 12844 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 12845 rack->r_wanted_output = 1; 12846 } 12847 return (0); 12848 } 12849 12850 /* 12851 * Here nothing is really faster, its just that we 12852 * have broken out the fast-data path also just like 12853 * the fast-ack. 12854 */ 12855 static int 12856 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 12857 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12858 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 12859 { 12860 int32_t nsegs; 12861 int32_t newsize = 0; /* automatic sockbuf scaling */ 12862 struct tcp_rack *rack; 12863 #ifdef NETFLIX_SB_LIMITS 12864 u_int mcnt, appended; 12865 #endif 12866 12867 /* 12868 * If last ACK falls within this segment's sequence numbers, record 12869 * the timestamp. NOTE that the test is modified according to the 12870 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 
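	 * In practice the check below only requires that th_seq is at
	 * or before last_ack_sent; ts_recent and ts_recent_age are then
	 * refreshed from the segment's TSval.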
12871 */ 12872 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 12873 return (0); 12874 } 12875 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 12876 return (0); 12877 } 12878 if (tiwin && tiwin != tp->snd_wnd) { 12879 return (0); 12880 } 12881 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 12882 return (0); 12883 } 12884 if (__predict_false((to->to_flags & TOF_TS) && 12885 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 12886 return (0); 12887 } 12888 if (__predict_false((th->th_ack != tp->snd_una))) { 12889 return (0); 12890 } 12891 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 12892 return (0); 12893 } 12894 if ((to->to_flags & TOF_TS) != 0 && 12895 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 12896 tp->ts_recent_age = tcp_ts_getticks(); 12897 tp->ts_recent = to->to_tsval; 12898 } 12899 rack = (struct tcp_rack *)tp->t_fb_ptr; 12900 /* 12901 * This is a pure, in-sequence data packet with nothing on the 12902 * reassembly queue and we have enough buffer space to take it. 12903 */ 12904 nsegs = max(1, m->m_pkthdr.lro_nsegs); 12905 12906 #ifdef NETFLIX_SB_LIMITS 12907 if (so->so_rcv.sb_shlim) { 12908 mcnt = m_memcnt(m); 12909 appended = 0; 12910 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 12911 CFO_NOSLEEP, NULL) == false) { 12912 counter_u64_add(tcp_sb_shlim_fails, 1); 12913 m_freem(m); 12914 return (1); 12915 } 12916 } 12917 #endif 12918 /* Clean receiver SACK report if present */ 12919 if (tp->rcv_numsacks) 12920 tcp_clean_sackreport(tp); 12921 KMOD_TCPSTAT_INC(tcps_preddat); 12922 tp->rcv_nxt += tlen; 12923 if (tlen && 12924 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 12925 (tp->t_fbyte_in == 0)) { 12926 tp->t_fbyte_in = ticks; 12927 if (tp->t_fbyte_in == 0) 12928 tp->t_fbyte_in = 1; 12929 if (tp->t_fbyte_out && tp->t_fbyte_in) 12930 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 12931 } 12932 /* 12933 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 12934 */ 12935 tp->snd_wl1 = th->th_seq; 12936 /* 12937 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 12938 */ 12939 tp->rcv_up = tp->rcv_nxt; 12940 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 12941 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 12942 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 12943 12944 /* Add data to socket buffer. */ 12945 SOCKBUF_LOCK(&so->so_rcv); 12946 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 12947 m_freem(m); 12948 } else { 12949 /* 12950 * Set new socket buffer size. Give up when limit is 12951 * reached. 12952 */ 12953 if (newsize) 12954 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 12955 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 12956 m_adj(m, drop_hdrlen); /* delayed header drop */ 12957 #ifdef NETFLIX_SB_LIMITS 12958 appended = 12959 #endif 12960 sbappendstream_locked(&so->so_rcv, m, 0); 12961 ctf_calc_rwin(so, tp); 12962 } 12963 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 12964 /* NB: sorwakeup_locked() does an implicit unlock. */ 12965 sorwakeup_locked(so); 12966 #ifdef NETFLIX_SB_LIMITS 12967 if (so->so_rcv.sb_shlim && mcnt != appended) 12968 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 12969 #endif 12970 rack_handle_delayed_ack(tp, rack, tlen, 0); 12971 if (tp->snd_una == tp->snd_max) 12972 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 12973 return (1); 12974 } 12975 12976 /* 12977 * This subfunction is used to try to highly optimize the 12978 * fast path. We again allow window updates that are 12979 * in sequence to remain in the fast-path. We also add 12980 * in the __predict's to attempt to help the compiler. 
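 * Each check below names a condition that forces the slow path: an
 * old or too-new ACK, an in-progress retransmit, a zero window,
 * pending SYN/FIN work, a stale timestamp, an ongoing recovery, or
 * SACK holes on the scoreboard.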
12981 * Note that if we return a 0, then we can *not* process 12982 * it and the caller should push the packet into the 12983 * slow-path. 12984 */ 12985 static int 12986 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 12987 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12988 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 12989 { 12990 int32_t acked; 12991 int32_t nsegs; 12992 int32_t under_pacing = 0; 12993 struct tcp_rack *rack; 12994 12995 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 12996 /* Old ack, behind (or duplicate to) the last one rcv'd */ 12997 return (0); 12998 } 12999 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 13000 /* Above what we have sent? */ 13001 return (0); 13002 } 13003 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 13004 /* We are retransmitting */ 13005 return (0); 13006 } 13007 if (__predict_false(tiwin == 0)) { 13008 /* zero window */ 13009 return (0); 13010 } 13011 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 13012 /* We need a SYN or a FIN, unlikely.. */ 13013 return (0); 13014 } 13015 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 13016 /* Timestamp is behind .. old ack with seq wrap? */ 13017 return (0); 13018 } 13019 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 13020 /* Still recovering */ 13021 return (0); 13022 } 13023 rack = (struct tcp_rack *)tp->t_fb_ptr; 13024 if (rack->r_ctl.rc_sacked) { 13025 /* We have sack holes on our scoreboard */ 13026 return (0); 13027 } 13028 /* Ok if we reach here, we can process a fast-ack */ 13029 if (rack->gp_ready && 13030 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 13031 under_pacing = 1; 13032 } 13033 nsegs = max(1, m->m_pkthdr.lro_nsegs); 13034 rack_log_ack(tp, to, th, 0, 0, NULL, NULL); 13035 /* Did the window get updated? */ 13036 if (tiwin != tp->snd_wnd) { 13037 tp->snd_wnd = tiwin; 13038 rack_validate_fo_sendwin_up(tp, rack); 13039 tp->snd_wl1 = th->th_seq; 13040 if (tp->snd_wnd > tp->max_sndwnd) 13041 tp->max_sndwnd = tp->snd_wnd; 13042 } 13043 /* Do we exit persists? */ 13044 if ((rack->rc_in_persist != 0) && 13045 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 13046 rack->r_ctl.rc_pace_min_segs))) { 13047 rack_exit_persist(tp, rack, cts); 13048 } 13049 /* Do we enter persists? */ 13050 if ((rack->rc_in_persist == 0) && 13051 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 13052 TCPS_HAVEESTABLISHED(tp->t_state) && 13053 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 13054 sbavail(&tptosocket(tp)->so_snd) && 13055 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 13056 /* 13057 * Here the rwnd is less than 13058 * the pacing size, we are established, 13059 * nothing is outstanding, and there is 13060 * data to send. Enter persists. 13061 */ 13062 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, th->th_ack); 13063 } 13064 /* 13065 * If last ACK falls within this segment's sequence numbers, record 13066 * the timestamp. NOTE that the test is modified according to the 13067 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 13068 */ 13069 if ((to->to_flags & TOF_TS) != 0 && 13070 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 13071 tp->ts_recent_age = tcp_ts_getticks(); 13072 tp->ts_recent = to->to_tsval; 13073 } 13074 /* 13075 * This is a pure ack for outstanding data. 13076 */ 13077 KMOD_TCPSTAT_INC(tcps_predack); 13078 13079 /* 13080 * "bad retransmit" recovery. 
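	 * If our lone retransmit turns out to have been acked within
	 * t_badrxtwin, treat it as spurious and signal CC_RTO_ERR so
	 * the prior cwnd/ssthresh can be restored. This path is only
	 * used when timestamps are not available to make that call.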
13081 */ 13082 if ((tp->t_flags & TF_PREVVALID) && 13083 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 13084 tp->t_flags &= ~TF_PREVVALID; 13085 if (tp->t_rxtshift == 1 && 13086 (int)(ticks - tp->t_badrxtwin) < 0) 13087 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 13088 } 13089 /* 13090 * Recalculate the transmit timer / rtt. 13091 * 13092 * Some boxes send broken timestamp replies during the SYN+ACK 13093 * phase, ignore timestamps of 0 or we could calculate a huge RTT 13094 * and blow up the retransmit timer. 13095 */ 13096 acked = BYTES_THIS_ACK(tp, th); 13097 13098 #ifdef TCP_HHOOK 13099 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 13100 hhook_run_tcp_est_in(tp, th, to); 13101 #endif 13102 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 13103 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 13104 if (acked) { 13105 struct mbuf *mfree; 13106 13107 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 13108 SOCKBUF_LOCK(&so->so_snd); 13109 mfree = sbcut_locked(&so->so_snd, acked); 13110 tp->snd_una = th->th_ack; 13111 /* Note we want to hold the sb lock through the sendmap adjust */ 13112 rack_adjust_sendmap_head(rack, &so->so_snd); 13113 /* Wake up the socket if we have room to write more */ 13114 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 13115 sowwakeup_locked(so); 13116 m_freem(mfree); 13117 tp->t_rxtshift = 0; 13118 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 13119 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 13120 rack->rc_tlp_in_progress = 0; 13121 rack->r_ctl.rc_tlp_cnt_out = 0; 13122 /* 13123 * If it is the RXT timer we want to 13124 * stop it, so we can restart a TLP. 13125 */ 13126 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 13127 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13128 13129 #ifdef TCP_REQUEST_TRK 13130 rack_req_check_for_comp(rack, th->th_ack); 13131 #endif 13132 } 13133 /* 13134 * Let the congestion control algorithm update congestion control 13135 * related information. This typically means increasing the 13136 * congestion window. 13137 */ 13138 if (tp->snd_wnd < ctf_outstanding(tp)) { 13139 /* The peer collapsed the window */ 13140 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); 13141 } else if (rack->rc_has_collapsed) 13142 rack_un_collapse_window(rack, __LINE__); 13143 if ((rack->r_collapse_point_valid) && 13144 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point))) 13145 rack->r_collapse_point_valid = 0; 13146 /* 13147 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 13148 */ 13149 tp->snd_wl2 = th->th_ack; 13150 tp->t_dupacks = 0; 13151 m_freem(m); 13152 /* ND6_HINT(tp); *//* Some progress has been made. */ 13153 13154 /* 13155 * If all outstanding data are acked, stop retransmit timer, 13156 * otherwise restart timer using current (possibly backed-off) 13157 * value. If process is waiting for space, wakeup/selwakeup/signal. 13158 * If data are ready to send, let tcp_output decide between more 13159 * output or persist. 
13160 */ 13161 if (under_pacing && 13162 (rack->use_fixed_rate == 0) && 13163 (rack->in_probe_rtt == 0) && 13164 rack->rc_gp_dyn_mul && 13165 rack->rc_always_pace) { 13166 /* Check if we are dragging bottom */ 13167 rack_check_bottom_drag(tp, rack, so); 13168 } 13169 if (tp->snd_una == tp->snd_max) { 13170 tp->t_flags &= ~TF_PREVVALID; 13171 rack->r_ctl.retran_during_recovery = 0; 13172 rack->rc_suspicious = 0; 13173 rack->r_ctl.dsack_byte_cnt = 0; 13174 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 13175 if (rack->r_ctl.rc_went_idle_time == 0) 13176 rack->r_ctl.rc_went_idle_time = 1; 13177 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 13178 if (sbavail(&tptosocket(tp)->so_snd) == 0) 13179 tp->t_acktime = 0; 13180 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13181 } 13182 if (acked && rack->r_fast_output) 13183 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 13184 if (sbavail(&so->so_snd)) { 13185 rack->r_wanted_output = 1; 13186 } 13187 return (1); 13188 } 13189 13190 /* 13191 * Return value of 1, the TCB is unlocked and most 13192 * likely gone, return value of 0, the TCP is still 13193 * locked. 13194 */ 13195 static int 13196 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 13197 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13198 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13199 { 13200 int32_t ret_val = 0; 13201 int32_t todrop; 13202 int32_t ourfinisacked = 0; 13203 struct tcp_rack *rack; 13204 13205 INP_WLOCK_ASSERT(tptoinpcb(tp)); 13206 13207 ctf_calc_rwin(so, tp); 13208 /* 13209 * If the state is SYN_SENT: if seg contains an ACK, but not for our 13210 * SYN, drop the input. if seg contains a RST, then drop the 13211 * connection. if seg does not contain SYN, then drop it. Otherwise 13212 * this is an acceptable SYN segment initialize tp->rcv_nxt and 13213 * tp->irs if seg contains ack then advance tp->snd_una if seg 13214 * contains an ECE and ECN support is enabled, the stream is ECN 13215 * capable. if SYN has been acked change to ESTABLISHED else 13216 * SYN_RCVD state arrange for segment to be acked (eventually) 13217 * continue processing rest of data/controls. 13218 */ 13219 if ((thflags & TH_ACK) && 13220 (SEQ_LEQ(th->th_ack, tp->iss) || 13221 SEQ_GT(th->th_ack, tp->snd_max))) { 13222 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13223 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13224 return (1); 13225 } 13226 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 13227 TCP_PROBE5(connect__refused, NULL, tp, 13228 mtod(m, const char *), tp, th); 13229 tp = tcp_drop(tp, ECONNREFUSED); 13230 ctf_do_drop(m, tp); 13231 return (1); 13232 } 13233 if (thflags & TH_RST) { 13234 ctf_do_drop(m, tp); 13235 return (1); 13236 } 13237 if (!(thflags & TH_SYN)) { 13238 ctf_do_drop(m, tp); 13239 return (1); 13240 } 13241 tp->irs = th->th_seq; 13242 tcp_rcvseqinit(tp); 13243 rack = (struct tcp_rack *)tp->t_fb_ptr; 13244 if (thflags & TH_ACK) { 13245 int tfo_partial = 0; 13246 13247 KMOD_TCPSTAT_INC(tcps_connects); 13248 soisconnected(so); 13249 #ifdef MAC 13250 mac_socketpeer_set_from_mbuf(m, so); 13251 #endif 13252 /* Do window scaling on this connection? 
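 *
 * Editorial note: the scale factor is only honored when *both* sides
 * offered it (TF_REQ_SCALE and TF_RCVD_SCALE).  Once agreed, the
 * 16-bit window field of later segments is interpreted shifted left
 * by rcv_scale; a minimal illustration (the helper is not part of
 * the stack):
 *
 *	static uint32_t
 *	scaled_recv_window(uint16_t hdr_win, int scaling_agreed, uint8_t rcv_scale)
 *	{
 *		return (scaling_agreed ?
 *		    ((uint32_t)hdr_win << rcv_scale) : (uint32_t)hdr_win);
 *	}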
*/ 13253 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 13254 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 13255 tp->rcv_scale = tp->request_r_scale; 13256 } 13257 tp->rcv_adv += min(tp->rcv_wnd, 13258 TCP_MAXWIN << tp->rcv_scale); 13259 /* 13260 * If not all the data that was sent in the TFO SYN 13261 * has been acked, resend the remainder right away. 13262 */ 13263 if (IS_FASTOPEN(tp->t_flags) && 13264 (tp->snd_una != tp->snd_max)) { 13265 tp->snd_nxt = th->th_ack; 13266 tfo_partial = 1; 13267 } 13268 /* 13269 * If there's data, delay ACK; if there's also a FIN ACKNOW 13270 * will be turned on later. 13271 */ 13272 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 13273 rack_timer_cancel(tp, rack, 13274 rack->r_ctl.rc_rcvtime, __LINE__); 13275 tp->t_flags |= TF_DELACK; 13276 } else { 13277 rack->r_wanted_output = 1; 13278 tp->t_flags |= TF_ACKNOW; 13279 } 13280 13281 tcp_ecn_input_syn_sent(tp, thflags, iptos); 13282 13283 if (SEQ_GT(th->th_ack, tp->snd_una)) { 13284 /* 13285 * We advance snd_una for the 13286 * fast open case. If th_ack is 13287 * acknowledging data beyond 13288 * snd_una we can't just call 13289 * ack-processing since the 13290 * data stream in our send-map 13291 * will start at snd_una + 1 (one 13292 * beyond the SYN). If its just 13293 * equal we don't need to do that 13294 * and there is no send_map. 13295 */ 13296 tp->snd_una++; 13297 } 13298 /* 13299 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 13300 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 13301 */ 13302 tp->t_starttime = ticks; 13303 if (tp->t_flags & TF_NEEDFIN) { 13304 tcp_state_change(tp, TCPS_FIN_WAIT_1); 13305 tp->t_flags &= ~TF_NEEDFIN; 13306 thflags &= ~TH_SYN; 13307 } else { 13308 tcp_state_change(tp, TCPS_ESTABLISHED); 13309 TCP_PROBE5(connect__established, NULL, tp, 13310 mtod(m, const char *), tp, th); 13311 rack_cc_conn_init(tp); 13312 } 13313 } else { 13314 /* 13315 * Received initial SYN in SYN-SENT[*] state => simultaneous 13316 * open. If segment contains CC option and there is a 13317 * cached CC, apply TAO test. If it succeeds, connection is * 13318 * half-synchronized. Otherwise, do 3-way handshake: 13319 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 13320 * there was no CC option, clear cached CC value. 13321 */ 13322 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); 13323 tcp_state_change(tp, TCPS_SYN_RECEIVED); 13324 } 13325 /* 13326 * Advance th->th_seq to correspond to first data byte. If data, 13327 * trim to stay within window, dropping FIN if necessary. 13328 */ 13329 th->th_seq++; 13330 if (tlen > tp->rcv_wnd) { 13331 todrop = tlen - tp->rcv_wnd; 13332 m_adj(m, -todrop); 13333 tlen = tp->rcv_wnd; 13334 thflags &= ~TH_FIN; 13335 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 13336 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 13337 } 13338 tp->snd_wl1 = th->th_seq - 1; 13339 tp->rcv_up = th->th_seq; 13340 /* 13341 * Client side of transaction: already sent SYN and data. If the 13342 * remote host used T/TCP to validate the SYN, our data will be 13343 * ACK'd; if so, enter normal data segment processing in the middle 13344 * of step 5, ack processing. Otherwise, goto step 6. 
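 *
 * Editorial note: for the ACK of our SYN the code below derives an
 * RTT sample from the timestamp echo.  tcp_ts_getticks() counts in
 * milliseconds, so the difference is scaled by HPTS_USEC_IN_MSEC to
 * get the microsecond units rack's RTT filters expect; a minimal
 * sketch (the helper name is illustrative):
 *
 *	static uint32_t
 *	rtt_usec_from_ts_echo(uint32_t now_msticks, uint32_t tsecr)
 *	{
 *		return ((now_msticks - tsecr) * HPTS_USEC_IN_MSEC);
 *	}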
13345 */ 13346 if (thflags & TH_ACK) { 13347 /* For syn-sent we need to possibly update the rtt */ 13348 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 13349 uint32_t t, mcts; 13350 13351 mcts = tcp_ts_getticks(); 13352 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 13353 if (!tp->t_rttlow || tp->t_rttlow > t) 13354 tp->t_rttlow = t; 13355 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 13356 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 13357 tcp_rack_xmit_timer_commit(rack, tp); 13358 } 13359 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) 13360 return (ret_val); 13361 /* We may have changed to FIN_WAIT_1 above */ 13362 if (tp->t_state == TCPS_FIN_WAIT_1) { 13363 /* 13364 * In FIN_WAIT_1 STATE in addition to the processing 13365 * for the ESTABLISHED state if our FIN is now 13366 * acknowledged then enter FIN_WAIT_2. 13367 */ 13368 if (ourfinisacked) { 13369 /* 13370 * If we can't receive any more data, then 13371 * closing user can proceed. Starting the 13372 * timer is contrary to the specification, 13373 * but if we don't get a FIN we'll hang 13374 * forever. 13375 * 13376 * XXXjl: we should release the tp also, and 13377 * use a compressed state. 13378 */ 13379 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13380 soisdisconnected(so); 13381 tcp_timer_activate(tp, TT_2MSL, 13382 (tcp_fast_finwait2_recycle ? 13383 tcp_finwait2_timeout : 13384 TP_MAXIDLE(tp))); 13385 } 13386 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13387 } 13388 } 13389 } 13390 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13391 tiwin, thflags, nxt_pkt)); 13392 } 13393 13394 /* 13395 * Return value of 1, the TCB is unlocked and most 13396 * likely gone, return value of 0, the TCP is still 13397 * locked. 13398 */ 13399 static int 13400 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 13401 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13402 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13403 { 13404 struct tcp_rack *rack; 13405 int32_t ret_val = 0; 13406 int32_t ourfinisacked = 0; 13407 13408 ctf_calc_rwin(so, tp); 13409 if ((thflags & TH_ACK) && 13410 (SEQ_LEQ(th->th_ack, tp->snd_una) || 13411 SEQ_GT(th->th_ack, tp->snd_max))) { 13412 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13413 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13414 return (1); 13415 } 13416 rack = (struct tcp_rack *)tp->t_fb_ptr; 13417 if (IS_FASTOPEN(tp->t_flags)) { 13418 /* 13419 * When a TFO connection is in SYN_RECEIVED, the 13420 * only valid packets are the initial SYN, a 13421 * retransmit/copy of the initial SYN (possibly with 13422 * a subset of the original data), a valid ACK, a 13423 * FIN, or a RST. 
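 *
 * Editorial sketch of the classification done below (the enum and
 * helper are illustrative only; "retran_timer_armed" stands for any
 * of PACE_TMR_RXT, PACE_TMR_TLP or PACE_TMR_RACK being set):
 *
 *	enum tfo_synrcvd_action { TFO_RESET, TFO_DROP, TFO_CONTINUE };
 *
 *	static enum tfo_synrcvd_action
 *	tfo_synrcvd_classify(int thflags, int retran_timer_armed)
 *	{
 *		if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK))
 *			return (TFO_RESET);
 *		else if (thflags & TH_SYN)
 *			return (retran_timer_armed ? TFO_DROP : TFO_CONTINUE);
 *		else if (!(thflags & (TH_ACK | TH_FIN | TH_RST)))
 *			return (TFO_DROP);
 *		return (TFO_CONTINUE);
 *	}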
13424 */ 13425 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 13426 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13427 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13428 return (1); 13429 } else if (thflags & TH_SYN) { 13430 /* non-initial SYN is ignored */ 13431 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 13432 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 13433 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 13434 ctf_do_drop(m, NULL); 13435 return (0); 13436 } 13437 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 13438 ctf_do_drop(m, NULL); 13439 return (0); 13440 } 13441 } 13442 13443 if ((thflags & TH_RST) || 13444 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13445 return (__ctf_process_rst(m, th, so, tp, 13446 &rack->r_ctl.challenge_ack_ts, 13447 &rack->r_ctl.challenge_ack_cnt)); 13448 /* 13449 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13450 * it's less than ts_recent, drop it. 13451 */ 13452 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13453 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13454 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13455 return (ret_val); 13456 } 13457 /* 13458 * In the SYN-RECEIVED state, validate that the packet belongs to 13459 * this connection before trimming the data to fit the receive 13460 * window. Check the sequence number versus IRS since we know the 13461 * sequence numbers haven't wrapped. This is a partial fix for the 13462 * "LAND" DoS attack. 13463 */ 13464 if (SEQ_LT(th->th_seq, tp->irs)) { 13465 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13466 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13467 return (1); 13468 } 13469 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 13470 &rack->r_ctl.challenge_ack_ts, 13471 &rack->r_ctl.challenge_ack_cnt)) { 13472 return (ret_val); 13473 } 13474 /* 13475 * If last ACK falls within this segment's sequence numbers, record 13476 * its timestamp. NOTE: 1) That the test incorporates suggestions 13477 * from the latest proposal of the tcplw@cray.com list (Braden 13478 * 1993/04/26). 2) That updating only on newer timestamps interferes 13479 * with our earlier PAWS tests, so this check should be solely 13480 * predicated on the sequence space of this segment. 3) That we 13481 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13482 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13483 * SEG.Len, This modified check allows us to overcome RFC1323's 13484 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13485 * p.869. In such cases, we can still calculate the RTT correctly 13486 * when RCV.NXT == Last.ACK.Sent. 13487 */ 13488 if ((to->to_flags & TOF_TS) != 0 && 13489 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13490 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13491 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13492 tp->ts_recent_age = tcp_ts_getticks(); 13493 tp->ts_recent = to->to_tsval; 13494 } 13495 tp->snd_wnd = tiwin; 13496 rack_validate_fo_sendwin_up(tp, rack); 13497 /* 13498 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13499 * is on (half-synchronized state), then queue data for later 13500 * processing; else drop segment and return. 
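 *
 * Editorial note (before the ACK-bit check below): the "record the
 * timestamp" test used just above, and repeated in every state
 * handler in this file, can be read as the following predicate
 * (helper is illustrative only):
 *
 *	static int
 *	should_record_ts(tcp_seq last_ack_sent, tcp_seq seq, int tlen, int has_syn_or_fin)
 *	{
 *		return (SEQ_LEQ(seq, last_ack_sent) &&
 *		    SEQ_LEQ(last_ack_sent, seq + tlen + (has_syn_or_fin ? 1 : 0)));
 *	}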
13501 */ 13502 if ((thflags & TH_ACK) == 0) { 13503 if (IS_FASTOPEN(tp->t_flags)) { 13504 rack_cc_conn_init(tp); 13505 } 13506 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13507 tiwin, thflags, nxt_pkt)); 13508 } 13509 KMOD_TCPSTAT_INC(tcps_connects); 13510 if (tp->t_flags & TF_SONOTCONN) { 13511 tp->t_flags &= ~TF_SONOTCONN; 13512 soisconnected(so); 13513 } 13514 /* Do window scaling? */ 13515 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 13516 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 13517 tp->rcv_scale = tp->request_r_scale; 13518 } 13519 /* 13520 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 13521 * FIN-WAIT-1 13522 */ 13523 tp->t_starttime = ticks; 13524 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 13525 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 13526 tp->t_tfo_pending = NULL; 13527 } 13528 if (tp->t_flags & TF_NEEDFIN) { 13529 tcp_state_change(tp, TCPS_FIN_WAIT_1); 13530 tp->t_flags &= ~TF_NEEDFIN; 13531 } else { 13532 tcp_state_change(tp, TCPS_ESTABLISHED); 13533 TCP_PROBE5(accept__established, NULL, tp, 13534 mtod(m, const char *), tp, th); 13535 /* 13536 * TFO connections call cc_conn_init() during SYN 13537 * processing. Calling it again here for such connections 13538 * is not harmless as it would undo the snd_cwnd reduction 13539 * that occurs when a TFO SYN|ACK is retransmitted. 13540 */ 13541 if (!IS_FASTOPEN(tp->t_flags)) 13542 rack_cc_conn_init(tp); 13543 } 13544 /* 13545 * Account for the ACK of our SYN prior to 13546 * regular ACK processing below, except for 13547 * simultaneous SYN, which is handled later. 13548 */ 13549 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 13550 tp->snd_una++; 13551 /* 13552 * If segment contains data or ACK, will call tcp_reass() later; if 13553 * not, do so now to pass queued data to user. 13554 */ 13555 if (tlen == 0 && (thflags & TH_FIN) == 0) { 13556 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 13557 (struct mbuf *)0); 13558 if (tp->t_flags & TF_WAKESOR) { 13559 tp->t_flags &= ~TF_WAKESOR; 13560 /* NB: sorwakeup_locked() does an implicit unlock. */ 13561 sorwakeup_locked(so); 13562 } 13563 } 13564 tp->snd_wl1 = th->th_seq - 1; 13565 /* For syn-recv we need to possibly update the rtt */ 13566 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 13567 uint32_t t, mcts; 13568 13569 mcts = tcp_ts_getticks(); 13570 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 13571 if (!tp->t_rttlow || tp->t_rttlow > t) 13572 tp->t_rttlow = t; 13573 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); 13574 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 13575 tcp_rack_xmit_timer_commit(rack, tp); 13576 } 13577 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 13578 return (ret_val); 13579 } 13580 if (tp->t_state == TCPS_FIN_WAIT_1) { 13581 /* We could have went to FIN_WAIT_1 (or EST) above */ 13582 /* 13583 * In FIN_WAIT_1 STATE in addition to the processing for the 13584 * ESTABLISHED state if our FIN is now acknowledged then 13585 * enter FIN_WAIT_2. 13586 */ 13587 if (ourfinisacked) { 13588 /* 13589 * If we can't receive any more data, then closing 13590 * user can proceed. Starting the timer is contrary 13591 * to the specification, but if we don't get a FIN 13592 * we'll hang forever. 13593 * 13594 * XXXjl: we should release the tp also, and use a 13595 * compressed state. 
13596 */ 13597 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13598 soisdisconnected(so); 13599 tcp_timer_activate(tp, TT_2MSL, 13600 (tcp_fast_finwait2_recycle ? 13601 tcp_finwait2_timeout : 13602 TP_MAXIDLE(tp))); 13603 } 13604 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13605 } 13606 } 13607 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13608 tiwin, thflags, nxt_pkt)); 13609 } 13610 13611 /* 13612 * Return value of 1, the TCB is unlocked and most 13613 * likely gone, return value of 0, the TCP is still 13614 * locked. 13615 */ 13616 static int 13617 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 13618 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13619 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13620 { 13621 int32_t ret_val = 0; 13622 struct tcp_rack *rack; 13623 13624 /* 13625 * Header prediction: check for the two common cases of a 13626 * uni-directional data xfer. If the packet has no control flags, 13627 * is in-sequence, the window didn't change and we're not 13628 * retransmitting, it's a candidate. If the length is zero and the 13629 * ack moved forward, we're the sender side of the xfer. Just free 13630 * the data acked & wake any higher level process that was blocked 13631 * waiting for space. If the length is non-zero and the ack didn't 13632 * move, we're the receiver side. If we're getting packets in-order 13633 * (the reassembly queue is empty), add the data toc The socket 13634 * buffer and note that we need a delayed ack. Make sure that the 13635 * hidden state-flags are also off. Since we check for 13636 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 13637 */ 13638 rack = (struct tcp_rack *)tp->t_fb_ptr; 13639 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 13640 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) && 13641 __predict_true(SEGQ_EMPTY(tp)) && 13642 __predict_true(th->th_seq == tp->rcv_nxt)) { 13643 if (tlen == 0) { 13644 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 13645 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 13646 return (0); 13647 } 13648 } else { 13649 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 13650 tiwin, nxt_pkt, iptos)) { 13651 return (0); 13652 } 13653 } 13654 } 13655 ctf_calc_rwin(so, tp); 13656 13657 if ((thflags & TH_RST) || 13658 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13659 return (__ctf_process_rst(m, th, so, tp, 13660 &rack->r_ctl.challenge_ack_ts, 13661 &rack->r_ctl.challenge_ack_cnt)); 13662 13663 /* 13664 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13665 * synchronized state. 13666 */ 13667 if (thflags & TH_SYN) { 13668 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13669 return (ret_val); 13670 } 13671 /* 13672 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13673 * it's less than ts_recent, drop it. 13674 */ 13675 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13676 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13677 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13678 return (ret_val); 13679 } 13680 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 13681 &rack->r_ctl.challenge_ack_ts, 13682 &rack->r_ctl.challenge_ack_cnt)) { 13683 return (ret_val); 13684 } 13685 /* 13686 * If last ACK falls within this segment's sequence numbers, record 13687 * its timestamp. NOTE: 1) That the test incorporates suggestions 13688 * from the latest proposal of the tcplw@cray.com list (Braden 13689 * 1993/04/26). 
2) That updating only on newer timestamps interferes 13690 * with our earlier PAWS tests, so this check should be solely 13691 * predicated on the sequence space of this segment. 3) That we 13692 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13693 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13694 * SEG.Len, This modified check allows us to overcome RFC1323's 13695 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13696 * p.869. In such cases, we can still calculate the RTT correctly 13697 * when RCV.NXT == Last.ACK.Sent. 13698 */ 13699 if ((to->to_flags & TOF_TS) != 0 && 13700 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13701 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13702 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13703 tp->ts_recent_age = tcp_ts_getticks(); 13704 tp->ts_recent = to->to_tsval; 13705 } 13706 /* 13707 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13708 * is on (half-synchronized state), then queue data for later 13709 * processing; else drop segment and return. 13710 */ 13711 if ((thflags & TH_ACK) == 0) { 13712 if (tp->t_flags & TF_NEEDSYN) { 13713 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13714 tiwin, thflags, nxt_pkt)); 13715 13716 } else if (tp->t_flags & TF_ACKNOW) { 13717 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13718 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13719 return (ret_val); 13720 } else { 13721 ctf_do_drop(m, NULL); 13722 return (0); 13723 } 13724 } 13725 /* 13726 * Ack processing. 13727 */ 13728 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 13729 return (ret_val); 13730 } 13731 if (sbavail(&so->so_snd)) { 13732 if (ctf_progress_timeout_check(tp, true)) { 13733 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 13734 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13735 return (1); 13736 } 13737 } 13738 /* State changes only happen in rack_process_data() */ 13739 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13740 tiwin, thflags, nxt_pkt)); 13741 } 13742 13743 /* 13744 * Return value of 1, the TCB is unlocked and most 13745 * likely gone, return value of 0, the TCP is still 13746 * locked. 13747 */ 13748 static int 13749 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 13750 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13751 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13752 { 13753 int32_t ret_val = 0; 13754 struct tcp_rack *rack; 13755 13756 rack = (struct tcp_rack *)tp->t_fb_ptr; 13757 ctf_calc_rwin(so, tp); 13758 if ((thflags & TH_RST) || 13759 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13760 return (__ctf_process_rst(m, th, so, tp, 13761 &rack->r_ctl.challenge_ack_ts, 13762 &rack->r_ctl.challenge_ack_cnt)); 13763 /* 13764 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13765 * synchronized state. 13766 */ 13767 if (thflags & TH_SYN) { 13768 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13769 return (ret_val); 13770 } 13771 /* 13772 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13773 * it's less than ts_recent, drop it. 
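 *
 * Editorial note: the PAWS test below boils down to the following
 * predicate (illustrative helper); when it fires, ctf_ts_check()
 * decides whether the segment is really dropped or whether a stale
 * ts_recent should instead be invalidated:
 *
 *	static int
 *	paws_reject(uint16_t to_flags, uint32_t tsval, uint32_t ts_recent)
 *	{
 *		return ((to_flags & TOF_TS) != 0 && ts_recent != 0 &&
 *		    TSTMP_LT(tsval, ts_recent));
 *	}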
13774 */ 13775 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13776 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13777 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13778 return (ret_val); 13779 } 13780 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 13781 &rack->r_ctl.challenge_ack_ts, 13782 &rack->r_ctl.challenge_ack_cnt)) { 13783 return (ret_val); 13784 } 13785 /* 13786 * If last ACK falls within this segment's sequence numbers, record 13787 * its timestamp. NOTE: 1) That the test incorporates suggestions 13788 * from the latest proposal of the tcplw@cray.com list (Braden 13789 * 1993/04/26). 2) That updating only on newer timestamps interferes 13790 * with our earlier PAWS tests, so this check should be solely 13791 * predicated on the sequence space of this segment. 3) That we 13792 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13793 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13794 * SEG.Len, This modified check allows us to overcome RFC1323's 13795 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13796 * p.869. In such cases, we can still calculate the RTT correctly 13797 * when RCV.NXT == Last.ACK.Sent. 13798 */ 13799 if ((to->to_flags & TOF_TS) != 0 && 13800 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13801 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13802 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13803 tp->ts_recent_age = tcp_ts_getticks(); 13804 tp->ts_recent = to->to_tsval; 13805 } 13806 /* 13807 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13808 * is on (half-synchronized state), then queue data for later 13809 * processing; else drop segment and return. 13810 */ 13811 if ((thflags & TH_ACK) == 0) { 13812 if (tp->t_flags & TF_NEEDSYN) { 13813 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13814 tiwin, thflags, nxt_pkt)); 13815 13816 } else if (tp->t_flags & TF_ACKNOW) { 13817 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13818 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13819 return (ret_val); 13820 } else { 13821 ctf_do_drop(m, NULL); 13822 return (0); 13823 } 13824 } 13825 /* 13826 * Ack processing. 
13827 */ 13828 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 13829 return (ret_val); 13830 } 13831 if (sbavail(&so->so_snd)) { 13832 if (ctf_progress_timeout_check(tp, true)) { 13833 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13834 tp, tick, PROGRESS_DROP, __LINE__); 13835 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13836 return (1); 13837 } 13838 } 13839 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13840 tiwin, thflags, nxt_pkt)); 13841 } 13842 13843 static int 13844 rack_check_data_after_close(struct mbuf *m, 13845 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 13846 { 13847 struct tcp_rack *rack; 13848 13849 rack = (struct tcp_rack *)tp->t_fb_ptr; 13850 if (rack->rc_allow_data_af_clo == 0) { 13851 close_now: 13852 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 13853 /* tcp_close will kill the inp pre-log the Reset */ 13854 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 13855 tp = tcp_close(tp); 13856 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 13857 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 13858 return (1); 13859 } 13860 if (sbavail(&so->so_snd) == 0) 13861 goto close_now; 13862 /* Ok we allow data that is ignored and a followup reset */ 13863 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 13864 tp->rcv_nxt = th->th_seq + *tlen; 13865 tp->t_flags2 |= TF2_DROP_AF_DATA; 13866 rack->r_wanted_output = 1; 13867 *tlen = 0; 13868 return (0); 13869 } 13870 13871 /* 13872 * Return value of 1, the TCB is unlocked and most 13873 * likely gone, return value of 0, the TCP is still 13874 * locked. 13875 */ 13876 static int 13877 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 13878 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 13879 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 13880 { 13881 int32_t ret_val = 0; 13882 int32_t ourfinisacked = 0; 13883 struct tcp_rack *rack; 13884 13885 rack = (struct tcp_rack *)tp->t_fb_ptr; 13886 ctf_calc_rwin(so, tp); 13887 13888 if ((thflags & TH_RST) || 13889 (tp->t_fin_is_rst && (thflags & TH_FIN))) 13890 return (__ctf_process_rst(m, th, so, tp, 13891 &rack->r_ctl.challenge_ack_ts, 13892 &rack->r_ctl.challenge_ack_cnt)); 13893 /* 13894 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 13895 * synchronized state. 13896 */ 13897 if (thflags & TH_SYN) { 13898 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 13899 return (ret_val); 13900 } 13901 /* 13902 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 13903 * it's less than ts_recent, drop it. 13904 */ 13905 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 13906 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 13907 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 13908 return (ret_val); 13909 } 13910 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 13911 &rack->r_ctl.challenge_ack_ts, 13912 &rack->r_ctl.challenge_ack_cnt)) { 13913 return (ret_val); 13914 } 13915 /* 13916 * If new data are received on a connection after the user processes 13917 * are gone, then RST the other end. 13918 */ 13919 if ((tp->t_flags & TF_CLOSED) && tlen && 13920 rack_check_data_after_close(m, tp, &tlen, th, so)) 13921 return (1); 13922 /* 13923 * If last ACK falls within this segment's sequence numbers, record 13924 * its timestamp. NOTE: 1) That the test incorporates suggestions 13925 * from the latest proposal of the tcplw@cray.com list (Braden 13926 * 1993/04/26). 
2) That updating only on newer timestamps interferes 13927 * with our earlier PAWS tests, so this check should be solely 13928 * predicated on the sequence space of this segment. 3) That we 13929 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 13930 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 13931 * SEG.Len, This modified check allows us to overcome RFC1323's 13932 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 13933 * p.869. In such cases, we can still calculate the RTT correctly 13934 * when RCV.NXT == Last.ACK.Sent. 13935 */ 13936 if ((to->to_flags & TOF_TS) != 0 && 13937 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 13938 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 13939 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 13940 tp->ts_recent_age = tcp_ts_getticks(); 13941 tp->ts_recent = to->to_tsval; 13942 } 13943 /* 13944 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 13945 * is on (half-synchronized state), then queue data for later 13946 * processing; else drop segment and return. 13947 */ 13948 if ((thflags & TH_ACK) == 0) { 13949 if (tp->t_flags & TF_NEEDSYN) { 13950 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13951 tiwin, thflags, nxt_pkt)); 13952 } else if (tp->t_flags & TF_ACKNOW) { 13953 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 13954 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 13955 return (ret_val); 13956 } else { 13957 ctf_do_drop(m, NULL); 13958 return (0); 13959 } 13960 } 13961 /* 13962 * Ack processing. 13963 */ 13964 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 13965 return (ret_val); 13966 } 13967 if (ourfinisacked) { 13968 /* 13969 * If we can't receive any more data, then closing user can 13970 * proceed. Starting the timer is contrary to the 13971 * specification, but if we don't get a FIN we'll hang 13972 * forever. 13973 * 13974 * XXXjl: we should release the tp also, and use a 13975 * compressed state. 13976 */ 13977 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13978 soisdisconnected(so); 13979 tcp_timer_activate(tp, TT_2MSL, 13980 (tcp_fast_finwait2_recycle ? 13981 tcp_finwait2_timeout : 13982 TP_MAXIDLE(tp))); 13983 } 13984 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13985 } 13986 if (sbavail(&so->so_snd)) { 13987 if (ctf_progress_timeout_check(tp, true)) { 13988 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13989 tp, tick, PROGRESS_DROP, __LINE__); 13990 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13991 return (1); 13992 } 13993 } 13994 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 13995 tiwin, thflags, nxt_pkt)); 13996 } 13997 13998 /* 13999 * Return value of 1, the TCB is unlocked and most 14000 * likely gone, return value of 0, the TCP is still 14001 * locked. 
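 *
 * Editorial note: this return convention is shared by all of the
 * rack_do_*() state handlers -- a non-zero return means the segment
 * has been consumed and the tcpcb may already be freed, so the
 * caller must not touch tp again.  Hypothetical caller shape:
 *
 *	if (rack_do_closing(m, th, so, tp, to, drop_hdrlen, tlen,
 *	    tiwin, thflags, nxt_pkt, iptos))
 *		return;
 *	INP_WLOCK_ASSERT(tptoinpcb(tp));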
14002 */ 14003 static int 14004 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 14005 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14006 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14007 { 14008 int32_t ret_val = 0; 14009 int32_t ourfinisacked = 0; 14010 struct tcp_rack *rack; 14011 14012 rack = (struct tcp_rack *)tp->t_fb_ptr; 14013 ctf_calc_rwin(so, tp); 14014 14015 if ((thflags & TH_RST) || 14016 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14017 return (__ctf_process_rst(m, th, so, tp, 14018 &rack->r_ctl.challenge_ack_ts, 14019 &rack->r_ctl.challenge_ack_cnt)); 14020 /* 14021 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14022 * synchronized state. 14023 */ 14024 if (thflags & TH_SYN) { 14025 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14026 return (ret_val); 14027 } 14028 /* 14029 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14030 * it's less than ts_recent, drop it. 14031 */ 14032 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14033 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14034 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14035 return (ret_val); 14036 } 14037 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14038 &rack->r_ctl.challenge_ack_ts, 14039 &rack->r_ctl.challenge_ack_cnt)) { 14040 return (ret_val); 14041 } 14042 /* 14043 * If new data are received on a connection after the user processes 14044 * are gone, then RST the other end. 14045 */ 14046 if ((tp->t_flags & TF_CLOSED) && tlen && 14047 rack_check_data_after_close(m, tp, &tlen, th, so)) 14048 return (1); 14049 /* 14050 * If last ACK falls within this segment's sequence numbers, record 14051 * its timestamp. NOTE: 1) That the test incorporates suggestions 14052 * from the latest proposal of the tcplw@cray.com list (Braden 14053 * 1993/04/26). 2) That updating only on newer timestamps interferes 14054 * with our earlier PAWS tests, so this check should be solely 14055 * predicated on the sequence space of this segment. 3) That we 14056 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14057 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14058 * SEG.Len, This modified check allows us to overcome RFC1323's 14059 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14060 * p.869. In such cases, we can still calculate the RTT correctly 14061 * when RCV.NXT == Last.ACK.Sent. 14062 */ 14063 if ((to->to_flags & TOF_TS) != 0 && 14064 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14065 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14066 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14067 tp->ts_recent_age = tcp_ts_getticks(); 14068 tp->ts_recent = to->to_tsval; 14069 } 14070 /* 14071 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14072 * is on (half-synchronized state), then queue data for later 14073 * processing; else drop segment and return. 14074 */ 14075 if ((thflags & TH_ACK) == 0) { 14076 if (tp->t_flags & TF_NEEDSYN) { 14077 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14078 tiwin, thflags, nxt_pkt)); 14079 } else if (tp->t_flags & TF_ACKNOW) { 14080 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 14081 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 14082 return (ret_val); 14083 } else { 14084 ctf_do_drop(m, NULL); 14085 return (0); 14086 } 14087 } 14088 /* 14089 * Ack processing. 
14090 */ 14091 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 14092 return (ret_val); 14093 } 14094 if (ourfinisacked) { 14095 tcp_twstart(tp); 14096 m_freem(m); 14097 return (1); 14098 } 14099 if (sbavail(&so->so_snd)) { 14100 if (ctf_progress_timeout_check(tp, true)) { 14101 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 14102 tp, tick, PROGRESS_DROP, __LINE__); 14103 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14104 return (1); 14105 } 14106 } 14107 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14108 tiwin, thflags, nxt_pkt)); 14109 } 14110 14111 /* 14112 * Return value of 1, the TCB is unlocked and most 14113 * likely gone, return value of 0, the TCP is still 14114 * locked. 14115 */ 14116 static int 14117 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 14118 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14119 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14120 { 14121 int32_t ret_val = 0; 14122 int32_t ourfinisacked = 0; 14123 struct tcp_rack *rack; 14124 14125 rack = (struct tcp_rack *)tp->t_fb_ptr; 14126 ctf_calc_rwin(so, tp); 14127 14128 if ((thflags & TH_RST) || 14129 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14130 return (__ctf_process_rst(m, th, so, tp, 14131 &rack->r_ctl.challenge_ack_ts, 14132 &rack->r_ctl.challenge_ack_cnt)); 14133 /* 14134 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14135 * synchronized state. 14136 */ 14137 if (thflags & TH_SYN) { 14138 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14139 return (ret_val); 14140 } 14141 /* 14142 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14143 * it's less than ts_recent, drop it. 14144 */ 14145 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14146 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14147 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14148 return (ret_val); 14149 } 14150 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14151 &rack->r_ctl.challenge_ack_ts, 14152 &rack->r_ctl.challenge_ack_cnt)) { 14153 return (ret_val); 14154 } 14155 /* 14156 * If new data are received on a connection after the user processes 14157 * are gone, then RST the other end. 14158 */ 14159 if ((tp->t_flags & TF_CLOSED) && tlen && 14160 rack_check_data_after_close(m, tp, &tlen, th, so)) 14161 return (1); 14162 /* 14163 * If last ACK falls within this segment's sequence numbers, record 14164 * its timestamp. NOTE: 1) That the test incorporates suggestions 14165 * from the latest proposal of the tcplw@cray.com list (Braden 14166 * 1993/04/26). 2) That updating only on newer timestamps interferes 14167 * with our earlier PAWS tests, so this check should be solely 14168 * predicated on the sequence space of this segment. 3) That we 14169 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14170 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14171 * SEG.Len, This modified check allows us to overcome RFC1323's 14172 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14173 * p.869. In such cases, we can still calculate the RTT correctly 14174 * when RCV.NXT == Last.ACK.Sent. 
14175 */ 14176 if ((to->to_flags & TOF_TS) != 0 && 14177 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14178 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14179 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14180 tp->ts_recent_age = tcp_ts_getticks(); 14181 tp->ts_recent = to->to_tsval; 14182 } 14183 /* 14184 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14185 * is on (half-synchronized state), then queue data for later 14186 * processing; else drop segment and return. 14187 */ 14188 if ((thflags & TH_ACK) == 0) { 14189 if (tp->t_flags & TF_NEEDSYN) { 14190 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14191 tiwin, thflags, nxt_pkt)); 14192 } else if (tp->t_flags & TF_ACKNOW) { 14193 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 14194 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 14195 return (ret_val); 14196 } else { 14197 ctf_do_drop(m, NULL); 14198 return (0); 14199 } 14200 } 14201 /* 14202 * case TCPS_LAST_ACK: Ack processing. 14203 */ 14204 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 14205 return (ret_val); 14206 } 14207 if (ourfinisacked) { 14208 tp = tcp_close(tp); 14209 ctf_do_drop(m, tp); 14210 return (1); 14211 } 14212 if (sbavail(&so->so_snd)) { 14213 if (ctf_progress_timeout_check(tp, true)) { 14214 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 14215 tp, tick, PROGRESS_DROP, __LINE__); 14216 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14217 return (1); 14218 } 14219 } 14220 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14221 tiwin, thflags, nxt_pkt)); 14222 } 14223 14224 /* 14225 * Return value of 1, the TCB is unlocked and most 14226 * likely gone, return value of 0, the TCP is still 14227 * locked. 14228 */ 14229 static int 14230 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 14231 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 14232 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 14233 { 14234 int32_t ret_val = 0; 14235 int32_t ourfinisacked = 0; 14236 struct tcp_rack *rack; 14237 14238 rack = (struct tcp_rack *)tp->t_fb_ptr; 14239 ctf_calc_rwin(so, tp); 14240 14241 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 14242 if ((thflags & TH_RST) || 14243 (tp->t_fin_is_rst && (thflags & TH_FIN))) 14244 return (__ctf_process_rst(m, th, so, tp, 14245 &rack->r_ctl.challenge_ack_ts, 14246 &rack->r_ctl.challenge_ack_cnt)); 14247 /* 14248 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 14249 * synchronized state. 14250 */ 14251 if (thflags & TH_SYN) { 14252 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 14253 return (ret_val); 14254 } 14255 /* 14256 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 14257 * it's less than ts_recent, drop it. 14258 */ 14259 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 14260 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 14261 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 14262 return (ret_val); 14263 } 14264 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 14265 &rack->r_ctl.challenge_ack_ts, 14266 &rack->r_ctl.challenge_ack_cnt)) { 14267 return (ret_val); 14268 } 14269 /* 14270 * If new data are received on a connection after the user processes 14271 * are gone, then RST the other end. 
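 *
 * Editorial note: rack_check_data_after_close() (defined earlier in
 * this file) either closes the connection and answers with a reset,
 * or accepts-but-ignores the data (advancing rcv_nxt, setting
 * TF2_DROP_AF_DATA and scheduling output).  The choice reduces to
 * the following predicate (illustrative helper; non-zero selects the
 * close-and-reset path):
 *
 *	static int
 *	data_after_close_must_reset(int allow_data_af_clo, int snd_bytes_left)
 *	{
 *		return (allow_data_af_clo == 0 || snd_bytes_left == 0);
 *	}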
14272 */ 14273 if ((tp->t_flags & TF_CLOSED) && tlen && 14274 rack_check_data_after_close(m, tp, &tlen, th, so)) 14275 return (1); 14276 /* 14277 * If last ACK falls within this segment's sequence numbers, record 14278 * its timestamp. NOTE: 1) That the test incorporates suggestions 14279 * from the latest proposal of the tcplw@cray.com list (Braden 14280 * 1993/04/26). 2) That updating only on newer timestamps interferes 14281 * with our earlier PAWS tests, so this check should be solely 14282 * predicated on the sequence space of this segment. 3) That we 14283 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 14284 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 14285 * SEG.Len, This modified check allows us to overcome RFC1323's 14286 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 14287 * p.869. In such cases, we can still calculate the RTT correctly 14288 * when RCV.NXT == Last.ACK.Sent. 14289 */ 14290 if ((to->to_flags & TOF_TS) != 0 && 14291 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 14292 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 14293 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 14294 tp->ts_recent_age = tcp_ts_getticks(); 14295 tp->ts_recent = to->to_tsval; 14296 } 14297 /* 14298 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 14299 * is on (half-synchronized state), then queue data for later 14300 * processing; else drop segment and return. 14301 */ 14302 if ((thflags & TH_ACK) == 0) { 14303 if (tp->t_flags & TF_NEEDSYN) { 14304 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14305 tiwin, thflags, nxt_pkt)); 14306 } else if (tp->t_flags & TF_ACKNOW) { 14307 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 14308 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 14309 return (ret_val); 14310 } else { 14311 ctf_do_drop(m, NULL); 14312 return (0); 14313 } 14314 } 14315 /* 14316 * Ack processing. 
14317 */ 14318 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 14319 return (ret_val); 14320 } 14321 if (sbavail(&so->so_snd)) { 14322 if (ctf_progress_timeout_check(tp, true)) { 14323 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 14324 tp, tick, PROGRESS_DROP, __LINE__); 14325 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14326 return (1); 14327 } 14328 } 14329 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 14330 tiwin, thflags, nxt_pkt)); 14331 } 14332 14333 static void inline 14334 rack_clear_rate_sample(struct tcp_rack *rack) 14335 { 14336 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 14337 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 14338 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 14339 } 14340 14341 static void 14342 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 14343 { 14344 uint64_t bw_est, rate_wanted; 14345 int chged = 0; 14346 uint32_t user_max, orig_min, orig_max; 14347 14348 #ifdef TCP_REQUEST_TRK 14349 if (rack->rc_hybrid_mode && 14350 (rack->r_ctl.rc_pace_max_segs != 0) && 14351 (rack_hybrid_allow_set_maxseg == 1) && 14352 (rack->r_ctl.rc_last_sft != NULL)) { 14353 rack->r_ctl.rc_last_sft->hybrid_flags &= ~TCP_HYBRID_PACING_SETMSS; 14354 return; 14355 } 14356 #endif 14357 orig_min = rack->r_ctl.rc_pace_min_segs; 14358 orig_max = rack->r_ctl.rc_pace_max_segs; 14359 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 14360 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 14361 chged = 1; 14362 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 14363 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 14364 if (user_max != rack->r_ctl.rc_pace_max_segs) 14365 chged = 1; 14366 } 14367 if (rack->rc_force_max_seg) { 14368 rack->r_ctl.rc_pace_max_segs = user_max; 14369 } else if (rack->use_fixed_rate) { 14370 bw_est = rack_get_bw(rack); 14371 if ((rack->r_ctl.crte == NULL) || 14372 (bw_est != rack->r_ctl.crte->rate)) { 14373 rack->r_ctl.rc_pace_max_segs = user_max; 14374 } else { 14375 /* We are pacing right at the hardware rate */ 14376 uint32_t segsiz, pace_one; 14377 14378 if (rack_pace_one_seg || 14379 (rack->r_ctl.rc_user_set_min_segs == 1)) 14380 pace_one = 1; 14381 else 14382 pace_one = 0; 14383 segsiz = min(ctf_fixed_maxseg(tp), 14384 rack->r_ctl.rc_pace_min_segs); 14385 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor( 14386 tp, bw_est, segsiz, pace_one, 14387 rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); 14388 } 14389 } else if (rack->rc_always_pace) { 14390 if (rack->r_ctl.gp_bw || 14391 rack->r_ctl.init_rate) { 14392 /* We have a rate of some sort set */ 14393 uint32_t orig; 14394 14395 bw_est = rack_get_bw(rack); 14396 orig = rack->r_ctl.rc_pace_max_segs; 14397 if (fill_override) 14398 rate_wanted = *fill_override; 14399 else 14400 rate_wanted = rack_get_gp_est(rack); 14401 if (rate_wanted) { 14402 /* We have something */ 14403 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 14404 rate_wanted, 14405 ctf_fixed_maxseg(rack->rc_tp)); 14406 } else 14407 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 14408 if (orig != rack->r_ctl.rc_pace_max_segs) 14409 chged = 1; 14410 } else if ((rack->r_ctl.gp_bw == 0) && 14411 (rack->r_ctl.rc_pace_max_segs == 0)) { 14412 /* 14413 * If we have nothing limit us to bursting 14414 * out IW sized pieces. 
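 *
 * Editorial summary of how rc_pace_max_segs is chosen in this
 * function, assuming pacing is enabled (condensed, with
 * pseudo-parameters standing in for the stack's state; the real code
 * also records whether anything changed for logging):
 *
 *	static uint32_t
 *	pace_max_bytes(int force, int fixed_rate, int hw_rate_match, int have_bw,
 *	    uint32_t user_max, uint32_t hw_burst, uint32_t bw_len, uint32_t iw)
 *	{
 *		uint32_t max;
 *
 *		if (force)
 *			max = user_max;
 *		else if (fixed_rate)
 *			max = hw_rate_match ? hw_burst : user_max;
 *		else if (have_bw)
 *			max = bw_len;
 *		else
 *			max = iw;
 *		return (max > PACE_MAX_IP_BYTES ? PACE_MAX_IP_BYTES : max);
 *	}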
14415 */ 14416 chged = 1; 14417 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 14418 } 14419 } 14420 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 14421 chged = 1; 14422 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 14423 } 14424 if (chged) 14425 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 14426 } 14427 14428 14429 static void 14430 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack, int32_t flags) 14431 { 14432 #ifdef INET6 14433 struct ip6_hdr *ip6 = NULL; 14434 #endif 14435 #ifdef INET 14436 struct ip *ip = NULL; 14437 #endif 14438 struct udphdr *udp = NULL; 14439 14440 /* Ok lets fill in the fast block, it can only be used with no IP options! */ 14441 #ifdef INET6 14442 if (rack->r_is_v6) { 14443 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 14444 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 14445 if (tp->t_port) { 14446 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 14447 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 14448 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 14449 udp->uh_dport = tp->t_port; 14450 rack->r_ctl.fsb.udp = udp; 14451 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 14452 } else 14453 { 14454 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 14455 rack->r_ctl.fsb.udp = NULL; 14456 } 14457 tcpip_fillheaders(rack->rc_inp, 14458 tp->t_port, 14459 ip6, rack->r_ctl.fsb.th); 14460 rack->r_ctl.fsb.hoplimit = in6_selecthlim(rack->rc_inp, NULL); 14461 } else 14462 #endif /* INET6 */ 14463 #ifdef INET 14464 { 14465 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 14466 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 14467 if (tp->t_port) { 14468 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 14469 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 14470 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 14471 udp->uh_dport = tp->t_port; 14472 rack->r_ctl.fsb.udp = udp; 14473 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 14474 } else 14475 { 14476 rack->r_ctl.fsb.udp = NULL; 14477 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 14478 } 14479 tcpip_fillheaders(rack->rc_inp, 14480 tp->t_port, 14481 ip, rack->r_ctl.fsb.th); 14482 rack->r_ctl.fsb.hoplimit = tptoinpcb(tp)->inp_ip_ttl; 14483 } 14484 #endif 14485 rack->r_ctl.fsb.recwin = lmin(lmax(sbspace(&tptosocket(tp)->so_rcv), 0), 14486 (long)TCP_MAXWIN << tp->rcv_scale); 14487 rack->r_fsb_inited = 1; 14488 } 14489 14490 static int 14491 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 14492 { 14493 /* 14494 * Allocate the larger of spaces V6 if available else just 14495 * V4 and include udphdr (overbook) 14496 */ 14497 #ifdef INET6 14498 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 14499 #else 14500 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 14501 #endif 14502 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 14503 M_TCPFSB, M_NOWAIT|M_ZERO); 14504 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 14505 return (ENOMEM); 14506 } 14507 rack->r_fsb_inited = 0; 14508 return (0); 14509 } 14510 14511 static void 14512 rack_log_hystart_event(struct tcp_rack *rack, uint32_t high_seq, uint8_t mod) 14513 { 14514 /* 14515 * Types of logs (mod value) 14516 * 20 - Initial round setup 14517 * 21 - Rack declares a new round. 
14518 */ 14519 struct tcpcb *tp; 14520 14521 tp = rack->rc_tp; 14522 if (tcp_bblogging_on(tp)) { 14523 union tcp_log_stackspecific log; 14524 struct timeval tv; 14525 14526 memset(&log, 0, sizeof(log)); 14527 log.u_bbr.flex1 = rack->r_ctl.current_round; 14528 log.u_bbr.flex2 = rack->r_ctl.roundends; 14529 log.u_bbr.flex3 = high_seq; 14530 log.u_bbr.flex4 = tp->snd_max; 14531 log.u_bbr.flex8 = mod; 14532 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14533 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; 14534 log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes; 14535 TCP_LOG_EVENTP(tp, NULL, 14536 &tptosocket(tp)->so_rcv, 14537 &tptosocket(tp)->so_snd, 14538 TCP_HYSTART, 0, 14539 0, &log, false, &tv); 14540 } 14541 } 14542 14543 static void 14544 rack_deferred_init(struct tcpcb *tp, struct tcp_rack *rack) 14545 { 14546 rack->rack_deferred_inited = 1; 14547 rack->r_ctl.roundends = tp->snd_max; 14548 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 14549 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 14550 } 14551 14552 static void 14553 rack_init_retransmit_value(struct tcp_rack *rack, int ctl) 14554 { 14555 /* Retransmit bit controls. 14556 * 14557 * The setting of these values selects one of 14558 * three modes that dictate 14559 * how rack does retransmissions. Note this 14560 * applies in *any* mode, i.e. pacing on or off, DGP, 14561 * fixed rate pacing, or just bursting rack. 14562 * 14563 * 1 - Use full sized retransmits i.e. limit 14564 * the size to whatever the pace_max_segments 14565 * size is. 14566 * 14567 * 2 - Use pacer min granularity as a guide to 14568 * the size combined with the current calculated 14569 * goodput b/w measurement. So for example if 14570 * the goodput is measured at 20Mbps we would 14571 * calculate 8125 (pacer minimum 250usec in 14572 * that b/w) and then round it up to the next 14573 * MSS, i.e. for a 1448-byte MSS, 6 MSS or 8688 bytes. 14574 * 14575 * 0 - The rack default 1 MSS (anything not 0/1/2 14576 * falls here too if we are setting via rack_init()).
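 *
 * Editorial sketch of the mode-2 rounding step described above; with
 * the numbers from the example, 8125 bytes and a 1448-byte MSS round
 * up to 6 segments, i.e. 8688 bytes (helper is illustrative only):
 *
 *	static uint32_t
 *	round_up_to_mss(uint32_t bytes, uint32_t mss)
 *	{
 *		return (((bytes + mss - 1) / mss) * mss);
 *	}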
14577 * 14578 */ 14579 if (ctl == 1) { 14580 rack->full_size_rxt = 1; 14581 rack->shape_rxt_to_pacing_min = 0; 14582 } else if (ctl == 2) { 14583 rack->full_size_rxt = 0; 14584 rack->shape_rxt_to_pacing_min = 1; 14585 } else { 14586 rack->full_size_rxt = 0; 14587 rack->shape_rxt_to_pacing_min = 0; 14588 } 14589 } 14590 14591 static void 14592 rack_log_chg_info(struct tcpcb *tp, struct tcp_rack *rack, uint8_t mod, 14593 uint32_t flex1, 14594 uint32_t flex2, 14595 uint32_t flex3) 14596 { 14597 if (tcp_bblogging_on(rack->rc_tp)) { 14598 union tcp_log_stackspecific log; 14599 struct timeval tv; 14600 14601 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14602 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14603 log.u_bbr.flex8 = mod; 14604 log.u_bbr.flex1 = flex1; 14605 log.u_bbr.flex2 = flex2; 14606 log.u_bbr.flex3 = flex3; 14607 tcp_log_event(tp, NULL, NULL, NULL, TCP_CHG_QUERY, 0, 14608 0, &log, false, NULL, __func__, __LINE__, &tv); 14609 } 14610 } 14611 14612 static int 14613 rack_chg_query(struct tcpcb *tp, struct tcp_query_resp *reqr) 14614 { 14615 struct tcp_rack *rack; 14616 struct rack_sendmap *rsm; 14617 int i; 14618 14619 14620 rack = (struct tcp_rack *)tp->t_fb_ptr; 14621 switch (reqr->req) { 14622 case TCP_QUERY_SENDMAP: 14623 if ((reqr->req_param == tp->snd_max) || 14624 (tp->snd_max == tp->snd_una)){ 14625 /* Unlikely */ 14626 return (0); 14627 } 14628 rsm = tqhash_find(rack->r_ctl.tqh, reqr->req_param); 14629 if (rsm == NULL) { 14630 /* Can't find that seq -- unlikely */ 14631 return (0); 14632 } 14633 reqr->sendmap_start = rsm->r_start; 14634 reqr->sendmap_end = rsm->r_end; 14635 reqr->sendmap_send_cnt = rsm->r_rtr_cnt; 14636 reqr->sendmap_fas = rsm->r_fas; 14637 if (reqr->sendmap_send_cnt > SNDMAP_NRTX) 14638 reqr->sendmap_send_cnt = SNDMAP_NRTX; 14639 for(i=0; i<reqr->sendmap_send_cnt; i++) 14640 reqr->sendmap_time[i] = rsm->r_tim_lastsent[i]; 14641 reqr->sendmap_ack_arrival = rsm->r_ack_arrival; 14642 reqr->sendmap_flags = rsm->r_flags & SNDMAP_MASK; 14643 reqr->sendmap_r_rtr_bytes = rsm->r_rtr_bytes; 14644 reqr->sendmap_dupacks = rsm->r_dupack; 14645 rack_log_chg_info(tp, rack, 1, 14646 rsm->r_start, 14647 rsm->r_end, 14648 rsm->r_flags); 14649 return(1); 14650 break; 14651 case TCP_QUERY_TIMERS_UP: 14652 if (rack->r_ctl.rc_hpts_flags == 0) { 14653 /* no timers up */ 14654 return (0); 14655 } 14656 reqr->timer_hpts_flags = rack->r_ctl.rc_hpts_flags; 14657 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14658 reqr->timer_pacing_to = rack->r_ctl.rc_last_output_to; 14659 } 14660 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 14661 reqr->timer_timer_exp = rack->r_ctl.rc_timer_exp; 14662 } 14663 rack_log_chg_info(tp, rack, 2, 14664 rack->r_ctl.rc_hpts_flags, 14665 rack->r_ctl.rc_last_output_to, 14666 rack->r_ctl.rc_timer_exp); 14667 return (1); 14668 break; 14669 case TCP_QUERY_RACK_TIMES: 14670 /* Reordering items */ 14671 reqr->rack_num_dsacks = rack->r_ctl.num_dsack; 14672 reqr->rack_reorder_ts = rack->r_ctl.rc_reorder_ts; 14673 /* Timerstamps and timers */ 14674 reqr->rack_rxt_last_time = rack->r_ctl.rc_tlp_rxt_last_time; 14675 reqr->rack_min_rtt = rack->r_ctl.rc_rack_min_rtt; 14676 reqr->rack_rtt = rack->rc_rack_rtt; 14677 reqr->rack_tmit_time = rack->r_ctl.rc_rack_tmit_time; 14678 reqr->rack_srtt_measured = rack->rc_srtt_measure_made; 14679 /* PRR data */ 14680 reqr->rack_sacked = rack->r_ctl.rc_sacked; 14681 reqr->rack_holes_rxt = rack->r_ctl.rc_holes_rxt; 14682 reqr->rack_prr_delivered = rack->r_ctl.rc_prr_delivered; 14683 reqr->rack_prr_recovery_fs = 
rack->r_ctl.rc_prr_recovery_fs; 14684 reqr->rack_prr_sndcnt = rack->r_ctl.rc_prr_sndcnt; 14685 reqr->rack_prr_out = rack->r_ctl.rc_prr_out; 14686 /* TLP and persists info */ 14687 reqr->rack_tlp_out = rack->rc_tlp_in_progress; 14688 reqr->rack_tlp_cnt_out = rack->r_ctl.rc_tlp_cnt_out; 14689 if (rack->rc_in_persist) { 14690 reqr->rack_time_went_idle = rack->r_ctl.rc_went_idle_time; 14691 reqr->rack_in_persist = 1; 14692 } else { 14693 reqr->rack_time_went_idle = 0; 14694 reqr->rack_in_persist = 0; 14695 } 14696 if (rack->r_wanted_output) 14697 reqr->rack_wanted_output = 1; 14698 else 14699 reqr->rack_wanted_output = 0; 14700 return (1); 14701 break; 14702 default: 14703 return (-EINVAL); 14704 } 14705 } 14706 14707 static void 14708 rack_switch_failed(struct tcpcb *tp) 14709 { 14710 /* 14711 * This method gets called if a stack switch was 14712 * attempted and it failed. We are left 14713 * but our hpts timers were stopped and we 14714 * need to validate time units and t_flags2. 14715 */ 14716 struct tcp_rack *rack; 14717 struct timeval tv; 14718 uint32_t cts; 14719 uint32_t toval; 14720 struct hpts_diag diag; 14721 14722 rack = (struct tcp_rack *)tp->t_fb_ptr; 14723 tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC); 14724 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 14725 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 14726 else 14727 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 14728 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 14729 tp->t_flags2 |= TF2_MBUF_ACKCMP; 14730 if (tp->t_in_hpts > IHPTS_NONE) { 14731 /* Strange */ 14732 return; 14733 } 14734 cts = tcp_get_usecs(&tv); 14735 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14736 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 14737 toval = rack->r_ctl.rc_last_output_to - cts; 14738 } else { 14739 /* one slot please */ 14740 toval = HPTS_TICKS_PER_SLOT; 14741 } 14742 } else if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 14743 if (TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 14744 toval = rack->r_ctl.rc_timer_exp - cts; 14745 } else { 14746 /* one slot please */ 14747 toval = HPTS_TICKS_PER_SLOT; 14748 } 14749 } else 14750 toval = HPTS_TICKS_PER_SLOT; 14751 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(toval), 14752 __LINE__, &diag); 14753 rack_log_hpts_diag(rack, cts, &diag, &tv); 14754 } 14755 14756 static int 14757 rack_init_outstanding(struct tcpcb *tp, struct tcp_rack *rack, uint32_t us_cts, void *ptr) 14758 { 14759 struct rack_sendmap *rsm, *ersm; 14760 int insret __diagused; 14761 /* 14762 * When initing outstanding, we must be quite careful 14763 * to not refer to tp->t_fb_ptr. This has the old rack 14764 * pointer in it, not the "new" one (when we are doing 14765 * a stack switch). 
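 *
 * Editorial note: rebuilding the send map below takes one of two
 * shapes -- with no tfb_chg_query hook a single entry covering
 * snd_una..snd_max is synthesized, otherwise the old stack is walked
 * with TCP_QUERY_SENDMAP requests.  A self-contained skeleton of that
 * walk (generic types; the real loop also allocates and fills a
 * rack_sendmap per reply and uses the tcp_seq wrap-safe macros):
 *
 *	struct map_reply { uint32_t start; uint32_t end; };
 *	typedef int (*sendmap_query_t)(void *ctx, uint32_t at, struct map_reply *r);
 *
 *	static void
 *	walk_old_sendmap(void *ctx, sendmap_query_t query,
 *	    uint32_t snd_una, uint32_t snd_max)
 *	{
 *		struct map_reply r;
 *		uint32_t at = snd_una;
 *
 *		while (at != snd_max && query(ctx, at, &r) != 0)
 *			at = r.end;
 *	}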
14766 */ 14767 14768 14769 if (tp->t_fb->tfb_chg_query == NULL) { 14770 /* Create a send map for the current outstanding data */ 14771 14772 rsm = rack_alloc(rack); 14773 if (rsm == NULL) { 14774 uma_zfree(rack_pcb_zone, ptr); 14775 return (ENOMEM); 14776 } 14777 rsm->r_no_rtt_allowed = 1; 14778 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 14779 rsm->r_rtr_cnt = 1; 14780 rsm->r_rtr_bytes = 0; 14781 if (tp->t_flags & TF_SENTFIN) 14782 rsm->r_flags |= RACK_HAS_FIN; 14783 rsm->r_end = tp->snd_max; 14784 if (tp->snd_una == tp->iss) { 14785 /* The data space is one beyond snd_una */ 14786 rsm->r_flags |= RACK_HAS_SYN; 14787 rsm->r_start = tp->iss; 14788 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); 14789 } else 14790 rsm->r_start = tp->snd_una; 14791 rsm->r_dupack = 0; 14792 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 14793 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 14794 if (rsm->m) { 14795 rsm->orig_m_len = rsm->m->m_len; 14796 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 14797 } else { 14798 rsm->orig_m_len = 0; 14799 rsm->orig_t_space = 0; 14800 } 14801 } else { 14802 /* 14803 * This can happen if we have a stand-alone FIN or 14804 * SYN. 14805 */ 14806 rsm->m = NULL; 14807 rsm->orig_m_len = 0; 14808 rsm->orig_t_space = 0; 14809 rsm->soff = 0; 14810 } 14811 #ifdef INVARIANTS 14812 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 14813 panic("Insert in rb tree fails ret:%d rack:%p rsm:%p", 14814 insret, rack, rsm); 14815 } 14816 #else 14817 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 14818 #endif 14819 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 14820 rsm->r_in_tmap = 1; 14821 } else { 14822 /* We have a query mechanism, lets use it */ 14823 struct tcp_query_resp qr; 14824 int i; 14825 tcp_seq at; 14826 14827 at = tp->snd_una; 14828 while (at != tp->snd_max) { 14829 memset(&qr, 0, sizeof(qr)); 14830 qr.req = TCP_QUERY_SENDMAP; 14831 qr.req_param = at; 14832 if ((*tp->t_fb->tfb_chg_query)(tp, &qr) == 0) 14833 break; 14834 /* Move forward */ 14835 at = qr.sendmap_end; 14836 /* Now lets build the entry for this one */ 14837 rsm = rack_alloc(rack); 14838 if (rsm == NULL) { 14839 uma_zfree(rack_pcb_zone, ptr); 14840 return (ENOMEM); 14841 } 14842 memset(rsm, 0, sizeof(struct rack_sendmap)); 14843 /* Now configure the rsm and insert it */ 14844 rsm->r_dupack = qr.sendmap_dupacks; 14845 rsm->r_start = qr.sendmap_start; 14846 rsm->r_end = qr.sendmap_end; 14847 if (qr.sendmap_fas) 14848 rsm->r_fas = qr.sendmap_end; 14849 else 14850 rsm->r_fas = rsm->r_start - tp->snd_una; 14851 /* 14852 * We have carefully aligned the bits 14853 * so that all we have to do is copy over 14854 * the bits with the mask. 
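 *
 * A minimal sketch of the idea, with hypothetical flag values standing
 * in for the real RACK_ and SNDMAP_ defines:
 *
 *        #define EX_INTERNAL_ACKED   0x000001   // bit as used by the sendmap
 *        #define EX_QUERY_ACKED      0x000001   // same bit in the query ABI
 *        #define EX_QUERY_MASK       0x0fffff   // all bits both sides share
 *
 *        new_flags = query_flags & EX_QUERY_MASK;   // one masked copy
 *
 * Because each flag occupies the same bit position on both sides, no
 * per-flag translation is needed when importing the old stack's map.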
14855 */ 14856 rsm->r_flags = qr.sendmap_flags & SNDMAP_MASK; 14857 rsm->r_rtr_bytes = qr.sendmap_r_rtr_bytes; 14858 rsm->r_rtr_cnt = qr.sendmap_send_cnt; 14859 rsm->r_ack_arrival = qr.sendmap_ack_arrival; 14860 for (i=0 ; i<rsm->r_rtr_cnt; i++) 14861 rsm->r_tim_lastsent[i] = qr.sendmap_time[i]; 14862 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 14863 (rsm->r_start - tp->snd_una), &rsm->soff); 14864 if (rsm->m) { 14865 rsm->orig_m_len = rsm->m->m_len; 14866 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); 14867 } else { 14868 rsm->orig_m_len = 0; 14869 rsm->orig_t_space = 0; 14870 } 14871 #ifdef INVARIANTS 14872 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { 14873 panic("Insert in rb tree fails ret:%d rack:%p rsm:%p", 14874 insret, rack, rsm); 14875 } 14876 #else 14877 (void)tqhash_insert(rack->r_ctl.tqh, rsm); 14878 #endif 14879 if ((rsm->r_flags & RACK_ACKED) == 0) { 14880 TAILQ_FOREACH(ersm, &rack->r_ctl.rc_tmap, r_tnext) { 14881 if (ersm->r_tim_lastsent[(ersm->r_rtr_cnt-1)] > 14882 rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) { 14883 /* 14884 * If the existing ersm was sent at 14885 * a later time than the new one, then 14886 * the new one should appear ahead of this 14887 * ersm. 14888 */ 14889 rsm->r_in_tmap = 1; 14890 TAILQ_INSERT_BEFORE(ersm, rsm, r_tnext); 14891 break; 14892 } 14893 } 14894 if (rsm->r_in_tmap == 0) { 14895 /* 14896 * Not found so shove it on the tail. 14897 */ 14898 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 14899 rsm->r_in_tmap = 1; 14900 } 14901 } else { 14902 if ((rack->r_ctl.rc_sacklast == NULL) || 14903 (SEQ_GT(rsm->r_end, rack->r_ctl.rc_sacklast->r_end))) { 14904 rack->r_ctl.rc_sacklast = rsm; 14905 } 14906 } 14907 rack_log_chg_info(tp, rack, 3, 14908 rsm->r_start, 14909 rsm->r_end, 14910 rsm->r_flags); 14911 } 14912 } 14913 return (0); 14914 } 14915 14916 static void 14917 rack_translate_clamp_value(struct tcp_rack *rack, uint32_t optval) 14918 { 14919 /* 14920 * P = percent bits 14921 * F = fill cw bit -- Toggle fillcw if this bit is set. 14922 * S = Segment bits 14923 * M = set max segment bit 14924 * U = Unclamined 14925 * C = If set to non-zero override the max number of clamps. 14926 * L = Bit to indicate if clamped gets lower. 14927 * 14928 * CCCC CCCCC UUUU UULF PPPP PPPP PPPP PPPP 14929 * 14930 * The lowest 3 nibbles is the perentage .1 - 6553.5% 14931 * where 10.1 = 101, max 6553.5 14932 * The upper 16 bits holds some options. 14933 * The F bit will turn on fill-cw on if you are 14934 * not pacing, it will turn it off if dgp is on. 14935 * The L bit will change it so when clamped we get 14936 * the min(gp, lt-bw) for dgp. 
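 *
 * A worked example with a hypothetical setting (the decode below matches
 * the shifts and masks applied in this function):
 *
 *        uint32_t optval = (3 << 24) | (1 << 17) | 1005;   // 0x030203ed
 *
 *        uint16_t per   = optval & 0x0000ffff;             // 1005 -> 100.5%
 *        uint16_t opts  = (optval & 0xffff0000) >> 16;     // 0x0302
 *        uint8_t  maxcl = opts >> 8;                       // at most 3 clamps
 *        int      lbit  = (opts & 0x0002) != 0;            // 1: clamped gets lower
 *        int      fbit  = (opts & 0x0001) != 0;            // 0: fill-cw toggle off
 *
 * So this value arms the excess-rxt clamp at a 100.5% threshold, allows
 * at most three clamps, and selects min(gp, lt-bw) when clamped.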
14937 */ 14938 uint16_t per; 14939 14940 rack->r_ctl.saved_rxt_clamp_val = optval; 14941 per = optval & 0x0000ffff; 14942 rack->r_ctl.rxt_threshold = (uint64_t)(per & 0xffff); 14943 if (optval > 0) { 14944 uint16_t clamp_opt; 14945 14946 rack->excess_rxt_on = 1; 14947 clamp_opt = ((optval & 0xffff0000) >> 16); 14948 rack->r_ctl.clamp_options = clamp_opt & 0x00ff; 14949 if (clamp_opt & 0xff00) { 14950 /* A max clamps is also present */ 14951 rack->r_ctl.max_clamps = (clamp_opt >> 8); 14952 } else { 14953 /* No specified clamps means no limit */ 14954 rack->r_ctl.max_clamps = 0; 14955 } 14956 if (rack->r_ctl.clamp_options & 0x0002) { 14957 rack->r_clamped_gets_lower = 1; 14958 } else { 14959 rack->r_clamped_gets_lower = 0; 14960 } 14961 } else { 14962 /* Turn it off back to default */ 14963 rack->excess_rxt_on = 0; 14964 rack->r_clamped_gets_lower = 0; 14965 } 14966 14967 } 14968 14969 14970 static int32_t 14971 rack_init(struct tcpcb *tp, void **ptr) 14972 { 14973 struct inpcb *inp = tptoinpcb(tp); 14974 struct tcp_rack *rack = NULL; 14975 uint32_t iwin, snt, us_cts; 14976 int err, no_query; 14977 14978 /* 14979 * First are we the initial or are we a switched stack? 14980 * If we are initing via tcp_newtcppcb the ptr passed 14981 * will be tp->t_fb_ptr. If its a stack switch that 14982 * has a previous stack we can query it will be a local 14983 * var that will in the end be set into t_fb_ptr. 14984 */ 14985 if (ptr == &tp->t_fb_ptr) 14986 no_query = 1; 14987 else 14988 no_query = 0; 14989 *ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 14990 if (*ptr == NULL) { 14991 /* 14992 * We need to allocate memory but cant. The INP and INP_INFO 14993 * locks and they are recursive (happens during setup. So a 14994 * scheme to drop the locks fails :( 14995 * 14996 */ 14997 return(ENOMEM); 14998 } 14999 memset(*ptr, 0, sizeof(struct tcp_rack)); 15000 rack = (struct tcp_rack *)*ptr; 15001 rack->r_ctl.tqh = malloc(sizeof(struct tailq_hash), M_TCPFSB, M_NOWAIT); 15002 if (rack->r_ctl.tqh == NULL) { 15003 uma_zfree(rack_pcb_zone, rack); 15004 return(ENOMEM); 15005 } 15006 tqhash_init(rack->r_ctl.tqh); 15007 TAILQ_INIT(&rack->r_ctl.rc_free); 15008 TAILQ_INIT(&rack->r_ctl.rc_tmap); 15009 rack->rc_tp = tp; 15010 rack->rc_inp = inp; 15011 /* Set the flag */ 15012 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0; 15013 /* Probably not needed but lets be sure */ 15014 rack_clear_rate_sample(rack); 15015 /* 15016 * Save off the default values, socket options will poke 15017 * at these if pacing is not on or we have not yet 15018 * reached where pacing is on (gp_ready/fixed enabled). 15019 * When they get set into the CC module (when gp_ready 15020 * is enabled or we enable fixed) then we will set these 15021 * values into the CC and place in here the old values 15022 * so we have a restoral. Then we will set the flag 15023 * rc_pacing_cc_set. That way whenever we turn off pacing 15024 * or switch off this stack, we will know to go restore 15025 * the saved values. 15026 * 15027 * We specifically put into the beta the ecn value for pacing. 
15028 */ 15029 rack->rc_new_rnd_needed = 1; 15030 rack->r_ctl.rc_split_limit = V_tcp_map_split_limit; 15031 /* We want abe like behavior as well */ 15032 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; 15033 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 15034 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 15035 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 15036 if (rack_rxt_clamp_thresh) { 15037 rack_translate_clamp_value(rack, rack_rxt_clamp_thresh); 15038 rack->excess_rxt_on = 1; 15039 } 15040 if (rack_uses_full_dgp_in_rec) 15041 rack->r_ctl.full_dgp_in_rec = 1; 15042 if (rack_fill_cw_state) 15043 rack->rc_pace_to_cwnd = 1; 15044 if (rack_pacing_min_seg) 15045 rack->r_ctl.rc_user_set_min_segs = rack_pacing_min_seg; 15046 if (use_rack_rr) 15047 rack->use_rack_rr = 1; 15048 if (rack_dnd_default) { 15049 rack->rc_pace_dnd = 1; 15050 } 15051 if (V_tcp_delack_enabled) 15052 tp->t_delayed_ack = 1; 15053 else 15054 tp->t_delayed_ack = 0; 15055 #ifdef TCP_ACCOUNTING 15056 if (rack_tcp_accounting) { 15057 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 15058 } 15059 #endif 15060 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss; 15061 rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_per_upper_bound_ca; 15062 if (rack_enable_shared_cwnd) 15063 rack->rack_enable_scwnd = 1; 15064 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; 15065 rack->rc_user_set_max_segs = rack_hptsi_segments; 15066 rack->rc_force_max_seg = 0; 15067 TAILQ_INIT(&rack->r_ctl.opt_list); 15068 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn; 15069 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn; 15070 if (rack_hibeta_setting) { 15071 rack->rack_hibeta = 1; 15072 if ((rack_hibeta_setting >= 50) && 15073 (rack_hibeta_setting <= 100)) { 15074 rack->r_ctl.rc_saved_beta.beta = rack_hibeta_setting; 15075 rack->r_ctl.saved_hibeta = rack_hibeta_setting; 15076 } 15077 } else { 15078 rack->r_ctl.saved_hibeta = 50; 15079 } 15080 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 15081 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 15082 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 15083 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 15084 rack->r_ctl.rc_highest_us_rtt = 0; 15085 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 15086 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 15087 if (rack_use_cmp_acks) 15088 rack->r_use_cmp_ack = 1; 15089 if (rack_disable_prr) 15090 rack->rack_no_prr = 1; 15091 if (rack_gp_no_rec_chg) 15092 rack->rc_gp_no_rec_chg = 1; 15093 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 15094 rack->rc_always_pace = 1; 15095 if (rack->rack_hibeta) 15096 rack_set_cc_pacing(rack); 15097 } else 15098 rack->rc_always_pace = 0; 15099 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 15100 rack->r_mbuf_queue = 1; 15101 else 15102 rack->r_mbuf_queue = 0; 15103 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15104 if (rack_limits_scwnd) 15105 rack->r_limit_scw = 1; 15106 else 15107 rack->r_limit_scw = 0; 15108 rack_init_retransmit_value(rack, rack_rxt_controls); 15109 rack->rc_labc = V_tcp_abc_l_var; 15110 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; 15111 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 15112 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 15113 rack->r_ctl.rc_min_to = rack_min_to; 15114 microuptime(&rack->r_ctl.act_rcv_time); 15115 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 15116 rack->rc_init_win = rack_default_init_window; 15117 rack->r_ctl.rack_per_of_gp_ss = 
rack_per_of_gp_ss; 15118 if (rack_hw_up_only) 15119 rack->r_up_only = 1; 15120 if (rack_do_dyn_mul) { 15121 /* When dynamic adjustment is on CA needs to start at 100% */ 15122 rack->rc_gp_dyn_mul = 1; 15123 if (rack_do_dyn_mul >= 100) 15124 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 15125 } else 15126 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 15127 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 15128 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 15129 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 15130 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 15131 rack_probertt_filter_life); 15132 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 15133 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 15134 rack->r_ctl.rc_time_of_last_probertt = us_cts; 15135 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks(); 15136 rack->r_ctl.rc_time_probertt_starts = 0; 15137 if (rack_dsack_std_based & 0x1) { 15138 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 15139 rack->rc_rack_tmr_std_based = 1; 15140 } 15141 if (rack_dsack_std_based & 0x2) { 15142 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 15143 rack->rc_rack_use_dsack = 1; 15144 } 15145 /* We require at least one measurement, even if the sysctl is 0 */ 15146 if (rack_req_measurements) 15147 rack->r_ctl.req_measurements = rack_req_measurements; 15148 else 15149 rack->r_ctl.req_measurements = 1; 15150 if (rack_enable_hw_pacing) 15151 rack->rack_hdw_pace_ena = 1; 15152 if (rack_hw_rate_caps) 15153 rack->r_rack_hw_rate_caps = 1; 15154 #ifdef TCP_SAD_DETECTION 15155 rack->do_detection = 1; 15156 #else 15157 rack->do_detection = 0; 15158 #endif 15159 if (rack_non_rxt_use_cr) 15160 rack->rack_rec_nonrxt_use_cr = 1; 15161 /* Lets setup the fsb block */ 15162 err = rack_init_fsb(tp, rack); 15163 if (err) { 15164 uma_zfree(rack_pcb_zone, *ptr); 15165 *ptr = NULL; 15166 return (err); 15167 } 15168 if (rack_do_hystart) { 15169 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 15170 if (rack_do_hystart > 1) 15171 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 15172 if (rack_do_hystart > 2) 15173 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 15174 } 15175 /* Log what we will do with queries */ 15176 rack_log_chg_info(tp, rack, 7, 15177 no_query, 0, 0); 15178 if (rack_def_profile) 15179 rack_set_profile(rack, rack_def_profile); 15180 /* Cancel the GP measurement in progress */ 15181 tp->t_flags &= ~TF_GPUTINPROG; 15182 if ((tp->t_state != TCPS_CLOSED) && 15183 (tp->t_state != TCPS_TIME_WAIT)) { 15184 /* 15185 * We are already open, we may 15186 * need to adjust a few things. 15187 */ 15188 if (SEQ_GT(tp->snd_max, tp->iss)) 15189 snt = tp->snd_max - tp->iss; 15190 else 15191 snt = 0; 15192 iwin = rc_init_window(rack); 15193 if ((snt < iwin) && 15194 (no_query == 1)) { 15195 /* We are not past the initial window 15196 * on the first init (i.e. a stack switch 15197 * has not yet occured) so we need to make 15198 * sure cwnd and ssthresh is correct. 15199 */ 15200 if (tp->snd_cwnd < iwin) 15201 tp->snd_cwnd = iwin; 15202 /* 15203 * If we are within the initial window 15204 * we want ssthresh to be unlimited. Setting 15205 * it to the rwnd (which the default stack does 15206 * and older racks) is not really a good idea 15207 * since we want to be in SS and grow both the 15208 * cwnd and the rwnd (via dynamic rwnd growth). 
If 15209 * we set it to the rwnd then as the peer grows its 15210 * rwnd we will be stuck in CA and never hit SS. 15211 * 15212 * Its far better to raise it up high (this takes the 15213 * risk that there as been a loss already, probably 15214 * we should have an indicator in all stacks of loss 15215 * but we don't), but considering the normal use this 15216 * is a risk worth taking. The consequences of not 15217 * hitting SS are far worse than going one more time 15218 * into it early on (before we have sent even a IW). 15219 * It is highly unlikely that we will have had a loss 15220 * before getting the IW out. 15221 */ 15222 tp->snd_ssthresh = 0xffffffff; 15223 } 15224 /* 15225 * Any init based on sequence numbers 15226 * should be done in the deferred init path 15227 * since we can be CLOSED and not have them 15228 * inited when rack_init() is called. We 15229 * are not closed so lets call it. 15230 */ 15231 rack_deferred_init(tp, rack); 15232 } 15233 if ((tp->t_state != TCPS_CLOSED) && 15234 (tp->t_state != TCPS_TIME_WAIT) && 15235 (no_query == 0) && 15236 (tp->snd_una != tp->snd_max)) { 15237 err = rack_init_outstanding(tp, rack, us_cts, *ptr); 15238 if (err) { 15239 *ptr = NULL; 15240 return(err); 15241 } 15242 } 15243 rack_stop_all_timers(tp, rack); 15244 /* Setup all the t_flags2 */ 15245 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 15246 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 15247 else 15248 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 15249 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 15250 tp->t_flags2 |= TF2_MBUF_ACKCMP; 15251 /* 15252 * Timers in Rack are kept in microseconds so lets 15253 * convert any initial incoming variables 15254 * from ticks into usecs. Note that we 15255 * also change the values of t_srtt and t_rttvar, if 15256 * they are non-zero. They are kept with a 5 15257 * bit decimal so we have to carefully convert 15258 * these to get the full precision. 
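 *
 * A minimal sketch of the kind of conversion rack_convert_rtts() has to
 * do, assuming the classic encoding of t_srtt as ticks shifted left by
 * the 5 fractional bits and (hypothetically) hz = 1000:
 *
 *        uint64_t srtt_x32      = tp->t_srtt;        // e.g. 3216 = 100.5 ticks << 5
 *        uint64_t usec_per_tick = 1000000 / 1000;    // 1000 us at hz = 1000
 *
 *        // multiply first, shift last, so the fractional bits survive
 *        uint64_t srtt_us = (srtt_x32 * usec_per_tick) >> 5;   // 100500 us
 *
 * Shifting first (3216 >> 5 = 100 ticks) would silently discard half a
 * tick, about 500 us at hz = 1000, which is exactly the precision loss
 * the careful conversion avoids.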
15259 */ 15260 rack_convert_rtts(tp); 15261 rack_log_hystart_event(rack, rack->r_ctl.roundends, 20); 15262 if ((tptoinpcb(tp)->inp_flags & INP_DROPPED) == 0) { 15263 /* We do not start any timers on DROPPED connections */ 15264 if (tp->t_fb->tfb_chg_query == NULL) { 15265 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 15266 } else { 15267 struct tcp_query_resp qr; 15268 int ret; 15269 15270 memset(&qr, 0, sizeof(qr)); 15271 15272 /* Get the misc time stamps and such for rack */ 15273 qr.req = TCP_QUERY_RACK_TIMES; 15274 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); 15275 if (ret == 1) { 15276 rack->r_ctl.rc_reorder_ts = qr.rack_reorder_ts; 15277 rack->r_ctl.num_dsack = qr.rack_num_dsacks; 15278 rack->r_ctl.rc_tlp_rxt_last_time = qr.rack_rxt_last_time; 15279 rack->r_ctl.rc_rack_min_rtt = qr.rack_min_rtt; 15280 rack->rc_rack_rtt = qr.rack_rtt; 15281 rack->r_ctl.rc_rack_tmit_time = qr.rack_tmit_time; 15282 rack->r_ctl.rc_sacked = qr.rack_sacked; 15283 rack->r_ctl.rc_holes_rxt = qr.rack_holes_rxt; 15284 rack->r_ctl.rc_prr_delivered = qr.rack_prr_delivered; 15285 rack->r_ctl.rc_prr_recovery_fs = qr.rack_prr_recovery_fs; 15286 rack->r_ctl.rc_prr_sndcnt = qr.rack_prr_sndcnt; 15287 rack->r_ctl.rc_prr_out = qr.rack_prr_out; 15288 if (qr.rack_tlp_out) { 15289 rack->rc_tlp_in_progress = 1; 15290 rack->r_ctl.rc_tlp_cnt_out = qr.rack_tlp_cnt_out; 15291 } else { 15292 rack->rc_tlp_in_progress = 0; 15293 rack->r_ctl.rc_tlp_cnt_out = 0; 15294 } 15295 if (qr.rack_srtt_measured) 15296 rack->rc_srtt_measure_made = 1; 15297 if (qr.rack_in_persist == 1) { 15298 rack->r_ctl.rc_went_idle_time = qr.rack_time_went_idle; 15299 #ifdef NETFLIX_SHARED_CWND 15300 if (rack->r_ctl.rc_scw) { 15301 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 15302 rack->rack_scwnd_is_idle = 1; 15303 } 15304 #endif 15305 rack->r_ctl.persist_lost_ends = 0; 15306 rack->probe_not_answered = 0; 15307 rack->forced_ack = 0; 15308 tp->t_rxtshift = 0; 15309 rack->rc_in_persist = 1; 15310 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 15311 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 15312 } 15313 if (qr.rack_wanted_output) 15314 rack->r_wanted_output = 1; 15315 rack_log_chg_info(tp, rack, 6, 15316 qr.rack_min_rtt, 15317 qr.rack_rtt, 15318 qr.rack_reorder_ts); 15319 } 15320 /* Get the old stack timers */ 15321 qr.req_param = 0; 15322 qr.req = TCP_QUERY_TIMERS_UP; 15323 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); 15324 if (ret) { 15325 /* 15326 * non-zero return means we have a timer('s) 15327 * to start. Zero means no timer (no keepalive 15328 * I suppose). 
15329 */ 15330 uint32_t tov = 0; 15331 15332 rack->r_ctl.rc_hpts_flags = qr.timer_hpts_flags; 15333 if (qr.timer_hpts_flags & PACE_PKT_OUTPUT) { 15334 rack->r_ctl.rc_last_output_to = qr.timer_pacing_to; 15335 if (TSTMP_GT(qr.timer_pacing_to, us_cts)) 15336 tov = qr.timer_pacing_to - us_cts; 15337 else 15338 tov = HPTS_TICKS_PER_SLOT; 15339 } 15340 if (qr.timer_hpts_flags & PACE_TMR_MASK) { 15341 rack->r_ctl.rc_timer_exp = qr.timer_timer_exp; 15342 if (tov == 0) { 15343 if (TSTMP_GT(qr.timer_timer_exp, us_cts)) 15344 tov = qr.timer_timer_exp - us_cts; 15345 else 15346 tov = HPTS_TICKS_PER_SLOT; 15347 } 15348 } 15349 rack_log_chg_info(tp, rack, 4, 15350 rack->r_ctl.rc_hpts_flags, 15351 rack->r_ctl.rc_last_output_to, 15352 rack->r_ctl.rc_timer_exp); 15353 if (tov) { 15354 struct hpts_diag diag; 15355 15356 (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(tov), 15357 __LINE__, &diag); 15358 rack_log_hpts_diag(rack, us_cts, &diag, &rack->r_ctl.act_rcv_time); 15359 } 15360 } 15361 } 15362 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, 15363 __LINE__, RACK_RTTS_INIT); 15364 } 15365 return (0); 15366 } 15367 15368 static int 15369 rack_handoff_ok(struct tcpcb *tp) 15370 { 15371 if ((tp->t_state == TCPS_CLOSED) || 15372 (tp->t_state == TCPS_LISTEN)) { 15373 /* Sure no problem though it may not stick */ 15374 return (0); 15375 } 15376 if ((tp->t_state == TCPS_SYN_SENT) || 15377 (tp->t_state == TCPS_SYN_RECEIVED)) { 15378 /* 15379 * We really don't know if you support sack, 15380 * you have to get to ESTAB or beyond to tell. 15381 */ 15382 return (EAGAIN); 15383 } 15384 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { 15385 /* 15386 * Rack will only send a FIN after all data is acknowledged. 15387 * So in this case we have more data outstanding. We can't 15388 * switch stacks until either all data and only the FIN 15389 * is left (in which case rack_init() now knows how 15390 * to deal with that) <or> all is acknowledged and we 15391 * are only left with incoming data, though why you 15392 * would want to switch to rack after all data is acknowledged 15393 * I have no idea (rrs)! 15394 */ 15395 return (EAGAIN); 15396 } 15397 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 15398 return (0); 15399 } 15400 /* 15401 * If we reach here we don't do SACK on this connection so we can 15402 * never do rack. 
15403 */ 15404 return (EINVAL); 15405 } 15406 15407 static void 15408 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 15409 { 15410 15411 if (tp->t_fb_ptr) { 15412 uint32_t cnt_free = 0; 15413 struct tcp_rack *rack; 15414 struct rack_sendmap *rsm; 15415 15416 tcp_handle_orphaned_packets(tp); 15417 tp->t_flags &= ~TF_FORCEDATA; 15418 rack = (struct tcp_rack *)tp->t_fb_ptr; 15419 rack_log_pacing_delay_calc(rack, 15420 0, 15421 0, 15422 0, 15423 rack_get_gp_est(rack), /* delRate */ 15424 rack_get_lt_bw(rack), /* rttProp */ 15425 20, __LINE__, NULL, 0); 15426 #ifdef NETFLIX_SHARED_CWND 15427 if (rack->r_ctl.rc_scw) { 15428 uint32_t limit; 15429 15430 if (rack->r_limit_scw) 15431 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 15432 else 15433 limit = 0; 15434 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 15435 rack->r_ctl.rc_scw_index, 15436 limit); 15437 rack->r_ctl.rc_scw = NULL; 15438 } 15439 #endif 15440 if (rack->r_ctl.fsb.tcp_ip_hdr) { 15441 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); 15442 rack->r_ctl.fsb.tcp_ip_hdr = NULL; 15443 rack->r_ctl.fsb.th = NULL; 15444 } 15445 if (rack->rc_always_pace) { 15446 tcp_decrement_paced_conn(); 15447 rack_undo_cc_pacing(rack); 15448 rack->rc_always_pace = 0; 15449 } 15450 /* Clean up any options if they were not applied */ 15451 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 15452 struct deferred_opt_list *dol; 15453 15454 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 15455 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 15456 free(dol, M_TCPDO); 15457 } 15458 /* rack does not use force data but other stacks may clear it */ 15459 if (rack->r_ctl.crte != NULL) { 15460 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 15461 rack->rack_hdrw_pacing = 0; 15462 rack->r_ctl.crte = NULL; 15463 } 15464 #ifdef TCP_BLACKBOX 15465 tcp_log_flowend(tp); 15466 #endif 15467 /* 15468 * Lets take a different approach to purging just 15469 * get each one and free it like a cum-ack would and 15470 * not use a foreach loop. 
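 *
 * A minimal sketch of the pattern used below (allocation-count
 * bookkeeping omitted): popping the minimum each iteration means the
 * element is already unlinked before it is freed, so no iterator is
 * ever left pointing at memory that went back to the zone.
 *
 *        struct rack_sendmap *rsm;
 *
 *        while ((rsm = tqhash_min(rack->r_ctl.tqh)) != NULL) {
 *                tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK);
 *                uma_zfree(rack_zone, rsm);
 *        }
 *
 * A plain foreach would have to read the next pointer out of an element
 * that was just handed back to the allocator.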
15471 */ 15472 rsm = tqhash_min(rack->r_ctl.tqh); 15473 while (rsm) { 15474 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); 15475 rack->r_ctl.rc_num_maps_alloced--; 15476 uma_zfree(rack_zone, rsm); 15477 rsm = tqhash_min(rack->r_ctl.tqh); 15478 } 15479 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15480 while (rsm) { 15481 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 15482 rack->r_ctl.rc_num_maps_alloced--; 15483 rack->rc_free_cnt--; 15484 cnt_free++; 15485 uma_zfree(rack_zone, rsm); 15486 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15487 } 15488 if ((rack->r_ctl.rc_num_maps_alloced > 0) && 15489 (tcp_bblogging_on(tp))) { 15490 union tcp_log_stackspecific log; 15491 struct timeval tv; 15492 15493 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15494 log.u_bbr.flex8 = 10; 15495 log.u_bbr.flex1 = rack->r_ctl.rc_num_maps_alloced; 15496 log.u_bbr.flex2 = rack->rc_free_cnt; 15497 log.u_bbr.flex3 = cnt_free; 15498 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15499 rsm = tqhash_min(rack->r_ctl.tqh); 15500 log.u_bbr.delRate = (uint64_t)rsm; 15501 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 15502 log.u_bbr.cur_del_rate = (uint64_t)rsm; 15503 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15504 log.u_bbr.pkt_epoch = __LINE__; 15505 (void)tcp_log_event(tp, NULL, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 15506 0, &log, false, NULL, NULL, 0, &tv); 15507 } 15508 KASSERT((rack->r_ctl.rc_num_maps_alloced == 0), 15509 ("rack:%p num_aloc:%u after freeing all?", 15510 rack, 15511 rack->r_ctl.rc_num_maps_alloced)); 15512 rack->rc_free_cnt = 0; 15513 free(rack->r_ctl.tqh, M_TCPFSB); 15514 rack->r_ctl.tqh = NULL; 15515 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 15516 tp->t_fb_ptr = NULL; 15517 } 15518 /* Make sure snd_nxt is correctly set */ 15519 tp->snd_nxt = tp->snd_max; 15520 } 15521 15522 static void 15523 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 15524 { 15525 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 15526 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0; 15527 } 15528 switch (tp->t_state) { 15529 case TCPS_SYN_SENT: 15530 rack->r_state = TCPS_SYN_SENT; 15531 rack->r_substate = rack_do_syn_sent; 15532 break; 15533 case TCPS_SYN_RECEIVED: 15534 rack->r_state = TCPS_SYN_RECEIVED; 15535 rack->r_substate = rack_do_syn_recv; 15536 break; 15537 case TCPS_ESTABLISHED: 15538 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15539 rack->r_state = TCPS_ESTABLISHED; 15540 rack->r_substate = rack_do_established; 15541 break; 15542 case TCPS_CLOSE_WAIT: 15543 rack->r_state = TCPS_CLOSE_WAIT; 15544 rack->r_substate = rack_do_close_wait; 15545 break; 15546 case TCPS_FIN_WAIT_1: 15547 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15548 rack->r_state = TCPS_FIN_WAIT_1; 15549 rack->r_substate = rack_do_fin_wait_1; 15550 break; 15551 case TCPS_CLOSING: 15552 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15553 rack->r_state = TCPS_CLOSING; 15554 rack->r_substate = rack_do_closing; 15555 break; 15556 case TCPS_LAST_ACK: 15557 rack_set_pace_segments(tp, rack, __LINE__, NULL); 15558 rack->r_state = TCPS_LAST_ACK; 15559 rack->r_substate = rack_do_lastack; 15560 break; 15561 case TCPS_FIN_WAIT_2: 15562 rack->r_state = TCPS_FIN_WAIT_2; 15563 rack->r_substate = rack_do_fin_wait_2; 15564 break; 15565 case TCPS_LISTEN: 15566 case TCPS_CLOSED: 15567 case TCPS_TIME_WAIT: 15568 default: 15569 break; 15570 }; 15571 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 15572 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 15573 15574 } 15575 15576 static void 15577 
rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 15578 { 15579 /* 15580 * We received an ack, and then did not 15581 * call send or were bounced out due to the 15582 * hpts was running. Now a timer is up as well, is 15583 * it the right timer? 15584 */ 15585 struct rack_sendmap *rsm; 15586 int tmr_up; 15587 15588 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 15589 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 15590 return; 15591 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 15592 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 15593 (tmr_up == PACE_TMR_RXT)) { 15594 /* Should be an RXT */ 15595 return; 15596 } 15597 if (rsm == NULL) { 15598 /* Nothing outstanding? */ 15599 if (tp->t_flags & TF_DELACK) { 15600 if (tmr_up == PACE_TMR_DELACK) 15601 /* We are supposed to have delayed ack up and we do */ 15602 return; 15603 } else if (sbavail(&tptosocket(tp)->so_snd) && (tmr_up == PACE_TMR_RXT)) { 15604 /* 15605 * if we hit enobufs then we would expect the possibility 15606 * of nothing outstanding and the RXT up (and the hptsi timer). 15607 */ 15608 return; 15609 } else if (((V_tcp_always_keepalive || 15610 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 15611 (tp->t_state <= TCPS_CLOSING)) && 15612 (tmr_up == PACE_TMR_KEEP) && 15613 (tp->snd_max == tp->snd_una)) { 15614 /* We should have keep alive up and we do */ 15615 return; 15616 } 15617 } 15618 if (SEQ_GT(tp->snd_max, tp->snd_una) && 15619 ((tmr_up == PACE_TMR_TLP) || 15620 (tmr_up == PACE_TMR_RACK) || 15621 (tmr_up == PACE_TMR_RXT))) { 15622 /* 15623 * Either a Rack, TLP or RXT is fine if we 15624 * have outstanding data. 15625 */ 15626 return; 15627 } else if (tmr_up == PACE_TMR_DELACK) { 15628 /* 15629 * If the delayed ack was going to go off 15630 * before the rtx/tlp/rack timer were going to 15631 * expire, then that would be the timer in control. 15632 * Note we don't check the time here trusting the 15633 * code is correct. 15634 */ 15635 return; 15636 } 15637 /* 15638 * Ok the timer originally started is not what we want now. 15639 * We will force the hpts to be stopped if any, and restart 15640 * with the slot set to what was in the saved slot. 
15641 */ 15642 if (tcp_in_hpts(rack->rc_tp)) { 15643 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 15644 uint32_t us_cts; 15645 15646 us_cts = tcp_get_usecs(NULL); 15647 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 15648 rack->r_early = 1; 15649 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 15650 } 15651 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 15652 } 15653 tcp_hpts_remove(rack->rc_tp); 15654 } 15655 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 15656 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 15657 } 15658 15659 15660 static void 15661 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts) 15662 { 15663 if ((SEQ_LT(tp->snd_wl1, seq) || 15664 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || 15665 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { 15666 /* keep track of pure window updates */ 15667 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) 15668 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 15669 tp->snd_wnd = tiwin; 15670 rack_validate_fo_sendwin_up(tp, rack); 15671 tp->snd_wl1 = seq; 15672 tp->snd_wl2 = ack; 15673 if (tp->snd_wnd > tp->max_sndwnd) 15674 tp->max_sndwnd = tp->snd_wnd; 15675 rack->r_wanted_output = 1; 15676 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { 15677 tp->snd_wnd = tiwin; 15678 rack_validate_fo_sendwin_up(tp, rack); 15679 tp->snd_wl1 = seq; 15680 tp->snd_wl2 = ack; 15681 } else { 15682 /* Not a valid win update */ 15683 return; 15684 } 15685 if (tp->snd_wnd > tp->max_sndwnd) 15686 tp->max_sndwnd = tp->snd_wnd; 15687 /* Do we exit persists? */ 15688 if ((rack->rc_in_persist != 0) && 15689 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 15690 rack->r_ctl.rc_pace_min_segs))) { 15691 rack_exit_persist(tp, rack, cts); 15692 } 15693 /* Do we enter persists? */ 15694 if ((rack->rc_in_persist == 0) && 15695 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 15696 TCPS_HAVEESTABLISHED(tp->t_state) && 15697 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 15698 sbavail(&tptosocket(tp)->so_snd) && 15699 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 15700 /* 15701 * Here the rwnd is less than 15702 * the pacing size, we are established, 15703 * nothing is outstanding, and there is 15704 * data to send. Enter persists. 
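 *
 * A worked example with hypothetical numbers: if the largest window the
 * peer has ever offered is 64000 bytes and the pacing minimum is 4380
 * bytes, the entry threshold is min(64000 / 2, 4380) = 4380. A window
 * update that drops snd_wnd to, say, 2000 bytes while snd_max == snd_una
 * and more than 2000 bytes sit in the send buffer enters persist here;
 * a later update raising the window back to at least 4380 satisfies the
 * exit check above and leaves persist again.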
15705 */ 15706 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, ack); 15707 } 15708 } 15709 15710 static void 15711 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq) 15712 { 15713 15714 if (tcp_bblogging_on(rack->rc_tp)) { 15715 struct inpcb *inp = tptoinpcb(tp); 15716 union tcp_log_stackspecific log; 15717 struct timeval ltv; 15718 char tcp_hdr_buf[60]; 15719 struct tcphdr *th; 15720 struct timespec ts; 15721 uint32_t orig_snd_una; 15722 uint8_t xx = 0; 15723 15724 #ifdef TCP_REQUEST_TRK 15725 struct tcp_sendfile_track *tcp_req; 15726 15727 if (SEQ_GT(ae->ack, tp->snd_una)) { 15728 tcp_req = tcp_req_find_req_for_seq(tp, (ae->ack-1)); 15729 } else { 15730 tcp_req = tcp_req_find_req_for_seq(tp, ae->ack); 15731 } 15732 #endif 15733 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15734 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 15735 if (rack->rack_no_prr == 0) 15736 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 15737 else 15738 log.u_bbr.flex1 = 0; 15739 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 15740 log.u_bbr.use_lt_bw <<= 1; 15741 log.u_bbr.use_lt_bw |= rack->r_might_revert; 15742 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 15743 log.u_bbr.bbr_state = rack->rc_free_cnt; 15744 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 15745 log.u_bbr.pkts_out = tp->t_maxseg; 15746 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 15747 log.u_bbr.flex7 = 1; 15748 log.u_bbr.lost = ae->flags; 15749 log.u_bbr.cwnd_gain = ackval; 15750 log.u_bbr.pacing_gain = 0x2; 15751 if (ae->flags & TSTMP_HDWR) { 15752 /* Record the hardware timestamp if present */ 15753 log.u_bbr.flex3 = M_TSTMP; 15754 ts.tv_sec = ae->timestamp / 1000000000; 15755 ts.tv_nsec = ae->timestamp % 1000000000; 15756 ltv.tv_sec = ts.tv_sec; 15757 ltv.tv_usec = ts.tv_nsec / 1000; 15758 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 15759 } else if (ae->flags & TSTMP_LRO) { 15760 /* Record the LRO the arrival timestamp */ 15761 log.u_bbr.flex3 = M_TSTMP_LRO; 15762 ts.tv_sec = ae->timestamp / 1000000000; 15763 ts.tv_nsec = ae->timestamp % 1000000000; 15764 ltv.tv_sec = ts.tv_sec; 15765 ltv.tv_usec = ts.tv_nsec / 1000; 15766 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 15767 } 15768 log.u_bbr.timeStamp = tcp_get_usecs(<v); 15769 /* Log the rcv time */ 15770 log.u_bbr.delRate = ae->timestamp; 15771 #ifdef TCP_REQUEST_TRK 15772 log.u_bbr.applimited = tp->t_tcpreq_closed; 15773 log.u_bbr.applimited <<= 8; 15774 log.u_bbr.applimited |= tp->t_tcpreq_open; 15775 log.u_bbr.applimited <<= 8; 15776 log.u_bbr.applimited |= tp->t_tcpreq_req; 15777 if (tcp_req) { 15778 /* Copy out any client req info */ 15779 /* seconds */ 15780 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); 15781 /* useconds */ 15782 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); 15783 log.u_bbr.rttProp = tcp_req->timestamp; 15784 log.u_bbr.cur_del_rate = tcp_req->start; 15785 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { 15786 log.u_bbr.flex8 |= 1; 15787 } else { 15788 log.u_bbr.flex8 |= 2; 15789 log.u_bbr.bw_inuse = tcp_req->end; 15790 } 15791 log.u_bbr.flex6 = tcp_req->start_seq; 15792 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { 15793 log.u_bbr.flex8 |= 4; 15794 log.u_bbr.epoch = tcp_req->end_seq; 15795 } 15796 } 15797 #endif 15798 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf)); 15799 th = (struct tcphdr *)tcp_hdr_buf; 15800 th->th_seq = ae->seq; 15801 th->th_ack = ae->ack; 15802 th->th_win = ae->win; 15803 /* Now fill in the ports */ 15804 th->th_sport = inp->inp_fport; 
15805 th->th_dport = inp->inp_lport; 15806 tcp_set_flags(th, ae->flags); 15807 /* Now do we have a timestamp option? */ 15808 if (ae->flags & HAS_TSTMP) { 15809 u_char *cp; 15810 uint32_t val; 15811 15812 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); 15813 cp = (u_char *)(th + 1); 15814 *cp = TCPOPT_NOP; 15815 cp++; 15816 *cp = TCPOPT_NOP; 15817 cp++; 15818 *cp = TCPOPT_TIMESTAMP; 15819 cp++; 15820 *cp = TCPOLEN_TIMESTAMP; 15821 cp++; 15822 val = htonl(ae->ts_value); 15823 bcopy((char *)&val, 15824 (char *)cp, sizeof(uint32_t)); 15825 val = htonl(ae->ts_echo); 15826 bcopy((char *)&val, 15827 (char *)(cp + 4), sizeof(uint32_t)); 15828 } else 15829 th->th_off = (sizeof(struct tcphdr) >> 2); 15830 15831 /* 15832 * For sane logging we need to play a little trick. 15833 * If the ack were fully processed we would have moved 15834 * snd_una to high_seq, but since compressed acks are 15835 * processed in two phases, at this point (logging) snd_una 15836 * won't be advanced. So we would see multiple acks showing 15837 * the advancement. We can prevent that by "pretending" that 15838 * snd_una was advanced and then un-advancing it so that the 15839 * logging code has the right value for tlb_snd_una. 15840 */ 15841 if (tp->snd_una != high_seq) { 15842 orig_snd_una = tp->snd_una; 15843 tp->snd_una = high_seq; 15844 xx = 1; 15845 } else 15846 xx = 0; 15847 TCP_LOG_EVENTP(tp, th, 15848 &tptosocket(tp)->so_rcv, 15849 &tptosocket(tp)->so_snd, TCP_LOG_IN, 0, 15850 0, &log, true, <v); 15851 if (xx) { 15852 tp->snd_una = orig_snd_una; 15853 } 15854 } 15855 15856 } 15857 15858 static void 15859 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts) 15860 { 15861 uint32_t us_rtt; 15862 /* 15863 * A persist or keep-alive was forced out, update our 15864 * min rtt time. Note now worry about lost responses. 15865 * When a subsequent keep-alive or persist times out 15866 * and forced_ack is still on, then the last probe 15867 * was not responded to. In such cases we have a 15868 * sysctl that controls the behavior. Either we apply 15869 * the rtt but with reduced confidence (0). Or we just 15870 * plain don't apply the rtt estimate. Having data flow 15871 * will clear the probe_not_answered flag i.e. cum-ack 15872 * move forward <or> exiting and reentering persists. 15873 */ 15874 15875 rack->forced_ack = 0; 15876 rack->rc_tp->t_rxtshift = 0; 15877 if ((rack->rc_in_persist && 15878 (tiwin == rack->rc_tp->snd_wnd)) || 15879 (rack->rc_in_persist == 0)) { 15880 /* 15881 * In persists only apply the RTT update if this is 15882 * a response to our window probe. And that 15883 * means the rwnd sent must match the current 15884 * snd_wnd. If it does not, then we got a 15885 * window update ack instead. For keepalive 15886 * we allow the answer no matter what the window. 15887 * 15888 * Note that if the probe_not_answered is set then 15889 * the forced_ack_ts is the oldest one i.e. the first 15890 * probe sent that might have been lost. This assures 15891 * us that if we do calculate an RTT it is longer not 15892 * some short thing. 
15893 */ 15894 if (rack->rc_in_persist) 15895 counter_u64_add(rack_persists_acks, 1); 15896 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 15897 if (us_rtt == 0) 15898 us_rtt = 1; 15899 if (rack->probe_not_answered == 0) { 15900 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 15901 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); 15902 } else { 15903 /* We have a retransmitted probe here too */ 15904 if (rack_apply_rtt_with_reduced_conf) { 15905 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 15906 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1); 15907 } 15908 } 15909 } 15910 } 15911 15912 static int 15913 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) 15914 { 15915 /* 15916 * Handle a "special" compressed ack mbuf. Each incoming 15917 * ack has only four possible dispositions: 15918 * 15919 * A) It moves the cum-ack forward 15920 * B) It is behind the cum-ack. 15921 * C) It is a window-update ack. 15922 * D) It is a dup-ack. 15923 * 15924 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES 15925 * in the incoming mbuf. We also need to still pay attention 15926 * to nxt_pkt since there may be another packet after this 15927 * one. 15928 */ 15929 #ifdef TCP_ACCOUNTING 15930 uint64_t ts_val; 15931 uint64_t rdstc; 15932 #endif 15933 int segsiz; 15934 struct timespec ts; 15935 struct tcp_rack *rack; 15936 struct tcp_ackent *ae; 15937 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack; 15938 int cnt, i, did_out, ourfinisacked = 0; 15939 struct tcpopt to_holder, *to = NULL; 15940 #ifdef TCP_ACCOUNTING 15941 int win_up_req = 0; 15942 #endif 15943 int nsegs = 0; 15944 int under_pacing = 0; 15945 int recovery = 0; 15946 #ifdef TCP_ACCOUNTING 15947 sched_pin(); 15948 #endif 15949 rack = (struct tcp_rack *)tp->t_fb_ptr; 15950 if (rack->gp_ready && 15951 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) 15952 under_pacing = 1; 15953 15954 if (rack->r_state != tp->t_state) 15955 rack_set_state(tp, rack); 15956 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 15957 (tp->t_flags & TF_GPUTINPROG)) { 15958 /* 15959 * We have a goodput in progress 15960 * and we have entered a late state. 15961 * Do we have enough data in the sb 15962 * to handle the GPUT request? 15963 */ 15964 uint32_t bytes; 15965 15966 bytes = tp->gput_ack - tp->gput_seq; 15967 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 15968 bytes += tp->gput_seq - tp->snd_una; 15969 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 15970 /* 15971 * There are not enough bytes in the socket 15972 * buffer that have been sent to cover this 15973 * measurement. Cancel it. 
15974 */ 15975 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 15976 rack->r_ctl.rc_gp_srtt /*flex1*/, 15977 tp->gput_seq, 15978 0, 0, 18, __LINE__, NULL, 0); 15979 tp->t_flags &= ~TF_GPUTINPROG; 15980 } 15981 } 15982 to = &to_holder; 15983 to->to_flags = 0; 15984 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 15985 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 15986 cnt = m->m_len / sizeof(struct tcp_ackent); 15987 counter_u64_add(rack_multi_single_eq, cnt); 15988 high_seq = tp->snd_una; 15989 the_win = tp->snd_wnd; 15990 win_seq = tp->snd_wl1; 15991 win_upd_ack = tp->snd_wl2; 15992 cts = tcp_tv_to_usectick(tv); 15993 ms_cts = tcp_tv_to_mssectick(tv); 15994 rack->r_ctl.rc_rcvtime = cts; 15995 segsiz = ctf_fixed_maxseg(tp); 15996 if ((rack->rc_gp_dyn_mul) && 15997 (rack->use_fixed_rate == 0) && 15998 (rack->rc_always_pace)) { 15999 /* Check in on probertt */ 16000 rack_check_probe_rtt(rack, cts); 16001 } 16002 for (i = 0; i < cnt; i++) { 16003 #ifdef TCP_ACCOUNTING 16004 ts_val = get_cyclecount(); 16005 #endif 16006 rack_clear_rate_sample(rack); 16007 ae = ((mtod(m, struct tcp_ackent *)) + i); 16008 if (ae->flags & TH_FIN) 16009 rack_log_pacing_delay_calc(rack, 16010 0, 16011 0, 16012 0, 16013 rack_get_gp_est(rack), /* delRate */ 16014 rack_get_lt_bw(rack), /* rttProp */ 16015 20, __LINE__, NULL, 0); 16016 /* Setup the window */ 16017 tiwin = ae->win << tp->snd_scale; 16018 if (tiwin > rack->r_ctl.rc_high_rwnd) 16019 rack->r_ctl.rc_high_rwnd = tiwin; 16020 /* figure out the type of ack */ 16021 if (SEQ_LT(ae->ack, high_seq)) { 16022 /* Case B*/ 16023 ae->ack_val_set = ACK_BEHIND; 16024 } else if (SEQ_GT(ae->ack, high_seq)) { 16025 /* Case A */ 16026 ae->ack_val_set = ACK_CUMACK; 16027 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ 16028 /* Case D */ 16029 ae->ack_val_set = ACK_DUPACK; 16030 } else { 16031 /* Case C */ 16032 ae->ack_val_set = ACK_RWND; 16033 } 16034 if (rack->sack_attack_disable > 0) { 16035 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__); 16036 rack->r_ctl.ack_during_sd++; 16037 } 16038 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 16039 /* Validate timestamp */ 16040 if (ae->flags & HAS_TSTMP) { 16041 /* Setup for a timestamp */ 16042 to->to_flags = TOF_TS; 16043 ae->ts_echo -= tp->ts_offset; 16044 to->to_tsecr = ae->ts_echo; 16045 to->to_tsval = ae->ts_value; 16046 /* 16047 * If echoed timestamp is later than the current time, fall back to 16048 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 16049 * were used when this connection was established. 
16050 */ 16051 if (TSTMP_GT(ae->ts_echo, ms_cts)) 16052 to->to_tsecr = 0; 16053 if (tp->ts_recent && 16054 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 16055 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 16056 #ifdef TCP_ACCOUNTING 16057 rdstc = get_cyclecount(); 16058 if (rdstc > ts_val) { 16059 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16060 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 16061 } 16062 } 16063 #endif 16064 continue; 16065 } 16066 } 16067 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 16068 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 16069 tp->ts_recent_age = tcp_ts_getticks(); 16070 tp->ts_recent = ae->ts_value; 16071 } 16072 } else { 16073 /* Setup for a no options */ 16074 to->to_flags = 0; 16075 } 16076 /* Update the rcv time and perform idle reduction possibly */ 16077 if (tp->t_idle_reduce && 16078 (tp->snd_max == tp->snd_una) && 16079 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 16080 counter_u64_add(rack_input_idle_reduces, 1); 16081 rack_cc_after_idle(rack, tp); 16082 } 16083 tp->t_rcvtime = ticks; 16084 /* Now what about ECN of a chain of pure ACKs? */ 16085 if (tcp_ecn_input_segment(tp, ae->flags, 0, 16086 tcp_packets_this_ack(tp, ae->ack), 16087 ae->codepoint)) 16088 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__); 16089 #ifdef TCP_ACCOUNTING 16090 /* Count for the specific type of ack in */ 16091 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16092 tp->tcp_cnt_counters[ae->ack_val_set]++; 16093 } 16094 #endif 16095 /* 16096 * Note how we could move up these in the determination 16097 * above, but we don't so that way the timestamp checks (and ECN) 16098 * is done first before we do any processing on the ACK. 16099 * The non-compressed path through the code has this 16100 * weakness (noted by @jtl) that it actually does some 16101 * processing before verifying the timestamp information. 16102 * We don't take that path here which is why we set 16103 * the ack_val_set first, do the timestamp and ecn 16104 * processing, and then look at what we have setup. 16105 */ 16106 if (ae->ack_val_set == ACK_BEHIND) { 16107 /* 16108 * Case B flag reordering, if window is not closed 16109 * or it could be a keep-alive or persists 16110 */ 16111 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { 16112 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 16113 if (rack->r_ctl.rc_reorder_ts == 0) 16114 rack->r_ctl.rc_reorder_ts = 1; 16115 } 16116 } else if (ae->ack_val_set == ACK_DUPACK) { 16117 /* Case D */ 16118 rack_strike_dupack(rack); 16119 } else if (ae->ack_val_set == ACK_RWND) { 16120 /* Case C */ 16121 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 16122 ts.tv_sec = ae->timestamp / 1000000000; 16123 ts.tv_nsec = ae->timestamp % 1000000000; 16124 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 16125 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 16126 } else { 16127 rack->r_ctl.act_rcv_time = *tv; 16128 } 16129 if (rack->forced_ack) { 16130 rack_handle_probe_response(rack, tiwin, 16131 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 16132 } 16133 #ifdef TCP_ACCOUNTING 16134 win_up_req = 1; 16135 #endif 16136 win_upd_ack = ae->ack; 16137 win_seq = ae->seq; 16138 the_win = tiwin; 16139 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts); 16140 } else { 16141 /* Case A */ 16142 if (SEQ_GT(ae->ack, tp->snd_max)) { 16143 /* 16144 * We just send an ack since the incoming 16145 * ack is beyond the largest seq we sent. 
16146 */ 16147 if ((tp->t_flags & TF_ACKNOW) == 0) { 16148 ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt); 16149 if (tp->t_flags && TF_ACKNOW) 16150 rack->r_wanted_output = 1; 16151 } 16152 } else { 16153 nsegs++; 16154 /* If the window changed setup to update */ 16155 if (tiwin != tp->snd_wnd) { 16156 win_upd_ack = ae->ack; 16157 win_seq = ae->seq; 16158 the_win = tiwin; 16159 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts); 16160 } 16161 #ifdef TCP_ACCOUNTING 16162 /* Account for the acks */ 16163 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16164 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); 16165 } 16166 #endif 16167 high_seq = ae->ack; 16168 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) 16169 rack_log_hystart_event(rack, high_seq, 8); 16170 /* Setup our act_rcv_time */ 16171 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 16172 ts.tv_sec = ae->timestamp / 1000000000; 16173 ts.tv_nsec = ae->timestamp % 1000000000; 16174 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 16175 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 16176 } else { 16177 rack->r_ctl.act_rcv_time = *tv; 16178 } 16179 rack_process_to_cumack(tp, rack, ae->ack, cts, to, 16180 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); 16181 #ifdef TCP_REQUEST_TRK 16182 rack_req_check_for_comp(rack, high_seq); 16183 #endif 16184 if (rack->rc_dsack_round_seen) { 16185 /* Is the dsack round over? */ 16186 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { 16187 /* Yes it is */ 16188 rack->rc_dsack_round_seen = 0; 16189 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 16190 } 16191 } 16192 } 16193 } 16194 /* And lets be sure to commit the rtt measurements for this ack */ 16195 tcp_rack_xmit_timer_commit(rack, tp); 16196 #ifdef TCP_ACCOUNTING 16197 rdstc = get_cyclecount(); 16198 if (rdstc > ts_val) { 16199 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16200 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 16201 if (ae->ack_val_set == ACK_CUMACK) 16202 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); 16203 } 16204 } 16205 #endif 16206 } 16207 #ifdef TCP_ACCOUNTING 16208 ts_val = get_cyclecount(); 16209 #endif 16210 /* Tend to any collapsed window */ 16211 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) { 16212 /* The peer collapsed the window */ 16213 rack_collapsed_window(rack, (tp->snd_max - high_seq), high_seq, __LINE__); 16214 } else if (rack->rc_has_collapsed) 16215 rack_un_collapse_window(rack, __LINE__); 16216 if ((rack->r_collapse_point_valid) && 16217 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point))) 16218 rack->r_collapse_point_valid = 0; 16219 acked_amount = acked = (high_seq - tp->snd_una); 16220 if (acked) { 16221 /* 16222 * The draft (v3) calls for us to use SEQ_GEQ, but that 16223 * causes issues when we are just going app limited. Lets 16224 * instead use SEQ_GT <or> where its equal but more data 16225 * is outstanding. 16226 * 16227 * Also make sure we are on the last ack of a series. We 16228 * have to have all the ack's processed in queue to know 16229 * if there is something left outstanding. 
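 *
 * In code form, the guard used right below is, roughly:
 *
 *        SEQ_GEQ(high_seq, rack->r_ctl.roundends) &&  // reached or passed the round end
 *        rack->rc_new_rnd_needed == 0 &&              // a round was actually in progress
 *        nxt_pkt == 0                                 // last ack of this compressed batch
 *
 * The rc_new_rnd_needed flag is what keeps the SEQ_GEQ form from firing
 * again and again while the cum-ack merely sits at the round end with
 * the connection application limited.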
16230 * 16231 */ 16232 if (SEQ_GEQ(high_seq, rack->r_ctl.roundends) && 16233 (rack->rc_new_rnd_needed == 0) && 16234 (nxt_pkt == 0)) { 16235 rack_log_hystart_event(rack, high_seq, 21); 16236 rack->r_ctl.current_round++; 16237 /* Force the next send to setup the next round */ 16238 rack->rc_new_rnd_needed = 1; 16239 if (CC_ALGO(tp)->newround != NULL) { 16240 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); 16241 } 16242 } 16243 /* 16244 * Clear the probe not answered flag 16245 * since cum-ack moved forward. 16246 */ 16247 rack->probe_not_answered = 0; 16248 if (rack->sack_attack_disable == 0) 16249 rack_do_decay(rack); 16250 if (acked >= segsiz) { 16251 /* 16252 * You only get credit for 16253 * MSS and greater (and you get extra 16254 * credit for larger cum-ack moves). 16255 */ 16256 int ac; 16257 16258 ac = acked / segsiz; 16259 rack->r_ctl.ack_count += ac; 16260 counter_u64_add(rack_ack_total, ac); 16261 } 16262 if (rack->r_ctl.ack_count > 0xfff00000) { 16263 /* 16264 * reduce the number to keep us under 16265 * a uint32_t. 16266 */ 16267 rack->r_ctl.ack_count /= 2; 16268 rack->r_ctl.sack_count /= 2; 16269 } 16270 if (tp->t_flags & TF_NEEDSYN) { 16271 /* 16272 * T/TCP: Connection was half-synchronized, and our SYN has 16273 * been ACK'd (so connection is now fully synchronized). Go 16274 * to non-starred state, increment snd_una for ACK of SYN, 16275 * and check if we can do window scaling. 16276 */ 16277 tp->t_flags &= ~TF_NEEDSYN; 16278 tp->snd_una++; 16279 acked_amount = acked = (high_seq - tp->snd_una); 16280 } 16281 if (acked > sbavail(&so->so_snd)) 16282 acked_amount = sbavail(&so->so_snd); 16283 #ifdef TCP_SAD_DETECTION 16284 /* 16285 * We only care on a cum-ack move if we are in a sack-disabled 16286 * state. We have already added in to the ack_count, and we never 16287 * would disable on a cum-ack move, so we only care to do the 16288 * detection if it may "undo" it, i.e. we were in disabled already. 16289 */ 16290 if (rack->sack_attack_disable) 16291 rack_do_detection(tp, rack, acked_amount, segsiz); 16292 #endif 16293 if (IN_FASTRECOVERY(tp->t_flags) && 16294 (rack->rack_no_prr == 0)) 16295 rack_update_prr(tp, rack, acked_amount, high_seq); 16296 if (IN_RECOVERY(tp->t_flags)) { 16297 if (SEQ_LT(high_seq, tp->snd_recover) && 16298 (SEQ_LT(high_seq, tp->snd_max))) { 16299 tcp_rack_partialack(tp); 16300 } else { 16301 rack_post_recovery(tp, high_seq); 16302 recovery = 1; 16303 } 16304 } 16305 /* Handle the rack-log-ack part (sendmap) */ 16306 if ((sbused(&so->so_snd) == 0) && 16307 (acked > acked_amount) && 16308 (tp->t_state >= TCPS_FIN_WAIT_1) && 16309 (tp->t_flags & TF_SENTFIN)) { 16310 /* 16311 * We must be sure our fin 16312 * was sent and acked (we can be 16313 * in FIN_WAIT_1 without having 16314 * sent the fin). 16315 */ 16316 ourfinisacked = 1; 16317 /* 16318 * Lets make sure snd_una is updated 16319 * since most likely acked_amount = 0 (it 16320 * should be). 16321 */ 16322 tp->snd_una = high_seq; 16323 } 16324 /* Did we make a RTO error? 
*/ 16325 if ((tp->t_flags & TF_PREVVALID) && 16326 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 16327 tp->t_flags &= ~TF_PREVVALID; 16328 if (tp->t_rxtshift == 1 && 16329 (int)(ticks - tp->t_badrxtwin) < 0) 16330 rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__); 16331 } 16332 /* Handle the data in the socket buffer */ 16333 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 16334 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 16335 if (acked_amount > 0) { 16336 struct mbuf *mfree; 16337 16338 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, recovery); 16339 SOCKBUF_LOCK(&so->so_snd); 16340 mfree = sbcut_locked(&so->so_snd, acked_amount); 16341 tp->snd_una = high_seq; 16342 /* Note we want to hold the sb lock through the sendmap adjust */ 16343 rack_adjust_sendmap_head(rack, &so->so_snd); 16344 /* Wake up the socket if we have room to write more */ 16345 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 16346 sowwakeup_locked(so); 16347 if ((recovery == 1) && 16348 (rack->excess_rxt_on) && 16349 (rack->r_cwnd_was_clamped == 0)) { 16350 do_rack_excess_rxt(tp, rack); 16351 } else if (rack->r_cwnd_was_clamped) 16352 do_rack_check_for_unclamp(tp, rack); 16353 m_freem(mfree); 16354 } 16355 /* update progress */ 16356 tp->t_acktime = ticks; 16357 rack_log_progress_event(rack, tp, tp->t_acktime, 16358 PROGRESS_UPDATE, __LINE__); 16359 /* Clear out shifts and such */ 16360 tp->t_rxtshift = 0; 16361 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 16362 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 16363 rack->rc_tlp_in_progress = 0; 16364 rack->r_ctl.rc_tlp_cnt_out = 0; 16365 /* Send recover and snd_nxt must be dragged along */ 16366 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 16367 tp->snd_recover = tp->snd_una; 16368 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 16369 tp->snd_nxt = tp->snd_una; 16370 /* 16371 * If the RXT timer is running we want to 16372 * stop it, so we can restart a TLP (or new RXT). 16373 */ 16374 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 16375 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16376 tp->snd_wl2 = high_seq; 16377 tp->t_dupacks = 0; 16378 if (under_pacing && 16379 (rack->use_fixed_rate == 0) && 16380 (rack->in_probe_rtt == 0) && 16381 rack->rc_gp_dyn_mul && 16382 rack->rc_always_pace) { 16383 /* Check if we are dragging bottom */ 16384 rack_check_bottom_drag(tp, rack, so); 16385 } 16386 if (tp->snd_una == tp->snd_max) { 16387 tp->t_flags &= ~TF_PREVVALID; 16388 rack->r_ctl.retran_during_recovery = 0; 16389 rack->rc_suspicious = 0; 16390 rack->r_ctl.dsack_byte_cnt = 0; 16391 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 16392 if (rack->r_ctl.rc_went_idle_time == 0) 16393 rack->r_ctl.rc_went_idle_time = 1; 16394 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 16395 if (sbavail(&tptosocket(tp)->so_snd) == 0) 16396 tp->t_acktime = 0; 16397 /* Set so we might enter persists... */ 16398 rack->r_wanted_output = 1; 16399 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16400 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 16401 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 16402 (sbavail(&so->so_snd) == 0) && 16403 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 16404 /* 16405 * The socket was gone and the 16406 * peer sent data (not now in the past), time to 16407 * reset him. 
16408 */ 16409 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 16410 /* tcp_close will kill the inp pre-log the Reset */ 16411 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 16412 #ifdef TCP_ACCOUNTING 16413 rdstc = get_cyclecount(); 16414 if (rdstc > ts_val) { 16415 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16416 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16417 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16418 } 16419 } 16420 #endif 16421 m_freem(m); 16422 tp = tcp_close(tp); 16423 if (tp == NULL) { 16424 #ifdef TCP_ACCOUNTING 16425 sched_unpin(); 16426 #endif 16427 return (1); 16428 } 16429 /* 16430 * We would normally do drop-with-reset which would 16431 * send back a reset. We can't since we don't have 16432 * all the needed bits. Instead lets arrange for 16433 * a call to tcp_output(). That way since we 16434 * are in the closed state we will generate a reset. 16435 * 16436 * Note if tcp_accounting is on we don't unpin since 16437 * we do that after the goto label. 16438 */ 16439 goto send_out_a_rst; 16440 } 16441 if ((sbused(&so->so_snd) == 0) && 16442 (tp->t_state >= TCPS_FIN_WAIT_1) && 16443 (tp->t_flags & TF_SENTFIN)) { 16444 /* 16445 * If we can't receive any more data, then closing user can 16446 * proceed. Starting the timer is contrary to the 16447 * specification, but if we don't get a FIN we'll hang 16448 * forever. 16449 * 16450 */ 16451 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 16452 soisdisconnected(so); 16453 tcp_timer_activate(tp, TT_2MSL, 16454 (tcp_fast_finwait2_recycle ? 16455 tcp_finwait2_timeout : 16456 TP_MAXIDLE(tp))); 16457 } 16458 if (ourfinisacked == 0) { 16459 /* 16460 * We don't change to fin-wait-2 if we have our fin acked 16461 * which means we are probably in TCPS_CLOSING. 16462 */ 16463 tcp_state_change(tp, TCPS_FIN_WAIT_2); 16464 } 16465 } 16466 } 16467 /* Wake up the socket if we have room to write more */ 16468 if (sbavail(&so->so_snd)) { 16469 rack->r_wanted_output = 1; 16470 if (ctf_progress_timeout_check(tp, true)) { 16471 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 16472 tp, tick, PROGRESS_DROP, __LINE__); 16473 /* 16474 * We cheat here and don't send a RST, we should send one 16475 * when the pacer drops the connection. 
16476 */ 16477 #ifdef TCP_ACCOUNTING 16478 rdstc = get_cyclecount(); 16479 if (rdstc > ts_val) { 16480 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16481 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16482 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16483 } 16484 } 16485 sched_unpin(); 16486 #endif 16487 (void)tcp_drop(tp, ETIMEDOUT); 16488 m_freem(m); 16489 return (1); 16490 } 16491 } 16492 if (ourfinisacked) { 16493 switch(tp->t_state) { 16494 case TCPS_CLOSING: 16495 #ifdef TCP_ACCOUNTING 16496 rdstc = get_cyclecount(); 16497 if (rdstc > ts_val) { 16498 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16499 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16500 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16501 } 16502 } 16503 sched_unpin(); 16504 #endif 16505 tcp_twstart(tp); 16506 m_freem(m); 16507 return (1); 16508 break; 16509 case TCPS_LAST_ACK: 16510 #ifdef TCP_ACCOUNTING 16511 rdstc = get_cyclecount(); 16512 if (rdstc > ts_val) { 16513 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16514 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16515 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16516 } 16517 } 16518 sched_unpin(); 16519 #endif 16520 tp = tcp_close(tp); 16521 ctf_do_drop(m, tp); 16522 return (1); 16523 break; 16524 case TCPS_FIN_WAIT_1: 16525 #ifdef TCP_ACCOUNTING 16526 rdstc = get_cyclecount(); 16527 if (rdstc > ts_val) { 16528 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16529 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16530 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16531 } 16532 } 16533 #endif 16534 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 16535 soisdisconnected(so); 16536 tcp_timer_activate(tp, TT_2MSL, 16537 (tcp_fast_finwait2_recycle ? 16538 tcp_finwait2_timeout : 16539 TP_MAXIDLE(tp))); 16540 } 16541 tcp_state_change(tp, TCPS_FIN_WAIT_2); 16542 break; 16543 default: 16544 break; 16545 } 16546 } 16547 if (rack->r_fast_output) { 16548 /* 16549 * We re doing fast output.. can we expand that? 
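 * (Sketch, not a spec: rack_gain_for_fastoutput() below is handed
 * the bytes this cum-ack just freed and, if more data is waiting in
 * the socket buffer, it can grow the prepared fast-send block by
 * roughly that amount so the fast path keeps going without a full
 * rack_output() rebuild.)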
16550 */ 16551 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 16552 } 16553 #ifdef TCP_ACCOUNTING 16554 rdstc = get_cyclecount(); 16555 if (rdstc > ts_val) { 16556 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16557 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 16558 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 16559 } 16560 } 16561 16562 } else if (win_up_req) { 16563 rdstc = get_cyclecount(); 16564 if (rdstc > ts_val) { 16565 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16566 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 16567 } 16568 } 16569 #endif 16570 } 16571 /* Now is there a next packet, if so we are done */ 16572 m_freem(m); 16573 did_out = 0; 16574 if (nxt_pkt) { 16575 #ifdef TCP_ACCOUNTING 16576 sched_unpin(); 16577 #endif 16578 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 16579 return (0); 16580 } 16581 rack_handle_might_revert(tp, rack); 16582 ctf_calc_rwin(so, tp); 16583 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 16584 send_out_a_rst: 16585 if (tcp_output(tp) < 0) { 16586 #ifdef TCP_ACCOUNTING 16587 sched_unpin(); 16588 #endif 16589 return (1); 16590 } 16591 did_out = 1; 16592 } 16593 if (tp->t_flags2 & TF2_HPTS_CALLS) 16594 tp->t_flags2 &= ~TF2_HPTS_CALLS; 16595 rack_free_trim(rack); 16596 #ifdef TCP_ACCOUNTING 16597 sched_unpin(); 16598 #endif 16599 rack_timer_audit(tp, rack, &so->so_snd); 16600 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 16601 return (0); 16602 } 16603 16604 #define TCP_LRO_TS_OPTION \ 16605 ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \ 16606 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP) 16607 16608 static int 16609 rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 16610 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, int32_t nxt_pkt, 16611 struct timeval *tv) 16612 { 16613 struct inpcb *inp = tptoinpcb(tp); 16614 struct socket *so = tptosocket(tp); 16615 #ifdef TCP_ACCOUNTING 16616 uint64_t ts_val; 16617 #endif 16618 int32_t thflags, retval, did_out = 0; 16619 int32_t way_out = 0; 16620 /* 16621 * cts - is the current time from tv (caller gets ts) in microseconds. 16622 * ms_cts - is the current time from tv in milliseconds. 16623 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. 16624 */ 16625 uint32_t cts, us_cts, ms_cts; 16626 uint32_t tiwin, high_seq; 16627 struct timespec ts; 16628 struct tcpopt to; 16629 struct tcp_rack *rack; 16630 struct rack_sendmap *rsm; 16631 int32_t prev_state = 0; 16632 int no_output = 0; 16633 int slot_remaining = 0; 16634 #ifdef TCP_ACCOUNTING 16635 int ack_val_set = 0xf; 16636 #endif 16637 int nsegs; 16638 16639 NET_EPOCH_ASSERT(); 16640 INP_WLOCK_ASSERT(inp); 16641 16642 /* 16643 * tv passed from common code is from either M_TSTMP_LRO or 16644 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 16645 */ 16646 rack = (struct tcp_rack *)tp->t_fb_ptr; 16647 if (rack->rack_deferred_inited == 0) { 16648 /* 16649 * If we are the connecting socket we will 16650 * hit rack_init() when no sequence numbers 16651 * are setup. This makes it so we must defer 16652 * some initialization. Call that now. 16653 */ 16654 rack_deferred_init(tp, rack); 16655 } 16656 /* 16657 * Check to see if we need to skip any output plans. This 16658 * can happen in the non-LRO path where we are pacing and 16659 * must process the ack coming in but need to defer sending 16660 * anything becase a pacing timer is running. 
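 * Roughly, output is deferred (no_output = 1) only when we are
 * always pacing, this ack is not allowed to send out data, a pacing
 * timer (PACE_PKT_OUTPUT) has not yet expired, and the segment
 * either arrives on a connection flagged TF2_DONT_SACK_QUEUE or
 * carries no TCP options beyond a plain timestamp (so it cannot
 * move the SACK scoreboard). If less than the pacer's minimum
 * granularity remains we send anyway and make the time up in the
 * pacing applied to that send.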
16661 */ 16662 us_cts = tcp_tv_to_usectick(tv); 16663 if (m->m_flags & M_ACKCMP) { 16664 /* 16665 * All compressed ack's are ack's by definition so 16666 * remove any ack required flag and then do the processing. 16667 */ 16668 rack->rc_ack_required = 0; 16669 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 16670 } 16671 thflags = tcp_get_flags(th); 16672 if ((rack->rc_always_pace == 1) && 16673 (rack->rc_ack_can_sendout_data == 0) && 16674 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16675 (TSTMP_LT(us_cts, rack->r_ctl.rc_last_output_to))) { 16676 /* 16677 * Ok conditions are right for queuing the packets 16678 * but we do have to check the flags in the inp, it 16679 * could be, if a sack is present, we want to be awoken and 16680 * so should process the packets. 16681 */ 16682 slot_remaining = rack->r_ctl.rc_last_output_to - us_cts; 16683 if (rack->rc_tp->t_flags2 & TF2_DONT_SACK_QUEUE) { 16684 no_output = 1; 16685 } else { 16686 /* 16687 * If there is no options, or just a 16688 * timestamp option, we will want to queue 16689 * the packets. This is the same that LRO does 16690 * and will need to change with accurate ECN. 16691 */ 16692 uint32_t *ts_ptr; 16693 int optlen; 16694 16695 optlen = (th->th_off << 2) - sizeof(struct tcphdr); 16696 ts_ptr = (uint32_t *)(th + 1); 16697 if ((optlen == 0) || 16698 ((optlen == TCPOLEN_TSTAMP_APPA) && 16699 (*ts_ptr == TCP_LRO_TS_OPTION))) 16700 no_output = 1; 16701 } 16702 if ((no_output == 1) && (slot_remaining < tcp_min_hptsi_time)) { 16703 /* 16704 * It is unrealistic to think we can pace in less than 16705 * the minimum granularity of the pacer (def:250usec). So 16706 * if we have less than that time remaining we should go 16707 * ahead and allow output to be "early". We will attempt to 16708 * make up for it in any pacing time we try to apply on 16709 * the outbound packet. 16710 */ 16711 no_output = 0; 16712 } 16713 } 16714 /* 16715 * If there is a RST or FIN lets dump out the bw 16716 * with a FIN the connection may go on but we 16717 * may not. 16718 */ 16719 if ((thflags & TH_FIN) || (thflags & TH_RST)) 16720 rack_log_pacing_delay_calc(rack, 16721 rack->r_ctl.gp_bw, 16722 0, 16723 0, 16724 rack_get_gp_est(rack), /* delRate */ 16725 rack_get_lt_bw(rack), /* rttProp */ 16726 20, __LINE__, NULL, 0); 16727 if (m->m_flags & M_ACKCMP) { 16728 panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp); 16729 } 16730 cts = tcp_tv_to_usectick(tv); 16731 ms_cts = tcp_tv_to_mssectick(tv); 16732 nsegs = m->m_pkthdr.lro_nsegs; 16733 counter_u64_add(rack_proc_non_comp_ack, 1); 16734 #ifdef TCP_ACCOUNTING 16735 sched_pin(); 16736 if (thflags & TH_ACK) 16737 ts_val = get_cyclecount(); 16738 #endif 16739 if ((m->m_flags & M_TSTMP) || 16740 (m->m_flags & M_TSTMP_LRO)) { 16741 mbuf_tstmp2timespec(m, &ts); 16742 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 16743 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 16744 } else 16745 rack->r_ctl.act_rcv_time = *tv; 16746 kern_prefetch(rack, &prev_state); 16747 prev_state = 0; 16748 /* 16749 * Unscale the window into a 32-bit value. For the SYN_SENT state 16750 * the scale is zero. 16751 */ 16752 tiwin = th->th_win << tp->snd_scale; 16753 #ifdef TCP_ACCOUNTING 16754 if (thflags & TH_ACK) { 16755 /* 16756 * We have a tradeoff here. We can either do what we are 16757 * doing i.e. pinning to this CPU and then doing the accounting 16758 * <or> we could do a critical enter, setup the rdtsc and cpu 16759 * as in below, and then validate we are on the same CPU on 16760 * exit. 
I have chosen to not do the critical enter since 16761 * that often will gain you a context switch, and instead lock 16762 * us (line above this if) to the same CPU with sched_pin(). This 16763 * means we may be context switched out for a higher priority 16764 * interrupt but we won't be moved to another CPU. 16765 * 16766 * If this occurs (which it won't very often since we most likely 16767 * are running this code in interrupt context and only a higher 16768 * priority will bump us ... clock?) we will falsely add in 16769 * the interrupt processing time on top of the ack processing 16770 * time. This is OK since it is a rare event. 16771 */ 16772 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin, 16773 ctf_fixed_maxseg(tp)); 16774 } 16775 #endif 16776 /* 16777 * Parse options on any incoming segment. 16778 */ 16779 memset(&to, 0, sizeof(to)); 16780 tcp_dooptions(&to, (u_char *)(th + 1), 16781 (th->th_off << 2) - sizeof(struct tcphdr), 16782 (thflags & TH_SYN) ? TO_SYN : 0); 16783 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 16784 __func__)); 16785 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 16786 __func__)); 16787 16788 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 16789 (tp->t_flags & TF_GPUTINPROG)) { 16790 /* 16791 * We have a goodput measurement in progress 16792 * and we have entered a late state. 16793 * Do we have enough data in the sb 16794 * to handle the GPUT request? 16795 */ 16796 uint32_t bytes; 16797 16798 bytes = tp->gput_ack - tp->gput_seq; 16799 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 16800 bytes += tp->gput_seq - tp->snd_una; 16801 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 16802 /* 16803 * There are not enough bytes in the socket 16804 * buffer that have been sent to cover this 16805 * measurement. Cancel it.
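 * Illustrative numbers: with snd_una = 1000, gput_seq = 2000,
 * gput_ack = 12000 and only 8000 bytes in the send buffer,
 * bytes = (12000 - 2000) + (2000 - 1000) = 11000 > 8000, so we
 * land here and give up on the measurement rather than let it
 * dangle over data that will never be sent.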
16806 */ 16807 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 16808 rack->r_ctl.rc_gp_srtt /*flex1*/, 16809 tp->gput_seq, 16810 0, 0, 18, __LINE__, NULL, 0); 16811 tp->t_flags &= ~TF_GPUTINPROG; 16812 } 16813 } 16814 high_seq = th->th_ack; 16815 if (tcp_bblogging_on(rack->rc_tp)) { 16816 union tcp_log_stackspecific log; 16817 struct timeval ltv; 16818 #ifdef TCP_REQUEST_TRK 16819 struct tcp_sendfile_track *tcp_req; 16820 16821 if (SEQ_GT(th->th_ack, tp->snd_una)) { 16822 tcp_req = tcp_req_find_req_for_seq(tp, (th->th_ack-1)); 16823 } else { 16824 tcp_req = tcp_req_find_req_for_seq(tp, th->th_ack); 16825 } 16826 #endif 16827 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16828 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 16829 if (rack->rack_no_prr == 0) 16830 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16831 else 16832 log.u_bbr.flex1 = 0; 16833 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 16834 log.u_bbr.use_lt_bw <<= 1; 16835 log.u_bbr.use_lt_bw |= rack->r_might_revert; 16836 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 16837 log.u_bbr.bbr_state = rack->rc_free_cnt; 16838 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16839 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 16840 log.u_bbr.flex3 = m->m_flags; 16841 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 16842 log.u_bbr.lost = thflags; 16843 log.u_bbr.pacing_gain = 0x1; 16844 #ifdef TCP_ACCOUNTING 16845 log.u_bbr.cwnd_gain = ack_val_set; 16846 #endif 16847 log.u_bbr.flex7 = 2; 16848 if (m->m_flags & M_TSTMP) { 16849 /* Record the hardware timestamp if present */ 16850 mbuf_tstmp2timespec(m, &ts); 16851 ltv.tv_sec = ts.tv_sec; 16852 ltv.tv_usec = ts.tv_nsec / 1000; 16853 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 16854 } else if (m->m_flags & M_TSTMP_LRO) { 16855 /* Record the LRO the arrival timestamp */ 16856 mbuf_tstmp2timespec(m, &ts); 16857 ltv.tv_sec = ts.tv_sec; 16858 ltv.tv_usec = ts.tv_nsec / 1000; 16859 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 16860 } 16861 log.u_bbr.timeStamp = tcp_get_usecs(<v); 16862 /* Log the rcv time */ 16863 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp; 16864 #ifdef TCP_REQUEST_TRK 16865 log.u_bbr.applimited = tp->t_tcpreq_closed; 16866 log.u_bbr.applimited <<= 8; 16867 log.u_bbr.applimited |= tp->t_tcpreq_open; 16868 log.u_bbr.applimited <<= 8; 16869 log.u_bbr.applimited |= tp->t_tcpreq_req; 16870 if (tcp_req) { 16871 /* Copy out any client req info */ 16872 /* seconds */ 16873 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); 16874 /* useconds */ 16875 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); 16876 log.u_bbr.rttProp = tcp_req->timestamp; 16877 log.u_bbr.cur_del_rate = tcp_req->start; 16878 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { 16879 log.u_bbr.flex8 |= 1; 16880 } else { 16881 log.u_bbr.flex8 |= 2; 16882 log.u_bbr.bw_inuse = tcp_req->end; 16883 } 16884 log.u_bbr.flex6 = tcp_req->start_seq; 16885 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { 16886 log.u_bbr.flex8 |= 4; 16887 log.u_bbr.epoch = tcp_req->end_seq; 16888 } 16889 } 16890 #endif 16891 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, 16892 tlen, &log, true, <v); 16893 } 16894 /* Remove ack required flag if set, we have one */ 16895 if (thflags & TH_ACK) 16896 rack->rc_ack_required = 0; 16897 if (rack->sack_attack_disable > 0) { 16898 rack->r_ctl.ack_during_sd++; 16899 rack_log_type_bbrsnd(rack, 0, 0, cts, tv, __LINE__); 16900 } 16901 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) { 16902 way_out = 4; 16903 retval = 0; 
16904 m_freem(m); 16905 goto done_with_input; 16906 } 16907 /* 16908 * If a segment with the ACK-bit set arrives in the SYN-SENT state 16909 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9. 16910 */ 16911 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 16912 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 16913 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 16914 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 16915 #ifdef TCP_ACCOUNTING 16916 sched_unpin(); 16917 #endif 16918 return (1); 16919 } 16920 /* 16921 * If timestamps were negotiated during SYN/ACK and a 16922 * segment without a timestamp is received, silently drop 16923 * the segment, unless it is a RST segment or missing timestamps are 16924 * tolerated. 16925 * See section 3.2 of RFC 7323. 16926 */ 16927 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 16928 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 16929 way_out = 5; 16930 retval = 0; 16931 m_freem(m); 16932 goto done_with_input; 16933 } 16934 16935 /* 16936 * Segment received on connection. Reset idle time and keep-alive 16937 * timer. XXX: This should be done after segment validation to 16938 * ignore broken/spoofed segs. 16939 */ 16940 if (tp->t_idle_reduce && 16941 (tp->snd_max == tp->snd_una) && 16942 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 16943 counter_u64_add(rack_input_idle_reduces, 1); 16944 rack_cc_after_idle(rack, tp); 16945 } 16946 tp->t_rcvtime = ticks; 16947 #ifdef STATS 16948 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 16949 #endif 16950 if (tiwin > rack->r_ctl.rc_high_rwnd) 16951 rack->r_ctl.rc_high_rwnd = tiwin; 16952 /* 16953 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 16954 * this to occur after we've validated the segment. 16955 */ 16956 if (tcp_ecn_input_segment(tp, thflags, tlen, 16957 tcp_packets_this_ack(tp, th->th_ack), 16958 iptos)) 16959 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__); 16960 16961 /* 16962 * If echoed timestamp is later than the current time, fall back to 16963 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 16964 * were used when this connection was established. 16965 */ 16966 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 16967 to.to_tsecr -= tp->ts_offset; 16968 if (TSTMP_GT(to.to_tsecr, ms_cts)) 16969 to.to_tsecr = 0; 16970 } 16971 16972 /* 16973 * If its the first time in we need to take care of options and 16974 * verify we can do SACK for rack! 16975 */ 16976 if (rack->r_state == 0) { 16977 /* Should be init'd by rack_init() */ 16978 KASSERT(rack->rc_inp != NULL, 16979 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 16980 if (rack->rc_inp == NULL) { 16981 rack->rc_inp = inp; 16982 } 16983 16984 /* 16985 * Process options only when we get SYN/ACK back. The SYN 16986 * case for incoming connections is handled in tcp_syncache. 16987 * According to RFC1323 the window field in a SYN (i.e., a 16988 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 16989 * this is traditional behavior, may need to be cleaned up. 16990 */ 16991 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 16992 /* Handle parallel SYN for ECN */ 16993 tcp_ecn_input_parallel_syn(tp, thflags, iptos); 16994 if ((to.to_flags & TOF_SCALE) && 16995 (tp->t_flags & TF_REQ_SCALE)) { 16996 tp->t_flags |= TF_RCVD_SCALE; 16997 tp->snd_scale = to.to_wscale; 16998 } else 16999 tp->t_flags &= ~TF_REQ_SCALE; 17000 /* 17001 * Initial send window. 
It will be updated with the 17002 * next incoming segment to the scaled value. 17003 */ 17004 tp->snd_wnd = th->th_win; 17005 rack_validate_fo_sendwin_up(tp, rack); 17006 if ((to.to_flags & TOF_TS) && 17007 (tp->t_flags & TF_REQ_TSTMP)) { 17008 tp->t_flags |= TF_RCVD_TSTMP; 17009 tp->ts_recent = to.to_tsval; 17010 tp->ts_recent_age = cts; 17011 } else 17012 tp->t_flags &= ~TF_REQ_TSTMP; 17013 if (to.to_flags & TOF_MSS) { 17014 tcp_mss(tp, to.to_mss); 17015 } 17016 if ((tp->t_flags & TF_SACK_PERMIT) && 17017 (to.to_flags & TOF_SACKPERM) == 0) 17018 tp->t_flags &= ~TF_SACK_PERMIT; 17019 if (IS_FASTOPEN(tp->t_flags)) { 17020 if (to.to_flags & TOF_FASTOPEN) { 17021 uint16_t mss; 17022 17023 if (to.to_flags & TOF_MSS) 17024 mss = to.to_mss; 17025 else 17026 if ((inp->inp_vflag & INP_IPV6) != 0) 17027 mss = TCP6_MSS; 17028 else 17029 mss = TCP_MSS; 17030 tcp_fastopen_update_cache(tp, mss, 17031 to.to_tfo_len, to.to_tfo_cookie); 17032 } else 17033 tcp_fastopen_disable_path(tp); 17034 } 17035 } 17036 /* 17037 * At this point we are at the initial call. Here we decide 17038 * if we are doing RACK or not. We do this by seeing if 17039 * TF_SACK_PERMIT is set and the sack-not-required is clear. 17040 * The code now does do dup-ack counting so if you don't 17041 * switch back you won't get rack & TLP, but you will still 17042 * get this stack. 17043 */ 17044 17045 if ((rack_sack_not_required == 0) && 17046 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 17047 tcp_switch_back_to_default(tp); 17048 (*tp->t_fb->tfb_tcp_do_segment)(tp, m, th, drop_hdrlen, 17049 tlen, iptos); 17050 #ifdef TCP_ACCOUNTING 17051 sched_unpin(); 17052 #endif 17053 return (1); 17054 } 17055 tcp_set_hpts(tp); 17056 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 17057 } 17058 if (thflags & TH_FIN) 17059 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 17060 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 17061 if ((rack->rc_gp_dyn_mul) && 17062 (rack->use_fixed_rate == 0) && 17063 (rack->rc_always_pace)) { 17064 /* Check in on probertt */ 17065 rack_check_probe_rtt(rack, us_cts); 17066 } 17067 rack_clear_rate_sample(rack); 17068 if ((rack->forced_ack) && 17069 ((tcp_get_flags(th) & TH_RST) == 0)) { 17070 rack_handle_probe_response(rack, tiwin, us_cts); 17071 } 17072 /* 17073 * This is the one exception case where we set the rack state 17074 * always. All other times (timers etc) we must have a rack-state 17075 * set (so we assure we have done the checks above for SACK). 17076 */ 17077 rack->r_ctl.rc_rcvtime = cts; 17078 if (rack->r_state != tp->t_state) 17079 rack_set_state(tp, rack); 17080 if (SEQ_GT(th->th_ack, tp->snd_una) && 17081 (rsm = tqhash_min(rack->r_ctl.tqh)) != NULL) 17082 kern_prefetch(rsm, &prev_state); 17083 prev_state = rack->r_state; 17084 if ((thflags & TH_RST) && 17085 ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 17086 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || 17087 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq))) { 17088 /* The connection will be killed by a reset check the tracepoint */ 17089 tcp_trace_point(rack->rc_tp, TCP_TP_RESET_RCV); 17090 } 17091 retval = (*rack->r_substate) (m, th, so, 17092 tp, &to, drop_hdrlen, 17093 tlen, tiwin, thflags, nxt_pkt, iptos); 17094 if (retval == 0) { 17095 /* 17096 * If retval is 1 the tcb is unlocked and most likely the tp 17097 * is gone. 
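 * So everything below runs only in the retval == 0 case, with the
 * inp write lock still held by the substate handler (the assert
 * below documents that).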
17098 */ 17099 INP_WLOCK_ASSERT(inp); 17100 if ((rack->rc_gp_dyn_mul) && 17101 (rack->rc_always_pace) && 17102 (rack->use_fixed_rate == 0) && 17103 rack->in_probe_rtt && 17104 (rack->r_ctl.rc_time_probertt_starts == 0)) { 17105 /* 17106 * If we are going for target, lets recheck before 17107 * we output. 17108 */ 17109 rack_check_probe_rtt(rack, us_cts); 17110 } 17111 if (rack->set_pacing_done_a_iw == 0) { 17112 /* How much has been acked? */ 17113 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 17114 /* We have enough to set in the pacing segment size */ 17115 rack->set_pacing_done_a_iw = 1; 17116 rack_set_pace_segments(tp, rack, __LINE__, NULL); 17117 } 17118 } 17119 tcp_rack_xmit_timer_commit(rack, tp); 17120 #ifdef TCP_ACCOUNTING 17121 /* 17122 * If we set the ack_val_se to what ack processing we are doing 17123 * we also want to track how many cycles we burned. Note 17124 * the bits after tcp_output we let be "free". This is because 17125 * we are also tracking the tcp_output times as well. Note the 17126 * use of 0xf here since we only have 11 counter (0 - 0xa) and 17127 * 0xf cannot be returned and is what we initialize it too to 17128 * indicate we are not doing the tabulations. 17129 */ 17130 if (ack_val_set != 0xf) { 17131 uint64_t crtsc; 17132 17133 crtsc = get_cyclecount(); 17134 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17135 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 17136 } 17137 } 17138 #endif 17139 if ((nxt_pkt == 0) && (no_output == 0)) { 17140 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 17141 do_output_now: 17142 if (tcp_output(tp) < 0) { 17143 #ifdef TCP_ACCOUNTING 17144 sched_unpin(); 17145 #endif 17146 return (1); 17147 } 17148 did_out = 1; 17149 } 17150 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 17151 rack_free_trim(rack); 17152 } else if ((no_output == 1) && 17153 (nxt_pkt == 0) && 17154 (tcp_in_hpts(rack->rc_tp) == 0)) { 17155 /* 17156 * We are not in hpts and we had a pacing timer up. Use 17157 * the remaining time (slot_remaining) to restart the timer. 17158 */ 17159 KASSERT ((slot_remaining != 0), ("slot remaining is zero for rack:%p tp:%p", rack, tp)); 17160 rack_start_hpts_timer(rack, tp, cts, slot_remaining, 0, 0); 17161 rack_free_trim(rack); 17162 } 17163 /* Clear the flag, it may have been cleared by output but we may not have */ 17164 if ((nxt_pkt == 0) && (tp->t_flags2 & TF2_HPTS_CALLS)) 17165 tp->t_flags2 &= ~TF2_HPTS_CALLS; 17166 /* Update any rounds needed */ 17167 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) 17168 rack_log_hystart_event(rack, high_seq, 8); 17169 /* 17170 * The draft (v3) calls for us to use SEQ_GEQ, but that 17171 * causes issues when we are just going app limited. Lets 17172 * instead use SEQ_GT <or> where its equal but more data 17173 * is outstanding. 17174 * 17175 * Also make sure we are on the last ack of a series. We 17176 * have to have all the ack's processed in queue to know 17177 * if there is something left outstanding. 
17178 */ 17179 if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends) && 17180 (rack->rc_new_rnd_needed == 0) && 17181 (nxt_pkt == 0)) { 17182 rack_log_hystart_event(rack, tp->snd_una, 21); 17183 rack->r_ctl.current_round++; 17184 /* Force the next send to setup the next round */ 17185 rack->rc_new_rnd_needed = 1; 17186 if (CC_ALGO(tp)->newround != NULL) { 17187 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); 17188 } 17189 } 17190 if ((nxt_pkt == 0) && 17191 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 17192 (SEQ_GT(tp->snd_max, tp->snd_una) || 17193 (tp->t_flags & TF_DELACK) || 17194 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 17195 (tp->t_state <= TCPS_CLOSING)))) { 17196 /* We could not send (probably in the hpts but stopped the timer earlier)? */ 17197 if ((tp->snd_max == tp->snd_una) && 17198 ((tp->t_flags & TF_DELACK) == 0) && 17199 (tcp_in_hpts(rack->rc_tp)) && 17200 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 17201 /* keep alive not needed if we are hptsi output yet */ 17202 ; 17203 } else { 17204 int late = 0; 17205 if (tcp_in_hpts(tp)) { 17206 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 17207 us_cts = tcp_get_usecs(NULL); 17208 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 17209 rack->r_early = 1; 17210 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 17211 } else 17212 late = 1; 17213 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 17214 } 17215 tcp_hpts_remove(tp); 17216 } 17217 if (late && (did_out == 0)) { 17218 /* 17219 * We are late in the sending 17220 * and we did not call the output 17221 * (this probably should not happen). 17222 */ 17223 goto do_output_now; 17224 } 17225 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 17226 } 17227 way_out = 1; 17228 } else if (nxt_pkt == 0) { 17229 /* Do we have the correct timer running? */ 17230 rack_timer_audit(tp, rack, &so->so_snd); 17231 way_out = 2; 17232 } 17233 done_with_input: 17234 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); 17235 if (did_out) 17236 rack->r_wanted_output = 0; 17237 } 17238 #ifdef TCP_ACCOUNTING 17239 sched_unpin(); 17240 #endif 17241 return (retval); 17242 } 17243 17244 static void 17245 rack_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 17246 int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 17247 { 17248 struct timeval tv; 17249 17250 /* First lets see if we have old packets */ 17251 if (!STAILQ_EMPTY(&tp->t_inqueue)) { 17252 if (ctf_do_queued_segments(tp, 1)) { 17253 m_freem(m); 17254 return; 17255 } 17256 } 17257 if (m->m_flags & M_TSTMP_LRO) { 17258 mbuf_tstmp2timeval(m, &tv); 17259 } else { 17260 /* Should not be should we kassert instead? */ 17261 tcp_get_usecs(&tv); 17262 } 17263 if (rack_do_segment_nounlock(tp, m, th, drop_hdrlen, tlen, iptos, 0, 17264 &tv) == 0) { 17265 INP_WUNLOCK(tptoinpcb(tp)); 17266 } 17267 } 17268 17269 struct rack_sendmap * 17270 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 17271 { 17272 struct rack_sendmap *rsm = NULL; 17273 int32_t idx; 17274 uint32_t srtt = 0, thresh = 0, ts_low = 0; 17275 int no_sack = 0; 17276 17277 /* Return the next guy to be re-transmitted */ 17278 if (tqhash_empty(rack->r_ctl.tqh)) { 17279 return (NULL); 17280 } 17281 if (tp->t_flags & TF_SENTFIN) { 17282 /* retran the end FIN? 
*/ 17283 return (NULL); 17284 } 17285 /* ok lets look at this one */ 17286 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 17287 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) { 17288 return (rsm); 17289 } 17290 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 17291 goto check_it; 17292 } 17293 rsm = rack_find_lowest_rsm(rack); 17294 if (rsm == NULL) { 17295 return (NULL); 17296 } 17297 check_it: 17298 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) || 17299 (rack->sack_attack_disable > 0)) { 17300 no_sack = 1; 17301 } 17302 if ((no_sack > 0) && 17303 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 17304 /* 17305 * No sack so we automatically do the 3 strikes and 17306 * retransmit (no rack timer would be started). 17307 */ 17308 return (rsm); 17309 } 17310 if (rsm->r_flags & RACK_ACKED) { 17311 return (NULL); 17312 } 17313 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 17314 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 17315 /* Its not yet ready */ 17316 return (NULL); 17317 } 17318 srtt = rack_grab_rtt(tp, rack); 17319 idx = rsm->r_rtr_cnt - 1; 17320 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 17321 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 17322 if ((tsused == ts_low) || 17323 (TSTMP_LT(tsused, ts_low))) { 17324 /* No time since sending */ 17325 return (NULL); 17326 } 17327 if ((tsused - ts_low) < thresh) { 17328 /* It has not been long enough yet */ 17329 return (NULL); 17330 } 17331 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 17332 ((rsm->r_flags & RACK_SACK_PASSED) && 17333 (rack->sack_attack_disable == 0))) { 17334 /* 17335 * We have passed the dup-ack threshold <or> 17336 * a SACK has indicated this is missing. 17337 * Note that if you are a declared attacker 17338 * it is only the dup-ack threshold that 17339 * will cause retransmits. 17340 */ 17341 /* log retransmit reason */ 17342 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 17343 rack->r_fast_output = 0; 17344 return (rsm); 17345 } 17346 return (NULL); 17347 } 17348 17349 static void 17350 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 17351 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 17352 int line, struct rack_sendmap *rsm, uint8_t quality) 17353 { 17354 if (tcp_bblogging_on(rack->rc_tp)) { 17355 union tcp_log_stackspecific log; 17356 struct timeval tv; 17357 17358 if (rack_verbose_logging == 0) { 17359 /* 17360 * We are not verbose screen out all but 17361 * ones we always want. 
17362 */ 17363 if ((method != 2) && 17364 (method != 3) && 17365 (method != 7) && 17366 (method != 14) && 17367 (method != 20)) { 17368 return; 17369 } 17370 } 17371 memset(&log, 0, sizeof(log)); 17372 log.u_bbr.flex1 = slot; 17373 log.u_bbr.flex2 = len; 17374 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 17375 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 17376 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 17377 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 17378 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 17379 log.u_bbr.use_lt_bw <<= 1; 17380 log.u_bbr.use_lt_bw |= rack->r_late; 17381 log.u_bbr.use_lt_bw <<= 1; 17382 log.u_bbr.use_lt_bw |= rack->r_early; 17383 log.u_bbr.use_lt_bw <<= 1; 17384 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 17385 log.u_bbr.use_lt_bw <<= 1; 17386 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 17387 log.u_bbr.use_lt_bw <<= 1; 17388 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 17389 log.u_bbr.use_lt_bw <<= 1; 17390 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 17391 log.u_bbr.use_lt_bw <<= 1; 17392 log.u_bbr.use_lt_bw |= rack->gp_ready; 17393 log.u_bbr.pkt_epoch = line; 17394 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 17395 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 17396 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 17397 log.u_bbr.bw_inuse = bw_est; 17398 log.u_bbr.delRate = bw; 17399 if (rack->r_ctl.gp_bw == 0) 17400 log.u_bbr.cur_del_rate = 0; 17401 else 17402 log.u_bbr.cur_del_rate = rack_get_bw(rack); 17403 log.u_bbr.rttProp = len_time; 17404 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 17405 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 17406 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 17407 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 17408 /* We are in slow start */ 17409 log.u_bbr.flex7 = 1; 17410 } else { 17411 /* we are on congestion avoidance */ 17412 log.u_bbr.flex7 = 0; 17413 } 17414 log.u_bbr.flex8 = method; 17415 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 17416 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 17417 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 17418 log.u_bbr.cwnd_gain <<= 1; 17419 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 17420 log.u_bbr.cwnd_gain <<= 1; 17421 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 17422 log.u_bbr.bbr_substate = quality; 17423 log.u_bbr.bbr_state = rack->dgp_on; 17424 log.u_bbr.bbr_state <<= 1; 17425 log.u_bbr.bbr_state |= rack->r_fill_less_agg; 17426 log.u_bbr.bbr_state <<= 1; 17427 log.u_bbr.bbr_state |= rack->rc_pace_to_cwnd; 17428 log.u_bbr.bbr_state <<= 2; 17429 log.u_bbr.bbr_state |= rack->r_pacing_discount; 17430 log.u_bbr.flex7 = ((rack->r_ctl.pacing_discount_amm << 1) | log.u_bbr.flex7); 17431 TCP_LOG_EVENTP(rack->rc_tp, NULL, 17432 &rack->rc_inp->inp_socket->so_rcv, 17433 &rack->rc_inp->inp_socket->so_snd, 17434 BBR_LOG_HPTSI_CALC, 0, 17435 0, &log, false, &tv); 17436 } 17437 } 17438 17439 static uint32_t 17440 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 17441 { 17442 uint32_t new_tso, user_max, pace_one; 17443 17444 user_max = rack->rc_user_set_max_segs * mss; 17445 if (rack->rc_force_max_seg) { 17446 return (user_max); 17447 } 17448 if (rack->use_fixed_rate && 17449 ((rack->r_ctl.crte == NULL) || 17450 (bw != rack->r_ctl.crte->rate))) { 17451 /* Use the user mss since we are not exactly matched */ 17452 return (user_max); 17453 } 17454 if (rack_pace_one_seg || 17455 (rack->r_ctl.rc_user_set_min_segs == 1)) 17456 pace_one = 1; 17457 else 17458 pace_one = 0; 17459 
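	/*
	 * Note on the clamps that follow: the burst computed by
	 * tcp_get_pacing_burst_size_w_divisor() is first capped at the
	 * user set maximum, then raised to any client suggested size
	 * (hybrid mode) and to the user set minimum segments, either of
	 * which can take the final value back above that maximum.
	 */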
17460 new_tso = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, bw, mss, 17461 pace_one, rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); 17462 if (new_tso > user_max) 17463 new_tso = user_max; 17464 if (rack->rc_hybrid_mode && rack->r_ctl.client_suggested_maxseg) { 17465 if (((uint32_t)rack->r_ctl.client_suggested_maxseg * mss) > new_tso) 17466 new_tso = (uint32_t)rack->r_ctl.client_suggested_maxseg * mss; 17467 } 17468 if (rack->r_ctl.rc_user_set_min_segs && 17469 ((rack->r_ctl.rc_user_set_min_segs * mss) > new_tso)) 17470 new_tso = rack->r_ctl.rc_user_set_min_segs * mss; 17471 return (new_tso); 17472 } 17473 17474 static uint64_t 17475 rack_arrive_at_discounted_rate(struct tcp_rack *rack, uint64_t window_input, uint32_t *rate_set, uint32_t *gain_b) 17476 { 17477 uint64_t reduced_win; 17478 uint32_t gain; 17479 17480 if (window_input < rc_init_window(rack)) { 17481 /* 17482 * The cwnd is collapsed to 17483 * nearly zero, maybe because of a time-out? 17484 * Lets drop back to the lt-bw. 17485 */ 17486 reduced_win = rack_get_lt_bw(rack); 17487 /* Set the flag so the caller knows its a rate and not a reduced window */ 17488 *rate_set = 1; 17489 gain = 100; 17490 } else if (IN_RECOVERY(rack->rc_tp->t_flags)) { 17491 /* 17492 * If we are in recover our cwnd needs to be less for 17493 * our pacing consideration. 17494 */ 17495 if (rack->rack_hibeta == 0) { 17496 reduced_win = window_input / 2; 17497 gain = 50; 17498 } else { 17499 reduced_win = window_input * rack->r_ctl.saved_hibeta; 17500 reduced_win /= 100; 17501 gain = rack->r_ctl.saved_hibeta; 17502 } 17503 } else { 17504 /* 17505 * Apply Timely factor to increase/decrease the 17506 * amount we are pacing at. 17507 */ 17508 gain = rack_get_output_gain(rack, NULL); 17509 if (gain > rack_gain_p5_ub) { 17510 gain = rack_gain_p5_ub; 17511 } 17512 reduced_win = window_input * gain; 17513 reduced_win /= 100; 17514 } 17515 if (gain_b != NULL) 17516 *gain_b = gain; 17517 /* 17518 * What is being returned here is a trimmed down 17519 * window values in all cases where rate_set is left 17520 * at 0. In one case we actually return the rate (lt_bw). 17521 * the "reduced_win" is returned as a slimmed down cwnd that 17522 * is then calculated by the caller into a rate when rate_set 17523 * is 0. 17524 */ 17525 return (reduced_win); 17526 } 17527 17528 static int32_t 17529 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced) 17530 { 17531 uint64_t lentim, fill_bw; 17532 17533 /* Lets first see if we are full, if so continue with normal rate */ 17534 rack->r_via_fill_cw = 0; 17535 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) 17536 return (slot); 17537 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) 17538 return (slot); 17539 if (rack->r_ctl.rc_last_us_rtt == 0) 17540 return (slot); 17541 if (rack->rc_pace_fill_if_rttin_range && 17542 (rack->r_ctl.rc_last_us_rtt >= 17543 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { 17544 /* The rtt is huge, N * smallest, lets not fill */ 17545 return (slot); 17546 } 17547 /* 17548 * first lets calculate the b/w based on the last us-rtt 17549 * and the the smallest send window. 
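 * Illustrative numbers: with an effective window of 125000 bytes
 * and rc_last_us_rtt of 10000 usec, the math below gives
 * fill_bw = 125000 * 1000000 / 10000 = 12.5 MB/s (~100 Mbit/s).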
17550 */ 17551 fill_bw = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); 17552 if (rack->rc_fillcw_apply_discount) { 17553 uint32_t rate_set = 0; 17554 17555 fill_bw = rack_arrive_at_discounted_rate(rack, fill_bw, &rate_set, NULL); 17556 if (rate_set) { 17557 goto at_lt_bw; 17558 } 17559 } 17560 /* Take the rwnd if its smaller */ 17561 if (fill_bw > rack->rc_tp->snd_wnd) 17562 fill_bw = rack->rc_tp->snd_wnd; 17563 /* Now lets make it into a b/w */ 17564 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 17565 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 17566 at_lt_bw: 17567 if (rack->r_fill_less_agg) { 17568 /* 17569 * We want the average of the rate_wanted 17570 * and our fill-cw calculated bw. We also want 17571 * to cap any increase to be no more than 17572 * X times the lt_bw (where X is the rack_bw_multipler). 17573 */ 17574 uint64_t lt_bw, rate; 17575 17576 lt_bw = rack_get_lt_bw(rack); 17577 if (lt_bw > *rate_wanted) 17578 rate = lt_bw; 17579 else 17580 rate = *rate_wanted; 17581 fill_bw += rate; 17582 fill_bw /= 2; 17583 if (rack_bw_multipler && (fill_bw > (rate * rack_bw_multipler))) { 17584 fill_bw = rate * rack_bw_multipler; 17585 } 17586 } 17587 /* We are below the min b/w */ 17588 if (non_paced) 17589 *rate_wanted = fill_bw; 17590 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 17591 return (slot); 17592 rack->r_via_fill_cw = 1; 17593 if (rack->r_rack_hw_rate_caps && 17594 (rack->r_ctl.crte != NULL)) { 17595 uint64_t high_rate; 17596 17597 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 17598 if (fill_bw > high_rate) { 17599 /* We are capping bw at the highest rate table entry */ 17600 if (*rate_wanted > high_rate) { 17601 /* The original rate was also capped */ 17602 rack->r_via_fill_cw = 0; 17603 } 17604 rack_log_hdwr_pacing(rack, 17605 fill_bw, high_rate, __LINE__, 17606 0, 3); 17607 fill_bw = high_rate; 17608 if (capped) 17609 *capped = 1; 17610 } 17611 } else if ((rack->r_ctl.crte == NULL) && 17612 (rack->rack_hdrw_pacing == 0) && 17613 (rack->rack_hdw_pace_ena) && 17614 rack->r_rack_hw_rate_caps && 17615 (rack->rack_attempt_hdwr_pace == 0) && 17616 (rack->rc_inp->inp_route.ro_nh != NULL) && 17617 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 17618 /* 17619 * Ok we may have a first attempt that is greater than our top rate 17620 * lets check. 17621 */ 17622 uint64_t high_rate; 17623 17624 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 17625 if (high_rate) { 17626 if (fill_bw > high_rate) { 17627 fill_bw = high_rate; 17628 if (capped) 17629 *capped = 1; 17630 } 17631 } 17632 } 17633 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) { 17634 if (rack->rc_hybrid_mode) 17635 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, 17636 fill_bw, 0, 0, HYBRID_LOG_RATE_CAP, 2, NULL, __LINE__); 17637 fill_bw = rack->r_ctl.bw_rate_cap; 17638 } 17639 /* 17640 * Ok fill_bw holds our mythical b/w to fill the cwnd 17641 * in an rtt (unless it was capped), what does that 17642 * time wise equate too? 
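 * E.g. (illustrative): pacing len = 14480 bytes (ten 1448-byte
 * segments) at a fill_bw of 12.5 MB/s gives
 * lentim = 14480 * 1000000 / 12500000 ~= 1158 usec, and that only
 * replaces the caller's slot when it is the smaller of the two
 * (or when we were not previously pacing at all).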
17643 */ 17644 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 17645 lentim /= fill_bw; 17646 *rate_wanted = fill_bw; 17647 if (non_paced || (lentim < slot)) { 17648 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, 17649 0, lentim, 12, __LINE__, NULL, 0); 17650 return ((int32_t)lentim); 17651 } else 17652 return (slot); 17653 } 17654 17655 static int32_t 17656 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz) 17657 { 17658 uint64_t srtt; 17659 int32_t slot = 0; 17660 int32_t minslot = 0; 17661 int can_start_hw_pacing = 1; 17662 int err; 17663 int pace_one; 17664 17665 if (rack_pace_one_seg || 17666 (rack->r_ctl.rc_user_set_min_segs == 1)) 17667 pace_one = 1; 17668 else 17669 pace_one = 0; 17670 if (rack->rc_always_pace == 0) { 17671 /* 17672 * We use the most optimistic possible cwnd/srtt for 17673 * sending calculations. This will make our 17674 * calculation anticipate getting more through 17675 * quicker than possible. But that's OK; we don't want 17676 * the peer to have a gap in data sending. 17677 */ 17678 uint64_t cwnd, tr_perms = 0; 17679 int32_t reduce = 0; 17680 17681 old_method: 17682 /* 17683 * We keep no precise pacing with the old method; 17684 * instead we use the pacer to mitigate bursts. 17685 */ 17686 if (rack->r_ctl.rc_rack_min_rtt) 17687 srtt = rack->r_ctl.rc_rack_min_rtt; 17688 else 17689 srtt = max(tp->t_srtt, 1); 17690 if (rack->r_ctl.rc_rack_largest_cwnd) 17691 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 17692 else 17693 cwnd = rack->r_ctl.cwnd_to_use; 17694 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 17695 tr_perms = (cwnd * 1000) / srtt; 17696 if (tr_perms == 0) { 17697 tr_perms = ctf_fixed_maxseg(tp); 17698 } 17699 /* 17700 * Calculate how long this will take to drain. If 17701 * the calculation comes out to zero, that's OK; we 17702 * will use send_a_lot to possibly spin around, 17703 * increasing tot_len_this_send to the point 17704 * that it is going to require a pace, or we hit the 17705 * cwnd, in which case we are just waiting for 17706 * an ACK. 17707 */ 17708 slot = len / tr_perms; 17709 /* Now do we reduce the time so we don't run dry?
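 * Illustrative numbers: cwnd = 100000 bytes and srtt = 20000 usec
 * give tr_perms = 100000 * 1000 / 20000 = 5000 bytes/ms, so
 * len = 50000 drains in a 10 ms slot; with a reduction divisor of
 * 4 (hypothetical setting) we shave off a quarter, leaving 8 ms,
 * i.e. 8000 usec after the conversion below.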
*/ 17710 if (slot && rack_slot_reduction) { 17711 reduce = (slot / rack_slot_reduction); 17712 if (reduce < slot) { 17713 slot -= reduce; 17714 } else 17715 slot = 0; 17716 } 17717 slot *= HPTS_USEC_IN_MSEC; 17718 if (rack->rc_pace_to_cwnd) { 17719 uint64_t rate_wanted = 0; 17720 17721 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); 17722 rack->rc_ack_can_sendout_data = 1; 17723 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); 17724 } else 17725 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); 17726 /*******************************************************/ 17727 /* RRS: We insert non-paced call to stats here for len */ 17728 /*******************************************************/ 17729 } else { 17730 uint64_t bw_est, res, lentim, rate_wanted; 17731 uint32_t segs, oh; 17732 int capped = 0; 17733 int prev_fill; 17734 17735 if ((rack->r_rr_config == 1) && rsm) { 17736 return (rack->r_ctl.rc_min_to); 17737 } 17738 if (rack->use_fixed_rate) { 17739 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 17740 } else if ((rack->r_ctl.init_rate == 0) && 17741 (rack->r_ctl.gp_bw == 0)) { 17742 /* no way to yet do an estimate */ 17743 bw_est = rate_wanted = 0; 17744 } else if (rack->dgp_on) { 17745 bw_est = rack_get_bw(rack); 17746 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 17747 } else { 17748 uint32_t gain, rate_set = 0; 17749 17750 rate_wanted = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); 17751 rate_wanted = rack_arrive_at_discounted_rate(rack, rate_wanted, &rate_set, &gain); 17752 if (rate_set == 0) { 17753 if (rate_wanted > rack->rc_tp->snd_wnd) 17754 rate_wanted = rack->rc_tp->snd_wnd; 17755 /* Now lets make it into a b/w */ 17756 rate_wanted *= (uint64_t)HPTS_USEC_IN_SEC; 17757 rate_wanted /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 17758 } 17759 bw_est = rate_wanted; 17760 rack_log_pacing_delay_calc(rack, rack->rc_tp->snd_cwnd, 17761 rack->r_ctl.cwnd_to_use, 17762 rate_wanted, bw_est, 17763 rack->r_ctl.rc_last_us_rtt, 17764 88, __LINE__, NULL, gain); 17765 } 17766 if ((bw_est == 0) || (rate_wanted == 0) || 17767 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) { 17768 /* 17769 * No way yet to make a b/w estimate or 17770 * our raise is set incorrectly. 17771 */ 17772 goto old_method; 17773 } 17774 rack_rate_cap_bw(rack, &rate_wanted, &capped); 17775 /* We need to account for all the overheads */ 17776 segs = (len + segsiz - 1) / segsiz; 17777 /* 17778 * We need the diff between 1514 bytes (e-mtu with e-hdr) 17779 * and how much data we put in each packet. Yes this 17780 * means we may be off if we are larger than 1500 bytes 17781 * or smaller. But this just makes us more conservative. 17782 */ 17783 17784 oh = (tp->t_maxseg - segsiz) + sizeof(struct tcphdr); 17785 if (rack->r_is_v6) { 17786 #ifdef INET6 17787 oh += sizeof(struct ip6_hdr); 17788 #endif 17789 } else { 17790 #ifdef INET 17791 oh += sizeof(struct ip); 17792 #endif 17793 } 17794 /* We add a fixed 14 for the ethernet header */ 17795 oh += 14; 17796 segs *= oh; 17797 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 17798 res = lentim / rate_wanted; 17799 slot = (uint32_t)res; 17800 if (rack_hw_rate_min && 17801 (rate_wanted < rack_hw_rate_min)) { 17802 can_start_hw_pacing = 0; 17803 if (rack->r_ctl.crte) { 17804 /* 17805 * Ok we need to release it, we 17806 * have fallen too low. 
17807 */ 17808 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17809 rack->r_ctl.crte = NULL; 17810 rack->rack_attempt_hdwr_pace = 0; 17811 rack->rack_hdrw_pacing = 0; 17812 } 17813 } 17814 if (rack->r_ctl.crte && 17815 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { 17816 /* 17817 * We want more than the hardware can give us, 17818 * don't start any hw pacing. 17819 */ 17820 can_start_hw_pacing = 0; 17821 if (rack->r_rack_hw_rate_caps == 0) { 17822 /* 17823 * Ok we need to release it, we 17824 * want more than the card can give us and 17825 * no rate cap is in place. Set it up so 17826 * when we want less we can retry. 17827 */ 17828 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17829 rack->r_ctl.crte = NULL; 17830 rack->rack_attempt_hdwr_pace = 0; 17831 rack->rack_hdrw_pacing = 0; 17832 } 17833 } 17834 if ((rack->r_ctl.crte != NULL) && (rack->rc_inp->inp_snd_tag == NULL)) { 17835 /* 17836 * We lost our rate somehow, this can happen 17837 * if the interface changed underneath us. 17838 */ 17839 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17840 rack->r_ctl.crte = NULL; 17841 /* Lets re-allow attempting to setup pacing */ 17842 rack->rack_hdrw_pacing = 0; 17843 rack->rack_attempt_hdwr_pace = 0; 17844 rack_log_hdwr_pacing(rack, 17845 rate_wanted, bw_est, __LINE__, 17846 0, 6); 17847 } 17848 prev_fill = rack->r_via_fill_cw; 17849 if ((rack->rc_pace_to_cwnd) && 17850 (capped == 0) && 17851 (rack->dgp_on == 1) && 17852 (rack->use_fixed_rate == 0) && 17853 (rack->in_probe_rtt == 0) && 17854 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 17855 /* 17856 * We want to pace at our rate *or* faster to 17857 * fill the cwnd to the max if its not full. 17858 */ 17859 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); 17860 /* Re-check to make sure we are not exceeding our max b/w */ 17861 if ((rack->r_ctl.crte != NULL) && 17862 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { 17863 /* 17864 * We want more than the hardware can give us, 17865 * don't start any hw pacing. 17866 */ 17867 can_start_hw_pacing = 0; 17868 if (rack->r_rack_hw_rate_caps == 0) { 17869 /* 17870 * Ok we need to release it, we 17871 * want more than the card can give us and 17872 * no rate cap is in place. Set it up so 17873 * when we want less we can retry. 17874 */ 17875 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17876 rack->r_ctl.crte = NULL; 17877 rack->rack_attempt_hdwr_pace = 0; 17878 rack->rack_hdrw_pacing = 0; 17879 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 17880 } 17881 } 17882 } 17883 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 17884 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 17885 if ((rack->rack_hdw_pace_ena) && 17886 (can_start_hw_pacing > 0) && 17887 (rack->rack_hdrw_pacing == 0) && 17888 (rack->rack_attempt_hdwr_pace == 0)) { 17889 /* 17890 * Lets attempt to turn on hardware pacing 17891 * if we can. 
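 * (Sketch of the attempt below: RS_PACING_GEQ asks the interface's
 * rate table for the closest entry at or above rate_wanted. On
 * success the entry is cached in r_ctl.crte, hardware pacing is
 * marked on and the pacing burst size is re-derived for that rate;
 * on failure we just count it and stay with software pacing.)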
17892 */ 17893 rack->rack_attempt_hdwr_pace = 1; 17894 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 17895 rack->rc_inp->inp_route.ro_nh->nh_ifp, 17896 rate_wanted, 17897 RS_PACING_GEQ, 17898 &err, &rack->r_ctl.crte_prev_rate); 17899 if (rack->r_ctl.crte) { 17900 rack->rack_hdrw_pacing = 1; 17901 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, segsiz, 17902 pace_one, rack->r_ctl.crte, 17903 NULL, rack->r_ctl.pace_len_divisor); 17904 rack_log_hdwr_pacing(rack, 17905 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 17906 err, 0); 17907 rack->r_ctl.last_hw_bw_req = rate_wanted; 17908 } else { 17909 counter_u64_add(rack_hw_pace_init_fail, 1); 17910 } 17911 } else if (rack->rack_hdrw_pacing && 17912 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 17913 /* Do we need to adjust our rate? */ 17914 const struct tcp_hwrate_limit_table *nrte; 17915 17916 if (rack->r_up_only && 17917 (rate_wanted < rack->r_ctl.crte->rate)) { 17918 /** 17919 * We have four possible states here 17920 * having to do with the previous time 17921 * and this time. 17922 * previous | this-time 17923 * A) 0 | 0 -- fill_cw not in the picture 17924 * B) 1 | 0 -- we were doing a fill-cw but now are not 17925 * C) 1 | 1 -- all rates from fill_cw 17926 * D) 0 | 1 -- we were doing non-fill and now we are filling 17927 * 17928 * For case A, C and D we don't allow a drop. But for 17929 * case B where we now our on our steady rate we do 17930 * allow a drop. 17931 * 17932 */ 17933 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 17934 goto done_w_hdwr; 17935 } 17936 if ((rate_wanted > rack->r_ctl.crte->rate) || 17937 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 17938 if (rack_hw_rate_to_low && 17939 (bw_est < rack_hw_rate_to_low)) { 17940 /* 17941 * The pacing rate is too low for hardware, but 17942 * do allow hardware pacing to be restarted. 17943 */ 17944 rack_log_hdwr_pacing(rack, 17945 bw_est, rack->r_ctl.crte->rate, __LINE__, 17946 0, 5); 17947 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 17948 rack->r_ctl.crte = NULL; 17949 rack->rack_attempt_hdwr_pace = 0; 17950 rack->rack_hdrw_pacing = 0; 17951 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 17952 goto done_w_hdwr; 17953 } 17954 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 17955 rack->rc_tp, 17956 rack->rc_inp->inp_route.ro_nh->nh_ifp, 17957 rate_wanted, 17958 RS_PACING_GEQ, 17959 &err, &rack->r_ctl.crte_prev_rate); 17960 if (nrte == NULL) { 17961 /* 17962 * Lost the rate, lets drop hardware pacing 17963 * period. 
17964 */ 17965 rack->rack_hdrw_pacing = 0; 17966 rack->r_ctl.crte = NULL; 17967 rack_log_hdwr_pacing(rack, 17968 rate_wanted, 0, __LINE__, 17969 err, 1); 17970 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 17971 counter_u64_add(rack_hw_pace_lost, 1); 17972 } else if (nrte != rack->r_ctl.crte) { 17973 rack->r_ctl.crte = nrte; 17974 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, 17975 segsiz, pace_one, rack->r_ctl.crte, 17976 NULL, rack->r_ctl.pace_len_divisor); 17977 rack_log_hdwr_pacing(rack, 17978 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 17979 err, 2); 17980 rack->r_ctl.last_hw_bw_req = rate_wanted; 17981 } 17982 } else { 17983 /* We just need to adjust the segment size */ 17984 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 17985 rack_log_hdwr_pacing(rack, 17986 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 17987 0, 4); 17988 rack->r_ctl.last_hw_bw_req = rate_wanted; 17989 } 17990 } 17991 } 17992 if (minslot && (minslot > slot)) { 17993 rack_log_pacing_delay_calc(rack, minslot, slot, rack->r_ctl.crte->rate, bw_est, lentim, 17994 98, __LINE__, NULL, 0); 17995 slot = minslot; 17996 } 17997 done_w_hdwr: 17998 if (rack_limit_time_with_srtt && 17999 (rack->use_fixed_rate == 0) && 18000 (rack->rack_hdrw_pacing == 0)) { 18001 /* 18002 * Sanity check, we do not allow the pacing delay 18003 * to be longer than the SRTT of the path. If it is 18004 * a slow path, then adding a packet should increase 18005 * the RTT and compensate for this i.e. the srtt will 18006 * be greater so the allowed pacing time will be greater. 18007 * 18008 * Note this restriction is not for where a peak rate 18009 * is set, we are doing fixed pacing or hardware pacing. 18010 */ 18011 if (rack->rc_tp->t_srtt) 18012 srtt = rack->rc_tp->t_srtt; 18013 else 18014 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 18015 if (srtt < (uint64_t)slot) { 18016 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); 18017 slot = srtt; 18018 } 18019 } 18020 /*******************************************************************/ 18021 /* RRS: We insert paced call to stats here for len and rate_wanted */ 18022 /*******************************************************************/ 18023 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); 18024 } 18025 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 18026 /* 18027 * If this rate is seeing enobufs when it 18028 * goes to send then either the nic is out 18029 * of gas or we are mis-estimating the time 18030 * somehow and not letting the queue empty 18031 * completely. Lets add to the pacing time. 18032 */ 18033 int hw_boost_delay; 18034 18035 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 18036 if (hw_boost_delay > rack_enobuf_hw_max) 18037 hw_boost_delay = rack_enobuf_hw_max; 18038 else if (hw_boost_delay < rack_enobuf_hw_min) 18039 hw_boost_delay = rack_enobuf_hw_min; 18040 slot += hw_boost_delay; 18041 } 18042 return (slot); 18043 } 18044 18045 static void 18046 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 18047 tcp_seq startseq, uint32_t sb_offset) 18048 { 18049 struct rack_sendmap *my_rsm = NULL; 18050 18051 if (tp->t_state < TCPS_ESTABLISHED) { 18052 /* 18053 * We don't start any measurements if we are 18054 * not at least established. 
18055 */ 18056 return; 18057 } 18058 if (tp->t_state >= TCPS_FIN_WAIT_1) { 18059 /* 18060 * We will get no more data into the SB 18061 * this means we need to have the data available 18062 * before we start a measurement. 18063 */ 18064 18065 if (sbavail(&tptosocket(tp)->so_snd) < 18066 max(rc_init_window(rack), 18067 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { 18068 /* Nope not enough data */ 18069 return; 18070 } 18071 } 18072 tp->t_flags |= TF_GPUTINPROG; 18073 rack->r_ctl.rc_gp_cumack_ts = 0; 18074 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 18075 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 18076 tp->gput_seq = startseq; 18077 rack->app_limited_needs_set = 0; 18078 if (rack->in_probe_rtt) 18079 rack->measure_saw_probe_rtt = 1; 18080 else if ((rack->measure_saw_probe_rtt) && 18081 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 18082 rack->measure_saw_probe_rtt = 0; 18083 if (rack->rc_gp_filled) 18084 tp->gput_ts = rack->r_ctl.last_cumack_advance; 18085 else { 18086 /* Special case initial measurement */ 18087 struct timeval tv; 18088 18089 tp->gput_ts = tcp_get_usecs(&tv); 18090 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 18091 } 18092 /* 18093 * We take a guess out into the future, 18094 * if we have no measurement and no 18095 * initial rate, we measure the first 18096 * initial-windows worth of data to 18097 * speed up getting some GP measurement and 18098 * thus start pacing. 18099 */ 18100 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 18101 rack->app_limited_needs_set = 1; 18102 tp->gput_ack = startseq + max(rc_init_window(rack), 18103 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 18104 rack_log_pacing_delay_calc(rack, 18105 tp->gput_seq, 18106 tp->gput_ack, 18107 0, 18108 tp->gput_ts, 18109 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 18110 9, 18111 __LINE__, NULL, 0); 18112 rack_tend_gp_marks(tp, rack); 18113 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 18114 return; 18115 } 18116 if (sb_offset) { 18117 /* 18118 * We are out somewhere in the sb 18119 * can we use the already outstanding data? 18120 */ 18121 18122 if (rack->r_ctl.rc_app_limited_cnt == 0) { 18123 /* 18124 * Yes first one is good and in this case 18125 * the tp->gput_ts is correctly set based on 18126 * the last ack that arrived (no need to 18127 * set things up when an ack comes in). 18128 */ 18129 my_rsm = tqhash_min(rack->r_ctl.tqh); 18130 if ((my_rsm == NULL) || 18131 (my_rsm->r_rtr_cnt != 1)) { 18132 /* retransmission? */ 18133 goto use_latest; 18134 } 18135 } else { 18136 if (rack->r_ctl.rc_first_appl == NULL) { 18137 /* 18138 * If rc_first_appl is NULL 18139 * then the cnt should be 0. 18140 * This is probably an error, maybe 18141 * a KASSERT would be approprate. 18142 */ 18143 goto use_latest; 18144 } 18145 /* 18146 * If we have a marker pointer to the last one that is 18147 * app limited we can use that, but we need to set 18148 * things up so that when it gets ack'ed we record 18149 * the ack time (if its not already acked). 18150 */ 18151 rack->app_limited_needs_set = 1; 18152 /* 18153 * We want to get to the rsm that is either 18154 * next with space i.e. over 1 MSS or the one 18155 * after that (after the app-limited). 
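 * For example (illustrative), with a 1448 byte MSS: if the block
 * after the app-limited one only covers [9000, 9500) we step past
 * it to the following block, but if it covers [9000, 20000) the
 * measurement starts one MSS in, at sequence 10448.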
18156 */ 18157 my_rsm = tqhash_next(rack->r_ctl.tqh, rack->r_ctl.rc_first_appl); 18158 if (my_rsm) { 18159 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 18160 /* Have to use the next one */ 18161 my_rsm = tqhash_next(rack->r_ctl.tqh, my_rsm); 18162 else { 18163 /* Use after the first MSS of it is acked */ 18164 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 18165 goto start_set; 18166 } 18167 } 18168 if ((my_rsm == NULL) || 18169 (my_rsm->r_rtr_cnt != 1)) { 18170 /* 18171 * Either its a retransmit or 18172 * the last is the app-limited one. 18173 */ 18174 goto use_latest; 18175 } 18176 } 18177 tp->gput_seq = my_rsm->r_start; 18178 start_set: 18179 if (my_rsm->r_flags & RACK_ACKED) { 18180 /* 18181 * This one has been acked use the arrival ack time 18182 */ 18183 struct rack_sendmap *nrsm; 18184 18185 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 18186 rack->app_limited_needs_set = 0; 18187 /* 18188 * Ok in this path we need to use the r_end now 18189 * since this guy is the starting ack. 18190 */ 18191 tp->gput_seq = my_rsm->r_end; 18192 /* 18193 * We also need to adjust up the sendtime 18194 * to the send of the next data after my_rsm. 18195 */ 18196 nrsm = tqhash_next(rack->r_ctl.tqh, my_rsm); 18197 if (nrsm != NULL) 18198 my_rsm = nrsm; 18199 else { 18200 /* 18201 * The next as not been sent, thats the 18202 * case for using the latest. 18203 */ 18204 goto use_latest; 18205 } 18206 } 18207 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; 18208 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 18209 rack->r_ctl.rc_gp_cumack_ts = 0; 18210 rack_log_pacing_delay_calc(rack, 18211 tp->gput_seq, 18212 tp->gput_ack, 18213 (uint64_t)my_rsm, 18214 tp->gput_ts, 18215 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 18216 9, 18217 __LINE__, my_rsm, 0); 18218 /* Now lets make sure all are marked as they should be */ 18219 rack_tend_gp_marks(tp, rack); 18220 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 18221 return; 18222 } 18223 18224 use_latest: 18225 /* 18226 * We don't know how long we may have been 18227 * idle or if this is the first-send. Lets 18228 * setup the flag so we will trim off 18229 * the first ack'd data so we get a true 18230 * measurement. 18231 */ 18232 rack->app_limited_needs_set = 1; 18233 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 18234 rack->r_ctl.rc_gp_cumack_ts = 0; 18235 /* Find this guy so we can pull the send time */ 18236 my_rsm = tqhash_find(rack->r_ctl.tqh, startseq); 18237 if (my_rsm) { 18238 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; 18239 if (my_rsm->r_flags & RACK_ACKED) { 18240 /* 18241 * Unlikely since its probably what was 18242 * just transmitted (but I am paranoid). 18243 */ 18244 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 18245 rack->app_limited_needs_set = 0; 18246 } 18247 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 18248 /* This also is unlikely */ 18249 tp->gput_seq = my_rsm->r_start; 18250 } 18251 } else { 18252 /* 18253 * TSNH unless we have some send-map limit, 18254 * and even at that it should not be hitting 18255 * that limit (we should have stopped sending). 
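* (TSNH == this should not happen.)  If it somehow does, we simply
* fall back to the current time below as the send timestamp for the
* measurement.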
18256 */ 18257 struct timeval tv; 18258 18259 microuptime(&tv); 18260 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 18261 } 18262 rack_tend_gp_marks(tp, rack); 18263 rack_log_pacing_delay_calc(rack, 18264 tp->gput_seq, 18265 tp->gput_ack, 18266 (uint64_t)my_rsm, 18267 tp->gput_ts, 18268 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), 18269 9, __LINE__, NULL, 0); 18270 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); 18271 } 18272 18273 static inline uint32_t 18274 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 18275 uint32_t avail, int32_t sb_offset) 18276 { 18277 uint32_t len; 18278 uint32_t sendwin; 18279 18280 if (tp->snd_wnd > cwnd_to_use) 18281 sendwin = cwnd_to_use; 18282 else 18283 sendwin = tp->snd_wnd; 18284 if (ctf_outstanding(tp) >= tp->snd_wnd) { 18285 /* We never want to go over our peers rcv-window */ 18286 len = 0; 18287 } else { 18288 uint32_t flight; 18289 18290 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 18291 if (flight >= sendwin) { 18292 /* 18293 * We have in flight what we are allowed by cwnd (if 18294 * it was rwnd blocking it would have hit above out 18295 * >= tp->snd_wnd). 18296 */ 18297 return (0); 18298 } 18299 len = sendwin - flight; 18300 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 18301 /* We would send too much (beyond the rwnd) */ 18302 len = tp->snd_wnd - ctf_outstanding(tp); 18303 } 18304 if ((len + sb_offset) > avail) { 18305 /* 18306 * We don't have that much in the SB, how much is 18307 * there? 18308 */ 18309 len = avail - sb_offset; 18310 } 18311 } 18312 return (len); 18313 } 18314 18315 static void 18316 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 18317 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 18318 int rsm_is_null, int optlen, int line, uint16_t mode) 18319 { 18320 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 18321 union tcp_log_stackspecific log; 18322 struct timeval tv; 18323 18324 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18325 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18326 log.u_bbr.flex1 = error; 18327 log.u_bbr.flex2 = flags; 18328 log.u_bbr.flex3 = rsm_is_null; 18329 log.u_bbr.flex4 = ipoptlen; 18330 log.u_bbr.flex5 = tp->rcv_numsacks; 18331 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18332 log.u_bbr.flex7 = optlen; 18333 log.u_bbr.flex8 = rack->r_fsb_inited; 18334 log.u_bbr.applimited = rack->r_fast_output; 18335 log.u_bbr.bw_inuse = rack_get_bw(rack); 18336 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 18337 log.u_bbr.cwnd_gain = mode; 18338 log.u_bbr.pkts_out = orig_len; 18339 log.u_bbr.lt_epoch = len; 18340 log.u_bbr.delivered = line; 18341 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 18342 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18343 tcp_log_event(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 18344 len, &log, false, NULL, __func__, __LINE__, &tv); 18345 } 18346 } 18347 18348 18349 static struct mbuf * 18350 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 18351 struct rack_fast_send_blk *fsb, 18352 int32_t seglimit, int32_t segsize, int hw_tls) 18353 { 18354 #ifdef KERN_TLS 18355 struct ktls_session *tls, *ntls; 18356 #ifdef INVARIANTS 18357 struct mbuf *start; 18358 #endif 18359 #endif 18360 struct mbuf *m, *n, **np, *smb; 18361 struct mbuf *top; 18362 int32_t off, soff; 18363 int32_t len = *plen; 18364 int32_t fragsize; 18365 int32_t len_cp = 0; 18366 uint32_t mlen, 
frags; 18367 18368 soff = off = the_off; 18369 smb = m = the_m; 18370 np = &top; 18371 top = NULL; 18372 #ifdef KERN_TLS 18373 if (hw_tls && (m->m_flags & M_EXTPG)) 18374 tls = m->m_epg_tls; 18375 else 18376 tls = NULL; 18377 #ifdef INVARIANTS 18378 start = m; 18379 #endif 18380 #endif 18381 while (len > 0) { 18382 if (m == NULL) { 18383 *plen = len_cp; 18384 break; 18385 } 18386 #ifdef KERN_TLS 18387 if (hw_tls) { 18388 if (m->m_flags & M_EXTPG) 18389 ntls = m->m_epg_tls; 18390 else 18391 ntls = NULL; 18392 18393 /* 18394 * Avoid mixing TLS records with handshake 18395 * data or TLS records from different 18396 * sessions. 18397 */ 18398 if (tls != ntls) { 18399 MPASS(m != start); 18400 *plen = len_cp; 18401 break; 18402 } 18403 } 18404 #endif 18405 mlen = min(len, m->m_len - off); 18406 if (seglimit) { 18407 /* 18408 * For M_EXTPG mbufs, add 3 segments 18409 * + 1 in case we are crossing page boundaries 18410 * + 2 in case the TLS hdr/trailer are used 18411 * It is cheaper to just add the segments 18412 * than it is to take the cache miss to look 18413 * at the mbuf ext_pgs state in detail. 18414 */ 18415 if (m->m_flags & M_EXTPG) { 18416 fragsize = min(segsize, PAGE_SIZE); 18417 frags = 3; 18418 } else { 18419 fragsize = segsize; 18420 frags = 0; 18421 } 18422 18423 /* Break if we really can't fit anymore. */ 18424 if ((frags + 1) >= seglimit) { 18425 *plen = len_cp; 18426 break; 18427 } 18428 18429 /* 18430 * Reduce size if you can't copy the whole 18431 * mbuf. If we can't copy the whole mbuf, also 18432 * adjust len so the loop will end after this 18433 * mbuf. 18434 */ 18435 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 18436 mlen = (seglimit - frags - 1) * fragsize; 18437 len = mlen; 18438 *plen = len_cp + len; 18439 } 18440 frags += howmany(mlen, fragsize); 18441 if (frags == 0) 18442 frags++; 18443 seglimit -= frags; 18444 KASSERT(seglimit > 0, 18445 ("%s: seglimit went too low", __func__)); 18446 } 18447 n = m_get(M_NOWAIT, m->m_type); 18448 *np = n; 18449 if (n == NULL) 18450 goto nospace; 18451 n->m_len = mlen; 18452 soff += mlen; 18453 len_cp += n->m_len; 18454 if (m->m_flags & (M_EXT|M_EXTPG)) { 18455 n->m_data = m->m_data + off; 18456 mb_dupcl(n, m); 18457 } else { 18458 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 18459 (u_int)n->m_len); 18460 } 18461 len -= n->m_len; 18462 off = 0; 18463 m = m->m_next; 18464 np = &n->m_next; 18465 if (len || (soff == smb->m_len)) { 18466 /* 18467 * We either have more to copy so we move forward, or 18468 * we have consumed the entire mbuf and 18469 * len has fallen to 0. 18470 */ 18471 soff = 0; 18472 smb = m; 18473 } 18474 18475 } 18476 if (fsb != NULL) { 18477 fsb->m = smb; 18478 fsb->off = soff; 18479 if (smb) { 18480 /* 18481 * Save off the size of the mbuf. We do 18482 * this so that we can recognize when it 18483 * has been trimmed by sbcut() as acks 18484 * come in. 18485 */ 18486 fsb->o_m_len = smb->m_len; 18487 fsb->o_t_len = M_TRAILINGROOM(smb); 18488 } else { 18489 /* 18490 * This is the case where the next mbuf went to NULL. This 18491 * means with this copy we have sent everything in the sb. 18492 * In theory we could clear the fast_output flag, but let's 18493 * not since it's possible that we could get more added 18494 * and acks that call the extend function which would let 18495 * us send more.
18496 */ 18497 fsb->o_m_len = 0; 18498 fsb->o_t_len = 0; 18499 } 18500 } 18501 return (top); 18502 nospace: 18503 if (top) 18504 m_freem(top); 18505 return (NULL); 18506 18507 } 18508 18509 /* 18510 * This is a copy of m_copym(), taking the TSO segment size/limit 18511 * constraints into account, and advancing the sndptr as it goes. 18512 */ 18513 static struct mbuf * 18514 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen, 18515 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff) 18516 { 18517 struct mbuf *m, *n; 18518 int32_t soff; 18519 18520 m = rack->r_ctl.fsb.m; 18521 if (M_TRAILINGROOM(m) != rack->r_ctl.fsb.o_t_len) { 18522 /* 18523 * The trailing space changed, mbufs can grow 18524 * at the tail but they can't shrink from 18525 * it, KASSERT that. Adjust the orig_m_len to 18526 * compensate for this change. 18527 */ 18528 KASSERT((rack->r_ctl.fsb.o_t_len > M_TRAILINGROOM(m)), 18529 ("mbuf:%p rack:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n", 18530 m, 18531 rack, 18532 (intmax_t)M_TRAILINGROOM(m), 18533 rack->r_ctl.fsb.o_t_len, 18534 rack->r_ctl.fsb.o_m_len, 18535 m->m_len)); 18536 rack->r_ctl.fsb.o_m_len += (rack->r_ctl.fsb.o_t_len - M_TRAILINGROOM(m)); 18537 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(m); 18538 } 18539 if (m->m_len < rack->r_ctl.fsb.o_m_len) { 18540 /* 18541 * Mbuf shrank, trimmed off the top by an ack, our 18542 * offset changes. 18543 */ 18544 KASSERT((rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len)), 18545 ("mbuf:%p len:%u rack:%p oml:%u soff:%u\n", 18546 m, m->m_len, 18547 rack, rack->r_ctl.fsb.o_m_len, 18548 rack->r_ctl.fsb.off)); 18549 18550 if (rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len- m->m_len)) 18551 rack->r_ctl.fsb.off -= (rack->r_ctl.fsb.o_m_len - m->m_len); 18552 else 18553 rack->r_ctl.fsb.off = 0; 18554 rack->r_ctl.fsb.o_m_len = m->m_len; 18555 #ifdef INVARIANTS 18556 } else if (m->m_len > rack->r_ctl.fsb.o_m_len) { 18557 panic("rack:%p m:%p m_len grew outside of t_space compensation", 18558 rack, m); 18559 #endif 18560 } 18561 soff = rack->r_ctl.fsb.off; 18562 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff)); 18563 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen)); 18564 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?", 18565 __FUNCTION__, 18566 rack, *plen, m, m->m_len)); 18567 /* Save off the right location before we copy and advance */ 18568 *s_soff = soff; 18569 *s_mb = rack->r_ctl.fsb.m; 18570 n = rack_fo_base_copym(m, soff, plen, 18571 &rack->r_ctl.fsb, 18572 seglimit, segsize, rack->r_ctl.fsb.hw_tls); 18573 return (n); 18574 } 18575 18576 /* Log the buffer level */ 18577 static void 18578 rack_log_queue_level(struct tcpcb *tp, struct tcp_rack *rack, 18579 int len, struct timeval *tv, 18580 uint32_t cts) 18581 { 18582 uint32_t p_rate = 0, p_queue = 0, err = 0; 18583 union tcp_log_stackspecific log; 18584 18585 #ifdef RATELIMIT 18586 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); 18587 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); 18588 #endif 18589 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18590 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18591 log.u_bbr.flex1 = p_rate; 18592 log.u_bbr.flex2 = p_queue; 18593 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; 18594 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; 18595 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; 18596 log.u_bbr.flex7 = 99; 18597 log.u_bbr.flex8 = 0; 18598 log.u_bbr.pkts_out = err; 18599 log.u_bbr.delRate = rack->r_ctl.crte->rate; 18600 log.u_bbr.timeStamp = 
cts; 18601 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18602 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0, 18603 len, &log, false, NULL, __func__, __LINE__, tv); 18604 18605 } 18606 18607 static uint32_t 18608 rack_check_queue_level(struct tcp_rack *rack, struct tcpcb *tp, 18609 struct timeval *tv, uint32_t cts, int len, uint32_t segsiz) 18610 { 18611 uint64_t lentime = 0; 18612 #ifdef RATELIMIT 18613 uint32_t p_rate = 0, p_queue = 0, err; 18614 union tcp_log_stackspecific log; 18615 uint64_t bw; 18616 18617 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); 18618 /* Failed or queue is zero */ 18619 if (err || (p_queue == 0)) { 18620 lentime = 0; 18621 goto out; 18622 } 18623 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); 18624 if (err) { 18625 lentime = 0; 18626 goto out; 18627 } 18628 /* 18629 * If we reach here we have some bytes in 18630 * the queue. The number returned is a value 18631 * between 0 and 0xffff where ffff is full 18632 * and 0 is empty. So how best to make this into 18633 * something usable? 18634 * 18635 * The "safer" way is to take the b/w gotten 18636 * from the query (which should be our b/w rate) 18637 * and pretend that a full send (our rc_pace_max_segs) 18638 * is outstanding. We factor it so it is as if a full 18639 * pace-max-segs worth of our MSS-sized segments, counted 18640 * as full ethernet-sized segments, were outstanding. 18641 */ 18642 bw = p_rate / 8; 18643 if (bw) { 18644 lentime = (rack->r_ctl.rc_pace_max_segs / segsiz); 18645 lentime *= ETHERNET_SEGMENT_SIZE; 18646 lentime *= (uint64_t)HPTS_USEC_IN_SEC; 18647 lentime /= bw; 18648 } else { 18649 /* TSNH -- KASSERT? */ 18650 lentime = 0; 18651 } 18652 out: 18653 if (tcp_bblogging_on(tp)) { 18654 memset(&log, 0, sizeof(log)); 18655 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 18656 log.u_bbr.flex1 = p_rate; 18657 log.u_bbr.flex2 = p_queue; 18658 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; 18659 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; 18660 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; 18661 log.u_bbr.flex7 = 99; 18662 log.u_bbr.flex8 = 0; 18663 log.u_bbr.pkts_out = err; 18664 log.u_bbr.delRate = rack->r_ctl.crte->rate; 18665 log.u_bbr.cur_del_rate = lentime; 18666 log.u_bbr.timeStamp = cts; 18667 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18668 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_HDWR_PACE, 0, 18669 len, &log, false, NULL, __func__, __LINE__,tv); 18670 } 18671 #endif 18672 return ((uint32_t)lentime); 18673 } 18674 18675 static int 18676 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm, 18677 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp) 18678 { 18679 /* 18680 * Enter the fast retransmit path. We are given that a sched_pin is 18681 * in place (if accounting is compiled in) and the cycle count taken 18682 * at the entry is in the ts_val. The concept here is that the rsm 18683 * now holds the mbuf offsets and such so we can directly transmit 18684 * without a lot of overhead; the len field is already set for 18685 * us to prohibit us from sending too much (usually it's 1 MSS).
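*
* If anything does not line up (wrong flags, a FIN on the rsm, no
* mbuf to copy from, etc.) we simply bail out and return -1 rather
* than trying to handle the corner case here.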
18686 */ 18687 struct ip *ip = NULL; 18688 struct udphdr *udp = NULL; 18689 struct tcphdr *th = NULL; 18690 struct mbuf *m = NULL; 18691 struct inpcb *inp; 18692 uint8_t *cpto; 18693 struct tcp_log_buffer *lgb; 18694 #ifdef TCP_ACCOUNTING 18695 uint64_t crtsc; 18696 int cnt_thru = 1; 18697 #endif 18698 struct tcpopt to; 18699 u_char opt[TCP_MAXOLEN]; 18700 uint32_t hdrlen, optlen; 18701 int32_t slot, segsiz, max_val, tso = 0, error = 0, ulen = 0; 18702 uint16_t flags; 18703 uint32_t if_hw_tsomaxsegcount = 0, startseq; 18704 uint32_t if_hw_tsomaxsegsize; 18705 int32_t ip_sendflag = IP_NO_SND_TAG_RL; 18706 18707 #ifdef INET6 18708 struct ip6_hdr *ip6 = NULL; 18709 18710 if (rack->r_is_v6) { 18711 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 18712 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 18713 } else 18714 #endif /* INET6 */ 18715 { 18716 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 18717 hdrlen = sizeof(struct tcpiphdr); 18718 } 18719 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 18720 goto failed; 18721 } 18722 if (doing_tlp) { 18723 /* It's a TLP, add the flag; it may already be there but be sure */ 18724 rsm->r_flags |= RACK_TLP; 18725 } else { 18726 /* If it was a TLP it is not one on this retransmit */ 18727 rsm->r_flags &= ~RACK_TLP; 18728 } 18729 startseq = rsm->r_start; 18730 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 18731 inp = rack->rc_inp; 18732 to.to_flags = 0; 18733 flags = tcp_outflags[tp->t_state]; 18734 if (flags & (TH_SYN|TH_RST)) { 18735 goto failed; 18736 } 18737 if (rsm->r_flags & RACK_HAS_FIN) { 18738 /* We can't send a FIN here */ 18739 goto failed; 18740 } 18741 if (flags & TH_FIN) { 18742 /* We never send a FIN */ 18743 flags &= ~TH_FIN; 18744 } 18745 if (tp->t_flags & TF_RCVD_TSTMP) { 18746 to.to_tsval = ms_cts + tp->ts_offset; 18747 to.to_tsecr = tp->ts_recent; 18748 to.to_flags = TOF_TS; 18749 } 18750 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18751 /* TCP-MD5 (RFC2385).
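* Request the signature option here so that space for the digest is
* reserved in the header; the digest itself is computed and filled
* in further down once the header has been built.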
*/ 18752 if (tp->t_flags & TF_SIGNATURE) 18753 to.to_flags |= TOF_SIGNATURE; 18754 #endif 18755 optlen = tcp_addoptions(&to, opt); 18756 hdrlen += optlen; 18757 udp = rack->r_ctl.fsb.udp; 18758 if (udp) 18759 hdrlen += sizeof(struct udphdr); 18760 if (rack->r_ctl.rc_pace_max_segs) 18761 max_val = rack->r_ctl.rc_pace_max_segs; 18762 else if (rack->rc_user_set_max_segs) 18763 max_val = rack->rc_user_set_max_segs * segsiz; 18764 else 18765 max_val = len; 18766 if ((tp->t_flags & TF_TSO) && 18767 V_tcp_do_tso && 18768 (len > segsiz) && 18769 (tp->t_port == 0)) 18770 tso = 1; 18771 #ifdef INET6 18772 if (MHLEN < hdrlen + max_linkhdr) 18773 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 18774 else 18775 #endif 18776 m = m_gethdr(M_NOWAIT, MT_DATA); 18777 if (m == NULL) 18778 goto failed; 18779 m->m_data += max_linkhdr; 18780 m->m_len = hdrlen; 18781 th = rack->r_ctl.fsb.th; 18782 /* Establish the len to send */ 18783 if (len > max_val) 18784 len = max_val; 18785 if ((tso) && (len + optlen > segsiz)) { 18786 uint32_t if_hw_tsomax; 18787 int32_t max_len; 18788 18789 /* extract TSO information */ 18790 if_hw_tsomax = tp->t_tsomax; 18791 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 18792 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 18793 /* 18794 * Check if we should limit by maximum payload 18795 * length: 18796 */ 18797 if (if_hw_tsomax != 0) { 18798 /* compute maximum TSO length */ 18799 max_len = (if_hw_tsomax - hdrlen - 18800 max_linkhdr); 18801 if (max_len <= 0) { 18802 goto failed; 18803 } else if (len > max_len) { 18804 len = max_len; 18805 } 18806 } 18807 if (len <= segsiz) { 18808 /* 18809 * In case there are too many small fragments don't 18810 * use TSO: 18811 */ 18812 tso = 0; 18813 } 18814 } else { 18815 tso = 0; 18816 } 18817 if ((tso == 0) && (len > segsiz)) 18818 len = segsiz; 18819 (void)tcp_get_usecs(tv); 18820 if ((len == 0) || 18821 (len <= MHLEN - hdrlen - max_linkhdr)) { 18822 goto failed; 18823 } 18824 th->th_seq = htonl(rsm->r_start); 18825 th->th_ack = htonl(tp->rcv_nxt); 18826 /* 18827 * The PUSH bit should only be applied 18828 * if the full retransmission is made. If 18829 * we are sending less than this is the 18830 * left hand edge and should not have 18831 * the PUSH bit. 
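* For example, if the rsm covers 3000 bytes but we can only send the
* first 1448 of them, the rest of the retransmission is still to
* come, so the PUSH bit is withheld.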
18832 */ 18833 if ((rsm->r_flags & RACK_HAD_PUSH) && 18834 (len == (rsm->r_end - rsm->r_start))) 18835 flags |= TH_PUSH; 18836 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 18837 if (th->th_win == 0) { 18838 tp->t_sndzerowin++; 18839 tp->t_flags |= TF_RXWIN0SENT; 18840 } else 18841 tp->t_flags &= ~TF_RXWIN0SENT; 18842 if (rsm->r_flags & RACK_TLP) { 18843 /* 18844 * TLP should not count in retran count, but 18845 * in its own bin 18846 */ 18847 counter_u64_add(rack_tlp_retran, 1); 18848 counter_u64_add(rack_tlp_retran_bytes, len); 18849 } else { 18850 tp->t_sndrexmitpack++; 18851 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 18852 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 18853 } 18854 #ifdef STATS 18855 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 18856 len); 18857 #endif 18858 if (rsm->m == NULL) 18859 goto failed; 18860 if (rsm->m && 18861 ((rsm->orig_m_len != rsm->m->m_len) || 18862 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { 18863 /* Fix up the orig_m_len and possibly the mbuf offset */ 18864 rack_adjust_orig_mlen(rsm); 18865 } 18866 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 18867 if (len <= segsiz) { 18868 /* 18869 * Must have ran out of mbufs for the copy 18870 * shorten it to no longer need tso. Lets 18871 * not put on sendalot since we are low on 18872 * mbufs. 18873 */ 18874 tso = 0; 18875 } 18876 if ((m->m_next == NULL) || (len <= 0)){ 18877 goto failed; 18878 } 18879 if (udp) { 18880 if (rack->r_is_v6) 18881 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18882 else 18883 ulen = hdrlen + len - sizeof(struct ip); 18884 udp->uh_ulen = htons(ulen); 18885 } 18886 m->m_pkthdr.rcvif = (struct ifnet *)0; 18887 if (TCPS_HAVERCVDSYN(tp->t_state) && 18888 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 18889 int ect = tcp_ecn_output_established(tp, &flags, len, true); 18890 if ((tp->t_state == TCPS_SYN_RECEIVED) && 18891 (tp->t_flags2 & TF2_ECN_SND_ECE)) 18892 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 18893 #ifdef INET6 18894 if (rack->r_is_v6) { 18895 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 18896 ip6->ip6_flow |= htonl(ect << 20); 18897 } 18898 else 18899 #endif 18900 { 18901 ip->ip_tos &= ~IPTOS_ECN_MASK; 18902 ip->ip_tos |= ect; 18903 } 18904 } 18905 if (rack->r_ctl.crte != NULL) { 18906 /* See if we can send via the hw queue */ 18907 slot = rack_check_queue_level(rack, tp, tv, cts, len, segsiz); 18908 /* If there is nothing in queue (no pacing time) we can send via the hw queue */ 18909 if (slot == 0) 18910 ip_sendflag = 0; 18911 } 18912 tcp_set_flags(th, flags); 18913 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 18914 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18915 if (to.to_flags & TOF_SIGNATURE) { 18916 /* 18917 * Calculate MD5 signature and put it into the place 18918 * determined before. 18919 * NOTE: since TCP options buffer doesn't point into 18920 * mbuf's data, calculate offset and use it. 18921 */ 18922 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 18923 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 18924 /* 18925 * Do not send segment if the calculation of MD5 18926 * digest has failed. 
18927 */ 18928 goto failed; 18929 } 18930 } 18931 #endif 18932 #ifdef INET6 18933 if (rack->r_is_v6) { 18934 if (tp->t_port) { 18935 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 18936 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18937 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 18938 th->th_sum = htons(0); 18939 UDPSTAT_INC(udps_opackets); 18940 } else { 18941 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 18942 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18943 th->th_sum = in6_cksum_pseudo(ip6, 18944 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 18945 0); 18946 } 18947 } 18948 #endif 18949 #if defined(INET6) && defined(INET) 18950 else 18951 #endif 18952 #ifdef INET 18953 { 18954 if (tp->t_port) { 18955 m->m_pkthdr.csum_flags = CSUM_UDP; 18956 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18957 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 18958 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 18959 th->th_sum = htons(0); 18960 UDPSTAT_INC(udps_opackets); 18961 } else { 18962 m->m_pkthdr.csum_flags = CSUM_TCP; 18963 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18964 th->th_sum = in_pseudo(ip->ip_src.s_addr, 18965 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 18966 IPPROTO_TCP + len + optlen)); 18967 } 18968 /* IP version must be set here for ipv4/ipv6 checking later */ 18969 KASSERT(ip->ip_v == IPVERSION, 18970 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 18971 } 18972 #endif 18973 if (tso) { 18974 /* 18975 * Here we use segsiz since we have no added options besides 18976 * any standard timestamp options (no DSACKs or SACKS are sent 18977 * via either fast-path). 18978 */ 18979 KASSERT(len > segsiz, 18980 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 18981 m->m_pkthdr.csum_flags |= CSUM_TSO; 18982 m->m_pkthdr.tso_segsz = segsiz; 18983 } 18984 #ifdef INET6 18985 if (rack->r_is_v6) { 18986 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 18987 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 18988 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 18989 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18990 else 18991 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18992 } 18993 #endif 18994 #if defined(INET) && defined(INET6) 18995 else 18996 #endif 18997 #ifdef INET 18998 { 18999 ip->ip_len = htons(m->m_pkthdr.len); 19000 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 19001 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 19002 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 19003 if (tp->t_port == 0 || len < V_tcp_minmss) { 19004 ip->ip_off |= htons(IP_DF); 19005 } 19006 } else { 19007 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 19008 } 19009 } 19010 #endif 19011 if (doing_tlp == 0) { 19012 /* Set we retransmitted */ 19013 rack->rc_gp_saw_rec = 1; 19014 } else { 19015 /* Its a TLP set ca or ss */ 19016 if (tp->snd_cwnd > tp->snd_ssthresh) { 19017 /* Set we sent in CA */ 19018 rack->rc_gp_saw_ca = 1; 19019 } else { 19020 /* Set we sent in SS */ 19021 rack->rc_gp_saw_ss = 1; 19022 } 19023 } 19024 /* Time to copy in our header */ 19025 cpto = mtod(m, uint8_t *); 19026 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 19027 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 19028 if (optlen) { 19029 bcopy(opt, th + 1, optlen); 19030 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 19031 } else { 19032 th->th_off = sizeof(struct tcphdr) >> 2; 19033 } 19034 if (tcp_bblogging_on(rack->rc_tp)) { 19035 union tcp_log_stackspecific log; 19036 19037 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 19038 
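/*
* This retransmission covers data that was inside a collapsed
* receive window; log it and bump the collapsed-window
* retransmit counters.
*/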
rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 19039 counter_u64_add(rack_collapsed_win_rxt, 1); 19040 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 19041 } 19042 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 19043 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 19044 if (rack->rack_no_prr) 19045 log.u_bbr.flex1 = 0; 19046 else 19047 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 19048 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 19049 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 19050 log.u_bbr.flex4 = max_val; 19051 /* Save off the early/late values */ 19052 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 19053 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 19054 log.u_bbr.bw_inuse = rack_get_bw(rack); 19055 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 19056 if (doing_tlp == 0) 19057 log.u_bbr.flex8 = 1; 19058 else 19059 log.u_bbr.flex8 = 2; 19060 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 19061 log.u_bbr.flex7 = 55; 19062 log.u_bbr.pkts_out = tp->t_maxseg; 19063 log.u_bbr.timeStamp = cts; 19064 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 19065 if (rsm && (rsm->r_rtr_cnt > 0)) { 19066 /* 19067 * When we have a retransmit we want to log the 19068 * burst at send and flight at send from before. 19069 */ 19070 log.u_bbr.flex5 = rsm->r_fas; 19071 log.u_bbr.bbr_substate = rsm->r_bas; 19072 } else { 19073 /* 19074 * This is currently unlikely until we do the 19075 * packet pair probes but I will add it for completeness. 19076 */ 19077 log.u_bbr.flex5 = log.u_bbr.inflight; 19078 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 19079 } 19080 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 19081 log.u_bbr.delivered = 0; 19082 log.u_bbr.rttProp = (uint64_t)rsm; 19083 log.u_bbr.delRate = rsm->r_flags; 19084 log.u_bbr.delRate <<= 31; 19085 log.u_bbr.delRate |= rack->r_must_retran; 19086 log.u_bbr.delRate <<= 1; 19087 log.u_bbr.delRate |= 1; 19088 log.u_bbr.pkt_epoch = __LINE__; 19089 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 19090 len, &log, false, NULL, __func__, __LINE__, tv); 19091 } else 19092 lgb = NULL; 19093 if ((rack->r_ctl.crte != NULL) && 19094 tcp_bblogging_on(tp)) { 19095 rack_log_queue_level(tp, rack, len, tv, cts); 19096 } 19097 #ifdef INET6 19098 if (rack->r_is_v6) { 19099 error = ip6_output(m, inp->in6p_outputopts, 19100 &inp->inp_route6, 19101 ip_sendflag, NULL, NULL, inp); 19102 } 19103 else 19104 #endif 19105 #ifdef INET 19106 { 19107 error = ip_output(m, NULL, 19108 &inp->inp_route, 19109 ip_sendflag, 0, inp); 19110 } 19111 #endif 19112 m = NULL; 19113 if (lgb) { 19114 lgb->tlb_errno = error; 19115 lgb = NULL; 19116 } 19117 if (error) { 19118 goto failed; 19119 } else if (rack->rc_hw_nobuf && (ip_sendflag != IP_NO_SND_TAG_RL)) { 19120 rack->rc_hw_nobuf = 0; 19121 rack->r_ctl.rc_agg_delayed = 0; 19122 rack->r_early = 0; 19123 rack->r_late = 0; 19124 rack->r_ctl.rc_agg_early = 0; 19125 } 19126 19127 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 19128 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls, segsiz); 19129 if (doing_tlp) { 19130 rack->rc_tlp_in_progress = 1; 19131 rack->r_ctl.rc_tlp_cnt_out++; 19132 } 19133 if (error == 0) { 19134 counter_u64_add(rack_total_bytes, len); 19135 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 19136 if (doing_tlp) { 19137 rack->rc_last_sent_tlp_past_cumack = 0; 19138 rack->rc_last_sent_tlp_seq_valid = 1; 19139 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 
19140 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 19141 } 19142 if (rack->r_ctl.rc_prr_sndcnt >= len) 19143 rack->r_ctl.rc_prr_sndcnt -= len; 19144 else 19145 rack->r_ctl.rc_prr_sndcnt = 0; 19146 } 19147 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 19148 rack->forced_ack = 0; /* If we send something zap the FA flag */ 19149 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 19150 rack->r_ctl.retran_during_recovery += len; 19151 { 19152 int idx; 19153 19154 idx = (len / segsiz) + 3; 19155 if (idx >= TCP_MSS_ACCT_ATIMER) 19156 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 19157 else 19158 counter_u64_add(rack_out_size[idx], 1); 19159 } 19160 if (tp->t_rtttime == 0) { 19161 tp->t_rtttime = ticks; 19162 tp->t_rtseq = startseq; 19163 KMOD_TCPSTAT_INC(tcps_segstimed); 19164 } 19165 counter_u64_add(rack_fto_rsm_send, 1); 19166 if (error && (error == ENOBUFS)) { 19167 if (rack->r_ctl.crte != NULL) { 19168 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); 19169 if (tcp_bblogging_on(rack->rc_tp)) 19170 rack_log_queue_level(tp, rack, len, tv, cts); 19171 } else 19172 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); 19173 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 19174 if (rack->rc_enobuf < 0x7f) 19175 rack->rc_enobuf++; 19176 if (slot < (10 * HPTS_USEC_IN_MSEC)) 19177 slot = 10 * HPTS_USEC_IN_MSEC; 19178 if (rack->r_ctl.crte != NULL) { 19179 counter_u64_add(rack_saw_enobuf_hw, 1); 19180 tcp_rl_log_enobuf(rack->r_ctl.crte); 19181 } 19182 counter_u64_add(rack_saw_enobuf, 1); 19183 } else 19184 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz); 19185 if ((slot == 0) || 19186 (rack->rc_always_pace == 0) || 19187 (rack->r_rr_config == 1)) { 19188 /* 19189 * We have no pacing set or we 19190 * are using old-style rack or 19191 * we are overridden to use the old 1ms pacing. 19192 */ 19193 slot = rack->r_ctl.rc_min_to; 19194 } 19195 rack_start_hpts_timer(rack, tp, cts, slot, len, 0); 19196 #ifdef TCP_ACCOUNTING 19197 crtsc = get_cyclecount(); 19198 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19199 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 19200 } 19201 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19202 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 19203 } 19204 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19205 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); 19206 } 19207 sched_unpin(); 19208 #endif 19209 return (0); 19210 failed: 19211 if (m) 19212 m_free(m); 19213 return (-1); 19214 } 19215 19216 static void 19217 rack_sndbuf_autoscale(struct tcp_rack *rack) 19218 { 19219 /* 19220 * Automatic sizing of send socket buffer. Often the send buffer 19221 * size is not optimally adjusted to the actual network conditions 19222 * at hand (delay bandwidth product). Setting the buffer size too 19223 * small limits throughput on links with high bandwidth and high 19224 * delay (eg. trans-continental/oceanic links). Setting the 19225 * buffer size too big consumes too much real kernel memory, 19226 * especially with many connections on busy servers. 19227 * 19228 * The criteria to step up the send buffer one notch are: 19229 * 1. receive window of remote host is larger than send buffer 19230 * (with a fudge factor of 5/4th); 19231 * 2. send buffer is filled to 7/8th with data (so we actually 19232 * have data to make use of it); 19233 * 3. send buffer fill has not hit maximal automatic size; 19234 * 4. our send window (slow start and cogestion controlled) is 19235 * larger than sent but unacknowledged data in send buffer. 
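*
* When all four criteria hold we grow the buffer by the larger of
* rack_autosndbuf_inc percent of the current high water mark and
* V_tcp_autosndbuf_inc bytes, with the resulting size capped at
* V_tcp_autosndbuf_max.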
19236 * 19237 * Note that the rack version moves things much faster since 19238 * we want to avoid hitting cache lines in the rack_fast_output() 19239 * path so this is called much less often and thus moves 19240 * the SB forward by a percentage. 19241 */ 19242 struct socket *so; 19243 struct tcpcb *tp; 19244 uint32_t sendwin, scaleup; 19245 19246 tp = rack->rc_tp; 19247 so = rack->rc_inp->inp_socket; 19248 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); 19249 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { 19250 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && 19251 sbused(&so->so_snd) >= 19252 (so->so_snd.sb_hiwat / 8 * 7) && 19253 sbused(&so->so_snd) < V_tcp_autosndbuf_max && 19254 sendwin >= (sbused(&so->so_snd) - 19255 (tp->snd_nxt - tp->snd_una))) { 19256 if (rack_autosndbuf_inc) 19257 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; 19258 else 19259 scaleup = V_tcp_autosndbuf_inc; 19260 if (scaleup < V_tcp_autosndbuf_inc) 19261 scaleup = V_tcp_autosndbuf_inc; 19262 scaleup += so->so_snd.sb_hiwat; 19263 if (scaleup > V_tcp_autosndbuf_max) 19264 scaleup = V_tcp_autosndbuf_max; 19265 if (!sbreserve_locked(so, SO_SND, scaleup, curthread)) 19266 so->so_snd.sb_flags &= ~SB_AUTOSIZE; 19267 } 19268 } 19269 } 19270 19271 static int 19272 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val, 19273 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err) 19274 { 19275 /* 19276 * Enter to do fast output. We are given that the sched_pin is 19277 * in place (if accounting is compiled in) and the cycle count taken 19278 * at entry is in place in ts_val. The idea here is that 19279 * we know how many more bytes needs to be sent (presumably either 19280 * during pacing or to fill the cwnd and that was greater than 19281 * the max-burst). We have how much to send and all the info we 19282 * need to just send. 
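*
* When TSO is not in use we may loop back (the again label below)
* and send one segment at a time until either everything we were
* asked to send has gone out or we have used up max_val.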
19283 */ 19284 #ifdef INET 19285 struct ip *ip = NULL; 19286 #endif 19287 struct udphdr *udp = NULL; 19288 struct tcphdr *th = NULL; 19289 struct mbuf *m, *s_mb; 19290 struct inpcb *inp; 19291 uint8_t *cpto; 19292 struct tcp_log_buffer *lgb; 19293 #ifdef TCP_ACCOUNTING 19294 uint64_t crtsc; 19295 #endif 19296 struct tcpopt to; 19297 u_char opt[TCP_MAXOLEN]; 19298 uint32_t hdrlen, optlen; 19299 #ifdef TCP_ACCOUNTING 19300 int cnt_thru = 1; 19301 #endif 19302 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0; 19303 uint16_t flags; 19304 uint32_t s_soff; 19305 uint32_t if_hw_tsomaxsegcount = 0, startseq; 19306 uint32_t if_hw_tsomaxsegsize; 19307 uint16_t add_flag = RACK_SENT_FP; 19308 #ifdef INET6 19309 struct ip6_hdr *ip6 = NULL; 19310 19311 if (rack->r_is_v6) { 19312 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 19313 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 19314 } else 19315 #endif /* INET6 */ 19316 { 19317 #ifdef INET 19318 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 19319 hdrlen = sizeof(struct tcpiphdr); 19320 #endif 19321 } 19322 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 19323 m = NULL; 19324 goto failed; 19325 } 19326 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 19327 startseq = tp->snd_max; 19328 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 19329 inp = rack->rc_inp; 19330 len = rack->r_ctl.fsb.left_to_send; 19331 to.to_flags = 0; 19332 flags = rack->r_ctl.fsb.tcp_flags; 19333 if (tp->t_flags & TF_RCVD_TSTMP) { 19334 to.to_tsval = ms_cts + tp->ts_offset; 19335 to.to_tsecr = tp->ts_recent; 19336 to.to_flags = TOF_TS; 19337 } 19338 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 19339 /* TCP-MD5 (RFC2385). */ 19340 if (tp->t_flags & TF_SIGNATURE) 19341 to.to_flags |= TOF_SIGNATURE; 19342 #endif 19343 optlen = tcp_addoptions(&to, opt); 19344 hdrlen += optlen; 19345 udp = rack->r_ctl.fsb.udp; 19346 if (udp) 19347 hdrlen += sizeof(struct udphdr); 19348 if (rack->r_ctl.rc_pace_max_segs) 19349 max_val = rack->r_ctl.rc_pace_max_segs; 19350 else if (rack->rc_user_set_max_segs) 19351 max_val = rack->rc_user_set_max_segs * segsiz; 19352 else 19353 max_val = len; 19354 if ((tp->t_flags & TF_TSO) && 19355 V_tcp_do_tso && 19356 (len > segsiz) && 19357 (tp->t_port == 0)) 19358 tso = 1; 19359 again: 19360 #ifdef INET6 19361 if (MHLEN < hdrlen + max_linkhdr) 19362 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 19363 else 19364 #endif 19365 m = m_gethdr(M_NOWAIT, MT_DATA); 19366 if (m == NULL) 19367 goto failed; 19368 m->m_data += max_linkhdr; 19369 m->m_len = hdrlen; 19370 th = rack->r_ctl.fsb.th; 19371 /* Establish the len to send */ 19372 if (len > max_val) 19373 len = max_val; 19374 if ((tso) && (len + optlen > segsiz)) { 19375 uint32_t if_hw_tsomax; 19376 int32_t max_len; 19377 19378 /* extract TSO information */ 19379 if_hw_tsomax = tp->t_tsomax; 19380 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 19381 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 19382 /* 19383 * Check if we should limit by maximum payload 19384 * length: 19385 */ 19386 if (if_hw_tsomax != 0) { 19387 /* compute maximum TSO length */ 19388 max_len = (if_hw_tsomax - hdrlen - 19389 max_linkhdr); 19390 if (max_len <= 0) { 19391 goto failed; 19392 } else if (len > max_len) { 19393 len = max_len; 19394 } 19395 } 19396 if (len <= segsiz) { 19397 /* 19398 * In case there are too many small fragments don't 19399 * use TSO: 19400 */ 19401 tso = 0; 19402 } 19403 } else { 19404 tso = 0; 19405 } 19406 if ((tso == 0) && (len > segsiz)) 19407 len = segsiz; 19408 
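/*
* Grab the current time, then give up (goto failed) on zero-length
* sends or sends small enough to have fit in the header mbuf; the
* fast path always chains its data mbufs in separately below.
*/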
(void)tcp_get_usecs(tv); 19409 if ((len == 0) || 19410 (len <= MHLEN - hdrlen - max_linkhdr)) { 19411 goto failed; 19412 } 19413 sb_offset = tp->snd_max - tp->snd_una; 19414 th->th_seq = htonl(tp->snd_max); 19415 th->th_ack = htonl(tp->rcv_nxt); 19416 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 19417 if (th->th_win == 0) { 19418 tp->t_sndzerowin++; 19419 tp->t_flags |= TF_RXWIN0SENT; 19420 } else 19421 tp->t_flags &= ~TF_RXWIN0SENT; 19422 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 19423 KMOD_TCPSTAT_INC(tcps_sndpack); 19424 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 19425 #ifdef STATS 19426 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 19427 len); 19428 #endif 19429 if (rack->r_ctl.fsb.m == NULL) 19430 goto failed; 19431 19432 /* s_mb and s_soff are saved for rack_log_output */ 19433 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 19434 &s_mb, &s_soff); 19435 if (len <= segsiz) { 19436 /* 19437 * Must have ran out of mbufs for the copy 19438 * shorten it to no longer need tso. Lets 19439 * not put on sendalot since we are low on 19440 * mbufs. 19441 */ 19442 tso = 0; 19443 } 19444 if (rack->r_ctl.fsb.rfo_apply_push && 19445 (len == rack->r_ctl.fsb.left_to_send)) { 19446 tcp_set_flags(th, flags | TH_PUSH); 19447 add_flag |= RACK_HAD_PUSH; 19448 } 19449 if ((m->m_next == NULL) || (len <= 0)){ 19450 goto failed; 19451 } 19452 if (udp) { 19453 if (rack->r_is_v6) 19454 ulen = hdrlen + len - sizeof(struct ip6_hdr); 19455 else 19456 ulen = hdrlen + len - sizeof(struct ip); 19457 udp->uh_ulen = htons(ulen); 19458 } 19459 m->m_pkthdr.rcvif = (struct ifnet *)0; 19460 if (TCPS_HAVERCVDSYN(tp->t_state) && 19461 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 19462 int ect = tcp_ecn_output_established(tp, &flags, len, false); 19463 if ((tp->t_state == TCPS_SYN_RECEIVED) && 19464 (tp->t_flags2 & TF2_ECN_SND_ECE)) 19465 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 19466 #ifdef INET6 19467 if (rack->r_is_v6) { 19468 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 19469 ip6->ip6_flow |= htonl(ect << 20); 19470 } 19471 else 19472 #endif 19473 { 19474 #ifdef INET 19475 ip->ip_tos &= ~IPTOS_ECN_MASK; 19476 ip->ip_tos |= ect; 19477 #endif 19478 } 19479 } 19480 tcp_set_flags(th, flags); 19481 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 19482 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 19483 if (to.to_flags & TOF_SIGNATURE) { 19484 /* 19485 * Calculate MD5 signature and put it into the place 19486 * determined before. 19487 * NOTE: since TCP options buffer doesn't point into 19488 * mbuf's data, calculate offset and use it. 19489 */ 19490 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 19491 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 19492 /* 19493 * Do not send segment if the calculation of MD5 19494 * digest has failed. 
19495 */ 19496 goto failed; 19497 } 19498 } 19499 #endif 19500 #ifdef INET6 19501 if (rack->r_is_v6) { 19502 if (tp->t_port) { 19503 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 19504 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 19505 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 19506 th->th_sum = htons(0); 19507 UDPSTAT_INC(udps_opackets); 19508 } else { 19509 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 19510 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 19511 th->th_sum = in6_cksum_pseudo(ip6, 19512 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 19513 0); 19514 } 19515 } 19516 #endif 19517 #if defined(INET6) && defined(INET) 19518 else 19519 #endif 19520 #ifdef INET 19521 { 19522 if (tp->t_port) { 19523 m->m_pkthdr.csum_flags = CSUM_UDP; 19524 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 19525 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 19526 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 19527 th->th_sum = htons(0); 19528 UDPSTAT_INC(udps_opackets); 19529 } else { 19530 m->m_pkthdr.csum_flags = CSUM_TCP; 19531 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 19532 th->th_sum = in_pseudo(ip->ip_src.s_addr, 19533 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 19534 IPPROTO_TCP + len + optlen)); 19535 } 19536 /* IP version must be set here for ipv4/ipv6 checking later */ 19537 KASSERT(ip->ip_v == IPVERSION, 19538 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 19539 } 19540 #endif 19541 if (tso) { 19542 /* 19543 * Here we use segsiz since we have no added options besides 19544 * any standard timestamp options (no DSACKs or SACKS are sent 19545 * via either fast-path). 19546 */ 19547 KASSERT(len > segsiz, 19548 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 19549 m->m_pkthdr.csum_flags |= CSUM_TSO; 19550 m->m_pkthdr.tso_segsz = segsiz; 19551 } 19552 #ifdef INET6 19553 if (rack->r_is_v6) { 19554 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 19555 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 19556 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 19557 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 19558 else 19559 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 19560 } 19561 #endif 19562 #if defined(INET) && defined(INET6) 19563 else 19564 #endif 19565 #ifdef INET 19566 { 19567 ip->ip_len = htons(m->m_pkthdr.len); 19568 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 19569 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 19570 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 19571 if (tp->t_port == 0 || len < V_tcp_minmss) { 19572 ip->ip_off |= htons(IP_DF); 19573 } 19574 } else { 19575 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 19576 } 19577 } 19578 #endif 19579 if (tp->snd_cwnd > tp->snd_ssthresh) { 19580 /* Set we sent in CA */ 19581 rack->rc_gp_saw_ca = 1; 19582 } else { 19583 /* Set we sent in SS */ 19584 rack->rc_gp_saw_ss = 1; 19585 } 19586 /* Time to copy in our header */ 19587 cpto = mtod(m, uint8_t *); 19588 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 19589 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 19590 if (optlen) { 19591 bcopy(opt, th + 1, optlen); 19592 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 19593 } else { 19594 th->th_off = sizeof(struct tcphdr) >> 2; 19595 } 19596 if ((rack->r_ctl.crte != NULL) && 19597 tcp_bblogging_on(tp)) { 19598 rack_log_queue_level(tp, rack, len, tv, cts); 19599 } 19600 if (tcp_bblogging_on(rack->rc_tp)) { 19601 union tcp_log_stackspecific log; 19602 19603 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 19604 log.u_bbr.inhpts = 
tcp_in_hpts(rack->rc_tp); 19605 if (rack->rack_no_prr) 19606 log.u_bbr.flex1 = 0; 19607 else 19608 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 19609 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 19610 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 19611 log.u_bbr.flex4 = max_val; 19612 /* Save off the early/late values */ 19613 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 19614 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 19615 log.u_bbr.bw_inuse = rack_get_bw(rack); 19616 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 19617 log.u_bbr.flex8 = 0; 19618 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 19619 log.u_bbr.flex7 = 44; 19620 log.u_bbr.pkts_out = tp->t_maxseg; 19621 log.u_bbr.timeStamp = cts; 19622 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 19623 log.u_bbr.flex5 = log.u_bbr.inflight; 19624 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 19625 log.u_bbr.delivered = 0; 19626 log.u_bbr.rttProp = 0; 19627 log.u_bbr.delRate = rack->r_must_retran; 19628 log.u_bbr.delRate <<= 1; 19629 log.u_bbr.pkt_epoch = __LINE__; 19630 /* For fast output no retrans so just inflight and how many mss we send */ 19631 log.u_bbr.flex5 = log.u_bbr.inflight; 19632 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 19633 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 19634 len, &log, false, NULL, __func__, __LINE__, tv); 19635 } else 19636 lgb = NULL; 19637 #ifdef INET6 19638 if (rack->r_is_v6) { 19639 error = ip6_output(m, inp->in6p_outputopts, 19640 &inp->inp_route6, 19641 0, NULL, NULL, inp); 19642 } 19643 #endif 19644 #if defined(INET) && defined(INET6) 19645 else 19646 #endif 19647 #ifdef INET 19648 { 19649 error = ip_output(m, NULL, 19650 &inp->inp_route, 19651 0, 0, inp); 19652 } 19653 #endif 19654 if (lgb) { 19655 lgb->tlb_errno = error; 19656 lgb = NULL; 19657 } 19658 if (error) { 19659 *send_err = error; 19660 m = NULL; 19661 goto failed; 19662 } else if (rack->rc_hw_nobuf) { 19663 rack->rc_hw_nobuf = 0; 19664 rack->r_ctl.rc_agg_delayed = 0; 19665 rack->r_early = 0; 19666 rack->r_late = 0; 19667 rack->r_ctl.rc_agg_early = 0; 19668 } 19669 if ((error == 0) && (rack->lt_bw_up == 0)) { 19670 /* Unlikely */ 19671 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(tv); 19672 rack->r_ctl.lt_seq = tp->snd_una; 19673 rack->lt_bw_up = 1; 19674 } 19675 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 19676 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls, segsiz); 19677 m = NULL; 19678 if (tp->snd_una == tp->snd_max) { 19679 rack->r_ctl.rc_tlp_rxt_last_time = cts; 19680 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 19681 tp->t_acktime = ticks; 19682 } 19683 counter_u64_add(rack_total_bytes, len); 19684 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 19685 19686 rack->forced_ack = 0; /* If we send something zap the FA flag */ 19687 tot_len += len; 19688 if ((tp->t_flags & TF_GPUTINPROG) == 0) 19689 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 19690 tp->snd_max += len; 19691 tp->snd_nxt = tp->snd_max; 19692 if (rack->rc_new_rnd_needed) { 19693 /* 19694 * Update the rnd to start ticking not 19695 * that from a time perspective all of 19696 * the preceding idle time is "in the round" 19697 */ 19698 rack->rc_new_rnd_needed = 0; 19699 rack->r_ctl.roundends = tp->snd_max; 19700 } 19701 { 19702 int idx; 19703 19704 idx = (len / segsiz) + 3; 19705 if (idx >= TCP_MSS_ACCT_ATIMER) 19706 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 19707 else 19708 
counter_u64_add(rack_out_size[idx], 1); 19709 } 19710 if (len <= rack->r_ctl.fsb.left_to_send) 19711 rack->r_ctl.fsb.left_to_send -= len; 19712 else 19713 rack->r_ctl.fsb.left_to_send = 0; 19714 if (rack->r_ctl.fsb.left_to_send < segsiz) { 19715 rack->r_fast_output = 0; 19716 rack->r_ctl.fsb.left_to_send = 0; 19717 /* At the end of fast_output scale up the sb */ 19718 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd); 19719 rack_sndbuf_autoscale(rack); 19720 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd); 19721 } 19722 if (tp->t_rtttime == 0) { 19723 tp->t_rtttime = ticks; 19724 tp->t_rtseq = startseq; 19725 KMOD_TCPSTAT_INC(tcps_segstimed); 19726 } 19727 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 19728 (max_val > len) && 19729 (tso == 0)) { 19730 max_val -= len; 19731 len = segsiz; 19732 th = rack->r_ctl.fsb.th; 19733 #ifdef TCP_ACCOUNTING 19734 cnt_thru++; 19735 #endif 19736 goto again; 19737 } 19738 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 19739 counter_u64_add(rack_fto_send, 1); 19740 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz); 19741 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0); 19742 #ifdef TCP_ACCOUNTING 19743 crtsc = get_cyclecount(); 19744 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19745 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 19746 } 19747 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19748 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 19749 } 19750 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19751 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz); 19752 } 19753 sched_unpin(); 19754 #endif 19755 return (0); 19756 failed: 19757 if (m) 19758 m_free(m); 19759 rack->r_fast_output = 0; 19760 return (-1); 19761 } 19762 19763 static inline void 19764 rack_setup_fast_output(struct tcpcb *tp, struct tcp_rack *rack, 19765 struct sockbuf *sb, 19766 int len, int orig_len, int segsiz, uint32_t pace_max_seg, 19767 bool hw_tls, 19768 uint16_t flags) 19769 { 19770 rack->r_fast_output = 1; 19771 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19772 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19773 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); 19774 rack->r_ctl.fsb.tcp_flags = flags; 19775 rack->r_ctl.fsb.left_to_send = orig_len - len; 19776 if (rack->r_ctl.fsb.left_to_send < pace_max_seg) { 19777 /* Less than a full sized pace, lets not */ 19778 rack->r_fast_output = 0; 19779 return; 19780 } else { 19781 /* Round down to the nearest pace_max_seg */ 19782 rack->r_ctl.fsb.left_to_send = rounddown(rack->r_ctl.fsb.left_to_send, pace_max_seg); 19783 } 19784 if (hw_tls) 19785 rack->r_ctl.fsb.hw_tls = 1; 19786 else 19787 rack->r_ctl.fsb.hw_tls = 0; 19788 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19789 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19790 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19791 (tp->snd_max - tp->snd_una))); 19792 if (rack->r_ctl.fsb.left_to_send < segsiz) 19793 rack->r_fast_output = 0; 19794 else { 19795 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19796 rack->r_ctl.fsb.rfo_apply_push = 1; 19797 else 19798 rack->r_ctl.fsb.rfo_apply_push = 0; 19799 } 19800 } 19801 19802 static uint32_t 19803 rack_get_hpts_pacing_min_for_bw(struct tcp_rack *rack, int32_t segsiz) 19804 { 19805 uint64_t min_time; 19806 uint32_t maxlen; 19807 19808 min_time = (uint64_t)get_hpts_min_sleep_time(); 19809 maxlen = (uint32_t)((rack->r_ctl.gp_bw * min_time) / (uint64_t)HPTS_USEC_IN_SEC); 19810 maxlen 
= roundup(maxlen, segsiz); 19811 return (maxlen); 19812 } 19813 19814 static struct rack_sendmap * 19815 rack_check_collapsed(struct tcp_rack *rack, uint32_t cts) 19816 { 19817 struct rack_sendmap *rsm = NULL; 19818 int thresh; 19819 19820 restart: 19821 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); 19822 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) { 19823 /* Nothing, strange turn off validity */ 19824 rack->r_collapse_point_valid = 0; 19825 return (NULL); 19826 } 19827 /* Can we send it yet? */ 19828 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) { 19829 /* 19830 * Receiver window has not grown enough for 19831 * the segment to be put on the wire. 19832 */ 19833 return (NULL); 19834 } 19835 if (rsm->r_flags & RACK_ACKED) { 19836 /* 19837 * It has been sacked, lets move to the 19838 * next one if possible. 19839 */ 19840 rack->r_ctl.last_collapse_point = rsm->r_end; 19841 /* Are we done? */ 19842 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 19843 rack->r_ctl.high_collapse_point)) { 19844 rack->r_collapse_point_valid = 0; 19845 return (NULL); 19846 } 19847 goto restart; 19848 } 19849 /* Now has it been long enough ? */ 19850 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts); 19851 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) { 19852 rack_log_collapse(rack, rsm->r_start, 19853 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 19854 thresh, __LINE__, 6, rsm->r_flags, rsm); 19855 return (rsm); 19856 } 19857 /* Not enough time */ 19858 rack_log_collapse(rack, rsm->r_start, 19859 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 19860 thresh, __LINE__, 7, rsm->r_flags, rsm); 19861 return (NULL); 19862 } 19863 19864 static inline void 19865 rack_validate_sizes(struct tcp_rack *rack, int32_t *len, int32_t segsiz, uint32_t pace_max_seg) 19866 { 19867 if ((rack->full_size_rxt == 0) && 19868 (rack->shape_rxt_to_pacing_min == 0) && 19869 (*len >= segsiz)) { 19870 *len = segsiz; 19871 } else if (rack->shape_rxt_to_pacing_min && 19872 rack->gp_ready) { 19873 /* We use pacing min as shaping len req */ 19874 uint32_t maxlen; 19875 19876 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 19877 if (*len > maxlen) 19878 *len = maxlen; 19879 } else { 19880 /* 19881 * The else is full_size_rxt is on so send it all 19882 * note we do need to check this for exceeding 19883 * our max segment size due to the fact that 19884 * we do sometimes merge chunks together i.e. 
19885 * we cannot just assume that we will never have 19886 * a chunk greater than pace_max_seg 19887 */ 19888 if (*len > pace_max_seg) 19889 *len = pace_max_seg; 19890 } 19891 } 19892 19893 static int 19894 rack_output(struct tcpcb *tp) 19895 { 19896 struct socket *so; 19897 uint32_t recwin; 19898 uint32_t sb_offset, s_moff = 0; 19899 int32_t len, error = 0; 19900 uint16_t flags; 19901 struct mbuf *m, *s_mb = NULL; 19902 struct mbuf *mb; 19903 uint32_t if_hw_tsomaxsegcount = 0; 19904 uint32_t if_hw_tsomaxsegsize; 19905 int32_t segsiz, minseg; 19906 long tot_len_this_send = 0; 19907 #ifdef INET 19908 struct ip *ip = NULL; 19909 #endif 19910 struct udphdr *udp = NULL; 19911 struct tcp_rack *rack; 19912 struct tcphdr *th; 19913 uint8_t pass = 0; 19914 uint8_t mark = 0; 19915 uint8_t check_done = 0; 19916 uint8_t wanted_cookie = 0; 19917 u_char opt[TCP_MAXOLEN]; 19918 unsigned ipoptlen, optlen, hdrlen, ulen=0; 19919 uint32_t rack_seq; 19920 19921 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 19922 unsigned ipsec_optlen = 0; 19923 19924 #endif 19925 int32_t idle, sendalot; 19926 int32_t sub_from_prr = 0; 19927 volatile int32_t sack_rxmit; 19928 struct rack_sendmap *rsm = NULL; 19929 int32_t tso, mtu; 19930 struct tcpopt to; 19931 int32_t slot = 0; 19932 int32_t sup_rack = 0; 19933 uint32_t cts, ms_cts, delayed, early; 19934 uint16_t add_flag = RACK_SENT_SP; 19935 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */ 19936 uint8_t doing_tlp = 0; 19937 uint32_t cwnd_to_use, pace_max_seg; 19938 int32_t do_a_prefetch = 0; 19939 int32_t prefetch_rsm = 0; 19940 int32_t orig_len = 0; 19941 struct timeval tv; 19942 int32_t prefetch_so_done = 0; 19943 struct tcp_log_buffer *lgb; 19944 struct inpcb *inp = tptoinpcb(tp); 19945 struct sockbuf *sb; 19946 uint64_t ts_val = 0; 19947 #ifdef TCP_ACCOUNTING 19948 uint64_t crtsc; 19949 #endif 19950 #ifdef INET6 19951 struct ip6_hdr *ip6 = NULL; 19952 int32_t isipv6; 19953 #endif 19954 bool hpts_calling, hw_tls = false; 19955 19956 NET_EPOCH_ASSERT(); 19957 INP_WLOCK_ASSERT(inp); 19958 19959 /* setup and take the cache hits here */ 19960 rack = (struct tcp_rack *)tp->t_fb_ptr; 19961 #ifdef TCP_ACCOUNTING 19962 sched_pin(); 19963 ts_val = get_cyclecount(); 19964 #endif 19965 hpts_calling = !!(tp->t_flags2 & TF2_HPTS_CALLS); 19966 tp->t_flags2 &= ~TF2_HPTS_CALLS; 19967 #ifdef TCP_OFFLOAD 19968 if (tp->t_flags & TF_TOE) { 19969 #ifdef TCP_ACCOUNTING 19970 sched_unpin(); 19971 #endif 19972 return (tcp_offload_output(tp)); 19973 } 19974 #endif 19975 if (rack->rack_deferred_inited == 0) { 19976 /* 19977 * If we are the connecting socket we will 19978 * hit rack_init() when no sequence numbers 19979 * are setup. This makes it so we must defer 19980 * some initialization. Call that now. 19981 */ 19982 rack_deferred_init(tp, rack); 19983 } 19984 /* 19985 * For TFO connections in SYN_RECEIVED, only allow the initial 19986 * SYN|ACK and those sent by the retransmit timer. 
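* (rc_resend being non-NULL is how the retransmit timer indicates it
* has requested this send, so in that case we do proceed.)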
19987 */ 19988 if (IS_FASTOPEN(tp->t_flags) && 19989 (tp->t_state == TCPS_SYN_RECEIVED) && 19990 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 19991 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 19992 #ifdef TCP_ACCOUNTING 19993 sched_unpin(); 19994 #endif 19995 return (0); 19996 } 19997 #ifdef INET6 19998 if (rack->r_state) { 19999 /* Use the cache line loaded if possible */ 20000 isipv6 = rack->r_is_v6; 20001 } else { 20002 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 20003 } 20004 #endif 20005 early = 0; 20006 cts = tcp_get_usecs(&tv); 20007 ms_cts = tcp_tv_to_mssectick(&tv); 20008 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 20009 tcp_in_hpts(rack->rc_tp)) { 20010 /* 20011 * We are on the hpts for some timer but not hptsi output. 20012 * Remove from the hpts unconditionally. 20013 */ 20014 rack_timer_cancel(tp, rack, cts, __LINE__); 20015 } 20016 /* Are we pacing and late? */ 20017 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 20018 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 20019 /* We are delayed */ 20020 delayed = cts - rack->r_ctl.rc_last_output_to; 20021 } else { 20022 delayed = 0; 20023 } 20024 /* Do the timers, which may override the pacer */ 20025 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 20026 int retval; 20027 20028 retval = rack_process_timers(tp, rack, cts, hpts_calling, 20029 &doing_tlp); 20030 if (retval != 0) { 20031 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 20032 #ifdef TCP_ACCOUNTING 20033 sched_unpin(); 20034 #endif 20035 /* 20036 * If timers want tcp_drop(), then pass error out, 20037 * otherwise suppress it. 20038 */ 20039 return (retval < 0 ? retval : 0); 20040 } 20041 } 20042 if (rack->rc_in_persist) { 20043 if (tcp_in_hpts(rack->rc_tp) == 0) { 20044 /* Timer is not running */ 20045 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 20046 } 20047 #ifdef TCP_ACCOUNTING 20048 sched_unpin(); 20049 #endif 20050 return (0); 20051 } 20052 if ((rack->rc_ack_required == 1) && 20053 (rack->r_timer_override == 0)){ 20054 /* A timeout occurred and no ack has arrived */ 20055 if (tcp_in_hpts(rack->rc_tp) == 0) { 20056 /* Timer is not running */ 20057 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 20058 } 20059 #ifdef TCP_ACCOUNTING 20060 sched_unpin(); 20061 #endif 20062 return (0); 20063 } 20064 if ((rack->r_timer_override) || 20065 (rack->rc_ack_can_sendout_data) || 20066 (delayed) || 20067 (tp->t_state < TCPS_ESTABLISHED)) { 20068 rack->rc_ack_can_sendout_data = 0; 20069 if (tcp_in_hpts(rack->rc_tp)) 20070 tcp_hpts_remove(rack->rc_tp); 20071 } else if (tcp_in_hpts(rack->rc_tp)) { 20072 /* 20073 * On the hpts you can't pass even if ACKNOW is on, we will 20074 * when the hpts fires. 
20075 */ 20076 #ifdef TCP_ACCOUNTING 20077 crtsc = get_cyclecount(); 20078 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 20079 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 20080 } 20081 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 20082 tp->tcp_cnt_counters[SND_BLOCKED]++; 20083 } 20084 sched_unpin(); 20085 #endif 20086 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 20087 return (0); 20088 } 20089 /* Finish out both pacing early and late accounting */ 20090 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 20091 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 20092 early = rack->r_ctl.rc_last_output_to - cts; 20093 } else 20094 early = 0; 20095 if (delayed) { 20096 rack->r_ctl.rc_agg_delayed += delayed; 20097 rack->r_late = 1; 20098 } else if (early) { 20099 rack->r_ctl.rc_agg_early += early; 20100 rack->r_early = 1; 20101 } 20102 /* Now that early/late accounting is done turn off the flag */ 20103 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 20104 rack->r_wanted_output = 0; 20105 rack->r_timer_override = 0; 20106 if ((tp->t_state != rack->r_state) && 20107 TCPS_HAVEESTABLISHED(tp->t_state)) { 20108 rack_set_state(tp, rack); 20109 } 20110 if ((rack->r_fast_output) && 20111 (doing_tlp == 0) && 20112 (tp->rcv_numsacks == 0)) { 20113 int ret; 20114 20115 error = 0; 20116 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 20117 if (ret >= 0) 20118 return(ret); 20119 else if (error) { 20120 inp = rack->rc_inp; 20121 so = inp->inp_socket; 20122 sb = &so->so_snd; 20123 goto nomore; 20124 } 20125 } 20126 inp = rack->rc_inp; 20127 /* 20128 * For TFO connections in SYN_SENT or SYN_RECEIVED, 20129 * only allow the initial SYN or SYN|ACK and those sent 20130 * by the retransmit timer. 20131 */ 20132 if (IS_FASTOPEN(tp->t_flags) && 20133 ((tp->t_state == TCPS_SYN_RECEIVED) || 20134 (tp->t_state == TCPS_SYN_SENT)) && 20135 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 20136 (tp->t_rxtshift == 0)) { /* not a retransmit */ 20137 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 20138 so = inp->inp_socket; 20139 sb = &so->so_snd; 20140 goto just_return_nolock; 20141 } 20142 /* 20143 * Determine length of data that should be transmitted, and flags 20144 * that will be used. If there is some data or critical controls 20145 * (SYN, RST) to send, then transmit; otherwise, investigate 20146 * further. 
20147 */ 20148 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 20149 if (tp->t_idle_reduce) { 20150 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 20151 rack_cc_after_idle(rack, tp); 20152 } 20153 tp->t_flags &= ~TF_LASTIDLE; 20154 if (idle) { 20155 if (tp->t_flags & TF_MORETOCOME) { 20156 tp->t_flags |= TF_LASTIDLE; 20157 idle = 0; 20158 } 20159 } 20160 if ((tp->snd_una == tp->snd_max) && 20161 rack->r_ctl.rc_went_idle_time && 20162 TSTMP_GT(cts, rack->r_ctl.rc_went_idle_time)) { 20163 idle = cts - rack->r_ctl.rc_went_idle_time; 20164 if (idle > rack_min_probertt_hold) { 20165 /* Count as a probe rtt */ 20166 if (rack->in_probe_rtt == 0) { 20167 rack->r_ctl.rc_lower_rtt_us_cts = cts; 20168 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 20169 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 20170 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 20171 } else { 20172 rack_exit_probertt(rack, cts); 20173 } 20174 } 20175 idle = 0; 20176 } 20177 if (rack_use_fsb && 20178 (rack->r_ctl.fsb.tcp_ip_hdr) && 20179 (rack->r_fsb_inited == 0) && 20180 (rack->r_state != TCPS_CLOSED)) 20181 rack_init_fsb_block(tp, rack, tcp_outflags[tp->t_state]); 20182 again: 20183 /* 20184 * If we've recently taken a timeout, snd_max will be greater than 20185 * snd_nxt. There may be SACK information that allows us to avoid 20186 * resending already delivered data. Adjust snd_nxt accordingly. 20187 */ 20188 sendalot = 0; 20189 cts = tcp_get_usecs(&tv); 20190 ms_cts = tcp_tv_to_mssectick(&tv); 20191 tso = 0; 20192 mtu = 0; 20193 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 20194 minseg = segsiz; 20195 if (rack->r_ctl.rc_pace_max_segs == 0) 20196 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 20197 else 20198 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 20199 sb_offset = tp->snd_max - tp->snd_una; 20200 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 20201 flags = tcp_outflags[tp->t_state]; 20202 while (rack->rc_free_cnt < rack_free_cache) { 20203 rsm = rack_alloc(rack); 20204 if (rsm == NULL) { 20205 if (hpts_calling) 20206 /* Retry in a ms */ 20207 slot = (1 * HPTS_USEC_IN_MSEC); 20208 so = inp->inp_socket; 20209 sb = &so->so_snd; 20210 goto just_return_nolock; 20211 } 20212 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 20213 rack->rc_free_cnt++; 20214 rsm = NULL; 20215 } 20216 sack_rxmit = 0; 20217 len = 0; 20218 rsm = NULL; 20219 if (flags & TH_RST) { 20220 SOCKBUF_LOCK(&inp->inp_socket->so_snd); 20221 so = inp->inp_socket; 20222 sb = &so->so_snd; 20223 goto send; 20224 } 20225 if (rack->r_ctl.rc_resend) { 20226 /* Retransmit timer */ 20227 rsm = rack->r_ctl.rc_resend; 20228 rack->r_ctl.rc_resend = NULL; 20229 len = rsm->r_end - rsm->r_start; 20230 sack_rxmit = 1; 20231 sendalot = 0; 20232 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 20233 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 20234 __func__, __LINE__, 20235 rsm->r_start, tp->snd_una, tp, rack, rsm)); 20236 sb_offset = rsm->r_start - tp->snd_una; 20237 rack_validate_sizes(rack, &len, segsiz, pace_max_seg); 20238 } else if (rack->r_collapse_point_valid && 20239 ((rsm = rack_check_collapsed(rack, cts)) != NULL)) { 20240 /* 20241 * If an RSM is returned then enough time has passed 20242 * for us to retransmit it. Move up the collapse point, 20243 * since this rsm has its chance to retransmit now. 
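 * Note that rack_check_collapsed() only hands back an rsm once the
 * time since its last (re)transmission exceeds the threshold from
 * rack_calc_thresh_rack(), so collapsed-window retransmits are paced
 * by the same timing rule as ordinary RACK retransmits.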
20244 */
20245 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_RXT);
20246 rack->r_ctl.last_collapse_point = rsm->r_end;
20247 /* Are we done? */
20248 if (SEQ_GEQ(rack->r_ctl.last_collapse_point,
20249 rack->r_ctl.high_collapse_point))
20250 rack->r_collapse_point_valid = 0;
20251 sack_rxmit = 1;
20252 /* We are not doing a TLP */
20253 doing_tlp = 0;
20254 len = rsm->r_end - rsm->r_start;
20255 sb_offset = rsm->r_start - tp->snd_una;
20256 sendalot = 0;
20257 rack_validate_sizes(rack, &len, segsiz, pace_max_seg);
20258 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) {
20259 /* We have a retransmit that takes precedence */
20260 if ((!IN_FASTRECOVERY(tp->t_flags)) &&
20261 ((rsm->r_flags & RACK_MUST_RXT) == 0) &&
20262 ((tp->t_flags & TF_WASFRECOVERY) == 0)) {
20263 /* Enter recovery if not induced by a time-out */
20264 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
20265 }
20266 #ifdef INVARIANTS
20267 if (SEQ_LT(rsm->r_start, tp->snd_una)) {
20268 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
20269 tp, rack, rsm, rsm->r_start, tp->snd_una);
20270 }
20271 #endif
20272 len = rsm->r_end - rsm->r_start;
20273 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
20274 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
20275 __func__, __LINE__,
20276 rsm->r_start, tp->snd_una, tp, rack, rsm));
20277 sb_offset = rsm->r_start - tp->snd_una;
20278 sendalot = 0;
20279 rack_validate_sizes(rack, &len, segsiz, pace_max_seg);
20280 if (len > 0) {
20281 sack_rxmit = 1;
20282 KMOD_TCPSTAT_INC(tcps_sack_rexmits);
20283 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes,
20284 min(len, segsiz));
20285 }
20286 } else if (rack->r_ctl.rc_tlpsend) {
20287 /* Tail loss probe */
20288 long cwin;
20289 long tlen;
20290
20291 /*
20292 * Check if we can do a TLP with a RACK'd packet;
20293 * this can happen if we are not doing the rack
20294 * cheat and we skipped to a TLP and it
20295 * went off.
20296 */
20297 rsm = rack->r_ctl.rc_tlpsend;
20298 /* We are doing a TLP, make sure the flag is present */
20299 rsm->r_flags |= RACK_TLP;
20300 rack->r_ctl.rc_tlpsend = NULL;
20301 sack_rxmit = 1;
20302 tlen = rsm->r_end - rsm->r_start;
20303 if (tlen > segsiz)
20304 tlen = segsiz;
20305 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
20306 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
20307 __func__, __LINE__,
20308 rsm->r_start, tp->snd_una, tp, rack, rsm));
20309 sb_offset = rsm->r_start - tp->snd_una;
20310 cwin = min(tp->snd_wnd, tlen);
20311 len = cwin;
20312 }
20313 if (rack->r_must_retran &&
20314 (doing_tlp == 0) &&
20315 (SEQ_GT(tp->snd_max, tp->snd_una)) &&
20316 (rsm == NULL)) {
20317 /*
20318 * There are two different ways that we
20319 * can get into this block:
20320 * a) This is a non-sack connection, we had a time-out
20321 * and thus r_must_retran was set and everything
20322 * left outstanding has been marked for retransmit.
20323 * b) The MTU of the path shrank, so that everything
20324 * was marked to be retransmitted with the smaller
20325 * mtu and r_must_retran was set.
20326 *
20327 * This means that we expect the sendmap (outstanding)
20328 * to all be marked must. We can use the tmap to
20329 * look at them.
20330 *
20331 */
20332 int sendwin, flight;
20333
20334 sendwin = min(tp->snd_wnd, tp->snd_cwnd);
20335 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto);
20336 if (flight >= sendwin) {
20337 /*
20338 * We can't send yet.
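 * The flight size computed above already fills min(snd_wnd, snd_cwnd),
 * so the must-retransmit data has to wait for incoming acks to open
 * up room before we can push any of it back out.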
20339 */ 20340 so = inp->inp_socket; 20341 sb = &so->so_snd; 20342 goto just_return_nolock; 20343 } 20344 /* 20345 * This is the case a/b mentioned above. All 20346 * outstanding/not-acked should be marked. 20347 * We can use the tmap to find them. 20348 */ 20349 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 20350 if (rsm == NULL) { 20351 /* TSNH */ 20352 rack->r_must_retran = 0; 20353 rack->r_ctl.rc_out_at_rto = 0; 20354 so = inp->inp_socket; 20355 sb = &so->so_snd; 20356 goto just_return_nolock; 20357 } 20358 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { 20359 /* 20360 * The first one does not have the flag, did we collapse 20361 * further up in our list? 20362 */ 20363 rack->r_must_retran = 0; 20364 rack->r_ctl.rc_out_at_rto = 0; 20365 rsm = NULL; 20366 sack_rxmit = 0; 20367 } else { 20368 sack_rxmit = 1; 20369 len = rsm->r_end - rsm->r_start; 20370 sb_offset = rsm->r_start - tp->snd_una; 20371 sendalot = 0; 20372 if ((rack->full_size_rxt == 0) && 20373 (rack->shape_rxt_to_pacing_min == 0) && 20374 (len >= segsiz)) 20375 len = segsiz; 20376 else if (rack->shape_rxt_to_pacing_min && 20377 rack->gp_ready) { 20378 /* We use pacing min as shaping len req */ 20379 uint32_t maxlen; 20380 20381 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 20382 if (len > maxlen) 20383 len = maxlen; 20384 } 20385 /* 20386 * Delay removing the flag RACK_MUST_RXT so 20387 * that the fastpath for retransmit will 20388 * work with this rsm. 20389 */ 20390 } 20391 } 20392 /* 20393 * Enforce a connection sendmap count limit if set 20394 * as long as we are not retransmiting. 20395 */ 20396 if ((rsm == NULL) && 20397 (rack->do_detection == 0) && 20398 (V_tcp_map_entries_limit > 0) && 20399 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 20400 counter_u64_add(rack_to_alloc_limited, 1); 20401 if (!rack->alloc_limit_reported) { 20402 rack->alloc_limit_reported = 1; 20403 counter_u64_add(rack_alloc_limited_conns, 1); 20404 } 20405 so = inp->inp_socket; 20406 sb = &so->so_snd; 20407 goto just_return_nolock; 20408 } 20409 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 20410 /* we are retransmitting the fin */ 20411 len--; 20412 if (len) { 20413 /* 20414 * When retransmitting data do *not* include the 20415 * FIN. This could happen from a TLP probe. 
20416 */ 20417 flags &= ~TH_FIN; 20418 } 20419 } 20420 if (rsm && rack->r_fsb_inited && 20421 rack_use_rsm_rfo && 20422 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 20423 int ret; 20424 20425 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); 20426 if (ret == 0) 20427 return (0); 20428 } 20429 so = inp->inp_socket; 20430 sb = &so->so_snd; 20431 if (do_a_prefetch == 0) { 20432 kern_prefetch(sb, &do_a_prefetch); 20433 do_a_prefetch = 1; 20434 } 20435 #ifdef NETFLIX_SHARED_CWND 20436 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 20437 rack->rack_enable_scwnd) { 20438 /* We are doing cwnd sharing */ 20439 if (rack->gp_ready && 20440 (rack->rack_attempted_scwnd == 0) && 20441 (rack->r_ctl.rc_scw == NULL) && 20442 tp->t_lib) { 20443 /* The pcbid is in, lets make an attempt */ 20444 counter_u64_add(rack_try_scwnd, 1); 20445 rack->rack_attempted_scwnd = 1; 20446 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 20447 &rack->r_ctl.rc_scw_index, 20448 segsiz); 20449 } 20450 if (rack->r_ctl.rc_scw && 20451 (rack->rack_scwnd_is_idle == 1) && 20452 sbavail(&so->so_snd)) { 20453 /* we are no longer out of data */ 20454 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 20455 rack->rack_scwnd_is_idle = 0; 20456 } 20457 if (rack->r_ctl.rc_scw) { 20458 /* First lets update and get the cwnd */ 20459 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 20460 rack->r_ctl.rc_scw_index, 20461 tp->snd_cwnd, tp->snd_wnd, segsiz); 20462 } 20463 } 20464 #endif 20465 /* 20466 * Get standard flags, and add SYN or FIN if requested by 'hidden' 20467 * state flags. 20468 */ 20469 if (tp->t_flags & TF_NEEDFIN) 20470 flags |= TH_FIN; 20471 if (tp->t_flags & TF_NEEDSYN) 20472 flags |= TH_SYN; 20473 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 20474 void *end_rsm; 20475 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 20476 if (end_rsm) 20477 kern_prefetch(end_rsm, &prefetch_rsm); 20478 prefetch_rsm = 1; 20479 } 20480 SOCKBUF_LOCK(sb); 20481 /* 20482 * If snd_nxt == snd_max and we have transmitted a FIN, the 20483 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a 20484 * negative length. This can also occur when TCP opens up its 20485 * congestion window while receiving additional duplicate acks after 20486 * fast-retransmit because TCP will reset snd_nxt to snd_max after 20487 * the fast-retransmit. 20488 * 20489 * In the normal retransmit-FIN-only case, however, snd_nxt will be 20490 * set to snd_una, the sb_offset will be 0, and the length may wind 20491 * up 0. 20492 * 20493 * If sack_rxmit is true we are retransmitting from the scoreboard 20494 * in which case len is already set. 
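 * Otherwise len is sized from the socket buffer below: a pending TLP
 * may force out new data, rack_what_can_we_send() handles the normal
 * case, and when PRR is active in fast recovery the rc_prr_sndcnt
 * credit bounds how much new data may leave.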
20495 */ 20496 if ((sack_rxmit == 0) && 20497 (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) { 20498 uint32_t avail; 20499 20500 avail = sbavail(sb); 20501 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail) 20502 sb_offset = tp->snd_nxt - tp->snd_una; 20503 else 20504 sb_offset = 0; 20505 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { 20506 if (rack->r_ctl.rc_tlp_new_data) { 20507 /* TLP is forcing out new data */ 20508 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 20509 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 20510 } 20511 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { 20512 if (tp->snd_wnd > sb_offset) 20513 len = tp->snd_wnd - sb_offset; 20514 else 20515 len = 0; 20516 } else { 20517 len = rack->r_ctl.rc_tlp_new_data; 20518 } 20519 rack->r_ctl.rc_tlp_new_data = 0; 20520 } else { 20521 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); 20522 } 20523 if ((rack->r_ctl.crte == NULL) && 20524 IN_FASTRECOVERY(tp->t_flags) && 20525 (rack->full_size_rxt == 0) && 20526 (rack->shape_rxt_to_pacing_min == 0) && 20527 (len > segsiz)) { 20528 /* 20529 * For prr=off, we need to send only 1 MSS 20530 * at a time. We do this because another sack could 20531 * be arriving that causes us to send retransmits and 20532 * we don't want to be on a long pace due to a larger send 20533 * that keeps us from sending out the retransmit. 20534 */ 20535 len = segsiz; 20536 } else if (rack->shape_rxt_to_pacing_min && 20537 rack->gp_ready) { 20538 /* We use pacing min as shaping len req */ 20539 uint32_t maxlen; 20540 20541 maxlen = rack_get_hpts_pacing_min_for_bw(rack, segsiz); 20542 if (len > maxlen) 20543 len = maxlen; 20544 }/* The else is full_size_rxt is on so send it all */ 20545 } else { 20546 uint32_t outstanding; 20547 /* 20548 * We are inside of a Fast recovery episode, this 20549 * is caused by a SACK or 3 dup acks. At this point 20550 * we have sent all the retransmissions and we rely 20551 * on PRR to dictate what we will send in the form of 20552 * new data. 20553 */ 20554 20555 outstanding = tp->snd_max - tp->snd_una; 20556 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 20557 if (tp->snd_wnd > outstanding) { 20558 len = tp->snd_wnd - outstanding; 20559 /* Check to see if we have the data */ 20560 if ((sb_offset + len) > avail) { 20561 /* It does not all fit */ 20562 if (avail > sb_offset) 20563 len = avail - sb_offset; 20564 else 20565 len = 0; 20566 } 20567 } else { 20568 len = 0; 20569 } 20570 } else if (avail > sb_offset) { 20571 len = avail - sb_offset; 20572 } else { 20573 len = 0; 20574 } 20575 if (len > 0) { 20576 if (len > rack->r_ctl.rc_prr_sndcnt) { 20577 len = rack->r_ctl.rc_prr_sndcnt; 20578 } 20579 if (len > 0) { 20580 sub_from_prr = 1; 20581 } 20582 } 20583 if (len > segsiz) { 20584 /* 20585 * We should never send more than a MSS when 20586 * retransmitting or sending new data in prr 20587 * mode unless the override flag is on. Most 20588 * likely the PRR algorithm is not going to 20589 * let us send a lot as well :-) 20590 */ 20591 if (rack->r_ctl.rc_prr_sendalot == 0) { 20592 len = segsiz; 20593 } 20594 } else if (len < segsiz) { 20595 /* 20596 * Do we send any? The idea here is if the 20597 * send empty's the socket buffer we want to 20598 * do it. However if not then lets just wait 20599 * for our prr_sndcnt to get bigger. 
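 * For example (hypothetical numbers): with segsiz of 1448 bytes, only
 * 600 bytes of PRR credit and several kilobytes still queued beyond
 * sb_offset, the 600 byte send would neither be a full segment nor
 * drain the buffer, so we send nothing and wait for more acks to grow
 * rc_prr_sndcnt.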
20600 */ 20601 long leftinsb; 20602 20603 leftinsb = sbavail(sb) - sb_offset; 20604 if (leftinsb > len) { 20605 /* This send does not empty the sb */ 20606 len = 0; 20607 } 20608 } 20609 } 20610 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 20611 /* 20612 * If you have not established 20613 * and are not doing FAST OPEN 20614 * no data please. 20615 */ 20616 if ((sack_rxmit == 0) && 20617 (!IS_FASTOPEN(tp->t_flags))){ 20618 len = 0; 20619 sb_offset = 0; 20620 } 20621 } 20622 if (prefetch_so_done == 0) { 20623 kern_prefetch(so, &prefetch_so_done); 20624 prefetch_so_done = 1; 20625 } 20626 /* 20627 * Lop off SYN bit if it has already been sent. However, if this is 20628 * SYN-SENT state and if segment contains data and if we don't know 20629 * that foreign host supports TAO, suppress sending segment. 20630 */ 20631 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) && 20632 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) { 20633 /* 20634 * When sending additional segments following a TFO SYN|ACK, 20635 * do not include the SYN bit. 20636 */ 20637 if (IS_FASTOPEN(tp->t_flags) && 20638 (tp->t_state == TCPS_SYN_RECEIVED)) 20639 flags &= ~TH_SYN; 20640 } 20641 /* 20642 * Be careful not to send data and/or FIN on SYN segments. This 20643 * measure is needed to prevent interoperability problems with not 20644 * fully conformant TCP implementations. 20645 */ 20646 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 20647 len = 0; 20648 flags &= ~TH_FIN; 20649 } 20650 /* 20651 * On TFO sockets, ensure no data is sent in the following cases: 20652 * 20653 * - When retransmitting SYN|ACK on a passively-created socket 20654 * 20655 * - When retransmitting SYN on an actively created socket 20656 * 20657 * - When sending a zero-length cookie (cookie request) on an 20658 * actively created socket 20659 * 20660 * - When the socket is in the CLOSED state (RST is being sent) 20661 */ 20662 if (IS_FASTOPEN(tp->t_flags) && 20663 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 20664 ((tp->t_state == TCPS_SYN_SENT) && 20665 (tp->t_tfo_client_cookie_len == 0)) || 20666 (flags & TH_RST))) { 20667 sack_rxmit = 0; 20668 len = 0; 20669 } 20670 /* Without fast-open there should never be data sent on a SYN */ 20671 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) { 20672 tp->snd_nxt = tp->iss; 20673 len = 0; 20674 } 20675 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 20676 /* We only send 1 MSS if we have a DSACK block */ 20677 add_flag |= RACK_SENT_W_DSACK; 20678 len = segsiz; 20679 } 20680 orig_len = len; 20681 if (len <= 0) { 20682 /* 20683 * If FIN has been sent but not acked, but we haven't been 20684 * called to retransmit, len will be < 0. Otherwise, window 20685 * shrank after we sent into it. If window shrank to 0, 20686 * cancel pending retransmit, pull snd_nxt back to (closed) 20687 * window, and set the persist timer if it isn't already 20688 * going. If the window didn't close completely, just wait 20689 * for an ACK. 20690 * 20691 * We also do a general check here to ensure that we will 20692 * set the persist timer when we have data to send, but a 20693 * 0-byte window. This makes sure the persist timer is set 20694 * even if the packet hits one of the "goto send" lines 20695 * below. 
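 * Concretely: when the peer advertises a zero window, the connection
 * is established, nothing is outstanding and data is still queued,
 * rack_enter_persist() is called right below.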
20696 */ 20697 len = 0; 20698 if ((tp->snd_wnd == 0) && 20699 (TCPS_HAVEESTABLISHED(tp->t_state)) && 20700 (tp->snd_una == tp->snd_max) && 20701 (sb_offset < (int)sbavail(sb))) { 20702 rack_enter_persist(tp, rack, cts, tp->snd_una); 20703 } 20704 } else if ((rsm == NULL) && 20705 (doing_tlp == 0) && 20706 (len < pace_max_seg)) { 20707 /* 20708 * We are not sending a maximum sized segment for 20709 * some reason. Should we not send anything (think 20710 * sws or persists)? 20711 */ 20712 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 20713 (TCPS_HAVEESTABLISHED(tp->t_state)) && 20714 (len < minseg) && 20715 (len < (int)(sbavail(sb) - sb_offset))) { 20716 /* 20717 * Here the rwnd is less than 20718 * the minimum pacing size, this is not a retransmit, 20719 * we are established and 20720 * the send is not the last in the socket buffer 20721 * we send nothing, and we may enter persists 20722 * if nothing is outstanding. 20723 */ 20724 len = 0; 20725 if (tp->snd_max == tp->snd_una) { 20726 /* 20727 * Nothing out we can 20728 * go into persists. 20729 */ 20730 rack_enter_persist(tp, rack, cts, tp->snd_una); 20731 } 20732 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 20733 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 20734 (len < (int)(sbavail(sb) - sb_offset)) && 20735 (len < minseg)) { 20736 /* 20737 * Here we are not retransmitting, and 20738 * the cwnd is not so small that we could 20739 * not send at least a min size (rxt timer 20740 * not having gone off), We have 2 segments or 20741 * more already in flight, its not the tail end 20742 * of the socket buffer and the cwnd is blocking 20743 * us from sending out a minimum pacing segment size. 20744 * Lets not send anything. 20745 */ 20746 len = 0; 20747 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 20748 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 20749 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 20750 (len < (int)(sbavail(sb) - sb_offset)) && 20751 (TCPS_HAVEESTABLISHED(tp->t_state))) { 20752 /* 20753 * Here we have a send window but we have 20754 * filled it up and we can't send another pacing segment. 20755 * We also have in flight more than 2 segments 20756 * and we are not completing the sb i.e. we allow 20757 * the last bytes of the sb to go out even if 20758 * its not a full pacing segment. 20759 */ 20760 len = 0; 20761 } else if ((rack->r_ctl.crte != NULL) && 20762 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && 20763 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) && 20764 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && 20765 (len < (int)(sbavail(sb) - sb_offset))) { 20766 /* 20767 * Here we are doing hardware pacing, this is not a TLP, 20768 * we are not sending a pace max segment size, there is rwnd 20769 * room to send at least N pace_max_seg, the cwnd is greater 20770 * than or equal to a full pacing segments plus 4 mss and we have 2 or 20771 * more segments in flight and its not the tail of the socket buffer. 20772 * 20773 * We don't want to send instead we need to get more ack's in to 20774 * allow us to send a full pacing segment. Normally, if we are pacing 20775 * about the right speed, we should have finished our pacing 20776 * send as most of the acks have come back if we are at the 20777 * right rate. This is a bit fuzzy since return path delay 20778 * can delay the acks, which is why we want to make sure we 20779 * have cwnd space to have a bit more than a max pace segments in flight. 
20780 * 20781 * If we have not gotten our acks back we are pacing at too high a 20782 * rate delaying will not hurt and will bring our GP estimate down by 20783 * injecting the delay. If we don't do this we will send 20784 * 2 MSS out in response to the acks being clocked in which 20785 * defeats the point of hw-pacing (i.e. to help us get 20786 * larger TSO's out). 20787 */ 20788 len = 0; 20789 } 20790 20791 } 20792 /* len will be >= 0 after this point. */ 20793 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 20794 rack_sndbuf_autoscale(rack); 20795 /* 20796 * Decide if we can use TCP Segmentation Offloading (if supported by 20797 * hardware). 20798 * 20799 * TSO may only be used if we are in a pure bulk sending state. The 20800 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 20801 * options prevent using TSO. With TSO the TCP header is the same 20802 * (except for the sequence number) for all generated packets. This 20803 * makes it impossible to transmit any options which vary per 20804 * generated segment or packet. 20805 * 20806 * IPv4 handling has a clear separation of ip options and ip header 20807 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 20808 * the right thing below to provide length of just ip options and thus 20809 * checking for ipoptlen is enough to decide if ip options are present. 20810 */ 20811 ipoptlen = 0; 20812 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 20813 /* 20814 * Pre-calculate here as we save another lookup into the darknesses 20815 * of IPsec that way and can actually decide if TSO is ok. 20816 */ 20817 #ifdef INET6 20818 if (isipv6 && IPSEC_ENABLED(ipv6)) 20819 ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp); 20820 #ifdef INET 20821 else 20822 #endif 20823 #endif /* INET6 */ 20824 #ifdef INET 20825 if (IPSEC_ENABLED(ipv4)) 20826 ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp); 20827 #endif /* INET */ 20828 #endif 20829 20830 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 20831 ipoptlen += ipsec_optlen; 20832 #endif 20833 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && 20834 (tp->t_port == 0) && 20835 ((tp->t_flags & TF_SIGNATURE) == 0) && 20836 tp->rcv_numsacks == 0 && sack_rxmit == 0 && 20837 ipoptlen == 0) 20838 tso = 1; 20839 { 20840 uint32_t outstanding __unused; 20841 20842 outstanding = tp->snd_max - tp->snd_una; 20843 if (tp->t_flags & TF_SENTFIN) { 20844 /* 20845 * If we sent a fin, snd_max is 1 higher than 20846 * snd_una 20847 */ 20848 outstanding--; 20849 } 20850 if (sack_rxmit) { 20851 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 20852 flags &= ~TH_FIN; 20853 } else { 20854 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + 20855 sbused(sb))) 20856 flags &= ~TH_FIN; 20857 } 20858 } 20859 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), 20860 (long)TCP_MAXWIN << tp->rcv_scale); 20861 20862 /* 20863 * Sender silly window avoidance. We transmit under the following 20864 * conditions when len is non-zero: 20865 * 20866 * - We have a full segment (or more with TSO) - This is the last 20867 * buffer in a write()/send() and we are either idle or running 20868 * NODELAY - we've timed out (e.g. persist timer) - we have more 20869 * then 1/2 the maximum send window's worth of data (receiver may be 20870 * limited the window size) - we need to retransmit 20871 */ 20872 if (len) { 20873 if (len >= segsiz) { 20874 goto send; 20875 } 20876 /* 20877 * NOTE! on localhost connections an 'ack' from the remote 20878 * end may occur synchronously with the output and cause us 20879 * to flush a buffer queued with moretocome. 
XXX 20880 * 20881 */ 20882 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 20883 (idle || (tp->t_flags & TF_NODELAY)) && 20884 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 20885 (tp->t_flags & TF_NOPUSH) == 0) { 20886 pass = 2; 20887 goto send; 20888 } 20889 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 20890 pass = 22; 20891 goto send; 20892 } 20893 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 20894 pass = 4; 20895 goto send; 20896 } 20897 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */ 20898 pass = 5; 20899 goto send; 20900 } 20901 if (sack_rxmit) { 20902 pass = 6; 20903 goto send; 20904 } 20905 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && 20906 (ctf_outstanding(tp) < (segsiz * 2))) { 20907 /* 20908 * We have less than two MSS outstanding (delayed ack) 20909 * and our rwnd will not let us send a full sized 20910 * MSS. Lets go ahead and let this small segment 20911 * out because we want to try to have at least two 20912 * packets inflight to not be caught by delayed ack. 20913 */ 20914 pass = 12; 20915 goto send; 20916 } 20917 } 20918 /* 20919 * Sending of standalone window updates. 20920 * 20921 * Window updates are important when we close our window due to a 20922 * full socket buffer and are opening it again after the application 20923 * reads data from it. Once the window has opened again and the 20924 * remote end starts to send again the ACK clock takes over and 20925 * provides the most current window information. 20926 * 20927 * We must avoid the silly window syndrome whereas every read from 20928 * the receive buffer, no matter how small, causes a window update 20929 * to be sent. We also should avoid sending a flurry of window 20930 * updates when the socket buffer had queued a lot of data and the 20931 * application is doing small reads. 20932 * 20933 * Prevent a flurry of pointless window updates by only sending an 20934 * update when we can increase the advertized window by more than 20935 * 1/4th of the socket buffer capacity. When the buffer is getting 20936 * full or is very small be more aggressive and send an update 20937 * whenever we can increase by two mss sized segments. In all other 20938 * situations the ACK's to new incoming data will carry further 20939 * window increases. 20940 * 20941 * Don't send an independent window update if a delayed ACK is 20942 * pending (it will get piggy-backed on it) or the remote side 20943 * already has done a half-close and won't send more data. Skip 20944 * this if the connection is in T/TCP half-open state. 20945 */ 20946 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 20947 !(tp->t_flags & TF_DELACK) && 20948 !TCPS_HAVERCVDFIN(tp->t_state)) { 20949 /* 20950 * "adv" is the amount we could increase the window, taking 20951 * into account that we are limited by TCP_MAXWIN << 20952 * tp->rcv_scale. 20953 */ 20954 int32_t adv; 20955 int oldwin; 20956 20957 adv = recwin; 20958 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 20959 oldwin = (tp->rcv_adv - tp->rcv_nxt); 20960 if (adv > oldwin) 20961 adv -= oldwin; 20962 else { 20963 /* We can't increase the window */ 20964 adv = 0; 20965 } 20966 } else 20967 oldwin = 0; 20968 20969 /* 20970 * If the new window size ends up being the same as or less 20971 * than the old size when it is scaled, then don't force 20972 * a window update. 
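 * For example, with rcv_scale of 7 the advertised window moves in
 * 128-byte units, so an increase that does not cross a 128-byte
 * boundary is invisible to the peer and not worth an otherwise empty
 * segment.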
20973 */ 20974 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 20975 goto dontupdate; 20976 20977 if (adv >= (int32_t)(2 * segsiz) && 20978 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 20979 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 20980 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 20981 pass = 7; 20982 goto send; 20983 } 20984 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 20985 pass = 23; 20986 goto send; 20987 } 20988 } 20989 dontupdate: 20990 20991 /* 20992 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 20993 * is also a catch-all for the retransmit timer timeout case. 20994 */ 20995 if (tp->t_flags & TF_ACKNOW) { 20996 pass = 8; 20997 goto send; 20998 } 20999 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 21000 pass = 9; 21001 goto send; 21002 } 21003 /* 21004 * If our state indicates that FIN should be sent and we have not 21005 * yet done so, then we need to send. 21006 */ 21007 if ((flags & TH_FIN) && 21008 (tp->snd_nxt == tp->snd_una)) { 21009 pass = 11; 21010 goto send; 21011 } 21012 /* 21013 * No reason to send a segment, just return. 21014 */ 21015 just_return: 21016 SOCKBUF_UNLOCK(sb); 21017 just_return_nolock: 21018 { 21019 int app_limited = CTF_JR_SENT_DATA; 21020 21021 if (tot_len_this_send > 0) { 21022 /* Make sure snd_nxt is up to max */ 21023 rack->r_ctl.fsb.recwin = recwin; 21024 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz); 21025 if ((error == 0) && 21026 rack_use_rfo && 21027 ((flags & (TH_SYN|TH_FIN)) == 0) && 21028 (ipoptlen == 0) && 21029 (tp->snd_nxt == tp->snd_max) && 21030 (tp->rcv_numsacks == 0) && 21031 rack->r_fsb_inited && 21032 TCPS_HAVEESTABLISHED(tp->t_state) && 21033 ((IN_RECOVERY(tp->t_flags)) == 0) && 21034 (rack->r_must_retran == 0) && 21035 ((tp->t_flags & TF_NEEDFIN) == 0) && 21036 (len > 0) && (orig_len > 0) && 21037 (orig_len > len) && 21038 ((orig_len - len) >= segsiz) && 21039 ((optlen == 0) || 21040 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 21041 /* We can send at least one more MSS using our fsb */ 21042 rack_setup_fast_output(tp, rack, sb, len, orig_len, 21043 segsiz, pace_max_seg, hw_tls, flags); 21044 } else 21045 rack->r_fast_output = 0; 21046 21047 21048 rack_log_fsb(rack, tp, so, flags, 21049 ipoptlen, orig_len, len, 0, 21050 1, optlen, __LINE__, 1); 21051 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 21052 tp->snd_nxt = tp->snd_max; 21053 } else { 21054 int end_window = 0; 21055 uint32_t seq = tp->gput_ack; 21056 21057 rsm = tqhash_max(rack->r_ctl.tqh); 21058 if (rsm) { 21059 /* 21060 * Mark the last sent that we just-returned (hinting 21061 * that delayed ack may play a role in any rtt measurement). 
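 * After the mark we classify why nothing was sent (rwnd limited, out
 * of data, SWS avoidance, TF_NOPUSH, cwnd or PRR) so that pacing and
 * goodput measurement can account for the pause.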
21062 */
21063 rsm->r_just_ret = 1;
21064 }
21065 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1);
21066 rack->r_ctl.rc_agg_delayed = 0;
21067 rack->r_early = 0;
21068 rack->r_late = 0;
21069 rack->r_ctl.rc_agg_early = 0;
21070 if ((ctf_outstanding(tp) +
21071 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)),
21072 minseg)) >= tp->snd_wnd) {
21073 /* We are limited by the rwnd */
21074 app_limited = CTF_JR_RWND_LIMITED;
21075 if (IN_FASTRECOVERY(tp->t_flags))
21076 rack->r_ctl.rc_prr_sndcnt = 0;
21077 } else if (ctf_outstanding(tp) >= sbavail(sb)) {
21078 /* We are limited by what's available -- app limited */
21079 app_limited = CTF_JR_APP_LIMITED;
21080 if (IN_FASTRECOVERY(tp->t_flags))
21081 rack->r_ctl.rc_prr_sndcnt = 0;
21082 } else if ((idle == 0) &&
21083 ((tp->t_flags & TF_NODELAY) == 0) &&
21084 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
21085 (len < segsiz)) {
21086 /*
21087 * TF_NODELAY is not set and the
21088 * user is sending less than 1 MSS. This
21089 * brings out SWS avoidance so we
21090 * don't send. Another app-limited case.
21091 */
21092 app_limited = CTF_JR_APP_LIMITED;
21093 } else if (tp->t_flags & TF_NOPUSH) {
21094 /*
21095 * The user has requested no push of
21096 * the last segment and we are
21097 * at the last segment. Another app
21098 * limited case.
21099 */
21100 app_limited = CTF_JR_APP_LIMITED;
21101 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) {
21102 /* It's the cwnd */
21103 app_limited = CTF_JR_CWND_LIMITED;
21104 } else if (IN_FASTRECOVERY(tp->t_flags) &&
21105 (rack->rack_no_prr == 0) &&
21106 (rack->r_ctl.rc_prr_sndcnt < segsiz)) {
21107 app_limited = CTF_JR_PRR;
21108 } else {
21109 /* Now, why are we not sending? */
21110 #ifdef NOW
21111 #ifdef INVARIANTS
21112 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use);
21113 #endif
21114 #endif
21115 app_limited = CTF_JR_ASSESSING;
21116 }
21117 /*
21118 * App limited in some fashion, for our pacing GP
21119 * measurements we don't want any gap (even cwnd).
21120 * Close down the measurement window.
21121 */
21122 if (rack_cwnd_block_ends_measure &&
21123 ((app_limited == CTF_JR_CWND_LIMITED) ||
21124 (app_limited == CTF_JR_PRR))) {
21125 /*
21126 * The reason we are not sending is
21127 * the cwnd (or prr). We have been configured
21128 * to end the measurement window in
21129 * this case.
21130 */
21131 end_window = 1;
21132 } else if (rack_rwnd_block_ends_measure &&
21133 (app_limited == CTF_JR_RWND_LIMITED)) {
21134 /*
21135 * We are rwnd limited and have been
21136 * configured to end the measurement
21137 * window in this case.
21138 */
21139 end_window = 1;
21140 } else if (app_limited == CTF_JR_APP_LIMITED) {
21141 /*
21142 * A true application limited period, we have
21143 * run out of data.
21144 */
21145 end_window = 1;
21146 } else if (app_limited == CTF_JR_ASSESSING) {
21147 /*
21148 * In the assessing case we hit the end of
21149 * the if/else and had no known reason.
21150 * This will panic us under INVARIANTS.
21151 *
21152 * If we get this out in logs we need to
21153 * investigate which reason we missed.
21154 */
21155 end_window = 1;
21156 }
21157 if (end_window) {
21158 uint8_t log = 0;
21159
21160 /* Adjust the Gput measurement */
21161 if ((tp->t_flags & TF_GPUTINPROG) &&
21162 SEQ_GT(tp->gput_ack, tp->snd_max)) {
21163 tp->gput_ack = tp->snd_max;
21164 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) {
21165 /*
21166 * There is not enough to measure.
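 * That is, after gput_ack is clipped to snd_max the span back to
 * gput_seq covers fewer than MIN_GP_WIN segments, so the in-progress
 * goodput measurement is abandoned (TF_GPUTINPROG cleared) rather
 * than turned into a sample.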
21167 */ 21168 tp->t_flags &= ~TF_GPUTINPROG; 21169 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 21170 rack->r_ctl.rc_gp_srtt /*flex1*/, 21171 tp->gput_seq, 21172 0, 0, 18, __LINE__, NULL, 0); 21173 } else 21174 log = 1; 21175 } 21176 /* Mark the last packet has app limited */ 21177 rsm = tqhash_max(rack->r_ctl.tqh); 21178 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 21179 if (rack->r_ctl.rc_app_limited_cnt == 0) 21180 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 21181 else { 21182 /* 21183 * Go out to the end app limited and mark 21184 * this new one as next and move the end_appl up 21185 * to this guy. 21186 */ 21187 if (rack->r_ctl.rc_end_appl) 21188 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 21189 rack->r_ctl.rc_end_appl = rsm; 21190 } 21191 rsm->r_flags |= RACK_APP_LIMITED; 21192 rack->r_ctl.rc_app_limited_cnt++; 21193 } 21194 if (log) 21195 rack_log_pacing_delay_calc(rack, 21196 rack->r_ctl.rc_app_limited_cnt, seq, 21197 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); 21198 } 21199 } 21200 /* Check if we need to go into persists or not */ 21201 if ((tp->snd_max == tp->snd_una) && 21202 TCPS_HAVEESTABLISHED(tp->t_state) && 21203 sbavail(sb) && 21204 (sbavail(sb) > tp->snd_wnd) && 21205 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 21206 /* Yes lets make sure to move to persist before timer-start */ 21207 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); 21208 } 21209 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 21210 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use); 21211 } 21212 #ifdef NETFLIX_SHARED_CWND 21213 if ((sbavail(sb) == 0) && 21214 rack->r_ctl.rc_scw) { 21215 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 21216 rack->rack_scwnd_is_idle = 1; 21217 } 21218 #endif 21219 #ifdef TCP_ACCOUNTING 21220 if (tot_len_this_send > 0) { 21221 crtsc = get_cyclecount(); 21222 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21223 tp->tcp_cnt_counters[SND_OUT_DATA]++; 21224 } 21225 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21226 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 21227 } 21228 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21229 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 21230 } 21231 } else { 21232 crtsc = get_cyclecount(); 21233 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21234 tp->tcp_cnt_counters[SND_LIMITED]++; 21235 } 21236 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21237 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 21238 } 21239 } 21240 sched_unpin(); 21241 #endif 21242 return (0); 21243 21244 send: 21245 if ((rack->r_ctl.crte != NULL) && 21246 (rsm == NULL) && 21247 ((rack->rc_hw_nobuf == 1) || 21248 (rack_hw_check_queue && (check_done == 0)))) { 21249 /* 21250 * We only want to do this once with the hw_check_queue, 21251 * for the enobuf case we would only do it once if 21252 * we come around to again, the flag will be clear. 
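 * If rack_check_queue_level() reports the interface queue is still
 * too deep it returns a non-zero slot; we then reset the early/late
 * accounting and jump to skip_all_send instead of piling more data
 * onto the NIC.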
21253 */ 21254 check_done = 1; 21255 slot = rack_check_queue_level(rack, tp, &tv, cts, len, segsiz); 21256 if (slot) { 21257 rack->r_ctl.rc_agg_delayed = 0; 21258 rack->r_ctl.rc_agg_early = 0; 21259 rack->r_early = 0; 21260 rack->r_late = 0; 21261 SOCKBUF_UNLOCK(&so->so_snd); 21262 goto skip_all_send; 21263 } 21264 } 21265 if (rsm || sack_rxmit) 21266 counter_u64_add(rack_nfto_resend, 1); 21267 else 21268 counter_u64_add(rack_non_fto_send, 1); 21269 if ((flags & TH_FIN) && 21270 sbavail(sb)) { 21271 /* 21272 * We do not transmit a FIN 21273 * with data outstanding. We 21274 * need to make it so all data 21275 * is acked first. 21276 */ 21277 flags &= ~TH_FIN; 21278 } 21279 /* Enforce stack imposed max seg size if we have one */ 21280 if (rack->r_ctl.rc_pace_max_segs && 21281 (len > rack->r_ctl.rc_pace_max_segs)) { 21282 mark = 1; 21283 len = rack->r_ctl.rc_pace_max_segs; 21284 } 21285 SOCKBUF_LOCK_ASSERT(sb); 21286 if (len > 0) { 21287 if (len >= segsiz) 21288 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 21289 else 21290 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 21291 } 21292 /* 21293 * Before ESTABLISHED, force sending of initial options unless TCP 21294 * set not to do any options. NOTE: we assume that the IP/TCP header 21295 * plus TCP options always fit in a single mbuf, leaving room for a 21296 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 21297 * + optlen <= MCLBYTES 21298 */ 21299 optlen = 0; 21300 #ifdef INET6 21301 if (isipv6) 21302 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 21303 else 21304 #endif 21305 hdrlen = sizeof(struct tcpiphdr); 21306 21307 /* 21308 * Compute options for segment. We only have to care about SYN and 21309 * established connection segments. Options for SYN-ACK segments 21310 * are handled in TCP syncache. 21311 */ 21312 to.to_flags = 0; 21313 if ((tp->t_flags & TF_NOOPT) == 0) { 21314 /* Maximum segment size. */ 21315 if (flags & TH_SYN) { 21316 tp->snd_nxt = tp->iss; 21317 to.to_mss = tcp_mssopt(&inp->inp_inc); 21318 if (tp->t_port) 21319 to.to_mss -= V_tcp_udp_tunneling_overhead; 21320 to.to_flags |= TOF_MSS; 21321 21322 /* 21323 * On SYN or SYN|ACK transmits on TFO connections, 21324 * only include the TFO option if it is not a 21325 * retransmit, as the presence of the TFO option may 21326 * have caused the original SYN or SYN|ACK to have 21327 * been dropped by a middlebox. 21328 */ 21329 if (IS_FASTOPEN(tp->t_flags) && 21330 (tp->t_rxtshift == 0)) { 21331 if (tp->t_state == TCPS_SYN_RECEIVED) { 21332 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 21333 to.to_tfo_cookie = 21334 (u_int8_t *)&tp->t_tfo_cookie.server; 21335 to.to_flags |= TOF_FASTOPEN; 21336 wanted_cookie = 1; 21337 } else if (tp->t_state == TCPS_SYN_SENT) { 21338 to.to_tfo_len = 21339 tp->t_tfo_client_cookie_len; 21340 to.to_tfo_cookie = 21341 tp->t_tfo_cookie.client; 21342 to.to_flags |= TOF_FASTOPEN; 21343 wanted_cookie = 1; 21344 /* 21345 * If we wind up having more data to 21346 * send with the SYN than can fit in 21347 * one segment, don't send any more 21348 * until the SYN|ACK comes back from 21349 * the other end. 21350 */ 21351 sendalot = 0; 21352 } 21353 } 21354 } 21355 /* Window scaling. */ 21356 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 21357 to.to_wscale = tp->request_r_scale; 21358 to.to_flags |= TOF_SCALE; 21359 } 21360 /* Timestamps. 
*/ 21361 if ((tp->t_flags & TF_RCVD_TSTMP) || 21362 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 21363 to.to_tsval = ms_cts + tp->ts_offset; 21364 to.to_tsecr = tp->ts_recent; 21365 to.to_flags |= TOF_TS; 21366 } 21367 /* Set receive buffer autosizing timestamp. */ 21368 if (tp->rfbuf_ts == 0 && 21369 (so->so_rcv.sb_flags & SB_AUTOSIZE)) 21370 tp->rfbuf_ts = tcp_ts_getticks(); 21371 /* Selective ACK's. */ 21372 if (tp->t_flags & TF_SACK_PERMIT) { 21373 if (flags & TH_SYN) 21374 to.to_flags |= TOF_SACKPERM; 21375 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 21376 tp->rcv_numsacks > 0) { 21377 to.to_flags |= TOF_SACK; 21378 to.to_nsacks = tp->rcv_numsacks; 21379 to.to_sacks = (u_char *)tp->sackblks; 21380 } 21381 } 21382 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 21383 /* TCP-MD5 (RFC2385). */ 21384 if (tp->t_flags & TF_SIGNATURE) 21385 to.to_flags |= TOF_SIGNATURE; 21386 #endif 21387 21388 /* Processing the options. */ 21389 hdrlen += optlen = tcp_addoptions(&to, opt); 21390 /* 21391 * If we wanted a TFO option to be added, but it was unable 21392 * to fit, ensure no data is sent. 21393 */ 21394 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie && 21395 !(to.to_flags & TOF_FASTOPEN)) 21396 len = 0; 21397 } 21398 if (tp->t_port) { 21399 if (V_tcp_udp_tunneling_port == 0) { 21400 /* The port was removed?? */ 21401 SOCKBUF_UNLOCK(&so->so_snd); 21402 #ifdef TCP_ACCOUNTING 21403 crtsc = get_cyclecount(); 21404 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21405 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 21406 } 21407 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 21408 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 21409 } 21410 sched_unpin(); 21411 #endif 21412 return (EHOSTUNREACH); 21413 } 21414 hdrlen += sizeof(struct udphdr); 21415 } 21416 #ifdef INET6 21417 if (isipv6) 21418 ipoptlen = ip6_optlen(inp); 21419 else 21420 #endif 21421 if (inp->inp_options) 21422 ipoptlen = inp->inp_options->m_len - 21423 offsetof(struct ipoption, ipopt_list); 21424 else 21425 ipoptlen = 0; 21426 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 21427 ipoptlen += ipsec_optlen; 21428 #endif 21429 21430 /* 21431 * Adjust data length if insertion of options will bump the packet 21432 * length beyond the t_maxseg length. Clear the FIN bit because we 21433 * cut off the tail of the segment. 
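 * For TSO we additionally cap the burst at the interface supplied
 * if_hw_tsomax (less headers), trim any trailing partial segment when
 * more data remains in the socket buffer, and send the FIN separately.
 * Without TSO we fall back to a single t_maxseg - optlen - ipoptlen
 * sized segment.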
21434 */ 21435 if (len + optlen + ipoptlen > tp->t_maxseg) { 21436 if (tso) { 21437 uint32_t if_hw_tsomax; 21438 uint32_t moff; 21439 int32_t max_len; 21440 21441 /* extract TSO information */ 21442 if_hw_tsomax = tp->t_tsomax; 21443 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 21444 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 21445 KASSERT(ipoptlen == 0, 21446 ("%s: TSO can't do IP options", __func__)); 21447 21448 /* 21449 * Check if we should limit by maximum payload 21450 * length: 21451 */ 21452 if (if_hw_tsomax != 0) { 21453 /* compute maximum TSO length */ 21454 max_len = (if_hw_tsomax - hdrlen - 21455 max_linkhdr); 21456 if (max_len <= 0) { 21457 len = 0; 21458 } else if (len > max_len) { 21459 sendalot = 1; 21460 len = max_len; 21461 mark = 2; 21462 } 21463 } 21464 /* 21465 * Prevent the last segment from being fractional 21466 * unless the send sockbuf can be emptied: 21467 */ 21468 max_len = (tp->t_maxseg - optlen); 21469 if ((sb_offset + len) < sbavail(sb)) { 21470 moff = len % (u_int)max_len; 21471 if (moff != 0) { 21472 mark = 3; 21473 len -= moff; 21474 } 21475 } 21476 /* 21477 * In case there are too many small fragments don't 21478 * use TSO: 21479 */ 21480 if (len <= max_len) { 21481 mark = 4; 21482 tso = 0; 21483 } 21484 /* 21485 * Send the FIN in a separate segment after the bulk 21486 * sending is done. We don't trust the TSO 21487 * implementations to clear the FIN flag on all but 21488 * the last segment. 21489 */ 21490 if (tp->t_flags & TF_NEEDFIN) { 21491 sendalot = 4; 21492 } 21493 } else { 21494 mark = 5; 21495 if (optlen + ipoptlen >= tp->t_maxseg) { 21496 /* 21497 * Since we don't have enough space to put 21498 * the IP header chain and the TCP header in 21499 * one packet as required by RFC 7112, don't 21500 * send it. Also ensure that at least one 21501 * byte of the payload can be put into the 21502 * TCP segment. 21503 */ 21504 SOCKBUF_UNLOCK(&so->so_snd); 21505 error = EMSGSIZE; 21506 sack_rxmit = 0; 21507 goto out; 21508 } 21509 len = tp->t_maxseg - optlen - ipoptlen; 21510 sendalot = 5; 21511 } 21512 } else { 21513 tso = 0; 21514 mark = 6; 21515 } 21516 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 21517 ("%s: len > IP_MAXPACKET", __func__)); 21518 #ifdef DIAGNOSTIC 21519 #ifdef INET6 21520 if (max_linkhdr + hdrlen > MCLBYTES) 21521 #else 21522 if (max_linkhdr + hdrlen > MHLEN) 21523 #endif 21524 panic("tcphdr too big"); 21525 #endif 21526 21527 /* 21528 * This KASSERT is here to catch edge cases at a well defined place. 21529 * Before, those had triggered (random) panic conditions further 21530 * down. 21531 */ 21532 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 21533 if ((len == 0) && 21534 (flags & TH_FIN) && 21535 (sbused(sb))) { 21536 /* 21537 * We have outstanding data, don't send a fin by itself!. 21538 */ 21539 goto just_return; 21540 } 21541 /* 21542 * Grab a header mbuf, attaching a copy of data to be transmitted, 21543 * and initialize the header from the template for sends on this 21544 * connection. 21545 */ 21546 hw_tls = tp->t_nic_ktls_xmit != 0; 21547 if (len) { 21548 uint32_t max_val; 21549 uint32_t moff; 21550 21551 if (rack->r_ctl.rc_pace_max_segs) 21552 max_val = rack->r_ctl.rc_pace_max_segs; 21553 else if (rack->rc_user_set_max_segs) 21554 max_val = rack->rc_user_set_max_segs * segsiz; 21555 else 21556 max_val = len; 21557 /* 21558 * We allow a limit on sending with hptsi. 
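 * The cap is rc_pace_max_segs when set, otherwise the user supplied
 * segment count times segsiz, so one pass through here never hands
 * more than a single pacing burst to the lower layers.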
21559 */ 21560 if (len > max_val) { 21561 mark = 7; 21562 len = max_val; 21563 } 21564 #ifdef INET6 21565 if (MHLEN < hdrlen + max_linkhdr) 21566 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 21567 else 21568 #endif 21569 m = m_gethdr(M_NOWAIT, MT_DATA); 21570 21571 if (m == NULL) { 21572 SOCKBUF_UNLOCK(sb); 21573 error = ENOBUFS; 21574 sack_rxmit = 0; 21575 goto out; 21576 } 21577 m->m_data += max_linkhdr; 21578 m->m_len = hdrlen; 21579 21580 /* 21581 * Start the m_copy functions from the closest mbuf to the 21582 * sb_offset in the socket buffer chain. 21583 */ 21584 mb = sbsndptr_noadv(sb, sb_offset, &moff); 21585 s_mb = mb; 21586 s_moff = moff; 21587 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 21588 m_copydata(mb, moff, (int)len, 21589 mtod(m, caddr_t)+hdrlen); 21590 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 21591 sbsndptr_adv(sb, mb, len); 21592 m->m_len += len; 21593 } else { 21594 struct sockbuf *msb; 21595 21596 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 21597 msb = NULL; 21598 else 21599 msb = sb; 21600 m->m_next = tcp_m_copym( 21601 mb, moff, &len, 21602 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 21603 ((rsm == NULL) ? hw_tls : 0) 21604 #ifdef NETFLIX_COPY_ARGS 21605 , &s_mb, &s_moff 21606 #endif 21607 ); 21608 if (len <= (tp->t_maxseg - optlen)) { 21609 /* 21610 * Must have ran out of mbufs for the copy 21611 * shorten it to no longer need tso. Lets 21612 * not put on sendalot since we are low on 21613 * mbufs. 21614 */ 21615 tso = 0; 21616 } 21617 if (m->m_next == NULL) { 21618 SOCKBUF_UNLOCK(sb); 21619 (void)m_free(m); 21620 error = ENOBUFS; 21621 sack_rxmit = 0; 21622 goto out; 21623 } 21624 } 21625 if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) { 21626 if (rsm && (rsm->r_flags & RACK_TLP)) { 21627 /* 21628 * TLP should not count in retran count, but 21629 * in its own bin 21630 */ 21631 counter_u64_add(rack_tlp_retran, 1); 21632 counter_u64_add(rack_tlp_retran_bytes, len); 21633 } else { 21634 tp->t_sndrexmitpack++; 21635 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 21636 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 21637 } 21638 #ifdef STATS 21639 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 21640 len); 21641 #endif 21642 } else { 21643 KMOD_TCPSTAT_INC(tcps_sndpack); 21644 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 21645 #ifdef STATS 21646 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 21647 len); 21648 #endif 21649 } 21650 /* 21651 * If we're sending everything we've got, set PUSH. (This 21652 * will keep happy those implementations which only give 21653 * data to the user when a buffer fills or a PUSH comes in.) 
21654 */ 21655 if (sb_offset + len == sbused(sb) && 21656 sbused(sb) && 21657 !(flags & TH_SYN)) { 21658 flags |= TH_PUSH; 21659 add_flag |= RACK_HAD_PUSH; 21660 } 21661 21662 SOCKBUF_UNLOCK(sb); 21663 } else { 21664 SOCKBUF_UNLOCK(sb); 21665 if (tp->t_flags & TF_ACKNOW) 21666 KMOD_TCPSTAT_INC(tcps_sndacks); 21667 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 21668 KMOD_TCPSTAT_INC(tcps_sndctrl); 21669 else 21670 KMOD_TCPSTAT_INC(tcps_sndwinup); 21671 21672 m = m_gethdr(M_NOWAIT, MT_DATA); 21673 if (m == NULL) { 21674 error = ENOBUFS; 21675 sack_rxmit = 0; 21676 goto out; 21677 } 21678 #ifdef INET6 21679 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 21680 MHLEN >= hdrlen) { 21681 M_ALIGN(m, hdrlen); 21682 } else 21683 #endif 21684 m->m_data += max_linkhdr; 21685 m->m_len = hdrlen; 21686 } 21687 SOCKBUF_UNLOCK_ASSERT(sb); 21688 m->m_pkthdr.rcvif = (struct ifnet *)0; 21689 #ifdef MAC 21690 mac_inpcb_create_mbuf(inp, m); 21691 #endif 21692 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 21693 #ifdef INET6 21694 if (isipv6) 21695 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 21696 else 21697 #endif /* INET6 */ 21698 #ifdef INET 21699 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 21700 #endif 21701 th = rack->r_ctl.fsb.th; 21702 udp = rack->r_ctl.fsb.udp; 21703 if (udp) { 21704 #ifdef INET6 21705 if (isipv6) 21706 ulen = hdrlen + len - sizeof(struct ip6_hdr); 21707 else 21708 #endif /* INET6 */ 21709 ulen = hdrlen + len - sizeof(struct ip); 21710 udp->uh_ulen = htons(ulen); 21711 } 21712 } else { 21713 #ifdef INET6 21714 if (isipv6) { 21715 ip6 = mtod(m, struct ip6_hdr *); 21716 if (tp->t_port) { 21717 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 21718 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 21719 udp->uh_dport = tp->t_port; 21720 ulen = hdrlen + len - sizeof(struct ip6_hdr); 21721 udp->uh_ulen = htons(ulen); 21722 th = (struct tcphdr *)(udp + 1); 21723 } else 21724 th = (struct tcphdr *)(ip6 + 1); 21725 tcpip_fillheaders(inp, tp->t_port, ip6, th); 21726 } else 21727 #endif /* INET6 */ 21728 { 21729 #ifdef INET 21730 ip = mtod(m, struct ip *); 21731 if (tp->t_port) { 21732 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 21733 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 21734 udp->uh_dport = tp->t_port; 21735 ulen = hdrlen + len - sizeof(struct ip); 21736 udp->uh_ulen = htons(ulen); 21737 th = (struct tcphdr *)(udp + 1); 21738 } else 21739 th = (struct tcphdr *)(ip + 1); 21740 tcpip_fillheaders(inp, tp->t_port, ip, th); 21741 #endif 21742 } 21743 } 21744 /* 21745 * Fill in fields, remembering maximum advertised window for use in 21746 * delaying messages about window sizes. If resending a FIN, be sure 21747 * not to use a new sequence number. 21748 */ 21749 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && 21750 tp->snd_nxt == tp->snd_max) 21751 tp->snd_nxt--; 21752 /* 21753 * If we are starting a connection, send ECN setup SYN packet. If we 21754 * are on a retransmit, we may resend those bits a number of times 21755 * as per RFC 3168. 
21756 */ 21757 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { 21758 flags |= tcp_ecn_output_syn_sent(tp); 21759 } 21760 /* Also handle parallel SYN for ECN */ 21761 if (TCPS_HAVERCVDSYN(tp->t_state) && 21762 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 21763 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit); 21764 if ((tp->t_state == TCPS_SYN_RECEIVED) && 21765 (tp->t_flags2 & TF2_ECN_SND_ECE)) 21766 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 21767 #ifdef INET6 21768 if (isipv6) { 21769 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 21770 ip6->ip6_flow |= htonl(ect << 20); 21771 } 21772 else 21773 #endif 21774 { 21775 #ifdef INET 21776 ip->ip_tos &= ~IPTOS_ECN_MASK; 21777 ip->ip_tos |= ect; 21778 #endif 21779 } 21780 } 21781 /* 21782 * If we are doing retransmissions, then snd_nxt will not reflect 21783 * the first unsent octet. For ACK only packets, we do not want the 21784 * sequence number of the retransmitted packet, we want the sequence 21785 * number of the next unsent octet. So, if there is no data (and no 21786 * SYN or FIN), use snd_max instead of snd_nxt when filling in 21787 * ti_seq. But if we are in persist state, snd_max might reflect 21788 * one byte beyond the right edge of the window, so use snd_nxt in 21789 * that case, since we know we aren't doing a retransmission. 21790 * (retransmit and persist are mutually exclusive...) 21791 */ 21792 if (sack_rxmit == 0) { 21793 if (len || (flags & (TH_SYN | TH_FIN))) { 21794 th->th_seq = htonl(tp->snd_nxt); 21795 rack_seq = tp->snd_nxt; 21796 } else { 21797 th->th_seq = htonl(tp->snd_max); 21798 rack_seq = tp->snd_max; 21799 } 21800 } else { 21801 th->th_seq = htonl(rsm->r_start); 21802 rack_seq = rsm->r_start; 21803 } 21804 th->th_ack = htonl(tp->rcv_nxt); 21805 tcp_set_flags(th, flags); 21806 /* 21807 * Calculate receive window. Don't shrink window, but avoid silly 21808 * window syndrome. 21809 * If a RST segment is sent, advertise a window of zero. 21810 */ 21811 if (flags & TH_RST) { 21812 recwin = 0; 21813 } else { 21814 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 21815 recwin < (long)segsiz) { 21816 recwin = 0; 21817 } 21818 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 21819 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 21820 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 21821 } 21822 21823 /* 21824 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 21825 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 21826 * handled in syncache. 21827 */ 21828 if (flags & TH_SYN) 21829 th->th_win = htons((u_short) 21830 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 21831 else { 21832 /* Avoid shrinking window with window scaling. */ 21833 recwin = roundup2(recwin, 1 << tp->rcv_scale); 21834 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 21835 } 21836 /* 21837 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 21838 * window. This may cause the remote transmitter to stall. This 21839 * flag tells soreceive() to disable delayed acknowledgements when 21840 * draining the buffer. This can occur if the receiver is 21841 * attempting to read more data than can be buffered prior to 21842 * transmitting on the connection. 
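 *
 * As an aside on the window math above, with made-up numbers: if
 * recwin is 65700 bytes and rcv_scale is 7, roundup2() first rounds
 * recwin up to a multiple of 128 so that the right shift cannot
 * silently shrink what we advertise:
 *
 *	recwin = roundup2(65700, 1 << 7);	-> 65792
 *	th->th_win = htons(65792 >> 7);		-> htons(514)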
21843 */ 21844 if (th->th_win == 0) { 21845 tp->t_sndzerowin++; 21846 tp->t_flags |= TF_RXWIN0SENT; 21847 } else 21848 tp->t_flags &= ~TF_RXWIN0SENT; 21849 tp->snd_up = tp->snd_una; /* drag it along, it's deprecated */ 21850 /* Now, are we using the fsb? If so, copy the template data to the mbuf */ 21851 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 21852 uint8_t *cpto; 21853 21854 cpto = mtod(m, uint8_t *); 21855 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 21856 /* 21857 * We have just copied in: 21858 * IP/IP6 21859 * <optional udphdr> 21860 * tcphdr (no options) 21861 * 21862 * We need to grab the correct pointers into the mbuf 21863 * for both the tcp header, and possibly the udp header (if tunneling). 21864 * We do this by using the offset in the copy buffer and adding it 21865 * to the mbuf base pointer (cpto). 21866 */ 21867 #ifdef INET6 21868 if (isipv6) 21869 ip6 = mtod(m, struct ip6_hdr *); 21870 else 21871 #endif /* INET6 */ 21872 #ifdef INET 21873 ip = mtod(m, struct ip *); 21874 #endif 21875 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 21876 /* If we have a udp header let's set it into the mbuf as well */ 21877 if (udp) 21878 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); 21879 } 21880 if (optlen) { 21881 bcopy(opt, th + 1, optlen); 21882 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 21883 } 21884 /* 21885 * Put TCP length in extended header, and then checksum extended 21886 * header and data. 21887 */ 21888 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() needs this */ 21889 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 21890 if (to.to_flags & TOF_SIGNATURE) { 21891 /* 21892 * Calculate MD5 signature and put it into the place 21893 * determined before. 21894 * NOTE: since TCP options buffer doesn't point into 21895 * mbuf's data, calculate offset and use it. 21896 */ 21897 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 21898 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 21899 /* 21900 * Do not send segment if the calculation of MD5 21901 * digest has failed. 21902 */ 21903 goto out; 21904 } 21905 } 21906 #endif 21907 #ifdef INET6 21908 if (isipv6) { 21909 /* 21910 * ip6_plen does not need to be filled now, and will be filled 21911 * in ip6_output.
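 *
 * Only the pseudo-header part of the checksum is computed below; the
 * hardware (or the stack, when the offload is not available) later
 * folds in the TCP header, options and payload. Conceptually, for the
 * IPv6 case:
 *
 *	th->th_sum = cksum(src addr, dst addr, length, IPPROTO_TCP)
 *
 * which is what in6_cksum_pseudo() provides.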
21912 */ 21913 if (tp->t_port) { 21914 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 21915 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 21916 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 21917 th->th_sum = htons(0); 21918 UDPSTAT_INC(udps_opackets); 21919 } else { 21920 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 21921 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 21922 th->th_sum = in6_cksum_pseudo(ip6, 21923 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 21924 0); 21925 } 21926 } 21927 #endif 21928 #if defined(INET6) && defined(INET) 21929 else 21930 #endif 21931 #ifdef INET 21932 { 21933 if (tp->t_port) { 21934 m->m_pkthdr.csum_flags = CSUM_UDP; 21935 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 21936 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 21937 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 21938 th->th_sum = htons(0); 21939 UDPSTAT_INC(udps_opackets); 21940 } else { 21941 m->m_pkthdr.csum_flags = CSUM_TCP; 21942 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 21943 th->th_sum = in_pseudo(ip->ip_src.s_addr, 21944 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 21945 IPPROTO_TCP + len + optlen)); 21946 } 21947 /* IP version must be set here for ipv4/ipv6 checking later */ 21948 KASSERT(ip->ip_v == IPVERSION, 21949 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 21950 } 21951 #endif 21952 /* 21953 * Enable TSO and specify the size of the segments. The TCP pseudo 21954 * header checksum is always provided. XXX: Fixme: This is currently 21955 * not the case for IPv6. 21956 */ 21957 if (tso) { 21958 /* 21959 * Here we must use t_maxseg and the optlen since 21960 * the optlen may include SACK's (or DSACK). 21961 */ 21962 KASSERT(len > tp->t_maxseg - optlen, 21963 ("%s: len <= tso_segsz", __func__)); 21964 m->m_pkthdr.csum_flags |= CSUM_TSO; 21965 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 21966 } 21967 KASSERT(len + hdrlen == m_length(m, NULL), 21968 ("%s: mbuf chain different than expected: %d + %u != %u", 21969 __func__, len, hdrlen, m_length(m, NULL))); 21970 21971 #ifdef TCP_HHOOK 21972 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 21973 hhook_run_tcp_est_out(tp, th, &to, len, tso); 21974 #endif 21975 if ((rack->r_ctl.crte != NULL) && 21976 (rack->rc_hw_nobuf == 0) && 21977 tcp_bblogging_on(tp)) { 21978 rack_log_queue_level(tp, rack, len, &tv, cts); 21979 } 21980 /* We're getting ready to send; log now. 
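 *
 * A note on reading the record built below: several small values are
 * packed into one "flex" field, e.g. flex7 carries both a marker
 * recording where in this function the length was decided ("mark")
 * and the pass counter:
 *
 *	log.u_bbr.flex7 = (mark << 8) | pass;
 *
 * so a consumer of the BB log has to unpack them the same way.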
*/ 21981 if (tcp_bblogging_on(rack->rc_tp)) { 21982 union tcp_log_stackspecific log; 21983 21984 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 21985 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); 21986 if (rack->rack_no_prr) 21987 log.u_bbr.flex1 = 0; 21988 else 21989 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 21990 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 21991 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 21992 log.u_bbr.flex4 = orig_len; 21993 /* Save off the early/late values */ 21994 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 21995 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 21996 log.u_bbr.bw_inuse = rack_get_bw(rack); 21997 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; 21998 log.u_bbr.flex8 = 0; 21999 if (rsm) { 22000 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 22001 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 22002 counter_u64_add(rack_collapsed_win_rxt, 1); 22003 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 22004 } 22005 if (doing_tlp) 22006 log.u_bbr.flex8 = 2; 22007 else 22008 log.u_bbr.flex8 = 1; 22009 } else { 22010 if (doing_tlp) 22011 log.u_bbr.flex8 = 3; 22012 } 22013 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 22014 log.u_bbr.flex7 = mark; 22015 log.u_bbr.flex7 <<= 8; 22016 log.u_bbr.flex7 |= pass; 22017 log.u_bbr.pkts_out = tp->t_maxseg; 22018 log.u_bbr.timeStamp = cts; 22019 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 22020 if (rsm && (rsm->r_rtr_cnt > 0)) { 22021 /* 22022 * When we have a retransmit we want to log the 22023 * burst at send and flight at send from before. 22024 */ 22025 log.u_bbr.flex5 = rsm->r_fas; 22026 log.u_bbr.bbr_substate = rsm->r_bas; 22027 } else { 22028 /* 22029 * New transmits we log in flex5 the inflight again as 22030 * well as the number of segments in our send in the 22031 * substate field. 22032 */ 22033 log.u_bbr.flex5 = log.u_bbr.inflight; 22034 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); 22035 } 22036 log.u_bbr.lt_epoch = cwnd_to_use; 22037 log.u_bbr.delivered = sendalot; 22038 log.u_bbr.rttProp = (uint64_t)rsm; 22039 log.u_bbr.pkt_epoch = __LINE__; 22040 if (rsm) { 22041 log.u_bbr.delRate = rsm->r_flags; 22042 log.u_bbr.delRate <<= 31; 22043 log.u_bbr.delRate |= rack->r_must_retran; 22044 log.u_bbr.delRate <<= 1; 22045 log.u_bbr.delRate |= (sack_rxmit & 0x00000001); 22046 } else { 22047 log.u_bbr.delRate = rack->r_must_retran; 22048 log.u_bbr.delRate <<= 1; 22049 log.u_bbr.delRate |= (sack_rxmit & 0x00000001); 22050 } 22051 lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 22052 len, &log, false, NULL, __func__, __LINE__, &tv); 22053 } else 22054 lgb = NULL; 22055 22056 /* 22057 * Fill in IP length and desired time to live and send to IP level. 22058 * There should be a better way to handle ttl and tos; we could keep 22059 * them in the template, but need a way to checksum without them. 22060 */ 22061 /* 22062 * m->m_pkthdr.len should have been set before cksum calcuration, 22063 * because in6_cksum() need it. 22064 */ 22065 #ifdef INET6 22066 if (isipv6) { 22067 /* 22068 * we separately set hoplimit for every segment, since the 22069 * user might want to change the value via setsockopt. Also, 22070 * desired default hop limit might be changed via Neighbor 22071 * Discovery. 22072 */ 22073 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); 22074 22075 /* 22076 * Set the packet size here for the benefit of DTrace 22077 * probes. 
ip6_output() will set it properly; it's supposed 22078 * to include the option header lengths as well. 22079 */ 22080 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 22081 22082 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 22083 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 22084 else 22085 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 22086 22087 if (tp->t_state == TCPS_SYN_SENT) 22088 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 22089 22090 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 22091 /* TODO: IPv6 IP6TOS_ECT bit on */ 22092 error = ip6_output(m, 22093 inp->in6p_outputopts, 22094 &inp->inp_route6, 22095 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 22096 NULL, NULL, inp); 22097 22098 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) 22099 mtu = inp->inp_route6.ro_nh->nh_mtu; 22100 } 22101 #endif /* INET6 */ 22102 #if defined(INET) && defined(INET6) 22103 else 22104 #endif 22105 #ifdef INET 22106 { 22107 ip->ip_len = htons(m->m_pkthdr.len); 22108 #ifdef INET6 22109 if (inp->inp_vflag & INP_IPV6PROTO) 22110 ip->ip_ttl = in6_selecthlim(inp, NULL); 22111 #endif /* INET6 */ 22112 rack->r_ctl.fsb.hoplimit = ip->ip_ttl; 22113 /* 22114 * If we do path MTU discovery, then we set DF on every 22115 * packet. This might not be the best thing to do according 22116 * to RFC3390 Section 2. However the tcp hostcache migitates 22117 * the problem so it affects only the first tcp connection 22118 * with a host. 22119 * 22120 * NB: Don't set DF on small MTU/MSS to have a safe 22121 * fallback. 22122 */ 22123 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 22124 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 22125 if (tp->t_port == 0 || len < V_tcp_minmss) { 22126 ip->ip_off |= htons(IP_DF); 22127 } 22128 } else { 22129 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 22130 } 22131 22132 if (tp->t_state == TCPS_SYN_SENT) 22133 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 22134 22135 TCP_PROBE5(send, NULL, tp, ip, tp, th); 22136 22137 error = ip_output(m, 22138 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 22139 inp->inp_options, 22140 #else 22141 NULL, 22142 #endif 22143 &inp->inp_route, 22144 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0, 22145 inp); 22146 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) 22147 mtu = inp->inp_route.ro_nh->nh_mtu; 22148 } 22149 #endif /* INET */ 22150 22151 out: 22152 if (lgb) { 22153 lgb->tlb_errno = error; 22154 lgb = NULL; 22155 } 22156 /* 22157 * In transmit state, time the transmission and arrange for the 22158 * retransmit. In persist state, just set snd_max. 22159 */ 22160 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 22161 rack_to_usec_ts(&tv), 22162 rsm, add_flag, s_mb, s_moff, hw_tls, segsiz); 22163 if (error == 0) { 22164 if (rsm == NULL) { 22165 if (rack->lt_bw_up == 0) { 22166 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv); 22167 rack->r_ctl.lt_seq = tp->snd_una; 22168 rack->lt_bw_up = 1; 22169 } else if (((rack_seq + len) - rack->r_ctl.lt_seq) > 0x7fffffff) { 22170 /* 22171 * Need to record what we have since we are 22172 * approaching seq wrap. 
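 *
 * The long-term bandwidth ("lt_bw") state touched below is just bytes
 * over elapsed time between snapshots, so a later reader can form, in
 * effect:
 *
 *	lt_bw ~= lt_bw_bytes / lt_bw_time	(bytes per unit of
 *						 accumulated time)
 *
 * Snapshotting before the sequence space wraps keeps the byte count
 * (snd_una - lt_seq) from going negative.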
22173 */ 22174 uint64_t tmark; 22175 22176 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); 22177 rack->r_ctl.lt_seq = tp->snd_una; 22178 tmark = tcp_tv_to_lusectick(&tv); 22179 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); 22180 rack->r_ctl.lt_timemark = tmark; 22181 } 22182 } 22183 rack->forced_ack = 0; /* If we send something zap the FA flag */ 22184 counter_u64_add(rack_total_bytes, len); 22185 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 22186 if (rsm && doing_tlp) { 22187 rack->rc_last_sent_tlp_past_cumack = 0; 22188 rack->rc_last_sent_tlp_seq_valid = 1; 22189 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 22190 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 22191 } 22192 if (rack->rc_hw_nobuf) { 22193 rack->rc_hw_nobuf = 0; 22194 rack->r_ctl.rc_agg_delayed = 0; 22195 rack->r_early = 0; 22196 rack->r_late = 0; 22197 rack->r_ctl.rc_agg_early = 0; 22198 } 22199 if (rsm && (doing_tlp == 0)) { 22200 /* Set we retransmitted */ 22201 rack->rc_gp_saw_rec = 1; 22202 } else { 22203 if (cwnd_to_use > tp->snd_ssthresh) { 22204 /* Set we sent in CA */ 22205 rack->rc_gp_saw_ca = 1; 22206 } else { 22207 /* Set we sent in SS */ 22208 rack->rc_gp_saw_ss = 1; 22209 } 22210 } 22211 if (TCPS_HAVEESTABLISHED(tp->t_state) && 22212 (tp->t_flags & TF_SACK_PERMIT) && 22213 tp->rcv_numsacks > 0) 22214 tcp_clean_dsack_blocks(tp); 22215 tot_len_this_send += len; 22216 if (len == 0) { 22217 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 22218 } else { 22219 int idx; 22220 22221 idx = (len / segsiz) + 3; 22222 if (idx >= TCP_MSS_ACCT_ATIMER) 22223 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 22224 else 22225 counter_u64_add(rack_out_size[idx], 1); 22226 } 22227 } 22228 if ((rack->rack_no_prr == 0) && 22229 sub_from_prr && 22230 (error == 0)) { 22231 if (rack->r_ctl.rc_prr_sndcnt >= len) 22232 rack->r_ctl.rc_prr_sndcnt -= len; 22233 else 22234 rack->r_ctl.rc_prr_sndcnt = 0; 22235 } 22236 sub_from_prr = 0; 22237 if (doing_tlp) { 22238 /* Make sure the TLP is added */ 22239 add_flag |= RACK_TLP; 22240 } else if (rsm) { 22241 /* If its a resend without TLP then it must not have the flag */ 22242 rsm->r_flags &= ~RACK_TLP; 22243 } 22244 22245 22246 if ((error == 0) && 22247 (len > 0) && 22248 (tp->snd_una == tp->snd_max)) 22249 rack->r_ctl.rc_tlp_rxt_last_time = cts; 22250 { 22251 tcp_seq startseq = tp->snd_nxt; 22252 22253 /* Track our lost count */ 22254 if (rsm && (doing_tlp == 0)) 22255 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 22256 /* 22257 * Advance snd_nxt over sequence space of this segment. 22258 */ 22259 if (error) 22260 /* We don't log or do anything with errors */ 22261 goto nomore; 22262 if (doing_tlp == 0) { 22263 if (rsm == NULL) { 22264 /* 22265 * Not a retransmission of some 22266 * sort, new data is going out so 22267 * clear our TLP count and flag. 22268 */ 22269 rack->rc_tlp_in_progress = 0; 22270 rack->r_ctl.rc_tlp_cnt_out = 0; 22271 } 22272 } else { 22273 /* 22274 * We have just sent a TLP, mark that it is true 22275 * and make sure our in progress is set so we 22276 * continue to check the count. 
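 *
 * Elsewhere, when the next TLP timer is considered, this count is what
 * bounds how many TLPs may go out back to back without an ACK coming
 * in; conceptually something like
 *
 *	if (rack->r_ctl.rc_tlp_cnt_out >= limit)
 *		give up on TLP and let the RTO handle it
 *
 * where "limit" stands in for the policy knob, not a literal variable
 * in this function.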
22277 */ 22278 rack->rc_tlp_in_progress = 1; 22279 rack->r_ctl.rc_tlp_cnt_out++; 22280 } 22281 if (flags & (TH_SYN | TH_FIN)) { 22282 if (flags & TH_SYN) 22283 tp->snd_nxt++; 22284 if (flags & TH_FIN) { 22285 tp->snd_nxt++; 22286 tp->t_flags |= TF_SENTFIN; 22287 } 22288 } 22289 /* In the ENOBUFS case we do *not* update snd_max */ 22290 if (sack_rxmit) 22291 goto nomore; 22292 22293 tp->snd_nxt += len; 22294 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { 22295 if (tp->snd_una == tp->snd_max) { 22296 /* 22297 * Update the time we just added data since 22298 * none was outstanding. 22299 */ 22300 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 22301 tp->t_acktime = ticks; 22302 } 22303 tp->snd_max = tp->snd_nxt; 22304 if (rack->rc_new_rnd_needed) { 22305 /* 22306 * Update the rnd to start ticking not 22307 * that from a time perspective all of 22308 * the preceding idle time is "in the round" 22309 */ 22310 rack->rc_new_rnd_needed = 0; 22311 rack->r_ctl.roundends = tp->snd_max; 22312 } 22313 /* 22314 * Time this transmission if not a retransmission and 22315 * not currently timing anything. 22316 * This is only relevant in case of switching back to 22317 * the base stack. 22318 */ 22319 if (tp->t_rtttime == 0) { 22320 tp->t_rtttime = ticks; 22321 tp->t_rtseq = startseq; 22322 KMOD_TCPSTAT_INC(tcps_segstimed); 22323 } 22324 if (len && 22325 ((tp->t_flags & TF_GPUTINPROG) == 0)) 22326 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 22327 } 22328 /* 22329 * If we are doing FO we need to update the mbuf position and subtract 22330 * this happens when the peer sends us duplicate information and 22331 * we thus want to send a DSACK. 22332 * 22333 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 22334 * turned off? If not then we are going to echo multiple DSACK blocks 22335 * out (with the TSO), which we should not be doing. 22336 */ 22337 if (rack->r_fast_output && len) { 22338 if (rack->r_ctl.fsb.left_to_send > len) 22339 rack->r_ctl.fsb.left_to_send -= len; 22340 else 22341 rack->r_ctl.fsb.left_to_send = 0; 22342 if (rack->r_ctl.fsb.left_to_send < segsiz) 22343 rack->r_fast_output = 0; 22344 if (rack->r_fast_output) { 22345 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 22346 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 22347 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); 22348 } 22349 } 22350 } 22351 nomore: 22352 if (error) { 22353 rack->r_ctl.rc_agg_delayed = 0; 22354 rack->r_early = 0; 22355 rack->r_late = 0; 22356 rack->r_ctl.rc_agg_early = 0; 22357 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 22358 /* 22359 * Failures do not advance the seq counter above. For the 22360 * case of ENOBUFS we will fall out and retry in 1ms with 22361 * the hpts. Everything else will just have to retransmit 22362 * with the timer. 22363 * 22364 * In any case, we do not want to loop around for another 22365 * send without a good reason. 
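 *
 * For the ENOBUFS case handled below the retry delay grows with the
 * number of consecutive failures and is floored at 10ms:
 *
 *	slot = (1 + rc_enobuf) * HPTS_USEC_IN_MSEC;	1ms, 2ms, 3ms, ...
 *	if (slot < 10 * HPTS_USEC_IN_MSEC)
 *		slot = 10 * HPTS_USEC_IN_MSEC;
 *
 * so the delay only rises above the 10ms floor once ten or more
 * ENOBUFS returns have already been seen in a row.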
22366 */ 22367 sendalot = 0; 22368 switch (error) { 22369 case EPERM: 22370 tp->t_softerror = error; 22371 #ifdef TCP_ACCOUNTING 22372 crtsc = get_cyclecount(); 22373 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22374 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22375 } 22376 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22377 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22378 } 22379 sched_unpin(); 22380 #endif 22381 return (error); 22382 case ENOBUFS: 22383 /* 22384 * Pace us right away to retry in a some 22385 * time 22386 */ 22387 if (rack->r_ctl.crte != NULL) { 22388 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); 22389 if (tcp_bblogging_on(rack->rc_tp)) 22390 rack_log_queue_level(tp, rack, len, &tv, cts); 22391 } else 22392 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); 22393 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 22394 if (rack->rc_enobuf < 0x7f) 22395 rack->rc_enobuf++; 22396 if (slot < (10 * HPTS_USEC_IN_MSEC)) 22397 slot = 10 * HPTS_USEC_IN_MSEC; 22398 if (rack->r_ctl.crte != NULL) { 22399 counter_u64_add(rack_saw_enobuf_hw, 1); 22400 tcp_rl_log_enobuf(rack->r_ctl.crte); 22401 } 22402 counter_u64_add(rack_saw_enobuf, 1); 22403 goto enobufs; 22404 case EMSGSIZE: 22405 /* 22406 * For some reason the interface we used initially 22407 * to send segments changed to another or lowered 22408 * its MTU. If TSO was active we either got an 22409 * interface without TSO capabilits or TSO was 22410 * turned off. If we obtained mtu from ip_output() 22411 * then update it and try again. 22412 */ 22413 if (tso) 22414 tp->t_flags &= ~TF_TSO; 22415 if (mtu != 0) { 22416 int saved_mtu; 22417 22418 saved_mtu = tp->t_maxseg; 22419 tcp_mss_update(tp, -1, mtu, NULL, NULL); 22420 if (saved_mtu > tp->t_maxseg) { 22421 goto again; 22422 } 22423 } 22424 slot = 10 * HPTS_USEC_IN_MSEC; 22425 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 22426 #ifdef TCP_ACCOUNTING 22427 crtsc = get_cyclecount(); 22428 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22429 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22430 } 22431 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22432 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22433 } 22434 sched_unpin(); 22435 #endif 22436 return (error); 22437 case ENETUNREACH: 22438 counter_u64_add(rack_saw_enetunreach, 1); 22439 case EHOSTDOWN: 22440 case EHOSTUNREACH: 22441 case ENETDOWN: 22442 if (TCPS_HAVERCVDSYN(tp->t_state)) { 22443 tp->t_softerror = error; 22444 } 22445 /* FALLTHROUGH */ 22446 default: 22447 slot = 10 * HPTS_USEC_IN_MSEC; 22448 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 22449 #ifdef TCP_ACCOUNTING 22450 crtsc = get_cyclecount(); 22451 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22452 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 22453 } 22454 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22455 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 22456 } 22457 sched_unpin(); 22458 #endif 22459 return (error); 22460 } 22461 } else { 22462 rack->rc_enobuf = 0; 22463 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 22464 rack->r_ctl.retran_during_recovery += len; 22465 } 22466 KMOD_TCPSTAT_INC(tcps_sndtotal); 22467 22468 /* 22469 * Data sent (as far as we can tell). If this advertises a larger 22470 * window than any other segment, then remember the size of the 22471 * advertised window. Any pending ACK has now been sent. 
22472 */ 22473 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 22474 tp->rcv_adv = tp->rcv_nxt + recwin; 22475 22476 tp->last_ack_sent = tp->rcv_nxt; 22477 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 22478 enobufs: 22479 if (sendalot) { 22480 /* Do we need to turn off sendalot? */ 22481 if (rack->r_ctl.rc_pace_max_segs && 22482 (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) { 22483 /* We hit our max. */ 22484 sendalot = 0; 22485 } else if ((rack->rc_user_set_max_segs) && 22486 (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) { 22487 /* We hit the user defined max */ 22488 sendalot = 0; 22489 } 22490 } 22491 if ((error == 0) && (flags & TH_FIN)) 22492 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 22493 if (flags & TH_RST) { 22494 /* 22495 * We don't send again after sending a RST. 22496 */ 22497 slot = 0; 22498 sendalot = 0; 22499 if (error == 0) 22500 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 22501 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) { 22502 /* 22503 * Get our pacing rate, if an error 22504 * occurred in sending (ENOBUF) we would 22505 * hit the else if with slot preset. Other 22506 * errors return. 22507 */ 22508 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz); 22509 } 22510 if (rsm && 22511 (rsm->r_flags & RACK_HAS_SYN) == 0 && 22512 rack->use_rack_rr) { 22513 /* Its a retransmit and we use the rack cheat? */ 22514 if ((slot == 0) || 22515 (rack->rc_always_pace == 0) || 22516 (rack->r_rr_config == 1)) { 22517 /* 22518 * We have no pacing set or we 22519 * are using old-style rack or 22520 * we are overridden to use the old 1ms pacing. 22521 */ 22522 slot = rack->r_ctl.rc_min_to; 22523 } 22524 } 22525 /* We have sent clear the flag */ 22526 rack->r_ent_rec_ns = 0; 22527 if (rack->r_must_retran) { 22528 if (rsm) { 22529 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 22530 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 22531 /* 22532 * We have retransmitted all. 22533 */ 22534 rack->r_must_retran = 0; 22535 rack->r_ctl.rc_out_at_rto = 0; 22536 } 22537 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 22538 /* 22539 * Sending new data will also kill 22540 * the loop. 22541 */ 22542 rack->r_must_retran = 0; 22543 rack->r_ctl.rc_out_at_rto = 0; 22544 } 22545 } 22546 rack->r_ctl.fsb.recwin = recwin; 22547 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 22548 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 22549 /* 22550 * We hit an RTO and now have past snd_max at the RTO 22551 * clear all the WAS flags. 
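 *
 * (More generally on the pacing delay chosen above: when no error and
 * no earlier branch forced a slot, rack_get_pacing_delay() turns the
 * bytes just sent into a wait before the next send, conceptually
 *
 *	slot_usecs ~= (tot_len_this_send * 1000000) / rate_Bps
 *
 * so a 64KB burst at 12.5 MB/s paces out roughly 5ms; "rate_Bps" is a
 * stand-in for whichever rate source - fixed, DGP or hardware - is in
 * effect, not a variable in this function.)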
22552 */ 22553 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 22554 } 22555 if (slot) { 22556 /* set the rack tcb into the slot N */ 22557 if ((error == 0) && 22558 rack_use_rfo && 22559 ((flags & (TH_SYN|TH_FIN)) == 0) && 22560 (rsm == NULL) && 22561 (tp->snd_nxt == tp->snd_max) && 22562 (ipoptlen == 0) && 22563 (tp->rcv_numsacks == 0) && 22564 rack->r_fsb_inited && 22565 TCPS_HAVEESTABLISHED(tp->t_state) && 22566 ((IN_RECOVERY(tp->t_flags)) == 0) && 22567 (rack->r_must_retran == 0) && 22568 ((tp->t_flags & TF_NEEDFIN) == 0) && 22569 (len > 0) && (orig_len > 0) && 22570 (orig_len > len) && 22571 ((orig_len - len) >= segsiz) && 22572 ((optlen == 0) || 22573 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 22574 /* We can send at least one more MSS using our fsb */ 22575 rack_setup_fast_output(tp, rack, sb, len, orig_len, 22576 segsiz, pace_max_seg, hw_tls, flags); 22577 } else 22578 rack->r_fast_output = 0; 22579 rack_log_fsb(rack, tp, so, flags, 22580 ipoptlen, orig_len, len, error, 22581 (rsm == NULL), optlen, __LINE__, 2); 22582 } else if (sendalot) { 22583 int ret; 22584 22585 sack_rxmit = 0; 22586 if ((error == 0) && 22587 rack_use_rfo && 22588 ((flags & (TH_SYN|TH_FIN)) == 0) && 22589 (rsm == NULL) && 22590 (ipoptlen == 0) && 22591 (tp->rcv_numsacks == 0) && 22592 (tp->snd_nxt == tp->snd_max) && 22593 (rack->r_must_retran == 0) && 22594 rack->r_fsb_inited && 22595 TCPS_HAVEESTABLISHED(tp->t_state) && 22596 ((IN_RECOVERY(tp->t_flags)) == 0) && 22597 ((tp->t_flags & TF_NEEDFIN) == 0) && 22598 (len > 0) && (orig_len > 0) && 22599 (orig_len > len) && 22600 ((orig_len - len) >= segsiz) && 22601 ((optlen == 0) || 22602 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 22603 /* we can use fast_output for more */ 22604 rack_setup_fast_output(tp, rack, sb, len, orig_len, 22605 segsiz, pace_max_seg, hw_tls, flags); 22606 if (rack->r_fast_output) { 22607 error = 0; 22608 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 22609 if (ret >= 0) 22610 return (ret); 22611 else if (error) 22612 goto nomore; 22613 22614 } 22615 } 22616 goto again; 22617 } 22618 /* Assure when we leave that snd_nxt will point to top */ 22619 skip_all_send: 22620 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 22621 tp->snd_nxt = tp->snd_max; 22622 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 22623 #ifdef TCP_ACCOUNTING 22624 crtsc = get_cyclecount() - ts_val; 22625 if (tot_len_this_send) { 22626 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22627 tp->tcp_cnt_counters[SND_OUT_DATA]++; 22628 } 22629 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22630 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 22631 } 22632 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22633 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 22634 } 22635 } else { 22636 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22637 tp->tcp_cnt_counters[SND_OUT_ACK]++; 22638 } 22639 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 22640 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 22641 } 22642 } 22643 sched_unpin(); 22644 #endif 22645 if (error == ENOBUFS) 22646 error = 0; 22647 return (error); 22648 } 22649 22650 static void 22651 rack_update_seg(struct tcp_rack *rack) 22652 { 22653 uint32_t orig_val; 22654 22655 orig_val = rack->r_ctl.rc_pace_max_segs; 22656 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 22657 if (orig_val != rack->r_ctl.rc_pace_max_segs) 22658 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); 22659 } 22660 22661 static void 
22662 rack_mtu_change(struct tcpcb *tp) 22663 { 22664 /* 22665 * The MSS may have changed 22666 */ 22667 struct tcp_rack *rack; 22668 struct rack_sendmap *rsm; 22669 22670 rack = (struct tcp_rack *)tp->t_fb_ptr; 22671 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 22672 /* 22673 * The MTU has changed we need to resend everything 22674 * since all we have sent is lost. We first fix 22675 * up the mtu though. 22676 */ 22677 rack_set_pace_segments(tp, rack, __LINE__, NULL); 22678 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 22679 rack_remxt_tmr(tp); 22680 rack->r_fast_output = 0; 22681 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 22682 rack->r_ctl.rc_sacked); 22683 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 22684 rack->r_must_retran = 1; 22685 /* Mark all inflight to needing to be rxt'd */ 22686 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 22687 rsm->r_flags |= (RACK_MUST_RXT|RACK_PMTU_CHG); 22688 } 22689 } 22690 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 22691 /* We don't use snd_nxt to retransmit */ 22692 tp->snd_nxt = tp->snd_max; 22693 } 22694 22695 static int 22696 rack_set_dgp(struct tcp_rack *rack) 22697 { 22698 /* pace_always=1 */ 22699 if (rack->rc_always_pace == 0) { 22700 if (tcp_can_enable_pacing() == 0) 22701 return (EBUSY); 22702 } 22703 rack->rc_fillcw_apply_discount = 0; 22704 rack->dgp_on = 1; 22705 rack->rc_always_pace = 1; 22706 rack->use_fixed_rate = 0; 22707 if (rack->gp_ready) 22708 rack_set_cc_pacing(rack); 22709 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 22710 rack->rack_attempt_hdwr_pace = 0; 22711 /* rxt settings */ 22712 rack->full_size_rxt = 1; 22713 rack->shape_rxt_to_pacing_min = 0; 22714 /* cmpack=1 */ 22715 rack->r_use_cmp_ack = 1; 22716 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 22717 rack->r_use_cmp_ack) 22718 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 22719 /* scwnd=1 */ 22720 rack->rack_enable_scwnd = 1; 22721 /* dynamic=100 */ 22722 rack->rc_gp_dyn_mul = 1; 22723 /* gp_inc_ca */ 22724 rack->r_ctl.rack_per_of_gp_ca = 100; 22725 /* rrr_conf=3 */ 22726 rack->r_rr_config = 3; 22727 /* npush=2 */ 22728 rack->r_ctl.rc_no_push_at_mrtt = 2; 22729 /* fillcw=1 */ 22730 if (rack->r_cwnd_was_clamped == 0) { 22731 rack->rc_pace_to_cwnd = 1; 22732 } else { 22733 rack->rc_pace_to_cwnd = 0; 22734 /* Reset all multipliers to 100.0 so just the measured bw */ 22735 rack->r_ctl.rack_per_of_gp_ss = 100; 22736 rack->r_ctl.rack_per_of_gp_ca = 100; 22737 } 22738 rack->rc_pace_fill_if_rttin_range = 0; 22739 rack->rtt_limit_mul = 0; 22740 /* noprr=1 */ 22741 rack->rack_no_prr = 1; 22742 /* lscwnd=1 */ 22743 rack->r_limit_scw = 1; 22744 /* gp_inc_rec */ 22745 rack->r_ctl.rack_per_of_gp_rec = 90; 22746 rack_client_buffer_level_set(rack); 22747 return (0); 22748 } 22749 22750 22751 22752 static int 22753 rack_set_profile(struct tcp_rack *rack, int prof) 22754 { 22755 int err = EINVAL; 22756 if (prof == 1) { 22757 /* 22758 * Profile 1 is "standard" DGP. It ignores 22759 * client buffer level. 22760 */ 22761 rack->r_ctl.rc_dgp_bl_agg = DGP_LEVEL0; 22762 err = rack_set_dgp(rack); 22763 if (err) 22764 return (err); 22765 } else if (prof == 2) { 22766 /* 22767 * Profile 2 is DGP. Less aggressive with 22768 * respect to client buffer level. 22769 */ 22770 rack->r_ctl.rc_dgp_bl_agg = DGP_LEVEL1; 22771 err = rack_set_dgp(rack); 22772 if (err) 22773 return (err); 22774 } else if (prof == 3) { 22775 /* 22776 * Profile 3 is DGP. Even Less aggressive with 22777 * respect to client buffer level. 
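 *
 * (Usage sketch from userspace, assuming "fd" is a connected TCP
 * socket and the option names from netinet/tcp.h are visible; the
 * option value is the profile number handled in this chain:
 *
 *	int prof = 3;
 *	setsockopt(fd, IPPROTO_TCP, TCP_RACK_PROFILE, &prof, sizeof(prof));
 *
 * with 0 restoring the defaults and the other values selecting the
 * DGP variants described in this chain.)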
22778 */ 22779 rack->r_ctl.rc_dgp_bl_agg = DGP_LEVEL2; 22780 err = rack_set_dgp(rack); 22781 if (err) 22782 return (err); 22783 } else if (prof == 4) { 22784 /* 22785 * Profile 4 is DGP with the most responsiveness 22786 * to client buffer level. 22787 */ 22788 rack->r_ctl.rc_dgp_bl_agg = DGP_LEVEL3; 22789 err = rack_set_dgp(rack); 22790 if (err) 22791 return (err); 22792 } else if (prof == 5) { 22793 err = rack_set_dgp(rack); 22794 if (err) 22795 return (err); 22796 /* 22797 * By turning DGP off we change the rate 22798 * picked to be only the one the cwnd and rtt 22799 * get us. 22800 */ 22801 rack->dgp_on = 0; 22802 } else if (prof == 6) { 22803 err = rack_set_dgp(rack); 22804 if (err) 22805 return (err); 22806 /* 22807 * Profile 6 tweaks DGP so that it will apply to 22808 * fill-cw the same settings that profile5 does 22809 * to replace DGP. It gets then the max(dgp-rate, fillcw(discounted). 22810 */ 22811 rack->rc_fillcw_apply_discount = 1; 22812 } else if (prof == 0) { 22813 /* This changes things back to the default settings */ 22814 rack->dgp_on = 0; 22815 rack->rc_hybrid_mode = 0; 22816 err = 0; 22817 if (rack_fill_cw_state) 22818 rack->rc_pace_to_cwnd = 1; 22819 else 22820 rack->rc_pace_to_cwnd = 0; 22821 if (rack->rc_always_pace) { 22822 tcp_decrement_paced_conn(); 22823 rack_undo_cc_pacing(rack); 22824 rack->rc_always_pace = 0; 22825 } 22826 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 22827 rack->rc_always_pace = 1; 22828 if (rack->rack_hibeta) 22829 rack_set_cc_pacing(rack); 22830 } else 22831 rack->rc_always_pace = 0; 22832 if (rack_dsack_std_based & 0x1) { 22833 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 22834 rack->rc_rack_tmr_std_based = 1; 22835 } 22836 if (rack_dsack_std_based & 0x2) { 22837 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 22838 rack->rc_rack_use_dsack = 1; 22839 } 22840 if (rack_use_cmp_acks) 22841 rack->r_use_cmp_ack = 1; 22842 else 22843 rack->r_use_cmp_ack = 0; 22844 if (rack_disable_prr) 22845 rack->rack_no_prr = 1; 22846 else 22847 rack->rack_no_prr = 0; 22848 if (rack_gp_no_rec_chg) 22849 rack->rc_gp_no_rec_chg = 1; 22850 else 22851 rack->rc_gp_no_rec_chg = 0; 22852 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 22853 rack->r_mbuf_queue = 1; 22854 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 22855 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; 22856 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 22857 } else { 22858 rack->r_mbuf_queue = 0; 22859 rack->rc_tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 22860 } 22861 if (rack_enable_shared_cwnd) 22862 rack->rack_enable_scwnd = 1; 22863 else 22864 rack->rack_enable_scwnd = 0; 22865 if (rack_do_dyn_mul) { 22866 /* When dynamic adjustment is on CA needs to start at 100% */ 22867 rack->rc_gp_dyn_mul = 1; 22868 if (rack_do_dyn_mul >= 100) 22869 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 22870 } else { 22871 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 22872 rack->rc_gp_dyn_mul = 0; 22873 } 22874 rack->r_rr_config = 0; 22875 rack->r_ctl.rc_no_push_at_mrtt = 0; 22876 rack->rc_pace_to_cwnd = 0; 22877 rack->rc_pace_fill_if_rttin_range = 0; 22878 rack->rtt_limit_mul = 0; 22879 22880 if (rack_enable_hw_pacing) 22881 rack->rack_hdw_pace_ena = 1; 22882 else 22883 rack->rack_hdw_pace_ena = 0; 22884 if (rack_disable_prr) 22885 rack->rack_no_prr = 1; 22886 else 22887 rack->rack_no_prr = 0; 22888 if (rack_limits_scwnd) 22889 rack->r_limit_scw = 1; 22890 else 22891 rack->r_limit_scw = 0; 22892 rack_init_retransmit_value(rack, 
rack_rxt_controls); 22893 err = 0; 22894 } 22895 return (err); 22896 } 22897 22898 static int 22899 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 22900 { 22901 struct deferred_opt_list *dol; 22902 22903 dol = malloc(sizeof(struct deferred_opt_list), 22904 M_TCPFSB, M_NOWAIT|M_ZERO); 22905 if (dol == NULL) { 22906 /* 22907 * No space yikes -- fail out.. 22908 */ 22909 return (0); 22910 } 22911 dol->optname = sopt_name; 22912 dol->optval = loptval; 22913 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 22914 return (1); 22915 } 22916 22917 static int 22918 process_hybrid_pacing(struct tcp_rack *rack, struct tcp_hybrid_req *hybrid) 22919 { 22920 #ifdef TCP_REQUEST_TRK 22921 struct tcp_sendfile_track *sft; 22922 struct timeval tv; 22923 tcp_seq seq; 22924 int err; 22925 22926 microuptime(&tv); 22927 22928 /* 22929 * If BB logging is not on we need to look at the DTL flag. 22930 * If its on already then those reasons override the DTL input. 22931 * We do this with any request, you can turn DTL on, but it does 22932 * not turn off at least from hybrid pacing requests. 22933 */ 22934 if (tcp_bblogging_on(rack->rc_tp) == 0) { 22935 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_DTL) { 22936 /* Turn on BB point logging */ 22937 tcp_set_bblog_state(rack->rc_tp, TCP_LOG_VIA_BBPOINTS, 22938 TCP_BBPOINT_REQ_LEVEL_LOGGING); 22939 } 22940 } 22941 /* Make sure no fixed rate is on */ 22942 rack->use_fixed_rate = 0; 22943 rack->r_ctl.rc_fixed_pacing_rate_rec = 0; 22944 rack->r_ctl.rc_fixed_pacing_rate_ca = 0; 22945 rack->r_ctl.rc_fixed_pacing_rate_ss = 0; 22946 /* Now allocate or find our entry that will have these settings */ 22947 sft = tcp_req_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusectick(&tv), 0); 22948 if (sft == NULL) { 22949 rack->rc_tp->tcp_hybrid_error++; 22950 /* no space, where would it have gone? */ 22951 seq = rack->rc_tp->snd_una + rack->rc_tp->t_inpcb.inp_socket->so_snd.sb_ccc; 22952 rack_log_hybrid(rack, seq, NULL, HYBRID_LOG_NO_ROOM, __LINE__, 0); 22953 return (ENOSPC); 22954 } 22955 /* The seq will be snd_una + everything in the buffer */ 22956 seq = sft->start_seq; 22957 if ((hybrid->hybrid_flags & TCP_HYBRID_PACING_ENABLE) == 0) { 22958 /* Disabling hybrid pacing */ 22959 if (rack->rc_hybrid_mode) { 22960 rack_set_profile(rack, 0); 22961 rack->rc_tp->tcp_hybrid_stop++; 22962 } 22963 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_TURNED_OFF, __LINE__, 0); 22964 return (0); 22965 } 22966 if (rack->dgp_on == 0) { 22967 /* 22968 * If we have not yet turned DGP on, do so 22969 * now setting pure DGP mode, no buffer level 22970 * response. 
22971 */ 22972 if ((err = rack_set_profile(rack, 1)) != 0){ 22973 /* Failed to turn pacing on */ 22974 rack->rc_tp->tcp_hybrid_error++; 22975 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_NO_PACING, __LINE__, 0); 22976 return (err); 22977 } 22978 } 22979 /* Now set in our flags */ 22980 sft->hybrid_flags = hybrid->hybrid_flags | TCP_HYBRID_PACING_WASSET; 22981 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_CSPR) 22982 sft->cspr = hybrid->cspr; 22983 else 22984 sft->cspr = 0; 22985 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_H_MS) 22986 sft->hint_maxseg = hybrid->hint_maxseg; 22987 else 22988 sft->hint_maxseg = 0; 22989 rack->rc_hybrid_mode = 1; 22990 rack->rc_tp->tcp_hybrid_start++; 22991 rack_log_hybrid(rack, seq, sft, HYBRID_LOG_RULES_SET, __LINE__,0); 22992 return (0); 22993 #else 22994 return (ENOTSUP); 22995 #endif 22996 } 22997 22998 static int 22999 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 23000 uint32_t optval, uint64_t loptval, struct tcp_hybrid_req *hybrid) 23001 23002 { 23003 struct epoch_tracker et; 23004 struct sockopt sopt; 23005 struct cc_newreno_opts opt; 23006 uint64_t val; 23007 int error = 0; 23008 uint16_t ca, ss; 23009 23010 switch (sopt_name) { 23011 case TCP_RACK_SET_RXT_OPTIONS: 23012 if ((optval >= 0) && (optval <= 2)) { 23013 rack_init_retransmit_value(rack, optval); 23014 } else { 23015 /* 23016 * You must send in 0, 1 or 2 all else is 23017 * invalid. 23018 */ 23019 error = EINVAL; 23020 } 23021 break; 23022 case TCP_RACK_DSACK_OPT: 23023 RACK_OPTS_INC(tcp_rack_dsack_opt); 23024 if (optval & 0x1) { 23025 rack->rc_rack_tmr_std_based = 1; 23026 } else { 23027 rack->rc_rack_tmr_std_based = 0; 23028 } 23029 if (optval & 0x2) { 23030 rack->rc_rack_use_dsack = 1; 23031 } else { 23032 rack->rc_rack_use_dsack = 0; 23033 } 23034 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); 23035 break; 23036 case TCP_RACK_PACING_DIVISOR: 23037 RACK_OPTS_INC(tcp_rack_pacing_divisor); 23038 if (optval == 0) { 23039 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; 23040 } else { 23041 if (optval < RL_MIN_DIVISOR) 23042 rack->r_ctl.pace_len_divisor = RL_MIN_DIVISOR; 23043 else 23044 rack->r_ctl.pace_len_divisor = optval; 23045 } 23046 break; 23047 case TCP_RACK_HI_BETA: 23048 RACK_OPTS_INC(tcp_rack_hi_beta); 23049 if (optval > 0) { 23050 rack->rack_hibeta = 1; 23051 if ((optval >= 50) && 23052 (optval <= 100)) { 23053 /* 23054 * User wants to set a custom beta. 23055 */ 23056 rack->r_ctl.saved_hibeta = optval; 23057 if (rack->rc_pacing_cc_set) 23058 rack_undo_cc_pacing(rack); 23059 rack->r_ctl.rc_saved_beta.beta = optval; 23060 } 23061 if (rack->rc_pacing_cc_set == 0) 23062 rack_set_cc_pacing(rack); 23063 } else { 23064 rack->rack_hibeta = 0; 23065 if (rack->rc_pacing_cc_set) 23066 rack_undo_cc_pacing(rack); 23067 } 23068 break; 23069 case TCP_RACK_PACING_BETA: 23070 RACK_OPTS_INC(tcp_rack_beta); 23071 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 23072 /* This only works for newreno. 
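 *
 * "beta" here is New Reno's multiplicative-decrease factor expressed
 * as a percentage, i.e. on a congestion event the window is cut
 * roughly as
 *
 *	cwnd = (cwnd * beta) / 100;	beta = 80 keeps 80% of cwnd
 *
 * which is why the knob is only meaningful when the connection is
 * actually running newreno.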
*/ 23073 error = EINVAL; 23074 break; 23075 } 23076 if (rack->rc_pacing_cc_set) { 23077 /* 23078 * Set them into the real CC module 23079 * whats in the rack pcb is the old values 23080 * to be used on restoral/ 23081 */ 23082 sopt.sopt_dir = SOPT_SET; 23083 opt.name = CC_NEWRENO_BETA; 23084 opt.val = optval; 23085 if (CC_ALGO(tp)->ctl_output != NULL) 23086 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 23087 else { 23088 error = ENOENT; 23089 break; 23090 } 23091 } else { 23092 /* 23093 * Not pacing yet so set it into our local 23094 * rack pcb storage. 23095 */ 23096 rack->r_ctl.rc_saved_beta.beta = optval; 23097 } 23098 break; 23099 case TCP_RACK_TIMER_SLOP: 23100 RACK_OPTS_INC(tcp_rack_timer_slop); 23101 rack->r_ctl.timer_slop = optval; 23102 if (rack->rc_tp->t_srtt) { 23103 /* 23104 * If we have an SRTT lets update t_rxtcur 23105 * to have the new slop. 23106 */ 23107 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 23108 rack_rto_min, rack_rto_max, 23109 rack->r_ctl.timer_slop); 23110 } 23111 break; 23112 case TCP_RACK_PACING_BETA_ECN: 23113 RACK_OPTS_INC(tcp_rack_beta_ecn); 23114 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 23115 /* This only works for newreno. */ 23116 error = EINVAL; 23117 break; 23118 } 23119 if (rack->rc_pacing_cc_set) { 23120 /* 23121 * Set them into the real CC module 23122 * whats in the rack pcb is the old values 23123 * to be used on restoral/ 23124 */ 23125 sopt.sopt_dir = SOPT_SET; 23126 opt.name = CC_NEWRENO_BETA_ECN; 23127 opt.val = optval; 23128 if (CC_ALGO(tp)->ctl_output != NULL) 23129 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 23130 else 23131 error = ENOENT; 23132 } else { 23133 /* 23134 * Not pacing yet so set it into our local 23135 * rack pcb storage. 23136 */ 23137 rack->r_ctl.rc_saved_beta.beta_ecn = optval; 23138 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED; 23139 } 23140 break; 23141 case TCP_DEFER_OPTIONS: 23142 RACK_OPTS_INC(tcp_defer_opt); 23143 if (optval) { 23144 if (rack->gp_ready) { 23145 /* Too late */ 23146 error = EINVAL; 23147 break; 23148 } 23149 rack->defer_options = 1; 23150 } else 23151 rack->defer_options = 0; 23152 break; 23153 case TCP_RACK_MEASURE_CNT: 23154 RACK_OPTS_INC(tcp_rack_measure_cnt); 23155 if (optval && (optval <= 0xff)) { 23156 rack->r_ctl.req_measurements = optval; 23157 } else 23158 error = EINVAL; 23159 break; 23160 case TCP_REC_ABC_VAL: 23161 RACK_OPTS_INC(tcp_rec_abc_val); 23162 if (optval > 0) 23163 rack->r_use_labc_for_rec = 1; 23164 else 23165 rack->r_use_labc_for_rec = 0; 23166 break; 23167 case TCP_RACK_ABC_VAL: 23168 RACK_OPTS_INC(tcp_rack_abc_val); 23169 if ((optval > 0) && (optval < 255)) 23170 rack->rc_labc = optval; 23171 else 23172 error = EINVAL; 23173 break; 23174 case TCP_HDWR_UP_ONLY: 23175 RACK_OPTS_INC(tcp_pacing_up_only); 23176 if (optval) 23177 rack->r_up_only = 1; 23178 else 23179 rack->r_up_only = 0; 23180 break; 23181 case TCP_PACING_RATE_CAP: 23182 RACK_OPTS_INC(tcp_pacing_rate_cap); 23183 rack->r_ctl.bw_rate_cap = loptval; 23184 break; 23185 case TCP_HYBRID_PACING: 23186 if (hybrid == NULL) { 23187 error = EINVAL; 23188 break; 23189 } 23190 error = process_hybrid_pacing(rack, hybrid); 23191 break; 23192 case TCP_RACK_PROFILE: 23193 RACK_OPTS_INC(tcp_profile); 23194 error = rack_set_profile(rack, optval); 23195 break; 23196 case TCP_USE_CMP_ACKS: 23197 RACK_OPTS_INC(tcp_use_cmp_acks); 23198 if ((optval == 0) && (tp->t_flags2 & TF2_MBUF_ACKCMP)) { 23199 /* You can't turn it off once its on! 
*/ 23200 error = EINVAL; 23201 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { 23202 rack->r_use_cmp_ack = 1; 23203 rack->r_mbuf_queue = 1; 23204 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23205 } 23206 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 23207 tp->t_flags2 |= TF2_MBUF_ACKCMP; 23208 break; 23209 case TCP_SHARED_CWND_TIME_LIMIT: 23210 RACK_OPTS_INC(tcp_lscwnd); 23211 if (optval) 23212 rack->r_limit_scw = 1; 23213 else 23214 rack->r_limit_scw = 0; 23215 break; 23216 case TCP_RACK_DGP_IN_REC: 23217 RACK_OPTS_INC(tcp_dgp_in_rec); 23218 if (optval) 23219 rack->r_ctl.full_dgp_in_rec = 1; 23220 else 23221 rack->r_ctl.full_dgp_in_rec = 0; 23222 break; 23223 case TCP_RXT_CLAMP: 23224 RACK_OPTS_INC(tcp_rxt_clamp); 23225 rack_translate_clamp_value(rack, optval); 23226 break; 23227 case TCP_RACK_PACE_TO_FILL: 23228 RACK_OPTS_INC(tcp_fillcw); 23229 if (optval == 0) 23230 rack->rc_pace_to_cwnd = 0; 23231 else { 23232 rack->rc_pace_to_cwnd = 1; 23233 if (optval > 1) 23234 rack->r_fill_less_agg = 1; 23235 } 23236 if ((optval >= rack_gp_rtt_maxmul) && 23237 rack_gp_rtt_maxmul && 23238 (optval < 0xf)) { 23239 rack->rc_pace_fill_if_rttin_range = 1; 23240 rack->rtt_limit_mul = optval; 23241 } else { 23242 rack->rc_pace_fill_if_rttin_range = 0; 23243 rack->rtt_limit_mul = 0; 23244 } 23245 break; 23246 case TCP_RACK_NO_PUSH_AT_MAX: 23247 RACK_OPTS_INC(tcp_npush); 23248 if (optval == 0) 23249 rack->r_ctl.rc_no_push_at_mrtt = 0; 23250 else if (optval < 0xff) 23251 rack->r_ctl.rc_no_push_at_mrtt = optval; 23252 else 23253 error = EINVAL; 23254 break; 23255 case TCP_SHARED_CWND_ENABLE: 23256 RACK_OPTS_INC(tcp_rack_scwnd); 23257 if (optval == 0) 23258 rack->rack_enable_scwnd = 0; 23259 else 23260 rack->rack_enable_scwnd = 1; 23261 break; 23262 case TCP_RACK_MBUF_QUEUE: 23263 /* Now do we use the LRO mbuf-queue feature */ 23264 RACK_OPTS_INC(tcp_rack_mbufq); 23265 if (optval || rack->r_use_cmp_ack) 23266 rack->r_mbuf_queue = 1; 23267 else 23268 rack->r_mbuf_queue = 0; 23269 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 23270 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23271 else 23272 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 23273 break; 23274 case TCP_RACK_NONRXT_CFG_RATE: 23275 RACK_OPTS_INC(tcp_rack_cfg_rate); 23276 if (optval == 0) 23277 rack->rack_rec_nonrxt_use_cr = 0; 23278 else 23279 rack->rack_rec_nonrxt_use_cr = 1; 23280 break; 23281 case TCP_NO_PRR: 23282 RACK_OPTS_INC(tcp_rack_noprr); 23283 if (optval == 0) 23284 rack->rack_no_prr = 0; 23285 else if (optval == 1) 23286 rack->rack_no_prr = 1; 23287 else if (optval == 2) 23288 rack->no_prr_addback = 1; 23289 else 23290 error = EINVAL; 23291 break; 23292 case TCP_TIMELY_DYN_ADJ: 23293 RACK_OPTS_INC(tcp_timely_dyn); 23294 if (optval == 0) 23295 rack->rc_gp_dyn_mul = 0; 23296 else { 23297 rack->rc_gp_dyn_mul = 1; 23298 if (optval >= 100) { 23299 /* 23300 * If the user sets something 100 or more 23301 * its the gp_ca value. 
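 *
 * The value is a percentage of the measured goodput (GP) bandwidth to
 * pace at while in congestion avoidance; with made-up numbers, an
 * optval of 150 stores 150 in rack_per_of_gp_ca, i.e. a target of
 * roughly
 *
 *	rate ~= (gp_bw * 150) / 100	1.5x the measured b/w
 *
 * (gp_bw standing in for the goodput estimate), while a nonzero value
 * below 100 only turns on the dynamic adjustment and leaves the CA
 * multiplier alone.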
23302 */ 23303 rack->r_ctl.rack_per_of_gp_ca = optval; 23304 } 23305 } 23306 break; 23307 case TCP_RACK_DO_DETECTION: 23308 RACK_OPTS_INC(tcp_rack_do_detection); 23309 if (optval == 0) 23310 rack->do_detection = 0; 23311 else 23312 rack->do_detection = 1; 23313 break; 23314 case TCP_RACK_TLP_USE: 23315 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 23316 error = EINVAL; 23317 break; 23318 } 23319 RACK_OPTS_INC(tcp_tlp_use); 23320 rack->rack_tlp_threshold_use = optval; 23321 break; 23322 case TCP_RACK_TLP_REDUCE: 23323 /* RACK TLP cwnd reduction (bool) */ 23324 RACK_OPTS_INC(tcp_rack_tlp_reduce); 23325 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 23326 break; 23327 /* Pacing related ones */ 23328 case TCP_RACK_PACE_ALWAYS: 23329 /* 23330 * zero is old rack method, 1 is new 23331 * method using a pacing rate. 23332 */ 23333 RACK_OPTS_INC(tcp_rack_pace_always); 23334 if (optval > 0) { 23335 if (rack->rc_always_pace) { 23336 error = EALREADY; 23337 break; 23338 } else if (tcp_can_enable_pacing()) { 23339 rack->rc_always_pace = 1; 23340 if (rack->rack_hibeta) 23341 rack_set_cc_pacing(rack); 23342 } 23343 else { 23344 error = ENOSPC; 23345 break; 23346 } 23347 } else { 23348 if (rack->rc_always_pace) { 23349 tcp_decrement_paced_conn(); 23350 rack->rc_always_pace = 0; 23351 rack_undo_cc_pacing(rack); 23352 } 23353 } 23354 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 23355 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; 23356 else 23357 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; 23358 /* A rate may be set irate or other, if so set seg size */ 23359 rack_update_seg(rack); 23360 break; 23361 case TCP_BBR_RACK_INIT_RATE: 23362 RACK_OPTS_INC(tcp_initial_rate); 23363 val = optval; 23364 /* Change from kbits per second to bytes per second */ 23365 val *= 1000; 23366 val /= 8; 23367 rack->r_ctl.init_rate = val; 23368 if (rack->rc_init_win != rack_default_init_window) { 23369 uint32_t win, snt; 23370 23371 /* 23372 * Options don't always get applied 23373 * in the order you think. So in order 23374 * to assure we update a cwnd we need 23375 * to check and see if we are still 23376 * where we should raise the cwnd. 23377 */ 23378 win = rc_init_window(rack); 23379 if (SEQ_GT(tp->snd_max, tp->iss)) 23380 snt = tp->snd_max - tp->iss; 23381 else 23382 snt = 0; 23383 if ((snt < win) && 23384 (tp->snd_cwnd < win)) 23385 tp->snd_cwnd = win; 23386 } 23387 if (rack->rc_always_pace) 23388 rack_update_seg(rack); 23389 break; 23390 case TCP_BBR_IWINTSO: 23391 RACK_OPTS_INC(tcp_initial_win); 23392 if (optval && (optval <= 0xff)) { 23393 uint32_t win, snt; 23394 23395 rack->rc_init_win = optval; 23396 win = rc_init_window(rack); 23397 if (SEQ_GT(tp->snd_max, tp->iss)) 23398 snt = tp->snd_max - tp->iss; 23399 else 23400 snt = 0; 23401 if ((snt < win) && 23402 (tp->t_srtt | 23403 rack->r_ctl.init_rate)) { 23404 /* 23405 * We are not past the initial window 23406 * and we have some bases for pacing, 23407 * so we need to possibly adjust up 23408 * the cwnd. Note even if we don't set 23409 * the cwnd, its still ok to raise the rc_init_win 23410 * which can be used coming out of idle when we 23411 * would have a rate. 
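 *
 * (On the TCP_BBR_RACK_INIT_RATE handling just above: the option is
 * supplied in kilobits per second and stored in bytes per second, so
 * an optval of 5000 ends up as
 *
 *	val = 5000 * 1000 / 8;		625000 bytes/sec
 *
 * in r_ctl.init_rate, which is then one of the "bases for pacing"
 * this block checks for.)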
23412 */ 23413 if (tp->snd_cwnd < win) 23414 tp->snd_cwnd = win; 23415 } 23416 if (rack->rc_always_pace) 23417 rack_update_seg(rack); 23418 } else 23419 error = EINVAL; 23420 break; 23421 case TCP_RACK_FORCE_MSEG: 23422 RACK_OPTS_INC(tcp_rack_force_max_seg); 23423 if (optval) 23424 rack->rc_force_max_seg = 1; 23425 else 23426 rack->rc_force_max_seg = 0; 23427 break; 23428 case TCP_RACK_PACE_MIN_SEG: 23429 RACK_OPTS_INC(tcp_rack_min_seg); 23430 rack->r_ctl.rc_user_set_min_segs = (0x0000ffff & optval); 23431 rack_set_pace_segments(tp, rack, __LINE__, NULL); 23432 break; 23433 case TCP_RACK_PACE_MAX_SEG: 23434 /* Max segments size in a pace in bytes */ 23435 RACK_OPTS_INC(tcp_rack_max_seg); 23436 if (optval <= MAX_USER_SET_SEG) 23437 rack->rc_user_set_max_segs = optval; 23438 else 23439 rack->rc_user_set_max_segs = MAX_USER_SET_SEG; 23440 rack_set_pace_segments(tp, rack, __LINE__, NULL); 23441 break; 23442 case TCP_RACK_PACE_RATE_REC: 23443 /* Set the fixed pacing rate in Bytes per second ca */ 23444 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 23445 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 23446 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 23447 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 23448 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 23449 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 23450 rack->use_fixed_rate = 1; 23451 if (rack->rack_hibeta) 23452 rack_set_cc_pacing(rack); 23453 rack_log_pacing_delay_calc(rack, 23454 rack->r_ctl.rc_fixed_pacing_rate_ss, 23455 rack->r_ctl.rc_fixed_pacing_rate_ca, 23456 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 23457 __LINE__, NULL,0); 23458 break; 23459 23460 case TCP_RACK_PACE_RATE_SS: 23461 /* Set the fixed pacing rate in Bytes per second ca */ 23462 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 23463 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 23464 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 23465 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 23466 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 23467 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 23468 rack->use_fixed_rate = 1; 23469 if (rack->rack_hibeta) 23470 rack_set_cc_pacing(rack); 23471 rack_log_pacing_delay_calc(rack, 23472 rack->r_ctl.rc_fixed_pacing_rate_ss, 23473 rack->r_ctl.rc_fixed_pacing_rate_ca, 23474 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 23475 __LINE__, NULL, 0); 23476 break; 23477 23478 case TCP_RACK_PACE_RATE_CA: 23479 /* Set the fixed pacing rate in Bytes per second ca */ 23480 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 23481 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 23482 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 23483 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 23484 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 23485 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 23486 rack->use_fixed_rate = 1; 23487 if (rack->rack_hibeta) 23488 rack_set_cc_pacing(rack); 23489 rack_log_pacing_delay_calc(rack, 23490 rack->r_ctl.rc_fixed_pacing_rate_ss, 23491 rack->r_ctl.rc_fixed_pacing_rate_ca, 23492 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 23493 __LINE__, NULL, 0); 23494 break; 23495 case TCP_RACK_GP_INCREASE_REC: 23496 RACK_OPTS_INC(tcp_gp_inc_rec); 23497 rack->r_ctl.rack_per_of_gp_rec = optval; 23498 rack_log_pacing_delay_calc(rack, 23499 rack->r_ctl.rack_per_of_gp_ss, 23500 rack->r_ctl.rack_per_of_gp_ca, 23501 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 23502 __LINE__, NULL, 0); 23503 break; 23504 case TCP_RACK_GP_INCREASE_CA: 23505 RACK_OPTS_INC(tcp_gp_inc_ca); 23506 ca = optval; 23507 if (ca < 100) { 23508 /* 23509 * We don't allow any reduction 23510 * over the GP 
b/w. 23511 */ 23512 error = EINVAL; 23513 break; 23514 } 23515 rack->r_ctl.rack_per_of_gp_ca = ca; 23516 rack_log_pacing_delay_calc(rack, 23517 rack->r_ctl.rack_per_of_gp_ss, 23518 rack->r_ctl.rack_per_of_gp_ca, 23519 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 23520 __LINE__, NULL, 0); 23521 break; 23522 case TCP_RACK_GP_INCREASE_SS: 23523 RACK_OPTS_INC(tcp_gp_inc_ss); 23524 ss = optval; 23525 if (ss < 100) { 23526 /* 23527 * We don't allow any reduction 23528 * over the GP b/w. 23529 */ 23530 error = EINVAL; 23531 break; 23532 } 23533 rack->r_ctl.rack_per_of_gp_ss = ss; 23534 rack_log_pacing_delay_calc(rack, 23535 rack->r_ctl.rack_per_of_gp_ss, 23536 rack->r_ctl.rack_per_of_gp_ca, 23537 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 23538 __LINE__, NULL, 0); 23539 break; 23540 case TCP_RACK_RR_CONF: 23541 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 23542 if (optval && optval <= 3) 23543 rack->r_rr_config = optval; 23544 else 23545 rack->r_rr_config = 0; 23546 break; 23547 case TCP_PACING_DND: /* URL:dnd */ 23548 if (optval > 0) 23549 rack->rc_pace_dnd = 1; 23550 else 23551 rack->rc_pace_dnd = 0; 23552 break; 23553 case TCP_HDWR_RATE_CAP: 23554 RACK_OPTS_INC(tcp_hdwr_rate_cap); 23555 if (optval) { 23556 if (rack->r_rack_hw_rate_caps == 0) 23557 rack->r_rack_hw_rate_caps = 1; 23558 else 23559 error = EALREADY; 23560 } else { 23561 rack->r_rack_hw_rate_caps = 0; 23562 } 23563 break; 23564 case TCP_RACK_SPLIT_LIMIT: 23565 RACK_OPTS_INC(tcp_split_limit); 23566 rack->r_ctl.rc_split_limit = optval; 23567 break; 23568 case TCP_BBR_HDWR_PACE: 23569 RACK_OPTS_INC(tcp_hdwr_pacing); 23570 if (optval){ 23571 if (rack->rack_hdrw_pacing == 0) { 23572 rack->rack_hdw_pace_ena = 1; 23573 rack->rack_attempt_hdwr_pace = 0; 23574 } else 23575 error = EALREADY; 23576 } else { 23577 rack->rack_hdw_pace_ena = 0; 23578 #ifdef RATELIMIT 23579 if (rack->r_ctl.crte != NULL) { 23580 rack->rack_hdrw_pacing = 0; 23581 rack->rack_attempt_hdwr_pace = 0; 23582 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 23583 rack->r_ctl.crte = NULL; 23584 } 23585 #endif 23586 } 23587 break; 23588 /* End Pacing related ones */ 23589 case TCP_RACK_PRR_SENDALOT: 23590 /* Allow PRR to send more than one seg */ 23591 RACK_OPTS_INC(tcp_rack_prr_sendalot); 23592 rack->r_ctl.rc_prr_sendalot = optval; 23593 break; 23594 case TCP_RACK_MIN_TO: 23595 /* Minimum time between rack t-o's in ms */ 23596 RACK_OPTS_INC(tcp_rack_min_to); 23597 rack->r_ctl.rc_min_to = optval; 23598 break; 23599 case TCP_RACK_EARLY_SEG: 23600 /* If early recovery max segments */ 23601 RACK_OPTS_INC(tcp_rack_early_seg); 23602 rack->r_ctl.rc_early_recovery_segs = optval; 23603 break; 23604 case TCP_RACK_ENABLE_HYSTART: 23605 { 23606 if (optval) { 23607 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 23608 if (rack_do_hystart > RACK_HYSTART_ON) 23609 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 23610 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 23611 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 23612 } else { 23613 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 23614 } 23615 } 23616 break; 23617 case TCP_RACK_REORD_THRESH: 23618 /* RACK reorder threshold (shift amount) */ 23619 RACK_OPTS_INC(tcp_rack_reord_thresh); 23620 if ((optval > 0) && (optval < 31)) 23621 rack->r_ctl.rc_reorder_shift = optval; 23622 else 23623 error = EINVAL; 23624 break; 23625 case TCP_RACK_REORD_FADE: 23626 /* Does reordering fade after ms time */ 23627 RACK_OPTS_INC(tcp_rack_reord_fade); 23628 rack->r_ctl.rc_reorder_fade = optval; 23629 break; 23630 case TCP_RACK_TLP_THRESH: 23631 /* 
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
		RACK_OPTS_INC(tcp_rack_tlp_thresh);
		if (optval)
			rack->r_ctl.rc_tlp_threshold = optval;
		else
			error = EINVAL;
		break;
	case TCP_BBR_USE_RACK_RR:
		RACK_OPTS_INC(tcp_rack_rr);
		if (optval)
			rack->use_rack_rr = 1;
		else
			rack->use_rack_rr = 0;
		break;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms i.e. rack-rtt + reord + N */
		RACK_OPTS_INC(tcp_rack_pkt_delay);
		rack->r_ctl.rc_pkt_delay = optval;
		break;
	case TCP_DELACK:
		RACK_OPTS_INC(tcp_rack_delayed_ack);
		if (optval == 0)
			tp->t_delayed_ack = 0;
		else
			tp->t_delayed_ack = 1;
		if (tp->t_flags & TF_DELACK) {
			tp->t_flags &= ~TF_DELACK;
			tp->t_flags |= TF_ACKNOW;
			NET_EPOCH_ENTER(et);
			rack_output(tp);
			NET_EPOCH_EXIT(et);
		}
		break;

	case TCP_BBR_RACK_RTT_USE:
		RACK_OPTS_INC(tcp_rack_rtt_use);
		if ((optval != USE_RTT_HIGH) &&
		    (optval != USE_RTT_LOW) &&
		    (optval != USE_RTT_AVG))
			error = EINVAL;
		else
			rack->r_ctl.rc_rate_sample_method = optval;
		break;
	case TCP_DATA_AFTER_CLOSE:
		RACK_OPTS_INC(tcp_data_after_close);
		if (optval)
			rack->rc_allow_data_af_clo = 1;
		else
			rack->rc_allow_data_af_clo = 0;
		break;
	default:
		break;
	}
	tcp_log_socket_option(tp, sopt_name, optval, error);
	return (error);
}

static void
rack_apply_deferred_options(struct tcp_rack *rack)
{
	struct deferred_opt_list *dol, *sdol;
	uint32_t s_optval;

	TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) {
		TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
		/* Disadvantage of deferral is you lose the error return */
		s_optval = (uint32_t)dol->optval;
		(void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval, NULL);
		free(dol, M_TCPDO);
	}
}

static void
rack_hw_tls_change(struct tcpcb *tp, int chg)
{
	/* Update HW tls state */
	struct tcp_rack *rack;

	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (chg)
		rack->r_ctl.fsb.hw_tls = 1;
	else
		rack->r_ctl.fsb.hw_tls = 0;
}

static int
rack_pru_options(struct tcpcb *tp, int flags)
{
	if (flags & PRUS_OOB)
		return (EOPNOTSUPP);
	return (0);
}

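/*
 * Early wake check, wired up as tfb_early_wake_check below: report
 * whether the HPTS pacer should wake this connection up now.  If the
 * pacing timer is armed we compare the current time against
 * rc_last_output_to; otherwise, if any RACK timer is armed, we compare
 * it against rc_timer_exp.
 */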
static bool
rack_wake_check(struct tcpcb *tp)
{
	struct tcp_rack *rack;
	struct timeval tv;
	uint32_t cts;

	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack->r_ctl.rc_hpts_flags) {
		cts = tcp_get_usecs(&tv);
		if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == PACE_PKT_OUTPUT) {
			/*
			 * Pacing timer is up, check if we are ready.
			 */
			if (TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to))
				return (true);
		} else if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) != 0) {
			/*
			 * A timer is up, check if we are ready.
			 */
			if (TSTMP_GEQ(cts, rack->r_ctl.rc_timer_exp))
				return (true);
		}
	}
	return (false);
}

static struct tcp_function_block __tcp_rack = {
	.tfb_tcp_block_name = __XSTRING(STACKNAME),
	.tfb_tcp_output = rack_output,
	.tfb_do_queued_segments = ctf_do_queued_segments,
	.tfb_do_segment_nounlock = rack_do_segment_nounlock,
	.tfb_tcp_do_segment = rack_do_segment,
	.tfb_tcp_ctloutput = rack_ctloutput,
	.tfb_tcp_fb_init = rack_init,
	.tfb_tcp_fb_fini = rack_fini,
	.tfb_tcp_timer_stop_all = rack_stopall,
	.tfb_tcp_rexmit_tmr = rack_remxt_tmr,
	.tfb_tcp_handoff_ok = rack_handoff_ok,
	.tfb_tcp_mtu_chg = rack_mtu_change,
	.tfb_pru_options = rack_pru_options,
	.tfb_hwtls_change = rack_hw_tls_change,
	.tfb_chg_query = rack_chg_query,
	.tfb_switch_failed = rack_switch_failed,
	.tfb_early_wake_check = rack_wake_check,
	.tfb_compute_pipe = rack_compute_pipe,
	.tfb_flags = TCP_FUNC_OUTPUT_CANDROP,
};

/*
 * rack_ctloutput() must drop the inpcb lock before performing copyin on
 * socket option arguments. When it re-acquires the lock after the copy, it
 * has to revalidate that the connection is still valid for the socket
 * option.
 */
static int
rack_set_sockopt(struct tcpcb *tp, struct sockopt *sopt)
{
	struct inpcb *inp = tptoinpcb(tp);
#ifdef INET
	struct ip *ip;
#endif
	struct tcp_rack *rack;
	struct tcp_hybrid_req hybrid;
	uint64_t loptval;
	int32_t error = 0, optval;

	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack == NULL) {
		INP_WUNLOCK(inp);
		return (EINVAL);
	}
#ifdef INET
	ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
#endif

	switch (sopt->sopt_level) {
#ifdef INET6
	case IPPROTO_IPV6:
		MPASS(inp->inp_vflag & INP_IPV6PROTO);
		switch (sopt->sopt_name) {
		case IPV6_USE_MIN_MTU:
			tcp6_use_min_mtu(tp);
			break;
		}
		INP_WUNLOCK(inp);
		return (0);
#endif
#ifdef INET
	case IPPROTO_IP:
		switch (sopt->sopt_name) {
		case IP_TOS:
			/*
			 * The DSCP codepoint has changed, update the fsb.
			 */
			ip->ip_tos = rack->rc_inp->inp_ip_tos;
			break;
		case IP_TTL:
			/*
			 * The TTL has changed, update the fsb.
			 */
			ip->ip_ttl = rack->rc_inp->inp_ip_ttl;
			break;
		}
		INP_WUNLOCK(inp);
		return (0);
#endif
#ifdef SO_PEERPRIO
	case SOL_SOCKET:
		switch (sopt->sopt_name) {
		case SO_PEERPRIO:		/* SC-URL:bs */
			/* Already read in and sanity checked in sosetopt(). */
			if (inp->inp_socket) {
				rack->client_bufferlvl = inp->inp_socket->so_peerprio;
				rack_client_buffer_level_set(rack);
			}
			break;
		}
		INP_WUNLOCK(inp);
		return (0);
#endif
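	/*
	 * The IPPROTO_TCP options below are what applications use to tune
	 * RACK on a per-connection basis.  A minimal userspace sketch
	 * (illustrative only; "s" is assumed to be a TCP socket already
	 * attached to this stack):
	 *
	 *	int on = 1;
	 *	if (setsockopt(s, IPPROTO_TCP, TCP_RACK_PACE_ALWAYS,
	 *	    &on, sizeof(on)) == -1)
	 *		warn("TCP_RACK_PACE_ALWAYS");
	 */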
	case IPPROTO_TCP:
		switch (sopt->sopt_name) {
		case TCP_RACK_TLP_REDUCE:		/* URL:tlp_reduce */
		/* Pacing related ones */
		case TCP_RACK_PACE_ALWAYS:		/* URL:pace_always */
		case TCP_BBR_RACK_INIT_RATE:		/* URL:irate */
		case TCP_BBR_IWINTSO:			/* URL:tso_iwin */
		case TCP_RACK_PACE_MIN_SEG:		/* URL:pace_min_seg */
		case TCP_RACK_PACE_MAX_SEG:		/* URL:pace_max_seg */
		case TCP_RACK_FORCE_MSEG:		/* URL:force_max_seg */
		case TCP_RACK_PACE_RATE_CA:		/* URL:pr_ca */
		case TCP_RACK_PACE_RATE_SS:		/* URL:pr_ss */
		case TCP_RACK_PACE_RATE_REC:		/* URL:pr_rec */
		case TCP_RACK_GP_INCREASE_CA:		/* URL:gp_inc_ca */
		case TCP_RACK_GP_INCREASE_SS:		/* URL:gp_inc_ss */
		case TCP_RACK_GP_INCREASE_REC:		/* URL:gp_inc_rec */
		case TCP_RACK_RR_CONF:			/* URL:rrr_conf */
		case TCP_BBR_HDWR_PACE:			/* URL:hdwrpace */
		case TCP_HDWR_RATE_CAP:			/* URL:hdwrcap boolean */
		case TCP_PACING_RATE_CAP:		/* URL:cap -- used by side-channel */
		case TCP_HDWR_UP_ONLY:			/* URL:uponly -- hardware pacing boolean */
		case TCP_RACK_PACING_BETA:		/* URL:pacing_beta */
		case TCP_RACK_PACING_BETA_ECN:		/* URL:pacing_beta_ecn */
		case TCP_RACK_PACE_TO_FILL:		/* URL:fillcw */
		case TCP_RACK_DGP_IN_REC:		/* URL:dgpinrec */
		/* End pacing related */
		case TCP_RXT_CLAMP:			/* URL:rxtclamp */
		case TCP_DELACK:			/* URL:delack (in base TCP i.e. tcp_hints along with cc etc) */
		case TCP_RACK_PRR_SENDALOT:		/* URL:prr_sendalot */
		case TCP_RACK_MIN_TO:			/* URL:min_to */
		case TCP_RACK_EARLY_SEG:		/* URL:early_seg */
		case TCP_RACK_REORD_THRESH:		/* URL:reord_thresh */
		case TCP_RACK_REORD_FADE:		/* URL:reord_fade */
		case TCP_RACK_TLP_THRESH:		/* URL:tlp_thresh */
		case TCP_RACK_PKT_DELAY:		/* URL:pkt_delay */
		case TCP_RACK_TLP_USE:			/* URL:tlp_use */
		case TCP_BBR_RACK_RTT_USE:		/* URL:rttuse */
		case TCP_BBR_USE_RACK_RR:		/* URL:rackrr */
		case TCP_RACK_DO_DETECTION:		/* URL:detect */
		case TCP_NO_PRR:			/* URL:noprr */
		case TCP_TIMELY_DYN_ADJ:		/* URL:dynamic */
		case TCP_DATA_AFTER_CLOSE:		/* no URL */
		case TCP_RACK_NONRXT_CFG_RATE:		/* URL:nonrxtcr */
		case TCP_SHARED_CWND_ENABLE:		/* URL:scwnd */
		case TCP_RACK_MBUF_QUEUE:		/* URL:mqueue */
		case TCP_RACK_NO_PUSH_AT_MAX:		/* URL:npush */
		case TCP_SHARED_CWND_TIME_LIMIT:	/* URL:lscwnd */
		case TCP_RACK_PROFILE:			/* URL:profile */
		case TCP_HYBRID_PACING:			/* URL:hybrid */
		case TCP_USE_CMP_ACKS:			/* URL:cmpack */
		case TCP_RACK_ABC_VAL:			/* URL:labc */
		case TCP_REC_ABC_VAL:			/* URL:reclabc */
		case TCP_RACK_MEASURE_CNT:		/* URL:measurecnt */
		case TCP_DEFER_OPTIONS:			/* URL:defer */
		case TCP_RACK_DSACK_OPT:		/* URL:dsack */
		case TCP_RACK_TIMER_SLOP:		/* URL:timer_slop */
		case TCP_RACK_ENABLE_HYSTART:		/* URL:hystart */
		case TCP_RACK_SET_RXT_OPTIONS:		/* URL:rxtsz */
		case TCP_RACK_HI_BETA:			/* URL:hibeta */
		case TCP_RACK_SPLIT_LIMIT:		/* URL:split */
		case TCP_RACK_PACING_DIVISOR:		/* URL:divisor */
		case TCP_PACING_DND:			/* URL:dnd */
			goto process_opt;
			break;
		default:
			/* Filter off all unknown options to the base stack */
			return (tcp_default_ctloutput(tp, sopt));
			break;
		}

	default:
		INP_WUNLOCK(inp);
		return (0);
	}
process_opt:
	INP_WUNLOCK(inp);
	if (sopt->sopt_name == TCP_PACING_RATE_CAP) {
		error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval));
		/*
		 * We truncate it down to 32 bits for the socket-option
		 * trace; this means rates > 34Gbps (2^32 bytes/sec is
		 * roughly 34.4 Gbps) won't show right, but that's probably ok.
		 */
		optval = (uint32_t)loptval;
	} else if (sopt->sopt_name == TCP_HYBRID_PACING) {
		error = sooptcopyin(sopt, &hybrid, sizeof(hybrid), sizeof(hybrid));
	} else {
		error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
		/* Save it in 64 bit form too */
		loptval = optval;
	}
	if (error)
		return (error);
	INP_WLOCK(inp);
	if (tp->t_fb != &__tcp_rack) {
		INP_WUNLOCK(inp);
		return (ENOPROTOOPT);
	}
	if (rack->defer_options && (rack->gp_ready == 0) &&
	    (sopt->sopt_name != TCP_DEFER_OPTIONS) &&
	    (sopt->sopt_name != TCP_HYBRID_PACING) &&
	    (sopt->sopt_name != TCP_RACK_PACING_BETA) &&
	    (sopt->sopt_name != TCP_RACK_SET_RXT_OPTIONS) &&
	    (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) &&
	    (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) {
		/* Options are being deferred */
		if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) {
			INP_WUNLOCK(inp);
			return (0);
		} else {
			/* No memory to defer, fail */
			INP_WUNLOCK(inp);
			return (ENOMEM);
		}
	}
	error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval, &hybrid);
	INP_WUNLOCK(inp);
	return (error);
}

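/*
 * Fill in a tcp_info snapshot for the TCP_INFO getsockopt below.  Rack
 * keeps its RTT state in microseconds, so t_srtt, t_rttvar and t_rxtcur
 * can be copied out directly; only t_rcvtime still needs the
 * ticks-to-microseconds conversion.
 */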
static void
rack_fill_info(struct tcpcb *tp, struct tcp_info *ti)
{

	INP_WLOCK_ASSERT(tptoinpcb(tp));
	bzero(ti, sizeof(*ti));

	ti->tcpi_state = tp->t_state;
	if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
		ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tp->t_flags & TF_SACK_PERMIT)
		ti->tcpi_options |= TCPI_OPT_SACK;
	if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
		ti->tcpi_options |= TCPI_OPT_WSCALE;
		ti->tcpi_snd_wscale = tp->snd_scale;
		ti->tcpi_rcv_wscale = tp->rcv_scale;
	}
	if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))
		ti->tcpi_options |= TCPI_OPT_ECN;
	if (tp->t_flags & TF_FASTOPEN)
		ti->tcpi_options |= TCPI_OPT_TFO;
	/* still kept in ticks is t_rcvtime */
	ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick;
	/* Since we hold everything in precise useconds this is easy */
	ti->tcpi_rtt = tp->t_srtt;
	ti->tcpi_rttvar = tp->t_rttvar;
	ti->tcpi_rto = tp->t_rxtcur;
	ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
	ti->tcpi_snd_cwnd = tp->snd_cwnd;
	/*
	 * FreeBSD-specific extension fields for tcp_info.
	 */
	ti->tcpi_rcv_space = tp->rcv_wnd;
	ti->tcpi_rcv_nxt = tp->rcv_nxt;
	ti->tcpi_snd_wnd = tp->snd_wnd;
	ti->tcpi_snd_bwnd = 0;		/* Unused, kept for compat. */
	ti->tcpi_snd_nxt = tp->snd_nxt;
	ti->tcpi_snd_mss = tp->t_maxseg;
	ti->tcpi_rcv_mss = tp->t_maxseg;
	ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
	ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
	ti->tcpi_snd_zerowin = tp->t_sndzerowin;
	ti->tcpi_total_tlp = tp->t_sndtlppack;
	ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte;
#ifdef NETFLIX_STATS
	memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo));
#endif
#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE) {
		ti->tcpi_options |= TCPI_OPT_TOE;
		tcp_offload_tcp_info(tp, ti);
	}
#endif
}

static int
rack_get_sockopt(struct tcpcb *tp, struct sockopt *sopt)
{
	struct inpcb *inp = tptoinpcb(tp);
	struct tcp_rack *rack;
	int32_t error, optval;
	uint64_t val, loptval;
	struct tcp_info ti;
	/*
	 * Because all our options are either boolean or an int, we can just
	 * pull everything into optval and then unlock and copy. If we ever
	 * add an option that is not an int, then this will have quite an
	 * impact on this routine.
	 */
	error = 0;
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack == NULL) {
		INP_WUNLOCK(inp);
		return (EINVAL);
	}
	switch (sopt->sopt_name) {
	case TCP_INFO:
		/* First get the info filled */
		rack_fill_info(tp, &ti);
		/* Fix up the rtt related fields if needed */
		INP_WUNLOCK(inp);
		error = sooptcopyout(sopt, &ti, sizeof ti);
		return (error);
	/*
	 * Beta is the congestion control value for NewReno that influences how
	 * much of a backoff happens when loss is detected. It is normally set
	 * to 50 for 50%, i.e. the cwnd is reduced to 50% of its previous value
	 * when you exit recovery.
	 */
	case TCP_RACK_PACING_BETA:
		if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0)
			error = EINVAL;
		else if (rack->rc_pacing_cc_set == 0)
			optval = rack->r_ctl.rc_saved_beta.beta;
		else {
			/*
			 * Reach out into the CC data and report back what
			 * I have previously set. Yeah it looks hackish but
			 * we don't want to report the saved values.
			 */
			if (tp->t_ccv.cc_data)
				optval = ((struct newreno *)tp->t_ccv.cc_data)->beta;
			else
				error = EINVAL;
		}
		break;
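	/*
	 * For example, with the default beta of 50, a connection whose cwnd
	 * was 100 segments when recovery began exits recovery with a cwnd
	 * of about 50 segments; with the ECN beta of 80 described below, an
	 * ECN-triggered reduction leaves roughly 80 segments.
	 */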
	/*
	 * Beta_ecn is the congestion control value for NewReno that influences how
	 * much of a backoff happens when an ECN mark is detected. It is normally set
	 * to 80 for 80%, i.e. the cwnd is reduced by 20% of its previous value when
	 * you exit recovery. Note that classic ECN has a beta of 50, it is only
	 * ABE ECN that uses this "less" value, but we do too with pacing :)
	 */

	case TCP_RACK_PACING_BETA_ECN:
		if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0)
			error = EINVAL;
		else if (rack->rc_pacing_cc_set == 0)
			optval = rack->r_ctl.rc_saved_beta.beta_ecn;
		else {
			/*
			 * Reach out into the CC data and report back what
			 * I have previously set. Yeah it looks hackish but
			 * we don't want to report the saved values.
			 */
			if (tp->t_ccv.cc_data)
				optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn;
			else
				error = EINVAL;
		}
		break;
	case TCP_RACK_DSACK_OPT:
		optval = 0;
		if (rack->rc_rack_tmr_std_based) {
			optval |= 1;
		}
		if (rack->rc_rack_use_dsack) {
			optval |= 2;
		}
		break;
	case TCP_RACK_ENABLE_HYSTART:
	{
		if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) {
			optval = RACK_HYSTART_ON;
			if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND)
				optval = RACK_HYSTART_ON_W_SC;
			if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH)
				optval = RACK_HYSTART_ON_W_SC_C;
		} else {
			optval = RACK_HYSTART_OFF;
		}
	}
		break;
	case TCP_RACK_DGP_IN_REC:
		optval = rack->r_ctl.full_dgp_in_rec;
		break;
	case TCP_RACK_HI_BETA:
		optval = rack->rack_hibeta;
		break;
	case TCP_RXT_CLAMP:
		optval = rack->r_ctl.saved_rxt_clamp_val;
		break;
	case TCP_DEFER_OPTIONS:
		optval = rack->defer_options;
		break;
	case TCP_RACK_MEASURE_CNT:
		optval = rack->r_ctl.req_measurements;
		break;
	case TCP_REC_ABC_VAL:
		optval = rack->r_use_labc_for_rec;
		break;
	case TCP_RACK_ABC_VAL:
		optval = rack->rc_labc;
		break;
	case TCP_HDWR_UP_ONLY:
		optval = rack->r_up_only;
		break;
	case TCP_PACING_RATE_CAP:
		loptval = rack->r_ctl.bw_rate_cap;
		break;
	case TCP_RACK_PROFILE:
		/* You cannot retrieve a profile, it is write only */
		error = EINVAL;
		break;
	case TCP_HYBRID_PACING:
		/* You cannot retrieve hybrid pacing information, it is write only */
		error = EINVAL;
		break;
	case TCP_USE_CMP_ACKS:
		optval = rack->r_use_cmp_ack;
		break;
	case TCP_RACK_PACE_TO_FILL:
		optval = rack->rc_pace_to_cwnd;
		if (optval && rack->r_fill_less_agg)
			optval++;
		break;
	case TCP_RACK_NO_PUSH_AT_MAX:
		optval = rack->r_ctl.rc_no_push_at_mrtt;
		break;
	case TCP_SHARED_CWND_ENABLE:
		optval = rack->rack_enable_scwnd;
		break;
	case TCP_RACK_NONRXT_CFG_RATE:
		optval = rack->rack_rec_nonrxt_use_cr;
		break;
	case TCP_NO_PRR:
		if (rack->rack_no_prr == 1)
			optval = 1;
		else if (rack->no_prr_addback == 1)
			optval = 2;
		else
			optval = 0;
		break;
	case TCP_RACK_DO_DETECTION:
		optval = rack->do_detection;
		break;
	case TCP_RACK_MBUF_QUEUE:
		/* Now do we use the LRO mbuf-queue feature */
		optval = rack->r_mbuf_queue;
		break;
	case TCP_TIMELY_DYN_ADJ:
		optval = rack->rc_gp_dyn_mul;
		break;
	case TCP_BBR_IWINTSO:
		optval = rack->rc_init_win;
		break;
	case TCP_RACK_TLP_REDUCE:
		/* RACK TLP cwnd reduction (bool) */
		optval = rack->r_ctl.rc_tlp_cwnd_reduce;
		break;
	case TCP_BBR_RACK_INIT_RATE:
		val = rack->r_ctl.init_rate;
		/* convert to kbits per sec */
		val *= 8;
		val /= 1000;
		optval = (uint32_t)val;
		break;
	case TCP_RACK_FORCE_MSEG:
		optval = rack->rc_force_max_seg;
		break;
	case TCP_RACK_PACE_MIN_SEG:
		optval = rack->r_ctl.rc_user_set_min_segs;
		break;
	case TCP_RACK_PACE_MAX_SEG:
		/* Max segments in a pace */
		optval = rack->rc_user_set_max_segs;
		break;
	case TCP_RACK_PACE_ALWAYS:
		/* Use the always pace method */
		optval = rack->rc_always_pace;
		break;
	case TCP_RACK_PRR_SENDALOT:
		/* Allow PRR to send more than one seg */
		optval = rack->r_ctl.rc_prr_sendalot;
		break;
	case TCP_RACK_MIN_TO:
		/* Minimum time between rack t-o's in ms */
		optval = rack->r_ctl.rc_min_to;
		break;
	case TCP_RACK_SPLIT_LIMIT:
		optval = rack->r_ctl.rc_split_limit;
		break;
	case TCP_RACK_EARLY_SEG:
		/* If early recovery max segments */
		optval = rack->r_ctl.rc_early_recovery_segs;
		break;
	case TCP_RACK_REORD_THRESH:
		/* RACK reorder threshold (shift amount) */
		optval = rack->r_ctl.rc_reorder_shift;
		break;
	case TCP_RACK_REORD_FADE:
		/* Does reordering fade after ms time */
		optval = rack->r_ctl.rc_reorder_fade;
		break;
	case TCP_BBR_USE_RACK_RR:
		/* Do we use the rack cheat for rxt */
		optval = rack->use_rack_rr;
		break;
	case TCP_RACK_RR_CONF:
		optval = rack->r_rr_config;
		break;
	case TCP_HDWR_RATE_CAP:
		optval = rack->r_rack_hw_rate_caps;
		break;
	case TCP_BBR_HDWR_PACE:
		optval = rack->rack_hdw_pace_ena;
		break;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
		optval = rack->r_ctl.rc_tlp_threshold;
		break;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms i.e. rack-rtt + reord + N */
		optval = rack->r_ctl.rc_pkt_delay;
		break;
	case TCP_RACK_TLP_USE:
		optval = rack->rack_tlp_threshold_use;
		break;
	case TCP_PACING_DND:
		optval = rack->rc_pace_dnd;
		break;
	case TCP_RACK_PACE_RATE_CA:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
		break;
	case TCP_RACK_PACE_RATE_SS:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
		break;
	case TCP_RACK_PACE_RATE_REC:
		optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
		break;
	case TCP_RACK_GP_INCREASE_SS:
		optval = rack->r_ctl.rack_per_of_gp_ss;
		break;
	case TCP_RACK_GP_INCREASE_CA:
		optval = rack->r_ctl.rack_per_of_gp_ca;
		break;
	case TCP_RACK_PACING_DIVISOR:
		optval = rack->r_ctl.pace_len_divisor;
		break;
	case TCP_BBR_RACK_RTT_USE:
		optval = rack->r_ctl.rc_rate_sample_method;
		break;
	case TCP_DELACK:
		optval = tp->t_delayed_ack;
		break;
	case TCP_DATA_AFTER_CLOSE:
		optval = rack->rc_allow_data_af_clo;
		break;
	case TCP_SHARED_CWND_TIME_LIMIT:
		optval = rack->r_limit_scw;
		break;
	case TCP_RACK_TIMER_SLOP:
		optval = rack->r_ctl.timer_slop;
		break;
	default:
		return (tcp_default_ctloutput(tp, sopt));
		break;
	}
	INP_WUNLOCK(inp);
	if (error == 0) {
		if (sopt->sopt_name == TCP_PACING_RATE_CAP)
			error = sooptcopyout(sopt, &loptval, sizeof loptval);
		else
			error = sooptcopyout(sopt, &optval, sizeof optval);
	}
	return (error);
}

static int
rack_ctloutput(struct tcpcb *tp, struct sockopt *sopt)
{
	if (sopt->sopt_dir == SOPT_SET) {
		return (rack_set_sockopt(tp, sopt));
	} else if (sopt->sopt_dir == SOPT_GET) {
		return (rack_get_sockopt(tp, sopt));
	} else {
		panic("%s: sopt_dir %d", __func__, sopt->sopt_dir);
	}
}

static const char *rack_stack_names[] = {
	__XSTRING(STACKNAME),
#ifdef STACKALIAS
	__XSTRING(STACKALIAS),
#endif
};

static int
rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
{
	memset(mem, 0, size);
	return (0);
}

static void
rack_dtor(void *mem, int32_t size, void *arg)
{

}

static bool rack_mod_inited = false;

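/*
 * Module event handler.  On MOD_LOAD the UMA zones and sysctl tree are
 * created and the stack is registered under the names in
 * rack_stack_names[].  Once loaded, the stack can typically be selected
 * system-wide via the net.inet.tcp.functions_default sysctl, or per
 * socket with the TCP_FUNCTION_BLK socket option, e.g. (a sketch
 * assuming the stock stack name of "rack"):
 *
 *	struct tcp_function_set tfs;
 *
 *	memset(&tfs, 0, sizeof(tfs));
 *	strlcpy(tfs.function_set_name, "rack", sizeof(tfs.function_set_name));
 *	setsockopt(s, IPPROTO_TCP, TCP_FUNCTION_BLK, &tfs, sizeof(tfs));
 */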
static int
tcp_addrack(module_t mod, int32_t type, void *data)
{
	int32_t err = 0;
	int num_stacks;

	switch (type) {
	case MOD_LOAD:
		rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
		    sizeof(struct rack_sendmap),
		    rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);

		rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
		    sizeof(struct tcp_rack),
		    rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);

		sysctl_ctx_init(&rack_sysctl_ctx);
		rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
		    OID_AUTO,
#ifdef STACKALIAS
		    __XSTRING(STACKALIAS),
#else
		    __XSTRING(STACKNAME),
#endif
		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
		    "");
		if (rack_sysctl_root == NULL) {
			printf("Failed to add sysctl node\n");
			err = EFAULT;
			goto free_uma;
		}
		rack_init_sysctls();
		num_stacks = nitems(rack_stack_names);
		err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
		    rack_stack_names, &num_stacks);
		if (err) {
			printf("Failed to register %s stack name for "
			    "%s module\n", rack_stack_names[num_stacks],
			    __XSTRING(MODNAME));
			sysctl_ctx_free(&rack_sysctl_ctx);
free_uma:
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			rack_counter_destroy();
			printf("Failed to register rack module -- err:%d\n", err);
			return (err);
		}
		tcp_lro_reg_mbufq();
		rack_mod_inited = true;
		break;
	case MOD_QUIESCE:
		err = deregister_tcp_functions(&__tcp_rack, true, false);
		break;
	case MOD_UNLOAD:
		err = deregister_tcp_functions(&__tcp_rack, false, true);
		if (err == EBUSY)
			break;
		if (rack_mod_inited) {
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			sysctl_ctx_free(&rack_sysctl_ctx);
			rack_counter_destroy();
			rack_mod_inited = false;
		}
		tcp_lro_dereg_mbufq();
		err = 0;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (err);
}

static moduledata_t tcp_rack = {
	.name = __XSTRING(MODNAME),
	.evhand = tcp_addrack,
	.priv = 0
};

MODULE_VERSION(MODNAME, 1);
DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);

#endif /* #if defined(INET) || defined(INET6) */